summaryrefslogtreecommitdiff
path: root/libavfilter
diff options
context:
space:
mode:
Diffstat (limited to 'libavfilter')
-rw-r--r--libavfilter/Makefile293
-rw-r--r--libavfilter/aeval.c489
-rw-r--r--libavfilter/af_acrusher.c362
-rw-r--r--libavfilter/af_adelay.c293
-rw-r--r--libavfilter/af_aecho.c364
-rw-r--r--libavfilter/af_aemphasis.c369
-rw-r--r--libavfilter/af_afade.c669
-rw-r--r--libavfilter/af_afftfilt.c403
-rw-r--r--libavfilter/af_aformat.c56
-rw-r--r--libavfilter/af_agate.c440
-rw-r--r--libavfilter/af_alimiter.c370
-rw-r--r--libavfilter/af_amerge.c366
-rw-r--r--libavfilter/af_amix.c241
-rw-r--r--libavfilter/af_anequalizer.c762
-rw-r--r--libavfilter/af_anull.c26
-rw-r--r--libavfilter/af_apad.c161
-rw-r--r--libavfilter/af_aphaser.c301
-rw-r--r--libavfilter/af_apulsator.c257
-rw-r--r--libavfilter/af_aresample.c351
-rw-r--r--libavfilter/af_asetnsamples.c195
-rw-r--r--libavfilter/af_asetrate.c118
-rw-r--r--libavfilter/af_ashowinfo.c57
-rw-r--r--libavfilter/af_astats.c531
-rw-r--r--libavfilter/af_asyncts.c47
-rw-r--r--libavfilter/af_atempo.c1202
-rw-r--r--libavfilter/af_biquads.c632
-rw-r--r--libavfilter/af_bs2b.c38
-rw-r--r--libavfilter/af_channelmap.c66
-rw-r--r--libavfilter/af_channelsplit.c57
-rw-r--r--libavfilter/af_chorus.c381
-rw-r--r--libavfilter/af_compand.c175
-rw-r--r--libavfilter/af_compensationdelay.c198
-rw-r--r--libavfilter/af_crystalizer.c153
-rw-r--r--libavfilter/af_dcshift.c167
-rw-r--r--libavfilter/af_dynaudnorm.c754
-rw-r--r--libavfilter/af_earwax.c174
-rw-r--r--libavfilter/af_extrastereo.c128
-rw-r--r--libavfilter/af_firequalizer.c838
-rw-r--r--libavfilter/af_flanger.c246
-rw-r--r--libavfilter/af_hdcd.c1731
-rw-r--r--libavfilter/af_join.c137
-rw-r--r--libavfilter/af_ladspa.c748
-rw-r--r--libavfilter/af_loudnorm.c920
-rw-r--r--libavfilter/af_pan.c442
-rw-r--r--libavfilter/af_replaygain.c615
-rw-r--r--libavfilter/af_resample.c57
-rw-r--r--libavfilter/af_rubberband.c271
-rw-r--r--libavfilter/af_sidechaincompress.c450
-rw-r--r--libavfilter/af_silencedetect.c215
-rw-r--r--libavfilter/af_silenceremove.c516
-rw-r--r--libavfilter/af_sofalizer.c1234
-rw-r--r--libavfilter/af_stereotools.c305
-rw-r--r--libavfilter/af_stereowiden.c162
-rw-r--r--libavfilter/af_tremolo.c170
-rw-r--r--libavfilter/af_vibrato.c210
-rw-r--r--libavfilter/af_volume.c253
-rw-r--r--libavfilter/af_volume.h48
-rw-r--r--libavfilter/af_volumedetect.c166
-rw-r--r--libavfilter/all_channel_layouts.inc68
-rw-r--r--libavfilter/allfilters.c275
-rw-r--r--libavfilter/asink_anullsink.c10
-rw-r--r--libavfilter/asrc_anoisesrc.c207
-rw-r--r--libavfilter/asrc_anullsrc.c119
-rw-r--r--libavfilter/asrc_flite.c287
-rw-r--r--libavfilter/asrc_sine.c282
-rw-r--r--libavfilter/audio.c65
-rw-r--r--libavfilter/audio.h30
-rw-r--r--libavfilter/avf_abitscope.c250
-rw-r--r--libavfilter/avf_ahistogram.c413
-rw-r--r--libavfilter/avf_aphasemeter.c280
-rw-r--r--libavfilter/avf_avectorscope.c354
-rw-r--r--libavfilter/avf_concat.c425
-rw-r--r--libavfilter/avf_showcqt.c1577
-rw-r--r--libavfilter/avf_showcqt.h124
-rw-r--r--libavfilter/avf_showfreqs.c525
-rw-r--r--libavfilter/avf_showspectrum.c1314
-rw-r--r--libavfilter/avf_showvolume.c364
-rw-r--r--libavfilter/avf_showwaves.c772
-rw-r--r--libavfilter/avfilter.c1155
-rw-r--r--libavfilter/avfilter.h425
-rw-r--r--libavfilter/avfiltergraph.c761
-rw-r--r--libavfilter/avfiltergraph.h9
-rw-r--r--libavfilter/avfilterres.rc55
-rw-r--r--libavfilter/bbox.c75
-rw-r--r--libavfilter/bbox.h44
-rw-r--r--libavfilter/blend.h79
-rw-r--r--libavfilter/bufferqueue.h121
-rw-r--r--libavfilter/buffersink.c347
-rw-r--r--libavfilter/buffersink.h100
-rw-r--r--libavfilter/buffersrc.c234
-rw-r--r--libavfilter/buffersrc.h66
-rw-r--r--libavfilter/bwdif.h72
-rw-r--r--libavfilter/colorspacedsp.c147
-rw-r--r--libavfilter/colorspacedsp.h83
-rw-r--r--libavfilter/colorspacedsp_template.c342
-rw-r--r--libavfilter/colorspacedsp_yuv2yuv_template.c98
-rw-r--r--libavfilter/deshake.h107
-rw-r--r--libavfilter/deshake_opencl.c200
-rw-r--r--libavfilter/deshake_opencl.h45
-rw-r--r--libavfilter/deshake_opencl_kernel.h225
-rw-r--r--libavfilter/drawutils.c655
-rw-r--r--libavfilter/drawutils.h126
-rw-r--r--libavfilter/dualinput.c90
-rw-r--r--libavfilter/dualinput.h46
-rw-r--r--libavfilter/ebur128.c769
-rw-r--r--libavfilter/ebur128.h296
-rw-r--r--libavfilter/f_bench.c151
-rw-r--r--libavfilter/f_drawgraph.c501
-rw-r--r--libavfilter/f_ebur128.c950
-rw-r--r--libavfilter/f_interleave.c261
-rw-r--r--libavfilter/f_loop.c382
-rw-r--r--libavfilter/f_metadata.c415
-rw-r--r--libavfilter/f_perms.c178
-rw-r--r--libavfilter/f_realtime.c132
-rw-r--r--libavfilter/f_reverse.c251
-rw-r--r--libavfilter/f_select.c532
-rw-r--r--libavfilter/f_sendcmd.c587
-rw-r--r--libavfilter/f_sidedata.c180
-rw-r--r--libavfilter/f_streamselect.c353
-rw-r--r--libavfilter/f_zmq.c275
-rw-r--r--libavfilter/fifo.c20
-rw-r--r--libavfilter/filters.h137
-rw-r--r--libavfilter/formats.c443
-rw-r--r--libavfilter/formats.h99
-rw-r--r--libavfilter/framepool.c296
-rw-r--r--libavfilter/framepool.h118
-rw-r--r--libavfilter/framequeue.c150
-rw-r--r--libavfilter/framequeue.h173
-rw-r--r--libavfilter/framesync.c343
-rw-r--r--libavfilter/framesync.h297
-rw-r--r--libavfilter/generate_wave_table.c82
-rw-r--r--libavfilter/generate_wave_table.h35
-rw-r--r--libavfilter/gradfun.h16
-rw-r--r--libavfilter/graphdump.c166
-rw-r--r--libavfilter/graphparser.c187
-rw-r--r--libavfilter/hermite.h45
-rw-r--r--libavfilter/interlace.h9
-rw-r--r--libavfilter/internal.h229
-rw-r--r--libavfilter/lavfutils.c107
-rw-r--r--libavfilter/lavfutils.h43
-rw-r--r--libavfilter/log2_tab.c1
-rw-r--r--libavfilter/lswsutils.c50
-rw-r--r--libavfilter/lswsutils.h38
-rw-r--r--libavfilter/maskedmerge.h45
-rw-r--r--libavfilter/motion_estimation.c432
-rw-r--r--libavfilter/motion_estimation.h87
-rw-r--r--libavfilter/opencl_allkernels.c41
-rw-r--r--libavfilter/opencl_allkernels.h29
-rw-r--r--libavfilter/psnr.h33
-rw-r--r--libavfilter/pthread.c18
-rw-r--r--libavfilter/removegrain.h45
-rw-r--r--libavfilter/scale.c152
-rw-r--r--libavfilter/scale.h28
-rw-r--r--libavfilter/setpts.c158
-rw-r--r--libavfilter/settb.c73
-rw-r--r--libavfilter/signature.h569
-rw-r--r--libavfilter/signature_lookup.c573
-rw-r--r--libavfilter/split.c96
-rw-r--r--libavfilter/src_movie.c691
-rw-r--r--libavfilter/ssim.h36
-rw-r--r--libavfilter/stereo3d.h36
-rw-r--r--libavfilter/tests/.gitignore3
-rw-r--r--libavfilter/tests/drawutils.c56
-rw-r--r--libavfilter/tests/filtfmts.c82
-rw-r--r--libavfilter/tests/formats.c68
-rw-r--r--libavfilter/tests/integral.c90
-rw-r--r--libavfilter/thread.h8
-rw-r--r--libavfilter/tinterlace.h62
-rw-r--r--libavfilter/transform.c191
-rw-r--r--libavfilter/transform.h127
-rw-r--r--libavfilter/trim.c109
-rw-r--r--libavfilter/unsharp.h85
-rw-r--r--libavfilter/unsharp_opencl.c422
-rw-r--r--libavfilter/unsharp_opencl.h34
-rw-r--r--libavfilter/unsharp_opencl_kernel.h342
-rw-r--r--libavfilter/vaf_spectrumsynth.c543
-rw-r--r--libavfilter/version.h20
-rw-r--r--libavfilter/vf_alphamerge.c217
-rw-r--r--libavfilter/vf_aspect.c184
-rw-r--r--libavfilter/vf_atadenoise.c434
-rw-r--r--libavfilter/vf_avgblur.c326
-rw-r--r--libavfilter/vf_bbox.c134
-rw-r--r--libavfilter/vf_bitplanenoise.c226
-rw-r--r--libavfilter/vf_blackdetect.c211
-rw-r--r--libavfilter/vf_blackframe.c82
-rw-r--r--libavfilter/vf_blend.c675
-rw-r--r--libavfilter/vf_boxblur.c252
-rw-r--r--libavfilter/vf_bwdif.c584
-rw-r--r--libavfilter/vf_chromakey.c208
-rw-r--r--libavfilter/vf_ciescope.c1512
-rw-r--r--libavfilter/vf_codecview.c322
-rw-r--r--libavfilter/vf_colorbalance.c214
-rw-r--r--libavfilter/vf_colorchannelmixer.c362
-rw-r--r--libavfilter/vf_colorkey.c169
-rw-r--r--libavfilter/vf_colorlevels.c256
-rw-r--r--libavfilter/vf_colormatrix.c520
-rw-r--r--libavfilter/vf_colorspace.c1197
-rw-r--r--libavfilter/vf_convolution.c847
-rw-r--r--libavfilter/vf_copy.c40
-rw-r--r--libavfilter/vf_coreimage.m688
-rw-r--r--libavfilter/vf_cover_rect.c260
-rw-r--r--libavfilter/vf_crop.c226
-rw-r--r--libavfilter/vf_cropdetect.c167
-rw-r--r--libavfilter/vf_curves.c696
-rw-r--r--libavfilter/vf_datascope.c421
-rw-r--r--libavfilter/vf_dctdnoiz.c778
-rw-r--r--libavfilter/vf_deband.c470
-rw-r--r--libavfilter/vf_decimate.c410
-rw-r--r--libavfilter/vf_deinterlace_qsv.c21
-rw-r--r--libavfilter/vf_deinterlace_vaapi.c634
-rw-r--r--libavfilter/vf_dejudder.c187
-rw-r--r--libavfilter/vf_delogo.c185
-rw-r--r--libavfilter/vf_deshake.c579
-rw-r--r--libavfilter/vf_detelecine.c386
-rw-r--r--libavfilter/vf_displace.c395
-rw-r--r--libavfilter/vf_drawbox.c414
-rw-r--r--libavfilter/vf_drawtext.c1457
-rw-r--r--libavfilter/vf_edgedetect.c399
-rw-r--r--libavfilter/vf_elbg.c264
-rw-r--r--libavfilter/vf_eq.c389
-rw-r--r--libavfilter/vf_eq.h105
-rw-r--r--libavfilter/vf_extractplanes.c408
-rw-r--r--libavfilter/vf_fade.c332
-rw-r--r--libavfilter/vf_fftfilt.c346
-rw-r--r--libavfilter/vf_field.c111
-rw-r--r--libavfilter/vf_fieldhint.c284
-rw-r--r--libavfilter/vf_fieldmatch.c988
-rw-r--r--libavfilter/vf_fieldorder.c106
-rw-r--r--libavfilter/vf_find_rect.c305
-rw-r--r--libavfilter/vf_format.c75
-rw-r--r--libavfilter/vf_fps.c103
-rw-r--r--libavfilter/vf_framepack.c41
-rw-r--r--libavfilter/vf_framerate.c731
-rw-r--r--libavfilter/vf_framestep.c100
-rw-r--r--libavfilter/vf_frei0r.c185
-rw-r--r--libavfilter/vf_fspp.c693
-rw-r--r--libavfilter/vf_fspp.h97
-rw-r--r--libavfilter/vf_gblur.c367
-rw-r--r--libavfilter/vf_geq.c287
-rw-r--r--libavfilter/vf_gradfun.c58
-rw-r--r--libavfilter/vf_hflip.c152
-rw-r--r--libavfilter/vf_histeq.c283
-rw-r--r--libavfilter/vf_histogram.c382
-rw-r--r--libavfilter/vf_hqdn3d.c88
-rw-r--r--libavfilter/vf_hqdn3d.h18
-rw-r--r--libavfilter/vf_hqx.c566
-rw-r--r--libavfilter/vf_hue.c453
-rw-r--r--libavfilter/vf_hwdownload.c14
-rw-r--r--libavfilter/vf_hwupload.c20
-rw-r--r--libavfilter/vf_hwupload_cuda.c95
-rw-r--r--libavfilter/vf_hysteresis.c401
-rw-r--r--libavfilter/vf_idet.c452
-rw-r--r--libavfilter/vf_idet.h80
-rw-r--r--libavfilter/vf_il.c213
-rw-r--r--libavfilter/vf_interlace.c75
-rw-r--r--libavfilter/vf_kerndeint.c319
-rw-r--r--libavfilter/vf_lenscorrection.c231
-rw-r--r--libavfilter/vf_libopencv.c88
-rw-r--r--libavfilter/vf_lut.c431
-rw-r--r--libavfilter/vf_lut2.c379
-rw-r--r--libavfilter/vf_lut3d.c820
-rw-r--r--libavfilter/vf_maskedclamp.c345
-rw-r--r--libavfilter/vf_maskedmerge.c313
-rw-r--r--libavfilter/vf_mcdeint.c316
-rw-r--r--libavfilter/vf_mergeplanes.c318
-rw-r--r--libavfilter/vf_mestimate.c377
-rw-r--r--libavfilter/vf_midequalizer.c390
-rw-r--r--libavfilter/vf_minterpolate.c1242
-rw-r--r--libavfilter/vf_mpdecimate.c255
-rw-r--r--libavfilter/vf_neighbor.c323
-rw-r--r--libavfilter/vf_nlmeans.c551
-rw-r--r--libavfilter/vf_nnedi.c1211
-rw-r--r--libavfilter/vf_noise.c351
-rw-r--r--libavfilter/vf_noise.h64
-rw-r--r--libavfilter/vf_null.c23
-rw-r--r--libavfilter/vf_ocr.c151
-rw-r--r--libavfilter/vf_overlay.c881
-rw-r--r--libavfilter/vf_owdenoise.c377
-rw-r--r--libavfilter/vf_pad.c263
-rw-r--r--libavfilter/vf_palettegen.c579
-rw-r--r--libavfilter/vf_paletteuse.c1086
-rw-r--r--libavfilter/vf_perspective.c525
-rw-r--r--libavfilter/vf_phase.c333
-rw-r--r--libavfilter/vf_pixdesctest.c38
-rw-r--r--libavfilter/vf_pp.c196
-rw-r--r--libavfilter/vf_pp7.c406
-rw-r--r--libavfilter/vf_pp7.h46
-rw-r--r--libavfilter/vf_premultiply.c409
-rw-r--r--libavfilter/vf_psnr.c418
-rw-r--r--libavfilter/vf_pullup.c776
-rw-r--r--libavfilter/vf_pullup.h71
-rw-r--r--libavfilter/vf_qp.c183
-rw-r--r--libavfilter/vf_random.c137
-rw-r--r--libavfilter/vf_readeia608.c268
-rw-r--r--libavfilter/vf_readvitc.c258
-rw-r--r--libavfilter/vf_remap.c415
-rw-r--r--libavfilter/vf_removegrain.c660
-rw-r--r--libavfilter/vf_removelogo.c587
-rw-r--r--libavfilter/vf_repeatfields.c192
-rw-r--r--libavfilter/vf_rotate.c616
-rw-r--r--libavfilter/vf_sab.c337
-rw-r--r--libavfilter/vf_scale.c612
-rw-r--r--libavfilter/vf_scale_npp.c139
-rw-r--r--libavfilter/vf_scale_qsv.c12
-rw-r--r--libavfilter/vf_scale_vaapi.c40
-rw-r--r--libavfilter/vf_select.c350
-rw-r--r--libavfilter/vf_selectivecolor.c482
-rw-r--r--libavfilter/vf_separatefields.c146
-rw-r--r--libavfilter/vf_setfield.c94
-rw-r--r--libavfilter/vf_showinfo.c65
-rw-r--r--libavfilter/vf_showpalette.c140
-rw-r--r--libavfilter/vf_shuffleframes.c169
-rw-r--r--libavfilter/vf_shuffleplanes.c19
-rw-r--r--libavfilter/vf_signalstats.c1024
-rw-r--r--libavfilter/vf_signature.c767
-rw-r--r--libavfilter/vf_smartblur.c305
-rw-r--r--libavfilter/vf_spp.c529
-rw-r--r--libavfilter/vf_spp.h59
-rw-r--r--libavfilter/vf_ssim.c509
-rw-r--r--libavfilter/vf_stack.c281
-rw-r--r--libavfilter/vf_stereo3d.c1116
-rw-r--r--libavfilter/vf_subtitles.c496
-rw-r--r--libavfilter/vf_super2xsai.c354
-rw-r--r--libavfilter/vf_swaprect.c256
-rw-r--r--libavfilter/vf_swapuv.c129
-rw-r--r--libavfilter/vf_telecine.c295
-rw-r--r--libavfilter/vf_threshold.c349
-rw-r--r--libavfilter/vf_thumbnail.c237
-rw-r--r--libavfilter/vf_tile.c242
-rw-r--r--libavfilter/vf_tinterlace.c426
-rw-r--r--libavfilter/vf_transpose.c249
-rw-r--r--libavfilter/vf_unsharp.c257
-rw-r--r--libavfilter/vf_uspp.c509
-rw-r--r--libavfilter/vf_vaguedenoiser.c583
-rw-r--r--libavfilter/vf_vectorscope.c1359
-rw-r--r--libavfilter/vf_vflip.c34
-rw-r--r--libavfilter/vf_vidstabdetect.c221
-rw-r--r--libavfilter/vf_vidstabtransform.c322
-rw-r--r--libavfilter/vf_vignette.c359
-rw-r--r--libavfilter/vf_w3fdif.c596
-rw-r--r--libavfilter/vf_waveform.c2833
-rw-r--r--libavfilter/vf_weave.c151
-rw-r--r--libavfilter/vf_xbr.c434
-rw-r--r--libavfilter/vf_yadif.c311
-rw-r--r--libavfilter/vf_zoompan.c372
-rw-r--r--libavfilter/vf_zscale.c808
-rw-r--r--libavfilter/video.c57
-rw-r--r--libavfilter/video.h10
-rw-r--r--libavfilter/vidstabutils.c85
-rw-r--r--libavfilter/vidstabutils.h47
-rw-r--r--libavfilter/vsink_nullsink.c8
-rw-r--r--libavfilter/vsrc_cellauto.c340
-rw-r--r--libavfilter/vsrc_color.c199
-rw-r--r--libavfilter/vsrc_life.c454
-rw-r--r--libavfilter/vsrc_mandelbrot.c432
-rw-r--r--libavfilter/vsrc_movie.c292
-rw-r--r--libavfilter/vsrc_mptestsrc.c363
-rw-r--r--libavfilter/vsrc_nullsrc.c136
-rw-r--r--libavfilter/vsrc_testsrc.c1384
-rw-r--r--libavfilter/w3fdif.h48
-rw-r--r--libavfilter/window_func.c178
-rw-r--r--libavfilter/window_func.h34
-rw-r--r--libavfilter/x86/Makefile39
-rw-r--r--libavfilter/x86/af_volume.asm10
-rw-r--r--libavfilter/x86/af_volume_init.c8
-rw-r--r--libavfilter/x86/avf_showcqt.asm192
-rw-r--r--libavfilter/x86/avf_showcqt_init.c63
-rw-r--r--libavfilter/x86/colorspacedsp.asm1097
-rw-r--r--libavfilter/x86/colorspacedsp_init.c119
-rw-r--r--libavfilter/x86/vf_blend.asm316
-rw-r--r--libavfilter/x86/vf_blend_init.c84
-rw-r--r--libavfilter/x86/vf_bwdif.asm270
-rw-r--r--libavfilter/x86/vf_bwdif_init.c78
-rw-r--r--libavfilter/x86/vf_eq.c96
-rw-r--r--libavfilter/x86/vf_fspp.asm727
-rw-r--r--libavfilter/x86/vf_fspp_init.c49
-rw-r--r--libavfilter/x86/vf_gradfun.asm8
-rw-r--r--libavfilter/x86/vf_gradfun_init.c61
-rw-r--r--libavfilter/x86/vf_hqdn3d.asm12
-rw-r--r--libavfilter/x86/vf_hqdn3d_init.c10
-rw-r--r--libavfilter/x86/vf_idet.asm170
-rw-r--r--libavfilter/x86/vf_idet_init.c87
-rw-r--r--libavfilter/x86/vf_interlace.asm8
-rw-r--r--libavfilter/x86/vf_interlace_init.c8
-rw-r--r--libavfilter/x86/vf_maskedmerge.asm81
-rw-r--r--libavfilter/x86/vf_maskedmerge_init.c40
-rw-r--r--libavfilter/x86/vf_noise.c144
-rw-r--r--libavfilter/x86/vf_pp7.asm57
-rw-r--r--libavfilter/x86/vf_pp7_init.c34
-rw-r--r--libavfilter/x86/vf_psnr.asm140
-rw-r--r--libavfilter/x86/vf_psnr_init.c39
-rw-r--r--libavfilter/x86/vf_pullup.asm178
-rw-r--r--libavfilter/x86/vf_pullup_init.c41
-rw-r--r--libavfilter/x86/vf_removegrain.asm1218
-rw-r--r--libavfilter/x86/vf_removegrain_init.c88
-rw-r--r--libavfilter/x86/vf_spp.c237
-rw-r--r--libavfilter/x86/vf_ssim.asm247
-rw-r--r--libavfilter/x86/vf_ssim_init.c43
-rw-r--r--libavfilter/x86/vf_stereo3d.asm216
-rw-r--r--libavfilter/x86/vf_stereo3d_init.c37
-rw-r--r--libavfilter/x86/vf_tinterlace_init.c47
-rw-r--r--libavfilter/x86/vf_w3fdif.asm259
-rw-r--r--libavfilter/x86/vf_w3fdif_init.c63
-rw-r--r--libavfilter/x86/vf_yadif.asm53
-rw-r--r--libavfilter/x86/vf_yadif_init.c68
-rw-r--r--libavfilter/x86/yadif-10.asm255
-rw-r--r--libavfilter/x86/yadif-16.asm317
-rw-r--r--libavfilter/yadif.h50
408 files changed, 120281 insertions, 5766 deletions
diff --git a/libavfilter/Makefile b/libavfilter/Makefile
index c3c1beaa0f..a48ca0ab45 100644
--- a/libavfilter/Makefile
+++ b/libavfilter/Makefile
@@ -1,3 +1,5 @@
+include $(SUBDIR)../config.mak
+
NAME = avfilter
HEADERS = avfilter.h \
@@ -15,92 +17,359 @@ OBJS = allfilters.o \
drawutils.o \
fifo.o \
formats.o \
+ framepool.o \
+ framequeue.o \
+ graphdump.o \
graphparser.o \
+ opencl_allkernels.o \
+ transform.o \
video.o \
OBJS-$(HAVE_THREADS) += pthread.o
# audio filters
+OBJS-$(CONFIG_ABENCH_FILTER) += f_bench.o
+OBJS-$(CONFIG_ACOMPRESSOR_FILTER) += af_sidechaincompress.o
+OBJS-$(CONFIG_ACROSSFADE_FILTER) += af_afade.o
+OBJS-$(CONFIG_ACRUSHER_FILTER) += af_acrusher.o
+OBJS-$(CONFIG_ADELAY_FILTER) += af_adelay.o
+OBJS-$(CONFIG_AECHO_FILTER) += af_aecho.o
+OBJS-$(CONFIG_AEMPHASIS_FILTER) += af_aemphasis.o
+OBJS-$(CONFIG_AEVAL_FILTER) += aeval.o
+OBJS-$(CONFIG_AFADE_FILTER) += af_afade.o
+OBJS-$(CONFIG_AFFTFILT_FILTER) += af_afftfilt.o window_func.o
OBJS-$(CONFIG_AFORMAT_FILTER) += af_aformat.o
+OBJS-$(CONFIG_AGATE_FILTER) += af_agate.o
+OBJS-$(CONFIG_AINTERLEAVE_FILTER) += f_interleave.o
+OBJS-$(CONFIG_ALIMITER_FILTER) += af_alimiter.o
+OBJS-$(CONFIG_ALLPASS_FILTER) += af_biquads.o
+OBJS-$(CONFIG_ALOOP_FILTER) += f_loop.o
+OBJS-$(CONFIG_AMERGE_FILTER) += af_amerge.o
+OBJS-$(CONFIG_AMETADATA_FILTER) += f_metadata.o
OBJS-$(CONFIG_AMIX_FILTER) += af_amix.o
+OBJS-$(CONFIG_ANEQUALIZER_FILTER) += af_anequalizer.o
OBJS-$(CONFIG_ANULL_FILTER) += af_anull.o
+OBJS-$(CONFIG_APAD_FILTER) += af_apad.o
+OBJS-$(CONFIG_APERMS_FILTER) += f_perms.o
+OBJS-$(CONFIG_APHASER_FILTER) += af_aphaser.o generate_wave_table.o
+OBJS-$(CONFIG_APULSATOR_FILTER) += af_apulsator.o
+OBJS-$(CONFIG_AREALTIME_FILTER) += f_realtime.o
+OBJS-$(CONFIG_ARESAMPLE_FILTER) += af_aresample.o
+OBJS-$(CONFIG_AREVERSE_FILTER) += f_reverse.o
+OBJS-$(CONFIG_ASELECT_FILTER) += f_select.o
+OBJS-$(CONFIG_ASENDCMD_FILTER) += f_sendcmd.o
+OBJS-$(CONFIG_ASETNSAMPLES_FILTER) += af_asetnsamples.o
OBJS-$(CONFIG_ASETPTS_FILTER) += setpts.o
+OBJS-$(CONFIG_ASETRATE_FILTER) += af_asetrate.o
OBJS-$(CONFIG_ASETTB_FILTER) += settb.o
OBJS-$(CONFIG_ASHOWINFO_FILTER) += af_ashowinfo.o
+OBJS-$(CONFIG_ASIDEDATA_FILTER) += f_sidedata.o
OBJS-$(CONFIG_ASPLIT_FILTER) += split.o
+OBJS-$(CONFIG_ASTATS_FILTER) += af_astats.o
+OBJS-$(CONFIG_ASTREAMSELECT_FILTER) += f_streamselect.o
OBJS-$(CONFIG_ASYNCTS_FILTER) += af_asyncts.o
+OBJS-$(CONFIG_ATEMPO_FILTER) += af_atempo.o
OBJS-$(CONFIG_ATRIM_FILTER) += trim.o
+OBJS-$(CONFIG_AZMQ_FILTER) += f_zmq.o
+OBJS-$(CONFIG_BANDPASS_FILTER) += af_biquads.o
+OBJS-$(CONFIG_BANDREJECT_FILTER) += af_biquads.o
+OBJS-$(CONFIG_BASS_FILTER) += af_biquads.o
+OBJS-$(CONFIG_BIQUAD_FILTER) += af_biquads.o
OBJS-$(CONFIG_BS2B_FILTER) += af_bs2b.o
OBJS-$(CONFIG_CHANNELMAP_FILTER) += af_channelmap.o
OBJS-$(CONFIG_CHANNELSPLIT_FILTER) += af_channelsplit.o
+OBJS-$(CONFIG_CHORUS_FILTER) += af_chorus.o generate_wave_table.o
OBJS-$(CONFIG_COMPAND_FILTER) += af_compand.o
+OBJS-$(CONFIG_COMPENSATIONDELAY_FILTER) += af_compensationdelay.o
+OBJS-$(CONFIG_CRYSTALIZER_FILTER) += af_crystalizer.o
+OBJS-$(CONFIG_DCSHIFT_FILTER) += af_dcshift.o
+OBJS-$(CONFIG_DYNAUDNORM_FILTER) += af_dynaudnorm.o
+OBJS-$(CONFIG_EARWAX_FILTER) += af_earwax.o
+OBJS-$(CONFIG_EBUR128_FILTER) += f_ebur128.o
+OBJS-$(CONFIG_EQUALIZER_FILTER) += af_biquads.o
+OBJS-$(CONFIG_EXTRASTEREO_FILTER) += af_extrastereo.o
+OBJS-$(CONFIG_FIREQUALIZER_FILTER) += af_firequalizer.o
+OBJS-$(CONFIG_FLANGER_FILTER) += af_flanger.o generate_wave_table.o
OBJS-$(CONFIG_HDCD_FILTER) += af_hdcd.o
+OBJS-$(CONFIG_HIGHPASS_FILTER) += af_biquads.o
OBJS-$(CONFIG_JOIN_FILTER) += af_join.o
+OBJS-$(CONFIG_LADSPA_FILTER) += af_ladspa.o
+OBJS-$(CONFIG_LOUDNORM_FILTER) += af_loudnorm.o ebur128.o
+OBJS-$(CONFIG_LOWPASS_FILTER) += af_biquads.o
+OBJS-$(CONFIG_PAN_FILTER) += af_pan.o
+OBJS-$(CONFIG_REPLAYGAIN_FILTER) += af_replaygain.o
OBJS-$(CONFIG_RESAMPLE_FILTER) += af_resample.o
+OBJS-$(CONFIG_RUBBERBAND_FILTER) += af_rubberband.o
+OBJS-$(CONFIG_SIDECHAINCOMPRESS_FILTER) += af_sidechaincompress.o
+OBJS-$(CONFIG_SIDECHAINGATE_FILTER) += af_agate.o
+OBJS-$(CONFIG_SILENCEDETECT_FILTER) += af_silencedetect.o
+OBJS-$(CONFIG_SILENCEREMOVE_FILTER) += af_silenceremove.o
+OBJS-$(CONFIG_SOFALIZER_FILTER) += af_sofalizer.o
+OBJS-$(CONFIG_STEREOTOOLS_FILTER) += af_stereotools.o
+OBJS-$(CONFIG_STEREOWIDEN_FILTER) += af_stereowiden.o
+OBJS-$(CONFIG_TREBLE_FILTER) += af_biquads.o
+OBJS-$(CONFIG_TREMOLO_FILTER) += af_tremolo.o
+OBJS-$(CONFIG_VIBRATO_FILTER) += af_vibrato.o generate_wave_table.o
OBJS-$(CONFIG_VOLUME_FILTER) += af_volume.o
+OBJS-$(CONFIG_VOLUMEDETECT_FILTER) += af_volumedetect.o
-OBJS-$(CONFIG_ANULLSINK_FILTER) += asink_anullsink.o
+OBJS-$(CONFIG_AEVALSRC_FILTER) += aeval.o
+OBJS-$(CONFIG_ANOISESRC_FILTER) += asrc_anoisesrc.o
OBJS-$(CONFIG_ANULLSRC_FILTER) += asrc_anullsrc.o
+OBJS-$(CONFIG_FLITE_FILTER) += asrc_flite.o
+OBJS-$(CONFIG_SINE_FILTER) += asrc_sine.o
+
+OBJS-$(CONFIG_ANULLSINK_FILTER) += asink_anullsink.o
# video filters
+OBJS-$(CONFIG_ALPHAEXTRACT_FILTER) += vf_extractplanes.o
+OBJS-$(CONFIG_ALPHAMERGE_FILTER) += vf_alphamerge.o
+OBJS-$(CONFIG_ASS_FILTER) += vf_subtitles.o
+OBJS-$(CONFIG_ATADENOISE_FILTER) += vf_atadenoise.o
+OBJS-$(CONFIG_AVGBLUR_FILTER) += vf_avgblur.o
+OBJS-$(CONFIG_BBOX_FILTER) += bbox.o vf_bbox.o
+OBJS-$(CONFIG_BENCH_FILTER) += f_bench.o
+OBJS-$(CONFIG_BITPLANENOISE_FILTER) += vf_bitplanenoise.o
+OBJS-$(CONFIG_BLACKDETECT_FILTER) += vf_blackdetect.o
OBJS-$(CONFIG_BLACKFRAME_FILTER) += vf_blackframe.o
+OBJS-$(CONFIG_BLEND_FILTER) += vf_blend.o dualinput.o framesync.o
OBJS-$(CONFIG_BOXBLUR_FILTER) += vf_boxblur.o
+OBJS-$(CONFIG_BWDIF_FILTER) += vf_bwdif.o
+OBJS-$(CONFIG_CHROMAKEY_FILTER) += vf_chromakey.o
+OBJS-$(CONFIG_CIESCOPE_FILTER) += vf_ciescope.o
+OBJS-$(CONFIG_CODECVIEW_FILTER) += vf_codecview.o
+OBJS-$(CONFIG_COLORBALANCE_FILTER) += vf_colorbalance.o
+OBJS-$(CONFIG_COLORCHANNELMIXER_FILTER) += vf_colorchannelmixer.o
+OBJS-$(CONFIG_COLORKEY_FILTER) += vf_colorkey.o
+OBJS-$(CONFIG_COLORLEVELS_FILTER) += vf_colorlevels.o
+OBJS-$(CONFIG_COLORMATRIX_FILTER) += vf_colormatrix.o
+OBJS-$(CONFIG_COLORSPACE_FILTER) += vf_colorspace.o colorspacedsp.o
+OBJS-$(CONFIG_CONVOLUTION_FILTER) += vf_convolution.o
OBJS-$(CONFIG_COPY_FILTER) += vf_copy.o
+OBJS-$(CONFIG_COREIMAGE_FILTER) += vf_coreimage.o
+OBJS-$(CONFIG_COVER_RECT_FILTER) += vf_cover_rect.o lavfutils.o
OBJS-$(CONFIG_CROP_FILTER) += vf_crop.o
OBJS-$(CONFIG_CROPDETECT_FILTER) += vf_cropdetect.o
+OBJS-$(CONFIG_CURVES_FILTER) += vf_curves.o
+OBJS-$(CONFIG_DATASCOPE_FILTER) += vf_datascope.o
+OBJS-$(CONFIG_DCTDNOIZ_FILTER) += vf_dctdnoiz.o
+OBJS-$(CONFIG_DEBAND_FILTER) += vf_deband.o
+OBJS-$(CONFIG_DECIMATE_FILTER) += vf_decimate.o
+OBJS-$(CONFIG_DEFLATE_FILTER) += vf_neighbor.o
OBJS-$(CONFIG_DEINTERLACE_QSV_FILTER) += vf_deinterlace_qsv.o
+OBJS-$(CONFIG_DEINTERLACE_VAAPI_FILTER) += vf_deinterlace_vaapi.o
+OBJS-$(CONFIG_DEJUDDER_FILTER) += vf_dejudder.o
OBJS-$(CONFIG_DELOGO_FILTER) += vf_delogo.o
+OBJS-$(CONFIG_DESHAKE_FILTER) += vf_deshake.o
+OBJS-$(CONFIG_DETELECINE_FILTER) += vf_detelecine.o
+OBJS-$(CONFIG_DILATION_FILTER) += vf_neighbor.o
+OBJS-$(CONFIG_DISPLACE_FILTER) += vf_displace.o framesync.o
OBJS-$(CONFIG_DRAWBOX_FILTER) += vf_drawbox.o
+OBJS-$(CONFIG_DRAWGRAPH_FILTER) += f_drawgraph.o
+OBJS-$(CONFIG_DRAWGRID_FILTER) += vf_drawbox.o
OBJS-$(CONFIG_DRAWTEXT_FILTER) += vf_drawtext.o
+OBJS-$(CONFIG_EDGEDETECT_FILTER) += vf_edgedetect.o
+OBJS-$(CONFIG_ELBG_FILTER) += vf_elbg.o
+OBJS-$(CONFIG_EQ_FILTER) += vf_eq.o
+OBJS-$(CONFIG_EROSION_FILTER) += vf_neighbor.o
+OBJS-$(CONFIG_EXTRACTPLANES_FILTER) += vf_extractplanes.o
OBJS-$(CONFIG_FADE_FILTER) += vf_fade.o
+OBJS-$(CONFIG_FFTFILT_FILTER) += vf_fftfilt.o
+OBJS-$(CONFIG_FIELD_FILTER) += vf_field.o
+OBJS-$(CONFIG_FIELDHINT_FILTER) += vf_fieldhint.o
+OBJS-$(CONFIG_FIELDMATCH_FILTER) += vf_fieldmatch.o
OBJS-$(CONFIG_FIELDORDER_FILTER) += vf_fieldorder.o
+OBJS-$(CONFIG_FIND_RECT_FILTER) += vf_find_rect.o lavfutils.o
OBJS-$(CONFIG_FORMAT_FILTER) += vf_format.o
OBJS-$(CONFIG_FPS_FILTER) += vf_fps.o
OBJS-$(CONFIG_FRAMEPACK_FILTER) += vf_framepack.o
+OBJS-$(CONFIG_FRAMERATE_FILTER) += vf_framerate.o
+OBJS-$(CONFIG_FRAMESTEP_FILTER) += vf_framestep.o
OBJS-$(CONFIG_FREI0R_FILTER) += vf_frei0r.o
+OBJS-$(CONFIG_FSPP_FILTER) += vf_fspp.o
+OBJS-$(CONFIG_GBLUR_FILTER) += vf_gblur.o
+OBJS-$(CONFIG_GEQ_FILTER) += vf_geq.o
OBJS-$(CONFIG_GRADFUN_FILTER) += vf_gradfun.o
+OBJS-$(CONFIG_HALDCLUT_FILTER) += vf_lut3d.o dualinput.o framesync.o
OBJS-$(CONFIG_HFLIP_FILTER) += vf_hflip.o
+OBJS-$(CONFIG_HISTEQ_FILTER) += vf_histeq.o
+OBJS-$(CONFIG_HISTOGRAM_FILTER) += vf_histogram.o
OBJS-$(CONFIG_HQDN3D_FILTER) += vf_hqdn3d.o
+OBJS-$(CONFIG_HQX_FILTER) += vf_hqx.o
+OBJS-$(CONFIG_HSTACK_FILTER) += vf_stack.o framesync.o
+OBJS-$(CONFIG_HUE_FILTER) += vf_hue.o
OBJS-$(CONFIG_HWDOWNLOAD_FILTER) += vf_hwdownload.o
OBJS-$(CONFIG_HWUPLOAD_CUDA_FILTER) += vf_hwupload_cuda.o
OBJS-$(CONFIG_HWUPLOAD_FILTER) += vf_hwupload.o
+OBJS-$(CONFIG_HYSTERESIS_FILTER) += vf_hysteresis.o framesync.o
+OBJS-$(CONFIG_IDET_FILTER) += vf_idet.o
+OBJS-$(CONFIG_IL_FILTER) += vf_il.o
+OBJS-$(CONFIG_INFLATE_FILTER) += vf_neighbor.o
OBJS-$(CONFIG_INTERLACE_FILTER) += vf_interlace.o
+OBJS-$(CONFIG_INTERLEAVE_FILTER) += f_interleave.o
+OBJS-$(CONFIG_KERNDEINT_FILTER) += vf_kerndeint.o
+OBJS-$(CONFIG_LENSCORRECTION_FILTER) += vf_lenscorrection.o
+OBJS-$(CONFIG_LOOP_FILTER) += f_loop.o
OBJS-$(CONFIG_LUT_FILTER) += vf_lut.o
+OBJS-$(CONFIG_LUT2_FILTER) += vf_lut2.o framesync.o
+OBJS-$(CONFIG_LUT3D_FILTER) += vf_lut3d.o
OBJS-$(CONFIG_LUTRGB_FILTER) += vf_lut.o
OBJS-$(CONFIG_LUTYUV_FILTER) += vf_lut.o
+OBJS-$(CONFIG_MASKEDCLAMP_FILTER) += vf_maskedclamp.o framesync.o
+OBJS-$(CONFIG_MASKEDMERGE_FILTER) += vf_maskedmerge.o framesync.o
+OBJS-$(CONFIG_MCDEINT_FILTER) += vf_mcdeint.o
+OBJS-$(CONFIG_MERGEPLANES_FILTER) += vf_mergeplanes.o framesync.o
+OBJS-$(CONFIG_MESTIMATE_FILTER) += vf_mestimate.o motion_estimation.o
+OBJS-$(CONFIG_METADATA_FILTER) += f_metadata.o
+OBJS-$(CONFIG_MIDEQUALIZER_FILTER) += vf_midequalizer.o framesync.o
+OBJS-$(CONFIG_MINTERPOLATE_FILTER) += vf_minterpolate.o motion_estimation.o
+OBJS-$(CONFIG_MPDECIMATE_FILTER) += vf_mpdecimate.o
OBJS-$(CONFIG_NEGATE_FILTER) += vf_lut.o
+OBJS-$(CONFIG_NLMEANS_FILTER) += vf_nlmeans.o
+OBJS-$(CONFIG_NNEDI_FILTER) += vf_nnedi.o
OBJS-$(CONFIG_NOFORMAT_FILTER) += vf_format.o
+OBJS-$(CONFIG_NOISE_FILTER) += vf_noise.o
OBJS-$(CONFIG_NULL_FILTER) += vf_null.o
+OBJS-$(CONFIG_OCR_FILTER) += vf_ocr.o
OBJS-$(CONFIG_OCV_FILTER) += vf_libopencv.o
-OBJS-$(CONFIG_OVERLAY_FILTER) += vf_overlay.o
+OBJS-$(CONFIG_OPENCL) += deshake_opencl.o unsharp_opencl.o
+OBJS-$(CONFIG_OVERLAY_FILTER) += vf_overlay.o dualinput.o framesync.o
+OBJS-$(CONFIG_OWDENOISE_FILTER) += vf_owdenoise.o
OBJS-$(CONFIG_PAD_FILTER) += vf_pad.o
+OBJS-$(CONFIG_PALETTEGEN_FILTER) += vf_palettegen.o
+OBJS-$(CONFIG_PALETTEUSE_FILTER) += vf_paletteuse.o dualinput.o framesync.o
+OBJS-$(CONFIG_PERMS_FILTER) += f_perms.o
+OBJS-$(CONFIG_PERSPECTIVE_FILTER) += vf_perspective.o
+OBJS-$(CONFIG_PHASE_FILTER) += vf_phase.o
OBJS-$(CONFIG_PIXDESCTEST_FILTER) += vf_pixdesctest.o
-OBJS-$(CONFIG_SCALE_FILTER) += vf_scale.o
-OBJS-$(CONFIG_SCALE_NPP_FILTER) += vf_scale_npp.o
+OBJS-$(CONFIG_PP_FILTER) += vf_pp.o
+OBJS-$(CONFIG_PP7_FILTER) += vf_pp7.o
+OBJS-$(CONFIG_PREMULTIPLY_FILTER) += vf_premultiply.o framesync.o
+OBJS-$(CONFIG_PREWITT_FILTER) += vf_convolution.o
+OBJS-$(CONFIG_PSNR_FILTER) += vf_psnr.o dualinput.o framesync.o
+OBJS-$(CONFIG_PULLUP_FILTER) += vf_pullup.o
+OBJS-$(CONFIG_QP_FILTER) += vf_qp.o
+OBJS-$(CONFIG_RANDOM_FILTER) += vf_random.o
+OBJS-$(CONFIG_READEIA608_FILTER) += vf_readeia608.o
+OBJS-$(CONFIG_READVITC_FILTER) += vf_readvitc.o
+OBJS-$(CONFIG_REALTIME_FILTER) += f_realtime.o
+OBJS-$(CONFIG_REMAP_FILTER) += vf_remap.o framesync.o
+OBJS-$(CONFIG_REMOVEGRAIN_FILTER) += vf_removegrain.o
+OBJS-$(CONFIG_REMOVELOGO_FILTER) += bbox.o lswsutils.o lavfutils.o vf_removelogo.o
+OBJS-$(CONFIG_REPEATFIELDS_FILTER) += vf_repeatfields.o
+OBJS-$(CONFIG_REVERSE_FILTER) += f_reverse.o
+OBJS-$(CONFIG_ROTATE_FILTER) += vf_rotate.o
+OBJS-$(CONFIG_SAB_FILTER) += vf_sab.o
+OBJS-$(CONFIG_SCALE_FILTER) += vf_scale.o scale.o
+OBJS-$(CONFIG_SCALE_NPP_FILTER) += vf_scale_npp.o scale.o
OBJS-$(CONFIG_SCALE_QSV_FILTER) += vf_scale_qsv.o
-OBJS-$(CONFIG_SCALE_VAAPI_FILTER) += vf_scale_vaapi.o
-OBJS-$(CONFIG_SELECT_FILTER) += vf_select.o
+OBJS-$(CONFIG_SCALE_VAAPI_FILTER) += vf_scale_vaapi.o scale.o
+OBJS-$(CONFIG_SCALE2REF_FILTER) += vf_scale.o scale.o
+OBJS-$(CONFIG_SELECT_FILTER) += f_select.o
+OBJS-$(CONFIG_SELECTIVECOLOR_FILTER) += vf_selectivecolor.o
+OBJS-$(CONFIG_SENDCMD_FILTER) += f_sendcmd.o
+OBJS-$(CONFIG_SEPARATEFIELDS_FILTER) += vf_separatefields.o
OBJS-$(CONFIG_SETDAR_FILTER) += vf_aspect.o
+OBJS-$(CONFIG_SETFIELD_FILTER) += vf_setfield.o
OBJS-$(CONFIG_SETPTS_FILTER) += setpts.o
OBJS-$(CONFIG_SETSAR_FILTER) += vf_aspect.o
OBJS-$(CONFIG_SETTB_FILTER) += settb.o
OBJS-$(CONFIG_SHOWINFO_FILTER) += vf_showinfo.o
+OBJS-$(CONFIG_SHOWPALETTE_FILTER) += vf_showpalette.o
+OBJS-$(CONFIG_SHUFFLEFRAMES_FILTER) += vf_shuffleframes.o
OBJS-$(CONFIG_SHUFFLEPLANES_FILTER) += vf_shuffleplanes.o
+OBJS-$(CONFIG_SIDEDATA_FILTER) += f_sidedata.o
+OBJS-$(CONFIG_SIGNALSTATS_FILTER) += vf_signalstats.o
+OBJS-$(CONFIG_SIGNATURE_FILTER) += vf_signature.o
+OBJS-$(CONFIG_SMARTBLUR_FILTER) += vf_smartblur.o
+OBJS-$(CONFIG_SOBEL_FILTER) += vf_convolution.o
OBJS-$(CONFIG_SPLIT_FILTER) += split.o
+OBJS-$(CONFIG_SPP_FILTER) += vf_spp.o
+OBJS-$(CONFIG_SSIM_FILTER) += vf_ssim.o dualinput.o framesync.o
+OBJS-$(CONFIG_STEREO3D_FILTER) += vf_stereo3d.o
+OBJS-$(CONFIG_STREAMSELECT_FILTER) += f_streamselect.o
+OBJS-$(CONFIG_SUBTITLES_FILTER) += vf_subtitles.o
+OBJS-$(CONFIG_SUPER2XSAI_FILTER) += vf_super2xsai.o
+OBJS-$(CONFIG_SWAPRECT_FILTER) += vf_swaprect.o
+OBJS-$(CONFIG_SWAPUV_FILTER) += vf_swapuv.o
+OBJS-$(CONFIG_TBLEND_FILTER) += vf_blend.o dualinput.o framesync.o
+OBJS-$(CONFIG_TELECINE_FILTER) += vf_telecine.o
+OBJS-$(CONFIG_THRESHOLD_FILTER) += vf_threshold.o
+OBJS-$(CONFIG_THUMBNAIL_FILTER) += vf_thumbnail.o
+OBJS-$(CONFIG_TILE_FILTER) += vf_tile.o
+OBJS-$(CONFIG_TINTERLACE_FILTER) += vf_tinterlace.o
OBJS-$(CONFIG_TRANSPOSE_FILTER) += vf_transpose.o
OBJS-$(CONFIG_TRIM_FILTER) += trim.o
OBJS-$(CONFIG_UNSHARP_FILTER) += vf_unsharp.o
+OBJS-$(CONFIG_USPP_FILTER) += vf_uspp.o
+OBJS-$(CONFIG_VAGUEDENOISER_FILTER) += vf_vaguedenoiser.o
+OBJS-$(CONFIG_VECTORSCOPE_FILTER) += vf_vectorscope.o
OBJS-$(CONFIG_VFLIP_FILTER) += vf_vflip.o
+OBJS-$(CONFIG_VIDSTABDETECT_FILTER) += vidstabutils.o vf_vidstabdetect.o
+OBJS-$(CONFIG_VIDSTABTRANSFORM_FILTER) += vidstabutils.o vf_vidstabtransform.o
+OBJS-$(CONFIG_VIGNETTE_FILTER) += vf_vignette.o
+OBJS-$(CONFIG_VSTACK_FILTER) += vf_stack.o framesync.o
+OBJS-$(CONFIG_W3FDIF_FILTER) += vf_w3fdif.o
+OBJS-$(CONFIG_WAVEFORM_FILTER) += vf_waveform.o
+OBJS-$(CONFIG_WEAVE_FILTER) += vf_weave.o
+OBJS-$(CONFIG_XBR_FILTER) += vf_xbr.o
OBJS-$(CONFIG_YADIF_FILTER) += vf_yadif.o
+OBJS-$(CONFIG_ZMQ_FILTER) += f_zmq.o
+OBJS-$(CONFIG_ZOOMPAN_FILTER) += vf_zoompan.o
+OBJS-$(CONFIG_ZSCALE_FILTER) += vf_zscale.o
-OBJS-$(CONFIG_NULLSINK_FILTER) += vsink_nullsink.o
-
-OBJS-$(CONFIG_COLOR_FILTER) += vsrc_color.o
+OBJS-$(CONFIG_ALLRGB_FILTER) += vsrc_testsrc.o
+OBJS-$(CONFIG_ALLYUV_FILTER) += vsrc_testsrc.o
+OBJS-$(CONFIG_CELLAUTO_FILTER) += vsrc_cellauto.o
+OBJS-$(CONFIG_COLOR_FILTER) += vsrc_testsrc.o
+OBJS-$(CONFIG_COREIMAGESRC_FILTER) += vf_coreimage.o
OBJS-$(CONFIG_FREI0R_SRC_FILTER) += vf_frei0r.o
-OBJS-$(CONFIG_MOVIE_FILTER) += vsrc_movie.o
-OBJS-$(CONFIG_NULLSRC_FILTER) += vsrc_nullsrc.o
+OBJS-$(CONFIG_HALDCLUTSRC_FILTER) += vsrc_testsrc.o
+OBJS-$(CONFIG_LIFE_FILTER) += vsrc_life.o
+OBJS-$(CONFIG_MANDELBROT_FILTER) += vsrc_mandelbrot.o
+OBJS-$(CONFIG_MPTESTSRC_FILTER) += vsrc_mptestsrc.o
+OBJS-$(CONFIG_NULLSRC_FILTER) += vsrc_testsrc.o
OBJS-$(CONFIG_RGBTESTSRC_FILTER) += vsrc_testsrc.o
+OBJS-$(CONFIG_SMPTEBARS_FILTER) += vsrc_testsrc.o
+OBJS-$(CONFIG_SMPTEHDBARS_FILTER) += vsrc_testsrc.o
OBJS-$(CONFIG_TESTSRC_FILTER) += vsrc_testsrc.o
+OBJS-$(CONFIG_TESTSRC2_FILTER) += vsrc_testsrc.o
+OBJS-$(CONFIG_YUVTESTSRC_FILTER) += vsrc_testsrc.o
+
+OBJS-$(CONFIG_NULLSINK_FILTER) += vsink_nullsink.o
+
+# multimedia filters
+OBJS-$(CONFIG_ABITSCOPE_FILTER) += avf_abitscope.o
+OBJS-$(CONFIG_ADRAWGRAPH_FILTER) += f_drawgraph.o
+OBJS-$(CONFIG_AHISTOGRAM_FILTER) += avf_ahistogram.o
+OBJS-$(CONFIG_APHASEMETER_FILTER) += avf_aphasemeter.o
+OBJS-$(CONFIG_AVECTORSCOPE_FILTER) += avf_avectorscope.o
+OBJS-$(CONFIG_CONCAT_FILTER) += avf_concat.o
+OBJS-$(CONFIG_SHOWCQT_FILTER) += avf_showcqt.o lswsutils.o lavfutils.o
+OBJS-$(CONFIG_SHOWFREQS_FILTER) += avf_showfreqs.o window_func.o
+OBJS-$(CONFIG_SHOWSPECTRUM_FILTER) += avf_showspectrum.o window_func.o
+OBJS-$(CONFIG_SHOWSPECTRUMPIC_FILTER) += avf_showspectrum.o window_func.o
+OBJS-$(CONFIG_SHOWVOLUME_FILTER) += avf_showvolume.o
+OBJS-$(CONFIG_SHOWWAVES_FILTER) += avf_showwaves.o
+OBJS-$(CONFIG_SHOWWAVESPIC_FILTER) += avf_showwaves.o
+OBJS-$(CONFIG_SPECTRUMSYNTH_FILTER) += vaf_spectrumsynth.o window_func.o
+
+# multimedia sources
+OBJS-$(CONFIG_AMOVIE_FILTER) += src_movie.o
+OBJS-$(CONFIG_MOVIE_FILTER) += src_movie.o
+
+# Windows resource file
+SLIBOBJS-$(HAVE_GNU_WINDRES) += avfilterres.o
+
+SKIPHEADERS-$(CONFIG_LIBVIDSTAB) += vidstabutils.h
+SKIPHEADERS-$(CONFIG_OPENCL) += opencl_internal.h deshake_opencl_kernel.h unsharp_opencl_kernel.h
+
+OBJS-$(CONFIG_SHARED) += log2_tab.o
TOOLS = graph2dot
-TESTPROGS = filtfmts
+TESTPROGS = drawutils filtfmts formats integral
+
+TOOLS-$(CONFIG_LIBZMQ) += zmqsend
+
+clean::
+ $(RM) $(CLEANSUFFIXES:%=libavfilter/libmpcodecs/%)
diff --git a/libavfilter/aeval.c b/libavfilter/aeval.c
new file mode 100644
index 0000000000..42970f42e7
--- /dev/null
+++ b/libavfilter/aeval.c
@@ -0,0 +1,489 @@
+/*
+ * Copyright (c) 2011 Stefano Sabatini
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * eval audio source
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/avstring.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/eval.h"
+#include "libavutil/opt.h"
+#include "libavutil/parseutils.h"
+#include "avfilter.h"
+#include "audio.h"
+#include "internal.h"
+
+static const char * const var_names[] = {
+ "ch", ///< the value of the current channel
+ "n", ///< number of frame
+ "nb_in_channels",
+ "nb_out_channels",
+ "t", ///< timestamp expressed in seconds
+ "s", ///< sample rate
+ NULL
+};
+
+enum var_name {
+ VAR_CH,
+ VAR_N,
+ VAR_NB_IN_CHANNELS,
+ VAR_NB_OUT_CHANNELS,
+ VAR_T,
+ VAR_S,
+ VAR_VARS_NB
+};
+
+typedef struct {
+ const AVClass *class;
+ char *sample_rate_str;
+ int sample_rate;
+ int64_t chlayout;
+ char *chlayout_str;
+ int nb_channels; ///< number of output channels
+ int nb_in_channels; ///< number of input channels
+ int same_chlayout; ///< set output as input channel layout
+ int64_t pts;
+ AVExpr **expr;
+ char *exprs;
+ int nb_samples; ///< number of samples per requested frame
+ int64_t duration;
+ uint64_t n;
+ double var_values[VAR_VARS_NB];
+ double *channel_values;
+ int64_t out_channel_layout;
+} EvalContext;
+
+static double val(void *priv, double ch)
+{
+ EvalContext *eval = priv;
+ return eval->channel_values[FFMIN((int)ch, eval->nb_in_channels-1)];
+}
+
+static double (* const aeval_func1[])(void *, double) = { val, NULL };
+static const char * const aeval_func1_names[] = { "val", NULL };
+
+#define OFFSET(x) offsetof(EvalContext, x)
+#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption aevalsrc_options[]= {
+ { "exprs", "set the '|'-separated list of channels expressions", OFFSET(exprs), AV_OPT_TYPE_STRING, {.str = NULL}, .flags = FLAGS },
+ { "nb_samples", "set the number of samples per requested frame", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64 = 1024}, 0, INT_MAX, FLAGS },
+ { "n", "set the number of samples per requested frame", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64 = 1024}, 0, INT_MAX, FLAGS },
+ { "sample_rate", "set the sample rate", OFFSET(sample_rate_str), AV_OPT_TYPE_STRING, {.str = "44100"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "s", "set the sample rate", OFFSET(sample_rate_str), AV_OPT_TYPE_STRING, {.str = "44100"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "duration", "set audio duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = -1}, -1, INT64_MAX, FLAGS },
+ { "d", "set audio duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = -1}, -1, INT64_MAX, FLAGS },
+ { "channel_layout", "set channel layout", OFFSET(chlayout_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
+ { "c", "set channel layout", OFFSET(chlayout_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(aevalsrc);
+
+static int parse_channel_expressions(AVFilterContext *ctx,
+ int expected_nb_channels)
+{
+ EvalContext *eval = ctx->priv;
+ char *args1 = av_strdup(eval->exprs);
+ char *expr, *last_expr = NULL, *buf;
+ double (* const *func1)(void *, double) = NULL;
+ const char * const *func1_names = NULL;
+ int i, ret = 0;
+
+ if (!args1)
+ return AVERROR(ENOMEM);
+
+ if (!eval->exprs) {
+ av_log(ctx, AV_LOG_ERROR, "Channels expressions list is empty\n");
+ return AVERROR(EINVAL);
+ }
+
+ if (!strcmp(ctx->filter->name, "aeval")) {
+ func1 = aeval_func1;
+ func1_names = aeval_func1_names;
+ }
+
+#define ADD_EXPRESSION(expr_) do { \
+ if (!av_dynarray2_add((void **)&eval->expr, &eval->nb_channels, \
+ sizeof(*eval->expr), NULL)) { \
+ ret = AVERROR(ENOMEM); \
+ goto end; \
+ } \
+ eval->expr[eval->nb_channels-1] = NULL; \
+ ret = av_expr_parse(&eval->expr[eval->nb_channels - 1], expr_, \
+ var_names, func1_names, func1, \
+ NULL, NULL, 0, ctx); \
+ if (ret < 0) \
+ goto end; \
+ } while (0)
+
+ /* reset expressions */
+ for (i = 0; i < eval->nb_channels; i++) {
+ av_expr_free(eval->expr[i]);
+ eval->expr[i] = NULL;
+ }
+ av_freep(&eval->expr);
+ eval->nb_channels = 0;
+
+ buf = args1;
+ while (expr = av_strtok(buf, "|", &buf)) {
+ ADD_EXPRESSION(expr);
+ last_expr = expr;
+ }
+
+ if (expected_nb_channels > eval->nb_channels)
+ for (i = eval->nb_channels; i < expected_nb_channels; i++)
+ ADD_EXPRESSION(last_expr);
+
+ if (expected_nb_channels > 0 && eval->nb_channels != expected_nb_channels) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Mismatch between the specified number of channel expressions '%d' "
+ "and the number of expected output channels '%d' for the specified channel layout\n",
+ eval->nb_channels, expected_nb_channels);
+ ret = AVERROR(EINVAL);
+ goto end;
+ }
+
+end:
+ av_free(args1);
+ return ret;
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ EvalContext *eval = ctx->priv;
+ int ret = 0;
+
+ if (eval->chlayout_str) {
+ if (!strcmp(eval->chlayout_str, "same") && !strcmp(ctx->filter->name, "aeval")) {
+ eval->same_chlayout = 1;
+ } else {
+ ret = ff_parse_channel_layout(&eval->chlayout, NULL, eval->chlayout_str, ctx);
+ if (ret < 0)
+ return ret;
+
+ ret = parse_channel_expressions(ctx, av_get_channel_layout_nb_channels(eval->chlayout));
+ if (ret < 0)
+ return ret;
+ }
+ } else {
+ /* guess channel layout from nb expressions/channels */
+ if ((ret = parse_channel_expressions(ctx, -1)) < 0)
+ return ret;
+
+ eval->chlayout = av_get_default_channel_layout(eval->nb_channels);
+ if (!eval->chlayout && eval->nb_channels <= 0) {
+ av_log(ctx, AV_LOG_ERROR, "Invalid number of channels '%d' provided\n",
+ eval->nb_channels);
+ return AVERROR(EINVAL);
+ }
+ }
+
+ if (eval->sample_rate_str)
+ if ((ret = ff_parse_sample_rate(&eval->sample_rate, eval->sample_rate_str, ctx)))
+ return ret;
+ eval->n = 0;
+
+ return ret;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ EvalContext *eval = ctx->priv;
+ int i;
+
+ for (i = 0; i < eval->nb_channels; i++) {
+ av_expr_free(eval->expr[i]);
+ eval->expr[i] = NULL;
+ }
+ av_freep(&eval->expr);
+ av_freep(&eval->channel_values);
+}
+
+static int config_props(AVFilterLink *outlink)
+{
+ EvalContext *eval = outlink->src->priv;
+ char buf[128];
+
+ outlink->time_base = (AVRational){1, eval->sample_rate};
+ outlink->sample_rate = eval->sample_rate;
+
+ eval->var_values[VAR_S] = eval->sample_rate;
+ eval->var_values[VAR_NB_IN_CHANNELS] = NAN;
+ eval->var_values[VAR_NB_OUT_CHANNELS] = outlink->channels;
+
+ av_get_channel_layout_string(buf, sizeof(buf), 0, eval->chlayout);
+
+ av_log(outlink->src, AV_LOG_VERBOSE,
+ "sample_rate:%d chlayout:%s duration:%"PRId64"\n",
+ eval->sample_rate, buf, eval->duration);
+
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ EvalContext *eval = ctx->priv;
+ static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_DBLP, AV_SAMPLE_FMT_NONE };
+ int64_t chlayouts[] = { eval->chlayout ? eval->chlayout : FF_COUNT2LAYOUT(eval->nb_channels) , -1 };
+ int sample_rates[] = { eval->sample_rate, -1 };
+ AVFilterFormats *formats;
+ AVFilterChannelLayouts *layouts;
+ int ret;
+
+ formats = ff_make_format_list(sample_fmts);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ret = ff_set_common_formats (ctx, formats);
+ if (ret < 0)
+ return ret;
+
+ layouts = avfilter_make_format64_list(chlayouts);
+ if (!layouts)
+ return AVERROR(ENOMEM);
+ ret = ff_set_common_channel_layouts(ctx, layouts);
+ if (ret < 0)
+ return ret;
+
+ formats = ff_make_format_list(sample_rates);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ return ff_set_common_samplerates(ctx, formats);
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ EvalContext *eval = outlink->src->priv;
+ AVFrame *samplesref;
+ int i, j;
+ int64_t t = av_rescale(eval->n, AV_TIME_BASE, eval->sample_rate);
+ int nb_samples;
+
+ if (eval->duration >= 0 && t >= eval->duration)
+ return AVERROR_EOF;
+
+ if (eval->duration >= 0) {
+ nb_samples = FFMIN(eval->nb_samples, av_rescale(eval->duration, eval->sample_rate, AV_TIME_BASE) - eval->pts);
+ if (!nb_samples)
+ return AVERROR_EOF;
+ } else {
+ nb_samples = eval->nb_samples;
+ }
+ samplesref = ff_get_audio_buffer(outlink, nb_samples);
+ if (!samplesref)
+ return AVERROR(ENOMEM);
+
+ /* evaluate expression for each single sample and for each channel */
+ for (i = 0; i < nb_samples; i++, eval->n++) {
+ eval->var_values[VAR_N] = eval->n;
+ eval->var_values[VAR_T] = eval->var_values[VAR_N] * (double)1/eval->sample_rate;
+
+ for (j = 0; j < eval->nb_channels; j++) {
+ *((double *) samplesref->extended_data[j] + i) =
+ av_expr_eval(eval->expr[j], eval->var_values, NULL);
+ }
+ }
+
+ samplesref->pts = eval->pts;
+ samplesref->sample_rate = eval->sample_rate;
+ eval->pts += nb_samples;
+
+ return ff_filter_frame(outlink, samplesref);
+}
+
+#if CONFIG_AEVALSRC_FILTER
+static const AVFilterPad aevalsrc_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_props,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_asrc_aevalsrc = {
+ .name = "aevalsrc",
+ .description = NULL_IF_CONFIG_SMALL("Generate an audio signal generated by an expression."),
+ .query_formats = query_formats,
+ .init = init,
+ .uninit = uninit,
+ .priv_size = sizeof(EvalContext),
+ .inputs = NULL,
+ .outputs = aevalsrc_outputs,
+ .priv_class = &aevalsrc_class,
+};
+
+#endif /* CONFIG_AEVALSRC_FILTER */
+
+#define OFFSET(x) offsetof(EvalContext, x)
+#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption aeval_options[]= {
+ { "exprs", "set the '|'-separated list of channels expressions", OFFSET(exprs), AV_OPT_TYPE_STRING, {.str = NULL}, .flags = FLAGS },
+ { "channel_layout", "set channel layout", OFFSET(chlayout_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
+ { "c", "set channel layout", OFFSET(chlayout_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(aeval);
+
+static int aeval_query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *formats = NULL;
+ AVFilterChannelLayouts *layouts;
+ AVFilterLink *inlink = ctx->inputs[0];
+ AVFilterLink *outlink = ctx->outputs[0];
+ EvalContext *eval = ctx->priv;
+ static const enum AVSampleFormat sample_fmts[] = {
+ AV_SAMPLE_FMT_DBLP, AV_SAMPLE_FMT_NONE
+ };
+ int ret;
+
+ // inlink supports any channel layout
+ layouts = ff_all_channel_counts();
+ if ((ret = ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts)) < 0)
+ return ret;
+
+ if (eval->same_chlayout) {
+ layouts = ff_all_channel_counts();
+ if ((ret = ff_set_common_channel_layouts(ctx, layouts)) < 0)
+ return ret;
+ } else {
+ // outlink supports only requested output channel layout
+ layouts = NULL;
+ if ((ret = ff_add_channel_layout(&layouts,
+ eval->out_channel_layout ? eval->out_channel_layout :
+ FF_COUNT2LAYOUT(eval->nb_channels))) < 0)
+ return ret;
+ if ((ret = ff_channel_layouts_ref(layouts, &outlink->in_channel_layouts)) < 0)
+ return ret;
+ }
+
+ formats = ff_make_format_list(sample_fmts);
+ if ((ret = ff_set_common_formats(ctx, formats)) < 0)
+ return ret;
+
+ formats = ff_all_samplerates();
+ return ff_set_common_samplerates(ctx, formats);
+}
+
+static int aeval_config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ EvalContext *eval = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
+ int ret;
+
+ if (eval->same_chlayout) {
+ eval->chlayout = inlink->channel_layout;
+
+ if ((ret = parse_channel_expressions(ctx, inlink->channels)) < 0)
+ return ret;
+ }
+
+ eval->n = 0;
+ eval->nb_in_channels = eval->var_values[VAR_NB_IN_CHANNELS] = inlink->channels;
+ eval->var_values[VAR_NB_OUT_CHANNELS] = outlink->channels;
+ eval->var_values[VAR_S] = inlink->sample_rate;
+ eval->var_values[VAR_T] = NAN;
+
+ eval->channel_values = av_realloc_f(eval->channel_values,
+ inlink->channels, sizeof(*eval->channel_values));
+ if (!eval->channel_values)
+ return AVERROR(ENOMEM);
+
+ return 0;
+}
+
+#define TS2T(ts, tb) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts)*av_q2d(tb))
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ EvalContext *eval = inlink->dst->priv;
+ AVFilterLink *outlink = inlink->dst->outputs[0];
+ int nb_samples = in->nb_samples;
+ AVFrame *out;
+ double t0;
+ int i, j;
+
+ /* do volume scaling in-place if input buffer is writable */
+ out = ff_get_audio_buffer(outlink, nb_samples);
+ if (!out)
+ return AVERROR(ENOMEM);
+ av_frame_copy_props(out, in);
+
+ t0 = TS2T(in->pts, inlink->time_base);
+
+ /* evaluate expression for each single sample and for each channel */
+ for (i = 0; i < nb_samples; i++, eval->n++) {
+ eval->var_values[VAR_N] = eval->n;
+ eval->var_values[VAR_T] = t0 + i * (double)1/inlink->sample_rate;
+
+ for (j = 0; j < inlink->channels; j++)
+ eval->channel_values[j] = *((double *) in->extended_data[j] + i);
+
+ for (j = 0; j < outlink->channels; j++) {
+ eval->var_values[VAR_CH] = j;
+ *((double *) out->extended_data[j] + i) =
+ av_expr_eval(eval->expr[j], eval->var_values, eval);
+ }
+ }
+
+ av_frame_free(&in);
+ return ff_filter_frame(outlink, out);
+}
+
+#if CONFIG_AEVAL_FILTER
+
+static const AVFilterPad aeval_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad aeval_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = aeval_config_output,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_aeval = {
+ .name = "aeval",
+ .description = NULL_IF_CONFIG_SMALL("Filter audio signal according to a specified expression."),
+ .query_formats = aeval_query_formats,
+ .init = init,
+ .uninit = uninit,
+ .priv_size = sizeof(EvalContext),
+ .inputs = aeval_inputs,
+ .outputs = aeval_outputs,
+ .priv_class = &aeval_class,
+};
+
+#endif /* CONFIG_AEVAL_FILTER */
diff --git a/libavfilter/af_acrusher.c b/libavfilter/af_acrusher.c
new file mode 100644
index 0000000000..ddce74465d
--- /dev/null
+++ b/libavfilter/af_acrusher.c
@@ -0,0 +1,362 @@
+/*
+ * Copyright (c) Markus Schmidt and Christian Holschuh
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#include "internal.h"
+#include "audio.h"
+
+typedef struct LFOContext {
+ double freq;
+ double offset;
+ int srate;
+ double amount;
+ double pwidth;
+ double phase;
+} LFOContext;
+
+typedef struct SRContext {
+ double target;
+ double real;
+ double samples;
+ double last;
+} SRContext;
+
+typedef struct ACrusherContext {
+ const AVClass *class;
+
+ double level_in;
+ double level_out;
+ double bits;
+ double mix;
+ int mode;
+ double dc;
+ double idc;
+ double aa;
+ double samples;
+ int is_lfo;
+ double lforange;
+ double lforate;
+
+ double sqr;
+ double aa1;
+ double coeff;
+ int round;
+ double sov;
+ double smin;
+ double sdiff;
+
+ LFOContext lfo;
+ SRContext *sr;
+} ACrusherContext;
+
+#define OFFSET(x) offsetof(ACrusherContext, x)
+#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption acrusher_options[] = {
+ { "level_in", "set level in", OFFSET(level_in), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0.015625, 64, A },
+ { "level_out","set level out", OFFSET(level_out), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0.015625, 64, A },
+ { "bits", "set bit reduction", OFFSET(bits), AV_OPT_TYPE_DOUBLE, {.dbl=8}, 1, 64, A },
+ { "mix", "set mix", OFFSET(mix), AV_OPT_TYPE_DOUBLE, {.dbl=.5}, 0, 1, A },
+ { "mode", "set mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, A, "mode" },
+ { "lin", "linear", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, A, "mode" },
+ { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, A, "mode" },
+ { "dc", "set DC", OFFSET(dc), AV_OPT_TYPE_DOUBLE, {.dbl=1}, .25, 4, A },
+ { "aa", "set anti-aliasing", OFFSET(aa), AV_OPT_TYPE_DOUBLE, {.dbl=.5}, 0, 1, A },
+ { "samples", "set sample reduction", OFFSET(samples), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 1, 250, A },
+ { "lfo", "enable LFO", OFFSET(is_lfo), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, A },
+ { "lforange", "set LFO depth", OFFSET(lforange), AV_OPT_TYPE_DOUBLE, {.dbl=20}, 1, 250, A },
+ { "lforate", "set LFO rate", OFFSET(lforate), AV_OPT_TYPE_DOUBLE, {.dbl=.3}, .01, 200, A },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(acrusher);
+
+static double samplereduction(ACrusherContext *s, SRContext *sr, double in)
+{
+ sr->samples++;
+ if (sr->samples >= s->round) {
+ sr->target += s->samples;
+ sr->real += s->round;
+ if (sr->target + s->samples >= sr->real + 1) {
+ sr->last = in;
+ sr->target = 0;
+ sr->real = 0;
+ }
+ sr->samples = 0;
+ }
+ return sr->last;
+}
+
+static double add_dc(double s, double dc, double idc)
+{
+ return s > 0 ? s * dc : s * idc;
+}
+
+static double remove_dc(double s, double dc, double idc)
+{
+ return s > 0 ? s * idc : s * dc;
+}
+
+static inline double factor(double y, double k, double aa1, double aa)
+{
+ return 0.5 * (sin(M_PI * (fabs(y - k) - aa1) / aa - M_PI_2) + 1);
+}
+
+static double bitreduction(ACrusherContext *s, double in)
+{
+ const double sqr = s->sqr;
+ const double coeff = s->coeff;
+ const double aa = s->aa;
+ const double aa1 = s->aa1;
+ double y, k;
+
+ // add dc
+ in = add_dc(in, s->dc, s->idc);
+
+ // main rounding calculation depending on mode
+
+ // the idea for anti-aliasing:
+ // you need a function f which brings you to the scale, where
+ // you want to round and the function f_b (with f(f_b)=id) which
+ // brings you back to your original scale.
+ //
+ // then you can use the logic below in the following way:
+ // y = f(in) and k = roundf(y)
+ // if (y > k + aa1)
+ // k = f_b(k) + ( f_b(k+1) - f_b(k) ) * 0.5 * (sin(x - PI/2) + 1)
+ // if (y < k + aa1)
+ // k = f_b(k) - ( f_b(k+1) - f_b(k) ) * 0.5 * (sin(x - PI/2) + 1)
+ //
+ // whereas x = (fabs(f(in) - k) - aa1) * PI / aa
+ // for both cases.
+
+ switch (s->mode) {
+ case 0:
+ default:
+ // linear
+ y = in * coeff;
+ k = roundf(y);
+ if (k - aa1 <= y && y <= k + aa1) {
+ k /= coeff;
+ } else if (y > k + aa1) {
+ k = k / coeff + ((k + 1) / coeff - k / coeff) *
+ factor(y, k, aa1, aa);
+ } else {
+ k = k / coeff - (k / coeff - (k - 1) / coeff) *
+ factor(y, k, aa1, aa);
+ }
+ break;
+ case 1:
+ // logarithmic
+ y = sqr * log(fabs(in)) + sqr * sqr;
+ k = roundf(y);
+ if(!in) {
+ k = 0;
+ } else if (k - aa1 <= y && y <= k + aa1) {
+ k = in / fabs(in) * exp(k / sqr - sqr);
+ } else if (y > k + aa1) {
+ double x = exp(k / sqr - sqr);
+ k = FFSIGN(in) * (x + (exp((k + 1) / sqr - sqr) - x) *
+ factor(y, k, aa1, aa));
+ } else {
+ double x = exp(k / sqr - sqr);
+ k = in / fabs(in) * (x - (x - exp((k - 1) / sqr - sqr)) *
+ factor(y, k, aa1, aa));
+ }
+ break;
+ }
+
+ // mix between dry and wet signal
+ k += (in - k) * s->mix;
+
+ // remove dc
+ k = remove_dc(k, s->dc, s->idc);
+
+ return k;
+}
+
+static double lfo_get(LFOContext *lfo)
+{
+ double phs = FFMIN(100., lfo->phase / FFMIN(1.99, FFMAX(0.01, lfo->pwidth)) + lfo->offset);
+ double val;
+
+ if (phs > 1)
+ phs = fmod(phs, 1.);
+
+ val = sin((phs * 360.) * M_PI / 180);
+
+ return val * lfo->amount;
+}
+
+static void lfo_advance(LFOContext *lfo, unsigned count)
+{
+ lfo->phase = fabs(lfo->phase + count * lfo->freq * (1. / lfo->srate));
+ if (lfo->phase >= 1.)
+ lfo->phase = fmod(lfo->phase, 1.);
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ ACrusherContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFrame *out;
+ const double *src = (const double *)in->data[0];
+ double *dst;
+ const double level_in = s->level_in;
+ const double level_out = s->level_out;
+ const double mix = s->mix;
+ int n, c;
+
+ if (av_frame_is_writable(in)) {
+ out = in;
+ } else {
+ out = ff_get_audio_buffer(inlink, in->nb_samples);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+ }
+
+ dst = (double *)out->data[0];
+ for (n = 0; n < in->nb_samples; n++) {
+ if (s->is_lfo) {
+ s->samples = s->smin + s->sdiff * (lfo_get(&s->lfo) + 0.5);
+ s->round = round(s->samples);
+ }
+
+ for (c = 0; c < inlink->channels; c++) {
+ double sample = src[c] * level_in;
+
+ sample = mix * samplereduction(s, &s->sr[c], sample) + src[c] * (1. - mix) * level_in;
+ dst[c] = bitreduction(s, sample) * level_out;
+ }
+ src += c;
+ dst += c;
+
+ if (s->is_lfo)
+ lfo_advance(&s->lfo, 1);
+ }
+
+ if (in != out)
+ av_frame_free(&in);
+
+ return ff_filter_frame(outlink, out);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *formats;
+ AVFilterChannelLayouts *layouts;
+ static const enum AVSampleFormat sample_fmts[] = {
+ AV_SAMPLE_FMT_DBL,
+ AV_SAMPLE_FMT_NONE
+ };
+ int ret;
+
+ layouts = ff_all_channel_counts();
+ if (!layouts)
+ return AVERROR(ENOMEM);
+ ret = ff_set_common_channel_layouts(ctx, layouts);
+ if (ret < 0)
+ return ret;
+
+ formats = ff_make_format_list(sample_fmts);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ret = ff_set_common_formats(ctx, formats);
+ if (ret < 0)
+ return ret;
+
+ formats = ff_all_samplerates();
+ if (!formats)
+ return AVERROR(ENOMEM);
+ return ff_set_common_samplerates(ctx, formats);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ ACrusherContext *s = ctx->priv;
+
+ av_freep(&s->sr);
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ ACrusherContext *s = ctx->priv;
+ double rad, sunder, smax, sover;
+
+ s->idc = 1. / s->dc;
+ s->coeff = exp2(s->bits) - 1;
+ s->sqr = sqrt(s->coeff / 2);
+ s->aa1 = (1. - s->aa) / 2.;
+ s->round = round(s->samples);
+ rad = s->lforange / 2.;
+ s->smin = FFMAX(s->samples - rad, 1.);
+ sunder = s->samples - rad - s->smin;
+ smax = FFMIN(s->samples + rad, 250.);
+ sover = s->samples + rad - smax;
+ smax -= sunder;
+ s->smin -= sover;
+ s->sdiff = smax - s->smin;
+
+ s->lfo.freq = s->lforate;
+ s->lfo.pwidth = 1.;
+ s->lfo.srate = inlink->sample_rate;
+ s->lfo.amount = .5;
+
+ s->sr = av_calloc(inlink->channels, sizeof(*s->sr));
+ if (!s->sr)
+ return AVERROR(ENOMEM);
+
+ return 0;
+}
+
+static const AVFilterPad avfilter_af_acrusher_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad avfilter_af_acrusher_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_acrusher = {
+ .name = "acrusher",
+ .description = NULL_IF_CONFIG_SMALL("Reduce audio bit resolution."),
+ .priv_size = sizeof(ACrusherContext),
+ .priv_class = &acrusher_class,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = avfilter_af_acrusher_inputs,
+ .outputs = avfilter_af_acrusher_outputs,
+};
diff --git a/libavfilter/af_adelay.c b/libavfilter/af_adelay.c
new file mode 100644
index 0000000000..187cacf28a
--- /dev/null
+++ b/libavfilter/af_adelay.c
@@ -0,0 +1,293 @@
+/*
+ * Copyright (c) 2013 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/avstring.h"
+#include "libavutil/opt.h"
+#include "libavutil/samplefmt.h"
+#include "avfilter.h"
+#include "audio.h"
+#include "internal.h"
+
+typedef struct ChanDelay {
+ int delay;
+ unsigned delay_index;
+ unsigned index;
+ uint8_t *samples;
+} ChanDelay;
+
+typedef struct AudioDelayContext {
+ const AVClass *class;
+ char *delays;
+ ChanDelay *chandelay;
+ int nb_delays;
+ int block_align;
+ unsigned max_delay;
+ int64_t next_pts;
+
+ void (*delay_channel)(ChanDelay *d, int nb_samples,
+ const uint8_t *src, uint8_t *dst);
+} AudioDelayContext;
+
+#define OFFSET(x) offsetof(AudioDelayContext, x)
+#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption adelay_options[] = {
+ { "delays", "set list of delays for each channel", OFFSET(delays), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, A },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(adelay);
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterChannelLayouts *layouts;
+ AVFilterFormats *formats;
+ static const enum AVSampleFormat sample_fmts[] = {
+ AV_SAMPLE_FMT_U8P, AV_SAMPLE_FMT_S16P, AV_SAMPLE_FMT_S32P,
+ AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_DBLP,
+ AV_SAMPLE_FMT_NONE
+ };
+ int ret;
+
+ layouts = ff_all_channel_counts();
+ if (!layouts)
+ return AVERROR(ENOMEM);
+ ret = ff_set_common_channel_layouts(ctx, layouts);
+ if (ret < 0)
+ return ret;
+
+ formats = ff_make_format_list(sample_fmts);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ret = ff_set_common_formats(ctx, formats);
+ if (ret < 0)
+ return ret;
+
+ formats = ff_all_samplerates();
+ if (!formats)
+ return AVERROR(ENOMEM);
+ return ff_set_common_samplerates(ctx, formats);
+}
+
+#define DELAY(name, type, fill) \
+static void delay_channel_## name ##p(ChanDelay *d, int nb_samples, \
+ const uint8_t *ssrc, uint8_t *ddst) \
+{ \
+ const type *src = (type *)ssrc; \
+ type *dst = (type *)ddst; \
+ type *samples = (type *)d->samples; \
+ \
+ while (nb_samples) { \
+ if (d->delay_index < d->delay) { \
+ const int len = FFMIN(nb_samples, d->delay - d->delay_index); \
+ \
+ memcpy(&samples[d->delay_index], src, len * sizeof(type)); \
+ memset(dst, fill, len * sizeof(type)); \
+ d->delay_index += len; \
+ src += len; \
+ dst += len; \
+ nb_samples -= len; \
+ } else { \
+ *dst = samples[d->index]; \
+ samples[d->index] = *src; \
+ nb_samples--; \
+ d->index++; \
+ src++, dst++; \
+ d->index = d->index >= d->delay ? 0 : d->index; \
+ } \
+ } \
+}
+
+DELAY(u8, uint8_t, 0x80)
+DELAY(s16, int16_t, 0)
+DELAY(s32, int32_t, 0)
+DELAY(flt, float, 0)
+DELAY(dbl, double, 0)
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AudioDelayContext *s = ctx->priv;
+ char *p, *arg, *saveptr = NULL;
+ int i;
+
+ s->chandelay = av_calloc(inlink->channels, sizeof(*s->chandelay));
+ if (!s->chandelay)
+ return AVERROR(ENOMEM);
+ s->nb_delays = inlink->channels;
+ s->block_align = av_get_bytes_per_sample(inlink->format);
+
+ p = s->delays;
+ for (i = 0; i < s->nb_delays; i++) {
+ ChanDelay *d = &s->chandelay[i];
+ float delay;
+ char type = 0;
+ int ret;
+
+ if (!(arg = av_strtok(p, "|", &saveptr)))
+ break;
+
+ p = NULL;
+
+ ret = sscanf(arg, "%d%c", &d->delay, &type);
+ if (ret != 2 || type != 'S') {
+ sscanf(arg, "%f", &delay);
+ d->delay = delay * inlink->sample_rate / 1000.0;
+ }
+
+ if (d->delay < 0) {
+ av_log(ctx, AV_LOG_ERROR, "Delay must be non negative number.\n");
+ return AVERROR(EINVAL);
+ }
+ }
+
+ for (i = 0; i < s->nb_delays; i++) {
+ ChanDelay *d = &s->chandelay[i];
+
+ if (!d->delay)
+ continue;
+
+ d->samples = av_malloc_array(d->delay, s->block_align);
+ if (!d->samples)
+ return AVERROR(ENOMEM);
+
+ s->max_delay = FFMAX(s->max_delay, d->delay);
+ }
+
+ if (!s->max_delay) {
+ av_log(ctx, AV_LOG_ERROR, "At least one delay >0 must be specified.\n");
+ return AVERROR(EINVAL);
+ }
+
+ switch (inlink->format) {
+ case AV_SAMPLE_FMT_U8P : s->delay_channel = delay_channel_u8p ; break;
+ case AV_SAMPLE_FMT_S16P: s->delay_channel = delay_channel_s16p; break;
+ case AV_SAMPLE_FMT_S32P: s->delay_channel = delay_channel_s32p; break;
+ case AV_SAMPLE_FMT_FLTP: s->delay_channel = delay_channel_fltp; break;
+ case AV_SAMPLE_FMT_DBLP: s->delay_channel = delay_channel_dblp; break;
+ }
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AudioDelayContext *s = ctx->priv;
+ AVFrame *out_frame;
+ int i;
+
+ if (ctx->is_disabled || !s->delays)
+ return ff_filter_frame(ctx->outputs[0], frame);
+
+ out_frame = ff_get_audio_buffer(inlink, frame->nb_samples);
+ if (!out_frame) {
+ av_frame_free(&frame);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out_frame, frame);
+
+ for (i = 0; i < s->nb_delays; i++) {
+ ChanDelay *d = &s->chandelay[i];
+ const uint8_t *src = frame->extended_data[i];
+ uint8_t *dst = out_frame->extended_data[i];
+
+ if (!d->delay)
+ memcpy(dst, src, frame->nb_samples * s->block_align);
+ else
+ s->delay_channel(d, frame->nb_samples, src, dst);
+ }
+
+ s->next_pts = frame->pts + av_rescale_q(frame->nb_samples, (AVRational){1, inlink->sample_rate}, inlink->time_base);
+ av_frame_free(&frame);
+ return ff_filter_frame(ctx->outputs[0], out_frame);
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ AudioDelayContext *s = ctx->priv;
+ int ret;
+
+ ret = ff_request_frame(ctx->inputs[0]);
+ if (ret == AVERROR_EOF && !ctx->is_disabled && s->max_delay) {
+ int nb_samples = FFMIN(s->max_delay, 2048);
+ AVFrame *frame;
+
+ frame = ff_get_audio_buffer(outlink, nb_samples);
+ if (!frame)
+ return AVERROR(ENOMEM);
+ s->max_delay -= nb_samples;
+
+ av_samples_set_silence(frame->extended_data, 0,
+ frame->nb_samples,
+ outlink->channels,
+ frame->format);
+
+ frame->pts = s->next_pts;
+ if (s->next_pts != AV_NOPTS_VALUE)
+ s->next_pts += av_rescale_q(nb_samples, (AVRational){1, outlink->sample_rate}, outlink->time_base);
+
+ ret = filter_frame(ctx->inputs[0], frame);
+ }
+
+ return ret;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ AudioDelayContext *s = ctx->priv;
+ int i;
+
+ for (i = 0; i < s->nb_delays; i++)
+ av_freep(&s->chandelay[i].samples);
+ av_freep(&s->chandelay);
+}
+
+static const AVFilterPad adelay_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad adelay_outputs[] = {
+ {
+ .name = "default",
+ .request_frame = request_frame,
+ .type = AVMEDIA_TYPE_AUDIO,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_adelay = {
+ .name = "adelay",
+ .description = NULL_IF_CONFIG_SMALL("Delay one or more audio channels."),
+ .query_formats = query_formats,
+ .priv_size = sizeof(AudioDelayContext),
+ .priv_class = &adelay_class,
+ .uninit = uninit,
+ .inputs = adelay_inputs,
+ .outputs = adelay_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
+};
diff --git a/libavfilter/af_aecho.c b/libavfilter/af_aecho.c
new file mode 100644
index 0000000000..82049e9541
--- /dev/null
+++ b/libavfilter/af_aecho.c
@@ -0,0 +1,364 @@
+/*
+ * Copyright (c) 2013 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/avstring.h"
+#include "libavutil/opt.h"
+#include "libavutil/samplefmt.h"
+#include "avfilter.h"
+#include "audio.h"
+#include "internal.h"
+
+typedef struct AudioEchoContext {
+ const AVClass *class;
+ float in_gain, out_gain;
+ char *delays, *decays;
+ float *delay, *decay;
+ int nb_echoes;
+ int delay_index;
+ uint8_t **delayptrs;
+ int max_samples, fade_out;
+ int *samples;
+ int64_t next_pts;
+
+ void (*echo_samples)(struct AudioEchoContext *ctx, uint8_t **delayptrs,
+ uint8_t * const *src, uint8_t **dst,
+ int nb_samples, int channels);
+} AudioEchoContext;
+
+#define OFFSET(x) offsetof(AudioEchoContext, x)
+#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption aecho_options[] = {
+ { "in_gain", "set signal input gain", OFFSET(in_gain), AV_OPT_TYPE_FLOAT, {.dbl=0.6}, 0, 1, A },
+ { "out_gain", "set signal output gain", OFFSET(out_gain), AV_OPT_TYPE_FLOAT, {.dbl=0.3}, 0, 1, A },
+ { "delays", "set list of signal delays", OFFSET(delays), AV_OPT_TYPE_STRING, {.str="1000"}, 0, 0, A },
+ { "decays", "set list of signal decays", OFFSET(decays), AV_OPT_TYPE_STRING, {.str="0.5"}, 0, 0, A },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(aecho);
+
+static void count_items(char *item_str, int *nb_items)
+{
+ char *p;
+
+ *nb_items = 1;
+ for (p = item_str; *p; p++) {
+ if (*p == '|')
+ (*nb_items)++;
+ }
+
+}
+
+static void fill_items(char *item_str, int *nb_items, float *items)
+{
+ char *p, *saveptr = NULL;
+ int i, new_nb_items = 0;
+
+ p = item_str;
+ for (i = 0; i < *nb_items; i++) {
+ char *tstr = av_strtok(p, "|", &saveptr);
+ p = NULL;
+ new_nb_items += sscanf(tstr, "%f", &items[i]) == 1;
+ }
+
+ *nb_items = new_nb_items;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ AudioEchoContext *s = ctx->priv;
+
+ av_freep(&s->delay);
+ av_freep(&s->decay);
+ av_freep(&s->samples);
+
+ if (s->delayptrs)
+ av_freep(&s->delayptrs[0]);
+ av_freep(&s->delayptrs);
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ AudioEchoContext *s = ctx->priv;
+ int nb_delays, nb_decays, i;
+
+ if (!s->delays || !s->decays) {
+ av_log(ctx, AV_LOG_ERROR, "Missing delays and/or decays.\n");
+ return AVERROR(EINVAL);
+ }
+
+ count_items(s->delays, &nb_delays);
+ count_items(s->decays, &nb_decays);
+
+ s->delay = av_realloc_f(s->delay, nb_delays, sizeof(*s->delay));
+ s->decay = av_realloc_f(s->decay, nb_decays, sizeof(*s->decay));
+ if (!s->delay || !s->decay)
+ return AVERROR(ENOMEM);
+
+ fill_items(s->delays, &nb_delays, s->delay);
+ fill_items(s->decays, &nb_decays, s->decay);
+
+ if (nb_delays != nb_decays) {
+ av_log(ctx, AV_LOG_ERROR, "Number of delays %d differs from number of decays %d.\n", nb_delays, nb_decays);
+ return AVERROR(EINVAL);
+ }
+
+ s->nb_echoes = nb_delays;
+ if (!s->nb_echoes) {
+ av_log(ctx, AV_LOG_ERROR, "At least one decay & delay must be set.\n");
+ return AVERROR(EINVAL);
+ }
+
+ s->samples = av_realloc_f(s->samples, nb_delays, sizeof(*s->samples));
+ if (!s->samples)
+ return AVERROR(ENOMEM);
+
+ for (i = 0; i < nb_delays; i++) {
+ if (s->delay[i] <= 0 || s->delay[i] > 90000) {
+ av_log(ctx, AV_LOG_ERROR, "delay[%d]: %f is out of allowed range: (0, 90000]\n", i, s->delay[i]);
+ return AVERROR(EINVAL);
+ }
+ if (s->decay[i] <= 0 || s->decay[i] > 1) {
+ av_log(ctx, AV_LOG_ERROR, "decay[%d]: %f is out of allowed range: (0, 1]\n", i, s->decay[i]);
+ return AVERROR(EINVAL);
+ }
+ }
+
+ s->next_pts = AV_NOPTS_VALUE;
+
+ av_log(ctx, AV_LOG_DEBUG, "nb_echoes:%d\n", s->nb_echoes);
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterChannelLayouts *layouts;
+ AVFilterFormats *formats;
+ static const enum AVSampleFormat sample_fmts[] = {
+ AV_SAMPLE_FMT_S16P, AV_SAMPLE_FMT_S32P,
+ AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_DBLP,
+ AV_SAMPLE_FMT_NONE
+ };
+ int ret;
+
+ layouts = ff_all_channel_counts();
+ if (!layouts)
+ return AVERROR(ENOMEM);
+ ret = ff_set_common_channel_layouts(ctx, layouts);
+ if (ret < 0)
+ return ret;
+
+ formats = ff_make_format_list(sample_fmts);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ret = ff_set_common_formats(ctx, formats);
+ if (ret < 0)
+ return ret;
+
+ formats = ff_all_samplerates();
+ if (!formats)
+ return AVERROR(ENOMEM);
+ return ff_set_common_samplerates(ctx, formats);
+}
+
+#define MOD(a, b) (((a) >= (b)) ? (a) - (b) : (a))
+
+#define ECHO(name, type, min, max) \
+static void echo_samples_## name ##p(AudioEchoContext *ctx, \
+ uint8_t **delayptrs, \
+ uint8_t * const *src, uint8_t **dst, \
+ int nb_samples, int channels) \
+{ \
+ const double out_gain = ctx->out_gain; \
+ const double in_gain = ctx->in_gain; \
+ const int nb_echoes = ctx->nb_echoes; \
+ const int max_samples = ctx->max_samples; \
+ int i, j, chan, av_uninit(index); \
+ \
+ av_assert1(channels > 0); /* would corrupt delay_index */ \
+ \
+ for (chan = 0; chan < channels; chan++) { \
+ const type *s = (type *)src[chan]; \
+ type *d = (type *)dst[chan]; \
+ type *dbuf = (type *)delayptrs[chan]; \
+ \
+ index = ctx->delay_index; \
+ for (i = 0; i < nb_samples; i++, s++, d++) { \
+ double out, in; \
+ \
+ in = *s; \
+ out = in * in_gain; \
+ for (j = 0; j < nb_echoes; j++) { \
+ int ix = index + max_samples - ctx->samples[j]; \
+ ix = MOD(ix, max_samples); \
+ out += dbuf[ix] * ctx->decay[j]; \
+ } \
+ out *= out_gain; \
+ \
+ *d = av_clipd(out, min, max); \
+ dbuf[index] = in; \
+ \
+ index = MOD(index + 1, max_samples); \
+ } \
+ } \
+ ctx->delay_index = index; \
+}
+
+ECHO(dbl, double, -1.0, 1.0 )
+ECHO(flt, float, -1.0, 1.0 )
+ECHO(s16, int16_t, INT16_MIN, INT16_MAX)
+ECHO(s32, int32_t, INT32_MIN, INT32_MAX)
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ AudioEchoContext *s = ctx->priv;
+ float volume = 1.0;
+ int i;
+
+ for (i = 0; i < s->nb_echoes; i++) {
+ s->samples[i] = s->delay[i] * outlink->sample_rate / 1000.0;
+ s->max_samples = FFMAX(s->max_samples, s->samples[i]);
+ volume += s->decay[i];
+ }
+
+ if (s->max_samples <= 0) {
+ av_log(ctx, AV_LOG_ERROR, "Nothing to echo - missing delay samples.\n");
+ return AVERROR(EINVAL);
+ }
+ s->fade_out = s->max_samples;
+
+ if (volume * s->in_gain * s->out_gain > 1.0)
+ av_log(ctx, AV_LOG_WARNING,
+ "out_gain %f can cause saturation of output\n", s->out_gain);
+
+ switch (outlink->format) {
+ case AV_SAMPLE_FMT_DBLP: s->echo_samples = echo_samples_dblp; break;
+ case AV_SAMPLE_FMT_FLTP: s->echo_samples = echo_samples_fltp; break;
+ case AV_SAMPLE_FMT_S16P: s->echo_samples = echo_samples_s16p; break;
+ case AV_SAMPLE_FMT_S32P: s->echo_samples = echo_samples_s32p; break;
+ }
+
+
+ if (s->delayptrs)
+ av_freep(&s->delayptrs[0]);
+ av_freep(&s->delayptrs);
+
+ return av_samples_alloc_array_and_samples(&s->delayptrs, NULL,
+ outlink->channels,
+ s->max_samples,
+ outlink->format, 0);
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AudioEchoContext *s = ctx->priv;
+ AVFrame *out_frame;
+
+ if (av_frame_is_writable(frame)) {
+ out_frame = frame;
+ } else {
+ out_frame = ff_get_audio_buffer(inlink, frame->nb_samples);
+ if (!out_frame) {
+ av_frame_free(&frame);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out_frame, frame);
+ }
+
+ s->echo_samples(s, s->delayptrs, frame->extended_data, out_frame->extended_data,
+ frame->nb_samples, inlink->channels);
+
+ s->next_pts = frame->pts + av_rescale_q(frame->nb_samples, (AVRational){1, inlink->sample_rate}, inlink->time_base);
+
+ if (frame != out_frame)
+ av_frame_free(&frame);
+
+ return ff_filter_frame(ctx->outputs[0], out_frame);
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ AudioEchoContext *s = ctx->priv;
+ int ret;
+
+ ret = ff_request_frame(ctx->inputs[0]);
+
+ if (ret == AVERROR_EOF && !ctx->is_disabled && s->fade_out) {
+ int nb_samples = FFMIN(s->fade_out, 2048);
+ AVFrame *frame;
+
+ frame = ff_get_audio_buffer(outlink, nb_samples);
+ if (!frame)
+ return AVERROR(ENOMEM);
+ s->fade_out -= nb_samples;
+
+ av_samples_set_silence(frame->extended_data, 0,
+ frame->nb_samples,
+ outlink->channels,
+ frame->format);
+
+ s->echo_samples(s, s->delayptrs, frame->extended_data, frame->extended_data,
+ frame->nb_samples, outlink->channels);
+
+ frame->pts = s->next_pts;
+ if (s->next_pts != AV_NOPTS_VALUE)
+ s->next_pts += av_rescale_q(nb_samples, (AVRational){1, outlink->sample_rate}, outlink->time_base);
+
+ return ff_filter_frame(outlink, frame);
+ }
+
+ return ret;
+}
+
+static const AVFilterPad aecho_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad aecho_outputs[] = {
+ {
+ .name = "default",
+ .request_frame = request_frame,
+ .config_props = config_output,
+ .type = AVMEDIA_TYPE_AUDIO,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_aecho = {
+ .name = "aecho",
+ .description = NULL_IF_CONFIG_SMALL("Add echoing to the audio."),
+ .query_formats = query_formats,
+ .priv_size = sizeof(AudioEchoContext),
+ .priv_class = &aecho_class,
+ .init = init,
+ .uninit = uninit,
+ .inputs = aecho_inputs,
+ .outputs = aecho_outputs,
+};
diff --git a/libavfilter/af_aemphasis.c b/libavfilter/af_aemphasis.c
new file mode 100644
index 0000000000..a5b8e3058a
--- /dev/null
+++ b/libavfilter/af_aemphasis.c
@@ -0,0 +1,369 @@
+/*
+ * Copyright (c) 2001-2010 Krzysztof Foltman, Markus Schmidt, Thor Harald Johansen, Damien Zammit and others
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#include "internal.h"
+#include "audio.h"
+
+typedef struct BiquadCoeffs {
+ double a0, a1, a2, b1, b2;
+} BiquadCoeffs;
+
+typedef struct BiquadD2 {
+ double a0, a1, a2, b1, b2, w1, w2;
+} BiquadD2;
+
+typedef struct RIAACurve {
+ BiquadD2 r1;
+ BiquadD2 brickw;
+ int use_brickw;
+} RIAACurve;
+
+typedef struct AudioEmphasisContext {
+ const AVClass *class;
+ int mode, type;
+ double level_in, level_out;
+
+ RIAACurve *rc;
+} AudioEmphasisContext;
+
+#define OFFSET(x) offsetof(AudioEmphasisContext, x)
+#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption aemphasis_options[] = {
+ { "level_in", "set input gain", OFFSET(level_in), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 64, FLAGS },
+ { "level_out", "set output gain", OFFSET(level_out), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 64, FLAGS },
+ { "mode", "set filter mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS, "mode" },
+ { "reproduction", NULL, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "mode" },
+ { "production", NULL, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "mode" },
+ { "type", "set filter type", OFFSET(type), AV_OPT_TYPE_INT, {.i64=4}, 0, 8, FLAGS, "type" },
+ { "col", "Columbia", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "type" },
+ { "emi", "EMI", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "type" },
+ { "bsi", "BSI (78RPM)", 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, FLAGS, "type" },
+ { "riaa", "RIAA", 0, AV_OPT_TYPE_CONST, {.i64=3}, 0, 0, FLAGS, "type" },
+ { "cd", "Compact Disc (CD)", 0, AV_OPT_TYPE_CONST, {.i64=4}, 0, 0, FLAGS, "type" },
+ { "50fm", "50µs (FM)", 0, AV_OPT_TYPE_CONST, {.i64=5}, 0, 0, FLAGS, "type" },
+ { "75fm", "75µs (FM)", 0, AV_OPT_TYPE_CONST, {.i64=6}, 0, 0, FLAGS, "type" },
+ { "50kf", "50µs (FM-KF)", 0, AV_OPT_TYPE_CONST, {.i64=7}, 0, 0, FLAGS, "type" },
+ { "75kf", "75µs (FM-KF)", 0, AV_OPT_TYPE_CONST, {.i64=8}, 0, 0, FLAGS, "type" },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(aemphasis);
+
+static inline double biquad(BiquadD2 *bq, double in)
+{
+ double n = in;
+ double tmp = n - bq->w1 * bq->b1 - bq->w2 * bq->b2;
+ double out = tmp * bq->a0 + bq->w1 * bq->a1 + bq->w2 * bq->a2;
+
+ bq->w2 = bq->w1;
+ bq->w1 = tmp;
+
+ return out;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AudioEmphasisContext *s = ctx->priv;
+ const double *src = (const double *)in->data[0];
+ const double level_out = s->level_out;
+ const double level_in = s->level_in;
+ AVFrame *out;
+ double *dst;
+ int n, c;
+
+ if (av_frame_is_writable(in)) {
+ out = in;
+ } else {
+ out = ff_get_audio_buffer(inlink, in->nb_samples);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+ }
+ dst = (double *)out->data[0];
+
+ for (n = 0; n < in->nb_samples; n++) {
+ for (c = 0; c < inlink->channels; c++)
+ dst[c] = level_out * biquad(&s->rc[c].r1, s->rc[c].use_brickw ? biquad(&s->rc[c].brickw, src[c] * level_in) : src[c] * level_in);
+ dst += inlink->channels;
+ src += inlink->channels;
+ }
+
+ if (in != out)
+ av_frame_free(&in);
+ return ff_filter_frame(outlink, out);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterChannelLayouts *layouts;
+ AVFilterFormats *formats;
+ static const enum AVSampleFormat sample_fmts[] = {
+ AV_SAMPLE_FMT_DBL,
+ AV_SAMPLE_FMT_NONE
+ };
+ int ret;
+
+ layouts = ff_all_channel_counts();
+ if (!layouts)
+ return AVERROR(ENOMEM);
+ ret = ff_set_common_channel_layouts(ctx, layouts);
+ if (ret < 0)
+ return ret;
+
+ formats = ff_make_format_list(sample_fmts);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ret = ff_set_common_formats(ctx, formats);
+ if (ret < 0)
+ return ret;
+
+ formats = ff_all_samplerates();
+ if (!formats)
+ return AVERROR(ENOMEM);
+ return ff_set_common_samplerates(ctx, formats);
+}
+
+static inline void set_highshelf_rbj(BiquadD2 *bq, double freq, double q, double peak, double sr)
+{
+ double A = sqrt(peak);
+ double w0 = freq * 2 * M_PI / sr;
+ double alpha = sin(w0) / (2 * q);
+ double cw0 = cos(w0);
+ double tmp = 2 * sqrt(A) * alpha;
+ double b0 = 0, ib0 = 0;
+
+ bq->a0 = A*( (A+1) + (A-1)*cw0 + tmp);
+ bq->a1 = -2*A*( (A-1) + (A+1)*cw0);
+ bq->a2 = A*( (A+1) + (A-1)*cw0 - tmp);
+ b0 = (A+1) - (A-1)*cw0 + tmp;
+ bq->b1 = 2*( (A-1) - (A+1)*cw0);
+ bq->b2 = (A+1) - (A-1)*cw0 - tmp;
+
+ ib0 = 1 / b0;
+ bq->b1 *= ib0;
+ bq->b2 *= ib0;
+ bq->a0 *= ib0;
+ bq->a1 *= ib0;
+ bq->a2 *= ib0;
+}
+
+static inline void set_lp_rbj(BiquadD2 *bq, double fc, double q, double sr, double gain)
+{
+ double omega = 2.0 * M_PI * fc / sr;
+ double sn = sin(omega);
+ double cs = cos(omega);
+ double alpha = sn/(2 * q);
+ double inv = 1.0/(1.0 + alpha);
+
+ bq->a2 = bq->a0 = gain * inv * (1.0 - cs) * 0.5;
+ bq->a1 = bq->a0 + bq->a0;
+ bq->b1 = (-2.0 * cs * inv);
+ bq->b2 = ((1.0 - alpha) * inv);
+}
+
+static double freq_gain(BiquadCoeffs *c, double freq, double sr)
+{
+ double zr, zi;
+
+ freq *= 2.0 * M_PI / sr;
+ zr = cos(freq);
+ zi = -sin(freq);
+
+ /* |(a0 + a1*z + a2*z^2)/(1 + b1*z + b2*z^2)| */
+ return hypot(c->a0 + c->a1*zr + c->a2*(zr*zr-zi*zi), c->a1*zi + 2*c->a2*zr*zi) /
+ hypot(1 + c->b1*zr + c->b2*(zr*zr-zi*zi), c->b1*zi + 2*c->b2*zr*zi);
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ double i, j, k, g, t, a0, a1, a2, b1, b2, tau1, tau2, tau3;
+ double cutfreq, gain1kHz, gc, sr = inlink->sample_rate;
+ AVFilterContext *ctx = inlink->dst;
+ AudioEmphasisContext *s = ctx->priv;
+ BiquadCoeffs coeffs;
+ int ch;
+
+ s->rc = av_calloc(inlink->channels, sizeof(*s->rc));
+ if (!s->rc)
+ return AVERROR(ENOMEM);
+
+ switch (s->type) {
+ case 0: //"Columbia"
+ i = 100.;
+ j = 500.;
+ k = 1590.;
+ break;
+ case 1: //"EMI"
+ i = 70.;
+ j = 500.;
+ k = 2500.;
+ break;
+ case 2: //"BSI(78rpm)"
+ i = 50.;
+ j = 353.;
+ k = 3180.;
+ break;
+ case 3: //"RIAA"
+ default:
+ tau1 = 0.003180;
+ tau2 = 0.000318;
+ tau3 = 0.000075;
+ i = 1. / (2. * M_PI * tau1);
+ j = 1. / (2. * M_PI * tau2);
+ k = 1. / (2. * M_PI * tau3);
+ break;
+ case 4: //"CD Mastering"
+ tau1 = 0.000050;
+ tau2 = 0.000015;
+ tau3 = 0.0000001;// 1.6MHz out of audible range for null impact
+ i = 1. / (2. * M_PI * tau1);
+ j = 1. / (2. * M_PI * tau2);
+ k = 1. / (2. * M_PI * tau3);
+ break;
+ case 5: //"50µs FM (Europe)"
+ tau1 = 0.000050;
+ tau2 = tau1 / 20;// not used
+ tau3 = tau1 / 50;//
+ i = 1. / (2. * M_PI * tau1);
+ j = 1. / (2. * M_PI * tau2);
+ k = 1. / (2. * M_PI * tau3);
+ break;
+ case 6: //"75µs FM (US)"
+ tau1 = 0.000075;
+ tau2 = tau1 / 20;// not used
+ tau3 = tau1 / 50;//
+ i = 1. / (2. * M_PI * tau1);
+ j = 1. / (2. * M_PI * tau2);
+ k = 1. / (2. * M_PI * tau3);
+ break;
+ }
+
+ i *= 2 * M_PI;
+ j *= 2 * M_PI;
+ k *= 2 * M_PI;
+
+ t = 1. / sr;
+
+ //swap a1 b1, a2 b2
+ if (s->type == 7 || s->type == 8) {
+ double tau = (s->type == 7 ? 0.000050 : 0.000075);
+ double f = 1.0 / (2 * M_PI * tau);
+ double nyq = sr * 0.5;
+ double gain = sqrt(1.0 + nyq * nyq / (f * f)); // gain at Nyquist
+ double cfreq = sqrt((gain - 1.0) * f * f); // frequency
+ double q = 1.0;
+
+ if (s->type == 8)
+ q = pow((sr / 3269.0) + 19.5, -0.25); // somewhat poor curve-fit
+ if (s->type == 7)
+ q = pow((sr / 4750.0) + 19.5, -0.25);
+ if (s->mode == 0)
+ set_highshelf_rbj(&s->rc[0].r1, cfreq, q, 1. / gain, sr);
+ else
+ set_highshelf_rbj(&s->rc[0].r1, cfreq, q, gain, sr);
+ s->rc[0].use_brickw = 0;
+ } else {
+ s->rc[0].use_brickw = 1;
+ if (s->mode == 0) { // Reproduction
+ g = 1. / (4.+2.*i*t+2.*k*t+i*k*t*t);
+ a0 = (2.*t+j*t*t)*g;
+ a1 = (2.*j*t*t)*g;
+ a2 = (-2.*t+j*t*t)*g;
+ b1 = (-8.+2.*i*k*t*t)*g;
+ b2 = (4.-2.*i*t-2.*k*t+i*k*t*t)*g;
+ } else { // Production
+ g = 1. / (2.*t+j*t*t);
+ a0 = (4.+2.*i*t+2.*k*t+i*k*t*t)*g;
+ a1 = (-8.+2.*i*k*t*t)*g;
+ a2 = (4.-2.*i*t-2.*k*t+i*k*t*t)*g;
+ b1 = (2.*j*t*t)*g;
+ b2 = (-2.*t+j*t*t)*g;
+ }
+
+ coeffs.a0 = a0;
+ coeffs.a1 = a1;
+ coeffs.a2 = a2;
+ coeffs.b1 = b1;
+ coeffs.b2 = b2;
+
+ // the coeffs above give non-normalized value, so it should be normalized to produce 0dB at 1 kHz
+ // find actual gain
+ // Note: for FM emphasis, use 100 Hz for normalization instead
+ gain1kHz = freq_gain(&coeffs, 1000.0, sr);
+ // divide one filter's x[n-m] coefficients by that value
+ gc = 1.0 / gain1kHz;
+ s->rc[0].r1.a0 = coeffs.a0 * gc;
+ s->rc[0].r1.a1 = coeffs.a1 * gc;
+ s->rc[0].r1.a2 = coeffs.a2 * gc;
+ s->rc[0].r1.b1 = coeffs.b1;
+ s->rc[0].r1.b2 = coeffs.b2;
+ }
+
+ cutfreq = FFMIN(0.45 * sr, 21000.);
+ set_lp_rbj(&s->rc[0].brickw, cutfreq, 0.707, sr, 1.);
+
+ for (ch = 1; ch < inlink->channels; ch++) {
+ memcpy(&s->rc[ch], &s->rc[0], sizeof(RIAACurve));
+ }
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ AudioEmphasisContext *s = ctx->priv;
+ av_freep(&s->rc);
+}
+
+static const AVFilterPad avfilter_af_aemphasis_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad avfilter_af_aemphasis_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_aemphasis = {
+ .name = "aemphasis",
+ .description = NULL_IF_CONFIG_SMALL("Audio emphasis."),
+ .priv_size = sizeof(AudioEmphasisContext),
+ .priv_class = &aemphasis_class,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = avfilter_af_aemphasis_inputs,
+ .outputs = avfilter_af_aemphasis_outputs,
+};
diff --git a/libavfilter/af_afade.c b/libavfilter/af_afade.c
new file mode 100644
index 0000000000..9acadc51c5
--- /dev/null
+++ b/libavfilter/af_afade.c
@@ -0,0 +1,669 @@
+/*
+ * Copyright (c) 2013-2015 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * fade audio filter
+ */
+
+#include "libavutil/audio_fifo.h"
+#include "libavutil/opt.h"
+#include "audio.h"
+#include "avfilter.h"
+#include "internal.h"
+
+typedef struct {
+ const AVClass *class;
+ int type;
+ int curve, curve2;
+ int nb_samples;
+ int64_t start_sample;
+ int64_t duration;
+ int64_t start_time;
+ int overlap;
+ int cf0_eof;
+ int crossfade_is_over;
+ AVAudioFifo *fifo[2];
+ int64_t pts;
+
+ void (*fade_samples)(uint8_t **dst, uint8_t * const *src,
+ int nb_samples, int channels, int direction,
+ int64_t start, int range, int curve);
+ void (*crossfade_samples)(uint8_t **dst, uint8_t * const *cf0,
+ uint8_t * const *cf1,
+ int nb_samples, int channels,
+ int curve0, int curve1);
+} AudioFadeContext;
+
+enum CurveType { TRI, QSIN, ESIN, HSIN, LOG, IPAR, QUA, CUB, SQU, CBR, PAR, EXP, IQSIN, IHSIN, DESE, DESI, NB_CURVES };
+
+#define OFFSET(x) offsetof(AudioFadeContext, x)
+#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *formats;
+ AVFilterChannelLayouts *layouts;
+ static const enum AVSampleFormat sample_fmts[] = {
+ AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16P,
+ AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32P,
+ AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLTP,
+ AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBLP,
+ AV_SAMPLE_FMT_NONE
+ };
+ int ret;
+
+ layouts = ff_all_channel_counts();
+ if (!layouts)
+ return AVERROR(ENOMEM);
+ ret = ff_set_common_channel_layouts(ctx, layouts);
+ if (ret < 0)
+ return ret;
+
+ formats = ff_make_format_list(sample_fmts);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ret = ff_set_common_formats(ctx, formats);
+ if (ret < 0)
+ return ret;
+
+ formats = ff_all_samplerates();
+ if (!formats)
+ return AVERROR(ENOMEM);
+ return ff_set_common_samplerates(ctx, formats);
+}
+
+static double fade_gain(int curve, int64_t index, int range)
+{
+#define CUBE(a) ((a)*(a)*(a))
+ double gain;
+
+ gain = av_clipd(1.0 * index / range, 0, 1.0);
+
+ switch (curve) {
+ case QSIN:
+ gain = sin(gain * M_PI / 2.0);
+ break;
+ case IQSIN:
+ /* 0.6... = 2 / M_PI */
+ gain = 0.6366197723675814 * asin(gain);
+ break;
+ case ESIN:
+ gain = 1.0 - cos(M_PI / 4.0 * (CUBE(2.0*gain - 1) + 1));
+ break;
+ case HSIN:
+ gain = (1.0 - cos(gain * M_PI)) / 2.0;
+ break;
+ case IHSIN:
+ /* 0.3... = 1 / M_PI */
+ gain = 0.3183098861837907 * acos(1 - 2 * gain);
+ break;
+ case EXP:
+ /* -11.5... = 5*ln(0.1) */
+ gain = exp(-11.512925464970227 * (1 - gain));
+ break;
+ case LOG:
+ gain = av_clipd(1 + 0.2 * log10(gain), 0, 1.0);
+ break;
+ case PAR:
+ gain = 1 - sqrt(1 - gain);
+ break;
+ case IPAR:
+ gain = (1 - (1 - gain) * (1 - gain));
+ break;
+ case QUA:
+ gain *= gain;
+ break;
+ case CUB:
+ gain = CUBE(gain);
+ break;
+ case SQU:
+ gain = sqrt(gain);
+ break;
+ case CBR:
+ gain = cbrt(gain);
+ break;
+ case DESE:
+ gain = gain <= 0.5 ? cbrt(2 * gain) / 2: 1 - cbrt(2 * (1 - gain)) / 2;
+ break;
+ case DESI:
+ gain = gain <= 0.5 ? CUBE(2 * gain) / 2: 1 - CUBE(2 * (1 - gain)) / 2;
+ break;
+ }
+
+ return gain;
+}
+
+#define FADE_PLANAR(name, type) \
+static void fade_samples_## name ##p(uint8_t **dst, uint8_t * const *src, \
+ int nb_samples, int channels, int dir, \
+ int64_t start, int range, int curve) \
+{ \
+ int i, c; \
+ \
+ for (i = 0; i < nb_samples; i++) { \
+ double gain = fade_gain(curve, start + i * dir, range); \
+ for (c = 0; c < channels; c++) { \
+ type *d = (type *)dst[c]; \
+ const type *s = (type *)src[c]; \
+ \
+ d[i] = s[i] * gain; \
+ } \
+ } \
+}
+
+#define FADE(name, type) \
+static void fade_samples_## name (uint8_t **dst, uint8_t * const *src, \
+ int nb_samples, int channels, int dir, \
+ int64_t start, int range, int curve) \
+{ \
+ type *d = (type *)dst[0]; \
+ const type *s = (type *)src[0]; \
+ int i, c, k = 0; \
+ \
+ for (i = 0; i < nb_samples; i++) { \
+ double gain = fade_gain(curve, start + i * dir, range); \
+ for (c = 0; c < channels; c++, k++) \
+ d[k] = s[k] * gain; \
+ } \
+}
+
+FADE_PLANAR(dbl, double)
+FADE_PLANAR(flt, float)
+FADE_PLANAR(s16, int16_t)
+FADE_PLANAR(s32, int32_t)
+
+FADE(dbl, double)
+FADE(flt, float)
+FADE(s16, int16_t)
+FADE(s32, int32_t)
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ AudioFadeContext *s = ctx->priv;
+
+ switch (outlink->format) {
+ case AV_SAMPLE_FMT_DBL: s->fade_samples = fade_samples_dbl; break;
+ case AV_SAMPLE_FMT_DBLP: s->fade_samples = fade_samples_dblp; break;
+ case AV_SAMPLE_FMT_FLT: s->fade_samples = fade_samples_flt; break;
+ case AV_SAMPLE_FMT_FLTP: s->fade_samples = fade_samples_fltp; break;
+ case AV_SAMPLE_FMT_S16: s->fade_samples = fade_samples_s16; break;
+ case AV_SAMPLE_FMT_S16P: s->fade_samples = fade_samples_s16p; break;
+ case AV_SAMPLE_FMT_S32: s->fade_samples = fade_samples_s32; break;
+ case AV_SAMPLE_FMT_S32P: s->fade_samples = fade_samples_s32p; break;
+ }
+
+ if (s->duration)
+ s->nb_samples = av_rescale(s->duration, outlink->sample_rate, AV_TIME_BASE);
+ if (s->start_time)
+ s->start_sample = av_rescale(s->start_time, outlink->sample_rate, AV_TIME_BASE);
+
+ return 0;
+}
+
+#if CONFIG_AFADE_FILTER
+
+static const AVOption afade_options[] = {
+ { "type", "set the fade direction", OFFSET(type), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, 1, FLAGS, "type" },
+ { "t", "set the fade direction", OFFSET(type), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, 1, FLAGS, "type" },
+ { "in", "fade-in", 0, AV_OPT_TYPE_CONST, {.i64 = 0 }, 0, 0, FLAGS, "type" },
+ { "out", "fade-out", 0, AV_OPT_TYPE_CONST, {.i64 = 1 }, 0, 0, FLAGS, "type" },
+ { "start_sample", "set number of first sample to start fading", OFFSET(start_sample), AV_OPT_TYPE_INT64, {.i64 = 0 }, 0, INT64_MAX, FLAGS },
+ { "ss", "set number of first sample to start fading", OFFSET(start_sample), AV_OPT_TYPE_INT64, {.i64 = 0 }, 0, INT64_MAX, FLAGS },
+ { "nb_samples", "set number of samples for fade duration", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64 = 44100}, 1, INT32_MAX, FLAGS },
+ { "ns", "set number of samples for fade duration", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64 = 44100}, 1, INT32_MAX, FLAGS },
+ { "start_time", "set time to start fading", OFFSET(start_time), AV_OPT_TYPE_DURATION, {.i64 = 0. }, 0, INT32_MAX, FLAGS },
+ { "st", "set time to start fading", OFFSET(start_time), AV_OPT_TYPE_DURATION, {.i64 = 0. }, 0, INT32_MAX, FLAGS },
+ { "duration", "set fade duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = 0. }, 0, INT32_MAX, FLAGS },
+ { "d", "set fade duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = 0. }, 0, INT32_MAX, FLAGS },
+ { "curve", "set fade curve type", OFFSET(curve), AV_OPT_TYPE_INT, {.i64 = TRI }, 0, NB_CURVES - 1, FLAGS, "curve" },
+ { "c", "set fade curve type", OFFSET(curve), AV_OPT_TYPE_INT, {.i64 = TRI }, 0, NB_CURVES - 1, FLAGS, "curve" },
+ { "tri", "linear slope", 0, AV_OPT_TYPE_CONST, {.i64 = TRI }, 0, 0, FLAGS, "curve" },
+ { "qsin", "quarter of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = QSIN }, 0, 0, FLAGS, "curve" },
+ { "esin", "exponential sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = ESIN }, 0, 0, FLAGS, "curve" },
+ { "hsin", "half of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = HSIN }, 0, 0, FLAGS, "curve" },
+ { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64 = LOG }, 0, 0, FLAGS, "curve" },
+ { "ipar", "inverted parabola", 0, AV_OPT_TYPE_CONST, {.i64 = IPAR }, 0, 0, FLAGS, "curve" },
+ { "qua", "quadratic", 0, AV_OPT_TYPE_CONST, {.i64 = QUA }, 0, 0, FLAGS, "curve" },
+ { "cub", "cubic", 0, AV_OPT_TYPE_CONST, {.i64 = CUB }, 0, 0, FLAGS, "curve" },
+ { "squ", "square root", 0, AV_OPT_TYPE_CONST, {.i64 = SQU }, 0, 0, FLAGS, "curve" },
+ { "cbr", "cubic root", 0, AV_OPT_TYPE_CONST, {.i64 = CBR }, 0, 0, FLAGS, "curve" },
+ { "par", "parabola", 0, AV_OPT_TYPE_CONST, {.i64 = PAR }, 0, 0, FLAGS, "curve" },
+ { "exp", "exponential", 0, AV_OPT_TYPE_CONST, {.i64 = EXP }, 0, 0, FLAGS, "curve" },
+ { "iqsin", "inverted quarter of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = IQSIN}, 0, 0, FLAGS, "curve" },
+ { "ihsin", "inverted half of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = IHSIN}, 0, 0, FLAGS, "curve" },
+ { "dese", "double-exponential seat", 0, AV_OPT_TYPE_CONST, {.i64 = DESE }, 0, 0, FLAGS, "curve" },
+ { "desi", "double-exponential sigmoid", 0, AV_OPT_TYPE_CONST, {.i64 = DESI }, 0, 0, FLAGS, "curve" },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(afade);
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ AudioFadeContext *s = ctx->priv;
+
+ if (INT64_MAX - s->nb_samples < s->start_sample)
+ return AVERROR(EINVAL);
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
+{
+ AudioFadeContext *s = inlink->dst->priv;
+ AVFilterLink *outlink = inlink->dst->outputs[0];
+ int nb_samples = buf->nb_samples;
+ AVFrame *out_buf;
+ int64_t cur_sample = av_rescale_q(buf->pts, inlink->time_base, (AVRational){1, inlink->sample_rate});
+
+ if ((!s->type && (s->start_sample + s->nb_samples < cur_sample)) ||
+ ( s->type && (cur_sample + nb_samples < s->start_sample)))
+ return ff_filter_frame(outlink, buf);
+
+ if (av_frame_is_writable(buf)) {
+ out_buf = buf;
+ } else {
+ out_buf = ff_get_audio_buffer(inlink, nb_samples);
+ if (!out_buf)
+ return AVERROR(ENOMEM);
+ av_frame_copy_props(out_buf, buf);
+ }
+
+ if ((!s->type && (cur_sample + nb_samples < s->start_sample)) ||
+ ( s->type && (s->start_sample + s->nb_samples < cur_sample))) {
+ av_samples_set_silence(out_buf->extended_data, 0, nb_samples,
+ av_frame_get_channels(out_buf), out_buf->format);
+ } else {
+ int64_t start;
+
+ if (!s->type)
+ start = cur_sample - s->start_sample;
+ else
+ start = s->start_sample + s->nb_samples - cur_sample;
+
+ s->fade_samples(out_buf->extended_data, buf->extended_data,
+ nb_samples, av_frame_get_channels(buf),
+ s->type ? -1 : 1, start,
+ s->nb_samples, s->curve);
+ }
+
+ if (buf != out_buf)
+ av_frame_free(&buf);
+
+ return ff_filter_frame(outlink, out_buf);
+}
+
+static const AVFilterPad avfilter_af_afade_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad avfilter_af_afade_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_output,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_afade = {
+ .name = "afade",
+ .description = NULL_IF_CONFIG_SMALL("Fade in/out input audio."),
+ .query_formats = query_formats,
+ .priv_size = sizeof(AudioFadeContext),
+ .init = init,
+ .inputs = avfilter_af_afade_inputs,
+ .outputs = avfilter_af_afade_outputs,
+ .priv_class = &afade_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+};
+
+#endif /* CONFIG_AFADE_FILTER */
+
+#if CONFIG_ACROSSFADE_FILTER
+
+static const AVOption acrossfade_options[] = {
+ { "nb_samples", "set number of samples for cross fade duration", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64 = 44100}, 1, INT32_MAX/10, FLAGS },
+ { "ns", "set number of samples for cross fade duration", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64 = 44100}, 1, INT32_MAX/10, FLAGS },
+ { "duration", "set cross fade duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = 0. }, 0, 60, FLAGS },
+ { "d", "set cross fade duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = 0. }, 0, 60, FLAGS },
+ { "overlap", "overlap 1st stream end with 2nd stream start", OFFSET(overlap), AV_OPT_TYPE_BOOL, {.i64 = 1 }, 0, 1, FLAGS },
+ { "o", "overlap 1st stream end with 2nd stream start", OFFSET(overlap), AV_OPT_TYPE_BOOL, {.i64 = 1 }, 0, 1, FLAGS },
+ { "curve1", "set fade curve type for 1st stream", OFFSET(curve), AV_OPT_TYPE_INT, {.i64 = TRI }, 0, NB_CURVES - 1, FLAGS, "curve" },
+ { "c1", "set fade curve type for 1st stream", OFFSET(curve), AV_OPT_TYPE_INT, {.i64 = TRI }, 0, NB_CURVES - 1, FLAGS, "curve" },
+ { "tri", "linear slope", 0, AV_OPT_TYPE_CONST, {.i64 = TRI }, 0, 0, FLAGS, "curve" },
+ { "qsin", "quarter of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = QSIN }, 0, 0, FLAGS, "curve" },
+ { "esin", "exponential sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = ESIN }, 0, 0, FLAGS, "curve" },
+ { "hsin", "half of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = HSIN }, 0, 0, FLAGS, "curve" },
+ { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64 = LOG }, 0, 0, FLAGS, "curve" },
+ { "ipar", "inverted parabola", 0, AV_OPT_TYPE_CONST, {.i64 = IPAR }, 0, 0, FLAGS, "curve" },
+ { "qua", "quadratic", 0, AV_OPT_TYPE_CONST, {.i64 = QUA }, 0, 0, FLAGS, "curve" },
+ { "cub", "cubic", 0, AV_OPT_TYPE_CONST, {.i64 = CUB }, 0, 0, FLAGS, "curve" },
+ { "squ", "square root", 0, AV_OPT_TYPE_CONST, {.i64 = SQU }, 0, 0, FLAGS, "curve" },
+ { "cbr", "cubic root", 0, AV_OPT_TYPE_CONST, {.i64 = CBR }, 0, 0, FLAGS, "curve" },
+ { "par", "parabola", 0, AV_OPT_TYPE_CONST, {.i64 = PAR }, 0, 0, FLAGS, "curve" },
+ { "exp", "exponential", 0, AV_OPT_TYPE_CONST, {.i64 = EXP }, 0, 0, FLAGS, "curve" },
+ { "iqsin", "inverted quarter of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = IQSIN}, 0, 0, FLAGS, "curve" },
+ { "ihsin", "inverted half of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = IHSIN}, 0, 0, FLAGS, "curve" },
+ { "dese", "double-exponential seat", 0, AV_OPT_TYPE_CONST, {.i64 = DESE }, 0, 0, FLAGS, "curve" },
+ { "desi", "double-exponential sigmoid", 0, AV_OPT_TYPE_CONST, {.i64 = DESI }, 0, 0, FLAGS, "curve" },
+ { "curve2", "set fade curve type for 2nd stream", OFFSET(curve2), AV_OPT_TYPE_INT, {.i64 = TRI }, 0, NB_CURVES - 1, FLAGS, "curve" },
+ { "c2", "set fade curve type for 2nd stream", OFFSET(curve2), AV_OPT_TYPE_INT, {.i64 = TRI }, 0, NB_CURVES - 1, FLAGS, "curve" },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(acrossfade);
+
+#define CROSSFADE_PLANAR(name, type) \
+static void crossfade_samples_## name ##p(uint8_t **dst, uint8_t * const *cf0, \
+ uint8_t * const *cf1, \
+ int nb_samples, int channels, \
+ int curve0, int curve1) \
+{ \
+ int i, c; \
+ \
+ for (i = 0; i < nb_samples; i++) { \
+ double gain0 = fade_gain(curve0, nb_samples - 1 - i, nb_samples); \
+ double gain1 = fade_gain(curve1, i, nb_samples); \
+ for (c = 0; c < channels; c++) { \
+ type *d = (type *)dst[c]; \
+ const type *s0 = (type *)cf0[c]; \
+ const type *s1 = (type *)cf1[c]; \
+ \
+ d[i] = s0[i] * gain0 + s1[i] * gain1; \
+ } \
+ } \
+}
+
+#define CROSSFADE(name, type) \
+static void crossfade_samples_## name (uint8_t **dst, uint8_t * const *cf0, \
+ uint8_t * const *cf1, \
+ int nb_samples, int channels, \
+ int curve0, int curve1) \
+{ \
+ type *d = (type *)dst[0]; \
+ const type *s0 = (type *)cf0[0]; \
+ const type *s1 = (type *)cf1[0]; \
+ int i, c, k = 0; \
+ \
+ for (i = 0; i < nb_samples; i++) { \
+ double gain0 = fade_gain(curve0, nb_samples - 1 - i, nb_samples); \
+ double gain1 = fade_gain(curve1, i, nb_samples); \
+ for (c = 0; c < channels; c++, k++) \
+ d[k] = s0[k] * gain0 + s1[k] * gain1; \
+ } \
+}
+
+CROSSFADE_PLANAR(dbl, double)
+CROSSFADE_PLANAR(flt, float)
+CROSSFADE_PLANAR(s16, int16_t)
+CROSSFADE_PLANAR(s32, int32_t)
+
+CROSSFADE(dbl, double)
+CROSSFADE(flt, float)
+CROSSFADE(s16, int16_t)
+CROSSFADE(s32, int32_t)
+
+static int acrossfade_filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AudioFadeContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFrame *out, *cf[2] = { NULL };
+ int ret = 0, nb_samples;
+
+ if (s->crossfade_is_over) {
+ in->pts = s->pts;
+ s->pts += av_rescale_q(in->nb_samples,
+ (AVRational){ 1, outlink->sample_rate }, outlink->time_base);
+ return ff_filter_frame(outlink, in);
+ } else if (inlink == ctx->inputs[0]) {
+ av_audio_fifo_write(s->fifo[0], (void **)in->extended_data, in->nb_samples);
+
+ nb_samples = av_audio_fifo_size(s->fifo[0]) - s->nb_samples;
+ if (nb_samples > 0) {
+ out = ff_get_audio_buffer(outlink, nb_samples);
+ if (!out) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+ av_audio_fifo_read(s->fifo[0], (void **)out->extended_data, nb_samples);
+ out->pts = s->pts;
+ s->pts += av_rescale_q(nb_samples,
+ (AVRational){ 1, outlink->sample_rate }, outlink->time_base);
+ ret = ff_filter_frame(outlink, out);
+ }
+ } else if (av_audio_fifo_size(s->fifo[1]) < s->nb_samples) {
+ if (!s->overlap && av_audio_fifo_size(s->fifo[0]) > 0) {
+ nb_samples = av_audio_fifo_size(s->fifo[0]);
+
+ cf[0] = ff_get_audio_buffer(outlink, nb_samples);
+ out = ff_get_audio_buffer(outlink, nb_samples);
+ if (!out || !cf[0]) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+ av_audio_fifo_read(s->fifo[0], (void **)cf[0]->extended_data, nb_samples);
+
+ s->fade_samples(out->extended_data, cf[0]->extended_data, nb_samples,
+ outlink->channels, -1, nb_samples - 1, nb_samples, s->curve);
+ out->pts = s->pts;
+ s->pts += av_rescale_q(nb_samples,
+ (AVRational){ 1, outlink->sample_rate }, outlink->time_base);
+ ret = ff_filter_frame(outlink, out);
+ if (ret < 0)
+ goto fail;
+ }
+
+ av_audio_fifo_write(s->fifo[1], (void **)in->extended_data, in->nb_samples);
+ } else if (av_audio_fifo_size(s->fifo[1]) >= s->nb_samples) {
+ av_audio_fifo_write(s->fifo[1], (void **)in->extended_data, in->nb_samples);
+
+ if (s->overlap) {
+ cf[0] = ff_get_audio_buffer(outlink, s->nb_samples);
+ cf[1] = ff_get_audio_buffer(outlink, s->nb_samples);
+ out = ff_get_audio_buffer(outlink, s->nb_samples);
+ if (!out || !cf[0] || !cf[1]) {
+ av_frame_free(&out);
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ av_audio_fifo_read(s->fifo[0], (void **)cf[0]->extended_data, s->nb_samples);
+ av_audio_fifo_read(s->fifo[1], (void **)cf[1]->extended_data, s->nb_samples);
+
+ s->crossfade_samples(out->extended_data, cf[0]->extended_data,
+ cf[1]->extended_data,
+ s->nb_samples, av_frame_get_channels(in),
+ s->curve, s->curve2);
+ out->pts = s->pts;
+ s->pts += av_rescale_q(s->nb_samples,
+ (AVRational){ 1, outlink->sample_rate }, outlink->time_base);
+ ret = ff_filter_frame(outlink, out);
+ if (ret < 0)
+ goto fail;
+ } else {
+ out = ff_get_audio_buffer(outlink, s->nb_samples);
+ cf[1] = ff_get_audio_buffer(outlink, s->nb_samples);
+ if (!out || !cf[1]) {
+ ret = AVERROR(ENOMEM);
+ av_frame_free(&out);
+ goto fail;
+ }
+
+ av_audio_fifo_read(s->fifo[1], (void **)cf[1]->extended_data, s->nb_samples);
+
+ s->fade_samples(out->extended_data, cf[1]->extended_data, s->nb_samples,
+ outlink->channels, 1, 0, s->nb_samples, s->curve2);
+ out->pts = s->pts;
+ s->pts += av_rescale_q(s->nb_samples,
+ (AVRational){ 1, outlink->sample_rate }, outlink->time_base);
+ ret = ff_filter_frame(outlink, out);
+ if (ret < 0)
+ goto fail;
+ }
+
+ nb_samples = av_audio_fifo_size(s->fifo[1]);
+ if (nb_samples > 0) {
+ out = ff_get_audio_buffer(outlink, nb_samples);
+ if (!out) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ av_audio_fifo_read(s->fifo[1], (void **)out->extended_data, nb_samples);
+ out->pts = s->pts;
+ s->pts += av_rescale_q(nb_samples,
+ (AVRational){ 1, outlink->sample_rate }, outlink->time_base);
+ ret = ff_filter_frame(outlink, out);
+ }
+ s->crossfade_is_over = 1;
+ }
+
+fail:
+ av_frame_free(&in);
+ av_frame_free(&cf[0]);
+ av_frame_free(&cf[1]);
+ return ret;
+}
+
+static int acrossfade_request_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ AudioFadeContext *s = ctx->priv;
+ int ret = 0;
+
+ if (!s->cf0_eof) {
+ AVFilterLink *cf0 = ctx->inputs[0];
+ ret = ff_request_frame(cf0);
+ if (ret < 0 && ret != AVERROR_EOF)
+ return ret;
+ if (ret == AVERROR_EOF) {
+ s->cf0_eof = 1;
+ ret = 0;
+ }
+ } else {
+ AVFilterLink *cf1 = ctx->inputs[1];
+ int nb_samples = av_audio_fifo_size(s->fifo[1]);
+
+ ret = ff_request_frame(cf1);
+ if (ret == AVERROR_EOF && nb_samples > 0) {
+ AVFrame *out = ff_get_audio_buffer(outlink, nb_samples);
+ if (!out)
+ return AVERROR(ENOMEM);
+
+ av_audio_fifo_read(s->fifo[1], (void **)out->extended_data, nb_samples);
+ ret = ff_filter_frame(outlink, out);
+ }
+ }
+
+ return ret;
+}
+
+static int acrossfade_config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ AudioFadeContext *s = ctx->priv;
+
+ if (ctx->inputs[0]->sample_rate != ctx->inputs[1]->sample_rate) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Inputs must have the same sample rate "
+ "%d for in0 vs %d for in1\n",
+ ctx->inputs[0]->sample_rate, ctx->inputs[1]->sample_rate);
+ return AVERROR(EINVAL);
+ }
+
+ outlink->sample_rate = ctx->inputs[0]->sample_rate;
+ outlink->time_base = ctx->inputs[0]->time_base;
+ outlink->channel_layout = ctx->inputs[0]->channel_layout;
+ outlink->channels = ctx->inputs[0]->channels;
+
+ switch (outlink->format) {
+ case AV_SAMPLE_FMT_DBL: s->crossfade_samples = crossfade_samples_dbl; break;
+ case AV_SAMPLE_FMT_DBLP: s->crossfade_samples = crossfade_samples_dblp; break;
+ case AV_SAMPLE_FMT_FLT: s->crossfade_samples = crossfade_samples_flt; break;
+ case AV_SAMPLE_FMT_FLTP: s->crossfade_samples = crossfade_samples_fltp; break;
+ case AV_SAMPLE_FMT_S16: s->crossfade_samples = crossfade_samples_s16; break;
+ case AV_SAMPLE_FMT_S16P: s->crossfade_samples = crossfade_samples_s16p; break;
+ case AV_SAMPLE_FMT_S32: s->crossfade_samples = crossfade_samples_s32; break;
+ case AV_SAMPLE_FMT_S32P: s->crossfade_samples = crossfade_samples_s32p; break;
+ }
+
+ config_output(outlink);
+
+ s->fifo[0] = av_audio_fifo_alloc(outlink->format, outlink->channels, s->nb_samples);
+ s->fifo[1] = av_audio_fifo_alloc(outlink->format, outlink->channels, s->nb_samples);
+ if (!s->fifo[0] || !s->fifo[1])
+ return AVERROR(ENOMEM);
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ AudioFadeContext *s = ctx->priv;
+
+ av_audio_fifo_free(s->fifo[0]);
+ av_audio_fifo_free(s->fifo[1]);
+}
+
+static const AVFilterPad avfilter_af_acrossfade_inputs[] = {
+ {
+ .name = "crossfade0",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = acrossfade_filter_frame,
+ },
+ {
+ .name = "crossfade1",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = acrossfade_filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad avfilter_af_acrossfade_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .request_frame = acrossfade_request_frame,
+ .config_props = acrossfade_config_output,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_acrossfade = {
+ .name = "acrossfade",
+ .description = NULL_IF_CONFIG_SMALL("Cross fade two input audio streams."),
+ .query_formats = query_formats,
+ .priv_size = sizeof(AudioFadeContext),
+ .uninit = uninit,
+ .priv_class = &acrossfade_class,
+ .inputs = avfilter_af_acrossfade_inputs,
+ .outputs = avfilter_af_acrossfade_outputs,
+};
+
+#endif /* CONFIG_ACROSSFADE_FILTER */
diff --git a/libavfilter/af_afftfilt.c b/libavfilter/af_afftfilt.c
new file mode 100644
index 0000000000..3fc1a1b4c9
--- /dev/null
+++ b/libavfilter/af_afftfilt.c
@@ -0,0 +1,403 @@
+/*
+ * Copyright (c) 2016 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; either version 2.1 of the License,
+ * or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/audio_fifo.h"
+#include "libavutil/avstring.h"
+#include "libavfilter/internal.h"
+#include "libavutil/common.h"
+#include "libavutil/opt.h"
+#include "libavcodec/avfft.h"
+#include "libavutil/eval.h"
+#include "audio.h"
+#include "window_func.h"
+
+typedef struct AFFTFiltContext {
+ const AVClass *class;
+ char *real_str;
+ char *img_str;
+ int fft_bits;
+
+ FFTContext *fft, *ifft;
+ FFTComplex **fft_data;
+ int nb_exprs;
+ int window_size;
+ AVExpr **real;
+ AVExpr **imag;
+ AVAudioFifo *fifo;
+ int64_t pts;
+ int hop_size;
+ float overlap;
+ AVFrame *buffer;
+ int start, end;
+ int win_func;
+ float win_scale;
+ float *window_func_lut;
+} AFFTFiltContext;
+
+static const char *const var_names[] = { "sr", "b", "nb", "ch", "chs", "pts", NULL };
+enum { VAR_SAMPLE_RATE, VAR_BIN, VAR_NBBINS, VAR_CHANNEL, VAR_CHANNELS, VAR_PTS, VAR_VARS_NB };
+
+#define OFFSET(x) offsetof(AFFTFiltContext, x)
+#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption afftfilt_options[] = {
+ { "real", "set channels real expressions", OFFSET(real_str), AV_OPT_TYPE_STRING, {.str = "1" }, 0, 0, A },
+ { "imag", "set channels imaginary expressions", OFFSET(img_str), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, A },
+ { "win_size", "set window size", OFFSET(fft_bits), AV_OPT_TYPE_INT, {.i64=12}, 4, 17, A, "fft" },
+ { "w16", 0, 0, AV_OPT_TYPE_CONST, {.i64=4}, 0, 0, A, "fft" },
+ { "w32", 0, 0, AV_OPT_TYPE_CONST, {.i64=5}, 0, 0, A, "fft" },
+ { "w64", 0, 0, AV_OPT_TYPE_CONST, {.i64=6}, 0, 0, A, "fft" },
+ { "w128", 0, 0, AV_OPT_TYPE_CONST, {.i64=7}, 0, 0, A, "fft" },
+ { "w256", 0, 0, AV_OPT_TYPE_CONST, {.i64=8}, 0, 0, A, "fft" },
+ { "w512", 0, 0, AV_OPT_TYPE_CONST, {.i64=9}, 0, 0, A, "fft" },
+ { "w1024", 0, 0, AV_OPT_TYPE_CONST, {.i64=10}, 0, 0, A, "fft" },
+ { "w2048", 0, 0, AV_OPT_TYPE_CONST, {.i64=11}, 0, 0, A, "fft" },
+ { "w4096", 0, 0, AV_OPT_TYPE_CONST, {.i64=12}, 0, 0, A, "fft" },
+ { "w8192", 0, 0, AV_OPT_TYPE_CONST, {.i64=13}, 0, 0, A, "fft" },
+ { "w16384", 0, 0, AV_OPT_TYPE_CONST, {.i64=14}, 0, 0, A, "fft" },
+ { "w32768", 0, 0, AV_OPT_TYPE_CONST, {.i64=15}, 0, 0, A, "fft" },
+ { "w65536", 0, 0, AV_OPT_TYPE_CONST, {.i64=16}, 0, 0, A, "fft" },
+ { "w131072",0, 0, AV_OPT_TYPE_CONST, {.i64=17}, 0, 0, A, "fft" },
+ { "win_func", "set window function", OFFSET(win_func), AV_OPT_TYPE_INT, {.i64 = WFUNC_HANNING}, 0, NB_WFUNC-1, A, "win_func" },
+ { "rect", "Rectangular", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_RECT}, 0, 0, A, "win_func" },
+ { "bartlett", "Bartlett", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BARTLETT}, 0, 0, A, "win_func" },
+ { "hann", "Hann", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HANNING}, 0, 0, A, "win_func" },
+ { "hanning", "Hanning", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HANNING}, 0, 0, A, "win_func" },
+ { "hamming", "Hamming", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HAMMING}, 0, 0, A, "win_func" },
+ { "sine", "Sine", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_SINE}, 0, 0, A, "win_func" },
+ { "overlap", "set window overlap", OFFSET(overlap), AV_OPT_TYPE_FLOAT, {.dbl=0.75}, 0, 1, A },
+ { NULL },
+};
+
+AVFILTER_DEFINE_CLASS(afftfilt);
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AFFTFiltContext *s = ctx->priv;
+ char *saveptr = NULL;
+ int ret = 0, ch, i;
+ float overlap;
+ char *args;
+ const char *last_expr = "1";
+
+ s->fft = av_fft_init(s->fft_bits, 0);
+ s->ifft = av_fft_init(s->fft_bits, 1);
+ if (!s->fft || !s->ifft)
+ return AVERROR(ENOMEM);
+
+ s->window_size = 1 << s->fft_bits;
+
+ s->fft_data = av_calloc(inlink->channels, sizeof(*s->fft_data));
+ if (!s->fft_data)
+ return AVERROR(ENOMEM);
+
+ for (ch = 0; ch < inlink->channels; ch++) {
+ s->fft_data[ch] = av_calloc(s->window_size, sizeof(**s->fft_data));
+ if (!s->fft_data[ch])
+ return AVERROR(ENOMEM);
+ }
+
+ s->real = av_calloc(inlink->channels, sizeof(*s->real));
+ if (!s->real)
+ return AVERROR(ENOMEM);
+
+ s->imag = av_calloc(inlink->channels, sizeof(*s->imag));
+ if (!s->imag)
+ return AVERROR(ENOMEM);
+
+ args = av_strdup(s->real_str);
+ if (!args)
+ return AVERROR(ENOMEM);
+
+ for (ch = 0; ch < inlink->channels; ch++) {
+ char *arg = av_strtok(ch == 0 ? args : NULL, "|", &saveptr);
+
+ ret = av_expr_parse(&s->real[ch], arg ? arg : last_expr, var_names,
+ NULL, NULL, NULL, NULL, 0, ctx);
+ if (ret < 0)
+ break;
+ if (arg)
+ last_expr = arg;
+ s->nb_exprs++;
+ }
+
+ av_free(args);
+
+ args = av_strdup(s->img_str ? s->img_str : s->real_str);
+ if (!args)
+ return AVERROR(ENOMEM);
+
+ for (ch = 0; ch < inlink->channels; ch++) {
+ char *arg = av_strtok(ch == 0 ? args : NULL, "|", &saveptr);
+
+ ret = av_expr_parse(&s->imag[ch], arg ? arg : last_expr, var_names,
+ NULL, NULL, NULL, NULL, 0, ctx);
+ if (ret < 0)
+ break;
+ if (arg)
+ last_expr = arg;
+ }
+
+ av_free(args);
+
+ s->fifo = av_audio_fifo_alloc(inlink->format, inlink->channels, s->window_size);
+ if (!s->fifo)
+ return AVERROR(ENOMEM);
+
+ s->window_func_lut = av_realloc_f(s->window_func_lut, s->window_size,
+ sizeof(*s->window_func_lut));
+ if (!s->window_func_lut)
+ return AVERROR(ENOMEM);
+ ff_generate_window_func(s->window_func_lut, s->window_size, s->win_func, &overlap);
+ if (s->overlap == 1)
+ s->overlap = overlap;
+
+ for (s->win_scale = 0, i = 0; i < s->window_size; i++) {
+ s->win_scale += s->window_func_lut[i] * s->window_func_lut[i];
+ }
+
+ s->hop_size = s->window_size * (1 - s->overlap);
+ if (s->hop_size <= 0)
+ return AVERROR(EINVAL);
+
+ s->buffer = ff_get_audio_buffer(inlink, s->window_size * 2);
+ if (!s->buffer)
+ return AVERROR(ENOMEM);
+
+ return ret;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AFFTFiltContext *s = ctx->priv;
+ const int window_size = s->window_size;
+ const float f = 1. / s->win_scale;
+ double values[VAR_VARS_NB];
+ AVFrame *out, *in = NULL;
+ int ch, n, ret, i, j, k;
+ int start = s->start, end = s->end;
+
+ av_audio_fifo_write(s->fifo, (void **)frame->extended_data, frame->nb_samples);
+ av_frame_free(&frame);
+
+ while (av_audio_fifo_size(s->fifo) >= window_size) {
+ if (!in) {
+ in = ff_get_audio_buffer(outlink, window_size);
+ if (!in)
+ return AVERROR(ENOMEM);
+ }
+
+ ret = av_audio_fifo_peek(s->fifo, (void **)in->extended_data, window_size);
+ if (ret < 0)
+ break;
+
+ for (ch = 0; ch < inlink->channels; ch++) {
+ const float *src = (float *)in->extended_data[ch];
+ FFTComplex *fft_data = s->fft_data[ch];
+
+ for (n = 0; n < in->nb_samples; n++) {
+ fft_data[n].re = src[n] * s->window_func_lut[n];
+ fft_data[n].im = 0;
+ }
+
+ for (; n < window_size; n++) {
+ fft_data[n].re = 0;
+ fft_data[n].im = 0;
+ }
+ }
+
+ values[VAR_PTS] = s->pts;
+ values[VAR_SAMPLE_RATE] = inlink->sample_rate;
+ values[VAR_NBBINS] = window_size / 2;
+ values[VAR_CHANNELS] = inlink->channels;
+
+ for (ch = 0; ch < inlink->channels; ch++) {
+ FFTComplex *fft_data = s->fft_data[ch];
+ float *buf = (float *)s->buffer->extended_data[ch];
+ int x;
+
+ values[VAR_CHANNEL] = ch;
+
+ av_fft_permute(s->fft, fft_data);
+ av_fft_calc(s->fft, fft_data);
+
+ for (n = 0; n < window_size / 2; n++) {
+ float fr, fi;
+
+ values[VAR_BIN] = n;
+
+ fr = av_expr_eval(s->real[ch], values, s);
+ fi = av_expr_eval(s->imag[ch], values, s);
+
+ fft_data[n].re *= fr;
+ fft_data[n].im *= fi;
+ }
+
+ for (n = window_size / 2 + 1, x = window_size / 2 - 1; n < window_size; n++, x--) {
+ fft_data[n].re = fft_data[x].re;
+ fft_data[n].im = -fft_data[x].im;
+ }
+
+ av_fft_permute(s->ifft, fft_data);
+ av_fft_calc(s->ifft, fft_data);
+
+ start = s->start;
+ end = s->end;
+ k = end;
+ for (i = 0, j = start; j < k && i < window_size; i++, j++) {
+ buf[j] += s->fft_data[ch][i].re * f;
+ }
+
+ for (; i < window_size; i++, j++) {
+ buf[j] = s->fft_data[ch][i].re * f;
+ }
+
+ start += s->hop_size;
+ end = j;
+ }
+
+ s->start = start;
+ s->end = end;
+
+ if (start >= window_size) {
+ float *dst, *buf;
+
+ start -= window_size;
+ end -= window_size;
+
+ s->start = start;
+ s->end = end;
+
+ out = ff_get_audio_buffer(outlink, window_size);
+ if (!out) {
+ ret = AVERROR(ENOMEM);
+ break;
+ }
+
+ out->pts = s->pts;
+ s->pts += window_size;
+
+ for (ch = 0; ch < inlink->channels; ch++) {
+ dst = (float *)out->extended_data[ch];
+ buf = (float *)s->buffer->extended_data[ch];
+
+ for (n = 0; n < window_size; n++) {
+ dst[n] = buf[n] * (1 - s->overlap);
+ }
+ memmove(buf, buf + window_size, window_size * 4);
+ }
+
+ ret = ff_filter_frame(outlink, out);
+ if (ret < 0)
+ break;
+ }
+
+ av_audio_fifo_drain(s->fifo, s->hop_size);
+ }
+
+ av_frame_free(&in);
+ return ret;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *formats;
+ AVFilterChannelLayouts *layouts;
+ static const enum AVSampleFormat sample_fmts[] = {
+ AV_SAMPLE_FMT_FLTP,
+ AV_SAMPLE_FMT_NONE
+ };
+ int ret;
+
+ layouts = ff_all_channel_counts();
+ if (!layouts)
+ return AVERROR(ENOMEM);
+ ret = ff_set_common_channel_layouts(ctx, layouts);
+ if (ret < 0)
+ return ret;
+
+ formats = ff_make_format_list(sample_fmts);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ret = ff_set_common_formats(ctx, formats);
+ if (ret < 0)
+ return ret;
+
+ formats = ff_all_samplerates();
+ if (!formats)
+ return AVERROR(ENOMEM);
+ return ff_set_common_samplerates(ctx, formats);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ AFFTFiltContext *s = ctx->priv;
+ int i;
+
+ av_fft_end(s->fft);
+ av_fft_end(s->ifft);
+
+ for (i = 0; i < s->nb_exprs; i++) {
+ if (s->fft_data)
+ av_freep(&s->fft_data[i]);
+ }
+ av_freep(&s->fft_data);
+
+ for (i = 0; i < s->nb_exprs; i++) {
+ av_expr_free(s->real[i]);
+ av_expr_free(s->imag[i]);
+ }
+
+ av_freep(&s->real);
+ av_freep(&s->imag);
+ av_frame_free(&s->buffer);
+}
+
+static const AVFilterPad inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_afftfilt = {
+ .name = "afftfilt",
+ .description = NULL_IF_CONFIG_SMALL("Apply arbitrary expressions to samples in frequency domain."),
+ .priv_size = sizeof(AFFTFiltContext),
+ .priv_class = &afftfilt_class,
+ .inputs = inputs,
+ .outputs = outputs,
+ .query_formats = query_formats,
+ .uninit = uninit,
+};
diff --git a/libavfilter/af_aformat.c b/libavfilter/af_aformat.c
index f0746737dc..e43149561a 100644
--- a/libavfilter/af_aformat.c
+++ b/libavfilter/af_aformat.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2011 Mina Nagy Zaki
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -47,23 +47,20 @@ typedef struct AFormatContext {
#define OFFSET(x) offsetof(AFormatContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
-static const AVOption options[] = {
- { "sample_fmts", "A comma-separated list of sample formats.", OFFSET(formats_str), AV_OPT_TYPE_STRING, .flags = A },
- { "sample_rates", "A comma-separated list of sample rates.", OFFSET(sample_rates_str), AV_OPT_TYPE_STRING, .flags = A },
- { "channel_layouts", "A comma-separated list of channel layouts.", OFFSET(channel_layouts_str), AV_OPT_TYPE_STRING, .flags = A },
- { NULL },
+#define F AV_OPT_FLAG_FILTERING_PARAM
+static const AVOption aformat_options[] = {
+ { "sample_fmts", "A '|'-separated list of sample formats.", OFFSET(formats_str), AV_OPT_TYPE_STRING, .flags = A|F },
+ { "sample_rates", "A '|'-separated list of sample rates.", OFFSET(sample_rates_str), AV_OPT_TYPE_STRING, .flags = A|F },
+ { "channel_layouts", "A '|'-separated list of channel layouts.", OFFSET(channel_layouts_str), AV_OPT_TYPE_STRING, .flags = A|F },
+ { NULL }
};
-static const AVClass aformat_class = {
- .class_name = "aformat filter",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(aformat);
-#define PARSE_FORMATS(str, type, list, add_to_list, get_fmt, none, desc) \
+#define PARSE_FORMATS(str, type, list, add_to_list, unref_fn, get_fmt, none, desc) \
do { \
char *next, *cur = str, sep; \
+ int ret; \
\
if (str && strchr(str, ',')) { \
av_log(ctx, AV_LOG_WARNING, "This syntax is deprecated, use '|' to "\
@@ -82,7 +79,10 @@ do { \
av_log(ctx, AV_LOG_ERROR, "Error parsing " desc ": %s.\n", cur);\
return AVERROR(EINVAL); \
} \
- add_to_list(&list, fmt); \
+ if ((ret = add_to_list(&list, fmt)) < 0) { \
+ unref_fn(&list); \
+ return ret; \
+ } \
\
cur = next; \
} \
@@ -99,11 +99,11 @@ static av_cold int init(AVFilterContext *ctx)
AFormatContext *s = ctx->priv;
PARSE_FORMATS(s->formats_str, enum AVSampleFormat, s->formats,
- ff_add_format, av_get_sample_fmt, AV_SAMPLE_FMT_NONE, "sample format");
- PARSE_FORMATS(s->sample_rates_str, int, s->sample_rates, ff_add_format,
+ ff_add_format, ff_formats_unref, av_get_sample_fmt, AV_SAMPLE_FMT_NONE, "sample format");
+ PARSE_FORMATS(s->sample_rates_str, int, s->sample_rates, ff_add_format, ff_formats_unref,
get_sample_rate, 0, "sample rate");
PARSE_FORMATS(s->channel_layouts_str, uint64_t, s->channel_layouts,
- ff_add_channel_layout, av_get_channel_layout, 0,
+ ff_add_channel_layout, ff_channel_layouts_unref, av_get_channel_layout, 0,
"channel layout");
return 0;
@@ -112,15 +112,18 @@ static av_cold int init(AVFilterContext *ctx)
static int query_formats(AVFilterContext *ctx)
{
AFormatContext *s = ctx->priv;
+ int ret;
- ff_set_common_formats(ctx, s->formats ? s->formats :
+ ret = ff_set_common_formats(ctx, s->formats ? s->formats :
ff_all_formats(AVMEDIA_TYPE_AUDIO));
- ff_set_common_samplerates(ctx, s->sample_rates ? s->sample_rates :
+ if (ret < 0)
+ return ret;
+ ret = ff_set_common_samplerates(ctx, s->sample_rates ? s->sample_rates :
ff_all_samplerates());
- ff_set_common_channel_layouts(ctx, s->channel_layouts ? s->channel_layouts :
- ff_all_channel_layouts());
-
- return 0;
+ if (ret < 0)
+ return ret;
+ return ff_set_common_channel_layouts(ctx, s->channel_layouts ? s->channel_layouts :
+ ff_all_channel_counts());
}
static const AVFilterPad avfilter_af_aformat_inputs[] = {
@@ -146,7 +149,6 @@ AVFilter ff_af_aformat = {
.query_formats = query_formats,
.priv_size = sizeof(AFormatContext),
.priv_class = &aformat_class,
-
.inputs = avfilter_af_aformat_inputs,
.outputs = avfilter_af_aformat_outputs,
};
diff --git a/libavfilter/af_agate.c b/libavfilter/af_agate.c
new file mode 100644
index 0000000000..328e25ba84
--- /dev/null
+++ b/libavfilter/af_agate.c
@@ -0,0 +1,440 @@
+/*
+ * Copyright (C) 2001-2010 Krzysztof Foltman, Markus Schmidt, Thor Harald Johansen, Damien Zammit
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Audio (Sidechain) Gate filter
+ */
+
+#include "libavutil/audio_fifo.h"
+#include "libavutil/avassert.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#include "audio.h"
+#include "formats.h"
+#include "hermite.h"
+
+typedef struct AudioGateContext {
+ const AVClass *class;
+
+ double level_in;
+ double level_sc;
+ double attack;
+ double release;
+ double threshold;
+ double ratio;
+ double knee;
+ double makeup;
+ double range;
+ int link;
+ int detection;
+
+ double thres;
+ double knee_start;
+ double lin_knee_stop;
+ double knee_stop;
+ double lin_slope;
+ double attack_coeff;
+ double release_coeff;
+
+ AVAudioFifo *fifo[2];
+ int64_t pts;
+} AudioGateContext;
+
+#define OFFSET(x) offsetof(AudioGateContext, x)
+#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption options[] = {
+ { "level_in", "set input level", OFFSET(level_in), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0.015625, 64, A },
+ { "range", "set max gain reduction", OFFSET(range), AV_OPT_TYPE_DOUBLE, {.dbl=0.06125}, 0, 1, A },
+ { "threshold", "set threshold", OFFSET(threshold), AV_OPT_TYPE_DOUBLE, {.dbl=0.125}, 0, 1, A },
+ { "ratio", "set ratio", OFFSET(ratio), AV_OPT_TYPE_DOUBLE, {.dbl=2}, 1, 9000, A },
+ { "attack", "set attack", OFFSET(attack), AV_OPT_TYPE_DOUBLE, {.dbl=20}, 0.01, 9000, A },
+ { "release", "set release", OFFSET(release), AV_OPT_TYPE_DOUBLE, {.dbl=250}, 0.01, 9000, A },
+ { "makeup", "set makeup gain", OFFSET(makeup), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 1, 64, A },
+ { "knee", "set knee", OFFSET(knee), AV_OPT_TYPE_DOUBLE, {.dbl=2.828427125}, 1, 8, A },
+ { "detection", "set detection", OFFSET(detection), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, A, "detection" },
+ { "peak", 0, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, A, "detection" },
+ { "rms", 0, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, A, "detection" },
+ { "link", "set link", OFFSET(link), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, A, "link" },
+ { "average", 0, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, A, "link" },
+ { "maximum", 0, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, A, "link" },
+ { "level_sc", "set sidechain gain", OFFSET(level_sc), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0.015625, 64, A },
+ { NULL }
+};
+
+static int agate_config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AudioGateContext *s = ctx->priv;
+ double lin_threshold = s->threshold;
+ double lin_knee_sqrt = sqrt(s->knee);
+ double lin_knee_start;
+
+ if (s->detection)
+ lin_threshold *= lin_threshold;
+
+ s->attack_coeff = FFMIN(1., 1. / (s->attack * inlink->sample_rate / 4000.));
+ s->release_coeff = FFMIN(1., 1. / (s->release * inlink->sample_rate / 4000.));
+ s->lin_knee_stop = lin_threshold * lin_knee_sqrt;
+ lin_knee_start = lin_threshold / lin_knee_sqrt;
+ s->thres = log(lin_threshold);
+ s->knee_start = log(lin_knee_start);
+ s->knee_stop = log(s->lin_knee_stop);
+
+ return 0;
+}
+
+// A fake infinity value (because real infinity may break some hosts)
+#define FAKE_INFINITY (65536.0 * 65536.0)
+
+// Check for infinity (with appropriate-ish tolerance)
+#define IS_FAKE_INFINITY(value) (fabs(value-FAKE_INFINITY) < 1.0)
+
+static double output_gain(double lin_slope, double ratio, double thres,
+ double knee, double knee_start, double knee_stop,
+ double lin_knee_stop, double range)
+{
+ if (lin_slope < lin_knee_stop) {
+ double slope = log(lin_slope);
+ double tratio = ratio;
+ double gain = 0.;
+ double delta = 0.;
+
+ if (IS_FAKE_INFINITY(ratio))
+ tratio = 1000.;
+ gain = (slope - thres) * tratio + thres;
+ delta = tratio;
+
+ if (knee > 1. && slope > knee_start) {
+ gain = hermite_interpolation(slope, knee_start, knee_stop, ((knee_start - thres) * tratio + thres), knee_stop, delta, 1.);
+ }
+ return FFMAX(range, exp(gain - slope));
+ }
+
+ return 1.;
+}
+
+static void gate(AudioGateContext *s,
+ const double *src, double *dst, const double *scsrc,
+ int nb_samples, double level_in, double level_sc,
+ AVFilterLink *inlink, AVFilterLink *sclink)
+{
+ const double makeup = s->makeup;
+ const double attack_coeff = s->attack_coeff;
+ const double release_coeff = s->release_coeff;
+ int n, c;
+
+ for (n = 0; n < nb_samples; n++, src += inlink->channels, dst += inlink->channels, scsrc += sclink->channels) {
+ double abs_sample = fabs(scsrc[0] * level_sc), gain = 1.0;
+
+ if (s->link == 1) {
+ for (c = 1; c < sclink->channels; c++)
+ abs_sample = FFMAX(fabs(scsrc[c] * level_sc), abs_sample);
+ } else {
+ for (c = 1; c < sclink->channels; c++)
+ abs_sample += fabs(scsrc[c] * level_sc);
+
+ abs_sample /= sclink->channels;
+ }
+
+ if (s->detection)
+ abs_sample *= abs_sample;
+
+ s->lin_slope += (abs_sample - s->lin_slope) * (abs_sample > s->lin_slope ? attack_coeff : release_coeff);
+ if (s->lin_slope > 0.0)
+ gain = output_gain(s->lin_slope, s->ratio, s->thres,
+ s->knee, s->knee_start, s->knee_stop,
+ s->lin_knee_stop, s->range);
+
+ for (c = 0; c < inlink->channels; c++)
+ dst[c] = src[c] * level_in * gain * makeup;
+ }
+}
+
+#if CONFIG_AGATE_FILTER
+
+#define agate_options options
+AVFILTER_DEFINE_CLASS(agate);
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *formats = NULL;
+ AVFilterChannelLayouts *layouts;
+ int ret;
+
+ if ((ret = ff_add_format(&formats, AV_SAMPLE_FMT_DBL)) < 0)
+ return ret;
+ ret = ff_set_common_formats(ctx, formats);
+ if (ret < 0)
+ return ret;
+
+ layouts = ff_all_channel_counts();
+ if (!layouts)
+ return AVERROR(ENOMEM);
+ ret = ff_set_common_channel_layouts(ctx, layouts);
+ if (ret < 0)
+ return ret;
+
+ formats = ff_all_samplerates();
+ if (!formats)
+ return AVERROR(ENOMEM);
+
+ return ff_set_common_samplerates(ctx, formats);
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ const double *src = (const double *)in->data[0];
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AudioGateContext *s = ctx->priv;
+ AVFrame *out;
+ double *dst;
+
+ if (av_frame_is_writable(in)) {
+ out = in;
+ } else {
+ out = ff_get_audio_buffer(inlink, in->nb_samples);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+ }
+ dst = (double *)out->data[0];
+
+ gate(s, src, dst, src, in->nb_samples,
+ s->level_in, s->level_in, inlink, inlink);
+
+ if (out != in)
+ av_frame_free(&in);
+ return ff_filter_frame(outlink, out);
+}
+
+static const AVFilterPad inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ .config_props = agate_config_input,
+ },
+ { NULL }
+};
+
+static const AVFilterPad outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_agate = {
+ .name = "agate",
+ .description = NULL_IF_CONFIG_SMALL("Audio gate."),
+ .query_formats = query_formats,
+ .priv_size = sizeof(AudioGateContext),
+ .priv_class = &agate_class,
+ .inputs = inputs,
+ .outputs = outputs,
+};
+
+#endif /* CONFIG_AGATE_FILTER */
+
+#if CONFIG_SIDECHAINGATE_FILTER
+
+#define sidechaingate_options options
+AVFILTER_DEFINE_CLASS(sidechaingate);
+
+static int scfilter_frame(AVFilterLink *link, AVFrame *frame)
+{
+ AVFilterContext *ctx = link->dst;
+ AudioGateContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFrame *out, *in[2] = { NULL };
+ double *dst;
+ int nb_samples;
+ int i;
+
+ for (i = 0; i < 2; i++)
+ if (link == ctx->inputs[i])
+ break;
+ av_assert0(i < 2);
+ av_audio_fifo_write(s->fifo[i], (void **)frame->extended_data,
+ frame->nb_samples);
+ av_frame_free(&frame);
+
+ nb_samples = FFMIN(av_audio_fifo_size(s->fifo[0]), av_audio_fifo_size(s->fifo[1]));
+ if (!nb_samples)
+ return 0;
+
+ out = ff_get_audio_buffer(outlink, nb_samples);
+ if (!out)
+ return AVERROR(ENOMEM);
+ for (i = 0; i < 2; i++) {
+ in[i] = ff_get_audio_buffer(ctx->inputs[i], nb_samples);
+ if (!in[i]) {
+ av_frame_free(&in[0]);
+ av_frame_free(&in[1]);
+ av_frame_free(&out);
+ return AVERROR(ENOMEM);
+ }
+ av_audio_fifo_read(s->fifo[i], (void **)in[i]->data, nb_samples);
+ }
+
+ dst = (double *)out->data[0];
+ out->pts = s->pts;
+ s->pts += nb_samples;
+
+ gate(s, (double *)in[0]->data[0], dst,
+ (double *)in[1]->data[0], nb_samples,
+ s->level_in, s->level_sc,
+ ctx->inputs[0], ctx->inputs[1]);
+
+ av_frame_free(&in[0]);
+ av_frame_free(&in[1]);
+
+ return ff_filter_frame(outlink, out);
+}
+
+static int screquest_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ AudioGateContext *s = ctx->priv;
+ int i;
+
+ /* get a frame on each input */
+ for (i = 0; i < 2; i++) {
+ AVFilterLink *inlink = ctx->inputs[i];
+ if (!av_audio_fifo_size(s->fifo[i]))
+ return ff_request_frame(inlink);
+ }
+
+ return 0;
+}
+
+static int scquery_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *formats;
+ AVFilterChannelLayouts *layouts = NULL;
+ static const enum AVSampleFormat sample_fmts[] = {
+ AV_SAMPLE_FMT_DBL,
+ AV_SAMPLE_FMT_NONE
+ };
+ int ret, i;
+
+ if (!ctx->inputs[0]->in_channel_layouts ||
+ !ctx->inputs[0]->in_channel_layouts->nb_channel_layouts) {
+ av_log(ctx, AV_LOG_WARNING,
+ "No channel layout for input 1\n");
+ return AVERROR(EAGAIN);
+ }
+
+ if ((ret = ff_add_channel_layout(&layouts, ctx->inputs[0]->in_channel_layouts->channel_layouts[0])) < 0 ||
+ (ret = ff_channel_layouts_ref(layouts, &ctx->outputs[0]->in_channel_layouts)) < 0)
+ return ret;
+
+ for (i = 0; i < 2; i++) {
+ layouts = ff_all_channel_counts();
+ if ((ret = ff_channel_layouts_ref(layouts, &ctx->inputs[i]->out_channel_layouts)) < 0)
+ return ret;
+ }
+
+ formats = ff_make_format_list(sample_fmts);
+ if ((ret = ff_set_common_formats(ctx, formats)) < 0)
+ return ret;
+
+ formats = ff_all_samplerates();
+ return ff_set_common_samplerates(ctx, formats);
+}
+
+static int scconfig_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ AudioGateContext *s = ctx->priv;
+
+ if (ctx->inputs[0]->sample_rate != ctx->inputs[1]->sample_rate) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Inputs must have the same sample rate "
+ "%d for in0 vs %d for in1\n",
+ ctx->inputs[0]->sample_rate, ctx->inputs[1]->sample_rate);
+ return AVERROR(EINVAL);
+ }
+
+ outlink->sample_rate = ctx->inputs[0]->sample_rate;
+ outlink->time_base = ctx->inputs[0]->time_base;
+ outlink->channel_layout = ctx->inputs[0]->channel_layout;
+ outlink->channels = ctx->inputs[0]->channels;
+
+ s->fifo[0] = av_audio_fifo_alloc(ctx->inputs[0]->format, ctx->inputs[0]->channels, 1024);
+ s->fifo[1] = av_audio_fifo_alloc(ctx->inputs[1]->format, ctx->inputs[1]->channels, 1024);
+ if (!s->fifo[0] || !s->fifo[1])
+ return AVERROR(ENOMEM);
+
+
+ agate_config_input(ctx->inputs[0]);
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ AudioGateContext *s = ctx->priv;
+
+ av_audio_fifo_free(s->fifo[0]);
+ av_audio_fifo_free(s->fifo[1]);
+}
+
+static const AVFilterPad sidechaingate_inputs[] = {
+ {
+ .name = "main",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = scfilter_frame,
+ },{
+ .name = "sidechain",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = scfilter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad sidechaingate_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = scconfig_output,
+ .request_frame = screquest_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_sidechaingate = {
+ .name = "sidechaingate",
+ .description = NULL_IF_CONFIG_SMALL("Audio sidechain gate."),
+ .priv_size = sizeof(AudioGateContext),
+ .priv_class = &sidechaingate_class,
+ .query_formats = scquery_formats,
+ .uninit = uninit,
+ .inputs = sidechaingate_inputs,
+ .outputs = sidechaingate_outputs,
+};
+#endif /* CONFIG_SIDECHAINGATE_FILTER */
diff --git a/libavfilter/af_alimiter.c b/libavfilter/af_alimiter.c
new file mode 100644
index 0000000000..46211a710a
--- /dev/null
+++ b/libavfilter/af_alimiter.c
@@ -0,0 +1,370 @@
+/*
+ * Copyright (C) 2001-2010 Krzysztof Foltman, Markus Schmidt, Thor Harald Johansen and others
+ * Copyright (c) 2015 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Lookahead limiter filter
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/common.h"
+#include "libavutil/opt.h"
+
+#include "audio.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+
+typedef struct AudioLimiterContext {
+ const AVClass *class;
+
+ double limit;
+ double attack;
+ double release;
+ double att;
+ double level_in;
+ double level_out;
+ int auto_release;
+ int auto_level;
+ double asc;
+ int asc_c;
+ int asc_pos;
+ double asc_coeff;
+
+ double *buffer;
+ int buffer_size;
+ int pos;
+ int *nextpos;
+ double *nextdelta;
+
+ double delta;
+ int nextiter;
+ int nextlen;
+ int asc_changed;
+} AudioLimiterContext;
+
+#define OFFSET(x) offsetof(AudioLimiterContext, x)
+#define A AV_OPT_FLAG_AUDIO_PARAM
+#define F AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption alimiter_options[] = {
+ { "level_in", "set input level", OFFSET(level_in), AV_OPT_TYPE_DOUBLE, {.dbl=1},.015625, 64, A|F },
+ { "level_out", "set output level", OFFSET(level_out), AV_OPT_TYPE_DOUBLE, {.dbl=1},.015625, 64, A|F },
+ { "limit", "set limit", OFFSET(limit), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0.0625, 1, A|F },
+ { "attack", "set attack", OFFSET(attack), AV_OPT_TYPE_DOUBLE, {.dbl=5}, 0.1, 80, A|F },
+ { "release", "set release", OFFSET(release), AV_OPT_TYPE_DOUBLE, {.dbl=50}, 1, 8000, A|F },
+ { "asc", "enable asc", OFFSET(auto_release), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, A|F },
+ { "asc_level", "set asc level", OFFSET(asc_coeff), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 1, A|F },
+ { "level", "auto level", OFFSET(auto_level), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, A|F },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(alimiter);
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ AudioLimiterContext *s = ctx->priv;
+
+ s->attack /= 1000.;
+ s->release /= 1000.;
+ s->att = 1.;
+ s->asc_pos = -1;
+ s->asc_coeff = pow(0.5, s->asc_coeff - 0.5) * 2 * -1;
+
+ return 0;
+}
+
+static double get_rdelta(AudioLimiterContext *s, double release, int sample_rate,
+ double peak, double limit, double patt, int asc)
+{
+ double rdelta = (1.0 - patt) / (sample_rate * release);
+
+ if (asc && s->auto_release && s->asc_c > 0) {
+ double a_att = limit / (s->asc_coeff * s->asc) * (double)s->asc_c;
+
+ if (a_att > patt) {
+ double delta = FFMAX((a_att - patt) / (sample_rate * release), rdelta / 10);
+
+ if (delta < rdelta)
+ rdelta = delta;
+ }
+ }
+
+ return rdelta;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AudioLimiterContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ const double *src = (const double *)in->data[0];
+ const int channels = inlink->channels;
+ const int buffer_size = s->buffer_size;
+ double *dst, *buffer = s->buffer;
+ const double release = s->release;
+ const double limit = s->limit;
+ double *nextdelta = s->nextdelta;
+ double level = s->auto_level ? 1 / limit : 1;
+ const double level_out = s->level_out;
+ const double level_in = s->level_in;
+ int *nextpos = s->nextpos;
+ AVFrame *out;
+ double *buf;
+ int n, c, i;
+
+ if (av_frame_is_writable(in)) {
+ out = in;
+ } else {
+ out = ff_get_audio_buffer(inlink, in->nb_samples);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+ }
+ dst = (double *)out->data[0];
+
+ for (n = 0; n < in->nb_samples; n++) {
+ double peak = 0;
+
+ for (c = 0; c < channels; c++) {
+ double sample = src[c] * level_in;
+
+ buffer[s->pos + c] = sample;
+ peak = FFMAX(peak, fabs(sample));
+ }
+
+ if (s->auto_release && peak > limit) {
+ s->asc += peak;
+ s->asc_c++;
+ }
+
+ if (peak > limit) {
+ double patt = FFMIN(limit / peak, 1.);
+ double rdelta = get_rdelta(s, release, inlink->sample_rate,
+ peak, limit, patt, 0);
+ double delta = (limit / peak - s->att) / buffer_size * channels;
+ int found = 0;
+
+ if (delta < s->delta) {
+ s->delta = delta;
+ nextpos[0] = s->pos;
+ nextpos[1] = -1;
+ nextdelta[0] = rdelta;
+ s->nextlen = 1;
+ s->nextiter= 0;
+ } else {
+ for (i = s->nextiter; i < s->nextiter + s->nextlen; i++) {
+ int j = i % buffer_size;
+ double ppeak, pdelta;
+
+ ppeak = fabs(buffer[nextpos[j]]) > fabs(buffer[nextpos[j] + 1]) ?
+ fabs(buffer[nextpos[j]]) : fabs(buffer[nextpos[j] + 1]);
+ pdelta = (limit / peak - limit / ppeak) / (((buffer_size - nextpos[j] + s->pos) % buffer_size) / channels);
+ if (pdelta < nextdelta[j]) {
+ nextdelta[j] = pdelta;
+ found = 1;
+ break;
+ }
+ }
+ if (found) {
+ s->nextlen = i - s->nextiter + 1;
+ nextpos[(s->nextiter + s->nextlen) % buffer_size] = s->pos;
+ nextdelta[(s->nextiter + s->nextlen) % buffer_size] = rdelta;
+ nextpos[(s->nextiter + s->nextlen + 1) % buffer_size] = -1;
+ s->nextlen++;
+ }
+ }
+ }
+
+ buf = &s->buffer[(s->pos + channels) % buffer_size];
+ peak = 0;
+ for (c = 0; c < channels; c++) {
+ double sample = buf[c];
+
+ peak = FFMAX(peak, fabs(sample));
+ }
+
+ if (s->pos == s->asc_pos && !s->asc_changed)
+ s->asc_pos = -1;
+
+ if (s->auto_release && s->asc_pos == -1 && peak > limit) {
+ s->asc -= peak;
+ s->asc_c--;
+ }
+
+ s->att += s->delta;
+
+ for (c = 0; c < channels; c++)
+ dst[c] = buf[c] * s->att;
+
+ if ((s->pos + channels) % buffer_size == nextpos[s->nextiter]) {
+ if (s->auto_release) {
+ s->delta = get_rdelta(s, release, inlink->sample_rate,
+ peak, limit, s->att, 1);
+ if (s->nextlen > 1) {
+ int pnextpos = nextpos[(s->nextiter + 1) % buffer_size];
+ double ppeak = fabs(buffer[pnextpos]) > fabs(buffer[pnextpos + 1]) ?
+ fabs(buffer[pnextpos]) :
+ fabs(buffer[pnextpos + 1]);
+ double pdelta = (limit / ppeak - s->att) /
+ (((buffer_size + pnextpos -
+ ((s->pos + channels) % buffer_size)) %
+ buffer_size) / channels);
+ if (pdelta < s->delta)
+ s->delta = pdelta;
+ }
+ } else {
+ s->delta = nextdelta[s->nextiter];
+ s->att = limit / peak;
+ }
+
+ s->nextlen -= 1;
+ nextpos[s->nextiter] = -1;
+ s->nextiter = (s->nextiter + 1) % buffer_size;
+ }
+
+ if (s->att > 1.) {
+ s->att = 1.;
+ s->delta = 0.;
+ s->nextiter = 0;
+ s->nextlen = 0;
+ nextpos[0] = -1;
+ }
+
+ if (s->att <= 0.) {
+ s->att = 0.0000000000001;
+ s->delta = (1.0 - s->att) / (inlink->sample_rate * release);
+ }
+
+ if (s->att != 1. && (1. - s->att) < 0.0000000000001)
+ s->att = 1.;
+
+ if (s->delta != 0. && fabs(s->delta) < 0.00000000000001)
+ s->delta = 0.;
+
+ for (c = 0; c < channels; c++)
+ dst[c] = av_clipd(dst[c], -limit, limit) * level * level_out;
+
+ s->pos = (s->pos + channels) % buffer_size;
+ src += channels;
+ dst += channels;
+ }
+
+ if (in != out)
+ av_frame_free(&in);
+
+ return ff_filter_frame(outlink, out);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *formats;
+ AVFilterChannelLayouts *layouts;
+ static const enum AVSampleFormat sample_fmts[] = {
+ AV_SAMPLE_FMT_DBL,
+ AV_SAMPLE_FMT_NONE
+ };
+ int ret;
+
+ layouts = ff_all_channel_counts();
+ if (!layouts)
+ return AVERROR(ENOMEM);
+ ret = ff_set_common_channel_layouts(ctx, layouts);
+ if (ret < 0)
+ return ret;
+
+ formats = ff_make_format_list(sample_fmts);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ret = ff_set_common_formats(ctx, formats);
+ if (ret < 0)
+ return ret;
+
+ formats = ff_all_samplerates();
+ if (!formats)
+ return AVERROR(ENOMEM);
+ return ff_set_common_samplerates(ctx, formats);
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AudioLimiterContext *s = ctx->priv;
+ int obuffer_size;
+
+ obuffer_size = inlink->sample_rate * inlink->channels * 100 / 1000. + inlink->channels;
+ if (obuffer_size < inlink->channels)
+ return AVERROR(EINVAL);
+
+ s->buffer = av_calloc(obuffer_size, sizeof(*s->buffer));
+ s->nextdelta = av_calloc(obuffer_size, sizeof(*s->nextdelta));
+ s->nextpos = av_malloc_array(obuffer_size, sizeof(*s->nextpos));
+ if (!s->buffer || !s->nextdelta || !s->nextpos)
+ return AVERROR(ENOMEM);
+
+ memset(s->nextpos, -1, obuffer_size * sizeof(*s->nextpos));
+ s->buffer_size = inlink->sample_rate * s->attack * inlink->channels;
+ s->buffer_size -= s->buffer_size % inlink->channels;
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ AudioLimiterContext *s = ctx->priv;
+
+ av_freep(&s->buffer);
+ av_freep(&s->nextdelta);
+ av_freep(&s->nextpos);
+}
+
+static const AVFilterPad alimiter_inputs[] = {
+ {
+ .name = "main",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ { NULL }
+};
+
+static const AVFilterPad alimiter_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_alimiter = {
+ .name = "alimiter",
+ .description = NULL_IF_CONFIG_SMALL("Audio lookahead limiter."),
+ .priv_size = sizeof(AudioLimiterContext),
+ .priv_class = &alimiter_class,
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = alimiter_inputs,
+ .outputs = alimiter_outputs,
+};
diff --git a/libavfilter/af_amerge.c b/libavfilter/af_amerge.c
new file mode 100644
index 0000000000..8ea01e206b
--- /dev/null
+++ b/libavfilter/af_amerge.c
@@ -0,0 +1,366 @@
+/*
+ * Copyright (c) 2011 Nicolas George <nicolas.george@normalesup.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Audio merging filter
+ */
+
+#define FF_INTERNAL_FIELDS 1
+#include "framequeue.h"
+
+#include "libavutil/avstring.h"
+#include "libavutil/bprint.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#include "audio.h"
+#include "bufferqueue.h"
+#include "internal.h"
+
+#define SWR_CH_MAX 64
+
+typedef struct {
+ const AVClass *class;
+ int nb_inputs;
+ int route[SWR_CH_MAX]; /**< channels routing, see copy_samples */
+ int bps;
+ struct amerge_input {
+ struct FFBufQueue queue;
+ int nb_ch; /**< number of channels for the input */
+ int nb_samples;
+ int pos;
+ } *in;
+} AMergeContext;
+
+#define OFFSET(x) offsetof(AMergeContext, x)
+#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption amerge_options[] = {
+ { "inputs", "specify the number of inputs", OFFSET(nb_inputs),
+ AV_OPT_TYPE_INT, { .i64 = 2 }, 1, SWR_CH_MAX, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(amerge);
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ AMergeContext *s = ctx->priv;
+ int i;
+
+ for (i = 0; i < s->nb_inputs; i++) {
+ if (s->in)
+ ff_bufqueue_discard_all(&s->in[i].queue);
+ if (ctx->input_pads)
+ av_freep(&ctx->input_pads[i].name);
+ }
+ av_freep(&s->in);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AMergeContext *s = ctx->priv;
+ int64_t inlayout[SWR_CH_MAX], outlayout = 0;
+ AVFilterFormats *formats;
+ AVFilterChannelLayouts *layouts;
+ int i, ret, overlap = 0, nb_ch = 0;
+
+ for (i = 0; i < s->nb_inputs; i++) {
+ if (!ctx->inputs[i]->in_channel_layouts ||
+ !ctx->inputs[i]->in_channel_layouts->nb_channel_layouts) {
+ av_log(ctx, AV_LOG_WARNING,
+ "No channel layout for input %d\n", i + 1);
+ return AVERROR(EAGAIN);
+ }
+ inlayout[i] = ctx->inputs[i]->in_channel_layouts->channel_layouts[0];
+ if (ctx->inputs[i]->in_channel_layouts->nb_channel_layouts > 1) {
+ char buf[256];
+ av_get_channel_layout_string(buf, sizeof(buf), 0, inlayout[i]);
+ av_log(ctx, AV_LOG_INFO, "Using \"%s\" for input %d\n", buf, i + 1);
+ }
+ s->in[i].nb_ch = FF_LAYOUT2COUNT(inlayout[i]);
+ if (s->in[i].nb_ch) {
+ overlap++;
+ } else {
+ s->in[i].nb_ch = av_get_channel_layout_nb_channels(inlayout[i]);
+ if (outlayout & inlayout[i])
+ overlap++;
+ outlayout |= inlayout[i];
+ }
+ nb_ch += s->in[i].nb_ch;
+ }
+ if (nb_ch > SWR_CH_MAX) {
+ av_log(ctx, AV_LOG_ERROR, "Too many channels (max %d)\n", SWR_CH_MAX);
+ return AVERROR(EINVAL);
+ }
+ if (overlap) {
+ av_log(ctx, AV_LOG_WARNING,
+ "Input channel layouts overlap: "
+ "output layout will be determined by the number of distinct input channels\n");
+ for (i = 0; i < nb_ch; i++)
+ s->route[i] = i;
+ outlayout = av_get_default_channel_layout(nb_ch);
+ if (!outlayout && nb_ch)
+ outlayout = 0xFFFFFFFFFFFFFFFFULL >> (64 - nb_ch);
+ } else {
+ int *route[SWR_CH_MAX];
+ int c, out_ch_number = 0;
+
+ route[0] = s->route;
+ for (i = 1; i < s->nb_inputs; i++)
+ route[i] = route[i - 1] + s->in[i - 1].nb_ch;
+ for (c = 0; c < 64; c++)
+ for (i = 0; i < s->nb_inputs; i++)
+ if ((inlayout[i] >> c) & 1)
+ *(route[i]++) = out_ch_number++;
+ }
+ formats = ff_make_format_list(ff_packed_sample_fmts_array);
+ if ((ret = ff_set_common_formats(ctx, formats)) < 0)
+ return ret;
+ for (i = 0; i < s->nb_inputs; i++) {
+ layouts = NULL;
+ if ((ret = ff_add_channel_layout(&layouts, inlayout[i])) < 0)
+ return ret;
+ if ((ret = ff_channel_layouts_ref(layouts, &ctx->inputs[i]->out_channel_layouts)) < 0)
+ return ret;
+ }
+ layouts = NULL;
+ if ((ret = ff_add_channel_layout(&layouts, outlayout)) < 0)
+ return ret;
+ if ((ret = ff_channel_layouts_ref(layouts, &ctx->outputs[0]->in_channel_layouts)) < 0)
+ return ret;
+
+ return ff_set_common_samplerates(ctx, ff_all_samplerates());
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ AMergeContext *s = ctx->priv;
+ AVBPrint bp;
+ int i;
+
+ for (i = 1; i < s->nb_inputs; i++) {
+ if (ctx->inputs[i]->sample_rate != ctx->inputs[0]->sample_rate) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Inputs must have the same sample rate "
+ "%d for in%d vs %d\n",
+ ctx->inputs[i]->sample_rate, i, ctx->inputs[0]->sample_rate);
+ return AVERROR(EINVAL);
+ }
+ }
+ s->bps = av_get_bytes_per_sample(ctx->outputs[0]->format);
+ outlink->sample_rate = ctx->inputs[0]->sample_rate;
+ outlink->time_base = ctx->inputs[0]->time_base;
+
+ av_bprint_init(&bp, 0, 1);
+ for (i = 0; i < s->nb_inputs; i++) {
+ av_bprintf(&bp, "%sin%d:", i ? " + " : "", i);
+ av_bprint_channel_layout(&bp, -1, ctx->inputs[i]->channel_layout);
+ }
+ av_bprintf(&bp, " -> out:");
+ av_bprint_channel_layout(&bp, -1, ctx->outputs[0]->channel_layout);
+ av_log(ctx, AV_LOG_VERBOSE, "%s\n", bp.str);
+
+ return 0;
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ AMergeContext *s = ctx->priv;
+ int i, ret;
+
+ for (i = 0; i < s->nb_inputs; i++)
+ if (!s->in[i].nb_samples ||
+ /* detect EOF immediately */
+ (ctx->inputs[i]->status_in && !ctx->inputs[i]->status_out))
+ if ((ret = ff_request_frame(ctx->inputs[i])) < 0)
+ return ret;
+ return 0;
+}
+
+/**
+ * Copy samples from several input streams to one output stream.
+ * @param nb_inputs number of inputs
+ * @param in inputs; used only for the nb_ch field;
+ * @param route routing values;
+ * input channel i goes to output channel route[i];
+ * i < in[0].nb_ch are the channels from the first output;
+ * i >= in[0].nb_ch are the channels from the second output
+ * @param ins pointer to the samples of each inputs, in packed format;
+ * will be left at the end of the copied samples
+ * @param outs pointer to the samples of the output, in packet format;
+ * must point to a buffer big enough;
+ * will be left at the end of the copied samples
+ * @param ns number of samples to copy
+ * @param bps bytes per sample
+ */
+static inline void copy_samples(int nb_inputs, struct amerge_input in[],
+ int *route, uint8_t *ins[],
+ uint8_t **outs, int ns, int bps)
+{
+ int *route_cur;
+ int i, c, nb_ch = 0;
+
+ for (i = 0; i < nb_inputs; i++)
+ nb_ch += in[i].nb_ch;
+ while (ns--) {
+ route_cur = route;
+ for (i = 0; i < nb_inputs; i++) {
+ for (c = 0; c < in[i].nb_ch; c++) {
+ memcpy((*outs) + bps * *(route_cur++), ins[i], bps);
+ ins[i] += bps;
+ }
+ }
+ *outs += nb_ch * bps;
+ }
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AMergeContext *s = ctx->priv;
+ AVFilterLink *const outlink = ctx->outputs[0];
+ int input_number;
+ int nb_samples, ns, i;
+ AVFrame *outbuf, *inbuf[SWR_CH_MAX];
+ uint8_t *ins[SWR_CH_MAX], *outs;
+
+ for (input_number = 0; input_number < s->nb_inputs; input_number++)
+ if (inlink == ctx->inputs[input_number])
+ break;
+ av_assert1(input_number < s->nb_inputs);
+ if (ff_bufqueue_is_full(&s->in[input_number].queue)) {
+ av_frame_free(&insamples);
+ return AVERROR(ENOMEM);
+ }
+ ff_bufqueue_add(ctx, &s->in[input_number].queue, av_frame_clone(insamples));
+ s->in[input_number].nb_samples += insamples->nb_samples;
+ av_frame_free(&insamples);
+ nb_samples = s->in[0].nb_samples;
+ for (i = 1; i < s->nb_inputs; i++)
+ nb_samples = FFMIN(nb_samples, s->in[i].nb_samples);
+ if (!nb_samples)
+ return 0;
+
+ outbuf = ff_get_audio_buffer(ctx->outputs[0], nb_samples);
+ if (!outbuf)
+ return AVERROR(ENOMEM);
+ outs = outbuf->data[0];
+ for (i = 0; i < s->nb_inputs; i++) {
+ inbuf[i] = ff_bufqueue_peek(&s->in[i].queue, 0);
+ ins[i] = inbuf[i]->data[0] +
+ s->in[i].pos * s->in[i].nb_ch * s->bps;
+ }
+ av_frame_copy_props(outbuf, inbuf[0]);
+ outbuf->pts = inbuf[0]->pts == AV_NOPTS_VALUE ? AV_NOPTS_VALUE :
+ inbuf[0]->pts +
+ av_rescale_q(s->in[0].pos,
+ av_make_q(1, ctx->inputs[0]->sample_rate),
+ ctx->outputs[0]->time_base);
+
+ outbuf->nb_samples = nb_samples;
+ outbuf->channel_layout = outlink->channel_layout;
+ av_frame_set_channels(outbuf, outlink->channels);
+
+ while (nb_samples) {
+ ns = nb_samples;
+ for (i = 0; i < s->nb_inputs; i++)
+ ns = FFMIN(ns, inbuf[i]->nb_samples - s->in[i].pos);
+ /* Unroll the most common sample formats: speed +~350% for the loop,
+ +~13% overall (including two common decoders) */
+ switch (s->bps) {
+ case 1:
+ copy_samples(s->nb_inputs, s->in, s->route, ins, &outs, ns, 1);
+ break;
+ case 2:
+ copy_samples(s->nb_inputs, s->in, s->route, ins, &outs, ns, 2);
+ break;
+ case 4:
+ copy_samples(s->nb_inputs, s->in, s->route, ins, &outs, ns, 4);
+ break;
+ default:
+ copy_samples(s->nb_inputs, s->in, s->route, ins, &outs, ns, s->bps);
+ break;
+ }
+
+ nb_samples -= ns;
+ for (i = 0; i < s->nb_inputs; i++) {
+ s->in[i].nb_samples -= ns;
+ s->in[i].pos += ns;
+ if (s->in[i].pos == inbuf[i]->nb_samples) {
+ s->in[i].pos = 0;
+ av_frame_free(&inbuf[i]);
+ ff_bufqueue_get(&s->in[i].queue);
+ inbuf[i] = ff_bufqueue_peek(&s->in[i].queue, 0);
+ ins[i] = inbuf[i] ? inbuf[i]->data[0] : NULL;
+ }
+ }
+ }
+ return ff_filter_frame(ctx->outputs[0], outbuf);
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ AMergeContext *s = ctx->priv;
+ int i;
+
+ s->in = av_calloc(s->nb_inputs, sizeof(*s->in));
+ if (!s->in)
+ return AVERROR(ENOMEM);
+ for (i = 0; i < s->nb_inputs; i++) {
+ char *name = av_asprintf("in%d", i);
+ AVFilterPad pad = {
+ .name = name,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ };
+ if (!name)
+ return AVERROR(ENOMEM);
+ ff_insert_inpad(ctx, i, &pad);
+ }
+ return 0;
+}
+
+static const AVFilterPad amerge_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_output,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_amerge = {
+ .name = "amerge",
+ .description = NULL_IF_CONFIG_SMALL("Merge two or more audio streams into "
+ "a single multi-channel stream."),
+ .priv_size = sizeof(AMergeContext),
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = NULL,
+ .outputs = amerge_outputs,
+ .priv_class = &amerge_class,
+ .flags = AVFILTER_FLAG_DYNAMIC_INPUTS,
+};
diff --git a/libavfilter/af_amix.c b/libavfilter/af_amix.c
index bfba1504ea..e18743e0b8 100644
--- a/libavfilter/af_amix.c
+++ b/libavfilter/af_amix.c
@@ -2,20 +2,20 @@
* Audio Mix Filter
* Copyright (c) 2012 Justin Ruggles <justin.ruggles@gmail.com>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -44,9 +44,8 @@
#include "formats.h"
#include "internal.h"
-#define INPUT_OFF 0 /**< input has reached EOF */
#define INPUT_ON 1 /**< input is active */
-#define INPUT_INACTIVE 2 /**< input is on, but is currently inactive */
+#define INPUT_EOF 2 /**< input has reached EOF (may still be active) */
#define DURATION_LONGEST 0
#define DURATION_SHORTEST 1
@@ -110,7 +109,7 @@ static void frame_list_remove_samples(FrameList *frame_list, int nb_samples)
int samples = nb_samples;
while (samples > 0) {
FrameInfo *info = frame_list->list;
- av_assert0(info != NULL);
+ av_assert0(info);
if (info->nb_samples <= samples) {
samples -= info->nb_samples;
frame_list->list = info->next;
@@ -142,7 +141,7 @@ static int frame_list_add_frame(FrameList *frame_list, int nb_samples, int64_t p
frame_list->list = info;
frame_list->end = info;
} else {
- av_assert0(frame_list->end != NULL);
+ av_assert0(frame_list->end);
frame_list->end->next = info;
frame_list->end = info;
}
@@ -155,7 +154,7 @@ static int frame_list_add_frame(FrameList *frame_list, int nb_samples, int64_t p
typedef struct MixContext {
const AVClass *class; /**< class for AVOptions */
- AVFloatDSPContext fdsp;
+ AVFloatDSPContext *fdsp;
int nb_inputs; /**< number of inputs */
int active_inputs; /**< number of input currently active */
@@ -175,27 +174,22 @@ typedef struct MixContext {
#define OFFSET(x) offsetof(MixContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
-static const AVOption options[] = {
+#define F AV_OPT_FLAG_FILTERING_PARAM
+static const AVOption amix_options[] = {
{ "inputs", "Number of inputs.",
- OFFSET(nb_inputs), AV_OPT_TYPE_INT, { .i64 = 2 }, 1, 32, A },
+ OFFSET(nb_inputs), AV_OPT_TYPE_INT, { .i64 = 2 }, 1, 32, A|F },
{ "duration", "How to determine the end-of-stream.",
- OFFSET(duration_mode), AV_OPT_TYPE_INT, { .i64 = DURATION_LONGEST }, 0, 2, A, "duration" },
- { "longest", "Duration of longest input.", 0, AV_OPT_TYPE_CONST, { .i64 = DURATION_LONGEST }, INT_MIN, INT_MAX, A, "duration" },
- { "shortest", "Duration of shortest input.", 0, AV_OPT_TYPE_CONST, { .i64 = DURATION_SHORTEST }, INT_MIN, INT_MAX, A, "duration" },
- { "first", "Duration of first input.", 0, AV_OPT_TYPE_CONST, { .i64 = DURATION_FIRST }, INT_MIN, INT_MAX, A, "duration" },
+ OFFSET(duration_mode), AV_OPT_TYPE_INT, { .i64 = DURATION_LONGEST }, 0, 2, A|F, "duration" },
+ { "longest", "Duration of longest input.", 0, AV_OPT_TYPE_CONST, { .i64 = DURATION_LONGEST }, INT_MIN, INT_MAX, A|F, "duration" },
+ { "shortest", "Duration of shortest input.", 0, AV_OPT_TYPE_CONST, { .i64 = DURATION_SHORTEST }, INT_MIN, INT_MAX, A|F, "duration" },
+ { "first", "Duration of first input.", 0, AV_OPT_TYPE_CONST, { .i64 = DURATION_FIRST }, INT_MIN, INT_MAX, A|F, "duration" },
{ "dropout_transition", "Transition time, in seconds, for volume "
"renormalization when an input stream ends.",
- OFFSET(dropout_transition), AV_OPT_TYPE_FLOAT, { .dbl = 2.0 }, 0, INT_MAX, A },
- { NULL },
-};
-
-static const AVClass amix_class = {
- .class_name = "amix filter",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
+ OFFSET(dropout_transition), AV_OPT_TYPE_FLOAT, { .dbl = 2.0 }, 0, INT_MAX, A|F },
+ { NULL }
};
+AVFILTER_DEFINE_CLASS(amix);
/**
* Update the scaling factors to apply to each input during mixing.
@@ -214,7 +208,7 @@ static void calculate_scales(MixContext *s, int nb_samples)
}
for (i = 0; i < s->nb_inputs; i++) {
- if (s->input_state[i] == INPUT_ON)
+ if (s->input_state[i] & INPUT_ON)
s->input_scale[i] = 1.0f / s->scale_norm;
else
s->input_scale[i] = 0.0f;
@@ -237,11 +231,11 @@ static int config_output(AVFilterLink *outlink)
if (!s->frame_list)
return AVERROR(ENOMEM);
- s->fifos = av_mallocz(s->nb_inputs * sizeof(*s->fifos));
+ s->fifos = av_mallocz_array(s->nb_inputs, sizeof(*s->fifos));
if (!s->fifos)
return AVERROR(ENOMEM);
- s->nb_channels = av_get_channel_layout_nb_channels(outlink->channel_layout);
+ s->nb_channels = outlink->channels;
for (i = 0; i < s->nb_inputs; i++) {
s->fifos[i] = av_audio_fifo_alloc(outlink->format, s->nb_channels, 1024);
if (!s->fifos[i])
@@ -254,7 +248,7 @@ static int config_output(AVFilterLink *outlink)
memset(s->input_state, INPUT_ON, s->nb_inputs);
s->active_inputs = s->nb_inputs;
- s->input_scale = av_mallocz(s->nb_inputs * sizeof(*s->input_scale));
+ s->input_scale = av_mallocz_array(s->nb_inputs, sizeof(*s->input_scale));
if (!s->input_scale)
return AVERROR(ENOMEM);
s->scale_norm = s->active_inputs;
@@ -269,18 +263,58 @@ static int config_output(AVFilterLink *outlink)
return 0;
}
+static int calc_active_inputs(MixContext *s);
+
/**
* Read samples from the input FIFOs, mix, and write to the output link.
*/
-static int output_frame(AVFilterLink *outlink, int nb_samples)
+static int output_frame(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
MixContext *s = ctx->priv;
AVFrame *out_buf, *in_buf;
- int i;
+ int nb_samples, ns, ret, i;
+
+ ret = calc_active_inputs(s);
+ if (ret < 0)
+ return ret;
+
+ if (s->input_state[0] & INPUT_ON) {
+ /* first input live: use the corresponding frame size */
+ nb_samples = frame_list_next_frame_size(s->frame_list);
+ for (i = 1; i < s->nb_inputs; i++) {
+ if (s->input_state[i] & INPUT_ON) {
+ ns = av_audio_fifo_size(s->fifos[i]);
+ if (ns < nb_samples) {
+ if (!(s->input_state[i] & INPUT_EOF))
+ /* unclosed input with not enough samples */
+ return 0;
+ /* closed input to drain */
+ nb_samples = ns;
+ }
+ }
+ }
+ } else {
+ /* first input closed: use the available samples */
+ nb_samples = INT_MAX;
+ for (i = 1; i < s->nb_inputs; i++) {
+ if (s->input_state[i] & INPUT_ON) {
+ ns = av_audio_fifo_size(s->fifos[i]);
+ nb_samples = FFMIN(nb_samples, ns);
+ }
+ }
+ if (nb_samples == INT_MAX)
+ return AVERROR_EOF;
+ }
+
+ s->next_pts = frame_list_next_pts(s->frame_list);
+ frame_list_remove_samples(s->frame_list, nb_samples);
calculate_scales(s, nb_samples);
+ if (nb_samples == 0)
+ return 0;
+
out_buf = ff_get_audio_buffer(outlink, nb_samples);
if (!out_buf)
return AVERROR(ENOMEM);
@@ -292,7 +326,7 @@ static int output_frame(AVFilterLink *outlink, int nb_samples)
}
for (i = 0; i < s->nb_inputs; i++) {
- if (s->input_state[i] == INPUT_ON) {
+ if (s->input_state[i] & INPUT_ON) {
int planes, plane_size, p;
av_audio_fifo_read(s->fifos[i], (void **)in_buf->extended_data,
@@ -303,7 +337,7 @@ static int output_frame(AVFilterLink *outlink, int nb_samples)
plane_size = FFALIGN(plane_size, 16);
for (p = 0; p < planes; p++) {
- s->fdsp.vector_fmac_scalar((float *)out_buf->extended_data[p],
+ s->fdsp->vector_fmac_scalar((float *)out_buf->extended_data[p],
(float *) in_buf->extended_data[p],
s->input_scale[i], plane_size);
}
@@ -319,29 +353,6 @@ static int output_frame(AVFilterLink *outlink, int nb_samples)
}
/**
- * Returns the smallest number of samples available in the input FIFOs other
- * than that of the first input.
- */
-static int get_available_samples(MixContext *s)
-{
- int i;
- int available_samples = INT_MAX;
-
- av_assert0(s->nb_inputs > 1);
-
- for (i = 1; i < s->nb_inputs; i++) {
- int nb_samples;
- if (s->input_state[i] == INPUT_OFF)
- continue;
- nb_samples = av_audio_fifo_size(s->fifos[i]);
- available_samples = FFMIN(available_samples, nb_samples);
- }
- if (available_samples == INT_MAX)
- return 0;
- return available_samples;
-}
-
-/**
* Requests a frame, if needed, from each input link other than the first.
*/
static int request_samples(AVFilterContext *ctx, int min_samples)
@@ -353,19 +364,21 @@ static int request_samples(AVFilterContext *ctx, int min_samples)
for (i = 1; i < s->nb_inputs; i++) {
ret = 0;
- if (s->input_state[i] == INPUT_OFF)
+ if (!(s->input_state[i] & INPUT_ON))
continue;
- while (!ret && av_audio_fifo_size(s->fifos[i]) < min_samples)
- ret = ff_request_frame(ctx->inputs[i]);
+ if (av_audio_fifo_size(s->fifos[i]) >= min_samples)
+ continue;
+ ret = ff_request_frame(ctx->inputs[i]);
if (ret == AVERROR_EOF) {
+ s->input_state[i] |= INPUT_EOF;
if (av_audio_fifo_size(s->fifos[i]) == 0) {
- s->input_state[i] = INPUT_OFF;
+ s->input_state[i] = 0;
continue;
}
} else if (ret < 0)
return ret;
}
- return 0;
+ return output_frame(ctx->outputs[0]);
}
/**
@@ -379,11 +392,11 @@ static int calc_active_inputs(MixContext *s)
int i;
int active_inputs = 0;
for (i = 0; i < s->nb_inputs; i++)
- active_inputs += !!(s->input_state[i] != INPUT_OFF);
+ active_inputs += !!(s->input_state[i] & INPUT_ON);
s->active_inputs = active_inputs;
if (!active_inputs ||
- (s->duration_mode == DURATION_FIRST && s->input_state[0] == INPUT_OFF) ||
+ (s->duration_mode == DURATION_FIRST && !(s->input_state[0] & INPUT_ON)) ||
(s->duration_mode == DURATION_SHORTEST && active_inputs != s->nb_inputs))
return AVERROR_EOF;
return 0;
@@ -394,66 +407,30 @@ static int request_frame(AVFilterLink *outlink)
AVFilterContext *ctx = outlink->src;
MixContext *s = ctx->priv;
int ret;
- int wanted_samples, available_samples;
+ int wanted_samples;
ret = calc_active_inputs(s);
if (ret < 0)
return ret;
- if (s->input_state[0] == INPUT_OFF) {
- ret = request_samples(ctx, 1);
- if (ret < 0)
- return ret;
-
- ret = calc_active_inputs(s);
- if (ret < 0)
- return ret;
-
- available_samples = get_available_samples(s);
- if (!available_samples)
- return AVERROR(EAGAIN);
-
- return output_frame(outlink, available_samples);
- }
+ if (!(s->input_state[0] & INPUT_ON))
+ return request_samples(ctx, 1);
if (s->frame_list->nb_frames == 0) {
ret = ff_request_frame(ctx->inputs[0]);
if (ret == AVERROR_EOF) {
- s->input_state[0] = INPUT_OFF;
+ s->input_state[0] = 0;
if (s->nb_inputs == 1)
return AVERROR_EOF;
- else
- return AVERROR(EAGAIN);
- } else if (ret < 0)
- return ret;
+ return output_frame(ctx->outputs[0]);
+ }
+ return ret;
}
av_assert0(s->frame_list->nb_frames > 0);
wanted_samples = frame_list_next_frame_size(s->frame_list);
- if (s->active_inputs > 1) {
- ret = request_samples(ctx, wanted_samples);
- if (ret < 0)
- return ret;
-
- ret = calc_active_inputs(s);
- if (ret < 0)
- return ret;
- }
-
- if (s->active_inputs > 1) {
- available_samples = get_available_samples(s);
- if (!available_samples)
- return AVERROR(EAGAIN);
- available_samples = FFMIN(available_samples, wanted_samples);
- } else {
- available_samples = wanted_samples;
- }
-
- s->next_pts = frame_list_next_pts(s->frame_list);
- frame_list_remove_samples(s->frame_list, available_samples);
-
- return output_frame(outlink, available_samples);
+ return request_samples(ctx, wanted_samples);
}
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
@@ -483,6 +460,9 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
ret = av_audio_fifo_write(s->fifos[i], (void **)buf->extended_data,
buf->nb_samples);
+ av_frame_free(&buf);
+ return output_frame(outlink);
+
fail:
av_frame_free(&buf);
@@ -501,12 +481,16 @@ static av_cold int init(AVFilterContext *ctx)
snprintf(name, sizeof(name), "input%d", i);
pad.type = AVMEDIA_TYPE_AUDIO;
pad.name = av_strdup(name);
+ if (!pad.name)
+ return AVERROR(ENOMEM);
pad.filter_frame = filter_frame;
ff_insert_inpad(ctx, i, &pad);
}
- avpriv_float_dsp_init(&s->fdsp, 0);
+ s->fdsp = avpriv_float_dsp_alloc(0);
+ if (!s->fdsp)
+ return AVERROR(ENOMEM);
return 0;
}
@@ -525,6 +509,7 @@ static av_cold void uninit(AVFilterContext *ctx)
av_freep(&s->frame_list);
av_freep(&s->input_state);
av_freep(&s->input_scale);
+ av_freep(&s->fdsp);
for (i = 0; i < ctx->nb_inputs; i++)
av_freep(&ctx->input_pads[i].name);
@@ -533,12 +518,27 @@ static av_cold void uninit(AVFilterContext *ctx)
static int query_formats(AVFilterContext *ctx)
{
AVFilterFormats *formats = NULL;
- ff_add_format(&formats, AV_SAMPLE_FMT_FLT);
- ff_add_format(&formats, AV_SAMPLE_FMT_FLTP);
- ff_set_common_formats(ctx, formats);
- ff_set_common_channel_layouts(ctx, ff_all_channel_layouts());
- ff_set_common_samplerates(ctx, ff_all_samplerates());
+ AVFilterChannelLayouts *layouts;
+ int ret;
+
+ layouts = ff_all_channel_counts();
+ if (!layouts) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ if ((ret = ff_add_format(&formats, AV_SAMPLE_FMT_FLT )) < 0 ||
+ (ret = ff_add_format(&formats, AV_SAMPLE_FMT_FLTP)) < 0 ||
+ (ret = ff_set_common_formats (ctx, formats)) < 0 ||
+ (ret = ff_set_common_channel_layouts(ctx, layouts)) < 0 ||
+ (ret = ff_set_common_samplerates(ctx, ff_all_samplerates())) < 0)
+ goto fail;
return 0;
+fail:
+ if (layouts)
+ av_freep(&layouts->channel_layouts);
+ av_freep(&layouts);
+ return ret;
}
static const AVFilterPad avfilter_af_amix_outputs[] = {
@@ -552,17 +552,14 @@ static const AVFilterPad avfilter_af_amix_outputs[] = {
};
AVFilter ff_af_amix = {
- .name = "amix",
- .description = NULL_IF_CONFIG_SMALL("Audio mixing."),
- .priv_size = sizeof(MixContext),
- .priv_class = &amix_class,
-
+ .name = "amix",
+ .description = NULL_IF_CONFIG_SMALL("Audio mixing."),
+ .priv_size = sizeof(MixContext),
+ .priv_class = &amix_class,
.init = init,
.uninit = uninit,
.query_formats = query_formats,
-
- .inputs = NULL,
- .outputs = avfilter_af_amix_outputs,
-
- .flags = AVFILTER_FLAG_DYNAMIC_INPUTS,
+ .inputs = NULL,
+ .outputs = avfilter_af_amix_outputs,
+ .flags = AVFILTER_FLAG_DYNAMIC_INPUTS,
};
diff --git a/libavfilter/af_anequalizer.c b/libavfilter/af_anequalizer.c
new file mode 100644
index 0000000000..24034602fd
--- /dev/null
+++ b/libavfilter/af_anequalizer.c
@@ -0,0 +1,762 @@
+/*
+ * Copyright (c) 2001-2010 Krzysztof Foltman, Markus Schmidt, Thor Harald Johansen and others
+ * Copyright (c) 2015 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/intreadwrite.h"
+#include "libavutil/avstring.h"
+#include "libavutil/ffmath.h"
+#include "libavutil/opt.h"
+#include "libavutil/parseutils.h"
+#include "avfilter.h"
+#include "internal.h"
+#include "audio.h"
+
+#define FILTER_ORDER 4
+
+enum FilterType {
+ BUTTERWORTH,
+ CHEBYSHEV1,
+ CHEBYSHEV2,
+ NB_TYPES
+};
+
+typedef struct FoSection {
+ double a0, a1, a2, a3, a4;
+ double b0, b1, b2, b3, b4;
+
+ double num[4];
+ double denum[4];
+} FoSection;
+
+typedef struct EqualizatorFilter {
+ int ignore;
+ int channel;
+ int type;
+
+ double freq;
+ double gain;
+ double width;
+
+ FoSection section[2];
+} EqualizatorFilter;
+
+typedef struct AudioNEqualizerContext {
+ const AVClass *class;
+ char *args;
+ char *colors;
+ int draw_curves;
+ int w, h;
+
+ double mag;
+ int fscale;
+ int nb_filters;
+ int nb_allocated;
+ EqualizatorFilter *filters;
+ AVFrame *video;
+} AudioNEqualizerContext;
+
+#define OFFSET(x) offsetof(AudioNEqualizerContext, x)
+#define A AV_OPT_FLAG_AUDIO_PARAM
+#define V AV_OPT_FLAG_VIDEO_PARAM
+#define F AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption anequalizer_options[] = {
+ { "params", NULL, OFFSET(args), AV_OPT_TYPE_STRING, {.str=""}, 0, 0, A|F },
+ { "curves", "draw frequency response curves", OFFSET(draw_curves), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, V|F },
+ { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "hd720"}, 0, 0, V|F },
+ { "mgain", "set max gain", OFFSET(mag), AV_OPT_TYPE_DOUBLE, {.dbl=60}, -900, 900, V|F },
+ { "fscale", "set frequency scale", OFFSET(fscale), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, V|F, "fscale" },
+ { "lin", "linear", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, V|F, "fscale" },
+ { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, V|F, "fscale" },
+ { "colors", "set channels curves colors", OFFSET(colors), AV_OPT_TYPE_STRING, {.str = "red|green|blue|yellow|orange|lime|pink|magenta|brown" }, 0, 0, V|F },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(anequalizer);
+
+static void draw_curves(AVFilterContext *ctx, AVFilterLink *inlink, AVFrame *out)
+{
+ AudioNEqualizerContext *s = ctx->priv;
+ char *colors, *color, *saveptr = NULL;
+ int ch, i, n;
+
+ colors = av_strdup(s->colors);
+ if (!colors)
+ return;
+
+ memset(out->data[0], 0, s->h * out->linesize[0]);
+
+ for (ch = 0; ch < inlink->channels; ch++) {
+ uint8_t fg[4] = { 0xff, 0xff, 0xff, 0xff };
+ int prev_v = -1;
+ double f;
+
+ color = av_strtok(ch == 0 ? colors : NULL, " |", &saveptr);
+ if (color)
+ av_parse_color(fg, color, -1, ctx);
+
+ for (f = 0; f < s->w; f++) {
+ double zr, zi, zr2, zi2;
+ double Hr, Hi;
+ double Hmag = 1;
+ double w;
+ int v, y, x;
+
+ w = M_PI * (s->fscale ? pow(s->w - 1, f / s->w) : f) / (s->w - 1);
+ zr = cos(w);
+ zr2 = zr * zr;
+ zi = -sin(w);
+ zi2 = zi * zi;
+
+ for (n = 0; n < s->nb_filters; n++) {
+ if (s->filters[n].channel != ch ||
+ s->filters[n].ignore)
+ continue;
+
+ for (i = 0; i < FILTER_ORDER / 2; i++) {
+ FoSection *S = &s->filters[n].section[i];
+
+ /* H *= (((((S->b4 * z + S->b3) * z + S->b2) * z + S->b1) * z + S->b0) /
+ ((((S->a4 * z + S->a3) * z + S->a2) * z + S->a1) * z + S->a0)); */
+
+ Hr = S->b4*(1-8*zr2*zi2) + S->b2*(zr2-zi2) + zr*(S->b1+S->b3*(zr2-3*zi2))+ S->b0;
+ Hi = zi*(S->b3*(3*zr2-zi2) + S->b1 + 2*zr*(2*S->b4*(zr2-zi2) + S->b2));
+ Hmag *= hypot(Hr, Hi);
+ Hr = S->a4*(1-8*zr2*zi2) + S->a2*(zr2-zi2) + zr*(S->a1+S->a3*(zr2-3*zi2))+ S->a0;
+ Hi = zi*(S->a3*(3*zr2-zi2) + S->a1 + 2*zr*(2*S->a4*(zr2-zi2) + S->a2));
+ Hmag /= hypot(Hr, Hi);
+ }
+ }
+
+ v = av_clip((1. + -20 * log10(Hmag) / s->mag) * s->h / 2, 0, s->h - 1);
+ x = lrint(f);
+ if (prev_v == -1)
+ prev_v = v;
+ if (v <= prev_v) {
+ for (y = v; y <= prev_v; y++)
+ AV_WL32(out->data[0] + y * out->linesize[0] + x * 4, AV_RL32(fg));
+ } else {
+ for (y = prev_v; y <= v; y++)
+ AV_WL32(out->data[0] + y * out->linesize[0] + x * 4, AV_RL32(fg));
+ }
+
+ prev_v = v;
+ }
+ }
+
+ av_free(colors);
+}
+
+static int config_video(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ AudioNEqualizerContext *s = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
+ AVFrame *out;
+
+ outlink->w = s->w;
+ outlink->h = s->h;
+
+ av_frame_free(&s->video);
+ s->video = out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out)
+ return AVERROR(ENOMEM);
+ outlink->sample_aspect_ratio = (AVRational){1,1};
+
+ draw_curves(ctx, inlink, out);
+
+ return 0;
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ AudioNEqualizerContext *s = ctx->priv;
+ AVFilterPad pad, vpad;
+
+ pad = (AVFilterPad){
+ .name = av_strdup("out0"),
+ .type = AVMEDIA_TYPE_AUDIO,
+ };
+
+ if (!pad.name)
+ return AVERROR(ENOMEM);
+
+ if (s->draw_curves) {
+ vpad = (AVFilterPad){
+ .name = av_strdup("out1"),
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_video,
+ };
+ if (!vpad.name)
+ return AVERROR(ENOMEM);
+ }
+
+ ff_insert_outpad(ctx, 0, &pad);
+
+ if (s->draw_curves)
+ ff_insert_outpad(ctx, 1, &vpad);
+
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterLink *inlink = ctx->inputs[0];
+ AVFilterLink *outlink = ctx->outputs[0];
+ AudioNEqualizerContext *s = ctx->priv;
+ AVFilterFormats *formats;
+ AVFilterChannelLayouts *layouts;
+ static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_RGBA, AV_PIX_FMT_NONE };
+ static const enum AVSampleFormat sample_fmts[] = {
+ AV_SAMPLE_FMT_DBLP,
+ AV_SAMPLE_FMT_NONE
+ };
+ int ret;
+
+ if (s->draw_curves) {
+ AVFilterLink *videolink = ctx->outputs[1];
+ formats = ff_make_format_list(pix_fmts);
+ if ((ret = ff_formats_ref(formats, &videolink->in_formats)) < 0)
+ return ret;
+ }
+
+ formats = ff_make_format_list(sample_fmts);
+ if ((ret = ff_formats_ref(formats, &inlink->out_formats)) < 0 ||
+ (ret = ff_formats_ref(formats, &outlink->in_formats)) < 0)
+ return ret;
+
+ layouts = ff_all_channel_counts();
+ if ((ret = ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts)) < 0 ||
+ (ret = ff_channel_layouts_ref(layouts, &outlink->in_channel_layouts)) < 0)
+ return ret;
+
+ formats = ff_all_samplerates();
+ if ((ret = ff_formats_ref(formats, &inlink->out_samplerates)) < 0 ||
+ (ret = ff_formats_ref(formats, &outlink->in_samplerates)) < 0)
+ return ret;
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ AudioNEqualizerContext *s = ctx->priv;
+
+ av_freep(&ctx->output_pads[0].name);
+ if (s->draw_curves)
+ av_freep(&ctx->output_pads[1].name);
+ av_frame_free(&s->video);
+ av_freep(&s->filters);
+ s->nb_filters = 0;
+ s->nb_allocated = 0;
+}
+
+static void butterworth_fo_section(FoSection *S, double beta,
+ double si, double g, double g0,
+ double D, double c0)
+{
+ if (c0 == 1 || c0 == -1) {
+ S->b0 = (g*g*beta*beta + 2*g*g0*si*beta + g0*g0)/D;
+ S->b1 = 2*c0*(g*g*beta*beta - g0*g0)/D;
+ S->b2 = (g*g*beta*beta - 2*g0*g*beta*si + g0*g0)/D;
+ S->b3 = 0;
+ S->b4 = 0;
+
+ S->a0 = 1;
+ S->a1 = 2*c0*(beta*beta - 1)/D;
+ S->a2 = (beta*beta - 2*beta*si + 1)/D;
+ S->a3 = 0;
+ S->a4 = 0;
+ } else {
+ S->b0 = (g*g*beta*beta + 2*g*g0*si*beta + g0*g0)/D;
+ S->b1 = -4*c0*(g0*g0 + g*g0*si*beta)/D;
+ S->b2 = 2*(g0*g0*(1 + 2*c0*c0) - g*g*beta*beta)/D;
+ S->b3 = -4*c0*(g0*g0 - g*g0*si*beta)/D;
+ S->b4 = (g*g*beta*beta - 2*g*g0*si*beta + g0*g0)/D;
+
+ S->a0 = 1;
+ S->a1 = -4*c0*(1 + si*beta)/D;
+ S->a2 = 2*(1 + 2*c0*c0 - beta*beta)/D;
+ S->a3 = -4*c0*(1 - si*beta)/D;
+ S->a4 = (beta*beta - 2*si*beta + 1)/D;
+ }
+}
+
+static void butterworth_bp_filter(EqualizatorFilter *f,
+ int N, double w0, double wb,
+ double G, double Gb, double G0)
+{
+ double g, c0, g0, beta;
+ double epsilon;
+ int r = N % 2;
+ int L = (N - r) / 2;
+ int i;
+
+ if (G == 0 && G0 == 0) {
+ f->section[0].a0 = 1;
+ f->section[0].b0 = 1;
+ f->section[1].a0 = 1;
+ f->section[1].b0 = 1;
+ return;
+ }
+
+ G = ff_exp10(G/20);
+ Gb = ff_exp10(Gb/20);
+ G0 = ff_exp10(G0/20);
+
+ epsilon = sqrt((G * G - Gb * Gb) / (Gb * Gb - G0 * G0));
+ g = pow(G, 1.0 / N);
+ g0 = pow(G0, 1.0 / N);
+ beta = pow(epsilon, -1.0 / N) * tan(wb/2);
+ c0 = cos(w0);
+
+ for (i = 1; i <= L; i++) {
+ double ui = (2.0 * i - 1) / N;
+ double si = sin(M_PI * ui / 2.0);
+ double Di = beta * beta + 2 * si * beta + 1;
+
+ butterworth_fo_section(&f->section[i - 1], beta, si, g, g0, Di, c0);
+ }
+}
+
+static void chebyshev1_fo_section(FoSection *S, double a,
+ double c, double tetta_b,
+ double g0, double si, double b,
+ double D, double c0)
+{
+ if (c0 == 1 || c0 == -1) {
+ S->b0 = (tetta_b*tetta_b*(b*b+g0*g0*c*c) + 2*g0*b*si*tetta_b*tetta_b + g0*g0)/D;
+ S->b1 = 2*c0*(tetta_b*tetta_b*(b*b+g0*g0*c*c) - g0*g0)/D;
+ S->b2 = (tetta_b*tetta_b*(b*b+g0*g0*c*c) - 2*g0*b*si*tetta_b + g0*g0)/D;
+ S->b3 = 0;
+ S->b4 = 0;
+
+ S->a0 = 1;
+ S->a1 = 2*c0*(tetta_b*tetta_b*(a*a+c*c) - 1)/D;
+ S->a2 = (tetta_b*tetta_b*(a*a+c*c) - 2*a*si*tetta_b + 1)/D;
+ S->a3 = 0;
+ S->a4 = 0;
+ } else {
+ S->b0 = ((b*b + g0*g0*c*c)*tetta_b*tetta_b + 2*g0*b*si*tetta_b + g0*g0)/D;
+ S->b1 = -4*c0*(g0*g0 + g0*b*si*tetta_b)/D;
+ S->b2 = 2*(g0*g0*(1 + 2*c0*c0) - (b*b + g0*g0*c*c)*tetta_b*tetta_b)/D;
+ S->b3 = -4*c0*(g0*g0 - g0*b*si*tetta_b)/D;
+ S->b4 = ((b*b + g0*g0*c*c)*tetta_b*tetta_b - 2*g0*b*si*tetta_b + g0*g0)/D;
+
+ S->a0 = 1;
+ S->a1 = -4*c0*(1 + a*si*tetta_b)/D;
+ S->a2 = 2*(1 + 2*c0*c0 - (a*a + c*c)*tetta_b*tetta_b)/D;
+ S->a3 = -4*c0*(1 - a*si*tetta_b)/D;
+ S->a4 = ((a*a + c*c)*tetta_b*tetta_b - 2*a*si*tetta_b + 1)/D;
+ }
+}
+
+static void chebyshev1_bp_filter(EqualizatorFilter *f,
+ int N, double w0, double wb,
+ double G, double Gb, double G0)
+{
+ double a, b, c0, g0, alfa, beta, tetta_b;
+ double epsilon;
+ int r = N % 2;
+ int L = (N - r) / 2;
+ int i;
+
+ if (G == 0 && G0 == 0) {
+ f->section[0].a0 = 1;
+ f->section[0].b0 = 1;
+ f->section[1].a0 = 1;
+ f->section[1].b0 = 1;
+ return;
+ }
+
+ G = ff_exp10(G/20);
+ Gb = ff_exp10(Gb/20);
+ G0 = ff_exp10(G0/20);
+
+ epsilon = sqrt((G*G - Gb*Gb) / (Gb*Gb - G0*G0));
+ g0 = pow(G0,1.0/N);
+ alfa = pow(1.0/epsilon + sqrt(1 + 1/(epsilon*epsilon)), 1.0/N);
+ beta = pow(G/epsilon + Gb * sqrt(1 + 1/(epsilon*epsilon)), 1.0/N);
+ a = 0.5 * (alfa - 1.0/alfa);
+ b = 0.5 * (beta - g0*g0*(1/beta));
+ tetta_b = tan(wb/2);
+ c0 = cos(w0);
+
+ for (i = 1; i <= L; i++) {
+ double ui = (2.0*i-1.0)/N;
+ double ci = cos(M_PI*ui/2.0);
+ double si = sin(M_PI*ui/2.0);
+ double Di = (a*a + ci*ci)*tetta_b*tetta_b + 2.0*a*si*tetta_b + 1;
+
+ chebyshev1_fo_section(&f->section[i - 1], a, ci, tetta_b, g0, si, b, Di, c0);
+ }
+}
+
+static void chebyshev2_fo_section(FoSection *S, double a,
+ double c, double tetta_b,
+ double g, double si, double b,
+ double D, double c0)
+{
+ if (c0 == 1 || c0 == -1) {
+ S->b0 = (g*g*tetta_b*tetta_b + 2*tetta_b*g*b*si + b*b + g*g*c*c)/D;
+ S->b1 = 2*c0*(g*g*tetta_b*tetta_b - b*b - g*g*c*c)/D;
+ S->b2 = (g*g*tetta_b*tetta_b - 2*tetta_b*g*b*si + b*b + g*g*c*c)/D;
+ S->b3 = 0;
+ S->b4 = 0;
+
+ S->a0 = 1;
+ S->a1 = 2*c0*(tetta_b*tetta_b - a*a - c*c)/D;
+ S->a2 = (tetta_b*tetta_b - 2*tetta_b*a*si + a*a + c*c)/D;
+ S->a3 = 0;
+ S->a4 = 0;
+ } else {
+ S->b0 = (g*g*tetta_b*tetta_b + 2*g*b*si*tetta_b + b*b + g*g*c*c)/D;
+ S->b1 = -4*c0*(b*b + g*g*c*c + g*b*si*tetta_b)/D;
+ S->b2 = 2*((b*b + g*g*c*c)*(1 + 2*c0*c0) - g*g*tetta_b*tetta_b)/D;
+ S->b3 = -4*c0*(b*b + g*g*c*c - g*b*si*tetta_b)/D;
+ S->b4 = (g*g*tetta_b*tetta_b - 2*g*b*si*tetta_b + b*b + g*g*c*c)/D;
+
+ S->a0 = 1;
+ S->a1 = -4*c0*(a*a + c*c + a*si*tetta_b)/D;
+ S->a2 = 2*((a*a + c*c)*(1 + 2*c0*c0) - tetta_b*tetta_b)/D;
+ S->a3 = -4*c0*(a*a + c*c - a*si*tetta_b)/D;
+ S->a4 = (tetta_b*tetta_b - 2*a*si*tetta_b + a*a + c*c)/D;
+ }
+}
+
+static void chebyshev2_bp_filter(EqualizatorFilter *f,
+ int N, double w0, double wb,
+ double G, double Gb, double G0)
+{
+ double a, b, c0, tetta_b;
+ double epsilon, g, eu, ew;
+ int r = N % 2;
+ int L = (N - r) / 2;
+ int i;
+
+ if (G == 0 && G0 == 0) {
+ f->section[0].a0 = 1;
+ f->section[0].b0 = 1;
+ f->section[1].a0 = 1;
+ f->section[1].b0 = 1;
+ return;
+ }
+
+ G = ff_exp10(G/20);
+ Gb = ff_exp10(Gb/20);
+ G0 = ff_exp10(G0/20);
+
+ epsilon = sqrt((G*G - Gb*Gb) / (Gb*Gb - G0*G0));
+ g = pow(G, 1.0 / N);
+ eu = pow(epsilon + sqrt(1 + epsilon*epsilon), 1.0/N);
+ ew = pow(G0*epsilon + Gb*sqrt(1 + epsilon*epsilon), 1.0/N);
+ a = (eu - 1.0/eu)/2.0;
+ b = (ew - g*g/ew)/2.0;
+ tetta_b = tan(wb/2);
+ c0 = cos(w0);
+
+ for (i = 1; i <= L; i++) {
+ double ui = (2.0 * i - 1.0)/N;
+ double ci = cos(M_PI * ui / 2.0);
+ double si = sin(M_PI * ui / 2.0);
+ double Di = tetta_b*tetta_b + 2*a*si*tetta_b + a*a + ci*ci;
+
+ chebyshev2_fo_section(&f->section[i - 1], a, ci, tetta_b, g, si, b, Di, c0);
+ }
+}
+
+static double butterworth_compute_bw_gain_db(double gain)
+{
+ double bw_gain = 0;
+
+ if (gain <= -6)
+ bw_gain = gain + 3;
+ else if(gain > -6 && gain < 6)
+ bw_gain = gain * 0.5;
+ else if(gain >= 6)
+ bw_gain = gain - 3;
+
+ return bw_gain;
+}
+
+static double chebyshev1_compute_bw_gain_db(double gain)
+{
+ double bw_gain = 0;
+
+ if (gain <= -6)
+ bw_gain = gain + 1;
+ else if(gain > -6 && gain < 6)
+ bw_gain = gain * 0.9;
+ else if(gain >= 6)
+ bw_gain = gain - 1;
+
+ return bw_gain;
+}
+
+static double chebyshev2_compute_bw_gain_db(double gain)
+{
+ double bw_gain = 0;
+
+ if (gain <= -6)
+ bw_gain = -3;
+ else if(gain > -6 && gain < 6)
+ bw_gain = gain * 0.3;
+ else if(gain >= 6)
+ bw_gain = 3;
+
+ return bw_gain;
+}
+
+static inline double hz_2_rad(double x, double fs)
+{
+ return 2 * M_PI * x / fs;
+}
+
+static void equalizer(EqualizatorFilter *f, double sample_rate)
+{
+ double w0 = hz_2_rad(f->freq, sample_rate);
+ double wb = hz_2_rad(f->width, sample_rate);
+ double bw_gain;
+
+ switch (f->type) {
+ case BUTTERWORTH:
+ bw_gain = butterworth_compute_bw_gain_db(f->gain);
+ butterworth_bp_filter(f, FILTER_ORDER, w0, wb, f->gain, bw_gain, 0);
+ break;
+ case CHEBYSHEV1:
+ bw_gain = chebyshev1_compute_bw_gain_db(f->gain);
+ chebyshev1_bp_filter(f, FILTER_ORDER, w0, wb, f->gain, bw_gain, 0);
+ break;
+ case CHEBYSHEV2:
+ bw_gain = chebyshev2_compute_bw_gain_db(f->gain);
+ chebyshev2_bp_filter(f, FILTER_ORDER, w0, wb, f->gain, bw_gain, 0);
+ break;
+ }
+
+}
+
+static int add_filter(AudioNEqualizerContext *s, AVFilterLink *inlink)
+{
+ equalizer(&s->filters[s->nb_filters], inlink->sample_rate);
+ if (s->nb_filters >= s->nb_allocated) {
+ EqualizatorFilter *filters;
+
+ filters = av_calloc(s->nb_allocated, 2 * sizeof(*s->filters));
+ if (!filters)
+ return AVERROR(ENOMEM);
+ memcpy(filters, s->filters, sizeof(*s->filters) * s->nb_allocated);
+ av_free(s->filters);
+ s->filters = filters;
+ s->nb_allocated *= 2;
+ }
+ s->nb_filters++;
+
+ return 0;
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AudioNEqualizerContext *s = ctx->priv;
+ char *args = av_strdup(s->args);
+ char *saveptr = NULL;
+ int ret = 0;
+
+ if (!args)
+ return AVERROR(ENOMEM);
+
+ s->nb_allocated = 32 * inlink->channels;
+ s->filters = av_calloc(inlink->channels, 32 * sizeof(*s->filters));
+ if (!s->filters) {
+ s->nb_allocated = 0;
+ av_free(args);
+ return AVERROR(ENOMEM);
+ }
+
+ while (1) {
+ char *arg = av_strtok(s->nb_filters == 0 ? args : NULL, "|", &saveptr);
+
+ if (!arg)
+ break;
+
+ s->filters[s->nb_filters].type = 0;
+ if (sscanf(arg, "c%d f=%lf w=%lf g=%lf t=%d", &s->filters[s->nb_filters].channel,
+ &s->filters[s->nb_filters].freq,
+ &s->filters[s->nb_filters].width,
+ &s->filters[s->nb_filters].gain,
+ &s->filters[s->nb_filters].type) != 5 &&
+ sscanf(arg, "c%d f=%lf w=%lf g=%lf", &s->filters[s->nb_filters].channel,
+ &s->filters[s->nb_filters].freq,
+ &s->filters[s->nb_filters].width,
+ &s->filters[s->nb_filters].gain) != 4 ) {
+ av_free(args);
+ return AVERROR(EINVAL);
+ }
+
+ if (s->filters[s->nb_filters].freq < 0 ||
+ s->filters[s->nb_filters].freq > inlink->sample_rate / 2.0)
+ s->filters[s->nb_filters].ignore = 1;
+
+ if (s->filters[s->nb_filters].channel < 0 ||
+ s->filters[s->nb_filters].channel >= inlink->channels)
+ s->filters[s->nb_filters].ignore = 1;
+
+ s->filters[s->nb_filters].type = av_clip(s->filters[s->nb_filters].type, 0, NB_TYPES - 1);
+ ret = add_filter(s, inlink);
+ if (ret < 0)
+ break;
+ }
+
+ av_free(args);
+
+ return ret;
+}
+
+static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
+ char *res, int res_len, int flags)
+{
+ AudioNEqualizerContext *s = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
+ int ret = AVERROR(ENOSYS);
+
+ if (!strcmp(cmd, "change")) {
+ double freq, width, gain;
+ int filter;
+
+ if (sscanf(args, "%d|f=%lf|w=%lf|g=%lf", &filter, &freq, &width, &gain) != 4)
+ return AVERROR(EINVAL);
+
+ if (filter < 0 || filter >= s->nb_filters)
+ return AVERROR(EINVAL);
+
+ if (freq < 0 || freq > inlink->sample_rate / 2.0)
+ return AVERROR(EINVAL);
+
+ s->filters[filter].freq = freq;
+ s->filters[filter].width = width;
+ s->filters[filter].gain = gain;
+ equalizer(&s->filters[filter], inlink->sample_rate);
+ if (s->draw_curves)
+ draw_curves(ctx, inlink, s->video);
+
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static inline double section_process(FoSection *S, double in)
+{
+ double out;
+
+ out = S->b0 * in;
+ out+= S->b1 * S->num[0] - S->denum[0] * S->a1;
+ out+= S->b2 * S->num[1] - S->denum[1] * S->a2;
+ out+= S->b3 * S->num[2] - S->denum[2] * S->a3;
+ out+= S->b4 * S->num[3] - S->denum[3] * S->a4;
+
+ S->num[3] = S->num[2];
+ S->num[2] = S->num[1];
+ S->num[1] = S->num[0];
+ S->num[0] = in;
+
+ S->denum[3] = S->denum[2];
+ S->denum[2] = S->denum[1];
+ S->denum[1] = S->denum[0];
+ S->denum[0] = out;
+
+ return out;
+}
+
+static double process_sample(FoSection *s1, double in)
+{
+ double p0 = in, p1;
+ int i;
+
+ for (i = 0; i < FILTER_ORDER / 2; i++) {
+ p1 = section_process(&s1[i], p0);
+ p0 = p1;
+ }
+
+ return p1;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AudioNEqualizerContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ double *bptr;
+ int i, n;
+
+ for (i = 0; i < s->nb_filters; i++) {
+ EqualizatorFilter *f = &s->filters[i];
+
+ if (f->gain == 0. || f->ignore)
+ continue;
+
+ bptr = (double *)buf->extended_data[f->channel];
+ for (n = 0; n < buf->nb_samples; n++) {
+ double sample = bptr[n];
+
+ sample = process_sample(f->section, sample);
+ bptr[n] = sample;
+ }
+ }
+
+ if (s->draw_curves) {
+ const int64_t pts = buf->pts +
+ av_rescale_q(buf->nb_samples, (AVRational){ 1, inlink->sample_rate },
+ outlink->time_base);
+ int ret;
+
+ s->video->pts = pts;
+ ret = ff_filter_frame(ctx->outputs[1], av_frame_clone(s->video));
+ if (ret < 0)
+ return ret;
+ }
+
+ return ff_filter_frame(outlink, buf);
+}
+
+static const AVFilterPad inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ .needs_writable = 1,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_anequalizer = {
+ .name = "anequalizer",
+ .description = NULL_IF_CONFIG_SMALL("Apply high-order audio parametric multi band equalizer."),
+ .priv_size = sizeof(AudioNEqualizerContext),
+ .priv_class = &anequalizer_class,
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = inputs,
+ .outputs = NULL,
+ .flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
+ .process_command = process_command,
+};
diff --git a/libavfilter/af_anull.c b/libavfilter/af_anull.c
index 6d7caf3f4e..1d0af451dc 100644
--- a/libavfilter/af_anull.c
+++ b/libavfilter/af_anull.c
@@ -1,18 +1,19 @@
/*
- * This file is part of Libav.
+ * Copyright (c) 2010 S.N. Hemanth Meenakshisundaram <smeenaks@ucsd.edu>
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -28,9 +29,8 @@
static const AVFilterPad avfilter_af_anull_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_AUDIO,
- .get_audio_buffer = ff_null_get_audio_buffer,
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
},
{ NULL }
};
@@ -44,12 +44,8 @@ static const AVFilterPad avfilter_af_anull_outputs[] = {
};
AVFilter ff_af_anull = {
- .name = "anull",
- .description = NULL_IF_CONFIG_SMALL("Pass the source unchanged to the output."),
-
- .priv_size = 0,
-
- .inputs = avfilter_af_anull_inputs,
-
- .outputs = avfilter_af_anull_outputs,
+ .name = "anull",
+ .description = NULL_IF_CONFIG_SMALL("Pass the source unchanged to the output."),
+ .inputs = avfilter_af_anull_inputs,
+ .outputs = avfilter_af_anull_outputs,
};
diff --git a/libavfilter/af_apad.c b/libavfilter/af_apad.c
new file mode 100644
index 0000000000..0a2d4206a9
--- /dev/null
+++ b/libavfilter/af_apad.c
@@ -0,0 +1,161 @@
+/*
+ * Copyright (c) 2012 Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * audio pad filter.
+ *
+ * Based on af_aresample.c
+ */
+
+#include "libavutil/avstring.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/opt.h"
+#include "libavutil/samplefmt.h"
+#include "libavutil/avassert.h"
+#include "avfilter.h"
+#include "audio.h"
+#include "internal.h"
+
+typedef struct {
+ const AVClass *class;
+ int64_t next_pts;
+
+ int packet_size;
+ int64_t pad_len, pad_len_left;
+ int64_t whole_len, whole_len_left;
+} APadContext;
+
+#define OFFSET(x) offsetof(APadContext, x)
+#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption apad_options[] = {
+ { "packet_size", "set silence packet size", OFFSET(packet_size), AV_OPT_TYPE_INT, { .i64 = 4096 }, 0, INT_MAX, A },
+ { "pad_len", "set number of samples of silence to add", OFFSET(pad_len), AV_OPT_TYPE_INT64, { .i64 = -1 }, -1, INT64_MAX, A },
+ { "whole_len", "set minimum target number of samples in the audio stream", OFFSET(whole_len), AV_OPT_TYPE_INT64, { .i64 = -1 }, -1, INT64_MAX, A },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(apad);
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ APadContext *s = ctx->priv;
+
+ s->next_pts = AV_NOPTS_VALUE;
+ if (s->whole_len >= 0 && s->pad_len >= 0) {
+ av_log(ctx, AV_LOG_ERROR, "Both whole and pad length are set, this is not possible\n");
+ return AVERROR(EINVAL);
+ }
+ s->pad_len_left = s->pad_len;
+ s->whole_len_left = s->whole_len;
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
+{
+ AVFilterContext *ctx = inlink->dst;
+ APadContext *s = ctx->priv;
+
+ if (s->whole_len >= 0) {
+ s->whole_len_left = FFMAX(s->whole_len_left - frame->nb_samples, 0);
+ av_log(ctx, AV_LOG_DEBUG,
+ "n_out:%d whole_len_left:%"PRId64"\n", frame->nb_samples, s->whole_len_left);
+ }
+
+ s->next_pts = frame->pts + av_rescale_q(frame->nb_samples, (AVRational){1, inlink->sample_rate}, inlink->time_base);
+ return ff_filter_frame(ctx->outputs[0], frame);
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ APadContext *s = ctx->priv;
+ int ret;
+
+ ret = ff_request_frame(ctx->inputs[0]);
+
+ if (ret == AVERROR_EOF && !ctx->is_disabled) {
+ int n_out = s->packet_size;
+ AVFrame *outsamplesref;
+
+ if (s->whole_len >= 0 && s->pad_len < 0) {
+ s->pad_len = s->pad_len_left = s->whole_len_left;
+ }
+ if (s->pad_len >=0 || s->whole_len >= 0) {
+ n_out = FFMIN(n_out, s->pad_len_left);
+ s->pad_len_left -= n_out;
+ av_log(ctx, AV_LOG_DEBUG,
+ "padding n_out:%d pad_len_left:%"PRId64"\n", n_out, s->pad_len_left);
+ }
+
+ if (!n_out)
+ return AVERROR_EOF;
+
+ outsamplesref = ff_get_audio_buffer(outlink, n_out);
+ if (!outsamplesref)
+ return AVERROR(ENOMEM);
+
+ av_assert0(outsamplesref->sample_rate == outlink->sample_rate);
+ av_assert0(outsamplesref->nb_samples == n_out);
+
+ av_samples_set_silence(outsamplesref->extended_data, 0,
+ n_out,
+ av_frame_get_channels(outsamplesref),
+ outsamplesref->format);
+
+ outsamplesref->pts = s->next_pts;
+ if (s->next_pts != AV_NOPTS_VALUE)
+ s->next_pts += av_rescale_q(n_out, (AVRational){1, outlink->sample_rate}, outlink->time_base);
+
+ return ff_filter_frame(outlink, outsamplesref);
+ }
+ return ret;
+}
+
+static const AVFilterPad apad_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad apad_outputs[] = {
+ {
+ .name = "default",
+ .request_frame = request_frame,
+ .type = AVMEDIA_TYPE_AUDIO,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_apad = {
+ .name = "apad",
+ .description = NULL_IF_CONFIG_SMALL("Pad audio with silence."),
+ .init = init,
+ .priv_size = sizeof(APadContext),
+ .inputs = apad_inputs,
+ .outputs = apad_outputs,
+ .priv_class = &apad_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
+};
diff --git a/libavfilter/af_aphaser.c b/libavfilter/af_aphaser.c
new file mode 100644
index 0000000000..33ecb1a7fb
--- /dev/null
+++ b/libavfilter/af_aphaser.c
@@ -0,0 +1,301 @@
+/*
+ * Copyright (c) 2013 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * phaser audio filter
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/opt.h"
+#include "audio.h"
+#include "avfilter.h"
+#include "internal.h"
+#include "generate_wave_table.h"
+
+typedef struct AudioPhaserContext {
+ const AVClass *class;
+ double in_gain, out_gain;
+ double delay;
+ double decay;
+ double speed;
+
+ int type;
+
+ int delay_buffer_length;
+ double *delay_buffer;
+
+ int modulation_buffer_length;
+ int32_t *modulation_buffer;
+
+ int delay_pos, modulation_pos;
+
+ void (*phaser)(struct AudioPhaserContext *s,
+ uint8_t * const *src, uint8_t **dst,
+ int nb_samples, int channels);
+} AudioPhaserContext;
+
+#define OFFSET(x) offsetof(AudioPhaserContext, x)
+#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption aphaser_options[] = {
+ { "in_gain", "set input gain", OFFSET(in_gain), AV_OPT_TYPE_DOUBLE, {.dbl=.4}, 0, 1, FLAGS },
+ { "out_gain", "set output gain", OFFSET(out_gain), AV_OPT_TYPE_DOUBLE, {.dbl=.74}, 0, 1e9, FLAGS },
+ { "delay", "set delay in milliseconds", OFFSET(delay), AV_OPT_TYPE_DOUBLE, {.dbl=3.}, 0, 5, FLAGS },
+ { "decay", "set decay", OFFSET(decay), AV_OPT_TYPE_DOUBLE, {.dbl=.4}, 0, .99, FLAGS },
+ { "speed", "set modulation speed", OFFSET(speed), AV_OPT_TYPE_DOUBLE, {.dbl=.5}, .1, 2, FLAGS },
+ { "type", "set modulation type", OFFSET(type), AV_OPT_TYPE_INT, {.i64=WAVE_TRI}, 0, WAVE_NB-1, FLAGS, "type" },
+ { "triangular", NULL, 0, AV_OPT_TYPE_CONST, {.i64=WAVE_TRI}, 0, 0, FLAGS, "type" },
+ { "t", NULL, 0, AV_OPT_TYPE_CONST, {.i64=WAVE_TRI}, 0, 0, FLAGS, "type" },
+ { "sinusoidal", NULL, 0, AV_OPT_TYPE_CONST, {.i64=WAVE_SIN}, 0, 0, FLAGS, "type" },
+ { "s", NULL, 0, AV_OPT_TYPE_CONST, {.i64=WAVE_SIN}, 0, 0, FLAGS, "type" },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(aphaser);
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ AudioPhaserContext *s = ctx->priv;
+
+ if (s->in_gain > (1 - s->decay * s->decay))
+ av_log(ctx, AV_LOG_WARNING, "in_gain may cause clipping\n");
+ if (s->in_gain / (1 - s->decay) > 1 / s->out_gain)
+ av_log(ctx, AV_LOG_WARNING, "out_gain may cause clipping\n");
+
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *formats;
+ AVFilterChannelLayouts *layouts;
+ static const enum AVSampleFormat sample_fmts[] = {
+ AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBLP,
+ AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLTP,
+ AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32P,
+ AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16P,
+ AV_SAMPLE_FMT_NONE
+ };
+ int ret;
+
+ layouts = ff_all_channel_counts();
+ if (!layouts)
+ return AVERROR(ENOMEM);
+ ret = ff_set_common_channel_layouts(ctx, layouts);
+ if (ret < 0)
+ return ret;
+
+ formats = ff_make_format_list(sample_fmts);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ret = ff_set_common_formats(ctx, formats);
+ if (ret < 0)
+ return ret;
+
+ formats = ff_all_samplerates();
+ if (!formats)
+ return AVERROR(ENOMEM);
+ return ff_set_common_samplerates(ctx, formats);
+}
+
+#define MOD(a, b) (((a) >= (b)) ? (a) - (b) : (a))
+
+#define PHASER_PLANAR(name, type) \
+static void phaser_## name ##p(AudioPhaserContext *s, \
+ uint8_t * const *ssrc, uint8_t **ddst, \
+ int nb_samples, int channels) \
+{ \
+ int i, c, delay_pos, modulation_pos; \
+ \
+ av_assert0(channels > 0); \
+ for (c = 0; c < channels; c++) { \
+ type *src = (type *)ssrc[c]; \
+ type *dst = (type *)ddst[c]; \
+ double *buffer = s->delay_buffer + \
+ c * s->delay_buffer_length; \
+ \
+ delay_pos = s->delay_pos; \
+ modulation_pos = s->modulation_pos; \
+ \
+ for (i = 0; i < nb_samples; i++, src++, dst++) { \
+ double v = *src * s->in_gain + buffer[ \
+ MOD(delay_pos + s->modulation_buffer[ \
+ modulation_pos], \
+ s->delay_buffer_length)] * s->decay; \
+ \
+ modulation_pos = MOD(modulation_pos + 1, \
+ s->modulation_buffer_length); \
+ delay_pos = MOD(delay_pos + 1, s->delay_buffer_length); \
+ buffer[delay_pos] = v; \
+ \
+ *dst = v * s->out_gain; \
+ } \
+ } \
+ \
+ s->delay_pos = delay_pos; \
+ s->modulation_pos = modulation_pos; \
+}
+
+#define PHASER(name, type) \
+static void phaser_## name (AudioPhaserContext *s, \
+ uint8_t * const *ssrc, uint8_t **ddst, \
+ int nb_samples, int channels) \
+{ \
+ int i, c, delay_pos, modulation_pos; \
+ type *src = (type *)ssrc[0]; \
+ type *dst = (type *)ddst[0]; \
+ double *buffer = s->delay_buffer; \
+ \
+ delay_pos = s->delay_pos; \
+ modulation_pos = s->modulation_pos; \
+ \
+ for (i = 0; i < nb_samples; i++) { \
+ int pos = MOD(delay_pos + s->modulation_buffer[modulation_pos], \
+ s->delay_buffer_length) * channels; \
+ int npos; \
+ \
+ delay_pos = MOD(delay_pos + 1, s->delay_buffer_length); \
+ npos = delay_pos * channels; \
+ for (c = 0; c < channels; c++, src++, dst++) { \
+ double v = *src * s->in_gain + buffer[pos + c] * s->decay; \
+ \
+ buffer[npos + c] = v; \
+ \
+ *dst = v * s->out_gain; \
+ } \
+ \
+ modulation_pos = MOD(modulation_pos + 1, \
+ s->modulation_buffer_length); \
+ } \
+ \
+ s->delay_pos = delay_pos; \
+ s->modulation_pos = modulation_pos; \
+}
+
+PHASER_PLANAR(dbl, double)
+PHASER_PLANAR(flt, float)
+PHASER_PLANAR(s16, int16_t)
+PHASER_PLANAR(s32, int32_t)
+
+PHASER(dbl, double)
+PHASER(flt, float)
+PHASER(s16, int16_t)
+PHASER(s32, int32_t)
+
+static int config_output(AVFilterLink *outlink)
+{
+ AudioPhaserContext *s = outlink->src->priv;
+ AVFilterLink *inlink = outlink->src->inputs[0];
+
+ s->delay_buffer_length = s->delay * 0.001 * inlink->sample_rate + 0.5;
+ if (s->delay_buffer_length <= 0) {
+ av_log(outlink->src, AV_LOG_ERROR, "delay is too small\n");
+ return AVERROR(EINVAL);
+ }
+ s->delay_buffer = av_calloc(s->delay_buffer_length, sizeof(*s->delay_buffer) * inlink->channels);
+ s->modulation_buffer_length = inlink->sample_rate / s->speed + 0.5;
+ s->modulation_buffer = av_malloc_array(s->modulation_buffer_length, sizeof(*s->modulation_buffer));
+
+ if (!s->modulation_buffer || !s->delay_buffer)
+ return AVERROR(ENOMEM);
+
+ ff_generate_wave_table(s->type, AV_SAMPLE_FMT_S32,
+ s->modulation_buffer, s->modulation_buffer_length,
+ 1., s->delay_buffer_length, M_PI / 2.0);
+
+ s->delay_pos = s->modulation_pos = 0;
+
+ switch (inlink->format) {
+ case AV_SAMPLE_FMT_DBL: s->phaser = phaser_dbl; break;
+ case AV_SAMPLE_FMT_DBLP: s->phaser = phaser_dblp; break;
+ case AV_SAMPLE_FMT_FLT: s->phaser = phaser_flt; break;
+ case AV_SAMPLE_FMT_FLTP: s->phaser = phaser_fltp; break;
+ case AV_SAMPLE_FMT_S16: s->phaser = phaser_s16; break;
+ case AV_SAMPLE_FMT_S16P: s->phaser = phaser_s16p; break;
+ case AV_SAMPLE_FMT_S32: s->phaser = phaser_s32; break;
+ case AV_SAMPLE_FMT_S32P: s->phaser = phaser_s32p; break;
+ default: av_assert0(0);
+ }
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *inbuf)
+{
+ AudioPhaserContext *s = inlink->dst->priv;
+ AVFilterLink *outlink = inlink->dst->outputs[0];
+ AVFrame *outbuf;
+
+ if (av_frame_is_writable(inbuf)) {
+ outbuf = inbuf;
+ } else {
+ outbuf = ff_get_audio_buffer(inlink, inbuf->nb_samples);
+ if (!outbuf)
+ return AVERROR(ENOMEM);
+ av_frame_copy_props(outbuf, inbuf);
+ }
+
+ s->phaser(s, inbuf->extended_data, outbuf->extended_data,
+ outbuf->nb_samples, av_frame_get_channels(outbuf));
+
+ if (inbuf != outbuf)
+ av_frame_free(&inbuf);
+
+ return ff_filter_frame(outlink, outbuf);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ AudioPhaserContext *s = ctx->priv;
+
+ av_freep(&s->delay_buffer);
+ av_freep(&s->modulation_buffer);
+}
+
+static const AVFilterPad aphaser_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad aphaser_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_output,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_aphaser = {
+ .name = "aphaser",
+ .description = NULL_IF_CONFIG_SMALL("Add a phasing effect to the audio."),
+ .query_formats = query_formats,
+ .priv_size = sizeof(AudioPhaserContext),
+ .init = init,
+ .uninit = uninit,
+ .inputs = aphaser_inputs,
+ .outputs = aphaser_outputs,
+ .priv_class = &aphaser_class,
+};
diff --git a/libavfilter/af_apulsator.c b/libavfilter/af_apulsator.c
new file mode 100644
index 0000000000..67711a28ce
--- /dev/null
+++ b/libavfilter/af_apulsator.c
@@ -0,0 +1,257 @@
+/*
+ * Copyright (c) 2001-2010 Krzysztof Foltman, Markus Schmidt, Thor Harald Johansen and others
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#include "internal.h"
+#include "audio.h"
+
+enum PulsatorModes { SINE, TRIANGLE, SQUARE, SAWUP, SAWDOWN, NB_MODES };
+enum PulsatorTimings { UNIT_BPM, UNIT_MS, UNIT_HZ, NB_TIMINGS };
+
+typedef struct SimpleLFO {
+ double phase;
+ double freq;
+ double offset;
+ double amount;
+ double pwidth;
+ int mode;
+ int srate;
+} SimpleLFO;
+
+typedef struct AudioPulsatorContext {
+ const AVClass *class;
+ int mode;
+ double level_in;
+ double level_out;
+ double amount;
+ double offset_l;
+ double offset_r;
+ double pwidth;
+ double bpm;
+ double hertz;
+ int ms;
+ int timing;
+
+ SimpleLFO lfoL, lfoR;
+} AudioPulsatorContext;
+
+#define OFFSET(x) offsetof(AudioPulsatorContext, x)
+#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption apulsator_options[] = {
+ { "level_in", "set input gain", OFFSET(level_in), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0.015625, 64, FLAGS, },
+ { "level_out", "set output gain", OFFSET(level_out), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0.015625, 64, FLAGS, },
+ { "mode", "set mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=SINE}, SINE, NB_MODES-1, FLAGS, "mode" },
+ { "sine", NULL, 0, AV_OPT_TYPE_CONST, {.i64=SINE}, 0, 0, FLAGS, "mode" },
+ { "triangle", NULL, 0, AV_OPT_TYPE_CONST, {.i64=TRIANGLE},0, 0, FLAGS, "mode" },
+ { "square", NULL, 0, AV_OPT_TYPE_CONST, {.i64=SQUARE}, 0, 0, FLAGS, "mode" },
+ { "sawup", NULL, 0, AV_OPT_TYPE_CONST, {.i64=SAWUP}, 0, 0, FLAGS, "mode" },
+ { "sawdown", NULL, 0, AV_OPT_TYPE_CONST, {.i64=SAWDOWN}, 0, 0, FLAGS, "mode" },
+ { "amount", "set modulation", OFFSET(amount), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },
+ { "offset_l", "set offset L", OFFSET(offset_l), AV_OPT_TYPE_DOUBLE, {.dbl=0}, 0, 1, FLAGS },
+ { "offset_r", "set offset R", OFFSET(offset_r), AV_OPT_TYPE_DOUBLE, {.dbl=.5}, 0, 1, FLAGS },
+ { "width", "set pulse width", OFFSET(pwidth), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 2, FLAGS },
+ { "timing", "set timing", OFFSET(timing), AV_OPT_TYPE_INT, {.i64=2}, 0, NB_TIMINGS-1, FLAGS, "timing" },
+ { "bpm", NULL, 0, AV_OPT_TYPE_CONST, {.i64=UNIT_BPM}, 0, 0, FLAGS, "timing" },
+ { "ms", NULL, 0, AV_OPT_TYPE_CONST, {.i64=UNIT_MS}, 0, 0, FLAGS, "timing" },
+ { "hz", NULL, 0, AV_OPT_TYPE_CONST, {.i64=UNIT_HZ}, 0, 0, FLAGS, "timing" },
+ { "bpm", "set BPM", OFFSET(bpm), AV_OPT_TYPE_DOUBLE, {.dbl=120}, 30, 300, FLAGS },
+ { "ms", "set ms", OFFSET(ms), AV_OPT_TYPE_INT, {.i64=500}, 10, 2000, FLAGS },
+ { "hz", "set frequency", OFFSET(hertz), AV_OPT_TYPE_DOUBLE, {.dbl=2}, 0.01, 100, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(apulsator);
+
+static void lfo_advance(SimpleLFO *lfo, unsigned count)
+{
+ lfo->phase = fabs(lfo->phase + count * lfo->freq / lfo->srate);
+ if (lfo->phase >= 1)
+ lfo->phase = fmod(lfo->phase, 1);
+}
+
+static double lfo_get_value(SimpleLFO *lfo)
+{
+ double phs = FFMIN(100, lfo->phase / FFMIN(1.99, FFMAX(0.01, lfo->pwidth)) + lfo->offset);
+ double val;
+
+ if (phs > 1)
+ phs = fmod(phs, 1.);
+
+ switch (lfo->mode) {
+ case SINE:
+ val = sin(phs * 2 * M_PI);
+ break;
+ case TRIANGLE:
+ if (phs > 0.75)
+ val = (phs - 0.75) * 4 - 1;
+ else if (phs > 0.25)
+ val = -4 * phs + 2;
+ else
+ val = phs * 4;
+ break;
+ case SQUARE:
+ val = phs < 0.5 ? -1 : +1;
+ break;
+ case SAWUP:
+ val = phs * 2 - 1;
+ break;
+ case SAWDOWN:
+ val = 1 - phs * 2;
+ break;
+ default: av_assert0(0);
+ }
+
+ return val * lfo->amount;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AudioPulsatorContext *s = ctx->priv;
+ const double *src = (const double *)in->data[0];
+ const int nb_samples = in->nb_samples;
+ const double level_out = s->level_out;
+ const double level_in = s->level_in;
+ const double amount = s->amount;
+ AVFrame *out;
+ double *dst;
+ int n;
+
+ if (av_frame_is_writable(in)) {
+ out = in;
+ } else {
+ out = ff_get_audio_buffer(inlink, in->nb_samples);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+ }
+ dst = (double *)out->data[0];
+
+ for (n = 0; n < nb_samples; n++) {
+ double outL;
+ double outR;
+ double inL = src[0] * level_in;
+ double inR = src[1] * level_in;
+ double procL = inL;
+ double procR = inR;
+
+ procL *= lfo_get_value(&s->lfoL) * 0.5 + amount / 2;
+ procR *= lfo_get_value(&s->lfoR) * 0.5 + amount / 2;
+
+ outL = procL + inL * (1 - amount);
+ outR = procR + inR * (1 - amount);
+
+ outL *= level_out;
+ outR *= level_out;
+
+ dst[0] = outL;
+ dst[1] = outR;
+
+ lfo_advance(&s->lfoL, 1);
+ lfo_advance(&s->lfoR, 1);
+
+ dst += 2;
+ src += 2;
+ }
+
+ if (in != out)
+ av_frame_free(&in);
+
+ return ff_filter_frame(outlink, out);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterChannelLayouts *layout = NULL;
+ AVFilterFormats *formats = NULL;
+ int ret;
+
+ if ((ret = ff_add_format (&formats, AV_SAMPLE_FMT_DBL )) < 0 ||
+ (ret = ff_set_common_formats (ctx , formats )) < 0 ||
+ (ret = ff_add_channel_layout (&layout , AV_CH_LAYOUT_STEREO)) < 0 ||
+ (ret = ff_set_common_channel_layouts (ctx , layout )) < 0)
+ return ret;
+
+ formats = ff_all_samplerates();
+ return ff_set_common_samplerates(ctx, formats);
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AudioPulsatorContext *s = ctx->priv;
+ double freq;
+
+ switch (s->timing) {
+ case UNIT_BPM: freq = s->bpm / 60; break;
+ case UNIT_MS: freq = 1 / (s->ms / 1000.); break;
+ case UNIT_HZ: freq = s->hertz; break;
+ default: av_assert0(0);
+ }
+
+ s->lfoL.freq = freq;
+ s->lfoR.freq = freq;
+ s->lfoL.mode = s->mode;
+ s->lfoR.mode = s->mode;
+ s->lfoL.offset = s->offset_l;
+ s->lfoR.offset = s->offset_r;
+ s->lfoL.srate = inlink->sample_rate;
+ s->lfoR.srate = inlink->sample_rate;
+ s->lfoL.amount = s->amount;
+ s->lfoR.amount = s->amount;
+ s->lfoL.pwidth = s->pwidth;
+ s->lfoR.pwidth = s->pwidth;
+
+ return 0;
+}
+
+static const AVFilterPad inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_apulsator = {
+ .name = "apulsator",
+ .description = NULL_IF_CONFIG_SMALL("Audio pulsator."),
+ .priv_size = sizeof(AudioPulsatorContext),
+ .priv_class = &apulsator_class,
+ .query_formats = query_formats,
+ .inputs = inputs,
+ .outputs = outputs,
+};
diff --git a/libavfilter/af_aresample.c b/libavfilter/af_aresample.c
new file mode 100644
index 0000000000..028e105318
--- /dev/null
+++ b/libavfilter/af_aresample.c
@@ -0,0 +1,351 @@
+/*
+ * Copyright (c) 2011 Stefano Sabatini
+ * Copyright (c) 2011 Mina Nagy Zaki
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * resampling audio filter
+ */
+
+#include "libavutil/avstring.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/opt.h"
+#include "libavutil/samplefmt.h"
+#include "libavutil/avassert.h"
+#include "libswresample/swresample.h"
+#include "avfilter.h"
+#include "audio.h"
+#include "internal.h"
+
+typedef struct {
+ const AVClass *class;
+ int sample_rate_arg;
+ double ratio;
+ struct SwrContext *swr;
+ int64_t next_pts;
+ int more_data;
+} AResampleContext;
+
+static av_cold int init_dict(AVFilterContext *ctx, AVDictionary **opts)
+{
+ AResampleContext *aresample = ctx->priv;
+ int ret = 0;
+
+ aresample->next_pts = AV_NOPTS_VALUE;
+ aresample->swr = swr_alloc();
+ if (!aresample->swr) {
+ ret = AVERROR(ENOMEM);
+ goto end;
+ }
+
+ if (opts) {
+ AVDictionaryEntry *e = NULL;
+
+ while ((e = av_dict_get(*opts, "", e, AV_DICT_IGNORE_SUFFIX))) {
+ if ((ret = av_opt_set(aresample->swr, e->key, e->value, 0)) < 0)
+ goto end;
+ }
+ av_dict_free(opts);
+ }
+ if (aresample->sample_rate_arg > 0)
+ av_opt_set_int(aresample->swr, "osr", aresample->sample_rate_arg, 0);
+end:
+ return ret;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ AResampleContext *aresample = ctx->priv;
+ swr_free(&aresample->swr);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AResampleContext *aresample = ctx->priv;
+ enum AVSampleFormat out_format;
+ int64_t out_rate, out_layout;
+
+ AVFilterLink *inlink = ctx->inputs[0];
+ AVFilterLink *outlink = ctx->outputs[0];
+
+ AVFilterFormats *in_formats, *out_formats;
+ AVFilterFormats *in_samplerates, *out_samplerates;
+ AVFilterChannelLayouts *in_layouts, *out_layouts;
+ int ret;
+
+ av_opt_get_sample_fmt(aresample->swr, "osf", 0, &out_format);
+ av_opt_get_int(aresample->swr, "osr", 0, &out_rate);
+ av_opt_get_int(aresample->swr, "ocl", 0, &out_layout);
+
+ in_formats = ff_all_formats(AVMEDIA_TYPE_AUDIO);
+ if ((ret = ff_formats_ref(in_formats, &inlink->out_formats)) < 0)
+ return ret;
+
+ in_samplerates = ff_all_samplerates();
+ if ((ret = ff_formats_ref(in_samplerates, &inlink->out_samplerates)) < 0)
+ return ret;
+
+ in_layouts = ff_all_channel_counts();
+ if ((ret = ff_channel_layouts_ref(in_layouts, &inlink->out_channel_layouts)) < 0)
+ return ret;
+
+ if(out_rate > 0) {
+ int ratelist[] = { out_rate, -1 };
+ out_samplerates = ff_make_format_list(ratelist);
+ } else {
+ out_samplerates = ff_all_samplerates();
+ }
+
+ if ((ret = ff_formats_ref(out_samplerates, &outlink->in_samplerates)) < 0)
+ return ret;
+
+ if(out_format != AV_SAMPLE_FMT_NONE) {
+ int formatlist[] = { out_format, -1 };
+ out_formats = ff_make_format_list(formatlist);
+ } else
+ out_formats = ff_all_formats(AVMEDIA_TYPE_AUDIO);
+ if ((ret = ff_formats_ref(out_formats, &outlink->in_formats)) < 0)
+ return ret;
+
+ if(out_layout) {
+ int64_t layout_list[] = { out_layout, -1 };
+ out_layouts = avfilter_make_format64_list(layout_list);
+ } else
+ out_layouts = ff_all_channel_counts();
+
+ return ff_channel_layouts_ref(out_layouts, &outlink->in_channel_layouts);
+}
+
+
+static int config_output(AVFilterLink *outlink)
+{
+ int ret;
+ AVFilterContext *ctx = outlink->src;
+ AVFilterLink *inlink = ctx->inputs[0];
+ AResampleContext *aresample = ctx->priv;
+ int64_t out_rate, out_layout;
+ enum AVSampleFormat out_format;
+ char inchl_buf[128], outchl_buf[128];
+
+ aresample->swr = swr_alloc_set_opts(aresample->swr,
+ outlink->channel_layout, outlink->format, outlink->sample_rate,
+ inlink->channel_layout, inlink->format, inlink->sample_rate,
+ 0, ctx);
+ if (!aresample->swr)
+ return AVERROR(ENOMEM);
+ if (!inlink->channel_layout)
+ av_opt_set_int(aresample->swr, "ich", inlink->channels, 0);
+ if (!outlink->channel_layout)
+ av_opt_set_int(aresample->swr, "och", outlink->channels, 0);
+
+ ret = swr_init(aresample->swr);
+ if (ret < 0)
+ return ret;
+
+ av_opt_get_int(aresample->swr, "osr", 0, &out_rate);
+ av_opt_get_int(aresample->swr, "ocl", 0, &out_layout);
+ av_opt_get_sample_fmt(aresample->swr, "osf", 0, &out_format);
+ outlink->time_base = (AVRational) {1, out_rate};
+
+ av_assert0(outlink->sample_rate == out_rate);
+ av_assert0(outlink->channel_layout == out_layout || !outlink->channel_layout);
+ av_assert0(outlink->format == out_format);
+
+ aresample->ratio = (double)outlink->sample_rate / inlink->sample_rate;
+
+ av_get_channel_layout_string(inchl_buf, sizeof(inchl_buf), inlink ->channels, inlink ->channel_layout);
+ av_get_channel_layout_string(outchl_buf, sizeof(outchl_buf), outlink->channels, outlink->channel_layout);
+
+ av_log(ctx, AV_LOG_VERBOSE, "ch:%d chl:%s fmt:%s r:%dHz -> ch:%d chl:%s fmt:%s r:%dHz\n",
+ inlink ->channels, inchl_buf, av_get_sample_fmt_name(inlink->format), inlink->sample_rate,
+ outlink->channels, outchl_buf, av_get_sample_fmt_name(outlink->format), outlink->sample_rate);
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *insamplesref)
+{
+ AResampleContext *aresample = inlink->dst->priv;
+ const int n_in = insamplesref->nb_samples;
+ int64_t delay;
+ int n_out = n_in * aresample->ratio + 32;
+ AVFilterLink *const outlink = inlink->dst->outputs[0];
+ AVFrame *outsamplesref;
+ int ret;
+
+ delay = swr_get_delay(aresample->swr, outlink->sample_rate);
+ if (delay > 0)
+ n_out += FFMIN(delay, FFMAX(4096, n_out));
+
+ outsamplesref = ff_get_audio_buffer(outlink, n_out);
+
+ if(!outsamplesref)
+ return AVERROR(ENOMEM);
+
+ av_frame_copy_props(outsamplesref, insamplesref);
+ outsamplesref->format = outlink->format;
+ av_frame_set_channels(outsamplesref, outlink->channels);
+ outsamplesref->channel_layout = outlink->channel_layout;
+ outsamplesref->sample_rate = outlink->sample_rate;
+
+ if(insamplesref->pts != AV_NOPTS_VALUE) {
+ int64_t inpts = av_rescale(insamplesref->pts, inlink->time_base.num * (int64_t)outlink->sample_rate * inlink->sample_rate, inlink->time_base.den);
+ int64_t outpts= swr_next_pts(aresample->swr, inpts);
+ aresample->next_pts =
+ outsamplesref->pts = ROUNDED_DIV(outpts, inlink->sample_rate);
+ } else {
+ outsamplesref->pts = AV_NOPTS_VALUE;
+ }
+ n_out = swr_convert(aresample->swr, outsamplesref->extended_data, n_out,
+ (void *)insamplesref->extended_data, n_in);
+ if (n_out <= 0) {
+ av_frame_free(&outsamplesref);
+ av_frame_free(&insamplesref);
+ return 0;
+ }
+
+ aresample->more_data = outsamplesref->nb_samples == n_out; // Indicate that there is probably more data in our buffers
+
+ outsamplesref->nb_samples = n_out;
+
+ ret = ff_filter_frame(outlink, outsamplesref);
+ av_frame_free(&insamplesref);
+ return ret;
+}
+
+static int flush_frame(AVFilterLink *outlink, int final, AVFrame **outsamplesref_ret)
+{
+ AVFilterContext *ctx = outlink->src;
+ AResampleContext *aresample = ctx->priv;
+ AVFilterLink *const inlink = outlink->src->inputs[0];
+ AVFrame *outsamplesref;
+ int n_out = 4096;
+ int64_t pts;
+
+ outsamplesref = ff_get_audio_buffer(outlink, n_out);
+ *outsamplesref_ret = outsamplesref;
+ if (!outsamplesref)
+ return AVERROR(ENOMEM);
+
+ pts = swr_next_pts(aresample->swr, INT64_MIN);
+ pts = ROUNDED_DIV(pts, inlink->sample_rate);
+
+ n_out = swr_convert(aresample->swr, outsamplesref->extended_data, n_out, final ? NULL : (void*)outsamplesref->extended_data, 0);
+ if (n_out <= 0) {
+ av_frame_free(&outsamplesref);
+ return (n_out == 0) ? AVERROR_EOF : n_out;
+ }
+
+ outsamplesref->sample_rate = outlink->sample_rate;
+ outsamplesref->nb_samples = n_out;
+
+ outsamplesref->pts = pts;
+
+ return 0;
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ AResampleContext *aresample = ctx->priv;
+ int ret;
+
+ // First try to get data from the internal buffers
+ if (aresample->more_data) {
+ AVFrame *outsamplesref;
+
+ if (flush_frame(outlink, 0, &outsamplesref) >= 0) {
+ return ff_filter_frame(outlink, outsamplesref);
+ }
+ }
+ aresample->more_data = 0;
+
+ // Second request more data from the input
+ ret = ff_request_frame(ctx->inputs[0]);
+
+ // Third if we hit the end flush
+ if (ret == AVERROR_EOF) {
+ AVFrame *outsamplesref;
+
+ if ((ret = flush_frame(outlink, 1, &outsamplesref)) < 0)
+ return ret;
+
+ return ff_filter_frame(outlink, outsamplesref);
+ }
+ return ret;
+}
+
+static const AVClass *resample_child_class_next(const AVClass *prev)
+{
+ return prev ? NULL : swr_get_class();
+}
+
+static void *resample_child_next(void *obj, void *prev)
+{
+ AResampleContext *s = obj;
+ return prev ? NULL : s->swr;
+}
+
+#define OFFSET(x) offsetof(AResampleContext, x)
+#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption options[] = {
+ {"sample_rate", NULL, OFFSET(sample_rate_arg), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, FLAGS },
+ {NULL}
+};
+
+static const AVClass aresample_class = {
+ .class_name = "aresample",
+ .item_name = av_default_item_name,
+ .option = options,
+ .version = LIBAVUTIL_VERSION_INT,
+ .child_class_next = resample_child_class_next,
+ .child_next = resample_child_next,
+};
+
+static const AVFilterPad aresample_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad aresample_outputs[] = {
+ {
+ .name = "default",
+ .config_props = config_output,
+ .request_frame = request_frame,
+ .type = AVMEDIA_TYPE_AUDIO,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_aresample = {
+ .name = "aresample",
+ .description = NULL_IF_CONFIG_SMALL("Resample audio data."),
+ .init_dict = init_dict,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .priv_size = sizeof(AResampleContext),
+ .priv_class = &aresample_class,
+ .inputs = aresample_inputs,
+ .outputs = aresample_outputs,
+};
diff --git a/libavfilter/af_asetnsamples.c b/libavfilter/af_asetnsamples.c
new file mode 100644
index 0000000000..b5aa193c2d
--- /dev/null
+++ b/libavfilter/af_asetnsamples.c
@@ -0,0 +1,195 @@
+/*
+ * Copyright (c) 2012 Andrey Utkin
+ * Copyright (c) 2012 Stefano Sabatini
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Filter that changes number of samples on single output operation
+ */
+
+#include "libavutil/audio_fifo.h"
+#include "libavutil/avassert.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#include "audio.h"
+#include "internal.h"
+#include "formats.h"
+
+typedef struct {
+ const AVClass *class;
+ int nb_out_samples; ///< how many samples to output
+ AVAudioFifo *fifo; ///< samples are queued here
+ int64_t next_out_pts;
+ int pad;
+} ASNSContext;
+
+#define OFFSET(x) offsetof(ASNSContext, x)
+#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption asetnsamples_options[] = {
+ { "nb_out_samples", "set the number of per-frame output samples", OFFSET(nb_out_samples), AV_OPT_TYPE_INT, {.i64=1024}, 1, INT_MAX, FLAGS },
+ { "n", "set the number of per-frame output samples", OFFSET(nb_out_samples), AV_OPT_TYPE_INT, {.i64=1024}, 1, INT_MAX, FLAGS },
+ { "pad", "pad last frame with zeros", OFFSET(pad), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, FLAGS },
+ { "p", "pad last frame with zeros", OFFSET(pad), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(asetnsamples);
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ ASNSContext *asns = ctx->priv;
+
+ asns->next_out_pts = AV_NOPTS_VALUE;
+ av_log(ctx, AV_LOG_VERBOSE, "nb_out_samples:%d pad:%d\n", asns->nb_out_samples, asns->pad);
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ ASNSContext *asns = ctx->priv;
+ av_audio_fifo_free(asns->fifo);
+}
+
+static int config_props_output(AVFilterLink *outlink)
+{
+ ASNSContext *asns = outlink->src->priv;
+
+ asns->fifo = av_audio_fifo_alloc(outlink->format, outlink->channels, asns->nb_out_samples);
+ if (!asns->fifo)
+ return AVERROR(ENOMEM);
+
+ return 0;
+}
+
+static int push_samples(AVFilterLink *outlink)
+{
+ ASNSContext *asns = outlink->src->priv;
+ AVFrame *outsamples = NULL;
+ int ret, nb_out_samples, nb_pad_samples;
+
+ if (asns->pad) {
+ nb_out_samples = av_audio_fifo_size(asns->fifo) ? asns->nb_out_samples : 0;
+ nb_pad_samples = nb_out_samples - FFMIN(nb_out_samples, av_audio_fifo_size(asns->fifo));
+ } else {
+ nb_out_samples = FFMIN(asns->nb_out_samples, av_audio_fifo_size(asns->fifo));
+ nb_pad_samples = 0;
+ }
+
+ if (!nb_out_samples)
+ return 0;
+
+ outsamples = ff_get_audio_buffer(outlink, nb_out_samples);
+ if (!outsamples)
+ return AVERROR(ENOMEM);
+
+ av_audio_fifo_read(asns->fifo,
+ (void **)outsamples->extended_data, nb_out_samples);
+
+ if (nb_pad_samples)
+ av_samples_set_silence(outsamples->extended_data, nb_out_samples - nb_pad_samples,
+ nb_pad_samples, outlink->channels,
+ outlink->format);
+ outsamples->nb_samples = nb_out_samples;
+ outsamples->channel_layout = outlink->channel_layout;
+ outsamples->sample_rate = outlink->sample_rate;
+ outsamples->pts = asns->next_out_pts;
+
+ if (asns->next_out_pts != AV_NOPTS_VALUE)
+ asns->next_out_pts += av_rescale_q(nb_out_samples, (AVRational){1, outlink->sample_rate}, outlink->time_base);
+
+ ret = ff_filter_frame(outlink, outsamples);
+ if (ret < 0)
+ return ret;
+ return nb_out_samples;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
+{
+ AVFilterContext *ctx = inlink->dst;
+ ASNSContext *asns = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ int ret;
+ int nb_samples = insamples->nb_samples;
+
+ if (av_audio_fifo_space(asns->fifo) < nb_samples) {
+ av_log(ctx, AV_LOG_DEBUG, "No space for %d samples, stretching audio fifo\n", nb_samples);
+ ret = av_audio_fifo_realloc(asns->fifo, av_audio_fifo_size(asns->fifo) + nb_samples);
+ if (ret < 0) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Stretching audio fifo failed, discarded %d samples\n", nb_samples);
+ return -1;
+ }
+ }
+ av_audio_fifo_write(asns->fifo, (void **)insamples->extended_data, nb_samples);
+ if (asns->next_out_pts == AV_NOPTS_VALUE)
+ asns->next_out_pts = insamples->pts;
+ av_frame_free(&insamples);
+
+ while (av_audio_fifo_size(asns->fifo) >= asns->nb_out_samples)
+ push_samples(outlink);
+ return 0;
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ AVFilterLink *inlink = outlink->src->inputs[0];
+ int ret;
+
+ ret = ff_request_frame(inlink);
+ if (ret == AVERROR_EOF) {
+ ret = push_samples(outlink);
+ return ret < 0 ? ret : ret > 0 ? 0 : AVERROR_EOF;
+ }
+
+ return ret;
+}
+
+static const AVFilterPad asetnsamples_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad asetnsamples_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .request_frame = request_frame,
+ .config_props = config_props_output,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_asetnsamples = {
+ .name = "asetnsamples",
+ .description = NULL_IF_CONFIG_SMALL("Set the number of samples for each output audio frames."),
+ .priv_size = sizeof(ASNSContext),
+ .priv_class = &asetnsamples_class,
+ .init = init,
+ .uninit = uninit,
+ .inputs = asetnsamples_inputs,
+ .outputs = asetnsamples_outputs,
+};
diff --git a/libavfilter/af_asetrate.c b/libavfilter/af_asetrate.c
new file mode 100644
index 0000000000..66febd71ba
--- /dev/null
+++ b/libavfilter/af_asetrate.c
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2013 Nicolas George
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#include "internal.h"
+
+typedef struct {
+ const AVClass *class;
+ int sample_rate;
+ int rescale_pts;
+} ASetRateContext;
+
+#define CONTEXT ASetRateContext
+#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+#define OPT_GENERIC(name, field, def, min, max, descr, type, deffield, ...) \
+ { name, descr, offsetof(CONTEXT, field), AV_OPT_TYPE_ ## type, \
+ { .deffield = def }, min, max, FLAGS, __VA_ARGS__ }
+
+#define OPT_INT(name, field, def, min, max, descr, ...) \
+ OPT_GENERIC(name, field, def, min, max, descr, INT, i64, __VA_ARGS__)
+
+static const AVOption asetrate_options[] = {
+ OPT_INT("sample_rate", sample_rate, 44100, 1, INT_MAX, "set the sample rate",),
+ OPT_INT("r", sample_rate, 44100, 1, INT_MAX, "set the sample rate",),
+ {NULL},
+};
+
+AVFILTER_DEFINE_CLASS(asetrate);
+
+static av_cold int query_formats(AVFilterContext *ctx)
+{
+ ASetRateContext *sr = ctx->priv;
+ int sample_rates[] = { sr->sample_rate, -1 };
+
+ return ff_formats_ref(ff_make_format_list(sample_rates),
+ &ctx->outputs[0]->in_samplerates);
+}
+
+static av_cold int config_props(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ ASetRateContext *sr = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
+ AVRational intb = ctx->inputs[0]->time_base;
+ int inrate = inlink->sample_rate;
+
+ if (intb.num == 1 && intb.den == inrate) {
+ outlink->time_base.num = 1;
+ outlink->time_base.den = outlink->sample_rate;
+ } else {
+ outlink->time_base = intb;
+ sr->rescale_pts = 1;
+ if (av_q2d(intb) > 1.0 / FFMAX(inrate, outlink->sample_rate))
+ av_log(ctx, AV_LOG_WARNING, "Time base is inaccurate\n");
+ }
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
+{
+ AVFilterContext *ctx = inlink->dst;
+ ASetRateContext *sr = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+
+ frame->sample_rate = outlink->sample_rate;
+ if (sr->rescale_pts)
+ frame->pts = av_rescale(frame->pts, inlink->sample_rate,
+ outlink->sample_rate);
+ return ff_filter_frame(outlink, frame);
+}
+
+static const AVFilterPad asetrate_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad asetrate_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_props,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_asetrate = {
+ .name = "asetrate",
+ .description = NULL_IF_CONFIG_SMALL("Change the sample rate without "
+ "altering the data."),
+ .query_formats = query_formats,
+ .priv_size = sizeof(ASetRateContext),
+ .inputs = asetrate_inputs,
+ .outputs = asetrate_outputs,
+ .priv_class = &asetrate_class,
+};
diff --git a/libavfilter/af_ashowinfo.c b/libavfilter/af_ashowinfo.c
index 5f0e2549ff..a81729f7f7 100644
--- a/libavfilter/af_ashowinfo.c
+++ b/libavfilter/af_ashowinfo.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2011 Stefano Sabatini
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -34,6 +34,7 @@
#include "libavutil/intreadwrite.h"
#include "libavutil/mem.h"
#include "libavutil/replaygain.h"
+#include "libavutil/timestamp.h"
#include "libavutil/samplefmt.h"
#include "libavcodec/avcodec.h"
@@ -47,24 +48,8 @@ typedef struct AShowInfoContext {
* Scratch space for individual plane checksums for planar audio
*/
uint32_t *plane_checksums;
-
- /**
- * Frame counter
- */
- uint64_t frame;
} AShowInfoContext;
-static int config_input(AVFilterLink *inlink)
-{
- AShowInfoContext *s = inlink->dst->priv;
- int channels = av_get_channel_layout_nb_channels(inlink->channel_layout);
- s->plane_checksums = av_malloc(channels * sizeof(*s->plane_checksums));
- if (!s->plane_checksums)
- return AVERROR(ENOMEM);
-
- return 0;
-}
-
static av_cold void uninit(AVFilterContext *ctx)
{
AShowInfoContext *s = ctx->priv;
@@ -194,12 +179,17 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
AShowInfoContext *s = ctx->priv;
char chlayout_str[128];
uint32_t checksum = 0;
- int channels = av_get_channel_layout_nb_channels(buf->channel_layout);
+ int channels = inlink->channels;
int planar = av_sample_fmt_is_planar(buf->format);
int block_align = av_get_bytes_per_sample(buf->format) * (planar ? 1 : channels);
int data_size = buf->nb_samples * block_align;
int planes = planar ? channels : 1;
int i;
+ void *tmp_ptr = av_realloc_array(s->plane_checksums, channels, sizeof(*s->plane_checksums));
+
+ if (!tmp_ptr)
+ return AVERROR(ENOMEM);
+ s->plane_checksums = tmp_ptr;
for (i = 0; i < planes; i++) {
uint8_t *data = buf->extended_data[i];
@@ -209,15 +199,17 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
s->plane_checksums[0];
}
- av_get_channel_layout_string(chlayout_str, sizeof(chlayout_str), -1,
+ av_get_channel_layout_string(chlayout_str, sizeof(chlayout_str), av_frame_get_channels(buf),
buf->channel_layout);
av_log(ctx, AV_LOG_INFO,
- "n:%"PRIu64" pts:%"PRId64" pts_time:%f "
- "fmt:%s chlayout:%s rate:%d nb_samples:%d "
+ "n:%"PRId64" pts:%s pts_time:%s pos:%"PRId64" "
+ "fmt:%s channels:%d chlayout:%s rate:%d nb_samples:%d "
"checksum:%08"PRIX32" ",
- s->frame, buf->pts, buf->pts * av_q2d(inlink->time_base),
- av_get_sample_fmt_name(buf->format), chlayout_str,
+ inlink->frame_count_out,
+ av_ts2str(buf->pts), av_ts2timestr(buf->pts, &inlink->time_base),
+ av_frame_get_pkt_pos(buf),
+ av_get_sample_fmt_name(buf->format), av_frame_get_channels(buf), chlayout_str,
buf->sample_rate, buf->nb_samples,
checksum);
@@ -241,19 +233,16 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
av_log(ctx, AV_LOG_INFO, "\n");
}
- s->frame++;
return ff_filter_frame(inlink->dst->outputs[0], buf);
}
static const AVFilterPad inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_AUDIO,
- .get_audio_buffer = ff_null_get_audio_buffer,
- .config_props = config_input,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
},
- { NULL },
+ { NULL }
};
static const AVFilterPad outputs[] = {
@@ -261,7 +250,7 @@ static const AVFilterPad outputs[] = {
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
},
- { NULL },
+ { NULL }
};
AVFilter ff_af_ashowinfo = {
diff --git a/libavfilter/af_astats.c b/libavfilter/af_astats.c
new file mode 100644
index 0000000000..e7f9675c2e
--- /dev/null
+++ b/libavfilter/af_astats.c
@@ -0,0 +1,531 @@
+/*
+ * Copyright (c) 2009 Rob Sykes <robs@users.sourceforge.net>
+ * Copyright (c) 2013 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <float.h>
+
+#include "libavutil/opt.h"
+#include "audio.h"
+#include "avfilter.h"
+#include "internal.h"
+
+typedef struct ChannelStats {
+ double last;
+ double sigma_x, sigma_x2;
+ double avg_sigma_x2, min_sigma_x2, max_sigma_x2;
+ double min, max;
+ double nmin, nmax;
+ double min_run, max_run;
+ double min_runs, max_runs;
+ double min_diff, max_diff;
+ double diff1_sum;
+ uint64_t mask, imask;
+ uint64_t min_count, max_count;
+ uint64_t nb_samples;
+} ChannelStats;
+
+typedef struct {
+ const AVClass *class;
+ ChannelStats *chstats;
+ int nb_channels;
+ uint64_t tc_samples;
+ double time_constant;
+ double mult;
+ int metadata;
+ int reset_count;
+ int nb_frames;
+ int maxbitdepth;
+} AudioStatsContext;
+
+#define OFFSET(x) offsetof(AudioStatsContext, x)
+#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption astats_options[] = {
+ { "length", "set the window length", OFFSET(time_constant), AV_OPT_TYPE_DOUBLE, {.dbl=.05}, .01, 10, FLAGS },
+ { "metadata", "inject metadata in the filtergraph", OFFSET(metadata), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS },
+ { "reset", "recalculate stats after this many frames", OFFSET(reset_count), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(astats);
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *formats;
+ AVFilterChannelLayouts *layouts;
+ static const enum AVSampleFormat sample_fmts[] = {
+ AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16P,
+ AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32P,
+ AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S64P,
+ AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLTP,
+ AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBLP,
+ AV_SAMPLE_FMT_NONE
+ };
+ int ret;
+
+ layouts = ff_all_channel_counts();
+ if (!layouts)
+ return AVERROR(ENOMEM);
+ ret = ff_set_common_channel_layouts(ctx, layouts);
+ if (ret < 0)
+ return ret;
+
+ formats = ff_make_format_list(sample_fmts);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ret = ff_set_common_formats(ctx, formats);
+ if (ret < 0)
+ return ret;
+
+ formats = ff_all_samplerates();
+ if (!formats)
+ return AVERROR(ENOMEM);
+ return ff_set_common_samplerates(ctx, formats);
+}
+
+static void reset_stats(AudioStatsContext *s)
+{
+ int c;
+
+ for (c = 0; c < s->nb_channels; c++) {
+ ChannelStats *p = &s->chstats[c];
+
+ p->min = p->nmin = p->min_sigma_x2 = DBL_MAX;
+ p->max = p->nmax = p->max_sigma_x2 = DBL_MIN;
+ p->min_diff = DBL_MAX;
+ p->max_diff = DBL_MIN;
+ p->sigma_x = 0;
+ p->sigma_x2 = 0;
+ p->avg_sigma_x2 = 0;
+ p->min_sigma_x2 = 0;
+ p->max_sigma_x2 = 0;
+ p->min_run = 0;
+ p->max_run = 0;
+ p->min_runs = 0;
+ p->max_runs = 0;
+ p->diff1_sum = 0;
+ p->mask = 0;
+ p->imask = 0xFFFFFFFFFFFFFFFF;
+ p->min_count = 0;
+ p->max_count = 0;
+ p->nb_samples = 0;
+ }
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AudioStatsContext *s = outlink->src->priv;
+
+ s->chstats = av_calloc(sizeof(*s->chstats), outlink->channels);
+ if (!s->chstats)
+ return AVERROR(ENOMEM);
+ s->nb_channels = outlink->channels;
+ s->mult = exp((-1 / s->time_constant / outlink->sample_rate));
+ s->tc_samples = 5 * s->time_constant * outlink->sample_rate + .5;
+ s->nb_frames = 0;
+ s->maxbitdepth = av_get_bytes_per_sample(outlink->format) * 8;
+
+ reset_stats(s);
+
+ return 0;
+}
+
+static void bit_depth(AudioStatsContext *s, uint64_t mask, uint64_t imask, AVRational *depth)
+{
+ unsigned result = s->maxbitdepth;
+
+ mask = mask & (~imask);
+
+ for (; result && !(mask & 1); --result, mask >>= 1);
+
+ depth->den = result;
+ depth->num = 0;
+
+ for (; result; --result, mask >>= 1)
+ if (mask & 1)
+ depth->num++;
+}
+
+static inline void update_stat(AudioStatsContext *s, ChannelStats *p, double d, double nd, int64_t i)
+{
+ if (d < p->min) {
+ p->min = d;
+ p->nmin = nd;
+ p->min_run = 1;
+ p->min_runs = 0;
+ p->min_count = 1;
+ } else if (d == p->min) {
+ p->min_count++;
+ p->min_run = d == p->last ? p->min_run + 1 : 1;
+ } else if (p->last == p->min) {
+ p->min_runs += p->min_run * p->min_run;
+ }
+
+ if (d > p->max) {
+ p->max = d;
+ p->nmax = nd;
+ p->max_run = 1;
+ p->max_runs = 0;
+ p->max_count = 1;
+ } else if (d == p->max) {
+ p->max_count++;
+ p->max_run = d == p->last ? p->max_run + 1 : 1;
+ } else if (p->last == p->max) {
+ p->max_runs += p->max_run * p->max_run;
+ }
+
+ p->sigma_x += nd;
+ p->sigma_x2 += nd * nd;
+ p->avg_sigma_x2 = p->avg_sigma_x2 * s->mult + (1.0 - s->mult) * nd * nd;
+ p->min_diff = FFMIN(p->min_diff, fabs(d - p->last));
+ p->max_diff = FFMAX(p->max_diff, fabs(d - p->last));
+ p->diff1_sum += fabs(d - p->last);
+ p->last = d;
+ p->mask |= i;
+ p->imask &= i;
+
+ if (p->nb_samples >= s->tc_samples) {
+ p->max_sigma_x2 = FFMAX(p->max_sigma_x2, p->avg_sigma_x2);
+ p->min_sigma_x2 = FFMIN(p->min_sigma_x2, p->avg_sigma_x2);
+ }
+ p->nb_samples++;
+}
+
+static void set_meta(AVDictionary **metadata, int chan, const char *key,
+ const char *fmt, double val)
+{
+ uint8_t value[128];
+ uint8_t key2[128];
+
+ snprintf(value, sizeof(value), fmt, val);
+ if (chan)
+ snprintf(key2, sizeof(key2), "lavfi.astats.%d.%s", chan, key);
+ else
+ snprintf(key2, sizeof(key2), "lavfi.astats.%s", key);
+ av_dict_set(metadata, key2, value, 0);
+}
+
+#define LINEAR_TO_DB(x) (log10(x) * 20)
+
+static void set_metadata(AudioStatsContext *s, AVDictionary **metadata)
+{
+ uint64_t mask = 0, imask = 0xFFFFFFFFFFFFFFFF, min_count = 0, max_count = 0, nb_samples = 0;
+ double min_runs = 0, max_runs = 0,
+ min = DBL_MAX, max = DBL_MIN, min_diff = DBL_MAX, max_diff = 0,
+ nmin = DBL_MAX, nmax = DBL_MIN,
+ max_sigma_x = 0,
+ diff1_sum = 0,
+ sigma_x = 0,
+ sigma_x2 = 0,
+ min_sigma_x2 = DBL_MAX,
+ max_sigma_x2 = DBL_MIN;
+ AVRational depth;
+ int c;
+
+ for (c = 0; c < s->nb_channels; c++) {
+ ChannelStats *p = &s->chstats[c];
+
+ if (p->nb_samples < s->tc_samples)
+ p->min_sigma_x2 = p->max_sigma_x2 = p->sigma_x2 / p->nb_samples;
+
+ min = FFMIN(min, p->min);
+ max = FFMAX(max, p->max);
+ nmin = FFMIN(nmin, p->nmin);
+ nmax = FFMAX(nmax, p->nmax);
+ min_diff = FFMIN(min_diff, p->min_diff);
+ max_diff = FFMAX(max_diff, p->max_diff);
+ diff1_sum += p->diff1_sum,
+ min_sigma_x2 = FFMIN(min_sigma_x2, p->min_sigma_x2);
+ max_sigma_x2 = FFMAX(max_sigma_x2, p->max_sigma_x2);
+ sigma_x += p->sigma_x;
+ sigma_x2 += p->sigma_x2;
+ min_count += p->min_count;
+ max_count += p->max_count;
+ min_runs += p->min_runs;
+ max_runs += p->max_runs;
+ mask |= p->mask;
+ imask &= p->imask;
+ nb_samples += p->nb_samples;
+ if (fabs(p->sigma_x) > fabs(max_sigma_x))
+ max_sigma_x = p->sigma_x;
+
+ set_meta(metadata, c + 1, "DC_offset", "%f", p->sigma_x / p->nb_samples);
+ set_meta(metadata, c + 1, "Min_level", "%f", p->min);
+ set_meta(metadata, c + 1, "Max_level", "%f", p->max);
+ set_meta(metadata, c + 1, "Min_difference", "%f", p->min_diff);
+ set_meta(metadata, c + 1, "Max_difference", "%f", p->max_diff);
+ set_meta(metadata, c + 1, "Mean_difference", "%f", p->diff1_sum / (p->nb_samples - 1));
+ set_meta(metadata, c + 1, "Peak_level", "%f", LINEAR_TO_DB(FFMAX(-p->nmin, p->nmax)));
+ set_meta(metadata, c + 1, "RMS_level", "%f", LINEAR_TO_DB(sqrt(p->sigma_x2 / p->nb_samples)));
+ set_meta(metadata, c + 1, "RMS_peak", "%f", LINEAR_TO_DB(sqrt(p->max_sigma_x2)));
+ set_meta(metadata, c + 1, "RMS_trough", "%f", LINEAR_TO_DB(sqrt(p->min_sigma_x2)));
+ set_meta(metadata, c + 1, "Crest_factor", "%f", p->sigma_x2 ? FFMAX(-p->min, p->max) / sqrt(p->sigma_x2 / p->nb_samples) : 1);
+ set_meta(metadata, c + 1, "Flat_factor", "%f", LINEAR_TO_DB((p->min_runs + p->max_runs) / (p->min_count + p->max_count)));
+ set_meta(metadata, c + 1, "Peak_count", "%f", (float)(p->min_count + p->max_count));
+ bit_depth(s, p->mask, p->imask, &depth);
+ set_meta(metadata, c + 1, "Bit_depth", "%f", depth.num);
+ set_meta(metadata, c + 1, "Bit_depth2", "%f", depth.den);
+ }
+
+ set_meta(metadata, 0, "Overall.DC_offset", "%f", max_sigma_x / (nb_samples / s->nb_channels));
+ set_meta(metadata, 0, "Overall.Min_level", "%f", min);
+ set_meta(metadata, 0, "Overall.Max_level", "%f", max);
+ set_meta(metadata, 0, "Overall.Min_difference", "%f", min_diff);
+ set_meta(metadata, 0, "Overall.Max_difference", "%f", max_diff);
+ set_meta(metadata, 0, "Overall.Mean_difference", "%f", diff1_sum / (nb_samples - s->nb_channels));
+ set_meta(metadata, 0, "Overall.Peak_level", "%f", LINEAR_TO_DB(FFMAX(-nmin, nmax)));
+ set_meta(metadata, 0, "Overall.RMS_level", "%f", LINEAR_TO_DB(sqrt(sigma_x2 / nb_samples)));
+ set_meta(metadata, 0, "Overall.RMS_peak", "%f", LINEAR_TO_DB(sqrt(max_sigma_x2)));
+ set_meta(metadata, 0, "Overall.RMS_trough", "%f", LINEAR_TO_DB(sqrt(min_sigma_x2)));
+ set_meta(metadata, 0, "Overall.Flat_factor", "%f", LINEAR_TO_DB((min_runs + max_runs) / (min_count + max_count)));
+ set_meta(metadata, 0, "Overall.Peak_count", "%f", (float)(min_count + max_count) / (double)s->nb_channels);
+ bit_depth(s, mask, imask, &depth);
+ set_meta(metadata, 0, "Overall.Bit_depth", "%f", depth.num);
+ set_meta(metadata, 0, "Overall.Bit_depth2", "%f", depth.den);
+ set_meta(metadata, 0, "Overall.Number_of_samples", "%f", nb_samples / s->nb_channels);
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
+{
+ AudioStatsContext *s = inlink->dst->priv;
+ AVDictionary **metadata = avpriv_frame_get_metadatap(buf);
+ const int channels = s->nb_channels;
+ int i, c;
+
+ if (s->reset_count > 0) {
+ if (s->nb_frames >= s->reset_count) {
+ reset_stats(s);
+ s->nb_frames = 0;
+ }
+ s->nb_frames++;
+ }
+
+ switch (inlink->format) {
+ case AV_SAMPLE_FMT_DBLP:
+ for (c = 0; c < channels; c++) {
+ ChannelStats *p = &s->chstats[c];
+ const double *src = (const double *)buf->extended_data[c];
+
+ for (i = 0; i < buf->nb_samples; i++, src++)
+ update_stat(s, p, *src, *src, llrint(*src * (UINT64_C(1) << 63)));
+ }
+ break;
+ case AV_SAMPLE_FMT_DBL: {
+ const double *src = (const double *)buf->extended_data[0];
+
+ for (i = 0; i < buf->nb_samples; i++) {
+ for (c = 0; c < channels; c++, src++)
+ update_stat(s, &s->chstats[c], *src, *src, llrint(*src * (UINT64_C(1) << 63)));
+ }}
+ break;
+ case AV_SAMPLE_FMT_FLTP:
+ for (c = 0; c < channels; c++) {
+ ChannelStats *p = &s->chstats[c];
+ const float *src = (const float *)buf->extended_data[c];
+
+ for (i = 0; i < buf->nb_samples; i++, src++)
+ update_stat(s, p, *src, *src, llrint(*src * (UINT64_C(1) << 31)));
+ }
+ break;
+ case AV_SAMPLE_FMT_FLT: {
+ const float *src = (const float *)buf->extended_data[0];
+
+ for (i = 0; i < buf->nb_samples; i++) {
+ for (c = 0; c < channels; c++, src++)
+ update_stat(s, &s->chstats[c], *src, *src, llrint(*src * (UINT64_C(1) << 31)));
+ }}
+ break;
+ case AV_SAMPLE_FMT_S64P:
+ for (c = 0; c < channels; c++) {
+ ChannelStats *p = &s->chstats[c];
+ const int64_t *src = (const int64_t *)buf->extended_data[c];
+
+ for (i = 0; i < buf->nb_samples; i++, src++)
+ update_stat(s, p, *src, *src / (double)INT64_MAX, *src);
+ }
+ break;
+ case AV_SAMPLE_FMT_S64: {
+ const int64_t *src = (const int64_t *)buf->extended_data[0];
+
+ for (i = 0; i < buf->nb_samples; i++) {
+ for (c = 0; c < channels; c++, src++)
+ update_stat(s, &s->chstats[c], *src, *src / (double)INT64_MAX, *src);
+ }}
+ break;
+ case AV_SAMPLE_FMT_S32P:
+ for (c = 0; c < channels; c++) {
+ ChannelStats *p = &s->chstats[c];
+ const int32_t *src = (const int32_t *)buf->extended_data[c];
+
+ for (i = 0; i < buf->nb_samples; i++, src++)
+ update_stat(s, p, *src, *src / (double)INT32_MAX, *src);
+ }
+ break;
+ case AV_SAMPLE_FMT_S32: {
+ const int32_t *src = (const int32_t *)buf->extended_data[0];
+
+ for (i = 0; i < buf->nb_samples; i++) {
+ for (c = 0; c < channels; c++, src++)
+ update_stat(s, &s->chstats[c], *src, *src / (double)INT32_MAX, *src);
+ }}
+ break;
+ case AV_SAMPLE_FMT_S16P:
+ for (c = 0; c < channels; c++) {
+ ChannelStats *p = &s->chstats[c];
+ const int16_t *src = (const int16_t *)buf->extended_data[c];
+
+ for (i = 0; i < buf->nb_samples; i++, src++)
+ update_stat(s, p, *src, *src / (double)INT16_MAX, *src);
+ }
+ break;
+ case AV_SAMPLE_FMT_S16: {
+ const int16_t *src = (const int16_t *)buf->extended_data[0];
+
+ for (i = 0; i < buf->nb_samples; i++) {
+ for (c = 0; c < channels; c++, src++)
+ update_stat(s, &s->chstats[c], *src, *src / (double)INT16_MAX, *src);
+ }}
+ break;
+ }
+
+ if (s->metadata)
+ set_metadata(s, metadata);
+
+ return ff_filter_frame(inlink->dst->outputs[0], buf);
+}
+
+static void print_stats(AVFilterContext *ctx)
+{
+ AudioStatsContext *s = ctx->priv;
+ uint64_t mask = 0, imask = 0xFFFFFFFFFFFFFFFF, min_count = 0, max_count = 0, nb_samples = 0;
+ double min_runs = 0, max_runs = 0,
+ min = DBL_MAX, max = DBL_MIN, min_diff = DBL_MAX, max_diff = 0,
+ nmin = DBL_MAX, nmax = DBL_MIN,
+ max_sigma_x = 0,
+ diff1_sum = 0,
+ sigma_x = 0,
+ sigma_x2 = 0,
+ min_sigma_x2 = DBL_MAX,
+ max_sigma_x2 = DBL_MIN;
+ AVRational depth;
+ int c;
+
+ for (c = 0; c < s->nb_channels; c++) {
+ ChannelStats *p = &s->chstats[c];
+
+ if (p->nb_samples < s->tc_samples)
+ p->min_sigma_x2 = p->max_sigma_x2 = p->sigma_x2 / p->nb_samples;
+
+ min = FFMIN(min, p->min);
+ max = FFMAX(max, p->max);
+ nmin = FFMIN(nmin, p->nmin);
+ nmax = FFMAX(nmax, p->nmax);
+ min_diff = FFMIN(min_diff, p->min_diff);
+ max_diff = FFMAX(max_diff, p->max_diff);
+ diff1_sum += p->diff1_sum,
+ min_sigma_x2 = FFMIN(min_sigma_x2, p->min_sigma_x2);
+ max_sigma_x2 = FFMAX(max_sigma_x2, p->max_sigma_x2);
+ sigma_x += p->sigma_x;
+ sigma_x2 += p->sigma_x2;
+ min_count += p->min_count;
+ max_count += p->max_count;
+ min_runs += p->min_runs;
+ max_runs += p->max_runs;
+ mask |= p->mask;
+ imask &= p->imask;
+ nb_samples += p->nb_samples;
+ if (fabs(p->sigma_x) > fabs(max_sigma_x))
+ max_sigma_x = p->sigma_x;
+
+ av_log(ctx, AV_LOG_INFO, "Channel: %d\n", c + 1);
+ av_log(ctx, AV_LOG_INFO, "DC offset: %f\n", p->sigma_x / p->nb_samples);
+ av_log(ctx, AV_LOG_INFO, "Min level: %f\n", p->min);
+ av_log(ctx, AV_LOG_INFO, "Max level: %f\n", p->max);
+ av_log(ctx, AV_LOG_INFO, "Min difference: %f\n", p->min_diff);
+ av_log(ctx, AV_LOG_INFO, "Max difference: %f\n", p->max_diff);
+ av_log(ctx, AV_LOG_INFO, "Mean difference: %f\n", p->diff1_sum / (p->nb_samples - 1));
+ av_log(ctx, AV_LOG_INFO, "Peak level dB: %f\n", LINEAR_TO_DB(FFMAX(-p->nmin, p->nmax)));
+ av_log(ctx, AV_LOG_INFO, "RMS level dB: %f\n", LINEAR_TO_DB(sqrt(p->sigma_x2 / p->nb_samples)));
+ av_log(ctx, AV_LOG_INFO, "RMS peak dB: %f\n", LINEAR_TO_DB(sqrt(p->max_sigma_x2)));
+ if (p->min_sigma_x2 != 1)
+ av_log(ctx, AV_LOG_INFO, "RMS trough dB: %f\n",LINEAR_TO_DB(sqrt(p->min_sigma_x2)));
+ av_log(ctx, AV_LOG_INFO, "Crest factor: %f\n", p->sigma_x2 ? FFMAX(-p->nmin, p->nmax) / sqrt(p->sigma_x2 / p->nb_samples) : 1);
+ av_log(ctx, AV_LOG_INFO, "Flat factor: %f\n", LINEAR_TO_DB((p->min_runs + p->max_runs) / (p->min_count + p->max_count)));
+ av_log(ctx, AV_LOG_INFO, "Peak count: %"PRId64"\n", p->min_count + p->max_count);
+ bit_depth(s, p->mask, p->imask, &depth);
+ av_log(ctx, AV_LOG_INFO, "Bit depth: %u/%u\n", depth.num, depth.den);
+ }
+
+ av_log(ctx, AV_LOG_INFO, "Overall\n");
+ av_log(ctx, AV_LOG_INFO, "DC offset: %f\n", max_sigma_x / (nb_samples / s->nb_channels));
+ av_log(ctx, AV_LOG_INFO, "Min level: %f\n", min);
+ av_log(ctx, AV_LOG_INFO, "Max level: %f\n", max);
+ av_log(ctx, AV_LOG_INFO, "Min difference: %f\n", min_diff);
+ av_log(ctx, AV_LOG_INFO, "Max difference: %f\n", max_diff);
+ av_log(ctx, AV_LOG_INFO, "Mean difference: %f\n", diff1_sum / (nb_samples - s->nb_channels));
+ av_log(ctx, AV_LOG_INFO, "Peak level dB: %f\n", LINEAR_TO_DB(FFMAX(-nmin, nmax)));
+ av_log(ctx, AV_LOG_INFO, "RMS level dB: %f\n", LINEAR_TO_DB(sqrt(sigma_x2 / nb_samples)));
+ av_log(ctx, AV_LOG_INFO, "RMS peak dB: %f\n", LINEAR_TO_DB(sqrt(max_sigma_x2)));
+ if (min_sigma_x2 != 1)
+ av_log(ctx, AV_LOG_INFO, "RMS trough dB: %f\n", LINEAR_TO_DB(sqrt(min_sigma_x2)));
+ av_log(ctx, AV_LOG_INFO, "Flat factor: %f\n", LINEAR_TO_DB((min_runs + max_runs) / (min_count + max_count)));
+ av_log(ctx, AV_LOG_INFO, "Peak count: %f\n", (min_count + max_count) / (double)s->nb_channels);
+ bit_depth(s, mask, imask, &depth);
+ av_log(ctx, AV_LOG_INFO, "Bit depth: %u/%u\n", depth.num, depth.den);
+ av_log(ctx, AV_LOG_INFO, "Number of samples: %"PRId64"\n", nb_samples / s->nb_channels);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ AudioStatsContext *s = ctx->priv;
+
+ if (s->nb_channels)
+ print_stats(ctx);
+ av_freep(&s->chstats);
+}
+
+static const AVFilterPad astats_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad astats_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_output,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_astats = {
+ .name = "astats",
+ .description = NULL_IF_CONFIG_SMALL("Show time domain statistics about audio frames."),
+ .query_formats = query_formats,
+ .priv_size = sizeof(AudioStatsContext),
+ .priv_class = &astats_class,
+ .uninit = uninit,
+ .inputs = astats_inputs,
+ .outputs = astats_outputs,
+};
diff --git a/libavfilter/af_asyncts.c b/libavfilter/af_asyncts.c
index e662c842ac..a33e0dd67e 100644
--- a/libavfilter/af_asyncts.c
+++ b/libavfilter/af_asyncts.c
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -51,21 +51,17 @@ typedef struct ASyncContext {
#define OFFSET(x) offsetof(ASyncContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
-static const AVOption options[] = {
- { "compensate", "Stretch/squeeze the data to make it match the timestamps", OFFSET(resample), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, A },
+#define F AV_OPT_FLAG_FILTERING_PARAM
+static const AVOption asyncts_options[] = {
+ { "compensate", "Stretch/squeeze the data to make it match the timestamps", OFFSET(resample), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, A|F },
{ "min_delta", "Minimum difference between timestamps and audio data "
- "(in seconds) to trigger padding/trimmin the data.", OFFSET(min_delta_sec), AV_OPT_TYPE_FLOAT, { .dbl = 0.1 }, 0, INT_MAX, A },
- { "max_comp", "Maximum compensation in samples per second.", OFFSET(max_comp), AV_OPT_TYPE_INT, { .i64 = 500 }, 0, INT_MAX, A },
- { "first_pts", "Assume the first pts should be this value.", OFFSET(first_pts), AV_OPT_TYPE_INT64, { .i64 = AV_NOPTS_VALUE }, INT64_MIN, INT64_MAX, A },
- { NULL },
+ "(in seconds) to trigger padding/trimmin the data.", OFFSET(min_delta_sec), AV_OPT_TYPE_FLOAT, { .dbl = 0.1 }, 0, INT_MAX, A|F },
+ { "max_comp", "Maximum compensation in samples per second.", OFFSET(max_comp), AV_OPT_TYPE_INT, { .i64 = 500 }, 0, INT_MAX, A|F },
+ { "first_pts", "Assume the first pts should be this value.", OFFSET(first_pts), AV_OPT_TYPE_INT64, { .i64 = AV_NOPTS_VALUE }, INT64_MIN, INT64_MAX, A|F },
+ { NULL }
};
-static const AVClass async_class = {
- .class_name = "asyncts filter",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(asyncts);
static av_cold int init(AVFilterContext *ctx)
{
@@ -143,8 +139,7 @@ static int request_frame(AVFilterLink *link)
int nb_samples;
s->got_output = 0;
- while (ret >= 0 && !s->got_output)
- ret = ff_request_frame(ctx->inputs[0]);
+ ret = ff_request_frame(ctx->inputs[0]);
/* flush the fifo */
if (ret == AVERROR_EOF) {
@@ -209,7 +204,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
delta = pts - s->pts - get_delay(s);
out_size = avresample_available(s->avr);
- if (labs(delta) > s->min_delta ||
+ if (llabs(delta) > s->min_delta ||
(s->first_frame && delta && s->first_pts != AV_NOPTS_VALUE)) {
av_log(ctx, AV_LOG_VERBOSE, "Discontinuity - %"PRId64" samples.\n", delta);
out_size = av_clipl_int32((int64_t)out_size + delta);
@@ -298,9 +293,9 @@ fail:
static const AVFilterPad avfilter_af_asyncts_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_AUDIO,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame
},
{ NULL }
};
@@ -317,14 +312,12 @@ static const AVFilterPad avfilter_af_asyncts_outputs[] = {
AVFilter ff_af_asyncts = {
.name = "asyncts",
- .description = NULL_IF_CONFIG_SMALL("Sync audio data to timestamps"),
-
+ .description = NULL_IF_CONFIG_SMALL("Sync audio data to timestamps."),
.init = init,
.uninit = uninit,
-
.priv_size = sizeof(ASyncContext),
- .priv_class = &async_class,
-
+ .priv_class = &asyncts_class,
+ .query_formats = ff_query_formats_all_layouts,
.inputs = avfilter_af_asyncts_inputs,
.outputs = avfilter_af_asyncts_outputs,
};
diff --git a/libavfilter/af_atempo.c b/libavfilter/af_atempo.c
new file mode 100644
index 0000000000..eb626564cd
--- /dev/null
+++ b/libavfilter/af_atempo.c
@@ -0,0 +1,1202 @@
+/*
+ * Copyright (c) 2012 Pavel Koshevoy <pkoshevoy at gmail dot com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * tempo scaling audio filter -- an implementation of WSOLA algorithm
+ *
+ * Based on MIT licensed yaeAudioTempoFilter.h and yaeAudioFragment.h
+ * from Apprentice Video player by Pavel Koshevoy.
+ * https://sourceforge.net/projects/apprenticevideo/
+ *
+ * An explanation of SOLA algorithm is available at
+ * http://www.surina.net/article/time-and-pitch-scaling.html
+ *
+ * WSOLA is very similar to SOLA, only one major difference exists between
+ * these algorithms. SOLA shifts audio fragments along the output stream,
+ * where as WSOLA shifts audio fragments along the input stream.
+ *
+ * The advantage of WSOLA algorithm is that the overlap region size is
+ * always the same, therefore the blending function is constant and
+ * can be precomputed.
+ */
+
+#include <float.h>
+#include "libavcodec/avfft.h"
+#include "libavutil/avassert.h"
+#include "libavutil/avstring.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/eval.h"
+#include "libavutil/opt.h"
+#include "libavutil/samplefmt.h"
+#include "avfilter.h"
+#include "audio.h"
+#include "internal.h"
+
+/**
+ * A fragment of audio waveform
+ */
+typedef struct {
+ // index of the first sample of this fragment in the overall waveform;
+ // 0: input sample position
+ // 1: output sample position
+ int64_t position[2];
+
+ // original packed multi-channel samples:
+ uint8_t *data;
+
+ // number of samples in this fragment:
+ int nsamples;
+
+ // rDFT transform of the down-mixed mono fragment, used for
+ // fast waveform alignment via correlation in frequency domain:
+ FFTSample *xdat;
+} AudioFragment;
+
+/**
+ * Filter state machine states
+ */
+typedef enum {
+ YAE_LOAD_FRAGMENT,
+ YAE_ADJUST_POSITION,
+ YAE_RELOAD_FRAGMENT,
+ YAE_OUTPUT_OVERLAP_ADD,
+ YAE_FLUSH_OUTPUT,
+} FilterState;
+
+/**
+ * Filter state machine
+ */
+typedef struct {
+ const AVClass *class;
+
+ // ring-buffer of input samples, necessary because some times
+ // input fragment position may be adjusted backwards:
+ uint8_t *buffer;
+
+ // ring-buffer maximum capacity, expressed in sample rate time base:
+ int ring;
+
+ // ring-buffer house keeping:
+ int size;
+ int head;
+ int tail;
+
+ // 0: input sample position corresponding to the ring buffer tail
+ // 1: output sample position
+ int64_t position[2];
+
+ // sample format:
+ enum AVSampleFormat format;
+
+ // number of channels:
+ int channels;
+
+ // row of bytes to skip from one sample to next, across multple channels;
+ // stride = (number-of-channels * bits-per-sample-per-channel) / 8
+ int stride;
+
+ // fragment window size, power-of-two integer:
+ int window;
+
+ // Hann window coefficients, for feathering
+ // (blending) the overlapping fragment region:
+ float *hann;
+
+ // tempo scaling factor:
+ double tempo;
+
+ // a snapshot of previous fragment input and output position values
+ // captured when the tempo scale factor was set most recently:
+ int64_t origin[2];
+
+ // current/previous fragment ring-buffer:
+ AudioFragment frag[2];
+
+ // current fragment index:
+ uint64_t nfrag;
+
+ // current state:
+ FilterState state;
+
+ // for fast correlation calculation in frequency domain:
+ RDFTContext *real_to_complex;
+ RDFTContext *complex_to_real;
+ FFTSample *correlation;
+
+ // for managing AVFilterPad.request_frame and AVFilterPad.filter_frame
+ AVFrame *dst_buffer;
+ uint8_t *dst;
+ uint8_t *dst_end;
+ uint64_t nsamples_in;
+ uint64_t nsamples_out;
+} ATempoContext;
+
+#define OFFSET(x) offsetof(ATempoContext, x)
+
+static const AVOption atempo_options[] = {
+ { "tempo", "set tempo scale factor",
+ OFFSET(tempo), AV_OPT_TYPE_DOUBLE, { .dbl = 1.0 }, 0.5, 2.0,
+ AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(atempo);
+
+inline static AudioFragment *yae_curr_frag(ATempoContext *atempo)
+{
+ return &atempo->frag[atempo->nfrag % 2];
+}
+
+inline static AudioFragment *yae_prev_frag(ATempoContext *atempo)
+{
+ return &atempo->frag[(atempo->nfrag + 1) % 2];
+}
+
+/**
+ * Reset filter to initial state, do not deallocate existing local buffers.
+ */
+static void yae_clear(ATempoContext *atempo)
+{
+ atempo->size = 0;
+ atempo->head = 0;
+ atempo->tail = 0;
+
+ atempo->nfrag = 0;
+ atempo->state = YAE_LOAD_FRAGMENT;
+
+ atempo->position[0] = 0;
+ atempo->position[1] = 0;
+
+ atempo->origin[0] = 0;
+ atempo->origin[1] = 0;
+
+ atempo->frag[0].position[0] = 0;
+ atempo->frag[0].position[1] = 0;
+ atempo->frag[0].nsamples = 0;
+
+ atempo->frag[1].position[0] = 0;
+ atempo->frag[1].position[1] = 0;
+ atempo->frag[1].nsamples = 0;
+
+ // shift left position of 1st fragment by half a window
+ // so that no re-normalization would be required for
+ // the left half of the 1st fragment:
+ atempo->frag[0].position[0] = -(int64_t)(atempo->window / 2);
+ atempo->frag[0].position[1] = -(int64_t)(atempo->window / 2);
+
+ av_frame_free(&atempo->dst_buffer);
+ atempo->dst = NULL;
+ atempo->dst_end = NULL;
+
+ atempo->nsamples_in = 0;
+ atempo->nsamples_out = 0;
+}
+
+/**
+ * Reset filter to initial state and deallocate all buffers.
+ */
+static void yae_release_buffers(ATempoContext *atempo)
+{
+ yae_clear(atempo);
+
+ av_freep(&atempo->frag[0].data);
+ av_freep(&atempo->frag[1].data);
+ av_freep(&atempo->frag[0].xdat);
+ av_freep(&atempo->frag[1].xdat);
+
+ av_freep(&atempo->buffer);
+ av_freep(&atempo->hann);
+ av_freep(&atempo->correlation);
+
+ av_rdft_end(atempo->real_to_complex);
+ atempo->real_to_complex = NULL;
+
+ av_rdft_end(atempo->complex_to_real);
+ atempo->complex_to_real = NULL;
+}
+
+/* av_realloc is not aligned enough; fortunately, the data does not need to
+ * be preserved */
+#define RE_MALLOC_OR_FAIL(field, field_size) \
+ do { \
+ av_freep(&field); \
+ field = av_malloc(field_size); \
+ if (!field) { \
+ yae_release_buffers(atempo); \
+ return AVERROR(ENOMEM); \
+ } \
+ } while (0)
+
+/**
+ * Prepare filter for processing audio data of given format,
+ * sample rate and number of channels.
+ */
+static int yae_reset(ATempoContext *atempo,
+ enum AVSampleFormat format,
+ int sample_rate,
+ int channels)
+{
+ const int sample_size = av_get_bytes_per_sample(format);
+ uint32_t nlevels = 0;
+ uint32_t pot;
+ int i;
+
+ atempo->format = format;
+ atempo->channels = channels;
+ atempo->stride = sample_size * channels;
+
+ // pick a segment window size:
+ atempo->window = sample_rate / 24;
+
+ // adjust window size to be a power-of-two integer:
+ nlevels = av_log2(atempo->window);
+ pot = 1 << nlevels;
+ av_assert0(pot <= atempo->window);
+
+ if (pot < atempo->window) {
+ atempo->window = pot * 2;
+ nlevels++;
+ }
+
+ // initialize audio fragment buffers:
+ RE_MALLOC_OR_FAIL(atempo->frag[0].data, atempo->window * atempo->stride);
+ RE_MALLOC_OR_FAIL(atempo->frag[1].data, atempo->window * atempo->stride);
+ RE_MALLOC_OR_FAIL(atempo->frag[0].xdat, atempo->window * sizeof(FFTComplex));
+ RE_MALLOC_OR_FAIL(atempo->frag[1].xdat, atempo->window * sizeof(FFTComplex));
+
+ // initialize rDFT contexts:
+ av_rdft_end(atempo->real_to_complex);
+ atempo->real_to_complex = NULL;
+
+ av_rdft_end(atempo->complex_to_real);
+ atempo->complex_to_real = NULL;
+
+ atempo->real_to_complex = av_rdft_init(nlevels + 1, DFT_R2C);
+ if (!atempo->real_to_complex) {
+ yae_release_buffers(atempo);
+ return AVERROR(ENOMEM);
+ }
+
+ atempo->complex_to_real = av_rdft_init(nlevels + 1, IDFT_C2R);
+ if (!atempo->complex_to_real) {
+ yae_release_buffers(atempo);
+ return AVERROR(ENOMEM);
+ }
+
+ RE_MALLOC_OR_FAIL(atempo->correlation, atempo->window * sizeof(FFTComplex));
+
+ atempo->ring = atempo->window * 3;
+ RE_MALLOC_OR_FAIL(atempo->buffer, atempo->ring * atempo->stride);
+
+ // initialize the Hann window function:
+ RE_MALLOC_OR_FAIL(atempo->hann, atempo->window * sizeof(float));
+
+ for (i = 0; i < atempo->window; i++) {
+ double t = (double)i / (double)(atempo->window - 1);
+ double h = 0.5 * (1.0 - cos(2.0 * M_PI * t));
+ atempo->hann[i] = (float)h;
+ }
+
+ yae_clear(atempo);
+ return 0;
+}
+
+static int yae_set_tempo(AVFilterContext *ctx, const char *arg_tempo)
+{
+ const AudioFragment *prev;
+ ATempoContext *atempo = ctx->priv;
+ char *tail = NULL;
+ double tempo = av_strtod(arg_tempo, &tail);
+
+ if (tail && *tail) {
+ av_log(ctx, AV_LOG_ERROR, "Invalid tempo value '%s'\n", arg_tempo);
+ return AVERROR(EINVAL);
+ }
+
+ if (tempo < 0.5 || tempo > 2.0) {
+ av_log(ctx, AV_LOG_ERROR, "Tempo value %f exceeds [0.5, 2.0] range\n",
+ tempo);
+ return AVERROR(EINVAL);
+ }
+
+ prev = yae_prev_frag(atempo);
+ atempo->origin[0] = prev->position[0] + atempo->window / 2;
+ atempo->origin[1] = prev->position[1] + atempo->window / 2;
+ atempo->tempo = tempo;
+ return 0;
+}
+
+/**
+ * A helper macro for initializing complex data buffer with scalar data
+ * of a given type.
+ */
+#define yae_init_xdat(scalar_type, scalar_max) \
+ do { \
+ const uint8_t *src_end = src + \
+ frag->nsamples * atempo->channels * sizeof(scalar_type); \
+ \
+ FFTSample *xdat = frag->xdat; \
+ scalar_type tmp; \
+ \
+ if (atempo->channels == 1) { \
+ for (; src < src_end; xdat++) { \
+ tmp = *(const scalar_type *)src; \
+ src += sizeof(scalar_type); \
+ \
+ *xdat = (FFTSample)tmp; \
+ } \
+ } else { \
+ FFTSample s, max, ti, si; \
+ int i; \
+ \
+ for (; src < src_end; xdat++) { \
+ tmp = *(const scalar_type *)src; \
+ src += sizeof(scalar_type); \
+ \
+ max = (FFTSample)tmp; \
+ s = FFMIN((FFTSample)scalar_max, \
+ (FFTSample)fabsf(max)); \
+ \
+ for (i = 1; i < atempo->channels; i++) { \
+ tmp = *(const scalar_type *)src; \
+ src += sizeof(scalar_type); \
+ \
+ ti = (FFTSample)tmp; \
+ si = FFMIN((FFTSample)scalar_max, \
+ (FFTSample)fabsf(ti)); \
+ \
+ if (s < si) { \
+ s = si; \
+ max = ti; \
+ } \
+ } \
+ \
+ *xdat = max; \
+ } \
+ } \
+ } while (0)
+
+/**
+ * Initialize complex data buffer of a given audio fragment
+ * with down-mixed mono data of appropriate scalar type.
+ */
+static void yae_downmix(ATempoContext *atempo, AudioFragment *frag)
+{
+ // shortcuts:
+ const uint8_t *src = frag->data;
+
+ // init complex data buffer used for FFT and Correlation:
+ memset(frag->xdat, 0, sizeof(FFTComplex) * atempo->window);
+
+ if (atempo->format == AV_SAMPLE_FMT_U8) {
+ yae_init_xdat(uint8_t, 127);
+ } else if (atempo->format == AV_SAMPLE_FMT_S16) {
+ yae_init_xdat(int16_t, 32767);
+ } else if (atempo->format == AV_SAMPLE_FMT_S32) {
+ yae_init_xdat(int, 2147483647);
+ } else if (atempo->format == AV_SAMPLE_FMT_FLT) {
+ yae_init_xdat(float, 1);
+ } else if (atempo->format == AV_SAMPLE_FMT_DBL) {
+ yae_init_xdat(double, 1);
+ }
+}
+
+/**
+ * Populate the internal data buffer on as-needed basis.
+ *
+ * @return
+ * 0 if requested data was already available or was successfully loaded,
+ * AVERROR(EAGAIN) if more input data is required.
+ */
+static int yae_load_data(ATempoContext *atempo,
+ const uint8_t **src_ref,
+ const uint8_t *src_end,
+ int64_t stop_here)
+{
+ // shortcut:
+ const uint8_t *src = *src_ref;
+ const int read_size = stop_here - atempo->position[0];
+
+ if (stop_here <= atempo->position[0]) {
+ return 0;
+ }
+
+ // samples are not expected to be skipped:
+ av_assert0(read_size <= atempo->ring);
+
+ while (atempo->position[0] < stop_here && src < src_end) {
+ int src_samples = (src_end - src) / atempo->stride;
+
+ // load data piece-wise, in order to avoid complicating the logic:
+ int nsamples = FFMIN(read_size, src_samples);
+ int na;
+ int nb;
+
+ nsamples = FFMIN(nsamples, atempo->ring);
+ na = FFMIN(nsamples, atempo->ring - atempo->tail);
+ nb = FFMIN(nsamples - na, atempo->ring);
+
+ if (na) {
+ uint8_t *a = atempo->buffer + atempo->tail * atempo->stride;
+ memcpy(a, src, na * atempo->stride);
+
+ src += na * atempo->stride;
+ atempo->position[0] += na;
+
+ atempo->size = FFMIN(atempo->size + na, atempo->ring);
+ atempo->tail = (atempo->tail + na) % atempo->ring;
+ atempo->head =
+ atempo->size < atempo->ring ?
+ atempo->tail - atempo->size :
+ atempo->tail;
+ }
+
+ if (nb) {
+ uint8_t *b = atempo->buffer;
+ memcpy(b, src, nb * atempo->stride);
+
+ src += nb * atempo->stride;
+ atempo->position[0] += nb;
+
+ atempo->size = FFMIN(atempo->size + nb, atempo->ring);
+ atempo->tail = (atempo->tail + nb) % atempo->ring;
+ atempo->head =
+ atempo->size < atempo->ring ?
+ atempo->tail - atempo->size :
+ atempo->tail;
+ }
+ }
+
+ // pass back the updated source buffer pointer:
+ *src_ref = src;
+
+ // sanity check:
+ av_assert0(atempo->position[0] <= stop_here);
+
+ return atempo->position[0] == stop_here ? 0 : AVERROR(EAGAIN);
+}
+
+/**
+ * Populate current audio fragment data buffer.
+ *
+ * @return
+ * 0 when the fragment is ready,
+ * AVERROR(EAGAIN) if more input data is required.
+ */
+static int yae_load_frag(ATempoContext *atempo,
+ const uint8_t **src_ref,
+ const uint8_t *src_end)
+{
+ // shortcuts:
+ AudioFragment *frag = yae_curr_frag(atempo);
+ uint8_t *dst;
+ int64_t missing, start, zeros;
+ uint32_t nsamples;
+ const uint8_t *a, *b;
+ int i0, i1, n0, n1, na, nb;
+
+ int64_t stop_here = frag->position[0] + atempo->window;
+ if (src_ref && yae_load_data(atempo, src_ref, src_end, stop_here) != 0) {
+ return AVERROR(EAGAIN);
+ }
+
+ // calculate the number of samples we don't have:
+ missing =
+ stop_here > atempo->position[0] ?
+ stop_here - atempo->position[0] : 0;
+
+ nsamples =
+ missing < (int64_t)atempo->window ?
+ (uint32_t)(atempo->window - missing) : 0;
+
+ // setup the output buffer:
+ frag->nsamples = nsamples;
+ dst = frag->data;
+
+ start = atempo->position[0] - atempo->size;
+ zeros = 0;
+
+ if (frag->position[0] < start) {
+ // what we don't have we substitute with zeros:
+ zeros = FFMIN(start - frag->position[0], (int64_t)nsamples);
+ av_assert0(zeros != nsamples);
+
+ memset(dst, 0, zeros * atempo->stride);
+ dst += zeros * atempo->stride;
+ }
+
+ if (zeros == nsamples) {
+ return 0;
+ }
+
+ // get the remaining data from the ring buffer:
+ na = (atempo->head < atempo->tail ?
+ atempo->tail - atempo->head :
+ atempo->ring - atempo->head);
+
+ nb = atempo->head < atempo->tail ? 0 : atempo->tail;
+
+ // sanity check:
+ av_assert0(nsamples <= zeros + na + nb);
+
+ a = atempo->buffer + atempo->head * atempo->stride;
+ b = atempo->buffer;
+
+ i0 = frag->position[0] + zeros - start;
+ i1 = i0 < na ? 0 : i0 - na;
+
+ n0 = i0 < na ? FFMIN(na - i0, (int)(nsamples - zeros)) : 0;
+ n1 = nsamples - zeros - n0;
+
+ if (n0) {
+ memcpy(dst, a + i0 * atempo->stride, n0 * atempo->stride);
+ dst += n0 * atempo->stride;
+ }
+
+ if (n1) {
+ memcpy(dst, b + i1 * atempo->stride, n1 * atempo->stride);
+ }
+
+ return 0;
+}
+
+/**
+ * Prepare for loading next audio fragment.
+ */
+static void yae_advance_to_next_frag(ATempoContext *atempo)
+{
+ const double fragment_step = atempo->tempo * (double)(atempo->window / 2);
+
+ const AudioFragment *prev;
+ AudioFragment *frag;
+
+ atempo->nfrag++;
+ prev = yae_prev_frag(atempo);
+ frag = yae_curr_frag(atempo);
+
+ frag->position[0] = prev->position[0] + (int64_t)fragment_step;
+ frag->position[1] = prev->position[1] + atempo->window / 2;
+ frag->nsamples = 0;
+}
+
+/**
+ * Calculate cross-correlation via rDFT.
+ *
+ * Multiply two vectors of complex numbers (result of real_to_complex rDFT)
+ * and transform back via complex_to_real rDFT.
+ */
+static void yae_xcorr_via_rdft(FFTSample *xcorr,
+ RDFTContext *complex_to_real,
+ const FFTComplex *xa,
+ const FFTComplex *xb,
+ const int window)
+{
+ FFTComplex *xc = (FFTComplex *)xcorr;
+ int i;
+
+ // NOTE: first element requires special care -- Given Y = rDFT(X),
+ // Im(Y[0]) and Im(Y[N/2]) are always zero, therefore av_rdft_calc
+ // stores Re(Y[N/2]) in place of Im(Y[0]).
+
+ xc->re = xa->re * xb->re;
+ xc->im = xa->im * xb->im;
+ xa++;
+ xb++;
+ xc++;
+
+ for (i = 1; i < window; i++, xa++, xb++, xc++) {
+ xc->re = (xa->re * xb->re + xa->im * xb->im);
+ xc->im = (xa->im * xb->re - xa->re * xb->im);
+ }
+
+ // apply inverse rDFT:
+ av_rdft_calc(complex_to_real, xcorr);
+}
+
+/**
+ * Calculate alignment offset for given fragment
+ * relative to the previous fragment.
+ *
+ * @return alignment offset of current fragment relative to previous.
+ */
+static int yae_align(AudioFragment *frag,
+ const AudioFragment *prev,
+ const int window,
+ const int delta_max,
+ const int drift,
+ FFTSample *correlation,
+ RDFTContext *complex_to_real)
+{
+ int best_offset = -drift;
+ FFTSample best_metric = -FLT_MAX;
+ FFTSample *xcorr;
+
+ int i0;
+ int i1;
+ int i;
+
+ yae_xcorr_via_rdft(correlation,
+ complex_to_real,
+ (const FFTComplex *)prev->xdat,
+ (const FFTComplex *)frag->xdat,
+ window);
+
+ // identify search window boundaries:
+ i0 = FFMAX(window / 2 - delta_max - drift, 0);
+ i0 = FFMIN(i0, window);
+
+ i1 = FFMIN(window / 2 + delta_max - drift, window - window / 16);
+ i1 = FFMAX(i1, 0);
+
+ // identify cross-correlation peaks within search window:
+ xcorr = correlation + i0;
+
+ for (i = i0; i < i1; i++, xcorr++) {
+ FFTSample metric = *xcorr;
+
+ // normalize:
+ FFTSample drifti = (FFTSample)(drift + i);
+ metric *= drifti * (FFTSample)(i - i0) * (FFTSample)(i1 - i);
+
+ if (metric > best_metric) {
+ best_metric = metric;
+ best_offset = i - window / 2;
+ }
+ }
+
+ return best_offset;
+}
+
+/**
+ * Adjust current fragment position for better alignment
+ * with previous fragment.
+ *
+ * @return alignment correction.
+ */
+static int yae_adjust_position(ATempoContext *atempo)
+{
+ const AudioFragment *prev = yae_prev_frag(atempo);
+ AudioFragment *frag = yae_curr_frag(atempo);
+
+ const double prev_output_position =
+ (double)(prev->position[1] - atempo->origin[1] + atempo->window / 2) *
+ atempo->tempo;
+
+ const double ideal_output_position =
+ (double)(prev->position[0] - atempo->origin[0] + atempo->window / 2);
+
+ const int drift = (int)(prev_output_position - ideal_output_position);
+
+ const int delta_max = atempo->window / 2;
+ const int correction = yae_align(frag,
+ prev,
+ atempo->window,
+ delta_max,
+ drift,
+ atempo->correlation,
+ atempo->complex_to_real);
+
+ if (correction) {
+ // adjust fragment position:
+ frag->position[0] -= correction;
+
+ // clear so that the fragment can be reloaded:
+ frag->nsamples = 0;
+ }
+
+ return correction;
+}
+
+/**
+ * A helper macro for blending the overlap region of previous
+ * and current audio fragment.
+ */
+#define yae_blend(scalar_type) \
+ do { \
+ const scalar_type *aaa = (const scalar_type *)a; \
+ const scalar_type *bbb = (const scalar_type *)b; \
+ \
+ scalar_type *out = (scalar_type *)dst; \
+ scalar_type *out_end = (scalar_type *)dst_end; \
+ int64_t i; \
+ \
+ for (i = 0; i < overlap && out < out_end; \
+ i++, atempo->position[1]++, wa++, wb++) { \
+ float w0 = *wa; \
+ float w1 = *wb; \
+ int j; \
+ \
+ for (j = 0; j < atempo->channels; \
+ j++, aaa++, bbb++, out++) { \
+ float t0 = (float)*aaa; \
+ float t1 = (float)*bbb; \
+ \
+ *out = \
+ frag->position[0] + i < 0 ? \
+ *aaa : \
+ (scalar_type)(t0 * w0 + t1 * w1); \
+ } \
+ } \
+ dst = (uint8_t *)out; \
+ } while (0)
+
+/**
+ * Blend the overlap region of previous and current audio fragment
+ * and output the results to the given destination buffer.
+ *
+ * @return
+ * 0 if the overlap region was completely stored in the dst buffer,
+ * AVERROR(EAGAIN) if more destination buffer space is required.
+ */
+static int yae_overlap_add(ATempoContext *atempo,
+ uint8_t **dst_ref,
+ uint8_t *dst_end)
+{
+ // shortcuts:
+ const AudioFragment *prev = yae_prev_frag(atempo);
+ const AudioFragment *frag = yae_curr_frag(atempo);
+
+ const int64_t start_here = FFMAX(atempo->position[1],
+ frag->position[1]);
+
+ const int64_t stop_here = FFMIN(prev->position[1] + prev->nsamples,
+ frag->position[1] + frag->nsamples);
+
+ const int64_t overlap = stop_here - start_here;
+
+ const int64_t ia = start_here - prev->position[1];
+ const int64_t ib = start_here - frag->position[1];
+
+ const float *wa = atempo->hann + ia;
+ const float *wb = atempo->hann + ib;
+
+ const uint8_t *a = prev->data + ia * atempo->stride;
+ const uint8_t *b = frag->data + ib * atempo->stride;
+
+ uint8_t *dst = *dst_ref;
+
+ av_assert0(start_here <= stop_here &&
+ frag->position[1] <= start_here &&
+ overlap <= frag->nsamples);
+
+ if (atempo->format == AV_SAMPLE_FMT_U8) {
+ yae_blend(uint8_t);
+ } else if (atempo->format == AV_SAMPLE_FMT_S16) {
+ yae_blend(int16_t);
+ } else if (atempo->format == AV_SAMPLE_FMT_S32) {
+ yae_blend(int);
+ } else if (atempo->format == AV_SAMPLE_FMT_FLT) {
+ yae_blend(float);
+ } else if (atempo->format == AV_SAMPLE_FMT_DBL) {
+ yae_blend(double);
+ }
+
+ // pass-back the updated destination buffer pointer:
+ *dst_ref = dst;
+
+ return atempo->position[1] == stop_here ? 0 : AVERROR(EAGAIN);
+}
+
+/**
+ * Feed as much data to the filter as it is able to consume
+ * and receive as much processed data in the destination buffer
+ * as it is able to produce or store.
+ */
+static void
+yae_apply(ATempoContext *atempo,
+ const uint8_t **src_ref,
+ const uint8_t *src_end,
+ uint8_t **dst_ref,
+ uint8_t *dst_end)
+{
+ while (1) {
+ if (atempo->state == YAE_LOAD_FRAGMENT) {
+ // load additional data for the current fragment:
+ if (yae_load_frag(atempo, src_ref, src_end) != 0) {
+ break;
+ }
+
+ // down-mix to mono:
+ yae_downmix(atempo, yae_curr_frag(atempo));
+
+ // apply rDFT:
+ av_rdft_calc(atempo->real_to_complex, yae_curr_frag(atempo)->xdat);
+
+ // must load the second fragment before alignment can start:
+ if (!atempo->nfrag) {
+ yae_advance_to_next_frag(atempo);
+ continue;
+ }
+
+ atempo->state = YAE_ADJUST_POSITION;
+ }
+
+ if (atempo->state == YAE_ADJUST_POSITION) {
+ // adjust position for better alignment:
+ if (yae_adjust_position(atempo)) {
+ // reload the fragment at the corrected position, so that the
+ // Hann window blending would not require normalization:
+ atempo->state = YAE_RELOAD_FRAGMENT;
+ } else {
+ atempo->state = YAE_OUTPUT_OVERLAP_ADD;
+ }
+ }
+
+ if (atempo->state == YAE_RELOAD_FRAGMENT) {
+ // load additional data if necessary due to position adjustment:
+ if (yae_load_frag(atempo, src_ref, src_end) != 0) {
+ break;
+ }
+
+ // down-mix to mono:
+ yae_downmix(atempo, yae_curr_frag(atempo));
+
+ // apply rDFT:
+ av_rdft_calc(atempo->real_to_complex, yae_curr_frag(atempo)->xdat);
+
+ atempo->state = YAE_OUTPUT_OVERLAP_ADD;
+ }
+
+ if (atempo->state == YAE_OUTPUT_OVERLAP_ADD) {
+ // overlap-add and output the result:
+ if (yae_overlap_add(atempo, dst_ref, dst_end) != 0) {
+ break;
+ }
+
+ // advance to the next fragment, repeat:
+ yae_advance_to_next_frag(atempo);
+ atempo->state = YAE_LOAD_FRAGMENT;
+ }
+ }
+}
+
+/**
+ * Flush any buffered data from the filter.
+ *
+ * @return
+ * 0 if all data was completely stored in the dst buffer,
+ * AVERROR(EAGAIN) if more destination buffer space is required.
+ */
+static int yae_flush(ATempoContext *atempo,
+ uint8_t **dst_ref,
+ uint8_t *dst_end)
+{
+ AudioFragment *frag = yae_curr_frag(atempo);
+ int64_t overlap_end;
+ int64_t start_here;
+ int64_t stop_here;
+ int64_t offset;
+
+ const uint8_t *src;
+ uint8_t *dst;
+
+ int src_size;
+ int dst_size;
+ int nbytes;
+
+ atempo->state = YAE_FLUSH_OUTPUT;
+
+ if (atempo->position[0] >= frag->position[0] + frag->nsamples &&
+ atempo->position[1] >= frag->position[1] + frag->nsamples) {
+ // the current fragment is already flushed:
+ return 0;
+ }
+
+ if (frag->position[0] + frag->nsamples < atempo->position[0]) {
+ // finish loading the current (possibly partial) fragment:
+ yae_load_frag(atempo, NULL, NULL);
+
+ if (atempo->nfrag) {
+ // down-mix to mono:
+ yae_downmix(atempo, frag);
+
+ // apply rDFT:
+ av_rdft_calc(atempo->real_to_complex, frag->xdat);
+
+ // align current fragment to previous fragment:
+ if (yae_adjust_position(atempo)) {
+ // reload the current fragment due to adjusted position:
+ yae_load_frag(atempo, NULL, NULL);
+ }
+ }
+ }
+
+ // flush the overlap region:
+ overlap_end = frag->position[1] + FFMIN(atempo->window / 2,
+ frag->nsamples);
+
+ while (atempo->position[1] < overlap_end) {
+ if (yae_overlap_add(atempo, dst_ref, dst_end) != 0) {
+ return AVERROR(EAGAIN);
+ }
+ }
+
+ // check whether all of the input samples have been consumed:
+ if (frag->position[0] + frag->nsamples < atempo->position[0]) {
+ yae_advance_to_next_frag(atempo);
+ return AVERROR(EAGAIN);
+ }
+
+ // flush the remainder of the current fragment:
+ start_here = FFMAX(atempo->position[1], overlap_end);
+ stop_here = frag->position[1] + frag->nsamples;
+ offset = start_here - frag->position[1];
+ av_assert0(start_here <= stop_here && frag->position[1] <= start_here);
+
+ src = frag->data + offset * atempo->stride;
+ dst = (uint8_t *)*dst_ref;
+
+ src_size = (int)(stop_here - start_here) * atempo->stride;
+ dst_size = dst_end - dst;
+ nbytes = FFMIN(src_size, dst_size);
+
+ memcpy(dst, src, nbytes);
+ dst += nbytes;
+
+ atempo->position[1] += (nbytes / atempo->stride);
+
+ // pass-back the updated destination buffer pointer:
+ *dst_ref = (uint8_t *)dst;
+
+ return atempo->position[1] == stop_here ? 0 : AVERROR(EAGAIN);
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ ATempoContext *atempo = ctx->priv;
+ atempo->format = AV_SAMPLE_FMT_NONE;
+ atempo->state = YAE_LOAD_FRAGMENT;
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ ATempoContext *atempo = ctx->priv;
+ yae_release_buffers(atempo);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterChannelLayouts *layouts = NULL;
+ AVFilterFormats *formats = NULL;
+
+ // WSOLA necessitates an internal sliding window ring buffer
+ // for incoming audio stream.
+ //
+ // Planar sample formats are too cumbersome to store in a ring buffer,
+ // therefore planar sample formats are not supported.
+ //
+ static const enum AVSampleFormat sample_fmts[] = {
+ AV_SAMPLE_FMT_U8,
+ AV_SAMPLE_FMT_S16,
+ AV_SAMPLE_FMT_S32,
+ AV_SAMPLE_FMT_FLT,
+ AV_SAMPLE_FMT_DBL,
+ AV_SAMPLE_FMT_NONE
+ };
+ int ret;
+
+ layouts = ff_all_channel_counts();
+ if (!layouts) {
+ return AVERROR(ENOMEM);
+ }
+ ret = ff_set_common_channel_layouts(ctx, layouts);
+ if (ret < 0)
+ return ret;
+
+ formats = ff_make_format_list(sample_fmts);
+ if (!formats) {
+ return AVERROR(ENOMEM);
+ }
+ ret = ff_set_common_formats(ctx, formats);
+ if (ret < 0)
+ return ret;
+
+ formats = ff_all_samplerates();
+ if (!formats) {
+ return AVERROR(ENOMEM);
+ }
+ return ff_set_common_samplerates(ctx, formats);
+}
+
+static int config_props(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ ATempoContext *atempo = ctx->priv;
+
+ enum AVSampleFormat format = inlink->format;
+ int sample_rate = (int)inlink->sample_rate;
+
+ return yae_reset(atempo, format, sample_rate, inlink->channels);
+}
+
+static int push_samples(ATempoContext *atempo,
+ AVFilterLink *outlink,
+ int n_out)
+{
+ int ret;
+
+ atempo->dst_buffer->sample_rate = outlink->sample_rate;
+ atempo->dst_buffer->nb_samples = n_out;
+
+ // adjust the PTS:
+ atempo->dst_buffer->pts =
+ av_rescale_q(atempo->nsamples_out,
+ (AVRational){ 1, outlink->sample_rate },
+ outlink->time_base);
+
+ ret = ff_filter_frame(outlink, atempo->dst_buffer);
+ atempo->dst_buffer = NULL;
+ atempo->dst = NULL;
+ atempo->dst_end = NULL;
+ if (ret < 0)
+ return ret;
+
+ atempo->nsamples_out += n_out;
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *src_buffer)
+{
+ AVFilterContext *ctx = inlink->dst;
+ ATempoContext *atempo = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+
+ int ret = 0;
+ int n_in = src_buffer->nb_samples;
+ int n_out = (int)(0.5 + ((double)n_in) / atempo->tempo);
+
+ const uint8_t *src = src_buffer->data[0];
+ const uint8_t *src_end = src + n_in * atempo->stride;
+
+ while (src < src_end) {
+ if (!atempo->dst_buffer) {
+ atempo->dst_buffer = ff_get_audio_buffer(outlink, n_out);
+ if (!atempo->dst_buffer)
+ return AVERROR(ENOMEM);
+ av_frame_copy_props(atempo->dst_buffer, src_buffer);
+
+ atempo->dst = atempo->dst_buffer->data[0];
+ atempo->dst_end = atempo->dst + n_out * atempo->stride;
+ }
+
+ yae_apply(atempo, &src, src_end, &atempo->dst, atempo->dst_end);
+
+ if (atempo->dst == atempo->dst_end) {
+ int n_samples = ((atempo->dst - atempo->dst_buffer->data[0]) /
+ atempo->stride);
+ ret = push_samples(atempo, outlink, n_samples);
+ if (ret < 0)
+ goto end;
+ }
+ }
+
+ atempo->nsamples_in += n_in;
+end:
+ av_frame_free(&src_buffer);
+ return ret;
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ ATempoContext *atempo = ctx->priv;
+ int ret;
+
+ ret = ff_request_frame(ctx->inputs[0]);
+
+ if (ret == AVERROR_EOF) {
+ // flush the filter:
+ int n_max = atempo->ring;
+ int n_out;
+ int err = AVERROR(EAGAIN);
+
+ while (err == AVERROR(EAGAIN)) {
+ if (!atempo->dst_buffer) {
+ atempo->dst_buffer = ff_get_audio_buffer(outlink, n_max);
+ if (!atempo->dst_buffer)
+ return AVERROR(ENOMEM);
+
+ atempo->dst = atempo->dst_buffer->data[0];
+ atempo->dst_end = atempo->dst + n_max * atempo->stride;
+ }
+
+ err = yae_flush(atempo, &atempo->dst, atempo->dst_end);
+
+ n_out = ((atempo->dst - atempo->dst_buffer->data[0]) /
+ atempo->stride);
+
+ if (n_out) {
+ ret = push_samples(atempo, outlink, n_out);
+ }
+ }
+
+ av_frame_free(&atempo->dst_buffer);
+ atempo->dst = NULL;
+ atempo->dst_end = NULL;
+
+ return AVERROR_EOF;
+ }
+
+ return ret;
+}
+
+static int process_command(AVFilterContext *ctx,
+ const char *cmd,
+ const char *arg,
+ char *res,
+ int res_len,
+ int flags)
+{
+ return !strcmp(cmd, "tempo") ? yae_set_tempo(ctx, arg) : AVERROR(ENOSYS);
+}
+
+static const AVFilterPad atempo_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ .config_props = config_props,
+ },
+ { NULL }
+};
+
+static const AVFilterPad atempo_outputs[] = {
+ {
+ .name = "default",
+ .request_frame = request_frame,
+ .type = AVMEDIA_TYPE_AUDIO,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_atempo = {
+ .name = "atempo",
+ .description = NULL_IF_CONFIG_SMALL("Adjust audio tempo."),
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .process_command = process_command,
+ .priv_size = sizeof(ATempoContext),
+ .priv_class = &atempo_class,
+ .inputs = atempo_inputs,
+ .outputs = atempo_outputs,
+};
diff --git a/libavfilter/af_biquads.c b/libavfilter/af_biquads.c
new file mode 100644
index 0000000000..79f1b7cf4c
--- /dev/null
+++ b/libavfilter/af_biquads.c
@@ -0,0 +1,632 @@
+/*
+ * Copyright (c) 2013 Paul B Mahol
+ * Copyright (c) 2006-2008 Rob Sykes <robs@users.sourceforge.net>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/*
+ * 2-pole filters designed by Robert Bristow-Johnson <rbj@audioimagination.com>
+ * see http://www.musicdsp.org/files/Audio-EQ-Cookbook.txt
+ *
+ * 1-pole filters based on code (c) 2000 Chris Bagwell <cbagwell@sprynet.com>
+ * Algorithms: Recursive single pole low/high pass filter
+ * Reference: The Scientist and Engineer's Guide to Digital Signal Processing
+ *
+ * low-pass: output[N] = input[N] * A + output[N-1] * B
+ * X = exp(-2.0 * pi * Fc)
+ * A = 1 - X
+ * B = X
+ * Fc = cutoff freq / sample rate
+ *
+ * Mimics an RC low-pass filter:
+ *
+ * ---/\/\/\/\----------->
+ * |
+ * --- C
+ * ---
+ * |
+ * |
+ * V
+ *
+ * high-pass: output[N] = A0 * input[N] + A1 * input[N-1] + B1 * output[N-1]
+ * X = exp(-2.0 * pi * Fc)
+ * A0 = (1 + X) / 2
+ * A1 = -(1 + X) / 2
+ * B1 = X
+ * Fc = cutoff freq / sample rate
+ *
+ * Mimics an RC high-pass filter:
+ *
+ * || C
+ * ----||--------->
+ * || |
+ * <
+ * > R
+ * <
+ * |
+ * V
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/opt.h"
+#include "audio.h"
+#include "avfilter.h"
+#include "internal.h"
+
+enum FilterType {
+ biquad,
+ equalizer,
+ bass,
+ treble,
+ band,
+ bandpass,
+ bandreject,
+ allpass,
+ highpass,
+ lowpass,
+};
+
+enum WidthType {
+ NONE,
+ HERTZ,
+ OCTAVE,
+ QFACTOR,
+ SLOPE,
+};
+
+typedef struct ChanCache {
+ double i1, i2;
+ double o1, o2;
+} ChanCache;
+
+typedef struct BiquadsContext {
+ const AVClass *class;
+
+ enum FilterType filter_type;
+ int width_type;
+ int poles;
+ int csg;
+
+ double gain;
+ double frequency;
+ double width;
+
+ double a0, a1, a2;
+ double b0, b1, b2;
+
+ ChanCache *cache;
+ int clippings;
+
+ void (*filter)(struct BiquadsContext *s, const void *ibuf, void *obuf, int len,
+ double *i1, double *i2, double *o1, double *o2,
+ double b0, double b1, double b2, double a1, double a2);
+} BiquadsContext;
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ BiquadsContext *s = ctx->priv;
+
+ if (s->filter_type != biquad) {
+ if (s->frequency <= 0 || s->width <= 0) {
+ av_log(ctx, AV_LOG_ERROR, "Invalid frequency %f and/or width %f <= 0\n",
+ s->frequency, s->width);
+ return AVERROR(EINVAL);
+ }
+ }
+
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *formats;
+ AVFilterChannelLayouts *layouts;
+ static const enum AVSampleFormat sample_fmts[] = {
+ AV_SAMPLE_FMT_S16P,
+ AV_SAMPLE_FMT_S32P,
+ AV_SAMPLE_FMT_FLTP,
+ AV_SAMPLE_FMT_DBLP,
+ AV_SAMPLE_FMT_NONE
+ };
+ int ret;
+
+ layouts = ff_all_channel_counts();
+ if (!layouts)
+ return AVERROR(ENOMEM);
+ ret = ff_set_common_channel_layouts(ctx, layouts);
+ if (ret < 0)
+ return ret;
+
+ formats = ff_make_format_list(sample_fmts);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ret = ff_set_common_formats(ctx, formats);
+ if (ret < 0)
+ return ret;
+
+ formats = ff_all_samplerates();
+ if (!formats)
+ return AVERROR(ENOMEM);
+ return ff_set_common_samplerates(ctx, formats);
+}
+
+#define BIQUAD_FILTER(name, type, min, max, need_clipping) \
+static void biquad_## name (BiquadsContext *s, \
+ const void *input, void *output, int len, \
+ double *in1, double *in2, \
+ double *out1, double *out2, \
+ double b0, double b1, double b2, \
+ double a1, double a2) \
+{ \
+ const type *ibuf = input; \
+ type *obuf = output; \
+ double i1 = *in1; \
+ double i2 = *in2; \
+ double o1 = *out1; \
+ double o2 = *out2; \
+ int i; \
+ a1 = -a1; \
+ a2 = -a2; \
+ \
+ for (i = 0; i+1 < len; i++) { \
+ o2 = i2 * b2 + i1 * b1 + ibuf[i] * b0 + o2 * a2 + o1 * a1; \
+ i2 = ibuf[i]; \
+ if (need_clipping && o2 < min) { \
+ s->clippings++; \
+ obuf[i] = min; \
+ } else if (need_clipping && o2 > max) { \
+ s->clippings++; \
+ obuf[i] = max; \
+ } else { \
+ obuf[i] = o2; \
+ } \
+ i++; \
+ o1 = i1 * b2 + i2 * b1 + ibuf[i] * b0 + o1 * a2 + o2 * a1; \
+ i1 = ibuf[i]; \
+ if (need_clipping && o1 < min) { \
+ s->clippings++; \
+ obuf[i] = min; \
+ } else if (need_clipping && o1 > max) { \
+ s->clippings++; \
+ obuf[i] = max; \
+ } else { \
+ obuf[i] = o1; \
+ } \
+ } \
+ if (i < len) { \
+ double o0 = ibuf[i] * b0 + i1 * b1 + i2 * b2 + o1 * a1 + o2 * a2; \
+ i2 = i1; \
+ i1 = ibuf[i]; \
+ o2 = o1; \
+ o1 = o0; \
+ if (need_clipping && o0 < min) { \
+ s->clippings++; \
+ obuf[i] = min; \
+ } else if (need_clipping && o0 > max) { \
+ s->clippings++; \
+ obuf[i] = max; \
+ } else { \
+ obuf[i] = o0; \
+ } \
+ } \
+ *in1 = i1; \
+ *in2 = i2; \
+ *out1 = o1; \
+ *out2 = o2; \
+}
+
+BIQUAD_FILTER(s16, int16_t, INT16_MIN, INT16_MAX, 1)
+BIQUAD_FILTER(s32, int32_t, INT32_MIN, INT32_MAX, 1)
+BIQUAD_FILTER(flt, float, -1., 1., 0)
+BIQUAD_FILTER(dbl, double, -1., 1., 0)
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ BiquadsContext *s = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
+ double A = exp(s->gain / 40 * log(10.));
+ double w0 = 2 * M_PI * s->frequency / inlink->sample_rate;
+ double alpha;
+
+ if (w0 > M_PI) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Invalid frequency %f. Frequency must be less than half the sample-rate %d.\n",
+ s->frequency, inlink->sample_rate);
+ return AVERROR(EINVAL);
+ }
+
+ switch (s->width_type) {
+ case NONE:
+ alpha = 0.0;
+ break;
+ case HERTZ:
+ alpha = sin(w0) / (2 * s->frequency / s->width);
+ break;
+ case OCTAVE:
+ alpha = sin(w0) * sinh(log(2.) / 2 * s->width * w0 / sin(w0));
+ break;
+ case QFACTOR:
+ alpha = sin(w0) / (2 * s->width);
+ break;
+ case SLOPE:
+ alpha = sin(w0) / 2 * sqrt((A + 1 / A) * (1 / s->width - 1) + 2);
+ break;
+ default:
+ av_assert0(0);
+ }
+
+ switch (s->filter_type) {
+ case biquad:
+ break;
+ case equalizer:
+ s->a0 = 1 + alpha / A;
+ s->a1 = -2 * cos(w0);
+ s->a2 = 1 - alpha / A;
+ s->b0 = 1 + alpha * A;
+ s->b1 = -2 * cos(w0);
+ s->b2 = 1 - alpha * A;
+ break;
+ case bass:
+ s->a0 = (A + 1) + (A - 1) * cos(w0) + 2 * sqrt(A) * alpha;
+ s->a1 = -2 * ((A - 1) + (A + 1) * cos(w0));
+ s->a2 = (A + 1) + (A - 1) * cos(w0) - 2 * sqrt(A) * alpha;
+ s->b0 = A * ((A + 1) - (A - 1) * cos(w0) + 2 * sqrt(A) * alpha);
+ s->b1 = 2 * A * ((A - 1) - (A + 1) * cos(w0));
+ s->b2 = A * ((A + 1) - (A - 1) * cos(w0) - 2 * sqrt(A) * alpha);
+ break;
+ case treble:
+ s->a0 = (A + 1) - (A - 1) * cos(w0) + 2 * sqrt(A) * alpha;
+ s->a1 = 2 * ((A - 1) - (A + 1) * cos(w0));
+ s->a2 = (A + 1) - (A - 1) * cos(w0) - 2 * sqrt(A) * alpha;
+ s->b0 = A * ((A + 1) + (A - 1) * cos(w0) + 2 * sqrt(A) * alpha);
+ s->b1 =-2 * A * ((A - 1) + (A + 1) * cos(w0));
+ s->b2 = A * ((A + 1) + (A - 1) * cos(w0) - 2 * sqrt(A) * alpha);
+ break;
+ case bandpass:
+ if (s->csg) {
+ s->a0 = 1 + alpha;
+ s->a1 = -2 * cos(w0);
+ s->a2 = 1 - alpha;
+ s->b0 = sin(w0) / 2;
+ s->b1 = 0;
+ s->b2 = -sin(w0) / 2;
+ } else {
+ s->a0 = 1 + alpha;
+ s->a1 = -2 * cos(w0);
+ s->a2 = 1 - alpha;
+ s->b0 = alpha;
+ s->b1 = 0;
+ s->b2 = -alpha;
+ }
+ break;
+ case bandreject:
+ s->a0 = 1 + alpha;
+ s->a1 = -2 * cos(w0);
+ s->a2 = 1 - alpha;
+ s->b0 = 1;
+ s->b1 = -2 * cos(w0);
+ s->b2 = 1;
+ break;
+ case lowpass:
+ if (s->poles == 1) {
+ s->a0 = 1;
+ s->a1 = -exp(-w0);
+ s->a2 = 0;
+ s->b0 = 1 + s->a1;
+ s->b1 = 0;
+ s->b2 = 0;
+ } else {
+ s->a0 = 1 + alpha;
+ s->a1 = -2 * cos(w0);
+ s->a2 = 1 - alpha;
+ s->b0 = (1 - cos(w0)) / 2;
+ s->b1 = 1 - cos(w0);
+ s->b2 = (1 - cos(w0)) / 2;
+ }
+ break;
+ case highpass:
+ if (s->poles == 1) {
+ s->a0 = 1;
+ s->a1 = -exp(-w0);
+ s->a2 = 0;
+ s->b0 = (1 - s->a1) / 2;
+ s->b1 = -s->b0;
+ s->b2 = 0;
+ } else {
+ s->a0 = 1 + alpha;
+ s->a1 = -2 * cos(w0);
+ s->a2 = 1 - alpha;
+ s->b0 = (1 + cos(w0)) / 2;
+ s->b1 = -(1 + cos(w0));
+ s->b2 = (1 + cos(w0)) / 2;
+ }
+ break;
+ case allpass:
+ s->a0 = 1 + alpha;
+ s->a1 = -2 * cos(w0);
+ s->a2 = 1 - alpha;
+ s->b0 = 1 - alpha;
+ s->b1 = -2 * cos(w0);
+ s->b2 = 1 + alpha;
+ break;
+ default:
+ av_assert0(0);
+ }
+
+ s->a1 /= s->a0;
+ s->a2 /= s->a0;
+ s->b0 /= s->a0;
+ s->b1 /= s->a0;
+ s->b2 /= s->a0;
+
+ s->cache = av_realloc_f(s->cache, sizeof(ChanCache), inlink->channels);
+ if (!s->cache)
+ return AVERROR(ENOMEM);
+ memset(s->cache, 0, sizeof(ChanCache) * inlink->channels);
+
+ switch (inlink->format) {
+ case AV_SAMPLE_FMT_S16P: s->filter = biquad_s16; break;
+ case AV_SAMPLE_FMT_S32P: s->filter = biquad_s32; break;
+ case AV_SAMPLE_FMT_FLTP: s->filter = biquad_flt; break;
+ case AV_SAMPLE_FMT_DBLP: s->filter = biquad_dbl; break;
+ default: av_assert0(0);
+ }
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
+{
+ AVFilterContext *ctx = inlink->dst;
+ BiquadsContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFrame *out_buf;
+ int nb_samples = buf->nb_samples;
+ int ch;
+
+ if (av_frame_is_writable(buf)) {
+ out_buf = buf;
+ } else {
+ out_buf = ff_get_audio_buffer(inlink, nb_samples);
+ if (!out_buf) {
+ av_frame_free(&buf);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out_buf, buf);
+ }
+
+ for (ch = 0; ch < av_frame_get_channels(buf); ch++)
+ s->filter(s, buf->extended_data[ch],
+ out_buf->extended_data[ch], nb_samples,
+ &s->cache[ch].i1, &s->cache[ch].i2,
+ &s->cache[ch].o1, &s->cache[ch].o2,
+ s->b0, s->b1, s->b2, s->a1, s->a2);
+
+ if (s->clippings > 0)
+ av_log(ctx, AV_LOG_WARNING, "clipping %d times. Please reduce gain.\n", s->clippings);
+ s->clippings = 0;
+
+ if (buf != out_buf)
+ av_frame_free(&buf);
+
+ return ff_filter_frame(outlink, out_buf);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ BiquadsContext *s = ctx->priv;
+
+ av_freep(&s->cache);
+}
+
+static const AVFilterPad inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_output,
+ },
+ { NULL }
+};
+
+#define OFFSET(x) offsetof(BiquadsContext, x)
+#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+#define DEFINE_BIQUAD_FILTER(name_, description_) \
+AVFILTER_DEFINE_CLASS(name_); \
+static av_cold int name_##_init(AVFilterContext *ctx) \
+{ \
+ BiquadsContext *s = ctx->priv; \
+ s->class = &name_##_class; \
+ s->filter_type = name_; \
+ return init(ctx); \
+} \
+ \
+AVFilter ff_af_##name_ = { \
+ .name = #name_, \
+ .description = NULL_IF_CONFIG_SMALL(description_), \
+ .priv_size = sizeof(BiquadsContext), \
+ .init = name_##_init, \
+ .uninit = uninit, \
+ .query_formats = query_formats, \
+ .inputs = inputs, \
+ .outputs = outputs, \
+ .priv_class = &name_##_class, \
+}
+
+#if CONFIG_EQUALIZER_FILTER
+static const AVOption equalizer_options[] = {
+ {"frequency", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=0}, 0, 999999, FLAGS},
+ {"f", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=0}, 0, 999999, FLAGS},
+ {"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HERTZ, SLOPE, FLAGS, "width_type"},
+ {"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HERTZ}, 0, 0, FLAGS, "width_type"},
+ {"q", "Q-Factor", 0, AV_OPT_TYPE_CONST, {.i64=QFACTOR}, 0, 0, FLAGS, "width_type"},
+ {"o", "octave", 0, AV_OPT_TYPE_CONST, {.i64=OCTAVE}, 0, 0, FLAGS, "width_type"},
+ {"s", "slope", 0, AV_OPT_TYPE_CONST, {.i64=SLOPE}, 0, 0, FLAGS, "width_type"},
+ {"width", "set band-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 999, FLAGS},
+ {"w", "set band-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 999, FLAGS},
+ {"gain", "set gain", OFFSET(gain), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -900, 900, FLAGS},
+ {"g", "set gain", OFFSET(gain), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -900, 900, FLAGS},
+ {NULL}
+};
+
+DEFINE_BIQUAD_FILTER(equalizer, "Apply two-pole peaking equalization (EQ) filter.");
+#endif /* CONFIG_EQUALIZER_FILTER */
+#if CONFIG_BASS_FILTER
+static const AVOption bass_options[] = {
+ {"frequency", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=100}, 0, 999999, FLAGS},
+ {"f", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=100}, 0, 999999, FLAGS},
+ {"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HERTZ, SLOPE, FLAGS, "width_type"},
+ {"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HERTZ}, 0, 0, FLAGS, "width_type"},
+ {"q", "Q-Factor", 0, AV_OPT_TYPE_CONST, {.i64=QFACTOR}, 0, 0, FLAGS, "width_type"},
+ {"o", "octave", 0, AV_OPT_TYPE_CONST, {.i64=OCTAVE}, 0, 0, FLAGS, "width_type"},
+ {"s", "slope", 0, AV_OPT_TYPE_CONST, {.i64=SLOPE}, 0, 0, FLAGS, "width_type"},
+ {"width", "set shelf transition steep", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 99999, FLAGS},
+ {"w", "set shelf transition steep", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 99999, FLAGS},
+ {"gain", "set gain", OFFSET(gain), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -900, 900, FLAGS},
+ {"g", "set gain", OFFSET(gain), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -900, 900, FLAGS},
+ {NULL}
+};
+
+DEFINE_BIQUAD_FILTER(bass, "Boost or cut lower frequencies.");
+#endif /* CONFIG_BASS_FILTER */
+#if CONFIG_TREBLE_FILTER
+static const AVOption treble_options[] = {
+ {"frequency", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
+ {"f", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
+ {"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HERTZ, SLOPE, FLAGS, "width_type"},
+ {"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HERTZ}, 0, 0, FLAGS, "width_type"},
+ {"q", "Q-Factor", 0, AV_OPT_TYPE_CONST, {.i64=QFACTOR}, 0, 0, FLAGS, "width_type"},
+ {"o", "octave", 0, AV_OPT_TYPE_CONST, {.i64=OCTAVE}, 0, 0, FLAGS, "width_type"},
+ {"s", "slope", 0, AV_OPT_TYPE_CONST, {.i64=SLOPE}, 0, 0, FLAGS, "width_type"},
+ {"width", "set shelf transition steep", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 99999, FLAGS},
+ {"w", "set shelf transition steep", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 99999, FLAGS},
+ {"gain", "set gain", OFFSET(gain), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -900, 900, FLAGS},
+ {"g", "set gain", OFFSET(gain), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -900, 900, FLAGS},
+ {NULL}
+};
+
+DEFINE_BIQUAD_FILTER(treble, "Boost or cut upper frequencies.");
+#endif /* CONFIG_TREBLE_FILTER */
+#if CONFIG_BANDPASS_FILTER
+static const AVOption bandpass_options[] = {
+ {"frequency", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
+ {"f", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
+ {"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HERTZ, SLOPE, FLAGS, "width_type"},
+ {"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HERTZ}, 0, 0, FLAGS, "width_type"},
+ {"q", "Q-Factor", 0, AV_OPT_TYPE_CONST, {.i64=QFACTOR}, 0, 0, FLAGS, "width_type"},
+ {"o", "octave", 0, AV_OPT_TYPE_CONST, {.i64=OCTAVE}, 0, 0, FLAGS, "width_type"},
+ {"s", "slope", 0, AV_OPT_TYPE_CONST, {.i64=SLOPE}, 0, 0, FLAGS, "width_type"},
+ {"width", "set band-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 999, FLAGS},
+ {"w", "set band-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 999, FLAGS},
+ {"csg", "use constant skirt gain", OFFSET(csg), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
+ {NULL}
+};
+
+DEFINE_BIQUAD_FILTER(bandpass, "Apply a two-pole Butterworth band-pass filter.");
+#endif /* CONFIG_BANDPASS_FILTER */
+#if CONFIG_BANDREJECT_FILTER
+static const AVOption bandreject_options[] = {
+ {"frequency", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
+ {"f", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
+ {"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HERTZ, SLOPE, FLAGS, "width_type"},
+ {"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HERTZ}, 0, 0, FLAGS, "width_type"},
+ {"q", "Q-Factor", 0, AV_OPT_TYPE_CONST, {.i64=QFACTOR}, 0, 0, FLAGS, "width_type"},
+ {"o", "octave", 0, AV_OPT_TYPE_CONST, {.i64=OCTAVE}, 0, 0, FLAGS, "width_type"},
+ {"s", "slope", 0, AV_OPT_TYPE_CONST, {.i64=SLOPE}, 0, 0, FLAGS, "width_type"},
+ {"width", "set band-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 999, FLAGS},
+ {"w", "set band-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 999, FLAGS},
+ {NULL}
+};
+
+DEFINE_BIQUAD_FILTER(bandreject, "Apply a two-pole Butterworth band-reject filter.");
+#endif /* CONFIG_BANDREJECT_FILTER */
+#if CONFIG_LOWPASS_FILTER
+static const AVOption lowpass_options[] = {
+ {"frequency", "set frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=500}, 0, 999999, FLAGS},
+ {"f", "set frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=500}, 0, 999999, FLAGS},
+ {"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HERTZ, SLOPE, FLAGS, "width_type"},
+ {"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HERTZ}, 0, 0, FLAGS, "width_type"},
+ {"q", "Q-Factor", 0, AV_OPT_TYPE_CONST, {.i64=QFACTOR}, 0, 0, FLAGS, "width_type"},
+ {"o", "octave", 0, AV_OPT_TYPE_CONST, {.i64=OCTAVE}, 0, 0, FLAGS, "width_type"},
+ {"s", "slope", 0, AV_OPT_TYPE_CONST, {.i64=SLOPE}, 0, 0, FLAGS, "width_type"},
+ {"width", "set width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.707}, 0, 99999, FLAGS},
+ {"w", "set width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.707}, 0, 99999, FLAGS},
+ {"poles", "set number of poles", OFFSET(poles), AV_OPT_TYPE_INT, {.i64=2}, 1, 2, FLAGS},
+ {"p", "set number of poles", OFFSET(poles), AV_OPT_TYPE_INT, {.i64=2}, 1, 2, FLAGS},
+ {NULL}
+};
+
+DEFINE_BIQUAD_FILTER(lowpass, "Apply a low-pass filter with 3dB point frequency.");
+#endif /* CONFIG_LOWPASS_FILTER */
+#if CONFIG_HIGHPASS_FILTER
+static const AVOption highpass_options[] = {
+ {"frequency", "set frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
+ {"f", "set frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
+ {"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HERTZ, SLOPE, FLAGS, "width_type"},
+ {"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HERTZ}, 0, 0, FLAGS, "width_type"},
+ {"q", "Q-Factor", 0, AV_OPT_TYPE_CONST, {.i64=QFACTOR}, 0, 0, FLAGS, "width_type"},
+ {"o", "octave", 0, AV_OPT_TYPE_CONST, {.i64=OCTAVE}, 0, 0, FLAGS, "width_type"},
+ {"s", "slope", 0, AV_OPT_TYPE_CONST, {.i64=SLOPE}, 0, 0, FLAGS, "width_type"},
+ {"width", "set width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.707}, 0, 99999, FLAGS},
+ {"w", "set width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.707}, 0, 99999, FLAGS},
+ {"poles", "set number of poles", OFFSET(poles), AV_OPT_TYPE_INT, {.i64=2}, 1, 2, FLAGS},
+ {"p", "set number of poles", OFFSET(poles), AV_OPT_TYPE_INT, {.i64=2}, 1, 2, FLAGS},
+ {NULL}
+};
+
+DEFINE_BIQUAD_FILTER(highpass, "Apply a high-pass filter with 3dB point frequency.");
+#endif /* CONFIG_HIGHPASS_FILTER */
+#if CONFIG_ALLPASS_FILTER
+static const AVOption allpass_options[] = {
+ {"frequency", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
+ {"f", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
+ {"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=HERTZ}, HERTZ, SLOPE, FLAGS, "width_type"},
+ {"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HERTZ}, 0, 0, FLAGS, "width_type"},
+ {"q", "Q-Factor", 0, AV_OPT_TYPE_CONST, {.i64=QFACTOR}, 0, 0, FLAGS, "width_type"},
+ {"o", "octave", 0, AV_OPT_TYPE_CONST, {.i64=OCTAVE}, 0, 0, FLAGS, "width_type"},
+ {"s", "slope", 0, AV_OPT_TYPE_CONST, {.i64=SLOPE}, 0, 0, FLAGS, "width_type"},
+ {"width", "set filter-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=707.1}, 0, 99999, FLAGS},
+ {"w", "set filter-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=707.1}, 0, 99999, FLAGS},
+ {NULL}
+};
+
+DEFINE_BIQUAD_FILTER(allpass, "Apply a two-pole all-pass filter.");
+#endif /* CONFIG_ALLPASS_FILTER */
+#if CONFIG_BIQUAD_FILTER
+static const AVOption biquad_options[] = {
+ {"a0", NULL, OFFSET(a0), AV_OPT_TYPE_DOUBLE, {.dbl=1}, INT16_MIN, INT16_MAX, FLAGS},
+ {"a1", NULL, OFFSET(a1), AV_OPT_TYPE_DOUBLE, {.dbl=1}, INT16_MIN, INT16_MAX, FLAGS},
+ {"a2", NULL, OFFSET(a2), AV_OPT_TYPE_DOUBLE, {.dbl=1}, INT16_MIN, INT16_MAX, FLAGS},
+ {"b0", NULL, OFFSET(b0), AV_OPT_TYPE_DOUBLE, {.dbl=1}, INT16_MIN, INT16_MAX, FLAGS},
+ {"b1", NULL, OFFSET(b1), AV_OPT_TYPE_DOUBLE, {.dbl=1}, INT16_MIN, INT16_MAX, FLAGS},
+ {"b2", NULL, OFFSET(b2), AV_OPT_TYPE_DOUBLE, {.dbl=1}, INT16_MIN, INT16_MAX, FLAGS},
+ {NULL}
+};
+
+DEFINE_BIQUAD_FILTER(biquad, "Apply a biquad IIR filter with the given coefficients.");
+#endif /* CONFIG_BIQUAD_FILTER */
diff --git a/libavfilter/af_bs2b.c b/libavfilter/af_bs2b.c
index 25e786761f..54d52c5c6e 100644
--- a/libavfilter/af_bs2b.c
+++ b/libavfilter/af_bs2b.c
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -47,7 +47,7 @@ typedef struct Bs2bContext {
#define OFFSET(x) offsetof(Bs2bContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
-static const AVOption options[] = {
+static const AVOption bs2b_options[] = {
{ "profile", "Apply a pre-defined crossfeed level",
OFFSET(profile), AV_OPT_TYPE_INT, { .i64 = BS2B_DEFAULT_CLEVEL }, 0, INT_MAX, A, "profile" },
{ "default", "default profile", 0, AV_OPT_TYPE_CONST, { .i64 = BS2B_DEFAULT_CLEVEL }, 0, 0, A, "profile" },
@@ -60,12 +60,7 @@ static const AVOption options[] = {
{ NULL },
};
-static const AVClass bs2b_class = {
- .class_name = "bs2b filter",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(bs2b);
static av_cold int init(AVFilterContext *ctx)
{
@@ -106,22 +101,25 @@ static int query_formats(AVFilterContext *ctx)
AV_SAMPLE_FMT_DBL,
AV_SAMPLE_FMT_NONE,
};
+ int ret;
if (ff_add_channel_layout(&layouts, AV_CH_LAYOUT_STEREO) != 0)
return AVERROR(ENOMEM);
- ff_set_common_channel_layouts(ctx, layouts);
+ ret = ff_set_common_channel_layouts(ctx, layouts);
+ if (ret < 0)
+ return ret;
formats = ff_make_format_list(sample_fmts);
if (!formats)
return AVERROR(ENOMEM);
- ff_set_common_formats(ctx, formats);
+ ret = ff_set_common_formats(ctx, formats);
+ if (ret < 0)
+ return ret;
formats = ff_all_samplerates();
if (!formats)
return AVERROR(ENOMEM);
- ff_set_common_samplerates(ctx, formats);
-
- return 0;
+ return ff_set_common_samplerates(ctx, formats);
}
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
@@ -168,16 +166,16 @@ static int config_output(AVFilterLink *outlink)
bs2b->filter = bs2b_cross_feed_u8;
break;
case AV_SAMPLE_FMT_S16:
- bs2b->filter = bs2b_cross_feed_s16;
+ bs2b->filter = (void*)bs2b_cross_feed_s16;
break;
case AV_SAMPLE_FMT_S32:
- bs2b->filter = bs2b_cross_feed_s32;
+ bs2b->filter = (void*)bs2b_cross_feed_s32;
break;
case AV_SAMPLE_FMT_FLT:
- bs2b->filter = bs2b_cross_feed_f;
+ bs2b->filter = (void*)bs2b_cross_feed_f;
break;
case AV_SAMPLE_FMT_DBL:
- bs2b->filter = bs2b_cross_feed_d;
+ bs2b->filter = (void*)bs2b_cross_feed_d;
break;
default:
return AVERROR_BUG;
diff --git a/libavfilter/af_channelmap.c b/libavfilter/af_channelmap.c
index 572549808f..cdd8a5885c 100644
--- a/libavfilter/af_channelmap.c
+++ b/libavfilter/af_channelmap.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2012 Google, Inc.
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -67,20 +67,16 @@ typedef struct ChannelMapContext {
#define OFFSET(x) offsetof(ChannelMapContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
-static const AVOption options[] = {
+#define F AV_OPT_FLAG_FILTERING_PARAM
+static const AVOption channelmap_options[] = {
{ "map", "A comma-separated list of input channel numbers in output order.",
- OFFSET(mapping_str), AV_OPT_TYPE_STRING, .flags = A },
+ OFFSET(mapping_str), AV_OPT_TYPE_STRING, .flags = A|F },
{ "channel_layout", "Output channel layout.",
- OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, .flags = A },
- { NULL },
+ OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, .flags = A|F },
+ { NULL }
};
-static const AVClass channelmap_class = {
- .class_name = "channel map filter",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(channelmap);
static char* split(char *message, char delim) {
char *next = strchr(message, delim);
@@ -91,9 +87,12 @@ static char* split(char *message, char delim) {
static int get_channel_idx(char **map, int *ch, char delim, int max_ch)
{
- char *next = split(*map, delim);
+ char *next;
int len;
int n = 0;
+ if (!*map)
+ return AVERROR(EINVAL);
+ next = split(*map, delim);
if (!next && delim == '-')
return AVERROR(EINVAL);
len = strlen(*map);
@@ -288,16 +287,28 @@ static av_cold int channelmap_init(AVFilterContext *ctx)
static int channelmap_query_formats(AVFilterContext *ctx)
{
ChannelMapContext *s = ctx->priv;
+ AVFilterChannelLayouts *layouts;
AVFilterChannelLayouts *channel_layouts = NULL;
+ int ret;
- ff_add_channel_layout(&channel_layouts, s->output_layout);
-
- ff_set_common_formats(ctx, ff_planar_sample_fmts());
- ff_set_common_samplerates(ctx, ff_all_samplerates());
- ff_channel_layouts_ref(ff_all_channel_layouts(), &ctx->inputs[0]->out_channel_layouts);
- ff_channel_layouts_ref(channel_layouts, &ctx->outputs[0]->in_channel_layouts);
+ layouts = ff_all_channel_counts();
+ if (!layouts) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+ if ((ret = ff_add_channel_layout (&channel_layouts, s->output_layout )) < 0 ||
+ (ret = ff_set_common_formats (ctx , ff_planar_sample_fmts() )) < 0 ||
+ (ret = ff_set_common_samplerates (ctx , ff_all_samplerates() )) < 0 ||
+ (ret = ff_channel_layouts_ref (layouts , &ctx->inputs[0]->out_channel_layouts)) < 0 ||
+ (ret = ff_channel_layouts_ref (channel_layouts , &ctx->outputs[0]->in_channel_layouts)) < 0)
+ goto fail;
return 0;
+fail:
+ if (layouts)
+ av_freep(&layouts->channel_layouts);
+ av_freep(&layouts);
+ return ret;
}
static int channelmap_filter_frame(AVFilterLink *inlink, AVFrame *buf)
@@ -305,7 +316,7 @@ static int channelmap_filter_frame(AVFilterLink *inlink, AVFrame *buf)
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0];
const ChannelMapContext *s = ctx->priv;
- const int nch_in = av_get_channel_layout_nb_channels(inlink->channel_layout);
+ const int nch_in = inlink->channels;
const int nch_out = s->nch;
int ch;
uint8_t *source_planes[MAX_CH];
@@ -316,7 +327,7 @@ static int channelmap_filter_frame(AVFilterLink *inlink, AVFrame *buf)
if (nch_out > nch_in) {
if (nch_out > FF_ARRAY_ELEMS(buf->data)) {
uint8_t **new_extended_data =
- av_mallocz(nch_out * sizeof(*buf->extended_data));
+ av_mallocz_array(nch_out, sizeof(*buf->extended_data));
if (!new_extended_data) {
av_frame_free(&buf);
return AVERROR(ENOMEM);
@@ -343,6 +354,7 @@ static int channelmap_filter_frame(AVFilterLink *inlink, AVFrame *buf)
FFMIN(FF_ARRAY_ELEMS(buf->data), nch_out) * sizeof(buf->data[0]));
buf->channel_layout = outlink->channel_layout;
+ av_frame_set_channels(buf, outlink->channels);
return ff_filter_frame(outlink, buf);
}
@@ -351,7 +363,7 @@ static int channelmap_config_input(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->dst;
ChannelMapContext *s = ctx->priv;
- int nb_channels = av_get_channel_layout_nb_channels(inlink->channel_layout);
+ int nb_channels = inlink->channels;
int i, err = 0;
const char *channel_name;
char layout_name[256];
@@ -366,7 +378,7 @@ static int channelmap_config_input(AVFilterLink *inlink)
if (m->in_channel_idx < 0 || m->in_channel_idx >= nb_channels) {
av_get_channel_layout_string(layout_name, sizeof(layout_name),
- 0, inlink->channel_layout);
+ nb_channels, inlink->channel_layout);
if (m->in_channel) {
channel_name = av_get_channel_name(m->in_channel);
av_log(ctx, AV_LOG_ERROR,
@@ -389,7 +401,8 @@ static const AVFilterPad avfilter_af_channelmap_inputs[] = {
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = channelmap_filter_frame,
- .config_props = channelmap_config_input
+ .config_props = channelmap_config_input,
+ .needs_writable = 1,
},
{ NULL }
};
@@ -409,7 +422,6 @@ AVFilter ff_af_channelmap = {
.query_formats = channelmap_query_formats,
.priv_size = sizeof(ChannelMapContext),
.priv_class = &channelmap_class,
-
.inputs = avfilter_af_channelmap_inputs,
.outputs = avfilter_af_channelmap_outputs,
};
diff --git a/libavfilter/af_channelsplit.c b/libavfilter/af_channelsplit.c
index 5b410fd87c..f50414984a 100644
--- a/libavfilter/af_channelsplit.c
+++ b/libavfilter/af_channelsplit.c
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -42,17 +42,13 @@ typedef struct ChannelSplitContext {
#define OFFSET(x) offsetof(ChannelSplitContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
-static const AVOption options[] = {
- { "channel_layout", "Input channel layout.", OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, { .str = "stereo" }, .flags = A },
- { NULL },
+#define F AV_OPT_FLAG_FILTERING_PARAM
+static const AVOption channelsplit_options[] = {
+ { "channel_layout", "Input channel layout.", OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, { .str = "stereo" }, .flags = A|F },
+ { NULL }
};
-static const AVClass channelsplit_class = {
- .class_name = "channelsplit filter",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(channelsplit);
static av_cold int init(AVFilterContext *ctx)
{
@@ -86,20 +82,23 @@ static int query_formats(AVFilterContext *ctx)
{
ChannelSplitContext *s = ctx->priv;
AVFilterChannelLayouts *in_layouts = NULL;
- int i;
+ int i, ret;
- ff_set_common_formats (ctx, ff_planar_sample_fmts());
- ff_set_common_samplerates(ctx, ff_all_samplerates());
+ if ((ret = ff_set_common_formats(ctx, ff_planar_sample_fmts())) < 0 ||
+ (ret = ff_set_common_samplerates(ctx, ff_all_samplerates())) < 0)
+ return ret;
- ff_add_channel_layout(&in_layouts, s->channel_layout);
- ff_channel_layouts_ref(in_layouts, &ctx->inputs[0]->out_channel_layouts);
+ if ((ret = ff_add_channel_layout(&in_layouts, s->channel_layout)) < 0 ||
+ (ret = ff_channel_layouts_ref(in_layouts, &ctx->inputs[0]->out_channel_layouts)) < 0)
+ return ret;
for (i = 0; i < ctx->nb_outputs; i++) {
AVFilterChannelLayouts *out_layouts = NULL;
uint64_t channel = av_channel_layout_extract_channel(s->channel_layout, i);
- ff_add_channel_layout(&out_layouts, channel);
- ff_channel_layouts_ref(out_layouts, &ctx->outputs[i]->in_channel_layouts);
+ if ((ret = ff_add_channel_layout(&out_layouts, channel)) < 0 ||
+ (ret = ff_channel_layouts_ref(out_layouts, &ctx->outputs[i]->in_channel_layouts)) < 0)
+ return ret;
}
return 0;
@@ -121,6 +120,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
buf_out->data[0] = buf_out->extended_data[0] = buf_out->extended_data[i];
buf_out->channel_layout =
av_channel_layout_extract_channel(buf->channel_layout, i);
+ av_frame_set_channels(buf_out, 1);
ret = ff_filter_frame(ctx->outputs[i], buf_out);
if (ret < 0)
@@ -132,24 +132,21 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
static const AVFilterPad avfilter_af_channelsplit_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_AUDIO,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
},
{ NULL }
};
AVFilter ff_af_channelsplit = {
.name = "channelsplit",
- .description = NULL_IF_CONFIG_SMALL("Split audio into per-channel streams"),
+ .description = NULL_IF_CONFIG_SMALL("Split audio into per-channel streams."),
.priv_size = sizeof(ChannelSplitContext),
.priv_class = &channelsplit_class,
-
.init = init,
.query_formats = query_formats,
-
- .inputs = avfilter_af_channelsplit_inputs,
- .outputs = NULL,
-
- .flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
+ .inputs = avfilter_af_channelsplit_inputs,
+ .outputs = NULL,
+ .flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
};
diff --git a/libavfilter/af_chorus.c b/libavfilter/af_chorus.c
new file mode 100644
index 0000000000..c596164382
--- /dev/null
+++ b/libavfilter/af_chorus.c
@@ -0,0 +1,381 @@
+/*
+ * Copyright (c) 1998 Juergen Mueller And Sundry Contributors
+ * This source code is freely redistributable and may be used for
+ * any purpose. This copyright notice must be maintained.
+ * Juergen Mueller And Sundry Contributors are not responsible for
+ * the consequences of using this software.
+ *
+ * Copyright (c) 2015 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * chorus audio filter
+ */
+
+#include "libavutil/avstring.h"
+#include "libavutil/opt.h"
+#include "audio.h"
+#include "avfilter.h"
+#include "internal.h"
+#include "generate_wave_table.h"
+
+typedef struct ChorusContext {
+ const AVClass *class;
+ float in_gain, out_gain;
+ char *delays_str;
+ char *decays_str;
+ char *speeds_str;
+ char *depths_str;
+ float *delays;
+ float *decays;
+ float *speeds;
+ float *depths;
+ uint8_t **chorusbuf;
+ int **phase;
+ int *length;
+ int32_t **lookup_table;
+ int *counter;
+ int num_chorus;
+ int max_samples;
+ int channels;
+ int modulation;
+ int fade_out;
+ int64_t next_pts;
+} ChorusContext;
+
+#define OFFSET(x) offsetof(ChorusContext, x)
+#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption chorus_options[] = {
+ { "in_gain", "set input gain", OFFSET(in_gain), AV_OPT_TYPE_FLOAT, {.dbl=.4}, 0, 1, A },
+ { "out_gain", "set output gain", OFFSET(out_gain), AV_OPT_TYPE_FLOAT, {.dbl=.4}, 0, 1, A },
+ { "delays", "set delays", OFFSET(delays_str), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, A },
+ { "decays", "set decays", OFFSET(decays_str), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, A },
+ { "speeds", "set speeds", OFFSET(speeds_str), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, A },
+ { "depths", "set depths", OFFSET(depths_str), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, A },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(chorus);
+
+static void count_items(char *item_str, int *nb_items)
+{
+ char *p;
+
+ *nb_items = 1;
+ for (p = item_str; *p; p++) {
+ if (*p == '|')
+ (*nb_items)++;
+ }
+
+}
+
+static void fill_items(char *item_str, int *nb_items, float *items)
+{
+ char *p, *saveptr = NULL;
+ int i, new_nb_items = 0;
+
+ p = item_str;
+ for (i = 0; i < *nb_items; i++) {
+ char *tstr = av_strtok(p, "|", &saveptr);
+ p = NULL;
+ new_nb_items += sscanf(tstr, "%f", &items[i]) == 1;
+ }
+
+ *nb_items = new_nb_items;
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ ChorusContext *s = ctx->priv;
+ int nb_delays, nb_decays, nb_speeds, nb_depths;
+
+ if (!s->delays_str || !s->decays_str || !s->speeds_str || !s->depths_str) {
+ av_log(ctx, AV_LOG_ERROR, "Both delays & decays & speeds & depths must be set.\n");
+ return AVERROR(EINVAL);
+ }
+
+ count_items(s->delays_str, &nb_delays);
+ count_items(s->decays_str, &nb_decays);
+ count_items(s->speeds_str, &nb_speeds);
+ count_items(s->depths_str, &nb_depths);
+
+ s->delays = av_realloc_f(s->delays, nb_delays, sizeof(*s->delays));
+ s->decays = av_realloc_f(s->decays, nb_decays, sizeof(*s->decays));
+ s->speeds = av_realloc_f(s->speeds, nb_speeds, sizeof(*s->speeds));
+ s->depths = av_realloc_f(s->depths, nb_depths, sizeof(*s->depths));
+
+ if (!s->delays || !s->decays || !s->speeds || !s->depths)
+ return AVERROR(ENOMEM);
+
+ fill_items(s->delays_str, &nb_delays, s->delays);
+ fill_items(s->decays_str, &nb_decays, s->decays);
+ fill_items(s->speeds_str, &nb_speeds, s->speeds);
+ fill_items(s->depths_str, &nb_depths, s->depths);
+
+ if (nb_delays != nb_decays && nb_delays != nb_speeds && nb_delays != nb_depths) {
+ av_log(ctx, AV_LOG_ERROR, "Number of delays & decays & speeds & depths given must be same.\n");
+ return AVERROR(EINVAL);
+ }
+
+ s->num_chorus = nb_delays;
+
+ if (s->num_chorus < 1) {
+ av_log(ctx, AV_LOG_ERROR, "At least one delay & decay & speed & depth must be set.\n");
+ return AVERROR(EINVAL);
+ }
+
+ s->length = av_calloc(s->num_chorus, sizeof(*s->length));
+ s->lookup_table = av_calloc(s->num_chorus, sizeof(*s->lookup_table));
+
+ if (!s->length || !s->lookup_table)
+ return AVERROR(ENOMEM);
+
+ s->next_pts = AV_NOPTS_VALUE;
+
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *formats;
+ AVFilterChannelLayouts *layouts;
+ static const enum AVSampleFormat sample_fmts[] = {
+ AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_NONE
+ };
+ int ret;
+
+ layouts = ff_all_channel_counts();
+ if (!layouts)
+ return AVERROR(ENOMEM);
+ ret = ff_set_common_channel_layouts(ctx, layouts);
+ if (ret < 0)
+ return ret;
+
+ formats = ff_make_format_list(sample_fmts);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ret = ff_set_common_formats(ctx, formats);
+ if (ret < 0)
+ return ret;
+
+ formats = ff_all_samplerates();
+ if (!formats)
+ return AVERROR(ENOMEM);
+ return ff_set_common_samplerates(ctx, formats);
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ ChorusContext *s = ctx->priv;
+ float sum_in_volume = 1.0;
+ int n;
+
+ s->channels = outlink->channels;
+
+ for (n = 0; n < s->num_chorus; n++) {
+ int samples = (int) ((s->delays[n] + s->depths[n]) * outlink->sample_rate / 1000.0);
+ int depth_samples = (int) (s->depths[n] * outlink->sample_rate / 1000.0);
+
+ s->length[n] = outlink->sample_rate / s->speeds[n];
+
+ s->lookup_table[n] = av_malloc(sizeof(int32_t) * s->length[n]);
+ if (!s->lookup_table[n])
+ return AVERROR(ENOMEM);
+
+ ff_generate_wave_table(WAVE_SIN, AV_SAMPLE_FMT_S32, s->lookup_table[n],
+ s->length[n], 0., depth_samples, 0);
+ s->max_samples = FFMAX(s->max_samples, samples);
+ }
+
+ for (n = 0; n < s->num_chorus; n++)
+ sum_in_volume += s->decays[n];
+
+ if (s->in_gain * (sum_in_volume) > 1.0 / s->out_gain)
+ av_log(ctx, AV_LOG_WARNING, "output gain can cause saturation or clipping of output\n");
+
+ s->counter = av_calloc(outlink->channels, sizeof(*s->counter));
+ if (!s->counter)
+ return AVERROR(ENOMEM);
+
+ s->phase = av_calloc(outlink->channels, sizeof(*s->phase));
+ if (!s->phase)
+ return AVERROR(ENOMEM);
+
+ for (n = 0; n < outlink->channels; n++) {
+ s->phase[n] = av_calloc(s->num_chorus, sizeof(int));
+ if (!s->phase[n])
+ return AVERROR(ENOMEM);
+ }
+
+ s->fade_out = s->max_samples;
+
+ return av_samples_alloc_array_and_samples(&s->chorusbuf, NULL,
+ outlink->channels,
+ s->max_samples,
+ outlink->format, 0);
+}
+
+#define MOD(a, b) (((a) >= (b)) ? (a) - (b) : (a))
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
+{
+ AVFilterContext *ctx = inlink->dst;
+ ChorusContext *s = ctx->priv;
+ AVFrame *out_frame;
+ int c, i, n;
+
+ if (av_frame_is_writable(frame)) {
+ out_frame = frame;
+ } else {
+ out_frame = ff_get_audio_buffer(inlink, frame->nb_samples);
+ if (!out_frame) {
+ av_frame_free(&frame);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out_frame, frame);
+ }
+
+ for (c = 0; c < inlink->channels; c++) {
+ const float *src = (const float *)frame->extended_data[c];
+ float *dst = (float *)out_frame->extended_data[c];
+ float *chorusbuf = (float *)s->chorusbuf[c];
+ int *phase = s->phase[c];
+
+ for (i = 0; i < frame->nb_samples; i++) {
+ float out, in = src[i];
+
+ out = in * s->in_gain;
+
+ for (n = 0; n < s->num_chorus; n++) {
+ out += chorusbuf[MOD(s->max_samples + s->counter[c] -
+ s->lookup_table[n][phase[n]],
+ s->max_samples)] * s->decays[n];
+ phase[n] = MOD(phase[n] + 1, s->length[n]);
+ }
+
+ out *= s->out_gain;
+
+ dst[i] = out;
+
+ chorusbuf[s->counter[c]] = in;
+ s->counter[c] = MOD(s->counter[c] + 1, s->max_samples);
+ }
+ }
+
+ s->next_pts = frame->pts + av_rescale_q(frame->nb_samples, (AVRational){1, inlink->sample_rate}, inlink->time_base);
+
+ if (frame != out_frame)
+ av_frame_free(&frame);
+
+ return ff_filter_frame(ctx->outputs[0], out_frame);
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ ChorusContext *s = ctx->priv;
+ int ret;
+
+ ret = ff_request_frame(ctx->inputs[0]);
+
+ if (ret == AVERROR_EOF && !ctx->is_disabled && s->fade_out) {
+ int nb_samples = FFMIN(s->fade_out, 2048);
+ AVFrame *frame;
+
+ frame = ff_get_audio_buffer(outlink, nb_samples);
+ if (!frame)
+ return AVERROR(ENOMEM);
+ s->fade_out -= nb_samples;
+
+ av_samples_set_silence(frame->extended_data, 0,
+ frame->nb_samples,
+ outlink->channels,
+ frame->format);
+
+ frame->pts = s->next_pts;
+ if (s->next_pts != AV_NOPTS_VALUE)
+ s->next_pts += av_rescale_q(nb_samples, (AVRational){1, outlink->sample_rate}, outlink->time_base);
+
+ ret = filter_frame(ctx->inputs[0], frame);
+ }
+
+ return ret;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ ChorusContext *s = ctx->priv;
+ int n;
+
+ av_freep(&s->delays);
+ av_freep(&s->decays);
+ av_freep(&s->speeds);
+ av_freep(&s->depths);
+
+ if (s->chorusbuf)
+ av_freep(&s->chorusbuf[0]);
+ av_freep(&s->chorusbuf);
+
+ if (s->phase)
+ for (n = 0; n < s->channels; n++)
+ av_freep(&s->phase[n]);
+ av_freep(&s->phase);
+
+ av_freep(&s->counter);
+ av_freep(&s->length);
+
+ if (s->lookup_table)
+ for (n = 0; n < s->num_chorus; n++)
+ av_freep(&s->lookup_table[n]);
+ av_freep(&s->lookup_table);
+}
+
+static const AVFilterPad chorus_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad chorus_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .request_frame = request_frame,
+ .config_props = config_output,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_chorus = {
+ .name = "chorus",
+ .description = NULL_IF_CONFIG_SMALL("Add a chorus effect to the audio."),
+ .query_formats = query_formats,
+ .priv_size = sizeof(ChorusContext),
+ .priv_class = &chorus_class,
+ .init = init,
+ .uninit = uninit,
+ .inputs = chorus_inputs,
+ .outputs = chorus_outputs,
+};
diff --git a/libavfilter/af_compand.c b/libavfilter/af_compand.c
index f21c861e06..0bb719fae5 100644
--- a/libavfilter/af_compand.c
+++ b/libavfilter/af_compand.c
@@ -5,20 +5,20 @@
* Copyright (c) 2013 Paul B Mahol
* Copyright (c) 2014 Andrew Kelley
*
- * This file is part of libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -27,39 +27,34 @@
* audio compand filter
*/
-#include <string.h>
-
+#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
-#include "libavutil/channel_layout.h"
-#include "libavutil/common.h"
-#include "libavutil/mathematics.h"
-#include "libavutil/mem.h"
+#include "libavutil/ffmath.h"
#include "libavutil/opt.h"
+#include "libavutil/samplefmt.h"
#include "audio.h"
#include "avfilter.h"
-#include "formats.h"
#include "internal.h"
typedef struct ChanParam {
- float attack;
- float decay;
- float volume;
+ double attack;
+ double decay;
+ double volume;
} ChanParam;
typedef struct CompandSegment {
- float x, y;
- float a, b;
+ double x, y;
+ double a, b;
} CompandSegment;
typedef struct CompandContext {
const AVClass *class;
- int nb_channels;
int nb_segments;
char *attacks, *decays, *points;
CompandSegment *segments;
ChanParam *channels;
- float in_min_lin;
- float out_min_lin;
+ double in_min_lin;
+ double out_min_lin;
double curve_dB;
double gain_dB;
double initial_volume;
@@ -71,12 +66,10 @@ typedef struct CompandContext {
int64_t pts;
int (*compand)(AVFilterContext *ctx, AVFrame *frame);
- /* set by filter_frame() to signal an output frame to request_frame() */
- int got_output;
} CompandContext;
#define OFFSET(x) offsetof(CompandContext, x)
-#define A AV_OPT_FLAG_AUDIO_PARAM
+#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
static const AVOption compand_options[] = {
{ "attacks", "set time over which increase of volume is determined", OFFSET(attacks), AV_OPT_TYPE_STRING, { .str = "0.3" }, 0, 0, A },
@@ -89,12 +82,7 @@ static const AVOption compand_options[] = {
{ NULL }
};
-static const AVClass compand_class = {
- .class_name = "compand filter",
- .item_name = av_default_item_name,
- .option = compand_options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(compand);
static av_cold int init(AVFilterContext *ctx)
{
@@ -117,26 +105,29 @@ static int query_formats(AVFilterContext *ctx)
AVFilterChannelLayouts *layouts;
AVFilterFormats *formats;
static const enum AVSampleFormat sample_fmts[] = {
- AV_SAMPLE_FMT_FLTP,
+ AV_SAMPLE_FMT_DBLP,
AV_SAMPLE_FMT_NONE
};
+ int ret;
- layouts = ff_all_channel_layouts();
+ layouts = ff_all_channel_counts();
if (!layouts)
return AVERROR(ENOMEM);
- ff_set_common_channel_layouts(ctx, layouts);
+ ret = ff_set_common_channel_layouts(ctx, layouts);
+ if (ret < 0)
+ return ret;
formats = ff_make_format_list(sample_fmts);
if (!formats)
return AVERROR(ENOMEM);
- ff_set_common_formats(ctx, formats);
+ ret = ff_set_common_formats(ctx, formats);
+ if (ret < 0)
+ return ret;
formats = ff_all_samplerates();
if (!formats)
return AVERROR(ENOMEM);
- ff_set_common_samplerates(ctx, formats);
-
- return 0;
+ return ff_set_common_samplerates(ctx, formats);
}
static void count_items(char *item_str, int *nb_items)
@@ -145,14 +136,14 @@ static void count_items(char *item_str, int *nb_items)
*nb_items = 1;
for (p = item_str; *p; p++) {
- if (*p == '|')
+ if (*p == ' ' || *p == '|')
(*nb_items)++;
}
}
-static void update_volume(ChanParam *cp, float in)
+static void update_volume(ChanParam *cp, double in)
{
- float delta = in - cp->volume;
+ double delta = in - cp->volume;
if (delta > 0.0)
cp->volume += delta * cp->attack;
@@ -160,16 +151,16 @@ static void update_volume(ChanParam *cp, float in)
cp->volume += delta * cp->decay;
}
-static float get_volume(CompandContext *s, float in_lin)
+static double get_volume(CompandContext *s, double in_lin)
{
CompandSegment *cs;
- float in_log, out_log;
+ double in_log, out_log;
int i;
if (in_lin < s->in_min_lin)
return s->out_min_lin;
- in_log = logf(in_lin);
+ in_log = log(in_lin);
for (i = 1; i < s->nb_segments; i++)
if (in_log <= s->segments[i].x)
@@ -178,14 +169,14 @@ static float get_volume(CompandContext *s, float in_lin)
in_log -= cs->x;
out_log = cs->y + in_log * (cs->a * in_log + cs->b);
- return expf(out_log);
+ return exp(out_log);
}
static int compand_nodelay(AVFilterContext *ctx, AVFrame *frame)
{
CompandContext *s = ctx->priv;
AVFilterLink *inlink = ctx->inputs[0];
- const int channels = s->nb_channels;
+ const int channels = inlink->channels;
const int nb_samples = frame->nb_samples;
AVFrame *out_frame;
int chan, i;
@@ -208,14 +199,14 @@ static int compand_nodelay(AVFilterContext *ctx, AVFrame *frame)
}
for (chan = 0; chan < channels; chan++) {
- const float *src = (float *)frame->extended_data[chan];
- float *dst = (float *)out_frame->extended_data[chan];
+ const double *src = (double *)frame->extended_data[chan];
+ double *dst = (double *)out_frame->extended_data[chan];
ChanParam *cp = &s->channels[chan];
for (i = 0; i < nb_samples; i++) {
update_volume(cp, fabs(src[i]));
- dst[i] = av_clipf(src[i] * get_volume(s, cp->volume), -1.0f, 1.0f);
+ dst[i] = src[i] * get_volume(s, cp->volume);
}
}
@@ -231,9 +222,9 @@ static int compand_delay(AVFilterContext *ctx, AVFrame *frame)
{
CompandContext *s = ctx->priv;
AVFilterLink *inlink = ctx->inputs[0];
- const int channels = s->nb_channels;
+ const int channels = inlink->channels;
const int nb_samples = frame->nb_samples;
- int chan, i, dindex = 0, oindex, count = 0;
+ int chan, i, av_uninit(dindex), oindex, av_uninit(count);
AVFrame *out_frame = NULL;
int err;
@@ -241,17 +232,19 @@ static int compand_delay(AVFilterContext *ctx, AVFrame *frame)
s->pts = (frame->pts == AV_NOPTS_VALUE) ? 0 : frame->pts;
}
+ av_assert1(channels > 0); /* would corrupt delay_count and delay_index */
+
for (chan = 0; chan < channels; chan++) {
AVFrame *delay_frame = s->delay_frame;
- const float *src = (float *)frame->extended_data[chan];
- float *dbuf = (float *)delay_frame->extended_data[chan];
+ const double *src = (double *)frame->extended_data[chan];
+ double *dbuf = (double *)delay_frame->extended_data[chan];
ChanParam *cp = &s->channels[chan];
- float *dst;
+ double *dst;
count = s->delay_count;
dindex = s->delay_index;
for (i = 0, oindex = 0; i < nb_samples; i++) {
- const float in = src[i];
+ const double in = src[i];
update_volume(cp, fabs(in));
if (count >= s->delay_samples) {
@@ -273,9 +266,8 @@ static int compand_delay(AVFilterContext *ctx, AVFrame *frame)
inlink->time_base);
}
- dst = (float *)out_frame->extended_data[chan];
- dst[oindex++] = av_clipf(dbuf[dindex] *
- get_volume(s, cp->volume), -1.0f, 1.0f);
+ dst = (double *)out_frame->extended_data[chan];
+ dst[oindex++] = dbuf[dindex] * get_volume(s, cp->volume);
} else {
count++;
}
@@ -292,8 +284,6 @@ static int compand_delay(AVFilterContext *ctx, AVFrame *frame)
if (out_frame) {
err = ff_filter_frame(ctx->outputs[0], out_frame);
- if (err >= 0)
- s->got_output = 1;
return err;
}
@@ -304,7 +294,7 @@ static int compand_drain(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
CompandContext *s = ctx->priv;
- const int channels = s->nb_channels;
+ const int channels = outlink->channels;
AVFrame *frame = NULL;
int chan, i, dindex;
@@ -316,16 +306,16 @@ static int compand_drain(AVFilterLink *outlink)
s->pts += av_rescale_q(frame->nb_samples,
(AVRational){ 1, outlink->sample_rate }, outlink->time_base);
+ av_assert0(channels > 0);
for (chan = 0; chan < channels; chan++) {
AVFrame *delay_frame = s->delay_frame;
- float *dbuf = (float *)delay_frame->extended_data[chan];
- float *dst = (float *)frame->extended_data[chan];
+ double *dbuf = (double *)delay_frame->extended_data[chan];
+ double *dst = (double *)frame->extended_data[chan];
ChanParam *cp = &s->channels[chan];
dindex = s->delay_index;
for (i = 0; i < frame->nb_samples; i++) {
- dst[i] = av_clipf(dbuf[dindex] * get_volume(s, cp->volume),
- -1.0f, 1.0f);
+ dst[i] = dbuf[dindex] * get_volume(s, cp->volume);
dindex = MOD(dindex + 1, s->delay_samples);
}
}
@@ -341,9 +331,8 @@ static int config_output(AVFilterLink *outlink)
CompandContext *s = ctx->priv;
const int sample_rate = outlink->sample_rate;
double radius = s->curve_dB * M_LN10 / 20.0;
- const char *p;
- const int channels =
- av_get_channel_layout_nb_channels(outlink->channel_layout);
+ char *p, *saveptr = NULL;
+ const int channels = outlink->channels;
int nb_attacks, nb_decays, nb_points;
int new_nb_items, num;
int i;
@@ -367,7 +356,6 @@ static int config_output(AVFilterLink *outlink)
uninit(ctx);
- s->nb_channels = channels;
s->channels = av_mallocz_array(channels, sizeof(*s->channels));
s->nb_segments = (nb_points + 4) * 2;
s->segments = av_mallocz_array(s->nb_segments, sizeof(*s->segments));
@@ -379,34 +367,25 @@ static int config_output(AVFilterLink *outlink)
p = s->attacks;
for (i = 0, new_nb_items = 0; i < nb_attacks; i++) {
- char *tstr = av_get_token(&p, "|");
- if (!tstr)
- return AVERROR(ENOMEM);
-
- new_nb_items += sscanf(tstr, "%f", &s->channels[i].attack) == 1;
- av_freep(&tstr);
+ char *tstr = av_strtok(p, " |", &saveptr);
+ p = NULL;
+ new_nb_items += sscanf(tstr, "%lf", &s->channels[i].attack) == 1;
if (s->channels[i].attack < 0) {
uninit(ctx);
return AVERROR(EINVAL);
}
- if (*p)
- p++;
}
nb_attacks = new_nb_items;
p = s->decays;
for (i = 0, new_nb_items = 0; i < nb_decays; i++) {
- char *tstr = av_get_token(&p, "|");
- if (!tstr)
- return AVERROR(ENOMEM);
- new_nb_items += sscanf(tstr, "%f", &s->channels[i].decay) == 1;
- av_freep(&tstr);
+ char *tstr = av_strtok(p, " |", &saveptr);
+ p = NULL;
+ new_nb_items += sscanf(tstr, "%lf", &s->channels[i].decay) == 1;
if (s->channels[i].decay < 0) {
uninit(ctx);
return AVERROR(EINVAL);
}
- if (*p)
- p++;
}
nb_decays = new_nb_items;
@@ -418,16 +397,17 @@ static int config_output(AVFilterLink *outlink)
return AVERROR(EINVAL);
}
+ for (i = nb_decays; i < channels; i++) {
+ s->channels[i].attack = s->channels[nb_decays - 1].attack;
+ s->channels[i].decay = s->channels[nb_decays - 1].decay;
+ }
+
#define S(x) s->segments[2 * ((x) + 1)]
p = s->points;
for (i = 0, new_nb_items = 0; i < nb_points; i++) {
- char *tstr = av_get_token(&p, "|");
- if (!tstr)
- return AVERROR(ENOMEM);
-
- err = sscanf(tstr, "%f/%f", &S(i).x, &S(i).y);
- av_freep(&tstr);
- if (err != 2) {
+ char *tstr = av_strtok(p, " |", &saveptr);
+ p = NULL;
+ if (sscanf(tstr, "%lf/%lf", &S(i).x, &S(i).y) != 2) {
av_log(ctx, AV_LOG_ERROR,
"Invalid and/or missing input/output value.\n");
uninit(ctx);
@@ -442,8 +422,6 @@ static int config_output(AVFilterLink *outlink)
S(i).y -= S(i).x;
av_log(ctx, AV_LOG_DEBUG, "%d: x=%f y=%f\n", i, S(i).x, S(i).y);
new_nb_items++;
- if (*p)
- p++;
}
num = new_nb_items;
@@ -464,7 +442,6 @@ static int config_output(AVFilterLink *outlink)
double g2 = (S(i - 0).y - S(i - 1).y) * (S(i - 1).x - S(i - 2).x);
int j;
- /* here we purposefully lose precision so that we can compare floats */
if (fabs(g1 - g2))
continue;
num--;
@@ -472,14 +449,14 @@ static int config_output(AVFilterLink *outlink)
S(j) = S(j + 1);
}
- for (i = 0; !i || s->segments[i - 2].x; i += 2) {
+ for (i = 0; i < s->nb_segments; i += 2) {
s->segments[i].y += s->gain_dB;
s->segments[i].x *= M_LN10 / 20;
s->segments[i].y *= M_LN10 / 20;
}
#define L(x) s->segments[i - (x)]
- for (i = 4; s->segments[i - 2].x; i += 2) {
+ for (i = 4; i < s->nb_segments; i += 2) {
double x, y, cx, cy, in1, in2, out1, out2, theta, len, r;
L(4).a = 0;
@@ -489,13 +466,13 @@ static int config_output(AVFilterLink *outlink)
L(2).b = (L(0).y - L(2).y) / (L(0).x - L(2).x);
theta = atan2(L(2).y - L(4).y, L(2).x - L(4).x);
- len = sqrt(pow(L(2).x - L(4).x, 2.) + pow(L(2).y - L(4).y, 2.));
+ len = hypot(L(2).x - L(4).x, L(2).y - L(4).y);
r = FFMIN(radius, len);
L(3).x = L(2).x - r * cos(theta);
L(3).y = L(2).y - r * sin(theta);
theta = atan2(L(0).y - L(2).y, L(0).x - L(2).x);
- len = sqrt(pow(L(0).x - L(2).x, 2.) + pow(L(0).y - L(2).y, 2.));
+ len = hypot(L(0).x - L(2).x, L(0).y - L(2).y);
r = FFMIN(radius, len / 2);
x = L(2).x + r * cos(theta);
y = L(2).y + r * sin(theta);
@@ -530,7 +507,7 @@ static int config_output(AVFilterLink *outlink)
cp->decay = 1.0 - exp(-1.0 / (sample_rate * cp->decay));
else
cp->decay = 1.0;
- cp->volume = pow(10.0, s->initial_volume / 20);
+ cp->volume = ff_exp10(s->initial_volume / 20);
}
s->delay_samples = s->delay * sample_rate;
@@ -571,11 +548,9 @@ static int request_frame(AVFilterLink *outlink)
CompandContext *s = ctx->priv;
int ret = 0;
- s->got_output = 0;
- while (ret >= 0 && !s->got_output)
- ret = ff_request_frame(ctx->inputs[0]);
+ ret = ff_request_frame(ctx->inputs[0]);
- if (ret == AVERROR_EOF && s->delay_count)
+ if (ret == AVERROR_EOF && !ctx->is_disabled && s->delay_count)
ret = compand_drain(outlink);
return ret;
diff --git a/libavfilter/af_compensationdelay.c b/libavfilter/af_compensationdelay.c
new file mode 100644
index 0000000000..d5a3484317
--- /dev/null
+++ b/libavfilter/af_compensationdelay.c
@@ -0,0 +1,198 @@
+/*
+ * Copyright (c) 2001-2010 Krzysztof Foltman, Markus Schmidt, Thor Harald Johansen, Vladimir Sadovnikov and others
+ * Copyright (c) 2015 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/opt.h"
+#include "libavutil/samplefmt.h"
+#include "avfilter.h"
+#include "audio.h"
+#include "internal.h"
+
+typedef struct CompensationDelayContext {
+ const AVClass *class;
+ int distance_mm;
+ int distance_cm;
+ int distance_m;
+ double dry, wet;
+ int temp;
+
+ unsigned delay;
+ unsigned w_ptr;
+ unsigned buf_size;
+ AVFrame *delay_frame;
+} CompensationDelayContext;
+
+#define OFFSET(x) offsetof(CompensationDelayContext, x)
+#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption compensationdelay_options[] = {
+ { "mm", "set mm distance", OFFSET(distance_mm), AV_OPT_TYPE_INT, {.i64=0}, 0, 10, A },
+ { "cm", "set cm distance", OFFSET(distance_cm), AV_OPT_TYPE_INT, {.i64=0}, 0, 100, A },
+ { "m", "set meter distance", OFFSET(distance_m), AV_OPT_TYPE_INT, {.i64=0}, 0, 100, A },
+ { "dry", "set dry amount", OFFSET(dry), AV_OPT_TYPE_DOUBLE, {.dbl=0}, 0, 1, A },
+ { "wet", "set wet amount", OFFSET(wet), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, A },
+ { "temp", "set temperature °C", OFFSET(temp), AV_OPT_TYPE_INT, {.i64=20}, -50, 50, A },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(compensationdelay);
+
+// The maximum distance for options
+#define COMP_DELAY_MAX_DISTANCE (100.0 * 100.0 + 100.0 * 1.0 + 1.0)
+// The actual speed of sound in normal conditions
+#define COMP_DELAY_SOUND_SPEED_KM_H(temp) 1.85325 * (643.95 * sqrt(((temp + 273.15) / 273.15)))
+#define COMP_DELAY_SOUND_SPEED_CM_S(temp) (COMP_DELAY_SOUND_SPEED_KM_H(temp) * (1000.0 * 100.0) /* cm/km */ / (60.0 * 60.0) /* s/h */)
+#define COMP_DELAY_SOUND_FRONT_DELAY(temp) (1.0 / COMP_DELAY_SOUND_SPEED_CM_S(temp))
+// The maximum delay may be reached by this filter
+#define COMP_DELAY_MAX_DELAY (COMP_DELAY_MAX_DISTANCE * COMP_DELAY_SOUND_FRONT_DELAY(50))
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterChannelLayouts *layouts;
+ AVFilterFormats *formats;
+ static const enum AVSampleFormat sample_fmts[] = {
+ AV_SAMPLE_FMT_DBLP,
+ AV_SAMPLE_FMT_NONE
+ };
+ int ret;
+
+ layouts = ff_all_channel_counts();
+ if (!layouts)
+ return AVERROR(ENOMEM);
+ ret = ff_set_common_channel_layouts(ctx, layouts);
+ if (ret < 0)
+ return ret;
+
+ formats = ff_make_format_list(sample_fmts);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ret = ff_set_common_formats(ctx, formats);
+ if (ret < 0)
+ return ret;
+
+ formats = ff_all_samplerates();
+ if (!formats)
+ return AVERROR(ENOMEM);
+ return ff_set_common_samplerates(ctx, formats);
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ CompensationDelayContext *s = ctx->priv;
+ unsigned min_size, new_size = 1;
+
+ s->delay = (s->distance_m * 100. + s->distance_cm * 1. + s->distance_mm * .1) *
+ COMP_DELAY_SOUND_FRONT_DELAY(s->temp) * inlink->sample_rate;
+ min_size = inlink->sample_rate * COMP_DELAY_MAX_DELAY;
+
+ while (new_size < min_size)
+ new_size <<= 1;
+
+ s->delay_frame = av_frame_alloc();
+ if (!s->delay_frame)
+ return AVERROR(ENOMEM);
+
+ s->buf_size = new_size;
+ s->delay_frame->format = inlink->format;
+ s->delay_frame->nb_samples = new_size;
+ s->delay_frame->channel_layout = inlink->channel_layout;
+
+ return av_frame_get_buffer(s->delay_frame, 32);
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ CompensationDelayContext *s = ctx->priv;
+ const unsigned b_mask = s->buf_size - 1;
+ const unsigned buf_size = s->buf_size;
+ const unsigned delay = s->delay;
+ const double dry = s->dry;
+ const double wet = s->wet;
+ unsigned r_ptr, w_ptr;
+ AVFrame *out;
+ int n, ch;
+
+ out = ff_get_audio_buffer(inlink, in->nb_samples);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+
+ for (ch = 0; ch < inlink->channels; ch++) {
+ const double *src = (const double *)in->extended_data[ch];
+ double *dst = (double *)out->extended_data[ch];
+ double *buffer = (double *)s->delay_frame->extended_data[ch];
+
+ w_ptr = s->w_ptr;
+ r_ptr = (w_ptr + buf_size - delay) & b_mask;
+
+ for (n = 0; n < in->nb_samples; n++) {
+ const double sample = src[n];
+
+ buffer[w_ptr] = sample;
+ dst[n] = dry * sample + wet * buffer[r_ptr];
+ w_ptr = (w_ptr + 1) & b_mask;
+ r_ptr = (r_ptr + 1) & b_mask;
+ }
+ }
+ s->w_ptr = w_ptr;
+
+ av_frame_free(&in);
+ return ff_filter_frame(ctx->outputs[0], out);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ CompensationDelayContext *s = ctx->priv;
+
+ av_frame_free(&s->delay_frame);
+}
+
+static const AVFilterPad compensationdelay_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad compensationdelay_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_compensationdelay = {
+ .name = "compensationdelay",
+ .description = NULL_IF_CONFIG_SMALL("Audio Compensation Delay Line."),
+ .query_formats = query_formats,
+ .priv_size = sizeof(CompensationDelayContext),
+ .priv_class = &compensationdelay_class,
+ .uninit = uninit,
+ .inputs = compensationdelay_inputs,
+ .outputs = compensationdelay_outputs,
+};
diff --git a/libavfilter/af_crystalizer.c b/libavfilter/af_crystalizer.c
new file mode 100644
index 0000000000..086549a93d
--- /dev/null
+++ b/libavfilter/af_crystalizer.c
@@ -0,0 +1,153 @@
+/*
+ * Copyright (c) 2016 The FFmpeg Project
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/channel_layout.h"
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#include "audio.h"
+#include "formats.h"
+
+typedef struct CrystalizerContext {
+ const AVClass *class;
+ float mult;
+ int clip;
+ AVFrame *prev;
+} CrystalizerContext;
+
+#define OFFSET(x) offsetof(CrystalizerContext, x)
+#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption crystalizer_options[] = {
+ { "i", "set intensity", OFFSET(mult), AV_OPT_TYPE_FLOAT, {.dbl=2.0}, 0, 10, A },
+ { "c", "enable clipping", OFFSET(clip), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, A },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(crystalizer);
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *formats = NULL;
+ AVFilterChannelLayouts *layouts = NULL;
+ int ret;
+
+ if ((ret = ff_add_format (&formats, AV_SAMPLE_FMT_FLT )) < 0 ||
+ (ret = ff_set_common_formats(ctx , formats )) < 0)
+ return ret;
+
+ layouts = ff_all_channel_counts();
+ if (!layouts)
+ return AVERROR(ENOMEM);
+
+ ret = ff_set_common_channel_layouts(ctx, layouts);
+ if (ret < 0)
+ return ret;
+
+ formats = ff_all_samplerates();
+ return ff_set_common_samplerates(ctx, formats);
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ CrystalizerContext *s = ctx->priv;
+ const float *src = (const float *)in->data[0];
+ const float mult = s->mult;
+ AVFrame *out;
+ float *dst, *prv;
+ int n, c;
+
+ if (!s->prev) {
+ s->prev = ff_get_audio_buffer(inlink, 1);
+ if (!s->prev) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ }
+
+ if (av_frame_is_writable(in)) {
+ out = in;
+ } else {
+ out = ff_get_audio_buffer(inlink, in->nb_samples);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+ }
+
+ dst = (float *)out->data[0];
+ prv = (float *)s->prev->data[0];
+
+ for (n = 0; n < in->nb_samples; n++) {
+ for (c = 0; c < in->channels; c++) {
+ float current = src[c];
+
+ dst[c] = current + (current - prv[c]) * mult;
+ prv[c] = current;
+ if (s->clip) {
+ dst[c] = av_clipf(dst[c], -1, 1);
+ }
+ }
+ dst += c;
+ src += c;
+ }
+
+ if (out != in)
+ av_frame_free(&in);
+
+ return ff_filter_frame(outlink, out);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ CrystalizerContext *s = ctx->priv;
+
+ av_frame_free(&s->prev);
+}
+
+static const AVFilterPad inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_crystalizer = {
+ .name = "crystalizer",
+ .description = NULL_IF_CONFIG_SMALL("Simple expand audio dynamic range filter."),
+ .query_formats = query_formats,
+ .priv_size = sizeof(CrystalizerContext),
+ .priv_class = &crystalizer_class,
+ .uninit = uninit,
+ .inputs = inputs,
+ .outputs = outputs,
+};
diff --git a/libavfilter/af_dcshift.c b/libavfilter/af_dcshift.c
new file mode 100644
index 0000000000..7332c12b19
--- /dev/null
+++ b/libavfilter/af_dcshift.c
@@ -0,0 +1,167 @@
+/*
+ * Copyright (c) 2000 Chris Ausbrooks <weed@bucket.pp.ualr.edu>
+ * Copyright (c) 2000 Fabien COELHO <fabien@coelho.net>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/opt.h"
+#include "libavutil/samplefmt.h"
+#include "avfilter.h"
+#include "audio.h"
+#include "internal.h"
+
+typedef struct DCShiftContext {
+ const AVClass *class;
+ double dcshift;
+ double limiterthreshhold;
+ double limitergain;
+} DCShiftContext;
+
+#define OFFSET(x) offsetof(DCShiftContext, x)
+#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption dcshift_options[] = {
+ { "shift", "set DC shift", OFFSET(dcshift), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -1, 1, A },
+ { "limitergain", "set limiter gain", OFFSET(limitergain), AV_OPT_TYPE_DOUBLE, {.dbl=0}, 0, 1, A },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(dcshift);
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ DCShiftContext *s = ctx->priv;
+
+ s->limiterthreshhold = INT32_MAX * (1.0 - (fabs(s->dcshift) - s->limitergain));
+
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterChannelLayouts *layouts;
+ AVFilterFormats *formats;
+ static const enum AVSampleFormat sample_fmts[] = {
+ AV_SAMPLE_FMT_S32P, AV_SAMPLE_FMT_NONE
+ };
+ int ret;
+
+ layouts = ff_all_channel_counts();
+ if (!layouts)
+ return AVERROR(ENOMEM);
+ ret = ff_set_common_channel_layouts(ctx, layouts);
+ if (ret < 0)
+ return ret;
+
+ formats = ff_make_format_list(sample_fmts);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ret = ff_set_common_formats(ctx, formats);
+ if (ret < 0)
+ return ret;
+
+ formats = ff_all_samplerates();
+ if (!formats)
+ return AVERROR(ENOMEM);
+ return ff_set_common_samplerates(ctx, formats);
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFrame *out = ff_get_audio_buffer(inlink, in->nb_samples);
+ DCShiftContext *s = ctx->priv;
+ int i, j;
+ double dcshift = s->dcshift;
+
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+
+ if (s->limitergain > 0) {
+ for (i = 0; i < inlink->channels; i++) {
+ const int32_t *src = (int32_t *)in->extended_data[i];
+ int32_t *dst = (int32_t *)out->extended_data[i];
+
+ for (j = 0; j < in->nb_samples; j++) {
+ double d;
+
+ d = src[j];
+
+ if (d > s->limiterthreshhold && dcshift > 0) {
+ d = (d - s->limiterthreshhold) * s->limitergain /
+ (INT32_MAX - s->limiterthreshhold) +
+ s->limiterthreshhold + dcshift;
+ } else if (d < -s->limiterthreshhold && dcshift < 0) {
+ d = (d + s->limiterthreshhold) * s->limitergain /
+ (INT32_MAX - s->limiterthreshhold) -
+ s->limiterthreshhold + dcshift;
+ } else {
+ d = dcshift * INT32_MAX + d;
+ }
+
+ dst[j] = av_clipl_int32(d);
+ }
+ }
+ } else {
+ for (i = 0; i < inlink->channels; i++) {
+ const int32_t *src = (int32_t *)in->extended_data[i];
+ int32_t *dst = (int32_t *)out->extended_data[i];
+
+ for (j = 0; j < in->nb_samples; j++) {
+ double d = dcshift * (INT32_MAX + 1.) + src[j];
+
+ dst[j] = av_clipl_int32(d);
+ }
+ }
+ }
+
+ av_frame_free(&in);
+ return ff_filter_frame(outlink, out);
+}
+static const AVFilterPad dcshift_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad dcshift_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_dcshift = {
+ .name = "dcshift",
+ .description = NULL_IF_CONFIG_SMALL("Apply a DC shift to the audio."),
+ .query_formats = query_formats,
+ .priv_size = sizeof(DCShiftContext),
+ .priv_class = &dcshift_class,
+ .init = init,
+ .inputs = dcshift_inputs,
+ .outputs = dcshift_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+};
diff --git a/libavfilter/af_dynaudnorm.c b/libavfilter/af_dynaudnorm.c
new file mode 100644
index 0000000000..ddbef26ab5
--- /dev/null
+++ b/libavfilter/af_dynaudnorm.c
@@ -0,0 +1,754 @@
+/*
+ * Dynamic Audio Normalizer
+ * Copyright (c) 2015 LoRd_MuldeR <mulder2@gmx.de>. Some rights reserved.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Dynamic Audio Normalizer
+ */
+
+#include <float.h>
+
+#include "libavutil/avassert.h"
+#include "libavutil/opt.h"
+
+#define FF_BUFQUEUE_SIZE 302
+#include "libavfilter/bufferqueue.h"
+
+#include "audio.h"
+#include "avfilter.h"
+#include "internal.h"
+
+typedef struct cqueue {
+ double *elements;
+ int size;
+ int nb_elements;
+ int first;
+} cqueue;
+
+typedef struct DynamicAudioNormalizerContext {
+ const AVClass *class;
+
+ struct FFBufQueue queue;
+
+ int frame_len;
+ int frame_len_msec;
+ int filter_size;
+ int dc_correction;
+ int channels_coupled;
+ int alt_boundary_mode;
+
+ double peak_value;
+ double max_amplification;
+ double target_rms;
+ double compress_factor;
+ double *prev_amplification_factor;
+ double *dc_correction_value;
+ double *compress_threshold;
+ double *fade_factors[2];
+ double *weights;
+
+ int channels;
+ int delay;
+
+ cqueue **gain_history_original;
+ cqueue **gain_history_minimum;
+ cqueue **gain_history_smoothed;
+} DynamicAudioNormalizerContext;
+
+#define OFFSET(x) offsetof(DynamicAudioNormalizerContext, x)
+#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption dynaudnorm_options[] = {
+ { "f", "set the frame length in msec", OFFSET(frame_len_msec), AV_OPT_TYPE_INT, {.i64 = 500}, 10, 8000, FLAGS },
+ { "g", "set the filter size", OFFSET(filter_size), AV_OPT_TYPE_INT, {.i64 = 31}, 3, 301, FLAGS },
+ { "p", "set the peak value", OFFSET(peak_value), AV_OPT_TYPE_DOUBLE, {.dbl = 0.95}, 0.0, 1.0, FLAGS },
+ { "m", "set the max amplification", OFFSET(max_amplification), AV_OPT_TYPE_DOUBLE, {.dbl = 10.0}, 1.0, 100.0, FLAGS },
+ { "r", "set the target RMS", OFFSET(target_rms), AV_OPT_TYPE_DOUBLE, {.dbl = 0.0}, 0.0, 1.0, FLAGS },
+ { "n", "set channel coupling", OFFSET(channels_coupled), AV_OPT_TYPE_BOOL, {.i64 = 1}, 0, 1, FLAGS },
+ { "c", "set DC correction", OFFSET(dc_correction), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS },
+ { "b", "set alternative boundary mode", OFFSET(alt_boundary_mode), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS },
+ { "s", "set the compress factor", OFFSET(compress_factor), AV_OPT_TYPE_DOUBLE, {.dbl = 0.0}, 0.0, 30.0, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(dynaudnorm);
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ DynamicAudioNormalizerContext *s = ctx->priv;
+
+ if (!(s->filter_size & 1)) {
+ av_log(ctx, AV_LOG_ERROR, "filter size %d is invalid. Must be an odd value.\n", s->filter_size);
+ return AVERROR(EINVAL);
+ }
+
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *formats;
+ AVFilterChannelLayouts *layouts;
+ static const enum AVSampleFormat sample_fmts[] = {
+ AV_SAMPLE_FMT_DBLP,
+ AV_SAMPLE_FMT_NONE
+ };
+ int ret;
+
+ layouts = ff_all_channel_counts();
+ if (!layouts)
+ return AVERROR(ENOMEM);
+ ret = ff_set_common_channel_layouts(ctx, layouts);
+ if (ret < 0)
+ return ret;
+
+ formats = ff_make_format_list(sample_fmts);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ret = ff_set_common_formats(ctx, formats);
+ if (ret < 0)
+ return ret;
+
+ formats = ff_all_samplerates();
+ if (!formats)
+ return AVERROR(ENOMEM);
+ return ff_set_common_samplerates(ctx, formats);
+}
+
+static inline int frame_size(int sample_rate, int frame_len_msec)
+{
+ const int frame_size = lrint((double)sample_rate * (frame_len_msec / 1000.0));
+ return frame_size + (frame_size % 2);
+}
+
+static void precalculate_fade_factors(double *fade_factors[2], int frame_len)
+{
+ const double step_size = 1.0 / frame_len;
+ int pos;
+
+ for (pos = 0; pos < frame_len; pos++) {
+ fade_factors[0][pos] = 1.0 - (step_size * (pos + 1.0));
+ fade_factors[1][pos] = 1.0 - fade_factors[0][pos];
+ }
+}
+
+static cqueue *cqueue_create(int size)
+{
+ cqueue *q;
+
+ q = av_malloc(sizeof(cqueue));
+ if (!q)
+ return NULL;
+
+ q->size = size;
+ q->nb_elements = 0;
+ q->first = 0;
+
+ q->elements = av_malloc_array(size, sizeof(double));
+ if (!q->elements) {
+ av_free(q);
+ return NULL;
+ }
+
+ return q;
+}
+
+static void cqueue_free(cqueue *q)
+{
+ if (q)
+ av_free(q->elements);
+ av_free(q);
+}
+
+static int cqueue_size(cqueue *q)
+{
+ return q->nb_elements;
+}
+
+static int cqueue_empty(cqueue *q)
+{
+ return !q->nb_elements;
+}
+
+static int cqueue_enqueue(cqueue *q, double element)
+{
+ int i;
+
+ av_assert2(q->nb_elements != q->size);
+
+ i = (q->first + q->nb_elements) % q->size;
+ q->elements[i] = element;
+ q->nb_elements++;
+
+ return 0;
+}
+
+static double cqueue_peek(cqueue *q, int index)
+{
+ av_assert2(index < q->nb_elements);
+ return q->elements[(q->first + index) % q->size];
+}
+
+static int cqueue_dequeue(cqueue *q, double *element)
+{
+ av_assert2(!cqueue_empty(q));
+
+ *element = q->elements[q->first];
+ q->first = (q->first + 1) % q->size;
+ q->nb_elements--;
+
+ return 0;
+}
+
+static int cqueue_pop(cqueue *q)
+{
+ av_assert2(!cqueue_empty(q));
+
+ q->first = (q->first + 1) % q->size;
+ q->nb_elements--;
+
+ return 0;
+}
+
+static void init_gaussian_filter(DynamicAudioNormalizerContext *s)
+{
+ double total_weight = 0.0;
+ const double sigma = (((s->filter_size / 2.0) - 1.0) / 3.0) + (1.0 / 3.0);
+ double adjust;
+ int i;
+
+ // Pre-compute constants
+ const int offset = s->filter_size / 2;
+ const double c1 = 1.0 / (sigma * sqrt(2.0 * M_PI));
+ const double c2 = 2.0 * sigma * sigma;
+
+ // Compute weights
+ for (i = 0; i < s->filter_size; i++) {
+ const int x = i - offset;
+
+ s->weights[i] = c1 * exp(-x * x / c2);
+ total_weight += s->weights[i];
+ }
+
+ // Adjust weights
+ adjust = 1.0 / total_weight;
+ for (i = 0; i < s->filter_size; i++) {
+ s->weights[i] *= adjust;
+ }
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ DynamicAudioNormalizerContext *s = ctx->priv;
+ int c;
+
+ av_freep(&s->prev_amplification_factor);
+ av_freep(&s->dc_correction_value);
+ av_freep(&s->compress_threshold);
+ av_freep(&s->fade_factors[0]);
+ av_freep(&s->fade_factors[1]);
+
+ for (c = 0; c < s->channels; c++) {
+ if (s->gain_history_original)
+ cqueue_free(s->gain_history_original[c]);
+ if (s->gain_history_minimum)
+ cqueue_free(s->gain_history_minimum[c]);
+ if (s->gain_history_smoothed)
+ cqueue_free(s->gain_history_smoothed[c]);
+ }
+
+ av_freep(&s->gain_history_original);
+ av_freep(&s->gain_history_minimum);
+ av_freep(&s->gain_history_smoothed);
+
+ av_freep(&s->weights);
+
+ ff_bufqueue_discard_all(&s->queue);
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ DynamicAudioNormalizerContext *s = ctx->priv;
+ int c;
+
+ uninit(ctx);
+
+ s->frame_len =
+ inlink->min_samples =
+ inlink->max_samples =
+ inlink->partial_buf_size = frame_size(inlink->sample_rate, s->frame_len_msec);
+ av_log(ctx, AV_LOG_DEBUG, "frame len %d\n", s->frame_len);
+
+ s->fade_factors[0] = av_malloc_array(s->frame_len, sizeof(*s->fade_factors[0]));
+ s->fade_factors[1] = av_malloc_array(s->frame_len, sizeof(*s->fade_factors[1]));
+
+ s->prev_amplification_factor = av_malloc_array(inlink->channels, sizeof(*s->prev_amplification_factor));
+ s->dc_correction_value = av_calloc(inlink->channels, sizeof(*s->dc_correction_value));
+ s->compress_threshold = av_calloc(inlink->channels, sizeof(*s->compress_threshold));
+ s->gain_history_original = av_calloc(inlink->channels, sizeof(*s->gain_history_original));
+ s->gain_history_minimum = av_calloc(inlink->channels, sizeof(*s->gain_history_minimum));
+ s->gain_history_smoothed = av_calloc(inlink->channels, sizeof(*s->gain_history_smoothed));
+ s->weights = av_malloc_array(s->filter_size, sizeof(*s->weights));
+ if (!s->prev_amplification_factor || !s->dc_correction_value ||
+ !s->compress_threshold || !s->fade_factors[0] || !s->fade_factors[1] ||
+ !s->gain_history_original || !s->gain_history_minimum ||
+ !s->gain_history_smoothed || !s->weights)
+ return AVERROR(ENOMEM);
+
+ for (c = 0; c < inlink->channels; c++) {
+ s->prev_amplification_factor[c] = 1.0;
+
+ s->gain_history_original[c] = cqueue_create(s->filter_size);
+ s->gain_history_minimum[c] = cqueue_create(s->filter_size);
+ s->gain_history_smoothed[c] = cqueue_create(s->filter_size);
+
+ if (!s->gain_history_original[c] || !s->gain_history_minimum[c] ||
+ !s->gain_history_smoothed[c])
+ return AVERROR(ENOMEM);
+ }
+
+ precalculate_fade_factors(s->fade_factors, s->frame_len);
+ init_gaussian_filter(s);
+
+ s->channels = inlink->channels;
+ s->delay = s->filter_size;
+
+ return 0;
+}
+
+static inline double fade(double prev, double next, int pos,
+ double *fade_factors[2])
+{
+ return fade_factors[0][pos] * prev + fade_factors[1][pos] * next;
+}
+
+static inline double pow2(const double value)
+{
+ return value * value;
+}
+
+static inline double bound(const double threshold, const double val)
+{
+ const double CONST = 0.8862269254527580136490837416705725913987747280611935; //sqrt(PI) / 2.0
+ return erf(CONST * (val / threshold)) * threshold;
+}
+
+static double find_peak_magnitude(AVFrame *frame, int channel)
+{
+ double max = DBL_EPSILON;
+ int c, i;
+
+ if (channel == -1) {
+ for (c = 0; c < av_frame_get_channels(frame); c++) {
+ double *data_ptr = (double *)frame->extended_data[c];
+
+ for (i = 0; i < frame->nb_samples; i++)
+ max = FFMAX(max, fabs(data_ptr[i]));
+ }
+ } else {
+ double *data_ptr = (double *)frame->extended_data[channel];
+
+ for (i = 0; i < frame->nb_samples; i++)
+ max = FFMAX(max, fabs(data_ptr[i]));
+ }
+
+ return max;
+}
+
+static double compute_frame_rms(AVFrame *frame, int channel)
+{
+ double rms_value = 0.0;
+ int c, i;
+
+ if (channel == -1) {
+ for (c = 0; c < av_frame_get_channels(frame); c++) {
+ const double *data_ptr = (double *)frame->extended_data[c];
+
+ for (i = 0; i < frame->nb_samples; i++) {
+ rms_value += pow2(data_ptr[i]);
+ }
+ }
+
+ rms_value /= frame->nb_samples * av_frame_get_channels(frame);
+ } else {
+ const double *data_ptr = (double *)frame->extended_data[channel];
+ for (i = 0; i < frame->nb_samples; i++) {
+ rms_value += pow2(data_ptr[i]);
+ }
+
+ rms_value /= frame->nb_samples;
+ }
+
+ return FFMAX(sqrt(rms_value), DBL_EPSILON);
+}
+
+static double get_max_local_gain(DynamicAudioNormalizerContext *s, AVFrame *frame,
+ int channel)
+{
+ const double maximum_gain = s->peak_value / find_peak_magnitude(frame, channel);
+ const double rms_gain = s->target_rms > DBL_EPSILON ? (s->target_rms / compute_frame_rms(frame, channel)) : DBL_MAX;
+ return bound(s->max_amplification, FFMIN(maximum_gain, rms_gain));
+}
+
+static double minimum_filter(cqueue *q)
+{
+ double min = DBL_MAX;
+ int i;
+
+ for (i = 0; i < cqueue_size(q); i++) {
+ min = FFMIN(min, cqueue_peek(q, i));
+ }
+
+ return min;
+}
+
+static double gaussian_filter(DynamicAudioNormalizerContext *s, cqueue *q)
+{
+ double result = 0.0;
+ int i;
+
+ for (i = 0; i < cqueue_size(q); i++) {
+ result += cqueue_peek(q, i) * s->weights[i];
+ }
+
+ return result;
+}
+
+static void update_gain_history(DynamicAudioNormalizerContext *s, int channel,
+ double current_gain_factor)
+{
+ if (cqueue_empty(s->gain_history_original[channel]) ||
+ cqueue_empty(s->gain_history_minimum[channel])) {
+ const int pre_fill_size = s->filter_size / 2;
+ const double initial_value = s->alt_boundary_mode ? current_gain_factor : 1.0;
+
+ s->prev_amplification_factor[channel] = initial_value;
+
+ while (cqueue_size(s->gain_history_original[channel]) < pre_fill_size) {
+ cqueue_enqueue(s->gain_history_original[channel], initial_value);
+ }
+ }
+
+ cqueue_enqueue(s->gain_history_original[channel], current_gain_factor);
+
+ while (cqueue_size(s->gain_history_original[channel]) >= s->filter_size) {
+ double minimum;
+ av_assert0(cqueue_size(s->gain_history_original[channel]) == s->filter_size);
+
+ if (cqueue_empty(s->gain_history_minimum[channel])) {
+ const int pre_fill_size = s->filter_size / 2;
+ double initial_value = s->alt_boundary_mode ? cqueue_peek(s->gain_history_original[channel], 0) : 1.0;
+ int input = pre_fill_size;
+
+ while (cqueue_size(s->gain_history_minimum[channel]) < pre_fill_size) {
+ initial_value = FFMIN(initial_value, cqueue_peek(s->gain_history_original[channel], ++input));
+ cqueue_enqueue(s->gain_history_minimum[channel], initial_value);
+ }
+ }
+
+ minimum = minimum_filter(s->gain_history_original[channel]);
+
+ cqueue_enqueue(s->gain_history_minimum[channel], minimum);
+
+ cqueue_pop(s->gain_history_original[channel]);
+ }
+
+ while (cqueue_size(s->gain_history_minimum[channel]) >= s->filter_size) {
+ double smoothed;
+ av_assert0(cqueue_size(s->gain_history_minimum[channel]) == s->filter_size);
+ smoothed = gaussian_filter(s, s->gain_history_minimum[channel]);
+
+ cqueue_enqueue(s->gain_history_smoothed[channel], smoothed);
+
+ cqueue_pop(s->gain_history_minimum[channel]);
+ }
+}
+
+static inline double update_value(double new, double old, double aggressiveness)
+{
+ av_assert0((aggressiveness >= 0.0) && (aggressiveness <= 1.0));
+ return aggressiveness * new + (1.0 - aggressiveness) * old;
+}
+
+static void perform_dc_correction(DynamicAudioNormalizerContext *s, AVFrame *frame)
+{
+ const double diff = 1.0 / frame->nb_samples;
+ int is_first_frame = cqueue_empty(s->gain_history_original[0]);
+ int c, i;
+
+ for (c = 0; c < s->channels; c++) {
+ double *dst_ptr = (double *)frame->extended_data[c];
+ double current_average_value = 0.0;
+ double prev_value;
+
+ for (i = 0; i < frame->nb_samples; i++)
+ current_average_value += dst_ptr[i] * diff;
+
+ prev_value = is_first_frame ? current_average_value : s->dc_correction_value[c];
+ s->dc_correction_value[c] = is_first_frame ? current_average_value : update_value(current_average_value, s->dc_correction_value[c], 0.1);
+
+ for (i = 0; i < frame->nb_samples; i++) {
+ dst_ptr[i] -= fade(prev_value, s->dc_correction_value[c], i, s->fade_factors);
+ }
+ }
+}
+
+static double setup_compress_thresh(double threshold)
+{
+ if ((threshold > DBL_EPSILON) && (threshold < (1.0 - DBL_EPSILON))) {
+ double current_threshold = threshold;
+ double step_size = 1.0;
+
+ while (step_size > DBL_EPSILON) {
+ while ((llrint((current_threshold + step_size) * (UINT64_C(1) << 63)) >
+ llrint(current_threshold * (UINT64_C(1) << 63))) &&
+ (bound(current_threshold + step_size, 1.0) <= threshold)) {
+ current_threshold += step_size;
+ }
+
+ step_size /= 2.0;
+ }
+
+ return current_threshold;
+ } else {
+ return threshold;
+ }
+}
+
+static double compute_frame_std_dev(DynamicAudioNormalizerContext *s,
+ AVFrame *frame, int channel)
+{
+ double variance = 0.0;
+ int i, c;
+
+ if (channel == -1) {
+ for (c = 0; c < s->channels; c++) {
+ const double *data_ptr = (double *)frame->extended_data[c];
+
+ for (i = 0; i < frame->nb_samples; i++) {
+ variance += pow2(data_ptr[i]); // Assume that MEAN is *zero*
+ }
+ }
+ variance /= (s->channels * frame->nb_samples) - 1;
+ } else {
+ const double *data_ptr = (double *)frame->extended_data[channel];
+
+ for (i = 0; i < frame->nb_samples; i++) {
+ variance += pow2(data_ptr[i]); // Assume that MEAN is *zero*
+ }
+ variance /= frame->nb_samples - 1;
+ }
+
+ return FFMAX(sqrt(variance), DBL_EPSILON);
+}
+
+static void perform_compression(DynamicAudioNormalizerContext *s, AVFrame *frame)
+{
+ int is_first_frame = cqueue_empty(s->gain_history_original[0]);
+ int c, i;
+
+ if (s->channels_coupled) {
+ const double standard_deviation = compute_frame_std_dev(s, frame, -1);
+ const double current_threshold = FFMIN(1.0, s->compress_factor * standard_deviation);
+
+ const double prev_value = is_first_frame ? current_threshold : s->compress_threshold[0];
+ double prev_actual_thresh, curr_actual_thresh;
+ s->compress_threshold[0] = is_first_frame ? current_threshold : update_value(current_threshold, s->compress_threshold[0], (1.0/3.0));
+
+ prev_actual_thresh = setup_compress_thresh(prev_value);
+ curr_actual_thresh = setup_compress_thresh(s->compress_threshold[0]);
+
+ for (c = 0; c < s->channels; c++) {
+ double *const dst_ptr = (double *)frame->extended_data[c];
+ for (i = 0; i < frame->nb_samples; i++) {
+ const double localThresh = fade(prev_actual_thresh, curr_actual_thresh, i, s->fade_factors);
+ dst_ptr[i] = copysign(bound(localThresh, fabs(dst_ptr[i])), dst_ptr[i]);
+ }
+ }
+ } else {
+ for (c = 0; c < s->channels; c++) {
+ const double standard_deviation = compute_frame_std_dev(s, frame, c);
+ const double current_threshold = setup_compress_thresh(FFMIN(1.0, s->compress_factor * standard_deviation));
+
+ const double prev_value = is_first_frame ? current_threshold : s->compress_threshold[c];
+ double prev_actual_thresh, curr_actual_thresh;
+ double *dst_ptr;
+ s->compress_threshold[c] = is_first_frame ? current_threshold : update_value(current_threshold, s->compress_threshold[c], 1.0/3.0);
+
+ prev_actual_thresh = setup_compress_thresh(prev_value);
+ curr_actual_thresh = setup_compress_thresh(s->compress_threshold[c]);
+
+ dst_ptr = (double *)frame->extended_data[c];
+ for (i = 0; i < frame->nb_samples; i++) {
+ const double localThresh = fade(prev_actual_thresh, curr_actual_thresh, i, s->fade_factors);
+ dst_ptr[i] = copysign(bound(localThresh, fabs(dst_ptr[i])), dst_ptr[i]);
+ }
+ }
+ }
+}
+
+static void analyze_frame(DynamicAudioNormalizerContext *s, AVFrame *frame)
+{
+ if (s->dc_correction) {
+ perform_dc_correction(s, frame);
+ }
+
+ if (s->compress_factor > DBL_EPSILON) {
+ perform_compression(s, frame);
+ }
+
+ if (s->channels_coupled) {
+ const double current_gain_factor = get_max_local_gain(s, frame, -1);
+ int c;
+
+ for (c = 0; c < s->channels; c++)
+ update_gain_history(s, c, current_gain_factor);
+ } else {
+ int c;
+
+ for (c = 0; c < s->channels; c++)
+ update_gain_history(s, c, get_max_local_gain(s, frame, c));
+ }
+}
+
+static void amplify_frame(DynamicAudioNormalizerContext *s, AVFrame *frame)
+{
+ int c, i;
+
+ for (c = 0; c < s->channels; c++) {
+ double *dst_ptr = (double *)frame->extended_data[c];
+ double current_amplification_factor;
+
+ cqueue_dequeue(s->gain_history_smoothed[c], &current_amplification_factor);
+
+ for (i = 0; i < frame->nb_samples; i++) {
+ const double amplification_factor = fade(s->prev_amplification_factor[c],
+ current_amplification_factor, i,
+ s->fade_factors);
+
+ dst_ptr[i] *= amplification_factor;
+
+ if (fabs(dst_ptr[i]) > s->peak_value)
+ dst_ptr[i] = copysign(s->peak_value, dst_ptr[i]);
+ }
+
+ s->prev_amplification_factor[c] = current_amplification_factor;
+ }
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ DynamicAudioNormalizerContext *s = ctx->priv;
+ AVFilterLink *outlink = inlink->dst->outputs[0];
+ int ret = 0;
+
+ if (!cqueue_empty(s->gain_history_smoothed[0])) {
+ AVFrame *out = ff_bufqueue_get(&s->queue);
+
+ amplify_frame(s, out);
+ ret = ff_filter_frame(outlink, out);
+ }
+
+ analyze_frame(s, in);
+ ff_bufqueue_add(ctx, &s->queue, in);
+
+ return ret;
+}
+
+static int flush_buffer(DynamicAudioNormalizerContext *s, AVFilterLink *inlink,
+ AVFilterLink *outlink)
+{
+ AVFrame *out = ff_get_audio_buffer(outlink, s->frame_len);
+ int c, i;
+
+ if (!out)
+ return AVERROR(ENOMEM);
+
+ for (c = 0; c < s->channels; c++) {
+ double *dst_ptr = (double *)out->extended_data[c];
+
+ for (i = 0; i < out->nb_samples; i++) {
+ dst_ptr[i] = s->alt_boundary_mode ? DBL_EPSILON : ((s->target_rms > DBL_EPSILON) ? FFMIN(s->peak_value, s->target_rms) : s->peak_value);
+ if (s->dc_correction) {
+ dst_ptr[i] *= ((i % 2) == 1) ? -1 : 1;
+ dst_ptr[i] += s->dc_correction_value[c];
+ }
+ }
+ }
+
+ s->delay--;
+ return filter_frame(inlink, out);
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ DynamicAudioNormalizerContext *s = ctx->priv;
+ int ret = 0;
+
+ ret = ff_request_frame(ctx->inputs[0]);
+
+ if (ret == AVERROR_EOF && !ctx->is_disabled && s->delay) {
+ if (!cqueue_empty(s->gain_history_smoothed[0])) {
+ ret = flush_buffer(s, ctx->inputs[0], outlink);
+ } else if (s->queue.available) {
+ AVFrame *out = ff_bufqueue_get(&s->queue);
+
+ ret = ff_filter_frame(outlink, out);
+ }
+ }
+
+ return ret;
+}
+
+static const AVFilterPad avfilter_af_dynaudnorm_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ .needs_writable = 1,
+ },
+ { NULL }
+};
+
+static const AVFilterPad avfilter_af_dynaudnorm_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_dynaudnorm = {
+ .name = "dynaudnorm",
+ .description = NULL_IF_CONFIG_SMALL("Dynamic Audio Normalizer."),
+ .query_formats = query_formats,
+ .priv_size = sizeof(DynamicAudioNormalizerContext),
+ .init = init,
+ .uninit = uninit,
+ .inputs = avfilter_af_dynaudnorm_inputs,
+ .outputs = avfilter_af_dynaudnorm_outputs,
+ .priv_class = &dynaudnorm_class,
+};
diff --git a/libavfilter/af_earwax.c b/libavfilter/af_earwax.c
new file mode 100644
index 0000000000..b0ba4cff65
--- /dev/null
+++ b/libavfilter/af_earwax.c
@@ -0,0 +1,174 @@
+/*
+ * Copyright (c) 2011 Mina Nagy Zaki
+ * Copyright (c) 2000 Edward Beingessner And Sundry Contributors.
+ * This source code is freely redistributable and may be used for any purpose.
+ * This copyright notice must be maintained. Edward Beingessner And Sundry
+ * Contributors are not responsible for the consequences of using this
+ * software.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Stereo Widening Effect. Adds audio cues to move stereo image in
+ * front of the listener. Adapted from the libsox earwax effect.
+ */
+
+#include "libavutil/channel_layout.h"
+#include "avfilter.h"
+#include "audio.h"
+#include "formats.h"
+
+#define NUMTAPS 64
+
+static const int8_t filt[NUMTAPS] = {
+/* 30° 330° */
+ 4, -6, /* 32 tap stereo FIR filter. */
+ 4, -11, /* One side filters as if the */
+ -1, -5, /* signal was from 30 degrees */
+ 3, 3, /* from the ear, the other as */
+ -2, 5, /* if 330 degrees. */
+ -5, 0,
+ 9, 1,
+ 6, 3, /* Input */
+ -4, -1, /* Left Right */
+ -5, -3, /* __________ __________ */
+ -2, -5, /* | | | | */
+ -7, 1, /* .---| Hh,0(f) | | Hh,0(f) |---. */
+ 6, -7, /* / |__________| |__________| \ */
+ 30, -29, /* / \ / \ */
+ 12, -3, /* / X \ */
+ -11, 4, /* / / \ \ */
+ -3, 7, /* ____V_____ __________V V__________ _____V____ */
+ -20, 23, /* | | | | | | | | */
+ 2, 0, /* | Hh,30(f) | | Hh,330(f)| | Hh,330(f)| | Hh,30(f) | */
+ 1, -6, /* |__________| |__________| |__________| |__________| */
+ -14, -5, /* \ ___ / \ ___ / */
+ 15, -18, /* \ / \ / _____ \ / \ / */
+ 6, 7, /* `->| + |<--' / \ `-->| + |<-' */
+ 15, -10, /* \___/ _/ \_ \___/ */
+ -14, 22, /* \ / \ / \ / */
+ -7, -2, /* `--->| | | |<---' */
+ -4, 9, /* \_/ \_/ */
+ 6, -12, /* */
+ 6, -6, /* Headphones */
+ 0, -11,
+ 0, -5,
+ 4, 0};
+
+typedef struct {
+ int16_t taps[NUMTAPS * 2];
+} EarwaxContext;
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const int sample_rates[] = { 44100, -1 };
+ int ret;
+
+ AVFilterFormats *formats = NULL;
+ AVFilterChannelLayouts *layout = NULL;
+
+ if ((ret = ff_add_format (&formats, AV_SAMPLE_FMT_S16 )) < 0 ||
+ (ret = ff_set_common_formats (ctx , formats )) < 0 ||
+ (ret = ff_add_channel_layout (&layout , AV_CH_LAYOUT_STEREO )) < 0 ||
+ (ret = ff_set_common_channel_layouts (ctx , layout )) < 0 ||
+ (ret = ff_set_common_samplerates (ctx , ff_make_format_list(sample_rates) )) < 0)
+ return ret;
+
+ return 0;
+}
+
+//FIXME: replace with DSPContext.scalarproduct_int16
+static inline int16_t *scalarproduct(const int16_t *in, const int16_t *endin, int16_t *out)
+{
+ int32_t sample;
+ int16_t j;
+
+ while (in < endin) {
+ sample = 0;
+ for (j = 0; j < NUMTAPS; j++)
+ sample += in[j] * filt[j];
+ *out = av_clip_int16(sample >> 6);
+ out++;
+ in++;
+ }
+
+ return out;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
+{
+ AVFilterLink *outlink = inlink->dst->outputs[0];
+ int16_t *taps, *endin, *in, *out;
+ AVFrame *outsamples = ff_get_audio_buffer(inlink, insamples->nb_samples);
+ int len;
+
+ if (!outsamples) {
+ av_frame_free(&insamples);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(outsamples, insamples);
+
+ taps = ((EarwaxContext *)inlink->dst->priv)->taps;
+ out = (int16_t *)outsamples->data[0];
+ in = (int16_t *)insamples ->data[0];
+
+ len = FFMIN(NUMTAPS, 2*insamples->nb_samples);
+ // copy part of new input and process with saved input
+ memcpy(taps+NUMTAPS, in, len * sizeof(*taps));
+ out = scalarproduct(taps, taps + len, out);
+
+ // process current input
+ if (2*insamples->nb_samples >= NUMTAPS ){
+ endin = in + insamples->nb_samples * 2 - NUMTAPS;
+ scalarproduct(in, endin, out);
+
+ // save part of input for next round
+ memcpy(taps, endin, NUMTAPS * sizeof(*taps));
+ } else
+ memmove(taps, taps + 2*insamples->nb_samples, NUMTAPS * sizeof(*taps));
+
+ av_frame_free(&insamples);
+ return ff_filter_frame(outlink, outsamples);
+}
+
+static const AVFilterPad earwax_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad earwax_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_earwax = {
+ .name = "earwax",
+ .description = NULL_IF_CONFIG_SMALL("Widen the stereo image."),
+ .query_formats = query_formats,
+ .priv_size = sizeof(EarwaxContext),
+ .inputs = earwax_inputs,
+ .outputs = earwax_outputs,
+};
diff --git a/libavfilter/af_extrastereo.c b/libavfilter/af_extrastereo.c
new file mode 100644
index 0000000000..a746006f71
--- /dev/null
+++ b/libavfilter/af_extrastereo.c
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2015 The FFmpeg Project
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/channel_layout.h"
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#include "audio.h"
+#include "formats.h"
+
+typedef struct ExtraStereoContext {
+ const AVClass *class;
+ float mult;
+ int clip;
+} ExtraStereoContext;
+
+#define OFFSET(x) offsetof(ExtraStereoContext, x)
+#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption extrastereo_options[] = {
+ { "m", "set the difference coefficient", OFFSET(mult), AV_OPT_TYPE_FLOAT, {.dbl=2.5}, -10, 10, A },
+ { "c", "enable clipping", OFFSET(clip), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, A },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(extrastereo);
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *formats = NULL;
+ AVFilterChannelLayouts *layout = NULL;
+ int ret;
+
+ if ((ret = ff_add_format (&formats, AV_SAMPLE_FMT_FLT )) < 0 ||
+ (ret = ff_set_common_formats (ctx , formats )) < 0 ||
+ (ret = ff_add_channel_layout (&layout , AV_CH_LAYOUT_STEREO)) < 0 ||
+ (ret = ff_set_common_channel_layouts (ctx , layout )) < 0)
+ return ret;
+
+ formats = ff_all_samplerates();
+ return ff_set_common_samplerates(ctx, formats);
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ ExtraStereoContext *s = ctx->priv;
+ const float *src = (const float *)in->data[0];
+ const float mult = s->mult;
+ AVFrame *out;
+ float *dst;
+ int n;
+
+ if (av_frame_is_writable(in)) {
+ out = in;
+ } else {
+ out = ff_get_audio_buffer(inlink, in->nb_samples);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+ }
+ dst = (float *)out->data[0];
+
+ for (n = 0; n < in->nb_samples; n++) {
+ float average, left, right;
+
+ left = src[n * 2 ];
+ right = src[n * 2 + 1];
+ average = (left + right) / 2.;
+ left = average + mult * (left - average);
+ right = average + mult * (right - average);
+
+ if (s->clip) {
+ dst[n * 2 ] = av_clipf(left, -1, 1);
+ dst[n * 2 + 1] = av_clipf(right, -1, 1);
+ }
+ }
+
+ if (out != in)
+ av_frame_free(&in);
+ return ff_filter_frame(outlink, out);
+}
+
+static const AVFilterPad inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_extrastereo = {
+ .name = "extrastereo",
+ .description = NULL_IF_CONFIG_SMALL("Increase difference between stereo audio channels."),
+ .query_formats = query_formats,
+ .priv_size = sizeof(ExtraStereoContext),
+ .priv_class = &extrastereo_class,
+ .inputs = inputs,
+ .outputs = outputs,
+};
diff --git a/libavfilter/af_firequalizer.c b/libavfilter/af_firequalizer.c
new file mode 100644
index 0000000000..4243d66bd6
--- /dev/null
+++ b/libavfilter/af_firequalizer.c
@@ -0,0 +1,838 @@
+/*
+ * Copyright (c) 2016 Muhammad Faiz <mfcc64@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/opt.h"
+#include "libavutil/eval.h"
+#include "libavutil/avassert.h"
+#include "libavcodec/avfft.h"
+#include "avfilter.h"
+#include "internal.h"
+#include "audio.h"
+
+#define RDFT_BITS_MIN 4
+#define RDFT_BITS_MAX 16
+
+enum WindowFunc {
+ WFUNC_RECTANGULAR,
+ WFUNC_HANN,
+ WFUNC_HAMMING,
+ WFUNC_BLACKMAN,
+ WFUNC_NUTTALL3,
+ WFUNC_MNUTTALL3,
+ WFUNC_NUTTALL,
+ WFUNC_BNUTTALL,
+ WFUNC_BHARRIS,
+ WFUNC_TUKEY,
+ NB_WFUNC
+};
+
+enum Scale {
+ SCALE_LINLIN,
+ SCALE_LINLOG,
+ SCALE_LOGLIN,
+ SCALE_LOGLOG,
+ NB_SCALE
+};
+
+#define NB_GAIN_ENTRY_MAX 4096
+typedef struct {
+ double freq;
+ double gain;
+} GainEntry;
+
+typedef struct {
+ int buf_idx;
+ int overlap_idx;
+} OverlapIndex;
+
+typedef struct {
+ const AVClass *class;
+
+ RDFTContext *analysis_rdft;
+ RDFTContext *analysis_irdft;
+ RDFTContext *rdft;
+ RDFTContext *irdft;
+ FFTContext *fft_ctx;
+ int analysis_rdft_len;
+ int rdft_len;
+
+ float *analysis_buf;
+ float *dump_buf;
+ float *kernel_tmp_buf;
+ float *kernel_buf;
+ float *conv_buf;
+ OverlapIndex *conv_idx;
+ int fir_len;
+ int nsamples_max;
+ int64_t next_pts;
+ int frame_nsamples_max;
+ int remaining;
+
+ char *gain_cmd;
+ char *gain_entry_cmd;
+ const char *gain;
+ const char *gain_entry;
+ double delay;
+ double accuracy;
+ int wfunc;
+ int fixed;
+ int multi;
+ int zero_phase;
+ int scale;
+ char *dumpfile;
+ int dumpscale;
+ int fft2;
+
+ int nb_gain_entry;
+ int gain_entry_err;
+ GainEntry gain_entry_tbl[NB_GAIN_ENTRY_MAX];
+} FIREqualizerContext;
+
+#define OFFSET(x) offsetof(FIREqualizerContext, x)
+#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption firequalizer_options[] = {
+ { "gain", "set gain curve", OFFSET(gain), AV_OPT_TYPE_STRING, { .str = "gain_interpolate(f)" }, 0, 0, FLAGS },
+ { "gain_entry", "set gain entry", OFFSET(gain_entry), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS },
+ { "delay", "set delay", OFFSET(delay), AV_OPT_TYPE_DOUBLE, { .dbl = 0.01 }, 0.0, 1e10, FLAGS },
+ { "accuracy", "set accuracy", OFFSET(accuracy), AV_OPT_TYPE_DOUBLE, { .dbl = 5.0 }, 0.0, 1e10, FLAGS },
+ { "wfunc", "set window function", OFFSET(wfunc), AV_OPT_TYPE_INT, { .i64 = WFUNC_HANN }, 0, NB_WFUNC-1, FLAGS, "wfunc" },
+ { "rectangular", "rectangular window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_RECTANGULAR }, 0, 0, FLAGS, "wfunc" },
+ { "hann", "hann window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_HANN }, 0, 0, FLAGS, "wfunc" },
+ { "hamming", "hamming window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_HAMMING }, 0, 0, FLAGS, "wfunc" },
+ { "blackman", "blackman window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_BLACKMAN }, 0, 0, FLAGS, "wfunc" },
+ { "nuttall3", "3-term nuttall window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_NUTTALL3 }, 0, 0, FLAGS, "wfunc" },
+ { "mnuttall3", "minimum 3-term nuttall window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_MNUTTALL3 }, 0, 0, FLAGS, "wfunc" },
+ { "nuttall", "nuttall window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_NUTTALL }, 0, 0, FLAGS, "wfunc" },
+ { "bnuttall", "blackman-nuttall window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_BNUTTALL }, 0, 0, FLAGS, "wfunc" },
+ { "bharris", "blackman-harris window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_BHARRIS }, 0, 0, FLAGS, "wfunc" },
+ { "tukey", "tukey window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_TUKEY }, 0, 0, FLAGS, "wfunc" },
+ { "fixed", "set fixed frame samples", OFFSET(fixed), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
+ { "multi", "set multi channels mode", OFFSET(multi), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
+ { "zero_phase", "set zero phase mode", OFFSET(zero_phase), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
+ { "scale", "set gain scale", OFFSET(scale), AV_OPT_TYPE_INT, { .i64 = SCALE_LINLOG }, 0, NB_SCALE-1, FLAGS, "scale" },
+ { "linlin", "linear-freq linear-gain", 0, AV_OPT_TYPE_CONST, { .i64 = SCALE_LINLIN }, 0, 0, FLAGS, "scale" },
+ { "linlog", "linear-freq logarithmic-gain", 0, AV_OPT_TYPE_CONST, { .i64 = SCALE_LINLOG }, 0, 0, FLAGS, "scale" },
+ { "loglin", "logarithmic-freq linear-gain", 0, AV_OPT_TYPE_CONST, { .i64 = SCALE_LOGLIN }, 0, 0, FLAGS, "scale" },
+ { "loglog", "logarithmic-freq logarithmic-gain", 0, AV_OPT_TYPE_CONST, { .i64 = SCALE_LOGLOG }, 0, 0, FLAGS, "scale" },
+ { "dumpfile", "set dump file", OFFSET(dumpfile), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS },
+ { "dumpscale", "set dump scale", OFFSET(dumpscale), AV_OPT_TYPE_INT, { .i64 = SCALE_LINLOG }, 0, NB_SCALE-1, FLAGS, "scale" },
+ { "fft2", "set 2-channels fft", OFFSET(fft2), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(firequalizer);
+
+static void common_uninit(FIREqualizerContext *s)
+{
+ av_rdft_end(s->analysis_rdft);
+ av_rdft_end(s->analysis_irdft);
+ av_rdft_end(s->rdft);
+ av_rdft_end(s->irdft);
+ av_fft_end(s->fft_ctx);
+ s->analysis_rdft = s->analysis_irdft = s->rdft = s->irdft = NULL;
+ s->fft_ctx = NULL;
+
+ av_freep(&s->analysis_buf);
+ av_freep(&s->dump_buf);
+ av_freep(&s->kernel_tmp_buf);
+ av_freep(&s->kernel_buf);
+ av_freep(&s->conv_buf);
+ av_freep(&s->conv_idx);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ FIREqualizerContext *s = ctx->priv;
+
+ common_uninit(s);
+ av_freep(&s->gain_cmd);
+ av_freep(&s->gain_entry_cmd);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterChannelLayouts *layouts;
+ AVFilterFormats *formats;
+ static const enum AVSampleFormat sample_fmts[] = {
+ AV_SAMPLE_FMT_FLTP,
+ AV_SAMPLE_FMT_NONE
+ };
+ int ret;
+
+ layouts = ff_all_channel_counts();
+ if (!layouts)
+ return AVERROR(ENOMEM);
+ ret = ff_set_common_channel_layouts(ctx, layouts);
+ if (ret < 0)
+ return ret;
+
+ formats = ff_make_format_list(sample_fmts);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ret = ff_set_common_formats(ctx, formats);
+ if (ret < 0)
+ return ret;
+
+ formats = ff_all_samplerates();
+ if (!formats)
+ return AVERROR(ENOMEM);
+ return ff_set_common_samplerates(ctx, formats);
+}
+
+static void fast_convolute(FIREqualizerContext *av_restrict s, const float *av_restrict kernel_buf, float *av_restrict conv_buf,
+ OverlapIndex *av_restrict idx, float *av_restrict data, int nsamples)
+{
+ if (nsamples <= s->nsamples_max) {
+ float *buf = conv_buf + idx->buf_idx * s->rdft_len;
+ float *obuf = conv_buf + !idx->buf_idx * s->rdft_len + idx->overlap_idx;
+ int center = s->fir_len/2;
+ int k;
+
+ memset(buf, 0, center * sizeof(*data));
+ memcpy(buf + center, data, nsamples * sizeof(*data));
+ memset(buf + center + nsamples, 0, (s->rdft_len - nsamples - center) * sizeof(*data));
+ av_rdft_calc(s->rdft, buf);
+
+ buf[0] *= kernel_buf[0];
+ buf[1] *= kernel_buf[s->rdft_len/2];
+ for (k = 1; k < s->rdft_len/2; k++) {
+ buf[2*k] *= kernel_buf[k];
+ buf[2*k+1] *= kernel_buf[k];
+ }
+
+ av_rdft_calc(s->irdft, buf);
+ for (k = 0; k < s->rdft_len - idx->overlap_idx; k++)
+ buf[k] += obuf[k];
+ memcpy(data, buf, nsamples * sizeof(*data));
+ idx->buf_idx = !idx->buf_idx;
+ idx->overlap_idx = nsamples;
+ } else {
+ while (nsamples > s->nsamples_max * 2) {
+ fast_convolute(s, kernel_buf, conv_buf, idx, data, s->nsamples_max);
+ data += s->nsamples_max;
+ nsamples -= s->nsamples_max;
+ }
+ fast_convolute(s, kernel_buf, conv_buf, idx, data, nsamples/2);
+ fast_convolute(s, kernel_buf, conv_buf, idx, data + nsamples/2, nsamples - nsamples/2);
+ }
+}
+
+static void fast_convolute2(FIREqualizerContext *av_restrict s, const float *av_restrict kernel_buf, FFTComplex *av_restrict conv_buf,
+ OverlapIndex *av_restrict idx, float *av_restrict data0, float *av_restrict data1, int nsamples)
+{
+ if (nsamples <= s->nsamples_max) {
+ FFTComplex *buf = conv_buf + idx->buf_idx * s->rdft_len;
+ FFTComplex *obuf = conv_buf + !idx->buf_idx * s->rdft_len + idx->overlap_idx;
+ int center = s->fir_len/2;
+ int k;
+ float tmp;
+
+ memset(buf, 0, center * sizeof(*buf));
+ for (k = 0; k < nsamples; k++) {
+ buf[center+k].re = data0[k];
+ buf[center+k].im = data1[k];
+ }
+ memset(buf + center + nsamples, 0, (s->rdft_len - nsamples - center) * sizeof(*buf));
+ av_fft_permute(s->fft_ctx, buf);
+ av_fft_calc(s->fft_ctx, buf);
+
+ /* swap re <-> im, do backward fft using forward fft_ctx */
+ /* normalize with 0.5f */
+ tmp = buf[0].re;
+ buf[0].re = 0.5f * kernel_buf[0] * buf[0].im;
+ buf[0].im = 0.5f * kernel_buf[0] * tmp;
+ for (k = 1; k < s->rdft_len/2; k++) {
+ int m = s->rdft_len - k;
+ tmp = buf[k].re;
+ buf[k].re = 0.5f * kernel_buf[k] * buf[k].im;
+ buf[k].im = 0.5f * kernel_buf[k] * tmp;
+ tmp = buf[m].re;
+ buf[m].re = 0.5f * kernel_buf[k] * buf[m].im;
+ buf[m].im = 0.5f * kernel_buf[k] * tmp;
+ }
+ tmp = buf[k].re;
+ buf[k].re = 0.5f * kernel_buf[k] * buf[k].im;
+ buf[k].im = 0.5f * kernel_buf[k] * tmp;
+
+ av_fft_permute(s->fft_ctx, buf);
+ av_fft_calc(s->fft_ctx, buf);
+
+ for (k = 0; k < s->rdft_len - idx->overlap_idx; k++) {
+ buf[k].re += obuf[k].re;
+ buf[k].im += obuf[k].im;
+ }
+
+ /* swapped re <-> im */
+ for (k = 0; k < nsamples; k++) {
+ data0[k] = buf[k].im;
+ data1[k] = buf[k].re;
+ }
+ idx->buf_idx = !idx->buf_idx;
+ idx->overlap_idx = nsamples;
+ } else {
+ while (nsamples > s->nsamples_max * 2) {
+ fast_convolute2(s, kernel_buf, conv_buf, idx, data0, data1, s->nsamples_max);
+ data0 += s->nsamples_max;
+ data1 += s->nsamples_max;
+ nsamples -= s->nsamples_max;
+ }
+ fast_convolute2(s, kernel_buf, conv_buf, idx, data0, data1, nsamples/2);
+ fast_convolute2(s, kernel_buf, conv_buf, idx, data0 + nsamples/2, data1 + nsamples/2, nsamples - nsamples/2);
+ }
+}
+
+static void dump_fir(AVFilterContext *ctx, FILE *fp, int ch)
+{
+ FIREqualizerContext *s = ctx->priv;
+ int rate = ctx->inputs[0]->sample_rate;
+ int xlog = s->dumpscale == SCALE_LOGLIN || s->dumpscale == SCALE_LOGLOG;
+ int ylog = s->dumpscale == SCALE_LINLOG || s->dumpscale == SCALE_LOGLOG;
+ int x;
+ int center = s->fir_len / 2;
+ double delay = s->zero_phase ? 0.0 : (double) center / rate;
+ double vx, ya, yb;
+
+ s->analysis_buf[0] *= s->rdft_len/2;
+ for (x = 1; x <= center; x++) {
+ s->analysis_buf[x] *= s->rdft_len/2;
+ s->analysis_buf[s->analysis_rdft_len - x] *= s->rdft_len/2;
+ }
+
+ if (ch)
+ fprintf(fp, "\n\n");
+
+ fprintf(fp, "# time[%d] (time amplitude)\n", ch);
+
+ for (x = center; x > 0; x--)
+ fprintf(fp, "%15.10f %15.10f\n", delay - (double) x / rate, (double) s->analysis_buf[s->analysis_rdft_len - x]);
+
+ for (x = 0; x <= center; x++)
+ fprintf(fp, "%15.10f %15.10f\n", delay + (double)x / rate , (double) s->analysis_buf[x]);
+
+ av_rdft_calc(s->analysis_rdft, s->analysis_buf);
+
+ fprintf(fp, "\n\n# freq[%d] (frequency desired_gain actual_gain)\n", ch);
+
+ for (x = 0; x <= s->analysis_rdft_len/2; x++) {
+ int i = (x == s->analysis_rdft_len/2) ? 1 : 2 * x;
+ vx = (double)x * rate / s->analysis_rdft_len;
+ if (xlog)
+ vx = log2(0.05*vx);
+ ya = s->dump_buf[i];
+ yb = s->analysis_buf[i];
+ if (ylog) {
+ ya = 20.0 * log10(fabs(ya));
+ yb = 20.0 * log10(fabs(yb));
+ }
+ fprintf(fp, "%17.10f %17.10f %17.10f\n", vx, ya, yb);
+ }
+}
+
+static double entry_func(void *p, double freq, double gain)
+{
+ AVFilterContext *ctx = p;
+ FIREqualizerContext *s = ctx->priv;
+
+ if (s->nb_gain_entry >= NB_GAIN_ENTRY_MAX) {
+ av_log(ctx, AV_LOG_ERROR, "entry table overflow.\n");
+ s->gain_entry_err = AVERROR(EINVAL);
+ return 0;
+ }
+
+ if (isnan(freq)) {
+ av_log(ctx, AV_LOG_ERROR, "nan frequency (%g, %g).\n", freq, gain);
+ s->gain_entry_err = AVERROR(EINVAL);
+ return 0;
+ }
+
+ if (s->nb_gain_entry > 0 && freq <= s->gain_entry_tbl[s->nb_gain_entry - 1].freq) {
+ av_log(ctx, AV_LOG_ERROR, "unsorted frequency (%g, %g).\n", freq, gain);
+ s->gain_entry_err = AVERROR(EINVAL);
+ return 0;
+ }
+
+ s->gain_entry_tbl[s->nb_gain_entry].freq = freq;
+ s->gain_entry_tbl[s->nb_gain_entry].gain = gain;
+ s->nb_gain_entry++;
+ return 0;
+}
+
+static int gain_entry_compare(const void *key, const void *memb)
+{
+ const double *freq = key;
+ const GainEntry *entry = memb;
+
+ if (*freq < entry[0].freq)
+ return -1;
+ if (*freq > entry[1].freq)
+ return 1;
+ return 0;
+}
+
+static double gain_interpolate_func(void *p, double freq)
+{
+ AVFilterContext *ctx = p;
+ FIREqualizerContext *s = ctx->priv;
+ GainEntry *res;
+ double d0, d1, d;
+
+ if (isnan(freq))
+ return freq;
+
+ if (!s->nb_gain_entry)
+ return 0;
+
+ if (freq <= s->gain_entry_tbl[0].freq)
+ return s->gain_entry_tbl[0].gain;
+
+ if (freq >= s->gain_entry_tbl[s->nb_gain_entry-1].freq)
+ return s->gain_entry_tbl[s->nb_gain_entry-1].gain;
+
+ res = bsearch(&freq, &s->gain_entry_tbl, s->nb_gain_entry - 1, sizeof(*res), gain_entry_compare);
+ av_assert0(res);
+
+ d = res[1].freq - res[0].freq;
+ d0 = freq - res[0].freq;
+ d1 = res[1].freq - freq;
+
+ if (d0 && d1)
+ return (d0 * res[1].gain + d1 * res[0].gain) / d;
+
+ if (d0)
+ return res[1].gain;
+
+ return res[0].gain;
+}
+
+static double cubic_interpolate_func(void *p, double freq)
+{
+ AVFilterContext *ctx = p;
+ FIREqualizerContext *s = ctx->priv;
+ GainEntry *res;
+ double x, x2, x3;
+ double a, b, c, d;
+ double m0, m1, m2, msum, unit;
+
+ if (!s->nb_gain_entry)
+ return 0;
+
+ if (freq <= s->gain_entry_tbl[0].freq)
+ return s->gain_entry_tbl[0].gain;
+
+ if (freq >= s->gain_entry_tbl[s->nb_gain_entry-1].freq)
+ return s->gain_entry_tbl[s->nb_gain_entry-1].gain;
+
+ res = bsearch(&freq, &s->gain_entry_tbl, s->nb_gain_entry - 1, sizeof(*res), gain_entry_compare);
+ av_assert0(res);
+
+ unit = res[1].freq - res[0].freq;
+ m0 = res != s->gain_entry_tbl ?
+ unit * (res[0].gain - res[-1].gain) / (res[0].freq - res[-1].freq) : 0;
+ m1 = res[1].gain - res[0].gain;
+ m2 = res != s->gain_entry_tbl + s->nb_gain_entry - 2 ?
+ unit * (res[2].gain - res[1].gain) / (res[2].freq - res[1].freq) : 0;
+
+ msum = fabs(m0) + fabs(m1);
+ m0 = msum > 0 ? (fabs(m0) * m1 + fabs(m1) * m0) / msum : 0;
+ msum = fabs(m1) + fabs(m2);
+ m1 = msum > 0 ? (fabs(m1) * m2 + fabs(m2) * m1) / msum : 0;
+
+ d = res[0].gain;
+ c = m0;
+ b = 3 * res[1].gain - m1 - 2 * c - 3 * d;
+ a = res[1].gain - b - c - d;
+
+ x = (freq - res[0].freq) / unit;
+ x2 = x * x;
+ x3 = x2 * x;
+
+ return a * x3 + b * x2 + c * x + d;
+}
+
+static const char *const var_names[] = {
+ "f",
+ "sr",
+ "ch",
+ "chid",
+ "chs",
+ "chlayout",
+ NULL
+};
+
+enum VarOffset {
+ VAR_F,
+ VAR_SR,
+ VAR_CH,
+ VAR_CHID,
+ VAR_CHS,
+ VAR_CHLAYOUT,
+ VAR_NB
+};
+
+static int generate_kernel(AVFilterContext *ctx, const char *gain, const char *gain_entry)
+{
+ FIREqualizerContext *s = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
+ const char *gain_entry_func_names[] = { "entry", NULL };
+ const char *gain_func_names[] = { "gain_interpolate", "cubic_interpolate", NULL };
+ double (*gain_entry_funcs[])(void *, double, double) = { entry_func, NULL };
+ double (*gain_funcs[])(void *, double) = { gain_interpolate_func, cubic_interpolate_func, NULL };
+ double vars[VAR_NB];
+ AVExpr *gain_expr;
+ int ret, k, center, ch;
+ int xlog = s->scale == SCALE_LOGLIN || s->scale == SCALE_LOGLOG;
+ int ylog = s->scale == SCALE_LINLOG || s->scale == SCALE_LOGLOG;
+ FILE *dump_fp = NULL;
+
+ s->nb_gain_entry = 0;
+ s->gain_entry_err = 0;
+ if (gain_entry) {
+ double result = 0.0;
+ ret = av_expr_parse_and_eval(&result, gain_entry, NULL, NULL, NULL, NULL,
+ gain_entry_func_names, gain_entry_funcs, ctx, 0, ctx);
+ if (ret < 0)
+ return ret;
+ if (s->gain_entry_err < 0)
+ return s->gain_entry_err;
+ }
+
+ av_log(ctx, AV_LOG_DEBUG, "nb_gain_entry = %d.\n", s->nb_gain_entry);
+
+ ret = av_expr_parse(&gain_expr, gain, var_names,
+ gain_func_names, gain_funcs, NULL, NULL, 0, ctx);
+ if (ret < 0)
+ return ret;
+
+ if (s->dumpfile && (!s->dump_buf || !s->analysis_rdft || !(dump_fp = fopen(s->dumpfile, "w"))))
+ av_log(ctx, AV_LOG_WARNING, "dumping failed.\n");
+
+ vars[VAR_CHS] = inlink->channels;
+ vars[VAR_CHLAYOUT] = inlink->channel_layout;
+ vars[VAR_SR] = inlink->sample_rate;
+ for (ch = 0; ch < inlink->channels; ch++) {
+ float *rdft_buf = s->kernel_tmp_buf + ch * s->rdft_len;
+ double result;
+ vars[VAR_CH] = ch;
+ vars[VAR_CHID] = av_channel_layout_extract_channel(inlink->channel_layout, ch);
+ vars[VAR_F] = 0.0;
+ if (xlog)
+ vars[VAR_F] = log2(0.05 * vars[VAR_F]);
+ result = av_expr_eval(gain_expr, vars, ctx);
+ s->analysis_buf[0] = ylog ? pow(10.0, 0.05 * result) : result;
+
+ vars[VAR_F] = 0.5 * inlink->sample_rate;
+ if (xlog)
+ vars[VAR_F] = log2(0.05 * vars[VAR_F]);
+ result = av_expr_eval(gain_expr, vars, ctx);
+ s->analysis_buf[1] = ylog ? pow(10.0, 0.05 * result) : result;
+
+ for (k = 1; k < s->analysis_rdft_len/2; k++) {
+ vars[VAR_F] = k * ((double)inlink->sample_rate /(double)s->analysis_rdft_len);
+ if (xlog)
+ vars[VAR_F] = log2(0.05 * vars[VAR_F]);
+ result = av_expr_eval(gain_expr, vars, ctx);
+ s->analysis_buf[2*k] = ylog ? pow(10.0, 0.05 * result) : result;
+ s->analysis_buf[2*k+1] = 0.0;
+ }
+
+ if (s->dump_buf)
+ memcpy(s->dump_buf, s->analysis_buf, s->analysis_rdft_len * sizeof(*s->analysis_buf));
+
+ av_rdft_calc(s->analysis_irdft, s->analysis_buf);
+ center = s->fir_len / 2;
+
+ for (k = 0; k <= center; k++) {
+ double u = k * (M_PI/center);
+ double win;
+ switch (s->wfunc) {
+ case WFUNC_RECTANGULAR:
+ win = 1.0;
+ break;
+ case WFUNC_HANN:
+ win = 0.5 + 0.5 * cos(u);
+ break;
+ case WFUNC_HAMMING:
+ win = 0.53836 + 0.46164 * cos(u);
+ break;
+ case WFUNC_BLACKMAN:
+ win = 0.42 + 0.5 * cos(u) + 0.08 * cos(2*u);
+ break;
+ case WFUNC_NUTTALL3:
+ win = 0.40897 + 0.5 * cos(u) + 0.09103 * cos(2*u);
+ break;
+ case WFUNC_MNUTTALL3:
+ win = 0.4243801 + 0.4973406 * cos(u) + 0.0782793 * cos(2*u);
+ break;
+ case WFUNC_NUTTALL:
+ win = 0.355768 + 0.487396 * cos(u) + 0.144232 * cos(2*u) + 0.012604 * cos(3*u);
+ break;
+ case WFUNC_BNUTTALL:
+ win = 0.3635819 + 0.4891775 * cos(u) + 0.1365995 * cos(2*u) + 0.0106411 * cos(3*u);
+ break;
+ case WFUNC_BHARRIS:
+ win = 0.35875 + 0.48829 * cos(u) + 0.14128 * cos(2*u) + 0.01168 * cos(3*u);
+ break;
+ case WFUNC_TUKEY:
+ win = (u <= 0.5 * M_PI) ? 1.0 : (0.5 + 0.5 * cos(2*u - M_PI));
+ break;
+ default:
+ av_assert0(0);
+ }
+ s->analysis_buf[k] *= (2.0/s->analysis_rdft_len) * (2.0/s->rdft_len) * win;
+ if (k)
+ s->analysis_buf[s->analysis_rdft_len - k] = s->analysis_buf[k];
+ }
+
+ memset(s->analysis_buf + center + 1, 0, (s->analysis_rdft_len - s->fir_len) * sizeof(*s->analysis_buf));
+ memcpy(rdft_buf, s->analysis_buf, s->rdft_len/2 * sizeof(*s->analysis_buf));
+ memcpy(rdft_buf + s->rdft_len/2, s->analysis_buf + s->analysis_rdft_len - s->rdft_len/2, s->rdft_len/2 * sizeof(*s->analysis_buf));
+ av_rdft_calc(s->rdft, rdft_buf);
+
+ for (k = 0; k < s->rdft_len; k++) {
+ if (isnan(rdft_buf[k]) || isinf(rdft_buf[k])) {
+ av_log(ctx, AV_LOG_ERROR, "filter kernel contains nan or infinity.\n");
+ av_expr_free(gain_expr);
+ if (dump_fp)
+ fclose(dump_fp);
+ return AVERROR(EINVAL);
+ }
+ }
+
+ rdft_buf[s->rdft_len-1] = rdft_buf[1];
+ for (k = 0; k < s->rdft_len/2; k++)
+ rdft_buf[k] = rdft_buf[2*k];
+ rdft_buf[s->rdft_len/2] = rdft_buf[s->rdft_len-1];
+
+ if (dump_fp)
+ dump_fir(ctx, dump_fp, ch);
+
+ if (!s->multi)
+ break;
+ }
+
+ memcpy(s->kernel_buf, s->kernel_tmp_buf, (s->multi ? inlink->channels : 1) * s->rdft_len * sizeof(*s->kernel_buf));
+ av_expr_free(gain_expr);
+ if (dump_fp)
+ fclose(dump_fp);
+ return 0;
+}
+
+#define SELECT_GAIN(s) (s->gain_cmd ? s->gain_cmd : s->gain)
+#define SELECT_GAIN_ENTRY(s) (s->gain_entry_cmd ? s->gain_entry_cmd : s->gain_entry)
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ FIREqualizerContext *s = ctx->priv;
+ int rdft_bits;
+
+ common_uninit(s);
+
+ s->next_pts = 0;
+ s->frame_nsamples_max = 0;
+
+ s->fir_len = FFMAX(2 * (int)(inlink->sample_rate * s->delay) + 1, 3);
+ s->remaining = s->fir_len - 1;
+
+ for (rdft_bits = RDFT_BITS_MIN; rdft_bits <= RDFT_BITS_MAX; rdft_bits++) {
+ s->rdft_len = 1 << rdft_bits;
+ s->nsamples_max = s->rdft_len - s->fir_len + 1;
+ if (s->nsamples_max * 2 >= s->fir_len)
+ break;
+ }
+
+ if (rdft_bits > RDFT_BITS_MAX) {
+ av_log(ctx, AV_LOG_ERROR, "too large delay, please decrease it.\n");
+ return AVERROR(EINVAL);
+ }
+
+ if (!(s->rdft = av_rdft_init(rdft_bits, DFT_R2C)) || !(s->irdft = av_rdft_init(rdft_bits, IDFT_C2R)))
+ return AVERROR(ENOMEM);
+
+ if (s->fft2 && !s->multi && inlink->channels > 1 && !(s->fft_ctx = av_fft_init(rdft_bits, 0)))
+ return AVERROR(ENOMEM);
+
+ for ( ; rdft_bits <= RDFT_BITS_MAX; rdft_bits++) {
+ s->analysis_rdft_len = 1 << rdft_bits;
+ if (inlink->sample_rate <= s->accuracy * s->analysis_rdft_len)
+ break;
+ }
+
+ if (rdft_bits > RDFT_BITS_MAX) {
+ av_log(ctx, AV_LOG_ERROR, "too small accuracy, please increase it.\n");
+ return AVERROR(EINVAL);
+ }
+
+ if (!(s->analysis_irdft = av_rdft_init(rdft_bits, IDFT_C2R)))
+ return AVERROR(ENOMEM);
+
+ if (s->dumpfile) {
+ s->analysis_rdft = av_rdft_init(rdft_bits, DFT_R2C);
+ s->dump_buf = av_malloc_array(s->analysis_rdft_len, sizeof(*s->dump_buf));
+ }
+
+ s->analysis_buf = av_malloc_array(s->analysis_rdft_len, sizeof(*s->analysis_buf));
+ s->kernel_tmp_buf = av_malloc_array(s->rdft_len * (s->multi ? inlink->channels : 1), sizeof(*s->kernel_tmp_buf));
+ s->kernel_buf = av_malloc_array(s->rdft_len * (s->multi ? inlink->channels : 1), sizeof(*s->kernel_buf));
+ s->conv_buf = av_calloc(2 * s->rdft_len * inlink->channels, sizeof(*s->conv_buf));
+ s->conv_idx = av_calloc(inlink->channels, sizeof(*s->conv_idx));
+ if (!s->analysis_buf || !s->kernel_tmp_buf || !s->kernel_buf || !s->conv_buf || !s->conv_idx)
+ return AVERROR(ENOMEM);
+
+ av_log(ctx, AV_LOG_DEBUG, "sample_rate = %d, channels = %d, analysis_rdft_len = %d, rdft_len = %d, fir_len = %d, nsamples_max = %d.\n",
+ inlink->sample_rate, inlink->channels, s->analysis_rdft_len, s->rdft_len, s->fir_len, s->nsamples_max);
+
+ if (s->fixed)
+ inlink->min_samples = inlink->max_samples = inlink->partial_buf_size = s->nsamples_max;
+
+ return generate_kernel(ctx, SELECT_GAIN(s), SELECT_GAIN_ENTRY(s));
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
+{
+ AVFilterContext *ctx = inlink->dst;
+ FIREqualizerContext *s = ctx->priv;
+ int ch;
+
+ for (ch = 0; ch + 1 < inlink->channels && s->fft_ctx; ch += 2) {
+ fast_convolute2(s, s->kernel_buf, (FFTComplex *)(s->conv_buf + 2 * ch * s->rdft_len),
+ s->conv_idx + ch, (float *) frame->extended_data[ch],
+ (float *) frame->extended_data[ch+1], frame->nb_samples);
+ }
+
+ for ( ; ch < inlink->channels; ch++) {
+ fast_convolute(s, s->kernel_buf + (s->multi ? ch * s->rdft_len : 0),
+ s->conv_buf + 2 * ch * s->rdft_len, s->conv_idx + ch,
+ (float *) frame->extended_data[ch], frame->nb_samples);
+ }
+
+ s->next_pts = AV_NOPTS_VALUE;
+ if (frame->pts != AV_NOPTS_VALUE) {
+ s->next_pts = frame->pts + av_rescale_q(frame->nb_samples, av_make_q(1, inlink->sample_rate), inlink->time_base);
+ if (s->zero_phase)
+ frame->pts -= av_rescale_q(s->fir_len/2, av_make_q(1, inlink->sample_rate), inlink->time_base);
+ }
+ s->frame_nsamples_max = FFMAX(s->frame_nsamples_max, frame->nb_samples);
+ return ff_filter_frame(ctx->outputs[0], frame);
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ FIREqualizerContext *s= ctx->priv;
+ int ret;
+
+ ret = ff_request_frame(ctx->inputs[0]);
+ if (ret == AVERROR_EOF && s->remaining > 0 && s->frame_nsamples_max > 0) {
+ AVFrame *frame = ff_get_audio_buffer(outlink, FFMIN(s->remaining, s->frame_nsamples_max));
+
+ if (!frame)
+ return AVERROR(ENOMEM);
+
+ av_samples_set_silence(frame->extended_data, 0, frame->nb_samples, outlink->channels, frame->format);
+ frame->pts = s->next_pts;
+ s->remaining -= frame->nb_samples;
+ ret = filter_frame(ctx->inputs[0], frame);
+ }
+
+ return ret;
+}
+
+static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
+ char *res, int res_len, int flags)
+{
+ FIREqualizerContext *s = ctx->priv;
+ int ret = AVERROR(ENOSYS);
+
+ if (!strcmp(cmd, "gain")) {
+ char *gain_cmd;
+
+ if (SELECT_GAIN(s) && !strcmp(SELECT_GAIN(s), args)) {
+ av_log(ctx, AV_LOG_DEBUG, "equal gain, do not rebuild.\n");
+ return 0;
+ }
+
+ gain_cmd = av_strdup(args);
+ if (!gain_cmd)
+ return AVERROR(ENOMEM);
+
+ ret = generate_kernel(ctx, gain_cmd, SELECT_GAIN_ENTRY(s));
+ if (ret >= 0) {
+ av_freep(&s->gain_cmd);
+ s->gain_cmd = gain_cmd;
+ } else {
+ av_freep(&gain_cmd);
+ }
+ } else if (!strcmp(cmd, "gain_entry")) {
+ char *gain_entry_cmd;
+
+ if (SELECT_GAIN_ENTRY(s) && !strcmp(SELECT_GAIN_ENTRY(s), args)) {
+ av_log(ctx, AV_LOG_DEBUG, "equal gain_entry, do not rebuild.\n");
+ return 0;
+ }
+
+ gain_entry_cmd = av_strdup(args);
+ if (!gain_entry_cmd)
+ return AVERROR(ENOMEM);
+
+ ret = generate_kernel(ctx, SELECT_GAIN(s), gain_entry_cmd);
+ if (ret >= 0) {
+ av_freep(&s->gain_entry_cmd);
+ s->gain_entry_cmd = gain_entry_cmd;
+ } else {
+ av_freep(&gain_entry_cmd);
+ }
+ }
+
+ return ret;
+}
+
+static const AVFilterPad firequalizer_inputs[] = {
+ {
+ .name = "default",
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .needs_writable = 1,
+ },
+ { NULL }
+};
+
+static const AVFilterPad firequalizer_outputs[] = {
+ {
+ .name = "default",
+ .request_frame = request_frame,
+ .type = AVMEDIA_TYPE_AUDIO,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_firequalizer = {
+ .name = "firequalizer",
+ .description = NULL_IF_CONFIG_SMALL("Finite Impulse Response Equalizer."),
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .process_command = process_command,
+ .priv_size = sizeof(FIREqualizerContext),
+ .inputs = firequalizer_inputs,
+ .outputs = firequalizer_outputs,
+ .priv_class = &firequalizer_class,
+};
diff --git a/libavfilter/af_flanger.c b/libavfilter/af_flanger.c
new file mode 100644
index 0000000000..a92367c97a
--- /dev/null
+++ b/libavfilter/af_flanger.c
@@ -0,0 +1,246 @@
+/*
+ * Copyright (c) 2006 Rob Sykes <robs@users.sourceforge.net>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/avstring.h"
+#include "libavutil/opt.h"
+#include "libavutil/samplefmt.h"
+#include "avfilter.h"
+#include "audio.h"
+#include "internal.h"
+#include "generate_wave_table.h"
+
+#define INTERPOLATION_LINEAR 0
+#define INTERPOLATION_QUADRATIC 1
+
+typedef struct FlangerContext {
+ const AVClass *class;
+ double delay_min;
+ double delay_depth;
+ double feedback_gain;
+ double delay_gain;
+ double speed;
+ int wave_shape;
+ double channel_phase;
+ int interpolation;
+ double in_gain;
+ int max_samples;
+ uint8_t **delay_buffer;
+ int delay_buf_pos;
+ double *delay_last;
+ float *lfo;
+ int lfo_length;
+ int lfo_pos;
+} FlangerContext;
+
+#define OFFSET(x) offsetof(FlangerContext, x)
+#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption flanger_options[] = {
+ { "delay", "base delay in milliseconds", OFFSET(delay_min), AV_OPT_TYPE_DOUBLE, {.dbl=0}, 0, 30, A },
+ { "depth", "added swept delay in milliseconds", OFFSET(delay_depth), AV_OPT_TYPE_DOUBLE, {.dbl=2}, 0, 10, A },
+ { "regen", "percentage regeneration (delayed signal feedback)", OFFSET(feedback_gain), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -95, 95, A },
+ { "width", "percentage of delayed signal mixed with original", OFFSET(delay_gain), AV_OPT_TYPE_DOUBLE, {.dbl=71}, 0, 100, A },
+ { "speed", "sweeps per second (Hz)", OFFSET(speed), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0.1, 10, A },
+ { "shape", "swept wave shape", OFFSET(wave_shape), AV_OPT_TYPE_INT, {.i64=WAVE_SIN}, WAVE_SIN, WAVE_NB-1, A, "type" },
+ { "triangular", NULL, 0, AV_OPT_TYPE_CONST, {.i64=WAVE_TRI}, 0, 0, A, "type" },
+ { "t", NULL, 0, AV_OPT_TYPE_CONST, {.i64=WAVE_TRI}, 0, 0, A, "type" },
+ { "sinusoidal", NULL, 0, AV_OPT_TYPE_CONST, {.i64=WAVE_SIN}, 0, 0, A, "type" },
+ { "s", NULL, 0, AV_OPT_TYPE_CONST, {.i64=WAVE_SIN}, 0, 0, A, "type" },
+ { "phase", "swept wave percentage phase-shift for multi-channel", OFFSET(channel_phase), AV_OPT_TYPE_DOUBLE, {.dbl=25}, 0, 100, A },
+ { "interp", "delay-line interpolation", OFFSET(interpolation), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, A, "itype" },
+ { "linear", NULL, 0, AV_OPT_TYPE_CONST, {.i64=INTERPOLATION_LINEAR}, 0, 0, A, "itype" },
+ { "quadratic", NULL, 0, AV_OPT_TYPE_CONST, {.i64=INTERPOLATION_QUADRATIC}, 0, 0, A, "itype" },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(flanger);
+
+static int init(AVFilterContext *ctx)
+{
+ FlangerContext *s = ctx->priv;
+
+ s->feedback_gain /= 100;
+ s->delay_gain /= 100;
+ s->channel_phase /= 100;
+ s->delay_min /= 1000;
+ s->delay_depth /= 1000;
+ s->in_gain = 1 / (1 + s->delay_gain);
+ s->delay_gain /= 1 + s->delay_gain;
+ s->delay_gain *= 1 - fabs(s->feedback_gain);
+
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterChannelLayouts *layouts;
+ AVFilterFormats *formats;
+ static const enum AVSampleFormat sample_fmts[] = {
+ AV_SAMPLE_FMT_DBLP, AV_SAMPLE_FMT_NONE
+ };
+ int ret;
+
+ layouts = ff_all_channel_counts();
+ if (!layouts)
+ return AVERROR(ENOMEM);
+ ret = ff_set_common_channel_layouts(ctx, layouts);
+ if (ret < 0)
+ return ret;
+
+ formats = ff_make_format_list(sample_fmts);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ret = ff_set_common_formats(ctx, formats);
+ if (ret < 0)
+ return ret;
+
+ formats = ff_all_samplerates();
+ if (!formats)
+ return AVERROR(ENOMEM);
+ return ff_set_common_samplerates(ctx, formats);
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ FlangerContext *s = ctx->priv;
+
+ s->max_samples = (s->delay_min + s->delay_depth) * inlink->sample_rate + 2.5;
+ s->lfo_length = inlink->sample_rate / s->speed;
+ s->delay_last = av_calloc(inlink->channels, sizeof(*s->delay_last));
+ s->lfo = av_calloc(s->lfo_length, sizeof(*s->lfo));
+ if (!s->lfo || !s->delay_last)
+ return AVERROR(ENOMEM);
+
+ ff_generate_wave_table(s->wave_shape, AV_SAMPLE_FMT_FLT, s->lfo, s->lfo_length,
+ rint(s->delay_min * inlink->sample_rate),
+ s->max_samples - 2., 3 * M_PI_2);
+
+ return av_samples_alloc_array_and_samples(&s->delay_buffer, NULL,
+ inlink->channels, s->max_samples,
+ inlink->format, 0);
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
+{
+ AVFilterContext *ctx = inlink->dst;
+ FlangerContext *s = ctx->priv;
+ AVFrame *out_frame;
+ int chan, i;
+
+ if (av_frame_is_writable(frame)) {
+ out_frame = frame;
+ } else {
+ out_frame = ff_get_audio_buffer(inlink, frame->nb_samples);
+ if (!out_frame) {
+ av_frame_free(&frame);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out_frame, frame);
+ }
+
+ for (i = 0; i < frame->nb_samples; i++) {
+
+ s->delay_buf_pos = (s->delay_buf_pos + s->max_samples - 1) % s->max_samples;
+
+ for (chan = 0; chan < inlink->channels; chan++) {
+ double *src = (double *)frame->extended_data[chan];
+ double *dst = (double *)out_frame->extended_data[chan];
+ double delayed_0, delayed_1;
+ double delayed;
+ double in, out;
+ int channel_phase = chan * s->lfo_length * s->channel_phase + .5;
+ double delay = s->lfo[(s->lfo_pos + channel_phase) % s->lfo_length];
+ int int_delay = (int)delay;
+ double frac_delay = modf(delay, &delay);
+ double *delay_buffer = (double *)s->delay_buffer[chan];
+
+ in = src[i];
+ delay_buffer[s->delay_buf_pos] = in + s->delay_last[chan] *
+ s->feedback_gain;
+ delayed_0 = delay_buffer[(s->delay_buf_pos + int_delay++) % s->max_samples];
+ delayed_1 = delay_buffer[(s->delay_buf_pos + int_delay++) % s->max_samples];
+
+ if (s->interpolation == INTERPOLATION_LINEAR) {
+ delayed = delayed_0 + (delayed_1 - delayed_0) * frac_delay;
+ } else {
+ double a, b;
+ double delayed_2 = delay_buffer[(s->delay_buf_pos + int_delay++) % s->max_samples];
+ delayed_2 -= delayed_0;
+ delayed_1 -= delayed_0;
+ a = delayed_2 * .5 - delayed_1;
+ b = delayed_1 * 2 - delayed_2 *.5;
+ delayed = delayed_0 + (a * frac_delay + b) * frac_delay;
+ }
+
+ s->delay_last[chan] = delayed;
+ out = in * s->in_gain + delayed * s->delay_gain;
+ dst[i] = out;
+ }
+ s->lfo_pos = (s->lfo_pos + 1) % s->lfo_length;
+ }
+
+ if (frame != out_frame)
+ av_frame_free(&frame);
+
+ return ff_filter_frame(ctx->outputs[0], out_frame);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ FlangerContext *s = ctx->priv;
+
+ av_freep(&s->lfo);
+ av_freep(&s->delay_last);
+
+ if (s->delay_buffer)
+ av_freep(&s->delay_buffer[0]);
+ av_freep(&s->delay_buffer);
+}
+
+static const AVFilterPad flanger_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad flanger_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_flanger = {
+ .name = "flanger",
+ .description = NULL_IF_CONFIG_SMALL("Apply a flanging effect to the audio."),
+ .query_formats = query_formats,
+ .priv_size = sizeof(FlangerContext),
+ .priv_class = &flanger_class,
+ .init = init,
+ .uninit = uninit,
+ .inputs = flanger_inputs,
+ .outputs = flanger_outputs,
+};
diff --git a/libavfilter/af_hdcd.c b/libavfilter/af_hdcd.c
index b9dadecca4..1248fd9b75 100644
--- a/libavfilter/af_hdcd.c
+++ b/libavfilter/af_hdcd.c
@@ -1,71 +1,1525 @@
/*
- * This file is part of Libav.
+ * Copyright (C) 2010, Chris Moeller,
+ * All rights reserved.
+ * Optimizations by Gumboot
+ * Additional work by Burt P.
+ * Original code reverse engineered from HDCD decoder library by Christopher Key,
+ * which was likely reverse engineered from Windows Media Player.
*
- * Libav is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * Libav is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of its contributors may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * HDCD is High Definition Compatible Digital
+ * http://wiki.hydrogenaud.io/index.php?title=High_Definition_Compatible_Digital
*
- * You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ * More information about HDCD-encoded audio CDs:
+ * http://www.audiomisc.co.uk/HFN/HDCD/Enigma.html
+ * http://www.audiomisc.co.uk/HFN/HDCD/Examined.html
*/
/**
* @file
- * HDCD decoding filter, using libhdcd
+ * HDCD decoding filter
*/
-#include <hdcd/hdcd_simple.h>
-
-#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
-
-#include "audio.h"
+#include "libavutil/avassert.h"
#include "avfilter.h"
-#include "formats.h"
#include "internal.h"
+#include "audio.h"
+
+#define PEAK_EXT_LEVEL 0x5981 /* + sizeof(peaktab)-1 = 0x8000 */
+static const uint32_t peaktab[0x2680] = {
+ 0x2cc08300, 0x2cc10600, 0x2cc18900, 0x2cc20c00, 0x2cc28f00, 0x2cc31200, 0x2cc39500, 0x2cc41800, 0x2cc49b00, 0x2cc51e00, 0x2cc5a100, 0x2cc62400, 0x2cc6a700, 0x2cc72a00, 0x2cc7ad00, 0x2cc83000,
+ 0x2cc8b300, 0x2cc93600, 0x2cc9b900, 0x2cca3c00, 0x2ccabf00, 0x2ccb4200, 0x2ccbc500, 0x2ccc4800, 0x2ccccb00, 0x2ccd4e00, 0x2ccdd100, 0x2cce5400, 0x2cced700, 0x2ccf5a00, 0x2ccfdd00, 0x2cd06000,
+ 0x2cd0e300, 0x2cd16600, 0x2cd1e900, 0x2cd26c00, 0x2cd2ef00, 0x2cd37200, 0x2cd3f500, 0x2cd47800, 0x2cd4fb00, 0x2cd57e00, 0x2cd60100, 0x2cd68400, 0x2cd70700, 0x2cd78a00, 0x2cd80d00, 0x2cd89000,
+ 0x2cd91300, 0x2cd99600, 0x2cda1900, 0x2cda9c00, 0x2cdb1f00, 0x2cdba200, 0x2cdc2500, 0x2cdca800, 0x2cdd2b00, 0x2cddae00, 0x2cde3100, 0x2cdeb400, 0x2cdf3700, 0x2cdfba00, 0x2ce03d00, 0x2ce0c000,
+ 0x2ce14300, 0x2ce1c600, 0x2ce24900, 0x2ce2cc00, 0x2ce34f00, 0x2ce3d200, 0x2ce45500, 0x2ce4d800, 0x2ce55b00, 0x2ce5de00, 0x2ce66100, 0x2ce6e400, 0x2ce76700, 0x2ce7ea00, 0x2ce86d00, 0x2ce8f000,
+ 0x2ce97300, 0x2ce9f600, 0x2cea7900, 0x2ceafc00, 0x2ceb7f00, 0x2cec0200, 0x2cec8500, 0x2ced0800, 0x2ced8b00, 0x2cee0e00, 0x2cee9100, 0x2cef1400, 0x2cef9700, 0x2cf01a00, 0x2cf09d00, 0x2cf12000,
+ 0x2cf1a300, 0x2cf22600, 0x2cf2a900, 0x2cf32c00, 0x2cf3af00, 0x2cf43200, 0x2cf4b500, 0x2cf53800, 0x2cf5bb00, 0x2cf63e00, 0x2cf6c100, 0x2cf74400, 0x2cf7c700, 0x2cf84a00, 0x2cf8cd00, 0x2cf95000,
+ 0x2cf9d300, 0x2cfa5600, 0x2cfad900, 0x2cfb5c00, 0x2cfbdf00, 0x2cfc6200, 0x2cfce500, 0x2cfd6800, 0x2cfdeb00, 0x2cfe6e00, 0x2cfef100, 0x2cff7400, 0x2cfff700, 0x2d007a00, 0x2d00fd00, 0x2d018000,
+ 0x2d020300, 0x2d028600, 0x2d030900, 0x2d038c00, 0x2d040f00, 0x2d049200, 0x2d051500, 0x2d059800, 0x2d061b00, 0x2d069e00, 0x2d072100, 0x2d07a400, 0x2d082700, 0x2d08aa00, 0x2d092d00, 0x2d09b000,
+ 0x2d0a3300, 0x2d0ab600, 0x2d0b3900, 0x2d0bbc00, 0x2d0c3f00, 0x2d0cc200, 0x2d0d4500, 0x2d0dc800, 0x2d0e4b00, 0x2d0ece00, 0x2d0f5100, 0x2d0fd400, 0x2d105700, 0x2d10da00, 0x2d115d00, 0x2d11e000,
+ 0x2d126300, 0x2d12e600, 0x2d136900, 0x2d13ec00, 0x2d146f00, 0x2d14f200, 0x2d157500, 0x2d15f800, 0x2d167b00, 0x2d16fe00, 0x2d178100, 0x2d180400, 0x2d188700, 0x2d190a00, 0x2d198d00, 0x2d1a1000,
+ 0x2d1a9300, 0x2d1b1600, 0x2d1b9900, 0x2d1c1c00, 0x2d1c9f00, 0x2d1d2200, 0x2d1da500, 0x2d1e2800, 0x2d1eab00, 0x2d1f2e00, 0x2d1fb100, 0x2d203400, 0x2d20b700, 0x2d213a00, 0x2d21bd00, 0x2d224000,
+ 0x2d22c300, 0x2d234600, 0x2d23c900, 0x2d244c00, 0x2d24cf00, 0x2d255200, 0x2d25d500, 0x2d265800, 0x2d26db00, 0x2d275e00, 0x2d27e100, 0x2d286400, 0x2d28e700, 0x2d296a00, 0x2d29ed00, 0x2d2a7000,
+ 0x2d2af300, 0x2d2b7600, 0x2d2bf900, 0x2d2c7c00, 0x2d2cff00, 0x2d2d8200, 0x2d2e0500, 0x2d2e8800, 0x2d2f0b00, 0x2d2f8e00, 0x2d301100, 0x2d309400, 0x2d311700, 0x2d319a00, 0x2d321d00, 0x2d32a000,
+ 0x2d332300, 0x2d33a600, 0x2d342900, 0x2d34ac00, 0x2d352f00, 0x2d35b200, 0x2d363500, 0x2d36b800, 0x2d373b00, 0x2d37be00, 0x2d384100, 0x2d38c400, 0x2d394700, 0x2d39ca00, 0x2d3a4d00, 0x2d3ad000,
+ 0x2d3b5300, 0x2d3bd600, 0x2d3c5900, 0x2d3cdc00, 0x2d3d5f00, 0x2d3de200, 0x2d3e6500, 0x2d3ee800, 0x2d3f6b00, 0x2d3fee00, 0x2d407100, 0x2d40f400, 0x2d417700, 0x2d41fa00, 0x2d427d00, 0x2d430000,
+ 0x2d438300, 0x2d440600, 0x2d448900, 0x2d450c00, 0x2d458f00, 0x2d461200, 0x2d469500, 0x2d471800, 0x2d479b00, 0x2d481e00, 0x2d48a100, 0x2d492400, 0x2d49a700, 0x2d4a2a00, 0x2d4aad00, 0x2d4b3000,
+ 0x2d4bb300, 0x2d4c3600, 0x2d4cb900, 0x2d4d3c00, 0x2d4dbf00, 0x2d4e4200, 0x2d4ec500, 0x2d4f4800, 0x2d4fcb00, 0x2d504e00, 0x2d50d100, 0x2d515400, 0x2d51d700, 0x2d525a00, 0x2d52dd00, 0x2d536000,
+ 0x2d53e300, 0x2d546600, 0x2d54e900, 0x2d556c00, 0x2d55ef00, 0x2d567200, 0x2d56f500, 0x2d577800, 0x2d57fb00, 0x2d587e00, 0x2d590100, 0x2d598400, 0x2d5a0700, 0x2d5a8a00, 0x2d5b0d00, 0x2d5b9000,
+ 0x2d5c1300, 0x2d5c9600, 0x2d5d1900, 0x2d5d9c00, 0x2d5e1f00, 0x2d5ea200, 0x2d5f2500, 0x2d5fa800, 0x2d602b00, 0x2d60ae00, 0x2d613100, 0x2d61b400, 0x2d623700, 0x2d62ba00, 0x2d633d00, 0x2d63c000,
+ 0x2d644300, 0x2d64c600, 0x2d654900, 0x2d65cc00, 0x2d664f00, 0x2d66d200, 0x2d675500, 0x2d67d800, 0x2d685b00, 0x2d68de00, 0x2d696100, 0x2d69e400, 0x2d6a6700, 0x2d6aea00, 0x2d6b6d00, 0x2d6bf000,
+ 0x2d6c7300, 0x2d6cf600, 0x2d6d7900, 0x2d6dfc00, 0x2d6e7f00, 0x2d6f0200, 0x2d6f8500, 0x2d700800, 0x2d708b00, 0x2d710e00, 0x2d719100, 0x2d721400, 0x2d729700, 0x2d731a00, 0x2d739d00, 0x2d742000,
+ 0x2d74a300, 0x2d752600, 0x2d75a900, 0x2d762c00, 0x2d76af00, 0x2d773200, 0x2d77b500, 0x2d783800, 0x2d78bb00, 0x2d793e00, 0x2d79c100, 0x2d7a4400, 0x2d7ac700, 0x2d7b4a00, 0x2d7bcd00, 0x2d7c5000,
+ 0x2d7cd300, 0x2d7d5600, 0x2d7dd900, 0x2d7e5c00, 0x2d7edf00, 0x2d7f6200, 0x2d7fe500, 0x2d806800, 0x2d80eb00, 0x2d816e00, 0x2d81f100, 0x2d827400, 0x2d82f700, 0x2d837a00, 0x2d83fd00, 0x2d848000,
+ 0x2d850300, 0x2d858600, 0x2d860900, 0x2d868c00, 0x2d870f00, 0x2d879200, 0x2d881500, 0x2d889800, 0x2d891b00, 0x2d899e00, 0x2d8a2100, 0x2d8aa400, 0x2d8b2700, 0x2d8baa00, 0x2d8c2d00, 0x2d8cb000,
+ 0x2d8d3300, 0x2d8db600, 0x2d8e3900, 0x2d8ebc00, 0x2d8f3f00, 0x2d8fc200, 0x2d904500, 0x2d90c800, 0x2d914b00, 0x2d91ce00, 0x2d925100, 0x2d92d400, 0x2d935700, 0x2d93da00, 0x2d945d00, 0x2d94e000,
+ 0x2d956300, 0x2d95e600, 0x2d966900, 0x2d96ec00, 0x2d976f00, 0x2d97f200, 0x2d987500, 0x2d98f800, 0x2d997b00, 0x2d99fe00, 0x2d9a8100, 0x2d9b0400, 0x2d9b8700, 0x2d9c0a00, 0x2d9c8d00, 0x2d9d1000,
+ 0x2d9d9300, 0x2d9e1600, 0x2d9e9900, 0x2d9f1c00, 0x2d9f9f00, 0x2da02200, 0x2da0a500, 0x2da12800, 0x2da1ab00, 0x2da22e00, 0x2da2b100, 0x2da33400, 0x2da3b700, 0x2da43a00, 0x2da4bd00, 0x2da54000,
+ 0x2da5c300, 0x2da64600, 0x2da6c900, 0x2da74c00, 0x2da7cf00, 0x2da85200, 0x2da8d500, 0x2da95800, 0x2da9db00, 0x2daa5e00, 0x2daae100, 0x2dab6400, 0x2dabe700, 0x2dac6a00, 0x2daced00, 0x2dad7000,
+ 0x2dadf300, 0x2dae7600, 0x2daef900, 0x2daf7c00, 0x2dafff00, 0x2db08200, 0x2db10500, 0x2db18800, 0x2db20b00, 0x2db28e00, 0x2db31100, 0x2db39400, 0x2db41700, 0x2db49a00, 0x2db51d00, 0x2db5a000,
+ 0x2db62300, 0x2db6a600, 0x2db72900, 0x2db7ac00, 0x2db82f00, 0x2db8b200, 0x2db93500, 0x2db9b800, 0x2dba3b00, 0x2dbabe00, 0x2dbb4100, 0x2dbbc400, 0x2dbc4700, 0x2dbcca00, 0x2dbd4d00, 0x2dbdd000,
+ 0x2dbe5300, 0x2dbed600, 0x2dbf5900, 0x2dbfdc00, 0x2dc05f00, 0x2dc0e200, 0x2dc16500, 0x2dc1e800, 0x2dc26b00, 0x2dc2ee00, 0x2dc37100, 0x2dc3f400, 0x2dc47700, 0x2dc4fa00, 0x2dc57d00, 0x2dc60000,
+ 0x2dc68700, 0x2dc70e00, 0x2dc79500, 0x2dc81c00, 0x2dc8a300, 0x2dc92a00, 0x2dc9b100, 0x2dca3800, 0x2dcabf00, 0x2dcb4600, 0x2dcbcd00, 0x2dcc5400, 0x2dccdb00, 0x2dcd6200, 0x2dcde900, 0x2dce7000,
+ 0x2dcef700, 0x2dcf7e00, 0x2dd00500, 0x2dd08c00, 0x2dd11300, 0x2dd19a00, 0x2dd22100, 0x2dd2a800, 0x2dd32f00, 0x2dd3b600, 0x2dd43d00, 0x2dd4c400, 0x2dd54b00, 0x2dd5d200, 0x2dd65900, 0x2dd6e000,
+ 0x2dd76700, 0x2dd7ee00, 0x2dd87500, 0x2dd8fc00, 0x2dd98300, 0x2dda0a00, 0x2dda9100, 0x2ddb1800, 0x2ddb9f00, 0x2ddc2600, 0x2ddcad00, 0x2ddd3400, 0x2dddbb00, 0x2dde4200, 0x2ddec900, 0x2ddf5000,
+ 0x2ddfd700, 0x2de05e00, 0x2de0e500, 0x2de16c00, 0x2de1f300, 0x2de27a00, 0x2de30100, 0x2de38800, 0x2de40f00, 0x2de49600, 0x2de51d00, 0x2de5a400, 0x2de62b00, 0x2de6b200, 0x2de73900, 0x2de7c000,
+ 0x2de84700, 0x2de8ce00, 0x2de95500, 0x2de9dc00, 0x2dea6300, 0x2deaea00, 0x2deb7100, 0x2debf800, 0x2dec7f00, 0x2ded0600, 0x2ded8d00, 0x2dee1400, 0x2dee9b00, 0x2def2200, 0x2defa900, 0x2df03000,
+ 0x2df0b700, 0x2df13e00, 0x2df1c500, 0x2df24c00, 0x2df2d300, 0x2df35a00, 0x2df3e100, 0x2df46800, 0x2df4ef00, 0x2df57600, 0x2df5fd00, 0x2df68400, 0x2df70b00, 0x2df79200, 0x2df81900, 0x2df8a000,
+ 0x2df92700, 0x2df9ae00, 0x2dfa3500, 0x2dfabc00, 0x2dfb4300, 0x2dfbca00, 0x2dfc5100, 0x2dfcd800, 0x2dfd5f00, 0x2dfde600, 0x2dfe6d00, 0x2dfef400, 0x2dff7b00, 0x2e000200, 0x2e008900, 0x2e011000,
+ 0x2e019700, 0x2e021e00, 0x2e02a500, 0x2e032c00, 0x2e03b300, 0x2e043a00, 0x2e04c100, 0x2e054800, 0x2e05cf00, 0x2e065600, 0x2e06dd00, 0x2e076400, 0x2e07eb00, 0x2e087200, 0x2e08f900, 0x2e098000,
+ 0x2e0a0700, 0x2e0a8e00, 0x2e0b1500, 0x2e0b9c00, 0x2e0c2300, 0x2e0caa00, 0x2e0d3100, 0x2e0db800, 0x2e0e3f00, 0x2e0ec600, 0x2e0f4d00, 0x2e0fd400, 0x2e105b00, 0x2e10e200, 0x2e116900, 0x2e11f000,
+ 0x2e127700, 0x2e12fe00, 0x2e138500, 0x2e140c00, 0x2e149300, 0x2e151a00, 0x2e15a100, 0x2e162800, 0x2e16af00, 0x2e173600, 0x2e17bd00, 0x2e184400, 0x2e18cb00, 0x2e195200, 0x2e19d900, 0x2e1a6000,
+ 0x2e1ae700, 0x2e1b6e00, 0x2e1bf500, 0x2e1c7c00, 0x2e1d0300, 0x2e1d8a00, 0x2e1e1100, 0x2e1e9800, 0x2e1f1f00, 0x2e1fa600, 0x2e202d00, 0x2e20b400, 0x2e213b00, 0x2e21c200, 0x2e224900, 0x2e22d000,
+ 0x2e235700, 0x2e23de00, 0x2e246500, 0x2e24ec00, 0x2e257300, 0x2e25fa00, 0x2e268100, 0x2e270800, 0x2e278f00, 0x2e281600, 0x2e289d00, 0x2e292400, 0x2e29ab00, 0x2e2a3200, 0x2e2ab900, 0x2e2b4000,
+ 0x2e2bc700, 0x2e2c4e00, 0x2e2cd500, 0x2e2d5c00, 0x2e2de300, 0x2e2e6a00, 0x2e2ef100, 0x2e2f7800, 0x2e2fff00, 0x2e308600, 0x2e310d00, 0x2e319400, 0x2e321b00, 0x2e32a200, 0x2e332900, 0x2e33b000,
+ 0x2e343700, 0x2e34be00, 0x2e354500, 0x2e35cc00, 0x2e365300, 0x2e36da00, 0x2e376100, 0x2e37e800, 0x2e386f00, 0x2e38f600, 0x2e397d00, 0x2e3a0400, 0x2e3a8b00, 0x2e3b1200, 0x2e3b9900, 0x2e3c2000,
+ 0x2e3ca700, 0x2e3d2e00, 0x2e3db500, 0x2e3e3c00, 0x2e3ec300, 0x2e3f4a00, 0x2e3fd100, 0x2e405800, 0x2e40df00, 0x2e416600, 0x2e41ed00, 0x2e427400, 0x2e42fb00, 0x2e438200, 0x2e440900, 0x2e449000,
+ 0x2e451700, 0x2e459e00, 0x2e462500, 0x2e46ac00, 0x2e473300, 0x2e47ba00, 0x2e484100, 0x2e48c800, 0x2e494f00, 0x2e49d600, 0x2e4a5d00, 0x2e4ae400, 0x2e4b6b00, 0x2e4bf200, 0x2e4c7900, 0x2e4d0000,
+ 0x2e4d8700, 0x2e4e0e00, 0x2e4e9500, 0x2e4f1c00, 0x2e4fa300, 0x2e502a00, 0x2e50b100, 0x2e513800, 0x2e51bf00, 0x2e524600, 0x2e52cd00, 0x2e535400, 0x2e53db00, 0x2e546200, 0x2e54e900, 0x2e557000,
+ 0x2e55f700, 0x2e567e00, 0x2e570500, 0x2e578c00, 0x2e581300, 0x2e589a00, 0x2e592100, 0x2e59a800, 0x2e5a2f00, 0x2e5ab600, 0x2e5b3d00, 0x2e5bc400, 0x2e5c4b00, 0x2e5cd200, 0x2e5d5900, 0x2e5de000,
+ 0x2e5e6700, 0x2e5eee00, 0x2e5f7500, 0x2e5ffc00, 0x2e608300, 0x2e610a00, 0x2e619100, 0x2e621800, 0x2e629f00, 0x2e632600, 0x2e63ad00, 0x2e643400, 0x2e64bb00, 0x2e654200, 0x2e65c900, 0x2e665000,
+ 0x2e66d700, 0x2e675e00, 0x2e67e500, 0x2e686c00, 0x2e68f300, 0x2e697a00, 0x2e6a0100, 0x2e6a8800, 0x2e6b0f00, 0x2e6b9600, 0x2e6c1d00, 0x2e6ca400, 0x2e6d2b00, 0x2e6db200, 0x2e6e3900, 0x2e6ec000,
+ 0x2e6f4700, 0x2e6fce00, 0x2e705500, 0x2e70dc00, 0x2e716300, 0x2e71ea00, 0x2e727100, 0x2e72f800, 0x2e737f00, 0x2e740600, 0x2e748d00, 0x2e751400, 0x2e759b00, 0x2e762200, 0x2e76a900, 0x2e773000,
+ 0x2e77b700, 0x2e783e00, 0x2e78c500, 0x2e794c00, 0x2e79d300, 0x2e7a5a00, 0x2e7ae100, 0x2e7b6800, 0x2e7bef00, 0x2e7c7600, 0x2e7cfd00, 0x2e7d8400, 0x2e7e0b00, 0x2e7e9200, 0x2e7f1900, 0x2e7fa000,
+ 0x2e802700, 0x2e80ae00, 0x2e813500, 0x2e81bc00, 0x2e824300, 0x2e82ca00, 0x2e835100, 0x2e83d800, 0x2e845f00, 0x2e84e600, 0x2e856d00, 0x2e85f400, 0x2e867b00, 0x2e870200, 0x2e878900, 0x2e881000,
+ 0x2e889700, 0x2e891e00, 0x2e89a500, 0x2e8a2c00, 0x2e8ab300, 0x2e8b3a00, 0x2e8bc100, 0x2e8c4800, 0x2e8ccf00, 0x2e8d5600, 0x2e8ddd00, 0x2e8e6400, 0x2e8eeb00, 0x2e8f7200, 0x2e8ff900, 0x2e908000,
+ 0x2e910700, 0x2e918e00, 0x2e921500, 0x2e929c00, 0x2e932300, 0x2e93aa00, 0x2e943100, 0x2e94b800, 0x2e953f00, 0x2e95c600, 0x2e964d00, 0x2e96d400, 0x2e975b00, 0x2e97e200, 0x2e986900, 0x2e98f000,
+ 0x2e997700, 0x2e99fe00, 0x2e9a8500, 0x2e9b0c00, 0x2e9b9300, 0x2e9c1a00, 0x2e9ca100, 0x2e9d2800, 0x2e9daf00, 0x2e9e3600, 0x2e9ebd00, 0x2e9f4400, 0x2e9fcb00, 0x2ea05200, 0x2ea0d900, 0x2ea16000,
+ 0x2ea1e700, 0x2ea26e00, 0x2ea2f500, 0x2ea37c00, 0x2ea40300, 0x2ea48a00, 0x2ea51100, 0x2ea59800, 0x2ea61f00, 0x2ea6a600, 0x2ea72d00, 0x2ea7b400, 0x2ea83b00, 0x2ea8c200, 0x2ea94900, 0x2ea9d000,
+ 0x2eaa5700, 0x2eaade00, 0x2eab6500, 0x2eabec00, 0x2eac7300, 0x2eacfa00, 0x2ead8100, 0x2eae0800, 0x2eae8f00, 0x2eaf1600, 0x2eaf9d00, 0x2eb02400, 0x2eb0ab00, 0x2eb13200, 0x2eb1b900, 0x2eb24000,
+ 0x2eb2c700, 0x2eb34e00, 0x2eb3d500, 0x2eb45c00, 0x2eb4e300, 0x2eb56a00, 0x2eb5f100, 0x2eb67800, 0x2eb6ff00, 0x2eb78600, 0x2eb80d00, 0x2eb89400, 0x2eb91b00, 0x2eb9a200, 0x2eba2900, 0x2ebab000,
+ 0x2ebb3700, 0x2ebbbe00, 0x2ebc4500, 0x2ebccc00, 0x2ebd5300, 0x2ebdda00, 0x2ebe6100, 0x2ebee800, 0x2ebf6f00, 0x2ebff600, 0x2ec07d00, 0x2ec10400, 0x2ec18b00, 0x2ec21200, 0x2ec29900, 0x2ec32000,
+ 0x2ec3a700, 0x2ec42e00, 0x2ec4b500, 0x2ec53c00, 0x2ec5c300, 0x2ec64a00, 0x2ec6d100, 0x2ec75800, 0x2ec7df00, 0x2ec86600, 0x2ec8ed00, 0x2ec97400, 0x2ec9fb00, 0x2eca8200, 0x2ecb0900, 0x2ecb9000,
+ 0x2ecc1700, 0x2ecc9e00, 0x2ecd2500, 0x2ecdac00, 0x2ece3300, 0x2eceba00, 0x2ecf4100, 0x2ecfc800, 0x2ed04f00, 0x2ed0d600, 0x2ed15d00, 0x2ed1e400, 0x2ed26b00, 0x2ed2f200, 0x2ed37900, 0x2ed40000,
+ 0x2ed48700, 0x2ed50e00, 0x2ed59500, 0x2ed61c00, 0x2ed6a300, 0x2ed72a00, 0x2ed7b100, 0x2ed83800, 0x2ed8bf00, 0x2ed94600, 0x2ed9cd00, 0x2eda5400, 0x2edadb00, 0x2edb6200, 0x2edbe900, 0x2edc7000,
+ 0x2edcf700, 0x2edd7e00, 0x2ede0500, 0x2ede8c00, 0x2edf1300, 0x2edf9a00, 0x2ee02100, 0x2ee0a800, 0x2ee12f00, 0x2ee1b600, 0x2ee23d00, 0x2ee2c400, 0x2ee34b00, 0x2ee3d200, 0x2ee45900, 0x2ee4e000,
+ 0x2ee56700, 0x2ee5ee00, 0x2ee67500, 0x2ee6fc00, 0x2ee78300, 0x2ee80a00, 0x2ee89100, 0x2ee91800, 0x2ee99f00, 0x2eea2600, 0x2eeaad00, 0x2eeb3400, 0x2eebbb00, 0x2eec4200, 0x2eecc900, 0x2eed5000,
+ 0x2eedd700, 0x2eee5e00, 0x2eeee500, 0x2eef6c00, 0x2eeff300, 0x2ef07a00, 0x2ef10100, 0x2ef18800, 0x2ef20f00, 0x2ef29600, 0x2ef31d00, 0x2ef3a400, 0x2ef42b00, 0x2ef4b200, 0x2ef53900, 0x2ef5c000,
+ 0x2ef64700, 0x2ef6ce00, 0x2ef75500, 0x2ef7dc00, 0x2ef86300, 0x2ef8ea00, 0x2ef97100, 0x2ef9f800, 0x2efa7f00, 0x2efb0600, 0x2efb8d00, 0x2efc1400, 0x2efc9b00, 0x2efd2200, 0x2efda900, 0x2efe3000,
+ 0x2efeb700, 0x2eff3e00, 0x2effc500, 0x2f004c00, 0x2f00d300, 0x2f015a00, 0x2f01e100, 0x2f026800, 0x2f02ef00, 0x2f037600, 0x2f03fd00, 0x2f048400, 0x2f050b00, 0x2f059200, 0x2f061900, 0x2f06a000,
+ 0x2f072700, 0x2f07ae00, 0x2f083500, 0x2f08bc00, 0x2f094300, 0x2f09ca00, 0x2f0a5100, 0x2f0ad800, 0x2f0b5f00, 0x2f0be600, 0x2f0c6d00, 0x2f0cf400, 0x2f0d7b00, 0x2f0e0200, 0x2f0e8900, 0x2f0f1000,
+ 0x2f0f9700, 0x2f101e00, 0x2f10a500, 0x2f112c00, 0x2f11b300, 0x2f123a00, 0x2f12c100, 0x2f134800, 0x2f13cf00, 0x2f145600, 0x2f14dd00, 0x2f156400, 0x2f15eb00, 0x2f167200, 0x2f16f900, 0x2f178000,
+ 0x2f180700, 0x2f188e00, 0x2f191500, 0x2f199c00, 0x2f1a2300, 0x2f1aaa00, 0x2f1b3100, 0x2f1bb800, 0x2f1c3f00, 0x2f1cc600, 0x2f1d4d00, 0x2f1dd400, 0x2f1e5b00, 0x2f1ee200, 0x2f1f6900, 0x2f1ff000,
+ 0x2f207700, 0x2f20fe00, 0x2f218500, 0x2f220c00, 0x2f229300, 0x2f231a00, 0x2f23a100, 0x2f242800, 0x2f24af00, 0x2f253600, 0x2f25bd00, 0x2f264400, 0x2f26cb00, 0x2f275200, 0x2f27d900, 0x2f286000,
+ 0x2f28e700, 0x2f296e00, 0x2f29f500, 0x2f2a7c00, 0x2f2b0300, 0x2f2b8a00, 0x2f2c1100, 0x2f2c9800, 0x2f2d1f00, 0x2f2da600, 0x2f2e2d00, 0x2f2eb400, 0x2f2f3b00, 0x2f2fc200, 0x2f304900, 0x2f30d000,
+ 0x2f315700, 0x2f31de00, 0x2f326500, 0x2f32ec00, 0x2f337300, 0x2f33fa00, 0x2f348100, 0x2f350800, 0x2f358f00, 0x2f361600, 0x2f369d00, 0x2f372400, 0x2f37ab00, 0x2f383200, 0x2f38b900, 0x2f394000,
+ 0x2f39c700, 0x2f3a4e00, 0x2f3ad500, 0x2f3b5c00, 0x2f3be300, 0x2f3c6a00, 0x2f3cf100, 0x2f3d7800, 0x2f3dff00, 0x2f3e8600, 0x2f3f0d00, 0x2f3f9400, 0x2f401b00, 0x2f40a200, 0x2f412900, 0x2f41b000,
+ 0x2f423700, 0x2f42be00, 0x2f434500, 0x2f43cc00, 0x2f445300, 0x2f44da00, 0x2f456100, 0x2f45e800, 0x2f466f00, 0x2f46f600, 0x2f477d00, 0x2f480400, 0x2f488b00, 0x2f491200, 0x2f499900, 0x2f4a2000,
+ 0x2f4aa700, 0x2f4b2e00, 0x2f4bb500, 0x2f4c3c00, 0x2f4cc300, 0x2f4d4a00, 0x2f4dd100, 0x2f4e5800, 0x2f4edf00, 0x2f4f6600, 0x2f4fed00, 0x2f507400, 0x2f50fb00, 0x2f518200, 0x2f520900, 0x2f529000,
+ 0x2f531700, 0x2f539e00, 0x2f542500, 0x2f54ac00, 0x2f553300, 0x2f55ba00, 0x2f564100, 0x2f56c800, 0x2f574f00, 0x2f57d600, 0x2f585d00, 0x2f58e400, 0x2f596b00, 0x2f59f200, 0x2f5a7900, 0x2f5b0000,
+ 0x2f5b8700, 0x2f5c0e00, 0x2f5c9500, 0x2f5d1c00, 0x2f5da300, 0x2f5e2a00, 0x2f5eb100, 0x2f5f3800, 0x2f5fbf00, 0x2f604600, 0x2f60cd00, 0x2f615400, 0x2f61db00, 0x2f626200, 0x2f62e900, 0x2f637000,
+ 0x2f63f700, 0x2f647e00, 0x2f650500, 0x2f658c00, 0x2f661300, 0x2f669a00, 0x2f672100, 0x2f67a800, 0x2f682f00, 0x2f68b600, 0x2f693d00, 0x2f69c400, 0x2f6a4b00, 0x2f6ad200, 0x2f6b5900, 0x2f6be000,
+ 0x2f6c6700, 0x2f6cee00, 0x2f6d7500, 0x2f6dfc00, 0x2f6e8300, 0x2f6f0a00, 0x2f6f9100, 0x2f701800, 0x2f709f00, 0x2f712600, 0x2f71ad00, 0x2f723400, 0x2f72bb00, 0x2f734200, 0x2f73c900, 0x2f745000,
+ 0x2f74d700, 0x2f755e00, 0x2f75e500, 0x2f766c00, 0x2f76f300, 0x2f777a00, 0x2f780100, 0x2f788800, 0x2f790f00, 0x2f799600, 0x2f7a1d00, 0x2f7aa400, 0x2f7b2b00, 0x2f7bb200, 0x2f7c3900, 0x2f7cc000,
+ 0x2f7d4700, 0x2f7dce00, 0x2f7e5500, 0x2f7edc00, 0x2f7f6300, 0x2f7fea00, 0x2f807100, 0x2f80f800, 0x2f817f00, 0x2f820600, 0x2f828d00, 0x2f831400, 0x2f839b00, 0x2f842200, 0x2f84a900, 0x2f853000,
+ 0x2f85b700, 0x2f863e00, 0x2f86c500, 0x2f874c00, 0x2f87d300, 0x2f885a00, 0x2f88e100, 0x2f896800, 0x2f89ef00, 0x2f8a7600, 0x2f8afd00, 0x2f8b8400, 0x2f8c0b00, 0x2f8c9200, 0x2f8d1900, 0x2f8da000,
+ 0x2f8e2700, 0x2f8eae00, 0x2f8f3500, 0x2f8fbc00, 0x2f904300, 0x2f90ca00, 0x2f915100, 0x2f91d800, 0x2f925f00, 0x2f92e600, 0x2f936d00, 0x2f93f400, 0x2f947b00, 0x2f950200, 0x2f958900, 0x2f961000,
+ 0x2f969700, 0x2f971e00, 0x2f97a500, 0x2f982c00, 0x2f98b300, 0x2f993a00, 0x2f99c100, 0x2f9a4800, 0x2f9acf00, 0x2f9b5600, 0x2f9bdd00, 0x2f9c6400, 0x2f9ceb00, 0x2f9d7200, 0x2f9df900, 0x2f9e8000,
+ 0x2f9f0700, 0x2f9f8e00, 0x2fa01500, 0x2fa09c00, 0x2fa12300, 0x2fa1aa00, 0x2fa23100, 0x2fa2b800, 0x2fa33f00, 0x2fa3c600, 0x2fa44d00, 0x2fa4d400, 0x2fa55b00, 0x2fa5e200, 0x2fa66900, 0x2fa6f000,
+ 0x2fa77700, 0x2fa7fe00, 0x2fa88500, 0x2fa90c00, 0x2fa99300, 0x2faa1a00, 0x2faaa100, 0x2fab2800, 0x2fabaf00, 0x2fac3600, 0x2facbd00, 0x2fad4400, 0x2fadcb00, 0x2fae5200, 0x2faed900, 0x2faf6000,
+ 0x2fafe700, 0x2fb06e00, 0x2fb0f500, 0x2fb17c00, 0x2fb20300, 0x2fb28a00, 0x2fb31100, 0x2fb39800, 0x2fb41f00, 0x2fb4a600, 0x2fb52d00, 0x2fb5b400, 0x2fb63b00, 0x2fb6c200, 0x2fb74900, 0x2fb7d000,
+ 0x2fb85700, 0x2fb8de00, 0x2fb96500, 0x2fb9ec00, 0x2fba7300, 0x2fbafa00, 0x2fbb8100, 0x2fbc0800, 0x2fbc8f00, 0x2fbd1600, 0x2fbd9d00, 0x2fbe2400, 0x2fbeab00, 0x2fbf3200, 0x2fbfb900, 0x2fc04000,
+ 0x2fc0c700, 0x2fc14e00, 0x2fc1d500, 0x2fc25c00, 0x2fc2e300, 0x2fc36a00, 0x2fc3f100, 0x2fc47800, 0x2fc4ff00, 0x2fc58600, 0x2fc60d00, 0x2fc69400, 0x2fc71b00, 0x2fc7a200, 0x2fc82900, 0x2fc8b000,
+ 0x2fc93700, 0x2fc9be00, 0x2fca4500, 0x2fcacc00, 0x2fcb5300, 0x2fcbda00, 0x2fcc6100, 0x2fcce800, 0x2fcd6f00, 0x2fcdf600, 0x2fce7d00, 0x2fcf0400, 0x2fcf8b00, 0x2fd01200, 0x2fd09900, 0x2fd12000,
+ 0x2fd1a700, 0x2fd22e00, 0x2fd2b500, 0x2fd33c00, 0x2fd3c300, 0x2fd44a00, 0x2fd4d100, 0x2fd55800, 0x2fd5df00, 0x2fd66600, 0x2fd6ed00, 0x2fd77400, 0x2fd7fb00, 0x2fd88200, 0x2fd90900, 0x2fd99000,
+ 0x2fda1700, 0x2fda9e00, 0x2fdb2500, 0x2fdbac00, 0x2fdc3300, 0x2fdcba00, 0x2fdd4100, 0x2fddc800, 0x2fde4f00, 0x2fded600, 0x2fdf5d00, 0x2fdfe400, 0x2fe06b00, 0x2fe0f200, 0x2fe17900, 0x2fe20000,
+ 0x2fe29600, 0x2fe32c00, 0x2fe3c200, 0x2fe45800, 0x2fe4ee00, 0x2fe58400, 0x2fe61a00, 0x2fe6b000, 0x2fe74600, 0x2fe7dc00, 0x2fe87200, 0x2fe90800, 0x2fe99e00, 0x2fea3400, 0x2feaca00, 0x2feb6000,
+ 0x2febf600, 0x2fec8c00, 0x2fed2200, 0x2fedb800, 0x2fee4e00, 0x2feee400, 0x2fef7a00, 0x2ff01000, 0x2ff0a600, 0x2ff13c00, 0x2ff1d200, 0x2ff26800, 0x2ff2fe00, 0x2ff39400, 0x2ff42a00, 0x2ff4c000,
+ 0x2ff55600, 0x2ff5ec00, 0x2ff68200, 0x2ff71800, 0x2ff7ae00, 0x2ff84400, 0x2ff8da00, 0x2ff97000, 0x2ffa0600, 0x2ffa9c00, 0x2ffb3200, 0x2ffbc800, 0x2ffc5e00, 0x2ffcf400, 0x2ffd8a00, 0x2ffe2000,
+ 0x2ffeb600, 0x2fff4c00, 0x2fffe200, 0x30007800, 0x30010e00, 0x3001a400, 0x30023a00, 0x3002d000, 0x30036600, 0x3003fc00, 0x30049200, 0x30052800, 0x3005be00, 0x30065400, 0x3006ea00, 0x30078000,
+ 0x30081600, 0x3008ac00, 0x30094200, 0x3009d800, 0x300a6e00, 0x300b0400, 0x300b9a00, 0x300c3000, 0x300cc600, 0x300d5c00, 0x300df200, 0x300e8800, 0x300f1e00, 0x300fb400, 0x30104a00, 0x3010e000,
+ 0x30117600, 0x30120c00, 0x3012a200, 0x30133800, 0x3013ce00, 0x30146400, 0x3014fa00, 0x30159000, 0x30162600, 0x3016bc00, 0x30175200, 0x3017e800, 0x30187e00, 0x30191400, 0x3019aa00, 0x301a4000,
+ 0x301ad600, 0x301b6c00, 0x301c0200, 0x301c9800, 0x301d2e00, 0x301dc400, 0x301e5a00, 0x301ef000, 0x301f8600, 0x30201c00, 0x3020b200, 0x30214800, 0x3021de00, 0x30227400, 0x30230a00, 0x3023a000,
+ 0x30243600, 0x3024cc00, 0x30256200, 0x3025f800, 0x30268e00, 0x30272400, 0x3027ba00, 0x30285000, 0x3028e600, 0x30297c00, 0x302a1200, 0x302aa800, 0x302b3e00, 0x302bd400, 0x302c6a00, 0x302d0000,
+ 0x302d9600, 0x302e2c00, 0x302ec200, 0x302f5800, 0x302fee00, 0x30308400, 0x30311a00, 0x3031b000, 0x30324600, 0x3032dc00, 0x30337200, 0x30340800, 0x30349e00, 0x30353400, 0x3035ca00, 0x30366000,
+ 0x3036f600, 0x30378c00, 0x30382200, 0x3038b800, 0x30394e00, 0x3039e400, 0x303a7a00, 0x303b1000, 0x303ba600, 0x303c3c00, 0x303cd200, 0x303d6800, 0x303dfe00, 0x303e9400, 0x303f2a00, 0x303fc000,
+ 0x30405600, 0x3040ec00, 0x30418200, 0x30421800, 0x3042ae00, 0x30434400, 0x3043da00, 0x30447000, 0x30450600, 0x30459c00, 0x30463200, 0x3046c800, 0x30475e00, 0x3047f400, 0x30488a00, 0x30492000,
+ 0x3049b600, 0x304a4c00, 0x304ae200, 0x304b7800, 0x304c0e00, 0x304ca400, 0x304d3a00, 0x304dd000, 0x304e6600, 0x304efc00, 0x304f9200, 0x30502800, 0x3050be00, 0x30515400, 0x3051ea00, 0x30528000,
+ 0x30531600, 0x3053ac00, 0x30544200, 0x3054d800, 0x30556e00, 0x30560400, 0x30569a00, 0x30573000, 0x3057c600, 0x30585c00, 0x3058f200, 0x30598800, 0x305a1e00, 0x305ab400, 0x305b4a00, 0x305be000,
+ 0x305c7600, 0x305d0c00, 0x305da200, 0x305e3800, 0x305ece00, 0x305f6400, 0x305ffa00, 0x30609000, 0x30612600, 0x3061bc00, 0x30625200, 0x3062e800, 0x30637e00, 0x30641400, 0x3064aa00, 0x30654000,
+ 0x3065d600, 0x30666c00, 0x30670200, 0x30679800, 0x30682e00, 0x3068c400, 0x30695a00, 0x3069f000, 0x306a8600, 0x306b1c00, 0x306bb200, 0x306c4800, 0x306cde00, 0x306d7400, 0x306e0a00, 0x306ea000,
+ 0x306f3600, 0x306fcc00, 0x30706200, 0x3070f800, 0x30718e00, 0x30722400, 0x3072ba00, 0x30735000, 0x3073e600, 0x30747c00, 0x30751200, 0x3075a800, 0x30763e00, 0x3076d400, 0x30776a00, 0x30780000,
+ 0x30789600, 0x30792c00, 0x3079c200, 0x307a5800, 0x307aee00, 0x307b8400, 0x307c1a00, 0x307cb000, 0x307d4600, 0x307ddc00, 0x307e7200, 0x307f0800, 0x307f9e00, 0x30803400, 0x3080ca00, 0x30816000,
+ 0x3081f600, 0x30828c00, 0x30832200, 0x3083b800, 0x30844e00, 0x3084e400, 0x30857a00, 0x30861000, 0x3086a600, 0x30873c00, 0x3087d200, 0x30886800, 0x3088fe00, 0x30899400, 0x308a2a00, 0x308ac000,
+ 0x308b5600, 0x308bec00, 0x308c8200, 0x308d1800, 0x308dae00, 0x308e4400, 0x308eda00, 0x308f7000, 0x30900600, 0x30909c00, 0x30913200, 0x3091c800, 0x30925e00, 0x3092f400, 0x30938a00, 0x30942000,
+ 0x3094b600, 0x30954c00, 0x3095e200, 0x30967800, 0x30970e00, 0x3097a400, 0x30983a00, 0x3098d000, 0x30996600, 0x3099fc00, 0x309a9200, 0x309b2800, 0x309bbe00, 0x309c5400, 0x309cea00, 0x309d8000,
+ 0x309e1600, 0x309eac00, 0x309f4200, 0x309fd800, 0x30a06e00, 0x30a10400, 0x30a19a00, 0x30a23000, 0x30a2c600, 0x30a35c00, 0x30a3f200, 0x30a48800, 0x30a51e00, 0x30a5b400, 0x30a64a00, 0x30a6e000,
+ 0x30a77600, 0x30a80c00, 0x30a8a200, 0x30a93800, 0x30a9ce00, 0x30aa6400, 0x30aafa00, 0x30ab9000, 0x30ac2600, 0x30acbc00, 0x30ad5200, 0x30ade800, 0x30ae7e00, 0x30af1400, 0x30afaa00, 0x30b04000,
+ 0x30b0d600, 0x30b16c00, 0x30b20200, 0x30b29800, 0x30b32e00, 0x30b3c400, 0x30b45a00, 0x30b4f000, 0x30b58600, 0x30b61c00, 0x30b6b200, 0x30b74800, 0x30b7de00, 0x30b87400, 0x30b90a00, 0x30b9a000,
+ 0x30ba3600, 0x30bacc00, 0x30bb6200, 0x30bbf800, 0x30bc8e00, 0x30bd2400, 0x30bdba00, 0x30be5000, 0x30bee600, 0x30bf7c00, 0x30c01200, 0x30c0a800, 0x30c13e00, 0x30c1d400, 0x30c26a00, 0x30c30000,
+ 0x30c39600, 0x30c42c00, 0x30c4c200, 0x30c55800, 0x30c5ee00, 0x30c68400, 0x30c71a00, 0x30c7b000, 0x30c84600, 0x30c8dc00, 0x30c97200, 0x30ca0800, 0x30ca9e00, 0x30cb3400, 0x30cbca00, 0x30cc6000,
+ 0x30ccf600, 0x30cd8c00, 0x30ce2200, 0x30ceb800, 0x30cf4e00, 0x30cfe400, 0x30d07a00, 0x30d11000, 0x30d1a600, 0x30d23c00, 0x30d2d200, 0x30d36800, 0x30d3fe00, 0x30d49400, 0x30d52a00, 0x30d5c000,
+ 0x30d65600, 0x30d6ec00, 0x30d78200, 0x30d81800, 0x30d8ae00, 0x30d94400, 0x30d9da00, 0x30da7000, 0x30db0600, 0x30db9c00, 0x30dc3200, 0x30dcc800, 0x30dd5e00, 0x30ddf400, 0x30de8a00, 0x30df2000,
+ 0x30dfb600, 0x30e04c00, 0x30e0e200, 0x30e17800, 0x30e20e00, 0x30e2a400, 0x30e33a00, 0x30e3d000, 0x30e46600, 0x30e4fc00, 0x30e59200, 0x30e62800, 0x30e6be00, 0x30e75400, 0x30e7ea00, 0x30e88000,
+ 0x30e91600, 0x30e9ac00, 0x30ea4200, 0x30ead800, 0x30eb6e00, 0x30ec0400, 0x30ec9a00, 0x30ed3000, 0x30edc600, 0x30ee5c00, 0x30eef200, 0x30ef8800, 0x30f01e00, 0x30f0b400, 0x30f14a00, 0x30f1e000,
+ 0x30f27600, 0x30f30c00, 0x30f3a200, 0x30f43800, 0x30f4ce00, 0x30f56400, 0x30f5fa00, 0x30f69000, 0x30f72600, 0x30f7bc00, 0x30f85200, 0x30f8e800, 0x30f97e00, 0x30fa1400, 0x30faaa00, 0x30fb4000,
+ 0x30fbd600, 0x30fc6c00, 0x30fd0200, 0x30fd9800, 0x30fe2e00, 0x30fec400, 0x30ff5a00, 0x30fff000, 0x31008600, 0x31011c00, 0x3101b200, 0x31024800, 0x3102de00, 0x31037400, 0x31040a00, 0x3104a000,
+ 0x31053600, 0x3105cc00, 0x31066200, 0x3106f800, 0x31078e00, 0x31082400, 0x3108ba00, 0x31095000, 0x3109e600, 0x310a7c00, 0x310b1200, 0x310ba800, 0x310c3e00, 0x310cd400, 0x310d6a00, 0x310e0000,
+ 0x310e9600, 0x310f2c00, 0x310fc200, 0x31105800, 0x3110ee00, 0x31118400, 0x31121a00, 0x3112b000, 0x31134600, 0x3113dc00, 0x31147200, 0x31150800, 0x31159e00, 0x31163400, 0x3116ca00, 0x31176000,
+ 0x3117f600, 0x31188c00, 0x31192200, 0x3119b800, 0x311a4e00, 0x311ae400, 0x311b7a00, 0x311c1000, 0x311ca600, 0x311d3c00, 0x311dd200, 0x311e6800, 0x311efe00, 0x311f9400, 0x31202a00, 0x3120c000,
+ 0x31215600, 0x3121ec00, 0x31228200, 0x31231800, 0x3123ae00, 0x31244400, 0x3124da00, 0x31257000, 0x31260600, 0x31269c00, 0x31273200, 0x3127c800, 0x31285e00, 0x3128f400, 0x31298a00, 0x312a2000,
+ 0x312ab600, 0x312b4c00, 0x312be200, 0x312c7800, 0x312d0e00, 0x312da400, 0x312e3a00, 0x312ed000, 0x312f6600, 0x312ffc00, 0x31309200, 0x31312800, 0x3131be00, 0x31325400, 0x3132ea00, 0x31338000,
+ 0x31341600, 0x3134ac00, 0x31354200, 0x3135d800, 0x31366e00, 0x31370400, 0x31379a00, 0x31383000, 0x3138c600, 0x31395c00, 0x3139f200, 0x313a8800, 0x313b1e00, 0x313bb400, 0x313c4a00, 0x313ce000,
+ 0x313d7600, 0x313e0c00, 0x313ea200, 0x313f3800, 0x313fce00, 0x31406300, 0x3140f900, 0x31418f00, 0x31422500, 0x3142bb00, 0x31435100, 0x3143e700, 0x31447d00, 0x31451300, 0x3145a900, 0x31463f00,
+ 0x3146d500, 0x31476b00, 0x31480100, 0x31489700, 0x31492d00, 0x3149c300, 0x314a5900, 0x314aef00, 0x314b8500, 0x314c1b00, 0x314cb100, 0x314d4700, 0x314ddd00, 0x314e7300, 0x314f0900, 0x314f9f00,
+ 0x31503500, 0x3150cb00, 0x31516100, 0x3151f700, 0x31528d00, 0x31532300, 0x3153b900, 0x31544f00, 0x3154e500, 0x31557b00, 0x31561100, 0x3156a700, 0x31573d00, 0x3157d300, 0x31586900, 0x3158ff00,
+ 0x31599500, 0x315a2b00, 0x315ac100, 0x315b5700, 0x315bed00, 0x315c8300, 0x315d1900, 0x315daf00, 0x315e4500, 0x315edb00, 0x315f7100, 0x31600700, 0x31609d00, 0x31613300, 0x3161c900, 0x31625f00,
+ 0x3162f500, 0x31638b00, 0x31642100, 0x3164b700, 0x31654d00, 0x3165e300, 0x31667900, 0x31670f00, 0x3167a500, 0x31683b00, 0x3168d100, 0x31696700, 0x3169fd00, 0x316a9300, 0x316b2900, 0x316bbf00,
+ 0x316c5500, 0x316ceb00, 0x316d8100, 0x316e1700, 0x316ead00, 0x316f4300, 0x316fd900, 0x31706f00, 0x31710500, 0x31719b00, 0x31723100, 0x3172c700, 0x31735d00, 0x3173f300, 0x31748900, 0x31751f00,
+ 0x3175b500, 0x31764b00, 0x3176e100, 0x31777700, 0x31780d00, 0x3178a300, 0x31793900, 0x3179cf00, 0x317a6500, 0x317afb00, 0x317b9100, 0x317c2700, 0x317cbd00, 0x317d5300, 0x317de900, 0x317e7f00,
+ 0x317f1500, 0x317fab00, 0x31804100, 0x3180d700, 0x31816d00, 0x31820300, 0x31829900, 0x31832f00, 0x3183c500, 0x31845b00, 0x3184f100, 0x31858700, 0x31861d00, 0x3186b300, 0x31874900, 0x3187df00,
+ 0x31887500, 0x31890b00, 0x3189a100, 0x318a3700, 0x318acd00, 0x318b6300, 0x318bf900, 0x318c8f00, 0x318d2500, 0x318dbb00, 0x318e5100, 0x318ee700, 0x318f7d00, 0x31901300, 0x3190a900, 0x31913f00,
+ 0x3191d500, 0x31926b00, 0x31930100, 0x31939700, 0x31942d00, 0x3194c300, 0x31955900, 0x3195ef00, 0x31968500, 0x31971b00, 0x3197b100, 0x31984700, 0x3198dd00, 0x31997300, 0x319a0900, 0x319a9f00,
+ 0x319b3500, 0x319bcb00, 0x319c6100, 0x319cf700, 0x319d8d00, 0x319e2300, 0x319eb900, 0x319f4f00, 0x319fe500, 0x31a07b00, 0x31a11100, 0x31a1a700, 0x31a23d00, 0x31a2d300, 0x31a36900, 0x31a3ff00,
+ 0x31a49500, 0x31a52b00, 0x31a5c100, 0x31a65700, 0x31a6ed00, 0x31a78300, 0x31a81900, 0x31a8af00, 0x31a94500, 0x31a9db00, 0x31aa7100, 0x31ab0700, 0x31ab9d00, 0x31ac3300, 0x31acc900, 0x31ad5f00,
+ 0x31adf500, 0x31ae8b00, 0x31af2100, 0x31afb700, 0x31b04d00, 0x31b0e300, 0x31b17900, 0x31b20f00, 0x31b2a500, 0x31b33b00, 0x31b3d100, 0x31b46700, 0x31b4fd00, 0x31b59300, 0x31b62900, 0x31b6bf00,
+ 0x31b75500, 0x31b7eb00, 0x31b88100, 0x31b91700, 0x31b9ad00, 0x31ba4300, 0x31bad900, 0x31bb6f00, 0x31bc0500, 0x31bc9b00, 0x31bd3100, 0x31bdc700, 0x31be5d00, 0x31bef300, 0x31bf8900, 0x31c01f00,
+ 0x31c0b500, 0x31c14b00, 0x31c1e100, 0x31c27700, 0x31c30d00, 0x31c3a300, 0x31c43900, 0x31c4cf00, 0x31c56500, 0x31c5fb00, 0x31c69100, 0x31c72700, 0x31c7bd00, 0x31c85300, 0x31c8e900, 0x31c97f00,
+ 0x31ca1500, 0x31caab00, 0x31cb4100, 0x31cbd700, 0x31cc6d00, 0x31cd0300, 0x31cd9900, 0x31ce2f00, 0x31cec500, 0x31cf5b00, 0x31cff100, 0x31d08700, 0x31d11d00, 0x31d1b300, 0x31d24900, 0x31d2df00,
+ 0x31d37500, 0x31d40b00, 0x31d4a100, 0x31d53700, 0x31d5cd00, 0x31d66300, 0x31d6f900, 0x31d78f00, 0x31d82500, 0x31d8bb00, 0x31d95100, 0x31d9e700, 0x31da7d00, 0x31db1300, 0x31dba900, 0x31dc3f00,
+ 0x31dcd500, 0x31dd6b00, 0x31de0100, 0x31de9700, 0x31df2d00, 0x31dfc300, 0x31e05900, 0x31e0ef00, 0x31e18500, 0x31e21b00, 0x31e2b100, 0x31e34700, 0x31e3dd00, 0x31e47300, 0x31e50900, 0x31e59f00,
+ 0x31e63500, 0x31e6cb00, 0x31e76100, 0x31e7f700, 0x31e88d00, 0x31e92300, 0x31e9b900, 0x31ea4f00, 0x31eae500, 0x31eb7b00, 0x31ec1100, 0x31eca700, 0x31ed3d00, 0x31edd300, 0x31ee6900, 0x31eeff00,
+ 0x31ef9500, 0x31f02b00, 0x31f0c100, 0x31f15700, 0x31f1ed00, 0x31f28300, 0x31f31900, 0x31f3af00, 0x31f44500, 0x31f4db00, 0x31f57100, 0x31f60700, 0x31f69d00, 0x31f73300, 0x31f7c900, 0x31f85f00,
+ 0x31f8f500, 0x31f98b00, 0x31fa2100, 0x31fab700, 0x31fb4d00, 0x31fbe300, 0x31fc7900, 0x31fd0f00, 0x31fda500, 0x31fe3b00, 0x31fed100, 0x31ff6700, 0x31fffd00, 0x32009300, 0x32012900, 0x3201bf00,
+ 0x32025500, 0x3202eb00, 0x32038100, 0x32041700, 0x3204ad00, 0x32054300, 0x3205d900, 0x32066f00, 0x32070500, 0x32079b00, 0x32083100, 0x3208c700, 0x32095d00, 0x3209f300, 0x320a8900, 0x320b1f00,
+ 0x320bb500, 0x320c4b00, 0x320ce100, 0x320d7700, 0x320e0d00, 0x320ea300, 0x320f3900, 0x320fcf00, 0x32106500, 0x3210fb00, 0x32119100, 0x32122700, 0x3212bd00, 0x32135300, 0x3213e900, 0x32147f00,
+ 0x32151500, 0x3215ab00, 0x32164100, 0x3216d700, 0x32176d00, 0x32180300, 0x32189900, 0x32192f00, 0x3219c500, 0x321a5b00, 0x321af100, 0x321b8700, 0x321c1d00, 0x321cb300, 0x321d4900, 0x321ddf00,
+ 0x321e7500, 0x321f0b00, 0x321fa100, 0x32203700, 0x3220cd00, 0x32216300, 0x3221f900, 0x32228f00, 0x32232500, 0x3223bb00, 0x32245100, 0x3224e700, 0x32257d00, 0x32261300, 0x3226a900, 0x32273f00,
+ 0x3227d500, 0x32286b00, 0x32290100, 0x32299700, 0x322a2d00, 0x322ac300, 0x322b5900, 0x322bef00, 0x322c8500, 0x322d1b00, 0x322db100, 0x322e4700, 0x322edd00, 0x322f7300, 0x32300900, 0x32309f00,
+ 0x32313500, 0x3231cb00, 0x32326100, 0x3232f700, 0x32338d00, 0x32342300, 0x3234b900, 0x32354f00, 0x3235e500, 0x32367b00, 0x32371100, 0x3237a700, 0x32383d00, 0x3238d300, 0x32396900, 0x3239ff00,
+ 0x323aa400, 0x323b4900, 0x323bee00, 0x323c9300, 0x323d3800, 0x323ddd00, 0x323e8200, 0x323f2700, 0x323fcc00, 0x32407100, 0x32411600, 0x3241bb00, 0x32426000, 0x32430500, 0x3243aa00, 0x32444f00,
+ 0x3244f400, 0x32459900, 0x32463e00, 0x3246e300, 0x32478800, 0x32482d00, 0x3248d200, 0x32497700, 0x324a1c00, 0x324ac100, 0x324b6600, 0x324c0b00, 0x324cb000, 0x324d5500, 0x324dfa00, 0x324e9f00,
+ 0x324f4400, 0x324fe900, 0x32508e00, 0x32513300, 0x3251d800, 0x32527d00, 0x32532200, 0x3253c700, 0x32546c00, 0x32551100, 0x3255b600, 0x32565b00, 0x32570000, 0x3257a500, 0x32584a00, 0x3258ef00,
+ 0x32599400, 0x325a3900, 0x325ade00, 0x325b8300, 0x325c2800, 0x325ccd00, 0x325d7200, 0x325e1700, 0x325ebc00, 0x325f6100, 0x32600600, 0x3260ab00, 0x32615000, 0x3261f500, 0x32629a00, 0x32633f00,
+ 0x3263e400, 0x32648900, 0x32652e00, 0x3265d300, 0x32667800, 0x32671d00, 0x3267c200, 0x32686700, 0x32690c00, 0x3269b100, 0x326a5600, 0x326afb00, 0x326ba000, 0x326c4500, 0x326cea00, 0x326d8f00,
+ 0x326e3400, 0x326ed900, 0x326f7e00, 0x32702300, 0x3270c800, 0x32716d00, 0x32721200, 0x3272b700, 0x32735c00, 0x32740100, 0x3274a600, 0x32754b00, 0x3275f000, 0x32769500, 0x32773a00, 0x3277df00,
+ 0x32788400, 0x32792900, 0x3279ce00, 0x327a7300, 0x327b1800, 0x327bbd00, 0x327c6200, 0x327d0700, 0x327dac00, 0x327e5100, 0x327ef600, 0x327f9b00, 0x32804000, 0x3280e500, 0x32818a00, 0x32822f00,
+ 0x3282d400, 0x32837900, 0x32841e00, 0x3284c300, 0x32856800, 0x32860d00, 0x3286b200, 0x32875700, 0x3287fc00, 0x3288a100, 0x32894600, 0x3289eb00, 0x328a9000, 0x328b3500, 0x328bda00, 0x328c7f00,
+ 0x328d2400, 0x328dc900, 0x328e6e00, 0x328f1300, 0x328fb800, 0x32905d00, 0x32910200, 0x3291a700, 0x32924c00, 0x3292f100, 0x32939600, 0x32943b00, 0x3294e000, 0x32958500, 0x32962a00, 0x3296cf00,
+ 0x32977400, 0x32981900, 0x3298be00, 0x32996300, 0x329a0800, 0x329aad00, 0x329b5200, 0x329bf700, 0x329c9c00, 0x329d4100, 0x329de600, 0x329e8b00, 0x329f3000, 0x329fd500, 0x32a07a00, 0x32a11f00,
+ 0x32a1c400, 0x32a26900, 0x32a30e00, 0x32a3b300, 0x32a45800, 0x32a4fd00, 0x32a5a200, 0x32a64700, 0x32a6ec00, 0x32a79100, 0x32a83600, 0x32a8db00, 0x32a98000, 0x32aa2500, 0x32aaca00, 0x32ab6f00,
+ 0x32ac1400, 0x32acb900, 0x32ad5e00, 0x32ae0300, 0x32aea800, 0x32af4d00, 0x32aff200, 0x32b09700, 0x32b13c00, 0x32b1e100, 0x32b28600, 0x32b32b00, 0x32b3d000, 0x32b47500, 0x32b51a00, 0x32b5bf00,
+ 0x32b66400, 0x32b70900, 0x32b7ae00, 0x32b85300, 0x32b8f800, 0x32b99d00, 0x32ba4200, 0x32bae700, 0x32bb8c00, 0x32bc3100, 0x32bcd600, 0x32bd7b00, 0x32be2000, 0x32bec500, 0x32bf6a00, 0x32c00f00,
+ 0x32c0b400, 0x32c15900, 0x32c1fe00, 0x32c2a300, 0x32c34800, 0x32c3ed00, 0x32c49200, 0x32c53700, 0x32c5dc00, 0x32c68100, 0x32c72600, 0x32c7cb00, 0x32c87000, 0x32c91500, 0x32c9ba00, 0x32ca5f00,
+ 0x32cb0400, 0x32cba900, 0x32cc4e00, 0x32ccf300, 0x32cd9800, 0x32ce3d00, 0x32cee200, 0x32cf8700, 0x32d02c00, 0x32d0d100, 0x32d17600, 0x32d21b00, 0x32d2c000, 0x32d36500, 0x32d40a00, 0x32d4af00,
+ 0x32d55400, 0x32d5f900, 0x32d69e00, 0x32d74300, 0x32d7e800, 0x32d88d00, 0x32d93200, 0x32d9d700, 0x32da7c00, 0x32db2100, 0x32dbc600, 0x32dc6b00, 0x32dd1000, 0x32ddb500, 0x32de5a00, 0x32deff00,
+ 0x32dfa400, 0x32e04900, 0x32e0ee00, 0x32e19300, 0x32e23800, 0x32e2dd00, 0x32e38200, 0x32e42700, 0x32e4cc00, 0x32e57100, 0x32e61600, 0x32e6bb00, 0x32e76000, 0x32e80500, 0x32e8aa00, 0x32e94f00,
+ 0x32e9f400, 0x32ea9900, 0x32eb3e00, 0x32ebe300, 0x32ec8800, 0x32ed2d00, 0x32edd200, 0x32ee7700, 0x32ef1c00, 0x32efc100, 0x32f06600, 0x32f10b00, 0x32f1b000, 0x32f25500, 0x32f2fa00, 0x32f39f00,
+ 0x32f44400, 0x32f4e900, 0x32f58e00, 0x32f63300, 0x32f6d800, 0x32f77d00, 0x32f82200, 0x32f8c700, 0x32f96c00, 0x32fa1100, 0x32fab600, 0x32fb5b00, 0x32fc0000, 0x32fca500, 0x32fd4a00, 0x32fdef00,
+ 0x32fe9400, 0x32ff3900, 0x32ffde00, 0x33008300, 0x33012800, 0x3301cd00, 0x33027200, 0x33031700, 0x3303bc00, 0x33046100, 0x33050600, 0x3305ab00, 0x33065000, 0x3306f500, 0x33079a00, 0x33083f00,
+ 0x3308e400, 0x33098900, 0x330a2e00, 0x330ad300, 0x330b7800, 0x330c1d00, 0x330cc200, 0x330d6700, 0x330e0c00, 0x330eb100, 0x330f5600, 0x330ffb00, 0x3310a000, 0x33114500, 0x3311ea00, 0x33128f00,
+ 0x33133400, 0x3313d900, 0x33147e00, 0x33152300, 0x3315c800, 0x33166d00, 0x33171200, 0x3317b700, 0x33185c00, 0x33190100, 0x3319a600, 0x331a4b00, 0x331af000, 0x331b9500, 0x331c3a00, 0x331cdf00,
+ 0x331d8400, 0x331e2900, 0x331ece00, 0x331f7300, 0x33201800, 0x3320bd00, 0x33216200, 0x33220700, 0x3322ac00, 0x33235100, 0x3323f600, 0x33249b00, 0x33254000, 0x3325e500, 0x33268a00, 0x33272f00,
+ 0x3327d400, 0x33287900, 0x33291e00, 0x3329c300, 0x332a6800, 0x332b0d00, 0x332bb200, 0x332c5700, 0x332cfc00, 0x332da100, 0x332e4600, 0x332eeb00, 0x332f9000, 0x33303500, 0x3330da00, 0x33317f00,
+ 0x33322400, 0x3332c900, 0x33336e00, 0x33341300, 0x3334b800, 0x33355d00, 0x33360200, 0x3336a700, 0x33374c00, 0x3337f100, 0x33389600, 0x33393b00, 0x3339e000, 0x333a8500, 0x333b2a00, 0x333bcf00,
+ 0x333c7400, 0x333d1900, 0x333dbe00, 0x333e6300, 0x333f0800, 0x333fad00, 0x33405200, 0x3340f700, 0x33419c00, 0x33424100, 0x3342e600, 0x33438b00, 0x33443000, 0x3344d500, 0x33457a00, 0x33461f00,
+ 0x3346c400, 0x33476900, 0x33480e00, 0x3348b300, 0x33495800, 0x3349fd00, 0x334aa200, 0x334b4700, 0x334bec00, 0x334c9100, 0x334d3600, 0x334ddb00, 0x334e8000, 0x334f2500, 0x334fca00, 0x33506f00,
+ 0x33511400, 0x3351b900, 0x33525e00, 0x33530300, 0x3353a800, 0x33544d00, 0x3354f200, 0x33559700, 0x33563c00, 0x3356e100, 0x33578600, 0x33582b00, 0x3358d000, 0x33597500, 0x335a1a00, 0x335abf00,
+ 0x335b6400, 0x335c0900, 0x335cae00, 0x335d5300, 0x335df800, 0x335e9d00, 0x335f4200, 0x335fe700, 0x33608c00, 0x33613100, 0x3361d600, 0x33627b00, 0x33632000, 0x3363c500, 0x33646a00, 0x33650f00,
+ 0x3365b400, 0x33665900, 0x3366fe00, 0x3367a300, 0x33684800, 0x3368ed00, 0x33699200, 0x336a3700, 0x336adc00, 0x336b8100, 0x336c2600, 0x336ccb00, 0x336d7000, 0x336e1500, 0x336eba00, 0x336f5f00,
+ 0x33700400, 0x3370a900, 0x33714e00, 0x3371f300, 0x33729800, 0x33733d00, 0x3373e200, 0x33748700, 0x33752c00, 0x3375d100, 0x33767600, 0x33771b00, 0x3377c000, 0x33786500, 0x33790a00, 0x3379af00,
+ 0x337a5400, 0x337af900, 0x337b9e00, 0x337c4300, 0x337ce800, 0x337d8d00, 0x337e3200, 0x337ed700, 0x337f7c00, 0x33802100, 0x3380c600, 0x33816b00, 0x33821000, 0x3382b500, 0x33835a00, 0x3383ff00,
+ 0x3384a700, 0x33854f00, 0x3385f700, 0x33869f00, 0x33874700, 0x3387ef00, 0x33889700, 0x33893f00, 0x3389e700, 0x338a8f00, 0x338b3700, 0x338bdf00, 0x338c8700, 0x338d2f00, 0x338dd700, 0x338e7f00,
+ 0x338f2700, 0x338fcf00, 0x33907700, 0x33911f00, 0x3391c700, 0x33926f00, 0x33931700, 0x3393bf00, 0x33946700, 0x33950f00, 0x3395b700, 0x33965f00, 0x33970700, 0x3397af00, 0x33985700, 0x3398ff00,
+ 0x3399a700, 0x339a4f00, 0x339af700, 0x339b9f00, 0x339c4700, 0x339cef00, 0x339d9700, 0x339e3f00, 0x339ee700, 0x339f8f00, 0x33a03700, 0x33a0df00, 0x33a18700, 0x33a22f00, 0x33a2d700, 0x33a37f00,
+ 0x33a42700, 0x33a4cf00, 0x33a57700, 0x33a61f00, 0x33a6c700, 0x33a76f00, 0x33a81700, 0x33a8bf00, 0x33a96700, 0x33aa0f00, 0x33aab700, 0x33ab5f00, 0x33ac0700, 0x33acaf00, 0x33ad5700, 0x33adff00,
+ 0x33aea700, 0x33af4f00, 0x33aff700, 0x33b09f00, 0x33b14700, 0x33b1ef00, 0x33b29700, 0x33b33f00, 0x33b3e700, 0x33b48f00, 0x33b53700, 0x33b5df00, 0x33b68700, 0x33b72f00, 0x33b7d700, 0x33b87f00,
+ 0x33b92700, 0x33b9cf00, 0x33ba7700, 0x33bb1f00, 0x33bbc700, 0x33bc6f00, 0x33bd1700, 0x33bdbf00, 0x33be6700, 0x33bf0f00, 0x33bfb700, 0x33c05f00, 0x33c10700, 0x33c1af00, 0x33c25700, 0x33c2ff00,
+ 0x33c3a700, 0x33c44f00, 0x33c4f700, 0x33c59f00, 0x33c64700, 0x33c6ef00, 0x33c79700, 0x33c83f00, 0x33c8e700, 0x33c98f00, 0x33ca3700, 0x33cadf00, 0x33cb8700, 0x33cc2f00, 0x33ccd700, 0x33cd7f00,
+ 0x33ce2700, 0x33cecf00, 0x33cf7700, 0x33d01f00, 0x33d0c700, 0x33d16f00, 0x33d21700, 0x33d2bf00, 0x33d36700, 0x33d40f00, 0x33d4b700, 0x33d55f00, 0x33d60700, 0x33d6af00, 0x33d75700, 0x33d80000,
+ 0x33d8a800, 0x33d95000, 0x33d9f800, 0x33daa000, 0x33db4800, 0x33dbf000, 0x33dc9800, 0x33dd4000, 0x33dde800, 0x33de9000, 0x33df3800, 0x33dfe000, 0x33e08800, 0x33e13000, 0x33e1d800, 0x33e28000,
+ 0x33e32800, 0x33e3d000, 0x33e47800, 0x33e52000, 0x33e5c800, 0x33e67000, 0x33e71800, 0x33e7c000, 0x33e86800, 0x33e91000, 0x33e9b800, 0x33ea6000, 0x33eb0800, 0x33ebb000, 0x33ec5800, 0x33ed0000,
+ 0x33eda800, 0x33ee5000, 0x33eef800, 0x33efa000, 0x33f04800, 0x33f0f000, 0x33f19800, 0x33f24000, 0x33f2e800, 0x33f39000, 0x33f43800, 0x33f4e000, 0x33f58800, 0x33f63000, 0x33f6d800, 0x33f78000,
+ 0x33f82800, 0x33f8d000, 0x33f97800, 0x33fa2000, 0x33fac800, 0x33fb7000, 0x33fc1800, 0x33fcc000, 0x33fd6800, 0x33fe1000, 0x33feb800, 0x33ff6000, 0x34000800, 0x3400b000, 0x34015800, 0x34020000,
+ 0x3402a800, 0x34035000, 0x3403f800, 0x3404a000, 0x34054800, 0x3405f000, 0x34069800, 0x34074000, 0x3407e800, 0x34089000, 0x34093800, 0x3409e000, 0x340a8800, 0x340b3000, 0x340bd800, 0x340c8000,
+ 0x340d2800, 0x340dd000, 0x340e7800, 0x340f2000, 0x340fc800, 0x34107000, 0x34111800, 0x3411c000, 0x34126800, 0x34131000, 0x3413b800, 0x34146000, 0x34150800, 0x3415b000, 0x34165800, 0x34170000,
+ 0x3417a800, 0x34185000, 0x3418f800, 0x3419a000, 0x341a4800, 0x341af000, 0x341b9800, 0x341c4000, 0x341ce800, 0x341d9000, 0x341e3800, 0x341ee000, 0x341f8800, 0x34203000, 0x3420d800, 0x34218000,
+ 0x34222800, 0x3422d000, 0x34237800, 0x34242000, 0x3424c800, 0x34257000, 0x34261800, 0x3426c000, 0x34276800, 0x34281000, 0x3428b800, 0x34296000, 0x342a0800, 0x342ab000, 0x342b5800, 0x342c0000,
+ 0x342ca800, 0x342d5000, 0x342df800, 0x342ea000, 0x342f4800, 0x342ff000, 0x34309800, 0x34314000, 0x3431e800, 0x34329000, 0x34333800, 0x3433e000, 0x34348800, 0x34353000, 0x3435d800, 0x34368000,
+ 0x34372800, 0x3437d000, 0x34387800, 0x34392000, 0x3439c800, 0x343a7000, 0x343b1800, 0x343bc000, 0x343c6800, 0x343d1000, 0x343db800, 0x343e6000, 0x343f0800, 0x343fb000, 0x34405800, 0x34410000,
+ 0x3441a800, 0x34425000, 0x3442f800, 0x3443a000, 0x34444800, 0x3444f000, 0x34459800, 0x34464000, 0x3446e800, 0x34479000, 0x34483800, 0x3448e000, 0x34498800, 0x344a3000, 0x344ad800, 0x344b8000,
+ 0x344c2800, 0x344cd000, 0x344d7800, 0x344e2000, 0x344ec800, 0x344f7000, 0x34501800, 0x3450c000, 0x34516800, 0x34521000, 0x3452b800, 0x34536000, 0x34540800, 0x3454b000, 0x34555800, 0x34560000,
+ 0x3456a800, 0x34575000, 0x3457f800, 0x3458a000, 0x34594800, 0x3459f000, 0x345a9800, 0x345b4000, 0x345be800, 0x345c9000, 0x345d3800, 0x345de000, 0x345e8800, 0x345f3000, 0x345fd800, 0x34608000,
+ 0x34612800, 0x3461d000, 0x34627800, 0x34632000, 0x3463c800, 0x34647000, 0x34651800, 0x3465c000, 0x34666800, 0x34671000, 0x3467b800, 0x34686000, 0x34690800, 0x3469b000, 0x346a5800, 0x346b0000,
+ 0x346ba800, 0x346c5000, 0x346cf800, 0x346da000, 0x346e4800, 0x346ef000, 0x346f9800, 0x34704000, 0x3470e800, 0x34719000, 0x34723800, 0x3472e000, 0x34738800, 0x34743000, 0x3474d800, 0x34758000,
+ 0x34762800, 0x3476d000, 0x34777800, 0x34782000, 0x3478c800, 0x34797000, 0x347a1800, 0x347ac000, 0x347b6800, 0x347c1000, 0x347cb800, 0x347d6000, 0x347e0800, 0x347eb000, 0x347f5800, 0x34800000,
+ 0x3480a800, 0x34815000, 0x3481f800, 0x3482a000, 0x34834800, 0x3483f000, 0x34849800, 0x34854000, 0x3485e800, 0x34869000, 0x34873800, 0x3487e000, 0x34888800, 0x34893000, 0x3489d800, 0x348a8000,
+ 0x348b2800, 0x348bd000, 0x348c7800, 0x348d2000, 0x348dc800, 0x348e7000, 0x348f1800, 0x348fc000, 0x34906800, 0x34911000, 0x3491b800, 0x34926000, 0x34930800, 0x3493b000, 0x34945800, 0x34950000,
+ 0x3495a800, 0x34965000, 0x3496f800, 0x3497a000, 0x34984800, 0x3498f000, 0x34999800, 0x349a4000, 0x349ae800, 0x349b9000, 0x349c3800, 0x349ce000, 0x349d8800, 0x349e3000, 0x349ed800, 0x349f8000,
+ 0x34a02800, 0x34a0d000, 0x34a17800, 0x34a22000, 0x34a2c800, 0x34a37000, 0x34a41800, 0x34a4c000, 0x34a56800, 0x34a61000, 0x34a6b800, 0x34a76000, 0x34a80800, 0x34a8b000, 0x34a95800, 0x34aa0000,
+ 0x34aaa800, 0x34ab5000, 0x34abf800, 0x34aca000, 0x34ad4800, 0x34adf000, 0x34ae9800, 0x34af4000, 0x34afe800, 0x34b09000, 0x34b13800, 0x34b1e000, 0x34b28800, 0x34b33000, 0x34b3d800, 0x34b48000,
+ 0x34b52800, 0x34b5d000, 0x34b67800, 0x34b72000, 0x34b7c800, 0x34b87000, 0x34b91800, 0x34b9c000, 0x34ba6800, 0x34bb1000, 0x34bbb800, 0x34bc6000, 0x34bd0800, 0x34bdb000, 0x34be5800, 0x34bf0000,
+ 0x34bfa800, 0x34c05000, 0x34c0f800, 0x34c1a000, 0x34c24800, 0x34c2f000, 0x34c39800, 0x34c44000, 0x34c4e800, 0x34c59000, 0x34c63800, 0x34c6e000, 0x34c78800, 0x34c83000, 0x34c8d800, 0x34c98000,
+ 0x34ca2800, 0x34cad000, 0x34cb7800, 0x34cc2000, 0x34ccc800, 0x34cd7000, 0x34ce1800, 0x34cec000, 0x34cf6800, 0x34d01000, 0x34d0b800, 0x34d16000, 0x34d20800, 0x34d2b000, 0x34d35800, 0x34d40000,
+ 0x34d4bb00, 0x34d57600, 0x34d63100, 0x34d6ec00, 0x34d7a700, 0x34d86200, 0x34d91d00, 0x34d9d800, 0x34da9300, 0x34db4e00, 0x34dc0900, 0x34dcc400, 0x34dd7f00, 0x34de3a00, 0x34def500, 0x34dfb000,
+ 0x34e06b00, 0x34e12600, 0x34e1e100, 0x34e29c00, 0x34e35700, 0x34e41200, 0x34e4cd00, 0x34e58800, 0x34e64300, 0x34e6fe00, 0x34e7b900, 0x34e87400, 0x34e92f00, 0x34e9ea00, 0x34eaa500, 0x34eb6000,
+ 0x34ec1b00, 0x34ecd600, 0x34ed9100, 0x34ee4c00, 0x34ef0700, 0x34efc200, 0x34f07d00, 0x34f13800, 0x34f1f300, 0x34f2ae00, 0x34f36900, 0x34f42400, 0x34f4df00, 0x34f59a00, 0x34f65500, 0x34f71000,
+ 0x34f7cb00, 0x34f88600, 0x34f94100, 0x34f9fc00, 0x34fab700, 0x34fb7200, 0x34fc2d00, 0x34fce800, 0x34fda300, 0x34fe5e00, 0x34ff1900, 0x34ffd400, 0x35008f00, 0x35014a00, 0x35020500, 0x3502c000,
+ 0x35037b00, 0x35043600, 0x3504f100, 0x3505ac00, 0x35066700, 0x35072200, 0x3507dd00, 0x35089800, 0x35095300, 0x350a0e00, 0x350ac900, 0x350b8400, 0x350c3f00, 0x350cfa00, 0x350db500, 0x350e7000,
+ 0x350f2b00, 0x350fe600, 0x3510a100, 0x35115c00, 0x35121700, 0x3512d200, 0x35138d00, 0x35144800, 0x35150300, 0x3515be00, 0x35167900, 0x35173400, 0x3517ef00, 0x3518aa00, 0x35196500, 0x351a2000,
+ 0x351adb00, 0x351b9600, 0x351c5100, 0x351d0c00, 0x351dc700, 0x351e8200, 0x351f3d00, 0x351ff800, 0x3520b300, 0x35216e00, 0x35222900, 0x3522e400, 0x35239f00, 0x35245a00, 0x35251500, 0x3525d000,
+ 0x35268b00, 0x35274600, 0x35280100, 0x3528bc00, 0x35297700, 0x352a3200, 0x352aed00, 0x352ba800, 0x352c6300, 0x352d1e00, 0x352dd900, 0x352e9400, 0x352f4f00, 0x35300a00, 0x3530c500, 0x35318000,
+ 0x35323b00, 0x3532f600, 0x3533b100, 0x35346c00, 0x35352700, 0x3535e200, 0x35369d00, 0x35375800, 0x35381300, 0x3538ce00, 0x35398900, 0x353a4400, 0x353aff00, 0x353bba00, 0x353c7500, 0x353d3000,
+ 0x353deb00, 0x353ea600, 0x353f6100, 0x35401c00, 0x3540d700, 0x35419200, 0x35424d00, 0x35430800, 0x3543c300, 0x35447e00, 0x35453900, 0x3545f400, 0x3546af00, 0x35476a00, 0x35482500, 0x3548e000,
+ 0x35499b00, 0x354a5600, 0x354b1100, 0x354bcc00, 0x354c8700, 0x354d4200, 0x354dfd00, 0x354eb800, 0x354f7300, 0x35502e00, 0x3550e900, 0x3551a400, 0x35525f00, 0x35531a00, 0x3553d500, 0x35549000,
+ 0x35554b00, 0x35560600, 0x3556c100, 0x35577c00, 0x35583700, 0x3558f200, 0x3559ad00, 0x355a6800, 0x355b2300, 0x355bde00, 0x355c9900, 0x355d5400, 0x355e0f00, 0x355eca00, 0x355f8500, 0x35604000,
+ 0x3560fb00, 0x3561b600, 0x35627100, 0x35632c00, 0x3563e700, 0x3564a200, 0x35655d00, 0x35661800, 0x3566d300, 0x35678e00, 0x35684900, 0x35690400, 0x3569bf00, 0x356a7a00, 0x356b3500, 0x356bf000,
+ 0x356cab00, 0x356d6600, 0x356e2100, 0x356edc00, 0x356f9700, 0x35705200, 0x35710d00, 0x3571c800, 0x35728300, 0x35733e00, 0x3573f900, 0x3574b400, 0x35756f00, 0x35762a00, 0x3576e500, 0x3577a000,
+ 0x35785b00, 0x35791600, 0x3579d100, 0x357a8c00, 0x357b4700, 0x357c0200, 0x357cbd00, 0x357d7800, 0x357e3300, 0x357eee00, 0x357fa900, 0x35806400, 0x35811f00, 0x3581da00, 0x35829500, 0x35835000,
+ 0x35840b00, 0x3584c600, 0x35858100, 0x35863c00, 0x3586f700, 0x3587b200, 0x35886d00, 0x35892800, 0x3589e300, 0x358a9e00, 0x358b5900, 0x358c1400, 0x358ccf00, 0x358d8a00, 0x358e4500, 0x358f0000,
+ 0x358fbb00, 0x35907600, 0x35913100, 0x3591ec00, 0x3592a700, 0x35936200, 0x35941d00, 0x3594d800, 0x35959300, 0x35964e00, 0x35970900, 0x3597c400, 0x35987f00, 0x35993a00, 0x3599f500, 0x359ab000,
+ 0x359b6b00, 0x359c2600, 0x359ce100, 0x359d9c00, 0x359e5700, 0x359f1200, 0x359fcd00, 0x35a08800, 0x35a14300, 0x35a1fe00, 0x35a2b900, 0x35a37400, 0x35a42f00, 0x35a4ea00, 0x35a5a500, 0x35a66000,
+ 0x35a71b00, 0x35a7d600, 0x35a89100, 0x35a94c00, 0x35aa0700, 0x35aac200, 0x35ab7d00, 0x35ac3800, 0x35acf300, 0x35adae00, 0x35ae6900, 0x35af2400, 0x35afdf00, 0x35b09a00, 0x35b15500, 0x35b21000,
+ 0x35b2cb00, 0x35b38600, 0x35b44100, 0x35b4fc00, 0x35b5b700, 0x35b67200, 0x35b72d00, 0x35b7e800, 0x35b8a300, 0x35b95e00, 0x35ba1900, 0x35bad400, 0x35bb8f00, 0x35bc4a00, 0x35bd0500, 0x35bdc000,
+ 0x35be7b00, 0x35bf3600, 0x35bff100, 0x35c0ac00, 0x35c16700, 0x35c22200, 0x35c2dd00, 0x35c39800, 0x35c45300, 0x35c50e00, 0x35c5c900, 0x35c68400, 0x35c73f00, 0x35c7fa00, 0x35c8b500, 0x35c97000,
+ 0x35ca2b00, 0x35cae600, 0x35cba100, 0x35cc5c00, 0x35cd1700, 0x35cdd200, 0x35ce8d00, 0x35cf4800, 0x35d00300, 0x35d0be00, 0x35d17900, 0x35d23400, 0x35d2ef00, 0x35d3aa00, 0x35d46500, 0x35d52000,
+ 0x35d5db00, 0x35d69600, 0x35d75100, 0x35d80c00, 0x35d8c700, 0x35d98200, 0x35da3d00, 0x35daf800, 0x35dbb300, 0x35dc6e00, 0x35dd2900, 0x35dde400, 0x35de9f00, 0x35df5a00, 0x35e01500, 0x35e0d000,
+ 0x35e18b00, 0x35e24600, 0x35e30100, 0x35e3bc00, 0x35e47700, 0x35e53200, 0x35e5ed00, 0x35e6a800, 0x35e76300, 0x35e81e00, 0x35e8d900, 0x35e99400, 0x35ea4f00, 0x35eb0a00, 0x35ebc500, 0x35ec8000,
+ 0x35ed3b00, 0x35edf600, 0x35eeb100, 0x35ef6c00, 0x35f02700, 0x35f0e200, 0x35f19d00, 0x35f25800, 0x35f31300, 0x35f3ce00, 0x35f48900, 0x35f54400, 0x35f5ff00, 0x35f6ba00, 0x35f77500, 0x35f83000,
+ 0x35f8eb00, 0x35f9a600, 0x35fa6100, 0x35fb1c00, 0x35fbd700, 0x35fc9200, 0x35fd4d00, 0x35fe0800, 0x35fec300, 0x35ff7e00, 0x36003900, 0x3600f400, 0x3601af00, 0x36026a00, 0x36032500, 0x3603e000,
+ 0x36049b00, 0x36055600, 0x36061100, 0x3606cc00, 0x36078700, 0x36084200, 0x3608fd00, 0x3609b800, 0x360a7300, 0x360b2e00, 0x360be900, 0x360ca400, 0x360d5f00, 0x360e1a00, 0x360ed500, 0x360f9000,
+ 0x36104b00, 0x36110600, 0x3611c100, 0x36127c00, 0x36133700, 0x3613f200, 0x3614ad00, 0x36156800, 0x36162300, 0x3616de00, 0x36179900, 0x36185400, 0x36190f00, 0x3619ca00, 0x361a8500, 0x361b4000,
+ 0x361bfb00, 0x361cb600, 0x361d7100, 0x361e2c00, 0x361ee700, 0x361fa200, 0x36205d00, 0x36211800, 0x3621d300, 0x36228e00, 0x36234900, 0x36240400, 0x3624bf00, 0x36257a00, 0x36263500, 0x3626f000,
+ 0x3627ab00, 0x36286600, 0x36292100, 0x3629dc00, 0x362a9700, 0x362b5200, 0x362c0d00, 0x362cc800, 0x362d8300, 0x362e3e00, 0x362ef900, 0x362fb400, 0x36306f00, 0x36312a00, 0x3631e500, 0x3632a000,
+ 0x36335b00, 0x36341600, 0x3634d100, 0x36358c00, 0x36364700, 0x36370200, 0x3637bd00, 0x36387800, 0x36393300, 0x3639ee00, 0x363aa900, 0x363b6400, 0x363c1f00, 0x363cda00, 0x363d9500, 0x363e5000,
+ 0x363f0b00, 0x363fc600, 0x36408100, 0x36413c00, 0x3641f700, 0x3642b200, 0x36436d00, 0x36442800, 0x3644e300, 0x36459e00, 0x36465900, 0x36471400, 0x3647cf00, 0x36488a00, 0x36494500, 0x364a0000,
+ 0x364ac000, 0x364b8000, 0x364c4000, 0x364d0000, 0x364dc000, 0x364e8000, 0x364f4000, 0x36500000, 0x3650c000, 0x36518000, 0x36524000, 0x36530000, 0x3653c000, 0x36548000, 0x36554000, 0x36560000,
+ 0x3656c000, 0x36578000, 0x36584000, 0x36590000, 0x3659c000, 0x365a8000, 0x365b4000, 0x365c0000, 0x365cc000, 0x365d8000, 0x365e4000, 0x365f0000, 0x365fc000, 0x36608000, 0x36614000, 0x36620000,
+ 0x3662c000, 0x36638000, 0x36644000, 0x36650000, 0x3665c000, 0x36668000, 0x36674000, 0x36680000, 0x3668c000, 0x36698000, 0x366a4000, 0x366b0000, 0x366bc000, 0x366c8000, 0x366d4000, 0x366e0000,
+ 0x366ec000, 0x366f8000, 0x36704000, 0x36710000, 0x3671c000, 0x36728000, 0x36734000, 0x36740000, 0x3674c000, 0x36758000, 0x36764000, 0x36770000, 0x3677c000, 0x36788000, 0x36794000, 0x367a0000,
+ 0x367ac000, 0x367b8000, 0x367c4000, 0x367d0000, 0x367dc000, 0x367e8000, 0x367f4000, 0x36800000, 0x3680c000, 0x36818000, 0x36824000, 0x36830000, 0x3683c000, 0x36848000, 0x36854000, 0x36860000,
+ 0x3686c000, 0x36878000, 0x36884000, 0x36890000, 0x3689c000, 0x368a8000, 0x368b4000, 0x368c0000, 0x368cc000, 0x368d8000, 0x368e4000, 0x368f0000, 0x368fc000, 0x36908000, 0x36914000, 0x36920000,
+ 0x3692c000, 0x36938000, 0x36944000, 0x36950000, 0x3695c000, 0x36968000, 0x36974000, 0x36980000, 0x3698c000, 0x36998000, 0x369a4000, 0x369b0000, 0x369bc000, 0x369c8000, 0x369d4000, 0x369e0000,
+ 0x369ec000, 0x369f8000, 0x36a04000, 0x36a10000, 0x36a1c000, 0x36a28000, 0x36a34000, 0x36a40000, 0x36a4c000, 0x36a58000, 0x36a64000, 0x36a70000, 0x36a7c000, 0x36a88000, 0x36a94000, 0x36aa0000,
+ 0x36aac000, 0x36ab8000, 0x36ac4000, 0x36ad0000, 0x36adc000, 0x36ae8000, 0x36af4000, 0x36b00000, 0x36b0c000, 0x36b18000, 0x36b24000, 0x36b30000, 0x36b3c000, 0x36b48000, 0x36b54000, 0x36b60000,
+ 0x36b6c000, 0x36b78000, 0x36b84000, 0x36b90000, 0x36b9c000, 0x36ba8000, 0x36bb4000, 0x36bc0000, 0x36bcc000, 0x36bd8000, 0x36be4000, 0x36bf0000, 0x36bfc000, 0x36c08000, 0x36c14000, 0x36c20000,
+ 0x36c2c000, 0x36c38000, 0x36c44000, 0x36c50000, 0x36c5c000, 0x36c68000, 0x36c74000, 0x36c80000, 0x36c8c000, 0x36c98000, 0x36ca4000, 0x36cb0000, 0x36cbc000, 0x36cc8000, 0x36cd4000, 0x36ce0000,
+ 0x36cec000, 0x36cf8000, 0x36d04000, 0x36d10000, 0x36d1c000, 0x36d28000, 0x36d34000, 0x36d40000, 0x36d4c000, 0x36d58000, 0x36d64000, 0x36d70000, 0x36d7c000, 0x36d88000, 0x36d94000, 0x36da0000,
+ 0x36dac000, 0x36db8000, 0x36dc4000, 0x36dd0000, 0x36ddc000, 0x36de8000, 0x36df4000, 0x36e00000, 0x36e0c000, 0x36e18000, 0x36e24000, 0x36e30000, 0x36e3c000, 0x36e48000, 0x36e54000, 0x36e60000,
+ 0x36e6c000, 0x36e78000, 0x36e84000, 0x36e90000, 0x36e9c000, 0x36ea8000, 0x36eb4000, 0x36ec0000, 0x36ecc000, 0x36ed8000, 0x36ee4000, 0x36ef0000, 0x36efc000, 0x36f08000, 0x36f14000, 0x36f20000,
+ 0x36f2c000, 0x36f38000, 0x36f44000, 0x36f50000, 0x36f5c000, 0x36f68000, 0x36f74000, 0x36f80000, 0x36f8c000, 0x36f98000, 0x36fa4000, 0x36fb0000, 0x36fbc000, 0x36fc8000, 0x36fd4000, 0x36fe0000,
+ 0x36fec000, 0x36ff8000, 0x37004000, 0x37010000, 0x3701c000, 0x37028000, 0x37034000, 0x37040000, 0x3704c000, 0x37058000, 0x37064000, 0x37070000, 0x3707c000, 0x37088000, 0x37094000, 0x370a0000,
+ 0x370ac000, 0x370b8000, 0x370c4000, 0x370d0000, 0x370dc000, 0x370e8000, 0x370f4000, 0x37100000, 0x3710c000, 0x37118000, 0x37124000, 0x37130000, 0x3713c000, 0x37148000, 0x37154000, 0x37160000,
+ 0x3716c000, 0x37178000, 0x37184000, 0x37190000, 0x3719c000, 0x371a8000, 0x371b4000, 0x371c0000, 0x371cc000, 0x371d8000, 0x371e4000, 0x371f0000, 0x371fc000, 0x37208000, 0x37214000, 0x37220000,
+ 0x3722c000, 0x37238000, 0x37244000, 0x37250000, 0x3725c000, 0x37268000, 0x37274000, 0x37280000, 0x3728c000, 0x37298000, 0x372a4000, 0x372b0000, 0x372bc000, 0x372c8000, 0x372d4000, 0x372e0000,
+ 0x372ec000, 0x372f8000, 0x37304000, 0x37310000, 0x3731c000, 0x37328000, 0x37334000, 0x37340000, 0x3734c000, 0x37358000, 0x37364000, 0x37370000, 0x3737c000, 0x37388000, 0x37394000, 0x373a0000,
+ 0x373ac000, 0x373b8000, 0x373c4000, 0x373d0000, 0x373dc000, 0x373e8000, 0x373f4000, 0x37400000, 0x3740c000, 0x37418000, 0x37424000, 0x37430000, 0x3743c000, 0x37448000, 0x37454000, 0x37460000,
+ 0x3746c000, 0x37478000, 0x37484000, 0x37490000, 0x3749c000, 0x374a8000, 0x374b4000, 0x374c0000, 0x374cc000, 0x374d8000, 0x374e4000, 0x374f0000, 0x374fc000, 0x37508000, 0x37514000, 0x37520000,
+ 0x3752c000, 0x37538000, 0x37544000, 0x37550000, 0x3755c000, 0x37568000, 0x37574000, 0x37580000, 0x3758c000, 0x37598000, 0x375a4000, 0x375b0000, 0x375bc000, 0x375c8000, 0x375d4000, 0x375e0000,
+ 0x375ec000, 0x375f8000, 0x37604000, 0x37610000, 0x3761c000, 0x37628000, 0x37634000, 0x37640000, 0x3764c000, 0x37658000, 0x37664000, 0x37670000, 0x3767c000, 0x37688000, 0x37694000, 0x376a0000,
+ 0x376ac000, 0x376b8000, 0x376c4000, 0x376d0000, 0x376dc000, 0x376e8000, 0x376f4000, 0x37700000, 0x3770c000, 0x37718000, 0x37724000, 0x37730000, 0x3773c000, 0x37748000, 0x37754000, 0x37760000,
+ 0x3776c000, 0x37778000, 0x37784000, 0x37790000, 0x3779c000, 0x377a8000, 0x377b4000, 0x377c0000, 0x377cc000, 0x377d8000, 0x377e4000, 0x377f0000, 0x377fc000, 0x37808000, 0x37814000, 0x37820000,
+ 0x3782c000, 0x37838000, 0x37844000, 0x37850000, 0x3785c000, 0x37868000, 0x37874000, 0x37880000, 0x3788c000, 0x37898000, 0x378a4000, 0x378b0000, 0x378bc000, 0x378c8000, 0x378d4000, 0x378e0000,
+ 0x378ec000, 0x378f8000, 0x37904000, 0x37910000, 0x3791c000, 0x37928000, 0x37934000, 0x37940000, 0x3794c000, 0x37958000, 0x37964000, 0x37970000, 0x3797c000, 0x37988000, 0x37994000, 0x379a0000,
+ 0x379ac000, 0x379b8000, 0x379c4000, 0x379d0000, 0x379dc000, 0x379e8000, 0x379f4000, 0x37a00000, 0x37a0c000, 0x37a18000, 0x37a24000, 0x37a30000, 0x37a3c000, 0x37a48000, 0x37a54000, 0x37a60000,
+ 0x37a6c000, 0x37a78000, 0x37a84000, 0x37a90000, 0x37a9c000, 0x37aa8000, 0x37ab4000, 0x37ac0000, 0x37acc000, 0x37ad8000, 0x37ae4000, 0x37af0000, 0x37afc000, 0x37b08000, 0x37b14000, 0x37b20000,
+ 0x37b2c000, 0x37b38000, 0x37b44000, 0x37b50000, 0x37b5c000, 0x37b68000, 0x37b74000, 0x37b80000, 0x37b8c000, 0x37b98000, 0x37ba4000, 0x37bb0000, 0x37bbc000, 0x37bc8000, 0x37bd4000, 0x37be0000,
+ 0x37bec000, 0x37bf8000, 0x37c04000, 0x37c10000, 0x37c1c000, 0x37c28000, 0x37c34000, 0x37c40000, 0x37c4c000, 0x37c58000, 0x37c64000, 0x37c70000, 0x37c7c000, 0x37c88000, 0x37c94000, 0x37ca0000,
+ 0x37cad800, 0x37cbb000, 0x37cc8800, 0x37cd6000, 0x37ce3800, 0x37cf1000, 0x37cfe800, 0x37d0c000, 0x37d19800, 0x37d27000, 0x37d34800, 0x37d42000, 0x37d4f800, 0x37d5d000, 0x37d6a800, 0x37d78000,
+ 0x37d85800, 0x37d93000, 0x37da0800, 0x37dae000, 0x37dbb800, 0x37dc9000, 0x37dd6800, 0x37de4000, 0x37df1800, 0x37dff000, 0x37e0c800, 0x37e1a000, 0x37e27800, 0x37e35000, 0x37e42800, 0x37e50000,
+ 0x37e5d800, 0x37e6b000, 0x37e78800, 0x37e86000, 0x37e93800, 0x37ea1000, 0x37eae800, 0x37ebc000, 0x37ec9800, 0x37ed7000, 0x37ee4800, 0x37ef2000, 0x37eff800, 0x37f0d000, 0x37f1a800, 0x37f28000,
+ 0x37f35800, 0x37f43000, 0x37f50800, 0x37f5e000, 0x37f6b800, 0x37f79000, 0x37f86800, 0x37f94000, 0x37fa1800, 0x37faf000, 0x37fbc800, 0x37fca000, 0x37fd7800, 0x37fe5000, 0x37ff2800, 0x38000000,
+ 0x3800d800, 0x3801b000, 0x38028800, 0x38036000, 0x38043800, 0x38051000, 0x3805e800, 0x3806c000, 0x38079800, 0x38087000, 0x38094800, 0x380a2000, 0x380af800, 0x380bd000, 0x380ca800, 0x380d8000,
+ 0x380e5800, 0x380f3000, 0x38100800, 0x3810e000, 0x3811b800, 0x38129000, 0x38136800, 0x38144000, 0x38151800, 0x3815f000, 0x3816c800, 0x3817a000, 0x38187800, 0x38195000, 0x381a2800, 0x381b0000,
+ 0x381bd800, 0x381cb000, 0x381d8800, 0x381e6000, 0x381f3800, 0x38201000, 0x3820e800, 0x3821c000, 0x38229800, 0x38237000, 0x38244800, 0x38252000, 0x3825f800, 0x3826d000, 0x3827a800, 0x38288000,
+ 0x38295800, 0x382a3000, 0x382b0800, 0x382be000, 0x382cb800, 0x382d9000, 0x382e6800, 0x382f4000, 0x38301800, 0x3830f000, 0x3831c800, 0x3832a000, 0x38337800, 0x38345000, 0x38352800, 0x38360000,
+ 0x3836d800, 0x3837b000, 0x38388800, 0x38396000, 0x383a3800, 0x383b1000, 0x383be800, 0x383cc000, 0x383d9800, 0x383e7000, 0x383f4800, 0x38402000, 0x3840f800, 0x3841d000, 0x3842a800, 0x38438000,
+ 0x38445800, 0x38453000, 0x38460800, 0x3846e000, 0x3847b800, 0x38489000, 0x38496800, 0x384a4000, 0x384b1800, 0x384bf000, 0x384cc800, 0x384da000, 0x384e7800, 0x384f5000, 0x38502800, 0x38510000,
+ 0x3851d800, 0x3852b000, 0x38538800, 0x38546000, 0x38553800, 0x38561000, 0x3856e800, 0x3857c000, 0x38589800, 0x38597000, 0x385a4800, 0x385b2000, 0x385bf800, 0x385cd000, 0x385da800, 0x385e8000,
+ 0x385f5800, 0x38603000, 0x38610800, 0x3861e000, 0x3862b800, 0x38639000, 0x38646800, 0x38654000, 0x38661800, 0x3866f000, 0x3867c800, 0x3868a000, 0x38697800, 0x386a5000, 0x386b2800, 0x386c0000,
+ 0x386cd700, 0x386daf00, 0x386e8700, 0x386f5f00, 0x38703700, 0x38710f00, 0x3871e700, 0x3872bf00, 0x38739700, 0x38746f00, 0x38754700, 0x38761f00, 0x3876f700, 0x3877cf00, 0x3878a700, 0x38797f00,
+ 0x387a5700, 0x387b2f00, 0x387c0700, 0x387cdf00, 0x387db700, 0x387e8f00, 0x387f6700, 0x38803f00, 0x38811700, 0x3881ef00, 0x3882c700, 0x38839f00, 0x38847700, 0x38854f00, 0x38862700, 0x3886ff00,
+ 0x3887d700, 0x3888af00, 0x38898700, 0x388a5f00, 0x388b3700, 0x388c0f00, 0x388ce700, 0x388dbf00, 0x388e9700, 0x388f6f00, 0x38904700, 0x38911f00, 0x3891f700, 0x3892cf00, 0x3893a700, 0x38947f00,
+ 0x38955700, 0x38962f00, 0x38970700, 0x3897df00, 0x3898b700, 0x38998f00, 0x389a6700, 0x389b3f00, 0x389c1700, 0x389cef00, 0x389dc700, 0x389e9f00, 0x389f7700, 0x38a04f00, 0x38a12700, 0x38a1ff00,
+ 0x38a2d700, 0x38a3af00, 0x38a48700, 0x38a55f00, 0x38a63700, 0x38a70f00, 0x38a7e700, 0x38a8bf00, 0x38a99700, 0x38aa6f00, 0x38ab4700, 0x38ac1f00, 0x38acf700, 0x38adcf00, 0x38aea700, 0x38af7f00,
+ 0x38b05700, 0x38b12f00, 0x38b20700, 0x38b2df00, 0x38b3b700, 0x38b48f00, 0x38b56700, 0x38b63f00, 0x38b71700, 0x38b7ef00, 0x38b8c700, 0x38b99f00, 0x38ba7700, 0x38bb4f00, 0x38bc2700, 0x38bcff00,
+ 0x38bdd700, 0x38beaf00, 0x38bf8700, 0x38c05f00, 0x38c13700, 0x38c20f00, 0x38c2e700, 0x38c3bf00, 0x38c49700, 0x38c56f00, 0x38c64700, 0x38c71f00, 0x38c7f700, 0x38c8cf00, 0x38c9a700, 0x38ca7f00,
+ 0x38cb5700, 0x38cc2f00, 0x38cd0700, 0x38cddf00, 0x38ceb700, 0x38cf8f00, 0x38d06700, 0x38d13f00, 0x38d21700, 0x38d2ef00, 0x38d3c700, 0x38d49f00, 0x38d57700, 0x38d64f00, 0x38d72700, 0x38d7ff00,
+ 0x38d8d700, 0x38d9af00, 0x38da8700, 0x38db5f00, 0x38dc3700, 0x38dd0f00, 0x38dde700, 0x38debf00, 0x38df9700, 0x38e06f00, 0x38e14700, 0x38e21f00, 0x38e2f700, 0x38e3cf00, 0x38e4a700, 0x38e57f00,
+ 0x38e65700, 0x38e72f00, 0x38e80700, 0x38e8df00, 0x38e9b700, 0x38ea8f00, 0x38eb6700, 0x38ec3f00, 0x38ed1700, 0x38edef00, 0x38eec700, 0x38ef9f00, 0x38f07700, 0x38f14f00, 0x38f22700, 0x38f2ff00,
+ 0x38f3d700, 0x38f4af00, 0x38f58700, 0x38f65f00, 0x38f73700, 0x38f80f00, 0x38f8e700, 0x38f9bf00, 0x38fa9700, 0x38fb6f00, 0x38fc4700, 0x38fd1f00, 0x38fdf700, 0x38fecf00, 0x38ffa700, 0x39007f00,
+ 0x39015700, 0x39022f00, 0x39030700, 0x3903df00, 0x3904b700, 0x39058f00, 0x39066700, 0x39073f00, 0x39081700, 0x3908ef00, 0x3909c700, 0x390a9f00, 0x390b7700, 0x390c4f00, 0x390d2700, 0x390dff00,
+ 0x390ed700, 0x390faf00, 0x39108700, 0x39115f00, 0x39123700, 0x39130f00, 0x3913e700, 0x3914bf00, 0x39159700, 0x39166f00, 0x39174700, 0x39181f00, 0x3918f700, 0x3919cf00, 0x391aa700, 0x391b7f00,
+ 0x391c5700, 0x391d2f00, 0x391e0700, 0x391edf00, 0x391fb700, 0x39208f00, 0x39216700, 0x39223f00, 0x39231700, 0x3923ef00, 0x3924c700, 0x39259f00, 0x39267700, 0x39274f00, 0x39282700, 0x3928ff00,
+ 0x3929d700, 0x392aaf00, 0x392b8700, 0x392c5f00, 0x392d3700, 0x392e0f00, 0x392ee700, 0x392fbf00, 0x39309700, 0x39316f00, 0x39324700, 0x39331f00, 0x3933f700, 0x3934cf00, 0x3935a700, 0x39367f00,
+ 0x39375700, 0x39382f00, 0x39390700, 0x3939df00, 0x393ab700, 0x393b8f00, 0x393c6700, 0x393d3f00, 0x393e1700, 0x393eef00, 0x393fc700, 0x39409f00, 0x39417700, 0x39424f00, 0x39432700, 0x3943ff00,
+ 0x3944d700, 0x3945af00, 0x39468700, 0x39475f00, 0x39483700, 0x39490f00, 0x3949e700, 0x394abf00, 0x394b9700, 0x394c6f00, 0x394d4700, 0x394e1f00, 0x394ef700, 0x394fcf00, 0x3950a700, 0x39517f00,
+ 0x39525700, 0x39532f00, 0x39540700, 0x3954df00, 0x3955b700, 0x39568f00, 0x39576700, 0x39583f00, 0x39591700, 0x3959ef00, 0x395ac700, 0x395b9f00, 0x395c7700, 0x395d4f00, 0x395e2700, 0x395eff00,
+ 0x395fd700, 0x3960af00, 0x39618700, 0x39625f00, 0x39633700, 0x39640f00, 0x3964e700, 0x3965bf00, 0x39669700, 0x39676f00, 0x39684700, 0x39691f00, 0x3969f700, 0x396acf00, 0x396ba700, 0x396c7f00,
+ 0x396d5700, 0x396e2f00, 0x396f0700, 0x396fdf00, 0x3970b700, 0x39718f00, 0x39726700, 0x39733f00, 0x39741700, 0x3974ef00, 0x3975c700, 0x39769f00, 0x39777700, 0x39784f00, 0x39792700, 0x3979ff00,
+ 0x397ae900, 0x397bd300, 0x397cbd00, 0x397da700, 0x397e9100, 0x397f7b00, 0x39806500, 0x39814f00, 0x39823900, 0x39832300, 0x39840d00, 0x3984f700, 0x3985e100, 0x3986cb00, 0x3987b500, 0x39889f00,
+ 0x39898900, 0x398a7300, 0x398b5d00, 0x398c4700, 0x398d3100, 0x398e1b00, 0x398f0500, 0x398fef00, 0x3990d900, 0x3991c300, 0x3992ad00, 0x39939700, 0x39948100, 0x39956b00, 0x39965500, 0x39973f00,
+ 0x39982900, 0x39991300, 0x3999fd00, 0x399ae700, 0x399bd100, 0x399cbb00, 0x399da500, 0x399e8f00, 0x399f7900, 0x39a06300, 0x39a14d00, 0x39a23700, 0x39a32100, 0x39a40b00, 0x39a4f500, 0x39a5df00,
+ 0x39a6c900, 0x39a7b300, 0x39a89d00, 0x39a98700, 0x39aa7100, 0x39ab5b00, 0x39ac4500, 0x39ad2f00, 0x39ae1900, 0x39af0300, 0x39afed00, 0x39b0d700, 0x39b1c100, 0x39b2ab00, 0x39b39500, 0x39b47f00,
+ 0x39b56900, 0x39b65300, 0x39b73d00, 0x39b82700, 0x39b91100, 0x39b9fb00, 0x39bae500, 0x39bbcf00, 0x39bcb900, 0x39bda300, 0x39be8d00, 0x39bf7700, 0x39c06100, 0x39c14b00, 0x39c23500, 0x39c31f00,
+ 0x39c40900, 0x39c4f300, 0x39c5dd00, 0x39c6c700, 0x39c7b100, 0x39c89b00, 0x39c98500, 0x39ca6f00, 0x39cb5900, 0x39cc4300, 0x39cd2d00, 0x39ce1700, 0x39cf0100, 0x39cfeb00, 0x39d0d500, 0x39d1bf00,
+ 0x39d2a900, 0x39d39300, 0x39d47d00, 0x39d56700, 0x39d65100, 0x39d73b00, 0x39d82500, 0x39d90f00, 0x39d9f900, 0x39dae300, 0x39dbcd00, 0x39dcb700, 0x39dda100, 0x39de8b00, 0x39df7500, 0x39e05f00,
+ 0x39e14900, 0x39e23300, 0x39e31d00, 0x39e40700, 0x39e4f100, 0x39e5db00, 0x39e6c500, 0x39e7af00, 0x39e89900, 0x39e98300, 0x39ea6d00, 0x39eb5700, 0x39ec4100, 0x39ed2b00, 0x39ee1500, 0x39eeff00,
+ 0x39efe900, 0x39f0d300, 0x39f1bd00, 0x39f2a700, 0x39f39100, 0x39f47b00, 0x39f56500, 0x39f64f00, 0x39f73900, 0x39f82300, 0x39f90d00, 0x39f9f700, 0x39fae100, 0x39fbcb00, 0x39fcb500, 0x39fd9f00,
+ 0x39fe8900, 0x39ff7300, 0x3a005d00, 0x3a014700, 0x3a023100, 0x3a031b00, 0x3a040500, 0x3a04ef00, 0x3a05d900, 0x3a06c300, 0x3a07ad00, 0x3a089700, 0x3a098100, 0x3a0a6b00, 0x3a0b5500, 0x3a0c3f00,
+ 0x3a0d2900, 0x3a0e1300, 0x3a0efd00, 0x3a0fe700, 0x3a10d100, 0x3a11bb00, 0x3a12a500, 0x3a138f00, 0x3a147900, 0x3a156300, 0x3a164d00, 0x3a173700, 0x3a182100, 0x3a190b00, 0x3a19f500, 0x3a1adf00,
+ 0x3a1bc900, 0x3a1cb300, 0x3a1d9d00, 0x3a1e8700, 0x3a1f7100, 0x3a205b00, 0x3a214500, 0x3a222f00, 0x3a231900, 0x3a240300, 0x3a24ed00, 0x3a25d700, 0x3a26c100, 0x3a27ab00, 0x3a289500, 0x3a297f00,
+ 0x3a2a6900, 0x3a2b5300, 0x3a2c3d00, 0x3a2d2700, 0x3a2e1100, 0x3a2efb00, 0x3a2fe500, 0x3a30cf00, 0x3a31b900, 0x3a32a300, 0x3a338d00, 0x3a347700, 0x3a356100, 0x3a364b00, 0x3a373500, 0x3a381f00,
+ 0x3a390900, 0x3a39f300, 0x3a3add00, 0x3a3bc700, 0x3a3cb100, 0x3a3d9b00, 0x3a3e8500, 0x3a3f6f00, 0x3a405900, 0x3a414300, 0x3a422d00, 0x3a431700, 0x3a440100, 0x3a44eb00, 0x3a45d500, 0x3a46bf00,
+ 0x3a47a900, 0x3a489300, 0x3a497d00, 0x3a4a6700, 0x3a4b5100, 0x3a4c3b00, 0x3a4d2500, 0x3a4e0f00, 0x3a4ef900, 0x3a4fe300, 0x3a50cd00, 0x3a51b700, 0x3a52a100, 0x3a538b00, 0x3a547500, 0x3a555f00,
+ 0x3a564900, 0x3a573300, 0x3a581d00, 0x3a590700, 0x3a59f100, 0x3a5adb00, 0x3a5bc500, 0x3a5caf00, 0x3a5d9900, 0x3a5e8300, 0x3a5f6d00, 0x3a605700, 0x3a614100, 0x3a622b00, 0x3a631500, 0x3a63ff00,
+ 0x3a64e900, 0x3a65d300, 0x3a66bd00, 0x3a67a700, 0x3a689100, 0x3a697b00, 0x3a6a6500, 0x3a6b4f00, 0x3a6c3900, 0x3a6d2300, 0x3a6e0d00, 0x3a6ef700, 0x3a6fe100, 0x3a70cb00, 0x3a71b500, 0x3a729f00,
+ 0x3a738900, 0x3a747300, 0x3a755d00, 0x3a764700, 0x3a773100, 0x3a781b00, 0x3a790500, 0x3a79ef00, 0x3a7ad900, 0x3a7bc300, 0x3a7cad00, 0x3a7d9700, 0x3a7e8100, 0x3a7f6b00, 0x3a805500, 0x3a813f00,
+ 0x3a822900, 0x3a831300, 0x3a83fd00, 0x3a84e700, 0x3a85d100, 0x3a86bb00, 0x3a87a500, 0x3a888f00, 0x3a897900, 0x3a8a6300, 0x3a8b4d00, 0x3a8c3700, 0x3a8d2100, 0x3a8e0b00, 0x3a8ef500, 0x3a8fdf00,
+ 0x3a90c900, 0x3a91b300, 0x3a929d00, 0x3a938700, 0x3a947100, 0x3a955b00, 0x3a964500, 0x3a972f00, 0x3a981900, 0x3a990300, 0x3a99ed00, 0x3a9ad700, 0x3a9bc100, 0x3a9cab00, 0x3a9d9500, 0x3a9e7f00,
+ 0x3a9f6900, 0x3aa05300, 0x3aa13d00, 0x3aa22700, 0x3aa31100, 0x3aa3fb00, 0x3aa4e500, 0x3aa5cf00, 0x3aa6b900, 0x3aa7a300, 0x3aa88d00, 0x3aa97700, 0x3aaa6100, 0x3aab4b00, 0x3aac3500, 0x3aad1f00,
+ 0x3aae0900, 0x3aaef300, 0x3aafdd00, 0x3ab0c700, 0x3ab1b100, 0x3ab29b00, 0x3ab38500, 0x3ab46f00, 0x3ab55900, 0x3ab64300, 0x3ab72d00, 0x3ab81700, 0x3ab90100, 0x3ab9eb00, 0x3abad500, 0x3abbbf00,
+ 0x3abca900, 0x3abd9300, 0x3abe7d00, 0x3abf6700, 0x3ac05100, 0x3ac13b00, 0x3ac22500, 0x3ac30f00, 0x3ac3f900, 0x3ac4e300, 0x3ac5cd00, 0x3ac6b700, 0x3ac7a100, 0x3ac88b00, 0x3ac97500, 0x3aca5f00,
+ 0x3acb4900, 0x3acc3300, 0x3acd1d00, 0x3ace0700, 0x3acef100, 0x3acfdb00, 0x3ad0c500, 0x3ad1af00, 0x3ad29900, 0x3ad38300, 0x3ad46d00, 0x3ad55700, 0x3ad64100, 0x3ad72b00, 0x3ad81500, 0x3ad8ff00,
+ 0x3ad9e900, 0x3adad300, 0x3adbbd00, 0x3adca700, 0x3add9100, 0x3ade7b00, 0x3adf6500, 0x3ae04f00, 0x3ae13900, 0x3ae22300, 0x3ae30d00, 0x3ae3f700, 0x3ae4e100, 0x3ae5cb00, 0x3ae6b500, 0x3ae79f00,
+ 0x3ae88900, 0x3ae97300, 0x3aea5d00, 0x3aeb4700, 0x3aec3100, 0x3aed1b00, 0x3aee0500, 0x3aeeef00, 0x3aefd900, 0x3af0c300, 0x3af1ad00, 0x3af29700, 0x3af38100, 0x3af46b00, 0x3af55500, 0x3af63f00,
+ 0x3af72900, 0x3af81300, 0x3af8fd00, 0x3af9e700, 0x3afad100, 0x3afbbb00, 0x3afca500, 0x3afd8f00, 0x3afe7900, 0x3aff6300, 0x3b004e00, 0x3b013800, 0x3b022200, 0x3b030c00, 0x3b03f600, 0x3b04e000,
+ 0x3b05ca00, 0x3b06b400, 0x3b079e00, 0x3b088800, 0x3b097200, 0x3b0a5c00, 0x3b0b4600, 0x3b0c3000, 0x3b0d1a00, 0x3b0e0400, 0x3b0eee00, 0x3b0fd800, 0x3b10c200, 0x3b11ac00, 0x3b129600, 0x3b138000,
+ 0x3b146a00, 0x3b155400, 0x3b163e00, 0x3b172800, 0x3b181200, 0x3b18fc00, 0x3b19e600, 0x3b1ad000, 0x3b1bba00, 0x3b1ca400, 0x3b1d8e00, 0x3b1e7800, 0x3b1f6200, 0x3b204c00, 0x3b213600, 0x3b222000,
+ 0x3b230a00, 0x3b23f400, 0x3b24de00, 0x3b25c800, 0x3b26b200, 0x3b279c00, 0x3b288600, 0x3b297000, 0x3b2a5a00, 0x3b2b4400, 0x3b2c2e00, 0x3b2d1800, 0x3b2e0200, 0x3b2eec00, 0x3b2fd600, 0x3b30c000,
+ 0x3b31aa00, 0x3b329400, 0x3b337e00, 0x3b346800, 0x3b355200, 0x3b363c00, 0x3b372600, 0x3b381000, 0x3b38fa00, 0x3b39e400, 0x3b3ace00, 0x3b3bb800, 0x3b3ca200, 0x3b3d8c00, 0x3b3e7600, 0x3b3f6000,
+ 0x3b404a00, 0x3b413400, 0x3b421e00, 0x3b430800, 0x3b43f200, 0x3b44dc00, 0x3b45c600, 0x3b46b000, 0x3b479a00, 0x3b488400, 0x3b496e00, 0x3b4a5800, 0x3b4b4200, 0x3b4c2c00, 0x3b4d1600, 0x3b4e0000,
+ 0x3b4f0000, 0x3b500000, 0x3b510000, 0x3b520000, 0x3b530000, 0x3b540000, 0x3b550000, 0x3b560000, 0x3b570000, 0x3b580000, 0x3b590000, 0x3b5a0000, 0x3b5b0000, 0x3b5c0000, 0x3b5d0000, 0x3b5e0000,
+ 0x3b5f0000, 0x3b600000, 0x3b610000, 0x3b620000, 0x3b630000, 0x3b640000, 0x3b650000, 0x3b660000, 0x3b670000, 0x3b680000, 0x3b690000, 0x3b6a0000, 0x3b6b0000, 0x3b6c0000, 0x3b6d0000, 0x3b6e0000,
+ 0x3b6f0000, 0x3b700000, 0x3b710000, 0x3b720000, 0x3b730000, 0x3b740000, 0x3b750000, 0x3b760000, 0x3b770000, 0x3b780000, 0x3b790000, 0x3b7a0000, 0x3b7b0000, 0x3b7c0000, 0x3b7d0000, 0x3b7e0000,
+ 0x3b7f0000, 0x3b800000, 0x3b810000, 0x3b820000, 0x3b830000, 0x3b840000, 0x3b850000, 0x3b860000, 0x3b870000, 0x3b880000, 0x3b890000, 0x3b8a0000, 0x3b8b0000, 0x3b8c0000, 0x3b8d0000, 0x3b8e0000,
+ 0x3b8f0000, 0x3b900000, 0x3b910000, 0x3b920000, 0x3b930000, 0x3b940000, 0x3b950000, 0x3b960000, 0x3b970000, 0x3b980000, 0x3b990000, 0x3b9a0000, 0x3b9b0000, 0x3b9c0000, 0x3b9d0000, 0x3b9e0000,
+ 0x3b9f0000, 0x3ba00000, 0x3ba10000, 0x3ba20000, 0x3ba30000, 0x3ba40000, 0x3ba50000, 0x3ba60000, 0x3ba70000, 0x3ba80000, 0x3ba90000, 0x3baa0000, 0x3bab0000, 0x3bac0000, 0x3bad0000, 0x3bae0000,
+ 0x3baf0000, 0x3bb00000, 0x3bb10000, 0x3bb20000, 0x3bb30000, 0x3bb40000, 0x3bb50000, 0x3bb60000, 0x3bb70000, 0x3bb80000, 0x3bb90000, 0x3bba0000, 0x3bbb0000, 0x3bbc0000, 0x3bbd0000, 0x3bbe0000,
+ 0x3bbf0000, 0x3bc00000, 0x3bc10000, 0x3bc20000, 0x3bc30000, 0x3bc40000, 0x3bc50000, 0x3bc60000, 0x3bc70000, 0x3bc80000, 0x3bc90000, 0x3bca0000, 0x3bcb0000, 0x3bcc0000, 0x3bcd0000, 0x3bce0000,
+ 0x3bcf0000, 0x3bd00000, 0x3bd10000, 0x3bd20000, 0x3bd30000, 0x3bd40000, 0x3bd50000, 0x3bd60000, 0x3bd70000, 0x3bd80000, 0x3bd90000, 0x3bda0000, 0x3bdb0000, 0x3bdc0000, 0x3bdd0000, 0x3bde0000,
+ 0x3bdf0000, 0x3be00000, 0x3be10000, 0x3be20000, 0x3be30000, 0x3be40000, 0x3be50000, 0x3be60000, 0x3be70000, 0x3be80000, 0x3be90000, 0x3bea0000, 0x3beb0000, 0x3bec0000, 0x3bed0000, 0x3bee0000,
+ 0x3bef0000, 0x3bf00000, 0x3bf10000, 0x3bf20000, 0x3bf30000, 0x3bf40000, 0x3bf50000, 0x3bf60000, 0x3bf70000, 0x3bf80000, 0x3bf90000, 0x3bfa0000, 0x3bfb0000, 0x3bfc0000, 0x3bfd0000, 0x3bfe0000,
+ 0x3bff0000, 0x3c000000, 0x3c010000, 0x3c020000, 0x3c030000, 0x3c040000, 0x3c050000, 0x3c060000, 0x3c070000, 0x3c080000, 0x3c090000, 0x3c0a0000, 0x3c0b0000, 0x3c0c0000, 0x3c0d0000, 0x3c0e0000,
+ 0x3c0f0000, 0x3c100000, 0x3c110000, 0x3c120000, 0x3c130000, 0x3c140000, 0x3c150000, 0x3c160000, 0x3c170000, 0x3c180000, 0x3c190000, 0x3c1a0000, 0x3c1b0000, 0x3c1c0000, 0x3c1d0000, 0x3c1e0000,
+ 0x3c1f0000, 0x3c200000, 0x3c210000, 0x3c220000, 0x3c230000, 0x3c240000, 0x3c250000, 0x3c260000, 0x3c270000, 0x3c280000, 0x3c290000, 0x3c2a0000, 0x3c2b0000, 0x3c2c0000, 0x3c2d0000, 0x3c2e0000,
+ 0x3c2f0000, 0x3c300000, 0x3c310000, 0x3c320000, 0x3c330000, 0x3c340000, 0x3c350000, 0x3c360000, 0x3c370000, 0x3c380000, 0x3c390000, 0x3c3a0000, 0x3c3b0000, 0x3c3c0000, 0x3c3d0000, 0x3c3e0000,
+ 0x3c3f0000, 0x3c400000, 0x3c410000, 0x3c420000, 0x3c430000, 0x3c440000, 0x3c450000, 0x3c460000, 0x3c470000, 0x3c480000, 0x3c490000, 0x3c4a0000, 0x3c4b0000, 0x3c4c0000, 0x3c4d0000, 0x3c4e0000,
+ 0x3c4f0000, 0x3c500000, 0x3c510000, 0x3c520000, 0x3c530000, 0x3c540000, 0x3c550000, 0x3c560000, 0x3c570000, 0x3c580000, 0x3c590000, 0x3c5a0000, 0x3c5b0000, 0x3c5c0000, 0x3c5d0000, 0x3c5e0000,
+ 0x3c5f0000, 0x3c600000, 0x3c610000, 0x3c620000, 0x3c630000, 0x3c640000, 0x3c650000, 0x3c660000, 0x3c670000, 0x3c680000, 0x3c690000, 0x3c6a0000, 0x3c6b0000, 0x3c6c0000, 0x3c6d0000, 0x3c6e0000,
+ 0x3c6f0000, 0x3c700000, 0x3c710000, 0x3c720000, 0x3c730000, 0x3c740000, 0x3c750000, 0x3c760000, 0x3c770000, 0x3c780000, 0x3c790000, 0x3c7a0000, 0x3c7b0000, 0x3c7c0000, 0x3c7d0000, 0x3c7e0000,
+ 0x3c7f0000, 0x3c800000, 0x3c810000, 0x3c820000, 0x3c830000, 0x3c840000, 0x3c850000, 0x3c860000, 0x3c870000, 0x3c880000, 0x3c890000, 0x3c8a0000, 0x3c8b0000, 0x3c8c0000, 0x3c8d0000, 0x3c8e0000,
+ 0x3c8f0000, 0x3c900000, 0x3c910000, 0x3c920000, 0x3c930000, 0x3c940000, 0x3c950000, 0x3c960000, 0x3c970000, 0x3c980000, 0x3c990000, 0x3c9a0000, 0x3c9b0000, 0x3c9c0000, 0x3c9d0000, 0x3c9e0000,
+ 0x3c9f0000, 0x3ca00000, 0x3ca10000, 0x3ca20000, 0x3ca30000, 0x3ca40000, 0x3ca50000, 0x3ca60000, 0x3ca70000, 0x3ca80000, 0x3ca90000, 0x3caa0000, 0x3cab0000, 0x3cac0000, 0x3cad0000, 0x3cae0000,
+ 0x3caf0000, 0x3cb00000, 0x3cb10000, 0x3cb20000, 0x3cb30000, 0x3cb40000, 0x3cb50000, 0x3cb60000, 0x3cb70000, 0x3cb80000, 0x3cb90000, 0x3cba0000, 0x3cbb0000, 0x3cbc0000, 0x3cbd0000, 0x3cbe0000,
+ 0x3cbf0000, 0x3cc00000, 0x3cc10000, 0x3cc20000, 0x3cc30000, 0x3cc40000, 0x3cc50000, 0x3cc60000, 0x3cc70000, 0x3cc80000, 0x3cc90000, 0x3cca0000, 0x3ccb0000, 0x3ccc0000, 0x3ccd0000, 0x3cce0000,
+ 0x3ccf0000, 0x3cd00000, 0x3cd10000, 0x3cd20000, 0x3cd30000, 0x3cd40000, 0x3cd50000, 0x3cd60000, 0x3cd70000, 0x3cd80000, 0x3cd90000, 0x3cda0000, 0x3cdb0000, 0x3cdc0000, 0x3cdd0000, 0x3cde0000,
+ 0x3cdf0000, 0x3ce00000, 0x3ce10000, 0x3ce20000, 0x3ce30000, 0x3ce40000, 0x3ce50000, 0x3ce60000, 0x3ce70000, 0x3ce80000, 0x3ce90000, 0x3cea0000, 0x3ceb0000, 0x3cec0000, 0x3ced0000, 0x3cee0000,
+ 0x3cef0000, 0x3cf00000, 0x3cf10000, 0x3cf20000, 0x3cf30000, 0x3cf40000, 0x3cf50000, 0x3cf60000, 0x3cf70000, 0x3cf80000, 0x3cf90000, 0x3cfa0000, 0x3cfb0000, 0x3cfc0000, 0x3cfd0000, 0x3cfe0000,
+ 0x3cff0000, 0x3d000000, 0x3d010000, 0x3d020000, 0x3d030000, 0x3d040000, 0x3d050000, 0x3d060000, 0x3d070000, 0x3d080000, 0x3d090000, 0x3d0a0000, 0x3d0b0000, 0x3d0c0000, 0x3d0d0000, 0x3d0e0000,
+ 0x3d0f0000, 0x3d100000, 0x3d110000, 0x3d120000, 0x3d130000, 0x3d140000, 0x3d150000, 0x3d160000, 0x3d170000, 0x3d180000, 0x3d190000, 0x3d1a0000, 0x3d1b0000, 0x3d1c0000, 0x3d1d0000, 0x3d1e0000,
+ 0x3d1f0000, 0x3d200000, 0x3d210000, 0x3d220000, 0x3d230000, 0x3d240000, 0x3d250000, 0x3d260000, 0x3d270000, 0x3d280000, 0x3d290000, 0x3d2a0000, 0x3d2b0000, 0x3d2c0000, 0x3d2d0000, 0x3d2e0000,
+ 0x3d2f0000, 0x3d300000, 0x3d310000, 0x3d320000, 0x3d330000, 0x3d340000, 0x3d350000, 0x3d360000, 0x3d370000, 0x3d380000, 0x3d390000, 0x3d3a0000, 0x3d3b0000, 0x3d3c0000, 0x3d3d0000, 0x3d3e0000,
+ 0x3d3f0000, 0x3d400000, 0x3d410000, 0x3d420000, 0x3d430000, 0x3d440000, 0x3d450000, 0x3d460000, 0x3d470000, 0x3d480000, 0x3d490000, 0x3d4a0000, 0x3d4b0000, 0x3d4c0000, 0x3d4d0000, 0x3d4e0000,
+ 0x3d4f2200, 0x3d504400, 0x3d516600, 0x3d528800, 0x3d53aa00, 0x3d54cc00, 0x3d55ee00, 0x3d571000, 0x3d583200, 0x3d595400, 0x3d5a7600, 0x3d5b9800, 0x3d5cba00, 0x3d5ddc00, 0x3d5efe00, 0x3d602000,
+ 0x3d614200, 0x3d626400, 0x3d638600, 0x3d64a800, 0x3d65ca00, 0x3d66ec00, 0x3d680e00, 0x3d693000, 0x3d6a5200, 0x3d6b7400, 0x3d6c9600, 0x3d6db800, 0x3d6eda00, 0x3d6ffc00, 0x3d711e00, 0x3d724000,
+ 0x3d736200, 0x3d748400, 0x3d75a600, 0x3d76c800, 0x3d77ea00, 0x3d790c00, 0x3d7a2e00, 0x3d7b5000, 0x3d7c7200, 0x3d7d9400, 0x3d7eb600, 0x3d7fd800, 0x3d80fa00, 0x3d821c00, 0x3d833e00, 0x3d846000,
+ 0x3d858200, 0x3d86a400, 0x3d87c600, 0x3d88e800, 0x3d8a0a00, 0x3d8b2c00, 0x3d8c4e00, 0x3d8d7000, 0x3d8e9200, 0x3d8fb400, 0x3d90d600, 0x3d91f800, 0x3d931a00, 0x3d943c00, 0x3d955e00, 0x3d968000,
+ 0x3d97a200, 0x3d98c400, 0x3d99e600, 0x3d9b0800, 0x3d9c2a00, 0x3d9d4c00, 0x3d9e6e00, 0x3d9f9000, 0x3da0b200, 0x3da1d400, 0x3da2f600, 0x3da41800, 0x3da53a00, 0x3da65c00, 0x3da77e00, 0x3da8a000,
+ 0x3da9c200, 0x3daae400, 0x3dac0600, 0x3dad2800, 0x3dae4a00, 0x3daf6c00, 0x3db08e00, 0x3db1b000, 0x3db2d200, 0x3db3f400, 0x3db51600, 0x3db63800, 0x3db75a00, 0x3db87c00, 0x3db99e00, 0x3dbac000,
+ 0x3dbbe200, 0x3dbd0400, 0x3dbe2600, 0x3dbf4800, 0x3dc06a00, 0x3dc18c00, 0x3dc2ae00, 0x3dc3d000, 0x3dc4f200, 0x3dc61400, 0x3dc73600, 0x3dc85800, 0x3dc97a00, 0x3dca9c00, 0x3dcbbe00, 0x3dcce000,
+ 0x3dce0200, 0x3dcf2400, 0x3dd04600, 0x3dd16800, 0x3dd28a00, 0x3dd3ac00, 0x3dd4ce00, 0x3dd5f000, 0x3dd71200, 0x3dd83400, 0x3dd95600, 0x3dda7800, 0x3ddb9a00, 0x3ddcbc00, 0x3dddde00, 0x3ddf0000,
+ 0x3de02200, 0x3de14400, 0x3de26600, 0x3de38800, 0x3de4aa00, 0x3de5cc00, 0x3de6ee00, 0x3de81000, 0x3de93200, 0x3dea5400, 0x3deb7600, 0x3dec9800, 0x3dedba00, 0x3deedc00, 0x3deffe00, 0x3df12000,
+ 0x3df24200, 0x3df36400, 0x3df48600, 0x3df5a800, 0x3df6ca00, 0x3df7ec00, 0x3df90e00, 0x3dfa3000, 0x3dfb5200, 0x3dfc7400, 0x3dfd9600, 0x3dfeb800, 0x3dffda00, 0x3e00fc00, 0x3e021e00, 0x3e034000,
+ 0x3e046200, 0x3e058400, 0x3e06a600, 0x3e07c800, 0x3e08ea00, 0x3e0a0c00, 0x3e0b2e00, 0x3e0c5000, 0x3e0d7200, 0x3e0e9400, 0x3e0fb600, 0x3e10d800, 0x3e11fa00, 0x3e131c00, 0x3e143e00, 0x3e156000,
+ 0x3e168200, 0x3e17a400, 0x3e18c600, 0x3e19e800, 0x3e1b0a00, 0x3e1c2c00, 0x3e1d4e00, 0x3e1e7000, 0x3e1f9200, 0x3e20b400, 0x3e21d600, 0x3e22f800, 0x3e241a00, 0x3e253c00, 0x3e265e00, 0x3e278000,
+ 0x3e28a200, 0x3e29c400, 0x3e2ae600, 0x3e2c0800, 0x3e2d2a00, 0x3e2e4c00, 0x3e2f6e00, 0x3e309000, 0x3e31b200, 0x3e32d400, 0x3e33f600, 0x3e351800, 0x3e363a00, 0x3e375c00, 0x3e387e00, 0x3e39a000,
+ 0x3e3ac200, 0x3e3be400, 0x3e3d0600, 0x3e3e2800, 0x3e3f4a00, 0x3e406c00, 0x3e418e00, 0x3e42b000, 0x3e43d200, 0x3e44f400, 0x3e461600, 0x3e473800, 0x3e485a00, 0x3e497c00, 0x3e4a9e00, 0x3e4bc000,
+ 0x3e4ce200, 0x3e4e0400, 0x3e4f2600, 0x3e504800, 0x3e516a00, 0x3e528c00, 0x3e53ae00, 0x3e54d000, 0x3e55f200, 0x3e571400, 0x3e583600, 0x3e595800, 0x3e5a7a00, 0x3e5b9c00, 0x3e5cbe00, 0x3e5de000,
+ 0x3e5f0200, 0x3e602400, 0x3e614600, 0x3e626800, 0x3e638a00, 0x3e64ac00, 0x3e65ce00, 0x3e66f000, 0x3e681200, 0x3e693400, 0x3e6a5600, 0x3e6b7800, 0x3e6c9a00, 0x3e6dbc00, 0x3e6ede00, 0x3e700000,
+ 0x3e712100, 0x3e724300, 0x3e736500, 0x3e748700, 0x3e75a900, 0x3e76cb00, 0x3e77ed00, 0x3e790f00, 0x3e7a3100, 0x3e7b5300, 0x3e7c7500, 0x3e7d9700, 0x3e7eb900, 0x3e7fdb00, 0x3e80fd00, 0x3e821f00,
+ 0x3e834100, 0x3e846300, 0x3e858500, 0x3e86a700, 0x3e87c900, 0x3e88eb00, 0x3e8a0d00, 0x3e8b2f00, 0x3e8c5100, 0x3e8d7300, 0x3e8e9500, 0x3e8fb700, 0x3e90d900, 0x3e91fb00, 0x3e931d00, 0x3e943f00,
+ 0x3e956100, 0x3e968300, 0x3e97a500, 0x3e98c700, 0x3e99e900, 0x3e9b0b00, 0x3e9c2d00, 0x3e9d4f00, 0x3e9e7100, 0x3e9f9300, 0x3ea0b500, 0x3ea1d700, 0x3ea2f900, 0x3ea41b00, 0x3ea53d00, 0x3ea65f00,
+ 0x3ea78100, 0x3ea8a300, 0x3ea9c500, 0x3eaae700, 0x3eac0900, 0x3ead2b00, 0x3eae4d00, 0x3eaf6f00, 0x3eb09100, 0x3eb1b300, 0x3eb2d500, 0x3eb3f700, 0x3eb51900, 0x3eb63b00, 0x3eb75d00, 0x3eb87f00,
+ 0x3eb9a100, 0x3ebac300, 0x3ebbe500, 0x3ebd0700, 0x3ebe2900, 0x3ebf4b00, 0x3ec06d00, 0x3ec18f00, 0x3ec2b100, 0x3ec3d300, 0x3ec4f500, 0x3ec61700, 0x3ec73900, 0x3ec85b00, 0x3ec97d00, 0x3eca9f00,
+ 0x3ecbc100, 0x3ecce300, 0x3ece0500, 0x3ecf2700, 0x3ed04900, 0x3ed16b00, 0x3ed28d00, 0x3ed3af00, 0x3ed4d100, 0x3ed5f300, 0x3ed71500, 0x3ed83700, 0x3ed95900, 0x3eda7b00, 0x3edb9d00, 0x3edcbf00,
+ 0x3edde100, 0x3edf0300, 0x3ee02500, 0x3ee14700, 0x3ee26900, 0x3ee38b00, 0x3ee4ad00, 0x3ee5cf00, 0x3ee6f100, 0x3ee81300, 0x3ee93500, 0x3eea5700, 0x3eeb7900, 0x3eec9b00, 0x3eedbd00, 0x3eeedf00,
+ 0x3ef00100, 0x3ef12300, 0x3ef24500, 0x3ef36700, 0x3ef48900, 0x3ef5ab00, 0x3ef6cd00, 0x3ef7ef00, 0x3ef91100, 0x3efa3300, 0x3efb5500, 0x3efc7700, 0x3efd9900, 0x3efebb00, 0x3effdd00, 0x3f00ff00,
+ 0x3f022100, 0x3f034300, 0x3f046500, 0x3f058700, 0x3f06a900, 0x3f07cb00, 0x3f08ed00, 0x3f0a0f00, 0x3f0b3100, 0x3f0c5300, 0x3f0d7500, 0x3f0e9700, 0x3f0fb900, 0x3f10db00, 0x3f11fd00, 0x3f131f00,
+ 0x3f144100, 0x3f156300, 0x3f168500, 0x3f17a700, 0x3f18c900, 0x3f19eb00, 0x3f1b0d00, 0x3f1c2f00, 0x3f1d5100, 0x3f1e7300, 0x3f1f9500, 0x3f20b700, 0x3f21d900, 0x3f22fb00, 0x3f241d00, 0x3f253f00,
+ 0x3f266100, 0x3f278300, 0x3f28a500, 0x3f29c700, 0x3f2ae900, 0x3f2c0b00, 0x3f2d2d00, 0x3f2e4f00, 0x3f2f7100, 0x3f309300, 0x3f31b500, 0x3f32d700, 0x3f33f900, 0x3f351b00, 0x3f363d00, 0x3f375f00,
+ 0x3f388100, 0x3f39a300, 0x3f3ac500, 0x3f3be700, 0x3f3d0900, 0x3f3e2b00, 0x3f3f4d00, 0x3f406f00, 0x3f419100, 0x3f42b300, 0x3f43d500, 0x3f44f700, 0x3f461900, 0x3f473b00, 0x3f485d00, 0x3f497f00,
+ 0x3f4aa100, 0x3f4bc300, 0x3f4ce500, 0x3f4e0700, 0x3f4f2900, 0x3f504b00, 0x3f516d00, 0x3f528f00, 0x3f53b100, 0x3f54d300, 0x3f55f500, 0x3f571700, 0x3f583900, 0x3f595b00, 0x3f5a7d00, 0x3f5b9f00,
+ 0x3f5cc100, 0x3f5de300, 0x3f5f0500, 0x3f602700, 0x3f614900, 0x3f626b00, 0x3f638d00, 0x3f64af00, 0x3f65d100, 0x3f66f300, 0x3f681500, 0x3f693700, 0x3f6a5900, 0x3f6b7b00, 0x3f6c9d00, 0x3f6dbf00,
+ 0x3f6ee100, 0x3f700300, 0x3f712500, 0x3f724700, 0x3f736900, 0x3f748b00, 0x3f75ad00, 0x3f76cf00, 0x3f77f100, 0x3f791300, 0x3f7a3500, 0x3f7b5700, 0x3f7c7900, 0x3f7d9b00, 0x3f7ebd00, 0x3f7fdf00,
+ 0x3f810100, 0x3f822300, 0x3f834500, 0x3f846700, 0x3f858900, 0x3f86ab00, 0x3f87cd00, 0x3f88ef00, 0x3f8a1100, 0x3f8b3300, 0x3f8c5500, 0x3f8d7700, 0x3f8e9900, 0x3f8fbb00, 0x3f90dd00, 0x3f91ff00,
+ 0x3f935300, 0x3f94a700, 0x3f95fb00, 0x3f974f00, 0x3f98a300, 0x3f99f700, 0x3f9b4b00, 0x3f9c9f00, 0x3f9df300, 0x3f9f4700, 0x3fa09b00, 0x3fa1ef00, 0x3fa34300, 0x3fa49700, 0x3fa5eb00, 0x3fa73f00,
+ 0x3fa89300, 0x3fa9e700, 0x3fab3b00, 0x3fac8f00, 0x3fade300, 0x3faf3700, 0x3fb08b00, 0x3fb1df00, 0x3fb33300, 0x3fb48700, 0x3fb5db00, 0x3fb72f00, 0x3fb88300, 0x3fb9d700, 0x3fbb2b00, 0x3fbc7f00,
+ 0x3fbdd300, 0x3fbf2700, 0x3fc07b00, 0x3fc1cf00, 0x3fc32300, 0x3fc47700, 0x3fc5cb00, 0x3fc71f00, 0x3fc87300, 0x3fc9c700, 0x3fcb1b00, 0x3fcc6f00, 0x3fcdc300, 0x3fcf1700, 0x3fd06b00, 0x3fd1bf00,
+ 0x3fd31300, 0x3fd46700, 0x3fd5bb00, 0x3fd70f00, 0x3fd86300, 0x3fd9b700, 0x3fdb0b00, 0x3fdc5f00, 0x3fddb300, 0x3fdf0700, 0x3fe05b00, 0x3fe1af00, 0x3fe30300, 0x3fe45700, 0x3fe5ab00, 0x3fe6ff00,
+ 0x3fe85300, 0x3fe9a700, 0x3feafb00, 0x3fec4f00, 0x3feda300, 0x3feef700, 0x3ff04b00, 0x3ff19f00, 0x3ff2f300, 0x3ff44700, 0x3ff59b00, 0x3ff6ef00, 0x3ff84300, 0x3ff99700, 0x3ffaeb00, 0x3ffc3f00,
+ 0x3ffd9300, 0x3ffee700, 0x40003b00, 0x40018f00, 0x4002e300, 0x40043700, 0x40058b00, 0x4006df00, 0x40083300, 0x40098700, 0x400adb00, 0x400c2f00, 0x400d8300, 0x400ed700, 0x40102b00, 0x40117f00,
+ 0x4012d300, 0x40142700, 0x40157b00, 0x4016cf00, 0x40182300, 0x40197700, 0x401acb00, 0x401c1f00, 0x401d7300, 0x401ec700, 0x40201b00, 0x40216f00, 0x4022c300, 0x40241700, 0x40256b00, 0x4026bf00,
+ 0x40281300, 0x40296700, 0x402abb00, 0x402c0f00, 0x402d6300, 0x402eb700, 0x40300b00, 0x40315f00, 0x4032b300, 0x40340700, 0x40355b00, 0x4036af00, 0x40380300, 0x40395700, 0x403aab00, 0x403bff00,
+ 0x403d5300, 0x403ea700, 0x403ffb00, 0x40414f00, 0x4042a300, 0x4043f700, 0x40454b00, 0x40469f00, 0x4047f300, 0x40494700, 0x404a9b00, 0x404bef00, 0x404d4300, 0x404e9700, 0x404feb00, 0x40513f00,
+ 0x40529300, 0x4053e700, 0x40553b00, 0x40568f00, 0x4057e300, 0x40593700, 0x405a8b00, 0x405bdf00, 0x405d3300, 0x405e8700, 0x405fdb00, 0x40612f00, 0x40628300, 0x4063d700, 0x40652b00, 0x40667f00,
+ 0x4067d300, 0x40692700, 0x406a7b00, 0x406bcf00, 0x406d2300, 0x406e7700, 0x406fcb00, 0x40711f00, 0x40727300, 0x4073c700, 0x40751b00, 0x40766f00, 0x4077c300, 0x40791700, 0x407a6b00, 0x407bbf00,
+ 0x407d1300, 0x407e6700, 0x407fbb00, 0x40810f00, 0x40826300, 0x4083b700, 0x40850b00, 0x40865f00, 0x4087b300, 0x40890700, 0x408a5b00, 0x408baf00, 0x408d0300, 0x408e5700, 0x408fab00, 0x4090ff00,
+ 0x40925300, 0x4093a700, 0x4094fb00, 0x40964f00, 0x4097a300, 0x4098f700, 0x409a4b00, 0x409b9f00, 0x409cf300, 0x409e4700, 0x409f9b00, 0x40a0ef00, 0x40a24300, 0x40a39700, 0x40a4eb00, 0x40a63f00,
+ 0x40a79300, 0x40a8e700, 0x40aa3b00, 0x40ab8f00, 0x40ace300, 0x40ae3700, 0x40af8b00, 0x40b0df00, 0x40b23300, 0x40b38700, 0x40b4db00, 0x40b62f00, 0x40b78300, 0x40b8d700, 0x40ba2b00, 0x40bb7f00,
+ 0x40bcd300, 0x40be2700, 0x40bf7b00, 0x40c0cf00, 0x40c22300, 0x40c37700, 0x40c4cb00, 0x40c61f00, 0x40c77300, 0x40c8c700, 0x40ca1b00, 0x40cb6f00, 0x40ccc300, 0x40ce1700, 0x40cf6b00, 0x40d0bf00,
+ 0x40d21300, 0x40d36700, 0x40d4bb00, 0x40d60f00, 0x40d76300, 0x40d8b700, 0x40da0b00, 0x40db5f00, 0x40dcb300, 0x40de0700, 0x40df5b00, 0x40e0af00, 0x40e20300, 0x40e35700, 0x40e4ab00, 0x40e5ff00,
+ 0x40e75300, 0x40e8a700, 0x40e9fb00, 0x40eb4f00, 0x40eca300, 0x40edf700, 0x40ef4b00, 0x40f09f00, 0x40f1f300, 0x40f34700, 0x40f49b00, 0x40f5ef00, 0x40f74300, 0x40f89700, 0x40f9eb00, 0x40fb3f00,
+ 0x40fc9300, 0x40fde700, 0x40ff3b00, 0x41008f00, 0x4101e300, 0x41033700, 0x41048b00, 0x4105df00, 0x41073300, 0x41088700, 0x4109db00, 0x410b2f00, 0x410c8300, 0x410dd700, 0x410f2b00, 0x41107f00,
+ 0x4111d300, 0x41132700, 0x41147b00, 0x4115cf00, 0x41172300, 0x41187700, 0x4119cb00, 0x411b1f00, 0x411c7300, 0x411dc700, 0x411f1b00, 0x41206f00, 0x4121c300, 0x41231700, 0x41246b00, 0x4125bf00,
+ 0x41271300, 0x41286700, 0x4129bb00, 0x412b0f00, 0x412c6300, 0x412db700, 0x412f0b00, 0x41305f00, 0x4131b300, 0x41330700, 0x41345b00, 0x4135af00, 0x41370300, 0x41385700, 0x4139ab00, 0x413aff00,
+ 0x413c5300, 0x413da700, 0x413efb00, 0x41404f00, 0x4141a300, 0x4142f700, 0x41444b00, 0x41459f00, 0x4146f300, 0x41484700, 0x41499b00, 0x414aef00, 0x414c4300, 0x414d9700, 0x414eeb00, 0x41503f00,
+ 0x41519300, 0x4152e700, 0x41543b00, 0x41558f00, 0x4156e300, 0x41583700, 0x41598b00, 0x415adf00, 0x415c3300, 0x415d8700, 0x415edb00, 0x41602f00, 0x41618300, 0x4162d700, 0x41642b00, 0x41657f00,
+ 0x4166d300, 0x41682700, 0x41697b00, 0x416acf00, 0x416c2300, 0x416d7700, 0x416ecb00, 0x41701f00, 0x41717300, 0x4172c700, 0x41741b00, 0x41756f00, 0x4176c300, 0x41781700, 0x41796b00, 0x417abf00,
+ 0x417c1300, 0x417d6700, 0x417ebb00, 0x41800f00, 0x41816300, 0x4182b700, 0x41840b00, 0x41855f00, 0x4186b300, 0x41880700, 0x41895b00, 0x418aaf00, 0x418c0300, 0x418d5700, 0x418eab00, 0x418fff00,
+ 0x41915300, 0x4192a700, 0x4193fb00, 0x41954f00, 0x4196a300, 0x4197f700, 0x41994b00, 0x419a9f00, 0x419bf300, 0x419d4700, 0x419e9b00, 0x419fef00, 0x41a14300, 0x41a29700, 0x41a3eb00, 0x41a53f00,
+ 0x41a69300, 0x41a7e700, 0x41a93b00, 0x41aa8f00, 0x41abe300, 0x41ad3700, 0x41ae8b00, 0x41afdf00, 0x41b13300, 0x41b28700, 0x41b3db00, 0x41b52f00, 0x41b68300, 0x41b7d700, 0x41b92b00, 0x41ba7f00,
+ 0x41bbd300, 0x41bd2700, 0x41be7b00, 0x41bfcf00, 0x41c12300, 0x41c27700, 0x41c3cb00, 0x41c51f00, 0x41c67300, 0x41c7c700, 0x41c91b00, 0x41ca6f00, 0x41cbc300, 0x41cd1700, 0x41ce6b00, 0x41cfbf00,
+ 0x41d11300, 0x41d26700, 0x41d3bb00, 0x41d50f00, 0x41d66300, 0x41d7b700, 0x41d90b00, 0x41da5f00, 0x41dbb300, 0x41dd0700, 0x41de5b00, 0x41dfaf00, 0x41e10300, 0x41e25700, 0x41e3ab00, 0x41e4ff00,
+ 0x41e65300, 0x41e7a700, 0x41e8fb00, 0x41ea4f00, 0x41eba300, 0x41ecf700, 0x41ee4b00, 0x41ef9f00, 0x41f0f300, 0x41f24700, 0x41f39b00, 0x41f4ef00, 0x41f64300, 0x41f79700, 0x41f8eb00, 0x41fa3f00,
+ 0x41fb9300, 0x41fce700, 0x41fe3b00, 0x41ff8f00, 0x4200e300, 0x42023700, 0x42038b00, 0x4204df00, 0x42063300, 0x42078700, 0x4208db00, 0x420a2f00, 0x420b8300, 0x420cd700, 0x420e2b00, 0x420f7f00,
+ 0x4210d300, 0x42122700, 0x42137b00, 0x4214cf00, 0x42162300, 0x42177700, 0x4218cb00, 0x421a1f00, 0x421b7300, 0x421cc700, 0x421e1b00, 0x421f6f00, 0x4220c300, 0x42221700, 0x42236b00, 0x4224bf00,
+ 0x42261300, 0x42276700, 0x4228bb00, 0x422a0f00, 0x422b6300, 0x422cb700, 0x422e0b00, 0x422f5f00, 0x4230b300, 0x42320700, 0x42335b00, 0x4234af00, 0x42360300, 0x42375700, 0x4238ab00, 0x4239ff00,
+ 0x423b9d00, 0x423d3b00, 0x423ed900, 0x42407700, 0x42421500, 0x4243b300, 0x42455100, 0x4246ef00, 0x42488d00, 0x424a2b00, 0x424bc900, 0x424d6700, 0x424f0500, 0x4250a300, 0x42524100, 0x4253df00,
+ 0x42557d00, 0x42571b00, 0x4258b900, 0x425a5700, 0x425bf500, 0x425d9300, 0x425f3100, 0x4260cf00, 0x42626d00, 0x42640b00, 0x4265a900, 0x42674700, 0x4268e500, 0x426a8300, 0x426c2100, 0x426dbf00,
+ 0x426f5d00, 0x4270fb00, 0x42729900, 0x42743700, 0x4275d500, 0x42777300, 0x42791100, 0x427aaf00, 0x427c4d00, 0x427deb00, 0x427f8900, 0x42812700, 0x4282c500, 0x42846300, 0x42860100, 0x42879f00,
+ 0x42893d00, 0x428adb00, 0x428c7900, 0x428e1700, 0x428fb500, 0x42915300, 0x4292f100, 0x42948f00, 0x42962d00, 0x4297cb00, 0x42996900, 0x429b0700, 0x429ca500, 0x429e4300, 0x429fe100, 0x42a17f00,
+ 0x42a31d00, 0x42a4bb00, 0x42a65900, 0x42a7f700, 0x42a99500, 0x42ab3300, 0x42acd100, 0x42ae6f00, 0x42b00d00, 0x42b1ab00, 0x42b34900, 0x42b4e700, 0x42b68500, 0x42b82300, 0x42b9c100, 0x42bb5f00,
+ 0x42bcfd00, 0x42be9b00, 0x42c03900, 0x42c1d700, 0x42c37500, 0x42c51300, 0x42c6b100, 0x42c84f00, 0x42c9ed00, 0x42cb8b00, 0x42cd2900, 0x42cec700, 0x42d06500, 0x42d20300, 0x42d3a100, 0x42d53f00,
+ 0x42d6dd00, 0x42d87b00, 0x42da1900, 0x42dbb700, 0x42dd5500, 0x42def300, 0x42e09100, 0x42e22f00, 0x42e3cd00, 0x42e56b00, 0x42e70900, 0x42e8a700, 0x42ea4500, 0x42ebe300, 0x42ed8100, 0x42ef1f00,
+ 0x42f0bd00, 0x42f25b00, 0x42f3f900, 0x42f59700, 0x42f73500, 0x42f8d300, 0x42fa7100, 0x42fc0f00, 0x42fdad00, 0x42ff4b00, 0x4300e900, 0x43028700, 0x43042500, 0x4305c300, 0x43076100, 0x4308ff00,
+ 0x430a9d00, 0x430c3b00, 0x430dd900, 0x430f7700, 0x43111500, 0x4312b300, 0x43145100, 0x4315ef00, 0x43178d00, 0x43192b00, 0x431ac900, 0x431c6700, 0x431e0500, 0x431fa300, 0x43214100, 0x4322df00,
+ 0x43247d00, 0x43261b00, 0x4327b900, 0x43295700, 0x432af500, 0x432c9300, 0x432e3100, 0x432fcf00, 0x43316d00, 0x43330b00, 0x4334a900, 0x43364700, 0x4337e500, 0x43398300, 0x433b2100, 0x433cbf00,
+ 0x433e5d00, 0x433ffb00, 0x43419900, 0x43433700, 0x4344d500, 0x43467300, 0x43481100, 0x4349af00, 0x434b4d00, 0x434ceb00, 0x434e8900, 0x43502700, 0x4351c500, 0x43536300, 0x43550100, 0x43569f00,
+ 0x43583d00, 0x4359db00, 0x435b7900, 0x435d1700, 0x435eb500, 0x43605300, 0x4361f100, 0x43638f00, 0x43652d00, 0x4366cb00, 0x43686900, 0x436a0700, 0x436ba500, 0x436d4300, 0x436ee100, 0x43707f00,
+ 0x43721d00, 0x4373bb00, 0x43755900, 0x4376f700, 0x43789500, 0x437a3300, 0x437bd100, 0x437d6f00, 0x437f0d00, 0x4380ab00, 0x43824900, 0x4383e700, 0x43858500, 0x43872300, 0x4388c100, 0x438a5f00,
+ 0x438bfd00, 0x438d9b00, 0x438f3900, 0x4390d700, 0x43927500, 0x43941300, 0x4395b100, 0x43974f00, 0x4398ed00, 0x439a8b00, 0x439c2900, 0x439dc700, 0x439f6500, 0x43a10300, 0x43a2a100, 0x43a43f00,
+ 0x43a5dd00, 0x43a77b00, 0x43a91900, 0x43aab700, 0x43ac5500, 0x43adf300, 0x43af9100, 0x43b12f00, 0x43b2cd00, 0x43b46b00, 0x43b60900, 0x43b7a700, 0x43b94500, 0x43bae300, 0x43bc8100, 0x43be1f00,
+ 0x43bfbd00, 0x43c15b00, 0x43c2f900, 0x43c49700, 0x43c63500, 0x43c7d300, 0x43c97100, 0x43cb0f00, 0x43ccad00, 0x43ce4b00, 0x43cfe900, 0x43d18700, 0x43d32500, 0x43d4c300, 0x43d66100, 0x43d7ff00,
+ 0x43d99d00, 0x43db3b00, 0x43dcd900, 0x43de7700, 0x43e01500, 0x43e1b300, 0x43e35100, 0x43e4ef00, 0x43e68d00, 0x43e82b00, 0x43e9c900, 0x43eb6700, 0x43ed0500, 0x43eea300, 0x43f04100, 0x43f1df00,
+ 0x43f37d00, 0x43f51b00, 0x43f6b900, 0x43f85700, 0x43f9f500, 0x43fb9300, 0x43fd3100, 0x43fecf00, 0x44006d00, 0x44020b00, 0x4403a900, 0x44054700, 0x4406e500, 0x44088300, 0x440a2100, 0x440bbf00,
+ 0x440d5d00, 0x440efb00, 0x44109900, 0x44123700, 0x4413d500, 0x44157300, 0x44171100, 0x4418af00, 0x441a4d00, 0x441beb00, 0x441d8900, 0x441f2700, 0x4420c500, 0x44226300, 0x44240100, 0x44259f00,
+ 0x44273d00, 0x4428db00, 0x442a7900, 0x442c1700, 0x442db500, 0x442f5300, 0x4430f100, 0x44328f00, 0x44342d00, 0x4435cb00, 0x44376900, 0x44390700, 0x443aa500, 0x443c4300, 0x443de100, 0x443f7f00,
+ 0x44411d00, 0x4442bb00, 0x44445900, 0x4445f700, 0x44479500, 0x44493300, 0x444ad100, 0x444c6f00, 0x444e0d00, 0x444fab00, 0x44514900, 0x4452e700, 0x44548500, 0x44562300, 0x4457c100, 0x44595f00,
+ 0x445afd00, 0x445c9b00, 0x445e3900, 0x445fd700, 0x44617500, 0x44631300, 0x4464b100, 0x44664f00, 0x4467ed00, 0x44698b00, 0x446b2900, 0x446cc700, 0x446e6500, 0x44700300, 0x4471a100, 0x44733f00,
+ 0x4474dd00, 0x44767b00, 0x44781900, 0x4479b700, 0x447b5500, 0x447cf300, 0x447e9100, 0x44802f00, 0x4481cd00, 0x44836b00, 0x44850900, 0x4486a700, 0x44884500, 0x4489e300, 0x448b8100, 0x448d1f00,
+ 0x448ebd00, 0x44905b00, 0x4491f900, 0x44939700, 0x44953500, 0x4496d300, 0x44987100, 0x449a0f00, 0x449bad00, 0x449d4b00, 0x449ee900, 0x44a08700, 0x44a22500, 0x44a3c300, 0x44a56100, 0x44a6ff00,
+ 0x44a89d00, 0x44aa3b00, 0x44abd900, 0x44ad7700, 0x44af1500, 0x44b0b300, 0x44b25100, 0x44b3ef00, 0x44b58d00, 0x44b72b00, 0x44b8c900, 0x44ba6700, 0x44bc0500, 0x44bda300, 0x44bf4100, 0x44c0df00,
+ 0x44c27d00, 0x44c41b00, 0x44c5b900, 0x44c75700, 0x44c8f500, 0x44ca9300, 0x44cc3100, 0x44cdcf00, 0x44cf6d00, 0x44d10b00, 0x44d2a900, 0x44d44700, 0x44d5e500, 0x44d78300, 0x44d92100, 0x44dabf00,
+ 0x44dc5d00, 0x44ddfb00, 0x44df9900, 0x44e13700, 0x44e2d500, 0x44e47300, 0x44e61100, 0x44e7af00, 0x44e94d00, 0x44eaeb00, 0x44ec8900, 0x44ee2700, 0x44efc500, 0x44f16300, 0x44f30100, 0x44f49f00,
+ 0x44f63d00, 0x44f7db00, 0x44f97900, 0x44fb1700, 0x44fcb500, 0x44fe5300, 0x44fff100, 0x45018f00, 0x45032d00, 0x4504cb00, 0x45066900, 0x45080700, 0x4509a500, 0x450b4300, 0x450ce100, 0x450e7f00,
+ 0x45101d00, 0x4511bb00, 0x45135900, 0x4514f700, 0x45169500, 0x45183300, 0x4519d100, 0x451b6f00, 0x451d0d00, 0x451eab00, 0x45204900, 0x4521e700, 0x45238500, 0x45252300, 0x4526c100, 0x45285f00,
+ 0x4529fd00, 0x452b9b00, 0x452d3900, 0x452ed700, 0x45307500, 0x45321300, 0x4533b100, 0x45354f00, 0x4536ed00, 0x45388b00, 0x453a2900, 0x453bc700, 0x453d6500, 0x453f0300, 0x4540a100, 0x45423f00,
+ 0x4543dd00, 0x45457b00, 0x45471900, 0x4548b700, 0x454a5500, 0x454bf300, 0x454d9100, 0x454f2f00, 0x4550cd00, 0x45526b00, 0x45540900, 0x4555a700, 0x45574500, 0x4558e300, 0x455a8100, 0x455c1f00,
+ 0x455dbd00, 0x455f5b00, 0x4560f900, 0x45629700, 0x45643500, 0x4565d300, 0x45677100, 0x45690f00, 0x456aad00, 0x456c4b00, 0x456de900, 0x456f8700, 0x45712500, 0x4572c300, 0x45746100, 0x4575ff00,
+ 0x4577e300, 0x4579c700, 0x457bab00, 0x457d8f00, 0x457f7300, 0x45815700, 0x45833b00, 0x45851f00, 0x45870300, 0x4588e700, 0x458acb00, 0x458caf00, 0x458e9300, 0x45907700, 0x45925b00, 0x45943f00,
+ 0x45962300, 0x45980700, 0x4599eb00, 0x459bcf00, 0x459db300, 0x459f9700, 0x45a17b00, 0x45a35f00, 0x45a54300, 0x45a72700, 0x45a90b00, 0x45aaef00, 0x45acd300, 0x45aeb700, 0x45b09b00, 0x45b27f00,
+ 0x45b46300, 0x45b64700, 0x45b82b00, 0x45ba0f00, 0x45bbf300, 0x45bdd700, 0x45bfbb00, 0x45c19f00, 0x45c38300, 0x45c56700, 0x45c74b00, 0x45c92f00, 0x45cb1300, 0x45ccf700, 0x45cedb00, 0x45d0bf00,
+ 0x45d2a300, 0x45d48700, 0x45d66b00, 0x45d84f00, 0x45da3300, 0x45dc1700, 0x45ddfb00, 0x45dfdf00, 0x45e1c300, 0x45e3a700, 0x45e58b00, 0x45e76f00, 0x45e95300, 0x45eb3700, 0x45ed1b00, 0x45eeff00,
+ 0x45f0e300, 0x45f2c700, 0x45f4ab00, 0x45f68f00, 0x45f87300, 0x45fa5700, 0x45fc3b00, 0x45fe1f00, 0x46000300, 0x4601e700, 0x4603cb00, 0x4605af00, 0x46079300, 0x46097700, 0x460b5b00, 0x460d3f00,
+ 0x460f2300, 0x46110700, 0x4612eb00, 0x4614cf00, 0x4616b300, 0x46189700, 0x461a7b00, 0x461c5f00, 0x461e4300, 0x46202700, 0x46220b00, 0x4623ef00, 0x4625d300, 0x4627b700, 0x46299b00, 0x462b7f00,
+ 0x462d6300, 0x462f4700, 0x46312b00, 0x46330f00, 0x4634f300, 0x4636d700, 0x4638bb00, 0x463a9f00, 0x463c8300, 0x463e6700, 0x46404b00, 0x46422f00, 0x46441300, 0x4645f700, 0x4647db00, 0x4649bf00,
+ 0x464ba300, 0x464d8700, 0x464f6b00, 0x46514f00, 0x46533300, 0x46551700, 0x4656fb00, 0x4658df00, 0x465ac300, 0x465ca700, 0x465e8b00, 0x46606f00, 0x46625300, 0x46643700, 0x46661b00, 0x4667ff00,
+ 0x4669e300, 0x466bc700, 0x466dab00, 0x466f8f00, 0x46717300, 0x46735700, 0x46753b00, 0x46771f00, 0x46790300, 0x467ae700, 0x467ccb00, 0x467eaf00, 0x46809300, 0x46827700, 0x46845b00, 0x46863f00,
+ 0x46882300, 0x468a0700, 0x468beb00, 0x468dcf00, 0x468fb300, 0x46919700, 0x46937b00, 0x46955f00, 0x46974300, 0x46992700, 0x469b0b00, 0x469cef00, 0x469ed300, 0x46a0b700, 0x46a29b00, 0x46a47f00,
+ 0x46a66300, 0x46a84700, 0x46aa2b00, 0x46ac0f00, 0x46adf300, 0x46afd700, 0x46b1bb00, 0x46b39f00, 0x46b58300, 0x46b76700, 0x46b94b00, 0x46bb2f00, 0x46bd1300, 0x46bef700, 0x46c0db00, 0x46c2bf00,
+ 0x46c4a300, 0x46c68700, 0x46c86b00, 0x46ca4f00, 0x46cc3300, 0x46ce1700, 0x46cffb00, 0x46d1df00, 0x46d3c300, 0x46d5a700, 0x46d78b00, 0x46d96f00, 0x46db5300, 0x46dd3700, 0x46df1b00, 0x46e0ff00,
+ 0x46e2e300, 0x46e4c700, 0x46e6ab00, 0x46e88f00, 0x46ea7300, 0x46ec5700, 0x46ee3b00, 0x46f01f00, 0x46f20300, 0x46f3e700, 0x46f5cb00, 0x46f7af00, 0x46f99300, 0x46fb7700, 0x46fd5b00, 0x46ff3f00,
+ 0x47012300, 0x47030700, 0x4704eb00, 0x4706cf00, 0x4708b300, 0x470a9700, 0x470c7b00, 0x470e5f00, 0x47104300, 0x47122700, 0x47140b00, 0x4715ef00, 0x4717d300, 0x4719b700, 0x471b9b00, 0x471d7f00,
+ 0x471f6300, 0x47214700, 0x47232b00, 0x47250f00, 0x4726f300, 0x4728d700, 0x472abb00, 0x472c9f00, 0x472e8300, 0x47306700, 0x47324b00, 0x47342f00, 0x47361300, 0x4737f700, 0x4739db00, 0x473bbf00,
+ 0x473da300, 0x473f8700, 0x47416b00, 0x47434f00, 0x47453300, 0x47471700, 0x4748fb00, 0x474adf00, 0x474cc300, 0x474ea700, 0x47508b00, 0x47526f00, 0x47545300, 0x47563700, 0x47581b00, 0x4759ff00,
+ 0x475c3f00, 0x475e7f00, 0x4760bf00, 0x4762ff00, 0x47653f00, 0x47677f00, 0x4769bf00, 0x476bff00, 0x476e3f00, 0x47707f00, 0x4772bf00, 0x4774ff00, 0x47773f00, 0x47797f00, 0x477bbf00, 0x477dff00,
+ 0x47803f00, 0x47827f00, 0x4784bf00, 0x4786ff00, 0x47893f00, 0x478b7f00, 0x478dbf00, 0x478fff00, 0x47923f00, 0x47947f00, 0x4796bf00, 0x4798ff00, 0x479b3f00, 0x479d7f00, 0x479fbf00, 0x47a1ff00,
+ 0x47a43f00, 0x47a67f00, 0x47a8bf00, 0x47aaff00, 0x47ad3f00, 0x47af7f00, 0x47b1bf00, 0x47b3ff00, 0x47b63f00, 0x47b87f00, 0x47babf00, 0x47bcff00, 0x47bf3f00, 0x47c17f00, 0x47c3bf00, 0x47c5ff00,
+ 0x47c83f00, 0x47ca7f00, 0x47ccbf00, 0x47ceff00, 0x47d13f00, 0x47d37f00, 0x47d5bf00, 0x47d7ff00, 0x47da3f00, 0x47dc7f00, 0x47debf00, 0x47e0ff00, 0x47e33f00, 0x47e57f00, 0x47e7bf00, 0x47e9ff00,
+ 0x47ec3f00, 0x47ee7f00, 0x47f0bf00, 0x47f2ff00, 0x47f53f00, 0x47f77f00, 0x47f9bf00, 0x47fbff00, 0x47fe3f00, 0x48007f00, 0x4802bf00, 0x4804ff00, 0x48073f00, 0x48097f00, 0x480bbf00, 0x480dff00,
+ 0x48103f00, 0x48127f00, 0x4814bf00, 0x4816ff00, 0x48193f00, 0x481b7f00, 0x481dbf00, 0x481fff00, 0x48223f00, 0x48247f00, 0x4826bf00, 0x4828ff00, 0x482b3f00, 0x482d7f00, 0x482fbf00, 0x4831ff00,
+ 0x48343f00, 0x48367f00, 0x4838bf00, 0x483aff00, 0x483d3f00, 0x483f7f00, 0x4841bf00, 0x4843ff00, 0x48463f00, 0x48487f00, 0x484abf00, 0x484cff00, 0x484f3f00, 0x48517f00, 0x4853bf00, 0x4855ff00,
+ 0x48583f00, 0x485a7f00, 0x485cbf00, 0x485eff00, 0x48613f00, 0x48637f00, 0x4865bf00, 0x4867ff00, 0x486a3f00, 0x486c7f00, 0x486ebf00, 0x4870ff00, 0x48733f00, 0x48757f00, 0x4877bf00, 0x4879ff00,
+ 0x487c3f00, 0x487e7f00, 0x4880bf00, 0x4882ff00, 0x48853f00, 0x48877f00, 0x4889bf00, 0x488bff00, 0x488e3f00, 0x48907f00, 0x4892bf00, 0x4894ff00, 0x48973f00, 0x48997f00, 0x489bbf00, 0x489dff00,
+ 0x48a03f00, 0x48a27f00, 0x48a4bf00, 0x48a6ff00, 0x48a93f00, 0x48ab7f00, 0x48adbf00, 0x48afff00, 0x48b23f00, 0x48b47f00, 0x48b6bf00, 0x48b8ff00, 0x48bb3f00, 0x48bd7f00, 0x48bfbf00, 0x48c1ff00,
+ 0x48c43f00, 0x48c67f00, 0x48c8bf00, 0x48caff00, 0x48cd3f00, 0x48cf7f00, 0x48d1bf00, 0x48d3ff00, 0x48d63f00, 0x48d87f00, 0x48dabf00, 0x48dcff00, 0x48df3f00, 0x48e17f00, 0x48e3bf00, 0x48e5ff00,
+ 0x48e83f00, 0x48ea7f00, 0x48ecbf00, 0x48eeff00, 0x48f13f00, 0x48f37f00, 0x48f5bf00, 0x48f7ff00, 0x48fa3f00, 0x48fc7f00, 0x48febf00, 0x4900ff00, 0x49033f00, 0x49057f00, 0x4907bf00, 0x4909ff00,
+ 0x490c3f00, 0x490e7f00, 0x4910bf00, 0x4912ff00, 0x49153f00, 0x49177f00, 0x4919bf00, 0x491bff00, 0x491e3f00, 0x49207f00, 0x4922bf00, 0x4924ff00, 0x49273f00, 0x49297f00, 0x492bbf00, 0x492dff00,
+ 0x49303f00, 0x49327f00, 0x4934bf00, 0x4936ff00, 0x49393f00, 0x493b7f00, 0x493dbf00, 0x493fff00, 0x49423f00, 0x49447f00, 0x4946bf00, 0x4948ff00, 0x494b3f00, 0x494d7f00, 0x494fbf00, 0x4951ff00,
+ 0x49543f00, 0x49567f00, 0x4958bf00, 0x495aff00, 0x495d3f00, 0x495f7f00, 0x4961bf00, 0x4963ff00, 0x49663f00, 0x49687f00, 0x496abf00, 0x496cff00, 0x496f3f00, 0x49717f00, 0x4973bf00, 0x4975ff00,
+ 0x49783f00, 0x497a7f00, 0x497cbf00, 0x497eff00, 0x49813f00, 0x49837f00, 0x4985bf00, 0x4987ff00, 0x498a3f00, 0x498c7f00, 0x498ebf00, 0x4990ff00, 0x49933f00, 0x49957f00, 0x4997bf00, 0x4999ff00,
+ 0x499cb300, 0x499f6700, 0x49a21b00, 0x49a4cf00, 0x49a78300, 0x49aa3700, 0x49aceb00, 0x49af9f00, 0x49b25300, 0x49b50700, 0x49b7bb00, 0x49ba6f00, 0x49bd2300, 0x49bfd700, 0x49c28b00, 0x49c53f00,
+ 0x49c7f300, 0x49caa700, 0x49cd5b00, 0x49d00f00, 0x49d2c300, 0x49d57700, 0x49d82b00, 0x49dadf00, 0x49dd9300, 0x49e04700, 0x49e2fb00, 0x49e5af00, 0x49e86300, 0x49eb1700, 0x49edcb00, 0x49f07f00,
+ 0x49f33300, 0x49f5e700, 0x49f89b00, 0x49fb4f00, 0x49fe0300, 0x4a00b700, 0x4a036b00, 0x4a061f00, 0x4a08d300, 0x4a0b8700, 0x4a0e3b00, 0x4a10ef00, 0x4a13a300, 0x4a165700, 0x4a190b00, 0x4a1bbf00,
+ 0x4a1e7300, 0x4a212700, 0x4a23db00, 0x4a268f00, 0x4a294300, 0x4a2bf700, 0x4a2eab00, 0x4a315f00, 0x4a341300, 0x4a36c700, 0x4a397c00, 0x4a3c3000, 0x4a3ee400, 0x4a419800, 0x4a444c00, 0x4a470000,
+ 0x4a49b400, 0x4a4c6800, 0x4a4f1c00, 0x4a51d000, 0x4a548400, 0x4a573800, 0x4a59ec00, 0x4a5ca000, 0x4a5f5400, 0x4a620800, 0x4a64bc00, 0x4a677000, 0x4a6a2400, 0x4a6cd800, 0x4a6f8c00, 0x4a724000,
+ 0x4a74f400, 0x4a77a800, 0x4a7a5c00, 0x4a7d1000, 0x4a7fc400, 0x4a827800, 0x4a852c00, 0x4a87e000, 0x4a8a9400, 0x4a8d4800, 0x4a8ffc00, 0x4a92b000, 0x4a956400, 0x4a981800, 0x4a9acc00, 0x4a9d8000,
+ 0x4aa03400, 0x4aa2e800, 0x4aa59c00, 0x4aa85000, 0x4aab0400, 0x4aadb800, 0x4ab06c00, 0x4ab32000, 0x4ab5d400, 0x4ab88800, 0x4abb3c00, 0x4abdf000, 0x4ac0a400, 0x4ac35800, 0x4ac60c00, 0x4ac8c000,
+ 0x4acb7400, 0x4ace2800, 0x4ad0dc00, 0x4ad39000, 0x4ad64400, 0x4ad8f800, 0x4adbac00, 0x4ade6000, 0x4ae11400, 0x4ae3c800, 0x4ae67c00, 0x4ae93000, 0x4aebe400, 0x4aee9800, 0x4af14c00, 0x4af40000,
+ 0x4af6cc00, 0x4af99800, 0x4afc6400, 0x4aff3000, 0x4b01fc00, 0x4b04c800, 0x4b079400, 0x4b0a6000, 0x4b0d2c00, 0x4b0ff800, 0x4b12c400, 0x4b159000, 0x4b185c00, 0x4b1b2800, 0x4b1df400, 0x4b20c000,
+ 0x4b238c00, 0x4b265800, 0x4b292400, 0x4b2bf000, 0x4b2ebc00, 0x4b318800, 0x4b345400, 0x4b372000, 0x4b39ec00, 0x4b3cb800, 0x4b3f8400, 0x4b425000, 0x4b451c00, 0x4b47e800, 0x4b4ab400, 0x4b4d8000,
+ 0x4b504c00, 0x4b531800, 0x4b55e400, 0x4b58b000, 0x4b5b7c00, 0x4b5e4800, 0x4b611400, 0x4b63e000, 0x4b66ac00, 0x4b697800, 0x4b6c4400, 0x4b6f1000, 0x4b71dc00, 0x4b74a800, 0x4b777400, 0x4b7a4000,
+ 0x4b7d0c00, 0x4b7fd800, 0x4b82a400, 0x4b857000, 0x4b883c00, 0x4b8b0800, 0x4b8dd400, 0x4b90a000, 0x4b936c00, 0x4b963800, 0x4b990400, 0x4b9bd000, 0x4b9e9c00, 0x4ba16800, 0x4ba43400, 0x4ba70000,
+ 0x4ba9cc00, 0x4bac9800, 0x4baf6400, 0x4bb23000, 0x4bb4fc00, 0x4bb7c700, 0x4bba9300, 0x4bbd5f00, 0x4bc02b00, 0x4bc2f700, 0x4bc5c300, 0x4bc88f00, 0x4bcb5b00, 0x4bce2700, 0x4bd0f300, 0x4bd3bf00,
+ 0x4bd68b00, 0x4bd95700, 0x4bdc2300, 0x4bdeef00, 0x4be1bb00, 0x4be48700, 0x4be75300, 0x4bea1f00, 0x4beceb00, 0x4befb700, 0x4bf28300, 0x4bf54f00, 0x4bf81b00, 0x4bfae700, 0x4bfdb300, 0x4c007f00,
+ 0x4c034b00, 0x4c061700, 0x4c08e300, 0x4c0baf00, 0x4c0e7b00, 0x4c114700, 0x4c141300, 0x4c16df00, 0x4c19ab00, 0x4c1c7700, 0x4c1f4300, 0x4c220f00, 0x4c24db00, 0x4c27a700, 0x4c2a7300, 0x4c2d3f00,
+ 0x4c300b00, 0x4c32d700, 0x4c35a300, 0x4c386f00, 0x4c3b3b00, 0x4c3e0700, 0x4c40d300, 0x4c439f00, 0x4c466b00, 0x4c493700, 0x4c4c0300, 0x4c4ecf00, 0x4c519b00, 0x4c546700, 0x4c573300, 0x4c59ff00,
+ 0x4c5d7700, 0x4c60ef00, 0x4c646700, 0x4c67df00, 0x4c6b5700, 0x4c6ecf00, 0x4c724700, 0x4c75bf00, 0x4c793700, 0x4c7caf00, 0x4c802700, 0x4c839f00, 0x4c871700, 0x4c8a8f00, 0x4c8e0700, 0x4c917f00,
+ 0x4c94f700, 0x4c986f00, 0x4c9be700, 0x4c9f5f00, 0x4ca2d700, 0x4ca64f00, 0x4ca9c700, 0x4cad3f00, 0x4cb0b700, 0x4cb42f00, 0x4cb7a700, 0x4cbb1f00, 0x4cbe9700, 0x4cc20f00, 0x4cc58700, 0x4cc8ff00,
+ 0x4ccc7700, 0x4ccfef00, 0x4cd36700, 0x4cd6df00, 0x4cda5700, 0x4cddcf00, 0x4ce14700, 0x4ce4bf00, 0x4ce83700, 0x4cebaf00, 0x4cef2700, 0x4cf29f00, 0x4cf61700, 0x4cf98f00, 0x4cfd0700, 0x4d007f00,
+ 0x4d03f700, 0x4d076f00, 0x4d0ae700, 0x4d0e5f00, 0x4d11d700, 0x4d154f00, 0x4d18c700, 0x4d1c3f00, 0x4d1fb700, 0x4d232f00, 0x4d26a700, 0x4d2a1f00, 0x4d2d9700, 0x4d310f00, 0x4d348700, 0x4d37ff00,
+ 0x4d3b7700, 0x4d3eef00, 0x4d426700, 0x4d45df00, 0x4d495700, 0x4d4ccf00, 0x4d504700, 0x4d53bf00, 0x4d573700, 0x4d5aaf00, 0x4d5e2700, 0x4d619f00, 0x4d651700, 0x4d688f00, 0x4d6c0700, 0x4d6f7f00,
+ 0x4d72f700, 0x4d766f00, 0x4d79e700, 0x4d7d5f00, 0x4d80d700, 0x4d844f00, 0x4d87c700, 0x4d8b3f00, 0x4d8eb700, 0x4d922f00, 0x4d95a700, 0x4d991f00, 0x4d9c9700, 0x4da00f00, 0x4da38700, 0x4da6ff00,
+ 0x4daa7700, 0x4dadef00, 0x4db16700, 0x4db4df00, 0x4db85700, 0x4dbbcf00, 0x4dbf4700, 0x4dc2bf00, 0x4dc63700, 0x4dc9af00, 0x4dcd2700, 0x4dd09f00, 0x4dd41700, 0x4dd78f00, 0x4ddb0700, 0x4dde7f00,
+ 0x4de1f700, 0x4de56f00, 0x4de8e700, 0x4dec5f00, 0x4defd700, 0x4df34f00, 0x4df6c700, 0x4dfa3f00, 0x4dfdb700, 0x4e012f00, 0x4e04a700, 0x4e081f00, 0x4e0b9700, 0x4e0f0f00, 0x4e128700, 0x4e15ff00,
+ 0x4e19ff00, 0x4e1dff00, 0x4e21ff00, 0x4e25ff00, 0x4e29ff00, 0x4e2dff00, 0x4e31ff00, 0x4e35ff00, 0x4e39ff00, 0x4e3dff00, 0x4e41ff00, 0x4e45ff00, 0x4e49ff00, 0x4e4dff00, 0x4e51ff00, 0x4e55ff00,
+ 0x4e59ff00, 0x4e5dff00, 0x4e61ff00, 0x4e65ff00, 0x4e69ff00, 0x4e6dff00, 0x4e71ff00, 0x4e75ff00, 0x4e79ff00, 0x4e7dff00, 0x4e81ff00, 0x4e85ff00, 0x4e89ff00, 0x4e8dff00, 0x4e91ff00, 0x4e95ff00,
+ 0x4e99ff00, 0x4e9dff00, 0x4ea1ff00, 0x4ea5ff00, 0x4ea9ff00, 0x4eadff00, 0x4eb1ff00, 0x4eb5ff00, 0x4eb9ff00, 0x4ebdff00, 0x4ec1ff00, 0x4ec5ff00, 0x4ec9ff00, 0x4ecdff00, 0x4ed1ff00, 0x4ed5ff00,
+ 0x4ed9ff00, 0x4eddff00, 0x4ee1ff00, 0x4ee5ff00, 0x4ee9ff00, 0x4eedff00, 0x4ef1ff00, 0x4ef5ff00, 0x4ef9ff00, 0x4efdff00, 0x4f01ff00, 0x4f05ff00, 0x4f09ff00, 0x4f0dff00, 0x4f11ff00, 0x4f15ff00,
+ 0x4f19ff00, 0x4f1dff00, 0x4f21ff00, 0x4f25ff00, 0x4f29ff00, 0x4f2dff00, 0x4f31ff00, 0x4f35ff00, 0x4f39ff00, 0x4f3dff00, 0x4f41ff00, 0x4f45ff00, 0x4f49ff00, 0x4f4dff00, 0x4f51ff00, 0x4f55ff00,
+ 0x4f59ff00, 0x4f5dff00, 0x4f61ff00, 0x4f65ff00, 0x4f69ff00, 0x4f6dff00, 0x4f71ff00, 0x4f75ff00, 0x4f79ff00, 0x4f7dff00, 0x4f81ff00, 0x4f85ff00, 0x4f89ff00, 0x4f8dff00, 0x4f91ff00, 0x4f95ff00,
+ 0x4f99ff00, 0x4f9dff00, 0x4fa1ff00, 0x4fa5ff00, 0x4fa9ff00, 0x4fadff00, 0x4fb1ff00, 0x4fb5ff00, 0x4fb9ff00, 0x4fbdff00, 0x4fc1ff00, 0x4fc5ff00, 0x4fc9ff00, 0x4fcdff00, 0x4fd1ff00, 0x4fd5ff00,
+ 0x4fd9ff00, 0x4fddff00, 0x4fe1ff00, 0x4fe5ff00, 0x4fe9ff00, 0x4fedff00, 0x4ff1ff00, 0x4ff5ff00, 0x4ff9ff00, 0x4ffdff00, 0x5001ff00, 0x5005ff00, 0x5009ff00, 0x500dff00, 0x5011ff00, 0x5015ff00,
+ 0x501a7700, 0x501eef00, 0x50236700, 0x5027df00, 0x502c5700, 0x5030cf00, 0x50354700, 0x5039bf00, 0x503e3700, 0x5042af00, 0x50472700, 0x504b9f00, 0x50501700, 0x50548f00, 0x50590700, 0x505d7f00,
+ 0x5061f700, 0x50666f00, 0x506ae700, 0x506f5f00, 0x5073d700, 0x50785000, 0x507cc800, 0x50814000, 0x5085b800, 0x508a3000, 0x508ea800, 0x50932000, 0x50979800, 0x509c1000, 0x50a08800, 0x50a50000,
+ 0x50a97800, 0x50adf000, 0x50b26800, 0x50b6e000, 0x50bb5800, 0x50bfd000, 0x50c44800, 0x50c8c000, 0x50cd3800, 0x50d1b000, 0x50d62800, 0x50daa000, 0x50df1800, 0x50e39000, 0x50e80800, 0x50ec8000,
+ 0x50f0f800, 0x50f57000, 0x50f9e800, 0x50fe6000, 0x5102d800, 0x51075000, 0x510bc800, 0x51104000, 0x5114b800, 0x51193000, 0x511da800, 0x51222000, 0x51269800, 0x512b1000, 0x512f8800, 0x51340000,
+ 0x51396800, 0x513ed000, 0x51443800, 0x5149a000, 0x514f0800, 0x51547000, 0x5159d800, 0x515f4000, 0x5164a800, 0x516a1000, 0x516f7800, 0x5174e000, 0x517a4800, 0x517fb000, 0x51851800, 0x518a8000,
+ 0x518fe800, 0x51955000, 0x519ab800, 0x51a02000, 0x51a58800, 0x51aaf000, 0x51b05800, 0x51b5c000, 0x51bb2800, 0x51c09000, 0x51c5f800, 0x51cb6000, 0x51d0c800, 0x51d63000, 0x51db9800, 0x51e10000,
+ 0x51e66800, 0x51ebd000, 0x51f13800, 0x51f6a000, 0x51fc0800, 0x52017000, 0x5206d800, 0x520c4000, 0x5211a800, 0x52171000, 0x521c7800, 0x5221e000, 0x52274800, 0x522cb000, 0x52321800, 0x52378000,
+ 0x523ce800, 0x52425000, 0x5247b800, 0x524d2000, 0x52528800, 0x5257f000, 0x525d5800, 0x5262c000, 0x52682800, 0x526d9000, 0x5272f800, 0x52786000, 0x527dc800, 0x52833000, 0x52889800, 0x528e0000,
+ 0x5293c000, 0x52998000, 0x529f4000, 0x52a50000, 0x52aac000, 0x52b08000, 0x52b64000, 0x52bc0000, 0x52c1bf00, 0x52c77f00, 0x52cd3f00, 0x52d2ff00, 0x52d8bf00, 0x52de7f00, 0x52e43f00, 0x52e9ff00,
+ 0x52efbf00, 0x52f57f00, 0x52fb3f00, 0x5300ff00, 0x5306bf00, 0x530c7f00, 0x53123f00, 0x5317ff00, 0x531dbf00, 0x53237f00, 0x53293f00, 0x532eff00, 0x5334bf00, 0x533a7f00, 0x53403f00, 0x5345ff00,
+ 0x534bbf00, 0x53517f00, 0x53573f00, 0x535cff00, 0x5362bf00, 0x53687f00, 0x536e3f00, 0x5373ff00, 0x5379bf00, 0x537f7f00, 0x53853f00, 0x538aff00, 0x5390bf00, 0x53967f00, 0x539c3f00, 0x53a1ff00,
+ 0x53a7bf00, 0x53ad7f00, 0x53b33f00, 0x53b8ff00, 0x53bebf00, 0x53c47f00, 0x53ca3f00, 0x53cfff00, 0x53d5bf00, 0x53db7f00, 0x53e13f00, 0x53e6ff00, 0x53ecbf00, 0x53f27f00, 0x53f83f00, 0x53fdff00,
+ 0x54055f00, 0x540cbf00, 0x54141f00, 0x541b7f00, 0x5422df00, 0x542a3f00, 0x54319f00, 0x5438ff00, 0x54405f00, 0x5447bf00, 0x544f1f00, 0x54567f00, 0x545ddf00, 0x54653f00, 0x546c9f00, 0x5473ff00,
+ 0x547b5f00, 0x5482bf00, 0x548a1f00, 0x54917f00, 0x5498df00, 0x54a03f00, 0x54a79f00, 0x54aeff00, 0x54b65f00, 0x54bdbf00, 0x54c51f00, 0x54cc7f00, 0x54d3df00, 0x54db3f00, 0x54e29f00, 0x54e9ff00,
+ 0x54f15f00, 0x54f8bf00, 0x55001f00, 0x55077f00, 0x550edf00, 0x55163f00, 0x551d9f00, 0x5524ff00, 0x552c5f00, 0x5533bf00, 0x553b1f00, 0x55427f00, 0x5549df00, 0x55513f00, 0x55589f00, 0x555fff00,
+ 0x55675f00, 0x556ebf00, 0x55761f00, 0x557d7f00, 0x5584df00, 0x558c3f00, 0x55939f00, 0x559aff00, 0x55a25f00, 0x55a9bf00, 0x55b11f00, 0x55b87f00, 0x55bfdf00, 0x55c73f00, 0x55ce9f00, 0x55d5ff00,
+ 0x55de9f00, 0x55e73f00, 0x55efdf00, 0x55f87f00, 0x56011f00, 0x5609bf00, 0x56125f00, 0x561aff00, 0x56239f00, 0x562c3f00, 0x5634df00, 0x563d7f00, 0x56461f00, 0x564ebf00, 0x56575f00, 0x565fff00,
+ 0x56689f00, 0x56713f00, 0x5679df00, 0x56827f00, 0x568b1f00, 0x5693bf00, 0x569c5f00, 0x56a4ff00, 0x56ad9f00, 0x56b63f00, 0x56bedf00, 0x56c77f00, 0x56d01f00, 0x56d8bf00, 0x56e15f00, 0x56e9ff00,
+ 0x56f29f00, 0x56fb3f00, 0x5703df00, 0x570c7f00, 0x57151f00, 0x571dbf00, 0x57265f00, 0x572eff00, 0x57379f00, 0x57403f00, 0x5748df00, 0x57517f00, 0x575a1f00, 0x5762bf00, 0x576b5f00, 0x5773ff00,
+ 0x577c9f00, 0x57853f00, 0x578ddf00, 0x57967f00, 0x579f1f00, 0x57a7bf00, 0x57b05f00, 0x57b8ff00, 0x57c19f00, 0x57ca3f00, 0x57d2df00, 0x57db7f00, 0x57e41f00, 0x57ecbf00, 0x57f55f00, 0x57fdff00,
+ 0x58087f00, 0x5812ff00, 0x581d7f00, 0x5827ff00, 0x58327f00, 0x583cff00, 0x58477f00, 0x5851ff00, 0x585c7f00, 0x5866ff00, 0x58717f00, 0x587bff00, 0x58867f00, 0x5890ff00, 0x589b7f00, 0x58a5ff00,
+ 0x58b07f00, 0x58baff00, 0x58c57f00, 0x58cfff00, 0x58da7f00, 0x58e4ff00, 0x58ef7f00, 0x58f9ff00, 0x59047f00, 0x590eff00, 0x59197f00, 0x5923ff00, 0x592e7f00, 0x5938ff00, 0x59437f00, 0x594dff00,
+ 0x59587f00, 0x5962ff00, 0x596d7f00, 0x5977ff00, 0x59827f00, 0x598cff00, 0x59977f00, 0x59a1ff00, 0x59ac7f00, 0x59b6ff00, 0x59c17f00, 0x59cbff00, 0x59d67f00, 0x59e0ff00, 0x59eb7f00, 0x59f5ff00,
+ 0x5a007f00, 0x5a0aff00, 0x5a157f00, 0x5a1fff00, 0x5a2a7f00, 0x5a34ff00, 0x5a3f7f00, 0x5a49ff00, 0x5a547f00, 0x5a5eff00, 0x5a697f00, 0x5a73ff00, 0x5a7e7f00, 0x5a88ff00, 0x5a937f00, 0x5a9dff00,
+ 0x5aaa0700, 0x5ab60f00, 0x5ac21700, 0x5ace1f00, 0x5ada2700, 0x5ae62f00, 0x5af23700, 0x5afe3f00, 0x5b0a4700, 0x5b164f00, 0x5b225700, 0x5b2e5f00, 0x5b3a6700, 0x5b466f00, 0x5b527700, 0x5b5e7f00,
+ 0x5b6a8700, 0x5b768f00, 0x5b829700, 0x5b8e9f00, 0x5b9aa700, 0x5ba6af00, 0x5bb2b700, 0x5bbebf00, 0x5bcac700, 0x5bd6cf00, 0x5be2d700, 0x5beedf00, 0x5bfae700, 0x5c06ef00, 0x5c12f700, 0x5c1eff00,
+ 0x5c2b0700, 0x5c370f00, 0x5c431700, 0x5c4f1f00, 0x5c5b2700, 0x5c672f00, 0x5c733700, 0x5c7f3f00, 0x5c8b4700, 0x5c974f00, 0x5ca35700, 0x5caf5f00, 0x5cbb6700, 0x5cc76f00, 0x5cd37700, 0x5cdf7f00,
+ 0x5ceb8700, 0x5cf78f00, 0x5d039700, 0x5d0f9f00, 0x5d1ba700, 0x5d27af00, 0x5d33b700, 0x5d3fbf00, 0x5d4bc700, 0x5d57cf00, 0x5d63d700, 0x5d6fdf00, 0x5d7be700, 0x5d87ef00, 0x5d93f700, 0x5da00000,
+ 0x5dac8000, 0x5db90000, 0x5dc58000, 0x5dd20000, 0x5dde8000, 0x5deb0000, 0x5df78000, 0x5e040000, 0x5e108000, 0x5e1d0000, 0x5e298000, 0x5e360000, 0x5e428000, 0x5e4f0000, 0x5e5b8000, 0x5e680000,
+ 0x5e748000, 0x5e810000, 0x5e8d8000, 0x5e9a0000, 0x5ea68000, 0x5eb30000, 0x5ebf8000, 0x5ecc0000, 0x5ed88000, 0x5ee50000, 0x5ef18000, 0x5efe0000, 0x5f0a8000, 0x5f170000, 0x5f238000, 0x5f300000,
+ 0x5f3c8000, 0x5f490000, 0x5f558000, 0x5f620000, 0x5f6e8000, 0x5f7b0000, 0x5f878000, 0x5f940000, 0x5fa08000, 0x5fad0000, 0x5fb98000, 0x5fc60000, 0x5fd28000, 0x5fdf0000, 0x5feb8000, 0x5ff80000,
+ 0x60048000, 0x60110000, 0x601d8000, 0x602a0000, 0x60368000, 0x60430000, 0x604f8000, 0x605c0000, 0x60688000, 0x60750000, 0x60818000, 0x608e0000, 0x609a8000, 0x60a70000, 0x60b38000, 0x60c00000,
+ 0x60cc8000, 0x60d90000, 0x60e58000, 0x60f20000, 0x60fe8000, 0x610b0000, 0x61178000, 0x61240000, 0x61308000, 0x613d0000, 0x61498000, 0x61560000, 0x61628000, 0x616f0000, 0x617b8000, 0x61880000,
+ 0x61948000, 0x61a10000, 0x61ad8000, 0x61ba0000, 0x61c68000, 0x61d30000, 0x61df8000, 0x61ec0000, 0x61f88000, 0x62050000, 0x62118000, 0x621e0000, 0x622a8000, 0x62370000, 0x62438000, 0x62500000,
+ 0x625c8000, 0x62690000, 0x62758000, 0x62820000, 0x628e8000, 0x629b0000, 0x62a78000, 0x62b40000, 0x62c08000, 0x62cd0000, 0x62d98000, 0x62e60000, 0x62f28000, 0x62ff0000, 0x630b8000, 0x63180000,
+ 0x63248000, 0x63310000, 0x633d8000, 0x634a0000, 0x63568000, 0x63630000, 0x636f8000, 0x637c0000, 0x63888000, 0x63950000, 0x63a18000, 0x63ae0000, 0x63ba8000, 0x63c70000, 0x63d38000, 0x63e00000,
+ 0x63ec8000, 0x63f90000, 0x64058000, 0x64120000, 0x641e8000, 0x642b0000, 0x64378000, 0x64440000, 0x64508000, 0x645d0000, 0x64698000, 0x64760000, 0x64828000, 0x648f0000, 0x649b8000, 0x64a80000,
+ 0x64b48000, 0x64c10000, 0x64cd8000, 0x64da0000, 0x64e68000, 0x64f30000, 0x64ff8000, 0x650c0000, 0x65188000, 0x65250000, 0x65318000, 0x653e0000, 0x654a8000, 0x65570000, 0x65638000, 0x65700000,
+ 0x657c8000, 0x65890000, 0x65958000, 0x65a20000, 0x65ae8000, 0x65bb0000, 0x65c78000, 0x65d40000, 0x65e08000, 0x65ed0000, 0x65f98000, 0x66060000, 0x66128000, 0x661f0000, 0x662b8000, 0x66380000,
+ 0x66448000, 0x66510000, 0x665d8000, 0x666a0000, 0x66768000, 0x66830000, 0x668f8000, 0x669c0000, 0x66a88000, 0x66b50000, 0x66c18000, 0x66ce0000, 0x66da8000, 0x66e70000, 0x66f38000, 0x67000000,
+ 0x670c8000, 0x67190000, 0x67258000, 0x67320000, 0x673e8000, 0x674b0000, 0x67578000, 0x67640000, 0x67708000, 0x677d0000, 0x67898000, 0x67960000, 0x67a28000, 0x67af0000, 0x67bb8000, 0x67c80000,
+ 0x67d48000, 0x67e10000, 0x67ed8000, 0x67fa0000, 0x68068000, 0x68130000, 0x681f8000, 0x682c0000, 0x68388000, 0x68450000, 0x68518000, 0x685e0000, 0x686a8000, 0x68770000, 0x68838000, 0x68900000,
+ 0x689c8000, 0x68a90000, 0x68b58000, 0x68c20000, 0x68ce8000, 0x68db0000, 0x68e78000, 0x68f40000, 0x69008000, 0x690d0000, 0x69198000, 0x69260000, 0x69328000, 0x693f0000, 0x694b8000, 0x69580000,
+ 0x69648000, 0x69710000, 0x697d8000, 0x698a0000, 0x69968000, 0x69a30000, 0x69af8000, 0x69bc0000, 0x69c88000, 0x69d50000, 0x69e18000, 0x69ee0000, 0x69fa8000, 0x6a070000, 0x6a138000, 0x6a200000,
+ 0x6a2c8000, 0x6a390000, 0x6a458000, 0x6a520000, 0x6a5e8000, 0x6a6b0000, 0x6a778000, 0x6a840000, 0x6a908000, 0x6a9d0000, 0x6aa98000, 0x6ab60000, 0x6ac28000, 0x6acf0000, 0x6adb8000, 0x6ae80000,
+ 0x6af48000, 0x6b010000, 0x6b0d8000, 0x6b1a0000, 0x6b268000, 0x6b330000, 0x6b3f8000, 0x6b4c0000, 0x6b588000, 0x6b650000, 0x6b718000, 0x6b7e0000, 0x6b8a8000, 0x6b970000, 0x6ba38000, 0x6bb00000,
+ 0x6bbc8000, 0x6bc90000, 0x6bd58000, 0x6be20000, 0x6bee8000, 0x6bfb0000, 0x6c078000, 0x6c140000, 0x6c208000, 0x6c2d0000, 0x6c398000, 0x6c460000, 0x6c528000, 0x6c5f0000, 0x6c6b8000, 0x6c780000,
+ 0x6c848000, 0x6c910000, 0x6c9d8000, 0x6caa0000, 0x6cb68000, 0x6cc30000, 0x6ccf8000, 0x6cdc0000, 0x6ce88000, 0x6cf50000, 0x6d018000, 0x6d0e0000, 0x6d1a8000, 0x6d270000, 0x6d338000, 0x6d400000,
+ 0x6d4c8000, 0x6d590000, 0x6d658000, 0x6d720000, 0x6d7e8000, 0x6d8b0000, 0x6d978000, 0x6da40000, 0x6db08000, 0x6dbd0000, 0x6dc98000, 0x6dd60000, 0x6de28000, 0x6def0000, 0x6dfb8000, 0x6e080000,
+ 0x6e148000, 0x6e210000, 0x6e2d8000, 0x6e3a0000, 0x6e468000, 0x6e530000, 0x6e5f8000, 0x6e6c0000, 0x6e788000, 0x6e850000, 0x6e918000, 0x6e9e0000, 0x6eaa8000, 0x6eb70000, 0x6ec38000, 0x6ed00000,
+ 0x6edc8000, 0x6ee90000, 0x6ef58000, 0x6f020000, 0x6f0e8000, 0x6f1b0000, 0x6f278000, 0x6f340000, 0x6f408000, 0x6f4d0000, 0x6f598000, 0x6f660000, 0x6f728000, 0x6f7f0000, 0x6f8b8000, 0x6f980000,
+ 0x6fa48000, 0x6fb10000, 0x6fbd8000, 0x6fca0000, 0x6fd68000, 0x6fe30000, 0x6fef8000, 0x6ffc0000, 0x70088000, 0x70150000, 0x70218000, 0x702e0000, 0x703a8000, 0x70470000, 0x70538000, 0x70600000,
+ 0x706c8000, 0x70790000, 0x70858000, 0x70920000, 0x709e8000, 0x70ab0000, 0x70b78000, 0x70c40000, 0x70d08000, 0x70dd0000, 0x70e98000, 0x70f60000, 0x71028000, 0x710f0000, 0x711b8000, 0x71280000,
+ 0x71348000, 0x71410000, 0x714d8000, 0x715a0000, 0x71668000, 0x71730000, 0x717f8000, 0x718c0000, 0x71988000, 0x71a50000, 0x71b18000, 0x71be0000, 0x71ca8000, 0x71d70000, 0x71e38000, 0x71f00000,
+ 0x71fc8000, 0x72090000, 0x72158000, 0x72220000, 0x722e8000, 0x723b0000, 0x72478000, 0x72540000, 0x72608000, 0x726d0000, 0x72798000, 0x72860000, 0x72928000, 0x729f0000, 0x72ab8000, 0x72b80000,
+ 0x72c48000, 0x72d10000, 0x72dd8000, 0x72ea0000, 0x72f68000, 0x73030000, 0x730f8000, 0x731c0000, 0x73288000, 0x73350000, 0x73418000, 0x734e0000, 0x735a8000, 0x73670000, 0x73738000, 0x73800000,
+ 0x738c8000, 0x73990000, 0x73a58000, 0x73b20000, 0x73be8000, 0x73cb0000, 0x73d78000, 0x73e40000, 0x73f08000, 0x73fd0000, 0x74098000, 0x74160000, 0x74228000, 0x742f0000, 0x743b8000, 0x74480000,
+ 0x74548000, 0x74610000, 0x746d8000, 0x747a0000, 0x74868000, 0x74930000, 0x749f8000, 0x74ac0000, 0x74b88000, 0x74c50000, 0x74d18000, 0x74de0000, 0x74ea8000, 0x74f70000, 0x75038000, 0x75100000,
+ 0x751c8000, 0x75290000, 0x75358000, 0x75420000, 0x754e8000, 0x755b0000, 0x75678000, 0x75740000, 0x75808000, 0x758d0000, 0x75998000, 0x75a60000, 0x75b28000, 0x75bf0000, 0x75cb8000, 0x75d80000,
+ 0x75e48000, 0x75f10000, 0x75fd8000, 0x760a0000, 0x76168000, 0x76230000, 0x762f8000, 0x763c0000, 0x76488000, 0x76550000, 0x76618000, 0x766e0000, 0x767a8000, 0x76870000, 0x76938000, 0x76a00000,
+ 0x76ac8000, 0x76b90000, 0x76c58000, 0x76d20000, 0x76de8000, 0x76eb0000, 0x76f78000, 0x77040000, 0x77108000, 0x771d0000, 0x77298000, 0x77360000, 0x77428000, 0x774f0000, 0x775b8000, 0x77680000,
+ 0x77748000, 0x77810000, 0x778d8000, 0x779a0000, 0x77a68000, 0x77b30000, 0x77bf8000, 0x77cc0000, 0x77d88000, 0x77e50000, 0x77f18000, 0x77fe0000, 0x780a8000, 0x78170000, 0x78238000, 0x78300000,
+ 0x783c8000, 0x78490000, 0x78558000, 0x78620000, 0x786e8000, 0x787b0000, 0x78878000, 0x78940000, 0x78a08000, 0x78ad0000, 0x78b98000, 0x78c60000, 0x78d28000, 0x78df0000, 0x78eb8000, 0x78f80000,
+ 0x79048000, 0x79110000, 0x791d8000, 0x792a0000, 0x79368000, 0x79430000, 0x794f8000, 0x795c0000, 0x79688000, 0x79750000, 0x79818000, 0x798e0000, 0x799a8000, 0x79a70000, 0x79b38000, 0x79c00000,
+ 0x79cc8000, 0x79d90000, 0x79e58000, 0x79f20000, 0x79fe8000, 0x7a0b0000, 0x7a178000, 0x7a240000, 0x7a308000, 0x7a3d0000, 0x7a498000, 0x7a560000, 0x7a628000, 0x7a6f0000, 0x7a7b8000, 0x7a880000,
+ 0x7a948000, 0x7aa10000, 0x7aad8000, 0x7aba0000, 0x7ac68000, 0x7ad30000, 0x7adf8000, 0x7aec0000, 0x7af88000, 0x7b050000, 0x7b118000, 0x7b1e0000, 0x7b2a8000, 0x7b370000, 0x7b438000, 0x7b500000,
+ 0x7b5c8000, 0x7b690000, 0x7b758000, 0x7b820000, 0x7b8e8000, 0x7b9b0000, 0x7ba78000, 0x7bb40000, 0x7bc08000, 0x7bcd0000, 0x7bd98000, 0x7be60000, 0x7bf28000, 0x7bff0000, 0x7c0b8000, 0x7c180000,
+ 0x7c248000, 0x7c310000, 0x7c3d8000, 0x7c4a0000, 0x7c568000, 0x7c630000, 0x7c6f8000, 0x7c7c0000, 0x7c888000, 0x7c950000, 0x7ca18000, 0x7cae0000, 0x7cba8000, 0x7cc70000, 0x7cd38000, 0x7ce00000,
+ 0x7cec8000, 0x7cf90000, 0x7d058000, 0x7d120000, 0x7d1e8000, 0x7d2b0000, 0x7d378000, 0x7d440000, 0x7d508000, 0x7d5d0000, 0x7d698000, 0x7d760000, 0x7d828000, 0x7d8f0000, 0x7d9b8000, 0x7da80000,
+ 0x7db48000, 0x7dc10000, 0x7dcd8000, 0x7dda0000, 0x7de68000, 0x7df30000, 0x7dff8000, 0x7e0c0000, 0x7e188000, 0x7e250000, 0x7e318000, 0x7e3e0000, 0x7e4a8000, 0x7e570000, 0x7e638000, 0x7e700000,
+ 0x7e7c8000, 0x7e890000, 0x7e958000, 0x7ea20000, 0x7eae8000, 0x7ebb0000, 0x7ec78000, 0x7ed40000, 0x7ee08000, 0x7eed0000, 0x7ef98000, 0x7f060000, 0x7f128000, 0x7f1f0000, 0x7f2b8000, 0x7f380000,
+ 0x7f448000, 0x7f510000, 0x7f5d8000, 0x7f6a0000, 0x7f768000, 0x7f830000, 0x7f8f8000, 0x7f9c0000, 0x7fa88000, 0x7fb50000, 0x7fc18000, 0x7fce0000, 0x7fda8000, 0x7fe70000, 0x7ff38000, 0x80000000
+};
+
+static const uint8_t readaheadtab[] = {
+ 0x03, 0x02, 0x01, 0x01, 0x1f, 0x1e, 0x1f, 0x11, 0x1f, 0x1e, 0x1f, 0x1d, 0x1f, 0x1e, 0x1f,
+ 0x10, 0x1f, 0x1e, 0x1f, 0x1d, 0x1f, 0x1e, 0x1f, 0x1c, 0x1f, 0x1e, 0x1f, 0x1d, 0x1f, 0x1e,
+ 0x1f, 0x0f, 0x1f, 0x1e, 0x1f, 0x1d, 0x1f, 0x1e, 0x1f, 0x1c, 0x1f, 0x1e, 0x1f, 0x1d, 0x1f,
+ 0x1e, 0x1f, 0x1b, 0x1f, 0x1e, 0x1f, 0x1d, 0x1f, 0x1e, 0x1f, 0x1c, 0x1f, 0x1e, 0x1f, 0x1d,
+ 0x1f, 0x1e, 0x0e, 0x19, 0x07, 0x1e, 0x1f, 0x1d, 0x1f, 0x1e, 0x1f, 0x1c, 0x1f, 0x1e, 0x1f,
+ 0x1d, 0x1f, 0x1e, 0x1f, 0x1b, 0x1f, 0x1e, 0x1f, 0x1d, 0x1f, 0x1e, 0x1f, 0x1c, 0x1f, 0x1e,
+ 0x1f, 0x1d, 0x1f, 0x1e, 0x1f, 0x1a, 0x1f, 0x1e, 0x1f, 0x1d, 0x1f, 0x1e, 0x1f, 0x1c, 0x1f,
+ 0x1e, 0x1f, 0x1d, 0x1f, 0x1e, 0x1f, 0x1b, 0x1f, 0x1e, 0x1f, 0x1d, 0x1f, 0x1e, 0x1f, 0x1c,
+ 0x1f, 0x1e, 0x1f, 0x1d, 0x1f, 0x0d, 0x18, 0x20, 0x06, 0x1e, 0x1f, 0x12, 0x1f, 0x1e, 0x1f,
+ 0x1c, 0x1f, 0x1e, 0x1f, 0x1d, 0x1f, 0x1e, 0x1f, 0x1b, 0x1f, 0x1e, 0x1f, 0x1d, 0x1f, 0x1e,
+ 0x1f, 0x1c, 0x1f, 0x1e, 0x1f, 0x1d, 0x1f, 0x1e, 0x1f, 0x1a, 0x08, 0x1e, 0x1f, 0x1d, 0x1f,
+ 0x1e, 0x1f, 0x1c, 0x1f, 0x1e, 0x1f, 0x1d, 0x1f, 0x1e, 0x1f, 0x1b, 0x1f, 0x1e, 0x1f, 0x1d,
+ 0x1f, 0x1e, 0x1f, 0x1c, 0x1f, 0x1e, 0x1f, 0x1d, 0x1f, 0x1e, 0x1f, 0x19, 0x1f, 0x13, 0x1f,
+ 0x1d, 0x1f, 0x1e, 0x1f, 0x1c, 0x1f, 0x1e, 0x1f, 0x1d, 0x1f, 0x1e, 0x1f, 0x1b, 0x09, 0x1e,
+ 0x1f, 0x1d, 0x1f, 0x1e, 0x1f, 0x1c, 0x1f, 0x1e, 0x1f, 0x1d, 0x1f, 0x1e, 0x1f, 0x1a, 0x14,
+ 0x1e, 0x1f, 0x1d, 0x1f, 0x1e, 0x1f, 0x1c, 0x0a, 0x1e, 0x1f, 0x1d, 0x1f, 0x1e, 0x1f, 0x1b,
+ 0x15, 0x1e, 0x1f, 0x1d, 0x0b, 0x1e, 0x1f, 0x1c, 0x16, 0x1e, 0x0c, 0x1d, 0x17, 0x1e, 0x1f,
+ 0x20
+};
+
+//values between 0 and 1 multiplied by 2^23 to avoid floating point numbers.
+static const int32_t gaintab[] = {
+ 0x800000, 0x7ff144, 0x7fe28a, 0x7fd3d2, 0x7fc51b, 0x7fb666, 0x7fa7b3, 0x7f9901, 0x7f8a52, 0x7f7ba3, 0x7f6cf7, 0x7f5e4c, 0x7f4fa3, 0x7f40fc, 0x7f3256,
+ 0x7f23b2, 0x7f1510, 0x7f066f, 0x7ef7d0, 0x7ee933, 0x7eda97, 0x7ecbfd, 0x7ebd65, 0x7eaece, 0x7ea039, 0x7e91a6, 0x7e8315, 0x7e7485, 0x7e65f6, 0x7e576a,
+ 0x7e48df, 0x7e3a56, 0x7e2bce, 0x7e1d49, 0x7e0ec5, 0x7e0042, 0x7df1c1, 0x7de342, 0x7dd4c5, 0x7dc649, 0x7db7cf, 0x7da956, 0x7d9adf, 0x7d8c6a, 0x7d7df7,
+ 0x7d6f85, 0x7d6115, 0x7d52a6, 0x7d443a, 0x7d35ce, 0x7d2765, 0x7d18fd, 0x7d0a97, 0x7cfc32, 0x7cedd0, 0x7cdf6e, 0x7cd10f, 0x7cc2b1, 0x7cb455, 0x7ca5fa,
+ 0x7c97a1, 0x7c894a, 0x7c7af4, 0x7c6ca0, 0x7c5e4e, 0x7c4ffd, 0x7c41ae, 0x7c3361, 0x7c2515, 0x7c16cb, 0x7c0882, 0x7bfa3b, 0x7bebf6, 0x7bddb3, 0x7bcf71,
+ 0x7bc131, 0x7bb2f2, 0x7ba4b5, 0x7b967a, 0x7b8840, 0x7b7a08, 0x7b6bd2, 0x7b5d9d, 0x7b4f6a, 0x7b4138, 0x7b3308, 0x7b24da, 0x7b16ad, 0x7b0882, 0x7afa59,
+ 0x7aec31, 0x7ade0b, 0x7acfe7, 0x7ac1c4, 0x7ab3a3, 0x7aa583, 0x7a9765, 0x7a8949, 0x7a7b2e, 0x7a6d15, 0x7a5efd, 0x7a50e8, 0x7a42d3, 0x7a34c1, 0x7a26b0,
+ 0x7a18a0, 0x7a0a93, 0x79fc87, 0x79ee7c, 0x79e073, 0x79d26c, 0x79c466, 0x79b662, 0x79a860, 0x799a5f, 0x798c60, 0x797e62, 0x797066, 0x79626c, 0x795473,
+ 0x79467c, 0x793886, 0x792a92, 0x791ca0, 0x790eaf, 0x7900c0, 0x78f2d3, 0x78e4e7, 0x78d6fc, 0x78c914, 0x78bb2d, 0x78ad47, 0x789f63, 0x789181, 0x7883a0,
+ 0x7875c1, 0x7867e3, 0x785a07, 0x784c2d, 0x783e54, 0x78307d, 0x7822a8, 0x7814d4, 0x780701, 0x77f931, 0x77eb61, 0x77dd94, 0x77cfc8, 0x77c1fd, 0x77b434,
+ 0x77a66d, 0x7798a8, 0x778ae3, 0x777d21, 0x776f60, 0x7761a1, 0x7753e3, 0x774627, 0x77386c, 0x772ab3, 0x771cfc, 0x770f46, 0x770192, 0x76f3df, 0x76e62e,
+ 0x76d87e, 0x76cad0, 0x76bd24, 0x76af79, 0x76a1d0, 0x769428, 0x768682, 0x7678de, 0x766b3b, 0x765d99, 0x764ffa, 0x76425b, 0x7634bf, 0x762723, 0x76198a,
+ 0x760bf2, 0x75fe5c, 0x75f0c7, 0x75e333, 0x75d5a2, 0x75c811, 0x75ba83, 0x75acf6, 0x759f6a, 0x7591e0, 0x758458, 0x7576d1, 0x75694c, 0x755bc8, 0x754e46,
+ 0x7540c6, 0x753347, 0x7525c9, 0x75184d, 0x750ad3, 0x74fd5a, 0x74efe3, 0x74e26d, 0x74d4f9, 0x74c786, 0x74ba15, 0x74aca6, 0x749f38, 0x7491cb, 0x748460,
+ 0x7476f7, 0x74698f, 0x745c29, 0x744ec4, 0x744161, 0x7433ff, 0x74269f, 0x741941, 0x740be4, 0x73fe88, 0x73f12e, 0x73e3d6, 0x73d67f, 0x73c92a, 0x73bbd6,
+ 0x73ae84, 0x73a133, 0x7393e4, 0x738696, 0x73794a, 0x736bff, 0x735eb6, 0x73516f, 0x734429, 0x7336e4, 0x7329a1, 0x731c60, 0x730f20, 0x7301e1, 0x72f4a5,
+ 0x72e769, 0x72da2f, 0x72ccf7, 0x72bfc0, 0x72b28b, 0x72a557, 0x729825, 0x728af4, 0x727dc5, 0x727098, 0x72636c, 0x725641, 0x724918, 0x723bf0, 0x722eca,
+ 0x7221a6, 0x721482, 0x720761, 0x71fa41, 0x71ed22, 0x71e005, 0x71d2ea, 0x71c5d0, 0x71b8b7, 0x71aba0, 0x719e8b, 0x719177, 0x718465, 0x717754, 0x716a44,
+ 0x715d36, 0x71502a, 0x71431f, 0x713615, 0x71290e, 0x711c07, 0x710f02, 0x7101ff, 0x70f4fd, 0x70e7fc, 0x70dafd, 0x70ce00, 0x70c104, 0x70b40a, 0x70a711,
+ 0x709a19, 0x708d23, 0x70802f, 0x70733c, 0x70664a, 0x70595a, 0x704c6c, 0x703f7f, 0x703293, 0x7025a9, 0x7018c0, 0x700bd9, 0x6ffef4, 0x6ff20f, 0x6fe52d,
+ 0x6fd84c, 0x6fcb6c, 0x6fbe8e, 0x6fb1b1, 0x6fa4d6, 0x6f97fc, 0x6f8b24, 0x6f7e4d, 0x6f7178, 0x6f64a4, 0x6f57d2, 0x6f4b01, 0x6f3e31, 0x6f3163, 0x6f2497,
+ 0x6f17cc, 0x6f0b02, 0x6efe3a, 0x6ef174, 0x6ee4af, 0x6ed7eb, 0x6ecb29, 0x6ebe68, 0x6eb1a9, 0x6ea4eb, 0x6e982f, 0x6e8b74, 0x6e7ebb, 0x6e7203, 0x6e654c,
+ 0x6e5897, 0x6e4be4, 0x6e3f32, 0x6e3281, 0x6e25d2, 0x6e1924, 0x6e0c78, 0x6dffcd, 0x6df324, 0x6de67c, 0x6dd9d6, 0x6dcd31, 0x6dc08e, 0x6db3ec, 0x6da74b,
+ 0x6d9aac, 0x6d8e0e, 0x6d8172, 0x6d74d7, 0x6d683e, 0x6d5ba6, 0x6d4f10, 0x6d427b, 0x6d35e7, 0x6d2955, 0x6d1cc5, 0x6d1036, 0x6d03a8, 0x6cf71c, 0x6cea91,
+ 0x6cde07, 0x6cd17f, 0x6cc4f9, 0x6cb874, 0x6cabf0, 0x6c9f6e, 0x6c92ed, 0x6c866e, 0x6c79f0, 0x6c6d74, 0x6c60f9, 0x6c547f, 0x6c4807, 0x6c3b91, 0x6c2f1b,
+ 0x6c22a8, 0x6c1635, 0x6c09c4, 0x6bfd55, 0x6bf0e7, 0x6be47a, 0x6bd80f, 0x6bcba5, 0x6bbf3d, 0x6bb2d6, 0x6ba670, 0x6b9a0c, 0x6b8daa, 0x6b8148, 0x6b74e9,
+ 0x6b688a, 0x6b5c2d, 0x6b4fd2, 0x6b4378, 0x6b371f, 0x6b2ac8, 0x6b1e72, 0x6b121d, 0x6b05ca, 0x6af979, 0x6aed29, 0x6ae0da, 0x6ad48d, 0x6ac841, 0x6abbf6,
+ 0x6aafad, 0x6aa365, 0x6a971f, 0x6a8ada, 0x6a7e97, 0x6a7255, 0x6a6614, 0x6a59d5, 0x6a4d97, 0x6a415b, 0x6a3520, 0x6a28e6, 0x6a1cae, 0x6a1078, 0x6a0442,
+ 0x69f80e, 0x69ebdc, 0x69dfab, 0x69d37b, 0x69c74d, 0x69bb20, 0x69aef4, 0x69a2ca, 0x6996a1, 0x698a7a, 0x697e54, 0x697230, 0x69660c, 0x6959eb, 0x694dca,
+ 0x6941ab, 0x69358e, 0x692972, 0x691d57, 0x69113e, 0x690526, 0x68f90f, 0x68ecfa, 0x68e0e6, 0x68d4d4, 0x68c8c3, 0x68bcb3, 0x68b0a5, 0x68a498, 0x68988d,
+ 0x688c83, 0x68807a, 0x687473, 0x68686d, 0x685c68, 0x685065, 0x684463, 0x683863, 0x682c64, 0x682066, 0x68146a, 0x68086f, 0x67fc76, 0x67f07d, 0x67e487,
+ 0x67d891, 0x67cc9d, 0x67c0ab, 0x67b4ba, 0x67a8ca, 0x679cdb, 0x6790ee, 0x678502, 0x677918, 0x676d2f, 0x676147, 0x675561, 0x67497c, 0x673d99, 0x6731b7,
+ 0x6725d6, 0x6719f7, 0x670e19, 0x67023c, 0x66f661, 0x66ea87, 0x66deae, 0x66d2d7, 0x66c701, 0x66bb2d, 0x66af59, 0x66a388, 0x6697b7, 0x668be8, 0x66801a,
+ 0x66744e, 0x666883, 0x665cba, 0x6650f1, 0x66452a, 0x663965, 0x662da1, 0x6621de, 0x66161c, 0x660a5c, 0x65fe9e, 0x65f2e0, 0x65e724, 0x65db69, 0x65cfb0,
+ 0x65c3f8, 0x65b841, 0x65ac8c, 0x65a0d8, 0x659525, 0x658974, 0x657dc4, 0x657216, 0x656668, 0x655abc, 0x654f12, 0x654369, 0x6537c1, 0x652c1a, 0x652075,
+ 0x6514d1, 0x65092f, 0x64fd8d, 0x64f1ee, 0x64e64f, 0x64dab2, 0x64cf16, 0x64c37c, 0x64b7e3, 0x64ac4b, 0x64a0b4, 0x64951f, 0x64898b, 0x647df9, 0x647268,
+ 0x6466d8, 0x645b49, 0x644fbc, 0x644430, 0x6438a6, 0x642d1d, 0x642195, 0x64160e, 0x640a89, 0x63ff05, 0x63f383, 0x63e802, 0x63dc82, 0x63d103, 0x63c586,
+ 0x63ba0a, 0x63ae8f, 0x63a316, 0x63979e, 0x638c28, 0x6380b2, 0x63753e, 0x6369cc, 0x635e5a, 0x6352ea, 0x63477b, 0x633c0e, 0x6330a2, 0x632537, 0x6319ce,
+ 0x630e66, 0x6302ff, 0x62f799, 0x62ec35, 0x62e0d2, 0x62d571, 0x62ca10, 0x62beb1, 0x62b354, 0x62a7f7, 0x629c9c, 0x629142, 0x6285ea, 0x627a93, 0x626f3d,
+ 0x6263e9, 0x625895, 0x624d43, 0x6241f3, 0x6236a4, 0x622b56, 0x622009, 0x6214bd, 0x620973, 0x61fe2b, 0x61f2e3, 0x61e79d, 0x61dc58, 0x61d114, 0x61c5d2,
+ 0x61ba91, 0x61af51, 0x61a413, 0x6198d6, 0x618d9a, 0x61825f, 0x617726, 0x616bee, 0x6160b7, 0x615582, 0x614a4e, 0x613f1b, 0x6133ea, 0x6128b9, 0x611d8a,
+ 0x61125d, 0x610730, 0x60fc05, 0x60f0dc, 0x60e5b3, 0x60da8c, 0x60cf66, 0x60c441, 0x60b91e, 0x60adfc, 0x60a2db, 0x6097bc, 0x608c9d, 0x608180, 0x607665,
+ 0x606b4a, 0x606031, 0x605519, 0x604a03, 0x603eed, 0x6033d9, 0x6028c7, 0x601db5, 0x6012a5, 0x600796, 0x5ffc88, 0x5ff17c, 0x5fe671, 0x5fdb67, 0x5fd05e,
+ 0x5fc557, 0x5fba51, 0x5faf4c, 0x5fa449, 0x5f9947, 0x5f8e46, 0x5f8346, 0x5f7848, 0x5f6d4a, 0x5f624e, 0x5f5754, 0x5f4c5a, 0x5f4162, 0x5f366c, 0x5f2b76,
+ 0x5f2082, 0x5f158f, 0x5f0a9d, 0x5effac, 0x5ef4bd, 0x5ee9cf, 0x5edee2, 0x5ed3f7, 0x5ec90c, 0x5ebe23, 0x5eb33c, 0x5ea855, 0x5e9d70, 0x5e928c, 0x5e87a9,
+ 0x5e7cc8, 0x5e71e8, 0x5e6709, 0x5e5c2b, 0x5e514f, 0x5e4673, 0x5e3b99, 0x5e30c1, 0x5e25e9, 0x5e1b13, 0x5e103e, 0x5e056a, 0x5dfa98, 0x5defc7, 0x5de4f7,
+ 0x5dda28, 0x5dcf5a, 0x5dc48e, 0x5db9c3, 0x5daef9, 0x5da431, 0x5d996a, 0x5d8ea4, 0x5d83df, 0x5d791b, 0x5d6e59, 0x5d6398, 0x5d58d8, 0x5d4e19, 0x5d435c,
+ 0x5d38a0, 0x5d2de5, 0x5d232b, 0x5d1873, 0x5d0dbc, 0x5d0306, 0x5cf851, 0x5ced9d, 0x5ce2eb, 0x5cd83a, 0x5ccd8a, 0x5cc2dc, 0x5cb82e, 0x5cad82, 0x5ca2d7,
+ 0x5c982e, 0x5c8d85, 0x5c82de, 0x5c7838, 0x5c6d93, 0x5c62f0, 0x5c584e, 0x5c4dad, 0x5c430d, 0x5c386e, 0x5c2dd1, 0x5c2334, 0x5c1899, 0x5c0e00, 0x5c0367,
+ 0x5bf8d0, 0x5bee3a, 0x5be3a5, 0x5bd911, 0x5bce7f, 0x5bc3ee, 0x5bb95e, 0x5baecf, 0x5ba441, 0x5b99b5, 0x5b8f2a, 0x5b84a0, 0x5b7a17, 0x5b6f90, 0x5b6509,
+ 0x5b5a84, 0x5b5000, 0x5b457e, 0x5b3afc, 0x5b307c, 0x5b25fd, 0x5b1b7f, 0x5b1103, 0x5b0687, 0x5afc0d, 0x5af194, 0x5ae71c, 0x5adca6, 0x5ad230, 0x5ac7bc,
+ 0x5abd49, 0x5ab2d7, 0x5aa867, 0x5a9df7, 0x5a9389, 0x5a891c, 0x5a7eb1, 0x5a7446, 0x5a69dd, 0x5a5f74, 0x5a550d, 0x5a4aa8, 0x5a4043, 0x5a35e0, 0x5a2b7e,
+ 0x5a211d, 0x5a16bd, 0x5a0c5e, 0x5a0201, 0x59f7a5, 0x59ed4a, 0x59e2f0, 0x59d897, 0x59ce40, 0x59c3e9, 0x59b994, 0x59af40, 0x59a4ee, 0x599a9c, 0x59904c,
+ 0x5985fd, 0x597baf, 0x597162, 0x596717, 0x595ccc, 0x595283, 0x59483b, 0x593df4, 0x5933ae, 0x59296a, 0x591f27, 0x5914e5, 0x590aa4, 0x590064, 0x58f625,
+ 0x58ebe8, 0x58e1ac, 0x58d771, 0x58cd37, 0x58c2fe, 0x58b8c7, 0x58ae90, 0x58a45b, 0x589a27, 0x588ff5, 0x5885c3, 0x587b93, 0x587163, 0x586735, 0x585d08,
+ 0x5852dc, 0x5848b2, 0x583e88, 0x583460, 0x582a39, 0x582013, 0x5815ee, 0x580bcb, 0x5801a9, 0x57f787, 0x57ed67, 0x57e348, 0x57d92b, 0x57cf0e, 0x57c4f3,
+ 0x57bad8, 0x57b0bf, 0x57a6a7, 0x579c91, 0x57927b, 0x578866, 0x577e53, 0x577441, 0x576a30, 0x576020, 0x575612, 0x574c04, 0x5741f8, 0x5737ed, 0x572de3,
+ 0x5723da, 0x5719d2, 0x570fcc, 0x5705c6, 0x56fbc2, 0x56f1bf, 0x56e7bd, 0x56ddbc, 0x56d3bc, 0x56c9be, 0x56bfc1, 0x56b5c4, 0x56abc9, 0x56a1cf, 0x5697d7,
+ 0x568ddf, 0x5683e9, 0x5679f3, 0x566fff, 0x56660c, 0x565c1a, 0x56522a, 0x56483a, 0x563e4c, 0x56345e, 0x562a72, 0x562087, 0x56169d, 0x560cb5, 0x5602cd,
+ 0x55f8e7, 0x55ef01, 0x55e51d, 0x55db3a, 0x55d158, 0x55c777, 0x55bd98, 0x55b3b9, 0x55a9dc, 0x55a000, 0x559625, 0x558c4b, 0x558272, 0x55789a, 0x556ec4,
+ 0x5564ee, 0x555b1a, 0x555147, 0x554775, 0x553da4, 0x5533d4, 0x552a06, 0x552038, 0x55166c, 0x550ca1, 0x5502d7, 0x54f90e, 0x54ef46, 0x54e57f, 0x54dbba,
+ 0x54d1f5, 0x54c832, 0x54be6f, 0x54b4ae, 0x54aaee, 0x54a130, 0x549772, 0x548db5, 0x5483fa, 0x547a3f, 0x547086, 0x5466ce, 0x545d17, 0x545361, 0x5449ac,
+ 0x543ff9, 0x543646, 0x542c95, 0x5422e4, 0x541935, 0x540f87, 0x5405da, 0x53fc2e, 0x53f283, 0x53e8da, 0x53df31, 0x53d58a, 0x53cbe4, 0x53c23e, 0x53b89a,
+ 0x53aef7, 0x53a555, 0x539bb5, 0x539215, 0x538877, 0x537ed9, 0x53753d, 0x536ba2, 0x536208, 0x53586f, 0x534ed7, 0x534540, 0x533baa, 0x533216, 0x532882,
+ 0x531ef0, 0x53155e, 0x530bce, 0x53023f, 0x52f8b1, 0x52ef24, 0x52e599, 0x52dc0e, 0x52d284, 0x52c8fc, 0x52bf74, 0x52b5ee, 0x52ac69, 0x52a2e5, 0x529962,
+ 0x528fe0, 0x52865f, 0x527cdf, 0x527361, 0x5269e3, 0x526067, 0x5256eb, 0x524d71, 0x5243f8, 0x523a80, 0x523109, 0x522793, 0x521e1e, 0x5214ab, 0x520b38,
+ 0x5201c6, 0x51f856, 0x51eee7, 0x51e578, 0x51dc0b, 0x51d29f, 0x51c934, 0x51bfca, 0x51b661, 0x51acf9, 0x51a393, 0x519a2d, 0x5190c9, 0x518765, 0x517e03,
+ 0x5174a1, 0x516b41, 0x5161e2, 0x515884, 0x514f27, 0x5145cb, 0x513c70, 0x513317, 0x5129be, 0x512066, 0x511710, 0x510dba, 0x510466, 0x50fb13, 0x50f1c1,
+ 0x50e86f, 0x50df1f, 0x50d5d0, 0x50cc82, 0x50c336, 0x50b9ea, 0x50b09f, 0x50a755, 0x509e0d, 0x5094c5, 0x508b7f, 0x50823a, 0x5078f5, 0x506fb2, 0x506670,
+ 0x505d2f, 0x5053ef, 0x504ab0, 0x504172, 0x503835, 0x502ef9, 0x5025be, 0x501c85, 0x50134c, 0x500a15, 0x5000de, 0x4ff7a9, 0x4fee74, 0x4fe541, 0x4fdc0f,
+ 0x4fd2de, 0x4fc9ae, 0x4fc07e, 0x4fb750, 0x4fae23, 0x4fa4f8, 0x4f9bcd, 0x4f92a3, 0x4f897a, 0x4f8053, 0x4f772c, 0x4f6e06, 0x4f64e2, 0x4f5bbe, 0x4f529c,
+ 0x4f497b, 0x4f405a, 0x4f373b, 0x4f2e1d, 0x4f2500, 0x4f1be4, 0x4f12c9, 0x4f09af, 0x4f0096, 0x4ef77e, 0x4eee67, 0x4ee551, 0x4edc3c, 0x4ed328, 0x4eca16,
+ 0x4ec104, 0x4eb7f3, 0x4eaee4, 0x4ea5d5, 0x4e9cc8, 0x4e93bc, 0x4e8ab0, 0x4e81a6, 0x4e789c, 0x4e6f94, 0x4e668d, 0x4e5d87, 0x4e5482, 0x4e4b7e, 0x4e427a,
+ 0x4e3978, 0x4e3077, 0x4e2777, 0x4e1e79, 0x4e157b, 0x4e0c7e, 0x4e0382, 0x4dfa87, 0x4df18d, 0x4de895, 0x4ddf9d, 0x4dd6a6, 0x4dcdb1, 0x4dc4bc, 0x4dbbc9,
+ 0x4db2d6, 0x4da9e5, 0x4da0f4, 0x4d9805, 0x4d8f16, 0x4d8629, 0x4d7d3c, 0x4d7451, 0x4d6b67, 0x4d627e, 0x4d5995, 0x4d50ae, 0x4d47c8, 0x4d3ee3, 0x4d35ff,
+ 0x4d2d1b, 0x4d2439, 0x4d1b58, 0x4d1278, 0x4d0999, 0x4d00bb, 0x4cf7de, 0x4cef02, 0x4ce627, 0x4cdd4d, 0x4cd474, 0x4ccb9c, 0x4cc2c5, 0x4cb9f0, 0x4cb11b,
+ 0x4ca847, 0x4c9f74, 0x4c96a2, 0x4c8dd1, 0x4c8502, 0x4c7c33, 0x4c7365, 0x4c6a98, 0x4c61cd, 0x4c5902, 0x4c5038, 0x4c4770, 0x4c3ea8, 0x4c35e1, 0x4c2d1c,
+ 0x4c2457, 0x4c1b93, 0x4c12d1, 0x4c0a0f, 0x4c014f, 0x4bf88f, 0x4befd0, 0x4be713, 0x4bde56, 0x4bd59b, 0x4bcce0, 0x4bc426, 0x4bbb6e, 0x4bb2b6, 0x4baa00,
+ 0x4ba14a, 0x4b9896, 0x4b8fe2, 0x4b8730, 0x4b7e7e, 0x4b75cd, 0x4b6d1e, 0x4b646f, 0x4b5bc2, 0x4b5315, 0x4b4a6a, 0x4b41bf, 0x4b3916, 0x4b306d, 0x4b27c6,
+ 0x4b1f1f, 0x4b1679, 0x4b0dd5, 0x4b0531, 0x4afc8f, 0x4af3ed, 0x4aeb4c, 0x4ae2ad, 0x4ada0e, 0x4ad171, 0x4ac8d4, 0x4ac038, 0x4ab79e, 0x4aaf04, 0x4aa66b,
+ 0x4a9dd4, 0x4a953d, 0x4a8ca7, 0x4a8413, 0x4a7b7f, 0x4a72ec, 0x4a6a5a, 0x4a61ca, 0x4a593a, 0x4a50ab, 0x4a481d, 0x4a3f91, 0x4a3705, 0x4a2e7a, 0x4a25f0,
+ 0x4a1d67, 0x4a14df, 0x4a0c58, 0x4a03d2, 0x49fb4d, 0x49f2c9, 0x49ea46, 0x49e1c4, 0x49d943, 0x49d0c3, 0x49c844, 0x49bfc6, 0x49b749, 0x49aecd, 0x49a652,
+ 0x499dd7, 0x49955e, 0x498ce6, 0x49846f, 0x497bf8, 0x497383, 0x496b0f, 0x49629b, 0x495a29, 0x4951b8, 0x494947, 0x4940d8, 0x493869, 0x492ffc, 0x49278f,
+ 0x491f23, 0x4916b9, 0x490e4f, 0x4905e6, 0x48fd7f, 0x48f518, 0x48ecb2, 0x48e44d, 0x48dbe9, 0x48d386, 0x48cb25, 0x48c2c4, 0x48ba64, 0x48b205, 0x48a9a6,
+ 0x48a149, 0x4898ed, 0x489092, 0x488838, 0x487fdf, 0x487786, 0x486f2f, 0x4866d8, 0x485e83, 0x48562f, 0x484ddb, 0x484589, 0x483d37, 0x4834e6, 0x482c97,
+ 0x482448, 0x481bfa, 0x4813ad, 0x480b62, 0x480317, 0x47facd, 0x47f284, 0x47ea3c, 0x47e1f5, 0x47d9ae, 0x47d169, 0x47c925, 0x47c0e2, 0x47b89f, 0x47b05e,
+ 0x47a81e, 0x479fde, 0x4797a0, 0x478f62, 0x478725, 0x477eea, 0x4776af, 0x476e75, 0x47663c, 0x475e05, 0x4755ce, 0x474d98, 0x474563, 0x473d2f, 0x4734fb,
+ 0x472cc9, 0x472498, 0x471c68, 0x471438, 0x470c0a, 0x4703dc, 0x46fbb0, 0x46f384, 0x46eb59, 0x46e330, 0x46db07, 0x46d2df, 0x46cab8, 0x46c292, 0x46ba6d,
+ 0x46b249, 0x46aa26, 0x46a203, 0x4699e2, 0x4691c2, 0x4689a2, 0x468184, 0x467966, 0x46714a, 0x46692e, 0x466113, 0x4658f9, 0x4650e0, 0x4648c9, 0x4640b1,
+ 0x46389b, 0x463086, 0x462872, 0x46205f, 0x46184c, 0x46103b, 0x46082a, 0x46001b, 0x45f80c, 0x45effe, 0x45e7f2, 0x45dfe6, 0x45d7db, 0x45cfd1, 0x45c7c8,
+ 0x45bfbf, 0x45b7b8, 0x45afb2, 0x45a7ac, 0x459fa8, 0x4597a4, 0x458fa2, 0x4587a0, 0x457f9f, 0x45779f, 0x456fa0, 0x4567a2, 0x455fa5, 0x4557a9, 0x454fae,
+ 0x4547b3, 0x453fba, 0x4537c1, 0x452fca, 0x4527d3, 0x451fdd, 0x4517e8, 0x450ff5, 0x450802, 0x45000f, 0x44f81e, 0x44f02e, 0x44e83f, 0x44e050, 0x44d863,
+ 0x44d076, 0x44c88a, 0x44c09f, 0x44b8b6, 0x44b0cd, 0x44a8e4, 0x44a0fd, 0x449917, 0x449132, 0x44894d, 0x44816a, 0x447987, 0x4471a5, 0x4469c5, 0x4461e5,
+ 0x445a06, 0x445228, 0x444a4b, 0x44426e, 0x443a93, 0x4432b8, 0x442adf, 0x442306, 0x441b2e, 0x441358, 0x440b82, 0x4403ad, 0x43fbd8, 0x43f405, 0x43ec33,
+ 0x43e461, 0x43dc91, 0x43d4c1, 0x43ccf3, 0x43c525, 0x43bd58, 0x43b58c, 0x43adc1, 0x43a5f6, 0x439e2d, 0x439664, 0x438e9d, 0x4386d6, 0x437f10, 0x43774c,
+ 0x436f88, 0x4367c5, 0x436002, 0x435841, 0x435081, 0x4348c1, 0x434102, 0x433945, 0x433188, 0x4329cc, 0x432211, 0x431a57, 0x43129d, 0x430ae5, 0x43032e,
+ 0x42fb77, 0x42f3c1, 0x42ec0c, 0x42e458, 0x42dca5, 0x42d4f3, 0x42cd42, 0x42c591, 0x42bde2, 0x42b633, 0x42ae85, 0x42a6d9, 0x429f2d, 0x429781, 0x428fd7,
+ 0x42882e, 0x428085, 0x4278de, 0x427137, 0x426991, 0x4261ec, 0x425a48, 0x4252a5, 0x424b03, 0x424361, 0x423bc1, 0x423421, 0x422c82, 0x4224e5, 0x421d48,
+ 0x4215ab, 0x420e10, 0x420676, 0x41fedc, 0x41f744, 0x41efac, 0x41e815, 0x41e07f, 0x41d8ea, 0x41d155, 0x41c9c2, 0x41c22f, 0x41ba9e, 0x41b30d, 0x41ab7d,
+ 0x41a3ee, 0x419c60, 0x4194d2, 0x418d46, 0x4185ba, 0x417e30, 0x4176a6, 0x416f1d, 0x416795, 0x41600d, 0x415887, 0x415102, 0x41497d, 0x4141f9, 0x413a76,
+ 0x4132f4, 0x412b73, 0x4123f3, 0x411c73, 0x4114f5, 0x410d77, 0x4105fa, 0x40fe7e, 0x40f703, 0x40ef89, 0x40e80f, 0x40e097, 0x40d91f, 0x40d1a8, 0x40ca32,
+ 0x40c2bd, 0x40bb49, 0x40b3d5, 0x40ac63, 0x40a4f1, 0x409d80, 0x409610, 0x408ea1, 0x408733, 0x407fc5, 0x407859, 0x4070ed, 0x406982, 0x406218, 0x405aaf,
+ 0x405347, 0x404bdf, 0x404479, 0x403d13, 0x4035ae, 0x402e4a, 0x4026e7, 0x401f85, 0x401823, 0x4010c3, 0x400963, 0x400204, 0x3ffaa6, 0x3ff348, 0x3febec,
+ 0x3fe490, 0x3fdd36, 0x3fd5dc, 0x3fce83, 0x3fc72b, 0x3fbfd3, 0x3fb87d, 0x3fb127, 0x3fa9d3, 0x3fa27f, 0x3f9b2c, 0x3f93d9, 0x3f8c88, 0x3f8537, 0x3f7de8,
+ 0x3f7699, 0x3f6f4b, 0x3f67fd, 0x3f60b1, 0x3f5966, 0x3f521b, 0x3f4ad1, 0x3f4388, 0x3f3c40, 0x3f34f9, 0x3f2db2, 0x3f266c, 0x3f1f28, 0x3f17e4, 0x3f10a1,
+ 0x3f095e, 0x3f021d, 0x3efadc, 0x3ef39c, 0x3eec5d, 0x3ee51f, 0x3edde2, 0x3ed6a6, 0x3ecf6a, 0x3ec82f, 0x3ec0f5, 0x3eb9bc, 0x3eb284, 0x3eab4c, 0x3ea416,
+ 0x3e9ce0, 0x3e95ab, 0x3e8e77, 0x3e8743, 0x3e8011, 0x3e78df, 0x3e71ae, 0x3e6a7e, 0x3e634f, 0x3e5c21, 0x3e54f3, 0x3e4dc7, 0x3e469b, 0x3e3f70, 0x3e3845,
+ 0x3e311c, 0x3e29f3, 0x3e22cc, 0x3e1ba5, 0x3e147f, 0x3e0d59, 0x3e0635, 0x3dff11, 0x3df7ef, 0x3df0cd, 0x3de9ab, 0x3de28b, 0x3ddb6b, 0x3dd44d, 0x3dcd2f,
+ 0x3dc612, 0x3dbef6, 0x3db7da, 0x3db0c0, 0x3da9a6, 0x3da28d, 0x3d9b75, 0x3d945d, 0x3d8d47, 0x3d8631, 0x3d7f1c, 0x3d7808, 0x3d70f5, 0x3d69e2, 0x3d62d1,
+ 0x3d5bc0, 0x3d54b0, 0x3d4da1, 0x3d4692, 0x3d3f85, 0x3d3878, 0x3d316c, 0x3d2a61, 0x3d2356, 0x3d1c4d, 0x3d1544, 0x3d0e3c, 0x3d0735, 0x3d002f, 0x3cf929,
+ 0x3cf225, 0x3ceb21, 0x3ce41e, 0x3cdd1c, 0x3cd61a, 0x3ccf1a, 0x3cc81a, 0x3cc11b, 0x3cba1c, 0x3cb31f, 0x3cac22, 0x3ca527, 0x3c9e2c, 0x3c9731, 0x3c9038,
+ 0x3c893f, 0x3c8248, 0x3c7b51, 0x3c745b, 0x3c6d65, 0x3c6671, 0x3c5f7d, 0x3c588a, 0x3c5198, 0x3c4aa6, 0x3c43b6, 0x3c3cc6, 0x3c35d7, 0x3c2ee9, 0x3c27fb,
+ 0x3c210f, 0x3c1a23, 0x3c1338, 0x3c0c4e, 0x3c0564, 0x3bfe7c, 0x3bf794, 0x3bf0ad, 0x3be9c7, 0x3be2e1, 0x3bdbfd, 0x3bd519, 0x3bce36, 0x3bc753, 0x3bc072,
+ 0x3bb991, 0x3bb2b1, 0x3babd2, 0x3ba4f4, 0x3b9e17, 0x3b973a, 0x3b905e, 0x3b8983, 0x3b82a8, 0x3b7bcf, 0x3b74f6, 0x3b6e1e, 0x3b6747, 0x3b6070, 0x3b599b,
+ 0x3b52c6, 0x3b4bf2, 0x3b451f, 0x3b3e4c, 0x3b377b, 0x3b30aa, 0x3b29da, 0x3b230a, 0x3b1c3c, 0x3b156e, 0x3b0ea1, 0x3b07d5, 0x3b0109, 0x3afa3f, 0x3af375,
+ 0x3aecac, 0x3ae5e3, 0x3adf1c, 0x3ad855, 0x3ad18f, 0x3acaca, 0x3ac406, 0x3abd42, 0x3ab67f, 0x3aafbd, 0x3aa8fc, 0x3aa23b, 0x3a9b7c, 0x3a94bd, 0x3a8dfe,
+ 0x3a8741, 0x3a8084, 0x3a79c9, 0x3a730d, 0x3a6c53, 0x3a659a, 0x3a5ee1, 0x3a5829, 0x3a5172, 0x3a4abb, 0x3a4406, 0x3a3d51, 0x3a369d, 0x3a2fe9, 0x3a2937,
+ 0x3a2285, 0x3a1bd4, 0x3a1524, 0x3a0e74, 0x3a07c5, 0x3a0118, 0x39fa6a, 0x39f3be, 0x39ed12, 0x39e667, 0x39dfbd, 0x39d914, 0x39d26b, 0x39cbc4, 0x39c51d,
+ 0x39be76, 0x39b7d1, 0x39b12c, 0x39aa88, 0x39a3e5, 0x399d42, 0x3996a1, 0x399000, 0x398960, 0x3982c0, 0x397c22, 0x397584, 0x396ee7, 0x39684a, 0x3961af,
+ 0x395b14, 0x39547a, 0x394de0, 0x394748, 0x3940b0, 0x393a19, 0x393383, 0x392ced, 0x392658, 0x391fc4, 0x391931, 0x39129f, 0x390c0d, 0x39057c, 0x38feec,
+ 0x38f85c, 0x38f1ce, 0x38eb40, 0x38e4b2, 0x38de26, 0x38d79a, 0x38d10f, 0x38ca85, 0x38c3fc, 0x38bd73, 0x38b6eb, 0x38b064, 0x38a9de, 0x38a358, 0x389cd3,
+ 0x38964f, 0x388fcb, 0x388949, 0x3882c7, 0x387c46, 0x3875c5, 0x386f45, 0x3868c7, 0x386248, 0x385bcb, 0x38554e, 0x384ed2, 0x384857, 0x3841dd, 0x383b63,
+ 0x3834ea, 0x382e72, 0x3827fa, 0x382184, 0x381b0e, 0x381498, 0x380e24, 0x3807b0, 0x38013d, 0x37facb, 0x37f459, 0x37ede9, 0x37e778, 0x37e109, 0x37da9b,
+ 0x37d42d, 0x37cdc0, 0x37c753, 0x37c0e8, 0x37ba7d, 0x37b413, 0x37ada9, 0x37a741, 0x37a0d9, 0x379a72, 0x37940b, 0x378da6, 0x378741, 0x3780dc, 0x377a79,
+ 0x377416, 0x376db4, 0x376753, 0x3760f2, 0x375a93, 0x375433, 0x374dd5, 0x374777, 0x37411b, 0x373abe, 0x373463, 0x372e08, 0x3727ae, 0x372155, 0x371afd,
+ 0x3714a5, 0x370e4e, 0x3707f8, 0x3701a2, 0x36fb4d, 0x36f4f9, 0x36eea6, 0x36e853, 0x36e201, 0x36dbb0, 0x36d55f, 0x36cf10, 0x36c8c1, 0x36c272, 0x36bc25,
+ 0x36b5d8, 0x36af8c, 0x36a940, 0x36a2f6, 0x369cac, 0x369663, 0x36901a, 0x3689d2, 0x36838b, 0x367d45, 0x3676ff, 0x3670ba, 0x366a76, 0x366433, 0x365df0,
+ 0x3657ae, 0x36516d, 0x364b2c, 0x3644ec, 0x363ead, 0x36386f, 0x363231, 0x362bf4, 0x3625b8, 0x361f7c, 0x361942, 0x361308, 0x360cce, 0x360695, 0x36005e,
+ 0x35fa26
+};
+
+#define HDCD_PROCESS_STEREO_DEFAULT 1
+#define HDCD_MAX_CHANNELS 2
+
+/** convert to float from 4-bit (3.1) fixed-point
+ * the always-negative value is stored positive, so make it negative */
+#define GAINTOFLOAT(g) (g) ? -(float)(g>>1) - ((g & 1) ? 0.5 : 0.0) : 0.0
+
+/** apply gain, 11-bit (3.8) fixed point,
+ * always negative but stored positive. */
+#define APPLY_GAIN(s,g) do{int64_t s64 = s; s64 *= gaintab[g]; s = (int32_t)(s64 >> 23); }while(0);
+
+/** tone generator: sample_number, frequency, sample_rate, amplitude */
+#define TONEGEN16(sn, f, sr, a) (int16_t)(sin((6.28318530718 * (sn) * (f)) /(sr)) * (a) * 0x7fff)
+
+typedef struct {
+ uint64_t window;
+ unsigned char readahead;
+
+ /** arg is set when a packet prefix is found.
+ * control is the active control code, where
+ * bit 0-3: target_gain, 4-bit (3.1) fixed-point value
+ * bit 4 : peak_extend
+ * bit 5 : transient_filter
+ * bit 6,7: always zero */
+ uint8_t arg, control;
+ unsigned int sustain, sustain_reset; /**< code detect timer */
+
+ int running_gain; /**< 11-bit (3.8) fixed point, extended from target_gain */
+
+ /* counters */
+ int code_counterA; /**< 8-bit format packet */
+ int code_counterA_almost; /**< looks like an A code, but a bit expected to be 0 is 1 */
+ int code_counterB; /**< 16-bit format packet, 8-bit code, 8-bit XOR of code */
+ int code_counterB_checkfails; /**< looks like a B code, but doesn't pass the XOR check */
+ int code_counterC; /**< packet prefix was found, expect a code */
+ int code_counterC_unmatched; /**< told to look for a code, but didn't find one */
+ int count_peak_extend; /**< valid packets where peak_extend was enabled */
+ int count_transient_filter; /**< valid packets where filter was detected */
+ /** target_gain is a 4-bit (3.1) fixed-point value, always
+ * negative, but stored positive.
+ * The 16 possible values range from -7.5 to 0.0 dB in
+ * steps of 0.5, but no value below -6.0 dB should appear. */
+ int gain_counts[16];
+ int max_gain;
+ /** occurrences of code detect timer expiring without detecting
+ * a code. -1 for timer never set. */
+ int count_sustain_expired;
+
+ int rate; /**< sampling rate */
+ int _ana_snb; /**< used in the analyze mode tone generator */
+} hdcd_state;
+
+typedef enum {
+ HDCD_PE_NEVER = 0, /**< All valid packets have PE set to off */
+ HDCD_PE_INTERMITTENT = 1, /**< Some valid packets have PE set to on */
+ HDCD_PE_PERMANENT = 2, /**< All valid packets have PE set to on */
+} hdcd_pe;
+
+static const char * const pe_str[] = {
+ "never enabled",
+ "enabled intermittently",
+ "enabled permanently"
+};
+
+typedef enum {
+ HDCD_NONE = 0, /**< HDCD packets do not (yet) appear */
+ HDCD_NO_EFFECT = 1, /**< HDCD packets appear, but all control codes are NOP */
+ HDCD_EFFECTUAL = 2, /**< HDCD packets appear, and change the output in some way */
+} hdcd_dv;
+
+typedef enum {
+ HDCD_PVER_NONE = 0, /**< No packets (yet) discovered */
+ HDCD_PVER_A = 1, /**< Packets of type A (8-bit control) discovered */
+ HDCD_PVER_B = 2, /**< Packets of type B (8-bit control, 8-bit XOR) discovered */
+ HDCD_PVER_MIX = 3, /**< Packets of type A and B discovered, most likely an encoding error */
+} hdcd_pf;
+
+static const char * const pf_str[] = {
+ "?", "A", "B", "A+B"
+};
+
+typedef struct {
+ hdcd_dv hdcd_detected;
+ hdcd_pf packet_type;
+ int total_packets; /**< valid packets */
+ int errors; /**< detectable errors */
+ hdcd_pe peak_extend;
+ int uses_transient_filter;
+ float max_gain_adjustment; /**< in dB, expected in the range -7.5 to 0.0 */
+ int cdt_expirations; /**< -1 for never set, 0 for set but never expired */
+
+ int _active_count; /**< used internally */
+} hdcd_detection_data;
+
+typedef enum {
+ HDCD_ANA_OFF = 0,
+ HDCD_ANA_LLE = 1,
+ HDCD_ANA_PE = 2,
+ HDCD_ANA_CDT = 3,
+ HDCD_ANA_TGM = 4,
+ HDCD_ANA_TOP = 5, /**< used in max value of AVOption */
+} hdcd_ana_mode;
+
+/** analyze mode descriptions: macro for AVOption definitions, array of const char for mapping mode to string */
+#define HDCD_ANA_OFF_DESC "disabled"
+#define HDCD_ANA_LLE_DESC "gain adjustment level at each sample"
+#define HDCD_ANA_PE_DESC "samples where peak extend occurs"
+#define HDCD_ANA_CDT_DESC "samples where the code detect timer is active"
+#define HDCD_ANA_TGM_DESC "samples where the target gain does not match between channels"
+static const char * const ana_mode_str[] = {
+ HDCD_ANA_OFF_DESC,
+ HDCD_ANA_LLE_DESC,
+ HDCD_ANA_PE_DESC,
+ HDCD_ANA_CDT_DESC,
+ HDCD_ANA_TGM_DESC,
+};
typedef struct HDCDContext {
const AVClass *class;
-
- hdcd_simple *shdcd;
+ hdcd_state state[HDCD_MAX_CHANNELS];
/* AVOption members */
+ /** use hdcd_*_stereo() functions to process both channels together.
+ * -af hdcd=process_stereo=0 for off
+ * -af hdcd=process_stereo=1 for on
+ * default is HDCD_PROCESS_STEREO_DEFAULT */
+ int process_stereo;
+ /** always extend peaks above -3dBFS even if PE isn't signaled
+ * -af hdcd=force_pe=0 for off
+ * -af hdcd=force_pe=1 for on
+ * default is off */
+ int force_pe;
+
/** analyze mode replaces the audio with a solid tone and adjusts
* the amplitude to signal some specific aspect of the decoding
* process. See docs or HDCD_ANA_* defines. */
int analyze_mode;
+
+ int cdt_ms; /**< code detect timer period in ms */
+
+ int disable_autoconvert; /**< disable any format conversion or resampling in the filter graph */
+
+ int bits_per_sample; /**< bits per sample 16, 20, or 24 */
/* end AVOption members */
+
+ /** config_input() and config_output() scan links for any resampling
+ * or format changes. If found, warnings are issued and bad_config
+ * is set. */
+ int bad_config;
+
+ AVFilterContext *fctx; /**< filter context for logging errors */
+ int sample_count; /**< used in error logging */
+ int val_target_gain; /**< last matching target_gain in both channels */
+
+ /* User information/stats */
+ hdcd_detection_data detect;
} HDCDContext;
#define OFFSET(x) offsetof(HDCDContext, x)
-#define A AV_OPT_FLAG_AUDIO_PARAM
-#define HDCD_ANA_MAX 6
+#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
static const AVOption hdcd_options[] = {
+ { "disable_autoconvert", "Disable any format conversion or resampling in the filter graph.",
+ OFFSET(disable_autoconvert), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, A },
+ { "process_stereo", "Process stereo channels together. Only apply target_gain when both channels match.",
+ OFFSET(process_stereo), AV_OPT_TYPE_BOOL, { .i64 = HDCD_PROCESS_STEREO_DEFAULT }, 0, 1, A },
+ { "cdt_ms", "Code detect timer period in ms.",
+ OFFSET(cdt_ms), AV_OPT_TYPE_INT, { .i64 = 2000 }, 100, 60000, A },
+ { "force_pe", "Always extend peaks above -3dBFS even when PE is not signaled.",
+ OFFSET(force_pe), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, A },
{ "analyze_mode", "Replace audio with solid tone and signal some processing aspect in the amplitude.",
- OFFSET(analyze_mode), AV_OPT_TYPE_INT, { .i64=HDCD_ANA_OFF }, 0, HDCD_ANA_MAX, A, "analyze_mode"},
- { "off", HDCD_ANA_OFF_DESC, 0, AV_OPT_TYPE_CONST, { .i64 = HDCD_ANA_OFF}, 0, 0, A, "analyze_mode" },
- { "lle", HDCD_ANA_LLE_DESC, 0, AV_OPT_TYPE_CONST, { .i64 = HDCD_ANA_LLE}, 0, 0, A, "analyze_mode" },
- { "pe", HDCD_ANA_PE_DESC, 0, AV_OPT_TYPE_CONST, { .i64 = HDCD_ANA_PE}, 0, 0, A, "analyze_mode" },
- { "cdt", HDCD_ANA_CDT_DESC, 0, AV_OPT_TYPE_CONST, { .i64 = HDCD_ANA_CDT}, 0, 0, A, "analyze_mode" },
- { "tgm", HDCD_ANA_TGM_DESC, 0, AV_OPT_TYPE_CONST, { .i64 = HDCD_ANA_TGM}, 0, 0, A, "analyze_mode" },
- { "pel", HDCD_ANA_PEL_DESC, 0, AV_OPT_TYPE_CONST, { .i64 = HDCD_ANA_PEL}, 0, 0, A, "analyze_mode" },
- { "ltgm", HDCD_ANA_LTGM_DESC, 0, AV_OPT_TYPE_CONST, { .i64 = HDCD_ANA_LTGM}, 0, 0, A, "analyze_mode" },
- { NULL }
+ OFFSET(analyze_mode), AV_OPT_TYPE_INT, { .i64=HDCD_ANA_OFF }, 0, HDCD_ANA_TOP-1, A, "analyze_mode"},
+ { "off", HDCD_ANA_OFF_DESC, 0, AV_OPT_TYPE_CONST, {.i64=HDCD_ANA_OFF}, 0, 0, A, "analyze_mode" },
+ { "lle", HDCD_ANA_LLE_DESC, 0, AV_OPT_TYPE_CONST, {.i64=HDCD_ANA_LLE}, 0, 0, A, "analyze_mode" },
+ { "pe", HDCD_ANA_PE_DESC, 0, AV_OPT_TYPE_CONST, {.i64=HDCD_ANA_PE}, 0, 0, A, "analyze_mode" },
+ { "cdt", HDCD_ANA_CDT_DESC, 0, AV_OPT_TYPE_CONST, {.i64=HDCD_ANA_CDT}, 0, 0, A, "analyze_mode" },
+ { "tgm", HDCD_ANA_TGM_DESC, 0, AV_OPT_TYPE_CONST, {.i64=HDCD_ANA_TGM}, 0, 0, A, "analyze_mode" },
+ { "bits_per_sample", "Valid bits per sample (location of the true LSB).",
+ OFFSET(bits_per_sample), AV_OPT_TYPE_INT, { .i64=16 }, 16, 24, A, "bits_per_sample"},
+ { "16", "16-bit (in s32 or s16)", 0, AV_OPT_TYPE_CONST, {.i64=16}, 0, 0, A, "bits_per_sample" },
+ { "20", "20-bit (in s32)", 0, AV_OPT_TYPE_CONST, {.i64=20}, 0, 0, A, "bits_per_sample" },
+ { "24", "24-bit (in s32)", 0, AV_OPT_TYPE_CONST, {.i64=24}, 0, 0, A, "bits_per_sample" },
+ {NULL}
};
-static const AVClass hdcd_class = {
- .class_name = "HDCD filter",
- .item_name = av_default_item_name,
- .option = hdcd_options,
- .version = LIBAVFILTER_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(hdcd);
+
+static void hdcd_reset(hdcd_state *state, unsigned rate, unsigned cdt_ms)
+{
+ int i;
+ uint64_t sustain_reset = (uint64_t)cdt_ms * rate / 1000;
+
+ state->window = 0;
+ state->readahead = 32;
+ state->arg = 0;
+ state->control = 0;
+ state->running_gain = 0;
+ state->sustain_reset = sustain_reset;
+ state->sustain = 0;
+
+ state->code_counterA = 0;
+ state->code_counterA_almost = 0;
+ state->code_counterB = 0;
+ state->code_counterB_checkfails = 0;
+ state->code_counterC = 0;
+ state->code_counterC_unmatched = 0;
+ state->count_peak_extend = 0;
+ state->count_transient_filter = 0;
+ for(i = 0; i < 16; i++) state->gain_counts[i] = 0;
+ state->max_gain = 0;
+ state->count_sustain_expired = -1;
+
+ state->rate = rate;
+ state->_ana_snb = 0;
+}
+
+static int hdcd_integrate(HDCDContext *ctx, hdcd_state *states, int channels, int *flag, const int32_t *samples, int count, int stride)
+{
+ uint32_t bits[HDCD_MAX_CHANNELS];
+ int result = count;
+ int i, j, f;
+ *flag = 0;
+
+ memset(bits, 0, sizeof(bits));
+ if (stride < channels) stride = channels;
+
+ for (i = 0; i < channels; i++)
+ result = FFMIN(states[i].readahead, result);
+
+ for (j = result - 1; j >= 0; j--) {
+ for (i = 0; i < channels; i++)
+ bits[i] |= (*(samples++) & 1) << j;
+ samples += stride - channels;
+ }
+
+ for (i = 0; i < channels; i++) {
+ states[i].window = (states[i].window << result) | bits[i];
+ states[i].readahead -= result;
+
+ if (states[i].readahead == 0) {
+ uint32_t wbits = (uint32_t)(states[i].window ^ states[i].window >> 5 ^ states[i].window >> 23);
+ if (states[i].arg) {
+ f = 0;
+ if ((wbits & 0x0fa00500) == 0x0fa00500) {
+ /* A: 8-bit code 0x7e0fa005[..] */
+ if ((wbits & 0xc8) == 0) {
+ /* [..pt gggg]
+ * 0x0fa005[..] -> 0b[00.. 0...], gain part doubled (shifted left 1) */
+ states[i].control = (wbits & 255) + (wbits & 7);
+ f = 1;
+ states[i].code_counterA++;
+ } else {
+ /* one of bits 3, 6, or 7 was not 0 */
+ states[i].code_counterA_almost++;
+ av_log(ctx->fctx, AV_LOG_VERBOSE,
+ "hdcd error: Control A almost: 0x%02x near %d\n", wbits & 0xff, ctx->sample_count);
+ }
+ } else if ((wbits & 0xa0060000) == 0xa0060000) {
+ /* B: 8-bit code, 8-bit XOR check, 0x7e0fa006[....] */
+ if (((wbits ^ (~wbits >> 8 & 255)) & 0xffff00ff) == 0xa0060000) {
+ /* check: [..pt gggg ~(..pt gggg)]
+ * 0xa006[....] -> 0b[.... .... .... .... ] */
+ states[i].control = wbits >> 8 & 255;
+ f = 1;
+ states[i].code_counterB++;
+ } else {
+ /* XOR check failed */
+ states[i].code_counterB_checkfails++;
+ av_log(ctx->fctx, AV_LOG_VERBOSE,
+ "hdcd error: Control B check failed: 0x%04x (0x%02x vs 0x%02x) near %d\n", wbits & 0xffff, (wbits & 0xff00) >> 8, ~wbits & 0xff, ctx->sample_count);
+ }
+ }
+ if (f) {
+ *flag |= (1<<i);
+ /* update counters */
+ if (states[i].control & 16) states[i].count_peak_extend++;
+ if (states[i].control & 32) states[i].count_transient_filter++;
+ states[i].gain_counts[states[i].control & 15]++;
+ states[i].max_gain = FFMAX(states[i].max_gain, (states[i].control & 15));
+ }
+ states[i].arg = 0;
+ }
+ if (wbits == 0x7e0fa005 || wbits == 0x7e0fa006) {
+ /* 0x7e0fa00[.]-> [0b0101 or 0b0110] */
+ states[i].readahead = (wbits & 3) * 8;
+ states[i].arg = 1;
+ states[i].code_counterC++;
+ } else {
+ if (wbits)
+ states[i].readahead = readaheadtab[wbits & 0xff];
+ else
+ states[i].readahead = 31; /* ffwd over digisilence */
+ }
+ }
+ }
+ return result;
+}
+
+static int hdcd_scan(HDCDContext *ctx, hdcd_state *states, int channels, const int32_t *samples, int max, int stride)
+{
+ int result;
+ int i;
+ int cdt_active[HDCD_MAX_CHANNELS];
+ memset(cdt_active, 0, sizeof(cdt_active));
+
+ if (stride < channels) stride = channels;
+
+ /* code detect timers for each channel */
+ for(i = 0; i < channels; i++) {
+ if (states[i].sustain > 0) {
+ cdt_active[i] = 1;
+ if (states[i].sustain <= (unsigned)max) {
+ states[i].control = 0;
+ max = states[i].sustain;
+ }
+ states[i].sustain -= max;
+ }
+ }
+
+ result = 0;
+ while (result < max) {
+ int flag;
+ int consumed = hdcd_integrate(ctx, states, channels, &flag, samples, max - result, stride);
+ result += consumed;
+ if (flag) {
+ /* reset timer if code detected in a channel */
+ for(i = 0; i < channels; i++) {
+ if (flag & (1<<i)) {
+ states[i].sustain = states[i].sustain_reset;
+ /* if this is the first reset then change
+ * from never set, to never expired */
+ if (states[i].count_sustain_expired == -1)
+ states[i].count_sustain_expired = 0;
+ }
+ }
+ break;
+ }
+ samples += consumed * stride;
+ }
+
+ for(i = 0; i < channels; i++) {
+ /* code detect timer expired */
+ if (cdt_active[i] && states[i].sustain == 0)
+ states[i].count_sustain_expired++;
+ }
+
+ return result;
+}
+
+/** replace audio with solid tone, but save LSBs */
+static void hdcd_analyze_prepare(hdcd_state *state, int32_t *samples, int count, int stride) {
+ int n, f = 300;
+ int so = state->rate / f;
+ for (n = 0; n < count * stride; n += stride) {
+ /* in analyze mode, the audio is replaced by a solid tone, and
+ * amplitude is changed to signal when the specified feature is
+ * used.
+ * bit 0: HDCD signal preserved
+ * bit 1: Original sample was above PE level */
+ int32_t save = (abs(samples[n]) - PEAK_EXT_LEVEL >= 0) ? 2 : 0; /* above PE level */
+ save |= samples[n] & 1; /* save LSB for HDCD packets */
+ samples[n] = TONEGEN16(state->_ana_snb, f, state->rate, 0.1);
+ samples[n] = (samples[n] | 3) ^ ((~save) & 3);
+ if (++state->_ana_snb > so) state->_ana_snb = 0;
+ }
+}
+
+/** encode a value in the given sample by adjusting the amplitude */
+static int32_t hdcd_analyze_gen(int32_t sample, unsigned int v, unsigned int maxv)
+{
+ static const int r = 18, m = 1024;
+ int64_t s64 = sample;
+ v = m + (v * r * m / maxv);
+ return (int32_t)(s64 * v / m);
+}
+
+/** behaves like hdcd_envelope(), but encodes processing information in
+ * a way that is audible (and visible in an audio editor) to aid analysis. */
+static int hdcd_analyze(int32_t *samples, int count, int stride, int gain, int target_gain, int extend, int mode, int cdt_active, int tg_mismatch)
+{
+ static const int maxg = 0xf << 7;
+ int i;
+ int32_t *samples_end = samples + stride * count;
+
+ for (i = 0; i < count; i++) {
+ samples[i * stride] <<= 15;
+ if (mode == HDCD_ANA_PE) {
+ int pel = (samples[i * stride] >> 16) & 1;
+ int32_t sample = samples[i * stride];
+ samples[i * stride] = hdcd_analyze_gen(sample, !!(pel && extend), 1);
+ } else if (mode == HDCD_ANA_TGM && tg_mismatch > 0)
+ samples[i * stride] = hdcd_analyze_gen(samples[i * stride], 1, 1);
+ else if (mode == HDCD_ANA_CDT && cdt_active)
+ samples[i * stride] = hdcd_analyze_gen(samples[i * stride], 1, 1);
+ }
+
+ if (gain <= target_gain) {
+ int len = FFMIN(count, target_gain - gain);
+ /* attenuate slowly */
+ for (i = 0; i < len; i++) {
+ ++gain;
+ if (mode == HDCD_ANA_LLE)
+ *samples = hdcd_analyze_gen(*samples, gain, maxg);
+ samples += stride;
+ }
+ count -= len;
+ } else {
+ int len = FFMIN(count, (gain - target_gain) >> 3);
+ /* amplify quickly */
+ for (i = 0; i < len; i++) {
+ gain -= 8;
+ if (mode == HDCD_ANA_LLE)
+ *samples = hdcd_analyze_gen(*samples, gain, maxg);
+ samples += stride;
+ }
+ if (gain - 8 < target_gain)
+ gain = target_gain;
+ count -= len;
+ }
+
+ /* hold a steady level */
+ if (gain == 0) {
+ if (count > 0)
+ samples += count * stride;
+ } else {
+ while (--count >= 0) {
+ if (mode == HDCD_ANA_LLE)
+ *samples = hdcd_analyze_gen(*samples, gain, maxg);
+ samples += stride;
+ }
+ }
+
+ av_assert0(samples == samples_end);
+
+ return gain;
+}
+
+/** apply HDCD decoding parameters to a series of samples */
+static int hdcd_envelope(int32_t *samples, int count, int stride, int vbits, int gain, int target_gain, int extend)
+{
+ static const int max_asample = sizeof(peaktab) / sizeof(peaktab[0]) - 1;
+ int32_t *samples_end = samples + stride * count;
+ int i;
+
+ int pe_level = PEAK_EXT_LEVEL, shft = 15;
+ if (vbits != 16) {
+ pe_level = (1 << (vbits - 1)) - (0x8000 - PEAK_EXT_LEVEL);
+ shft = 32 - vbits - 1;
+ }
+ av_assert0(PEAK_EXT_LEVEL + max_asample == 0x8000);
+
+ if (extend) {
+ for (i = 0; i < count; i++) {
+ int32_t sample = samples[i * stride];
+ int32_t asample = abs(sample) - pe_level;
+ if (asample >= 0) {
+ av_assert0(asample <= max_asample);
+ sample = sample >= 0 ? peaktab[asample] : -peaktab[asample];
+ } else
+ sample <<= shft;
+
+ samples[i * stride] = sample;
+ }
+ } else {
+ for (i = 0; i < count; i++)
+ samples[i * stride] <<= shft;
+ }
+
+ if (gain <= target_gain) {
+ int len = FFMIN(count, target_gain - gain);
+ /* attenuate slowly */
+ for (i = 0; i < len; i++) {
+ ++gain;
+ APPLY_GAIN(*samples, gain);
+ samples += stride;
+ }
+ count -= len;
+ } else {
+ int len = FFMIN(count, (gain - target_gain) >> 3);
+ /* amplify quickly */
+ for (i = 0; i < len; i++) {
+ gain -= 8;
+ APPLY_GAIN(*samples, gain);
+ samples += stride;
+ }
+ if (gain - 8 < target_gain)
+ gain = target_gain;
+ count -= len;
+ }
+
+ /* hold a steady level */
+ if (gain == 0) {
+ if (count > 0)
+ samples += count * stride;
+ } else {
+ while (--count >= 0) {
+ APPLY_GAIN(*samples, gain);
+ samples += stride;
+ }
+ }
+
+ av_assert0(samples == samples_end);
+
+ return gain;
+}
+
+/** extract fields from control code */
+static void hdcd_control(HDCDContext *ctx, hdcd_state *state, int *peak_extend, int *target_gain)
+{
+ *peak_extend = (ctx->force_pe || state->control & 16);
+ *target_gain = (state->control & 15) << 7;
+}
+
+typedef enum {
+ HDCD_OK=0,
+ HDCD_TG_MISMATCH
+} hdcd_control_result;
+
+static hdcd_control_result hdcd_control_stereo(HDCDContext *ctx, int *peak_extend0, int *peak_extend1)
+{
+ int target_gain[2];
+ hdcd_control(ctx, &ctx->state[0], peak_extend0, &target_gain[0]);
+ hdcd_control(ctx, &ctx->state[1], peak_extend1, &target_gain[1]);
+ if (target_gain[0] == target_gain[1])
+ ctx->val_target_gain = target_gain[0];
+ else {
+ av_log(ctx->fctx, AV_LOG_VERBOSE,
+ "hdcd error: Unmatched target_gain near %d: tg0: %0.1f, tg1: %0.1f, lvg: %0.1f\n",
+ ctx->sample_count,
+ GAINTOFLOAT(target_gain[0] >>7),
+ GAINTOFLOAT(target_gain[1] >>7),
+ GAINTOFLOAT(ctx->val_target_gain >>7) );
+ return HDCD_TG_MISMATCH;
+ }
+ return HDCD_OK;
+}
+
+static void hdcd_process(HDCDContext *ctx, hdcd_state *state, int32_t *samples, int count, int stride)
+{
+ int32_t *samples_end = samples + count * stride;
+ int gain = state->running_gain;
+ int peak_extend, target_gain;
+ int lead = 0;
+
+ if (ctx->analyze_mode)
+ hdcd_analyze_prepare(state, samples, count, stride);
+
+ hdcd_control(ctx, state, &peak_extend, &target_gain);
+ while (count > lead) {
+ int envelope_run;
+ int run;
+
+ av_assert0(samples + lead * stride + stride * (count - lead) <= samples_end);
+ run = hdcd_scan(ctx, state, 1, samples + lead * stride, count - lead, 0) + lead;
+ envelope_run = run - 1;
+
+ av_assert0(samples + envelope_run * stride <= samples_end);
+ if (ctx->analyze_mode)
+ gain = hdcd_analyze(samples, envelope_run, stride, gain, target_gain, peak_extend, ctx->analyze_mode, state->sustain, -1);
+ else
+ gain = hdcd_envelope(samples, envelope_run, stride, ctx->bits_per_sample, gain, target_gain, peak_extend);
+
+ samples += envelope_run * stride;
+ count -= envelope_run;
+ lead = run - envelope_run;
+ hdcd_control(ctx, state, &peak_extend, &target_gain);
+ }
+ if (lead > 0) {
+ av_assert0(samples + lead * stride <= samples_end);
+ if (ctx->analyze_mode)
+ gain = hdcd_analyze(samples, lead, stride, gain, target_gain, peak_extend, ctx->analyze_mode, state->sustain, -1);
+ else
+ gain = hdcd_envelope(samples, lead, stride, ctx->bits_per_sample, gain, target_gain, peak_extend);
+ }
+
+ state->running_gain = gain;
+}
+
+static void hdcd_process_stereo(HDCDContext *ctx, int32_t *samples, int count)
+{
+ const int stride = 2;
+ int32_t *samples_end = samples + count * stride;
+ int gain[2] = {ctx->state[0].running_gain, ctx->state[1].running_gain};
+ int peak_extend[2];
+ int lead = 0;
+ int ctlret;
+
+ if (ctx->analyze_mode) {
+ hdcd_analyze_prepare(&ctx->state[0], samples, count, stride);
+ hdcd_analyze_prepare(&ctx->state[1], samples + 1, count, stride);
+ }
+
+ ctlret = hdcd_control_stereo(ctx, &peak_extend[0], &peak_extend[1]);
+ while (count > lead) {
+ int envelope_run, run;
+
+ av_assert0(samples + lead * stride + stride * (count - lead) <= samples_end);
+ run = hdcd_scan(ctx, ctx->state, 2, samples + lead * stride, count - lead, 0) + lead;
+ envelope_run = run - 1;
+
+ av_assert0(samples + envelope_run * stride <= samples_end);
+
+ if (ctx->analyze_mode) {
+ gain[0] = hdcd_analyze(samples, envelope_run, stride, gain[0], ctx->val_target_gain, peak_extend[0],
+ ctx->analyze_mode,
+ ctx->state[0].sustain,
+ (ctlret == HDCD_TG_MISMATCH) );
+ gain[1] = hdcd_analyze(samples + 1, envelope_run, stride, gain[1], ctx->val_target_gain, peak_extend[1],
+ ctx->analyze_mode,
+ ctx->state[1].sustain,
+ (ctlret == HDCD_TG_MISMATCH) );
+ } else {
+ gain[0] = hdcd_envelope(samples, envelope_run, stride, ctx->bits_per_sample, gain[0], ctx->val_target_gain, peak_extend[0]);
+ gain[1] = hdcd_envelope(samples + 1, envelope_run, stride, ctx->bits_per_sample, gain[1], ctx->val_target_gain, peak_extend[1]);
+ }
+
+ samples += envelope_run * stride;
+ count -= envelope_run;
+ lead = run - envelope_run;
+
+ ctlret = hdcd_control_stereo(ctx, &peak_extend[0], &peak_extend[1]);
+ }
+ if (lead > 0) {
+ av_assert0(samples + lead * stride <= samples_end);
+ if (ctx->analyze_mode) {
+ gain[0] = hdcd_analyze(samples, lead, stride, gain[0], ctx->val_target_gain, peak_extend[0],
+ ctx->analyze_mode,
+ ctx->state[0].sustain,
+ (ctlret == HDCD_TG_MISMATCH) );
+ gain[1] = hdcd_analyze(samples + 1, lead, stride, gain[1], ctx->val_target_gain, peak_extend[1],
+ ctx->analyze_mode,
+ ctx->state[1].sustain,
+ (ctlret == HDCD_TG_MISMATCH) );
+ } else {
+ gain[0] = hdcd_envelope(samples, lead, stride, ctx->bits_per_sample, gain[0], ctx->val_target_gain, peak_extend[0]);
+ gain[1] = hdcd_envelope(samples + 1, lead, stride, ctx->bits_per_sample, gain[1], ctx->val_target_gain, peak_extend[1]);
+ }
+ }
+
+ ctx->state[0].running_gain = gain[0];
+ ctx->state[1].running_gain = gain[1];
+}
+
+static void hdcd_detect_reset(hdcd_detection_data *detect) {
+ detect->hdcd_detected = HDCD_NONE;
+ detect->packet_type = HDCD_PVER_NONE;
+ detect->total_packets = 0;
+ detect->errors = 0;
+ detect->peak_extend = HDCD_PE_NEVER;
+ detect->uses_transient_filter = 0;
+ detect->max_gain_adjustment = 0.0;
+ detect->cdt_expirations = -1;
+ detect->_active_count = 0;
+}
+
+static void hdcd_detect_start(hdcd_detection_data *detect) {
+ detect->errors = 0; /* re-sum every pass */
+ detect->total_packets = 0;
+ detect->_active_count = 0; /* will need to match channels at hdcd_detect_end() */
+ detect->cdt_expirations = -1;
+}
+
+static void hdcd_detect_onech(hdcd_state *state, hdcd_detection_data *detect) {
+ hdcd_pe pe = HDCD_PE_NEVER;
+ detect->uses_transient_filter |= !!(state->count_transient_filter);
+ detect->total_packets += state->code_counterA + state->code_counterB;
+ if (state->code_counterA) detect->packet_type |= HDCD_PVER_A;
+ if (state->code_counterB) detect->packet_type |= HDCD_PVER_B;
+ if (state->count_peak_extend) {
+ /* if every valid packet has used PE, call it permanent */
+ if (state->count_peak_extend == state->code_counterA + state->code_counterB)
+ pe = HDCD_PE_PERMANENT;
+ else
+ pe = HDCD_PE_INTERMITTENT;
+ if (detect->peak_extend != HDCD_PE_INTERMITTENT)
+ detect->peak_extend = pe;
+ }
+ detect->max_gain_adjustment = FFMIN(detect->max_gain_adjustment, GAINTOFLOAT(state->max_gain));
+ detect->errors += state->code_counterA_almost
+ + state->code_counterB_checkfails
+ + state->code_counterC_unmatched;
+ if (state->sustain) detect->_active_count++;
+ if (state->count_sustain_expired >= 0) {
+ if (detect->cdt_expirations == -1) detect->cdt_expirations = 0;
+ detect->cdt_expirations += state->count_sustain_expired;
+ }
+}
+
+static void hdcd_detect_end(hdcd_detection_data *detect, int channels) {
+ /* HDCD is detected if a valid packet is active in all
+ * channels at the same time. */
+ if (detect->_active_count == channels) {
+ if (detect->max_gain_adjustment || detect->peak_extend)
+ detect->hdcd_detected = HDCD_EFFECTUAL;
+ else
+ detect->hdcd_detected = HDCD_NO_EFFECT;
+ }
+}
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
@@ -74,9 +1528,10 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
AVFilterLink *outlink = ctx->outputs[0];
AVFrame *out;
const int16_t *in_data;
+ const int32_t *in_data32;
int32_t *out_data;
- int n, result;
- int channel_count = av_get_channel_layout_nb_channels(in->channel_layout);
+ int n, c, result;
+ int a = 32 - s->bits_per_sample;
out = ff_get_audio_buffer(outlink, in->nb_samples);
if (!out) {
@@ -89,13 +1544,53 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
av_frame_free(&in);
return result;
}
+ out->format = outlink->format; // is this needed?
- in_data = (int16_t *)in->data[0];
- out_data = (int32_t *)out->data[0];
- for (n = 0; n < in->nb_samples * channel_count; n++)
- out_data[n] = in_data[n];
+ out_data = (int32_t*)out->data[0];
+ switch (inlink->format) {
+ case AV_SAMPLE_FMT_S16P:
+ for (n = 0; n < in->nb_samples; n++)
+ for (c = 0; c < in->channels; c++) {
+ in_data = (int16_t*)in->extended_data[c];
+ out_data[(n * in->channels) + c] = in_data[n];
+ }
+ break;
+ case AV_SAMPLE_FMT_S16:
+ in_data = (int16_t*)in->data[0];
+ for (n = 0; n < in->nb_samples * in->channels; n++)
+ out_data[n] = in_data[n];
+ break;
- hdcd_process(s->shdcd, out_data, in->nb_samples);
+ case AV_SAMPLE_FMT_S32P:
+ for (n = 0; n < in->nb_samples; n++)
+ for (c = 0; c < in->channels; c++) {
+ in_data32 = (int32_t*)in->extended_data[c];
+ out_data[(n * in->channels) + c] = in_data32[n] >> a;
+ }
+ break;
+ case AV_SAMPLE_FMT_S32:
+ in_data32 = (int32_t*)in->data[0];
+ for (n = 0; n < in->nb_samples * in->channels; n++)
+ out_data[n] = in_data32[n] >> a;
+ break;
+ }
+
+ if (s->process_stereo) {
+ hdcd_detect_start(&s->detect);
+ hdcd_process_stereo(s, out_data, in->nb_samples);
+ hdcd_detect_onech(&s->state[0], &s->detect);
+ hdcd_detect_onech(&s->state[1], &s->detect);
+ hdcd_detect_end(&s->detect, 2);
+ } else {
+ hdcd_detect_start(&s->detect);
+ for (c = 0; c < in->channels; c++) {
+ hdcd_process(s, &s->state[c], out_data + c, in->nb_samples, in->channels);
+ hdcd_detect_onech(&s->state[c], &s->detect);
+ }
+ hdcd_detect_end(&s->detect, in->channels);
+ }
+
+ s->sample_count += in->nb_samples * in->channels;
av_frame_free(&in);
return ff_filter_frame(outlink, out);
@@ -103,66 +1598,155 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
static int query_formats(AVFilterContext *ctx)
{
- AVFilterFormats *in_formats, *out_formats, *sample_rates = NULL;
+ static const int sample_rates[] = {
+ 44100, 48000,
+ 88200, 96000,
+ 176400, 192000,
+ -1
+ };
+ AVFilterFormats *in_formats;
+ AVFilterFormats *out_formats;
AVFilterChannelLayouts *layouts = NULL;
AVFilterLink *inlink = ctx->inputs[0];
AVFilterLink *outlink = ctx->outputs[0];
static const enum AVSampleFormat sample_fmts_in[] = {
AV_SAMPLE_FMT_S16,
+ AV_SAMPLE_FMT_S16P,
+ AV_SAMPLE_FMT_S32,
+ AV_SAMPLE_FMT_S32P,
AV_SAMPLE_FMT_NONE
};
static const enum AVSampleFormat sample_fmts_out[] = {
AV_SAMPLE_FMT_S32,
AV_SAMPLE_FMT_NONE
};
+ int ret;
- ff_add_channel_layout(&layouts, AV_CH_LAYOUT_STEREO);
+ ret = ff_add_channel_layout(&layouts, AV_CH_LAYOUT_MONO);
+ if (ret < 0)
+ return ret;
+ ret = ff_add_channel_layout(&layouts, AV_CH_LAYOUT_STEREO);
+ if (ret < 0)
+ return ret;
- ff_set_common_channel_layouts(ctx, layouts);
+ ret = ff_set_common_channel_layouts(ctx, layouts);
+ if (ret < 0)
+ return ret;
- in_formats = ff_make_format_list(sample_fmts_in);
- out_formats = ff_make_format_list(sample_fmts_out);
- if (!in_formats || !out_formats)
- return AVERROR(ENOMEM);
+ in_formats = ff_make_format_list(sample_fmts_in);
+ ret = ff_formats_ref(in_formats, &inlink->out_formats);
+ if (ret < 0)
+ return ret;
- ff_formats_ref(in_formats, &inlink->out_formats);
- ff_formats_ref(out_formats, &outlink->in_formats);
+ out_formats = ff_make_format_list(sample_fmts_out);
+ ret = ff_formats_ref(out_formats, &outlink->in_formats);
+ if (ret < 0)
+ return ret;
- ff_add_format(&sample_rates, 44100);
- ff_set_common_samplerates(ctx, sample_rates);
- return 0;
+ return
+ ff_set_common_samplerates(ctx, ff_make_format_list(sample_rates) );
}
static av_cold void uninit(AVFilterContext *ctx)
{
HDCDContext *s = ctx->priv;
- char detect_str[256] = "";
+ int i, j;
- /* log the HDCD decode information */
- hdcd_detect_str(s->shdcd, detect_str, sizeof(detect_str));
- av_log(ctx, AV_LOG_INFO, "%s\n", detect_str);
+ /* dump the state for each channel for AV_LOG_VERBOSE */
+ for (i = 0; i < HDCD_MAX_CHANNELS; i++) {
+ hdcd_state *state = &s->state[i];
+ av_log(ctx, AV_LOG_VERBOSE, "Channel %d: counter A: %d, B: %d, C: %d\n", i,
+ state->code_counterA, state->code_counterB, state->code_counterC);
+ av_log(ctx, AV_LOG_VERBOSE, "Channel %d: pe: %d, tf: %d, almost_A: %d, checkfail_B: %d, unmatched_C: %d, cdt_expired: %d\n", i,
+ state->count_peak_extend,
+ state->count_transient_filter,
+ state->code_counterA_almost,
+ state->code_counterB_checkfails,
+ state->code_counterC_unmatched,
+ state->count_sustain_expired);
+ for (j = 0; j <= state->max_gain; j++) {
+ av_log(ctx, AV_LOG_VERBOSE, "Channel %d: tg %0.1f: %d\n", i, GAINTOFLOAT(j), state->gain_counts[j]);
+ }
+ }
+ av_log(ctx, AV_LOG_VERBOSE, "Packets: type: %s, total: %d\n",
+ pf_str[s->detect.packet_type],
+ s->detect.total_packets);
- hdcd_free(s->shdcd);
+ /* log the HDCD decode information */
+ if (s->detect.hdcd_detected)
+ av_log(ctx, AV_LOG_INFO,
+ "HDCD detected: yes, peak_extend: %s, max_gain_adj: %0.1f dB, transient_filter: %s, detectable errors: %d%s%s\n",
+ pe_str[s->detect.peak_extend],
+ s->detect.max_gain_adjustment,
+ (s->detect.uses_transient_filter) ? "detected" : "not detected",
+ s->detect.errors, (s->detect.errors) ? " (try -v verbose)" : "",
+ (s->bad_config) ? " (bad_config)" : ""
+ );
+ else
+ av_log(ctx, AV_LOG_INFO, "HDCD detected: no%s\n",
+ (s->bad_config) ? " (bad_config)" : ""
+ );
}
-/** callback for error logging */
-static void af_hdcd_log(const void *priv, const char *fmt, va_list args)
-{
- av_vlog((AVFilterContext *)priv, AV_LOG_VERBOSE, fmt, args);
-}
static av_cold int init(AVFilterContext *ctx)
{
HDCDContext *s = ctx->priv;
- s->shdcd = hdcd_new();
- hdcd_logger_attach(s->shdcd, af_hdcd_log, ctx);
+ s->sample_count = 0;
+ s->fctx = ctx;
+ s->bad_config = 0;
+
+ if (s->disable_autoconvert) {
+ av_log(ctx, AV_LOG_VERBOSE, "Disabling automatic format conversion.\n");
+ avfilter_graph_set_auto_convert(ctx->graph, AVFILTER_AUTO_CONVERT_NONE);
+ }
+ return 0;
+}
+
+static int config_input(AVFilterLink *inlink) {
+ AVFilterContext *ctx = inlink->dst;
+ HDCDContext *s = ctx->priv;
+ int c;
+
+ av_log(ctx, AV_LOG_VERBOSE, "Auto-convert: %s\n",
+ (ctx->graph->disable_auto_convert) ? "disabled" : "enabled");
+
+ if ((inlink->format == AV_SAMPLE_FMT_S16 ||
+ inlink->format == AV_SAMPLE_FMT_S16P) &&
+ s->bits_per_sample != 16) {
+ av_log(ctx, AV_LOG_WARNING, "bits_per_sample %d does not fit into sample format %s, falling back to 16\n",
+ s->bits_per_sample, av_get_sample_fmt_name(inlink->format) );
+ s->bits_per_sample = 16;
+ } else {
+ av_log(ctx, AV_LOG_VERBOSE, "Looking for %d-bit HDCD in sample format %s\n",
+ s->bits_per_sample, av_get_sample_fmt_name(inlink->format) );
+ }
+
+ if (s->bits_per_sample != 16)
+ av_log(ctx, AV_LOG_WARNING, "20 and 24-bit HDCD decoding is experimental\n");
+ if (inlink->sample_rate != 44100)
+ av_log(ctx, AV_LOG_WARNING, "HDCD decoding for sample rates other than 44100 is experimental\n");
+
+ hdcd_detect_reset(&s->detect);
+ for (c = 0; c < HDCD_MAX_CHANNELS; c++) {
+ hdcd_reset(&s->state[c], inlink->sample_rate, s->cdt_ms);
+ }
+ av_log(ctx, AV_LOG_VERBOSE, "CDT period: %dms (%u samples @44100Hz)\n",
+ s->cdt_ms, s->state[0].sustain_reset );
+
+ if (inlink->channels != 2 && s->process_stereo) {
+ av_log(ctx, AV_LOG_WARNING, "process_stereo disabled (channels = %d)\n", inlink->channels);
+ s->process_stereo = 0;
+ }
+ av_log(ctx, AV_LOG_VERBOSE, "Process mode: %s\n",
+ (s->process_stereo) ? "process stereo channels together" : "process each channel separately");
- if (s->analyze_mode)
- hdcd_analyze_mode(s->shdcd, s->analyze_mode);
+ av_log(ctx, AV_LOG_VERBOSE, "Force PE: %s\n",
+ (s->force_pe) ? "on" : "off");
av_log(ctx, AV_LOG_VERBOSE, "Analyze mode: [%d] %s\n",
- s->analyze_mode, hdcd_str_analyze_mode_desc(s->analyze_mode));
+ s->analyze_mode, ana_mode_str[s->analyze_mode] );
return 0;
}
@@ -172,6 +1756,7 @@ static const AVFilterPad avfilter_af_hdcd_inputs[] = {
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
+ .config_props = config_input,
},
{ NULL }
};
diff --git a/libavfilter/af_join.c b/libavfilter/af_join.c
index 4d86c8b357..bd780cc379 100644
--- a/libavfilter/af_join.c
+++ b/libavfilter/af_join.c
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -65,28 +65,26 @@ typedef struct JoinContext {
#define OFFSET(x) offsetof(JoinContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
+#define F AV_OPT_FLAG_FILTERING_PARAM
static const AVOption join_options[] = {
- { "inputs", "Number of input streams.", OFFSET(inputs), AV_OPT_TYPE_INT, { .i64 = 2 }, 1, INT_MAX, A },
+ { "inputs", "Number of input streams.", OFFSET(inputs), AV_OPT_TYPE_INT, { .i64 = 2 }, 1, INT_MAX, A|F },
{ "channel_layout", "Channel layout of the "
- "output stream.", OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, {.str = "stereo"}, 0, 0, A },
+ "output stream.", OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, {.str = "stereo"}, 0, 0, A|F },
{ "map", "A comma-separated list of channels maps in the format "
"'input_stream.input_channel-output_channel.",
- OFFSET(map), AV_OPT_TYPE_STRING, .flags = A },
- { NULL },
+ OFFSET(map), AV_OPT_TYPE_STRING, .flags = A|F },
+ { NULL }
};
-static const AVClass join_class = {
- .class_name = "join filter",
- .item_name = av_default_item_name,
- .option = join_options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(join);
+
+static int try_push_frame(AVFilterContext *ctx);
static int filter_frame(AVFilterLink *link, AVFrame *frame)
{
AVFilterContext *ctx = link->dst;
JoinContext *s = ctx->priv;
- int i;
+ int i, j;
for (i = 0; i < ctx->nb_inputs; i++)
if (link == ctx->inputs[i])
@@ -95,7 +93,17 @@ static int filter_frame(AVFilterLink *link, AVFrame *frame)
av_assert0(!s->input_frames[i]);
s->input_frames[i] = frame;
- return 0;
+ /* request the same number of samples on all inputs */
+ /* FIXME that means a frame arriving asynchronously on a different input
+ will not have the requested number of samples */
+ if (i == 0) {
+ int nb_samples = s->input_frames[0]->nb_samples;
+
+ for (j = 1; !i && j < ctx->nb_inputs; j++)
+ ctx->inputs[j]->request_samples = nb_samples;
+ }
+
+ return try_push_frame(ctx);
}
static int parse_maps(AVFilterContext *ctx)
@@ -193,18 +201,15 @@ static av_cold int join_init(AVFilterContext *ctx)
if (!(s->channel_layout = av_get_channel_layout(s->channel_layout_str))) {
av_log(ctx, AV_LOG_ERROR, "Error parsing channel layout '%s'.\n",
s->channel_layout_str);
- ret = AVERROR(EINVAL);
- goto fail;
+ return AVERROR(EINVAL);
}
s->nb_channels = av_get_channel_layout_nb_channels(s->channel_layout);
- s->channels = av_mallocz(sizeof(*s->channels) * s->nb_channels);
- s->buffers = av_mallocz(sizeof(*s->buffers) * s->nb_channels);
- s->input_frames = av_mallocz(sizeof(*s->input_frames) * s->inputs);
- if (!s->channels || !s->buffers|| !s->input_frames) {
- ret = AVERROR(ENOMEM);
- goto fail;
- }
+ s->channels = av_mallocz_array(s->nb_channels, sizeof(*s->channels));
+ s->buffers = av_mallocz_array(s->nb_channels, sizeof(*s->buffers));
+ s->input_frames = av_mallocz_array(s->inputs, sizeof(*s->input_frames));
+ if (!s->channels || !s->buffers|| !s->input_frames)
+ return AVERROR(ENOMEM);
for (i = 0; i < s->nb_channels; i++) {
s->channels[i].out_channel = av_channel_layout_extract_channel(s->channel_layout, i);
@@ -212,7 +217,7 @@ static av_cold int join_init(AVFilterContext *ctx)
}
if ((ret = parse_maps(ctx)) < 0)
- goto fail;
+ return ret;
for (i = 0; i < s->inputs; i++) {
char name[32];
@@ -221,6 +226,8 @@ static av_cold int join_init(AVFilterContext *ctx)
snprintf(name, sizeof(name), "input%d", i);
pad.type = AVMEDIA_TYPE_AUDIO;
pad.name = av_strdup(name);
+ if (!pad.name)
+ return AVERROR(ENOMEM);
pad.filter_frame = filter_frame;
pad.needs_fifo = 1;
@@ -228,9 +235,7 @@ static av_cold int join_init(AVFilterContext *ctx)
ff_insert_inpad(ctx, i, &pad);
}
-fail:
- av_opt_free(s);
- return ret;
+ return 0;
}
static av_cold void join_uninit(AVFilterContext *ctx)
@@ -252,17 +257,21 @@ static int join_query_formats(AVFilterContext *ctx)
{
JoinContext *s = ctx->priv;
AVFilterChannelLayouts *layouts = NULL;
- int i;
+ int i, ret;
- ff_add_channel_layout(&layouts, s->channel_layout);
- ff_channel_layouts_ref(layouts, &ctx->outputs[0]->in_channel_layouts);
+ if ((ret = ff_add_channel_layout(&layouts, s->channel_layout)) < 0 ||
+ (ret = ff_channel_layouts_ref(layouts, &ctx->outputs[0]->in_channel_layouts)) < 0)
+ return ret;
- for (i = 0; i < ctx->nb_inputs; i++)
- ff_channel_layouts_ref(ff_all_channel_layouts(),
- &ctx->inputs[i]->out_channel_layouts);
+ for (i = 0; i < ctx->nb_inputs; i++) {
+ layouts = ff_all_channel_layouts();
+ if ((ret = ff_channel_layouts_ref(layouts, &ctx->inputs[i]->out_channel_layouts)) < 0)
+ return ret;
+ }
- ff_set_common_formats (ctx, ff_planar_sample_fmts());
- ff_set_common_samplerates(ctx, ff_all_samplerates());
+ if ((ret = ff_set_common_formats(ctx, ff_planar_sample_fmts())) < 0 ||
+ (ret = ff_set_common_samplerates(ctx, ff_all_samplerates())) < 0)
+ return ret;
return 0;
}
@@ -312,7 +321,7 @@ static int join_config_output(AVFilterLink *outlink)
int i, ret = 0;
/* initialize inputs to user-specified mappings */
- if (!(inputs = av_mallocz(sizeof(*inputs) * ctx->nb_inputs)))
+ if (!(inputs = av_mallocz_array(ctx->nb_inputs, sizeof(*inputs))))
return AVERROR(ENOMEM);
for (i = 0; i < s->nb_channels; i++) {
ChannelMap *ch = &s->channels[i];
@@ -390,27 +399,31 @@ static int join_request_frame(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
JoinContext *s = ctx->priv;
- AVFrame *frame;
- int linesize = INT_MAX;
- int nb_samples = 0;
- int nb_buffers = 0;
- int i, j, ret;
+ int i;
/* get a frame on each input */
for (i = 0; i < ctx->nb_inputs; i++) {
AVFilterLink *inlink = ctx->inputs[i];
+ if (!s->input_frames[i])
+ return ff_request_frame(inlink);
+ }
+ return 0;
+}
- if (!s->input_frames[i] &&
- (ret = ff_request_frame(inlink)) < 0)
- return ret;
-
- /* request the same number of samples on all inputs */
- if (i == 0) {
- nb_samples = s->input_frames[0]->nb_samples;
+static int try_push_frame(AVFilterContext *ctx)
+{
+ AVFilterLink *outlink = ctx->outputs[0];
+ JoinContext *s = ctx->priv;
+ AVFrame *frame;
+ int linesize = INT_MAX;
+ int nb_samples = INT_MAX;
+ int nb_buffers = 0;
+ int i, j, ret;
- for (j = 1; !i && j < ctx->nb_inputs; j++)
- ctx->inputs[j]->request_samples = nb_samples;
- }
+ for (i = 0; i < ctx->nb_inputs; i++) {
+ if (!s->input_frames[i])
+ return 0;
+ nb_samples = FFMIN(nb_samples, s->input_frames[i]->nb_samples);
}
/* setup the output frame */
@@ -418,7 +431,7 @@ static int join_request_frame(AVFilterLink *outlink)
if (!frame)
return AVERROR(ENOMEM);
if (s->nb_channels > FF_ARRAY_ELEMS(frame->data)) {
- frame->extended_data = av_mallocz(s->nb_channels *
+ frame->extended_data = av_mallocz_array(s->nb_channels,
sizeof(*frame->extended_data));
if (!frame->extended_data) {
ret = AVERROR(ENOMEM);
@@ -452,8 +465,8 @@ static int join_request_frame(AVFilterLink *outlink)
/* create references to the buffers we copied to output */
if (nb_buffers > FF_ARRAY_ELEMS(frame->buf)) {
frame->nb_extended_buf = nb_buffers - FF_ARRAY_ELEMS(frame->buf);
- frame->extended_buf = av_mallocz(sizeof(*frame->extended_buf) *
- frame->nb_extended_buf);
+ frame->extended_buf = av_mallocz_array(frame->nb_extended_buf,
+ sizeof(*frame->extended_buf));
if (!frame->extended_buf) {
frame->nb_extended_buf = 0;
ret = AVERROR(ENOMEM);
@@ -478,6 +491,7 @@ static int join_request_frame(AVFilterLink *outlink)
frame->nb_samples = nb_samples;
frame->channel_layout = outlink->channel_layout;
+ av_frame_set_channels(frame, outlink->channels);
frame->sample_rate = outlink->sample_rate;
frame->format = outlink->format;
frame->pts = s->input_frames[0]->pts;
@@ -512,16 +526,13 @@ static const AVFilterPad avfilter_af_join_outputs[] = {
AVFilter ff_af_join = {
.name = "join",
.description = NULL_IF_CONFIG_SMALL("Join multiple audio streams into "
- "multi-channel output"),
+ "multi-channel output."),
.priv_size = sizeof(JoinContext),
.priv_class = &join_class,
-
.init = join_init,
.uninit = join_uninit,
.query_formats = join_query_formats,
-
- .inputs = NULL,
- .outputs = avfilter_af_join_outputs,
-
- .flags = AVFILTER_FLAG_DYNAMIC_INPUTS,
+ .inputs = NULL,
+ .outputs = avfilter_af_join_outputs,
+ .flags = AVFILTER_FLAG_DYNAMIC_INPUTS,
};
diff --git a/libavfilter/af_ladspa.c b/libavfilter/af_ladspa.c
new file mode 100644
index 0000000000..5532dacd73
--- /dev/null
+++ b/libavfilter/af_ladspa.c
@@ -0,0 +1,748 @@
+/*
+ * Copyright (c) 2013 Paul B Mahol
+ * Copyright (c) 2011 Mina Nagy Zaki
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * LADSPA wrapper
+ */
+
+#include <dlfcn.h>
+#include <ladspa.h>
+#include "libavutil/avassert.h"
+#include "libavutil/avstring.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/opt.h"
+#include "audio.h"
+#include "avfilter.h"
+#include "internal.h"
+
+typedef struct LADSPAContext {
+ const AVClass *class;
+ char *dl_name;
+ char *plugin;
+ char *options;
+ void *dl_handle;
+
+ unsigned long nb_inputs;
+ unsigned long *ipmap; /* map input number to port number */
+
+ unsigned long nb_inputcontrols;
+ unsigned long *icmap; /* map input control number to port number */
+ LADSPA_Data *ictlv; /* input controls values */
+
+ unsigned long nb_outputs;
+ unsigned long *opmap; /* map output number to port number */
+
+ unsigned long nb_outputcontrols;
+ unsigned long *ocmap; /* map output control number to port number */
+ LADSPA_Data *octlv; /* output controls values */
+
+ const LADSPA_Descriptor *desc;
+ int *ctl_needs_value;
+ int nb_handles;
+ LADSPA_Handle *handles;
+
+ int sample_rate;
+ int nb_samples;
+ int64_t pts;
+ int64_t duration;
+} LADSPAContext;
+
+#define OFFSET(x) offsetof(LADSPAContext, x)
+#define FLAGS AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
+static const AVOption ladspa_options[] = {
+ { "file", "set library name or full path", OFFSET(dl_name), AV_OPT_TYPE_STRING, .flags = FLAGS },
+ { "f", "set library name or full path", OFFSET(dl_name), AV_OPT_TYPE_STRING, .flags = FLAGS },
+ { "plugin", "set plugin name", OFFSET(plugin), AV_OPT_TYPE_STRING, .flags = FLAGS },
+ { "p", "set plugin name", OFFSET(plugin), AV_OPT_TYPE_STRING, .flags = FLAGS },
+ { "controls", "set plugin options", OFFSET(options), AV_OPT_TYPE_STRING, .flags = FLAGS },
+ { "c", "set plugin options", OFFSET(options), AV_OPT_TYPE_STRING, .flags = FLAGS },
+ { "sample_rate", "set sample rate", OFFSET(sample_rate), AV_OPT_TYPE_INT, {.i64=44100}, 1, INT32_MAX, FLAGS },
+ { "s", "set sample rate", OFFSET(sample_rate), AV_OPT_TYPE_INT, {.i64=44100}, 1, INT32_MAX, FLAGS },
+ { "nb_samples", "set the number of samples per requested frame", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64=1024}, 1, INT_MAX, FLAGS },
+ { "n", "set the number of samples per requested frame", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64=1024}, 1, INT_MAX, FLAGS },
+ { "duration", "set audio duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64=-1}, -1, INT64_MAX, FLAGS },
+ { "d", "set audio duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64=-1}, -1, INT64_MAX, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(ladspa);
+
+static void print_ctl_info(AVFilterContext *ctx, int level,
+ LADSPAContext *s, int ctl, unsigned long *map,
+ LADSPA_Data *values, int print)
+{
+ const LADSPA_PortRangeHint *h = s->desc->PortRangeHints + map[ctl];
+
+ av_log(ctx, level, "c%i: %s [", ctl, s->desc->PortNames[map[ctl]]);
+
+ if (LADSPA_IS_HINT_TOGGLED(h->HintDescriptor)) {
+ av_log(ctx, level, "toggled (1 or 0)");
+
+ if (LADSPA_IS_HINT_HAS_DEFAULT(h->HintDescriptor))
+ av_log(ctx, level, " (default %i)", (int)values[ctl]);
+ } else {
+ if (LADSPA_IS_HINT_INTEGER(h->HintDescriptor)) {
+ av_log(ctx, level, "<int>");
+
+ if (LADSPA_IS_HINT_BOUNDED_BELOW(h->HintDescriptor))
+ av_log(ctx, level, ", min: %i", (int)h->LowerBound);
+
+ if (LADSPA_IS_HINT_BOUNDED_ABOVE(h->HintDescriptor))
+ av_log(ctx, level, ", max: %i", (int)h->UpperBound);
+
+ if (print)
+ av_log(ctx, level, " (value %d)", (int)values[ctl]);
+ else if (LADSPA_IS_HINT_HAS_DEFAULT(h->HintDescriptor))
+ av_log(ctx, level, " (default %d)", (int)values[ctl]);
+ } else {
+ av_log(ctx, level, "<float>");
+
+ if (LADSPA_IS_HINT_BOUNDED_BELOW(h->HintDescriptor))
+ av_log(ctx, level, ", min: %f", h->LowerBound);
+
+ if (LADSPA_IS_HINT_BOUNDED_ABOVE(h->HintDescriptor))
+ av_log(ctx, level, ", max: %f", h->UpperBound);
+
+ if (print)
+ av_log(ctx, level, " (value %f)", values[ctl]);
+ else if (LADSPA_IS_HINT_HAS_DEFAULT(h->HintDescriptor))
+ av_log(ctx, level, " (default %f)", values[ctl]);
+ }
+
+ if (LADSPA_IS_HINT_SAMPLE_RATE(h->HintDescriptor))
+ av_log(ctx, level, ", multiple of sample rate");
+
+ if (LADSPA_IS_HINT_LOGARITHMIC(h->HintDescriptor))
+ av_log(ctx, level, ", logarithmic scale");
+ }
+
+ av_log(ctx, level, "]\n");
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ LADSPAContext *s = ctx->priv;
+ AVFrame *out;
+ int i, h, p;
+
+ av_assert0(in->channels == (s->nb_inputs * s->nb_handles));
+
+ if (!s->nb_outputs ||
+ (av_frame_is_writable(in) && s->nb_inputs == s->nb_outputs &&
+ !(s->desc->Properties & LADSPA_PROPERTY_INPLACE_BROKEN))) {
+ out = in;
+ } else {
+ out = ff_get_audio_buffer(ctx->outputs[0], in->nb_samples);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+ }
+
+ av_assert0(!s->nb_outputs || out->channels == (s->nb_outputs * s->nb_handles));
+
+ for (h = 0; h < s->nb_handles; h++) {
+ for (i = 0; i < s->nb_inputs; i++) {
+ p = s->nb_handles > 1 ? h : i;
+ s->desc->connect_port(s->handles[h], s->ipmap[i],
+ (LADSPA_Data*)in->extended_data[p]);
+ }
+
+ for (i = 0; i < s->nb_outputs; i++) {
+ p = s->nb_handles > 1 ? h : i;
+ s->desc->connect_port(s->handles[h], s->opmap[i],
+ (LADSPA_Data*)out->extended_data[p]);
+ }
+
+ s->desc->run(s->handles[h], in->nb_samples);
+ }
+
+ for (i = 0; i < s->nb_outputcontrols; i++)
+ print_ctl_info(ctx, AV_LOG_VERBOSE, s, i, s->ocmap, s->octlv, 1);
+
+ if (out != in)
+ av_frame_free(&in);
+
+ return ff_filter_frame(ctx->outputs[0], out);
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ LADSPAContext *s = ctx->priv;
+ AVFrame *out;
+ int64_t t;
+ int i;
+
+ if (ctx->nb_inputs)
+ return ff_request_frame(ctx->inputs[0]);
+
+ t = av_rescale(s->pts, AV_TIME_BASE, s->sample_rate);
+ if (s->duration >= 0 && t >= s->duration)
+ return AVERROR_EOF;
+
+ out = ff_get_audio_buffer(outlink, s->nb_samples);
+ if (!out)
+ return AVERROR(ENOMEM);
+
+ for (i = 0; i < s->nb_outputs; i++)
+ s->desc->connect_port(s->handles[0], s->opmap[i],
+ (LADSPA_Data*)out->extended_data[i]);
+
+ s->desc->run(s->handles[0], s->nb_samples);
+
+ for (i = 0; i < s->nb_outputcontrols; i++)
+ print_ctl_info(ctx, AV_LOG_INFO, s, i, s->ocmap, s->octlv, 1);
+
+ out->sample_rate = s->sample_rate;
+ out->pts = s->pts;
+ s->pts += s->nb_samples;
+
+ return ff_filter_frame(outlink, out);
+}
+
+static void set_default_ctl_value(LADSPAContext *s, int ctl,
+ unsigned long *map, LADSPA_Data *values)
+{
+ const LADSPA_PortRangeHint *h = s->desc->PortRangeHints + map[ctl];
+ const LADSPA_Data lower = h->LowerBound;
+ const LADSPA_Data upper = h->UpperBound;
+
+ if (LADSPA_IS_HINT_DEFAULT_MINIMUM(h->HintDescriptor)) {
+ values[ctl] = lower;
+ } else if (LADSPA_IS_HINT_DEFAULT_MAXIMUM(h->HintDescriptor)) {
+ values[ctl] = upper;
+ } else if (LADSPA_IS_HINT_DEFAULT_0(h->HintDescriptor)) {
+ values[ctl] = 0.0;
+ } else if (LADSPA_IS_HINT_DEFAULT_1(h->HintDescriptor)) {
+ values[ctl] = 1.0;
+ } else if (LADSPA_IS_HINT_DEFAULT_100(h->HintDescriptor)) {
+ values[ctl] = 100.0;
+ } else if (LADSPA_IS_HINT_DEFAULT_440(h->HintDescriptor)) {
+ values[ctl] = 440.0;
+ } else if (LADSPA_IS_HINT_DEFAULT_LOW(h->HintDescriptor)) {
+ if (LADSPA_IS_HINT_LOGARITHMIC(h->HintDescriptor))
+ values[ctl] = exp(log(lower) * 0.75 + log(upper) * 0.25);
+ else
+ values[ctl] = lower * 0.75 + upper * 0.25;
+ } else if (LADSPA_IS_HINT_DEFAULT_MIDDLE(h->HintDescriptor)) {
+ if (LADSPA_IS_HINT_LOGARITHMIC(h->HintDescriptor))
+ values[ctl] = exp(log(lower) * 0.5 + log(upper) * 0.5);
+ else
+ values[ctl] = lower * 0.5 + upper * 0.5;
+ } else if (LADSPA_IS_HINT_DEFAULT_HIGH(h->HintDescriptor)) {
+ if (LADSPA_IS_HINT_LOGARITHMIC(h->HintDescriptor))
+ values[ctl] = exp(log(lower) * 0.25 + log(upper) * 0.75);
+ else
+ values[ctl] = lower * 0.25 + upper * 0.75;
+ }
+}
+
+static int connect_ports(AVFilterContext *ctx, AVFilterLink *link)
+{
+ LADSPAContext *s = ctx->priv;
+ int i, j;
+
+ s->nb_handles = s->nb_inputs == 1 && s->nb_outputs == 1 ? link->channels : 1;
+ s->handles = av_calloc(s->nb_handles, sizeof(*s->handles));
+ if (!s->handles)
+ return AVERROR(ENOMEM);
+
+ for (i = 0; i < s->nb_handles; i++) {
+ s->handles[i] = s->desc->instantiate(s->desc, link->sample_rate);
+ if (!s->handles[i]) {
+ av_log(ctx, AV_LOG_ERROR, "Could not instantiate plugin.\n");
+ return AVERROR_EXTERNAL;
+ }
+
+ // Connect the input control ports
+ for (j = 0; j < s->nb_inputcontrols; j++)
+ s->desc->connect_port(s->handles[i], s->icmap[j], s->ictlv + j);
+
+ // Connect the output control ports
+ for (j = 0; j < s->nb_outputcontrols; j++)
+ s->desc->connect_port(s->handles[i], s->ocmap[j], &s->octlv[j]);
+
+ if (s->desc->activate)
+ s->desc->activate(s->handles[i]);
+ }
+
+ av_log(ctx, AV_LOG_DEBUG, "handles: %d\n", s->nb_handles);
+
+ return 0;
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+
+ return connect_ports(ctx, inlink);
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ LADSPAContext *s = ctx->priv;
+ int ret;
+
+ if (ctx->nb_inputs) {
+ AVFilterLink *inlink = ctx->inputs[0];
+
+ outlink->format = inlink->format;
+ outlink->sample_rate = inlink->sample_rate;
+ if (s->nb_inputs == s->nb_outputs) {
+ outlink->channel_layout = inlink->channel_layout;
+ outlink->channels = inlink->channels;
+ }
+
+ ret = 0;
+ } else {
+ LADSPAContext *s = ctx->priv;
+
+ outlink->sample_rate = s->sample_rate;
+ outlink->time_base = (AVRational){1, s->sample_rate};
+
+ ret = connect_ports(ctx, outlink);
+ }
+
+ return ret;
+}
+
+static void count_ports(const LADSPA_Descriptor *desc,
+ unsigned long *nb_inputs, unsigned long *nb_outputs)
+{
+ LADSPA_PortDescriptor pd;
+ int i;
+
+ for (i = 0; i < desc->PortCount; i++) {
+ pd = desc->PortDescriptors[i];
+
+ if (LADSPA_IS_PORT_AUDIO(pd)) {
+ if (LADSPA_IS_PORT_INPUT(pd)) {
+ (*nb_inputs)++;
+ } else if (LADSPA_IS_PORT_OUTPUT(pd)) {
+ (*nb_outputs)++;
+ }
+ }
+ }
+}
+
+static void *try_load(const char *dir, const char *soname)
+{
+ char *path = av_asprintf("%s/%s.so", dir, soname);
+ void *ret = NULL;
+
+ if (path) {
+ ret = dlopen(path, RTLD_LOCAL|RTLD_NOW);
+ av_free(path);
+ }
+
+ return ret;
+}
+
+static int set_control(AVFilterContext *ctx, unsigned long port, LADSPA_Data value)
+{
+ LADSPAContext *s = ctx->priv;
+ const char *label = s->desc->Label;
+ LADSPA_PortRangeHint *h = (LADSPA_PortRangeHint *)s->desc->PortRangeHints +
+ s->icmap[port];
+
+ if (port >= s->nb_inputcontrols) {
+ av_log(ctx, AV_LOG_ERROR, "Control c%ld is out of range [0 - %lu].\n",
+ port, s->nb_inputcontrols);
+ return AVERROR(EINVAL);
+ }
+
+ if (LADSPA_IS_HINT_BOUNDED_BELOW(h->HintDescriptor) &&
+ value < h->LowerBound) {
+ av_log(ctx, AV_LOG_ERROR,
+ "%s: input control c%ld is below lower boundary of %0.4f.\n",
+ label, port, h->LowerBound);
+ return AVERROR(EINVAL);
+ }
+
+ if (LADSPA_IS_HINT_BOUNDED_ABOVE(h->HintDescriptor) &&
+ value > h->UpperBound) {
+ av_log(ctx, AV_LOG_ERROR,
+ "%s: input control c%ld is above upper boundary of %0.4f.\n",
+ label, port, h->UpperBound);
+ return AVERROR(EINVAL);
+ }
+
+ s->ictlv[port] = value;
+
+ return 0;
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ LADSPAContext *s = ctx->priv;
+ LADSPA_Descriptor_Function descriptor_fn;
+ const LADSPA_Descriptor *desc;
+ LADSPA_PortDescriptor pd;
+ AVFilterPad pad = { NULL };
+ char *p, *arg, *saveptr = NULL;
+ unsigned long nb_ports;
+ int i, j = 0;
+
+ if (!s->dl_name) {
+ av_log(ctx, AV_LOG_ERROR, "No plugin name provided\n");
+ return AVERROR(EINVAL);
+ }
+
+ if (s->dl_name[0] == '/' || s->dl_name[0] == '.') {
+ // argument is a path
+ s->dl_handle = dlopen(s->dl_name, RTLD_LOCAL|RTLD_NOW);
+ } else {
+ // argument is a shared object name
+ char *paths = av_strdup(getenv("LADSPA_PATH"));
+ const char *separator = ":";
+
+ if (paths) {
+ p = paths;
+ while ((arg = av_strtok(p, separator, &saveptr)) && !s->dl_handle) {
+ s->dl_handle = try_load(arg, s->dl_name);
+ p = NULL;
+ }
+ }
+
+ av_free(paths);
+ if (!s->dl_handle && (paths = av_asprintf("%s/.ladspa/lib", getenv("HOME")))) {
+ s->dl_handle = try_load(paths, s->dl_name);
+ av_free(paths);
+ }
+
+ if (!s->dl_handle)
+ s->dl_handle = try_load("/usr/local/lib/ladspa", s->dl_name);
+
+ if (!s->dl_handle)
+ s->dl_handle = try_load("/usr/lib/ladspa", s->dl_name);
+ }
+ if (!s->dl_handle) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to load '%s'\n", s->dl_name);
+ return AVERROR(EINVAL);
+ }
+
+ descriptor_fn = dlsym(s->dl_handle, "ladspa_descriptor");
+ if (!descriptor_fn) {
+ av_log(ctx, AV_LOG_ERROR, "Could not find ladspa_descriptor: %s\n", dlerror());
+ return AVERROR(EINVAL);
+ }
+
+ // Find the requested plugin, or list plugins
+ if (!s->plugin) {
+ av_log(ctx, AV_LOG_INFO, "The '%s' library contains the following plugins:\n", s->dl_name);
+ av_log(ctx, AV_LOG_INFO, "I = Input Channels\n");
+ av_log(ctx, AV_LOG_INFO, "O = Output Channels\n");
+ av_log(ctx, AV_LOG_INFO, "I:O %-25s %s\n", "Plugin", "Description");
+ av_log(ctx, AV_LOG_INFO, "\n");
+ for (i = 0; desc = descriptor_fn(i); i++) {
+ unsigned long inputs = 0, outputs = 0;
+
+ count_ports(desc, &inputs, &outputs);
+ av_log(ctx, AV_LOG_INFO, "%lu:%lu %-25s %s\n", inputs, outputs, desc->Label,
+ (char *)av_x_if_null(desc->Name, "?"));
+ av_log(ctx, AV_LOG_VERBOSE, "Maker: %s\n",
+ (char *)av_x_if_null(desc->Maker, "?"));
+ av_log(ctx, AV_LOG_VERBOSE, "Copyright: %s\n",
+ (char *)av_x_if_null(desc->Copyright, "?"));
+ }
+ return AVERROR_EXIT;
+ } else {
+ for (i = 0;; i++) {
+ desc = descriptor_fn(i);
+ if (!desc) {
+ av_log(ctx, AV_LOG_ERROR, "Could not find plugin: %s\n", s->plugin);
+ return AVERROR(EINVAL);
+ }
+
+ if (desc->Label && !strcmp(desc->Label, s->plugin))
+ break;
+ }
+ }
+
+ s->desc = desc;
+ nb_ports = desc->PortCount;
+
+ s->ipmap = av_calloc(nb_ports, sizeof(*s->ipmap));
+ s->opmap = av_calloc(nb_ports, sizeof(*s->opmap));
+ s->icmap = av_calloc(nb_ports, sizeof(*s->icmap));
+ s->ocmap = av_calloc(nb_ports, sizeof(*s->ocmap));
+ s->ictlv = av_calloc(nb_ports, sizeof(*s->ictlv));
+ s->octlv = av_calloc(nb_ports, sizeof(*s->octlv));
+ s->ctl_needs_value = av_calloc(nb_ports, sizeof(*s->ctl_needs_value));
+ if (!s->ipmap || !s->opmap || !s->icmap ||
+ !s->ocmap || !s->ictlv || !s->octlv || !s->ctl_needs_value)
+ return AVERROR(ENOMEM);
+
+ for (i = 0; i < nb_ports; i++) {
+ pd = desc->PortDescriptors[i];
+
+ if (LADSPA_IS_PORT_AUDIO(pd)) {
+ if (LADSPA_IS_PORT_INPUT(pd)) {
+ s->ipmap[s->nb_inputs] = i;
+ s->nb_inputs++;
+ } else if (LADSPA_IS_PORT_OUTPUT(pd)) {
+ s->opmap[s->nb_outputs] = i;
+ s->nb_outputs++;
+ }
+ } else if (LADSPA_IS_PORT_CONTROL(pd)) {
+ if (LADSPA_IS_PORT_INPUT(pd)) {
+ s->icmap[s->nb_inputcontrols] = i;
+
+ if (LADSPA_IS_HINT_HAS_DEFAULT(desc->PortRangeHints[i].HintDescriptor))
+ set_default_ctl_value(s, s->nb_inputcontrols, s->icmap, s->ictlv);
+ else
+ s->ctl_needs_value[s->nb_inputcontrols] = 1;
+
+ s->nb_inputcontrols++;
+ } else if (LADSPA_IS_PORT_OUTPUT(pd)) {
+ s->ocmap[s->nb_outputcontrols] = i;
+ s->nb_outputcontrols++;
+ }
+ }
+ }
+
+ // List Control Ports if "help" is specified
+ if (s->options && !strcmp(s->options, "help")) {
+ if (!s->nb_inputcontrols) {
+ av_log(ctx, AV_LOG_INFO,
+ "The '%s' plugin does not have any input controls.\n",
+ desc->Label);
+ } else {
+ av_log(ctx, AV_LOG_INFO,
+ "The '%s' plugin has the following input controls:\n",
+ desc->Label);
+ for (i = 0; i < s->nb_inputcontrols; i++)
+ print_ctl_info(ctx, AV_LOG_INFO, s, i, s->icmap, s->ictlv, 0);
+ }
+ return AVERROR_EXIT;
+ }
+
+ // Parse control parameters
+ p = s->options;
+ while (s->options) {
+ LADSPA_Data val;
+ int ret;
+
+ if (!(arg = av_strtok(p, " |", &saveptr)))
+ break;
+ p = NULL;
+
+ if (sscanf(arg, "c%d=%f", &i, &val) != 2) {
+ if (sscanf(arg, "%f", &val) != 1) {
+ av_log(ctx, AV_LOG_ERROR, "Invalid syntax.\n");
+ return AVERROR(EINVAL);
+ }
+ i = j++;
+ }
+
+ if ((ret = set_control(ctx, i, val)) < 0)
+ return ret;
+ s->ctl_needs_value[i] = 0;
+ }
+
+ // Check if any controls are not set
+ for (i = 0; i < s->nb_inputcontrols; i++) {
+ if (s->ctl_needs_value[i]) {
+ av_log(ctx, AV_LOG_ERROR, "Control c%d must be set.\n", i);
+ print_ctl_info(ctx, AV_LOG_ERROR, s, i, s->icmap, s->ictlv, 0);
+ return AVERROR(EINVAL);
+ }
+ }
+
+ pad.type = AVMEDIA_TYPE_AUDIO;
+
+ if (s->nb_inputs) {
+ pad.name = av_asprintf("in0:%s%lu", desc->Label, s->nb_inputs);
+ if (!pad.name)
+ return AVERROR(ENOMEM);
+
+ pad.filter_frame = filter_frame;
+ pad.config_props = config_input;
+ if (ff_insert_inpad(ctx, ctx->nb_inputs, &pad) < 0) {
+ av_freep(&pad.name);
+ return AVERROR(ENOMEM);
+ }
+ }
+
+ av_log(ctx, AV_LOG_DEBUG, "ports: %lu\n", nb_ports);
+ av_log(ctx, AV_LOG_DEBUG, "inputs: %lu outputs: %lu\n",
+ s->nb_inputs, s->nb_outputs);
+ av_log(ctx, AV_LOG_DEBUG, "input controls: %lu output controls: %lu\n",
+ s->nb_inputcontrols, s->nb_outputcontrols);
+
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ LADSPAContext *s = ctx->priv;
+ AVFilterFormats *formats;
+ AVFilterChannelLayouts *layouts;
+ static const enum AVSampleFormat sample_fmts[] = {
+ AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_NONE };
+ int ret;
+
+ formats = ff_make_format_list(sample_fmts);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ret = ff_set_common_formats(ctx, formats);
+ if (ret < 0)
+ return ret;
+
+ if (s->nb_inputs) {
+ formats = ff_all_samplerates();
+ if (!formats)
+ return AVERROR(ENOMEM);
+
+ ret = ff_set_common_samplerates(ctx, formats);
+ if (ret < 0)
+ return ret;
+ } else {
+ int sample_rates[] = { s->sample_rate, -1 };
+
+ ret = ff_set_common_samplerates(ctx, ff_make_format_list(sample_rates));
+ if (ret < 0)
+ return ret;
+ }
+
+ if (s->nb_inputs == 1 && s->nb_outputs == 1) {
+ // We will instantiate multiple LADSPA_Handle, one over each channel
+ layouts = ff_all_channel_counts();
+ if (!layouts)
+ return AVERROR(ENOMEM);
+
+ ret = ff_set_common_channel_layouts(ctx, layouts);
+ if (ret < 0)
+ return ret;
+ } else if (s->nb_inputs == 2 && s->nb_outputs == 2) {
+ layouts = NULL;
+ ret = ff_add_channel_layout(&layouts, AV_CH_LAYOUT_STEREO);
+ if (ret < 0)
+ return ret;
+ ret = ff_set_common_channel_layouts(ctx, layouts);
+ if (ret < 0)
+ return ret;
+ } else {
+ AVFilterLink *outlink = ctx->outputs[0];
+
+ if (s->nb_inputs >= 1) {
+ AVFilterLink *inlink = ctx->inputs[0];
+ uint64_t inlayout = FF_COUNT2LAYOUT(s->nb_inputs);
+
+ layouts = NULL;
+ ret = ff_add_channel_layout(&layouts, inlayout);
+ if (ret < 0)
+ return ret;
+ ret = ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts);
+ if (ret < 0)
+ return ret;
+
+ if (!s->nb_outputs) {
+ ret = ff_channel_layouts_ref(layouts, &outlink->in_channel_layouts);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ if (s->nb_outputs >= 1) {
+ uint64_t outlayout = FF_COUNT2LAYOUT(s->nb_outputs);
+
+ layouts = NULL;
+ ret = ff_add_channel_layout(&layouts, outlayout);
+ if (ret < 0)
+ return ret;
+ ret = ff_channel_layouts_ref(layouts, &outlink->in_channel_layouts);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ LADSPAContext *s = ctx->priv;
+ int i;
+
+ for (i = 0; i < s->nb_handles; i++) {
+ if (s->desc->deactivate)
+ s->desc->deactivate(s->handles[i]);
+ if (s->desc->cleanup)
+ s->desc->cleanup(s->handles[i]);
+ }
+
+ if (s->dl_handle)
+ dlclose(s->dl_handle);
+
+ av_freep(&s->ipmap);
+ av_freep(&s->opmap);
+ av_freep(&s->icmap);
+ av_freep(&s->ocmap);
+ av_freep(&s->ictlv);
+ av_freep(&s->octlv);
+ av_freep(&s->handles);
+ av_freep(&s->ctl_needs_value);
+
+ if (ctx->nb_inputs)
+ av_freep(&ctx->input_pads[0].name);
+}
+
+static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
+ char *res, int res_len, int flags)
+{
+ LADSPA_Data value;
+ unsigned long port;
+
+ if (sscanf(cmd, "c%ld", &port) + sscanf(args, "%f", &value) != 2)
+ return AVERROR(EINVAL);
+
+ return set_control(ctx, port, value);
+}
+
+static const AVFilterPad ladspa_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_output,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_ladspa = {
+ .name = "ladspa",
+ .description = NULL_IF_CONFIG_SMALL("Apply LADSPA effect."),
+ .priv_size = sizeof(LADSPAContext),
+ .priv_class = &ladspa_class,
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .process_command = process_command,
+ .inputs = 0,
+ .outputs = ladspa_outputs,
+ .flags = AVFILTER_FLAG_DYNAMIC_INPUTS,
+};
diff --git a/libavfilter/af_loudnorm.c b/libavfilter/af_loudnorm.c
new file mode 100644
index 0000000000..9d91c76047
--- /dev/null
+++ b/libavfilter/af_loudnorm.c
@@ -0,0 +1,920 @@
+/*
+ * Copyright (c) 2016 Kyle Swanson <k@ylo.ph>.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/* http://k.ylo.ph/2016/04/04/loudnorm.html */
+
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#include "internal.h"
+#include "audio.h"
+#include "ebur128.h"
+
+enum FrameType {
+ FIRST_FRAME,
+ INNER_FRAME,
+ FINAL_FRAME,
+ LINEAR_MODE,
+ FRAME_NB
+};
+
+enum LimiterState {
+ OUT,
+ ATTACK,
+ SUSTAIN,
+ RELEASE,
+ STATE_NB
+};
+
+enum PrintFormat {
+ NONE,
+ JSON,
+ SUMMARY,
+ PF_NB
+};
+
+typedef struct LoudNormContext {
+ const AVClass *class;
+ double target_i;
+ double target_lra;
+ double target_tp;
+ double measured_i;
+ double measured_lra;
+ double measured_tp;
+ double measured_thresh;
+ double offset;
+ int linear;
+ int dual_mono;
+ enum PrintFormat print_format;
+
+ double *buf;
+ int buf_size;
+ int buf_index;
+ int prev_buf_index;
+
+ double delta[30];
+ double weights[21];
+ double prev_delta;
+ int index;
+
+ double gain_reduction[2];
+ double *limiter_buf;
+ double *prev_smp;
+ int limiter_buf_index;
+ int limiter_buf_size;
+ enum LimiterState limiter_state;
+ int peak_index;
+ int env_index;
+ int env_cnt;
+ int attack_length;
+ int release_length;
+
+ int64_t pts;
+ enum FrameType frame_type;
+ int above_threshold;
+ int prev_nb_samples;
+ int channels;
+
+ FFEBUR128State *r128_in;
+ FFEBUR128State *r128_out;
+} LoudNormContext;
+
+#define OFFSET(x) offsetof(LoudNormContext, x)
+#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption loudnorm_options[] = {
+ { "I", "set integrated loudness target", OFFSET(target_i), AV_OPT_TYPE_DOUBLE, {.dbl = -24.}, -70., -5., FLAGS },
+ { "i", "set integrated loudness target", OFFSET(target_i), AV_OPT_TYPE_DOUBLE, {.dbl = -24.}, -70., -5., FLAGS },
+ { "LRA", "set loudness range target", OFFSET(target_lra), AV_OPT_TYPE_DOUBLE, {.dbl = 7.}, 1., 20., FLAGS },
+ { "lra", "set loudness range target", OFFSET(target_lra), AV_OPT_TYPE_DOUBLE, {.dbl = 7.}, 1., 20., FLAGS },
+ { "TP", "set maximum true peak", OFFSET(target_tp), AV_OPT_TYPE_DOUBLE, {.dbl = -2.}, -9., 0., FLAGS },
+ { "tp", "set maximum true peak", OFFSET(target_tp), AV_OPT_TYPE_DOUBLE, {.dbl = -2.}, -9., 0., FLAGS },
+ { "measured_I", "measured IL of input file", OFFSET(measured_i), AV_OPT_TYPE_DOUBLE, {.dbl = 0.}, -99., 0., FLAGS },
+ { "measured_i", "measured IL of input file", OFFSET(measured_i), AV_OPT_TYPE_DOUBLE, {.dbl = 0.}, -99., 0., FLAGS },
+ { "measured_LRA", "measured LRA of input file", OFFSET(measured_lra), AV_OPT_TYPE_DOUBLE, {.dbl = 0.}, 0., 99., FLAGS },
+ { "measured_lra", "measured LRA of input file", OFFSET(measured_lra), AV_OPT_TYPE_DOUBLE, {.dbl = 0.}, 0., 99., FLAGS },
+ { "measured_TP", "measured true peak of input file", OFFSET(measured_tp), AV_OPT_TYPE_DOUBLE, {.dbl = 99.}, -99., 99., FLAGS },
+ { "measured_tp", "measured true peak of input file", OFFSET(measured_tp), AV_OPT_TYPE_DOUBLE, {.dbl = 99.}, -99., 99., FLAGS },
+ { "measured_thresh", "measured threshold of input file", OFFSET(measured_thresh), AV_OPT_TYPE_DOUBLE, {.dbl = -70.}, -99., 0., FLAGS },
+ { "offset", "set offset gain", OFFSET(offset), AV_OPT_TYPE_DOUBLE, {.dbl = 0.}, -99., 99., FLAGS },
+ { "linear", "normalize linearly if possible", OFFSET(linear), AV_OPT_TYPE_BOOL, {.i64 = 1}, 0, 1, FLAGS },
+ { "dual_mono", "treat mono input as dual-mono", OFFSET(dual_mono), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS },
+ { "print_format", "set print format for stats", OFFSET(print_format), AV_OPT_TYPE_INT, {.i64 = NONE}, NONE, PF_NB -1, FLAGS, "print_format" },
+ { "none", 0, 0, AV_OPT_TYPE_CONST, {.i64 = NONE}, 0, 0, FLAGS, "print_format" },
+ { "json", 0, 0, AV_OPT_TYPE_CONST, {.i64 = JSON}, 0, 0, FLAGS, "print_format" },
+ { "summary", 0, 0, AV_OPT_TYPE_CONST, {.i64 = SUMMARY}, 0, 0, FLAGS, "print_format" },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(loudnorm);
+
+static inline int frame_size(int sample_rate, int frame_len_msec)
+{
+ const int frame_size = round((double)sample_rate * (frame_len_msec / 1000.0));
+ return frame_size + (frame_size % 2);
+}
+
+static void init_gaussian_filter(LoudNormContext *s)
+{
+ double total_weight = 0.0;
+ const double sigma = 3.5;
+ double adjust;
+ int i;
+
+ const int offset = 21 / 2;
+ const double c1 = 1.0 / (sigma * sqrt(2.0 * M_PI));
+ const double c2 = 2.0 * pow(sigma, 2.0);
+
+ for (i = 0; i < 21; i++) {
+ const int x = i - offset;
+ s->weights[i] = c1 * exp(-(pow(x, 2.0) / c2));
+ total_weight += s->weights[i];
+ }
+
+ adjust = 1.0 / total_weight;
+ for (i = 0; i < 21; i++)
+ s->weights[i] *= adjust;
+}
+
+static double gaussian_filter(LoudNormContext *s, int index)
+{
+ double result = 0.;
+ int i;
+
+ index = index - 10 > 0 ? index - 10 : index + 20;
+ for (i = 0; i < 21; i++)
+ result += s->delta[((index + i) < 30) ? (index + i) : (index + i - 30)] * s->weights[i];
+
+ return result;
+}
+
+static void detect_peak(LoudNormContext *s, int offset, int nb_samples, int channels, int *peak_delta, double *peak_value)
+{
+ int n, c, i, index;
+ double ceiling;
+ double *buf;
+
+ *peak_delta = -1;
+ buf = s->limiter_buf;
+ ceiling = s->target_tp;
+
+ index = s->limiter_buf_index + (offset * channels) + (1920 * channels);
+ if (index >= s->limiter_buf_size)
+ index -= s->limiter_buf_size;
+
+ if (s->frame_type == FIRST_FRAME) {
+ for (c = 0; c < channels; c++)
+ s->prev_smp[c] = fabs(buf[index + c - channels]);
+ }
+
+ for (n = 0; n < nb_samples; n++) {
+ for (c = 0; c < channels; c++) {
+ double this, next, max_peak;
+
+ this = fabs(buf[(index + c) < s->limiter_buf_size ? (index + c) : (index + c - s->limiter_buf_size)]);
+ next = fabs(buf[(index + c + channels) < s->limiter_buf_size ? (index + c + channels) : (index + c + channels - s->limiter_buf_size)]);
+
+ if ((s->prev_smp[c] <= this) && (next <= this) && (this > ceiling) && (n > 0)) {
+ int detected;
+
+ detected = 1;
+ for (i = 2; i < 12; i++) {
+ next = fabs(buf[(index + c + (i * channels)) < s->limiter_buf_size ? (index + c + (i * channels)) : (index + c + (i * channels) - s->limiter_buf_size)]);
+ if (next > this) {
+ detected = 0;
+ break;
+ }
+ }
+
+ if (!detected)
+ continue;
+
+ for (c = 0; c < channels; c++) {
+ if (c == 0 || fabs(buf[index + c]) > max_peak)
+ max_peak = fabs(buf[index + c]);
+
+ s->prev_smp[c] = fabs(buf[(index + c) < s->limiter_buf_size ? (index + c) : (index + c - s->limiter_buf_size)]);
+ }
+
+ *peak_delta = n;
+ s->peak_index = index;
+ *peak_value = max_peak;
+ return;
+ }
+
+ s->prev_smp[c] = this;
+ }
+
+ index += channels;
+ if (index >= s->limiter_buf_size)
+ index -= s->limiter_buf_size;
+ }
+}
+
+static void true_peak_limiter(LoudNormContext *s, double *out, int nb_samples, int channels)
+{
+ int n, c, index, peak_delta, smp_cnt;
+ double ceiling, peak_value;
+ double *buf;
+
+ buf = s->limiter_buf;
+ ceiling = s->target_tp;
+ index = s->limiter_buf_index;
+ smp_cnt = 0;
+
+ if (s->frame_type == FIRST_FRAME) {
+ double max;
+
+ max = 0.;
+ for (n = 0; n < 1920; n++) {
+ for (c = 0; c < channels; c++) {
+ max = fabs(buf[c]) > max ? fabs(buf[c]) : max;
+ }
+ buf += channels;
+ }
+
+ if (max > ceiling) {
+ s->gain_reduction[1] = ceiling / max;
+ s->limiter_state = SUSTAIN;
+ buf = s->limiter_buf;
+
+ for (n = 0; n < 1920; n++) {
+ for (c = 0; c < channels; c++) {
+ double env;
+ env = s->gain_reduction[1];
+ buf[c] *= env;
+ }
+ buf += channels;
+ }
+ }
+
+ buf = s->limiter_buf;
+ }
+
+ do {
+
+ switch(s->limiter_state) {
+ case OUT:
+ detect_peak(s, smp_cnt, nb_samples - smp_cnt, channels, &peak_delta, &peak_value);
+ if (peak_delta != -1) {
+ s->env_cnt = 0;
+ smp_cnt += (peak_delta - s->attack_length);
+ s->gain_reduction[0] = 1.;
+ s->gain_reduction[1] = ceiling / peak_value;
+ s->limiter_state = ATTACK;
+
+ s->env_index = s->peak_index - (s->attack_length * channels);
+ if (s->env_index < 0)
+ s->env_index += s->limiter_buf_size;
+
+ s->env_index += (s->env_cnt * channels);
+ if (s->env_index > s->limiter_buf_size)
+ s->env_index -= s->limiter_buf_size;
+
+ } else {
+ smp_cnt = nb_samples;
+ }
+ break;
+
+ case ATTACK:
+ for (; s->env_cnt < s->attack_length; s->env_cnt++) {
+ for (c = 0; c < channels; c++) {
+ double env;
+ env = s->gain_reduction[0] - ((double) s->env_cnt / (s->attack_length - 1) * (s->gain_reduction[0] - s->gain_reduction[1]));
+ buf[s->env_index + c] *= env;
+ }
+
+ s->env_index += channels;
+ if (s->env_index >= s->limiter_buf_size)
+ s->env_index -= s->limiter_buf_size;
+
+ smp_cnt++;
+ if (smp_cnt >= nb_samples) {
+ s->env_cnt++;
+ break;
+ }
+ }
+
+ if (smp_cnt < nb_samples) {
+ s->env_cnt = 0;
+ s->attack_length = 1920;
+ s->limiter_state = SUSTAIN;
+ }
+ break;
+
+ case SUSTAIN:
+ detect_peak(s, smp_cnt, nb_samples, channels, &peak_delta, &peak_value);
+ if (peak_delta == -1) {
+ s->limiter_state = RELEASE;
+ s->gain_reduction[0] = s->gain_reduction[1];
+ s->gain_reduction[1] = 1.;
+ s->env_cnt = 0;
+ break;
+ } else {
+ double gain_reduction;
+ gain_reduction = ceiling / peak_value;
+
+ if (gain_reduction < s->gain_reduction[1]) {
+ s->limiter_state = ATTACK;
+
+ s->attack_length = peak_delta;
+ if (s->attack_length <= 1)
+ s->attack_length = 2;
+
+ s->gain_reduction[0] = s->gain_reduction[1];
+ s->gain_reduction[1] = gain_reduction;
+ s->env_cnt = 0;
+ break;
+ }
+
+ for (s->env_cnt = 0; s->env_cnt < peak_delta; s->env_cnt++) {
+ for (c = 0; c < channels; c++) {
+ double env;
+ env = s->gain_reduction[1];
+ buf[s->env_index + c] *= env;
+ }
+
+ s->env_index += channels;
+ if (s->env_index >= s->limiter_buf_size)
+ s->env_index -= s->limiter_buf_size;
+
+ smp_cnt++;
+ if (smp_cnt >= nb_samples) {
+ s->env_cnt++;
+ break;
+ }
+ }
+ }
+ break;
+
+ case RELEASE:
+ for (; s->env_cnt < s->release_length; s->env_cnt++) {
+ for (c = 0; c < channels; c++) {
+ double env;
+ env = s->gain_reduction[0] + (((double) s->env_cnt / (s->release_length - 1)) * (s->gain_reduction[1] - s->gain_reduction[0]));
+ buf[s->env_index + c] *= env;
+ }
+
+ s->env_index += channels;
+ if (s->env_index >= s->limiter_buf_size)
+ s->env_index -= s->limiter_buf_size;
+
+ smp_cnt++;
+ if (smp_cnt >= nb_samples) {
+ s->env_cnt++;
+ break;
+ }
+ }
+
+ if (smp_cnt < nb_samples) {
+ s->env_cnt = 0;
+ s->limiter_state = OUT;
+ }
+
+ break;
+ }
+
+ } while (smp_cnt < nb_samples);
+
+ for (n = 0; n < nb_samples; n++) {
+ for (c = 0; c < channels; c++) {
+ out[c] = buf[index + c];
+ if (fabs(out[c]) > ceiling) {
+ out[c] = ceiling * (out[c] < 0 ? -1 : 1);
+ }
+ }
+ out += channels;
+ index += channels;
+ if (index >= s->limiter_buf_size)
+ index -= s->limiter_buf_size;
+ }
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ LoudNormContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFrame *out;
+ const double *src;
+ double *dst;
+ double *buf;
+ double *limiter_buf;
+ int i, n, c, subframe_length, src_index;
+ double gain, gain_next, env_global, env_shortterm,
+ global, shortterm, lra, relative_threshold;
+
+ if (av_frame_is_writable(in)) {
+ out = in;
+ } else {
+ out = ff_get_audio_buffer(inlink, in->nb_samples);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+ }
+
+ out->pts = s->pts;
+ src = (const double *)in->data[0];
+ dst = (double *)out->data[0];
+ buf = s->buf;
+ limiter_buf = s->limiter_buf;
+
+ ff_ebur128_add_frames_double(s->r128_in, src, in->nb_samples);
+
+ if (s->frame_type == FIRST_FRAME && in->nb_samples < frame_size(inlink->sample_rate, 3000)) {
+ double offset, offset_tp, true_peak;
+
+ ff_ebur128_loudness_global(s->r128_in, &global);
+ for (c = 0; c < inlink->channels; c++) {
+ double tmp;
+ ff_ebur128_sample_peak(s->r128_in, c, &tmp);
+ if (c == 0 || tmp > true_peak)
+ true_peak = tmp;
+ }
+
+ offset = s->target_i - global;
+ offset_tp = true_peak + offset;
+ s->offset = offset_tp < s->target_tp ? offset : s->target_tp - true_peak;
+ s->offset = pow(10., s->offset / 20.);
+ s->frame_type = LINEAR_MODE;
+ }
+
+ switch (s->frame_type) {
+ case FIRST_FRAME:
+ for (n = 0; n < in->nb_samples; n++) {
+ for (c = 0; c < inlink->channels; c++) {
+ buf[s->buf_index + c] = src[c];
+ }
+ src += inlink->channels;
+ s->buf_index += inlink->channels;
+ }
+
+ ff_ebur128_loudness_shortterm(s->r128_in, &shortterm);
+
+ if (shortterm < s->measured_thresh) {
+ s->above_threshold = 0;
+ env_shortterm = shortterm <= -70. ? 0. : s->target_i - s->measured_i;
+ } else {
+ s->above_threshold = 1;
+ env_shortterm = shortterm <= -70. ? 0. : s->target_i - shortterm;
+ }
+
+ for (n = 0; n < 30; n++)
+ s->delta[n] = pow(10., env_shortterm / 20.);
+ s->prev_delta = s->delta[s->index];
+
+ s->buf_index =
+ s->limiter_buf_index = 0;
+
+ for (n = 0; n < (s->limiter_buf_size / inlink->channels); n++) {
+ for (c = 0; c < inlink->channels; c++) {
+ limiter_buf[s->limiter_buf_index + c] = buf[s->buf_index + c] * s->delta[s->index] * s->offset;
+ }
+ s->limiter_buf_index += inlink->channels;
+ if (s->limiter_buf_index >= s->limiter_buf_size)
+ s->limiter_buf_index -= s->limiter_buf_size;
+
+ s->buf_index += inlink->channels;
+ }
+
+ subframe_length = frame_size(inlink->sample_rate, 100);
+ true_peak_limiter(s, dst, subframe_length, inlink->channels);
+ ff_ebur128_add_frames_double(s->r128_out, dst, subframe_length);
+
+ s->pts +=
+ out->nb_samples =
+ inlink->min_samples =
+ inlink->max_samples =
+ inlink->partial_buf_size = subframe_length;
+
+ s->frame_type = INNER_FRAME;
+ break;
+
+ case INNER_FRAME:
+ gain = gaussian_filter(s, s->index + 10 < 30 ? s->index + 10 : s->index + 10 - 30);
+ gain_next = gaussian_filter(s, s->index + 11 < 30 ? s->index + 11 : s->index + 11 - 30);
+
+ for (n = 0; n < in->nb_samples; n++) {
+ for (c = 0; c < inlink->channels; c++) {
+ buf[s->prev_buf_index + c] = src[c];
+ limiter_buf[s->limiter_buf_index + c] = buf[s->buf_index + c] * (gain + (((double) n / in->nb_samples) * (gain_next - gain))) * s->offset;
+ }
+ src += inlink->channels;
+
+ s->limiter_buf_index += inlink->channels;
+ if (s->limiter_buf_index >= s->limiter_buf_size)
+ s->limiter_buf_index -= s->limiter_buf_size;
+
+ s->prev_buf_index += inlink->channels;
+ if (s->prev_buf_index >= s->buf_size)
+ s->prev_buf_index -= s->buf_size;
+
+ s->buf_index += inlink->channels;
+ if (s->buf_index >= s->buf_size)
+ s->buf_index -= s->buf_size;
+ }
+
+ subframe_length = (frame_size(inlink->sample_rate, 100) - in->nb_samples) * inlink->channels;
+ s->limiter_buf_index = s->limiter_buf_index + subframe_length < s->limiter_buf_size ? s->limiter_buf_index + subframe_length : s->limiter_buf_index + subframe_length - s->limiter_buf_size;
+
+ true_peak_limiter(s, dst, in->nb_samples, inlink->channels);
+ ff_ebur128_add_frames_double(s->r128_out, dst, in->nb_samples);
+
+ ff_ebur128_loudness_range(s->r128_in, &lra);
+ ff_ebur128_loudness_global(s->r128_in, &global);
+ ff_ebur128_loudness_shortterm(s->r128_in, &shortterm);
+ ff_ebur128_relative_threshold(s->r128_in, &relative_threshold);
+
+ if (s->above_threshold == 0) {
+ double shortterm_out;
+
+ if (shortterm > s->measured_thresh)
+ s->prev_delta *= 1.0058;
+
+ ff_ebur128_loudness_shortterm(s->r128_out, &shortterm_out);
+ if (shortterm_out >= s->target_i)
+ s->above_threshold = 1;
+ }
+
+ if (shortterm < relative_threshold || shortterm <= -70. || s->above_threshold == 0) {
+ s->delta[s->index] = s->prev_delta;
+ } else {
+ env_global = fabs(shortterm - global) < (s->target_lra / 2.) ? shortterm - global : (s->target_lra / 2.) * ((shortterm - global) < 0 ? -1 : 1);
+ env_shortterm = s->target_i - shortterm;
+ s->delta[s->index] = pow(10., (env_global + env_shortterm) / 20.);
+ }
+
+ s->prev_delta = s->delta[s->index];
+ s->index++;
+ if (s->index >= 30)
+ s->index -= 30;
+ s->prev_nb_samples = in->nb_samples;
+ s->pts += in->nb_samples;
+ break;
+
+ case FINAL_FRAME:
+ gain = gaussian_filter(s, s->index + 10 < 30 ? s->index + 10 : s->index + 10 - 30);
+ s->limiter_buf_index = 0;
+ src_index = 0;
+
+ for (n = 0; n < s->limiter_buf_size / inlink->channels; n++) {
+ for (c = 0; c < inlink->channels; c++) {
+ s->limiter_buf[s->limiter_buf_index + c] = src[src_index + c] * gain * s->offset;
+ }
+ src_index += inlink->channels;
+
+ s->limiter_buf_index += inlink->channels;
+ if (s->limiter_buf_index >= s->limiter_buf_size)
+ s->limiter_buf_index -= s->limiter_buf_size;
+ }
+
+ subframe_length = frame_size(inlink->sample_rate, 100);
+ for (i = 0; i < in->nb_samples / subframe_length; i++) {
+ true_peak_limiter(s, dst, subframe_length, inlink->channels);
+
+ for (n = 0; n < subframe_length; n++) {
+ for (c = 0; c < inlink->channels; c++) {
+ if (src_index < (in->nb_samples * inlink->channels)) {
+ limiter_buf[s->limiter_buf_index + c] = src[src_index + c] * gain * s->offset;
+ } else {
+ limiter_buf[s->limiter_buf_index + c] = 0.;
+ }
+ }
+
+ if (src_index < (in->nb_samples * inlink->channels))
+ src_index += inlink->channels;
+
+ s->limiter_buf_index += inlink->channels;
+ if (s->limiter_buf_index >= s->limiter_buf_size)
+ s->limiter_buf_index -= s->limiter_buf_size;
+ }
+
+ dst += (subframe_length * inlink->channels);
+ }
+
+ dst = (double *)out->data[0];
+ ff_ebur128_add_frames_double(s->r128_out, dst, in->nb_samples);
+ break;
+
+ case LINEAR_MODE:
+ for (n = 0; n < in->nb_samples; n++) {
+ for (c = 0; c < inlink->channels; c++) {
+ dst[c] = src[c] * s->offset;
+ }
+ src += inlink->channels;
+ dst += inlink->channels;
+ }
+
+ dst = (double *)out->data[0];
+ ff_ebur128_add_frames_double(s->r128_out, dst, in->nb_samples);
+ s->pts += in->nb_samples;
+ break;
+ }
+
+ if (in != out)
+ av_frame_free(&in);
+
+ return ff_filter_frame(outlink, out);
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ int ret;
+ AVFilterContext *ctx = outlink->src;
+ AVFilterLink *inlink = ctx->inputs[0];
+ LoudNormContext *s = ctx->priv;
+
+ ret = ff_request_frame(inlink);
+ if (ret == AVERROR_EOF && s->frame_type == INNER_FRAME) {
+ double *src;
+ double *buf;
+ int nb_samples, n, c, offset;
+ AVFrame *frame;
+
+ nb_samples = (s->buf_size / inlink->channels) - s->prev_nb_samples;
+ nb_samples -= (frame_size(inlink->sample_rate, 100) - s->prev_nb_samples);
+
+ frame = ff_get_audio_buffer(outlink, nb_samples);
+ if (!frame)
+ return AVERROR(ENOMEM);
+ frame->nb_samples = nb_samples;
+
+ buf = s->buf;
+ src = (double *)frame->data[0];
+
+ offset = ((s->limiter_buf_size / inlink->channels) - s->prev_nb_samples) * inlink->channels;
+ offset -= (frame_size(inlink->sample_rate, 100) - s->prev_nb_samples) * inlink->channels;
+ s->buf_index = s->buf_index - offset < 0 ? s->buf_index - offset + s->buf_size : s->buf_index - offset;
+
+ for (n = 0; n < nb_samples; n++) {
+ for (c = 0; c < inlink->channels; c++) {
+ src[c] = buf[s->buf_index + c];
+ }
+ src += inlink->channels;
+ s->buf_index += inlink->channels;
+ if (s->buf_index >= s->buf_size)
+ s->buf_index -= s->buf_size;
+ }
+
+ s->frame_type = FINAL_FRAME;
+ ret = filter_frame(inlink, frame);
+ }
+ return ret;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *formats;
+ AVFilterChannelLayouts *layouts;
+ AVFilterLink *inlink = ctx->inputs[0];
+ AVFilterLink *outlink = ctx->outputs[0];
+ static const int input_srate[] = {192000, -1};
+ static const enum AVSampleFormat sample_fmts[] = {
+ AV_SAMPLE_FMT_DBL,
+ AV_SAMPLE_FMT_NONE
+ };
+ int ret;
+
+ layouts = ff_all_channel_counts();
+ if (!layouts)
+ return AVERROR(ENOMEM);
+ ret = ff_set_common_channel_layouts(ctx, layouts);
+ if (ret < 0)
+ return ret;
+
+ formats = ff_make_format_list(sample_fmts);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ret = ff_set_common_formats(ctx, formats);
+ if (ret < 0)
+ return ret;
+
+ formats = ff_make_format_list(input_srate);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ret = ff_formats_ref(formats, &inlink->out_samplerates);
+ if (ret < 0)
+ return ret;
+ ret = ff_formats_ref(formats, &outlink->in_samplerates);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ LoudNormContext *s = ctx->priv;
+
+ s->r128_in = ff_ebur128_init(inlink->channels, inlink->sample_rate, 0, FF_EBUR128_MODE_I | FF_EBUR128_MODE_S | FF_EBUR128_MODE_LRA | FF_EBUR128_MODE_SAMPLE_PEAK);
+ if (!s->r128_in)
+ return AVERROR(ENOMEM);
+
+ s->r128_out = ff_ebur128_init(inlink->channels, inlink->sample_rate, 0, FF_EBUR128_MODE_I | FF_EBUR128_MODE_S | FF_EBUR128_MODE_LRA | FF_EBUR128_MODE_SAMPLE_PEAK);
+ if (!s->r128_out)
+ return AVERROR(ENOMEM);
+
+ if (inlink->channels == 1 && s->dual_mono) {
+ ff_ebur128_set_channel(s->r128_in, 0, FF_EBUR128_DUAL_MONO);
+ ff_ebur128_set_channel(s->r128_out, 0, FF_EBUR128_DUAL_MONO);
+ }
+
+ s->buf_size = frame_size(inlink->sample_rate, 3000) * inlink->channels;
+ s->buf = av_malloc_array(s->buf_size, sizeof(*s->buf));
+ if (!s->buf)
+ return AVERROR(ENOMEM);
+
+ s->limiter_buf_size = frame_size(inlink->sample_rate, 210) * inlink->channels;
+ s->limiter_buf = av_malloc_array(s->buf_size, sizeof(*s->limiter_buf));
+ if (!s->limiter_buf)
+ return AVERROR(ENOMEM);
+
+ s->prev_smp = av_malloc_array(inlink->channels, sizeof(*s->prev_smp));
+ if (!s->prev_smp)
+ return AVERROR(ENOMEM);
+
+ init_gaussian_filter(s);
+
+ s->frame_type = FIRST_FRAME;
+
+ if (s->linear) {
+ double offset, offset_tp;
+ offset = s->target_i - s->measured_i;
+ offset_tp = s->measured_tp + offset;
+
+ if (s->measured_tp != 99 && s->measured_thresh != -70 && s->measured_lra != 0 && s->measured_i != 0) {
+ if ((offset_tp <= s->target_tp) && (s->measured_lra <= s->target_lra)) {
+ s->frame_type = LINEAR_MODE;
+ s->offset = offset;
+ }
+ }
+ }
+
+ if (s->frame_type != LINEAR_MODE) {
+ inlink->min_samples =
+ inlink->max_samples =
+ inlink->partial_buf_size = frame_size(inlink->sample_rate, 3000);
+ }
+
+ s->pts =
+ s->buf_index =
+ s->prev_buf_index =
+ s->limiter_buf_index = 0;
+ s->channels = inlink->channels;
+ s->index = 1;
+ s->limiter_state = OUT;
+ s->offset = pow(10., s->offset / 20.);
+ s->target_tp = pow(10., s->target_tp / 20.);
+ s->attack_length = frame_size(inlink->sample_rate, 10);
+ s->release_length = frame_size(inlink->sample_rate, 100);
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ LoudNormContext *s = ctx->priv;
+ double i_in, i_out, lra_in, lra_out, thresh_in, thresh_out, tp_in, tp_out;
+ int c;
+
+ if (!s->r128_in || !s->r128_out)
+ goto end;
+
+ ff_ebur128_loudness_range(s->r128_in, &lra_in);
+ ff_ebur128_loudness_global(s->r128_in, &i_in);
+ ff_ebur128_relative_threshold(s->r128_in, &thresh_in);
+ for (c = 0; c < s->channels; c++) {
+ double tmp;
+ ff_ebur128_sample_peak(s->r128_in, c, &tmp);
+ if ((c == 0) || (tmp > tp_in))
+ tp_in = tmp;
+ }
+
+ ff_ebur128_loudness_range(s->r128_out, &lra_out);
+ ff_ebur128_loudness_global(s->r128_out, &i_out);
+ ff_ebur128_relative_threshold(s->r128_out, &thresh_out);
+ for (c = 0; c < s->channels; c++) {
+ double tmp;
+ ff_ebur128_sample_peak(s->r128_out, c, &tmp);
+ if ((c == 0) || (tmp > tp_out))
+ tp_out = tmp;
+ }
+
+ switch(s->print_format) {
+ case NONE:
+ break;
+
+ case JSON:
+ av_log(ctx, AV_LOG_INFO,
+ "\n{\n"
+ "\t\"input_i\" : \"%.2f\",\n"
+ "\t\"input_tp\" : \"%.2f\",\n"
+ "\t\"input_lra\" : \"%.2f\",\n"
+ "\t\"input_thresh\" : \"%.2f\",\n"
+ "\t\"output_i\" : \"%.2f\",\n"
+ "\t\"output_tp\" : \"%+.2f\",\n"
+ "\t\"output_lra\" : \"%.2f\",\n"
+ "\t\"output_thresh\" : \"%.2f\",\n"
+ "\t\"normalization_type\" : \"%s\",\n"
+ "\t\"target_offset\" : \"%.2f\"\n"
+ "}\n",
+ i_in,
+ 20. * log10(tp_in),
+ lra_in,
+ thresh_in,
+ i_out,
+ 20. * log10(tp_out),
+ lra_out,
+ thresh_out,
+ s->frame_type == LINEAR_MODE ? "linear" : "dynamic",
+ s->target_i - i_out
+ );
+ break;
+
+ case SUMMARY:
+ av_log(ctx, AV_LOG_INFO,
+ "\n"
+ "Input Integrated: %+6.1f LUFS\n"
+ "Input True Peak: %+6.1f dBTP\n"
+ "Input LRA: %6.1f LU\n"
+ "Input Threshold: %+6.1f LUFS\n"
+ "\n"
+ "Output Integrated: %+6.1f LUFS\n"
+ "Output True Peak: %+6.1f dBTP\n"
+ "Output LRA: %6.1f LU\n"
+ "Output Threshold: %+6.1f LUFS\n"
+ "\n"
+ "Normalization Type: %s\n"
+ "Target Offset: %+6.1f LU\n",
+ i_in,
+ 20. * log10(tp_in),
+ lra_in,
+ thresh_in,
+ i_out,
+ 20. * log10(tp_out),
+ lra_out,
+ thresh_out,
+ s->frame_type == LINEAR_MODE ? "Linear" : "Dynamic",
+ s->target_i - i_out
+ );
+ break;
+ }
+
+end:
+ if (s->r128_in)
+ ff_ebur128_destroy(&s->r128_in);
+ if (s->r128_out)
+ ff_ebur128_destroy(&s->r128_out);
+ av_freep(&s->limiter_buf);
+ av_freep(&s->prev_smp);
+ av_freep(&s->buf);
+}
+
+static const AVFilterPad avfilter_af_loudnorm_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad avfilter_af_loudnorm_outputs[] = {
+ {
+ .name = "default",
+ .request_frame = request_frame,
+ .type = AVMEDIA_TYPE_AUDIO,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_loudnorm = {
+ .name = "loudnorm",
+ .description = NULL_IF_CONFIG_SMALL("EBU R128 loudness normalization"),
+ .priv_size = sizeof(LoudNormContext),
+ .priv_class = &loudnorm_class,
+ .query_formats = query_formats,
+ .uninit = uninit,
+ .inputs = avfilter_af_loudnorm_inputs,
+ .outputs = avfilter_af_loudnorm_outputs,
+};
diff --git a/libavfilter/af_pan.c b/libavfilter/af_pan.c
new file mode 100644
index 0000000000..a477bde460
--- /dev/null
+++ b/libavfilter/af_pan.c
@@ -0,0 +1,442 @@
+/*
+ * Copyright (c) 2002 Anders Johansson <ajh@atri.curtin.edu.au>
+ * Copyright (c) 2011 Clément Bœsch <u pkh me>
+ * Copyright (c) 2011 Nicolas George <nicolas.george@normalesup.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Audio panning filter (channels mixing)
+ * Original code written by Anders Johansson for MPlayer,
+ * reimplemented for FFmpeg.
+ */
+
+#include <stdio.h>
+#include "libavutil/avstring.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/opt.h"
+#include "libswresample/swresample.h"
+#include "audio.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+
+#define MAX_CHANNELS 64
+
+typedef struct PanContext {
+ const AVClass *class;
+ char *args;
+ int64_t out_channel_layout;
+ double gain[MAX_CHANNELS][MAX_CHANNELS];
+ int64_t need_renorm;
+ int need_renumber;
+ int nb_output_channels;
+
+ int pure_gains;
+ /* channel mapping specific */
+ int channel_map[MAX_CHANNELS];
+ struct SwrContext *swr;
+} PanContext;
+
+static void skip_spaces(char **arg)
+{
+ int len = 0;
+
+ sscanf(*arg, " %n", &len);
+ *arg += len;
+}
+
+static int parse_channel_name(char **arg, int *rchannel, int *rnamed)
+{
+ char buf[8];
+ int len, i, channel_id = 0;
+ int64_t layout, layout0;
+
+ skip_spaces(arg);
+ /* try to parse a channel name, e.g. "FL" */
+ if (sscanf(*arg, "%7[A-Z]%n", buf, &len)) {
+ layout0 = layout = av_get_channel_layout(buf);
+ /* channel_id <- first set bit in layout */
+ for (i = 32; i > 0; i >>= 1) {
+ if (layout >= (int64_t)1 << i) {
+ channel_id += i;
+ layout >>= i;
+ }
+ }
+ /* reject layouts that are not a single channel */
+ if (channel_id >= MAX_CHANNELS || layout0 != (int64_t)1 << channel_id)
+ return AVERROR(EINVAL);
+ *rchannel = channel_id;
+ *rnamed = 1;
+ *arg += len;
+ return 0;
+ }
+ /* try to parse a channel number, e.g. "c2" */
+ if (sscanf(*arg, "c%d%n", &channel_id, &len) &&
+ channel_id >= 0 && channel_id < MAX_CHANNELS) {
+ *rchannel = channel_id;
+ *rnamed = 0;
+ *arg += len;
+ return 0;
+ }
+ return AVERROR(EINVAL);
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ PanContext *const pan = ctx->priv;
+ char *arg, *arg0, *tokenizer, *args = av_strdup(pan->args);
+ int out_ch_id, in_ch_id, len, named, ret, sign = 1;
+ int nb_in_channels[2] = { 0, 0 }; // number of unnamed and named input channels
+ double gain;
+
+ if (!pan->args) {
+ av_log(ctx, AV_LOG_ERROR,
+ "pan filter needs a channel layout and a set "
+ "of channel definitions as parameter\n");
+ return AVERROR(EINVAL);
+ }
+ if (!args)
+ return AVERROR(ENOMEM);
+ arg = av_strtok(args, "|", &tokenizer);
+ if (!arg) {
+ av_log(ctx, AV_LOG_ERROR, "Channel layout not specified\n");
+ ret = AVERROR(EINVAL);
+ goto fail;
+ }
+ ret = ff_parse_channel_layout(&pan->out_channel_layout,
+ &pan->nb_output_channels, arg, ctx);
+ if (ret < 0)
+ goto fail;
+
+ /* parse channel specifications */
+ while ((arg = arg0 = av_strtok(NULL, "|", &tokenizer))) {
+ /* channel name */
+ if (parse_channel_name(&arg, &out_ch_id, &named)) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Expected out channel name, got \"%.8s\"\n", arg);
+ ret = AVERROR(EINVAL);
+ goto fail;
+ }
+ if (named) {
+ if (!((pan->out_channel_layout >> out_ch_id) & 1)) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Channel \"%.8s\" does not exist in the chosen layout\n", arg0);
+ ret = AVERROR(EINVAL);
+ goto fail;
+ }
+ /* get the channel number in the output channel layout:
+ * out_channel_layout & ((1 << out_ch_id) - 1) are all the
+ * channels that come before out_ch_id,
+ * so their count is the index of out_ch_id */
+ out_ch_id = av_get_channel_layout_nb_channels(pan->out_channel_layout & (((int64_t)1 << out_ch_id) - 1));
+ }
+ if (out_ch_id < 0 || out_ch_id >= pan->nb_output_channels) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Invalid out channel name \"%.8s\"\n", arg0);
+ ret = AVERROR(EINVAL);
+ goto fail;
+ }
+ skip_spaces(&arg);
+ if (*arg == '=') {
+ arg++;
+ } else if (*arg == '<') {
+ pan->need_renorm |= (int64_t)1 << out_ch_id;
+ arg++;
+ } else {
+ av_log(ctx, AV_LOG_ERROR,
+ "Syntax error after channel name in \"%.8s\"\n", arg0);
+ ret = AVERROR(EINVAL);
+ goto fail;
+ }
+ /* gains */
+ while (1) {
+ gain = 1;
+ if (sscanf(arg, "%lf%n *%n", &gain, &len, &len))
+ arg += len;
+ if (parse_channel_name(&arg, &in_ch_id, &named)){
+ av_log(ctx, AV_LOG_ERROR,
+ "Expected in channel name, got \"%.8s\"\n", arg);
+ ret = AVERROR(EINVAL);
+ goto fail;
+ }
+ nb_in_channels[named]++;
+ if (nb_in_channels[!named]) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Can not mix named and numbered channels\n");
+ ret = AVERROR(EINVAL);
+ goto fail;
+ }
+ pan->gain[out_ch_id][in_ch_id] = sign * gain;
+ skip_spaces(&arg);
+ if (!*arg)
+ break;
+ if (*arg == '-') {
+ sign = -1;
+ } else if (*arg != '+') {
+ av_log(ctx, AV_LOG_ERROR, "Syntax error near \"%.8s\"\n", arg);
+ ret = AVERROR(EINVAL);
+ goto fail;
+ } else {
+ sign = 1;
+ }
+ arg++;
+ }
+ }
+ pan->need_renumber = !!nb_in_channels[1];
+
+ ret = 0;
+fail:
+ av_free(args);
+ return ret;
+}
+
+static int are_gains_pure(const PanContext *pan)
+{
+ int i, j;
+
+ for (i = 0; i < MAX_CHANNELS; i++) {
+ int nb_gain = 0;
+
+ for (j = 0; j < MAX_CHANNELS; j++) {
+ double gain = pan->gain[i][j];
+
+ /* channel mapping is effective only if 0% or 100% of a channel is
+ * selected... */
+ if (gain != 0. && gain != 1.)
+ return 0;
+ /* ...and if the output channel is only composed of one input */
+ if (gain && nb_gain++)
+ return 0;
+ }
+ }
+ return 1;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ PanContext *pan = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFilterFormats *formats = NULL;
+ AVFilterChannelLayouts *layouts;
+ int ret;
+
+ pan->pure_gains = are_gains_pure(pan);
+ /* libswr supports any sample and packing formats */
+ if ((ret = ff_set_common_formats(ctx, ff_all_formats(AVMEDIA_TYPE_AUDIO))) < 0)
+ return ret;
+
+ formats = ff_all_samplerates();
+ if ((ret = ff_set_common_samplerates(ctx, formats)) < 0)
+ return ret;
+
+ // inlink supports any channel layout
+ layouts = ff_all_channel_counts();
+ if ((ret = ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts)) < 0)
+ return ret;
+
+ // outlink supports only requested output channel layout
+ layouts = NULL;
+ if ((ret = ff_add_channel_layout(&layouts,
+ pan->out_channel_layout ? pan->out_channel_layout :
+ FF_COUNT2LAYOUT(pan->nb_output_channels))) < 0)
+ return ret;
+ return ff_channel_layouts_ref(layouts, &outlink->in_channel_layouts);
+}
+
+static int config_props(AVFilterLink *link)
+{
+ AVFilterContext *ctx = link->dst;
+ PanContext *pan = ctx->priv;
+ char buf[1024], *cur;
+ int i, j, k, r;
+ double t;
+
+ if (pan->need_renumber) {
+ // input channels were given by their name: renumber them
+ for (i = j = 0; i < MAX_CHANNELS; i++) {
+ if ((link->channel_layout >> i) & 1) {
+ for (k = 0; k < pan->nb_output_channels; k++)
+ pan->gain[k][j] = pan->gain[k][i];
+ j++;
+ }
+ }
+ }
+
+ // sanity check; can't be done in query_formats since the inlink
+ // channel layout is unknown at that time
+ if (link->channels > MAX_CHANNELS ||
+ pan->nb_output_channels > MAX_CHANNELS) {
+ av_log(ctx, AV_LOG_ERROR,
+ "af_pan supports a maximum of %d channels. "
+ "Feel free to ask for a higher limit.\n", MAX_CHANNELS);
+ return AVERROR_PATCHWELCOME;
+ }
+
+ // init libswresample context
+ pan->swr = swr_alloc_set_opts(pan->swr,
+ pan->out_channel_layout, link->format, link->sample_rate,
+ link->channel_layout, link->format, link->sample_rate,
+ 0, ctx);
+ if (!pan->swr)
+ return AVERROR(ENOMEM);
+ if (!link->channel_layout) {
+ if (av_opt_set_int(pan->swr, "ich", link->channels, 0) < 0)
+ return AVERROR(EINVAL);
+ }
+ if (!pan->out_channel_layout) {
+ if (av_opt_set_int(pan->swr, "och", pan->nb_output_channels, 0) < 0)
+ return AVERROR(EINVAL);
+ }
+
+ // gains are pure, init the channel mapping
+ if (pan->pure_gains) {
+
+ // get channel map from the pure gains
+ for (i = 0; i < pan->nb_output_channels; i++) {
+ int ch_id = -1;
+ for (j = 0; j < link->channels; j++) {
+ if (pan->gain[i][j]) {
+ ch_id = j;
+ break;
+ }
+ }
+ pan->channel_map[i] = ch_id;
+ }
+
+ av_opt_set_int(pan->swr, "icl", pan->out_channel_layout, 0);
+ av_opt_set_int(pan->swr, "uch", pan->nb_output_channels, 0);
+ swr_set_channel_mapping(pan->swr, pan->channel_map);
+ } else {
+ // renormalize
+ for (i = 0; i < pan->nb_output_channels; i++) {
+ if (!((pan->need_renorm >> i) & 1))
+ continue;
+ t = 0;
+ for (j = 0; j < link->channels; j++)
+ t += fabs(pan->gain[i][j]);
+ if (t > -1E-5 && t < 1E-5) {
+ // t is almost 0 but not exactly, this is probably a mistake
+ if (t)
+ av_log(ctx, AV_LOG_WARNING,
+ "Degenerate coefficients while renormalizing\n");
+ continue;
+ }
+ for (j = 0; j < link->channels; j++)
+ pan->gain[i][j] /= t;
+ }
+ av_opt_set_int(pan->swr, "icl", link->channel_layout, 0);
+ av_opt_set_int(pan->swr, "ocl", pan->out_channel_layout, 0);
+ swr_set_matrix(pan->swr, pan->gain[0], pan->gain[1] - pan->gain[0]);
+ }
+
+ r = swr_init(pan->swr);
+ if (r < 0)
+ return r;
+
+ // summary
+ for (i = 0; i < pan->nb_output_channels; i++) {
+ cur = buf;
+ for (j = 0; j < link->channels; j++) {
+ r = snprintf(cur, buf + sizeof(buf) - cur, "%s%.3g i%d",
+ j ? " + " : "", pan->gain[i][j], j);
+ cur += FFMIN(buf + sizeof(buf) - cur, r);
+ }
+ av_log(ctx, AV_LOG_VERBOSE, "o%d = %s\n", i, buf);
+ }
+ // add channel mapping summary if possible
+ if (pan->pure_gains) {
+ av_log(ctx, AV_LOG_INFO, "Pure channel mapping detected:");
+ for (i = 0; i < pan->nb_output_channels; i++)
+ if (pan->channel_map[i] < 0)
+ av_log(ctx, AV_LOG_INFO, " M");
+ else
+ av_log(ctx, AV_LOG_INFO, " %d", pan->channel_map[i]);
+ av_log(ctx, AV_LOG_INFO, "\n");
+ return 0;
+ }
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
+{
+ int ret;
+ int n = insamples->nb_samples;
+ AVFilterLink *const outlink = inlink->dst->outputs[0];
+ AVFrame *outsamples = ff_get_audio_buffer(outlink, n);
+ PanContext *pan = inlink->dst->priv;
+
+ if (!outsamples)
+ return AVERROR(ENOMEM);
+ swr_convert(pan->swr, outsamples->extended_data, n,
+ (void *)insamples->extended_data, n);
+ av_frame_copy_props(outsamples, insamples);
+ outsamples->channel_layout = outlink->channel_layout;
+ av_frame_set_channels(outsamples, outlink->channels);
+
+ ret = ff_filter_frame(outlink, outsamples);
+ av_frame_free(&insamples);
+ return ret;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ PanContext *pan = ctx->priv;
+ swr_free(&pan->swr);
+}
+
+#define OFFSET(x) offsetof(PanContext, x)
+
+static const AVOption pan_options[] = {
+ { "args", NULL, OFFSET(args), AV_OPT_TYPE_STRING, { .str = NULL }, CHAR_MIN, CHAR_MAX, AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(pan);
+
+static const AVFilterPad pan_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_props,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad pan_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_pan = {
+ .name = "pan",
+ .description = NULL_IF_CONFIG_SMALL("Remix channels with coefficients (panning)."),
+ .priv_size = sizeof(PanContext),
+ .priv_class = &pan_class,
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = pan_inputs,
+ .outputs = pan_outputs,
+};
diff --git a/libavfilter/af_replaygain.c b/libavfilter/af_replaygain.c
new file mode 100644
index 0000000000..c8f6f9666d
--- /dev/null
+++ b/libavfilter/af_replaygain.c
@@ -0,0 +1,615 @@
+/*
+ * Copyright (c) 1998 - 2009 Conifer Software
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * ReplayGain scanner
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/channel_layout.h"
+#include "audio.h"
+#include "avfilter.h"
+#include "internal.h"
+
+#define HISTOGRAM_SLOTS 12000
+#define BUTTER_ORDER 2
+#define YULE_ORDER 10
+
+typedef struct ReplayGainFreqInfo {
+ int sample_rate;
+ double BYule[YULE_ORDER + 1];
+ double AYule[YULE_ORDER + 1];
+ double BButter[BUTTER_ORDER + 1];
+ double AButter[BUTTER_ORDER + 1];
+} ReplayGainFreqInfo;
+
+static const ReplayGainFreqInfo freqinfos[] =
+{
+ {
+ 192000,
+ { 0.01184742123123, -0.04631092400086, 0.06584226961238,
+ -0.02165588522478, -0.05656260778952, 0.08607493592760,
+ -0.03375544339786, -0.04216579932754, 0.06416711490648,
+ -0.03444708260844, 0.00697275872241 },
+ { 1.00000000000000, -5.24727318348167, 10.60821585192244,
+ -8.74127665810413, -1.33906071371683, 8.07972882096606,
+ -5.46179918950847, 0.54318070652536, 0.87450969224280,
+ -0.34656083539754, 0.03034796843589 },
+ { 0.99653501465135, -1.99307002930271, 0.99653501465135 },
+ { 1.00000000000000, -1.99305802314321, 0.99308203546221 },
+ },
+ {
+ 176400,
+ { 0.00268568524529, -0.00852379426080, 0.00852704191347,
+ 0.00146116310295, -0.00950855828762, 0.00625449515499,
+ 0.00116183868722, -0.00362461417136, 0.00203961000134,
+ -0.00050664587933, 0.00004327455427 },
+ { 1.00000000000000, -5.57512782763045, 12.44291056065794,
+ -12.87462799681221, 3.08554846961576, 6.62493459880692,
+ -7.07662766313248, 2.51175542736441, 0.06731510802735,
+ -0.24567753819213, 0.03961404162376 },
+ { 0.99622916581118, -1.99245833162236, 0.99622916581118 },
+ { 1.00000000000000, -1.99244411238133, 0.99247255086339 },
+ },
+ {
+ 144000,
+ { 0.00639682359450, -0.02556437970955, 0.04230854400938,
+ -0.03722462201267, 0.01718514827295, 0.00610592243009,
+ -0.03065965747365, 0.04345745003539, -0.03298592681309,
+ 0.01320937236809, -0.00220304127757 },
+ { 1.00000000000000, -6.14814623523425, 15.80002457141566,
+ -20.78487587686937, 11.98848552310315, 3.36462015062606,
+ -10.22419868359470, 6.65599702146473, -1.67141861110485,
+ -0.05417956536718, 0.07374767867406 },
+ { 0.99538268958706, -1.99076537917413, 0.99538268958706 },
+ { 1.00000000000000, -1.99074405950505, 0.99078669884321 },
+ },
+ {
+ 128000,
+ { 0.00553120584305, -0.02112620545016, 0.03549076243117,
+ -0.03362498312306, 0.01425867248183, 0.01344686928787,
+ -0.03392770787836, 0.03464136459530, -0.02039116051549,
+ 0.00667420794705, -0.00093763762995 },
+ { 1.00000000000000, -6.14581710839925, 16.04785903675838,
+ -22.19089131407749, 15.24756471580286, -0.52001440400238,
+ -8.00488641699940, 6.60916094768855, -2.37856022810923,
+ 0.33106947986101, 0.00459820832036 },
+ { 0.99480702681278, -1.98961405362557, 0.99480702681278 },
+ { 1.00000000000000, -1.98958708647324, 0.98964102077790 },
+ },
+ {
+ 112000,
+ { 0.00528778718259, -0.01893240907245, 0.03185982561867,
+ -0.02926260297838, 0.00715743034072, 0.01985743355827,
+ -0.03222614850941, 0.02565681978192, -0.01210662313473,
+ 0.00325436284541, -0.00044173593001 },
+ { 1.00000000000000, -6.24932108456288, 17.42344320538476,
+ -27.86819709054896, 26.79087344681326,-13.43711081485123,
+ -0.66023612948173, 6.03658091814935, -4.24926577030310,
+ 1.40829268709186, -0.19480852628112 },
+ { 0.99406737810867, -1.98813475621734, 0.99406737810867 },
+ { 1.00000000000000, -1.98809955990514, 0.98816995252954 },
+ },
+ {
+ 96000,
+ { 0.00588138296683, -0.01613559730421, 0.02184798954216,
+ -0.01742490405317, 0.00464635643780, 0.01117772513205,
+ -0.02123865824368, 0.01959354413350, -0.01079720643523,
+ 0.00352183686289, -0.00063124341421 },
+ { 1.00000000000000, -5.97808823642008, 16.21362507964068,
+ -25.72923730652599, 25.40470663139513,-14.66166287771134,
+ 2.81597484359752, 2.51447125969733, -2.23575306985286,
+ 0.75788151036791, -0.10078025199029 },
+ { 0.99308203517541, -1.98616407035082, 0.99308203517541 },
+ { 1.00000000000000, -1.98611621154089, 0.98621192916075 },
+ },
+ {
+ 88200,
+ { 0.02667482047416, -0.11377479336097, 0.23063167910965,
+ -0.30726477945593, 0.33188520686529, -0.33862680249063,
+ 0.31807161531340, -0.23730796929880, 0.12273894790371,
+ -0.03840017967282, 0.00549673387936 },
+ { 1.00000000000000, -6.31836451657302, 18.31351310801799,
+ -31.88210014815921, 36.53792146976740,-28.23393036467559,
+ 14.24725258227189, -4.04670980012854, 0.18865757280515,
+ 0.25420333563908, -0.06012333531065 },
+ { 0.99247255046129, -1.98494510092259, 0.99247255046129 },
+ { 1.00000000000000, -1.98488843762335, 0.98500176422183 },
+ },
+ {
+ 64000,
+ { 0.02613056568174, -0.08128786488109, 0.14937282347325,
+ -0.21695711675126, 0.25010286673402, -0.23162283619278,
+ 0.17424041833052, -0.10299599216680, 0.04258696481981,
+ -0.00977952936493, 0.00105325558889 },
+ { 1.00000000000000, -5.73625477092119, 16.15249794355035,
+ -29.68654912464508, 39.55706155674083,-39.82524556246253,
+ 30.50605345013009,-17.43051772821245, 7.05154573908017,
+ -1.80783839720514, 0.22127840210813 },
+ { 0.98964101933472, -1.97928203866944, 0.98964101933472 },
+ { 1.00000000000000, -1.97917472731009, 0.97938935002880 },
+ },
+ {
+ 56000,
+ { 0.03144914734085, -0.06151729206963, 0.08066788708145,
+ -0.09737939921516, 0.08943210803999, -0.06989984672010,
+ 0.04926972841044, -0.03161257848451, 0.01456837493506,
+ -0.00316015108496, 0.00132807215875 },
+ { 1.00000000000000, -4.87377313090032, 12.03922160140209,
+ -20.10151118381395, 25.10388534415171,-24.29065560815903,
+ 18.27158469090663,-10.45249552560593, 4.30319491872003,
+ -1.13716992070185, 0.14510733527035 },
+ { 0.98816995007392, -1.97633990014784, 0.98816995007392 },
+ { 1.00000000000000, -1.97619994516973, 0.97647985512594 },
+ },
+ {
+ 48000,
+ { 0.03857599435200, -0.02160367184185, -0.00123395316851,
+ -0.00009291677959, -0.01655260341619, 0.02161526843274,
+ -0.02074045215285, 0.00594298065125, 0.00306428023191,
+ 0.00012025322027, 0.00288463683916 },
+ { 1.00000000000000, -3.84664617118067, 7.81501653005538,
+ -11.34170355132042, 13.05504219327545,-12.28759895145294,
+ 9.48293806319790, -5.87257861775999, 2.75465861874613,
+ -0.86984376593551, 0.13919314567432 },
+ { 0.98621192462708, -1.97242384925416, 0.98621192462708 },
+ { 1.00000000000000, -1.97223372919527, 0.97261396931306 },
+ },
+ {
+ 44100,
+ { 0.05418656406430, -0.02911007808948, -0.00848709379851,
+ -0.00851165645469, -0.00834990904936, 0.02245293253339,
+ -0.02596338512915, 0.01624864962975, -0.00240879051584,
+ 0.00674613682247, -0.00187763777362 },
+ { 1.00000000000000, -3.47845948550071, 6.36317777566148,
+ -8.54751527471874, 9.47693607801280, -8.81498681370155,
+ 6.85401540936998, -4.39470996079559, 2.19611684890774,
+ -0.75104302451432, 0.13149317958808 },
+ { 0.98500175787242, -1.97000351574484, 0.98500175787242 },
+ { 1.00000000000000, -1.96977855582618, 0.97022847566350 },
+ },
+ {
+ 37800,
+ { 0.08717879977844, -0.01000374016172, -0.06265852122368,
+ -0.01119328800950, -0.00114279372960, 0.02081333954769,
+ -0.01603261863207, 0.01936763028546, 0.00760044736442,
+ -0.00303979112271, -0.00075088605788 },
+ { 1.00000000000000, -2.62816311472146, 3.53734535817992,
+ -3.81003448678921, 3.91291636730132, -3.53518605896288,
+ 2.71356866157873, -1.86723311846592, 1.12075382367659,
+ -0.48574086886890, 0.11330544663849 },
+ { 0.98252400815195, -1.96504801630391, 0.98252400815195 },
+ { 1.00000000000000, -1.96474258269041, 0.96535344991740 },
+ },
+ {
+ 32000,
+ { 0.15457299681924, -0.09331049056315, -0.06247880153653,
+ 0.02163541888798, -0.05588393329856, 0.04781476674921,
+ 0.00222312597743, 0.03174092540049, -0.01390589421898,
+ 0.00651420667831, -0.00881362733839 },
+ { 1.00000000000000, -2.37898834973084, 2.84868151156327,
+ -2.64577170229825, 2.23697657451713, -1.67148153367602,
+ 1.00595954808547, -0.45953458054983, 0.16378164858596,
+ -0.05032077717131, 0.02347897407020 },
+ { 0.97938932735214, -1.95877865470428, 0.97938932735214 },
+ { 1.00000000000000, -1.95835380975398, 0.95920349965459 },
+ },
+ {
+ 24000,
+ { 0.30296907319327, -0.22613988682123, -0.08587323730772,
+ 0.03282930172664, -0.00915702933434, -0.02364141202522,
+ -0.00584456039913, 0.06276101321749, -0.00000828086748,
+ 0.00205861885564, -0.02950134983287 },
+ { 1.00000000000000, -1.61273165137247, 1.07977492259970,
+ -0.25656257754070, -0.16276719120440, -0.22638893773906,
+ 0.39120800788284, -0.22138138954925, 0.04500235387352,
+ 0.02005851806501, 0.00302439095741 },
+ { 0.97531843204928, -1.95063686409857, 0.97531843204928 },
+ { 1.00000000000000, -1.95002759149878, 0.95124613669835 },
+ },
+ {
+ 22050,
+ { 0.33642304856132, -0.25572241425570, -0.11828570177555,
+ 0.11921148675203, -0.07834489609479, -0.00469977914380,
+ -0.00589500224440, 0.05724228140351, 0.00832043980773,
+ -0.01635381384540, -0.01760176568150 },
+ { 1.00000000000000, -1.49858979367799, 0.87350271418188,
+ 0.12205022308084, -0.80774944671438, 0.47854794562326,
+ -0.12453458140019, -0.04067510197014, 0.08333755284107,
+ -0.04237348025746, 0.02977207319925 },
+ { 0.97316523498161, -1.94633046996323, 0.97316523498161 },
+ { 1.00000000000000, -1.94561023566527, 0.94705070426118 },
+ },
+ {
+ 18900,
+ { 0.38524531015142, -0.27682212062067, -0.09980181488805,
+ 0.09951486755646, -0.08934020156622, -0.00322369330199,
+ -0.00110329090689, 0.03784509844682, 0.01683906213303,
+ -0.01147039862572, -0.01941767987192 },
+ { 1.00000000000000, -1.29708918404534, 0.90399339674203,
+ -0.29613799017877, -0.42326645916207, 0.37934887402200,
+ -0.37919795944938, 0.23410283284785, -0.03892971758879,
+ 0.00403009552351, 0.03640166626278 },
+ { 0.96535326815829, -1.93070653631658, 0.96535326815829 },
+ { 1.00000000000000, -1.92950577983524, 0.93190729279793 },
+ },
+ {
+ 16000,
+ { 0.44915256608450, -0.14351757464547, -0.22784394429749,
+ -0.01419140100551, 0.04078262797139, -0.12398163381748,
+ 0.04097565135648, 0.10478503600251, -0.01863887810927,
+ -0.03193428438915, 0.00541907748707 },
+ { 1.00000000000000, -0.62820619233671, 0.29661783706366,
+ -0.37256372942400, 0.00213767857124, -0.42029820170918,
+ 0.22199650564824, 0.00613424350682, 0.06747620744683,
+ 0.05784820375801, 0.03222754072173 },
+ { 0.96454515552826, -1.92909031105652, 0.96454515552826 },
+ { 1.00000000000000, -1.92783286977036, 0.93034775234268 },
+ },
+ {
+ 12000,
+ { 0.56619470757641, -0.75464456939302, 0.16242137742230,
+ 0.16744243493672, -0.18901604199609, 0.30931782841830,
+ -0.27562961986224, 0.00647310677246, 0.08647503780351,
+ -0.03788984554840, -0.00588215443421 },
+ { 1.00000000000000, -1.04800335126349, 0.29156311971249,
+ -0.26806001042947, 0.00819999645858, 0.45054734505008,
+ -0.33032403314006, 0.06739368333110, -0.04784254229033,
+ 0.01639907836189, 0.01807364323573 },
+ { 0.96009142950541, -1.92018285901082, 0.96009142950541 },
+ { 1.00000000000000, -1.91858953033784, 0.92177618768381 },
+ },
+ {
+ 11025,
+ { 0.58100494960553, -0.53174909058578, -0.14289799034253,
+ 0.17520704835522, 0.02377945217615, 0.15558449135573,
+ -0.25344790059353, 0.01628462406333, 0.06920467763959,
+ -0.03721611395801, -0.00749618797172 },
+ { 1.00000000000000, -0.51035327095184, -0.31863563325245,
+ -0.20256413484477, 0.14728154134330, 0.38952639978999,
+ -0.23313271880868, -0.05246019024463, -0.02505961724053,
+ 0.02442357316099, 0.01818801111503 },
+ { 0.95856916599601, -1.91713833199203, 0.95856916599601 },
+ { 1.00000000000000, -1.91542108074780, 0.91885558323625 },
+ },
+ {
+ 8000,
+ { 0.53648789255105, -0.42163034350696, -0.00275953611929,
+ 0.04267842219415, -0.10214864179676, 0.14590772289388,
+ -0.02459864859345, -0.11202315195388, -0.04060034127000,
+ 0.04788665548180, -0.02217936801134 },
+ { 1.00000000000000, -0.25049871956020, -0.43193942311114,
+ -0.03424681017675, -0.04678328784242, 0.26408300200955,
+ 0.15113130533216, -0.17556493366449, -0.18823009262115,
+ 0.05477720428674, 0.04704409688120 },
+ { 0.94597685600279, -1.89195371200558, 0.94597685600279 },
+ { 1.00000000000000, -1.88903307939452, 0.89487434461664 },
+ },
+};
+
+typedef struct ReplayGainContext {
+ uint32_t histogram[HISTOGRAM_SLOTS];
+ float peak;
+ int yule_hist_i, butter_hist_i;
+ const double *yule_coeff_a;
+ const double *yule_coeff_b;
+ const double *butter_coeff_a;
+ const double *butter_coeff_b;
+ float yule_hist_a[256];
+ float yule_hist_b[256];
+ float butter_hist_a[256];
+ float butter_hist_b[256];
+} ReplayGainContext;
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *formats = NULL;
+ AVFilterChannelLayouts *layout = NULL;
+ int i, ret;
+
+ if ((ret = ff_add_format (&formats, AV_SAMPLE_FMT_FLT )) < 0 ||
+ (ret = ff_set_common_formats (ctx , formats )) < 0 ||
+ (ret = ff_add_channel_layout (&layout , AV_CH_LAYOUT_STEREO)) < 0 ||
+ (ret = ff_set_common_channel_layouts (ctx , layout )) < 0)
+ return ret;
+
+ formats = NULL;
+ for (i = 0; i < FF_ARRAY_ELEMS(freqinfos); i++) {
+ if ((ret = ff_add_format(&formats, freqinfos[i].sample_rate)) < 0)
+ return ret;
+ }
+
+ return ff_set_common_samplerates(ctx, formats);
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ ReplayGainContext *s = ctx->priv;
+ int i;
+
+ for (i = 0; i < FF_ARRAY_ELEMS(freqinfos); i++) {
+ if (freqinfos[i].sample_rate == inlink->sample_rate)
+ break;
+ }
+ av_assert0(i < FF_ARRAY_ELEMS(freqinfos));
+
+ s->yule_coeff_a = freqinfos[i].AYule;
+ s->yule_coeff_b = freqinfos[i].BYule;
+ s->butter_coeff_a = freqinfos[i].AButter;
+ s->butter_coeff_b = freqinfos[i].BButter;
+
+ s->yule_hist_i = 20;
+ s->butter_hist_i = 4;
+ inlink->partial_buf_size =
+ inlink->min_samples =
+ inlink->max_samples = inlink->sample_rate / 20;
+
+ return 0;
+}
+
+/*
+ * Update largest absolute sample value.
+ */
+static void calc_stereo_peak(const float *samples, int nb_samples,
+ float *peak_p)
+{
+ float peak = 0.0;
+
+ while (nb_samples--) {
+ if (samples[0] > peak)
+ peak = samples[0];
+ else if (-samples[0] > peak)
+ peak = -samples[0];
+
+ if (samples[1] > peak)
+ peak = samples[1];
+ else if (-samples[1] > peak)
+ peak = -samples[1];
+
+ samples += 2;
+ }
+
+ *peak_p = FFMAX(peak, *peak_p);
+}
+
+/*
+ * Calculate stereo RMS level. Minimum value is about -100 dB for
+ * digital silence. The 90 dB offset is to compensate for the
+ * normalized float range and 3 dB is for stereo samples.
+ */
+static double calc_stereo_rms(const float *samples, int nb_samples)
+{
+ int count = nb_samples;
+ double sum = 1e-16;
+
+ while (count--) {
+ sum += samples[0] * samples[0] + samples[1] * samples[1];
+ samples += 2;
+ }
+
+ return 10 * log10 (sum / nb_samples) + 90.0 - 3.0;
+}
+
+/*
+ * Optimized implementation of 2nd-order IIR stereo filter.
+ */
+static void butter_filter_stereo_samples(ReplayGainContext *s,
+ float *samples, int nb_samples)
+{
+ const double *coeff_a = s->butter_coeff_a;
+ const double *coeff_b = s->butter_coeff_b;
+ float *hist_a = s->butter_hist_a;
+ float *hist_b = s->butter_hist_b;
+ double left, right;
+ int i, j;
+
+ i = s->butter_hist_i;
+
+ // If filter history is very small magnitude, clear it completely
+ // to prevent denormals from rattling around in there forever
+ // (slowing us down).
+
+ for (j = -4; j < 0; ++j)
+ if (fabs(hist_a[i + j]) > 1e-10 || fabs(hist_b[i + j]) > 1e-10)
+ break;
+
+ if (!j) {
+ memset(s->butter_hist_a, 0, sizeof(s->butter_hist_a));
+ memset(s->butter_hist_b, 0, sizeof(s->butter_hist_b));
+ }
+
+ while (nb_samples--) {
+ left = (hist_b[i ] = samples[0]) * coeff_b[0];
+ right = (hist_b[i + 1] = samples[1]) * coeff_b[0];
+ left += hist_b[i - 2] * coeff_b[1] - hist_a[i - 2] * coeff_a[1];
+ right += hist_b[i - 1] * coeff_b[1] - hist_a[i - 1] * coeff_a[1];
+ left += hist_b[i - 4] * coeff_b[2] - hist_a[i - 4] * coeff_a[2];
+ right += hist_b[i - 3] * coeff_b[2] - hist_a[i - 3] * coeff_a[2];
+ samples[0] = hist_a[i ] = (float) left;
+ samples[1] = hist_a[i + 1] = (float) right;
+ samples += 2;
+
+ if ((i += 2) == 256) {
+ memcpy(hist_a, hist_a + 252, sizeof(*hist_a) * 4);
+ memcpy(hist_b, hist_b + 252, sizeof(*hist_b) * 4);
+ i = 4;
+ }
+ }
+
+ s->butter_hist_i = i;
+}
+
+/*
+ * Optimized implementation of 10th-order IIR stereo filter.
+ */
+static void yule_filter_stereo_samples(ReplayGainContext *s, const float *src,
+ float *dst, int nb_samples)
+{
+ const double *coeff_a = s->yule_coeff_a;
+ const double *coeff_b = s->yule_coeff_b;
+ float *hist_a = s->yule_hist_a;
+ float *hist_b = s->yule_hist_b;
+ double left, right;
+ int i, j;
+
+ i = s->yule_hist_i;
+
+ // If filter history is very small magnitude, clear it completely to
+ // prevent denormals from rattling around in there forever
+ // (slowing us down).
+
+ for (j = -20; j < 0; ++j)
+ if (fabs(hist_a[i + j]) > 1e-10 || fabs(hist_b[i + j]) > 1e-10)
+ break;
+
+ if (!j) {
+ memset(s->yule_hist_a, 0, sizeof(s->yule_hist_a));
+ memset(s->yule_hist_b, 0, sizeof(s->yule_hist_b));
+ }
+
+ while (nb_samples--) {
+ left = (hist_b[i] = src[0]) * coeff_b[0];
+ right = (hist_b[i + 1] = src[1]) * coeff_b[0];
+ left += hist_b[i - 2] * coeff_b[ 1] - hist_a[i - 2] * coeff_a[1 ];
+ right += hist_b[i - 1] * coeff_b[ 1] - hist_a[i - 1] * coeff_a[1 ];
+ left += hist_b[i - 4] * coeff_b[ 2] - hist_a[i - 4] * coeff_a[2 ];
+ right += hist_b[i - 3] * coeff_b[ 2] - hist_a[i - 3] * coeff_a[2 ];
+ left += hist_b[i - 6] * coeff_b[ 3] - hist_a[i - 6] * coeff_a[3 ];
+ right += hist_b[i - 5] * coeff_b[ 3] - hist_a[i - 5] * coeff_a[3 ];
+ left += hist_b[i - 8] * coeff_b[ 4] - hist_a[i - 8] * coeff_a[4 ];
+ right += hist_b[i - 7] * coeff_b[ 4] - hist_a[i - 7] * coeff_a[4 ];
+ left += hist_b[i - 10] * coeff_b[ 5] - hist_a[i - 10] * coeff_a[5 ];
+ right += hist_b[i - 9] * coeff_b[ 5] - hist_a[i - 9] * coeff_a[5 ];
+ left += hist_b[i - 12] * coeff_b[ 6] - hist_a[i - 12] * coeff_a[6 ];
+ right += hist_b[i - 11] * coeff_b[ 6] - hist_a[i - 11] * coeff_a[6 ];
+ left += hist_b[i - 14] * coeff_b[ 7] - hist_a[i - 14] * coeff_a[7 ];
+ right += hist_b[i - 13] * coeff_b[ 7] - hist_a[i - 13] * coeff_a[7 ];
+ left += hist_b[i - 16] * coeff_b[ 8] - hist_a[i - 16] * coeff_a[8 ];
+ right += hist_b[i - 15] * coeff_b[ 8] - hist_a[i - 15] * coeff_a[8 ];
+ left += hist_b[i - 18] * coeff_b[ 9] - hist_a[i - 18] * coeff_a[9 ];
+ right += hist_b[i - 17] * coeff_b[ 9] - hist_a[i - 17] * coeff_a[9 ];
+ left += hist_b[i - 20] * coeff_b[10] - hist_a[i - 20] * coeff_a[10];
+ right += hist_b[i - 19] * coeff_b[10] - hist_a[i - 19] * coeff_a[10];
+ dst[0] = hist_a[i ] = (float)left;
+ dst[1] = hist_a[i + 1] = (float)right;
+ src += 2;
+ dst += 2;
+
+ if ((i += 2) == 256) {
+ memcpy(hist_a, hist_a + 236, sizeof(*hist_a) * 20);
+ memcpy(hist_b, hist_b + 236, sizeof(*hist_b) * 20);
+ i = 20;
+ }
+ }
+
+ s->yule_hist_i = i;
+}
+
+/*
+ * Calculate the ReplayGain value from the specified loudness histogram;
+ * clip to -24 / +64 dB.
+ */
+static float calc_replaygain(uint32_t *histogram)
+{
+ uint32_t loud_count = 0, total_windows = 0;
+ float gain;
+ int i;
+
+ for (i = 0; i < HISTOGRAM_SLOTS; i++)
+ total_windows += histogram [i];
+
+ while (i--)
+ if ((loud_count += histogram [i]) * 20 >= total_windows)
+ break;
+
+ gain = (float)(64.54 - i / 100.0);
+
+ return av_clipf(gain, -24.0, 64.0);
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ ReplayGainContext *s = ctx->priv;
+ uint32_t level;
+ AVFrame *out;
+
+ out = ff_get_audio_buffer(inlink, in->nb_samples);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+
+ calc_stereo_peak((float *)in->data[0],
+ in->nb_samples, &s->peak);
+ yule_filter_stereo_samples(s, (const float *)in->data[0],
+ (float *)out->data[0],
+ out->nb_samples);
+ butter_filter_stereo_samples(s, (float *)out->data[0],
+ out->nb_samples);
+ level = (uint32_t)floor(100 * calc_stereo_rms((float *)out->data[0],
+ out->nb_samples));
+ level = av_clip(level, 0, HISTOGRAM_SLOTS - 1);
+
+ s->histogram[level]++;
+
+ av_frame_free(&out);
+ return ff_filter_frame(outlink, in);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ ReplayGainContext *s = ctx->priv;
+ float gain = calc_replaygain(s->histogram);
+
+ av_log(ctx, AV_LOG_INFO, "track_gain = %+.2f dB\n", gain);
+ av_log(ctx, AV_LOG_INFO, "track_peak = %.6f\n", s->peak);
+}
+
+static const AVFilterPad replaygain_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ { NULL }
+};
+
+static const AVFilterPad replaygain_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_replaygain = {
+ .name = "replaygain",
+ .description = NULL_IF_CONFIG_SMALL("ReplayGain scanner."),
+ .query_formats = query_formats,
+ .uninit = uninit,
+ .priv_size = sizeof(ReplayGainContext),
+ .inputs = replaygain_inputs,
+ .outputs = replaygain_outputs,
+};
diff --git a/libavfilter/af_resample.c b/libavfilter/af_resample.c
index 413b6634cc..e3c6a20696 100644
--- a/libavfilter/af_resample.c
+++ b/libavfilter/af_resample.c
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -90,22 +90,25 @@ static int query_formats(AVFilterContext *ctx)
{
AVFilterLink *inlink = ctx->inputs[0];
AVFilterLink *outlink = ctx->outputs[0];
+ AVFilterFormats *in_formats, *out_formats, *in_samplerates, *out_samplerates;
+ AVFilterChannelLayouts *in_layouts, *out_layouts;
+ int ret;
- AVFilterFormats *in_formats = ff_all_formats(AVMEDIA_TYPE_AUDIO);
- AVFilterFormats *out_formats = ff_all_formats(AVMEDIA_TYPE_AUDIO);
- AVFilterFormats *in_samplerates = ff_all_samplerates();
- AVFilterFormats *out_samplerates = ff_all_samplerates();
- AVFilterChannelLayouts *in_layouts = ff_all_channel_layouts();
- AVFilterChannelLayouts *out_layouts = ff_all_channel_layouts();
-
- ff_formats_ref(in_formats, &inlink->out_formats);
- ff_formats_ref(out_formats, &outlink->in_formats);
-
- ff_formats_ref(in_samplerates, &inlink->out_samplerates);
- ff_formats_ref(out_samplerates, &outlink->in_samplerates);
+ if (!(in_formats = ff_all_formats (AVMEDIA_TYPE_AUDIO)) ||
+ !(out_formats = ff_all_formats (AVMEDIA_TYPE_AUDIO)) ||
+ !(in_samplerates = ff_all_samplerates ( )) ||
+ !(out_samplerates = ff_all_samplerates ( )) ||
+ !(in_layouts = ff_all_channel_layouts ( )) ||
+ !(out_layouts = ff_all_channel_layouts ( )))
+ return AVERROR(ENOMEM);
- ff_channel_layouts_ref(in_layouts, &inlink->out_channel_layouts);
- ff_channel_layouts_ref(out_layouts, &outlink->in_channel_layouts);
+ if ((ret = ff_formats_ref (in_formats, &inlink->out_formats )) < 0 ||
+ (ret = ff_formats_ref (out_formats, &outlink->in_formats )) < 0 ||
+ (ret = ff_formats_ref (in_samplerates, &inlink->out_samplerates )) < 0 ||
+ (ret = ff_formats_ref (out_samplerates, &outlink->in_samplerates )) < 0 ||
+ (ret = ff_channel_layouts_ref (in_layouts, &inlink->out_channel_layouts)) < 0 ||
+ (ret = ff_channel_layouts_ref (out_layouts, &outlink->in_channel_layouts)) < 0)
+ return ret;
return 0;
}
@@ -324,9 +327,9 @@ static const AVClass resample_class = {
static const AVFilterPad avfilter_af_resample_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_AUDIO,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
},
{ NULL }
};
@@ -346,11 +349,9 @@ AVFilter ff_af_resample = {
.description = NULL_IF_CONFIG_SMALL("Audio resampling and conversion."),
.priv_size = sizeof(ResampleContext),
.priv_class = &resample_class,
-
- .init_dict = init,
- .uninit = uninit,
- .query_formats = query_formats,
-
- .inputs = avfilter_af_resample_inputs,
- .outputs = avfilter_af_resample_outputs,
+ .init_dict = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = avfilter_af_resample_inputs,
+ .outputs = avfilter_af_resample_outputs,
};
diff --git a/libavfilter/af_rubberband.c b/libavfilter/af_rubberband.c
new file mode 100644
index 0000000000..ded25449dd
--- /dev/null
+++ b/libavfilter/af_rubberband.c
@@ -0,0 +1,271 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <rubberband/rubberband-c.h>
+
+#include "libavutil/channel_layout.h"
+#include "libavutil/common.h"
+#include "libavutil/opt.h"
+
+#include "audio.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+
+typedef struct RubberBandContext {
+ const AVClass *class;
+ RubberBandState rbs;
+
+ double tempo, pitch;
+ int transients, detector, phase, window,
+ smoothing, formant, opitch, channels;
+ int64_t nb_samples_out;
+ int64_t nb_samples_in;
+ int flushed;
+} RubberBandContext;
+
+#define OFFSET(x) offsetof(RubberBandContext, x)
+#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption rubberband_options[] = {
+ { "tempo", "set tempo scale factor", OFFSET(tempo), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0.01, 100, A },
+ { "pitch", "set pitch scale factor", OFFSET(pitch), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0.01, 100, A },
+ { "transients", "set transients", OFFSET(transients), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, A, "transients" },
+ { "crisp", 0, 0, AV_OPT_TYPE_CONST, {.i64=RubberBandOptionTransientsCrisp}, 0, 0, A, "transients" },
+ { "mixed", 0, 0, AV_OPT_TYPE_CONST, {.i64=RubberBandOptionTransientsMixed}, 0, 0, A, "transients" },
+ { "smooth", 0, 0, AV_OPT_TYPE_CONST, {.i64=RubberBandOptionTransientsSmooth}, 0, 0, A, "transients" },
+ { "detector", "set detector", OFFSET(detector), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, A, "detector" },
+ { "compound", 0, 0, AV_OPT_TYPE_CONST, {.i64=RubberBandOptionDetectorCompound}, 0, 0, A, "detector" },
+ { "percussive", 0, 0, AV_OPT_TYPE_CONST, {.i64=RubberBandOptionDetectorPercussive}, 0, 0, A, "detector" },
+ { "soft", 0, 0, AV_OPT_TYPE_CONST, {.i64=RubberBandOptionDetectorSoft}, 0, 0, A, "detector" },
+ { "phase", "set phase", OFFSET(phase), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, A, "phase" },
+ { "laminar", 0, 0, AV_OPT_TYPE_CONST, {.i64=RubberBandOptionPhaseLaminar}, 0, 0, A, "phase" },
+ { "independent", 0, 0, AV_OPT_TYPE_CONST, {.i64=RubberBandOptionPhaseIndependent}, 0, 0, A, "phase" },
+ { "window", "set window", OFFSET(window), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, A, "window" },
+ { "standard", 0, 0, AV_OPT_TYPE_CONST, {.i64=RubberBandOptionWindowStandard}, 0, 0, A, "window" },
+ { "short", 0, 0, AV_OPT_TYPE_CONST, {.i64=RubberBandOptionWindowShort}, 0, 0, A, "window" },
+ { "long", 0, 0, AV_OPT_TYPE_CONST, {.i64=RubberBandOptionWindowLong}, 0, 0, A, "window" },
+ { "smoothing", "set smoothing", OFFSET(smoothing), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, A, "smoothing" },
+ { "off", 0, 0, AV_OPT_TYPE_CONST, {.i64=RubberBandOptionSmoothingOff}, 0, 0, A, "smoothing" },
+ { "on", 0, 0, AV_OPT_TYPE_CONST, {.i64=RubberBandOptionSmoothingOn}, 0, 0, A, "smoothing" },
+ { "formant", "set formant", OFFSET(formant), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, A, "formant" },
+ { "shifted", 0, 0, AV_OPT_TYPE_CONST, {.i64=RubberBandOptionFormantShifted}, 0, 0, A, "formant" },
+ { "preserved", 0, 0, AV_OPT_TYPE_CONST, {.i64=RubberBandOptionFormantPreserved}, 0, 0, A, "formant" },
+ { "pitchq", "set pitch quality", OFFSET(opitch), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, A, "pitch" },
+ { "quality", 0, 0, AV_OPT_TYPE_CONST, {.i64=RubberBandOptionPitchHighQuality}, 0, 0, A, "pitch" },
+ { "speed", 0, 0, AV_OPT_TYPE_CONST, {.i64=RubberBandOptionPitchHighSpeed}, 0, 0, A, "pitch" },
+ { "consistency", 0, 0, AV_OPT_TYPE_CONST, {.i64=RubberBandOptionPitchHighConsistency}, 0, 0, A, "pitch" },
+ { "channels", "set channels", OFFSET(channels), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, A, "channels" },
+ { "apart", 0, 0, AV_OPT_TYPE_CONST, {.i64=RubberBandOptionChannelsApart}, 0, 0, A, "channels" },
+ { "together", 0, 0, AV_OPT_TYPE_CONST, {.i64=RubberBandOptionChannelsTogether}, 0, 0, A, "channels" },
+ { NULL },
+};
+
+AVFILTER_DEFINE_CLASS(rubberband);
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ RubberBandContext *s = ctx->priv;
+
+ if (s->rbs)
+ rubberband_delete(s->rbs);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *formats = NULL;
+ AVFilterChannelLayouts *layouts = NULL;
+ static const enum AVSampleFormat sample_fmts[] = {
+ AV_SAMPLE_FMT_FLTP,
+ AV_SAMPLE_FMT_NONE,
+ };
+ int ret;
+
+ layouts = ff_all_channel_counts();
+ if (!layouts)
+ return AVERROR(ENOMEM);
+ ret = ff_set_common_channel_layouts(ctx, layouts);
+ if (ret < 0)
+ return ret;
+
+ formats = ff_make_format_list(sample_fmts);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ret = ff_set_common_formats(ctx, formats);
+ if (ret < 0)
+ return ret;
+
+ formats = ff_all_samplerates();
+ if (!formats)
+ return AVERROR(ENOMEM);
+ return ff_set_common_samplerates(ctx, formats);
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ RubberBandContext *s = inlink->dst->priv;
+ AVFilterLink *outlink = inlink->dst->outputs[0];
+ AVFrame *out;
+ int ret = 0, nb_samples;
+
+ rubberband_process(s->rbs, (const float *const *)in->data, in->nb_samples, 0);
+ s->nb_samples_in += in->nb_samples;
+
+ nb_samples = rubberband_available(s->rbs);
+ if (nb_samples > 0) {
+ out = ff_get_audio_buffer(inlink, nb_samples);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ out->pts = av_rescale_q(s->nb_samples_out,
+ (AVRational){ 1, outlink->sample_rate },
+ outlink->time_base);
+ nb_samples = rubberband_retrieve(s->rbs, (float *const *)out->data, nb_samples);
+ out->nb_samples = nb_samples;
+ ret = ff_filter_frame(outlink, out);
+ s->nb_samples_out += nb_samples;
+ }
+
+ av_frame_free(&in);
+ return ret;
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ RubberBandContext *s = ctx->priv;
+ int opts = s->transients|s->detector|s->phase|s->window|
+ s->smoothing|s->formant|s->opitch|s->channels|
+ RubberBandOptionProcessRealTime;
+
+ if (s->rbs)
+ rubberband_delete(s->rbs);
+ s->rbs = rubberband_new(inlink->sample_rate, inlink->channels, opts, 1. / s->tempo, s->pitch);
+
+ inlink->partial_buf_size =
+ inlink->min_samples =
+ inlink->max_samples = rubberband_get_samples_required(s->rbs);
+
+ return 0;
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ RubberBandContext *s = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
+ int ret = 0;
+
+ ret = ff_request_frame(ctx->inputs[0]);
+
+ if (ret == AVERROR_EOF && !s->flushed) {
+ if (rubberband_available(s->rbs) > 0) {
+ AVFrame *out = ff_get_audio_buffer(inlink, 1);
+ int nb_samples;
+
+ if (!out)
+ return AVERROR(ENOMEM);
+
+ rubberband_process(s->rbs, (const float *const *)out->data, 1, 1);
+ av_frame_free(&out);
+ nb_samples = rubberband_available(s->rbs);
+
+ if (nb_samples > 0) {
+ out = ff_get_audio_buffer(inlink, nb_samples);
+ if (!out)
+ return AVERROR(ENOMEM);
+ out->pts = av_rescale_q(s->nb_samples_out,
+ (AVRational){ 1, outlink->sample_rate },
+ outlink->time_base);
+ nb_samples = rubberband_retrieve(s->rbs, (float *const *)out->data, nb_samples);
+ out->nb_samples = nb_samples;
+ ret = ff_filter_frame(outlink, out);
+ s->nb_samples_out += nb_samples;
+ }
+ }
+ s->flushed = 1;
+ av_log(ctx, AV_LOG_DEBUG, "nb_samples_in %"PRId64" nb_samples_out %"PRId64"\n",
+ s->nb_samples_in, s->nb_samples_out);
+ }
+
+ return ret;
+}
+
+static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
+ char *res, int res_len, int flags)
+{
+ RubberBandContext *s = ctx->priv;
+
+ if (!strcmp(cmd, "tempo")) {
+ double arg;
+
+ sscanf(args, "%lf", &arg);
+ if (arg < 0.01 || arg > 100) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Tempo scale factor '%f' out of range\n", arg);
+ return AVERROR(EINVAL);
+ }
+ rubberband_set_time_ratio(s->rbs, 1. / arg);
+ }
+
+ if (!strcmp(cmd, "pitch")) {
+ double arg;
+
+ sscanf(args, "%lf", &arg);
+ if (arg < 0.01 || arg > 100) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Pitch scale factor '%f' out of range\n", arg);
+ return AVERROR(EINVAL);
+ }
+ rubberband_set_pitch_scale(s->rbs, arg);
+ }
+
+ return 0;
+}
+
+static const AVFilterPad rubberband_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad rubberband_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_rubberband = {
+ .name = "rubberband",
+ .description = NULL_IF_CONFIG_SMALL("Apply time-stretching and pitch-shifting."),
+ .query_formats = query_formats,
+ .priv_size = sizeof(RubberBandContext),
+ .priv_class = &rubberband_class,
+ .uninit = uninit,
+ .inputs = rubberband_inputs,
+ .outputs = rubberband_outputs,
+ .process_command = process_command,
+};
diff --git a/libavfilter/af_sidechaincompress.c b/libavfilter/af_sidechaincompress.c
new file mode 100644
index 0000000000..3f540e2dff
--- /dev/null
+++ b/libavfilter/af_sidechaincompress.c
@@ -0,0 +1,450 @@
+/*
+ * Copyright (C) 2001-2010 Krzysztof Foltman, Markus Schmidt, Thor Harald Johansen and others
+ * Copyright (c) 2015 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Audio (Sidechain) Compressor filter
+ */
+
+#include "libavutil/audio_fifo.h"
+#include "libavutil/avassert.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/common.h"
+#include "libavutil/opt.h"
+
+#include "audio.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "hermite.h"
+#include "internal.h"
+
+typedef struct SidechainCompressContext {
+ const AVClass *class;
+
+ double level_in;
+ double level_sc;
+ double attack, attack_coeff;
+ double release, release_coeff;
+ double lin_slope;
+ double ratio;
+ double threshold;
+ double makeup;
+ double mix;
+ double thres;
+ double knee;
+ double knee_start;
+ double knee_stop;
+ double lin_knee_start;
+ double adj_knee_start;
+ double compressed_knee_stop;
+ int link;
+ int detection;
+
+ AVAudioFifo *fifo[2];
+ int64_t pts;
+} SidechainCompressContext;
+
+#define OFFSET(x) offsetof(SidechainCompressContext, x)
+#define A AV_OPT_FLAG_AUDIO_PARAM
+#define F AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption options[] = {
+ { "level_in", "set input gain", OFFSET(level_in), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0.015625, 64, A|F },
+ { "threshold", "set threshold", OFFSET(threshold), AV_OPT_TYPE_DOUBLE, {.dbl=0.125}, 0.000976563, 1, A|F },
+ { "ratio", "set ratio", OFFSET(ratio), AV_OPT_TYPE_DOUBLE, {.dbl=2}, 1, 20, A|F },
+ { "attack", "set attack", OFFSET(attack), AV_OPT_TYPE_DOUBLE, {.dbl=20}, 0.01, 2000, A|F },
+ { "release", "set release", OFFSET(release), AV_OPT_TYPE_DOUBLE, {.dbl=250}, 0.01, 9000, A|F },
+ { "makeup", "set make up gain", OFFSET(makeup), AV_OPT_TYPE_DOUBLE, {.dbl=2}, 1, 64, A|F },
+ { "knee", "set knee", OFFSET(knee), AV_OPT_TYPE_DOUBLE, {.dbl=2.82843}, 1, 8, A|F },
+ { "link", "set link type", OFFSET(link), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, A|F, "link" },
+ { "average", 0, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, A|F, "link" },
+ { "maximum", 0, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, A|F, "link" },
+ { "detection", "set detection", OFFSET(detection), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, A|F, "detection" },
+ { "peak", 0, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, A|F, "detection" },
+ { "rms", 0, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, A|F, "detection" },
+ { "level_sc", "set sidechain gain", OFFSET(level_sc), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0.015625, 64, A|F },
+ { "mix", "set mix", OFFSET(mix), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, A|F },
+ { NULL }
+};
+
+#define sidechaincompress_options options
+AVFILTER_DEFINE_CLASS(sidechaincompress);
+
+// A fake infinity value (because real infinity may break some hosts)
+#define FAKE_INFINITY (65536.0 * 65536.0)
+
+// Check for infinity (with appropriate-ish tolerance)
+#define IS_FAKE_INFINITY(value) (fabs(value-FAKE_INFINITY) < 1.0)
+
+static double output_gain(double lin_slope, double ratio, double thres,
+ double knee, double knee_start, double knee_stop,
+ double compressed_knee_stop, int detection)
+{
+ double slope = log(lin_slope);
+ double gain = 0.0;
+ double delta = 0.0;
+
+ if (detection)
+ slope *= 0.5;
+
+ if (IS_FAKE_INFINITY(ratio)) {
+ gain = thres;
+ delta = 0.0;
+ } else {
+ gain = (slope - thres) / ratio + thres;
+ delta = 1.0 / ratio;
+ }
+
+ if (knee > 1.0 && slope < knee_stop)
+ gain = hermite_interpolation(slope, knee_start, knee_stop,
+ knee_start, compressed_knee_stop,
+ 1.0, delta);
+
+ return exp(gain - slope);
+}
+
+static int compressor_config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ SidechainCompressContext *s = ctx->priv;
+
+ s->thres = log(s->threshold);
+ s->lin_knee_start = s->threshold / sqrt(s->knee);
+ s->adj_knee_start = s->lin_knee_start * s->lin_knee_start;
+ s->knee_start = log(s->lin_knee_start);
+ s->knee_stop = log(s->threshold * sqrt(s->knee));
+ s->compressed_knee_stop = (s->knee_stop - s->thres) / s->ratio + s->thres;
+
+ s->attack_coeff = FFMIN(1., 1. / (s->attack * outlink->sample_rate / 4000.));
+ s->release_coeff = FFMIN(1., 1. / (s->release * outlink->sample_rate / 4000.));
+
+ return 0;
+}
+
+static void compressor(SidechainCompressContext *s,
+ const double *src, double *dst, const double *scsrc, int nb_samples,
+ double level_in, double level_sc,
+ AVFilterLink *inlink, AVFilterLink *sclink)
+{
+ const double makeup = s->makeup;
+ const double mix = s->mix;
+ int i, c;
+
+ for (i = 0; i < nb_samples; i++) {
+ double abs_sample, gain = 1.0;
+
+ abs_sample = fabs(scsrc[0] * level_sc);
+
+ if (s->link == 1) {
+ for (c = 1; c < sclink->channels; c++)
+ abs_sample = FFMAX(fabs(scsrc[c] * level_sc), abs_sample);
+ } else {
+ for (c = 1; c < sclink->channels; c++)
+ abs_sample += fabs(scsrc[c] * level_sc);
+
+ abs_sample /= sclink->channels;
+ }
+
+ if (s->detection)
+ abs_sample *= abs_sample;
+
+ s->lin_slope += (abs_sample - s->lin_slope) * (abs_sample > s->lin_slope ? s->attack_coeff : s->release_coeff);
+
+ if (s->lin_slope > 0.0 && s->lin_slope > (s->detection ? s->adj_knee_start : s->lin_knee_start))
+ gain = output_gain(s->lin_slope, s->ratio, s->thres, s->knee,
+ s->knee_start, s->knee_stop,
+ s->compressed_knee_stop, s->detection);
+
+ for (c = 0; c < inlink->channels; c++)
+ dst[c] = src[c] * level_in * (gain * makeup * mix + (1. - mix));
+
+ src += inlink->channels;
+ dst += inlink->channels;
+ scsrc += sclink->channels;
+ }
+}
+
+#if CONFIG_SIDECHAINCOMPRESS_FILTER
+static int filter_frame(AVFilterLink *link, AVFrame *frame)
+{
+ AVFilterContext *ctx = link->dst;
+ SidechainCompressContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFrame *out = NULL, *in[2] = { NULL };
+ double *dst;
+ int nb_samples;
+ int i;
+
+ for (i = 0; i < 2; i++)
+ if (link == ctx->inputs[i])
+ break;
+ av_assert0(i < 2);
+ av_audio_fifo_write(s->fifo[i], (void **)frame->extended_data,
+ frame->nb_samples);
+ av_frame_free(&frame);
+
+ nb_samples = FFMIN(av_audio_fifo_size(s->fifo[0]), av_audio_fifo_size(s->fifo[1]));
+ if (!nb_samples)
+ return 0;
+
+ out = ff_get_audio_buffer(outlink, nb_samples);
+ if (!out)
+ return AVERROR(ENOMEM);
+ for (i = 0; i < 2; i++) {
+ in[i] = ff_get_audio_buffer(ctx->inputs[i], nb_samples);
+ if (!in[i]) {
+ av_frame_free(&in[0]);
+ av_frame_free(&in[1]);
+ av_frame_free(&out);
+ return AVERROR(ENOMEM);
+ }
+ av_audio_fifo_read(s->fifo[i], (void **)in[i]->data, nb_samples);
+ }
+
+ dst = (double *)out->data[0];
+ out->pts = s->pts;
+ s->pts += nb_samples;
+
+ compressor(s, (double *)in[0]->data[0], dst,
+ (double *)in[1]->data[0], nb_samples,
+ s->level_in, s->level_sc,
+ ctx->inputs[0], ctx->inputs[1]);
+
+ av_frame_free(&in[0]);
+ av_frame_free(&in[1]);
+
+ return ff_filter_frame(outlink, out);
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ SidechainCompressContext *s = ctx->priv;
+ int i;
+
+ /* get a frame on each input */
+ for (i = 0; i < 2; i++) {
+ AVFilterLink *inlink = ctx->inputs[i];
+ if (!av_audio_fifo_size(s->fifo[i]))
+ return ff_request_frame(inlink);
+ }
+
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *formats;
+ AVFilterChannelLayouts *layouts = NULL;
+ static const enum AVSampleFormat sample_fmts[] = {
+ AV_SAMPLE_FMT_DBL,
+ AV_SAMPLE_FMT_NONE
+ };
+ int ret, i;
+
+ if (!ctx->inputs[0]->in_channel_layouts ||
+ !ctx->inputs[0]->in_channel_layouts->nb_channel_layouts) {
+ av_log(ctx, AV_LOG_WARNING,
+ "No channel layout for input 1\n");
+ return AVERROR(EAGAIN);
+ }
+
+ if ((ret = ff_add_channel_layout(&layouts, ctx->inputs[0]->in_channel_layouts->channel_layouts[0])) < 0 ||
+ (ret = ff_channel_layouts_ref(layouts, &ctx->outputs[0]->in_channel_layouts)) < 0)
+ return ret;
+
+ for (i = 0; i < 2; i++) {
+ layouts = ff_all_channel_counts();
+ if ((ret = ff_channel_layouts_ref(layouts, &ctx->inputs[i]->out_channel_layouts)) < 0)
+ return ret;
+ }
+
+ formats = ff_make_format_list(sample_fmts);
+ if ((ret = ff_set_common_formats(ctx, formats)) < 0)
+ return ret;
+
+ formats = ff_all_samplerates();
+ return ff_set_common_samplerates(ctx, formats);
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ SidechainCompressContext *s = ctx->priv;
+
+ if (ctx->inputs[0]->sample_rate != ctx->inputs[1]->sample_rate) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Inputs must have the same sample rate "
+ "%d for in0 vs %d for in1\n",
+ ctx->inputs[0]->sample_rate, ctx->inputs[1]->sample_rate);
+ return AVERROR(EINVAL);
+ }
+
+ outlink->sample_rate = ctx->inputs[0]->sample_rate;
+ outlink->time_base = ctx->inputs[0]->time_base;
+ outlink->channel_layout = ctx->inputs[0]->channel_layout;
+ outlink->channels = ctx->inputs[0]->channels;
+
+ s->fifo[0] = av_audio_fifo_alloc(ctx->inputs[0]->format, ctx->inputs[0]->channels, 1024);
+ s->fifo[1] = av_audio_fifo_alloc(ctx->inputs[1]->format, ctx->inputs[1]->channels, 1024);
+ if (!s->fifo[0] || !s->fifo[1])
+ return AVERROR(ENOMEM);
+
+ compressor_config_output(outlink);
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ SidechainCompressContext *s = ctx->priv;
+
+ av_audio_fifo_free(s->fifo[0]);
+ av_audio_fifo_free(s->fifo[1]);
+}
+
+static const AVFilterPad sidechaincompress_inputs[] = {
+ {
+ .name = "main",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ },{
+ .name = "sidechain",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad sidechaincompress_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_output,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_sidechaincompress = {
+ .name = "sidechaincompress",
+ .description = NULL_IF_CONFIG_SMALL("Sidechain compressor."),
+ .priv_size = sizeof(SidechainCompressContext),
+ .priv_class = &sidechaincompress_class,
+ .query_formats = query_formats,
+ .uninit = uninit,
+ .inputs = sidechaincompress_inputs,
+ .outputs = sidechaincompress_outputs,
+};
+#endif /* CONFIG_SIDECHAINCOMPRESS_FILTER */
+
+#if CONFIG_ACOMPRESSOR_FILTER
+static int acompressor_filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ const double *src = (const double *)in->data[0];
+ AVFilterContext *ctx = inlink->dst;
+ SidechainCompressContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFrame *out;
+ double *dst;
+
+ if (av_frame_is_writable(in)) {
+ out = in;
+ } else {
+ out = ff_get_audio_buffer(inlink, in->nb_samples);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+ }
+ dst = (double *)out->data[0];
+
+ compressor(s, src, dst, src, in->nb_samples,
+ s->level_in, s->level_in,
+ inlink, inlink);
+
+ if (out != in)
+ av_frame_free(&in);
+ return ff_filter_frame(outlink, out);
+}
+
+static int acompressor_query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *formats;
+ AVFilterChannelLayouts *layouts;
+ static const enum AVSampleFormat sample_fmts[] = {
+ AV_SAMPLE_FMT_DBL,
+ AV_SAMPLE_FMT_NONE
+ };
+ int ret;
+
+ layouts = ff_all_channel_counts();
+ if (!layouts)
+ return AVERROR(ENOMEM);
+ ret = ff_set_common_channel_layouts(ctx, layouts);
+ if (ret < 0)
+ return ret;
+
+ formats = ff_make_format_list(sample_fmts);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ret = ff_set_common_formats(ctx, formats);
+ if (ret < 0)
+ return ret;
+
+ formats = ff_all_samplerates();
+ if (!formats)
+ return AVERROR(ENOMEM);
+ return ff_set_common_samplerates(ctx, formats);
+}
+
+#define acompressor_options options
+AVFILTER_DEFINE_CLASS(acompressor);
+
+static const AVFilterPad acompressor_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = acompressor_filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad acompressor_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = compressor_config_output,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_acompressor = {
+ .name = "acompressor",
+ .description = NULL_IF_CONFIG_SMALL("Audio compressor."),
+ .priv_size = sizeof(SidechainCompressContext),
+ .priv_class = &acompressor_class,
+ .query_formats = acompressor_query_formats,
+ .inputs = acompressor_inputs,
+ .outputs = acompressor_outputs,
+};
+#endif /* CONFIG_ACOMPRESSOR_FILTER */
diff --git a/libavfilter/af_silencedetect.c b/libavfilter/af_silencedetect.c
new file mode 100644
index 0000000000..b048d63738
--- /dev/null
+++ b/libavfilter/af_silencedetect.c
@@ -0,0 +1,215 @@
+/*
+ * Copyright (c) 2012 Clément Bœsch <u pkh me>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Audio silence detector
+ */
+
+#include <float.h> /* DBL_MAX */
+
+#include "libavutil/opt.h"
+#include "libavutil/timestamp.h"
+#include "audio.h"
+#include "formats.h"
+#include "avfilter.h"
+#include "internal.h"
+
+typedef struct SilenceDetectContext {
+ const AVClass *class;
+ double noise; ///< noise amplitude ratio
+ double duration; ///< minimum duration of silence until notification
+ int64_t nb_null_samples; ///< current number of continuous zero samples
+ int64_t start; ///< if silence is detected, this value contains the time of the first zero sample
+ int last_sample_rate; ///< last sample rate to check for sample rate changes
+
+ void (*silencedetect)(struct SilenceDetectContext *s, AVFrame *insamples,
+ int nb_samples, int64_t nb_samples_notify,
+ AVRational time_base);
+} SilenceDetectContext;
+
+#define OFFSET(x) offsetof(SilenceDetectContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_AUDIO_PARAM
+static const AVOption silencedetect_options[] = {
+ { "n", "set noise tolerance", OFFSET(noise), AV_OPT_TYPE_DOUBLE, {.dbl=0.001}, 0, DBL_MAX, FLAGS },
+ { "noise", "set noise tolerance", OFFSET(noise), AV_OPT_TYPE_DOUBLE, {.dbl=0.001}, 0, DBL_MAX, FLAGS },
+ { "d", "set minimum duration in seconds", OFFSET(duration), AV_OPT_TYPE_DOUBLE, {.dbl=2.}, 0, 24*60*60, FLAGS },
+ { "duration", "set minimum duration in seconds", OFFSET(duration), AV_OPT_TYPE_DOUBLE, {.dbl=2.}, 0, 24*60*60, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(silencedetect);
+
+static char *get_metadata_val(AVFrame *insamples, const char *key)
+{
+ AVDictionaryEntry *e = av_dict_get(insamples->metadata, key, NULL, 0);
+ return e && e->value ? e->value : NULL;
+}
+
+static av_always_inline void update(SilenceDetectContext *s, AVFrame *insamples,
+ int is_silence, int64_t nb_samples_notify,
+ AVRational time_base)
+{
+ if (is_silence) {
+ if (!s->start) {
+ s->nb_null_samples++;
+ if (s->nb_null_samples >= nb_samples_notify) {
+ s->start = insamples->pts - (int64_t)(s->duration / av_q2d(time_base) + .5);
+ av_dict_set(&insamples->metadata, "lavfi.silence_start",
+ av_ts2timestr(s->start, &time_base), 0);
+ av_log(s, AV_LOG_INFO, "silence_start: %s\n",
+ get_metadata_val(insamples, "lavfi.silence_start"));
+ }
+ }
+ } else {
+ if (s->start) {
+ av_dict_set(&insamples->metadata, "lavfi.silence_end",
+ av_ts2timestr(insamples->pts, &time_base), 0);
+ av_dict_set(&insamples->metadata, "lavfi.silence_duration",
+ av_ts2timestr(insamples->pts - s->start, &time_base), 0);
+ av_log(s, AV_LOG_INFO,
+ "silence_end: %s | silence_duration: %s\n",
+ get_metadata_val(insamples, "lavfi.silence_end"),
+ get_metadata_val(insamples, "lavfi.silence_duration"));
+ }
+ s->nb_null_samples = s->start = 0;
+ }
+}
+
+#define SILENCE_DETECT(name, type) \
+static void silencedetect_##name(SilenceDetectContext *s, AVFrame *insamples, \
+ int nb_samples, int64_t nb_samples_notify, \
+ AVRational time_base) \
+{ \
+ const type *p = (const type *)insamples->data[0]; \
+ const type noise = s->noise; \
+ int i; \
+ \
+ for (i = 0; i < nb_samples; i++, p++) \
+ update(s, insamples, *p < noise && *p > -noise, \
+ nb_samples_notify, time_base); \
+}
+
+SILENCE_DETECT(dbl, double)
+SILENCE_DETECT(flt, float)
+SILENCE_DETECT(s32, int32_t)
+SILENCE_DETECT(s16, int16_t)
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ SilenceDetectContext *s = ctx->priv;
+
+ switch (inlink->format) {
+ case AV_SAMPLE_FMT_DBL: s->silencedetect = silencedetect_dbl; break;
+ case AV_SAMPLE_FMT_FLT: s->silencedetect = silencedetect_flt; break;
+ case AV_SAMPLE_FMT_S32:
+ s->noise *= INT32_MAX;
+ s->silencedetect = silencedetect_s32;
+ break;
+ case AV_SAMPLE_FMT_S16:
+ s->noise *= INT16_MAX;
+ s->silencedetect = silencedetect_s16;
+ break;
+ }
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
+{
+ SilenceDetectContext *s = inlink->dst->priv;
+ const int nb_channels = inlink->channels;
+ const int srate = inlink->sample_rate;
+ const int nb_samples = insamples->nb_samples * nb_channels;
+ const int64_t nb_samples_notify = srate * s->duration * nb_channels;
+
+ // scale number of null samples to the new sample rate
+ if (s->last_sample_rate && s->last_sample_rate != srate)
+ s->nb_null_samples = srate * s->nb_null_samples / s->last_sample_rate;
+ s->last_sample_rate = srate;
+
+ // TODO: document metadata
+ s->silencedetect(s, insamples, nb_samples, nb_samples_notify,
+ inlink->time_base);
+
+ return ff_filter_frame(inlink->dst->outputs[0], insamples);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *formats = NULL;
+ AVFilterChannelLayouts *layouts = NULL;
+ static const enum AVSampleFormat sample_fmts[] = {
+ AV_SAMPLE_FMT_DBL,
+ AV_SAMPLE_FMT_FLT,
+ AV_SAMPLE_FMT_S32,
+ AV_SAMPLE_FMT_S16,
+ AV_SAMPLE_FMT_NONE
+ };
+ int ret;
+
+ layouts = ff_all_channel_layouts();
+ if (!layouts)
+ return AVERROR(ENOMEM);
+ ret = ff_set_common_channel_layouts(ctx, layouts);
+ if (ret < 0)
+ return ret;
+
+ formats = ff_make_format_list(sample_fmts);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ret = ff_set_common_formats(ctx, formats);
+ if (ret < 0)
+ return ret;
+
+ formats = ff_all_samplerates();
+ if (!formats)
+ return AVERROR(ENOMEM);
+ return ff_set_common_samplerates(ctx, formats);
+}
+
+static const AVFilterPad silencedetect_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad silencedetect_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_silencedetect = {
+ .name = "silencedetect",
+ .description = NULL_IF_CONFIG_SMALL("Detect silence."),
+ .priv_size = sizeof(SilenceDetectContext),
+ .query_formats = query_formats,
+ .inputs = silencedetect_inputs,
+ .outputs = silencedetect_outputs,
+ .priv_class = &silencedetect_class,
+};
diff --git a/libavfilter/af_silenceremove.c b/libavfilter/af_silenceremove.c
new file mode 100644
index 0000000000..f156d1883d
--- /dev/null
+++ b/libavfilter/af_silenceremove.c
@@ -0,0 +1,516 @@
+/*
+ * Copyright (c) 2001 Heikki Leinonen
+ * Copyright (c) 2001 Chris Bagwell
+ * Copyright (c) 2003 Donnie Smith
+ * Copyright (c) 2014 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <float.h> /* DBL_MAX */
+
+#include "libavutil/opt.h"
+#include "libavutil/timestamp.h"
+#include "audio.h"
+#include "formats.h"
+#include "avfilter.h"
+#include "internal.h"
+
+enum SilenceMode {
+ SILENCE_TRIM,
+ SILENCE_TRIM_FLUSH,
+ SILENCE_COPY,
+ SILENCE_COPY_FLUSH,
+ SILENCE_STOP
+};
+
+typedef struct SilenceRemoveContext {
+ const AVClass *class;
+
+ enum SilenceMode mode;
+
+ int start_periods;
+ int64_t start_duration;
+ double start_threshold;
+
+ int stop_periods;
+ int64_t stop_duration;
+ double stop_threshold;
+
+ double *start_holdoff;
+ size_t start_holdoff_offset;
+ size_t start_holdoff_end;
+ int start_found_periods;
+
+ double *stop_holdoff;
+ size_t stop_holdoff_offset;
+ size_t stop_holdoff_end;
+ int stop_found_periods;
+
+ double window_ratio;
+ double *window;
+ double *window_current;
+ double *window_end;
+ int window_size;
+ double sum;
+
+ int leave_silence;
+ int restart;
+ int64_t next_pts;
+
+ int detection;
+ void (*update)(struct SilenceRemoveContext *s, double sample);
+ double(*compute)(struct SilenceRemoveContext *s, double sample);
+} SilenceRemoveContext;
+
+#define OFFSET(x) offsetof(SilenceRemoveContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_AUDIO_PARAM
+static const AVOption silenceremove_options[] = {
+ { "start_periods", NULL, OFFSET(start_periods), AV_OPT_TYPE_INT, {.i64=0}, 0, 9000, FLAGS },
+ { "start_duration", NULL, OFFSET(start_duration), AV_OPT_TYPE_DURATION, {.i64=0}, 0, 9000, FLAGS },
+ { "start_threshold", NULL, OFFSET(start_threshold), AV_OPT_TYPE_DOUBLE, {.dbl=0}, 0, DBL_MAX, FLAGS },
+ { "stop_periods", NULL, OFFSET(stop_periods), AV_OPT_TYPE_INT, {.i64=0}, -9000, 9000, FLAGS },
+ { "stop_duration", NULL, OFFSET(stop_duration), AV_OPT_TYPE_DURATION, {.i64=0}, 0, 9000, FLAGS },
+ { "stop_threshold", NULL, OFFSET(stop_threshold), AV_OPT_TYPE_DOUBLE, {.dbl=0}, 0, DBL_MAX, FLAGS },
+ { "leave_silence", NULL, OFFSET(leave_silence), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS },
+ { "detection", NULL, OFFSET(detection), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS, "detection" },
+ { "peak", 0, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "detection" },
+ { "rms", 0, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "detection" },
+ { "window", NULL, OFFSET(window_ratio), AV_OPT_TYPE_DOUBLE, {.dbl=0.02}, 0, 10, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(silenceremove);
+
+static double compute_peak(SilenceRemoveContext *s, double sample)
+{
+ double new_sum;
+
+ new_sum = s->sum;
+ new_sum -= *s->window_current;
+ new_sum += fabs(sample);
+
+ return new_sum / s->window_size;
+}
+
+static void update_peak(SilenceRemoveContext *s, double sample)
+{
+ s->sum -= *s->window_current;
+ *s->window_current = fabs(sample);
+ s->sum += *s->window_current;
+
+ s->window_current++;
+ if (s->window_current >= s->window_end)
+ s->window_current = s->window;
+}
+
+static double compute_rms(SilenceRemoveContext *s, double sample)
+{
+ double new_sum;
+
+ new_sum = s->sum;
+ new_sum -= *s->window_current;
+ new_sum += sample * sample;
+
+ return sqrt(new_sum / s->window_size);
+}
+
+static void update_rms(SilenceRemoveContext *s, double sample)
+{
+ s->sum -= *s->window_current;
+ *s->window_current = sample * sample;
+ s->sum += *s->window_current;
+
+ s->window_current++;
+ if (s->window_current >= s->window_end)
+ s->window_current = s->window;
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ SilenceRemoveContext *s = ctx->priv;
+
+ if (s->stop_periods < 0) {
+ s->stop_periods = -s->stop_periods;
+ s->restart = 1;
+ }
+
+ switch (s->detection) {
+ case 0:
+ s->update = update_peak;
+ s->compute = compute_peak;
+ break;
+ case 1:
+ s->update = update_rms;
+ s->compute = compute_rms;
+ break;
+ };
+
+ return 0;
+}
+
+static void clear_window(SilenceRemoveContext *s)
+{
+ memset(s->window, 0, s->window_size * sizeof(*s->window));
+
+ s->window_current = s->window;
+ s->window_end = s->window + s->window_size;
+ s->sum = 0;
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ SilenceRemoveContext *s = ctx->priv;
+
+ s->window_size = FFMAX((inlink->sample_rate * s->window_ratio), 1) * inlink->channels;
+ s->window = av_malloc_array(s->window_size, sizeof(*s->window));
+ if (!s->window)
+ return AVERROR(ENOMEM);
+
+ clear_window(s);
+
+ s->start_duration = av_rescale(s->start_duration, inlink->sample_rate,
+ AV_TIME_BASE);
+ s->stop_duration = av_rescale(s->stop_duration, inlink->sample_rate,
+ AV_TIME_BASE);
+
+ s->start_holdoff = av_malloc_array(FFMAX(s->start_duration, 1),
+ sizeof(*s->start_holdoff) *
+ inlink->channels);
+ if (!s->start_holdoff)
+ return AVERROR(ENOMEM);
+
+ s->start_holdoff_offset = 0;
+ s->start_holdoff_end = 0;
+ s->start_found_periods = 0;
+
+ s->stop_holdoff = av_malloc_array(FFMAX(s->stop_duration, 1),
+ sizeof(*s->stop_holdoff) *
+ inlink->channels);
+ if (!s->stop_holdoff)
+ return AVERROR(ENOMEM);
+
+ s->stop_holdoff_offset = 0;
+ s->stop_holdoff_end = 0;
+ s->stop_found_periods = 0;
+
+ if (s->start_periods)
+ s->mode = SILENCE_TRIM;
+ else
+ s->mode = SILENCE_COPY;
+
+ return 0;
+}
+
+static void flush(AVFrame *out, AVFilterLink *outlink,
+ int *nb_samples_written, int *ret)
+{
+ if (*nb_samples_written) {
+ out->nb_samples = *nb_samples_written / outlink->channels;
+ *ret = ff_filter_frame(outlink, out);
+ *nb_samples_written = 0;
+ } else {
+ av_frame_free(&out);
+ }
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ SilenceRemoveContext *s = ctx->priv;
+ int i, j, threshold, ret = 0;
+ int nbs, nb_samples_read, nb_samples_written;
+ double *obuf, *ibuf = (double *)in->data[0];
+ AVFrame *out;
+
+ nb_samples_read = nb_samples_written = 0;
+
+ switch (s->mode) {
+ case SILENCE_TRIM:
+silence_trim:
+ nbs = in->nb_samples - nb_samples_read / inlink->channels;
+ if (!nbs)
+ break;
+
+ for (i = 0; i < nbs; i++) {
+ threshold = 0;
+ for (j = 0; j < inlink->channels; j++) {
+ threshold |= s->compute(s, ibuf[j]) > s->start_threshold;
+ }
+
+ if (threshold) {
+ for (j = 0; j < inlink->channels; j++) {
+ s->update(s, *ibuf);
+ s->start_holdoff[s->start_holdoff_end++] = *ibuf++;
+ }
+ nb_samples_read += inlink->channels;
+
+ if (s->start_holdoff_end >= s->start_duration * inlink->channels) {
+ if (++s->start_found_periods >= s->start_periods) {
+ s->mode = SILENCE_TRIM_FLUSH;
+ goto silence_trim_flush;
+ }
+
+ s->start_holdoff_offset = 0;
+ s->start_holdoff_end = 0;
+ }
+ } else {
+ s->start_holdoff_end = 0;
+
+ for (j = 0; j < inlink->channels; j++)
+ s->update(s, ibuf[j]);
+
+ ibuf += inlink->channels;
+ nb_samples_read += inlink->channels;
+ }
+ }
+ break;
+
+ case SILENCE_TRIM_FLUSH:
+silence_trim_flush:
+ nbs = s->start_holdoff_end - s->start_holdoff_offset;
+ nbs -= nbs % inlink->channels;
+ if (!nbs)
+ break;
+
+ out = ff_get_audio_buffer(inlink, nbs / inlink->channels);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+
+ memcpy(out->data[0], &s->start_holdoff[s->start_holdoff_offset],
+ nbs * sizeof(double));
+ s->start_holdoff_offset += nbs;
+
+ ret = ff_filter_frame(outlink, out);
+
+ if (s->start_holdoff_offset == s->start_holdoff_end) {
+ s->start_holdoff_offset = 0;
+ s->start_holdoff_end = 0;
+ s->mode = SILENCE_COPY;
+ goto silence_copy;
+ }
+ break;
+
+ case SILENCE_COPY:
+silence_copy:
+ nbs = in->nb_samples - nb_samples_read / inlink->channels;
+ if (!nbs)
+ break;
+
+ out = ff_get_audio_buffer(inlink, nbs);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ obuf = (double *)out->data[0];
+
+ if (s->stop_periods) {
+ for (i = 0; i < nbs; i++) {
+ threshold = 1;
+ for (j = 0; j < inlink->channels; j++)
+ threshold &= s->compute(s, ibuf[j]) > s->stop_threshold;
+
+ if (threshold && s->stop_holdoff_end && !s->leave_silence) {
+ s->mode = SILENCE_COPY_FLUSH;
+ flush(out, outlink, &nb_samples_written, &ret);
+ goto silence_copy_flush;
+ } else if (threshold) {
+ for (j = 0; j < inlink->channels; j++) {
+ s->update(s, *ibuf);
+ *obuf++ = *ibuf++;
+ }
+ nb_samples_read += inlink->channels;
+ nb_samples_written += inlink->channels;
+ } else if (!threshold) {
+ for (j = 0; j < inlink->channels; j++) {
+ s->update(s, *ibuf);
+ if (s->leave_silence) {
+ *obuf++ = *ibuf;
+ nb_samples_written++;
+ }
+
+ s->stop_holdoff[s->stop_holdoff_end++] = *ibuf++;
+ }
+ nb_samples_read += inlink->channels;
+
+ if (s->stop_holdoff_end >= s->stop_duration * inlink->channels) {
+ if (++s->stop_found_periods >= s->stop_periods) {
+ s->stop_holdoff_offset = 0;
+ s->stop_holdoff_end = 0;
+
+ if (!s->restart) {
+ s->mode = SILENCE_STOP;
+ flush(out, outlink, &nb_samples_written, &ret);
+ goto silence_stop;
+ } else {
+ s->stop_found_periods = 0;
+ s->start_found_periods = 0;
+ s->start_holdoff_offset = 0;
+ s->start_holdoff_end = 0;
+ clear_window(s);
+ s->mode = SILENCE_TRIM;
+ flush(out, outlink, &nb_samples_written, &ret);
+ goto silence_trim;
+ }
+ }
+ s->mode = SILENCE_COPY_FLUSH;
+ flush(out, outlink, &nb_samples_written, &ret);
+ goto silence_copy_flush;
+ }
+ }
+ }
+ flush(out, outlink, &nb_samples_written, &ret);
+ } else {
+ memcpy(obuf, ibuf, sizeof(double) * nbs * inlink->channels);
+ ret = ff_filter_frame(outlink, out);
+ }
+ break;
+
+ case SILENCE_COPY_FLUSH:
+silence_copy_flush:
+ nbs = s->stop_holdoff_end - s->stop_holdoff_offset;
+ nbs -= nbs % inlink->channels;
+ if (!nbs)
+ break;
+
+ out = ff_get_audio_buffer(inlink, nbs / inlink->channels);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+
+ memcpy(out->data[0], &s->stop_holdoff[s->stop_holdoff_offset],
+ nbs * sizeof(double));
+ s->stop_holdoff_offset += nbs;
+
+ ret = ff_filter_frame(outlink, out);
+
+ if (s->stop_holdoff_offset == s->stop_holdoff_end) {
+ s->stop_holdoff_offset = 0;
+ s->stop_holdoff_end = 0;
+ s->mode = SILENCE_COPY;
+ goto silence_copy;
+ }
+ break;
+ case SILENCE_STOP:
+silence_stop:
+ break;
+ }
+
+ av_frame_free(&in);
+
+ return ret;
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ SilenceRemoveContext *s = ctx->priv;
+ int ret;
+
+ ret = ff_request_frame(ctx->inputs[0]);
+ if (ret == AVERROR_EOF && (s->mode == SILENCE_COPY_FLUSH ||
+ s->mode == SILENCE_COPY)) {
+ int nbs = s->stop_holdoff_end - s->stop_holdoff_offset;
+ if (nbs) {
+ AVFrame *frame;
+
+ frame = ff_get_audio_buffer(outlink, nbs / outlink->channels);
+ if (!frame)
+ return AVERROR(ENOMEM);
+
+ memcpy(frame->data[0], &s->stop_holdoff[s->stop_holdoff_offset],
+ nbs * sizeof(double));
+ ret = ff_filter_frame(ctx->inputs[0], frame);
+ }
+ s->mode = SILENCE_STOP;
+ }
+ return ret;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *formats = NULL;
+ AVFilterChannelLayouts *layouts = NULL;
+ static const enum AVSampleFormat sample_fmts[] = {
+ AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_NONE
+ };
+ int ret;
+
+ layouts = ff_all_channel_counts();
+ if (!layouts)
+ return AVERROR(ENOMEM);
+ ret = ff_set_common_channel_layouts(ctx, layouts);
+ if (ret < 0)
+ return ret;
+
+ formats = ff_make_format_list(sample_fmts);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ret = ff_set_common_formats(ctx, formats);
+ if (ret < 0)
+ return ret;
+
+ formats = ff_all_samplerates();
+ if (!formats)
+ return AVERROR(ENOMEM);
+ return ff_set_common_samplerates(ctx, formats);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ SilenceRemoveContext *s = ctx->priv;
+
+ av_freep(&s->start_holdoff);
+ av_freep(&s->stop_holdoff);
+ av_freep(&s->window);
+}
+
+static const AVFilterPad silenceremove_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad silenceremove_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_silenceremove = {
+ .name = "silenceremove",
+ .description = NULL_IF_CONFIG_SMALL("Remove silence."),
+ .priv_size = sizeof(SilenceRemoveContext),
+ .priv_class = &silenceremove_class,
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = silenceremove_inputs,
+ .outputs = silenceremove_outputs,
+};
diff --git a/libavfilter/af_sofalizer.c b/libavfilter/af_sofalizer.c
new file mode 100644
index 0000000000..5f0ab31a2a
--- /dev/null
+++ b/libavfilter/af_sofalizer.c
@@ -0,0 +1,1234 @@
+/*****************************************************************************
+ * sofalizer.c : SOFAlizer filter for virtual binaural acoustics
+ *****************************************************************************
+ * Copyright (C) 2013-2015 Andreas Fuchs, Wolfgang Hrauda,
+ * Acoustics Research Institute (ARI), Vienna, Austria
+ *
+ * Authors: Andreas Fuchs <andi.fuchs.mail@gmail.com>
+ * Wolfgang Hrauda <wolfgang.hrauda@gmx.at>
+ *
+ * SOFAlizer project coordinator at ARI, main developer of SOFA:
+ * Piotr Majdak <piotr@majdak.at>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation; either version 2.1 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
+ *****************************************************************************/
+
+#include <math.h>
+#include <netcdf.h>
+
+#include "libavcodec/avfft.h"
+#include "libavutil/avstring.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/float_dsp.h"
+#include "libavutil/intmath.h"
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#include "internal.h"
+#include "audio.h"
+
+#define TIME_DOMAIN 0
+#define FREQUENCY_DOMAIN 1
+
+typedef struct NCSofa { /* contains data of one SOFA file */
+ int ncid; /* netCDF ID of the opened SOFA file */
+ int n_samples; /* length of one impulse response (IR) */
+ int m_dim; /* number of measurement positions */
+ int *data_delay; /* broadband delay of each IR */
+ /* all measurement positions for each receiver (i.e. ear): */
+ float *sp_a; /* azimuth angles */
+ float *sp_e; /* elevation angles */
+ float *sp_r; /* radii */
+ /* data at each measurement position for each receiver: */
+ float *data_ir; /* IRs (time-domain) */
+} NCSofa;
+
+typedef struct VirtualSpeaker {
+ uint8_t set;
+ float azim;
+ float elev;
+} VirtualSpeaker;
+
+typedef struct SOFAlizerContext {
+ const AVClass *class;
+
+ char *filename; /* name of SOFA file */
+ NCSofa sofa; /* contains data of the SOFA file */
+
+ int sample_rate; /* sample rate from SOFA file */
+ float *speaker_azim; /* azimuth of the virtual loudspeakers */
+ float *speaker_elev; /* elevation of the virtual loudspeakers */
+ char *speakers_pos; /* custom positions of the virtual loudspeakers */
+ float gain_lfe; /* gain applied to LFE channel */
+ int lfe_channel; /* LFE channel position in channel layout */
+
+ int n_conv; /* number of channels to convolute */
+
+ /* buffer variables (for convolution) */
+ float *ringbuffer[2]; /* buffers input samples, length of one buffer: */
+ /* no. input ch. (incl. LFE) x buffer_length */
+ int write[2]; /* current write position to ringbuffer */
+ int buffer_length; /* is: longest IR plus max. delay in all SOFA files */
+ /* then choose next power of 2 */
+ int n_fft; /* number of samples in one FFT block */
+
+ /* netCDF variables */
+ int *delay[2]; /* broadband delay for each channel/IR to be convolved */
+
+ float *data_ir[2]; /* IRs for all channels to be convolved */
+ /* (this excludes the LFE) */
+ float *temp_src[2];
+ FFTComplex *temp_fft[2];
+
+ /* control variables */
+ float gain; /* filter gain (in dB) */
+ float rotation; /* rotation of virtual loudspeakers (in degrees) */
+ float elevation; /* elevation of virtual loudspeakers (in deg.) */
+ float radius; /* distance virtual loudspeakers to listener (in metres) */
+ int type; /* processing type */
+
+ VirtualSpeaker vspkrpos[64];
+
+ FFTContext *fft[2], *ifft[2];
+ FFTComplex *data_hrtf[2];
+
+ AVFloatDSPContext *fdsp;
+} SOFAlizerContext;
+
+static int close_sofa(struct NCSofa *sofa)
+{
+ av_freep(&sofa->data_delay);
+ av_freep(&sofa->sp_a);
+ av_freep(&sofa->sp_e);
+ av_freep(&sofa->sp_r);
+ av_freep(&sofa->data_ir);
+ nc_close(sofa->ncid);
+ sofa->ncid = 0;
+
+ return 0;
+}
+
+static int load_sofa(AVFilterContext *ctx, char *filename, int *samplingrate)
+{
+ struct SOFAlizerContext *s = ctx->priv;
+ /* variables associated with content of SOFA file: */
+ int ncid, n_dims, n_vars, n_gatts, n_unlim_dim_id, status;
+ char data_delay_dim_name[NC_MAX_NAME];
+ float *sp_a, *sp_e, *sp_r, *data_ir;
+ char *sofa_conventions;
+ char dim_name[NC_MAX_NAME]; /* names of netCDF dimensions */
+ size_t *dim_length; /* lengths of netCDF dimensions */
+ char *text;
+ unsigned int sample_rate;
+ int data_delay_dim_id[2];
+ int samplingrate_id;
+ int data_delay_id;
+ int n_samples;
+ int m_dim_id = -1;
+ int n_dim_id = -1;
+ int data_ir_id;
+ size_t att_len;
+ int m_dim;
+ int *data_delay;
+ int sp_id;
+ int i, ret;
+
+ s->sofa.ncid = 0;
+ status = nc_open(filename, NC_NOWRITE, &ncid); /* open SOFA file read-only */
+ if (status != NC_NOERR) {
+ av_log(ctx, AV_LOG_ERROR, "Can't find SOFA-file '%s'\n", filename);
+ return AVERROR(EINVAL);
+ }
+
+ /* get number of dimensions, vars, global attributes and Id of unlimited dimensions: */
+ nc_inq(ncid, &n_dims, &n_vars, &n_gatts, &n_unlim_dim_id);
+
+ /* -- get number of measurements ("M") and length of one IR ("N") -- */
+ dim_length = av_malloc_array(n_dims, sizeof(*dim_length));
+ if (!dim_length) {
+ nc_close(ncid);
+ return AVERROR(ENOMEM);
+ }
+
+ for (i = 0; i < n_dims; i++) { /* go through all dimensions of file */
+ nc_inq_dim(ncid, i, (char *)&dim_name, &dim_length[i]); /* get dimensions */
+ if (!strncmp("M", (const char *)&dim_name, 1)) /* get ID of dimension "M" */
+ m_dim_id = i;
+ if (!strncmp("N", (const char *)&dim_name, 1)) /* get ID of dimension "N" */
+ n_dim_id = i;
+ }
+
+ if ((m_dim_id == -1) || (n_dim_id == -1)) { /* dimension "M" or "N" couldn't be found */
+ av_log(ctx, AV_LOG_ERROR, "Can't find required dimensions in SOFA file.\n");
+ av_freep(&dim_length);
+ nc_close(ncid);
+ return AVERROR(EINVAL);
+ }
+
+ n_samples = dim_length[n_dim_id]; /* get length of one IR */
+ m_dim = dim_length[m_dim_id]; /* get number of measurements */
+
+ av_freep(&dim_length);
+
+ /* -- check file type -- */
+ /* get length of attritube "Conventions" */
+ status = nc_inq_attlen(ncid, NC_GLOBAL, "Conventions", &att_len);
+ if (status != NC_NOERR) {
+ av_log(ctx, AV_LOG_ERROR, "Can't get length of attribute \"Conventions\".\n");
+ nc_close(ncid);
+ return AVERROR_INVALIDDATA;
+ }
+
+ /* check whether file is SOFA file */
+ text = av_malloc(att_len + 1);
+ if (!text) {
+ nc_close(ncid);
+ return AVERROR(ENOMEM);
+ }
+
+ nc_get_att_text(ncid, NC_GLOBAL, "Conventions", text);
+ *(text + att_len) = 0;
+ if (strncmp("SOFA", text, 4)) {
+ av_log(ctx, AV_LOG_ERROR, "Not a SOFA file!\n");
+ av_freep(&text);
+ nc_close(ncid);
+ return AVERROR(EINVAL);
+ }
+ av_freep(&text);
+
+ status = nc_inq_attlen(ncid, NC_GLOBAL, "License", &att_len);
+ if (status == NC_NOERR) {
+ text = av_malloc(att_len + 1);
+ if (text) {
+ nc_get_att_text(ncid, NC_GLOBAL, "License", text);
+ *(text + att_len) = 0;
+ av_log(ctx, AV_LOG_INFO, "SOFA file License: %s\n", text);
+ av_freep(&text);
+ }
+ }
+
+ status = nc_inq_attlen(ncid, NC_GLOBAL, "SourceDescription", &att_len);
+ if (status == NC_NOERR) {
+ text = av_malloc(att_len + 1);
+ if (text) {
+ nc_get_att_text(ncid, NC_GLOBAL, "SourceDescription", text);
+ *(text + att_len) = 0;
+ av_log(ctx, AV_LOG_INFO, "SOFA file SourceDescription: %s\n", text);
+ av_freep(&text);
+ }
+ }
+
+ status = nc_inq_attlen(ncid, NC_GLOBAL, "Comment", &att_len);
+ if (status == NC_NOERR) {
+ text = av_malloc(att_len + 1);
+ if (text) {
+ nc_get_att_text(ncid, NC_GLOBAL, "Comment", text);
+ *(text + att_len) = 0;
+ av_log(ctx, AV_LOG_INFO, "SOFA file Comment: %s\n", text);
+ av_freep(&text);
+ }
+ }
+
+ status = nc_inq_attlen(ncid, NC_GLOBAL, "SOFAConventions", &att_len);
+ if (status != NC_NOERR) {
+ av_log(ctx, AV_LOG_ERROR, "Can't get length of attribute \"SOFAConventions\".\n");
+ nc_close(ncid);
+ return AVERROR_INVALIDDATA;
+ }
+
+ sofa_conventions = av_malloc(att_len + 1);
+ if (!sofa_conventions) {
+ nc_close(ncid);
+ return AVERROR(ENOMEM);
+ }
+
+ nc_get_att_text(ncid, NC_GLOBAL, "SOFAConventions", sofa_conventions);
+ *(sofa_conventions + att_len) = 0;
+ if (strncmp("SimpleFreeFieldHRIR", sofa_conventions, att_len)) {
+ av_log(ctx, AV_LOG_ERROR, "Not a SimpleFreeFieldHRIR file!\n");
+ av_freep(&sofa_conventions);
+ nc_close(ncid);
+ return AVERROR(EINVAL);
+ }
+ av_freep(&sofa_conventions);
+
+ /* -- get sampling rate of HRTFs -- */
+ /* read ID, then value */
+ status = nc_inq_varid(ncid, "Data.SamplingRate", &samplingrate_id);
+ status += nc_get_var_uint(ncid, samplingrate_id, &sample_rate);
+ if (status != NC_NOERR) {
+ av_log(ctx, AV_LOG_ERROR, "Couldn't read Data.SamplingRate.\n");
+ nc_close(ncid);
+ return AVERROR(EINVAL);
+ }
+ *samplingrate = sample_rate; /* remember sampling rate */
+
+ /* -- allocate memory for one value for each measurement position: -- */
+ sp_a = s->sofa.sp_a = av_malloc_array(m_dim, sizeof(float));
+ sp_e = s->sofa.sp_e = av_malloc_array(m_dim, sizeof(float));
+ sp_r = s->sofa.sp_r = av_malloc_array(m_dim, sizeof(float));
+ /* delay and IR values required for each ear and measurement position: */
+ data_delay = s->sofa.data_delay = av_calloc(m_dim, 2 * sizeof(int));
+ data_ir = s->sofa.data_ir = av_calloc(m_dim * FFALIGN(n_samples, 16), sizeof(float) * 2);
+
+ if (!data_delay || !sp_a || !sp_e || !sp_r || !data_ir) {
+ /* if memory could not be allocated */
+ close_sofa(&s->sofa);
+ return AVERROR(ENOMEM);
+ }
+
+ /* get impulse responses (HRTFs): */
+ /* get corresponding ID */
+ status = nc_inq_varid(ncid, "Data.IR", &data_ir_id);
+ status += nc_get_var_float(ncid, data_ir_id, data_ir); /* read and store IRs */
+ if (status != NC_NOERR) {
+ av_log(ctx, AV_LOG_ERROR, "Couldn't read Data.IR!\n");
+ ret = AVERROR(EINVAL);
+ goto error;
+ }
+
+ /* get source positions of the HRTFs in the SOFA file: */
+ status = nc_inq_varid(ncid, "SourcePosition", &sp_id); /* get corresponding ID */
+ status += nc_get_vara_float(ncid, sp_id, (size_t[2]){ 0, 0 } ,
+ (size_t[2]){ m_dim, 1}, sp_a); /* read & store azimuth angles */
+ status += nc_get_vara_float(ncid, sp_id, (size_t[2]){ 0, 1 } ,
+ (size_t[2]){ m_dim, 1}, sp_e); /* read & store elevation angles */
+ status += nc_get_vara_float(ncid, sp_id, (size_t[2]){ 0, 2 } ,
+ (size_t[2]){ m_dim, 1}, sp_r); /* read & store radii */
+ if (status != NC_NOERR) { /* if any source position variable coudn't be read */
+ av_log(ctx, AV_LOG_ERROR, "Couldn't read SourcePosition.\n");
+ ret = AVERROR(EINVAL);
+ goto error;
+ }
+
+ /* read Data.Delay, check for errors and fit it to data_delay */
+ status = nc_inq_varid(ncid, "Data.Delay", &data_delay_id);
+ status += nc_inq_vardimid(ncid, data_delay_id, &data_delay_dim_id[0]);
+ status += nc_inq_dimname(ncid, data_delay_dim_id[0], data_delay_dim_name);
+ if (status != NC_NOERR) {
+ av_log(ctx, AV_LOG_ERROR, "Couldn't read Data.Delay.\n");
+ ret = AVERROR(EINVAL);
+ goto error;
+ }
+
+ /* Data.Delay dimension check */
+ /* dimension of Data.Delay is [I R]: */
+ if (!strncmp(data_delay_dim_name, "I", 2)) {
+ /* check 2 characters to assure string is 0-terminated after "I" */
+ int delay[2]; /* delays get from SOFA file: */
+ int *data_delay_r;
+
+ av_log(ctx, AV_LOG_DEBUG, "Data.Delay has dimension [I R]\n");
+ status = nc_get_var_int(ncid, data_delay_id, &delay[0]);
+ if (status != NC_NOERR) {
+ av_log(ctx, AV_LOG_ERROR, "Couldn't read Data.Delay\n");
+ ret = AVERROR(EINVAL);
+ goto error;
+ }
+ data_delay_r = data_delay + m_dim;
+ for (i = 0; i < m_dim; i++) { /* extend given dimension [I R] to [M R] */
+ /* assign constant delay value for all measurements to data_delay fields */
+ data_delay[i] = delay[0];
+ data_delay_r[i] = delay[1];
+ }
+ /* dimension of Data.Delay is [M R] */
+ } else if (!strncmp(data_delay_dim_name, "M", 2)) {
+ av_log(ctx, AV_LOG_ERROR, "Data.Delay in dimension [M R]\n");
+ /* get delays from SOFA file: */
+ status = nc_get_var_int(ncid, data_delay_id, data_delay);
+ if (status != NC_NOERR) {
+ av_log(ctx, AV_LOG_ERROR, "Couldn't read Data.Delay\n");
+ ret = AVERROR(EINVAL);
+ goto error;
+ }
+ } else { /* dimension of Data.Delay is neither [I R] nor [M R] */
+ av_log(ctx, AV_LOG_ERROR, "Data.Delay does not have the required dimensions [I R] or [M R].\n");
+ ret = AVERROR(EINVAL);
+ goto error;
+ }
+
+ /* save information in SOFA struct: */
+ s->sofa.m_dim = m_dim; /* no. measurement positions */
+ s->sofa.n_samples = n_samples; /* length on one IR */
+ s->sofa.ncid = ncid; /* netCDF ID of SOFA file */
+ nc_close(ncid); /* close SOFA file */
+
+ av_log(ctx, AV_LOG_DEBUG, "m_dim: %d n_samples %d\n", m_dim, n_samples);
+
+ return 0;
+
+error:
+ close_sofa(&s->sofa);
+ return ret;
+}
+
+static int parse_channel_name(char **arg, int *rchannel, char *buf)
+{
+ int len, i, channel_id = 0;
+ int64_t layout, layout0;
+
+ /* try to parse a channel name, e.g. "FL" */
+ if (sscanf(*arg, "%7[A-Z]%n", buf, &len)) {
+ layout0 = layout = av_get_channel_layout(buf);
+ /* channel_id <- first set bit in layout */
+ for (i = 32; i > 0; i >>= 1) {
+ if (layout >= (int64_t)1 << i) {
+ channel_id += i;
+ layout >>= i;
+ }
+ }
+ /* reject layouts that are not a single channel */
+ if (channel_id >= 64 || layout0 != (int64_t)1 << channel_id)
+ return AVERROR(EINVAL);
+ *rchannel = channel_id;
+ *arg += len;
+ return 0;
+ }
+ return AVERROR(EINVAL);
+}
+
+static void parse_speaker_pos(AVFilterContext *ctx, int64_t in_channel_layout)
+{
+ SOFAlizerContext *s = ctx->priv;
+ char *arg, *tokenizer, *p, *args = av_strdup(s->speakers_pos);
+
+ if (!args)
+ return;
+ p = args;
+
+ while ((arg = av_strtok(p, "|", &tokenizer))) {
+ char buf[8];
+ float azim, elev;
+ int out_ch_id;
+
+ p = NULL;
+ if (parse_channel_name(&arg, &out_ch_id, buf)) {
+ av_log(ctx, AV_LOG_WARNING, "Failed to parse \'%s\' as channel name.\n", buf);
+ continue;
+ }
+ if (sscanf(arg, "%f %f", &azim, &elev) == 2) {
+ s->vspkrpos[out_ch_id].set = 1;
+ s->vspkrpos[out_ch_id].azim = azim;
+ s->vspkrpos[out_ch_id].elev = elev;
+ } else if (sscanf(arg, "%f", &azim) == 1) {
+ s->vspkrpos[out_ch_id].set = 1;
+ s->vspkrpos[out_ch_id].azim = azim;
+ s->vspkrpos[out_ch_id].elev = 0;
+ }
+ }
+
+ av_free(args);
+}
+
+static int get_speaker_pos(AVFilterContext *ctx,
+ float *speaker_azim, float *speaker_elev)
+{
+ struct SOFAlizerContext *s = ctx->priv;
+ uint64_t channels_layout = ctx->inputs[0]->channel_layout;
+ float azim[16] = { 0 };
+ float elev[16] = { 0 };
+ int m, ch, n_conv = ctx->inputs[0]->channels; /* get no. input channels */
+
+ if (n_conv > 16)
+ return AVERROR(EINVAL);
+
+ s->lfe_channel = -1;
+
+ if (s->speakers_pos)
+ parse_speaker_pos(ctx, channels_layout);
+
+ /* set speaker positions according to input channel configuration: */
+ for (m = 0, ch = 0; ch < n_conv && m < 64; m++) {
+ uint64_t mask = channels_layout & (1 << m);
+
+ switch (mask) {
+ case AV_CH_FRONT_LEFT: azim[ch] = 30; break;
+ case AV_CH_FRONT_RIGHT: azim[ch] = 330; break;
+ case AV_CH_FRONT_CENTER: azim[ch] = 0; break;
+ case AV_CH_LOW_FREQUENCY:
+ case AV_CH_LOW_FREQUENCY_2: s->lfe_channel = ch; break;
+ case AV_CH_BACK_LEFT: azim[ch] = 150; break;
+ case AV_CH_BACK_RIGHT: azim[ch] = 210; break;
+ case AV_CH_BACK_CENTER: azim[ch] = 180; break;
+ case AV_CH_SIDE_LEFT: azim[ch] = 90; break;
+ case AV_CH_SIDE_RIGHT: azim[ch] = 270; break;
+ case AV_CH_FRONT_LEFT_OF_CENTER: azim[ch] = 15; break;
+ case AV_CH_FRONT_RIGHT_OF_CENTER: azim[ch] = 345; break;
+ case AV_CH_TOP_CENTER: azim[ch] = 0;
+ elev[ch] = 90; break;
+ case AV_CH_TOP_FRONT_LEFT: azim[ch] = 30;
+ elev[ch] = 45; break;
+ case AV_CH_TOP_FRONT_CENTER: azim[ch] = 0;
+ elev[ch] = 45; break;
+ case AV_CH_TOP_FRONT_RIGHT: azim[ch] = 330;
+ elev[ch] = 45; break;
+ case AV_CH_TOP_BACK_LEFT: azim[ch] = 150;
+ elev[ch] = 45; break;
+ case AV_CH_TOP_BACK_RIGHT: azim[ch] = 210;
+ elev[ch] = 45; break;
+ case AV_CH_TOP_BACK_CENTER: azim[ch] = 180;
+ elev[ch] = 45; break;
+ case AV_CH_WIDE_LEFT: azim[ch] = 90; break;
+ case AV_CH_WIDE_RIGHT: azim[ch] = 270; break;
+ case AV_CH_SURROUND_DIRECT_LEFT: azim[ch] = 90; break;
+ case AV_CH_SURROUND_DIRECT_RIGHT: azim[ch] = 270; break;
+ case AV_CH_STEREO_LEFT: azim[ch] = 90; break;
+ case AV_CH_STEREO_RIGHT: azim[ch] = 270; break;
+ case 0: break;
+ default:
+ return AVERROR(EINVAL);
+ }
+
+ if (s->vspkrpos[m].set) {
+ azim[ch] = s->vspkrpos[m].azim;
+ elev[ch] = s->vspkrpos[m].elev;
+ }
+
+ if (mask)
+ ch++;
+ }
+
+ memcpy(speaker_azim, azim, n_conv * sizeof(float));
+ memcpy(speaker_elev, elev, n_conv * sizeof(float));
+
+ return 0;
+
+}
+
+static int max_delay(struct NCSofa *sofa)
+{
+ int i, max = 0;
+
+ for (i = 0; i < sofa->m_dim * 2; i++) {
+ /* search maximum delay in given SOFA file */
+ max = FFMAX(max, sofa->data_delay[i]);
+ }
+
+ return max;
+}
+
+static int find_m(SOFAlizerContext *s, int azim, int elev, float radius)
+{
+ /* get source positions and M of currently selected SOFA file */
+ float *sp_a = s->sofa.sp_a; /* azimuth angle */
+ float *sp_e = s->sofa.sp_e; /* elevation angle */
+ float *sp_r = s->sofa.sp_r; /* radius */
+ int m_dim = s->sofa.m_dim; /* no. measurements */
+ int best_id = 0; /* index m currently closest to desired source pos. */
+ float delta = 1000; /* offset between desired and currently best pos. */
+ float current;
+ int i;
+
+ for (i = 0; i < m_dim; i++) {
+ /* search through all measurements in currently selected SOFA file */
+ /* distance of current to desired source position: */
+ current = fabs(sp_a[i] - azim) +
+ fabs(sp_e[i] - elev) +
+ fabs(sp_r[i] - radius);
+ if (current <= delta) {
+ /* if current distance is smaller than smallest distance so far */
+ delta = current;
+ best_id = i; /* remember index */
+ }
+ }
+
+ return best_id;
+}
+
+static int compensate_volume(AVFilterContext *ctx)
+{
+ struct SOFAlizerContext *s = ctx->priv;
+ float compensate;
+ float energy = 0;
+ float *ir;
+ int m;
+
+ if (s->sofa.ncid) {
+ /* find IR at front center position in the SOFA file (IR closest to 0°,0°,1m) */
+ struct NCSofa *sofa = &s->sofa;
+ m = find_m(s, 0, 0, 1);
+ /* get energy of that IR and compensate volume */
+ ir = sofa->data_ir + 2 * m * sofa->n_samples;
+ if (sofa->n_samples & 31) {
+ energy = avpriv_scalarproduct_float_c(ir, ir, sofa->n_samples);
+ } else {
+ energy = s->fdsp->scalarproduct_float(ir, ir, sofa->n_samples);
+ }
+ compensate = 256 / (sofa->n_samples * sqrt(energy));
+ av_log(ctx, AV_LOG_DEBUG, "Compensate-factor: %f\n", compensate);
+ ir = sofa->data_ir;
+ /* apply volume compensation to IRs */
+ if (sofa->n_samples & 31) {
+ int i;
+ for (i = 0; i < sofa->n_samples * sofa->m_dim * 2; i++) {
+ ir[i] = ir[i] * compensate;
+ }
+ } else {
+ s->fdsp->vector_fmul_scalar(ir, ir, compensate, sofa->n_samples * sofa->m_dim * 2);
+ emms_c();
+ }
+ }
+
+ return 0;
+}
+
+typedef struct ThreadData {
+ AVFrame *in, *out;
+ int *write;
+ int **delay;
+ float **ir;
+ int *n_clippings;
+ float **ringbuffer;
+ float **temp_src;
+ FFTComplex **temp_fft;
+} ThreadData;
+
+static int sofalizer_convolute(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
+{
+ SOFAlizerContext *s = ctx->priv;
+ ThreadData *td = arg;
+ AVFrame *in = td->in, *out = td->out;
+ int offset = jobnr;
+ int *write = &td->write[jobnr];
+ const int *const delay = td->delay[jobnr];
+ const float *const ir = td->ir[jobnr];
+ int *n_clippings = &td->n_clippings[jobnr];
+ float *ringbuffer = td->ringbuffer[jobnr];
+ float *temp_src = td->temp_src[jobnr];
+ const int n_samples = s->sofa.n_samples; /* length of one IR */
+ const float *src = (const float *)in->data[0]; /* get pointer to audio input buffer */
+ float *dst = (float *)out->data[0]; /* get pointer to audio output buffer */
+ const int in_channels = s->n_conv; /* number of input channels */
+ /* ring buffer length is: longest IR plus max. delay -> next power of 2 */
+ const int buffer_length = s->buffer_length;
+ /* -1 for AND instead of MODULO (applied to powers of 2): */
+ const uint32_t modulo = (uint32_t)buffer_length - 1;
+ float *buffer[16]; /* holds ringbuffer for each input channel */
+ int wr = *write;
+ int read;
+ int i, l;
+
+ dst += offset;
+ for (l = 0; l < in_channels; l++) {
+ /* get starting address of ringbuffer for each input channel */
+ buffer[l] = ringbuffer + l * buffer_length;
+ }
+
+ for (i = 0; i < in->nb_samples; i++) {
+ const float *temp_ir = ir; /* using same set of IRs for each sample */
+
+ *dst = 0;
+ for (l = 0; l < in_channels; l++) {
+ /* write current input sample to ringbuffer (for each channel) */
+ *(buffer[l] + wr) = src[l];
+ }
+
+ /* loop goes through all channels to be convolved */
+ for (l = 0; l < in_channels; l++) {
+ const float *const bptr = buffer[l];
+
+ if (l == s->lfe_channel) {
+ /* LFE is an input channel but requires no convolution */
+ /* apply gain to LFE signal and add to output buffer */
+ *dst += *(buffer[s->lfe_channel] + wr) * s->gain_lfe;
+ temp_ir += FFALIGN(n_samples, 16);
+ continue;
+ }
+
+ /* current read position in ringbuffer: input sample write position
+ * - delay for l-th ch. + diff. betw. IR length and buffer length
+ * (mod buffer length) */
+ read = (wr - *(delay + l) - (n_samples - 1) + buffer_length) & modulo;
+
+ if (read + n_samples < buffer_length) {
+ memcpy(temp_src, bptr + read, n_samples * sizeof(*temp_src));
+ } else {
+ int len = FFMIN(n_samples - (read % n_samples), buffer_length - read);
+
+ memcpy(temp_src, bptr + read, len * sizeof(*temp_src));
+ memcpy(temp_src + len, bptr, (n_samples - len) * sizeof(*temp_src));
+ }
+
+ /* multiply signal and IR, and add up the results */
+ dst[0] += s->fdsp->scalarproduct_float(temp_ir, temp_src, n_samples);
+ temp_ir += FFALIGN(n_samples, 16);
+ }
+
+ /* clippings counter */
+ if (fabs(*dst) > 1)
+ *n_clippings += 1;
+
+ /* move output buffer pointer by +2 to get to next sample of processed channel: */
+ dst += 2;
+ src += in_channels;
+ wr = (wr + 1) & modulo; /* update ringbuffer write position */
+ }
+
+ *write = wr; /* remember write position in ringbuffer for next call */
+
+ return 0;
+}
+
+static int sofalizer_fast_convolute(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
+{
+ SOFAlizerContext *s = ctx->priv;
+ ThreadData *td = arg;
+ AVFrame *in = td->in, *out = td->out;
+ int offset = jobnr;
+ int *write = &td->write[jobnr];
+ FFTComplex *hrtf = s->data_hrtf[jobnr]; /* get pointers to current HRTF data */
+ int *n_clippings = &td->n_clippings[jobnr];
+ float *ringbuffer = td->ringbuffer[jobnr];
+ const int n_samples = s->sofa.n_samples; /* length of one IR */
+ const float *src = (const float *)in->data[0]; /* get pointer to audio input buffer */
+ float *dst = (float *)out->data[0]; /* get pointer to audio output buffer */
+ const int in_channels = s->n_conv; /* number of input channels */
+ /* ring buffer length is: longest IR plus max. delay -> next power of 2 */
+ const int buffer_length = s->buffer_length;
+ /* -1 for AND instead of MODULO (applied to powers of 2): */
+ const uint32_t modulo = (uint32_t)buffer_length - 1;
+ FFTComplex *fft_in = s->temp_fft[jobnr]; /* temporary array for FFT input/output data */
+ FFTContext *ifft = s->ifft[jobnr];
+ FFTContext *fft = s->fft[jobnr];
+ const int n_conv = s->n_conv;
+ const int n_fft = s->n_fft;
+ const float fft_scale = 1.0f / s->n_fft;
+ FFTComplex *hrtf_offset;
+ int wr = *write;
+ int n_read;
+ int i, j;
+
+ dst += offset;
+
+ /* find minimum between number of samples and output buffer length:
+ * (important, if one IR is longer than the output buffer) */
+ n_read = FFMIN(s->sofa.n_samples, in->nb_samples);
+ for (j = 0; j < n_read; j++) {
+ /* initialize output buf with saved signal from overflow buf */
+ dst[2 * j] = ringbuffer[wr];
+ ringbuffer[wr] = 0.0; /* re-set read samples to zero */
+ /* update ringbuffer read/write position */
+ wr = (wr + 1) & modulo;
+ }
+
+ /* initialize rest of output buffer with 0 */
+ for (j = n_read; j < in->nb_samples; j++) {
+ dst[2 * j] = 0;
+ }
+
+ for (i = 0; i < n_conv; i++) {
+ if (i == s->lfe_channel) { /* LFE */
+ for (j = 0; j < in->nb_samples; j++) {
+ /* apply gain to LFE signal and add to output buffer */
+ dst[2 * j] += src[i + j * in_channels] * s->gain_lfe;
+ }
+ continue;
+ }
+
+ /* outer loop: go through all input channels to be convolved */
+ offset = i * n_fft; /* no. samples already processed */
+ hrtf_offset = hrtf + offset;
+
+ /* fill FFT input with 0 (we want to zero-pad) */
+ memset(fft_in, 0, sizeof(FFTComplex) * n_fft);
+
+ for (j = 0; j < in->nb_samples; j++) {
+ /* prepare input for FFT */
+ /* write all samples of current input channel to FFT input array */
+ fft_in[j].re = src[j * in_channels + i];
+ }
+
+ /* transform input signal of current channel to frequency domain */
+ av_fft_permute(fft, fft_in);
+ av_fft_calc(fft, fft_in);
+ for (j = 0; j < n_fft; j++) {
+ const FFTComplex *hcomplex = hrtf_offset + j;
+ const float re = fft_in[j].re;
+ const float im = fft_in[j].im;
+
+ /* complex multiplication of input signal and HRTFs */
+ /* output channel (real): */
+ fft_in[j].re = re * hcomplex->re - im * hcomplex->im;
+ /* output channel (imag): */
+ fft_in[j].im = re * hcomplex->im + im * hcomplex->re;
+ }
+
+ /* transform output signal of current channel back to time domain */
+ av_fft_permute(ifft, fft_in);
+ av_fft_calc(ifft, fft_in);
+
+ for (j = 0; j < in->nb_samples; j++) {
+ /* write output signal of current channel to output buffer */
+ dst[2 * j] += fft_in[j].re * fft_scale;
+ }
+
+ for (j = 0; j < n_samples - 1; j++) { /* overflow length is IR length - 1 */
+ /* write the rest of output signal to overflow buffer */
+ int write_pos = (wr + j) & modulo;
+
+ *(ringbuffer + write_pos) += fft_in[in->nb_samples + j].re * fft_scale;
+ }
+ }
+
+ /* go through all samples of current output buffer: count clippings */
+ for (i = 0; i < out->nb_samples; i++) {
+ /* clippings counter */
+ if (fabs(*dst) > 1) { /* if current output sample > 1 */
+ n_clippings[0]++;
+ }
+
+ /* move output buffer pointer by +2 to get to next sample of processed channel: */
+ dst += 2;
+ }
+
+ /* remember read/write position in ringbuffer for next call */
+ *write = wr;
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ SOFAlizerContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ int n_clippings[2] = { 0 };
+ ThreadData td;
+ AVFrame *out;
+
+ out = ff_get_audio_buffer(outlink, in->nb_samples);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+
+ td.in = in; td.out = out; td.write = s->write;
+ td.delay = s->delay; td.ir = s->data_ir; td.n_clippings = n_clippings;
+ td.ringbuffer = s->ringbuffer; td.temp_src = s->temp_src;
+ td.temp_fft = s->temp_fft;
+
+ if (s->type == TIME_DOMAIN) {
+ ctx->internal->execute(ctx, sofalizer_convolute, &td, NULL, 2);
+ } else {
+ ctx->internal->execute(ctx, sofalizer_fast_convolute, &td, NULL, 2);
+ }
+ emms_c();
+
+ /* display error message if clipping occurred */
+ if (n_clippings[0] + n_clippings[1] > 0) {
+ av_log(ctx, AV_LOG_WARNING, "%d of %d samples clipped. Please reduce gain.\n",
+ n_clippings[0] + n_clippings[1], out->nb_samples * 2);
+ }
+
+ av_frame_free(&in);
+ return ff_filter_frame(outlink, out);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ struct SOFAlizerContext *s = ctx->priv;
+ AVFilterFormats *formats = NULL;
+ AVFilterChannelLayouts *layouts = NULL;
+ int ret, sample_rates[] = { 48000, -1 };
+
+ ret = ff_add_format(&formats, AV_SAMPLE_FMT_FLT);
+ if (ret)
+ return ret;
+ ret = ff_set_common_formats(ctx, formats);
+ if (ret)
+ return ret;
+
+ layouts = ff_all_channel_layouts();
+ if (!layouts)
+ return AVERROR(ENOMEM);
+
+ ret = ff_channel_layouts_ref(layouts, &ctx->inputs[0]->out_channel_layouts);
+ if (ret)
+ return ret;
+
+ layouts = NULL;
+ ret = ff_add_channel_layout(&layouts, AV_CH_LAYOUT_STEREO);
+ if (ret)
+ return ret;
+
+ ret = ff_channel_layouts_ref(layouts, &ctx->outputs[0]->in_channel_layouts);
+ if (ret)
+ return ret;
+
+ sample_rates[0] = s->sample_rate;
+ formats = ff_make_format_list(sample_rates);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ return ff_set_common_samplerates(ctx, formats);
+}
+
+static int load_data(AVFilterContext *ctx, int azim, int elev, float radius)
+{
+ struct SOFAlizerContext *s = ctx->priv;
+ const int n_samples = s->sofa.n_samples;
+ int n_conv = s->n_conv; /* no. channels to convolve */
+ int n_fft = s->n_fft;
+ int delay_l[16]; /* broadband delay for each IR */
+ int delay_r[16];
+ int nb_input_channels = ctx->inputs[0]->channels; /* no. input channels */
+ float gain_lin = expf((s->gain - 3 * nb_input_channels) / 20 * M_LN10); /* gain - 3dB/channel */
+ FFTComplex *data_hrtf_l = NULL;
+ FFTComplex *data_hrtf_r = NULL;
+ FFTComplex *fft_in_l = NULL;
+ FFTComplex *fft_in_r = NULL;
+ float *data_ir_l = NULL;
+ float *data_ir_r = NULL;
+ int offset = 0; /* used for faster pointer arithmetics in for-loop */
+ int m[16]; /* measurement index m of IR closest to required source positions */
+ int i, j, azim_orig = azim, elev_orig = elev;
+
+ if (!s->sofa.ncid) { /* if an invalid SOFA file has been selected */
+ av_log(ctx, AV_LOG_ERROR, "Selected SOFA file is invalid. Please select valid SOFA file.\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ if (s->type == TIME_DOMAIN) {
+ s->temp_src[0] = av_calloc(FFALIGN(n_samples, 16), sizeof(float));
+ s->temp_src[1] = av_calloc(FFALIGN(n_samples, 16), sizeof(float));
+
+ /* get temporary IR for L and R channel */
+ data_ir_l = av_calloc(n_conv * FFALIGN(n_samples, 16), sizeof(*data_ir_l));
+ data_ir_r = av_calloc(n_conv * FFALIGN(n_samples, 16), sizeof(*data_ir_r));
+ if (!data_ir_r || !data_ir_l || !s->temp_src[0] || !s->temp_src[1]) {
+ av_free(data_ir_l);
+ av_free(data_ir_r);
+ return AVERROR(ENOMEM);
+ }
+ } else {
+ /* get temporary HRTF memory for L and R channel */
+ data_hrtf_l = av_malloc_array(n_fft, sizeof(*data_hrtf_l) * n_conv);
+ data_hrtf_r = av_malloc_array(n_fft, sizeof(*data_hrtf_r) * n_conv);
+ if (!data_hrtf_r || !data_hrtf_l) {
+ av_free(data_hrtf_l);
+ av_free(data_hrtf_r);
+ return AVERROR(ENOMEM);
+ }
+ }
+
+ for (i = 0; i < s->n_conv; i++) {
+ /* load and store IRs and corresponding delays */
+ azim = (int)(s->speaker_azim[i] + azim_orig) % 360;
+ elev = (int)(s->speaker_elev[i] + elev_orig) % 90;
+ /* get id of IR closest to desired position */
+ m[i] = find_m(s, azim, elev, radius);
+
+ /* load the delays associated with the current IRs */
+ delay_l[i] = *(s->sofa.data_delay + 2 * m[i]);
+ delay_r[i] = *(s->sofa.data_delay + 2 * m[i] + 1);
+
+ if (s->type == TIME_DOMAIN) {
+ offset = i * FFALIGN(n_samples, 16); /* no. samples already written */
+ for (j = 0; j < n_samples; j++) {
+ /* load reversed IRs of the specified source position
+ * sample-by-sample for left and right ear; and apply gain */
+ *(data_ir_l + offset + j) = /* left channel */
+ *(s->sofa.data_ir + 2 * m[i] * n_samples + n_samples - 1 - j) * gain_lin;
+ *(data_ir_r + offset + j) = /* right channel */
+ *(s->sofa.data_ir + 2 * m[i] * n_samples + n_samples - 1 - j + n_samples) * gain_lin;
+ }
+ } else {
+ fft_in_l = av_calloc(n_fft, sizeof(*fft_in_l));
+ fft_in_r = av_calloc(n_fft, sizeof(*fft_in_r));
+ if (!fft_in_l || !fft_in_r) {
+ av_free(data_hrtf_l);
+ av_free(data_hrtf_r);
+ av_free(fft_in_l);
+ av_free(fft_in_r);
+ return AVERROR(ENOMEM);
+ }
+
+ offset = i * n_fft; /* no. samples already written */
+ for (j = 0; j < n_samples; j++) {
+ /* load non-reversed IRs of the specified source position
+ * sample-by-sample and apply gain,
+ * L channel is loaded to real part, R channel to imag part,
+ * IRs ared shifted by L and R delay */
+ fft_in_l[delay_l[i] + j].re = /* left channel */
+ *(s->sofa.data_ir + 2 * m[i] * n_samples + j) * gain_lin;
+ fft_in_r[delay_r[i] + j].re = /* right channel */
+ *(s->sofa.data_ir + (2 * m[i] + 1) * n_samples + j) * gain_lin;
+ }
+
+ /* actually transform to frequency domain (IRs -> HRTFs) */
+ av_fft_permute(s->fft[0], fft_in_l);
+ av_fft_calc(s->fft[0], fft_in_l);
+ memcpy(data_hrtf_l + offset, fft_in_l, n_fft * sizeof(*fft_in_l));
+ av_fft_permute(s->fft[0], fft_in_r);
+ av_fft_calc(s->fft[0], fft_in_r);
+ memcpy(data_hrtf_r + offset, fft_in_r, n_fft * sizeof(*fft_in_r));
+ }
+
+ av_log(ctx, AV_LOG_DEBUG, "Index: %d, Azimuth: %f, Elevation: %f, Radius: %f of SOFA file.\n",
+ m[i], *(s->sofa.sp_a + m[i]), *(s->sofa.sp_e + m[i]), *(s->sofa.sp_r + m[i]));
+ }
+
+ if (s->type == TIME_DOMAIN) {
+ /* copy IRs and delays to allocated memory in the SOFAlizerContext struct: */
+ memcpy(s->data_ir[0], data_ir_l, sizeof(float) * n_conv * FFALIGN(n_samples, 16));
+ memcpy(s->data_ir[1], data_ir_r, sizeof(float) * n_conv * FFALIGN(n_samples, 16));
+
+ av_freep(&data_ir_l); /* free temporary IR memory */
+ av_freep(&data_ir_r);
+ } else {
+ s->data_hrtf[0] = av_malloc_array(n_fft * s->n_conv, sizeof(FFTComplex));
+ s->data_hrtf[1] = av_malloc_array(n_fft * s->n_conv, sizeof(FFTComplex));
+ if (!s->data_hrtf[0] || !s->data_hrtf[1]) {
+ av_freep(&data_hrtf_l);
+ av_freep(&data_hrtf_r);
+ av_freep(&fft_in_l);
+ av_freep(&fft_in_r);
+ return AVERROR(ENOMEM); /* memory allocation failed */
+ }
+
+ memcpy(s->data_hrtf[0], data_hrtf_l, /* copy HRTF data to */
+ sizeof(FFTComplex) * n_conv * n_fft); /* filter struct */
+ memcpy(s->data_hrtf[1], data_hrtf_r,
+ sizeof(FFTComplex) * n_conv * n_fft);
+
+ av_freep(&data_hrtf_l); /* free temporary HRTF memory */
+ av_freep(&data_hrtf_r);
+
+ av_freep(&fft_in_l); /* free temporary FFT memory */
+ av_freep(&fft_in_r);
+ }
+
+ memcpy(s->delay[0], &delay_l[0], sizeof(int) * s->n_conv);
+ memcpy(s->delay[1], &delay_r[0], sizeof(int) * s->n_conv);
+
+ return 0;
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ SOFAlizerContext *s = ctx->priv;
+ int ret;
+
+ if (!s->filename) {
+ av_log(ctx, AV_LOG_ERROR, "Valid SOFA filename must be set.\n");
+ return AVERROR(EINVAL);
+ }
+
+ /* load SOFA file, */
+ /* initialize file IDs to 0 before attempting to load SOFA files,
+ * this assures that in case of error, only the memory of already
+ * loaded files is free'd */
+ s->sofa.ncid = 0;
+ ret = load_sofa(ctx, s->filename, &s->sample_rate);
+ if (ret) {
+ /* file loading error */
+ av_log(ctx, AV_LOG_ERROR, "Error while loading SOFA file: '%s'\n", s->filename);
+ } else { /* no file loading error, resampling not required */
+ av_log(ctx, AV_LOG_DEBUG, "File '%s' loaded.\n", s->filename);
+ }
+
+ if (ret) {
+ av_log(ctx, AV_LOG_ERROR, "No valid SOFA file could be loaded. Please specify valid SOFA file.\n");
+ return ret;
+ }
+
+ s->fdsp = avpriv_float_dsp_alloc(0);
+ if (!s->fdsp)
+ return AVERROR(ENOMEM);
+
+ return 0;
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ SOFAlizerContext *s = ctx->priv;
+ int nb_input_channels = inlink->channels; /* no. input channels */
+ int n_max_ir = 0;
+ int n_current;
+ int n_max = 0;
+ int ret;
+
+ if (s->type == FREQUENCY_DOMAIN) {
+ inlink->partial_buf_size =
+ inlink->min_samples =
+ inlink->max_samples = inlink->sample_rate;
+ }
+
+ /* gain -3 dB per channel, -6 dB to get LFE on a similar level */
+ s->gain_lfe = expf((s->gain - 3 * inlink->channels - 6) / 20 * M_LN10);
+
+ s->n_conv = nb_input_channels;
+
+ /* get size of ringbuffer (longest IR plus max. delay) */
+ /* then choose next power of 2 for performance optimization */
+ n_current = s->sofa.n_samples + max_delay(&s->sofa);
+ if (n_current > n_max) {
+ /* length of longest IR plus max. delay (in all SOFA files) */
+ n_max = n_current;
+ /* length of longest IR (without delay, in all SOFA files) */
+ n_max_ir = s->sofa.n_samples;
+ }
+ /* buffer length is longest IR plus max. delay -> next power of 2
+ (32 - count leading zeros gives required exponent) */
+ s->buffer_length = 1 << (32 - ff_clz(n_max));
+ s->n_fft = 1 << (32 - ff_clz(n_max + inlink->sample_rate));
+
+ if (s->type == FREQUENCY_DOMAIN) {
+ av_fft_end(s->fft[0]);
+ av_fft_end(s->fft[1]);
+ s->fft[0] = av_fft_init(log2(s->n_fft), 0);
+ s->fft[1] = av_fft_init(log2(s->n_fft), 0);
+ av_fft_end(s->ifft[0]);
+ av_fft_end(s->ifft[1]);
+ s->ifft[0] = av_fft_init(log2(s->n_fft), 1);
+ s->ifft[1] = av_fft_init(log2(s->n_fft), 1);
+
+ if (!s->fft[0] || !s->fft[1] || !s->ifft[0] || !s->ifft[1]) {
+ av_log(ctx, AV_LOG_ERROR, "Unable to create FFT contexts of size %d.\n", s->n_fft);
+ return AVERROR(ENOMEM);
+ }
+ }
+
+ /* Allocate memory for the impulse responses, delays and the ringbuffers */
+ /* size: (longest IR) * (number of channels to convolute) */
+ s->data_ir[0] = av_calloc(FFALIGN(n_max_ir, 16), sizeof(float) * s->n_conv);
+ s->data_ir[1] = av_calloc(FFALIGN(n_max_ir, 16), sizeof(float) * s->n_conv);
+ /* length: number of channels to convolute */
+ s->delay[0] = av_malloc_array(s->n_conv, sizeof(float));
+ s->delay[1] = av_malloc_array(s->n_conv, sizeof(float));
+ /* length: (buffer length) * (number of input channels),
+ * OR: buffer length (if frequency domain processing)
+ * calloc zero-initializes the buffer */
+
+ if (s->type == TIME_DOMAIN) {
+ s->ringbuffer[0] = av_calloc(s->buffer_length, sizeof(float) * nb_input_channels);
+ s->ringbuffer[1] = av_calloc(s->buffer_length, sizeof(float) * nb_input_channels);
+ } else {
+ s->ringbuffer[0] = av_calloc(s->buffer_length, sizeof(float));
+ s->ringbuffer[1] = av_calloc(s->buffer_length, sizeof(float));
+ s->temp_fft[0] = av_malloc_array(s->n_fft, sizeof(FFTComplex));
+ s->temp_fft[1] = av_malloc_array(s->n_fft, sizeof(FFTComplex));
+ if (!s->temp_fft[0] || !s->temp_fft[1])
+ return AVERROR(ENOMEM);
+ }
+
+ /* length: number of channels to convolute */
+ s->speaker_azim = av_calloc(s->n_conv, sizeof(*s->speaker_azim));
+ s->speaker_elev = av_calloc(s->n_conv, sizeof(*s->speaker_elev));
+
+ /* memory allocation failed: */
+ if (!s->data_ir[0] || !s->data_ir[1] || !s->delay[1] ||
+ !s->delay[0] || !s->ringbuffer[0] || !s->ringbuffer[1] ||
+ !s->speaker_azim || !s->speaker_elev)
+ return AVERROR(ENOMEM);
+
+ compensate_volume(ctx);
+
+ /* get speaker positions */
+ if ((ret = get_speaker_pos(ctx, s->speaker_azim, s->speaker_elev)) < 0) {
+ av_log(ctx, AV_LOG_ERROR, "Couldn't get speaker positions. Input channel configuration not supported.\n");
+ return ret;
+ }
+
+ /* load IRs to data_ir[0] and data_ir[1] for required directions */
+ if ((ret = load_data(ctx, s->rotation, s->elevation, s->radius)) < 0)
+ return ret;
+
+ av_log(ctx, AV_LOG_DEBUG, "Samplerate: %d Channels to convolute: %d, Length of ringbuffer: %d x %d\n",
+ inlink->sample_rate, s->n_conv, nb_input_channels, s->buffer_length);
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ SOFAlizerContext *s = ctx->priv;
+
+ if (s->sofa.ncid) {
+ av_freep(&s->sofa.sp_a);
+ av_freep(&s->sofa.sp_e);
+ av_freep(&s->sofa.sp_r);
+ av_freep(&s->sofa.data_delay);
+ av_freep(&s->sofa.data_ir);
+ }
+ av_fft_end(s->ifft[0]);
+ av_fft_end(s->ifft[1]);
+ av_fft_end(s->fft[0]);
+ av_fft_end(s->fft[1]);
+ av_freep(&s->delay[0]);
+ av_freep(&s->delay[1]);
+ av_freep(&s->data_ir[0]);
+ av_freep(&s->data_ir[1]);
+ av_freep(&s->ringbuffer[0]);
+ av_freep(&s->ringbuffer[1]);
+ av_freep(&s->speaker_azim);
+ av_freep(&s->speaker_elev);
+ av_freep(&s->temp_src[0]);
+ av_freep(&s->temp_src[1]);
+ av_freep(&s->temp_fft[0]);
+ av_freep(&s->temp_fft[1]);
+ av_freep(&s->data_hrtf[0]);
+ av_freep(&s->data_hrtf[1]);
+ av_freep(&s->fdsp);
+}
+
+#define OFFSET(x) offsetof(SOFAlizerContext, x)
+#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption sofalizer_options[] = {
+ { "sofa", "sofa filename", OFFSET(filename), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
+ { "gain", "set gain in dB", OFFSET(gain), AV_OPT_TYPE_FLOAT, {.dbl=0}, -20, 40, .flags = FLAGS },
+ { "rotation", "set rotation" , OFFSET(rotation), AV_OPT_TYPE_FLOAT, {.dbl=0}, -360, 360, .flags = FLAGS },
+ { "elevation", "set elevation", OFFSET(elevation), AV_OPT_TYPE_FLOAT, {.dbl=0}, -90, 90, .flags = FLAGS },
+ { "radius", "set radius", OFFSET(radius), AV_OPT_TYPE_FLOAT, {.dbl=1}, 0, 3, .flags = FLAGS },
+ { "type", "set processing", OFFSET(type), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, .flags = FLAGS, "type" },
+ { "time", "time domain", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, .flags = FLAGS, "type" },
+ { "freq", "frequency domain", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, .flags = FLAGS, "type" },
+ { "speakers", "set speaker custom positions", OFFSET(speakers_pos), AV_OPT_TYPE_STRING, {.str=0}, 0, 0, .flags = FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(sofalizer);
+
+static const AVFilterPad inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_sofalizer = {
+ .name = "sofalizer",
+ .description = NULL_IF_CONFIG_SMALL("SOFAlizer (Spatially Oriented Format for Acoustics)."),
+ .priv_size = sizeof(SOFAlizerContext),
+ .priv_class = &sofalizer_class,
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = inputs,
+ .outputs = outputs,
+ .flags = AVFILTER_FLAG_SLICE_THREADS,
+};
diff --git a/libavfilter/af_stereotools.c b/libavfilter/af_stereotools.c
new file mode 100644
index 0000000000..8ab184df11
--- /dev/null
+++ b/libavfilter/af_stereotools.c
@@ -0,0 +1,305 @@
+/*
+ * Copyright (C) 2001-2010 Krzysztof Foltman, Markus Schmidt, Thor Harald Johansen
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/channel_layout.h"
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#include "audio.h"
+#include "formats.h"
+
+typedef struct StereoToolsContext {
+ const AVClass *class;
+
+ int softclip;
+ int mute_l;
+ int mute_r;
+ int phase_l;
+ int phase_r;
+ int mode;
+ double slev;
+ double sbal;
+ double mlev;
+ double mpan;
+ double phase;
+ double base;
+ double delay;
+ double balance_in;
+ double balance_out;
+ double phase_sin_coef;
+ double phase_cos_coef;
+ double sc_level;
+ double inv_atan_shape;
+ double level_in;
+ double level_out;
+
+ double *buffer;
+ int length;
+ int pos;
+} StereoToolsContext;
+
+#define OFFSET(x) offsetof(StereoToolsContext, x)
+#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption stereotools_options[] = {
+ { "level_in", "set level in", OFFSET(level_in), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0.015625, 64, A },
+ { "level_out", "set level out", OFFSET(level_out), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0.015625, 64, A },
+ { "balance_in", "set balance in", OFFSET(balance_in), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -1, 1, A },
+ { "balance_out", "set balance out", OFFSET(balance_out), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -1, 1, A },
+ { "softclip", "enable softclip", OFFSET(softclip), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, A },
+ { "mutel", "mute L", OFFSET(mute_l), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, A },
+ { "muter", "mute R", OFFSET(mute_r), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, A },
+ { "phasel", "phase L", OFFSET(phase_l), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, A },
+ { "phaser", "phase R", OFFSET(phase_r), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, A },
+ { "mode", "set stereo mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=0}, 0, 6, A, "mode" },
+ { "lr>lr", 0, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, A, "mode" },
+ { "lr>ms", 0, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, A, "mode" },
+ { "ms>lr", 0, 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, A, "mode" },
+ { "lr>ll", 0, 0, AV_OPT_TYPE_CONST, {.i64=3}, 0, 0, A, "mode" },
+ { "lr>rr", 0, 0, AV_OPT_TYPE_CONST, {.i64=4}, 0, 0, A, "mode" },
+ { "lr>l+r", 0, 0, AV_OPT_TYPE_CONST, {.i64=5}, 0, 0, A, "mode" },
+ { "lr>rl", 0, 0, AV_OPT_TYPE_CONST, {.i64=6}, 0, 0, A, "mode" },
+ { "slev", "set side level", OFFSET(slev), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0.015625, 64, A },
+ { "sbal", "set side balance", OFFSET(sbal), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -1, 1, A },
+ { "mlev", "set middle level", OFFSET(mlev), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0.015625, 64, A },
+ { "mpan", "set middle pan", OFFSET(mpan), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -1, 1, A },
+ { "base", "set stereo base", OFFSET(base), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -1, 1, A },
+ { "delay", "set delay", OFFSET(delay), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -20, 20, A },
+ { "sclevel", "set S/C level", OFFSET(sc_level), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 1, 100, A },
+ { "phase", "set stereo phase", OFFSET(phase), AV_OPT_TYPE_DOUBLE, {.dbl=0}, 0, 360, A },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(stereotools);
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *formats = NULL;
+ AVFilterChannelLayouts *layout = NULL;
+ int ret;
+
+ if ((ret = ff_add_format (&formats, AV_SAMPLE_FMT_DBL )) < 0 ||
+ (ret = ff_set_common_formats (ctx , formats )) < 0 ||
+ (ret = ff_add_channel_layout (&layout , AV_CH_LAYOUT_STEREO)) < 0 ||
+ (ret = ff_set_common_channel_layouts (ctx , layout )) < 0)
+ return ret;
+
+ formats = ff_all_samplerates();
+ return ff_set_common_samplerates(ctx, formats);
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ StereoToolsContext *s = ctx->priv;
+
+ s->length = 2 * inlink->sample_rate * 0.05;
+ if (s->length <= 1 || s->length & 1) {
+ av_log(ctx, AV_LOG_ERROR, "sample rate is too small\n");
+ return AVERROR(EINVAL);
+ }
+ s->buffer = av_calloc(s->length, sizeof(*s->buffer));
+ if (!s->buffer)
+ return AVERROR(ENOMEM);
+
+ s->inv_atan_shape = 1.0 / atan(s->sc_level);
+ s->phase_cos_coef = cos(s->phase / 180 * M_PI);
+ s->phase_sin_coef = sin(s->phase / 180 * M_PI);
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ StereoToolsContext *s = ctx->priv;
+ const double *src = (const double *)in->data[0];
+ const double sb = s->base < 0 ? s->base * 0.5 : s->base;
+ const double sbal = 1 + s->sbal;
+ const double mpan = 1 + s->mpan;
+ const double slev = s->slev;
+ const double mlev = s->mlev;
+ const double balance_in = s->balance_in;
+ const double balance_out = s->balance_out;
+ const double level_in = s->level_in;
+ const double level_out = s->level_out;
+ const double sc_level = s->sc_level;
+ const double delay = s->delay;
+ const int length = s->length;
+ const int mute_l = s->mute_l;
+ const int mute_r = s->mute_r;
+ const int phase_l = s->phase_l;
+ const int phase_r = s->phase_r;
+ double *buffer = s->buffer;
+ AVFrame *out;
+ double *dst;
+ int nbuf = inlink->sample_rate * (fabs(delay) / 1000.);
+ int n;
+
+ nbuf -= nbuf % 2;
+ if (av_frame_is_writable(in)) {
+ out = in;
+ } else {
+ out = ff_get_audio_buffer(inlink, in->nb_samples);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+ }
+ dst = (double *)out->data[0];
+
+ for (n = 0; n < in->nb_samples; n++, src += 2, dst += 2) {
+ double L = src[0], R = src[1], l, r, m, S;
+
+ L *= level_in;
+ R *= level_in;
+
+ L *= 1. - FFMAX(0., balance_in);
+ R *= 1. + FFMIN(0., balance_in);
+
+ if (s->softclip) {
+ R = s->inv_atan_shape * atan(R * sc_level);
+ L = s->inv_atan_shape * atan(L * sc_level);
+ }
+
+ switch (s->mode) {
+ case 0:
+ m = (L + R) * 0.5;
+ S = (L - R) * 0.5;
+ l = m * mlev * FFMIN(1., 2. - mpan) + S * slev * FFMIN(1., 2. - sbal);
+ r = m * mlev * FFMIN(1., mpan) - S * slev * FFMIN(1., sbal);
+ L = l;
+ R = r;
+ break;
+ case 1:
+ l = L * FFMIN(1., 2. - sbal);
+ r = R * FFMIN(1., sbal);
+ L = 0.5 * (l + r) * mlev;
+ R = 0.5 * (l - r) * slev;
+ break;
+ case 2:
+ l = L * mlev * FFMIN(1., 2. - mpan) + R * slev * FFMIN(1., 2. - sbal);
+ r = L * mlev * FFMIN(1., mpan) - R * slev * FFMIN(1., sbal);
+ L = l;
+ R = r;
+ break;
+ case 3:
+ R = L;
+ break;
+ case 4:
+ L = R;
+ break;
+ case 5:
+ L = (L + R) / 2;
+ R = L;
+ break;
+ case 6:
+ l = L;
+ L = R;
+ R = l;
+ m = (L + R) * 0.5;
+ S = (L - R) * 0.5;
+ l = m * mlev * FFMIN(1., 2. - mpan) + S * slev * FFMIN(1., 2. - sbal);
+ r = m * mlev * FFMIN(1., mpan) - S * slev * FFMIN(1., sbal);
+ L = l;
+ R = r;
+ break;
+ }
+
+ L *= 1. - mute_l;
+ R *= 1. - mute_r;
+
+ L *= (2. * (1. - phase_l)) - 1.;
+ R *= (2. * (1. - phase_r)) - 1.;
+
+ buffer[s->pos ] = L;
+ buffer[s->pos+1] = R;
+
+ if (delay > 0.) {
+ R = buffer[(s->pos - (int)nbuf + 1 + length) % length];
+ } else if (delay < 0.) {
+ L = buffer[(s->pos - (int)nbuf + length) % length];
+ }
+
+ l = L + sb * L - sb * R;
+ r = R + sb * R - sb * L;
+
+ L = l;
+ R = r;
+
+ l = L * s->phase_cos_coef - R * s->phase_sin_coef;
+ r = L * s->phase_sin_coef + R * s->phase_cos_coef;
+
+ L = l;
+ R = r;
+
+ s->pos = (s->pos + 2) % s->length;
+
+ L *= 1. - FFMAX(0., balance_out);
+ R *= 1. + FFMIN(0., balance_out);
+
+ L *= level_out;
+ R *= level_out;
+
+ dst[0] = L;
+ dst[1] = R;
+ }
+
+ if (out != in)
+ av_frame_free(&in);
+ return ff_filter_frame(outlink, out);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ StereoToolsContext *s = ctx->priv;
+
+ av_freep(&s->buffer);
+}
+
+static const AVFilterPad inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ { NULL }
+};
+
+static const AVFilterPad outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_stereotools = {
+ .name = "stereotools",
+ .description = NULL_IF_CONFIG_SMALL("Apply various stereo tools."),
+ .query_formats = query_formats,
+ .priv_size = sizeof(StereoToolsContext),
+ .priv_class = &stereotools_class,
+ .uninit = uninit,
+ .inputs = inputs,
+ .outputs = outputs,
+};
diff --git a/libavfilter/af_stereowiden.c b/libavfilter/af_stereowiden.c
new file mode 100644
index 0000000000..24146ff1df
--- /dev/null
+++ b/libavfilter/af_stereowiden.c
@@ -0,0 +1,162 @@
+/*
+ * Copyright (C) 2012 VLC authors and VideoLAN
+ * Author : Sukrit Sangwan < sukritsangwan at gmail dot com >
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/channel_layout.h"
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#include "audio.h"
+#include "formats.h"
+
+typedef struct StereoWidenContext {
+ const AVClass *class;
+
+ float delay;
+ float feedback;
+ float crossfeed;
+ float drymix;
+
+ float *buffer;
+ float *cur;
+ int length;
+} StereoWidenContext;
+
+#define OFFSET(x) offsetof(StereoWidenContext, x)
+#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption stereowiden_options[] = {
+ { "delay", "set delay time", OFFSET(delay), AV_OPT_TYPE_FLOAT, {.dbl=20}, 1, 100, A },
+ { "feedback", "set feedback gain", OFFSET(feedback), AV_OPT_TYPE_FLOAT, {.dbl=.3}, 0, 0.9, A },
+ { "crossfeed", "set cross feed", OFFSET(crossfeed), AV_OPT_TYPE_FLOAT, {.dbl=.3}, 0, 0.8, A },
+ { "drymix", "set dry-mix", OFFSET(drymix), AV_OPT_TYPE_FLOAT, {.dbl=.8}, 0, 1.0, A },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(stereowiden);
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *formats = NULL;
+ AVFilterChannelLayouts *layout = NULL;
+ int ret;
+
+ if ((ret = ff_add_format (&formats, AV_SAMPLE_FMT_FLT )) < 0 ||
+ (ret = ff_set_common_formats (ctx , formats )) < 0 ||
+ (ret = ff_add_channel_layout (&layout , AV_CH_LAYOUT_STEREO)) < 0 ||
+ (ret = ff_set_common_channel_layouts (ctx , layout )) < 0)
+ return ret;
+
+ formats = ff_all_samplerates();
+ return ff_set_common_samplerates(ctx, formats);
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ StereoWidenContext *s = ctx->priv;
+
+ s->length = s->delay * inlink->sample_rate / 1000;
+ s->length *= 2;
+ s->buffer = av_calloc(s->length, sizeof(*s->buffer));
+ if (!s->buffer)
+ return AVERROR(ENOMEM);
+ s->cur = s->buffer;
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ StereoWidenContext *s = ctx->priv;
+ const float *src = (const float *)in->data[0];
+ const float drymix = s->drymix;
+ const float crossfeed = s->crossfeed;
+ const float feedback = s->feedback;
+ AVFrame *out;
+ float *dst;
+ int n;
+
+ if (av_frame_is_writable(in)) {
+ out = in;
+ } else {
+ out = ff_get_audio_buffer(inlink, in->nb_samples);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+ }
+ dst = (float *)out->data[0];
+
+ for (n = 0; n < in->nb_samples; n++, src += 2, dst += 2, s->cur += 2) {
+ const float left = src[0], right = src[1];
+
+ if (s->cur == s->buffer + s->length)
+ s->cur = s->buffer;
+
+ dst[0] = drymix * left - crossfeed * right - feedback * s->cur[1];
+ dst[1] = drymix * right - crossfeed * left - feedback * s->cur[0];
+
+ s->cur[0] = left;
+ s->cur[1] = right;
+ }
+
+ if (out != in)
+ av_frame_free(&in);
+ return ff_filter_frame(outlink, out);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ StereoWidenContext *s = ctx->priv;
+
+ av_freep(&s->buffer);
+}
+
+static const AVFilterPad inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ { NULL }
+};
+
+static const AVFilterPad outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_stereowiden = {
+ .name = "stereowiden",
+ .description = NULL_IF_CONFIG_SMALL("Apply stereo widening effect."),
+ .query_formats = query_formats,
+ .priv_size = sizeof(StereoWidenContext),
+ .priv_class = &stereowiden_class,
+ .uninit = uninit,
+ .inputs = inputs,
+ .outputs = outputs,
+};
diff --git a/libavfilter/af_tremolo.c b/libavfilter/af_tremolo.c
new file mode 100644
index 0000000000..572e9e3b56
--- /dev/null
+++ b/libavfilter/af_tremolo.c
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2015 Kyle Swanson <k@ylo.ph>.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#include "internal.h"
+#include "audio.h"
+
+typedef struct TremoloContext {
+ const AVClass *class;
+ double freq;
+ double depth;
+ double *table;
+ int index;
+} TremoloContext;
+
+#define OFFSET(x) offsetof(TremoloContext, x)
+#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption tremolo_options[] = {
+ { "f", "set frequency in hertz", OFFSET(freq), AV_OPT_TYPE_DOUBLE, {.dbl = 5.0}, 0.1, 20000.0, FLAGS },
+ { "d", "set depth as percentage", OFFSET(depth), AV_OPT_TYPE_DOUBLE, {.dbl = 0.5}, 0.0, 1.0, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(tremolo);
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ TremoloContext *s = ctx->priv;
+ const double *src = (const double *)in->data[0];
+ const int channels = inlink->channels;
+ const int nb_samples = in->nb_samples;
+ AVFrame *out;
+ double *dst;
+ int n, c;
+
+ if (av_frame_is_writable(in)) {
+ out = in;
+ } else {
+ out = ff_get_audio_buffer(inlink, in->nb_samples);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+ }
+ dst = (double *)out->data[0];
+
+ for (n = 0; n < nb_samples; n++) {
+ for (c = 0; c < channels; c++)
+ dst[c] = src[c] * s->table[s->index];
+ dst += channels;
+ src += channels;
+ s->index++;
+ if (s->index >= inlink->sample_rate / s->freq)
+ s->index = 0;
+ }
+
+ if (in != out)
+ av_frame_free(&in);
+
+ return ff_filter_frame(outlink, out);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *formats;
+ AVFilterChannelLayouts *layouts;
+ static const enum AVSampleFormat sample_fmts[] = {
+ AV_SAMPLE_FMT_DBL,
+ AV_SAMPLE_FMT_NONE
+ };
+ int ret;
+
+ layouts = ff_all_channel_counts();
+ if (!layouts)
+ return AVERROR(ENOMEM);
+ ret = ff_set_common_channel_layouts(ctx, layouts);
+ if (ret < 0)
+ return ret;
+
+ formats = ff_make_format_list(sample_fmts);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ret = ff_set_common_formats(ctx, formats);
+ if (ret < 0)
+ return ret;
+
+ formats = ff_all_samplerates();
+ if (!formats)
+ return AVERROR(ENOMEM);
+ return ff_set_common_samplerates(ctx, formats);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ TremoloContext *s = ctx->priv;
+ av_freep(&s->table);
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ TremoloContext *s = ctx->priv;
+ const double offset = 1. - s->depth / 2.;
+ int i;
+
+ s->table = av_malloc_array(inlink->sample_rate / s->freq, sizeof(*s->table));
+ if (!s->table)
+ return AVERROR(ENOMEM);
+
+ for (i = 0; i < inlink->sample_rate / s->freq; i++) {
+ double env = s->freq * i / inlink->sample_rate;
+ env = sin(2 * M_PI * fmod(env + 0.25, 1.0));
+ s->table[i] = env * (1 - fabs(offset)) + offset;
+ }
+
+ s->index = 0;
+
+ return 0;
+}
+
+static const AVFilterPad avfilter_af_tremolo_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad avfilter_af_tremolo_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_tremolo = {
+ .name = "tremolo",
+ .description = NULL_IF_CONFIG_SMALL("Apply tremolo effect."),
+ .priv_size = sizeof(TremoloContext),
+ .priv_class = &tremolo_class,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = avfilter_af_tremolo_inputs,
+ .outputs = avfilter_af_tremolo_outputs,
+};
diff --git a/libavfilter/af_vibrato.c b/libavfilter/af_vibrato.c
new file mode 100644
index 0000000000..c7691f2f2a
--- /dev/null
+++ b/libavfilter/af_vibrato.c
@@ -0,0 +1,210 @@
+/*
+ * Copyright (c) 2015 Kyle Swanson <k@ylo.ph>.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#include "internal.h"
+#include "audio.h"
+#include "generate_wave_table.h"
+
+typedef struct VibratoContext {
+ const AVClass *class;
+ double freq;
+ double depth;
+ int channels;
+
+ double **buf;
+ int buf_index;
+ int buf_size;
+
+ double *wave_table;
+ int wave_table_index;
+ int wave_table_size;
+} VibratoContext;
+
+#define OFFSET(x) offsetof(VibratoContext, x)
+#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption vibrato_options[] = {
+ { "f", "set frequency in hertz", OFFSET(freq), AV_OPT_TYPE_DOUBLE, {.dbl = 5.0}, 0.1, 20000.0, FLAGS },
+ { "d", "set depth as percentage", OFFSET(depth), AV_OPT_TYPE_DOUBLE, {.dbl = 0.5}, 0.00, 1.0, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(vibrato);
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ VibratoContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFrame *out;
+ int n, c;
+ const double *src;
+ double *dst;
+
+ if (av_frame_is_writable(in)) {
+ out = in;
+ } else {
+ out = ff_get_audio_buffer(inlink, in->nb_samples);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+ }
+
+
+ for (n = 0; n < in->nb_samples; n++) {
+ double integer, decimal;
+ decimal = modf(s->depth * s->wave_table[s->wave_table_index], &integer);
+
+ s->wave_table_index++;
+ if (s->wave_table_index >= s->wave_table_size)
+ s->wave_table_index -= s->wave_table_size;
+
+ for (c = 0; c < inlink->channels; c++) {
+ int samp1_index, samp2_index;
+ double *buf;
+ double this_samp;
+
+ src = (const double *)in->extended_data[c];
+ dst = (double *)out->extended_data[c];
+ buf = s->buf[c];
+
+ samp1_index = s->buf_index + integer;
+ if (samp1_index >= s->buf_size)
+ samp1_index -= s->buf_size;
+ samp2_index = samp1_index + 1;
+ if (samp2_index >= s->buf_size)
+ samp2_index -= s->buf_size;
+
+ this_samp = src[n];
+ dst[n] = buf[samp1_index] + (decimal * (buf[samp2_index] - buf[samp1_index]));
+ buf[s->buf_index] = this_samp;
+ }
+ s->buf_index++;
+ if (s->buf_index >= s->buf_size)
+ s->buf_index -= s->buf_size;
+ }
+
+ if (in != out)
+ av_frame_free(&in);
+
+ return ff_filter_frame(outlink, out);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *formats;
+ AVFilterChannelLayouts *layouts;
+ static const enum AVSampleFormat sample_fmts[] = {
+ AV_SAMPLE_FMT_DBLP,
+ AV_SAMPLE_FMT_NONE
+ };
+ int ret;
+
+ layouts = ff_all_channel_counts();
+ if (!layouts)
+ return AVERROR(ENOMEM);
+ ret = ff_set_common_channel_layouts(ctx, layouts);
+ if (ret < 0)
+ return ret;
+
+ formats = ff_make_format_list(sample_fmts);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ret = ff_set_common_formats(ctx, formats);
+ if (ret < 0)
+ return ret;
+
+ formats = ff_all_samplerates();
+ if (!formats)
+ return AVERROR(ENOMEM);
+ return ff_set_common_samplerates(ctx, formats);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ VibratoContext *s = ctx->priv;
+ int c;
+
+ av_freep(&s->wave_table);
+ for (c = 0; c < s->channels; c++)
+ av_freep(&s->buf[c]);
+ av_freep(&s->buf);
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ int c;
+ AVFilterContext *ctx = inlink->dst;
+ VibratoContext *s = ctx->priv;
+ s->channels = inlink->channels;
+
+ s->buf = av_calloc(inlink->channels, sizeof(*s->buf));
+ if (!s->buf)
+ return AVERROR(ENOMEM);
+ s->buf_size = inlink->sample_rate * 0.005;
+ for (c = 0; c < s->channels; c++) {
+ s->buf[c] = av_malloc_array(s->buf_size, sizeof(*s->buf[c]));
+ if (!s->buf[c])
+ return AVERROR(ENOMEM);
+ }
+ s->buf_index = 0;
+
+ s->wave_table_size = inlink->sample_rate / s->freq;
+ s->wave_table = av_malloc_array(s->wave_table_size, sizeof(*s->wave_table));
+ if (!s->wave_table)
+ return AVERROR(ENOMEM);
+ ff_generate_wave_table(WAVE_SIN, AV_SAMPLE_FMT_DBL, s->wave_table, s->wave_table_size, 0.0, s->buf_size - 1, 3.0 * M_PI_2);
+ s->wave_table_index = 0;
+
+ return 0;
+}
+
+static const AVFilterPad avfilter_af_vibrato_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad avfilter_af_vibrato_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_vibrato = {
+ .name = "vibrato",
+ .description = NULL_IF_CONFIG_SMALL("Apply vibrato effect."),
+ .priv_size = sizeof(VibratoContext),
+ .priv_class = &vibrato_class,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = avfilter_af_vibrato_inputs,
+ .outputs = avfilter_af_vibrato_outputs,
+};
diff --git a/libavfilter/af_volume.c b/libavfilter/af_volume.c
index 11d85a17eb..68134033ec 100644
--- a/libavfilter/af_volume.c
+++ b/libavfilter/af_volume.c
@@ -2,20 +2,20 @@
* Copyright (c) 2011 Stefano Sabatini
* Copyright (c) 2012 Justin Ruggles <justin.ruggles@gmail.com>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -27,6 +27,7 @@
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/eval.h"
+#include "libavutil/ffmath.h"
#include "libavutil/float_dsp.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/opt.h"
@@ -38,57 +39,93 @@
#include "internal.h"
#include "af_volume.h"
-static const char *precision_str[] = {
+static const char * const precision_str[] = {
"fixed", "float", "double"
};
+static const char *const var_names[] = {
+ "n", ///< frame number (starting at zero)
+ "nb_channels", ///< number of channels
+ "nb_consumed_samples", ///< number of samples consumed by the filter
+ "nb_samples", ///< number of samples in the current frame
+ "pos", ///< position in the file of the frame
+ "pts", ///< frame presentation timestamp
+ "sample_rate", ///< sample rate
+ "startpts", ///< PTS at start of stream
+ "startt", ///< time at start of stream
+ "t", ///< time in the file of the frame
+ "tb", ///< timebase
+ "volume", ///< last set value
+ NULL
+};
+
#define OFFSET(x) offsetof(VolumeContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
-
-static const AVOption options[] = {
- { "volume", "Volume adjustment.",
- OFFSET(volume), AV_OPT_TYPE_DOUBLE, { .dbl = 1.0 }, 0, 0x7fffff, A },
- { "precision", "Mathematical precision.",
- OFFSET(precision), AV_OPT_TYPE_INT, { .i64 = PRECISION_FLOAT }, PRECISION_FIXED, PRECISION_DOUBLE, A, "precision" },
- { "fixed", "8-bit fixed-point.", 0, AV_OPT_TYPE_CONST, { .i64 = PRECISION_FIXED }, INT_MIN, INT_MAX, A, "precision" },
- { "float", "32-bit floating-point.", 0, AV_OPT_TYPE_CONST, { .i64 = PRECISION_FLOAT }, INT_MIN, INT_MAX, A, "precision" },
- { "double", "64-bit floating-point.", 0, AV_OPT_TYPE_CONST, { .i64 = PRECISION_DOUBLE }, INT_MIN, INT_MAX, A, "precision" },
+#define F AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption volume_options[] = {
+ { "volume", "set volume adjustment expression",
+ OFFSET(volume_expr), AV_OPT_TYPE_STRING, { .str = "1.0" }, .flags = A|F },
+ { "precision", "select mathematical precision",
+ OFFSET(precision), AV_OPT_TYPE_INT, { .i64 = PRECISION_FLOAT }, PRECISION_FIXED, PRECISION_DOUBLE, A|F, "precision" },
+ { "fixed", "select 8-bit fixed-point", 0, AV_OPT_TYPE_CONST, { .i64 = PRECISION_FIXED }, INT_MIN, INT_MAX, A|F, "precision" },
+ { "float", "select 32-bit floating-point", 0, AV_OPT_TYPE_CONST, { .i64 = PRECISION_FLOAT }, INT_MIN, INT_MAX, A|F, "precision" },
+ { "double", "select 64-bit floating-point", 0, AV_OPT_TYPE_CONST, { .i64 = PRECISION_DOUBLE }, INT_MIN, INT_MAX, A|F, "precision" },
+ { "eval", "specify when to evaluate expressions", OFFSET(eval_mode), AV_OPT_TYPE_INT, {.i64 = EVAL_MODE_ONCE}, 0, EVAL_MODE_NB-1, .flags = A|F, "eval" },
+ { "once", "eval volume expression once", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_ONCE}, .flags = A|F, .unit = "eval" },
+ { "frame", "eval volume expression per-frame", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_FRAME}, .flags = A|F, .unit = "eval" },
{ "replaygain", "Apply replaygain side data when present",
- OFFSET(replaygain), AV_OPT_TYPE_INT, { .i64 = REPLAYGAIN_DROP }, REPLAYGAIN_DROP, REPLAYGAIN_ALBUM, A, "replaygain" },
- { "drop", "replaygain side data is dropped", 0, AV_OPT_TYPE_CONST, { .i64 = REPLAYGAIN_DROP }, 0, 0, A, "replaygain" },
- { "ignore", "replaygain side data is ignored", 0, AV_OPT_TYPE_CONST, { .i64 = REPLAYGAIN_IGNORE }, 0, 0, A, "replaygain" },
- { "track", "track gain is preferred", 0, AV_OPT_TYPE_CONST, { .i64 = REPLAYGAIN_TRACK }, 0, 0, A, "replaygain" },
- { "album", "album gain is preferred", 0, AV_OPT_TYPE_CONST, { .i64 = REPLAYGAIN_ALBUM }, 0, 0, A, "replaygain" },
+ OFFSET(replaygain), AV_OPT_TYPE_INT, { .i64 = REPLAYGAIN_DROP }, REPLAYGAIN_DROP, REPLAYGAIN_ALBUM, A|F, "replaygain" },
+ { "drop", "replaygain side data is dropped", 0, AV_OPT_TYPE_CONST, { .i64 = REPLAYGAIN_DROP }, 0, 0, A|F, "replaygain" },
+ { "ignore", "replaygain side data is ignored", 0, AV_OPT_TYPE_CONST, { .i64 = REPLAYGAIN_IGNORE }, 0, 0, A|F, "replaygain" },
+ { "track", "track gain is preferred", 0, AV_OPT_TYPE_CONST, { .i64 = REPLAYGAIN_TRACK }, 0, 0, A|F, "replaygain" },
+ { "album", "album gain is preferred", 0, AV_OPT_TYPE_CONST, { .i64 = REPLAYGAIN_ALBUM }, 0, 0, A|F, "replaygain" },
{ "replaygain_preamp", "Apply replaygain pre-amplification",
- OFFSET(replaygain_preamp), AV_OPT_TYPE_DOUBLE, { .dbl = 0.0 }, -15.0, 15.0, A },
+ OFFSET(replaygain_preamp), AV_OPT_TYPE_DOUBLE, { .dbl = 0.0 }, -15.0, 15.0, A|F },
{ "replaygain_noclip", "Apply replaygain clipping prevention",
- OFFSET(replaygain_noclip), AV_OPT_TYPE_INT, { .i64 = 1 }, 0, 1, A },
- { NULL },
+ OFFSET(replaygain_noclip), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, A|F },
+ { NULL }
};
-static const AVClass volume_class = {
- .class_name = "volume filter",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(volume);
+
+static int set_expr(AVExpr **pexpr, const char *expr, void *log_ctx)
+{
+ int ret;
+ AVExpr *old = NULL;
+
+ if (*pexpr)
+ old = *pexpr;
+ ret = av_expr_parse(pexpr, expr, var_names,
+ NULL, NULL, NULL, NULL, 0, log_ctx);
+ if (ret < 0) {
+ av_log(log_ctx, AV_LOG_ERROR,
+ "Error when evaluating the volume expression '%s'\n", expr);
+ *pexpr = old;
+ return ret;
+ }
+
+ av_expr_free(old);
+ return 0;
+}
static av_cold int init(AVFilterContext *ctx)
{
VolumeContext *vol = ctx->priv;
- if (vol->precision == PRECISION_FIXED) {
- vol->volume_i = (int)(vol->volume * 256 + 0.5);
- vol->volume = vol->volume_i / 256.0;
- av_log(ctx, AV_LOG_VERBOSE, "volume:(%d/256)(%f)(%1.2fdB) precision:fixed\n",
- vol->volume_i, vol->volume, 20.0*log(vol->volume)/M_LN10);
- } else {
- av_log(ctx, AV_LOG_VERBOSE, "volume:(%f)(%1.2fdB) precision:%s\n",
- vol->volume, 20.0*log(vol->volume)/M_LN10,
- precision_str[vol->precision]);
- }
+ vol->fdsp = avpriv_float_dsp_alloc(0);
+ if (!vol->fdsp)
+ return AVERROR(ENOMEM);
- return 0;
+ return set_expr(&vol->volume_pexpr, vol->volume_expr, ctx);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ VolumeContext *vol = ctx->priv;
+ av_expr_free(vol->volume_pexpr);
+ av_opt_free(vol);
+ av_freep(&vol->fdsp);
}
static int query_formats(AVFilterContext *ctx)
@@ -97,8 +134,7 @@ static int query_formats(AVFilterContext *ctx)
AVFilterFormats *formats = NULL;
AVFilterChannelLayouts *layouts;
static const enum AVSampleFormat sample_fmts[][7] = {
- /* PRECISION_FIXED */
- {
+ [PRECISION_FIXED] = {
AV_SAMPLE_FMT_U8,
AV_SAMPLE_FMT_U8P,
AV_SAMPLE_FMT_S16,
@@ -107,36 +143,37 @@ static int query_formats(AVFilterContext *ctx)
AV_SAMPLE_FMT_S32P,
AV_SAMPLE_FMT_NONE
},
- /* PRECISION_FLOAT */
- {
+ [PRECISION_FLOAT] = {
AV_SAMPLE_FMT_FLT,
AV_SAMPLE_FMT_FLTP,
AV_SAMPLE_FMT_NONE
},
- /* PRECISION_DOUBLE */
- {
+ [PRECISION_DOUBLE] = {
AV_SAMPLE_FMT_DBL,
AV_SAMPLE_FMT_DBLP,
AV_SAMPLE_FMT_NONE
}
};
+ int ret;
- layouts = ff_all_channel_layouts();
+ layouts = ff_all_channel_counts();
if (!layouts)
return AVERROR(ENOMEM);
- ff_set_common_channel_layouts(ctx, layouts);
+ ret = ff_set_common_channel_layouts(ctx, layouts);
+ if (ret < 0)
+ return ret;
formats = ff_make_format_list(sample_fmts[vol->precision]);
if (!formats)
return AVERROR(ENOMEM);
- ff_set_common_formats(ctx, formats);
+ ret = ff_set_common_formats(ctx, formats);
+ if (ret < 0)
+ return ret;
formats = ff_all_samplerates();
if (!formats)
return AVERROR(ENOMEM);
- ff_set_common_samplerates(ctx, formats);
-
- return 0;
+ return ff_set_common_samplerates(ctx, formats);
}
static inline void scale_samples_u8(uint8_t *dst, const uint8_t *src,
@@ -185,8 +222,6 @@ static inline void scale_samples_s32(uint8_t *dst, const uint8_t *src,
smp_dst[i] = av_clipl_int32((((int64_t)smp_src[i] * volume + 128) >> 8));
}
-
-
static av_cold void volume_init(VolumeContext *vol)
{
vol->samples_align = 1;
@@ -208,11 +243,9 @@ static av_cold void volume_init(VolumeContext *vol)
vol->scale_samples = scale_samples_s32;
break;
case AV_SAMPLE_FMT_FLT:
- avpriv_float_dsp_init(&vol->fdsp, 0);
vol->samples_align = 4;
break;
case AV_SAMPLE_FMT_DBL:
- avpriv_float_dsp_init(&vol->fdsp, 0);
vol->samples_align = 8;
break;
}
@@ -221,6 +254,38 @@ static av_cold void volume_init(VolumeContext *vol)
ff_volume_init_x86(vol);
}
+static int set_volume(AVFilterContext *ctx)
+{
+ VolumeContext *vol = ctx->priv;
+
+ vol->volume = av_expr_eval(vol->volume_pexpr, vol->var_values, NULL);
+ if (isnan(vol->volume)) {
+ if (vol->eval_mode == EVAL_MODE_ONCE) {
+ av_log(ctx, AV_LOG_ERROR, "Invalid value NaN for volume\n");
+ return AVERROR(EINVAL);
+ } else {
+ av_log(ctx, AV_LOG_WARNING, "Invalid value NaN for volume, setting to 0\n");
+ vol->volume = 0;
+ }
+ }
+ vol->var_values[VAR_VOLUME] = vol->volume;
+
+ av_log(ctx, AV_LOG_VERBOSE, "n:%f t:%f pts:%f precision:%s ",
+ vol->var_values[VAR_N], vol->var_values[VAR_T], vol->var_values[VAR_PTS],
+ precision_str[vol->precision]);
+
+ if (vol->precision == PRECISION_FIXED) {
+ vol->volume_i = (int)(vol->volume * 256 + 0.5);
+ vol->volume = vol->volume_i / 256.0;
+ av_log(ctx, AV_LOG_VERBOSE, "volume_i:%d/255 ", vol->volume_i);
+ }
+ av_log(ctx, AV_LOG_VERBOSE, "volume:%f volume_dB:%f\n",
+ vol->volume, 20.0*log10(vol->volume));
+
+ volume_init(vol);
+ return 0;
+}
+
static int config_output(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
@@ -228,20 +293,59 @@ static int config_output(AVFilterLink *outlink)
AVFilterLink *inlink = ctx->inputs[0];
vol->sample_fmt = inlink->format;
- vol->channels = av_get_channel_layout_nb_channels(inlink->channel_layout);
+ vol->channels = inlink->channels;
vol->planes = av_sample_fmt_is_planar(inlink->format) ? vol->channels : 1;
- volume_init(vol);
+ vol->var_values[VAR_N] =
+ vol->var_values[VAR_NB_CONSUMED_SAMPLES] =
+ vol->var_values[VAR_NB_SAMPLES] =
+ vol->var_values[VAR_POS] =
+ vol->var_values[VAR_PTS] =
+ vol->var_values[VAR_STARTPTS] =
+ vol->var_values[VAR_STARTT] =
+ vol->var_values[VAR_T] =
+ vol->var_values[VAR_VOLUME] = NAN;
+
+ vol->var_values[VAR_NB_CHANNELS] = inlink->channels;
+ vol->var_values[VAR_TB] = av_q2d(inlink->time_base);
+ vol->var_values[VAR_SAMPLE_RATE] = inlink->sample_rate;
+
+ av_log(inlink->src, AV_LOG_VERBOSE, "tb:%f sample_rate:%f nb_channels:%f\n",
+ vol->var_values[VAR_TB],
+ vol->var_values[VAR_SAMPLE_RATE],
+ vol->var_values[VAR_NB_CHANNELS]);
+
+ return set_volume(ctx);
+}
- return 0;
+static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
+ char *res, int res_len, int flags)
+{
+ VolumeContext *vol = ctx->priv;
+ int ret = AVERROR(ENOSYS);
+
+ if (!strcmp(cmd, "volume")) {
+ if ((ret = set_expr(&vol->volume_pexpr, args, ctx)) < 0)
+ return ret;
+ if (vol->eval_mode == EVAL_MODE_ONCE)
+ set_volume(ctx);
+ }
+
+ return ret;
}
+#define D2TS(d) (isnan(d) ? AV_NOPTS_VALUE : (int64_t)(d))
+#define TS2D(ts) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts))
+#define TS2T(ts, tb) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts)*av_q2d(tb))
+
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
+ AVFilterContext *ctx = inlink->dst;
VolumeContext *vol = inlink->dst->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
int nb_samples = buf->nb_samples;
AVFrame *out_buf;
+ int64_t pos;
AVFrameSideData *sd = av_frame_get_side_data(buf, AV_FRAME_DATA_REPLAYGAIN);
int ret;
@@ -273,7 +377,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
av_log(inlink->dst, AV_LOG_VERBOSE,
"Using gain %f dB from replaygain side data.\n", g);
- vol->volume = pow(10, (g + vol->replaygain_preamp) / 20);
+ vol->volume = ff_exp10((g + vol->replaygain_preamp) / 20);
if (vol->replaygain_noclip)
vol->volume = FFMIN(vol->volume, 1.0 / p);
vol->volume_i = (int)(vol->volume * 256 + 0.5);
@@ -283,11 +387,27 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
av_frame_remove_side_data(buf, AV_FRAME_DATA_REPLAYGAIN);
}
- if (vol->volume == 1.0 || vol->volume_i == 256)
- return ff_filter_frame(outlink, buf);
+ if (isnan(vol->var_values[VAR_STARTPTS])) {
+ vol->var_values[VAR_STARTPTS] = TS2D(buf->pts);
+ vol->var_values[VAR_STARTT ] = TS2T(buf->pts, inlink->time_base);
+ }
+ vol->var_values[VAR_PTS] = TS2D(buf->pts);
+ vol->var_values[VAR_T ] = TS2T(buf->pts, inlink->time_base);
+ vol->var_values[VAR_N ] = inlink->frame_count_out;
+
+ pos = av_frame_get_pkt_pos(buf);
+ vol->var_values[VAR_POS] = pos == -1 ? NAN : pos;
+ if (vol->eval_mode == EVAL_MODE_FRAME)
+ set_volume(ctx);
+
+ if (vol->volume == 1.0 || vol->volume_i == 256) {
+ out_buf = buf;
+ goto end;
+ }
/* do volume scaling in-place if input buffer is writable */
- if (av_frame_is_writable(buf)) {
+ if (av_frame_is_writable(buf)
+ && (vol->precision != PRECISION_FIXED || vol->volume_i > 0)) {
out_buf = buf;
} else {
out_buf = ff_get_audio_buffer(inlink, nb_samples);
@@ -317,13 +437,13 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
}
} else if (av_get_packed_sample_fmt(vol->sample_fmt) == AV_SAMPLE_FMT_FLT) {
for (p = 0; p < vol->planes; p++) {
- vol->fdsp.vector_fmul_scalar((float *)out_buf->extended_data[p],
+ vol->fdsp->vector_fmul_scalar((float *)out_buf->extended_data[p],
(const float *)buf->extended_data[p],
vol->volume, plane_samples);
}
} else {
for (p = 0; p < vol->planes; p++) {
- vol->fdsp.vector_dmul_scalar((double *)out_buf->extended_data[p],
+ vol->fdsp->vector_dmul_scalar((double *)out_buf->extended_data[p],
(const double *)buf->extended_data[p],
vol->volume, plane_samples);
}
@@ -335,6 +455,8 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
if (buf != out_buf)
av_frame_free(&buf);
+end:
+ vol->var_values[VAR_NB_CONSUMED_SAMPLES] += out_buf->nb_samples;
return ff_filter_frame(outlink, out_buf);
}
@@ -363,6 +485,9 @@ AVFilter ff_af_volume = {
.priv_size = sizeof(VolumeContext),
.priv_class = &volume_class,
.init = init,
+ .uninit = uninit,
.inputs = avfilter_af_volume_inputs,
.outputs = avfilter_af_volume_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+ .process_command = process_command,
};
diff --git a/libavfilter/af_volume.h b/libavfilter/af_volume.h
index 6bd89acc4d..af46e34ff6 100644
--- a/libavfilter/af_volume.h
+++ b/libavfilter/af_volume.h
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -21,10 +21,11 @@
* audio volume filter
*/
-#ifndef AVFILTER_AF_VOLUME_H
-#define AVFILTER_AF_VOLUME_H
+#ifndef AVFILTER_VOLUME_H
+#define AVFILTER_VOLUME_H
#include "libavutil/common.h"
+#include "libavutil/eval.h"
#include "libavutil/float_dsp.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"
@@ -35,6 +36,28 @@ enum PrecisionType {
PRECISION_DOUBLE,
};
+enum EvalMode {
+ EVAL_MODE_ONCE,
+ EVAL_MODE_FRAME,
+ EVAL_MODE_NB
+};
+
+enum VolumeVarName {
+ VAR_N,
+ VAR_NB_CHANNELS,
+ VAR_NB_CONSUMED_SAMPLES,
+ VAR_NB_SAMPLES,
+ VAR_POS,
+ VAR_PTS,
+ VAR_SAMPLE_RATE,
+ VAR_STARTPTS,
+ VAR_STARTT,
+ VAR_T,
+ VAR_TB,
+ VAR_VOLUME,
+ VAR_VARS_NB
+};
+
enum ReplayGainType {
REPLAYGAIN_DROP,
REPLAYGAIN_IGNORE,
@@ -44,9 +67,14 @@ enum ReplayGainType {
typedef struct VolumeContext {
const AVClass *class;
- AVFloatDSPContext fdsp;
- enum PrecisionType precision;
- enum ReplayGainType replaygain;
+ AVFloatDSPContext *fdsp;
+ int precision;
+ int eval_mode;
+ const char *volume_expr;
+ AVExpr *volume_pexpr;
+ double var_values[VAR_VARS_NB];
+
+ int replaygain;
double replaygain_preamp;
int replaygain_noclip;
double volume;
@@ -62,4 +90,4 @@ typedef struct VolumeContext {
void ff_volume_init_x86(VolumeContext *vol);
-#endif /* AVFILTER_AF_VOLUME_H */
+#endif /* AVFILTER_VOLUME_H */
diff --git a/libavfilter/af_volumedetect.c b/libavfilter/af_volumedetect.c
new file mode 100644
index 0000000000..0143940ef3
--- /dev/null
+++ b/libavfilter/af_volumedetect.c
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2012 Nicolas George
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/channel_layout.h"
+#include "libavutil/avassert.h"
+#include "audio.h"
+#include "avfilter.h"
+#include "internal.h"
+
+typedef struct {
+ /**
+ * Number of samples at each PCM value.
+ * histogram[0x8000 + i] is the number of samples at value i.
+ * The extra element is there for symmetry.
+ */
+ uint64_t histogram[0x10001];
+} VolDetectContext;
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVSampleFormat sample_fmts[] = {
+ AV_SAMPLE_FMT_S16,
+ AV_SAMPLE_FMT_S16P,
+ AV_SAMPLE_FMT_NONE
+ };
+ AVFilterFormats *formats;
+ AVFilterChannelLayouts *layouts;
+ int ret;
+
+ if (!(formats = ff_make_format_list(sample_fmts)))
+ return AVERROR(ENOMEM);
+
+ layouts = ff_all_channel_counts();
+ if (!layouts)
+ return AVERROR(ENOMEM);
+ ret = ff_set_common_channel_layouts(ctx, layouts);
+ if (ret < 0)
+ return ret;
+
+ return ff_set_common_formats(ctx, formats);
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *samples)
+{
+ AVFilterContext *ctx = inlink->dst;
+ VolDetectContext *vd = ctx->priv;
+ int nb_samples = samples->nb_samples;
+ int nb_channels = av_frame_get_channels(samples);
+ int nb_planes = nb_channels;
+ int plane, i;
+ int16_t *pcm;
+
+ if (!av_sample_fmt_is_planar(samples->format)) {
+ nb_samples *= nb_channels;
+ nb_planes = 1;
+ }
+ for (plane = 0; plane < nb_planes; plane++) {
+ pcm = (int16_t *)samples->extended_data[plane];
+ for (i = 0; i < nb_samples; i++)
+ vd->histogram[pcm[i] + 0x8000]++;
+ }
+
+ return ff_filter_frame(inlink->dst->outputs[0], samples);
+}
+
+#define MAX_DB 91
+
+static inline double logdb(uint64_t v)
+{
+ double d = v / (double)(0x8000 * 0x8000);
+ if (!v)
+ return MAX_DB;
+ return -log10(d) * 10;
+}
+
+static void print_stats(AVFilterContext *ctx)
+{
+ VolDetectContext *vd = ctx->priv;
+ int i, max_volume, shift;
+ uint64_t nb_samples = 0, power = 0, nb_samples_shift = 0, sum = 0;
+ uint64_t histdb[MAX_DB + 1] = { 0 };
+
+ for (i = 0; i < 0x10000; i++)
+ nb_samples += vd->histogram[i];
+ av_log(ctx, AV_LOG_INFO, "n_samples: %"PRId64"\n", nb_samples);
+ if (!nb_samples)
+ return;
+
+ /* If nb_samples > 1<<34, there is a risk of overflow in the
+ multiplication or the sum: shift all histogram values to avoid that.
+ The total number of samples must be recomputed to avoid rounding
+ errors. */
+ shift = av_log2(nb_samples >> 33);
+ for (i = 0; i < 0x10000; i++) {
+ nb_samples_shift += vd->histogram[i] >> shift;
+ power += (i - 0x8000) * (i - 0x8000) * (vd->histogram[i] >> shift);
+ }
+ if (!nb_samples_shift)
+ return;
+ power = (power + nb_samples_shift / 2) / nb_samples_shift;
+ av_assert0(power <= 0x8000 * 0x8000);
+ av_log(ctx, AV_LOG_INFO, "mean_volume: %.1f dB\n", -logdb(power));
+
+ max_volume = 0x8000;
+ while (max_volume > 0 && !vd->histogram[0x8000 + max_volume] &&
+ !vd->histogram[0x8000 - max_volume])
+ max_volume--;
+ av_log(ctx, AV_LOG_INFO, "max_volume: %.1f dB\n", -logdb(max_volume * max_volume));
+
+ for (i = 0; i < 0x10000; i++)
+ histdb[(int)logdb((i - 0x8000) * (i - 0x8000))] += vd->histogram[i];
+ for (i = 0; i <= MAX_DB && !histdb[i]; i++);
+ for (; i <= MAX_DB && sum < nb_samples / 1000; i++) {
+ av_log(ctx, AV_LOG_INFO, "histogram_%ddb: %"PRId64"\n", i, histdb[i]);
+ sum += histdb[i];
+ }
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ print_stats(ctx);
+}
+
+static const AVFilterPad volumedetect_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad volumedetect_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_volumedetect = {
+ .name = "volumedetect",
+ .description = NULL_IF_CONFIG_SMALL("Detect audio volume."),
+ .priv_size = sizeof(VolDetectContext),
+ .query_formats = query_formats,
+ .uninit = uninit,
+ .inputs = volumedetect_inputs,
+ .outputs = volumedetect_outputs,
+};
diff --git a/libavfilter/all_channel_layouts.inc b/libavfilter/all_channel_layouts.inc
new file mode 100644
index 0000000000..878e1f5f8e
--- /dev/null
+++ b/libavfilter/all_channel_layouts.inc
@@ -0,0 +1,68 @@
+AV_CH_FRONT_CENTER,
+AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_LOW_FREQUENCY,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY,
+AV_CH_FRONT_CENTER|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT,
+AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_LOW_FREQUENCY|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT,
+AV_CH_FRONT_CENTER|AV_CH_BACK_CENTER,
+AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_CENTER,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_BACK_CENTER,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_LOW_FREQUENCY|AV_CH_BACK_CENTER,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_BACK_CENTER,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_CENTER,
+AV_CH_FRONT_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT,
+AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_LOW_FREQUENCY|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT,
+AV_CH_FRONT_CENTER|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT,
+AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_LOW_FREQUENCY|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT,
+AV_CH_FRONT_CENTER|AV_CH_BACK_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT,
+AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_BACK_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_LOW_FREQUENCY|AV_CH_BACK_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_BACK_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT,
+AV_CH_FRONT_CENTER|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_LOW_FREQUENCY|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_CENTER|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_LOW_FREQUENCY|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_CENTER|AV_CH_BACK_CENTER|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_CENTER|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_BACK_CENTER|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_LOW_FREQUENCY|AV_CH_BACK_CENTER|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_BACK_CENTER|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_CENTER|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_LOW_FREQUENCY|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_CENTER|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_CENTER|AV_CH_BACK_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_BACK_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_LOW_FREQUENCY|AV_CH_BACK_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_BACK_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
diff --git a/libavfilter/allfilters.c b/libavfilter/allfilters.c
index ec27b5ae6e..93271fb2c4 100644
--- a/libavfilter/allfilters.c
+++ b/libavfilter/allfilters.c
@@ -2,25 +2,27 @@
* filter registration
* Copyright (c) 2008 Vitor Sessak
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include "libavutil/thread.h"
#include "avfilter.h"
#include "config.h"
+#include "opencl_allkernels.h"
#define REGISTER_FILTER(X, x, y) \
@@ -36,95 +38,336 @@
avfilter_register(&ff_##x); \
}
-void avfilter_register_all(void)
+static void register_all(void)
{
- static int initialized;
-
- if (initialized)
- return;
- initialized = 1;
-
+ REGISTER_FILTER(ABENCH, abench, af);
+ REGISTER_FILTER(ACOMPRESSOR, acompressor, af);
+ REGISTER_FILTER(ACROSSFADE, acrossfade, af);
+ REGISTER_FILTER(ACRUSHER, acrusher, af);
+ REGISTER_FILTER(ADELAY, adelay, af);
+ REGISTER_FILTER(AECHO, aecho, af);
+ REGISTER_FILTER(AEMPHASIS, aemphasis, af);
+ REGISTER_FILTER(AEVAL, aeval, af);
+ REGISTER_FILTER(AFADE, afade, af);
+ REGISTER_FILTER(AFFTFILT, afftfilt, af);
REGISTER_FILTER(AFORMAT, aformat, af);
+ REGISTER_FILTER(AGATE, agate, af);
+ REGISTER_FILTER(AINTERLEAVE, ainterleave, af);
+ REGISTER_FILTER(ALIMITER, alimiter, af);
+ REGISTER_FILTER(ALLPASS, allpass, af);
+ REGISTER_FILTER(ALOOP, aloop, af);
+ REGISTER_FILTER(AMERGE, amerge, af);
+ REGISTER_FILTER(AMETADATA, ametadata, af);
REGISTER_FILTER(AMIX, amix, af);
+ REGISTER_FILTER(ANEQUALIZER, anequalizer, af);
REGISTER_FILTER(ANULL, anull, af);
+ REGISTER_FILTER(APAD, apad, af);
+ REGISTER_FILTER(APERMS, aperms, af);
+ REGISTER_FILTER(APHASER, aphaser, af);
+ REGISTER_FILTER(APULSATOR, apulsator, af);
+ REGISTER_FILTER(AREALTIME, arealtime, af);
+ REGISTER_FILTER(ARESAMPLE, aresample, af);
+ REGISTER_FILTER(AREVERSE, areverse, af);
+ REGISTER_FILTER(ASELECT, aselect, af);
+ REGISTER_FILTER(ASENDCMD, asendcmd, af);
+ REGISTER_FILTER(ASETNSAMPLES, asetnsamples, af);
REGISTER_FILTER(ASETPTS, asetpts, af);
+ REGISTER_FILTER(ASETRATE, asetrate, af);
REGISTER_FILTER(ASETTB, asettb, af);
REGISTER_FILTER(ASHOWINFO, ashowinfo, af);
+ REGISTER_FILTER(ASIDEDATA, asidedata, af);
REGISTER_FILTER(ASPLIT, asplit, af);
+ REGISTER_FILTER(ASTATS, astats, af);
+ REGISTER_FILTER(ASTREAMSELECT, astreamselect, af);
REGISTER_FILTER(ASYNCTS, asyncts, af);
+ REGISTER_FILTER(ATEMPO, atempo, af);
REGISTER_FILTER(ATRIM, atrim, af);
+ REGISTER_FILTER(AZMQ, azmq, af);
+ REGISTER_FILTER(BANDPASS, bandpass, af);
+ REGISTER_FILTER(BANDREJECT, bandreject, af);
+ REGISTER_FILTER(BASS, bass, af);
+ REGISTER_FILTER(BIQUAD, biquad, af);
REGISTER_FILTER(BS2B, bs2b, af);
REGISTER_FILTER(CHANNELMAP, channelmap, af);
REGISTER_FILTER(CHANNELSPLIT, channelsplit, af);
+ REGISTER_FILTER(CHORUS, chorus, af);
REGISTER_FILTER(COMPAND, compand, af);
+ REGISTER_FILTER(COMPENSATIONDELAY, compensationdelay, af);
+ REGISTER_FILTER(CRYSTALIZER, crystalizer, af);
+ REGISTER_FILTER(DCSHIFT, dcshift, af);
+ REGISTER_FILTER(DYNAUDNORM, dynaudnorm, af);
+ REGISTER_FILTER(EARWAX, earwax, af);
+ REGISTER_FILTER(EBUR128, ebur128, af);
+ REGISTER_FILTER(EQUALIZER, equalizer, af);
+ REGISTER_FILTER(EXTRASTEREO, extrastereo, af);
+ REGISTER_FILTER(FIREQUALIZER, firequalizer, af);
+ REGISTER_FILTER(FLANGER, flanger, af);
REGISTER_FILTER(HDCD, hdcd, af);
+ REGISTER_FILTER(HIGHPASS, highpass, af);
REGISTER_FILTER(JOIN, join, af);
+ REGISTER_FILTER(LADSPA, ladspa, af);
+ REGISTER_FILTER(LOUDNORM, loudnorm, af);
+ REGISTER_FILTER(LOWPASS, lowpass, af);
+ REGISTER_FILTER(PAN, pan, af);
+ REGISTER_FILTER(REPLAYGAIN, replaygain, af);
REGISTER_FILTER(RESAMPLE, resample, af);
+ REGISTER_FILTER(RUBBERBAND, rubberband, af);
+ REGISTER_FILTER(SIDECHAINCOMPRESS, sidechaincompress, af);
+ REGISTER_FILTER(SIDECHAINGATE, sidechaingate, af);
+ REGISTER_FILTER(SILENCEDETECT, silencedetect, af);
+ REGISTER_FILTER(SILENCEREMOVE, silenceremove, af);
+ REGISTER_FILTER(SOFALIZER, sofalizer, af);
+ REGISTER_FILTER(STEREOTOOLS, stereotools, af);
+ REGISTER_FILTER(STEREOWIDEN, stereowiden, af);
+ REGISTER_FILTER(TREBLE, treble, af);
+ REGISTER_FILTER(TREMOLO, tremolo, af);
+ REGISTER_FILTER(VIBRATO, vibrato, af);
REGISTER_FILTER(VOLUME, volume, af);
+ REGISTER_FILTER(VOLUMEDETECT, volumedetect, af);
+ REGISTER_FILTER(AEVALSRC, aevalsrc, asrc);
+ REGISTER_FILTER(ANOISESRC, anoisesrc, asrc);
REGISTER_FILTER(ANULLSRC, anullsrc, asrc);
+ REGISTER_FILTER(FLITE, flite, asrc);
+ REGISTER_FILTER(SINE, sine, asrc);
REGISTER_FILTER(ANULLSINK, anullsink, asink);
+ REGISTER_FILTER(ALPHAEXTRACT, alphaextract, vf);
+ REGISTER_FILTER(ALPHAMERGE, alphamerge, vf);
+ REGISTER_FILTER(ASS, ass, vf);
+ REGISTER_FILTER(ATADENOISE, atadenoise, vf);
+ REGISTER_FILTER(AVGBLUR, avgblur, vf);
+ REGISTER_FILTER(BBOX, bbox, vf);
+ REGISTER_FILTER(BENCH, bench, vf);
+ REGISTER_FILTER(BITPLANENOISE, bitplanenoise, vf);
+ REGISTER_FILTER(BLACKDETECT, blackdetect, vf);
REGISTER_FILTER(BLACKFRAME, blackframe, vf);
+ REGISTER_FILTER(BLEND, blend, vf);
REGISTER_FILTER(BOXBLUR, boxblur, vf);
+ REGISTER_FILTER(BWDIF, bwdif, vf);
+ REGISTER_FILTER(CHROMAKEY, chromakey, vf);
+ REGISTER_FILTER(CIESCOPE, ciescope, vf);
+ REGISTER_FILTER(CODECVIEW, codecview, vf);
+ REGISTER_FILTER(COLORBALANCE, colorbalance, vf);
+ REGISTER_FILTER(COLORCHANNELMIXER, colorchannelmixer, vf);
+ REGISTER_FILTER(COLORKEY, colorkey, vf);
+ REGISTER_FILTER(COLORLEVELS, colorlevels, vf);
+ REGISTER_FILTER(COLORMATRIX, colormatrix, vf);
+ REGISTER_FILTER(COLORSPACE, colorspace, vf);
+ REGISTER_FILTER(CONVOLUTION, convolution, vf);
REGISTER_FILTER(COPY, copy, vf);
+ REGISTER_FILTER(COREIMAGE, coreimage, vf);
+ REGISTER_FILTER(COVER_RECT, cover_rect, vf);
REGISTER_FILTER(CROP, crop, vf);
REGISTER_FILTER(CROPDETECT, cropdetect, vf);
+ REGISTER_FILTER(CURVES, curves, vf);
+ REGISTER_FILTER(DATASCOPE, datascope, vf);
+ REGISTER_FILTER(DCTDNOIZ, dctdnoiz, vf);
+ REGISTER_FILTER(DEBAND, deband, vf);
+ REGISTER_FILTER(DECIMATE, decimate, vf);
+ REGISTER_FILTER(DEFLATE, deflate, vf);
REGISTER_FILTER(DEINTERLACE_QSV,deinterlace_qsv,vf);
+ REGISTER_FILTER(DEINTERLACE_VAAPI, deinterlace_vaapi, vf);
+ REGISTER_FILTER(DEJUDDER, dejudder, vf);
REGISTER_FILTER(DELOGO, delogo, vf);
+ REGISTER_FILTER(DESHAKE, deshake, vf);
+ REGISTER_FILTER(DETELECINE, detelecine, vf);
+ REGISTER_FILTER(DILATION, dilation, vf);
+ REGISTER_FILTER(DISPLACE, displace, vf);
REGISTER_FILTER(DRAWBOX, drawbox, vf);
+ REGISTER_FILTER(DRAWGRAPH, drawgraph, vf);
+ REGISTER_FILTER(DRAWGRID, drawgrid, vf);
REGISTER_FILTER(DRAWTEXT, drawtext, vf);
+ REGISTER_FILTER(EDGEDETECT, edgedetect, vf);
+ REGISTER_FILTER(ELBG, elbg, vf);
+ REGISTER_FILTER(EQ, eq, vf);
+ REGISTER_FILTER(EROSION, erosion, vf);
+ REGISTER_FILTER(EXTRACTPLANES, extractplanes, vf);
REGISTER_FILTER(FADE, fade, vf);
+ REGISTER_FILTER(FFTFILT, fftfilt, vf);
+ REGISTER_FILTER(FIELD, field, vf);
+ REGISTER_FILTER(FIELDHINT, fieldhint, vf);
+ REGISTER_FILTER(FIELDMATCH, fieldmatch, vf);
REGISTER_FILTER(FIELDORDER, fieldorder, vf);
+ REGISTER_FILTER(FIND_RECT, find_rect, vf);
REGISTER_FILTER(FORMAT, format, vf);
REGISTER_FILTER(FPS, fps, vf);
REGISTER_FILTER(FRAMEPACK, framepack, vf);
+ REGISTER_FILTER(FRAMERATE, framerate, vf);
+ REGISTER_FILTER(FRAMESTEP, framestep, vf);
REGISTER_FILTER(FREI0R, frei0r, vf);
+ REGISTER_FILTER(FSPP, fspp, vf);
+ REGISTER_FILTER(GBLUR, gblur, vf);
+ REGISTER_FILTER(GEQ, geq, vf);
REGISTER_FILTER(GRADFUN, gradfun, vf);
+ REGISTER_FILTER(HALDCLUT, haldclut, vf);
REGISTER_FILTER(HFLIP, hflip, vf);
+ REGISTER_FILTER(HISTEQ, histeq, vf);
+ REGISTER_FILTER(HISTOGRAM, histogram, vf);
REGISTER_FILTER(HQDN3D, hqdn3d, vf);
+ REGISTER_FILTER(HQX, hqx, vf);
+ REGISTER_FILTER(HSTACK, hstack, vf);
+ REGISTER_FILTER(HUE, hue, vf);
REGISTER_FILTER(HWDOWNLOAD, hwdownload, vf);
REGISTER_FILTER(HWUPLOAD, hwupload, vf);
REGISTER_FILTER(HWUPLOAD_CUDA, hwupload_cuda, vf);
+ REGISTER_FILTER(HYSTERESIS, hysteresis, vf);
+ REGISTER_FILTER(IDET, idet, vf);
+ REGISTER_FILTER(IL, il, vf);
+ REGISTER_FILTER(INFLATE, inflate, vf);
REGISTER_FILTER(INTERLACE, interlace, vf);
+ REGISTER_FILTER(INTERLEAVE, interleave, vf);
+ REGISTER_FILTER(KERNDEINT, kerndeint, vf);
+ REGISTER_FILTER(LENSCORRECTION, lenscorrection, vf);
+ REGISTER_FILTER(LOOP, loop, vf);
REGISTER_FILTER(LUT, lut, vf);
+ REGISTER_FILTER(LUT2, lut2, vf);
+ REGISTER_FILTER(LUT3D, lut3d, vf);
REGISTER_FILTER(LUTRGB, lutrgb, vf);
REGISTER_FILTER(LUTYUV, lutyuv, vf);
+ REGISTER_FILTER(MASKEDCLAMP, maskedclamp, vf);
+ REGISTER_FILTER(MASKEDMERGE, maskedmerge, vf);
+ REGISTER_FILTER(MCDEINT, mcdeint, vf);
+ REGISTER_FILTER(MERGEPLANES, mergeplanes, vf);
+ REGISTER_FILTER(MESTIMATE, mestimate, vf);
+ REGISTER_FILTER(METADATA, metadata, vf);
+ REGISTER_FILTER(MIDEQUALIZER, midequalizer, vf);
+ REGISTER_FILTER(MINTERPOLATE, minterpolate, vf);
+ REGISTER_FILTER(MPDECIMATE, mpdecimate, vf);
REGISTER_FILTER(NEGATE, negate, vf);
+ REGISTER_FILTER(NLMEANS, nlmeans, vf);
+ REGISTER_FILTER(NNEDI, nnedi, vf);
REGISTER_FILTER(NOFORMAT, noformat, vf);
+ REGISTER_FILTER(NOISE, noise, vf);
REGISTER_FILTER(NULL, null, vf);
+ REGISTER_FILTER(OCR, ocr, vf);
REGISTER_FILTER(OCV, ocv, vf);
REGISTER_FILTER(OVERLAY, overlay, vf);
+ REGISTER_FILTER(OWDENOISE, owdenoise, vf);
REGISTER_FILTER(PAD, pad, vf);
+ REGISTER_FILTER(PALETTEGEN, palettegen, vf);
+ REGISTER_FILTER(PALETTEUSE, paletteuse, vf);
+ REGISTER_FILTER(PERMS, perms, vf);
+ REGISTER_FILTER(PERSPECTIVE, perspective, vf);
+ REGISTER_FILTER(PHASE, phase, vf);
REGISTER_FILTER(PIXDESCTEST, pixdesctest, vf);
+ REGISTER_FILTER(PP, pp, vf);
+ REGISTER_FILTER(PP7, pp7, vf);
+ REGISTER_FILTER(PREMULTIPLY, premultiply, vf);
+ REGISTER_FILTER(PREWITT, prewitt, vf);
+ REGISTER_FILTER(PSNR, psnr, vf);
+ REGISTER_FILTER(PULLUP, pullup, vf);
+ REGISTER_FILTER(QP, qp, vf);
+ REGISTER_FILTER(RANDOM, random, vf);
+ REGISTER_FILTER(READEIA608, readeia608, vf);
+ REGISTER_FILTER(READVITC, readvitc, vf);
+ REGISTER_FILTER(REALTIME, realtime, vf);
+ REGISTER_FILTER(REMAP, remap, vf);
+ REGISTER_FILTER(REMOVEGRAIN, removegrain, vf);
+ REGISTER_FILTER(REMOVELOGO, removelogo, vf);
+ REGISTER_FILTER(REPEATFIELDS, repeatfields, vf);
+ REGISTER_FILTER(REVERSE, reverse, vf);
+ REGISTER_FILTER(ROTATE, rotate, vf);
+ REGISTER_FILTER(SAB, sab, vf);
REGISTER_FILTER(SCALE, scale, vf);
REGISTER_FILTER(SCALE_NPP, scale_npp, vf);
REGISTER_FILTER(SCALE_QSV, scale_qsv, vf);
REGISTER_FILTER(SCALE_VAAPI, scale_vaapi, vf);
+ REGISTER_FILTER(SCALE2REF, scale2ref, vf);
REGISTER_FILTER(SELECT, select, vf);
+ REGISTER_FILTER(SELECTIVECOLOR, selectivecolor, vf);
+ REGISTER_FILTER(SENDCMD, sendcmd, vf);
+ REGISTER_FILTER(SEPARATEFIELDS, separatefields, vf);
REGISTER_FILTER(SETDAR, setdar, vf);
+ REGISTER_FILTER(SETFIELD, setfield, vf);
REGISTER_FILTER(SETPTS, setpts, vf);
REGISTER_FILTER(SETSAR, setsar, vf);
REGISTER_FILTER(SETTB, settb, vf);
REGISTER_FILTER(SHOWINFO, showinfo, vf);
+ REGISTER_FILTER(SHOWPALETTE, showpalette, vf);
+ REGISTER_FILTER(SHUFFLEFRAMES, shuffleframes, vf);
REGISTER_FILTER(SHUFFLEPLANES, shuffleplanes, vf);
+ REGISTER_FILTER(SIDEDATA, sidedata, vf);
+ REGISTER_FILTER(SIGNALSTATS, signalstats, vf);
+ REGISTER_FILTER(SIGNATURE, signature, vf);
+ REGISTER_FILTER(SMARTBLUR, smartblur, vf);
+ REGISTER_FILTER(SOBEL, sobel, vf);
REGISTER_FILTER(SPLIT, split, vf);
+ REGISTER_FILTER(SPP, spp, vf);
+ REGISTER_FILTER(SSIM, ssim, vf);
+ REGISTER_FILTER(STEREO3D, stereo3d, vf);
+ REGISTER_FILTER(STREAMSELECT, streamselect, vf);
+ REGISTER_FILTER(SUBTITLES, subtitles, vf);
+ REGISTER_FILTER(SUPER2XSAI, super2xsai, vf);
+ REGISTER_FILTER(SWAPRECT, swaprect, vf);
+ REGISTER_FILTER(SWAPUV, swapuv, vf);
+ REGISTER_FILTER(TBLEND, tblend, vf);
+ REGISTER_FILTER(TELECINE, telecine, vf);
+ REGISTER_FILTER(THRESHOLD, threshold, vf);
+ REGISTER_FILTER(THUMBNAIL, thumbnail, vf);
+ REGISTER_FILTER(TILE, tile, vf);
+ REGISTER_FILTER(TINTERLACE, tinterlace, vf);
REGISTER_FILTER(TRANSPOSE, transpose, vf);
REGISTER_FILTER(TRIM, trim, vf);
REGISTER_FILTER(UNSHARP, unsharp, vf);
+ REGISTER_FILTER(USPP, uspp, vf);
+ REGISTER_FILTER(VAGUEDENOISER, vaguedenoiser, vf);
+ REGISTER_FILTER(VECTORSCOPE, vectorscope, vf);
REGISTER_FILTER(VFLIP, vflip, vf);
+ REGISTER_FILTER(VIDSTABDETECT, vidstabdetect, vf);
+ REGISTER_FILTER(VIDSTABTRANSFORM, vidstabtransform, vf);
+ REGISTER_FILTER(VIGNETTE, vignette, vf);
+ REGISTER_FILTER(VSTACK, vstack, vf);
+ REGISTER_FILTER(W3FDIF, w3fdif, vf);
+ REGISTER_FILTER(WAVEFORM, waveform, vf);
+ REGISTER_FILTER(WEAVE, weave, vf);
+ REGISTER_FILTER(XBR, xbr, vf);
REGISTER_FILTER(YADIF, yadif, vf);
+ REGISTER_FILTER(ZMQ, zmq, vf);
+ REGISTER_FILTER(ZOOMPAN, zoompan, vf);
+ REGISTER_FILTER(ZSCALE, zscale, vf);
+ REGISTER_FILTER(ALLRGB, allrgb, vsrc);
+ REGISTER_FILTER(ALLYUV, allyuv, vsrc);
+ REGISTER_FILTER(CELLAUTO, cellauto, vsrc);
REGISTER_FILTER(COLOR, color, vsrc);
+ REGISTER_FILTER(COREIMAGESRC, coreimagesrc, vsrc);
REGISTER_FILTER(FREI0R, frei0r_src, vsrc);
- REGISTER_FILTER(MOVIE, movie, vsrc);
+ REGISTER_FILTER(HALDCLUTSRC, haldclutsrc, vsrc);
+ REGISTER_FILTER(LIFE, life, vsrc);
+ REGISTER_FILTER(MANDELBROT, mandelbrot, vsrc);
+ REGISTER_FILTER(MPTESTSRC, mptestsrc, vsrc);
REGISTER_FILTER(NULLSRC, nullsrc, vsrc);
REGISTER_FILTER(RGBTESTSRC, rgbtestsrc, vsrc);
+ REGISTER_FILTER(SMPTEBARS, smptebars, vsrc);
+ REGISTER_FILTER(SMPTEHDBARS, smptehdbars, vsrc);
REGISTER_FILTER(TESTSRC, testsrc, vsrc);
+ REGISTER_FILTER(TESTSRC2, testsrc2, vsrc);
+ REGISTER_FILTER(YUVTESTSRC, yuvtestsrc, vsrc);
REGISTER_FILTER(NULLSINK, nullsink, vsink);
+ /* multimedia filters */
+ REGISTER_FILTER(ABITSCOPE, abitscope, avf);
+ REGISTER_FILTER(ADRAWGRAPH, adrawgraph, avf);
+ REGISTER_FILTER(AHISTOGRAM, ahistogram, avf);
+ REGISTER_FILTER(APHASEMETER, aphasemeter, avf);
+ REGISTER_FILTER(AVECTORSCOPE, avectorscope, avf);
+ REGISTER_FILTER(CONCAT, concat, avf);
+ REGISTER_FILTER(SHOWCQT, showcqt, avf);
+ REGISTER_FILTER(SHOWFREQS, showfreqs, avf);
+ REGISTER_FILTER(SHOWSPECTRUM, showspectrum, avf);
+ REGISTER_FILTER(SHOWSPECTRUMPIC, showspectrumpic, avf);
+ REGISTER_FILTER(SHOWVOLUME, showvolume, avf);
+ REGISTER_FILTER(SHOWWAVES, showwaves, avf);
+ REGISTER_FILTER(SHOWWAVESPIC, showwavespic, avf);
+ REGISTER_FILTER(SPECTRUMSYNTH, spectrumsynth, vaf);
+
+ /* multimedia sources */
+ REGISTER_FILTER(AMOVIE, amovie, avsrc);
+ REGISTER_FILTER(MOVIE, movie, avsrc);
+
/* those filters are part of public or internal API => registered
* unconditionally */
REGISTER_FILTER_UNCONDITIONAL(asrc_abuffer);
@@ -133,4 +376,12 @@ void avfilter_register_all(void)
REGISTER_FILTER_UNCONDITIONAL(vsink_buffer);
REGISTER_FILTER_UNCONDITIONAL(af_afifo);
REGISTER_FILTER_UNCONDITIONAL(vf_fifo);
+ ff_opencl_register_filter_kernel_code_all();
+}
+
+void avfilter_register_all(void)
+{
+ AVOnce control = AV_ONCE_INIT;
+
+ ff_thread_once(&control, register_all);
}
diff --git a/libavfilter/asink_anullsink.c b/libavfilter/asink_anullsink.c
index 44f547d6ee..9b53d3fbc2 100644
--- a/libavfilter/asink_anullsink.c
+++ b/libavfilter/asink_anullsink.c
@@ -1,18 +1,20 @@
/*
- * This file is part of Libav.
+ * Copyright (c) 2010 S.N. Hemanth Meenakshisundaram <smeenaks@ucsd.edu>
*
- * Libav is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/libavfilter/asrc_anoisesrc.c b/libavfilter/asrc_anoisesrc.c
new file mode 100644
index 0000000000..709224c4cb
--- /dev/null
+++ b/libavfilter/asrc_anoisesrc.c
@@ -0,0 +1,207 @@
+/*
+ * Copyright (c) 2015 Kyle Swanson <k@ylo.ph>.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/opt.h"
+#include "audio.h"
+#include "avfilter.h"
+#include "internal.h"
+#include "libavutil/lfg.h"
+#include "libavutil/random_seed.h"
+
+typedef struct {
+ const AVClass *class;
+ int sample_rate;
+ double amplitude;
+ int64_t duration;
+ int64_t color;
+ int64_t seed;
+ int nb_samples;
+
+ int64_t pts;
+ int infinite;
+ double (*filter)(double white, double *buf);
+ double buf[7];
+ AVLFG c;
+} ANoiseSrcContext;
+
+#define OFFSET(x) offsetof(ANoiseSrcContext, x)
+#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption anoisesrc_options[] = {
+ { "sample_rate", "set sample rate", OFFSET(sample_rate), AV_OPT_TYPE_INT, {.i64 = 48000}, 15, INT_MAX, FLAGS },
+ { "r", "set sample rate", OFFSET(sample_rate), AV_OPT_TYPE_INT, {.i64 = 48000}, 15, INT_MAX, FLAGS },
+ { "amplitude", "set amplitude", OFFSET(amplitude), AV_OPT_TYPE_DOUBLE, {.dbl = 1.}, 0., 1., FLAGS },
+ { "a", "set amplitude", OFFSET(amplitude), AV_OPT_TYPE_DOUBLE, {.dbl = 1.}, 0., 1., FLAGS },
+ { "duration", "set duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = 0}, 0, INT64_MAX, FLAGS },
+ { "d", "set duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = 0}, 0, INT64_MAX, FLAGS },
+ { "color", "set noise color", OFFSET(color), AV_OPT_TYPE_INT, {.i64 = 1}, 0, 2, FLAGS, "color" },
+ { "colour", "set noise color", OFFSET(color), AV_OPT_TYPE_INT, {.i64 = 1}, 0, 2, FLAGS, "color" },
+ { "c", "set noise color", OFFSET(color), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 2, FLAGS, "color" },
+ { "white", 0, 0, AV_OPT_TYPE_CONST, {.i64 = 0}, 0, 0, FLAGS, "color" },
+ { "pink", 0, 0, AV_OPT_TYPE_CONST, {.i64 = 1}, 0, 0, FLAGS, "color" },
+ { "brown", 0, 0, AV_OPT_TYPE_CONST, {.i64 = 2}, 0, 0, FLAGS, "color" },
+ { "seed", "set random seed", OFFSET(seed), AV_OPT_TYPE_INT64, {.i64 = -1}, -1, UINT_MAX, FLAGS },
+ { "s", "set random seed", OFFSET(seed), AV_OPT_TYPE_INT64, {.i64 = -1}, -1, UINT_MAX, FLAGS },
+ { "nb_samples", "set the number of samples per requested frame", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64 = 1024}, 1, INT_MAX, FLAGS },
+ { "n", "set the number of samples per requested frame", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64 = 1024}, 1, INT_MAX, FLAGS },
+ {NULL}
+};
+
+AVFILTER_DEFINE_CLASS(anoisesrc);
+
+static av_cold int query_formats(AVFilterContext *ctx)
+{
+ ANoiseSrcContext *s = ctx->priv;
+ static const int64_t chlayouts[] = { AV_CH_LAYOUT_MONO, -1 };
+ int sample_rates[] = { s->sample_rate, -1 };
+ static const enum AVSampleFormat sample_fmts[] = {
+ AV_SAMPLE_FMT_DBL,
+ AV_SAMPLE_FMT_NONE
+ };
+
+ AVFilterFormats *formats;
+ AVFilterChannelLayouts *layouts;
+ int ret;
+
+ formats = ff_make_format_list(sample_fmts);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ret = ff_set_common_formats (ctx, formats);
+ if (ret < 0)
+ return ret;
+
+ layouts = avfilter_make_format64_list(chlayouts);
+ if (!layouts)
+ return AVERROR(ENOMEM);
+ ret = ff_set_common_channel_layouts(ctx, layouts);
+ if (ret < 0)
+ return ret;
+
+ formats = ff_make_format_list(sample_rates);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ return ff_set_common_samplerates(ctx, formats);
+}
+
+static double white_filter(double white, double *buf)
+{
+ return white;
+}
+
+static double pink_filter(double white, double *buf)
+{
+ double pink;
+
+ /* http://www.musicdsp.org/files/pink.txt */
+ buf[0] = 0.99886 * buf[0] + white * 0.0555179;
+ buf[1] = 0.99332 * buf[1] + white * 0.0750759;
+ buf[2] = 0.96900 * buf[2] + white * 0.1538520;
+ buf[3] = 0.86650 * buf[3] + white * 0.3104856;
+ buf[4] = 0.55000 * buf[4] + white * 0.5329522;
+ buf[5] = -0.7616 * buf[5] - white * 0.0168980;
+ pink = buf[0] + buf[1] + buf[2] + buf[3] + buf[4] + buf[5] + buf[6] + white * 0.5362;
+ buf[6] = white * 0.115926;
+ return pink * 0.11;
+}
+
+static double brown_filter(double white, double *buf)
+{
+ double brown;
+
+ brown = ((0.02 * white) + buf[0]) / 1.02;
+ buf[0] = brown;
+ return brown * 3.5;
+}
+
+static av_cold int config_props(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ ANoiseSrcContext *s = ctx->priv;
+
+ if (s->seed == -1)
+ s->seed = av_get_random_seed();
+ av_lfg_init(&s->c, s->seed);
+
+ if (s->duration == 0)
+ s->infinite = 1;
+ s->duration = av_rescale(s->duration, s->sample_rate, AV_TIME_BASE);
+
+ switch (s->color) {
+ case 0: s->filter = white_filter; break;
+ case 1: s->filter = pink_filter; break;
+ case 2: s->filter = brown_filter; break;
+ }
+
+ return 0;
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ ANoiseSrcContext *s = ctx->priv;
+ AVFrame *frame;
+ int nb_samples, i;
+ double *dst;
+
+ if (!s->infinite && s->duration <= 0) {
+ return AVERROR_EOF;
+ } else if (!s->infinite && s->duration < s->nb_samples) {
+ nb_samples = s->duration;
+ } else {
+ nb_samples = s->nb_samples;
+ }
+
+ if (!(frame = ff_get_audio_buffer(outlink, nb_samples)))
+ return AVERROR(ENOMEM);
+
+ dst = (double *)frame->data[0];
+ for (i = 0; i < nb_samples; i++) {
+ double white;
+ white = s->amplitude * ((2 * ((double) av_lfg_get(&s->c) / 0xffffffff)) - 1);
+ dst[i] = s->filter(white, s->buf);
+ }
+
+ if (!s->infinite)
+ s->duration -= nb_samples;
+
+ frame->pts = s->pts;
+ s->pts += nb_samples;
+ return ff_filter_frame(outlink, frame);
+}
+
+static const AVFilterPad anoisesrc_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .request_frame = request_frame,
+ .config_props = config_props,
+ },
+ { NULL }
+};
+
+AVFilter ff_asrc_anoisesrc = {
+ .name = "anoisesrc",
+ .description = NULL_IF_CONFIG_SMALL("Generate a noise audio signal."),
+ .query_formats = query_formats,
+ .priv_size = sizeof(ANoiseSrcContext),
+ .inputs = NULL,
+ .outputs = anoisesrc_outputs,
+ .priv_class = &anoisesrc_class,
+};
diff --git a/libavfilter/asrc_anullsrc.c b/libavfilter/asrc_anullsrc.c
index b1a449cdbe..94bd0cab3a 100644
--- a/libavfilter/asrc_anullsrc.c
+++ b/libavfilter/asrc_anullsrc.c
@@ -1,18 +1,21 @@
/*
- * This file is part of Libav.
+ * Copyright 2010 S.N. Hemanth Meenakshisundaram <smeenaks ucsd edu>
+ * Copyright 2010 Stefano Sabatini <stefano.sabatini-lala poste it>
*
- * Libav is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -26,28 +29,120 @@
#include "libavutil/channel_layout.h"
#include "libavutil/internal.h"
+#include "libavutil/opt.h"
+#include "audio.h"
#include "avfilter.h"
#include "internal.h"
-static int request_frame(AVFilterLink *link)
+typedef struct {
+ const AVClass *class;
+ char *channel_layout_str;
+ uint64_t channel_layout;
+ char *sample_rate_str;
+ int sample_rate;
+ int nb_samples; ///< number of samples per requested frame
+ int64_t pts;
+} ANullContext;
+
+#define OFFSET(x) offsetof(ANullContext, x)
+#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption anullsrc_options[]= {
+ { "channel_layout", "set channel_layout", OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, {.str = "stereo"}, 0, 0, FLAGS },
+ { "cl", "set channel_layout", OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, {.str = "stereo"}, 0, 0, FLAGS },
+ { "sample_rate", "set sample rate", OFFSET(sample_rate_str) , AV_OPT_TYPE_STRING, {.str = "44100"}, 0, 0, FLAGS },
+ { "r", "set sample rate", OFFSET(sample_rate_str) , AV_OPT_TYPE_STRING, {.str = "44100"}, 0, 0, FLAGS },
+ { "nb_samples", "set the number of samples per requested frame", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64 = 1024}, 0, INT_MAX, FLAGS },
+ { "n", "set the number of samples per requested frame", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64 = 1024}, 0, INT_MAX, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(anullsrc);
+
+static av_cold int init(AVFilterContext *ctx)
{
- return AVERROR_EOF;
+ ANullContext *null = ctx->priv;
+ int ret;
+
+ if ((ret = ff_parse_sample_rate(&null->sample_rate,
+ null->sample_rate_str, ctx)) < 0)
+ return ret;
+
+ if ((ret = ff_parse_channel_layout(&null->channel_layout, NULL,
+ null->channel_layout_str, ctx)) < 0)
+ return ret;
+
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ ANullContext *null = ctx->priv;
+ int64_t chlayouts[] = { null->channel_layout, -1 };
+ int sample_rates[] = { null->sample_rate, -1 };
+ int ret;
+
+ if ((ret = ff_set_common_formats (ctx, ff_all_formats (AVMEDIA_TYPE_AUDIO))) < 0 ||
+ (ret = ff_set_common_channel_layouts (ctx, avfilter_make_format64_list (chlayouts ))) < 0 ||
+ (ret = ff_set_common_samplerates (ctx, ff_make_format_list (sample_rates ))) < 0)
+ return ret;
+
+ return 0;
+}
+
+static int config_props(AVFilterLink *outlink)
+{
+ ANullContext *null = outlink->src->priv;
+ char buf[128];
+
+ av_get_channel_layout_string(buf, sizeof(buf), 0, null->channel_layout);
+ av_log(outlink->src, AV_LOG_VERBOSE,
+ "sample_rate:%d channel_layout:'%s' nb_samples:%d\n",
+ null->sample_rate, buf, null->nb_samples);
+
+ return 0;
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ int ret;
+ ANullContext *null = outlink->src->priv;
+ AVFrame *samplesref;
+
+ samplesref = ff_get_audio_buffer(outlink, null->nb_samples);
+ if (!samplesref)
+ return AVERROR(ENOMEM);
+
+ samplesref->pts = null->pts;
+ samplesref->channel_layout = null->channel_layout;
+ samplesref->sample_rate = outlink->sample_rate;
+
+ ret = ff_filter_frame(outlink, av_frame_clone(samplesref));
+ av_frame_free(&samplesref);
+ if (ret < 0)
+ return ret;
+
+ null->pts += null->nb_samples;
+ return ret;
}
static const AVFilterPad avfilter_asrc_anullsrc_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_props,
.request_frame = request_frame,
},
{ NULL }
};
AVFilter ff_asrc_anullsrc = {
- .name = "anullsrc",
- .description = NULL_IF_CONFIG_SMALL("Null audio source, never return audio frames."),
-
- .inputs = NULL,
-
- .outputs = avfilter_asrc_anullsrc_outputs,
+ .name = "anullsrc",
+ .description = NULL_IF_CONFIG_SMALL("Null audio source, return empty audio frames."),
+ .init = init,
+ .query_formats = query_formats,
+ .priv_size = sizeof(ANullContext),
+ .inputs = NULL,
+ .outputs = avfilter_asrc_anullsrc_outputs,
+ .priv_class = &anullsrc_class,
};
diff --git a/libavfilter/asrc_flite.c b/libavfilter/asrc_flite.c
new file mode 100644
index 0000000000..2e5bd4b6c0
--- /dev/null
+++ b/libavfilter/asrc_flite.c
@@ -0,0 +1,287 @@
+/*
+ * Copyright (c) 2012 Stefano Sabatini
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * flite voice synth source
+ */
+
+#include <flite/flite.h>
+#include "libavutil/channel_layout.h"
+#include "libavutil/file.h"
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#include "audio.h"
+#include "formats.h"
+#include "internal.h"
+
+typedef struct {
+ const AVClass *class;
+ char *voice_str;
+ char *textfile;
+ char *text;
+ cst_wave *wave;
+ int16_t *wave_samples;
+ int wave_nb_samples;
+ int list_voices;
+ cst_voice *voice;
+ struct voice_entry *voice_entry;
+ int64_t pts;
+ int frame_nb_samples; ///< number of samples per frame
+} FliteContext;
+
+#define OFFSET(x) offsetof(FliteContext, x)
+#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption flite_options[] = {
+ { "list_voices", "list voices and exit", OFFSET(list_voices), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS },
+ { "nb_samples", "set number of samples per frame", OFFSET(frame_nb_samples), AV_OPT_TYPE_INT, {.i64=512}, 0, INT_MAX, FLAGS },
+ { "n", "set number of samples per frame", OFFSET(frame_nb_samples), AV_OPT_TYPE_INT, {.i64=512}, 0, INT_MAX, FLAGS },
+ { "text", "set text to speak", OFFSET(text), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "textfile", "set filename of the text to speak", OFFSET(textfile), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "v", "set voice", OFFSET(voice_str), AV_OPT_TYPE_STRING, {.str="kal"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "voice", "set voice", OFFSET(voice_str), AV_OPT_TYPE_STRING, {.str="kal"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(flite);
+
+static volatile int flite_inited = 0;
+
+/* declare functions for all the supported voices */
+#define DECLARE_REGISTER_VOICE_FN(name) \
+ cst_voice *register_cmu_us_## name(const char *); \
+ void unregister_cmu_us_## name(cst_voice *);
+DECLARE_REGISTER_VOICE_FN(awb);
+DECLARE_REGISTER_VOICE_FN(kal);
+DECLARE_REGISTER_VOICE_FN(kal16);
+DECLARE_REGISTER_VOICE_FN(rms);
+DECLARE_REGISTER_VOICE_FN(slt);
+
+struct voice_entry {
+ const char *name;
+ cst_voice * (*register_fn)(const char *);
+ void (*unregister_fn)(cst_voice *);
+ cst_voice *voice;
+ unsigned usage_count;
+} voice_entry;
+
+#define MAKE_VOICE_STRUCTURE(voice_name) { \
+ .name = #voice_name, \
+ .register_fn = register_cmu_us_ ## voice_name, \
+ .unregister_fn = unregister_cmu_us_ ## voice_name, \
+}
+static struct voice_entry voice_entries[] = {
+ MAKE_VOICE_STRUCTURE(awb),
+ MAKE_VOICE_STRUCTURE(kal),
+ MAKE_VOICE_STRUCTURE(kal16),
+ MAKE_VOICE_STRUCTURE(rms),
+ MAKE_VOICE_STRUCTURE(slt),
+};
+
+static void list_voices(void *log_ctx, const char *sep)
+{
+ int i, n = FF_ARRAY_ELEMS(voice_entries);
+ for (i = 0; i < n; i++)
+ av_log(log_ctx, AV_LOG_INFO, "%s%s",
+ voice_entries[i].name, i < (n-1) ? sep : "\n");
+}
+
+static int select_voice(struct voice_entry **entry_ret, const char *voice_name, void *log_ctx)
+{
+ int i;
+
+ for (i = 0; i < FF_ARRAY_ELEMS(voice_entries); i++) {
+ struct voice_entry *entry = &voice_entries[i];
+ if (!strcmp(entry->name, voice_name)) {
+ if (!entry->voice)
+ entry->voice = entry->register_fn(NULL);
+ if (!entry->voice) {
+ av_log(log_ctx, AV_LOG_ERROR,
+ "Could not register voice '%s'\n", voice_name);
+ return AVERROR_UNKNOWN;
+ }
+ entry->usage_count++;
+ *entry_ret = entry;
+ return 0;
+ }
+ }
+
+ av_log(log_ctx, AV_LOG_ERROR, "Could not find voice '%s'\n", voice_name);
+ av_log(log_ctx, AV_LOG_INFO, "Choose between the voices: ");
+ list_voices(log_ctx, ", ");
+
+ return AVERROR(EINVAL);
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ FliteContext *flite = ctx->priv;
+ int ret = 0;
+
+ if (flite->list_voices) {
+ list_voices(ctx, "\n");
+ return AVERROR_EXIT;
+ }
+
+ if (!flite_inited) {
+ if (flite_init() < 0) {
+ av_log(ctx, AV_LOG_ERROR, "flite initialization failed\n");
+ return AVERROR_UNKNOWN;
+ }
+ flite_inited++;
+ }
+
+ if ((ret = select_voice(&flite->voice_entry, flite->voice_str, ctx)) < 0)
+ return ret;
+ flite->voice = flite->voice_entry->voice;
+
+ if (flite->textfile && flite->text) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Both text and textfile options set: only one must be specified\n");
+ return AVERROR(EINVAL);
+ }
+
+ if (flite->textfile) {
+ uint8_t *textbuf;
+ size_t textbuf_size;
+
+ if ((ret = av_file_map(flite->textfile, &textbuf, &textbuf_size, 0, ctx)) < 0) {
+ av_log(ctx, AV_LOG_ERROR,
+ "The text file '%s' could not be read: %s\n",
+ flite->textfile, av_err2str(ret));
+ return ret;
+ }
+
+ if (!(flite->text = av_malloc(textbuf_size+1))) {
+ av_file_unmap(textbuf, textbuf_size);
+ return AVERROR(ENOMEM);
+ }
+ memcpy(flite->text, textbuf, textbuf_size);
+ flite->text[textbuf_size] = 0;
+ av_file_unmap(textbuf, textbuf_size);
+ }
+
+ if (!flite->text) {
+ av_log(ctx, AV_LOG_ERROR,
+ "No speech text specified, specify the 'text' or 'textfile' option\n");
+ return AVERROR(EINVAL);
+ }
+
+ /* synth all the file data in block */
+ flite->wave = flite_text_to_wave(flite->text, flite->voice);
+ flite->wave_samples = flite->wave->samples;
+ flite->wave_nb_samples = flite->wave->num_samples;
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ FliteContext *flite = ctx->priv;
+
+ if (!--flite->voice_entry->usage_count)
+ flite->voice_entry->unregister_fn(flite->voice);
+ flite->voice = NULL;
+ flite->voice_entry = NULL;
+ delete_wave(flite->wave);
+ flite->wave = NULL;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ FliteContext *flite = ctx->priv;
+ int ret;
+
+ AVFilterChannelLayouts *chlayouts = NULL;
+ int64_t chlayout = av_get_default_channel_layout(flite->wave->num_channels);
+ AVFilterFormats *sample_formats = NULL;
+ AVFilterFormats *sample_rates = NULL;
+
+ if ((ret = ff_add_channel_layout (&chlayouts , chlayout )) < 0 ||
+ (ret = ff_set_common_channel_layouts (ctx , chlayouts )) < 0 ||
+ (ret = ff_add_format (&sample_formats, AV_SAMPLE_FMT_S16 )) < 0 ||
+ (ret = ff_set_common_formats (ctx , sample_formats )) < 0 ||
+ (ret = ff_add_format (&sample_rates , flite->wave->sample_rate)) < 0 ||
+ (ret = ff_set_common_samplerates (ctx , sample_rates )) < 0)
+ return ret;
+
+ return 0;
+}
+
+static int config_props(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ FliteContext *flite = ctx->priv;
+
+ outlink->sample_rate = flite->wave->sample_rate;
+ outlink->time_base = (AVRational){1, flite->wave->sample_rate};
+
+ av_log(ctx, AV_LOG_VERBOSE, "voice:%s fmt:%s sample_rate:%d\n",
+ flite->voice_str,
+ av_get_sample_fmt_name(outlink->format), outlink->sample_rate);
+ return 0;
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ AVFrame *samplesref;
+ FliteContext *flite = outlink->src->priv;
+ int nb_samples = FFMIN(flite->wave_nb_samples, flite->frame_nb_samples);
+
+ if (!nb_samples)
+ return AVERROR_EOF;
+
+ samplesref = ff_get_audio_buffer(outlink, nb_samples);
+ if (!samplesref)
+ return AVERROR(ENOMEM);
+
+ memcpy(samplesref->data[0], flite->wave_samples,
+ nb_samples * flite->wave->num_channels * 2);
+ samplesref->pts = flite->pts;
+ av_frame_set_pkt_pos(samplesref, -1);
+ av_frame_set_sample_rate(samplesref, flite->wave->sample_rate);
+ flite->pts += nb_samples;
+ flite->wave_samples += nb_samples * flite->wave->num_channels;
+ flite->wave_nb_samples -= nb_samples;
+
+ return ff_filter_frame(outlink, samplesref);
+}
+
+static const AVFilterPad flite_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_props,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_asrc_flite = {
+ .name = "flite",
+ .description = NULL_IF_CONFIG_SMALL("Synthesize voice from text using libflite."),
+ .query_formats = query_formats,
+ .init = init,
+ .uninit = uninit,
+ .priv_size = sizeof(FliteContext),
+ .inputs = NULL,
+ .outputs = flite_outputs,
+ .priv_class = &flite_class,
+};
diff --git a/libavfilter/asrc_sine.c b/libavfilter/asrc_sine.c
new file mode 100644
index 0000000000..ff77526622
--- /dev/null
+++ b/libavfilter/asrc_sine.c
@@ -0,0 +1,282 @@
+/*
+ * Copyright (c) 2013 Nicolas George
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <float.h>
+
+#include "libavutil/avassert.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/eval.h"
+#include "libavutil/opt.h"
+#include "audio.h"
+#include "avfilter.h"
+#include "internal.h"
+
+typedef struct {
+ const AVClass *class;
+ double frequency;
+ double beep_factor;
+ char *samples_per_frame;
+ AVExpr *samples_per_frame_expr;
+ int sample_rate;
+ int64_t duration;
+ int16_t *sin;
+ int64_t pts;
+ uint32_t phi; ///< current phase of the sine (2pi = 1<<32)
+ uint32_t dphi; ///< phase increment between two samples
+ unsigned beep_period;
+ unsigned beep_index;
+ unsigned beep_length;
+ uint32_t phi_beep; ///< current phase of the beep
+ uint32_t dphi_beep; ///< phase increment of the beep
+} SineContext;
+
+#define CONTEXT SineContext
+#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+#define OPT_GENERIC(name, field, def, min, max, descr, type, deffield, ...) \
+ { name, descr, offsetof(CONTEXT, field), AV_OPT_TYPE_ ## type, \
+ { .deffield = def }, min, max, FLAGS, __VA_ARGS__ }
+
+#define OPT_INT(name, field, def, min, max, descr, ...) \
+ OPT_GENERIC(name, field, def, min, max, descr, INT, i64, __VA_ARGS__)
+
+#define OPT_DBL(name, field, def, min, max, descr, ...) \
+ OPT_GENERIC(name, field, def, min, max, descr, DOUBLE, dbl, __VA_ARGS__)
+
+#define OPT_DUR(name, field, def, min, max, descr, ...) \
+ OPT_GENERIC(name, field, def, min, max, descr, DURATION, str, __VA_ARGS__)
+
+#define OPT_STR(name, field, def, min, max, descr, ...) \
+ OPT_GENERIC(name, field, def, min, max, descr, STRING, str, __VA_ARGS__)
+
+static const AVOption sine_options[] = {
+ OPT_DBL("frequency", frequency, 440, 0, DBL_MAX, "set the sine frequency",),
+ OPT_DBL("f", frequency, 440, 0, DBL_MAX, "set the sine frequency",),
+ OPT_DBL("beep_factor", beep_factor, 0, 0, DBL_MAX, "set the beep frequency factor",),
+ OPT_DBL("b", beep_factor, 0, 0, DBL_MAX, "set the beep frequency factor",),
+ OPT_INT("sample_rate", sample_rate, 44100, 1, INT_MAX, "set the sample rate",),
+ OPT_INT("r", sample_rate, 44100, 1, INT_MAX, "set the sample rate",),
+ OPT_DUR("duration", duration, 0, 0, INT64_MAX, "set the audio duration",),
+ OPT_DUR("d", duration, 0, 0, INT64_MAX, "set the audio duration",),
+ OPT_STR("samples_per_frame", samples_per_frame, "1024", 0, 0, "set the number of samples per frame",),
+ {NULL}
+};
+
+AVFILTER_DEFINE_CLASS(sine);
+
+#define LOG_PERIOD 15
+#define AMPLITUDE 4095
+#define AMPLITUDE_SHIFT 3
+
+static void make_sin_table(int16_t *sin)
+{
+ unsigned half_pi = 1 << (LOG_PERIOD - 2);
+ unsigned ampls = AMPLITUDE << AMPLITUDE_SHIFT;
+ uint64_t unit2 = (uint64_t)(ampls * ampls) << 32;
+ unsigned step, i, c, s, k, new_k, n2;
+
+ /* Principle: if u = exp(i*a1) and v = exp(i*a2), then
+ exp(i*(a1+a2)/2) = (u+v) / length(u+v) */
+ sin[0] = 0;
+ sin[half_pi] = ampls;
+ for (step = half_pi; step > 1; step /= 2) {
+ /* k = (1 << 16) * amplitude / length(u+v)
+ In exact values, k is constant at a given step */
+ k = 0x10000;
+ for (i = 0; i < half_pi / 2; i += step) {
+ s = sin[i] + sin[i + step];
+ c = sin[half_pi - i] + sin[half_pi - i - step];
+ n2 = s * s + c * c;
+ /* Newton's method to solve n² * k² = unit² */
+ while (1) {
+ new_k = (k + unit2 / ((uint64_t)k * n2) + 1) >> 1;
+ if (k == new_k)
+ break;
+ k = new_k;
+ }
+ sin[i + step / 2] = (k * s + 0x7FFF) >> 16;
+ sin[half_pi - i - step / 2] = (k * c + 0x8000) >> 16;
+ }
+ }
+ /* Unshift amplitude */
+ for (i = 0; i <= half_pi; i++)
+ sin[i] = (sin[i] + (1 << (AMPLITUDE_SHIFT - 1))) >> AMPLITUDE_SHIFT;
+ /* Use symmetries to fill the other three quarters */
+ for (i = 0; i < half_pi; i++)
+ sin[half_pi * 2 - i] = sin[i];
+ for (i = 0; i < 2 * half_pi; i++)
+ sin[i + 2 * half_pi] = -sin[i];
+}
+
+static const char *const var_names[] = {
+ "n",
+ "pts",
+ "t",
+ "TB",
+ NULL
+};
+
+enum {
+ VAR_N,
+ VAR_PTS,
+ VAR_T,
+ VAR_TB,
+ VAR_VARS_NB
+};
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ int ret;
+ SineContext *sine = ctx->priv;
+
+ if (!(sine->sin = av_malloc(sizeof(*sine->sin) << LOG_PERIOD)))
+ return AVERROR(ENOMEM);
+ sine->dphi = ldexp(sine->frequency, 32) / sine->sample_rate + 0.5;
+ make_sin_table(sine->sin);
+
+ if (sine->beep_factor) {
+ sine->beep_period = sine->sample_rate;
+ sine->beep_length = sine->beep_period / 25;
+ sine->dphi_beep = ldexp(sine->beep_factor * sine->frequency, 32) /
+ sine->sample_rate + 0.5;
+ }
+
+ ret = av_expr_parse(&sine->samples_per_frame_expr,
+ sine->samples_per_frame, var_names,
+ NULL, NULL, NULL, NULL, 0, sine);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ SineContext *sine = ctx->priv;
+
+ av_expr_free(sine->samples_per_frame_expr);
+ sine->samples_per_frame_expr = NULL;
+ av_freep(&sine->sin);
+}
+
+static av_cold int query_formats(AVFilterContext *ctx)
+{
+ SineContext *sine = ctx->priv;
+ static const int64_t chlayouts[] = { AV_CH_LAYOUT_MONO, -1 };
+ int sample_rates[] = { sine->sample_rate, -1 };
+ static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16,
+ AV_SAMPLE_FMT_NONE };
+ AVFilterFormats *formats;
+ AVFilterChannelLayouts *layouts;
+ int ret;
+
+ formats = ff_make_format_list(sample_fmts);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ret = ff_set_common_formats (ctx, formats);
+ if (ret < 0)
+ return ret;
+
+ layouts = avfilter_make_format64_list(chlayouts);
+ if (!layouts)
+ return AVERROR(ENOMEM);
+ ret = ff_set_common_channel_layouts(ctx, layouts);
+ if (ret < 0)
+ return ret;
+
+ formats = ff_make_format_list(sample_rates);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ return ff_set_common_samplerates(ctx, formats);
+}
+
+static av_cold int config_props(AVFilterLink *outlink)
+{
+ SineContext *sine = outlink->src->priv;
+ sine->duration = av_rescale(sine->duration, sine->sample_rate, AV_TIME_BASE);
+ return 0;
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ SineContext *sine = outlink->src->priv;
+ AVFrame *frame;
+ double values[VAR_VARS_NB] = {
+ [VAR_N] = outlink->frame_count_in,
+ [VAR_PTS] = sine->pts,
+ [VAR_T] = sine->pts * av_q2d(outlink->time_base),
+ [VAR_TB] = av_q2d(outlink->time_base),
+ };
+ int i, nb_samples = lrint(av_expr_eval(sine->samples_per_frame_expr, values, sine));
+ int16_t *samples;
+
+ if (nb_samples <= 0) {
+ av_log(sine, AV_LOG_WARNING, "nb samples expression evaluated to %d, "
+ "defaulting to 1024\n", nb_samples);
+ nb_samples = 1024;
+ }
+
+ if (sine->duration) {
+ nb_samples = FFMIN(nb_samples, sine->duration - sine->pts);
+ av_assert1(nb_samples >= 0);
+ if (!nb_samples)
+ return AVERROR_EOF;
+ }
+ if (!(frame = ff_get_audio_buffer(outlink, nb_samples)))
+ return AVERROR(ENOMEM);
+ samples = (int16_t *)frame->data[0];
+
+ for (i = 0; i < nb_samples; i++) {
+ samples[i] = sine->sin[sine->phi >> (32 - LOG_PERIOD)];
+ sine->phi += sine->dphi;
+ if (sine->beep_index < sine->beep_length) {
+ samples[i] += sine->sin[sine->phi_beep >> (32 - LOG_PERIOD)] << 1;
+ sine->phi_beep += sine->dphi_beep;
+ }
+ if (++sine->beep_index == sine->beep_period)
+ sine->beep_index = 0;
+ }
+
+ frame->pts = sine->pts;
+ sine->pts += nb_samples;
+ return ff_filter_frame(outlink, frame);
+}
+
+static const AVFilterPad sine_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .request_frame = request_frame,
+ .config_props = config_props,
+ },
+ { NULL }
+};
+
+AVFilter ff_asrc_sine = {
+ .name = "sine",
+ .description = NULL_IF_CONFIG_SMALL("Generate sine wave audio signal."),
+ .query_formats = query_formats,
+ .init = init,
+ .uninit = uninit,
+ .priv_size = sizeof(SineContext),
+ .inputs = NULL,
+ .outputs = sine_outputs,
+ .priv_class = &sine_class,
+};
diff --git a/libavfilter/audio.c b/libavfilter/audio.c
index 5fe9da95c3..5996f607b2 100644
--- a/libavfilter/audio.c
+++ b/libavfilter/audio.c
@@ -1,21 +1,25 @@
/*
- * This file is part of Libav.
+ * Copyright (c) Stefano Sabatini | stefasab at gmail.com
+ * Copyright (c) S.N. Hemanth Meenakshisundaram | smeenaks at ucsd.edu
*
- * Libav is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
@@ -23,6 +27,9 @@
#include "avfilter.h"
#include "internal.h"
+#define BUFFER_ALIGN 0
+
+
AVFrame *ff_null_get_audio_buffer(AVFilterLink *link, int nb_samples)
{
return ff_get_audio_buffer(link->dst->outputs[0], nb_samples);
@@ -30,26 +37,48 @@ AVFrame *ff_null_get_audio_buffer(AVFilterLink *link, int nb_samples)
AVFrame *ff_default_get_audio_buffer(AVFilterLink *link, int nb_samples)
{
- AVFrame *frame = av_frame_alloc();
- int channels = av_get_channel_layout_nb_channels(link->channel_layout);
- int ret;
+ AVFrame *frame = NULL;
+ int channels = link->channels;
+
+ av_assert0(channels == av_get_channel_layout_nb_channels(link->channel_layout) || !av_get_channel_layout_nb_channels(link->channel_layout));
+
+ if (!link->frame_pool) {
+ link->frame_pool = ff_frame_pool_audio_init(av_buffer_allocz, channels,
+ nb_samples, link->format, BUFFER_ALIGN);
+ if (!link->frame_pool)
+ return NULL;
+ } else {
+ int pool_channels = 0;
+ int pool_nb_samples = 0;
+ int pool_align = 0;
+ enum AVSampleFormat pool_format = AV_SAMPLE_FMT_NONE;
+
+ if (ff_frame_pool_get_audio_config(link->frame_pool,
+ &pool_channels, &pool_nb_samples,
+ &pool_format, &pool_align) < 0) {
+ return NULL;
+ }
+ if (pool_channels != channels || pool_nb_samples < nb_samples ||
+ pool_format != link->format || pool_align != BUFFER_ALIGN) {
+
+ ff_frame_pool_uninit((FFFramePool **)&link->frame_pool);
+ link->frame_pool = ff_frame_pool_audio_init(av_buffer_allocz, channels,
+ nb_samples, link->format, BUFFER_ALIGN);
+ if (!link->frame_pool)
+ return NULL;
+ }
+ }
+
+ frame = ff_frame_pool_get(link->frame_pool);
if (!frame)
return NULL;
- frame->nb_samples = nb_samples;
- frame->format = link->format;
+ frame->nb_samples = nb_samples;
frame->channel_layout = link->channel_layout;
- frame->sample_rate = link->sample_rate;
- ret = av_frame_get_buffer(frame, 0);
- if (ret < 0) {
- av_frame_free(&frame);
- return NULL;
- }
-
- av_samples_set_silence(frame->extended_data, 0, nb_samples, channels,
- link->format);
+ frame->sample_rate = link->sample_rate;
+ av_samples_set_silence(frame->extended_data, 0, nb_samples, channels, link->format);
return frame;
}
diff --git a/libavfilter/audio.h b/libavfilter/audio.h
index 4684b6ce60..6adc82dc81 100644
--- a/libavfilter/audio.h
+++ b/libavfilter/audio.h
@@ -1,18 +1,21 @@
/*
- * This file is part of Libav.
+ * Copyright (c) Stefano Sabatini | stefasab at gmail.com
+ * Copyright (c) S.N. Hemanth Meenakshisundaram | smeenaks at ucsd.edu
*
- * Libav is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -20,6 +23,25 @@
#define AVFILTER_AUDIO_H
#include "avfilter.h"
+#include "internal.h"
+
+static const enum AVSampleFormat ff_packed_sample_fmts_array[] = {
+ AV_SAMPLE_FMT_U8,
+ AV_SAMPLE_FMT_S16,
+ AV_SAMPLE_FMT_S32,
+ AV_SAMPLE_FMT_FLT,
+ AV_SAMPLE_FMT_DBL,
+ AV_SAMPLE_FMT_NONE
+};
+
+static const enum AVSampleFormat ff_planar_sample_fmts_array[] = {
+ AV_SAMPLE_FMT_U8P,
+ AV_SAMPLE_FMT_S16P,
+ AV_SAMPLE_FMT_S32P,
+ AV_SAMPLE_FMT_FLTP,
+ AV_SAMPLE_FMT_DBLP,
+ AV_SAMPLE_FMT_NONE
+};
/** default handler for get_audio_buffer() for audio inputs */
AVFrame *ff_default_get_audio_buffer(AVFilterLink *link, int nb_samples);
diff --git a/libavfilter/avf_abitscope.c b/libavfilter/avf_abitscope.c
new file mode 100644
index 0000000000..4f5d4c7b1c
--- /dev/null
+++ b/libavfilter/avf_abitscope.c
@@ -0,0 +1,250 @@
+/*
+ * Copyright (c) 2016 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/avstring.h"
+#include "libavutil/intreadwrite.h"
+#include "libavutil/opt.h"
+#include "libavutil/parseutils.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "audio.h"
+#include "video.h"
+#include "internal.h"
+
+typedef struct AudioBitScopeContext {
+ const AVClass *class;
+ int w, h;
+ AVRational frame_rate;
+ char *colors;
+
+ int nb_channels;
+ int depth;
+ uint8_t *fg;
+
+ uint64_t counter[64];
+} AudioBitScopeContext;
+
+#define OFFSET(x) offsetof(AudioBitScopeContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption abitscope_options[] = {
+ { "rate", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str="25"}, 0, 0, FLAGS },
+ { "r", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str="25"}, 0, 0, FLAGS },
+ { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="1024x256"}, 0, 0, FLAGS },
+ { "s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="1024x256"}, 0, 0, FLAGS },
+ { "colors", "set channels colors", OFFSET(colors), AV_OPT_TYPE_STRING, {.str = "red|green|blue|yellow|orange|lime|pink|magenta|brown" }, 0, 0, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(abitscope);
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *formats = NULL;
+ AVFilterChannelLayouts *layouts;
+ AVFilterLink *inlink = ctx->inputs[0];
+ AVFilterLink *outlink = ctx->outputs[0];
+ static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16P, AV_SAMPLE_FMT_S32P, AV_SAMPLE_FMT_NONE };
+ static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_RGBA, AV_PIX_FMT_NONE };
+ int ret;
+
+ formats = ff_make_format_list(sample_fmts);
+ if ((ret = ff_formats_ref(formats, &inlink->out_formats)) < 0)
+ return ret;
+
+ layouts = ff_all_channel_counts();
+ if (!layouts)
+ return AVERROR(ENOMEM);
+ if ((ret = ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts)) < 0)
+ return ret;
+
+ formats = ff_all_samplerates();
+ if ((ret = ff_formats_ref(formats, &inlink->out_samplerates)) < 0)
+ return ret;
+
+ formats = ff_make_format_list(pix_fmts);
+ if ((ret = ff_formats_ref(formats, &outlink->in_formats)) < 0)
+ return ret;
+
+ return 0;
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AudioBitScopeContext *s = ctx->priv;
+ int ch, nb_samples;
+ char *colors, *saveptr = NULL;
+
+ nb_samples = FFMAX(1024, ((double)inlink->sample_rate / av_q2d(s->frame_rate)) + 0.5);
+ inlink->partial_buf_size =
+ inlink->min_samples =
+ inlink->max_samples = nb_samples;
+ s->nb_channels = inlink->channels;
+ s->depth = inlink->format == AV_SAMPLE_FMT_S16P ? 16 : 32;
+
+ s->fg = av_malloc_array(s->nb_channels, 4 * sizeof(*s->fg));
+ if (!s->fg)
+ return AVERROR(ENOMEM);
+
+ colors = av_strdup(s->colors);
+ if (!colors)
+ return AVERROR(ENOMEM);
+
+ for (ch = 0; ch < s->nb_channels; ch++) {
+ uint8_t fg[4] = { 0xff, 0xff, 0xff, 0xff };
+ char *color;
+
+ color = av_strtok(ch == 0 ? colors : NULL, " |", &saveptr);
+ if (color)
+ av_parse_color(fg, color, -1, ctx);
+ s->fg[4 * ch + 0] = fg[0];
+ s->fg[4 * ch + 1] = fg[1];
+ s->fg[4 * ch + 2] = fg[2];
+ s->fg[4 * ch + 3] = fg[3];
+ }
+ av_free(colors);
+
+ return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AudioBitScopeContext *s = outlink->src->priv;
+
+ outlink->w = s->w;
+ outlink->h = s->h;
+ outlink->sample_aspect_ratio = (AVRational){1,1};
+ outlink->frame_rate = s->frame_rate;
+
+ return 0;
+}
+
+static void count_bits(AudioBitScopeContext *s, uint32_t sample, int max)
+{
+ int i;
+
+ for (i = 0; i < max; i++) {
+ if (sample & (1 << i))
+ s->counter[i]++;
+ }
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AudioBitScopeContext *s = ctx->priv;
+ AVFrame *outpicref;
+ int ch, i, j, b;
+
+ outpicref = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!outpicref) {
+ av_frame_free(&insamples);
+ return AVERROR(ENOMEM);
+ }
+
+ for (i = 0; i < outlink->h; i++)
+ memset(outpicref->data[0] + i * outpicref->linesize[0], 0, outlink->w * 4);
+
+ outpicref->pts = insamples->pts;
+ switch (insamples->format) {
+ case AV_SAMPLE_FMT_S16P:
+ for (ch = 0; ch < inlink->channels; ch++) {
+ uint16_t *in = (uint16_t *)insamples->extended_data[ch];
+ int w = outpicref->width / inlink->channels;
+ int h = outpicref->height / 16;
+ uint32_t color = AV_RN32(&s->fg[4 * ch]);
+
+ memset(s->counter, 0, sizeof(s->counter));
+ for (i = 0; i < insamples->nb_samples; i++)
+ count_bits(s, in[i], 16);
+
+ for (b = 0; b < 16; b++) {
+ for (j = 1; j < h - 1; j++) {
+ uint8_t *dst = outpicref->data[0] + (b * h + j) * outpicref->linesize[0] + w * ch * 4;
+ int ww = (s->counter[16 - b - 1] / (float)insamples->nb_samples) * (w - 1);
+
+ for (i = 0; i < ww; i++) {
+ AV_WN32(&dst[i * 4], color);
+ }
+ }
+ }
+ }
+ break;
+ case AV_SAMPLE_FMT_S32P:
+ for (ch = 0; ch < inlink->channels; ch++) {
+ uint32_t *in = (uint32_t *)insamples->extended_data[ch];
+ int w = outpicref->width / inlink->channels;
+ int h = outpicref->height / 32;
+ uint32_t color = AV_RN32(&s->fg[4 * ch]);
+
+ memset(s->counter, 0, sizeof(s->counter));
+ for (i = 0; i < insamples->nb_samples; i++)
+ count_bits(s, in[i], 32);
+
+ for (b = 0; b < 32; b++) {
+ for (j = 1; j < h - 1; j++) {
+ uint8_t *dst = outpicref->data[0] + (b * h + j) * outpicref->linesize[0] + w * ch * 4;
+ int ww = (s->counter[32 - b - 1] / (float)insamples->nb_samples) * (w - 1);
+
+ for (i = 0; i < ww; i++) {
+ AV_WN32(&dst[i * 4], color);
+ }
+ }
+ }
+ }
+ break;
+ }
+
+ av_frame_free(&insamples);
+
+ return ff_filter_frame(outlink, outpicref);
+}
+
+static const AVFilterPad inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ },
+ { NULL }
+};
+
+AVFilter ff_avf_abitscope = {
+ .name = "abitscope",
+ .description = NULL_IF_CONFIG_SMALL("Convert input audio to audio bit scope video output."),
+ .query_formats = query_formats,
+ .priv_size = sizeof(AudioBitScopeContext),
+ .inputs = inputs,
+ .outputs = outputs,
+ .priv_class = &abitscope_class,
+};
diff --git a/libavfilter/avf_ahistogram.c b/libavfilter/avf_ahistogram.c
new file mode 100644
index 0000000000..587415175b
--- /dev/null
+++ b/libavfilter/avf_ahistogram.c
@@ -0,0 +1,413 @@
+/*
+ * Copyright (c) 2015 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/opt.h"
+#include "libavutil/parseutils.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "audio.h"
+#include "video.h"
+#include "internal.h"
+
+enum DisplayScale { LINEAR, SQRT, CBRT, LOG, RLOG, NB_SCALES };
+enum AmplitudeScale { ALINEAR, ALOG, NB_ASCALES };
+enum SlideMode { REPLACE, SCROLL, NB_SLIDES };
+enum DisplayMode { SINGLE, SEPARATE, NB_DMODES };
+enum HistogramMode { ACCUMULATE, CURRENT, NB_HMODES };
+
+typedef struct AudioHistogramContext {
+ const AVClass *class;
+ AVFrame *out;
+ int w, h;
+ AVRational frame_rate;
+ uint64_t *achistogram;
+ uint64_t *shistogram;
+ int ascale;
+ int scale;
+ float phisto;
+ int histogram_h;
+ int apos;
+ int ypos;
+ int slide;
+ int dmode;
+ int dchannels;
+ int count;
+ int frame_count;
+ float *combine_buffer;
+ AVFrame *in[101];
+ int first;
+} AudioHistogramContext;
+
+#define OFFSET(x) offsetof(AudioHistogramContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption ahistogram_options[] = {
+ { "dmode", "set method to display channels", OFFSET(dmode), AV_OPT_TYPE_INT, {.i64=SINGLE}, 0, NB_DMODES-1, FLAGS, "dmode" },
+ { "single", "all channels use single histogram", 0, AV_OPT_TYPE_CONST, {.i64=SINGLE}, 0, 0, FLAGS, "dmode" },
+ { "separate", "each channel have own histogram", 0, AV_OPT_TYPE_CONST, {.i64=SEPARATE}, 0, 0, FLAGS, "dmode" },
+ { "rate", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str="25"}, 0, INT_MAX, FLAGS },
+ { "r", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str="25"}, 0, INT_MAX, FLAGS },
+ { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="hd720"}, 0, 0, FLAGS },
+ { "s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="hd720"}, 0, 0, FLAGS },
+ { "scale", "set display scale", OFFSET(scale), AV_OPT_TYPE_INT, {.i64=LOG}, LINEAR, NB_SCALES-1, FLAGS, "scale" },
+ { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=LOG}, 0, 0, FLAGS, "scale" },
+ { "sqrt", "square root", 0, AV_OPT_TYPE_CONST, {.i64=SQRT}, 0, 0, FLAGS, "scale" },
+ { "cbrt", "cubic root", 0, AV_OPT_TYPE_CONST, {.i64=CBRT}, 0, 0, FLAGS, "scale" },
+ { "lin", "linear", 0, AV_OPT_TYPE_CONST, {.i64=LINEAR}, 0, 0, FLAGS, "scale" },
+ { "rlog", "reverse logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=RLOG}, 0, 0, FLAGS, "scale" },
+ { "ascale", "set amplitude scale", OFFSET(ascale), AV_OPT_TYPE_INT, {.i64=ALOG}, LINEAR, NB_ASCALES-1, FLAGS, "ascale" },
+ { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=ALOG}, 0, 0, FLAGS, "ascale" },
+ { "lin", "linear", 0, AV_OPT_TYPE_CONST, {.i64=ALINEAR}, 0, 0, FLAGS, "ascale" },
+ { "acount", "how much frames to accumulate", OFFSET(count), AV_OPT_TYPE_INT, {.i64=1}, -1, 100, FLAGS },
+ { "rheight", "set histogram ratio of window height", OFFSET(phisto), AV_OPT_TYPE_FLOAT, {.dbl=0.10}, 0, 1, FLAGS },
+ { "slide", "set sonogram sliding", OFFSET(slide), AV_OPT_TYPE_INT, {.i64=REPLACE}, 0, NB_SLIDES-1, FLAGS, "slide" },
+ { "replace", "replace old rows with new", 0, AV_OPT_TYPE_CONST, {.i64=REPLACE}, 0, 0, FLAGS, "slide" },
+ { "scroll", "scroll from top to bottom", 0, AV_OPT_TYPE_CONST, {.i64=SCROLL}, 0, 0, FLAGS, "slide" },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(ahistogram);
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *formats = NULL;
+ AVFilterChannelLayouts *layouts = NULL;
+ AVFilterLink *inlink = ctx->inputs[0];
+ AVFilterLink *outlink = ctx->outputs[0];
+ static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_NONE };
+ static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUVA444P, AV_PIX_FMT_NONE };
+ int ret = AVERROR(EINVAL);
+
+ formats = ff_make_format_list(sample_fmts);
+ if ((ret = ff_formats_ref (formats, &inlink->out_formats )) < 0 ||
+ (layouts = ff_all_channel_counts()) == NULL ||
+ (ret = ff_channel_layouts_ref (layouts, &inlink->out_channel_layouts)) < 0)
+ return ret;
+
+ formats = ff_all_samplerates();
+ if ((ret = ff_formats_ref(formats, &inlink->out_samplerates)) < 0)
+ return ret;
+
+ formats = ff_make_format_list(pix_fmts);
+ if ((ret = ff_formats_ref(formats, &outlink->in_formats)) < 0)
+ return ret;
+
+ return 0;
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AudioHistogramContext *s = ctx->priv;
+ int nb_samples;
+
+ nb_samples = FFMAX(1024, ((double)inlink->sample_rate / av_q2d(s->frame_rate)) + 0.5);
+ inlink->partial_buf_size =
+ inlink->min_samples =
+ inlink->max_samples = nb_samples;
+
+ s->dchannels = s->dmode == SINGLE ? 1 : inlink->channels;
+ s->shistogram = av_calloc(s->w, s->dchannels * sizeof(*s->shistogram));
+ if (!s->shistogram)
+ return AVERROR(ENOMEM);
+
+ s->achistogram = av_calloc(s->w, s->dchannels * sizeof(*s->achistogram));
+ if (!s->achistogram)
+ return AVERROR(ENOMEM);
+
+ return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AudioHistogramContext *s = outlink->src->priv;
+
+ outlink->w = s->w;
+ outlink->h = s->h;
+ outlink->sample_aspect_ratio = (AVRational){1,1};
+ outlink->frame_rate = s->frame_rate;
+
+ s->histogram_h = s->h * s->phisto;
+ s->ypos = s->h * s->phisto;
+
+ if (s->dmode == SEPARATE) {
+ s->combine_buffer = av_malloc_array(outlink->w * 3, sizeof(*s->combine_buffer));
+ if (!s->combine_buffer)
+ return AVERROR(ENOMEM);
+ }
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AudioHistogramContext *s = ctx->priv;
+ const int H = s->histogram_h;
+ const int w = s->w;
+ int c, y, n, p, bin;
+ uint64_t acmax = 1;
+
+ if (!s->out || s->out->width != outlink->w ||
+ s->out->height != outlink->h) {
+ av_frame_free(&s->out);
+ s->out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!s->out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ for (n = H; n < s->h; n++) {
+ memset(s->out->data[0] + n * s->out->linesize[0], 0, w);
+ memset(s->out->data[1] + n * s->out->linesize[0], 127, w);
+ memset(s->out->data[2] + n * s->out->linesize[0], 127, w);
+ memset(s->out->data[3] + n * s->out->linesize[0], 0, w);
+ }
+ }
+
+ if (s->dmode == SEPARATE) {
+ for (y = 0; y < w; y++) {
+ s->combine_buffer[3 * y ] = 0;
+ s->combine_buffer[3 * y + 1] = 127.5;
+ s->combine_buffer[3 * y + 2] = 127.5;
+ }
+ }
+
+ for (n = 0; n < H; n++) {
+ memset(s->out->data[0] + n * s->out->linesize[0], 0, w);
+ memset(s->out->data[1] + n * s->out->linesize[0], 127, w);
+ memset(s->out->data[2] + n * s->out->linesize[0], 127, w);
+ memset(s->out->data[3] + n * s->out->linesize[0], 0, w);
+ }
+ s->out->pts = in->pts;
+
+ s->first = s->frame_count;
+
+ switch (s->ascale) {
+ case ALINEAR:
+ for (c = 0; c < inlink->channels; c++) {
+ const float *src = (const float *)in->extended_data[c];
+ uint64_t *achistogram = &s->achistogram[(s->dmode == SINGLE ? 0: c) * w];
+
+ for (n = 0; n < in->nb_samples; n++) {
+ bin = lrint(av_clipf(fabsf(src[n]), 0, 1) * (w - 1));
+
+ achistogram[bin]++;
+ }
+
+ if (s->in[s->first] && s->count >= 0) {
+ uint64_t *shistogram = &s->shistogram[(s->dmode == SINGLE ? 0: c) * w];
+ const float *src2 = (const float *)s->in[s->first]->extended_data[c];
+
+ for (n = 0; n < in->nb_samples; n++) {
+ bin = lrint(av_clipf(fabsf(src2[n]), 0, 1) * (w - 1));
+
+ shistogram[bin]++;
+ }
+ }
+ }
+ break;
+ case ALOG:
+ for (c = 0; c < inlink->channels; c++) {
+ const float *src = (const float *)in->extended_data[c];
+ uint64_t *achistogram = &s->achistogram[(s->dmode == SINGLE ? 0: c) * w];
+
+ for (n = 0; n < in->nb_samples; n++) {
+ bin = lrint(av_clipf(1 + log10(fabsf(src[n])) / 6, 0, 1) * (w - 1));
+
+ achistogram[bin]++;
+ }
+
+ if (s->in[s->first] && s->count >= 0) {
+ uint64_t *shistogram = &s->shistogram[(s->dmode == SINGLE ? 0: c) * w];
+ const float *src2 = (const float *)s->in[s->first]->extended_data[c];
+
+ for (n = 0; n < in->nb_samples; n++) {
+ bin = lrint(av_clipf(1 + log10(fabsf(src2[n])) / 6, 0, 1) * (w - 1));
+
+ shistogram[bin]++;
+ }
+ }
+ }
+ break;
+ }
+
+ av_frame_free(&s->in[s->frame_count]);
+ s->in[s->frame_count] = in;
+ s->frame_count++;
+ if (s->frame_count > s->count)
+ s->frame_count = 0;
+
+ for (n = 0; n < w * s->dchannels; n++) {
+ acmax = FFMAX(s->achistogram[n] - s->shistogram[n], acmax);
+ }
+
+ for (c = 0; c < s->dchannels; c++) {
+ uint64_t *shistogram = &s->shistogram[c * w];
+ uint64_t *achistogram = &s->achistogram[c * w];
+ float yf, uf, vf;
+
+ if (s->dmode == SEPARATE) {
+ yf = 256.0f / s->dchannels;
+ uf = yf * M_PI;
+ vf = yf * M_PI;
+ uf *= 0.5 * sin((2 * M_PI * c) / s->dchannels);
+ vf *= 0.5 * cos((2 * M_PI * c) / s->dchannels);
+ }
+
+ for (n = 0; n < w; n++) {
+ double a, aa;
+ int h;
+
+ a = achistogram[n] - shistogram[n];
+
+ switch (s->scale) {
+ case LINEAR:
+ aa = a / (double)acmax;
+ break;
+ case SQRT:
+ aa = sqrt(a) / sqrt(acmax);
+ break;
+ case CBRT:
+ aa = cbrt(a) / cbrt(acmax);
+ break;
+ case LOG:
+ aa = log2(a + 1) / log2(acmax + 1);
+ break;
+ case RLOG:
+ aa = 1. - log2(a + 1) / log2(acmax + 1);
+ if (aa == 1.)
+ aa = 0;
+ break;
+ default:
+ av_assert0(0);
+ }
+
+ h = aa * (H - 1);
+
+ if (s->dmode == SINGLE) {
+
+ for (y = H - h; y < H; y++) {
+ s->out->data[0][y * s->out->linesize[0] + n] = 255;
+ s->out->data[3][y * s->out->linesize[0] + n] = 255;
+ }
+
+ if (s->h - H > 0) {
+ h = aa * 255;
+
+ s->out->data[0][s->ypos * s->out->linesize[0] + n] = h;
+ s->out->data[1][s->ypos * s->out->linesize[1] + n] = 127;
+ s->out->data[2][s->ypos * s->out->linesize[2] + n] = 127;
+ s->out->data[3][s->ypos * s->out->linesize[3] + n] = 255;
+ }
+ } else if (s->dmode == SEPARATE) {
+ float *out = &s->combine_buffer[3 * n];
+ int old;
+
+ old = s->out->data[0][(H - h) * s->out->linesize[0] + n];
+ for (y = H - h; y < H; y++) {
+ if (s->out->data[0][y * s->out->linesize[0] + n] != old)
+ break;
+ old = s->out->data[0][y * s->out->linesize[0] + n];
+ s->out->data[0][y * s->out->linesize[0] + n] = yf;
+ s->out->data[1][y * s->out->linesize[1] + n] = 128+uf;
+ s->out->data[2][y * s->out->linesize[2] + n] = 128+vf;
+ s->out->data[3][y * s->out->linesize[3] + n] = 255;
+ }
+
+ out[0] += aa * yf;
+ out[1] += aa * uf;
+ out[2] += aa * vf;
+ }
+ }
+ }
+
+ if (s->h - H > 0) {
+ if (s->dmode == SEPARATE) {
+ for (n = 0; n < w; n++) {
+ float *cb = &s->combine_buffer[3 * n];
+
+ s->out->data[0][s->ypos * s->out->linesize[0] + n] = cb[0];
+ s->out->data[1][s->ypos * s->out->linesize[1] + n] = cb[1];
+ s->out->data[2][s->ypos * s->out->linesize[2] + n] = cb[2];
+ s->out->data[3][s->ypos * s->out->linesize[3] + n] = 255;
+ }
+ }
+
+ if (s->slide == SCROLL) {
+ for (p = 0; p < 4; p++) {
+ for (y = s->h; y >= H + 1; y--) {
+ memmove(s->out->data[p] + (y ) * s->out->linesize[p],
+ s->out->data[p] + (y-1) * s->out->linesize[p], w);
+ }
+ }
+ }
+
+ s->ypos++;
+ if (s->slide == SCROLL || s->ypos >= s->h)
+ s->ypos = H;
+ }
+
+ return ff_filter_frame(outlink, av_frame_clone(s->out));
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ AudioHistogramContext *s = ctx->priv;
+ int i;
+
+ av_frame_free(&s->out);
+ av_freep(&s->shistogram);
+ av_freep(&s->achistogram);
+ av_freep(&s->combine_buffer);
+ for (i = 0; i < 101; i++)
+ av_frame_free(&s->in[i]);
+}
+
+static const AVFilterPad audiovectorscope_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad audiovectorscope_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ },
+ { NULL }
+};
+
+AVFilter ff_avf_ahistogram = {
+ .name = "ahistogram",
+ .description = NULL_IF_CONFIG_SMALL("Convert input audio to histogram video output."),
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .priv_size = sizeof(AudioHistogramContext),
+ .inputs = audiovectorscope_inputs,
+ .outputs = audiovectorscope_outputs,
+ .priv_class = &ahistogram_class,
+};
diff --git a/libavfilter/avf_aphasemeter.c b/libavfilter/avf_aphasemeter.c
new file mode 100644
index 0000000000..bfd77861ec
--- /dev/null
+++ b/libavfilter/avf_aphasemeter.c
@@ -0,0 +1,280 @@
+/*
+ * Copyright (c) 2015 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * audio to video multimedia aphasemeter filter
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/intreadwrite.h"
+#include "libavutil/opt.h"
+#include "libavutil/parseutils.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "audio.h"
+#include "video.h"
+#include "internal.h"
+
+typedef struct AudioPhaseMeterContext {
+ const AVClass *class;
+ AVFrame *out;
+ int do_video;
+ int w, h;
+ AVRational frame_rate;
+ int contrast[4];
+ uint8_t *mpc_str;
+ uint8_t mpc[4];
+ int draw_median_phase;
+} AudioPhaseMeterContext;
+
+#define OFFSET(x) offsetof(AudioPhaseMeterContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption aphasemeter_options[] = {
+ { "rate", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str="25"}, 0, INT_MAX, FLAGS },
+ { "r", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str="25"}, 0, INT_MAX, FLAGS },
+ { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="800x400"}, 0, 0, FLAGS },
+ { "s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="800x400"}, 0, 0, FLAGS },
+ { "rc", "set red contrast", OFFSET(contrast[0]), AV_OPT_TYPE_INT, {.i64=2}, 0, 255, FLAGS },
+ { "gc", "set green contrast", OFFSET(contrast[1]), AV_OPT_TYPE_INT, {.i64=7}, 0, 255, FLAGS },
+ { "bc", "set blue contrast", OFFSET(contrast[2]), AV_OPT_TYPE_INT, {.i64=1}, 0, 255, FLAGS },
+ { "mpc", "set median phase color", OFFSET(mpc_str), AV_OPT_TYPE_STRING, {.str = "none"}, 0, 0, FLAGS },
+ { "video", "set video output", OFFSET(do_video), AV_OPT_TYPE_BOOL, {.i64 = 1}, 0, 1, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(aphasemeter);
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AudioPhaseMeterContext *s = ctx->priv;
+ AVFilterFormats *formats = NULL;
+ AVFilterChannelLayouts *layout = NULL;
+ AVFilterLink *inlink = ctx->inputs[0];
+ AVFilterLink *outlink = ctx->outputs[0];
+ static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_NONE };
+ static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_RGBA, AV_PIX_FMT_NONE };
+ int ret;
+
+ formats = ff_make_format_list(sample_fmts);
+ if ((ret = ff_formats_ref (formats, &inlink->out_formats )) < 0 ||
+ (ret = ff_formats_ref (formats, &outlink->in_formats )) < 0 ||
+ (ret = ff_add_channel_layout (&layout, AV_CH_LAYOUT_STEREO )) < 0 ||
+ (ret = ff_channel_layouts_ref (layout , &inlink->out_channel_layouts)) < 0 ||
+ (ret = ff_channel_layouts_ref (layout , &outlink->in_channel_layouts)) < 0)
+ return ret;
+
+ formats = ff_all_samplerates();
+ if ((ret = ff_formats_ref(formats, &inlink->out_samplerates)) < 0 ||
+ (ret = ff_formats_ref(formats, &outlink->in_samplerates)) < 0)
+ return ret;
+
+ if (s->do_video) {
+ AVFilterLink *outlink = ctx->outputs[1];
+
+ formats = ff_make_format_list(pix_fmts);
+ if ((ret = ff_formats_ref(formats, &outlink->in_formats)) < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AudioPhaseMeterContext *s = ctx->priv;
+ int nb_samples;
+
+ if (s->do_video) {
+ nb_samples = FFMAX(1024, ((double)inlink->sample_rate / av_q2d(s->frame_rate)) + 0.5);
+ inlink->partial_buf_size =
+ inlink->min_samples =
+ inlink->max_samples = nb_samples;
+ }
+
+ return 0;
+}
+
+static int config_video_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ AudioPhaseMeterContext *s = ctx->priv;
+
+ outlink->w = s->w;
+ outlink->h = s->h;
+ outlink->sample_aspect_ratio = (AVRational){1,1};
+ outlink->frame_rate = s->frame_rate;
+
+ if (!strcmp(s->mpc_str, "none"))
+ s->draw_median_phase = 0;
+ else if (av_parse_color(s->mpc, s->mpc_str, -1, ctx) >= 0)
+ s->draw_median_phase = 1;
+ else
+ return AVERROR(EINVAL);
+
+ return 0;
+}
+
+static inline int get_x(float phase, int w)
+{
+ return (phase + 1.) / 2. * (w - 1);
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AudioPhaseMeterContext *s = ctx->priv;
+ AVFilterLink *outlink = s->do_video ? ctx->outputs[1] : NULL;
+ AVFilterLink *aoutlink = ctx->outputs[0];
+ AVDictionary **metadata;
+ const int rc = s->contrast[0];
+ const int gc = s->contrast[1];
+ const int bc = s->contrast[2];
+ float fphase = 0;
+ AVFrame *out;
+ uint8_t *dst;
+ int i;
+
+ if (s->do_video && (!s->out || s->out->width != outlink->w ||
+ s->out->height != outlink->h)) {
+ av_frame_free(&s->out);
+ s->out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!s->out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+
+ out = s->out;
+ for (i = 0; i < outlink->h; i++)
+ memset(out->data[0] + i * out->linesize[0], 0, outlink->w * 4);
+ } else if (s->do_video) {
+ out = s->out;
+ for (i = outlink->h - 1; i >= 10; i--)
+ memmove(out->data[0] + (i ) * out->linesize[0],
+ out->data[0] + (i-1) * out->linesize[0],
+ outlink->w * 4);
+ for (i = 0; i < outlink->w; i++)
+ AV_WL32(out->data[0] + i * 4, 0);
+ }
+
+ for (i = 0; i < in->nb_samples; i++) {
+ const float *src = (float *)in->data[0] + i * 2;
+ const float f = src[0] * src[1] / (src[0]*src[0] + src[1] * src[1]) * 2;
+ const float phase = isnan(f) ? 1 : f;
+ const int x = get_x(phase, s->w);
+
+ if (s->do_video) {
+ dst = out->data[0] + x * 4;
+ dst[0] = FFMIN(255, dst[0] + rc);
+ dst[1] = FFMIN(255, dst[1] + gc);
+ dst[2] = FFMIN(255, dst[2] + bc);
+ dst[3] = 255;
+ }
+ fphase += phase;
+ }
+ fphase /= in->nb_samples;
+
+ if (s->do_video) {
+ if (s->draw_median_phase) {
+ dst = out->data[0] + get_x(fphase, s->w) * 4;
+ AV_WL32(dst, AV_RL32(s->mpc));
+ }
+
+ for (i = 1; i < 10 && i < outlink->h; i++)
+ memcpy(out->data[0] + i * out->linesize[0], out->data[0], outlink->w * 4);
+ }
+
+ metadata = avpriv_frame_get_metadatap(in);
+ if (metadata) {
+ uint8_t value[128];
+
+ snprintf(value, sizeof(value), "%f", fphase);
+ av_dict_set(metadata, "lavfi.aphasemeter.phase", value, 0);
+ }
+
+ if (s->do_video) {
+ s->out->pts = in->pts;
+ ff_filter_frame(outlink, av_frame_clone(s->out));
+ }
+ return ff_filter_frame(aoutlink, in);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ AudioPhaseMeterContext *s = ctx->priv;
+ int i;
+
+ av_frame_free(&s->out);
+ for (i = 0; i < ctx->nb_outputs; i++)
+ av_freep(&ctx->output_pads[i].name);
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ AudioPhaseMeterContext *s = ctx->priv;
+ AVFilterPad pad;
+
+ pad = (AVFilterPad){
+ .name = av_strdup("out0"),
+ .type = AVMEDIA_TYPE_AUDIO,
+ };
+ if (!pad.name)
+ return AVERROR(ENOMEM);
+ ff_insert_outpad(ctx, 0, &pad);
+
+ if (s->do_video) {
+ pad = (AVFilterPad){
+ .name = av_strdup("out1"),
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_video_output,
+ };
+ if (!pad.name)
+ return AVERROR(ENOMEM);
+ ff_insert_outpad(ctx, 1, &pad);
+ }
+
+ return 0;
+}
+
+static const AVFilterPad inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_avf_aphasemeter = {
+ .name = "aphasemeter",
+ .description = NULL_IF_CONFIG_SMALL("Convert input audio to phase meter video output."),
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .priv_size = sizeof(AudioPhaseMeterContext),
+ .inputs = inputs,
+ .outputs = NULL,
+ .priv_class = &aphasemeter_class,
+ .flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
+};
diff --git a/libavfilter/avf_avectorscope.c b/libavfilter/avf_avectorscope.c
new file mode 100644
index 0000000000..3063283efb
--- /dev/null
+++ b/libavfilter/avf_avectorscope.c
@@ -0,0 +1,354 @@
+/*
+ * Copyright (c) 2013 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * audio to video multimedia vectorscope filter
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/opt.h"
+#include "libavutil/parseutils.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "audio.h"
+#include "video.h"
+#include "internal.h"
+
+enum VectorScopeMode {
+ LISSAJOUS,
+ LISSAJOUS_XY,
+ POLAR,
+ MODE_NB,
+};
+
+enum VectorScopeDraw {
+ DOT,
+ LINE,
+ DRAW_NB,
+};
+
+enum VectorScopeScale {
+ LIN,
+ SQRT,
+ CBRT,
+ LOG,
+ SCALE_NB,
+};
+
+typedef struct AudioVectorScopeContext {
+ const AVClass *class;
+ AVFrame *outpicref;
+ int w, h;
+ int hw, hh;
+ int mode;
+ int draw;
+ int scale;
+ int contrast[4];
+ int fade[4];
+ double zoom;
+ unsigned prev_x, prev_y;
+ AVRational frame_rate;
+} AudioVectorScopeContext;
+
+#define OFFSET(x) offsetof(AudioVectorScopeContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption avectorscope_options[] = {
+ { "mode", "set mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=LISSAJOUS}, 0, MODE_NB-1, FLAGS, "mode" },
+ { "m", "set mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=LISSAJOUS}, 0, MODE_NB-1, FLAGS, "mode" },
+ { "lissajous", "", 0, AV_OPT_TYPE_CONST, {.i64=LISSAJOUS}, 0, 0, FLAGS, "mode" },
+ { "lissajous_xy", "", 0, AV_OPT_TYPE_CONST, {.i64=LISSAJOUS_XY}, 0, 0, FLAGS, "mode" },
+ { "polar", "", 0, AV_OPT_TYPE_CONST, {.i64=POLAR}, 0, 0, FLAGS, "mode" },
+ { "rate", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str="25"}, 0, INT_MAX, FLAGS },
+ { "r", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str="25"}, 0, INT_MAX, FLAGS },
+ { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="400x400"}, 0, 0, FLAGS },
+ { "s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="400x400"}, 0, 0, FLAGS },
+ { "rc", "set red contrast", OFFSET(contrast[0]), AV_OPT_TYPE_INT, {.i64=40}, 0, 255, FLAGS },
+ { "gc", "set green contrast", OFFSET(contrast[1]), AV_OPT_TYPE_INT, {.i64=160}, 0, 255, FLAGS },
+ { "bc", "set blue contrast", OFFSET(contrast[2]), AV_OPT_TYPE_INT, {.i64=80}, 0, 255, FLAGS },
+ { "ac", "set alpha contrast", OFFSET(contrast[3]), AV_OPT_TYPE_INT, {.i64=255}, 0, 255, FLAGS },
+ { "rf", "set red fade", OFFSET(fade[0]), AV_OPT_TYPE_INT, {.i64=15}, 0, 255, FLAGS },
+ { "gf", "set green fade", OFFSET(fade[1]), AV_OPT_TYPE_INT, {.i64=10}, 0, 255, FLAGS },
+ { "bf", "set blue fade", OFFSET(fade[2]), AV_OPT_TYPE_INT, {.i64=5}, 0, 255, FLAGS },
+ { "af", "set alpha fade", OFFSET(fade[3]), AV_OPT_TYPE_INT, {.i64=5}, 0, 255, FLAGS },
+ { "zoom", "set zoom factor", OFFSET(zoom), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 1, 10, FLAGS },
+ { "draw", "set draw mode", OFFSET(draw), AV_OPT_TYPE_INT, {.i64=DOT}, 0, DRAW_NB-1, FLAGS, "draw" },
+ { "dot", "", 0, AV_OPT_TYPE_CONST, {.i64=DOT} , 0, 0, FLAGS, "draw" },
+ { "line", "", 0, AV_OPT_TYPE_CONST, {.i64=LINE}, 0, 0, FLAGS, "draw" },
+ { "scale", "set amplitude scale mode", OFFSET(scale), AV_OPT_TYPE_INT, {.i64=LIN}, 0, SCALE_NB-1, FLAGS, "scale" },
+ { "lin", "linear", 0, AV_OPT_TYPE_CONST, {.i64=LIN}, 0, 0, FLAGS, "scale" },
+ { "sqrt", "square root", 0, AV_OPT_TYPE_CONST, {.i64=SQRT}, 0, 0, FLAGS, "scale" },
+ { "cbrt", "cube root", 0, AV_OPT_TYPE_CONST, {.i64=CBRT}, 0, 0, FLAGS, "scale" },
+ { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=LOG}, 0, 0, FLAGS, "scale" },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(avectorscope);
+
+static void draw_dot(AudioVectorScopeContext *s, unsigned x, unsigned y)
+{
+ const int linesize = s->outpicref->linesize[0];
+ uint8_t *dst;
+
+ if (s->zoom > 1) {
+ if (y >= s->h || x >= s->w)
+ return;
+ } else {
+ y = FFMIN(y, s->h - 1);
+ x = FFMIN(x, s->w - 1);
+ }
+
+ dst = &s->outpicref->data[0][y * linesize + x * 4];
+ dst[0] = FFMIN(dst[0] + s->contrast[0], 255);
+ dst[1] = FFMIN(dst[1] + s->contrast[1], 255);
+ dst[2] = FFMIN(dst[2] + s->contrast[2], 255);
+ dst[3] = FFMIN(dst[3] + s->contrast[3], 255);
+}
+
+static void draw_line(AudioVectorScopeContext *s, int x0, int y0, int x1, int y1)
+{
+ int dx = FFABS(x1-x0), sx = x0 < x1 ? 1 : -1;
+ int dy = FFABS(y1-y0), sy = y0 < y1 ? 1 : -1;
+ int err = (dx>dy ? dx : -dy) / 2, e2;
+
+ for (;;) {
+ draw_dot(s, x0, y0);
+
+ if (x0 == x1 && y0 == y1)
+ break;
+
+ e2 = err;
+
+ if (e2 >-dx) {
+ err -= dy;
+ x0 += sx;
+ }
+
+ if (e2 < dy) {
+ err += dx;
+ y0 += sy;
+ }
+ }
+}
+
+static void fade(AudioVectorScopeContext *s)
+{
+ const int linesize = s->outpicref->linesize[0];
+ int i, j;
+
+ if (s->fade[0] || s->fade[1] || s->fade[2]) {
+ uint8_t *d = s->outpicref->data[0];
+ for (i = 0; i < s->h; i++) {
+ for (j = 0; j < s->w*4; j+=4) {
+ d[j+0] = FFMAX(d[j+0] - s->fade[0], 0);
+ d[j+1] = FFMAX(d[j+1] - s->fade[1], 0);
+ d[j+2] = FFMAX(d[j+2] - s->fade[2], 0);
+ d[j+3] = FFMAX(d[j+3] - s->fade[3], 0);
+ }
+ d += linesize;
+ }
+ }
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *formats = NULL;
+ AVFilterChannelLayouts *layout = NULL;
+ AVFilterLink *inlink = ctx->inputs[0];
+ AVFilterLink *outlink = ctx->outputs[0];
+ static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_NONE };
+ static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_RGBA, AV_PIX_FMT_NONE };
+ int ret;
+
+ formats = ff_make_format_list(sample_fmts);
+ if ((ret = ff_formats_ref (formats, &inlink->out_formats )) < 0 ||
+ (ret = ff_add_channel_layout (&layout, AV_CH_LAYOUT_STEREO )) < 0 ||
+ (ret = ff_channel_layouts_ref (layout , &inlink->out_channel_layouts)) < 0)
+ return ret;
+
+ formats = ff_all_samplerates();
+ if ((ret = ff_formats_ref(formats, &inlink->out_samplerates)) < 0)
+ return ret;
+
+ formats = ff_make_format_list(pix_fmts);
+ if ((ret = ff_formats_ref(formats, &outlink->in_formats)) < 0)
+ return ret;
+
+ return 0;
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AudioVectorScopeContext *s = ctx->priv;
+ int nb_samples;
+
+ nb_samples = FFMAX(1024, ((double)inlink->sample_rate / av_q2d(s->frame_rate)) + 0.5);
+ inlink->partial_buf_size =
+ inlink->min_samples =
+ inlink->max_samples = nb_samples;
+
+ return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AudioVectorScopeContext *s = outlink->src->priv;
+
+ outlink->w = s->w;
+ outlink->h = s->h;
+ outlink->sample_aspect_ratio = (AVRational){1,1};
+ outlink->frame_rate = s->frame_rate;
+
+ s->prev_x = s->hw = s->w / 2;
+ s->prev_y = s->hh = s->mode == POLAR ? s->h - 1 : s->h / 2;
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AudioVectorScopeContext *s = ctx->priv;
+ const int hw = s->hw;
+ const int hh = s->hh;
+ unsigned x, y;
+ unsigned prev_x = s->prev_x, prev_y = s->prev_y;
+ const double zoom = s->zoom;
+ int i;
+
+ if (!s->outpicref || s->outpicref->width != outlink->w ||
+ s->outpicref->height != outlink->h) {
+ av_frame_free(&s->outpicref);
+ s->outpicref = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!s->outpicref) {
+ av_frame_free(&insamples);
+ return AVERROR(ENOMEM);
+ }
+
+ for (i = 0; i < outlink->h; i++)
+ memset(s->outpicref->data[0] + i * s->outpicref->linesize[0], 0, outlink->w * 4);
+ }
+ s->outpicref->pts = insamples->pts;
+
+ fade(s);
+
+ for (i = 0; i < insamples->nb_samples; i++) {
+ int16_t *samples = (int16_t *)insamples->data[0] + i * 2;
+ float *samplesf = (float *)insamples->data[0] + i * 2;
+ float src[2];
+
+ switch (insamples->format) {
+ case AV_SAMPLE_FMT_S16:
+ src[0] = samples[0] / (float)INT16_MAX;
+ src[1] = samples[1] / (float)INT16_MAX;
+ break;
+ case AV_SAMPLE_FMT_FLT:
+ src[0] = samplesf[0];
+ src[1] = samplesf[1];
+ break;
+ }
+
+ switch (s->scale) {
+ case SQRT:
+ src[0] = FFSIGN(src[0]) * sqrtf(FFABS(src[0]));
+ src[1] = FFSIGN(src[1]) * sqrtf(FFABS(src[1]));
+ break;
+ case CBRT:
+ src[0] = FFSIGN(src[0]) * cbrtf(FFABS(src[0]));
+ src[1] = FFSIGN(src[1]) * cbrtf(FFABS(src[1]));
+ break;
+ case LOG:
+ src[0] = FFSIGN(src[0]) * logf(1 + FFABS(src[0])) / logf(2);
+ src[1] = FFSIGN(src[1]) * logf(1 + FFABS(src[1])) / logf(2);
+ break;
+ }
+
+ if (s->mode == LISSAJOUS) {
+ x = ((src[1] - src[0]) * zoom / 2 + 1) * hw;
+ y = (1.0 - (src[0] + src[1]) * zoom / 2) * hh;
+ } else if (s->mode == LISSAJOUS_XY) {
+ x = (src[1] * zoom + 1) * hw;
+ y = (src[0] * zoom + 1) * hh;
+ } else {
+ float sx, sy, cx, cy;
+
+ sx = src[1] * zoom;
+ sy = src[0] * zoom;
+ cx = sx * sqrtf(1 - 0.5 * sy * sy);
+ cy = sy * sqrtf(1 - 0.5 * sx * sx);
+ x = hw + hw * FFSIGN(cx + cy) * (cx - cy) * .7;
+ y = s->h - s->h * fabsf(cx + cy) * .7;
+ }
+
+ if (s->draw == DOT) {
+ draw_dot(s, x, y);
+ } else {
+ draw_line(s, x, y, prev_x, prev_y);
+ }
+ prev_x = x;
+ prev_y = y;
+ }
+
+ s->prev_x = x, s->prev_y = y;
+ av_frame_free(&insamples);
+
+ return ff_filter_frame(outlink, av_frame_clone(s->outpicref));
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ AudioVectorScopeContext *s = ctx->priv;
+
+ av_frame_free(&s->outpicref);
+}
+
+static const AVFilterPad audiovectorscope_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad audiovectorscope_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ },
+ { NULL }
+};
+
+AVFilter ff_avf_avectorscope = {
+ .name = "avectorscope",
+ .description = NULL_IF_CONFIG_SMALL("Convert input audio to vectorscope video output."),
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .priv_size = sizeof(AudioVectorScopeContext),
+ .inputs = audiovectorscope_inputs,
+ .outputs = audiovectorscope_outputs,
+ .priv_class = &avectorscope_class,
+};
diff --git a/libavfilter/avf_concat.c b/libavfilter/avf_concat.c
new file mode 100644
index 0000000000..56e41792a7
--- /dev/null
+++ b/libavfilter/avf_concat.c
@@ -0,0 +1,425 @@
+/*
+ * Copyright (c) 2012 Nicolas George
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * concat audio-video filter
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/avstring.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#define FF_BUFQUEUE_SIZE 256
+#include "bufferqueue.h"
+#include "internal.h"
+#include "video.h"
+#include "audio.h"
+
+#define TYPE_ALL 2
+
+typedef struct {
+ const AVClass *class;
+ unsigned nb_streams[TYPE_ALL]; /**< number of out streams of each type */
+ unsigned nb_segments;
+ unsigned cur_idx; /**< index of the first input of current segment */
+ int64_t delta_ts; /**< timestamp to add to produce output timestamps */
+ unsigned nb_in_active; /**< number of active inputs in current segment */
+ unsigned unsafe;
+ struct concat_in {
+ int64_t pts;
+ int64_t nb_frames;
+ unsigned eof;
+ struct FFBufQueue queue;
+ } *in;
+} ConcatContext;
+
+#define OFFSET(x) offsetof(ConcatContext, x)
+#define A AV_OPT_FLAG_AUDIO_PARAM
+#define F AV_OPT_FLAG_FILTERING_PARAM
+#define V AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption concat_options[] = {
+ { "n", "specify the number of segments", OFFSET(nb_segments),
+ AV_OPT_TYPE_INT, { .i64 = 2 }, 1, INT_MAX, V|A|F},
+ { "v", "specify the number of video streams",
+ OFFSET(nb_streams[AVMEDIA_TYPE_VIDEO]),
+ AV_OPT_TYPE_INT, { .i64 = 1 }, 0, INT_MAX, V|F },
+ { "a", "specify the number of audio streams",
+ OFFSET(nb_streams[AVMEDIA_TYPE_AUDIO]),
+ AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, A|F},
+ { "unsafe", "enable unsafe mode",
+ OFFSET(unsafe),
+ AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, V|A|F},
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(concat);
+
+static int query_formats(AVFilterContext *ctx)
+{
+ ConcatContext *cat = ctx->priv;
+ unsigned type, nb_str, idx0 = 0, idx, str, seg;
+ AVFilterFormats *formats, *rates = NULL;
+ AVFilterChannelLayouts *layouts = NULL;
+ int ret;
+
+ for (type = 0; type < TYPE_ALL; type++) {
+ nb_str = cat->nb_streams[type];
+ for (str = 0; str < nb_str; str++) {
+ idx = idx0;
+
+ /* Set the output formats */
+ formats = ff_all_formats(type);
+ if ((ret = ff_formats_ref(formats, &ctx->outputs[idx]->in_formats)) < 0)
+ return ret;
+
+ if (type == AVMEDIA_TYPE_AUDIO) {
+ rates = ff_all_samplerates();
+ if ((ret = ff_formats_ref(rates, &ctx->outputs[idx]->in_samplerates)) < 0)
+ return ret;
+ layouts = ff_all_channel_layouts();
+ if ((ret = ff_channel_layouts_ref(layouts, &ctx->outputs[idx]->in_channel_layouts)) < 0)
+ return ret;
+ }
+
+ /* Set the same formats for each corresponding input */
+ for (seg = 0; seg < cat->nb_segments; seg++) {
+ if ((ret = ff_formats_ref(formats, &ctx->inputs[idx]->out_formats)) < 0)
+ return ret;
+ if (type == AVMEDIA_TYPE_AUDIO) {
+ if ((ret = ff_formats_ref(rates, &ctx->inputs[idx]->out_samplerates)) < 0 ||
+ (ret = ff_channel_layouts_ref(layouts, &ctx->inputs[idx]->out_channel_layouts)) < 0)
+ return ret;
+ }
+ idx += ctx->nb_outputs;
+ }
+
+ idx0++;
+ }
+ }
+ return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ ConcatContext *cat = ctx->priv;
+ unsigned out_no = FF_OUTLINK_IDX(outlink);
+ unsigned in_no = out_no, seg;
+ AVFilterLink *inlink = ctx->inputs[in_no];
+
+ /* enhancement: find a common one */
+ outlink->time_base = AV_TIME_BASE_Q;
+ outlink->w = inlink->w;
+ outlink->h = inlink->h;
+ outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
+ outlink->format = inlink->format;
+ for (seg = 1; seg < cat->nb_segments; seg++) {
+ inlink = ctx->inputs[in_no += ctx->nb_outputs];
+ if (!outlink->sample_aspect_ratio.num)
+ outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
+ /* possible enhancement: unsafe mode, do not check */
+ if (outlink->w != inlink->w ||
+ outlink->h != inlink->h ||
+ outlink->sample_aspect_ratio.num != inlink->sample_aspect_ratio.num &&
+ inlink->sample_aspect_ratio.num ||
+ outlink->sample_aspect_ratio.den != inlink->sample_aspect_ratio.den) {
+ av_log(ctx, AV_LOG_ERROR, "Input link %s parameters "
+ "(size %dx%d, SAR %d:%d) do not match the corresponding "
+ "output link %s parameters (%dx%d, SAR %d:%d)\n",
+ ctx->input_pads[in_no].name, inlink->w, inlink->h,
+ inlink->sample_aspect_ratio.num,
+ inlink->sample_aspect_ratio.den,
+ ctx->input_pads[out_no].name, outlink->w, outlink->h,
+ outlink->sample_aspect_ratio.num,
+ outlink->sample_aspect_ratio.den);
+ if (!cat->unsafe)
+ return AVERROR(EINVAL);
+ }
+ }
+
+ return 0;
+}
+
+static int push_frame(AVFilterContext *ctx, unsigned in_no, AVFrame *buf)
+{
+ ConcatContext *cat = ctx->priv;
+ unsigned out_no = in_no % ctx->nb_outputs;
+ AVFilterLink * inlink = ctx-> inputs[ in_no];
+ AVFilterLink *outlink = ctx->outputs[out_no];
+ struct concat_in *in = &cat->in[in_no];
+
+ buf->pts = av_rescale_q(buf->pts, inlink->time_base, outlink->time_base);
+ in->pts = buf->pts;
+ in->nb_frames++;
+ /* add duration to input PTS */
+ if (inlink->sample_rate)
+ /* use number of audio samples */
+ in->pts += av_rescale_q(buf->nb_samples,
+ av_make_q(1, inlink->sample_rate),
+ outlink->time_base);
+ else if (in->nb_frames >= 2)
+ /* use mean duration */
+ in->pts = av_rescale(in->pts, in->nb_frames, in->nb_frames - 1);
+
+ buf->pts += cat->delta_ts;
+ return ff_filter_frame(outlink, buf);
+}
+
+static int process_frame(AVFilterLink *inlink, AVFrame *buf)
+{
+ AVFilterContext *ctx = inlink->dst;
+ ConcatContext *cat = ctx->priv;
+ unsigned in_no = FF_INLINK_IDX(inlink);
+
+ if (in_no < cat->cur_idx) {
+ av_log(ctx, AV_LOG_ERROR, "Frame after EOF on input %s\n",
+ ctx->input_pads[in_no].name);
+ av_frame_free(&buf);
+ } else if (in_no >= cat->cur_idx + ctx->nb_outputs) {
+ ff_bufqueue_add(ctx, &cat->in[in_no].queue, buf);
+ } else {
+ return push_frame(ctx, in_no, buf);
+ }
+ return 0;
+}
+
+static AVFrame *get_video_buffer(AVFilterLink *inlink, int w, int h)
+{
+ AVFilterContext *ctx = inlink->dst;
+ unsigned in_no = FF_INLINK_IDX(inlink);
+ AVFilterLink *outlink = ctx->outputs[in_no % ctx->nb_outputs];
+
+ return ff_get_video_buffer(outlink, w, h);
+}
+
+static AVFrame *get_audio_buffer(AVFilterLink *inlink, int nb_samples)
+{
+ AVFilterContext *ctx = inlink->dst;
+ unsigned in_no = FF_INLINK_IDX(inlink);
+ AVFilterLink *outlink = ctx->outputs[in_no % ctx->nb_outputs];
+
+ return ff_get_audio_buffer(outlink, nb_samples);
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
+{
+ return process_frame(inlink, buf);
+}
+
+static void close_input(AVFilterContext *ctx, unsigned in_no)
+{
+ ConcatContext *cat = ctx->priv;
+
+ cat->in[in_no].eof = 1;
+ cat->nb_in_active--;
+ av_log(ctx, AV_LOG_VERBOSE, "EOF on %s, %d streams left in segment.\n",
+ ctx->input_pads[in_no].name, cat->nb_in_active);
+}
+
+static void find_next_delta_ts(AVFilterContext *ctx, int64_t *seg_delta)
+{
+ ConcatContext *cat = ctx->priv;
+ unsigned i = cat->cur_idx;
+ unsigned imax = i + ctx->nb_outputs;
+ int64_t pts;
+
+ pts = cat->in[i++].pts;
+ for (; i < imax; i++)
+ pts = FFMAX(pts, cat->in[i].pts);
+ cat->delta_ts += pts;
+ *seg_delta = pts;
+}
+
+static int send_silence(AVFilterContext *ctx, unsigned in_no, unsigned out_no,
+ int64_t seg_delta)
+{
+ ConcatContext *cat = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[out_no];
+ int64_t base_pts = cat->in[in_no].pts + cat->delta_ts - seg_delta;
+ int64_t nb_samples, sent = 0;
+ int frame_nb_samples, ret;
+ AVRational rate_tb = { 1, ctx->inputs[in_no]->sample_rate };
+ AVFrame *buf;
+
+ if (!rate_tb.den)
+ return AVERROR_BUG;
+ nb_samples = av_rescale_q(seg_delta - cat->in[in_no].pts,
+ outlink->time_base, rate_tb);
+ frame_nb_samples = FFMAX(9600, rate_tb.den / 5); /* arbitrary */
+ while (nb_samples) {
+ frame_nb_samples = FFMIN(frame_nb_samples, nb_samples);
+ buf = ff_get_audio_buffer(outlink, frame_nb_samples);
+ if (!buf)
+ return AVERROR(ENOMEM);
+ av_samples_set_silence(buf->extended_data, 0, frame_nb_samples,
+ outlink->channels, outlink->format);
+ buf->pts = base_pts + av_rescale_q(sent, rate_tb, outlink->time_base);
+ ret = ff_filter_frame(outlink, buf);
+ if (ret < 0)
+ return ret;
+ sent += frame_nb_samples;
+ nb_samples -= frame_nb_samples;
+ }
+ return 0;
+}
+
+static int flush_segment(AVFilterContext *ctx)
+{
+ int ret;
+ ConcatContext *cat = ctx->priv;
+ unsigned str, str_max;
+ int64_t seg_delta;
+
+ find_next_delta_ts(ctx, &seg_delta);
+ cat->cur_idx += ctx->nb_outputs;
+ cat->nb_in_active = ctx->nb_outputs;
+ av_log(ctx, AV_LOG_VERBOSE, "Segment finished at pts=%"PRId64"\n",
+ cat->delta_ts);
+
+ if (cat->cur_idx < ctx->nb_inputs) {
+ /* pad audio streams with silence */
+ str = cat->nb_streams[AVMEDIA_TYPE_VIDEO];
+ str_max = str + cat->nb_streams[AVMEDIA_TYPE_AUDIO];
+ for (; str < str_max; str++) {
+ ret = send_silence(ctx, cat->cur_idx - ctx->nb_outputs + str, str,
+ seg_delta);
+ if (ret < 0)
+ return ret;
+ }
+ /* flush queued buffers */
+ /* possible enhancement: flush in PTS order */
+ str_max = cat->cur_idx + ctx->nb_outputs;
+ for (str = cat->cur_idx; str < str_max; str++) {
+ while (cat->in[str].queue.available) {
+ ret = push_frame(ctx, str, ff_bufqueue_get(&cat->in[str].queue));
+ if (ret < 0)
+ return ret;
+ }
+ }
+ }
+ return 0;
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ ConcatContext *cat = ctx->priv;
+ unsigned out_no = FF_OUTLINK_IDX(outlink);
+ unsigned in_no = out_no + cat->cur_idx;
+ unsigned str, str_max;
+ int ret;
+
+ while (1) {
+ if (in_no >= ctx->nb_inputs)
+ return AVERROR_EOF;
+ if (!cat->in[in_no].eof) {
+ ret = ff_request_frame(ctx->inputs[in_no]);
+ if (ret != AVERROR_EOF)
+ return ret;
+ close_input(ctx, in_no);
+ }
+ /* cycle on all inputs to finish the segment */
+ /* possible enhancement: request in PTS order */
+ str_max = cat->cur_idx + ctx->nb_outputs - 1;
+ for (str = cat->cur_idx; cat->nb_in_active;
+ str = str == str_max ? cat->cur_idx : str + 1) {
+ if (cat->in[str].eof)
+ continue;
+ ret = ff_request_frame(ctx->inputs[str]);
+ if (ret != AVERROR_EOF)
+ return ret;
+ close_input(ctx, str);
+ }
+ ret = flush_segment(ctx);
+ if (ret < 0)
+ return ret;
+ in_no += ctx->nb_outputs;
+ }
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ ConcatContext *cat = ctx->priv;
+ unsigned seg, type, str;
+
+ /* create input pads */
+ for (seg = 0; seg < cat->nb_segments; seg++) {
+ for (type = 0; type < TYPE_ALL; type++) {
+ for (str = 0; str < cat->nb_streams[type]; str++) {
+ AVFilterPad pad = {
+ .type = type,
+ .get_video_buffer = get_video_buffer,
+ .get_audio_buffer = get_audio_buffer,
+ .filter_frame = filter_frame,
+ };
+ pad.name = av_asprintf("in%d:%c%d", seg, "va"[type], str);
+ ff_insert_inpad(ctx, ctx->nb_inputs, &pad);
+ }
+ }
+ }
+ /* create output pads */
+ for (type = 0; type < TYPE_ALL; type++) {
+ for (str = 0; str < cat->nb_streams[type]; str++) {
+ AVFilterPad pad = {
+ .type = type,
+ .config_props = config_output,
+ .request_frame = request_frame,
+ };
+ pad.name = av_asprintf("out:%c%d", "va"[type], str);
+ ff_insert_outpad(ctx, ctx->nb_outputs, &pad);
+ }
+ }
+
+ cat->in = av_calloc(ctx->nb_inputs, sizeof(*cat->in));
+ if (!cat->in)
+ return AVERROR(ENOMEM);
+ cat->nb_in_active = ctx->nb_outputs;
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ ConcatContext *cat = ctx->priv;
+ unsigned i;
+
+ for (i = 0; i < ctx->nb_inputs; i++) {
+ av_freep(&ctx->input_pads[i].name);
+ ff_bufqueue_discard_all(&cat->in[i].queue);
+ }
+ for (i = 0; i < ctx->nb_outputs; i++)
+ av_freep(&ctx->output_pads[i].name);
+ av_freep(&cat->in);
+}
+
+AVFilter ff_avf_concat = {
+ .name = "concat",
+ .description = NULL_IF_CONFIG_SMALL("Concatenate audio and video streams."),
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .priv_size = sizeof(ConcatContext),
+ .inputs = NULL,
+ .outputs = NULL,
+ .priv_class = &concat_class,
+ .flags = AVFILTER_FLAG_DYNAMIC_INPUTS | AVFILTER_FLAG_DYNAMIC_OUTPUTS,
+};
diff --git a/libavfilter/avf_showcqt.c b/libavfilter/avf_showcqt.c
new file mode 100644
index 0000000000..ede56f4b1c
--- /dev/null
+++ b/libavfilter/avf_showcqt.c
@@ -0,0 +1,1577 @@
+/*
+ * Copyright (c) 2014-2015 Muhammad Faiz <mfcc64@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config.h"
+#include "libavcodec/avfft.h"
+#include "libavutil/avassert.h"
+#include "libavutil/opt.h"
+#include "libavutil/xga_font_data.h"
+#include "libavutil/eval.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/time.h"
+#include "avfilter.h"
+#include "internal.h"
+#include "lavfutils.h"
+#include "lswsutils.h"
+
+#if CONFIG_LIBFREETYPE
+#include <ft2build.h>
+#include FT_FREETYPE_H
+#endif
+
+#if CONFIG_LIBFONTCONFIG
+#include <fontconfig/fontconfig.h>
+#endif
+
+#include "avf_showcqt.h"
+
+#define BASEFREQ 20.01523126408007475
+#define ENDFREQ 20495.59681441799654
+#define TLENGTH "384*tc/(384+tc*f)"
+#define TLENGTH_MIN 0.001
+#define VOLUME_MAX 100.0
+#define FONTCOLOR "st(0, (midi(f)-59.5)/12);" \
+ "st(1, if(between(ld(0),0,1), 0.5-0.5*cos(2*PI*ld(0)), 0));" \
+ "r(1-ld(1)) + b(ld(1))"
+#define CSCHEME "1|0.5|0|0|0.5|1"
+#define PTS_STEP 10
+#define PTS_TOLERANCE 1
+
+#define OFFSET(x) offsetof(ShowCQTContext, x)
+#define FLAGS (AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM)
+
+static const AVOption showcqt_options[] = {
+ { "size", "set video size", OFFSET(width), AV_OPT_TYPE_IMAGE_SIZE, { .str = "1920x1080" }, 0, 0, FLAGS },
+ { "s", "set video size", OFFSET(width), AV_OPT_TYPE_IMAGE_SIZE, { .str = "1920x1080" }, 0, 0, FLAGS },
+ { "fps", "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, { .str = "25" }, 1, 1000, FLAGS },
+ { "rate", "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, { .str = "25" }, 1, 1000, FLAGS },
+ { "r", "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, { .str = "25" }, 1, 1000, FLAGS },
+ { "bar_h", "set bargraph height", OFFSET(bar_h), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS },
+ { "axis_h", "set axis height", OFFSET(axis_h), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS },
+ { "sono_h", "set sonogram height", OFFSET(sono_h), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS },
+ { "fullhd", "set fullhd size", OFFSET(fullhd), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, FLAGS },
+ { "sono_v", "set sonogram volume", OFFSET(sono_v), AV_OPT_TYPE_STRING, { .str = "16" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "volume", "set sonogram volume", OFFSET(sono_v), AV_OPT_TYPE_STRING, { .str = "16" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "bar_v", "set bargraph volume", OFFSET(bar_v), AV_OPT_TYPE_STRING, { .str = "sono_v" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "volume2", "set bargraph volume", OFFSET(bar_v), AV_OPT_TYPE_STRING, { .str = "sono_v" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "sono_g", "set sonogram gamma", OFFSET(sono_g), AV_OPT_TYPE_FLOAT, { .dbl = 3.0 }, 1.0, 7.0, FLAGS },
+ { "gamma", "set sonogram gamma", OFFSET(sono_g), AV_OPT_TYPE_FLOAT, { .dbl = 3.0 }, 1.0, 7.0, FLAGS },
+ { "bar_g", "set bargraph gamma", OFFSET(bar_g), AV_OPT_TYPE_FLOAT, { .dbl = 1.0 }, 1.0, 7.0, FLAGS },
+ { "gamma2", "set bargraph gamma", OFFSET(bar_g), AV_OPT_TYPE_FLOAT, { .dbl = 1.0 }, 1.0, 7.0, FLAGS },
+ { "bar_t", "set bar transparency", OFFSET(bar_t), AV_OPT_TYPE_FLOAT, { .dbl = 1.0 }, 0.0, 1.0, FLAGS },
+ { "timeclamp", "set timeclamp", OFFSET(timeclamp), AV_OPT_TYPE_DOUBLE, { .dbl = 0.17 }, 0.002, 1.0, FLAGS },
+ { "tc", "set timeclamp", OFFSET(timeclamp), AV_OPT_TYPE_DOUBLE, { .dbl = 0.17 }, 0.002, 1.0, FLAGS },
+ { "basefreq", "set base frequency", OFFSET(basefreq), AV_OPT_TYPE_DOUBLE, { .dbl = BASEFREQ }, 10.0, 100000.0, FLAGS },
+ { "endfreq", "set end frequency", OFFSET(endfreq), AV_OPT_TYPE_DOUBLE, { .dbl = ENDFREQ }, 10.0, 100000.0, FLAGS },
+ { "coeffclamp", "set coeffclamp", OFFSET(coeffclamp), AV_OPT_TYPE_FLOAT, { .dbl = 1.0 }, 0.1, 10.0, FLAGS },
+ { "tlength", "set tlength", OFFSET(tlength), AV_OPT_TYPE_STRING, { .str = TLENGTH }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "count", "set transform count", OFFSET(count), AV_OPT_TYPE_INT, { .i64 = 6 }, 1, 30, FLAGS },
+ { "fcount", "set frequency count", OFFSET(fcount), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 10, FLAGS },
+ { "fontfile", "set axis font file", OFFSET(fontfile), AV_OPT_TYPE_STRING, { .str = NULL }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "font", "set axis font", OFFSET(font), AV_OPT_TYPE_STRING, { .str = NULL }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "fontcolor", "set font color", OFFSET(fontcolor), AV_OPT_TYPE_STRING, { .str = FONTCOLOR }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "axisfile", "set axis image", OFFSET(axisfile), AV_OPT_TYPE_STRING, { .str = NULL }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "axis", "draw axis", OFFSET(axis), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, FLAGS },
+ { "text", "draw axis", OFFSET(axis), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, FLAGS },
+ { "csp", "set color space", OFFSET(csp), AV_OPT_TYPE_INT, { .i64 = AVCOL_SPC_UNSPECIFIED }, 0, INT_MAX, FLAGS, "csp" },
+ { "unspecified", "unspecified", 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_SPC_UNSPECIFIED }, 0, 0, FLAGS, "csp" },
+ { "bt709", "bt709", 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_SPC_BT709 }, 0, 0, FLAGS, "csp" },
+ { "fcc", "fcc", 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_SPC_FCC }, 0, 0, FLAGS, "csp" },
+ { "bt470bg", "bt470bg", 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_SPC_BT470BG }, 0, 0, FLAGS, "csp" },
+ { "smpte170m", "smpte170m", 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_SPC_SMPTE170M }, 0, 0, FLAGS, "csp" },
+ { "smpte240m", "smpte240m", 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_SPC_SMPTE240M }, 0, 0, FLAGS, "csp" },
+ { "bt2020ncl", "bt2020ncl", 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_SPC_BT2020_NCL }, 0, 0, FLAGS, "csp" },
+ { "cscheme", "set color scheme", OFFSET(cscheme), AV_OPT_TYPE_STRING, { .str = CSCHEME }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(showcqt);
+
+static void common_uninit(ShowCQTContext *s)
+{
+ int k;
+ int level = AV_LOG_DEBUG;
+ int64_t plot_time;
+
+ if (s->fft_time)
+ av_log(s->ctx, level, "fft_time = %16.3f s.\n", s->fft_time * 1e-6);
+ if (s->cqt_time)
+ av_log(s->ctx, level, "cqt_time = %16.3f s.\n", s->cqt_time * 1e-6);
+ if (s->process_cqt_time)
+ av_log(s->ctx, level, "process_cqt_time = %16.3f s.\n", s->process_cqt_time * 1e-6);
+ if (s->update_sono_time)
+ av_log(s->ctx, level, "update_sono_time = %16.3f s.\n", s->update_sono_time * 1e-6);
+ if (s->alloc_time)
+ av_log(s->ctx, level, "alloc_time = %16.3f s.\n", s->alloc_time * 1e-6);
+ if (s->bar_time)
+ av_log(s->ctx, level, "bar_time = %16.3f s.\n", s->bar_time * 1e-6);
+ if (s->axis_time)
+ av_log(s->ctx, level, "axis_time = %16.3f s.\n", s->axis_time * 1e-6);
+ if (s->sono_time)
+ av_log(s->ctx, level, "sono_time = %16.3f s.\n", s->sono_time * 1e-6);
+
+ plot_time = s->fft_time + s->cqt_time + s->process_cqt_time + s->update_sono_time
+ + s->alloc_time + s->bar_time + s->axis_time + s->sono_time;
+ if (plot_time)
+ av_log(s->ctx, level, "plot_time = %16.3f s.\n", plot_time * 1e-6);
+
+ s->fft_time = s->cqt_time = s->process_cqt_time = s->update_sono_time
+ = s->alloc_time = s->bar_time = s->axis_time = s->sono_time = 0;
+ /* axis_frame may be non reference counted frame */
+ if (s->axis_frame && !s->axis_frame->buf[0]) {
+ av_freep(s->axis_frame->data);
+ for (k = 0; k < 4; k++)
+ s->axis_frame->data[k] = NULL;
+ }
+
+ av_frame_free(&s->axis_frame);
+ av_frame_free(&s->sono_frame);
+ av_fft_end(s->fft_ctx);
+ s->fft_ctx = NULL;
+ if (s->coeffs)
+ for (k = 0; k < s->cqt_len; k++)
+ av_freep(&s->coeffs[k].val);
+ av_freep(&s->coeffs);
+ av_freep(&s->fft_data);
+ av_freep(&s->fft_result);
+ av_freep(&s->cqt_result);
+ av_freep(&s->c_buf);
+ av_freep(&s->h_buf);
+ av_freep(&s->rcp_h_buf);
+ av_freep(&s->freq);
+ av_freep(&s->sono_v_buf);
+ av_freep(&s->bar_v_buf);
+}
+
+static double *create_freq_table(double base, double end, int n)
+{
+ double log_base, log_end;
+ double rcp_n = 1.0 / n;
+ double *freq;
+ int x;
+
+ freq = av_malloc_array(n, sizeof(*freq));
+ if (!freq)
+ return NULL;
+
+ log_base = log(base);
+ log_end = log(end);
+ for (x = 0; x < n; x++) {
+ double log_freq = log_base + (x + 0.5) * (log_end - log_base) * rcp_n;
+ freq[x] = exp(log_freq);
+ }
+ return freq;
+}
+
+static double clip_with_log(void *log_ctx, const char *name,
+ double val, double min, double max,
+ double nan_replace, int idx)
+{
+ int level = AV_LOG_WARNING;
+ if (isnan(val)) {
+ av_log(log_ctx, level, "[%d] %s is nan, setting it to %g.\n",
+ idx, name, nan_replace);
+ val = nan_replace;
+ } else if (val < min) {
+ av_log(log_ctx, level, "[%d] %s is too low (%g), setting it to %g.\n",
+ idx, name, val, min);
+ val = min;
+ } else if (val > max) {
+ av_log(log_ctx, level, "[%d] %s it too high (%g), setting it to %g.\n",
+ idx, name, val, max);
+ val = max;
+ }
+ return val;
+}
+
+static double a_weighting(void *p, double f)
+{
+ double ret = 12200.0*12200.0 * (f*f*f*f);
+ ret /= (f*f + 20.6*20.6) * (f*f + 12200.0*12200.0) *
+ sqrt((f*f + 107.7*107.7) * (f*f + 737.9*737.9));
+ return ret;
+}
+
+static double b_weighting(void *p, double f)
+{
+ double ret = 12200.0*12200.0 * (f*f*f);
+ ret /= (f*f + 20.6*20.6) * (f*f + 12200.0*12200.0) * sqrt(f*f + 158.5*158.5);
+ return ret;
+}
+
+static double c_weighting(void *p, double f)
+{
+ double ret = 12200.0*12200.0 * (f*f);
+ ret /= (f*f + 20.6*20.6) * (f*f + 12200.0*12200.0);
+ return ret;
+}
+
+static int init_volume(ShowCQTContext *s)
+{
+ const char *func_names[] = { "a_weighting", "b_weighting", "c_weighting", NULL };
+ const char *sono_names[] = { "timeclamp", "tc", "frequency", "freq", "f", "bar_v", NULL };
+ const char *bar_names[] = { "timeclamp", "tc", "frequency", "freq", "f", "sono_v", NULL };
+ double (*funcs[])(void *, double) = { a_weighting, b_weighting, c_weighting };
+ AVExpr *sono = NULL, *bar = NULL;
+ int x, ret = AVERROR(ENOMEM);
+
+ s->sono_v_buf = av_malloc_array(s->cqt_len, sizeof(*s->sono_v_buf));
+ s->bar_v_buf = av_malloc_array(s->cqt_len, sizeof(*s->bar_v_buf));
+ if (!s->sono_v_buf || !s->bar_v_buf)
+ goto error;
+
+ if ((ret = av_expr_parse(&sono, s->sono_v, sono_names, func_names, funcs, NULL, NULL, 0, s->ctx)) < 0)
+ goto error;
+
+ if ((ret = av_expr_parse(&bar, s->bar_v, bar_names, func_names, funcs, NULL, NULL, 0, s->ctx)) < 0)
+ goto error;
+
+ for (x = 0; x < s->cqt_len; x++) {
+ double vars[] = { s->timeclamp, s->timeclamp, s->freq[x], s->freq[x], s->freq[x], 0.0 };
+ double vol = clip_with_log(s->ctx, "sono_v", av_expr_eval(sono, vars, NULL), 0.0, VOLUME_MAX, 0.0, x);
+ vars[5] = vol;
+ vol = clip_with_log(s->ctx, "bar_v", av_expr_eval(bar, vars, NULL), 0.0, VOLUME_MAX, 0.0, x);
+ s->bar_v_buf[x] = vol * vol;
+ vars[5] = vol;
+ vol = clip_with_log(s->ctx, "sono_v", av_expr_eval(sono, vars, NULL), 0.0, VOLUME_MAX, 0.0, x);
+ s->sono_v_buf[x] = vol * vol;
+ }
+ av_expr_free(sono);
+ av_expr_free(bar);
+ return 0;
+
+error:
+ av_freep(&s->sono_v_buf);
+ av_freep(&s->bar_v_buf);
+ av_expr_free(sono);
+ av_expr_free(bar);
+ return ret;
+}
+
+static void cqt_calc(FFTComplex *dst, const FFTComplex *src, const Coeffs *coeffs,
+ int len, int fft_len)
+{
+ int k, x, i, j;
+ for (k = 0; k < len; k++) {
+ FFTComplex l, r, a = {0,0}, b = {0,0};
+
+ for (x = 0; x < coeffs[k].len; x++) {
+ FFTSample u = coeffs[k].val[x];
+ i = coeffs[k].start + x;
+ j = fft_len - i;
+ a.re += u * src[i].re;
+ a.im += u * src[i].im;
+ b.re += u * src[j].re;
+ b.im += u * src[j].im;
+ }
+
+ /* separate left and right, (and multiply by 2.0) */
+ l.re = a.re + b.re;
+ l.im = a.im - b.im;
+ r.re = b.im + a.im;
+ r.im = b.re - a.re;
+ dst[k].re = l.re * l.re + l.im * l.im;
+ dst[k].im = r.re * r.re + r.im * r.im;
+ }
+}
+
+static int init_cqt(ShowCQTContext *s)
+{
+ const char *var_names[] = { "timeclamp", "tc", "frequency", "freq", "f", NULL };
+ AVExpr *expr = NULL;
+ int rate = s->ctx->inputs[0]->sample_rate;
+ int nb_cqt_coeffs = 0;
+ int k, x, ret;
+
+ if ((ret = av_expr_parse(&expr, s->tlength, var_names, NULL, NULL, NULL, NULL, 0, s->ctx)) < 0)
+ goto error;
+
+ ret = AVERROR(ENOMEM);
+ if (!(s->coeffs = av_calloc(s->cqt_len, sizeof(*s->coeffs))))
+ goto error;
+
+ for (k = 0; k < s->cqt_len; k++) {
+ double vars[] = { s->timeclamp, s->timeclamp, s->freq[k], s->freq[k], s->freq[k] };
+ double flen, center, tlength;
+ int start, end, m = k;
+
+ if (s->freq[k] > 0.5 * rate)
+ continue;
+ tlength = clip_with_log(s->ctx, "tlength", av_expr_eval(expr, vars, NULL),
+ TLENGTH_MIN, s->timeclamp, s->timeclamp, k);
+
+ flen = 8.0 * s->fft_len / (tlength * rate);
+ center = s->freq[k] * s->fft_len / rate;
+ start = FFMAX(0, ceil(center - 0.5 * flen));
+ end = FFMIN(s->fft_len, floor(center + 0.5 * flen));
+
+ s->coeffs[m].start = start & ~(s->cqt_align - 1);
+ s->coeffs[m].len = (end | (s->cqt_align - 1)) + 1 - s->coeffs[m].start;
+ nb_cqt_coeffs += s->coeffs[m].len;
+ if (!(s->coeffs[m].val = av_calloc(s->coeffs[m].len, sizeof(*s->coeffs[m].val))))
+ goto error;
+
+ for (x = start; x <= end; x++) {
+ int sign = (x & 1) ? (-1) : 1;
+ double y = 2.0 * M_PI * (x - center) * (1.0 / flen);
+ /* nuttall window */
+ double w = 0.355768 + 0.487396 * cos(y) + 0.144232 * cos(2*y) + 0.012604 * cos(3*y);
+ w *= sign * (1.0 / s->fft_len);
+ s->coeffs[m].val[x - s->coeffs[m].start] = w;
+ }
+
+ if (s->permute_coeffs)
+ s->permute_coeffs(s->coeffs[m].val, s->coeffs[m].len);
+ }
+
+ av_expr_free(expr);
+ av_log(s->ctx, AV_LOG_INFO, "nb_cqt_coeffs = %d.\n", nb_cqt_coeffs);
+ return 0;
+
+error:
+ av_expr_free(expr);
+ if (s->coeffs)
+ for (k = 0; k < s->cqt_len; k++)
+ av_freep(&s->coeffs[k].val);
+ av_freep(&s->coeffs);
+ return ret;
+}
+
+static AVFrame *alloc_frame_empty(enum AVPixelFormat format, int w, int h)
+{
+ AVFrame *out;
+ out = av_frame_alloc();
+ if (!out)
+ return NULL;
+ out->format = format;
+ out->width = w;
+ out->height = h;
+ if (av_frame_get_buffer(out, 32) < 0) {
+ av_frame_free(&out);
+ return NULL;
+ }
+ if (format == AV_PIX_FMT_RGB24 || format == AV_PIX_FMT_RGBA) {
+ memset(out->data[0], 0, out->linesize[0] * h);
+ } else {
+ int hh = (format == AV_PIX_FMT_YUV420P || format == AV_PIX_FMT_YUVA420P) ? h / 2 : h;
+ memset(out->data[0], 16, out->linesize[0] * h);
+ memset(out->data[1], 128, out->linesize[1] * hh);
+ memset(out->data[2], 128, out->linesize[2] * hh);
+ if (out->data[3])
+ memset(out->data[3], 0, out->linesize[3] * h);
+ }
+ return out;
+}
+
+static enum AVPixelFormat convert_axis_pixel_format(enum AVPixelFormat format)
+{
+ switch (format) {
+ case AV_PIX_FMT_RGB24: format = AV_PIX_FMT_RGBA; break;
+ case AV_PIX_FMT_YUV444P:
+ case AV_PIX_FMT_YUV422P:
+ case AV_PIX_FMT_YUV420P: format = AV_PIX_FMT_YUVA444P; break;
+ }
+ return format;
+}
+
+static int init_axis_empty(ShowCQTContext *s)
+{
+ if (!(s->axis_frame = alloc_frame_empty(convert_axis_pixel_format(s->format), s->width, s->axis_h)))
+ return AVERROR(ENOMEM);
+ return 0;
+}
+
+static int init_axis_from_file(ShowCQTContext *s)
+{
+ uint8_t *tmp_data[4] = { NULL };
+ int tmp_linesize[4];
+ enum AVPixelFormat tmp_format;
+ int tmp_w, tmp_h, ret;
+
+ if ((ret = ff_load_image(tmp_data, tmp_linesize, &tmp_w, &tmp_h, &tmp_format,
+ s->axisfile, s->ctx)) < 0)
+ goto error;
+
+ ret = AVERROR(ENOMEM);
+ if (!(s->axis_frame = av_frame_alloc()))
+ goto error;
+
+ if ((ret = ff_scale_image(s->axis_frame->data, s->axis_frame->linesize, s->width, s->axis_h,
+ convert_axis_pixel_format(s->format), tmp_data, tmp_linesize, tmp_w, tmp_h,
+ tmp_format, s->ctx)) < 0)
+ goto error;
+
+ s->axis_frame->width = s->width;
+ s->axis_frame->height = s->axis_h;
+ s->axis_frame->format = convert_axis_pixel_format(s->format);
+ av_freep(tmp_data);
+ return 0;
+
+error:
+ av_frame_free(&s->axis_frame);
+ av_freep(tmp_data);
+ return ret;
+}
+
+static double midi(void *p, double f)
+{
+ return log2(f/440.0) * 12.0 + 69.0;
+}
+
+static double r_func(void *p, double x)
+{
+ x = av_clipd(x, 0.0, 1.0);
+ return lrint(x*255.0) << 16;
+}
+
+static double g_func(void *p, double x)
+{
+ x = av_clipd(x, 0.0, 1.0);
+ return lrint(x*255.0) << 8;
+}
+
+static double b_func(void *p, double x)
+{
+ x = av_clipd(x, 0.0, 1.0);
+ return lrint(x*255.0);
+}
+
+static int init_axis_color(ShowCQTContext *s, AVFrame *tmp, int half)
+{
+ const char *var_names[] = { "timeclamp", "tc", "frequency", "freq", "f", NULL };
+ const char *func_names[] = { "midi", "r", "g", "b", NULL };
+ double (*funcs[])(void *, double) = { midi, r_func, g_func, b_func };
+ AVExpr *expr = NULL;
+ double *freq = NULL;
+ int x, xs, y, ret;
+ int width = half ? 1920/2 : 1920, height = half ? 16 : 32;
+ int step = half ? 2 : 1;
+
+ if (s->basefreq != (double) BASEFREQ || s->endfreq != (double) ENDFREQ) {
+ av_log(s->ctx, AV_LOG_WARNING, "font axis rendering is not implemented in non-default frequency range,"
+ " please use axisfile option instead.\n");
+ return AVERROR(EINVAL);
+ }
+
+ if (s->cqt_len == 1920)
+ freq = s->freq;
+ else if (!(freq = create_freq_table(s->basefreq, s->endfreq, 1920)))
+ return AVERROR(ENOMEM);
+
+ if ((ret = av_expr_parse(&expr, s->fontcolor, var_names, func_names, funcs, NULL, NULL, 0, s->ctx)) < 0) {
+ if (freq != s->freq)
+ av_freep(&freq);
+ return ret;
+ }
+
+ for (x = 0, xs = 0; x < width; x++, xs += step) {
+ double vars[] = { s->timeclamp, s->timeclamp, freq[xs], freq[xs], freq[xs] };
+ int color = (int) av_expr_eval(expr, vars, NULL);
+ uint8_t r = (color >> 16) & 0xFF, g = (color >> 8) & 0xFF, b = color & 0xFF;
+ uint8_t *data = tmp->data[0];
+ int linesize = tmp->linesize[0];
+ for (y = 0; y < height; y++) {
+ data[linesize * y + 4 * x] = r;
+ data[linesize * y + 4 * x + 1] = g;
+ data[linesize * y + 4 * x + 2] = b;
+ }
+ }
+
+ av_expr_free(expr);
+ if (freq != s->freq)
+ av_freep(&freq);
+ return 0;
+}
+
+static int render_freetype(ShowCQTContext *s, AVFrame *tmp, char *fontfile)
+{
+#if CONFIG_LIBFREETYPE
+ const char *str = "EF G A BC D ";
+ uint8_t *data = tmp->data[0];
+ int linesize = tmp->linesize[0];
+ FT_Library lib = NULL;
+ FT_Face face = NULL;
+ int font_width = 16, font_height = 32;
+ int font_repeat = font_width * 12;
+ int linear_hori_advance = font_width * 65536;
+ int non_monospace_warning = 0;
+ int x;
+
+ if (!fontfile)
+ return AVERROR(EINVAL);
+
+ if (FT_Init_FreeType(&lib))
+ goto fail;
+
+ if (FT_New_Face(lib, fontfile, 0, &face))
+ goto fail;
+
+ if (FT_Set_Char_Size(face, 16*64, 0, 0, 0))
+ goto fail;
+
+ if (FT_Load_Char(face, 'A', FT_LOAD_RENDER))
+ goto fail;
+
+ if (FT_Set_Char_Size(face, 16*64 * linear_hori_advance / face->glyph->linearHoriAdvance, 0, 0, 0))
+ goto fail;
+
+ for (x = 0; x < 12; x++) {
+ int sx, sy, rx, bx, by, dx, dy;
+
+ if (str[x] == ' ')
+ continue;
+
+ if (FT_Load_Char(face, str[x], FT_LOAD_RENDER))
+ goto fail;
+
+ if (face->glyph->advance.x != font_width*64 && !non_monospace_warning) {
+ av_log(s->ctx, AV_LOG_WARNING, "font is not monospace.\n");
+ non_monospace_warning = 1;
+ }
+
+ sy = font_height - 8 - face->glyph->bitmap_top;
+ for (rx = 0; rx < 10; rx++) {
+ sx = rx * font_repeat + x * font_width + face->glyph->bitmap_left;
+ for (by = 0; by < face->glyph->bitmap.rows; by++) {
+ dy = by + sy;
+ if (dy < 0)
+ continue;
+ if (dy >= font_height)
+ break;
+
+ for (bx = 0; bx < face->glyph->bitmap.width; bx++) {
+ dx = bx + sx;
+ if (dx < 0)
+ continue;
+ if (dx >= 1920)
+ break;
+ data[dy*linesize+4*dx+3] = face->glyph->bitmap.buffer[by*face->glyph->bitmap.width+bx];
+ }
+ }
+ }
+ }
+
+ FT_Done_Face(face);
+ FT_Done_FreeType(lib);
+ return 0;
+
+fail:
+ av_log(s->ctx, AV_LOG_WARNING, "error while loading freetype font.\n");
+ FT_Done_Face(face);
+ FT_Done_FreeType(lib);
+ return AVERROR(EINVAL);
+#else
+ if (fontfile)
+ av_log(s->ctx, AV_LOG_WARNING, "freetype is not available, ignoring fontfile option.\n");
+ return AVERROR(EINVAL);
+#endif
+}
+
+static int render_fontconfig(ShowCQTContext *s, AVFrame *tmp, char* font)
+{
+#if CONFIG_LIBFONTCONFIG
+ FcConfig *fontconfig;
+ FcPattern *pat, *best;
+ FcResult result = FcResultMatch;
+ char *filename;
+ int i, ret;
+
+ if (!font)
+ return AVERROR(EINVAL);
+
+ for (i = 0; font[i]; i++) {
+ if (font[i] == '|')
+ font[i] = ':';
+ }
+
+ if (!(fontconfig = FcInitLoadConfigAndFonts())) {
+ av_log(s->ctx, AV_LOG_ERROR, "impossible to init fontconfig.\n");
+ return AVERROR_UNKNOWN;
+ }
+
+ if (!(pat = FcNameParse((uint8_t *)font))) {
+ av_log(s->ctx, AV_LOG_ERROR, "could not parse fontconfig pat.\n");
+ FcConfigDestroy(fontconfig);
+ return AVERROR(EINVAL);
+ }
+
+ FcDefaultSubstitute(pat);
+
+ if (!FcConfigSubstitute(fontconfig, pat, FcMatchPattern)) {
+ av_log(s->ctx, AV_LOG_ERROR, "could not substitue fontconfig options.\n");
+ FcPatternDestroy(pat);
+ FcConfigDestroy(fontconfig);
+ return AVERROR(ENOMEM);
+ }
+
+ best = FcFontMatch(fontconfig, pat, &result);
+ FcPatternDestroy(pat);
+
+ ret = AVERROR(EINVAL);
+ if (!best || result != FcResultMatch) {
+ av_log(s->ctx, AV_LOG_ERROR, "cannot find a valid font for %s.\n", font);
+ goto fail;
+ }
+
+ if (FcPatternGetString(best, FC_FILE, 0, (FcChar8 **)&filename) != FcResultMatch) {
+ av_log(s->ctx, AV_LOG_ERROR, "no file path for %s\n", font);
+ goto fail;
+ }
+
+ ret = render_freetype(s, tmp, filename);
+
+fail:
+ FcPatternDestroy(best);
+ FcConfigDestroy(fontconfig);
+ return ret;
+#else
+ if (font)
+ av_log(s->ctx, AV_LOG_WARNING, "fontconfig is not available, ignoring font option.\n");
+ return AVERROR(EINVAL);
+#endif
+}
+
+static int render_default_font(AVFrame *tmp)
+{
+ const char *str = "EF G A BC D ";
+ int x, u, v, mask;
+ uint8_t *data = tmp->data[0];
+ int linesize = tmp->linesize[0];
+ int width = 1920/2, height = 16;
+
+ for (x = 0; x < width; x += width/10) {
+ uint8_t *startptr = data + 4 * x;
+ for (u = 0; u < 12; u++) {
+ for (v = 0; v < height; v++) {
+ uint8_t *p = startptr + v * linesize + height/2 * 4 * u;
+ for (mask = 0x80; mask; mask >>= 1, p += 4) {
+ if (mask & avpriv_vga16_font[str[u] * 16 + v])
+ p[3] = 255;
+ else
+ p[3] = 0;
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int init_axis_from_font(ShowCQTContext *s)
+{
+ AVFrame *tmp = NULL;
+ int ret = AVERROR(ENOMEM);
+ int width = 1920, height = 32;
+ int default_font = 0;
+
+ if (!(tmp = alloc_frame_empty(AV_PIX_FMT_RGBA, width, height)))
+ goto fail;
+
+ if (!(s->axis_frame = av_frame_alloc()))
+ goto fail;
+
+ if (render_freetype(s, tmp, s->fontfile) < 0 &&
+ render_fontconfig(s, tmp, s->font) < 0 &&
+ (default_font = 1, ret = render_default_font(tmp)) < 0)
+ goto fail;
+
+ if (default_font)
+ width /= 2, height /= 2;
+
+ if ((ret = init_axis_color(s, tmp, default_font)) < 0)
+ goto fail;
+
+ if ((ret = ff_scale_image(s->axis_frame->data, s->axis_frame->linesize, s->width, s->axis_h,
+ convert_axis_pixel_format(s->format), tmp->data, tmp->linesize,
+ width, height, AV_PIX_FMT_RGBA, s->ctx)) < 0)
+ goto fail;
+
+ av_frame_free(&tmp);
+ s->axis_frame->width = s->width;
+ s->axis_frame->height = s->axis_h;
+ s->axis_frame->format = convert_axis_pixel_format(s->format);
+ return 0;
+
+fail:
+ av_frame_free(&tmp);
+ av_frame_free(&s->axis_frame);
+ return ret;
+}
+
+static float calculate_gamma(float v, float g)
+{
+ if (g == 1.0f)
+ return v;
+ if (g == 2.0f)
+ return sqrtf(v);
+ if (g == 3.0f)
+ return cbrtf(v);
+ if (g == 4.0f)
+ return sqrtf(sqrtf(v));
+ return expf(logf(v) / g);
+}
+
+static void rgb_from_cqt(ColorFloat *c, const FFTComplex *v, float g, int len, float cscheme[6])
+{
+ int x;
+ for (x = 0; x < len; x++) {
+ c[x].rgb.r = 255.0f * calculate_gamma(FFMIN(1.0f, cscheme[0] * v[x].re + cscheme[3] * v[x].im), g);
+ c[x].rgb.g = 255.0f * calculate_gamma(FFMIN(1.0f, cscheme[1] * v[x].re + cscheme[4] * v[x].im), g);
+ c[x].rgb.b = 255.0f * calculate_gamma(FFMIN(1.0f, cscheme[2] * v[x].re + cscheme[5] * v[x].im), g);
+ }
+}
+
+static void yuv_from_cqt(ColorFloat *c, const FFTComplex *v, float gamma, int len, float cm[3][3], float cscheme[6])
+{
+ int x;
+ for (x = 0; x < len; x++) {
+ float r, g, b;
+ r = calculate_gamma(FFMIN(1.0f, cscheme[0] * v[x].re + cscheme[3] * v[x].im), gamma);
+ g = calculate_gamma(FFMIN(1.0f, cscheme[1] * v[x].re + cscheme[4] * v[x].im), gamma);
+ b = calculate_gamma(FFMIN(1.0f, cscheme[2] * v[x].re + cscheme[5] * v[x].im), gamma);
+ c[x].yuv.y = cm[0][0] * r + cm[0][1] * g + cm[0][2] * b;
+ c[x].yuv.u = cm[1][0] * r + cm[1][1] * g + cm[1][2] * b;
+ c[x].yuv.v = cm[2][0] * r + cm[2][1] * g + cm[2][2] * b;
+ }
+}
+
+static void draw_bar_rgb(AVFrame *out, const float *h, const float *rcp_h,
+ const ColorFloat *c, int bar_h, float bar_t)
+{
+ int x, y, w = out->width;
+ float mul, ht, rcp_bar_h = 1.0f / bar_h, rcp_bar_t = 1.0f / bar_t;
+ uint8_t *v = out->data[0], *lp;
+ int ls = out->linesize[0];
+
+ for (y = 0; y < bar_h; y++) {
+ ht = (bar_h - y) * rcp_bar_h;
+ lp = v + y * ls;
+ for (x = 0; x < w; x++) {
+ if (h[x] <= ht) {
+ *lp++ = 0;
+ *lp++ = 0;
+ *lp++ = 0;
+ } else {
+ mul = (h[x] - ht) * rcp_h[x];
+ mul = (mul < bar_t) ? (mul * rcp_bar_t) : 1.0f;
+ *lp++ = lrintf(mul * c[x].rgb.r);
+ *lp++ = lrintf(mul * c[x].rgb.g);
+ *lp++ = lrintf(mul * c[x].rgb.b);
+ }
+ }
+ }
+}
+
+#define DRAW_BAR_WITH_CHROMA(x) \
+do { \
+ if (h[x] <= ht) { \
+ *lpy++ = 16; \
+ *lpu++ = 128; \
+ *lpv++ = 128; \
+ } else { \
+ mul = (h[x] - ht) * rcp_h[x]; \
+ mul = (mul < bar_t) ? (mul * rcp_bar_t) : 1.0f; \
+ *lpy++ = lrintf(mul * c[x].yuv.y + 16.0f); \
+ *lpu++ = lrintf(mul * c[x].yuv.u + 128.0f); \
+ *lpv++ = lrintf(mul * c[x].yuv.v + 128.0f); \
+ } \
+} while (0)
+
+#define DRAW_BAR_WITHOUT_CHROMA(x) \
+do { \
+ if (h[x] <= ht) { \
+ *lpy++ = 16; \
+ } else { \
+ mul = (h[x] - ht) * rcp_h[x]; \
+ mul = (mul < bar_t) ? (mul * rcp_bar_t) : 1.0f; \
+ *lpy++ = lrintf(mul * c[x].yuv.y + 16.0f); \
+ } \
+} while (0)
+
+static void draw_bar_yuv(AVFrame *out, const float *h, const float *rcp_h,
+ const ColorFloat *c, int bar_h, float bar_t)
+{
+ int x, y, yh, w = out->width;
+ float mul, ht, rcp_bar_h = 1.0f / bar_h, rcp_bar_t = 1.0f / bar_t;
+ uint8_t *vy = out->data[0], *vu = out->data[1], *vv = out->data[2];
+ uint8_t *lpy, *lpu, *lpv;
+ int lsy = out->linesize[0], lsu = out->linesize[1], lsv = out->linesize[2];
+ int fmt = out->format;
+
+ for (y = 0; y < bar_h; y += 2) {
+ yh = (fmt == AV_PIX_FMT_YUV420P) ? y / 2 : y;
+ ht = (bar_h - y) * rcp_bar_h;
+ lpy = vy + y * lsy;
+ lpu = vu + yh * lsu;
+ lpv = vv + yh * lsv;
+ if (fmt == AV_PIX_FMT_YUV444P) {
+ for (x = 0; x < w; x += 2) {
+ DRAW_BAR_WITH_CHROMA(x);
+ DRAW_BAR_WITH_CHROMA(x+1);
+ }
+ } else {
+ for (x = 0; x < w; x += 2) {
+ DRAW_BAR_WITH_CHROMA(x);
+ DRAW_BAR_WITHOUT_CHROMA(x+1);
+ }
+ }
+
+ ht = (bar_h - (y+1)) * rcp_bar_h;
+ lpy = vy + (y+1) * lsy;
+ lpu = vu + (y+1) * lsu;
+ lpv = vv + (y+1) * lsv;
+ if (fmt == AV_PIX_FMT_YUV444P) {
+ for (x = 0; x < w; x += 2) {
+ DRAW_BAR_WITH_CHROMA(x);
+ DRAW_BAR_WITH_CHROMA(x+1);
+ }
+ } else if (fmt == AV_PIX_FMT_YUV422P) {
+ for (x = 0; x < w; x += 2) {
+ DRAW_BAR_WITH_CHROMA(x);
+ DRAW_BAR_WITHOUT_CHROMA(x+1);
+ }
+ } else {
+ for (x = 0; x < w; x += 2) {
+ DRAW_BAR_WITHOUT_CHROMA(x);
+ DRAW_BAR_WITHOUT_CHROMA(x+1);
+ }
+ }
+ }
+}
+
+static void draw_axis_rgb(AVFrame *out, AVFrame *axis, const ColorFloat *c, int off)
+{
+ int x, y, w = axis->width, h = axis->height;
+ float a, rcp_255 = 1.0f / 255.0f;
+ uint8_t *lp, *lpa;
+
+ for (y = 0; y < h; y++) {
+ lp = out->data[0] + (off + y) * out->linesize[0];
+ lpa = axis->data[0] + y * axis->linesize[0];
+ for (x = 0; x < w; x++) {
+ if (!lpa[3]) {
+ *lp++ = lrintf(c[x].rgb.r);
+ *lp++ = lrintf(c[x].rgb.g);
+ *lp++ = lrintf(c[x].rgb.b);
+ } else if (lpa[3] == 255) {
+ *lp++ = lpa[0];
+ *lp++ = lpa[1];
+ *lp++ = lpa[2];
+ } else {
+ a = rcp_255 * lpa[3];
+ *lp++ = lrintf(a * lpa[0] + (1.0f - a) * c[x].rgb.r);
+ *lp++ = lrintf(a * lpa[1] + (1.0f - a) * c[x].rgb.g);
+ *lp++ = lrintf(a * lpa[2] + (1.0f - a) * c[x].rgb.b);
+ }
+ lpa += 4;
+ }
+ }
+}
+
+#define BLEND_WITH_CHROMA(c) \
+do { \
+ if (!*lpaa) { \
+ *lpy = lrintf(c.yuv.y + 16.0f); \
+ *lpu = lrintf(c.yuv.u + 128.0f); \
+ *lpv = lrintf(c.yuv.v + 128.0f); \
+ } else if (255 == *lpaa) { \
+ *lpy = *lpay; \
+ *lpu = *lpau; \
+ *lpv = *lpav; \
+ } else { \
+ float a = (1.0f/255.0f) * (*lpaa); \
+ *lpy = lrintf(a * (*lpay) + (1.0f - a) * (c.yuv.y + 16.0f)); \
+ *lpu = lrintf(a * (*lpau) + (1.0f - a) * (c.yuv.u + 128.0f)); \
+ *lpv = lrintf(a * (*lpav) + (1.0f - a) * (c.yuv.v + 128.0f)); \
+ } \
+ lpy++; lpu++; lpv++; \
+ lpay++; lpau++; lpav++; lpaa++; \
+} while (0)
+
+#define BLEND_WITHOUT_CHROMA(c, alpha_inc) \
+do { \
+ if (!*lpaa) { \
+ *lpy = lrintf(c.yuv.y + 16.0f); \
+ } else if (255 == *lpaa) { \
+ *lpy = *lpay; \
+ } else { \
+ float a = (1.0f/255.0f) * (*lpaa); \
+ *lpy = lrintf(a * (*lpay) + (1.0f - a) * (c.yuv.y + 16.0f)); \
+ } \
+ lpy++; \
+ lpay++; lpaa += alpha_inc; \
+} while (0)
+
+#define BLEND_CHROMA2(c) \
+do { \
+ if (!lpaa[0] && !lpaa[1]) { \
+ *lpu = lrintf(c.yuv.u + 128.0f); \
+ *lpv = lrintf(c.yuv.v + 128.0f); \
+ } else if (255 == lpaa[0] && 255 == lpaa[1]) { \
+ *lpu = *lpau; *lpv = *lpav; \
+ } else { \
+ float a0 = (0.5f/255.0f) * lpaa[0]; \
+ float a1 = (0.5f/255.0f) * lpaa[1]; \
+ float b = 1.0f - a0 - a1; \
+ *lpu = lrintf(a0 * lpau[0] + a1 * lpau[1] + b * (c.yuv.u + 128.0f)); \
+ *lpv = lrintf(a0 * lpav[0] + a1 * lpav[1] + b * (c.yuv.v + 128.0f)); \
+ } \
+ lpau += 2; lpav += 2; lpaa++; lpu++; lpv++; \
+} while (0)
+
+#define BLEND_CHROMA2x2(c) \
+do { \
+ if (!lpaa[0] && !lpaa[1] && !lpaa[lsaa] && !lpaa[lsaa+1]) { \
+ *lpu = lrintf(c.yuv.u + 128.0f); \
+ *lpv = lrintf(c.yuv.v + 128.0f); \
+ } else if (255 == lpaa[0] && 255 == lpaa[1] && \
+ 255 == lpaa[lsaa] && 255 == lpaa[lsaa+1]) { \
+ *lpu = *lpau; *lpv = *lpav; \
+ } else { \
+ float a0 = (0.25f/255.0f) * lpaa[0]; \
+ float a1 = (0.25f/255.0f) * lpaa[1]; \
+ float a2 = (0.25f/255.0f) * lpaa[lsaa]; \
+ float a3 = (0.25f/255.0f) * lpaa[lsaa+1]; \
+ float b = 1.0f - a0 - a1 - a2 - a3; \
+ *lpu = lrintf(a0 * lpau[0] + a1 * lpau[1] + a2 * lpau[lsau] + a3 * lpau[lsau+1] \
+ + b * (c.yuv.u + 128.0f)); \
+ *lpv = lrintf(a0 * lpav[0] + a1 * lpav[1] + a2 * lpav[lsav] + a3 * lpav[lsav+1] \
+ + b * (c.yuv.v + 128.0f)); \
+ } \
+ lpau += 2; lpav += 2; lpaa++; lpu++; lpv++; \
+} while (0)
+
+static void draw_axis_yuv(AVFrame *out, AVFrame *axis, const ColorFloat *c, int off)
+{
+ int fmt = out->format, x, y, yh, w = axis->width, h = axis->height;
+ int offh = (fmt == AV_PIX_FMT_YUV420P) ? off / 2 : off;
+ uint8_t *vy = out->data[0], *vu = out->data[1], *vv = out->data[2];
+ uint8_t *vay = axis->data[0], *vau = axis->data[1], *vav = axis->data[2], *vaa = axis->data[3];
+ int lsy = out->linesize[0], lsu = out->linesize[1], lsv = out->linesize[2];
+ int lsay = axis->linesize[0], lsau = axis->linesize[1], lsav = axis->linesize[2], lsaa = axis->linesize[3];
+ uint8_t *lpy, *lpu, *lpv, *lpay, *lpau, *lpav, *lpaa;
+
+ for (y = 0; y < h; y += 2) {
+ yh = (fmt == AV_PIX_FMT_YUV420P) ? y / 2 : y;
+ lpy = vy + (off + y) * lsy;
+ lpu = vu + (offh + yh) * lsu;
+ lpv = vv + (offh + yh) * lsv;
+ lpay = vay + y * lsay;
+ lpau = vau + y * lsau;
+ lpav = vav + y * lsav;
+ lpaa = vaa + y * lsaa;
+ if (fmt == AV_PIX_FMT_YUV444P) {
+ for (x = 0; x < w; x += 2) {
+ BLEND_WITH_CHROMA(c[x]);
+ BLEND_WITH_CHROMA(c[x+1]);
+ }
+ } else if (fmt == AV_PIX_FMT_YUV422P) {
+ for (x = 0; x < w; x += 2) {
+ BLEND_WITHOUT_CHROMA(c[x], 0);
+ BLEND_CHROMA2(c[x]);
+ BLEND_WITHOUT_CHROMA(c[x+1], 1);
+ }
+ } else {
+ for (x = 0; x < w; x += 2) {
+ BLEND_WITHOUT_CHROMA(c[x], 0);
+ BLEND_CHROMA2x2(c[x]);
+ BLEND_WITHOUT_CHROMA(c[x+1], 1);
+ }
+ }
+
+ lpy = vy + (off + y + 1) * lsy;
+ lpu = vu + (off + y + 1) * lsu;
+ lpv = vv + (off + y + 1) * lsv;
+ lpay = vay + (y + 1) * lsay;
+ lpau = vau + (y + 1) * lsau;
+ lpav = vav + (y + 1) * lsav;
+ lpaa = vaa + (y + 1) * lsaa;
+ if (fmt == AV_PIX_FMT_YUV444P) {
+ for (x = 0; x < w; x += 2) {
+ BLEND_WITH_CHROMA(c[x]);
+ BLEND_WITH_CHROMA(c[x+1]);
+ }
+ } else if (fmt == AV_PIX_FMT_YUV422P) {
+ for (x = 0; x < w; x += 2) {
+ BLEND_WITHOUT_CHROMA(c[x], 0);
+ BLEND_CHROMA2(c[x]);
+ BLEND_WITHOUT_CHROMA(c[x+1], 1);
+ }
+ } else {
+ for (x = 0; x < w; x += 2) {
+ BLEND_WITHOUT_CHROMA(c[x], 1);
+ BLEND_WITHOUT_CHROMA(c[x+1], 1);
+ }
+ }
+ }
+}
+
+static void draw_sono(AVFrame *out, AVFrame *sono, int off, int idx)
+{
+ int fmt = out->format, h = sono->height;
+ int nb_planes = (fmt == AV_PIX_FMT_RGB24) ? 1 : 3;
+ int offh = (fmt == AV_PIX_FMT_YUV420P) ? off / 2 : off;
+ int inc = (fmt == AV_PIX_FMT_YUV420P) ? 2 : 1;
+ int ls, i, y, yh;
+
+ ls = FFMIN(out->linesize[0], sono->linesize[0]);
+ for (y = 0; y < h; y++) {
+ memcpy(out->data[0] + (off + y) * out->linesize[0],
+ sono->data[0] + (idx + y) % h * sono->linesize[0], ls);
+ }
+
+ for (i = 1; i < nb_planes; i++) {
+ ls = FFMIN(out->linesize[i], sono->linesize[i]);
+ for (y = 0; y < h; y += inc) {
+ yh = (fmt == AV_PIX_FMT_YUV420P) ? y / 2 : y;
+ memcpy(out->data[i] + (offh + yh) * out->linesize[i],
+ sono->data[i] + (idx + y) % h * sono->linesize[i], ls);
+ }
+ }
+}
+
+static void update_sono_rgb(AVFrame *sono, const ColorFloat *c, int idx)
+{
+ int x, w = sono->width;
+ uint8_t *lp = sono->data[0] + idx * sono->linesize[0];
+
+ for (x = 0; x < w; x++) {
+ *lp++ = lrintf(c[x].rgb.r);
+ *lp++ = lrintf(c[x].rgb.g);
+ *lp++ = lrintf(c[x].rgb.b);
+ }
+}
+
+static void update_sono_yuv(AVFrame *sono, const ColorFloat *c, int idx)
+{
+ int x, fmt = sono->format, w = sono->width;
+ uint8_t *lpy = sono->data[0] + idx * sono->linesize[0];
+ uint8_t *lpu = sono->data[1] + idx * sono->linesize[1];
+ uint8_t *lpv = sono->data[2] + idx * sono->linesize[2];
+
+ for (x = 0; x < w; x += 2) {
+ *lpy++ = lrintf(c[x].yuv.y + 16.0f);
+ *lpu++ = lrintf(c[x].yuv.u + 128.0f);
+ *lpv++ = lrintf(c[x].yuv.v + 128.0f);
+ *lpy++ = lrintf(c[x+1].yuv.y + 16.0f);
+ if (fmt == AV_PIX_FMT_YUV444P) {
+ *lpu++ = lrintf(c[x+1].yuv.u + 128.0f);
+ *lpv++ = lrintf(c[x+1].yuv.v + 128.0f);
+ }
+ }
+}
+
+static void process_cqt(ShowCQTContext *s)
+{
+ int x, i;
+ if (!s->sono_count) {
+ for (x = 0; x < s->cqt_len; x++) {
+ s->h_buf[x] = s->bar_v_buf[x] * 0.5f * (s->cqt_result[x].re + s->cqt_result[x].im);
+ }
+ if (s->fcount > 1) {
+ float rcp_fcount = 1.0f / s->fcount;
+ for (x = 0; x < s->width; x++) {
+ float h = 0.0f;
+ for (i = 0; i < s->fcount; i++)
+ h += s->h_buf[s->fcount * x + i];
+ s->h_buf[x] = rcp_fcount * h;
+ }
+ }
+ for (x = 0; x < s->width; x++) {
+ s->h_buf[x] = calculate_gamma(s->h_buf[x], s->bar_g);
+ s->rcp_h_buf[x] = 1.0f / (s->h_buf[x] + 0.0001f);
+ }
+ }
+
+ for (x = 0; x < s->cqt_len; x++) {
+ s->cqt_result[x].re *= s->sono_v_buf[x];
+ s->cqt_result[x].im *= s->sono_v_buf[x];
+ }
+
+ if (s->fcount > 1) {
+ float rcp_fcount = 1.0f / s->fcount;
+ for (x = 0; x < s->width; x++) {
+ FFTComplex result = {0.0f, 0.0f};
+ for (i = 0; i < s->fcount; i++) {
+ result.re += s->cqt_result[s->fcount * x + i].re;
+ result.im += s->cqt_result[s->fcount * x + i].im;
+ }
+ s->cqt_result[x].re = rcp_fcount * result.re;
+ s->cqt_result[x].im = rcp_fcount * result.im;
+ }
+ }
+
+ if (s->format == AV_PIX_FMT_RGB24)
+ rgb_from_cqt(s->c_buf, s->cqt_result, s->sono_g, s->width, s->cscheme_v);
+ else
+ yuv_from_cqt(s->c_buf, s->cqt_result, s->sono_g, s->width, s->cmatrix, s->cscheme_v);
+}
+
+static int plot_cqt(AVFilterContext *ctx, AVFrame **frameout)
+{
+ AVFilterLink *outlink = ctx->outputs[0];
+ ShowCQTContext *s = ctx->priv;
+ int64_t last_time, cur_time;
+
+#define UPDATE_TIME(t) \
+ cur_time = av_gettime(); \
+ t += cur_time - last_time; \
+ last_time = cur_time
+
+ last_time = av_gettime();
+
+ memcpy(s->fft_result, s->fft_data, s->fft_len * sizeof(*s->fft_data));
+ av_fft_permute(s->fft_ctx, s->fft_result);
+ av_fft_calc(s->fft_ctx, s->fft_result);
+ s->fft_result[s->fft_len] = s->fft_result[0];
+ UPDATE_TIME(s->fft_time);
+
+ s->cqt_calc(s->cqt_result, s->fft_result, s->coeffs, s->cqt_len, s->fft_len);
+ UPDATE_TIME(s->cqt_time);
+
+ process_cqt(s);
+ UPDATE_TIME(s->process_cqt_time);
+
+ if (s->sono_h) {
+ s->update_sono(s->sono_frame, s->c_buf, s->sono_idx);
+ UPDATE_TIME(s->update_sono_time);
+ }
+
+ if (!s->sono_count) {
+ AVFrame *out = *frameout = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out)
+ return AVERROR(ENOMEM);
+ out->sample_aspect_ratio = av_make_q(1, 1);
+ av_frame_set_color_range(out, AVCOL_RANGE_MPEG);
+ av_frame_set_colorspace(out, s->csp);
+ UPDATE_TIME(s->alloc_time);
+
+ if (s->bar_h) {
+ s->draw_bar(out, s->h_buf, s->rcp_h_buf, s->c_buf, s->bar_h, s->bar_t);
+ UPDATE_TIME(s->bar_time);
+ }
+
+ if (s->axis_h) {
+ s->draw_axis(out, s->axis_frame, s->c_buf, s->bar_h);
+ UPDATE_TIME(s->axis_time);
+ }
+
+ if (s->sono_h) {
+ s->draw_sono(out, s->sono_frame, s->bar_h + s->axis_h, s->sono_idx);
+ UPDATE_TIME(s->sono_time);
+ }
+ out->pts = s->next_pts;
+ s->next_pts += PTS_STEP;
+ }
+ s->sono_count = (s->sono_count + 1) % s->count;
+ if (s->sono_h)
+ s->sono_idx = (s->sono_idx + s->sono_h - 1) % s->sono_h;
+ return 0;
+}
+
+static void init_colormatrix(ShowCQTContext *s)
+{
+ double kr, kg, kb;
+
+ /* from vf_colorspace.c */
+ switch (s->csp) {
+ default:
+ av_log(s->ctx, AV_LOG_WARNING, "unsupported colorspace, setting it to unspecified.\n");
+ s->csp = AVCOL_SPC_UNSPECIFIED;
+ case AVCOL_SPC_UNSPECIFIED:
+ case AVCOL_SPC_BT470BG:
+ case AVCOL_SPC_SMPTE170M:
+ kr = 0.299; kb = 0.114; break;
+ case AVCOL_SPC_BT709:
+ kr = 0.2126; kb = 0.0722; break;
+ case AVCOL_SPC_FCC:
+ kr = 0.30; kb = 0.11; break;
+ case AVCOL_SPC_SMPTE240M:
+ kr = 0.212; kb = 0.087; break;
+ case AVCOL_SPC_BT2020_NCL:
+ kr = 0.2627; kb = 0.0593; break;
+ }
+
+ kg = 1.0 - kr - kb;
+ s->cmatrix[0][0] = 219.0 * kr;
+ s->cmatrix[0][1] = 219.0 * kg;
+ s->cmatrix[0][2] = 219.0 * kb;
+ s->cmatrix[1][0] = -112.0 * kr / (1.0 - kb);
+ s->cmatrix[1][1] = -112.0 * kg / (1.0 - kb);
+ s->cmatrix[1][2] = 112.0;
+ s->cmatrix[2][0] = 112.0;
+ s->cmatrix[2][1] = -112.0 * kg / (1.0 - kr);
+ s->cmatrix[2][2] = -112.0 * kb / (1.0 - kr);
+}
+
+static int init_cscheme(ShowCQTContext *s)
+{
+ char tail[2];
+ int k;
+
+ if (sscanf(s->cscheme, " %f | %f | %f | %f | %f | %f %1s", &s->cscheme_v[0],
+ &s->cscheme_v[1], &s->cscheme_v[2], &s->cscheme_v[3], &s->cscheme_v[4],
+ &s->cscheme_v[5], tail) != 6)
+ goto fail;
+
+ for (k = 0; k < 6; k++)
+ if (isnan(s->cscheme_v[k]) || s->cscheme_v[k] < 0.0f || s->cscheme_v[k] > 1.0f)
+ goto fail;
+
+ return 0;
+
+fail:
+ av_log(s->ctx, AV_LOG_ERROR, "invalid cscheme.\n");
+ return AVERROR(EINVAL);
+}
+
+/* main filter control */
+static av_cold int init(AVFilterContext *ctx)
+{
+ ShowCQTContext *s = ctx->priv;
+ s->ctx = ctx;
+
+ if (!s->fullhd) {
+ av_log(ctx, AV_LOG_WARNING, "fullhd option is deprecated, use size/s option instead.\n");
+ if (s->width != 1920 || s->height != 1080) {
+ av_log(ctx, AV_LOG_ERROR, "fullhd set to 0 but with custom dimension.\n");
+ return AVERROR(EINVAL);
+ }
+ s->width /= 2;
+ s->height /= 2;
+ s->fullhd = 1;
+ }
+
+ if (s->axis_h < 0) {
+ s->axis_h = s->width / 60;
+ if (s->axis_h & 1)
+ s->axis_h++;
+ if (s->bar_h >= 0 && s->sono_h >= 0)
+ s->axis_h = s->height - s->bar_h - s->sono_h;
+ if (s->bar_h >= 0 && s->sono_h < 0)
+ s->axis_h = FFMIN(s->axis_h, s->height - s->bar_h);
+ if (s->bar_h < 0 && s->sono_h >= 0)
+ s->axis_h = FFMIN(s->axis_h, s->height - s->sono_h);
+ }
+
+ if (s->bar_h < 0) {
+ s->bar_h = (s->height - s->axis_h) / 2;
+ if (s->bar_h & 1)
+ s->bar_h--;
+ if (s->sono_h >= 0)
+ s->bar_h = s->height - s->sono_h - s->axis_h;
+ }
+
+ if (s->sono_h < 0)
+ s->sono_h = s->height - s->axis_h - s->bar_h;
+
+ if ((s->width & 1) || (s->height & 1) || (s->bar_h & 1) || (s->axis_h & 1) || (s->sono_h & 1) ||
+ (s->bar_h < 0) || (s->axis_h < 0) || (s->sono_h < 0) || (s->bar_h > s->height) ||
+ (s->axis_h > s->height) || (s->sono_h > s->height) || (s->bar_h + s->axis_h + s->sono_h != s->height)) {
+ av_log(ctx, AV_LOG_ERROR, "invalid dimension.\n");
+ return AVERROR(EINVAL);
+ }
+
+ if (!s->fcount) {
+ do {
+ s->fcount++;
+ } while(s->fcount * s->width < 1920 && s->fcount < 10);
+ }
+
+ init_colormatrix(s);
+
+ return init_cscheme(s);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ common_uninit(ctx->priv);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *formats = NULL;
+ AVFilterChannelLayouts *layouts = NULL;
+ AVFilterLink *inlink = ctx->inputs[0];
+ AVFilterLink *outlink = ctx->outputs[0];
+ enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_NONE };
+ enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_RGB24, AV_PIX_FMT_NONE
+ };
+ int64_t channel_layouts[] = { AV_CH_LAYOUT_STEREO, AV_CH_LAYOUT_STEREO_DOWNMIX, -1 };
+ int ret;
+
+ /* set input audio formats */
+ formats = ff_make_format_list(sample_fmts);
+ if ((ret = ff_formats_ref(formats, &inlink->out_formats)) < 0)
+ return ret;
+
+ layouts = avfilter_make_format64_list(channel_layouts);
+ if ((ret = ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts)) < 0)
+ return ret;
+
+ formats = ff_all_samplerates();
+ if ((ret = ff_formats_ref(formats, &inlink->out_samplerates)) < 0)
+ return ret;
+
+ /* set output video format */
+ formats = ff_make_format_list(pix_fmts);
+ if ((ret = ff_formats_ref(formats, &outlink->in_formats)) < 0)
+ return ret;
+
+ return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ AVFilterLink *inlink = ctx->inputs[0];
+ ShowCQTContext *s = ctx->priv;
+ int ret;
+
+ common_uninit(s);
+
+ outlink->w = s->width;
+ outlink->h = s->height;
+ s->format = outlink->format;
+ outlink->sample_aspect_ratio = av_make_q(1, 1);
+ outlink->frame_rate = s->rate;
+ outlink->time_base = av_mul_q(av_inv_q(s->rate), av_make_q(1, PTS_STEP));
+ av_log(ctx, AV_LOG_INFO, "video: %dx%d %s %d/%d fps, bar_h = %d, axis_h = %d, sono_h = %d.\n",
+ s->width, s->height, av_get_pix_fmt_name(s->format), s->rate.num, s->rate.den,
+ s->bar_h, s->axis_h, s->sono_h);
+
+ s->cqt_len = s->width * s->fcount;
+ if (!(s->freq = create_freq_table(s->basefreq, s->endfreq, s->cqt_len)))
+ return AVERROR(ENOMEM);
+
+ if ((ret = init_volume(s)) < 0)
+ return ret;
+
+ s->fft_bits = FFMAX(ceil(log2(inlink->sample_rate * s->timeclamp)), 4);
+ s->fft_len = 1 << s->fft_bits;
+ av_log(ctx, AV_LOG_INFO, "fft_len = %d, cqt_len = %d.\n", s->fft_len, s->cqt_len);
+
+ s->fft_ctx = av_fft_init(s->fft_bits, 0);
+ s->fft_data = av_calloc(s->fft_len, sizeof(*s->fft_data));
+ s->fft_result = av_calloc(s->fft_len + 64, sizeof(*s->fft_result));
+ s->cqt_result = av_malloc_array(s->cqt_len, sizeof(*s->cqt_result));
+ if (!s->fft_ctx || !s->fft_data || !s->fft_result || !s->cqt_result)
+ return AVERROR(ENOMEM);
+
+ s->cqt_align = 1;
+ s->cqt_calc = cqt_calc;
+ s->permute_coeffs = NULL;
+ s->draw_sono = draw_sono;
+ if (s->format == AV_PIX_FMT_RGB24) {
+ s->draw_bar = draw_bar_rgb;
+ s->draw_axis = draw_axis_rgb;
+ s->update_sono = update_sono_rgb;
+ } else {
+ s->draw_bar = draw_bar_yuv;
+ s->draw_axis = draw_axis_yuv;
+ s->update_sono = update_sono_yuv;
+ }
+
+ if (ARCH_X86)
+ ff_showcqt_init_x86(s);
+
+ if ((ret = init_cqt(s)) < 0)
+ return ret;
+
+ if (s->axis_h) {
+ if (!s->axis) {
+ if ((ret = init_axis_empty(s)) < 0)
+ return ret;
+ } else if (s->axisfile) {
+ if (init_axis_from_file(s) < 0) {
+ av_log(ctx, AV_LOG_WARNING, "loading axis image failed, fallback to font rendering.\n");
+ if (init_axis_from_font(s) < 0) {
+ av_log(ctx, AV_LOG_WARNING, "loading axis font failed, disable text drawing.\n");
+ if ((ret = init_axis_empty(s)) < 0)
+ return ret;
+ }
+ }
+ } else {
+ if (init_axis_from_font(s) < 0) {
+ av_log(ctx, AV_LOG_WARNING, "loading axis font failed, disable text drawing.\n");
+ if ((ret = init_axis_empty(s)) < 0)
+ return ret;
+ }
+ }
+ }
+
+ if (s->sono_h) {
+ s->sono_frame = alloc_frame_empty((outlink->format == AV_PIX_FMT_YUV420P) ?
+ AV_PIX_FMT_YUV422P : outlink->format, s->width, s->sono_h);
+ if (!s->sono_frame)
+ return AVERROR(ENOMEM);
+ }
+
+ s->h_buf = av_malloc_array(s->cqt_len, sizeof (*s->h_buf));
+ s->rcp_h_buf = av_malloc_array(s->width, sizeof(*s->rcp_h_buf));
+ s->c_buf = av_malloc_array(s->width, sizeof(*s->c_buf));
+ if (!s->h_buf || !s->rcp_h_buf || !s->c_buf)
+ return AVERROR(ENOMEM);
+
+ s->sono_count = 0;
+ s->next_pts = 0;
+ s->sono_idx = 0;
+ s->remaining_fill = s->fft_len / 2;
+ s->remaining_frac = 0;
+ s->step_frac = av_div_q(av_make_q(inlink->sample_rate, s->count) , s->rate);
+ s->step = (int)(s->step_frac.num / s->step_frac.den);
+ s->step_frac.num %= s->step_frac.den;
+ if (s->step_frac.num) {
+ av_log(ctx, AV_LOG_INFO, "audio: %d Hz, step = %d + %d/%d.\n",
+ inlink->sample_rate, s->step, s->step_frac.num, s->step_frac.den);
+ av_log(ctx, AV_LOG_WARNING, "fractional step.\n");
+ } else {
+ av_log(ctx, AV_LOG_INFO, "audio: %d Hz, step = %d.\n",
+ inlink->sample_rate, s->step);
+ }
+
+ return 0;
+}
+
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ ShowCQTContext *s = ctx->priv;
+ int remaining, step, ret, x, i, j, m;
+ float *audio_data;
+ AVFrame *out = NULL;
+
+ if (!insamples) {
+ while (s->remaining_fill < s->fft_len / 2) {
+ memset(&s->fft_data[s->fft_len - s->remaining_fill], 0, sizeof(*s->fft_data) * s->remaining_fill);
+ ret = plot_cqt(ctx, &out);
+ if (ret < 0)
+ return ret;
+
+ step = s->step + (s->step_frac.num + s->remaining_frac) / s->step_frac.den;
+ s->remaining_frac = (s->step_frac.num + s->remaining_frac) % s->step_frac.den;
+ for (x = 0; x < (s->fft_len-step); x++)
+ s->fft_data[x] = s->fft_data[x+step];
+ s->remaining_fill += step;
+
+ if (out)
+ return ff_filter_frame(outlink, out);
+ }
+ return AVERROR_EOF;
+ }
+
+ remaining = insamples->nb_samples;
+ audio_data = (float*) insamples->data[0];
+
+ while (remaining) {
+ i = insamples->nb_samples - remaining;
+ j = s->fft_len - s->remaining_fill;
+ if (remaining >= s->remaining_fill) {
+ for (m = 0; m < s->remaining_fill; m++) {
+ s->fft_data[j+m].re = audio_data[2*(i+m)];
+ s->fft_data[j+m].im = audio_data[2*(i+m)+1];
+ }
+ ret = plot_cqt(ctx, &out);
+ if (ret < 0) {
+ av_frame_free(&insamples);
+ return ret;
+ }
+ remaining -= s->remaining_fill;
+ if (out) {
+ int64_t pts = av_rescale_q(insamples->pts, inlink->time_base, av_make_q(1, inlink->sample_rate));
+ pts += insamples->nb_samples - remaining - s->fft_len/2;
+ pts = av_rescale_q(pts, av_make_q(1, inlink->sample_rate), outlink->time_base);
+ if (FFABS(pts - out->pts) > PTS_TOLERANCE) {
+ av_log(ctx, AV_LOG_DEBUG, "changing pts from %"PRId64" (%.3f) to %"PRId64" (%.3f).\n",
+ out->pts, out->pts * av_q2d(outlink->time_base),
+ pts, pts * av_q2d(outlink->time_base));
+ out->pts = pts;
+ s->next_pts = pts + PTS_STEP;
+ }
+ ret = ff_filter_frame(outlink, out);
+ if (ret < 0) {
+ av_frame_free(&insamples);
+ return ret;
+ }
+ out = NULL;
+ }
+ step = s->step + (s->step_frac.num + s->remaining_frac) / s->step_frac.den;
+ s->remaining_frac = (s->step_frac.num + s->remaining_frac) % s->step_frac.den;
+ for (m = 0; m < s->fft_len-step; m++)
+ s->fft_data[m] = s->fft_data[m+step];
+ s->remaining_fill = step;
+ } else {
+ for (m = 0; m < remaining; m++) {
+ s->fft_data[j+m].re = audio_data[2*(i+m)];
+ s->fft_data[j+m].im = audio_data[2*(i+m)+1];
+ }
+ s->remaining_fill -= remaining;
+ remaining = 0;
+ }
+ }
+ av_frame_free(&insamples);
+ return 0;
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ AVFilterLink *inlink = outlink->src->inputs[0];
+ int ret;
+
+ ret = ff_request_frame(inlink);
+ if (ret == AVERROR_EOF)
+ ret = filter_frame(inlink, NULL);
+ return ret;
+}
+
+static const AVFilterPad showcqt_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad showcqt_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_avf_showcqt = {
+ .name = "showcqt",
+ .description = NULL_IF_CONFIG_SMALL("Convert input audio to a CQT (Constant/Clamped Q Transform) spectrum video output."),
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .priv_size = sizeof(ShowCQTContext),
+ .inputs = showcqt_inputs,
+ .outputs = showcqt_outputs,
+ .priv_class = &showcqt_class,
+};
diff --git a/libavfilter/avf_showcqt.h b/libavfilter/avf_showcqt.h
new file mode 100644
index 0000000000..3fa36f851c
--- /dev/null
+++ b/libavfilter/avf_showcqt.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2015 Muhammad Faiz <mfcc64@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_SHOWCQT_H
+#define AVFILTER_SHOWCQT_H
+
+#include "libavcodec/avfft.h"
+#include "avfilter.h"
+#include "internal.h"
+
+typedef struct {
+ FFTSample *val;
+ int start, len;
+} Coeffs;
+
+typedef struct {
+ float r, g, b;
+} RGBFloat;
+
+typedef struct {
+ float y, u, v;
+} YUVFloat;
+
+typedef union {
+ RGBFloat rgb;
+ YUVFloat yuv;
+} ColorFloat;
+
+typedef struct {
+ const AVClass *class;
+ AVFilterContext *ctx;
+ AVFrame *axis_frame;
+ AVFrame *sono_frame;
+ enum AVPixelFormat format;
+ int sono_idx;
+ int sono_count;
+ int step;
+ AVRational step_frac;
+ int remaining_frac;
+ int remaining_fill;
+ int64_t next_pts;
+ double *freq;
+ FFTContext *fft_ctx;
+ Coeffs *coeffs;
+ FFTComplex *fft_data;
+ FFTComplex *fft_result;
+ FFTComplex *cqt_result;
+ int fft_bits;
+ int fft_len;
+ int cqt_len;
+ int cqt_align;
+ ColorFloat *c_buf;
+ float *h_buf;
+ float *rcp_h_buf;
+ float *sono_v_buf;
+ float *bar_v_buf;
+ float cmatrix[3][3];
+ float cscheme_v[6];
+ /* callback */
+ void (*cqt_calc)(FFTComplex *dst, const FFTComplex *src, const Coeffs *coeffs,
+ int len, int fft_len);
+ void (*permute_coeffs)(float *v, int len);
+ void (*draw_bar)(AVFrame *out, const float *h, const float *rcp_h,
+ const ColorFloat *c, int bar_h, float bar_t);
+ void (*draw_axis)(AVFrame *out, AVFrame *axis, const ColorFloat *c, int off);
+ void (*draw_sono)(AVFrame *out, AVFrame *sono, int off, int idx);
+ void (*update_sono)(AVFrame *sono, const ColorFloat *c, int idx);
+ /* performance debugging */
+ int64_t fft_time;
+ int64_t cqt_time;
+ int64_t process_cqt_time;
+ int64_t update_sono_time;
+ int64_t alloc_time;
+ int64_t bar_time;
+ int64_t axis_time;
+ int64_t sono_time;
+ /* option */
+ int width, height;
+ AVRational rate;
+ int bar_h;
+ int axis_h;
+ int sono_h;
+ int fullhd; /* deprecated */
+ char *sono_v;
+ char *bar_v;
+ float sono_g;
+ float bar_g;
+ float bar_t;
+ double timeclamp;
+ double basefreq;
+ double endfreq;
+ float coeffclamp; /* deprecated - ignored */
+ char *tlength;
+ int count;
+ int fcount;
+ char *fontfile;
+ char *font;
+ char *fontcolor;
+ char *axisfile;
+ int axis;
+ int csp;
+ char *cscheme;
+} ShowCQTContext;
+
+void ff_showcqt_init_x86(ShowCQTContext *s);
+
+#endif
diff --git a/libavfilter/avf_showfreqs.c b/libavfilter/avf_showfreqs.c
new file mode 100644
index 0000000000..21735ed075
--- /dev/null
+++ b/libavfilter/avf_showfreqs.c
@@ -0,0 +1,525 @@
+/*
+ * Copyright (c) 2015 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <float.h>
+#include <math.h>
+
+#include "libavcodec/avfft.h"
+#include "libavutil/audio_fifo.h"
+#include "libavutil/avassert.h"
+#include "libavutil/avstring.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/intreadwrite.h"
+#include "libavutil/opt.h"
+#include "libavutil/parseutils.h"
+#include "audio.h"
+#include "video.h"
+#include "avfilter.h"
+#include "internal.h"
+#include "window_func.h"
+
+enum DisplayMode { LINE, BAR, DOT, NB_MODES };
+enum ChannelMode { COMBINED, SEPARATE, NB_CMODES };
+enum FrequencyScale { FS_LINEAR, FS_LOG, FS_RLOG, NB_FSCALES };
+enum AmplitudeScale { AS_LINEAR, AS_SQRT, AS_CBRT, AS_LOG, NB_ASCALES };
+
+typedef struct ShowFreqsContext {
+ const AVClass *class;
+ int w, h;
+ int mode;
+ int cmode;
+ int fft_bits;
+ int ascale, fscale;
+ int avg;
+ int win_func;
+ FFTContext *fft;
+ FFTComplex **fft_data;
+ float **avg_data;
+ float *window_func_lut;
+ float overlap;
+ float minamp;
+ int hop_size;
+ int nb_channels;
+ int nb_freq;
+ int win_size;
+ float scale;
+ char *colors;
+ AVAudioFifo *fifo;
+ int64_t pts;
+} ShowFreqsContext;
+
+#define OFFSET(x) offsetof(ShowFreqsContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption showfreqs_options[] = {
+ { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "1024x512"}, 0, 0, FLAGS },
+ { "s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "1024x512"}, 0, 0, FLAGS },
+ { "mode", "set display mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=BAR}, 0, NB_MODES-1, FLAGS, "mode" },
+ { "line", "show lines", 0, AV_OPT_TYPE_CONST, {.i64=LINE}, 0, 0, FLAGS, "mode" },
+ { "bar", "show bars", 0, AV_OPT_TYPE_CONST, {.i64=BAR}, 0, 0, FLAGS, "mode" },
+ { "dot", "show dots", 0, AV_OPT_TYPE_CONST, {.i64=DOT}, 0, 0, FLAGS, "mode" },
+ { "ascale", "set amplitude scale", OFFSET(ascale), AV_OPT_TYPE_INT, {.i64=AS_LOG}, 0, NB_ASCALES-1, FLAGS, "ascale" },
+ { "lin", "linear", 0, AV_OPT_TYPE_CONST, {.i64=AS_LINEAR}, 0, 0, FLAGS, "ascale" },
+ { "sqrt", "square root", 0, AV_OPT_TYPE_CONST, {.i64=AS_SQRT}, 0, 0, FLAGS, "ascale" },
+ { "cbrt", "cubic root", 0, AV_OPT_TYPE_CONST, {.i64=AS_CBRT}, 0, 0, FLAGS, "ascale" },
+ { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=AS_LOG}, 0, 0, FLAGS, "ascale" },
+ { "fscale", "set frequency scale", OFFSET(fscale), AV_OPT_TYPE_INT, {.i64=FS_LINEAR}, 0, NB_FSCALES-1, FLAGS, "fscale" },
+ { "lin", "linear", 0, AV_OPT_TYPE_CONST, {.i64=FS_LINEAR}, 0, 0, FLAGS, "fscale" },
+ { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=FS_LOG}, 0, 0, FLAGS, "fscale" },
+ { "rlog", "reverse logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=FS_RLOG}, 0, 0, FLAGS, "fscale" },
+ { "win_size", "set window size", OFFSET(fft_bits), AV_OPT_TYPE_INT, {.i64=11}, 4, 16, FLAGS, "fft" },
+ { "w16", 0, 0, AV_OPT_TYPE_CONST, {.i64=4}, 0, 0, FLAGS, "fft" },
+ { "w32", 0, 0, AV_OPT_TYPE_CONST, {.i64=5}, 0, 0, FLAGS, "fft" },
+ { "w64", 0, 0, AV_OPT_TYPE_CONST, {.i64=6}, 0, 0, FLAGS, "fft" },
+ { "w128", 0, 0, AV_OPT_TYPE_CONST, {.i64=7}, 0, 0, FLAGS, "fft" },
+ { "w256", 0, 0, AV_OPT_TYPE_CONST, {.i64=8}, 0, 0, FLAGS, "fft" },
+ { "w512", 0, 0, AV_OPT_TYPE_CONST, {.i64=9}, 0, 0, FLAGS, "fft" },
+ { "w1024", 0, 0, AV_OPT_TYPE_CONST, {.i64=10}, 0, 0, FLAGS, "fft" },
+ { "w2048", 0, 0, AV_OPT_TYPE_CONST, {.i64=11}, 0, 0, FLAGS, "fft" },
+ { "w4096", 0, 0, AV_OPT_TYPE_CONST, {.i64=12}, 0, 0, FLAGS, "fft" },
+ { "w8192", 0, 0, AV_OPT_TYPE_CONST, {.i64=13}, 0, 0, FLAGS, "fft" },
+ { "w16384", 0, 0, AV_OPT_TYPE_CONST, {.i64=14}, 0, 0, FLAGS, "fft" },
+ { "w32768", 0, 0, AV_OPT_TYPE_CONST, {.i64=15}, 0, 0, FLAGS, "fft" },
+ { "w65536", 0, 0, AV_OPT_TYPE_CONST, {.i64=16}, 0, 0, FLAGS, "fft" },
+ { "win_func", "set window function", OFFSET(win_func), AV_OPT_TYPE_INT, {.i64=WFUNC_HANNING}, 0, NB_WFUNC-1, FLAGS, "win_func" },
+ { "rect", "Rectangular", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_RECT}, 0, 0, FLAGS, "win_func" },
+ { "bartlett", "Bartlett", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BARTLETT}, 0, 0, FLAGS, "win_func" },
+ { "hanning", "Hanning", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HANNING}, 0, 0, FLAGS, "win_func" },
+ { "hamming", "Hamming", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HAMMING}, 0, 0, FLAGS, "win_func" },
+ { "blackman", "Blackman", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BLACKMAN}, 0, 0, FLAGS, "win_func" },
+ { "welch", "Welch", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_WELCH}, 0, 0, FLAGS, "win_func" },
+ { "flattop", "Flat-top", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_FLATTOP}, 0, 0, FLAGS, "win_func" },
+ { "bharris", "Blackman-Harris", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BHARRIS}, 0, 0, FLAGS, "win_func" },
+ { "bnuttall", "Blackman-Nuttall", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BNUTTALL}, 0, 0, FLAGS, "win_func" },
+ { "bhann", "Bartlett-Hann", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BHANN}, 0, 0, FLAGS, "win_func" },
+ { "sine", "Sine", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_SINE}, 0, 0, FLAGS, "win_func" },
+ { "nuttall", "Nuttall", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_NUTTALL}, 0, 0, FLAGS, "win_func" },
+ { "lanczos", "Lanczos", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_LANCZOS}, 0, 0, FLAGS, "win_func" },
+ { "gauss", "Gauss", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_GAUSS}, 0, 0, FLAGS, "win_func" },
+ { "tukey", "Tukey", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_TUKEY}, 0, 0, FLAGS, "win_func" },
+ { "dolph", "Dolph-Chebyshev", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_DOLPH}, 0, 0, FLAGS, "win_func" },
+ { "cauchy", "Cauchy", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_CAUCHY}, 0, 0, FLAGS, "win_func" },
+ { "parzen", "Parzen", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_PARZEN}, 0, 0, FLAGS, "win_func" },
+ { "poisson", "Poisson", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_POISSON}, 0, 0, FLAGS, "win_func" },
+ { "overlap", "set window overlap", OFFSET(overlap), AV_OPT_TYPE_FLOAT, {.dbl=1.}, 0., 1., FLAGS },
+ { "averaging", "set time averaging", OFFSET(avg), AV_OPT_TYPE_INT, {.i64=1}, 0, INT32_MAX, FLAGS },
+ { "colors", "set channels colors", OFFSET(colors), AV_OPT_TYPE_STRING, {.str = "red|green|blue|yellow|orange|lime|pink|magenta|brown" }, 0, 0, FLAGS },
+ { "cmode", "set channel mode", OFFSET(cmode), AV_OPT_TYPE_INT, {.i64=COMBINED}, 0, NB_CMODES-1, FLAGS, "cmode" },
+ { "combined", "show all channels in same window", 0, AV_OPT_TYPE_CONST, {.i64=COMBINED}, 0, 0, FLAGS, "cmode" },
+ { "separate", "show each channel in own window", 0, AV_OPT_TYPE_CONST, {.i64=SEPARATE}, 0, 0, FLAGS, "cmode" },
+ { "minamp", "set minimum amplitude", OFFSET(minamp), AV_OPT_TYPE_FLOAT, {.dbl=1e-6}, FLT_MIN, 1e-6, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(showfreqs);
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *formats = NULL;
+ AVFilterChannelLayouts *layouts = NULL;
+ AVFilterLink *inlink = ctx->inputs[0];
+ AVFilterLink *outlink = ctx->outputs[0];
+ static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_NONE };
+ static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_RGBA, AV_PIX_FMT_NONE };
+ int ret;
+
+ /* set input audio formats */
+ formats = ff_make_format_list(sample_fmts);
+ if ((ret = ff_formats_ref(formats, &inlink->out_formats)) < 0)
+ return ret;
+
+ layouts = ff_all_channel_layouts();
+ if ((ret = ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts)) < 0)
+ return ret;
+
+ formats = ff_all_samplerates();
+ if ((ret = ff_formats_ref(formats, &inlink->out_samplerates)) < 0)
+ return ret;
+
+ /* set output video format */
+ formats = ff_make_format_list(pix_fmts);
+ if ((ret = ff_formats_ref(formats, &outlink->in_formats)) < 0)
+ return ret;
+
+ return 0;
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ ShowFreqsContext *s = ctx->priv;
+
+ s->pts = AV_NOPTS_VALUE;
+
+ return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ AVFilterLink *inlink = ctx->inputs[0];
+ ShowFreqsContext *s = ctx->priv;
+ float overlap;
+ int i;
+
+ s->nb_freq = 1 << (s->fft_bits - 1);
+ s->win_size = s->nb_freq << 1;
+ av_audio_fifo_free(s->fifo);
+ av_fft_end(s->fft);
+ s->fft = av_fft_init(s->fft_bits, 0);
+ if (!s->fft) {
+ av_log(ctx, AV_LOG_ERROR, "Unable to create FFT context. "
+ "The window size might be too high.\n");
+ return AVERROR(ENOMEM);
+ }
+
+ /* FFT buffers: x2 for each (display) channel buffer.
+ * Note: we use free and malloc instead of a realloc-like function to
+ * make sure the buffer is aligned in memory for the FFT functions. */
+ for (i = 0; i < s->nb_channels; i++) {
+ av_freep(&s->fft_data[i]);
+ av_freep(&s->avg_data[i]);
+ }
+ av_freep(&s->fft_data);
+ av_freep(&s->avg_data);
+ s->nb_channels = inlink->channels;
+
+ s->fft_data = av_calloc(s->nb_channels, sizeof(*s->fft_data));
+ if (!s->fft_data)
+ return AVERROR(ENOMEM);
+ s->avg_data = av_calloc(s->nb_channels, sizeof(*s->avg_data));
+ if (!s->fft_data)
+ return AVERROR(ENOMEM);
+ for (i = 0; i < s->nb_channels; i++) {
+ s->fft_data[i] = av_calloc(s->win_size, sizeof(**s->fft_data));
+ s->avg_data[i] = av_calloc(s->nb_freq, sizeof(**s->avg_data));
+ if (!s->fft_data[i] || !s->avg_data[i])
+ return AVERROR(ENOMEM);
+ }
+
+ /* pre-calc windowing function */
+ s->window_func_lut = av_realloc_f(s->window_func_lut, s->win_size,
+ sizeof(*s->window_func_lut));
+ if (!s->window_func_lut)
+ return AVERROR(ENOMEM);
+ ff_generate_window_func(s->window_func_lut, s->win_size, s->win_func, &overlap);
+ if (s->overlap == 1.)
+ s->overlap = overlap;
+ s->hop_size = (1. - s->overlap) * s->win_size;
+ if (s->hop_size < 1) {
+ av_log(ctx, AV_LOG_ERROR, "overlap %f too big\n", s->overlap);
+ return AVERROR(EINVAL);
+ }
+
+ for (s->scale = 0, i = 0; i < s->win_size; i++) {
+ s->scale += s->window_func_lut[i] * s->window_func_lut[i];
+ }
+
+ outlink->frame_rate = av_make_q(inlink->sample_rate, s->win_size * (1.-s->overlap));
+ outlink->sample_aspect_ratio = (AVRational){1,1};
+ outlink->w = s->w;
+ outlink->h = s->h;
+
+ s->fifo = av_audio_fifo_alloc(inlink->format, inlink->channels, s->win_size);
+ if (!s->fifo)
+ return AVERROR(ENOMEM);
+ return 0;
+}
+
+static inline void draw_dot(AVFrame *out, int x, int y, uint8_t fg[4])
+{
+
+ uint32_t color = AV_RL32(out->data[0] + y * out->linesize[0] + x * 4);
+
+ if ((color & 0xffffff) != 0)
+ AV_WL32(out->data[0] + y * out->linesize[0] + x * 4, AV_RL32(fg) | color);
+ else
+ AV_WL32(out->data[0] + y * out->linesize[0] + x * 4, AV_RL32(fg));
+}
+
+static int get_sx(ShowFreqsContext *s, int f)
+{
+ switch (s->fscale) {
+ case FS_LINEAR:
+ return (s->w/(float)s->nb_freq)*f;
+ case FS_LOG:
+ return s->w-pow(s->w, (s->nb_freq-f-1)/(s->nb_freq-1.));
+ case FS_RLOG:
+ return pow(s->w, f/(s->nb_freq-1.));
+ }
+
+ return 0;
+}
+
+static float get_bsize(ShowFreqsContext *s, int f)
+{
+ switch (s->fscale) {
+ case FS_LINEAR:
+ return s->w/(float)s->nb_freq;
+ case FS_LOG:
+ return pow(s->w, (s->nb_freq-f-1)/(s->nb_freq-1.))-
+ pow(s->w, (s->nb_freq-f-2)/(s->nb_freq-1.));
+ case FS_RLOG:
+ return pow(s->w, (f+1)/(s->nb_freq-1.))-
+ pow(s->w, f /(s->nb_freq-1.));
+ }
+
+ return 1.;
+}
+
+static inline void plot_freq(ShowFreqsContext *s, int ch,
+ double a, int f, uint8_t fg[4], int *prev_y,
+ AVFrame *out, AVFilterLink *outlink)
+{
+ const int w = s->w;
+ const float min = s->minamp;
+ const float avg = s->avg_data[ch][f];
+ const float bsize = get_bsize(s, f);
+ const int sx = get_sx(s, f);
+ int end = outlink->h;
+ int x, y, i;
+
+ switch(s->ascale) {
+ case AS_SQRT:
+ a = 1.0 - sqrt(a);
+ break;
+ case AS_CBRT:
+ a = 1.0 - cbrt(a);
+ break;
+ case AS_LOG:
+ a = log(av_clipd(a, min, 1)) / log(min);
+ break;
+ case AS_LINEAR:
+ a = 1.0 - a;
+ break;
+ }
+
+ switch (s->cmode) {
+ case COMBINED:
+ y = a * outlink->h - 1;
+ break;
+ case SEPARATE:
+ end = (outlink->h / s->nb_channels) * (ch + 1);
+ y = (outlink->h / s->nb_channels) * ch + a * (outlink->h / s->nb_channels) - 1;
+ break;
+ default:
+ av_assert0(0);
+ }
+ if (y < 0)
+ return;
+
+ switch (s->avg) {
+ case 0:
+ y = s->avg_data[ch][f] = !outlink->frame_count_in ? y : FFMIN(avg, y);
+ break;
+ case 1:
+ break;
+ default:
+ s->avg_data[ch][f] = avg + y * (y - avg) / (FFMIN(outlink->frame_count_in + 1, s->avg) * y);
+ y = s->avg_data[ch][f];
+ break;
+ }
+
+ switch(s->mode) {
+ case LINE:
+ if (*prev_y == -1) {
+ *prev_y = y;
+ }
+ if (y <= *prev_y) {
+ for (x = sx + 1; x < sx + bsize && x < w; x++)
+ draw_dot(out, x, y, fg);
+ for (i = y; i <= *prev_y; i++)
+ draw_dot(out, sx, i, fg);
+ } else {
+ for (i = *prev_y; i <= y; i++)
+ draw_dot(out, sx, i, fg);
+ for (x = sx + 1; x < sx + bsize && x < w; x++)
+ draw_dot(out, x, i - 1, fg);
+ }
+ *prev_y = y;
+ break;
+ case BAR:
+ for (x = sx; x < sx + bsize && x < w; x++)
+ for (i = y; i < end; i++)
+ draw_dot(out, x, i, fg);
+ break;
+ case DOT:
+ for (x = sx; x < sx + bsize && x < w; x++)
+ draw_dot(out, x, y, fg);
+ break;
+ }
+}
+
+static int plot_freqs(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ ShowFreqsContext *s = ctx->priv;
+ const int win_size = s->win_size;
+ char *colors, *color, *saveptr = NULL;
+ AVFrame *out;
+ int ch, n;
+
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out)
+ return AVERROR(ENOMEM);
+
+ for (n = 0; n < outlink->h; n++)
+ memset(out->data[0] + out->linesize[0] * n, 0, outlink->w * 4);
+
+ /* fill FFT input with the number of samples available */
+ for (ch = 0; ch < s->nb_channels; ch++) {
+ const float *p = (float *)in->extended_data[ch];
+
+ for (n = 0; n < in->nb_samples; n++) {
+ s->fft_data[ch][n].re = p[n] * s->window_func_lut[n];
+ s->fft_data[ch][n].im = 0;
+ }
+ for (; n < win_size; n++) {
+ s->fft_data[ch][n].re = 0;
+ s->fft_data[ch][n].im = 0;
+ }
+ }
+
+ /* run FFT on each samples set */
+ for (ch = 0; ch < s->nb_channels; ch++) {
+ av_fft_permute(s->fft, s->fft_data[ch]);
+ av_fft_calc(s->fft, s->fft_data[ch]);
+ }
+
+#define RE(x, ch) s->fft_data[ch][x].re
+#define IM(x, ch) s->fft_data[ch][x].im
+#define M(a, b) (sqrt((a) * (a) + (b) * (b)))
+
+ colors = av_strdup(s->colors);
+ if (!colors) {
+ av_frame_free(&out);
+ return AVERROR(ENOMEM);
+ }
+
+ for (ch = 0; ch < s->nb_channels; ch++) {
+ uint8_t fg[4] = { 0xff, 0xff, 0xff, 0xff };
+ int prev_y = -1, f;
+ double a;
+
+ color = av_strtok(ch == 0 ? colors : NULL, " |", &saveptr);
+ if (color)
+ av_parse_color(fg, color, -1, ctx);
+
+ a = av_clipd(M(RE(0, ch), 0) / s->scale, 0, 1);
+ plot_freq(s, ch, a, 0, fg, &prev_y, out, outlink);
+
+ for (f = 1; f < s->nb_freq; f++) {
+ a = av_clipd(M(RE(f, ch), IM(f, ch)) / s->scale, 0, 1);
+
+ plot_freq(s, ch, a, f, fg, &prev_y, out, outlink);
+ }
+ }
+
+ av_free(colors);
+ out->pts = in->pts;
+ return ff_filter_frame(outlink, out);
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ ShowFreqsContext *s = ctx->priv;
+ AVFrame *fin = NULL;
+ int consumed = 0;
+ int ret = 0;
+
+ if (s->pts == AV_NOPTS_VALUE)
+ s->pts = in->pts - av_audio_fifo_size(s->fifo);
+
+ av_audio_fifo_write(s->fifo, (void **)in->extended_data, in->nb_samples);
+ while (av_audio_fifo_size(s->fifo) >= s->win_size) {
+ fin = ff_get_audio_buffer(inlink, s->win_size);
+ if (!fin) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ fin->pts = s->pts + consumed;
+ consumed += s->hop_size;
+ ret = av_audio_fifo_peek(s->fifo, (void **)fin->extended_data, s->win_size);
+ if (ret < 0)
+ goto fail;
+
+ ret = plot_freqs(inlink, fin);
+ av_frame_free(&fin);
+ av_audio_fifo_drain(s->fifo, s->hop_size);
+ if (ret < 0)
+ goto fail;
+ }
+
+fail:
+ s->pts = AV_NOPTS_VALUE;
+ av_frame_free(&fin);
+ av_frame_free(&in);
+ return ret;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ ShowFreqsContext *s = ctx->priv;
+ int i;
+
+ av_fft_end(s->fft);
+ for (i = 0; i < s->nb_channels; i++) {
+ if (s->fft_data)
+ av_freep(&s->fft_data[i]);
+ if (s->avg_data)
+ av_freep(&s->avg_data[i]);
+ }
+ av_freep(&s->fft_data);
+ av_freep(&s->avg_data);
+ av_freep(&s->window_func_lut);
+ av_audio_fifo_free(s->fifo);
+}
+
+static const AVFilterPad showfreqs_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad showfreqs_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ },
+ { NULL }
+};
+
+AVFilter ff_avf_showfreqs = {
+ .name = "showfreqs",
+ .description = NULL_IF_CONFIG_SMALL("Convert input audio to a frequencies video output."),
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .priv_size = sizeof(ShowFreqsContext),
+ .inputs = showfreqs_inputs,
+ .outputs = showfreqs_outputs,
+ .priv_class = &showfreqs_class,
+};
diff --git a/libavfilter/avf_showspectrum.c b/libavfilter/avf_showspectrum.c
new file mode 100644
index 0000000000..09b5a2a51f
--- /dev/null
+++ b/libavfilter/avf_showspectrum.c
@@ -0,0 +1,1314 @@
+/*
+ * Copyright (c) 2012-2013 Clément Bœsch
+ * Copyright (c) 2013 Rudolf Polzer <divverent@xonotic.org>
+ * Copyright (c) 2015 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * audio to spectrum (video) transmedia filter, based on ffplay rdft showmode
+ * (by Michael Niedermayer) and lavfi/avf_showwaves (by Stefano Sabatini).
+ */
+
+#include <math.h>
+
+#include "libavcodec/avfft.h"
+#include "libavutil/audio_fifo.h"
+#include "libavutil/avassert.h"
+#include "libavutil/avstring.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/opt.h"
+#include "libavutil/xga_font_data.h"
+#include "audio.h"
+#include "video.h"
+#include "avfilter.h"
+#include "internal.h"
+#include "window_func.h"
+
+enum DisplayMode { COMBINED, SEPARATE, NB_MODES };
+enum DataMode { D_MAGNITUDE, D_PHASE, NB_DMODES };
+enum DisplayScale { LINEAR, SQRT, CBRT, LOG, FOURTHRT, FIFTHRT, NB_SCALES };
+enum ColorMode { CHANNEL, INTENSITY, RAINBOW, MORELAND, NEBULAE, FIRE, FIERY, FRUIT, COOL, NB_CLMODES };
+enum SlideMode { REPLACE, SCROLL, FULLFRAME, RSCROLL, NB_SLIDES };
+enum Orientation { VERTICAL, HORIZONTAL, NB_ORIENTATIONS };
+
+typedef struct {
+ const AVClass *class;
+ int w, h;
+ AVFrame *outpicref;
+ int nb_display_channels;
+ int orientation;
+ int channel_width;
+ int channel_height;
+ int sliding; ///< 1 if sliding mode, 0 otherwise
+ int mode; ///< channel display mode
+ int color_mode; ///< display color scheme
+ int scale;
+ float saturation; ///< color saturation multiplier
+ float rotation; ///< color rotation
+ int data;
+ int xpos; ///< x position (current column)
+ FFTContext **fft; ///< Fast Fourier Transform context
+ int fft_bits; ///< number of bits (FFT window size = 1<<fft_bits)
+ FFTComplex **fft_data; ///< bins holder for each (displayed) channels
+ float *window_func_lut; ///< Window function LUT
+ float **magnitudes;
+ float **phases;
+ int win_func;
+ int win_size;
+ double win_scale;
+ float overlap;
+ float gain;
+ int hop_size;
+ float *combine_buffer; ///< color combining buffer (3 * h items)
+ float **color_buffer; ///< color buffer (3 * h * ch items)
+ AVAudioFifo *fifo;
+ int64_t pts;
+ int single_pic;
+ int legend;
+ int start_x, start_y;
+} ShowSpectrumContext;
+
+#define OFFSET(x) offsetof(ShowSpectrumContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption showspectrum_options[] = {
+ { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "640x512"}, 0, 0, FLAGS },
+ { "s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "640x512"}, 0, 0, FLAGS },
+ { "slide", "set sliding mode", OFFSET(sliding), AV_OPT_TYPE_INT, {.i64 = 0}, 0, NB_SLIDES-1, FLAGS, "slide" },
+ { "replace", "replace old columns with new", 0, AV_OPT_TYPE_CONST, {.i64=REPLACE}, 0, 0, FLAGS, "slide" },
+ { "scroll", "scroll from right to left", 0, AV_OPT_TYPE_CONST, {.i64=SCROLL}, 0, 0, FLAGS, "slide" },
+ { "fullframe", "return full frames", 0, AV_OPT_TYPE_CONST, {.i64=FULLFRAME}, 0, 0, FLAGS, "slide" },
+ { "rscroll", "scroll from left to right", 0, AV_OPT_TYPE_CONST, {.i64=RSCROLL}, 0, 0, FLAGS, "slide" },
+ { "mode", "set channel display mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=COMBINED}, COMBINED, NB_MODES-1, FLAGS, "mode" },
+ { "combined", "combined mode", 0, AV_OPT_TYPE_CONST, {.i64=COMBINED}, 0, 0, FLAGS, "mode" },
+ { "separate", "separate mode", 0, AV_OPT_TYPE_CONST, {.i64=SEPARATE}, 0, 0, FLAGS, "mode" },
+ { "color", "set channel coloring", OFFSET(color_mode), AV_OPT_TYPE_INT, {.i64=CHANNEL}, CHANNEL, NB_CLMODES-1, FLAGS, "color" },
+ { "channel", "separate color for each channel", 0, AV_OPT_TYPE_CONST, {.i64=CHANNEL}, 0, 0, FLAGS, "color" },
+ { "intensity", "intensity based coloring", 0, AV_OPT_TYPE_CONST, {.i64=INTENSITY}, 0, 0, FLAGS, "color" },
+ { "rainbow", "rainbow based coloring", 0, AV_OPT_TYPE_CONST, {.i64=RAINBOW}, 0, 0, FLAGS, "color" },
+ { "moreland", "moreland based coloring", 0, AV_OPT_TYPE_CONST, {.i64=MORELAND}, 0, 0, FLAGS, "color" },
+ { "nebulae", "nebulae based coloring", 0, AV_OPT_TYPE_CONST, {.i64=NEBULAE}, 0, 0, FLAGS, "color" },
+ { "fire", "fire based coloring", 0, AV_OPT_TYPE_CONST, {.i64=FIRE}, 0, 0, FLAGS, "color" },
+ { "fiery", "fiery based coloring", 0, AV_OPT_TYPE_CONST, {.i64=FIERY}, 0, 0, FLAGS, "color" },
+ { "fruit", "fruit based coloring", 0, AV_OPT_TYPE_CONST, {.i64=FRUIT}, 0, 0, FLAGS, "color" },
+ { "cool", "cool based coloring", 0, AV_OPT_TYPE_CONST, {.i64=COOL}, 0, 0, FLAGS, "color" },
+ { "scale", "set display scale", OFFSET(scale), AV_OPT_TYPE_INT, {.i64=SQRT}, LINEAR, NB_SCALES-1, FLAGS, "scale" },
+ { "lin", "linear", 0, AV_OPT_TYPE_CONST, {.i64=LINEAR}, 0, 0, FLAGS, "scale" },
+ { "sqrt", "square root", 0, AV_OPT_TYPE_CONST, {.i64=SQRT}, 0, 0, FLAGS, "scale" },
+ { "cbrt", "cubic root", 0, AV_OPT_TYPE_CONST, {.i64=CBRT}, 0, 0, FLAGS, "scale" },
+ { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=LOG}, 0, 0, FLAGS, "scale" },
+ { "4thrt","4th root", 0, AV_OPT_TYPE_CONST, {.i64=FOURTHRT}, 0, 0, FLAGS, "scale" },
+ { "5thrt","5th root", 0, AV_OPT_TYPE_CONST, {.i64=FIFTHRT}, 0, 0, FLAGS, "scale" },
+ { "saturation", "color saturation multiplier", OFFSET(saturation), AV_OPT_TYPE_FLOAT, {.dbl = 1}, -10, 10, FLAGS },
+ { "win_func", "set window function", OFFSET(win_func), AV_OPT_TYPE_INT, {.i64 = WFUNC_HANNING}, 0, NB_WFUNC-1, FLAGS, "win_func" },
+ { "rect", "Rectangular", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_RECT}, 0, 0, FLAGS, "win_func" },
+ { "bartlett", "Bartlett", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BARTLETT}, 0, 0, FLAGS, "win_func" },
+ { "hann", "Hann", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HANNING}, 0, 0, FLAGS, "win_func" },
+ { "hanning", "Hanning", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HANNING}, 0, 0, FLAGS, "win_func" },
+ { "hamming", "Hamming", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HAMMING}, 0, 0, FLAGS, "win_func" },
+ { "blackman", "Blackman", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BLACKMAN}, 0, 0, FLAGS, "win_func" },
+ { "welch", "Welch", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_WELCH}, 0, 0, FLAGS, "win_func" },
+ { "flattop", "Flat-top", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_FLATTOP}, 0, 0, FLAGS, "win_func" },
+ { "bharris", "Blackman-Harris", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BHARRIS}, 0, 0, FLAGS, "win_func" },
+ { "bnuttall", "Blackman-Nuttall", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BNUTTALL}, 0, 0, FLAGS, "win_func" },
+ { "bhann", "Bartlett-Hann", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BHANN}, 0, 0, FLAGS, "win_func" },
+ { "sine", "Sine", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_SINE}, 0, 0, FLAGS, "win_func" },
+ { "nuttall", "Nuttall", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_NUTTALL}, 0, 0, FLAGS, "win_func" },
+ { "lanczos", "Lanczos", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_LANCZOS}, 0, 0, FLAGS, "win_func" },
+ { "gauss", "Gauss", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_GAUSS}, 0, 0, FLAGS, "win_func" },
+ { "tukey", "Tukey", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_TUKEY}, 0, 0, FLAGS, "win_func" },
+ { "dolph", "Dolph-Chebyshev", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_DOLPH}, 0, 0, FLAGS, "win_func" },
+ { "cauchy", "Cauchy", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_CAUCHY}, 0, 0, FLAGS, "win_func" },
+ { "parzen", "Parzen", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_PARZEN}, 0, 0, FLAGS, "win_func" },
+ { "poisson", "Poisson", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_POISSON}, 0, 0, FLAGS, "win_func" },
+ { "orientation", "set orientation", OFFSET(orientation), AV_OPT_TYPE_INT, {.i64=VERTICAL}, 0, NB_ORIENTATIONS-1, FLAGS, "orientation" },
+ { "vertical", NULL, 0, AV_OPT_TYPE_CONST, {.i64=VERTICAL}, 0, 0, FLAGS, "orientation" },
+ { "horizontal", NULL, 0, AV_OPT_TYPE_CONST, {.i64=HORIZONTAL}, 0, 0, FLAGS, "orientation" },
+ { "overlap", "set window overlap", OFFSET(overlap), AV_OPT_TYPE_FLOAT, {.dbl = 0}, 0, 1, FLAGS },
+ { "gain", "set scale gain", OFFSET(gain), AV_OPT_TYPE_FLOAT, {.dbl = 1}, 0, 128, FLAGS },
+ { "data", "set data mode", OFFSET(data), AV_OPT_TYPE_INT, {.i64 = 0}, 0, NB_DMODES-1, FLAGS, "data" },
+ { "magnitude", NULL, 0, AV_OPT_TYPE_CONST, {.i64=D_MAGNITUDE}, 0, 0, FLAGS, "data" },
+ { "phase", NULL, 0, AV_OPT_TYPE_CONST, {.i64=D_PHASE}, 0, 0, FLAGS, "data" },
+ { "rotation", "color rotation", OFFSET(rotation), AV_OPT_TYPE_FLOAT, {.dbl = 0}, -1, 1, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(showspectrum);
+
+static const struct ColorTable {
+ float a, y, u, v;
+} color_table[][8] = {
+ [INTENSITY] = {
+ { 0, 0, 0, 0 },
+ { 0.13, .03587126228984074, .1573300977624594, -.02548747583751842 },
+ { 0.30, .18572281794568020, .1772436246393981, .17475554840414750 },
+ { 0.60, .28184980583656130, -.1593064119945782, .47132074554608920 },
+ { 0.73, .65830621175547810, -.3716070802232764, .24352759331252930 },
+ { 0.78, .76318535758242900, -.4307467689263783, .16866496622310430 },
+ { 0.91, .95336363636363640, -.2045454545454546, .03313636363636363 },
+ { 1, 1, 0, 0 }},
+ [RAINBOW] = {
+ { 0, 0, 0, 0 },
+ { 0.13, 44/256., (189-128)/256., (138-128)/256. },
+ { 0.25, 29/256., (186-128)/256., (119-128)/256. },
+ { 0.38, 119/256., (194-128)/256., (53-128)/256. },
+ { 0.60, 111/256., (73-128)/256., (59-128)/256. },
+ { 0.73, 205/256., (19-128)/256., (149-128)/256. },
+ { 0.86, 135/256., (83-128)/256., (200-128)/256. },
+ { 1, 73/256., (95-128)/256., (225-128)/256. }},
+ [MORELAND] = {
+ { 0, 44/256., (181-128)/256., (112-128)/256. },
+ { 0.13, 126/256., (177-128)/256., (106-128)/256. },
+ { 0.25, 164/256., (163-128)/256., (109-128)/256. },
+ { 0.38, 200/256., (140-128)/256., (120-128)/256. },
+ { 0.60, 201/256., (117-128)/256., (141-128)/256. },
+ { 0.73, 177/256., (103-128)/256., (165-128)/256. },
+ { 0.86, 136/256., (100-128)/256., (183-128)/256. },
+ { 1, 68/256., (117-128)/256., (203-128)/256. }},
+ [NEBULAE] = {
+ { 0, 10/256., (134-128)/256., (132-128)/256. },
+ { 0.23, 21/256., (137-128)/256., (130-128)/256. },
+ { 0.45, 35/256., (134-128)/256., (134-128)/256. },
+ { 0.57, 51/256., (130-128)/256., (139-128)/256. },
+ { 0.67, 104/256., (116-128)/256., (162-128)/256. },
+ { 0.77, 120/256., (105-128)/256., (188-128)/256. },
+ { 0.87, 140/256., (105-128)/256., (188-128)/256. },
+ { 1, 1, 0, 0 }},
+ [FIRE] = {
+ { 0, 0, 0, 0 },
+ { 0.23, 44/256., (132-128)/256., (127-128)/256. },
+ { 0.45, 62/256., (116-128)/256., (140-128)/256. },
+ { 0.57, 75/256., (105-128)/256., (152-128)/256. },
+ { 0.67, 95/256., (91-128)/256., (166-128)/256. },
+ { 0.77, 126/256., (74-128)/256., (172-128)/256. },
+ { 0.87, 164/256., (73-128)/256., (162-128)/256. },
+ { 1, 1, 0, 0 }},
+ [FIERY] = {
+ { 0, 0, 0, 0 },
+ { 0.23, 36/256., (116-128)/256., (163-128)/256. },
+ { 0.45, 52/256., (102-128)/256., (200-128)/256. },
+ { 0.57, 116/256., (84-128)/256., (196-128)/256. },
+ { 0.67, 157/256., (67-128)/256., (181-128)/256. },
+ { 0.77, 193/256., (40-128)/256., (155-128)/256. },
+ { 0.87, 221/256., (101-128)/256., (134-128)/256. },
+ { 1, 1, 0, 0 }},
+ [FRUIT] = {
+ { 0, 0, 0, 0 },
+ { 0.20, 29/256., (136-128)/256., (119-128)/256. },
+ { 0.30, 60/256., (119-128)/256., (90-128)/256. },
+ { 0.40, 85/256., (91-128)/256., (85-128)/256. },
+ { 0.50, 116/256., (70-128)/256., (105-128)/256. },
+ { 0.60, 151/256., (50-128)/256., (146-128)/256. },
+ { 0.70, 191/256., (63-128)/256., (178-128)/256. },
+ { 1, 98/256., (80-128)/256., (221-128)/256. }},
+ [COOL] = {
+ { 0, 0, 0, 0 },
+ { .15, 0, .5, -.5 },
+ { 1, 1, -.5, .5 }},
+};
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ ShowSpectrumContext *s = ctx->priv;
+ int i;
+
+ av_freep(&s->combine_buffer);
+ if (s->fft) {
+ for (i = 0; i < s->nb_display_channels; i++)
+ av_fft_end(s->fft[i]);
+ }
+ av_freep(&s->fft);
+ if (s->fft_data) {
+ for (i = 0; i < s->nb_display_channels; i++)
+ av_freep(&s->fft_data[i]);
+ }
+ av_freep(&s->fft_data);
+ if (s->color_buffer) {
+ for (i = 0; i < s->nb_display_channels; i++)
+ av_freep(&s->color_buffer[i]);
+ }
+ av_freep(&s->color_buffer);
+ av_freep(&s->window_func_lut);
+ if (s->magnitudes) {
+ for (i = 0; i < s->nb_display_channels; i++)
+ av_freep(&s->magnitudes[i]);
+ }
+ av_freep(&s->magnitudes);
+ av_frame_free(&s->outpicref);
+ av_audio_fifo_free(s->fifo);
+ if (s->phases) {
+ for (i = 0; i < s->nb_display_channels; i++)
+ av_freep(&s->phases[i]);
+ }
+ av_freep(&s->phases);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *formats = NULL;
+ AVFilterChannelLayouts *layouts = NULL;
+ AVFilterLink *inlink = ctx->inputs[0];
+ AVFilterLink *outlink = ctx->outputs[0];
+ static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_NONE };
+ static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_NONE };
+ int ret;
+
+ /* set input audio formats */
+ formats = ff_make_format_list(sample_fmts);
+ if ((ret = ff_formats_ref(formats, &inlink->out_formats)) < 0)
+ return ret;
+
+ layouts = ff_all_channel_layouts();
+ if ((ret = ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts)) < 0)
+ return ret;
+
+ formats = ff_all_samplerates();
+ if ((ret = ff_formats_ref(formats, &inlink->out_samplerates)) < 0)
+ return ret;
+
+ /* set output video format */
+ formats = ff_make_format_list(pix_fmts);
+ if ((ret = ff_formats_ref(formats, &outlink->in_formats)) < 0)
+ return ret;
+
+ return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ AVFilterLink *inlink = ctx->inputs[0];
+ ShowSpectrumContext *s = ctx->priv;
+ int i, fft_bits, h, w;
+ float overlap;
+
+ if (!strcmp(ctx->filter->name, "showspectrumpic"))
+ s->single_pic = 1;
+
+ outlink->w = s->w;
+ outlink->h = s->h;
+
+ if (s->legend) {
+ s->start_x = log10(inlink->sample_rate) * 25;
+ s->start_y = 64;
+ outlink->w += s->start_x * 2;
+ outlink->h += s->start_y * 2;
+ }
+
+ h = (s->mode == COMBINED || s->orientation == HORIZONTAL) ? s->h : s->h / inlink->channels;
+ w = (s->mode == COMBINED || s->orientation == VERTICAL) ? s->w : s->w / inlink->channels;
+ s->channel_height = h;
+ s->channel_width = w;
+
+ if (s->orientation == VERTICAL) {
+ /* FFT window size (precision) according to the requested output frame height */
+ for (fft_bits = 1; 1 << fft_bits < 2 * h; fft_bits++);
+ } else {
+ /* FFT window size (precision) according to the requested output frame width */
+ for (fft_bits = 1; 1 << fft_bits < 2 * w; fft_bits++);
+ }
+ s->win_size = 1 << fft_bits;
+
+ if (!s->fft) {
+ s->fft = av_calloc(inlink->channels, sizeof(*s->fft));
+ if (!s->fft)
+ return AVERROR(ENOMEM);
+ }
+
+ /* (re-)configuration if the video output changed (or first init) */
+ if (fft_bits != s->fft_bits) {
+ AVFrame *outpicref;
+
+ s->fft_bits = fft_bits;
+
+ /* FFT buffers: x2 for each (display) channel buffer.
+ * Note: we use free and malloc instead of a realloc-like function to
+ * make sure the buffer is aligned in memory for the FFT functions. */
+ for (i = 0; i < s->nb_display_channels; i++) {
+ av_fft_end(s->fft[i]);
+ av_freep(&s->fft_data[i]);
+ }
+ av_freep(&s->fft_data);
+
+ s->nb_display_channels = inlink->channels;
+ for (i = 0; i < s->nb_display_channels; i++) {
+ s->fft[i] = av_fft_init(fft_bits, 0);
+ if (!s->fft[i]) {
+ av_log(ctx, AV_LOG_ERROR, "Unable to create FFT context. "
+ "The window size might be too high.\n");
+ return AVERROR(EINVAL);
+ }
+ }
+
+ s->magnitudes = av_calloc(s->nb_display_channels, sizeof(*s->magnitudes));
+ if (!s->magnitudes)
+ return AVERROR(ENOMEM);
+ for (i = 0; i < s->nb_display_channels; i++) {
+ s->magnitudes[i] = av_calloc(s->orientation == VERTICAL ? s->h : s->w, sizeof(**s->magnitudes));
+ if (!s->magnitudes[i])
+ return AVERROR(ENOMEM);
+ }
+
+ s->phases = av_calloc(s->nb_display_channels, sizeof(*s->phases));
+ if (!s->phases)
+ return AVERROR(ENOMEM);
+ for (i = 0; i < s->nb_display_channels; i++) {
+ s->phases[i] = av_calloc(s->orientation == VERTICAL ? s->h : s->w, sizeof(**s->phases));
+ if (!s->phases[i])
+ return AVERROR(ENOMEM);
+ }
+
+ av_freep(&s->color_buffer);
+ s->color_buffer = av_calloc(s->nb_display_channels, sizeof(*s->color_buffer));
+ if (!s->color_buffer)
+ return AVERROR(ENOMEM);
+ for (i = 0; i < s->nb_display_channels; i++) {
+ s->color_buffer[i] = av_calloc(s->orientation == VERTICAL ? s->h * 3 : s->w * 3, sizeof(**s->color_buffer));
+ if (!s->color_buffer[i])
+ return AVERROR(ENOMEM);
+ }
+
+ s->fft_data = av_calloc(s->nb_display_channels, sizeof(*s->fft_data));
+ if (!s->fft_data)
+ return AVERROR(ENOMEM);
+ for (i = 0; i < s->nb_display_channels; i++) {
+ s->fft_data[i] = av_calloc(s->win_size, sizeof(**s->fft_data));
+ if (!s->fft_data[i])
+ return AVERROR(ENOMEM);
+ }
+
+ /* pre-calc windowing function */
+ s->window_func_lut =
+ av_realloc_f(s->window_func_lut, s->win_size,
+ sizeof(*s->window_func_lut));
+ if (!s->window_func_lut)
+ return AVERROR(ENOMEM);
+ ff_generate_window_func(s->window_func_lut, s->win_size, s->win_func, &overlap);
+ if (s->overlap == 1)
+ s->overlap = overlap;
+ s->hop_size = (1. - s->overlap) * s->win_size;
+ if (s->hop_size < 1) {
+ av_log(ctx, AV_LOG_ERROR, "overlap %f too big\n", s->overlap);
+ return AVERROR(EINVAL);
+ }
+
+ for (s->win_scale = 0, i = 0; i < s->win_size; i++) {
+ s->win_scale += s->window_func_lut[i] * s->window_func_lut[i];
+ }
+ s->win_scale = 1. / sqrt(s->win_scale);
+
+ /* prepare the initial picref buffer (black frame) */
+ av_frame_free(&s->outpicref);
+ s->outpicref = outpicref =
+ ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!outpicref)
+ return AVERROR(ENOMEM);
+ outlink->sample_aspect_ratio = (AVRational){1,1};
+ for (i = 0; i < outlink->h; i++) {
+ memset(outpicref->data[0] + i * outpicref->linesize[0], 0, outlink->w);
+ memset(outpicref->data[1] + i * outpicref->linesize[1], 128, outlink->w);
+ memset(outpicref->data[2] + i * outpicref->linesize[2], 128, outlink->w);
+ }
+ av_frame_set_color_range(outpicref, AVCOL_RANGE_JPEG);
+ }
+
+ if ((s->orientation == VERTICAL && s->xpos >= s->w) ||
+ (s->orientation == HORIZONTAL && s->xpos >= s->h))
+ s->xpos = 0;
+
+ outlink->frame_rate = av_make_q(inlink->sample_rate, s->win_size * (1.-s->overlap));
+ if (s->orientation == VERTICAL && s->sliding == FULLFRAME)
+ outlink->frame_rate.den *= s->w;
+ if (s->orientation == HORIZONTAL && s->sliding == FULLFRAME)
+ outlink->frame_rate.den *= s->h;
+
+ if (s->orientation == VERTICAL) {
+ s->combine_buffer =
+ av_realloc_f(s->combine_buffer, s->h * 3,
+ sizeof(*s->combine_buffer));
+ } else {
+ s->combine_buffer =
+ av_realloc_f(s->combine_buffer, s->w * 3,
+ sizeof(*s->combine_buffer));
+ }
+
+ av_log(ctx, AV_LOG_VERBOSE, "s:%dx%d FFT window size:%d\n",
+ s->w, s->h, s->win_size);
+
+ av_audio_fifo_free(s->fifo);
+ s->fifo = av_audio_fifo_alloc(inlink->format, inlink->channels, s->win_size);
+ if (!s->fifo)
+ return AVERROR(ENOMEM);
+ return 0;
+}
+
+static int run_channel_fft(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
+{
+ ShowSpectrumContext *s = ctx->priv;
+ const float *window_func_lut = s->window_func_lut;
+ AVFrame *fin = arg;
+ const int ch = jobnr;
+ int n;
+
+ /* fill FFT input with the number of samples available */
+ const float *p = (float *)fin->extended_data[ch];
+
+ for (n = 0; n < s->win_size; n++) {
+ s->fft_data[ch][n].re = p[n] * window_func_lut[n];
+ s->fft_data[ch][n].im = 0;
+ }
+
+ /* run FFT on each samples set */
+ av_fft_permute(s->fft[ch], s->fft_data[ch]);
+ av_fft_calc(s->fft[ch], s->fft_data[ch]);
+
+ return 0;
+}
+
+#define RE(y, ch) s->fft_data[ch][y].re
+#define IM(y, ch) s->fft_data[ch][y].im
+#define MAGNITUDE(y, ch) hypot(RE(y, ch), IM(y, ch))
+#define PHASE(y, ch) atan2(IM(y, ch), RE(y, ch))
+
+static int calc_channel_magnitudes(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
+{
+ ShowSpectrumContext *s = ctx->priv;
+ const double w = s->win_scale * (s->scale == LOG ? s->win_scale : 1);
+ int y, h = s->orientation == VERTICAL ? s->h : s->w;
+ const float f = s->gain * w;
+ const int ch = jobnr;
+ float *magnitudes = s->magnitudes[ch];
+
+ for (y = 0; y < h; y++)
+ magnitudes[y] = MAGNITUDE(y, ch) * f;
+
+ return 0;
+}
+
+static int calc_channel_phases(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
+{
+ ShowSpectrumContext *s = ctx->priv;
+ const int h = s->orientation == VERTICAL ? s->h : s->w;
+ const int ch = jobnr;
+ float *phases = s->phases[ch];
+ int y;
+
+ for (y = 0; y < h; y++)
+ phases[y] = (PHASE(y, ch) / M_PI + 1) / 2;
+
+ return 0;
+}
+
+static void acalc_magnitudes(ShowSpectrumContext *s)
+{
+ const double w = s->win_scale * (s->scale == LOG ? s->win_scale : 1);
+ int ch, y, h = s->orientation == VERTICAL ? s->h : s->w;
+ const float f = s->gain * w;
+
+ for (ch = 0; ch < s->nb_display_channels; ch++) {
+ float *magnitudes = s->magnitudes[ch];
+
+ for (y = 0; y < h; y++)
+ magnitudes[y] += MAGNITUDE(y, ch) * f;
+ }
+}
+
+static void scale_magnitudes(ShowSpectrumContext *s, float scale)
+{
+ int ch, y, h = s->orientation == VERTICAL ? s->h : s->w;
+
+ for (ch = 0; ch < s->nb_display_channels; ch++) {
+ float *magnitudes = s->magnitudes[ch];
+
+ for (y = 0; y < h; y++)
+ magnitudes[y] *= scale;
+ }
+}
+
+static void color_range(ShowSpectrumContext *s, int ch,
+ float *yf, float *uf, float *vf)
+{
+ switch (s->mode) {
+ case COMBINED:
+ // reduce range by channel count
+ *yf = 256.0f / s->nb_display_channels;
+ switch (s->color_mode) {
+ case RAINBOW:
+ case MORELAND:
+ case NEBULAE:
+ case FIRE:
+ case FIERY:
+ case FRUIT:
+ case COOL:
+ case INTENSITY:
+ *uf = *yf;
+ *vf = *yf;
+ break;
+ case CHANNEL:
+ /* adjust saturation for mixed UV coloring */
+ /* this factor is correct for infinite channels, an approximation otherwise */
+ *uf = *yf * M_PI;
+ *vf = *yf * M_PI;
+ break;
+ default:
+ av_assert0(0);
+ }
+ break;
+ case SEPARATE:
+ // full range
+ *yf = 256.0f;
+ *uf = 256.0f;
+ *vf = 256.0f;
+ break;
+ default:
+ av_assert0(0);
+ }
+
+ if (s->color_mode == CHANNEL) {
+ if (s->nb_display_channels > 1) {
+ *uf *= 0.5 * sin((2 * M_PI * ch) / s->nb_display_channels + M_PI * s->rotation);
+ *vf *= 0.5 * cos((2 * M_PI * ch) / s->nb_display_channels + M_PI * s->rotation);
+ } else {
+ *uf *= 0.5 * sin(M_PI * s->rotation);
+ *vf *= 0.5 * cos(M_PI * s->rotation + M_PI_2);
+ }
+ } else {
+ *uf += *uf * sin(M_PI * s->rotation);
+ *vf += *vf * cos(M_PI * s->rotation + M_PI_2);
+ }
+
+ *uf *= s->saturation;
+ *vf *= s->saturation;
+}
+
+static void pick_color(ShowSpectrumContext *s,
+ float yf, float uf, float vf,
+ float a, float *out)
+{
+ if (s->color_mode > CHANNEL) {
+ const int cm = s->color_mode;
+ float y, u, v;
+ int i;
+
+ for (i = 1; i < FF_ARRAY_ELEMS(color_table[cm]) - 1; i++)
+ if (color_table[cm][i].a >= a)
+ break;
+ // i now is the first item >= the color
+ // now we know to interpolate between item i - 1 and i
+ if (a <= color_table[cm][i - 1].a) {
+ y = color_table[cm][i - 1].y;
+ u = color_table[cm][i - 1].u;
+ v = color_table[cm][i - 1].v;
+ } else if (a >= color_table[cm][i].a) {
+ y = color_table[cm][i].y;
+ u = color_table[cm][i].u;
+ v = color_table[cm][i].v;
+ } else {
+ float start = color_table[cm][i - 1].a;
+ float end = color_table[cm][i].a;
+ float lerpfrac = (a - start) / (end - start);
+ y = color_table[cm][i - 1].y * (1.0f - lerpfrac)
+ + color_table[cm][i].y * lerpfrac;
+ u = color_table[cm][i - 1].u * (1.0f - lerpfrac)
+ + color_table[cm][i].u * lerpfrac;
+ v = color_table[cm][i - 1].v * (1.0f - lerpfrac)
+ + color_table[cm][i].v * lerpfrac;
+ }
+
+ out[0] = y * yf;
+ out[1] = u * uf;
+ out[2] = v * vf;
+ } else {
+ out[0] = a * yf;
+ out[1] = a * uf;
+ out[2] = a * vf;
+ }
+}
+
+static void clear_combine_buffer(ShowSpectrumContext *s, int size)
+{
+ int y;
+
+ for (y = 0; y < size; y++) {
+ s->combine_buffer[3 * y ] = 0;
+ s->combine_buffer[3 * y + 1] = 127.5;
+ s->combine_buffer[3 * y + 2] = 127.5;
+ }
+}
+
+static int plot_channel(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
+{
+ ShowSpectrumContext *s = ctx->priv;
+ const int h = s->orientation == VERTICAL ? s->channel_height : s->channel_width;
+ const int ch = jobnr;
+ float *magnitudes = s->magnitudes[ch];
+ float *phases = s->phases[ch];
+ float yf, uf, vf;
+ int y;
+
+ /* decide color range */
+ color_range(s, ch, &yf, &uf, &vf);
+
+ /* draw the channel */
+ for (y = 0; y < h; y++) {
+ int row = (s->mode == COMBINED) ? y : ch * h + y;
+ float *out = &s->color_buffer[ch][3 * row];
+ float a;
+
+ switch (s->data) {
+ case D_MAGNITUDE:
+ /* get magnitude */
+ a = magnitudes[y];
+ break;
+ case D_PHASE:
+ /* get phase */
+ a = phases[y];
+ break;
+ default:
+ av_assert0(0);
+ }
+
+ /* apply scale */
+ switch (s->scale) {
+ case LINEAR:
+ a = av_clipf(a, 0, 1);
+ break;
+ case SQRT:
+ a = av_clipf(sqrt(a), 0, 1);
+ break;
+ case CBRT:
+ a = av_clipf(cbrt(a), 0, 1);
+ break;
+ case FOURTHRT:
+ a = av_clipf(sqrt(sqrt(a)), 0, 1);
+ break;
+ case FIFTHRT:
+ a = av_clipf(pow(a, 0.20), 0, 1);
+ break;
+ case LOG:
+ a = 1 + log10(av_clipd(a, 1e-6, 1)) / 6; // zero = -120dBFS
+ break;
+ default:
+ av_assert0(0);
+ }
+
+ pick_color(s, yf, uf, vf, a, out);
+ }
+
+ return 0;
+}
+
+static int plot_spectrum_column(AVFilterLink *inlink, AVFrame *insamples)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ ShowSpectrumContext *s = ctx->priv;
+ AVFrame *outpicref = s->outpicref;
+ int ret, plane, x, y, z = s->orientation == VERTICAL ? s->h : s->w;
+
+ /* fill a new spectrum column */
+ /* initialize buffer for combining to black */
+ clear_combine_buffer(s, z);
+
+ ctx->internal->execute(ctx, plot_channel, NULL, NULL, s->nb_display_channels);
+
+ for (y = 0; y < z * 3; y++) {
+ for (x = 0; x < s->nb_display_channels; x++) {
+ s->combine_buffer[y] += s->color_buffer[x][y];
+ }
+ }
+
+ av_frame_make_writable(s->outpicref);
+ /* copy to output */
+ if (s->orientation == VERTICAL) {
+ if (s->sliding == SCROLL) {
+ for (plane = 0; plane < 3; plane++) {
+ for (y = 0; y < s->h; y++) {
+ uint8_t *p = outpicref->data[plane] +
+ y * outpicref->linesize[plane];
+ memmove(p, p + 1, s->w - 1);
+ }
+ }
+ s->xpos = s->w - 1;
+ } else if (s->sliding == RSCROLL) {
+ for (plane = 0; plane < 3; plane++) {
+ for (y = 0; y < s->h; y++) {
+ uint8_t *p = outpicref->data[plane] +
+ y * outpicref->linesize[plane];
+ memmove(p + 1, p, s->w - 1);
+ }
+ }
+ s->xpos = 0;
+ }
+ for (plane = 0; plane < 3; plane++) {
+ uint8_t *p = outpicref->data[plane] + s->start_x +
+ (outlink->h - 1 - s->start_y) * outpicref->linesize[plane] +
+ s->xpos;
+ for (y = 0; y < s->h; y++) {
+ *p = lrintf(av_clipf(s->combine_buffer[3 * y + plane], 0, 255));
+ p -= outpicref->linesize[plane];
+ }
+ }
+ } else {
+ if (s->sliding == SCROLL) {
+ for (plane = 0; plane < 3; plane++) {
+ for (y = 1; y < s->h; y++) {
+ memmove(outpicref->data[plane] + (y-1) * outpicref->linesize[plane],
+ outpicref->data[plane] + (y ) * outpicref->linesize[plane],
+ s->w);
+ }
+ }
+ s->xpos = s->h - 1;
+ } else if (s->sliding == RSCROLL) {
+ for (plane = 0; plane < 3; plane++) {
+ for (y = s->h - 1; y >= 1; y--) {
+ memmove(outpicref->data[plane] + (y ) * outpicref->linesize[plane],
+ outpicref->data[plane] + (y-1) * outpicref->linesize[plane],
+ s->w);
+ }
+ }
+ s->xpos = 0;
+ }
+ for (plane = 0; plane < 3; plane++) {
+ uint8_t *p = outpicref->data[plane] + s->start_x +
+ (s->xpos + s->start_y) * outpicref->linesize[plane];
+ for (x = 0; x < s->w; x++) {
+ *p = lrintf(av_clipf(s->combine_buffer[3 * x + plane], 0, 255));
+ p++;
+ }
+ }
+ }
+
+ if (s->sliding != FULLFRAME || s->xpos == 0)
+ outpicref->pts = insamples->pts;
+
+ s->xpos++;
+ if (s->orientation == VERTICAL && s->xpos >= s->w)
+ s->xpos = 0;
+ if (s->orientation == HORIZONTAL && s->xpos >= s->h)
+ s->xpos = 0;
+ if (!s->single_pic && (s->sliding != FULLFRAME || s->xpos == 0)) {
+ ret = ff_filter_frame(outlink, av_frame_clone(s->outpicref));
+ if (ret < 0)
+ return ret;
+ }
+
+ return s->win_size;
+}
+
+#if CONFIG_SHOWSPECTRUM_FILTER
+
+static int request_frame(AVFilterLink *outlink)
+{
+ ShowSpectrumContext *s = outlink->src->priv;
+ AVFilterLink *inlink = outlink->src->inputs[0];
+ unsigned i;
+ int ret;
+
+ ret = ff_request_frame(inlink);
+ if (ret == AVERROR_EOF && s->sliding == FULLFRAME && s->xpos > 0 &&
+ s->outpicref) {
+ if (s->orientation == VERTICAL) {
+ for (i = 0; i < outlink->h; i++) {
+ memset(s->outpicref->data[0] + i * s->outpicref->linesize[0] + s->xpos, 0, outlink->w - s->xpos);
+ memset(s->outpicref->data[1] + i * s->outpicref->linesize[1] + s->xpos, 128, outlink->w - s->xpos);
+ memset(s->outpicref->data[2] + i * s->outpicref->linesize[2] + s->xpos, 128, outlink->w - s->xpos);
+ }
+ } else {
+ for (i = s->xpos; i < outlink->h; i++) {
+ memset(s->outpicref->data[0] + i * s->outpicref->linesize[0], 0, outlink->w);
+ memset(s->outpicref->data[1] + i * s->outpicref->linesize[1], 128, outlink->w);
+ memset(s->outpicref->data[2] + i * s->outpicref->linesize[2], 128, outlink->w);
+ }
+ }
+ ret = ff_filter_frame(outlink, s->outpicref);
+ s->outpicref = NULL;
+ }
+
+ return ret;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
+{
+ AVFilterContext *ctx = inlink->dst;
+ ShowSpectrumContext *s = ctx->priv;
+ AVFrame *fin = NULL;
+ int ret = 0, consumed = 0;
+
+ if (s->pts == AV_NOPTS_VALUE)
+ s->pts = insamples->pts - av_audio_fifo_size(s->fifo);
+
+ av_audio_fifo_write(s->fifo, (void **)insamples->extended_data, insamples->nb_samples);
+ av_frame_free(&insamples);
+ while (av_audio_fifo_size(s->fifo) >= s->win_size) {
+ fin = ff_get_audio_buffer(inlink, s->win_size);
+ if (!fin) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ fin->pts = s->pts + consumed;
+ consumed += s->hop_size;
+ ret = av_audio_fifo_peek(s->fifo, (void **)fin->extended_data, s->win_size);
+ if (ret < 0)
+ goto fail;
+
+ av_assert0(fin->nb_samples == s->win_size);
+
+ ctx->internal->execute(ctx, run_channel_fft, fin, NULL, s->nb_display_channels);
+
+ if (s->data == D_MAGNITUDE)
+ ctx->internal->execute(ctx, calc_channel_magnitudes, NULL, NULL, s->nb_display_channels);
+
+ if (s->data == D_PHASE)
+ ctx->internal->execute(ctx, calc_channel_phases, NULL, NULL, s->nb_display_channels);
+
+ ret = plot_spectrum_column(inlink, fin);
+ av_frame_free(&fin);
+ av_audio_fifo_drain(s->fifo, s->hop_size);
+ if (ret < 0)
+ goto fail;
+ }
+
+fail:
+ s->pts = AV_NOPTS_VALUE;
+ av_frame_free(&fin);
+ return ret;
+}
+
+static const AVFilterPad showspectrum_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad showspectrum_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_avf_showspectrum = {
+ .name = "showspectrum",
+ .description = NULL_IF_CONFIG_SMALL("Convert input audio to a spectrum video output."),
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .priv_size = sizeof(ShowSpectrumContext),
+ .inputs = showspectrum_inputs,
+ .outputs = showspectrum_outputs,
+ .priv_class = &showspectrum_class,
+ .flags = AVFILTER_FLAG_SLICE_THREADS,
+};
+#endif // CONFIG_SHOWSPECTRUM_FILTER
+
+#if CONFIG_SHOWSPECTRUMPIC_FILTER
+
+static const AVOption showspectrumpic_options[] = {
+ { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "4096x2048"}, 0, 0, FLAGS },
+ { "s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "4096x2048"}, 0, 0, FLAGS },
+ { "mode", "set channel display mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=COMBINED}, 0, NB_MODES-1, FLAGS, "mode" },
+ { "combined", "combined mode", 0, AV_OPT_TYPE_CONST, {.i64=COMBINED}, 0, 0, FLAGS, "mode" },
+ { "separate", "separate mode", 0, AV_OPT_TYPE_CONST, {.i64=SEPARATE}, 0, 0, FLAGS, "mode" },
+ { "color", "set channel coloring", OFFSET(color_mode), AV_OPT_TYPE_INT, {.i64=INTENSITY}, 0, NB_CLMODES-1, FLAGS, "color" },
+ { "channel", "separate color for each channel", 0, AV_OPT_TYPE_CONST, {.i64=CHANNEL}, 0, 0, FLAGS, "color" },
+ { "intensity", "intensity based coloring", 0, AV_OPT_TYPE_CONST, {.i64=INTENSITY}, 0, 0, FLAGS, "color" },
+ { "rainbow", "rainbow based coloring", 0, AV_OPT_TYPE_CONST, {.i64=RAINBOW}, 0, 0, FLAGS, "color" },
+ { "moreland", "moreland based coloring", 0, AV_OPT_TYPE_CONST, {.i64=MORELAND}, 0, 0, FLAGS, "color" },
+ { "nebulae", "nebulae based coloring", 0, AV_OPT_TYPE_CONST, {.i64=NEBULAE}, 0, 0, FLAGS, "color" },
+ { "fire", "fire based coloring", 0, AV_OPT_TYPE_CONST, {.i64=FIRE}, 0, 0, FLAGS, "color" },
+ { "fiery", "fiery based coloring", 0, AV_OPT_TYPE_CONST, {.i64=FIERY}, 0, 0, FLAGS, "color" },
+ { "fruit", "fruit based coloring", 0, AV_OPT_TYPE_CONST, {.i64=FRUIT}, 0, 0, FLAGS, "color" },
+ { "cool", "cool based coloring", 0, AV_OPT_TYPE_CONST, {.i64=COOL}, 0, 0, FLAGS, "color" },
+ { "scale", "set display scale", OFFSET(scale), AV_OPT_TYPE_INT, {.i64=LOG}, 0, NB_SCALES-1, FLAGS, "scale" },
+ { "lin", "linear", 0, AV_OPT_TYPE_CONST, {.i64=LINEAR}, 0, 0, FLAGS, "scale" },
+ { "sqrt", "square root", 0, AV_OPT_TYPE_CONST, {.i64=SQRT}, 0, 0, FLAGS, "scale" },
+ { "cbrt", "cubic root", 0, AV_OPT_TYPE_CONST, {.i64=CBRT}, 0, 0, FLAGS, "scale" },
+ { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=LOG}, 0, 0, FLAGS, "scale" },
+ { "4thrt","4th root", 0, AV_OPT_TYPE_CONST, {.i64=FOURTHRT}, 0, 0, FLAGS, "scale" },
+ { "5thrt","5th root", 0, AV_OPT_TYPE_CONST, {.i64=FIFTHRT}, 0, 0, FLAGS, "scale" },
+ { "saturation", "color saturation multiplier", OFFSET(saturation), AV_OPT_TYPE_FLOAT, {.dbl = 1}, -10, 10, FLAGS },
+ { "win_func", "set window function", OFFSET(win_func), AV_OPT_TYPE_INT, {.i64 = WFUNC_HANNING}, 0, NB_WFUNC-1, FLAGS, "win_func" },
+ { "rect", "Rectangular", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_RECT}, 0, 0, FLAGS, "win_func" },
+ { "bartlett", "Bartlett", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BARTLETT}, 0, 0, FLAGS, "win_func" },
+ { "hann", "Hann", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HANNING}, 0, 0, FLAGS, "win_func" },
+ { "hanning", "Hanning", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HANNING}, 0, 0, FLAGS, "win_func" },
+ { "hamming", "Hamming", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HAMMING}, 0, 0, FLAGS, "win_func" },
+ { "blackman", "Blackman", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BLACKMAN}, 0, 0, FLAGS, "win_func" },
+ { "welch", "Welch", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_WELCH}, 0, 0, FLAGS, "win_func" },
+ { "flattop", "Flat-top", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_FLATTOP}, 0, 0, FLAGS, "win_func" },
+ { "bharris", "Blackman-Harris", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BHARRIS}, 0, 0, FLAGS, "win_func" },
+ { "bnuttall", "Blackman-Nuttall", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BNUTTALL}, 0, 0, FLAGS, "win_func" },
+ { "bhann", "Bartlett-Hann", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BHANN}, 0, 0, FLAGS, "win_func" },
+ { "sine", "Sine", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_SINE}, 0, 0, FLAGS, "win_func" },
+ { "nuttall", "Nuttall", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_NUTTALL}, 0, 0, FLAGS, "win_func" },
+ { "lanczos", "Lanczos", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_LANCZOS}, 0, 0, FLAGS, "win_func" },
+ { "gauss", "Gauss", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_GAUSS}, 0, 0, FLAGS, "win_func" },
+ { "tukey", "Tukey", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_TUKEY}, 0, 0, FLAGS, "win_func" },
+ { "dolph", "Dolph-Chebyshev", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_DOLPH}, 0, 0, FLAGS, "win_func" },
+ { "cauchy", "Cauchy", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_CAUCHY}, 0, 0, FLAGS, "win_func" },
+ { "parzen", "Parzen", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_PARZEN}, 0, 0, FLAGS, "win_func" },
+ { "poisson", "Poisson", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_POISSON}, 0, 0, FLAGS, "win_func" },
+ { "orientation", "set orientation", OFFSET(orientation), AV_OPT_TYPE_INT, {.i64=VERTICAL}, 0, NB_ORIENTATIONS-1, FLAGS, "orientation" },
+ { "vertical", NULL, 0, AV_OPT_TYPE_CONST, {.i64=VERTICAL}, 0, 0, FLAGS, "orientation" },
+ { "horizontal", NULL, 0, AV_OPT_TYPE_CONST, {.i64=HORIZONTAL}, 0, 0, FLAGS, "orientation" },
+ { "gain", "set scale gain", OFFSET(gain), AV_OPT_TYPE_FLOAT, {.dbl = 1}, 0, 128, FLAGS },
+ { "legend", "draw legend", OFFSET(legend), AV_OPT_TYPE_BOOL, {.i64 = 1}, 0, 1, FLAGS },
+ { "rotation", "color rotation", OFFSET(rotation), AV_OPT_TYPE_FLOAT, {.dbl = 0}, -1, 1, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(showspectrumpic);
+
+static void drawtext(AVFrame *pic, int x, int y, const char *txt, int o)
+{
+ const uint8_t *font;
+ int font_height;
+ int i;
+
+ font = avpriv_cga_font, font_height = 8;
+
+ for (i = 0; txt[i]; i++) {
+ int char_y, mask;
+
+ if (o) {
+ for (char_y = font_height - 1; char_y >= 0; char_y--) {
+ uint8_t *p = pic->data[0] + (y + i * 10) * pic->linesize[0] + x;
+ for (mask = 0x80; mask; mask >>= 1) {
+ if (font[txt[i] * font_height + font_height - 1 - char_y] & mask)
+ p[char_y] = ~p[char_y];
+ p += pic->linesize[0];
+ }
+ }
+ } else {
+ uint8_t *p = pic->data[0] + y*pic->linesize[0] + (x + i*8);
+ for (char_y = 0; char_y < font_height; char_y++) {
+ for (mask = 0x80; mask; mask >>= 1) {
+ if (font[txt[i] * font_height + char_y] & mask)
+ *p = ~(*p);
+ p++;
+ }
+ p += pic->linesize[0] - 8;
+ }
+ }
+ }
+}
+
+static int showspectrumpic_request_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ ShowSpectrumContext *s = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
+ int ret, samples;
+
+ ret = ff_request_frame(inlink);
+ samples = av_audio_fifo_size(s->fifo);
+ if (ret == AVERROR_EOF && s->outpicref && samples > 0) {
+ int consumed = 0;
+ int y, x = 0, sz = s->orientation == VERTICAL ? s->w : s->h;
+ int ch, spf, spb;
+ AVFrame *fin;
+
+ spf = s->win_size * (samples / ((s->win_size * sz) * ceil(samples / (float)(s->win_size * sz))));
+ spf = FFMAX(1, spf);
+
+ spb = (samples / (spf * sz)) * spf;
+
+ fin = ff_get_audio_buffer(inlink, s->win_size);
+ if (!fin)
+ return AVERROR(ENOMEM);
+
+ while (x < sz) {
+ ret = av_audio_fifo_peek(s->fifo, (void **)fin->extended_data, s->win_size);
+ if (ret < 0) {
+ av_frame_free(&fin);
+ return ret;
+ }
+
+ av_audio_fifo_drain(s->fifo, spf);
+
+ if (ret < s->win_size) {
+ for (ch = 0; ch < s->nb_display_channels; ch++) {
+ memset(fin->extended_data[ch] + ret * sizeof(float), 0,
+ (s->win_size - ret) * sizeof(float));
+ }
+ }
+
+ ctx->internal->execute(ctx, run_channel_fft, fin, NULL, s->nb_display_channels);
+ acalc_magnitudes(s);
+
+ consumed += spf;
+ if (consumed >= spb) {
+ int h = s->orientation == VERTICAL ? s->h : s->w;
+
+ scale_magnitudes(s, 1. / (consumed / spf));
+ plot_spectrum_column(inlink, fin);
+ consumed = 0;
+ x++;
+ for (ch = 0; ch < s->nb_display_channels; ch++)
+ memset(s->magnitudes[ch], 0, h * sizeof(float));
+ }
+ }
+
+ av_frame_free(&fin);
+ s->outpicref->pts = 0;
+
+ if (s->legend) {
+ int multi = (s->mode == SEPARATE && s->color_mode == CHANNEL);
+ float spp = samples / (float)sz;
+ uint8_t *dst;
+
+ drawtext(s->outpicref, 2, outlink->h - 10, "CREATED BY LIBAVFILTER", 0);
+
+ dst = s->outpicref->data[0] + (s->start_y - 1) * s->outpicref->linesize[0] + s->start_x - 1;
+ for (x = 0; x < s->w + 1; x++)
+ dst[x] = 200;
+ dst = s->outpicref->data[0] + (s->start_y + s->h) * s->outpicref->linesize[0] + s->start_x - 1;
+ for (x = 0; x < s->w + 1; x++)
+ dst[x] = 200;
+ for (y = 0; y < s->h + 2; y++) {
+ dst = s->outpicref->data[0] + (y + s->start_y - 1) * s->outpicref->linesize[0];
+ dst[s->start_x - 1] = 200;
+ dst[s->start_x + s->w] = 200;
+ }
+ if (s->orientation == VERTICAL) {
+ int h = s->mode == SEPARATE ? s->h / s->nb_display_channels : s->h;
+ for (ch = 0; ch < (s->mode == SEPARATE ? s->nb_display_channels : 1); ch++) {
+ for (y = 0; y < h; y += 20) {
+ dst = s->outpicref->data[0] + (s->start_y + h * (ch + 1) - y - 1) * s->outpicref->linesize[0];
+ dst[s->start_x - 2] = 200;
+ dst[s->start_x + s->w + 1] = 200;
+ }
+ for (y = 0; y < h; y += 40) {
+ dst = s->outpicref->data[0] + (s->start_y + h * (ch + 1) - y - 1) * s->outpicref->linesize[0];
+ dst[s->start_x - 3] = 200;
+ dst[s->start_x + s->w + 2] = 200;
+ }
+ dst = s->outpicref->data[0] + (s->start_y - 2) * s->outpicref->linesize[0] + s->start_x;
+ for (x = 0; x < s->w; x+=40)
+ dst[x] = 200;
+ dst = s->outpicref->data[0] + (s->start_y - 3) * s->outpicref->linesize[0] + s->start_x;
+ for (x = 0; x < s->w; x+=80)
+ dst[x] = 200;
+ dst = s->outpicref->data[0] + (s->h + s->start_y + 1) * s->outpicref->linesize[0] + s->start_x;
+ for (x = 0; x < s->w; x+=40) {
+ dst[x] = 200;
+ }
+ dst = s->outpicref->data[0] + (s->h + s->start_y + 2) * s->outpicref->linesize[0] + s->start_x;
+ for (x = 0; x < s->w; x+=80) {
+ dst[x] = 200;
+ }
+ for (y = 0; y < h; y += 40) {
+ float hertz = y * (inlink->sample_rate / 2) / (float)(1 << (int)ceil(log2(h)));
+ char *units;
+
+ if (hertz == 0)
+ units = av_asprintf("DC");
+ else
+ units = av_asprintf("%.2f", hertz);
+ if (!units)
+ return AVERROR(ENOMEM);
+
+ drawtext(s->outpicref, s->start_x - 8 * strlen(units) - 4, h * (ch + 1) + s->start_y - y - 4, units, 0);
+ av_free(units);
+ }
+ }
+
+ for (x = 0; x < s->w; x+=80) {
+ float seconds = x * spp / inlink->sample_rate;
+ char *units;
+
+ if (x == 0)
+ units = av_asprintf("0");
+ else if (log10(seconds) > 6)
+ units = av_asprintf("%.2fh", seconds / (60 * 60));
+ else if (log10(seconds) > 3)
+ units = av_asprintf("%.2fm", seconds / 60);
+ else
+ units = av_asprintf("%.2fs", seconds);
+ if (!units)
+ return AVERROR(ENOMEM);
+
+ drawtext(s->outpicref, s->start_x + x - 4 * strlen(units), s->h + s->start_y + 6, units, 0);
+ drawtext(s->outpicref, s->start_x + x - 4 * strlen(units), s->start_y - 12, units, 0);
+ av_free(units);
+ }
+
+ drawtext(s->outpicref, outlink->w / 2 - 4 * 4, outlink->h - s->start_y / 2, "TIME", 0);
+ drawtext(s->outpicref, s->start_x / 7, outlink->h / 2 - 14 * 4, "FREQUENCY (Hz)", 1);
+ } else {
+ int w = s->mode == SEPARATE ? s->w / s->nb_display_channels : s->w;
+ for (y = 0; y < s->h; y += 20) {
+ dst = s->outpicref->data[0] + (s->start_y + y) * s->outpicref->linesize[0];
+ dst[s->start_x - 2] = 200;
+ dst[s->start_x + s->w + 1] = 200;
+ }
+ for (y = 0; y < s->h; y += 40) {
+ dst = s->outpicref->data[0] + (s->start_y + y) * s->outpicref->linesize[0];
+ dst[s->start_x - 3] = 200;
+ dst[s->start_x + s->w + 2] = 200;
+ }
+ for (ch = 0; ch < (s->mode == SEPARATE ? s->nb_display_channels : 1); ch++) {
+ dst = s->outpicref->data[0] + (s->start_y - 2) * s->outpicref->linesize[0] + s->start_x + w * ch;
+ for (x = 0; x < w; x+=40)
+ dst[x] = 200;
+ dst = s->outpicref->data[0] + (s->start_y - 3) * s->outpicref->linesize[0] + s->start_x + w * ch;
+ for (x = 0; x < w; x+=80)
+ dst[x] = 200;
+ dst = s->outpicref->data[0] + (s->h + s->start_y + 1) * s->outpicref->linesize[0] + s->start_x + w * ch;
+ for (x = 0; x < w; x+=40) {
+ dst[x] = 200;
+ }
+ dst = s->outpicref->data[0] + (s->h + s->start_y + 2) * s->outpicref->linesize[0] + s->start_x + w * ch;
+ for (x = 0; x < w; x+=80) {
+ dst[x] = 200;
+ }
+ for (x = 0; x < w; x += 80) {
+ float hertz = x * (inlink->sample_rate / 2) / (float)(1 << (int)ceil(log2(w)));
+ char *units;
+
+ if (hertz == 0)
+ units = av_asprintf("DC");
+ else
+ units = av_asprintf("%.2f", hertz);
+ if (!units)
+ return AVERROR(ENOMEM);
+
+ drawtext(s->outpicref, s->start_x - 4 * strlen(units) + x + w * ch, s->start_y - 12, units, 0);
+ drawtext(s->outpicref, s->start_x - 4 * strlen(units) + x + w * ch, s->h + s->start_y + 6, units, 0);
+ av_free(units);
+ }
+ }
+ for (y = 0; y < s->h; y+=40) {
+ float seconds = y * spp / inlink->sample_rate;
+ char *units;
+
+ if (x == 0)
+ units = av_asprintf("0");
+ else if (log10(seconds) > 6)
+ units = av_asprintf("%.2fh", seconds / (60 * 60));
+ else if (log10(seconds) > 3)
+ units = av_asprintf("%.2fm", seconds / 60);
+ else
+ units = av_asprintf("%.2fs", seconds);
+ if (!units)
+ return AVERROR(ENOMEM);
+
+ drawtext(s->outpicref, s->start_x - 8 * strlen(units) - 4, s->start_y + y - 4, units, 0);
+ av_free(units);
+ }
+ drawtext(s->outpicref, s->start_x / 7, outlink->h / 2 - 4 * 4, "TIME", 1);
+ drawtext(s->outpicref, outlink->w / 2 - 14 * 4, outlink->h - s->start_y / 2, "FREQUENCY (Hz)", 0);
+ }
+
+ for (ch = 0; ch < (multi ? s->nb_display_channels : 1); ch++) {
+ int h = multi ? s->h / s->nb_display_channels : s->h;
+
+ for (y = 0; y < h; y++) {
+ float out[3] = { 0., 127.5, 127.5};
+ int chn;
+
+ for (chn = 0; chn < (s->mode == SEPARATE ? 1 : s->nb_display_channels); chn++) {
+ float yf, uf, vf;
+ int channel = (multi) ? s->nb_display_channels - ch - 1 : chn;
+ float lout[3];
+
+ color_range(s, channel, &yf, &uf, &vf);
+ pick_color(s, yf, uf, vf, y / (float)h, lout);
+ out[0] += lout[0];
+ out[1] += lout[1];
+ out[2] += lout[2];
+ }
+ memset(s->outpicref->data[0]+(s->start_y + h * (ch + 1) - y - 1) * s->outpicref->linesize[0] + s->w + s->start_x + 20, av_clip_uint8(out[0]), 10);
+ memset(s->outpicref->data[1]+(s->start_y + h * (ch + 1) - y - 1) * s->outpicref->linesize[1] + s->w + s->start_x + 20, av_clip_uint8(out[1]), 10);
+ memset(s->outpicref->data[2]+(s->start_y + h * (ch + 1) - y - 1) * s->outpicref->linesize[2] + s->w + s->start_x + 20, av_clip_uint8(out[2]), 10);
+ }
+
+ for (y = 0; ch == 0 && y < h; y += h / 10) {
+ float value = 120.0 * log10(1. - y / (float)h);
+ char *text;
+
+ if (value < -120)
+ break;
+ text = av_asprintf("%.0f dB", value);
+ if (!text)
+ continue;
+ drawtext(s->outpicref, s->w + s->start_x + 35, s->start_y + y - 5, text, 0);
+ av_free(text);
+ }
+ }
+ }
+
+ ret = ff_filter_frame(outlink, s->outpicref);
+ s->outpicref = NULL;
+ }
+
+ return ret;
+}
+
+static int showspectrumpic_filter_frame(AVFilterLink *inlink, AVFrame *insamples)
+{
+ AVFilterContext *ctx = inlink->dst;
+ ShowSpectrumContext *s = ctx->priv;
+ int ret;
+
+ ret = av_audio_fifo_write(s->fifo, (void **)insamples->extended_data, insamples->nb_samples);
+ av_frame_free(&insamples);
+ return ret;
+}
+
+static const AVFilterPad showspectrumpic_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = showspectrumpic_filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad showspectrumpic_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ .request_frame = showspectrumpic_request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_avf_showspectrumpic = {
+ .name = "showspectrumpic",
+ .description = NULL_IF_CONFIG_SMALL("Convert input audio to a spectrum video output single picture."),
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .priv_size = sizeof(ShowSpectrumContext),
+ .inputs = showspectrumpic_inputs,
+ .outputs = showspectrumpic_outputs,
+ .priv_class = &showspectrumpic_class,
+ .flags = AVFILTER_FLAG_SLICE_THREADS,
+};
+
+#endif // CONFIG_SHOWSPECTRUMPIC_FILTER
diff --git a/libavfilter/avf_showvolume.c b/libavfilter/avf_showvolume.c
new file mode 100644
index 0000000000..897e5709b8
--- /dev/null
+++ b/libavfilter/avf_showvolume.c
@@ -0,0 +1,364 @@
+/*
+ * Copyright (c) 2015 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/avstring.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/eval.h"
+#include "libavutil/intreadwrite.h"
+#include "libavutil/opt.h"
+#include "libavutil/parseutils.h"
+#include "libavutil/xga_font_data.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "audio.h"
+#include "video.h"
+#include "internal.h"
+
+static const char *const var_names[] = { "VOLUME", "CHANNEL", "PEAK", NULL };
+enum { VAR_VOLUME, VAR_CHANNEL, VAR_PEAK, VAR_VARS_NB };
+
+typedef struct ShowVolumeContext {
+ const AVClass *class;
+ int w, h;
+ int b;
+ double f;
+ AVRational frame_rate;
+ char *color;
+ int orientation;
+ int step;
+
+ AVFrame *out;
+ AVExpr *c_expr;
+ int draw_text;
+ int draw_volume;
+ double *values;
+ uint32_t *color_lut;
+} ShowVolumeContext;
+
+#define OFFSET(x) offsetof(ShowVolumeContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption showvolume_options[] = {
+ { "rate", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str="25"}, 0, INT_MAX, FLAGS },
+ { "r", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str="25"}, 0, INT_MAX, FLAGS },
+ { "b", "set border width", OFFSET(b), AV_OPT_TYPE_INT, {.i64=1}, 0, 5, FLAGS },
+ { "w", "set channel width", OFFSET(w), AV_OPT_TYPE_INT, {.i64=400}, 80, 8192, FLAGS },
+ { "h", "set channel height", OFFSET(h), AV_OPT_TYPE_INT, {.i64=20}, 1, 900, FLAGS },
+ { "f", "set fade", OFFSET(f), AV_OPT_TYPE_DOUBLE, {.dbl=0.95}, 0.001, 1, FLAGS },
+ { "c", "set volume color expression", OFFSET(color), AV_OPT_TYPE_STRING, {.str="PEAK*255+floor((1-PEAK)*255)*256+0xff000000"}, 0, 0, FLAGS },
+ { "t", "display channel names", OFFSET(draw_text), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, FLAGS },
+ { "v", "display volume value", OFFSET(draw_volume), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, FLAGS },
+ { "o", "set orientation", OFFSET(orientation), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS, "orientation" },
+ { "h", "horizontal", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "orientation" },
+ { "v", "vertical", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "orientation" },
+ { "s", "set step size", OFFSET(step), AV_OPT_TYPE_INT, {.i64=0}, 0, 5, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(showvolume);
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ ShowVolumeContext *s = ctx->priv;
+ int ret;
+
+ if (s->color) {
+ ret = av_expr_parse(&s->c_expr, s->color, var_names,
+ NULL, NULL, NULL, NULL, 0, ctx);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *formats = NULL;
+ AVFilterChannelLayouts *layouts = NULL;
+ AVFilterLink *inlink = ctx->inputs[0];
+ AVFilterLink *outlink = ctx->outputs[0];
+ static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_NONE };
+ static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_RGBA, AV_PIX_FMT_NONE };
+ int ret;
+
+ formats = ff_make_format_list(sample_fmts);
+ if ((ret = ff_formats_ref(formats, &inlink->out_formats)) < 0)
+ return ret;
+
+ layouts = ff_all_channel_counts();
+ if ((ret = ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts)) < 0)
+ return ret;
+
+ formats = ff_all_samplerates();
+ if ((ret = ff_formats_ref(formats, &inlink->out_samplerates)) < 0)
+ return ret;
+
+ formats = ff_make_format_list(pix_fmts);
+ if ((ret = ff_formats_ref(formats, &outlink->in_formats)) < 0)
+ return ret;
+
+ return 0;
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ ShowVolumeContext *s = ctx->priv;
+ int nb_samples;
+
+ nb_samples = FFMAX(1024, ((double)inlink->sample_rate / av_q2d(s->frame_rate)) + 0.5);
+ inlink->partial_buf_size =
+ inlink->min_samples =
+ inlink->max_samples = nb_samples;
+ s->values = av_calloc(inlink->channels * VAR_VARS_NB, sizeof(double));
+ if (!s->values)
+ return AVERROR(ENOMEM);
+
+ s->color_lut = av_calloc(s->w, sizeof(*s->color_lut) * inlink->channels);
+ if (!s->color_lut)
+ return AVERROR(ENOMEM);
+
+ return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ ShowVolumeContext *s = outlink->src->priv;
+ AVFilterLink *inlink = outlink->src->inputs[0];
+ int ch;
+
+ if (s->orientation) {
+ outlink->h = s->w;
+ outlink->w = s->h * inlink->channels + (inlink->channels - 1) * s->b;
+ } else {
+ outlink->w = s->w;
+ outlink->h = s->h * inlink->channels + (inlink->channels - 1) * s->b;
+ }
+
+ outlink->sample_aspect_ratio = (AVRational){1,1};
+ outlink->frame_rate = s->frame_rate;
+
+ for (ch = 0; ch < inlink->channels; ch++) {
+ int i;
+
+ for (i = 0; i < s->w; i++) {
+ float max = i / (float)(s->w - 1);
+
+ s->values[ch * VAR_VARS_NB + VAR_PEAK] = max;
+ s->values[ch * VAR_VARS_NB + VAR_VOLUME] = 20.0 * log10(max);
+ s->values[ch * VAR_VARS_NB + VAR_CHANNEL] = ch;
+ s->color_lut[ch * s->w + i] = av_expr_eval(s->c_expr, &s->values[ch * VAR_VARS_NB], NULL);
+ }
+ }
+
+ return 0;
+}
+
+static void drawtext(AVFrame *pic, int x, int y, const char *txt, int o)
+{
+ const uint8_t *font;
+ int font_height;
+ int i;
+
+ font = avpriv_cga_font, font_height = 8;
+
+ for (i = 0; txt[i]; i++) {
+ int char_y, mask;
+
+ if (o) {
+ for (char_y = font_height - 1; char_y >= 0; char_y--) {
+ uint8_t *p = pic->data[0] + (y + i * 10) * pic->linesize[0] + x * 4;
+ for (mask = 0x80; mask; mask >>= 1) {
+ if (font[txt[i] * font_height + font_height - 1 - char_y] & mask)
+ AV_WN32(&p[char_y * 4], ~AV_RN32(&p[char_y * 4]));
+ p += pic->linesize[0];
+ }
+ }
+ } else {
+ uint8_t *p = pic->data[0] + y * pic->linesize[0] + (x + i * 8) * 4;
+ for (char_y = 0; char_y < font_height; char_y++) {
+ for (mask = 0x80; mask; mask >>= 1) {
+ if (font[txt[i] * font_height + char_y] & mask)
+ AV_WN32(p, ~AV_RN32(p));
+ p += 4;
+ }
+ p += pic->linesize[0] - 8 * 4;
+ }
+ }
+ }
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ ShowVolumeContext *s = ctx->priv;
+ const int step = s->step;
+ int c, i, j, k;
+ AVFrame *out;
+
+ if (!s->out || s->out->width != outlink->w ||
+ s->out->height != outlink->h) {
+ av_frame_free(&s->out);
+ s->out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!s->out) {
+ av_frame_free(&insamples);
+ return AVERROR(ENOMEM);
+ }
+
+ for (i = 0; i < outlink->h; i++)
+ memset(s->out->data[0] + i * s->out->linesize[0], 0, outlink->w * 4);
+ }
+ s->out->pts = insamples->pts;
+
+ for (j = 0; j < outlink->h; j++) {
+ uint8_t *dst = s->out->data[0] + j * s->out->linesize[0];
+ for (k = 0; k < outlink->w; k++) {
+ dst[k * 4 + 0] = FFMAX(dst[k * 4 + 0] * s->f, 0);
+ dst[k * 4 + 1] = FFMAX(dst[k * 4 + 1] * s->f, 0);
+ dst[k * 4 + 2] = FFMAX(dst[k * 4 + 2] * s->f, 0);
+ dst[k * 4 + 3] = FFMAX(dst[k * 4 + 3] * s->f, 0);
+ }
+ }
+
+ if (s->orientation) {
+ for (c = 0; c < inlink->channels; c++) {
+ float *src = (float *)insamples->extended_data[c];
+ uint32_t *lut = s->color_lut + s->w * c;
+ float max = 0;
+
+ for (i = 0; i < insamples->nb_samples; i++)
+ max = FFMAX(max, src[i]);
+
+ s->values[c * VAR_VARS_NB + VAR_VOLUME] = 20.0 * log10(max);
+ max = av_clipf(max, 0, 1);
+
+ for (j = outlink->h - outlink->h * max; j < s->w; j++) {
+ uint8_t *dst = s->out->data[0] + j * s->out->linesize[0] + c * (s->b + s->h) * 4;
+ for (k = 0; k < s->h; k++) {
+ AV_WN32A(&dst[k * 4], lut[s->w - j - 1]);
+ if (j & step)
+ j += step;
+ }
+ }
+
+ if (s->h >= 8 && s->draw_text) {
+ const char *channel_name = av_get_channel_name(av_channel_layout_extract_channel(insamples->channel_layout, c));
+ if (!channel_name)
+ continue;
+ drawtext(s->out, c * (s->h + s->b) + (s->h - 10) / 2, outlink->h - 35, channel_name, 1);
+ }
+ }
+ } else {
+ for (c = 0; c < inlink->channels; c++) {
+ float *src = (float *)insamples->extended_data[c];
+ uint32_t *lut = s->color_lut + s->w * c;
+ float max = 0;
+
+ for (i = 0; i < insamples->nb_samples; i++)
+ max = FFMAX(max, src[i]);
+
+ s->values[c * VAR_VARS_NB + VAR_VOLUME] = 20.0 * log10(max);
+ max = av_clipf(max, 0, 1);
+
+ for (j = 0; j < s->h; j++) {
+ uint8_t *dst = s->out->data[0] + (c * s->h + c * s->b + j) * s->out->linesize[0];
+
+ for (k = 0; k < s->w * max; k++) {
+ AV_WN32A(dst + k * 4, lut[k]);
+ if (k & step)
+ k += step;
+ }
+ }
+
+ if (s->h >= 8 && s->draw_text) {
+ const char *channel_name = av_get_channel_name(av_channel_layout_extract_channel(insamples->channel_layout, c));
+ if (!channel_name)
+ continue;
+ drawtext(s->out, 2, c * (s->h + s->b) + (s->h - 8) / 2, channel_name, 0);
+ }
+ }
+ }
+
+ av_frame_free(&insamples);
+ out = av_frame_clone(s->out);
+ if (!out)
+ return AVERROR(ENOMEM);
+ av_frame_make_writable(out);
+
+ for (c = 0; c < inlink->channels && s->draw_volume; c++) {
+ char buf[16];
+ if (s->orientation) {
+ if (s->h >= 8) {
+ snprintf(buf, sizeof(buf), "%.2f", s->values[c * VAR_VARS_NB + VAR_VOLUME]);
+ drawtext(out, c * (s->h + s->b) + (s->h - 8) / 2, 2, buf, 1);
+ }
+ } else {
+ if (s->h >= 8) {
+ snprintf(buf, sizeof(buf), "%.2f", s->values[c * VAR_VARS_NB + VAR_VOLUME]);
+ drawtext(out, FFMAX(0, s->w - 8 * (int)strlen(buf)), c * (s->h + s->b) + (s->h - 8) / 2, buf, 0);
+ }
+ }
+ }
+
+ return ff_filter_frame(outlink, out);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ ShowVolumeContext *s = ctx->priv;
+
+ av_frame_free(&s->out);
+ av_expr_free(s->c_expr);
+ av_freep(&s->values);
+ av_freep(&s->color_lut);
+}
+
+static const AVFilterPad showvolume_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad showvolume_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ },
+ { NULL }
+};
+
+AVFilter ff_avf_showvolume = {
+ .name = "showvolume",
+ .description = NULL_IF_CONFIG_SMALL("Convert input audio volume to video output."),
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .priv_size = sizeof(ShowVolumeContext),
+ .inputs = showvolume_inputs,
+ .outputs = showvolume_outputs,
+ .priv_class = &showvolume_class,
+};
diff --git a/libavfilter/avf_showwaves.c b/libavfilter/avf_showwaves.c
new file mode 100644
index 0000000000..aadc5c1c2a
--- /dev/null
+++ b/libavfilter/avf_showwaves.c
@@ -0,0 +1,772 @@
+/*
+ * Copyright (c) 2012 Stefano Sabatini
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * audio to video multimedia filter
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/avstring.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/opt.h"
+#include "libavutil/parseutils.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "audio.h"
+#include "video.h"
+#include "internal.h"
+
+enum ShowWavesMode {
+ MODE_POINT,
+ MODE_LINE,
+ MODE_P2P,
+ MODE_CENTERED_LINE,
+ MODE_NB,
+};
+
+enum ShowWavesScale {
+ SCALE_LIN,
+ SCALE_LOG,
+ SCALE_SQRT,
+ SCALE_CBRT,
+ SCALE_NB,
+};
+
+struct frame_node {
+ AVFrame *frame;
+ struct frame_node *next;
+};
+
+typedef struct {
+ const AVClass *class;
+ int w, h;
+ AVRational rate;
+ char *colors;
+ int buf_idx;
+ int16_t *buf_idy; /* y coordinate of previous sample for each channel */
+ AVFrame *outpicref;
+ int n;
+ int pixstep;
+ int sample_count_mod;
+ int mode; ///< ShowWavesMode
+ int scale; ///< ShowWavesScale
+ int split_channels;
+ uint8_t *fg;
+
+ int (*get_h)(int16_t sample, int height);
+ void (*draw_sample)(uint8_t *buf, int height, int linesize,
+ int16_t *prev_y, const uint8_t color[4], int h);
+
+ /* single picture */
+ int single_pic;
+ struct frame_node *audio_frames;
+ struct frame_node *last_frame;
+ int64_t total_samples;
+ int64_t *sum; /* abs sum of the samples per channel */
+} ShowWavesContext;
+
+#define OFFSET(x) offsetof(ShowWavesContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption showwaves_options[] = {
+ { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
+ { "s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
+ { "mode", "select display mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=MODE_POINT}, 0, MODE_NB-1, FLAGS, "mode"},
+ { "point", "draw a point for each sample", 0, AV_OPT_TYPE_CONST, {.i64=MODE_POINT}, .flags=FLAGS, .unit="mode"},
+ { "line", "draw a line for each sample", 0, AV_OPT_TYPE_CONST, {.i64=MODE_LINE}, .flags=FLAGS, .unit="mode"},
+ { "p2p", "draw a line between samples", 0, AV_OPT_TYPE_CONST, {.i64=MODE_P2P}, .flags=FLAGS, .unit="mode"},
+ { "cline", "draw a centered line for each sample", 0, AV_OPT_TYPE_CONST, {.i64=MODE_CENTERED_LINE}, .flags=FLAGS, .unit="mode"},
+ { "n", "set how many samples to show in the same point", OFFSET(n), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, FLAGS },
+ { "rate", "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, FLAGS },
+ { "r", "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, FLAGS },
+ { "split_channels", "draw channels separately", OFFSET(split_channels), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS },
+ { "colors", "set channels colors", OFFSET(colors), AV_OPT_TYPE_STRING, {.str = "red|green|blue|yellow|orange|lime|pink|magenta|brown" }, 0, 0, FLAGS },
+ { "scale", "set amplitude scale", OFFSET(scale), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, SCALE_NB-1, FLAGS, .unit="scale" },
+ { "lin", "linear", 0, AV_OPT_TYPE_CONST, {.i64=SCALE_LIN}, .flags=FLAGS, .unit="scale"},
+ { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=SCALE_LOG}, .flags=FLAGS, .unit="scale"},
+ { "sqrt", "square root", 0, AV_OPT_TYPE_CONST, {.i64=SCALE_SQRT}, .flags=FLAGS, .unit="scale"},
+ { "cbrt", "cubic root", 0, AV_OPT_TYPE_CONST, {.i64=SCALE_CBRT}, .flags=FLAGS, .unit="scale"},
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(showwaves);
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ ShowWavesContext *showwaves = ctx->priv;
+
+ av_frame_free(&showwaves->outpicref);
+ av_freep(&showwaves->buf_idy);
+ av_freep(&showwaves->fg);
+
+ if (showwaves->single_pic) {
+ struct frame_node *node = showwaves->audio_frames;
+ while (node) {
+ struct frame_node *tmp = node;
+
+ node = node->next;
+ av_frame_free(&tmp->frame);
+ av_freep(&tmp);
+ }
+ av_freep(&showwaves->sum);
+ showwaves->last_frame = NULL;
+ }
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *formats = NULL;
+ AVFilterChannelLayouts *layouts = NULL;
+ AVFilterLink *inlink = ctx->inputs[0];
+ AVFilterLink *outlink = ctx->outputs[0];
+ static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE };
+ static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_RGBA, AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE };
+ int ret;
+
+ /* set input audio formats */
+ formats = ff_make_format_list(sample_fmts);
+ if ((ret = ff_formats_ref(formats, &inlink->out_formats)) < 0)
+ return ret;
+
+ layouts = ff_all_channel_layouts();
+ if ((ret = ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts)) < 0)
+ return ret;
+
+ formats = ff_all_samplerates();
+ if ((ret = ff_formats_ref(formats, &inlink->out_samplerates)) < 0)
+ return ret;
+
+ /* set output video format */
+ formats = ff_make_format_list(pix_fmts);
+ if ((ret = ff_formats_ref(formats, &outlink->in_formats)) < 0)
+ return ret;
+
+ return 0;
+}
+
+static int get_lin_h(int16_t sample, int height)
+{
+ return height/2 - av_rescale(sample, height/2, INT16_MAX);
+}
+
+static int get_lin_h2(int16_t sample, int height)
+{
+ return av_rescale(FFABS(sample), height, INT16_MAX);
+}
+
+static int get_log_h(int16_t sample, int height)
+{
+ return height/2 - FFSIGN(sample) * (log10(1 + FFABS(sample)) * (height/2) / log10(1 + INT16_MAX));
+}
+
+static int get_log_h2(int16_t sample, int height)
+{
+ return log10(1 + FFABS(sample)) * height / log10(1 + INT16_MAX);
+}
+
+static int get_sqrt_h(int16_t sample, int height)
+{
+ return height/2 - FFSIGN(sample) * (sqrt(FFABS(sample)) * (height/2) / sqrt(INT16_MAX));
+}
+
+static int get_sqrt_h2(int16_t sample, int height)
+{
+ return sqrt(FFABS(sample)) * height / sqrt(INT16_MAX);
+}
+
+static int get_cbrt_h(int16_t sample, int height)
+{
+ return height/2 - FFSIGN(sample) * (cbrt(FFABS(sample)) * (height/2) / cbrt(INT16_MAX));
+}
+
+static int get_cbrt_h2(int16_t sample, int height)
+{
+ return cbrt(FFABS(sample)) * height / cbrt(INT16_MAX);
+}
+
+static void draw_sample_point_rgba(uint8_t *buf, int height, int linesize,
+ int16_t *prev_y,
+ const uint8_t color[4], int h)
+{
+ if (h >= 0 && h < height) {
+ buf[h * linesize + 0] += color[0];
+ buf[h * linesize + 1] += color[1];
+ buf[h * linesize + 2] += color[2];
+ buf[h * linesize + 3] += color[3];
+ }
+}
+
+static void draw_sample_line_rgba(uint8_t *buf, int height, int linesize,
+ int16_t *prev_y,
+ const uint8_t color[4], int h)
+{
+ int k;
+ int start = height/2;
+ int end = av_clip(h, 0, height-1);
+ if (start > end)
+ FFSWAP(int16_t, start, end);
+ for (k = start; k < end; k++) {
+ buf[k * linesize + 0] += color[0];
+ buf[k * linesize + 1] += color[1];
+ buf[k * linesize + 2] += color[2];
+ buf[k * linesize + 3] += color[3];
+ }
+}
+
+static void draw_sample_p2p_rgba(uint8_t *buf, int height, int linesize,
+ int16_t *prev_y,
+ const uint8_t color[4], int h)
+{
+ int k;
+ if (h >= 0 && h < height) {
+ buf[h * linesize + 0] += color[0];
+ buf[h * linesize + 1] += color[1];
+ buf[h * linesize + 2] += color[2];
+ buf[h * linesize + 3] += color[3];
+ if (*prev_y && h != *prev_y) {
+ int start = *prev_y;
+ int end = av_clip(h, 0, height-1);
+ if (start > end)
+ FFSWAP(int16_t, start, end);
+ for (k = start + 1; k < end; k++) {
+ buf[k * linesize + 0] += color[0];
+ buf[k * linesize + 1] += color[1];
+ buf[k * linesize + 2] += color[2];
+ buf[k * linesize + 3] += color[3];
+ }
+ }
+ }
+ *prev_y = h;
+}
+
+static void draw_sample_cline_rgba(uint8_t *buf, int height, int linesize,
+ int16_t *prev_y,
+ const uint8_t color[4], int h)
+{
+ int k;
+ const int start = (height - h) / 2;
+ const int end = start + h;
+ for (k = start; k < end; k++) {
+ buf[k * linesize + 0] += color[0];
+ buf[k * linesize + 1] += color[1];
+ buf[k * linesize + 2] += color[2];
+ buf[k * linesize + 3] += color[3];
+ }
+}
+
+static void draw_sample_point_gray(uint8_t *buf, int height, int linesize,
+ int16_t *prev_y,
+ const uint8_t color[4], int h)
+{
+ if (h >= 0 && h < height)
+ buf[h * linesize] += color[0];
+}
+
+static void draw_sample_line_gray(uint8_t *buf, int height, int linesize,
+ int16_t *prev_y,
+ const uint8_t color[4], int h)
+{
+ int k;
+ int start = height/2;
+ int end = av_clip(h, 0, height-1);
+ if (start > end)
+ FFSWAP(int16_t, start, end);
+ for (k = start; k < end; k++)
+ buf[k * linesize] += color[0];
+}
+
+static void draw_sample_p2p_gray(uint8_t *buf, int height, int linesize,
+ int16_t *prev_y,
+ const uint8_t color[4], int h)
+{
+ int k;
+ if (h >= 0 && h < height) {
+ buf[h * linesize] += color[0];
+ if (*prev_y && h != *prev_y) {
+ int start = *prev_y;
+ int end = av_clip(h, 0, height-1);
+ if (start > end)
+ FFSWAP(int16_t, start, end);
+ for (k = start + 1; k < end; k++)
+ buf[k * linesize] += color[0];
+ }
+ }
+ *prev_y = h;
+}
+
+static void draw_sample_cline_gray(uint8_t *buf, int height, int linesize,
+ int16_t *prev_y,
+ const uint8_t color[4], int h)
+{
+ int k;
+ const int start = (height - h) / 2;
+ const int end = start + h;
+ for (k = start; k < end; k++)
+ buf[k * linesize] += color[0];
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ AVFilterLink *inlink = ctx->inputs[0];
+ ShowWavesContext *showwaves = ctx->priv;
+ int nb_channels = inlink->channels;
+ char *colors, *saveptr = NULL;
+ uint8_t x;
+ int ch;
+
+ if (showwaves->single_pic)
+ showwaves->n = 1;
+
+ if (!showwaves->n)
+ showwaves->n = FFMAX(1, ((double)inlink->sample_rate / (showwaves->w * av_q2d(showwaves->rate))) + 0.5);
+
+ showwaves->buf_idx = 0;
+ if (!(showwaves->buf_idy = av_mallocz_array(nb_channels, sizeof(*showwaves->buf_idy)))) {
+ av_log(ctx, AV_LOG_ERROR, "Could not allocate showwaves buffer\n");
+ return AVERROR(ENOMEM);
+ }
+ outlink->w = showwaves->w;
+ outlink->h = showwaves->h;
+ outlink->sample_aspect_ratio = (AVRational){1,1};
+
+ outlink->frame_rate = av_div_q((AVRational){inlink->sample_rate,showwaves->n},
+ (AVRational){showwaves->w,1});
+
+ av_log(ctx, AV_LOG_VERBOSE, "s:%dx%d r:%f n:%d\n",
+ showwaves->w, showwaves->h, av_q2d(outlink->frame_rate), showwaves->n);
+
+ switch (outlink->format) {
+ case AV_PIX_FMT_GRAY8:
+ switch (showwaves->mode) {
+ case MODE_POINT: showwaves->draw_sample = draw_sample_point_gray; break;
+ case MODE_LINE: showwaves->draw_sample = draw_sample_line_gray; break;
+ case MODE_P2P: showwaves->draw_sample = draw_sample_p2p_gray; break;
+ case MODE_CENTERED_LINE: showwaves->draw_sample = draw_sample_cline_gray; break;
+ default:
+ return AVERROR_BUG;
+ }
+ showwaves->pixstep = 1;
+ break;
+ case AV_PIX_FMT_RGBA:
+ switch (showwaves->mode) {
+ case MODE_POINT: showwaves->draw_sample = draw_sample_point_rgba; break;
+ case MODE_LINE: showwaves->draw_sample = draw_sample_line_rgba; break;
+ case MODE_P2P: showwaves->draw_sample = draw_sample_p2p_rgba; break;
+ case MODE_CENTERED_LINE: showwaves->draw_sample = draw_sample_cline_rgba; break;
+ default:
+ return AVERROR_BUG;
+ }
+ showwaves->pixstep = 4;
+ break;
+ }
+
+ switch (showwaves->scale) {
+ case SCALE_LIN:
+ switch (showwaves->mode) {
+ case MODE_POINT:
+ case MODE_LINE:
+ case MODE_P2P: showwaves->get_h = get_lin_h; break;
+ case MODE_CENTERED_LINE: showwaves->get_h = get_lin_h2; break;
+ default:
+ return AVERROR_BUG;
+ }
+ break;
+ case SCALE_LOG:
+ switch (showwaves->mode) {
+ case MODE_POINT:
+ case MODE_LINE:
+ case MODE_P2P: showwaves->get_h = get_log_h; break;
+ case MODE_CENTERED_LINE: showwaves->get_h = get_log_h2; break;
+ default:
+ return AVERROR_BUG;
+ }
+ break;
+ case SCALE_SQRT:
+ switch (showwaves->mode) {
+ case MODE_POINT:
+ case MODE_LINE:
+ case MODE_P2P: showwaves->get_h = get_sqrt_h; break;
+ case MODE_CENTERED_LINE: showwaves->get_h = get_sqrt_h2; break;
+ default:
+ return AVERROR_BUG;
+ }
+ break;
+ case SCALE_CBRT:
+ switch (showwaves->mode) {
+ case MODE_POINT:
+ case MODE_LINE:
+ case MODE_P2P: showwaves->get_h = get_cbrt_h; break;
+ case MODE_CENTERED_LINE: showwaves->get_h = get_cbrt_h2; break;
+ default:
+ return AVERROR_BUG;
+ }
+ break;
+ }
+
+ showwaves->fg = av_malloc_array(nb_channels, 4 * sizeof(*showwaves->fg));
+ if (!showwaves->fg)
+ return AVERROR(ENOMEM);
+
+ colors = av_strdup(showwaves->colors);
+ if (!colors)
+ return AVERROR(ENOMEM);
+
+ /* multiplication factor, pre-computed to avoid in-loop divisions */
+ x = 255 / ((showwaves->split_channels ? 1 : nb_channels) * showwaves->n);
+ if (outlink->format == AV_PIX_FMT_RGBA) {
+ uint8_t fg[4] = { 0xff, 0xff, 0xff, 0xff };
+
+ for (ch = 0; ch < nb_channels; ch++) {
+ char *color;
+
+ color = av_strtok(ch == 0 ? colors : NULL, " |", &saveptr);
+ if (color)
+ av_parse_color(fg, color, -1, ctx);
+ showwaves->fg[4*ch + 0] = fg[0] * x / 255.;
+ showwaves->fg[4*ch + 1] = fg[1] * x / 255.;
+ showwaves->fg[4*ch + 2] = fg[2] * x / 255.;
+ showwaves->fg[4*ch + 3] = fg[3] * x / 255.;
+ }
+ } else {
+ for (ch = 0; ch < nb_channels; ch++)
+ showwaves->fg[4 * ch + 0] = x;
+ }
+ av_free(colors);
+
+ return 0;
+}
+
+inline static int push_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ AVFilterLink *inlink = ctx->inputs[0];
+ ShowWavesContext *showwaves = outlink->src->priv;
+ int nb_channels = inlink->channels;
+ int ret, i;
+
+ ret = ff_filter_frame(outlink, showwaves->outpicref);
+ showwaves->outpicref = NULL;
+ showwaves->buf_idx = 0;
+ for (i = 0; i < nb_channels; i++)
+ showwaves->buf_idy[i] = 0;
+ return ret;
+}
+
+static int push_single_pic(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ AVFilterLink *inlink = ctx->inputs[0];
+ ShowWavesContext *showwaves = ctx->priv;
+ int64_t n = 0, max_samples = showwaves->total_samples / outlink->w;
+ AVFrame *out = showwaves->outpicref;
+ struct frame_node *node;
+ const int nb_channels = inlink->channels;
+ const int ch_height = showwaves->split_channels ? outlink->h / nb_channels : outlink->h;
+ const int linesize = out->linesize[0];
+ const int pixstep = showwaves->pixstep;
+ int col = 0;
+ int64_t *sum = showwaves->sum;
+
+ if (max_samples == 0) {
+ av_log(ctx, AV_LOG_ERROR, "Too few samples\n");
+ return AVERROR(EINVAL);
+ }
+
+ av_log(ctx, AV_LOG_DEBUG, "Create frame averaging %"PRId64" samples per column\n", max_samples);
+
+ memset(sum, 0, nb_channels);
+
+ for (node = showwaves->audio_frames; node; node = node->next) {
+ int i;
+ const AVFrame *frame = node->frame;
+ const int16_t *p = (const int16_t *)frame->data[0];
+
+ for (i = 0; i < frame->nb_samples; i++) {
+ int ch;
+
+ for (ch = 0; ch < nb_channels; ch++)
+ sum[ch] += abs(p[ch + i*nb_channels]) << 1;
+ if (n++ == max_samples) {
+ for (ch = 0; ch < nb_channels; ch++) {
+ int16_t sample = sum[ch] / max_samples;
+ uint8_t *buf = out->data[0] + col * pixstep;
+ int h;
+
+ if (showwaves->split_channels)
+ buf += ch*ch_height*linesize;
+ av_assert0(col < outlink->w);
+ h = showwaves->get_h(sample, ch_height);
+ showwaves->draw_sample(buf, ch_height, linesize, &showwaves->buf_idy[ch], &showwaves->fg[ch * 4], h);
+ sum[ch] = 0;
+ }
+ col++;
+ n = 0;
+ }
+ }
+ }
+
+ return push_frame(outlink);
+}
+
+
+static int request_frame(AVFilterLink *outlink)
+{
+ ShowWavesContext *showwaves = outlink->src->priv;
+ AVFilterLink *inlink = outlink->src->inputs[0];
+ int ret;
+
+ ret = ff_request_frame(inlink);
+ if (ret == AVERROR_EOF && showwaves->outpicref) {
+ if (showwaves->single_pic)
+ push_single_pic(outlink);
+ else
+ push_frame(outlink);
+ }
+
+ return ret;
+}
+
+static int alloc_out_frame(ShowWavesContext *showwaves, const int16_t *p,
+ const AVFilterLink *inlink, AVFilterLink *outlink,
+ const AVFrame *in)
+{
+ if (!showwaves->outpicref) {
+ int j;
+ AVFrame *out = showwaves->outpicref =
+ ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out)
+ return AVERROR(ENOMEM);
+ out->width = outlink->w;
+ out->height = outlink->h;
+ out->pts = in->pts + av_rescale_q((p - (int16_t *)in->data[0]) / inlink->channels,
+ av_make_q(1, inlink->sample_rate),
+ outlink->time_base);
+ for (j = 0; j < outlink->h; j++)
+ memset(out->data[0] + j*out->linesize[0], 0, outlink->w * showwaves->pixstep);
+ }
+ return 0;
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ ShowWavesContext *showwaves = ctx->priv;
+
+ if (!strcmp(ctx->filter->name, "showwavespic")) {
+ showwaves->single_pic = 1;
+ showwaves->mode = MODE_CENTERED_LINE;
+ }
+
+ return 0;
+}
+
+#if CONFIG_SHOWWAVES_FILTER
+
+static int showwaves_filter_frame(AVFilterLink *inlink, AVFrame *insamples)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ ShowWavesContext *showwaves = ctx->priv;
+ const int nb_samples = insamples->nb_samples;
+ AVFrame *outpicref = showwaves->outpicref;
+ int16_t *p = (int16_t *)insamples->data[0];
+ int nb_channels = inlink->channels;
+ int i, j, ret = 0;
+ const int pixstep = showwaves->pixstep;
+ const int n = showwaves->n;
+ const int ch_height = showwaves->split_channels ? outlink->h / nb_channels : outlink->h;
+
+ /* draw data in the buffer */
+ for (i = 0; i < nb_samples; i++) {
+
+ ret = alloc_out_frame(showwaves, p, inlink, outlink, insamples);
+ if (ret < 0)
+ goto end;
+ outpicref = showwaves->outpicref;
+
+ for (j = 0; j < nb_channels; j++) {
+ uint8_t *buf = outpicref->data[0] + showwaves->buf_idx * pixstep;
+ const int linesize = outpicref->linesize[0];
+ int h;
+
+ if (showwaves->split_channels)
+ buf += j*ch_height*linesize;
+ h = showwaves->get_h(*p++, ch_height);
+ showwaves->draw_sample(buf, ch_height, linesize,
+ &showwaves->buf_idy[j], &showwaves->fg[j * 4], h);
+ }
+
+ showwaves->sample_count_mod++;
+ if (showwaves->sample_count_mod == n) {
+ showwaves->sample_count_mod = 0;
+ showwaves->buf_idx++;
+ }
+ if (showwaves->buf_idx == showwaves->w)
+ if ((ret = push_frame(outlink)) < 0)
+ break;
+ outpicref = showwaves->outpicref;
+ }
+
+end:
+ av_frame_free(&insamples);
+ return ret;
+}
+
+static const AVFilterPad showwaves_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = showwaves_filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad showwaves_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_avf_showwaves = {
+ .name = "showwaves",
+ .description = NULL_IF_CONFIG_SMALL("Convert input audio to a video output."),
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .priv_size = sizeof(ShowWavesContext),
+ .inputs = showwaves_inputs,
+ .outputs = showwaves_outputs,
+ .priv_class = &showwaves_class,
+};
+
+#endif // CONFIG_SHOWWAVES_FILTER
+
+#if CONFIG_SHOWWAVESPIC_FILTER
+
+#define OFFSET(x) offsetof(ShowWavesContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption showwavespic_options[] = {
+ { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
+ { "s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
+ { "split_channels", "draw channels separately", OFFSET(split_channels), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS },
+ { "colors", "set channels colors", OFFSET(colors), AV_OPT_TYPE_STRING, {.str = "red|green|blue|yellow|orange|lime|pink|magenta|brown" }, 0, 0, FLAGS },
+ { "scale", "set amplitude scale", OFFSET(scale), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, SCALE_NB-1, FLAGS, .unit="scale" },
+ { "lin", "linear", 0, AV_OPT_TYPE_CONST, {.i64=SCALE_LIN}, .flags=FLAGS, .unit="scale"},
+ { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=SCALE_LOG}, .flags=FLAGS, .unit="scale"},
+ { "sqrt", "square root", 0, AV_OPT_TYPE_CONST, {.i64=SCALE_SQRT}, .flags=FLAGS, .unit="scale"},
+ { "cbrt", "cubic root", 0, AV_OPT_TYPE_CONST, {.i64=SCALE_CBRT}, .flags=FLAGS, .unit="scale"},
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(showwavespic);
+
+static int showwavespic_config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ ShowWavesContext *showwaves = ctx->priv;
+
+ if (showwaves->single_pic) {
+ showwaves->sum = av_mallocz_array(inlink->channels, sizeof(*showwaves->sum));
+ if (!showwaves->sum)
+ return AVERROR(ENOMEM);
+ }
+
+ return 0;
+}
+
+static int showwavespic_filter_frame(AVFilterLink *inlink, AVFrame *insamples)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ ShowWavesContext *showwaves = ctx->priv;
+ int16_t *p = (int16_t *)insamples->data[0];
+ int ret = 0;
+
+ if (showwaves->single_pic) {
+ struct frame_node *f;
+
+ ret = alloc_out_frame(showwaves, p, inlink, outlink, insamples);
+ if (ret < 0)
+ goto end;
+
+ /* queue the audio frame */
+ f = av_malloc(sizeof(*f));
+ if (!f) {
+ ret = AVERROR(ENOMEM);
+ goto end;
+ }
+ f->frame = insamples;
+ f->next = NULL;
+ if (!showwaves->last_frame) {
+ showwaves->audio_frames =
+ showwaves->last_frame = f;
+ } else {
+ showwaves->last_frame->next = f;
+ showwaves->last_frame = f;
+ }
+ showwaves->total_samples += insamples->nb_samples;
+
+ return 0;
+ }
+
+end:
+ av_frame_free(&insamples);
+ return ret;
+}
+
+static const AVFilterPad showwavespic_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = showwavespic_config_input,
+ .filter_frame = showwavespic_filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad showwavespic_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_avf_showwavespic = {
+ .name = "showwavespic",
+ .description = NULL_IF_CONFIG_SMALL("Convert input audio to a video output single picture."),
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .priv_size = sizeof(ShowWavesContext),
+ .inputs = showwavespic_inputs,
+ .outputs = showwavespic_outputs,
+ .priv_class = &showwavespic_class,
+};
+
+#endif // CONFIG_SHOWWAVESPIC_FILTER
diff --git a/libavfilter/avfilter.c b/libavfilter/avfilter.c
index 1cedb15db4..b431990edc 100644
--- a/libavfilter/avfilter.c
+++ b/libavfilter/avfilter.c
@@ -2,27 +2,30 @@
* filter layer
* Copyright (c) 2007 Bobby Bingham
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include "libavutil/atomic.h"
+#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/buffer.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
+#include "libavutil/eval.h"
#include "libavutil/hwcontext.h"
#include "libavutil/imgutils.h"
#include "libavutil/internal.h"
@@ -31,38 +34,91 @@
#include "libavutil/rational.h"
#include "libavutil/samplefmt.h"
+#define FF_INTERNAL_FIELDS 1
+#include "framequeue.h"
+
#include "audio.h"
#include "avfilter.h"
+#include "filters.h"
#include "formats.h"
#include "internal.h"
-#include "video.h"
+
+#include "libavutil/ffversion.h"
+const char av_filter_ffversion[] = "FFmpeg version " FFMPEG_VERSION;
+
+void ff_tlog_ref(void *ctx, AVFrame *ref, int end)
+{
+ av_unused char buf[16];
+ ff_tlog(ctx,
+ "ref[%p buf:%p data:%p linesize[%d, %d, %d, %d] pts:%"PRId64" pos:%"PRId64,
+ ref, ref->buf, ref->data[0],
+ ref->linesize[0], ref->linesize[1], ref->linesize[2], ref->linesize[3],
+ ref->pts, av_frame_get_pkt_pos(ref));
+
+ if (ref->width) {
+ ff_tlog(ctx, " a:%d/%d s:%dx%d i:%c iskey:%d type:%c",
+ ref->sample_aspect_ratio.num, ref->sample_aspect_ratio.den,
+ ref->width, ref->height,
+ !ref->interlaced_frame ? 'P' : /* Progressive */
+ ref->top_field_first ? 'T' : 'B', /* Top / Bottom */
+ ref->key_frame,
+ av_get_picture_type_char(ref->pict_type));
+ }
+ if (ref->nb_samples) {
+ ff_tlog(ctx, " cl:%"PRId64"d n:%d r:%d",
+ ref->channel_layout,
+ ref->nb_samples,
+ ref->sample_rate);
+ }
+
+ ff_tlog(ctx, "]%s", end ? "\n" : "");
+}
unsigned avfilter_version(void)
{
+ av_assert0(LIBAVFILTER_VERSION_MICRO >= 100);
return LIBAVFILTER_VERSION_INT;
}
const char *avfilter_configuration(void)
{
- return LIBAV_CONFIGURATION;
+ return FFMPEG_CONFIGURATION;
}
const char *avfilter_license(void)
{
#define LICENSE_PREFIX "libavfilter license: "
- return LICENSE_PREFIX LIBAV_LICENSE + sizeof(LICENSE_PREFIX) - 1;
+ return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1;
+}
+
+void ff_command_queue_pop(AVFilterContext *filter)
+{
+ AVFilterCommand *c= filter->command_queue;
+ av_freep(&c->arg);
+ av_freep(&c->command);
+ filter->command_queue= c->next;
+ av_free(c);
}
-void ff_insert_pad(unsigned idx, unsigned *count, size_t padidx_off,
+int ff_insert_pad(unsigned idx, unsigned *count, size_t padidx_off,
AVFilterPad **pads, AVFilterLink ***links,
AVFilterPad *newpad)
{
+ AVFilterLink **newlinks;
+ AVFilterPad *newpads;
unsigned i;
idx = FFMIN(idx, *count);
- *pads = av_realloc(*pads, sizeof(AVFilterPad) * (*count + 1));
- *links = av_realloc(*links, sizeof(AVFilterLink*) * (*count + 1));
+ newpads = av_realloc_array(*pads, *count + 1, sizeof(AVFilterPad));
+ newlinks = av_realloc_array(*links, *count + 1, sizeof(AVFilterLink*));
+ if (newpads)
+ *pads = newpads;
+ if (newlinks)
+ *links = newlinks;
+ if (!newpads || !newlinks)
+ return AVERROR(ENOMEM);
+
memmove(*pads + idx + 1, *pads + idx, sizeof(AVFilterPad) * (*count - idx));
memmove(*links + idx + 1, *links + idx, sizeof(AVFilterLink*) * (*count - idx));
memcpy(*pads + idx, newpad, sizeof(AVFilterPad));
@@ -70,8 +126,10 @@ void ff_insert_pad(unsigned idx, unsigned *count, size_t padidx_off,
(*count)++;
for (i = idx + 1; i < *count; i++)
- if (*links[i])
- (*(unsigned *)((uint8_t *) *links[i] + padidx_off))++;
+ if ((*links)[i])
+ (*(unsigned *)((uint8_t *) (*links)[i] + padidx_off))++;
+
+ return 0;
}
int avfilter_link(AVFilterContext *src, unsigned srcpad,
@@ -79,14 +137,19 @@ int avfilter_link(AVFilterContext *src, unsigned srcpad,
{
AVFilterLink *link;
+ av_assert0(src->graph);
+ av_assert0(dst->graph);
+ av_assert0(src->graph == dst->graph);
+
if (src->nb_outputs <= srcpad || dst->nb_inputs <= dstpad ||
src->outputs[srcpad] || dst->inputs[dstpad])
return AVERROR(EINVAL);
if (src->output_pads[srcpad].type != dst->input_pads[dstpad].type) {
av_log(src, AV_LOG_ERROR,
- "Media type mismatch between the '%s' filter output pad %d and the '%s' filter input pad %d\n",
- src->name, srcpad, dst->name, dstpad);
+ "Media type mismatch between the '%s' filter output pad %d (%s) and the '%s' filter input pad %d (%s)\n",
+ src->name, srcpad, (char *)av_x_if_null(av_get_media_type_string(src->output_pads[srcpad].type), "?"),
+ dst->name, dstpad, (char *)av_x_if_null(av_get_media_type_string(dst-> input_pads[dstpad].type), "?"));
return AVERROR(EINVAL);
}
@@ -101,12 +164,77 @@ int avfilter_link(AVFilterContext *src, unsigned srcpad,
link->srcpad = &src->output_pads[srcpad];
link->dstpad = &dst->input_pads[dstpad];
link->type = src->output_pads[srcpad].type;
- assert(AV_PIX_FMT_NONE == -1 && AV_SAMPLE_FMT_NONE == -1);
+ av_assert0(AV_PIX_FMT_NONE == -1 && AV_SAMPLE_FMT_NONE == -1);
link->format = -1;
+ ff_framequeue_init(&link->fifo, &src->graph->internal->frame_queues);
return 0;
}
+void avfilter_link_free(AVFilterLink **link)
+{
+ if (!*link)
+ return;
+
+ av_frame_free(&(*link)->partial_buf);
+ ff_framequeue_free(&(*link)->fifo);
+ ff_frame_pool_uninit((FFFramePool**)&(*link)->frame_pool);
+
+ av_freep(link);
+}
+
+int avfilter_link_get_channels(AVFilterLink *link)
+{
+ return link->channels;
+}
+
+void ff_filter_set_ready(AVFilterContext *filter, unsigned priority)
+{
+ filter->ready = FFMAX(filter->ready, priority);
+}
+
+/**
+ * Clear frame_blocked_in on all outputs.
+ * This is necessary whenever something changes on input.
+ */
+static void filter_unblock(AVFilterContext *filter)
+{
+ unsigned i;
+
+ for (i = 0; i < filter->nb_outputs; i++)
+ filter->outputs[i]->frame_blocked_in = 0;
+}
+
+
+void ff_avfilter_link_set_in_status(AVFilterLink *link, int status, int64_t pts)
+{
+ if (link->status_in == status)
+ return;
+ av_assert0(!link->status_in);
+ link->status_in = status;
+ link->status_in_pts = pts;
+ link->frame_wanted_out = 0;
+ link->frame_blocked_in = 0;
+ filter_unblock(link->dst);
+ ff_filter_set_ready(link->dst, 200);
+}
+
+void ff_avfilter_link_set_out_status(AVFilterLink *link, int status, int64_t pts)
+{
+ av_assert0(!link->frame_wanted_out);
+ av_assert0(!link->status_out);
+ link->status_out = status;
+ if (pts != AV_NOPTS_VALUE)
+ ff_update_link_current_pts(link, pts);
+ filter_unblock(link->dst);
+ ff_filter_set_ready(link->src, 200);
+}
+
+void avfilter_link_set_closed(AVFilterLink *link, int closed)
+{
+ ff_avfilter_link_set_out_status(link, closed ? AVERROR_EOF : 0, AV_NOPTS_VALUE);
+}
+
int avfilter_insert_filter(AVFilterLink *link, AVFilterContext *filt,
unsigned filt_srcpad_idx, unsigned filt_dstpad_idx)
{
@@ -152,6 +280,7 @@ int avfilter_config_links(AVFilterContext *filter)
for (i = 0; i < filter->nb_inputs; i ++) {
AVFilterLink *link = filter->inputs[i];
+ AVFilterLink *inlink;
if (!link) continue;
if (!link->src || !link->dst) {
@@ -160,6 +289,10 @@ int avfilter_config_links(AVFilterContext *filter)
return AVERROR(EINVAL);
}
+ inlink = link->src->nb_inputs ? link->src->inputs[0] : NULL;
+ link->current_pts =
+ link->current_pts_us = AV_NOPTS_VALUE;
+
switch (link->init_state) {
case AVLINK_INIT:
continue;
@@ -187,28 +320,38 @@ int avfilter_config_links(AVFilterContext *filter)
return ret;
}
- if (link->time_base.num == 0 && link->time_base.den == 0)
- link->time_base = link->src->nb_inputs ?
- link->src->inputs[0]->time_base : AV_TIME_BASE_Q;
+ switch (link->type) {
+ case AVMEDIA_TYPE_VIDEO:
+ if (!link->time_base.num && !link->time_base.den)
+ link->time_base = inlink ? inlink->time_base : AV_TIME_BASE_Q;
- if (link->type == AVMEDIA_TYPE_VIDEO) {
if (!link->sample_aspect_ratio.num && !link->sample_aspect_ratio.den)
- link->sample_aspect_ratio = link->src->nb_inputs ?
- link->src->inputs[0]->sample_aspect_ratio : (AVRational){1,1};
+ link->sample_aspect_ratio = inlink ?
+ inlink->sample_aspect_ratio : (AVRational){1,1};
- if (link->src->nb_inputs) {
+ if (inlink) {
if (!link->frame_rate.num && !link->frame_rate.den)
- link->frame_rate = link->src->inputs[0]->frame_rate;
+ link->frame_rate = inlink->frame_rate;
if (!link->w)
- link->w = link->src->inputs[0]->w;
+ link->w = inlink->w;
if (!link->h)
- link->h = link->src->inputs[0]->h;
+ link->h = inlink->h;
} else if (!link->w || !link->h) {
av_log(link->src, AV_LOG_ERROR,
"Video source filters must set their output link's "
"width and height\n");
return AVERROR(EINVAL);
}
+ break;
+
+ case AVMEDIA_TYPE_AUDIO:
+ if (inlink) {
+ if (!link->time_base.num && !link->time_base.den)
+ link->time_base = inlink->time_base;
+ }
+
+ if (!link->time_base.num && !link->time_base.den)
+ link->time_base = (AVRational) {1, link->sample_rate};
}
if (link->src->nb_inputs && link->src->inputs[0]->hw_frames_ctx &&
@@ -237,11 +380,11 @@ int avfilter_config_links(AVFilterContext *filter)
return 0;
}
-void ff_dlog_link(void *ctx, AVFilterLink *link, int end)
+void ff_tlog_link(void *ctx, AVFilterLink *link, int end)
{
if (link->type == AVMEDIA_TYPE_VIDEO) {
- av_log(ctx, AV_LOG_TRACE,
- "link[%p s:%dx%d fmt:%-16s %-16s->%-16s]%s",
+ ff_tlog(ctx,
+ "link[%p s:%dx%d fmt:%s %s->%s]%s",
link, link->w, link->h,
av_get_pix_fmt_name(link->format),
link->src ? link->src->filter->name : "",
@@ -251,9 +394,9 @@ void ff_dlog_link(void *ctx, AVFilterLink *link, int end)
char buf[128];
av_get_channel_layout_string(buf, sizeof(buf), -1, link->channel_layout);
- av_log(ctx, AV_LOG_TRACE,
- "link[%p r:%d cl:%s fmt:%-16s %-16s->%-16s]%s",
- link, link->sample_rate, buf,
+ ff_tlog(ctx,
+ "link[%p r:%d cl:%s fmt:%s %s->%s]%s",
+ link, (int)link->sample_rate, buf,
av_get_sample_fmt_name(link->format),
link->src ? link->src->filter->name : "",
link->dst ? link->dst->filter->name : "",
@@ -263,14 +406,47 @@ void ff_dlog_link(void *ctx, AVFilterLink *link, int end)
int ff_request_frame(AVFilterLink *link)
{
- FF_DPRINTF_START(NULL, request_frame); ff_dlog_link(NULL, link, 1);
+ FF_TPRINTF_START(NULL, request_frame); ff_tlog_link(NULL, link, 1);
+
+ av_assert1(!link->dst->filter->activate);
+ if (link->status_out)
+ return link->status_out;
+ if (link->status_in) {
+ if (ff_framequeue_queued_frames(&link->fifo)) {
+ av_assert1(!link->frame_wanted_out);
+ av_assert1(link->dst->ready >= 300);
+ return 0;
+ } else {
+ /* Acknowledge status change. Filters using ff_request_frame() will
+ handle the change automatically. Filters can also check the
+ status directly but none do yet. */
+ ff_avfilter_link_set_out_status(link, link->status_in, link->status_in_pts);
+ return link->status_out;
+ }
+ }
+ link->frame_wanted_out = 1;
+ ff_filter_set_ready(link->src, 100);
+ return 0;
+}
+
+static int ff_request_frame_to_filter(AVFilterLink *link)
+{
+ int ret = -1;
+ FF_TPRINTF_START(NULL, request_frame_to_filter); ff_tlog_link(NULL, link, 1);
+ /* Assume the filter is blocked, let the method clear it if not */
+ link->frame_blocked_in = 1;
if (link->srcpad->request_frame)
- return link->srcpad->request_frame(link);
+ ret = link->srcpad->request_frame(link);
else if (link->src->inputs[0])
- return ff_request_frame(link->src->inputs[0]);
- else
- return AVERROR(EINVAL);
+ ret = ff_request_frame(link->src->inputs[0]);
+ if (ret < 0) {
+ if (ret != AVERROR(EAGAIN) && ret != link->status_in)
+ ff_avfilter_link_set_in_status(link, ret, AV_NOPTS_VALUE);
+ if (ret == AVERROR_EOF)
+ ret = 0;
+ }
+ return ret;
}
int ff_poll_frame(AVFilterLink *link)
@@ -291,7 +467,98 @@ int ff_poll_frame(AVFilterLink *link)
return min;
}
+static const char *const var_names[] = {
+ "t",
+ "n",
+ "pos",
+ "w",
+ "h",
+ NULL
+};
+
+enum {
+ VAR_T,
+ VAR_N,
+ VAR_POS,
+ VAR_W,
+ VAR_H,
+ VAR_VARS_NB
+};
+
+static int set_enable_expr(AVFilterContext *ctx, const char *expr)
+{
+ int ret;
+ char *expr_dup;
+ AVExpr *old = ctx->enable;
+
+ if (!(ctx->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE)) {
+ av_log(ctx, AV_LOG_ERROR, "Timeline ('enable' option) not supported "
+ "with filter '%s'\n", ctx->filter->name);
+ return AVERROR_PATCHWELCOME;
+ }
+
+ expr_dup = av_strdup(expr);
+ if (!expr_dup)
+ return AVERROR(ENOMEM);
+
+ if (!ctx->var_values) {
+ ctx->var_values = av_calloc(VAR_VARS_NB, sizeof(*ctx->var_values));
+ if (!ctx->var_values) {
+ av_free(expr_dup);
+ return AVERROR(ENOMEM);
+ }
+ }
+
+ ret = av_expr_parse((AVExpr**)&ctx->enable, expr_dup, var_names,
+ NULL, NULL, NULL, NULL, 0, ctx->priv);
+ if (ret < 0) {
+ av_log(ctx->priv, AV_LOG_ERROR,
+ "Error when evaluating the expression '%s' for enable\n",
+ expr_dup);
+ av_free(expr_dup);
+ return ret;
+ }
+
+ av_expr_free(old);
+ av_free(ctx->enable_str);
+ ctx->enable_str = expr_dup;
+ return 0;
+}
+
+void ff_update_link_current_pts(AVFilterLink *link, int64_t pts)
+{
+ if (pts == AV_NOPTS_VALUE)
+ return;
+ link->current_pts = pts;
+ link->current_pts_us = av_rescale_q(pts, link->time_base, AV_TIME_BASE_Q);
+ /* TODO use duration */
+ if (link->graph && link->age_index >= 0)
+ ff_avfilter_graph_update_heap(link->graph, link);
+}
+
+int avfilter_process_command(AVFilterContext *filter, const char *cmd, const char *arg, char *res, int res_len, int flags)
+{
+ if(!strcmp(cmd, "ping")){
+ char local_res[256] = {0};
+
+ if (!res) {
+ res = local_res;
+ res_len = sizeof(local_res);
+ }
+ av_strlcatf(res, res_len, "pong from:%s %s\n", filter->filter->name, filter->name);
+ if (res == local_res)
+ av_log(filter, AV_LOG_INFO, "%s", res);
+ return 0;
+ }else if(!strcmp(cmd, "enable")) {
+ return set_enable_expr(filter, arg);
+ }else if(filter->filter->process_command) {
+ return filter->filter->process_command(filter, cmd, arg, res, res_len, flags);
+ }
+ return AVERROR(ENOSYS);
+}
+
static AVFilter *first_filter;
+static AVFilter **last_filter = &first_filter;
#if !FF_API_NOCONST_GET_NAME
const
@@ -305,18 +572,24 @@ AVFilter *avfilter_get_by_name(const char *name)
while ((f = avfilter_next(f)))
if (!strcmp(f->name, name))
- return f;
+ return (AVFilter *)f;
return NULL;
}
int avfilter_register(AVFilter *filter)
{
- AVFilter **f = &first_filter;
- while (*f)
- f = &(*f)->next;
- *f = filter;
+ AVFilter **f = last_filter;
+
+ /* the filter must select generic or internal exclusively */
+ av_assert0((filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE) != AVFILTER_FLAG_SUPPORT_TIMELINE);
+
filter->next = NULL;
+
+ while(*f || avpriv_atomic_ptr_cas((void * volatile *)f, NULL, filter))
+ f = &(*f)->next;
+ last_filter = &filter->next;
+
return 0;
}
@@ -348,10 +621,10 @@ int avfilter_pad_count(const AVFilterPad *pads)
return count;
}
-static const char *filter_name(void *p)
+static const char *default_filter_name(void *filter_ctx)
{
- AVFilterContext *filter = p;
- return filter->filter->name;
+ AVFilterContext *ctx = filter_ctx;
+ return ctx->name ? ctx->name : ctx->filter->name;
}
static void *filter_child_next(void *obj, void *prev)
@@ -366,10 +639,16 @@ static const AVClass *filter_child_class_next(const AVClass *prev)
{
const AVFilter *f = NULL;
+ /* find the filter that corresponds to prev */
while (prev && (f = avfilter_next(f)))
if (f->priv_class == prev)
break;
+ /* could not find filter corresponding to prev */
+ if (prev && !f)
+ return NULL;
+
+ /* find next filter with specific options */
while ((f = avfilter_next(f)))
if (f->priv_class)
return f->priv_class;
@@ -378,18 +657,22 @@ static const AVClass *filter_child_class_next(const AVClass *prev)
}
#define OFFSET(x) offsetof(AVFilterContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM
static const AVOption avfilter_options[] = {
{ "thread_type", "Allowed thread types", OFFSET(thread_type), AV_OPT_TYPE_FLAGS,
{ .i64 = AVFILTER_THREAD_SLICE }, 0, INT_MAX, FLAGS, "thread_type" },
{ "slice", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVFILTER_THREAD_SLICE }, .unit = "thread_type" },
+ { "enable", "set enable expression", OFFSET(enable_str), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
+ { "threads", "Allowed number of threads", OFFSET(nb_threads), AV_OPT_TYPE_INT,
+ { .i64 = 0 }, 0, INT_MAX, FLAGS },
{ NULL },
};
static const AVClass avfilter_class = {
.class_name = "AVFilter",
- .item_name = filter_name,
+ .item_name = default_filter_name,
.version = LIBAVUTIL_VERSION_INT,
+ .category = AV_CLASS_CATEGORY_FILTER,
.child_next = filter_child_next,
.child_class_next = filter_child_class_next,
.option = avfilter_options,
@@ -441,22 +724,22 @@ AVFilterContext *ff_filter_alloc(const AVFilter *filter, const char *inst_name)
ret->nb_inputs = avfilter_pad_count(filter->inputs);
if (ret->nb_inputs ) {
- ret->input_pads = av_malloc(sizeof(AVFilterPad) * ret->nb_inputs);
+ ret->input_pads = av_malloc_array(ret->nb_inputs, sizeof(AVFilterPad));
if (!ret->input_pads)
goto err;
memcpy(ret->input_pads, filter->inputs, sizeof(AVFilterPad) * ret->nb_inputs);
- ret->inputs = av_mallocz(sizeof(AVFilterLink*) * ret->nb_inputs);
+ ret->inputs = av_mallocz_array(ret->nb_inputs, sizeof(AVFilterLink*));
if (!ret->inputs)
goto err;
}
ret->nb_outputs = avfilter_pad_count(filter->outputs);
if (ret->nb_outputs) {
- ret->output_pads = av_malloc(sizeof(AVFilterPad) * ret->nb_outputs);
+ ret->output_pads = av_malloc_array(ret->nb_outputs, sizeof(AVFilterPad));
if (!ret->output_pads)
goto err;
memcpy(ret->output_pads, filter->outputs, sizeof(AVFilterPad) * ret->nb_outputs);
- ret->outputs = av_mallocz(sizeof(AVFilterLink*) * ret->nb_outputs);
+ ret->outputs = av_mallocz_array(ret->nb_outputs, sizeof(AVFilterLink*));
if (!ret->outputs)
goto err;
}
@@ -502,13 +785,16 @@ static void free_link(AVFilterLink *link)
ff_formats_unref(&link->out_samplerates);
ff_channel_layouts_unref(&link->in_channel_layouts);
ff_channel_layouts_unref(&link->out_channel_layouts);
- av_freep(&link);
+ avfilter_link_free(&link);
}
void avfilter_free(AVFilterContext *filter)
{
int i;
+ if (!filter)
+ return;
+
if (filter->graph)
ff_filter_graph_remove_filter(filter->graph, filter);
@@ -533,41 +819,100 @@ void avfilter_free(AVFilterContext *filter)
av_freep(&filter->inputs);
av_freep(&filter->outputs);
av_freep(&filter->priv);
+ while(filter->command_queue){
+ ff_command_queue_pop(filter);
+ }
+ av_opt_free(filter);
+ av_expr_free(filter->enable);
+ filter->enable = NULL;
+ av_freep(&filter->var_values);
av_freep(&filter->internal);
av_free(filter);
}
-/* process a list of value1:value2:..., each value corresponding
- * to subsequent AVOption, in the order they are declared */
-static int process_unnamed_options(AVFilterContext *ctx, AVDictionary **options,
- const char *args)
+int ff_filter_get_nb_threads(AVFilterContext *ctx)
+{
+ if (ctx->nb_threads > 0)
+ return FFMIN(ctx->nb_threads, ctx->graph->nb_threads);
+ return ctx->graph->nb_threads;
+}
+
+static int process_options(AVFilterContext *ctx, AVDictionary **options,
+ const char *args)
{
const AVOption *o = NULL;
- const char *p = args;
- char *val;
+ int ret, count = 0;
+ char *av_uninit(parsed_key), *av_uninit(value);
+ const char *key;
+ int offset= -1;
+
+ if (!args)
+ return 0;
+
+ while (*args) {
+ const char *shorthand = NULL;
- while (*p) {
o = av_opt_next(ctx->priv, o);
- if (!o) {
- av_log(ctx, AV_LOG_ERROR, "More options provided than "
- "this filter supports.\n");
- return AVERROR(EINVAL);
+ if (o) {
+ if (o->type == AV_OPT_TYPE_CONST || o->offset == offset)
+ continue;
+ offset = o->offset;
+ shorthand = o->name;
}
- if (o->type == AV_OPT_TYPE_CONST)
- continue;
- val = av_get_token(&p, ":");
- if (!val)
- return AVERROR(ENOMEM);
+ ret = av_opt_get_key_value(&args, "=", ":",
+ shorthand ? AV_OPT_FLAG_IMPLICIT_KEY : 0,
+ &parsed_key, &value);
+ if (ret < 0) {
+ if (ret == AVERROR(EINVAL))
+ av_log(ctx, AV_LOG_ERROR, "No option name near '%s'\n", args);
+ else
+ av_log(ctx, AV_LOG_ERROR, "Unable to parse '%s': %s\n", args,
+ av_err2str(ret));
+ return ret;
+ }
+ if (*args)
+ args++;
+ if (parsed_key) {
+ key = parsed_key;
+ while ((o = av_opt_next(ctx->priv, o))); /* discard all remaining shorthand */
+ } else {
+ key = shorthand;
+ }
- av_dict_set(options, o->name, val, 0);
+ av_log(ctx, AV_LOG_DEBUG, "Setting '%s' to value '%s'\n", key, value);
+
+ if (av_opt_find(ctx, key, NULL, 0, 0)) {
+ ret = av_opt_set(ctx, key, value, 0);
+ if (ret < 0) {
+ av_free(value);
+ av_free(parsed_key);
+ return ret;
+ }
+ } else {
+ av_dict_set(options, key, value, 0);
+ if ((ret = av_opt_set(ctx->priv, key, value, 0)) < 0) {
+ if (!av_opt_find(ctx->priv, key, NULL, 0, AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ)) {
+ if (ret == AVERROR_OPTION_NOT_FOUND)
+ av_log(ctx, AV_LOG_ERROR, "Option '%s' not found\n", key);
+ av_free(value);
+ av_free(parsed_key);
+ return ret;
+ }
+ }
+ }
- av_freep(&val);
- if (*p)
- p++;
+ av_free(value);
+ av_free(parsed_key);
+ count++;
}
- return 0;
+ if (ctx->enable_str) {
+ ret = set_enable_expr(ctx, ctx->enable_str);
+ if (ret < 0)
+ return ret;
+ }
+ return count;
}
#if FF_API_AVFILTER_INIT_FILTER
@@ -604,7 +949,9 @@ int avfilter_init_dict(AVFilterContext *ctx, AVDictionary **options)
}
}
- if (ctx->filter->init)
+ if (ctx->filter->init_opaque)
+ ret = ctx->filter->init_opaque(ctx, NULL);
+ else if (ctx->filter->init)
ret = ctx->filter->init(ctx);
else if (ctx->filter->init_dict)
ret = ctx->filter->init_dict(ctx, options);
@@ -625,52 +972,21 @@ int avfilter_init_str(AVFilterContext *filter, const char *args)
return AVERROR(EINVAL);
}
-#if FF_API_OLD_FILTER_OPTS
- if (!strcmp(filter->filter->name, "scale") &&
- strchr(args, ':') && strchr(args, ':') < strchr(args, '=')) {
- /* old w:h:flags=<flags> syntax */
- char *copy = av_strdup(args);
- char *p;
-
- av_log(filter, AV_LOG_WARNING, "The <w>:<h>:flags=<flags> option "
- "syntax is deprecated. Use either <w>:<h>:<flags> or "
- "w=<w>:h=<h>:flags=<flags>.\n");
-
- if (!copy) {
- ret = AVERROR(ENOMEM);
- goto fail;
- }
-
- p = strrchr(copy, ':');
- if (p) {
- *p++ = 0;
- ret = av_dict_parse_string(&options, p, "=", ":", 0);
- }
- if (ret >= 0)
- ret = process_unnamed_options(filter, &options, copy);
- av_freep(&copy);
-
- if (ret < 0)
- goto fail;
- } else
-#endif
-
- if (strchr(args, '=')) {
- /* assume a list of key1=value1:key2=value2:... */
- ret = av_dict_parse_string(&options, args, "=", ":", 0);
- if (ret < 0)
- goto fail;
-#if FF_API_OLD_FILTER_OPTS
- } else if (!strcmp(filter->filter->name, "format") ||
+#if FF_API_OLD_FILTER_OPTS || FF_API_OLD_FILTER_OPTS_ERROR
+ if ( !strcmp(filter->filter->name, "format") ||
!strcmp(filter->filter->name, "noformat") ||
!strcmp(filter->filter->name, "frei0r") ||
!strcmp(filter->filter->name, "frei0r_src") ||
- !strcmp(filter->filter->name, "ocv")) {
+ !strcmp(filter->filter->name, "ocv") ||
+ !strcmp(filter->filter->name, "pan") ||
+ !strcmp(filter->filter->name, "pp") ||
+ !strcmp(filter->filter->name, "aevalsrc")) {
/* a hack for compatibility with the old syntax
* replace colons with |s */
char *copy = av_strdup(args);
char *p = copy;
int nb_leading = 0; // number of leading colons to skip
+ int deprecated = 0;
if (!copy) {
ret = AVERROR(ENOMEM);
@@ -692,22 +1008,55 @@ int avfilter_init_str(AVFilterContext *filter, const char *args)
p++;
}
- if (strchr(p, ':')) {
- av_log(filter, AV_LOG_WARNING, "This syntax is deprecated. Use "
- "'|' to separate the list items.\n");
- }
-
+ deprecated = strchr(p, ':') != NULL;
+
+ if (!strcmp(filter->filter->name, "aevalsrc")) {
+ deprecated = 0;
+ while ((p = strchr(p, ':')) && p[1] != ':') {
+ const char *epos = strchr(p + 1, '=');
+ const char *spos = strchr(p + 1, ':');
+ const int next_token_is_opt = epos && (!spos || epos < spos);
+ if (next_token_is_opt) {
+ p++;
+ break;
+ }
+ /* next token does not contain a '=', assume a channel expression */
+ deprecated = 1;
+ *p++ = '|';
+ }
+ if (p && *p == ':') { // double sep '::' found
+ deprecated = 1;
+ memmove(p, p + 1, strlen(p));
+ }
+ } else
while ((p = strchr(p, ':')))
*p++ = '|';
- ret = process_unnamed_options(filter, &options, copy);
+#if FF_API_OLD_FILTER_OPTS
+ if (deprecated)
+ av_log(filter, AV_LOG_WARNING, "This syntax is deprecated. Use "
+ "'|' to separate the list items.\n");
+
+ av_log(filter, AV_LOG_DEBUG, "compat: called with args=[%s]\n", copy);
+ ret = process_options(filter, &options, copy);
+#else
+ if (deprecated) {
+ av_log(filter, AV_LOG_ERROR, "This syntax is deprecated. Use "
+ "'|' to separate the list items ('%s' instead of '%s')\n",
+ copy, args);
+ ret = AVERROR(EINVAL);
+ } else {
+ ret = process_options(filter, &options, copy);
+ }
+#endif
av_freep(&copy);
if (ret < 0)
goto fail;
+ } else
#endif
- } else {
- ret = process_unnamed_options(filter, &options, args);
+ {
+ ret = process_options(filter, &options, args);
if (ret < 0)
goto fail;
}
@@ -744,71 +1093,553 @@ static int default_filter_frame(AVFilterLink *link, AVFrame *frame)
return ff_filter_frame(link->dst->outputs[0], frame);
}
-int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
+static int ff_filter_frame_framed(AVFilterLink *link, AVFrame *frame)
{
int (*filter_frame)(AVFilterLink *, AVFrame *);
+ AVFilterContext *dstctx = link->dst;
AVFilterPad *dst = link->dstpad;
- AVFrame *out = NULL;
int ret;
- FF_DPRINTF_START(NULL, filter_frame);
- ff_dlog_link(NULL, link, 1);
-
if (!(filter_frame = dst->filter_frame))
filter_frame = default_filter_frame;
- /* copy the frame if needed */
- if (dst->needs_writable && !av_frame_is_writable(frame)) {
- av_log(link->dst, AV_LOG_DEBUG, "Copying data in avfilter.\n");
-
- switch (link->type) {
- case AVMEDIA_TYPE_VIDEO:
- out = ff_get_video_buffer(link, link->w, link->h);
- break;
- case AVMEDIA_TYPE_AUDIO:
- out = ff_get_audio_buffer(link, frame->nb_samples);
- break;
- default:
- ret = AVERROR(EINVAL);
+ if (dst->needs_writable) {
+ ret = ff_inlink_make_frame_writable(link, &frame);
+ if (ret < 0)
goto fail;
+ }
+
+ ff_inlink_process_commands(link, frame);
+ dstctx->is_disabled = !ff_inlink_evaluate_timeline_at_frame(link, frame);
+
+ if (dstctx->is_disabled &&
+ (dstctx->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC))
+ filter_frame = default_filter_frame;
+ ret = filter_frame(link, frame);
+ link->frame_count_out++;
+ return ret;
+
+fail:
+ av_frame_free(&frame);
+ return ret;
+}
+
+int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
+{
+ int ret;
+ FF_TPRINTF_START(NULL, filter_frame); ff_tlog_link(NULL, link, 1); ff_tlog(NULL, " "); ff_tlog_ref(NULL, frame, 1);
+
+ /* Consistency checks */
+ if (link->type == AVMEDIA_TYPE_VIDEO) {
+ if (strcmp(link->dst->filter->name, "buffersink") &&
+ strcmp(link->dst->filter->name, "format") &&
+ strcmp(link->dst->filter->name, "idet") &&
+ strcmp(link->dst->filter->name, "null") &&
+ strcmp(link->dst->filter->name, "scale")) {
+ av_assert1(frame->format == link->format);
+ av_assert1(frame->width == link->w);
+ av_assert1(frame->height == link->h);
}
- if (!out) {
- ret = AVERROR(ENOMEM);
- goto fail;
+ } else {
+ if (frame->format != link->format) {
+ av_log(link->dst, AV_LOG_ERROR, "Format change is not supported\n");
+ goto error;
+ }
+ if (av_frame_get_channels(frame) != link->channels) {
+ av_log(link->dst, AV_LOG_ERROR, "Channel count change is not supported\n");
+ goto error;
}
+ if (frame->channel_layout != link->channel_layout) {
+ av_log(link->dst, AV_LOG_ERROR, "Channel layout change is not supported\n");
+ goto error;
+ }
+ if (frame->sample_rate != link->sample_rate) {
+ av_log(link->dst, AV_LOG_ERROR, "Sample rate change is not supported\n");
+ goto error;
+ }
+ }
- ret = av_frame_copy_props(out, frame);
- if (ret < 0)
- goto fail;
+ link->frame_blocked_in = link->frame_wanted_out = 0;
+ link->frame_count_in++;
+ filter_unblock(link->dst);
+ ret = ff_framequeue_add(&link->fifo, frame);
+ if (ret < 0) {
+ av_frame_free(&frame);
+ return ret;
+ }
+ ff_filter_set_ready(link->dst, 300);
+ return 0;
- switch (link->type) {
- case AVMEDIA_TYPE_VIDEO:
- av_image_copy(out->data, out->linesize, frame->data, frame->linesize,
- frame->format, frame->width, frame->height);
- break;
- case AVMEDIA_TYPE_AUDIO:
- av_samples_copy(out->extended_data, frame->extended_data,
- 0, 0, frame->nb_samples,
- av_get_channel_layout_nb_channels(frame->channel_layout),
- frame->format);
+error:
+ av_frame_free(&frame);
+ return AVERROR_PATCHWELCOME;
+}
+
+static int samples_ready(AVFilterLink *link, unsigned min)
+{
+ return ff_framequeue_queued_frames(&link->fifo) &&
+ (ff_framequeue_queued_samples(&link->fifo) >= min ||
+ link->status_in);
+}
+
+static int take_samples(AVFilterLink *link, unsigned min, unsigned max,
+ AVFrame **rframe)
+{
+ AVFrame *frame0, *frame, *buf;
+ unsigned nb_samples, nb_frames, i, p;
+ int ret;
+
+ /* Note: this function relies on no format changes and must only be
+ called with enough samples. */
+ av_assert1(samples_ready(link, link->min_samples));
+ frame0 = frame = ff_framequeue_peek(&link->fifo, 0);
+ if (frame->nb_samples >= min && frame->nb_samples < max) {
+ *rframe = ff_framequeue_take(&link->fifo);
+ return 0;
+ }
+ nb_frames = 0;
+ nb_samples = 0;
+ while (1) {
+ if (nb_samples + frame->nb_samples > max) {
+ if (nb_samples < min)
+ nb_samples = max;
break;
- default:
- ret = AVERROR(EINVAL);
- goto fail;
}
+ nb_samples += frame->nb_samples;
+ nb_frames++;
+ if (nb_frames == ff_framequeue_queued_frames(&link->fifo))
+ break;
+ frame = ff_framequeue_peek(&link->fifo, nb_frames);
+ }
+ buf = ff_get_audio_buffer(link, nb_samples);
+ if (!buf)
+ return AVERROR(ENOMEM);
+ ret = av_frame_copy_props(buf, frame0);
+ if (ret < 0) {
+ av_frame_free(&buf);
+ return ret;
+ }
+ buf->pts = frame0->pts;
+
+ p = 0;
+ for (i = 0; i < nb_frames; i++) {
+ frame = ff_framequeue_take(&link->fifo);
+ av_samples_copy(buf->extended_data, frame->extended_data, p, 0,
+ frame->nb_samples, link->channels, link->format);
+ p += frame->nb_samples;
av_frame_free(&frame);
- } else
- out = frame;
+ }
+ if (p < nb_samples) {
+ unsigned n = nb_samples - p;
+ frame = ff_framequeue_peek(&link->fifo, 0);
+ av_samples_copy(buf->extended_data, frame->extended_data, p, 0, n,
+ link->channels, link->format);
+ ff_framequeue_skip_samples(&link->fifo, n, link->time_base);
+ }
- return filter_frame(link, out);
+ *rframe = buf;
+ return 0;
+}
-fail:
- av_frame_free(&out);
- av_frame_free(&frame);
+static int ff_filter_frame_to_filter(AVFilterLink *link)
+{
+ AVFrame *frame = NULL;
+ AVFilterContext *dst = link->dst;
+ int ret;
+
+ av_assert1(ff_framequeue_queued_frames(&link->fifo));
+ ret = link->min_samples ?
+ ff_inlink_consume_samples(link, link->min_samples, link->max_samples, &frame) :
+ ff_inlink_consume_frame(link, &frame);
+ av_assert1(ret);
+ if (ret < 0) {
+ av_assert1(!frame);
+ return ret;
+ }
+ /* The filter will soon have received a new frame, that may allow it to
+ produce one or more: unblock its outputs. */
+ filter_unblock(dst);
+ /* AVFilterPad.filter_frame() expect frame_count_out to have the value
+ before the frame; ff_filter_frame_framed() will re-increment it. */
+ link->frame_count_out--;
+ ret = ff_filter_frame_framed(link, frame);
+ if (ret < 0 && ret != link->status_out) {
+ ff_avfilter_link_set_out_status(link, ret, AV_NOPTS_VALUE);
+ } else {
+ /* Run once again, to see if several frames were available, or if
+ the input status has also changed, or any other reason. */
+ ff_filter_set_ready(dst, 300);
+ }
return ret;
}
+static int forward_status_change(AVFilterContext *filter, AVFilterLink *in)
+{
+ unsigned out = 0, progress = 0;
+ int ret;
+
+ av_assert0(!in->status_out);
+ if (!filter->nb_outputs) {
+ /* not necessary with the current API and sinks */
+ return 0;
+ }
+ while (!in->status_out) {
+ if (!filter->outputs[out]->status_in) {
+ progress++;
+ ret = ff_request_frame_to_filter(filter->outputs[out]);
+ if (ret < 0)
+ return ret;
+ }
+ if (++out == filter->nb_outputs) {
+ if (!progress) {
+ /* Every output already closed: input no longer interesting
+ (example: overlay in shortest mode, other input closed). */
+ ff_avfilter_link_set_out_status(in, in->status_in, in->status_in_pts);
+ return 0;
+ }
+ progress = 0;
+ out = 0;
+ }
+ }
+ ff_filter_set_ready(filter, 200);
+ return 0;
+}
+
+#define FFERROR_NOT_READY FFERRTAG('N','R','D','Y')
+
+static int ff_filter_activate_default(AVFilterContext *filter)
+{
+ unsigned i;
+
+ for (i = 0; i < filter->nb_inputs; i++) {
+ if (samples_ready(filter->inputs[i], filter->inputs[i]->min_samples)) {
+ return ff_filter_frame_to_filter(filter->inputs[i]);
+ }
+ }
+ for (i = 0; i < filter->nb_inputs; i++) {
+ if (filter->inputs[i]->status_in && !filter->inputs[i]->status_out) {
+ av_assert1(!ff_framequeue_queued_frames(&filter->inputs[i]->fifo));
+ return forward_status_change(filter, filter->inputs[i]);
+ }
+ }
+ for (i = 0; i < filter->nb_outputs; i++) {
+ if (filter->outputs[i]->frame_wanted_out &&
+ !filter->outputs[i]->frame_blocked_in) {
+ return ff_request_frame_to_filter(filter->outputs[i]);
+ }
+ }
+ return FFERROR_NOT_READY;
+}
+
+/*
+ Filter scheduling and activation
+
+ When a filter is activated, it must:
+ - if possible, output a frame;
+ - else, if relevant, forward the input status change;
+ - else, check outputs for wanted frames and forward the requests.
+
+ The following AVFilterLink fields are used for activation:
+
+ - frame_wanted_out:
+
+ This field indicates if a frame is needed on this input of the
+ destination filter. A positive value indicates that a frame is needed
+ to process queued frames or internal data or to satisfy the
+ application; a zero value indicates that a frame is not especially
+ needed but could be processed anyway; a negative value indicates that a
+ frame would just be queued.
+
+ It is set by filters using ff_request_frame() or ff_request_no_frame(),
+ when requested by the application through a specific API or when it is
+ set on one of the outputs.
+
+ It is cleared when a frame is sent from the source using
+ ff_filter_frame().
+
+ It is also cleared when a status change is sent from the source using
+ ff_avfilter_link_set_in_status().
+
+ - frame_blocked_in:
+
+ This field means that the source filter can not generate a frame as is.
+ Its goal is to avoid repeatedly calling the request_frame() method on
+ the same link.
+
+ It is set by the framework on all outputs of a filter before activating it.
+
+ It is automatically cleared by ff_filter_frame().
+
+ It is also automatically cleared by ff_avfilter_link_set_in_status().
+
+ It is also cleared on all outputs (using filter_unblock()) when
+ something happens on an input: processing a frame or changing the
+ status.
+
+ - fifo:
+
+ Contains the frames queued on a filter input. If it contains frames and
+ frame_wanted_out is not set, then the filter can be activated. If that
+ result in the filter not able to use these frames, the filter must set
+ frame_wanted_out to ask for more frames.
+
+ - status_in and status_in_pts:
+
+ Status (EOF or error code) of the link and timestamp of the status
+ change (in link time base, same as frames) as seen from the input of
+ the link. The status change is considered happening after the frames
+ queued in fifo.
+
+ It is set by the source filter using ff_avfilter_link_set_in_status().
+
+ - status_out:
+
+ Status of the link as seen from the output of the link. The status
+ change is considered having already happened.
+
+ It is set by the destination filter using
+ ff_avfilter_link_set_out_status().
+
+ Filters are activated according to the ready field, set using the
+ ff_filter_set_ready(). Eventually, a priority queue will be used.
+ ff_filter_set_ready() is called whenever anything could cause progress to
+ be possible. Marking a filter ready when it is not is not a problem,
+ except for the small overhead it causes.
+
+ Conditions that cause a filter to be marked ready are:
+
+ - frames added on an input link;
+
+ - changes in the input or output status of an input link;
+
+ - requests for a frame on an output link;
+
+ - after any actual processing using the legacy methods (filter_frame(),
+ and request_frame() to acknowledge status changes), to run once more
+ and check if enough input was present for several frames.
+
+ Exemples of scenarios to consider:
+
+ - buffersrc: activate if frame_wanted_out to notify the application;
+ activate when the application adds a frame to push it immediately.
+
+ - testsrc: activate only if frame_wanted_out to produce and push a frame.
+
+ - concat (not at stitch points): can process a frame on any output.
+ Activate if frame_wanted_out on output to forward on the corresponding
+ input. Activate when a frame is present on input to process it
+ immediately.
+
+ - framesync: needs at least one frame on each input; extra frames on the
+ wrong input will accumulate. When a frame is first added on one input,
+ set frame_wanted_out<0 on it to avoid getting more (would trigger
+ testsrc) and frame_wanted_out>0 on the other to allow processing it.
+
+ Activation of old filters:
+
+ In order to activate a filter implementing the legacy filter_frame() and
+ request_frame() methods, perform the first possible of the following
+ actions:
+
+ - If an input has frames in fifo and frame_wanted_out == 0, dequeue a
+ frame and call filter_frame().
+
+ Ratinale: filter frames as soon as possible instead of leaving them
+ queued; frame_wanted_out < 0 is not possible since the old API does not
+ set it nor provides any similar feedback; frame_wanted_out > 0 happens
+ when min_samples > 0 and there are not enough samples queued.
+
+ - If an input has status_in set but not status_out, try to call
+ request_frame() on one of the outputs in the hope that it will trigger
+ request_frame() on the input with status_in and acknowledge it. This is
+ awkward and fragile, filters with several inputs or outputs should be
+ updated to direct activation as soon as possible.
+
+ - If an output has frame_wanted_out > 0 and not frame_blocked_in, call
+ request_frame().
+
+ Rationale: checking frame_blocked_in is necessary to avoid requesting
+ repeatedly on a blocked input if another is not blocked (example:
+ [buffersrc1][testsrc1][buffersrc2][testsrc2]concat=v=2).
+
+ TODO: respect needs_fifo and remove auto-inserted fifos.
+
+ */
+
+int ff_filter_activate(AVFilterContext *filter)
+{
+ int ret;
+
+ /* Generic timeline support is not yet implemented but should be easy */
+ av_assert1(!(filter->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC &&
+ filter->filter->activate));
+ filter->ready = 0;
+ ret = filter->filter->activate ? filter->filter->activate(filter) :
+ ff_filter_activate_default(filter);
+ if (ret == FFERROR_NOT_READY)
+ ret = 0;
+ return ret;
+}
+
+int ff_inlink_acknowledge_status(AVFilterLink *link, int *rstatus, int64_t *rpts)
+{
+ *rpts = link->current_pts;
+ if (ff_framequeue_queued_frames(&link->fifo))
+ return *rstatus = 0;
+ if (link->status_out)
+ return *rstatus = link->status_out;
+ if (!link->status_in)
+ return *rstatus = 0;
+ *rstatus = link->status_out = link->status_in;
+ ff_update_link_current_pts(link, link->status_in_pts);
+ *rpts = link->current_pts;
+ return 1;
+}
+
+int ff_inlink_check_available_frame(AVFilterLink *link)
+{
+ return ff_framequeue_queued_frames(&link->fifo) > 0;
+}
+
+int ff_inlink_check_available_samples(AVFilterLink *link, unsigned min)
+{
+ uint64_t samples = ff_framequeue_queued_samples(&link->fifo);
+ av_assert1(min);
+ return samples >= min || (link->status_in && samples);
+}
+
+static void consume_update(AVFilterLink *link, const AVFrame *frame)
+{
+ ff_update_link_current_pts(link, frame->pts);
+ ff_inlink_process_commands(link, frame);
+ link->dst->is_disabled = !ff_inlink_evaluate_timeline_at_frame(link, frame);
+ link->frame_count_out++;
+}
+
+int ff_inlink_consume_frame(AVFilterLink *link, AVFrame **rframe)
+{
+ AVFrame *frame;
+
+ *rframe = NULL;
+ if (!ff_inlink_check_available_frame(link))
+ return 0;
+ frame = ff_framequeue_take(&link->fifo);
+ consume_update(link, frame);
+ *rframe = frame;
+ return 1;
+}
+
+int ff_inlink_consume_samples(AVFilterLink *link, unsigned min, unsigned max,
+ AVFrame **rframe)
+{
+ AVFrame *frame;
+ int ret;
+
+ av_assert1(min);
+ *rframe = NULL;
+ if (!ff_inlink_check_available_samples(link, min))
+ return 0;
+ if (link->status_in)
+ min = FFMIN(min, ff_framequeue_queued_samples(&link->fifo));
+ ret = take_samples(link, min, link->max_samples, &frame);
+ if (ret < 0)
+ return ret;
+ consume_update(link, frame);
+ *rframe = frame;
+ return 1;
+}
+
+int ff_inlink_make_frame_writable(AVFilterLink *link, AVFrame **rframe)
+{
+ AVFrame *frame = *rframe;
+ AVFrame *out;
+ int ret;
+
+ if (av_frame_is_writable(frame))
+ return 0;
+ av_log(link->dst, AV_LOG_DEBUG, "Copying data in avfilter.\n");
+
+ switch (link->type) {
+ case AVMEDIA_TYPE_VIDEO:
+ out = ff_get_video_buffer(link, link->w, link->h);
+ break;
+ case AVMEDIA_TYPE_AUDIO:
+ out = ff_get_audio_buffer(link, frame->nb_samples);
+ break;
+ default:
+ return AVERROR(EINVAL);
+ }
+ if (!out)
+ return AVERROR(ENOMEM);
+
+ ret = av_frame_copy_props(out, frame);
+ if (ret < 0) {
+ av_frame_free(&out);
+ return ret;
+ }
+
+ switch (link->type) {
+ case AVMEDIA_TYPE_VIDEO:
+ av_image_copy(out->data, out->linesize, (const uint8_t **)frame->data, frame->linesize,
+ frame->format, frame->width, frame->height);
+ break;
+ case AVMEDIA_TYPE_AUDIO:
+ av_samples_copy(out->extended_data, frame->extended_data,
+ 0, 0, frame->nb_samples,
+ av_frame_get_channels(frame),
+ frame->format);
+ break;
+ default:
+ av_assert0(!"reached");
+ }
+
+ av_frame_free(&frame);
+ *rframe = out;
+ return 0;
+}
+
+int ff_inlink_process_commands(AVFilterLink *link, const AVFrame *frame)
+{
+ AVFilterCommand *cmd = link->dst->command_queue;
+
+ while(cmd && cmd->time <= frame->pts * av_q2d(link->time_base)){
+ av_log(link->dst, AV_LOG_DEBUG,
+ "Processing command time:%f command:%s arg:%s\n",
+ cmd->time, cmd->command, cmd->arg);
+ avfilter_process_command(link->dst, cmd->command, cmd->arg, 0, 0, cmd->flags);
+ ff_command_queue_pop(link->dst);
+ cmd= link->dst->command_queue;
+ }
+ return 0;
+}
+
+int ff_inlink_evaluate_timeline_at_frame(AVFilterLink *link, const AVFrame *frame)
+{
+ AVFilterContext *dstctx = link->dst;
+ int64_t pts = frame->pts;
+ int64_t pos = av_frame_get_pkt_pos(frame);
+
+ if (!dstctx->enable_str)
+ return 1;
+
+ dstctx->var_values[VAR_N] = link->frame_count_out;
+ dstctx->var_values[VAR_T] = pts == AV_NOPTS_VALUE ? NAN : pts * av_q2d(link->time_base);
+ dstctx->var_values[VAR_W] = link->w;
+ dstctx->var_values[VAR_H] = link->h;
+ dstctx->var_values[VAR_POS] = pos == -1 ? NAN : pos;
+
+ return fabs(av_expr_eval(dstctx->enable, dstctx->var_values, NULL)) >= 0.5;
+}
+
+void ff_inlink_request_frame(AVFilterLink *link)
+{
+ av_assert1(!link->status_in);
+ av_assert1(!link->status_out);
+ link->frame_wanted_out = 1;
+ ff_filter_set_ready(link->src, 100);
+}
+
const AVClass *avfilter_get_class(void)
{
return &avfilter_class;
diff --git a/libavfilter/avfilter.h b/libavfilter/avfilter.h
index a17b2a2f5c..ac6dca4fc0 100644
--- a/libavfilter/avfilter.h
+++ b/libavfilter/avfilter.h
@@ -2,20 +2,20 @@
* filter layer
* Copyright (c) 2007 Bobby Bingham
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -29,21 +29,24 @@
*/
/**
- * @defgroup lavfi Libavfilter - graph-based frame editing library
+ * @defgroup lavfi libavfilter
+ * Graph-based frame editing library.
+ *
* @{
*/
+#include <stddef.h>
+
#include "libavutil/attributes.h"
#include "libavutil/avutil.h"
#include "libavutil/buffer.h"
+#include "libavutil/dict.h"
#include "libavutil/frame.h"
#include "libavutil/log.h"
#include "libavutil/samplefmt.h"
#include "libavutil/pixfmt.h"
#include "libavutil/rational.h"
-#include <stddef.h>
-
#include "libavfilter/version.h"
/**
@@ -61,7 +64,6 @@ const char *avfilter_configuration(void);
*/
const char *avfilter_license(void);
-
typedef struct AVFilterContext AVFilterContext;
typedef struct AVFilterLink AVFilterLink;
typedef struct AVFilterPad AVFilterPad;
@@ -112,6 +114,28 @@ enum AVMediaType avfilter_pad_get_type(const AVFilterPad *pads, int pad_idx);
* and processing them concurrently.
*/
#define AVFILTER_FLAG_SLICE_THREADS (1 << 2)
+/**
+ * Some filters support a generic "enable" expression option that can be used
+ * to enable or disable a filter in the timeline. Filters supporting this
+ * option have this flag set. When the enable expression is false, the default
+ * no-op filter_frame() function is called in place of the filter_frame()
+ * callback defined on each input pad, thus the frame is passed unchanged to
+ * the next filters.
+ */
+#define AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC (1 << 16)
+/**
+ * Same as AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC, except that the filter will
+ * have its filter_frame() callback(s) called as usual even when the enable
+ * expression is false. The filter will disable filtering within the
+ * filter_frame() callback(s) itself, for example executing code depending on
+ * the AVFilterContext->is_disabled value.
+ */
+#define AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL (1 << 17)
+/**
+ * Handy mask to test whether the filter supports or no the timeline feature
+ * (internally or generically).
+ */
+#define AVFILTER_FLAG_SUPPORT_TIMELINE (AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL)
/**
* Filter definition. This defines the pads a filter contains, and all the
@@ -249,6 +273,41 @@ typedef struct AVFilter {
* code.
*/
struct AVFilter *next;
+
+ /**
+ * Make the filter instance process a command.
+ *
+ * @param cmd the command to process, for handling simplicity all commands must be alphanumeric only
+ * @param arg the argument for the command
+ * @param res a buffer with size res_size where the filter(s) can return a response. This must not change when the command is not supported.
+ * @param flags if AVFILTER_CMD_FLAG_FAST is set and the command would be
+ * time consuming then a filter should treat it like an unsupported command
+ *
+ * @returns >=0 on success otherwise an error code.
+ * AVERROR(ENOSYS) on unsupported commands
+ */
+ int (*process_command)(AVFilterContext *, const char *cmd, const char *arg, char *res, int res_len, int flags);
+
+ /**
+ * Filter initialization function, alternative to the init()
+ * callback. Args contains the user-supplied parameters, opaque is
+ * used for providing binary data.
+ */
+ int (*init_opaque)(AVFilterContext *ctx, void *opaque);
+
+ /**
+ * Filter activation function.
+ *
+ * Called when any processing is needed from the filter, instead of any
+ * filter_frame and request_frame on pads.
+ *
+ * The function must examine inlinks and outlinks and perform a single
+ * step of processing. If there is nothing to do, the function must do
+ * nothing and not return an error. If more steps are or may be
+ * possible, it must use ff_filter_set_ready() to schedule another
+ * activation.
+ */
+ int (*activate)(AVFilterContext *ctx);
} AVFilter;
/**
@@ -260,7 +319,7 @@ typedef struct AVFilterInternal AVFilterInternal;
/** An instance of a filter */
struct AVFilterContext {
- const AVClass *av_class; ///< needed for av_log()
+ const AVClass *av_class; ///< needed for av_log() and filters common options
const AVFilter *filter; ///< the AVFilter of which this is an instance
@@ -291,7 +350,7 @@ struct AVFilterContext {
* allowed threading types. I.e. a threading type needs to be set in both
* to be allowed.
*
- * After the filter is initialzed, libavfilter sets this field to the
+ * After the filter is initialized, libavfilter sets this field to the
* threading type that is actually used (0 for no multithreading).
*/
int thread_type;
@@ -301,6 +360,13 @@ struct AVFilterContext {
*/
AVFilterInternal *internal;
+ struct AVFilterCommand *command_queue;
+
+ char *enable_str; ///< enable expression string
+ void *enable; ///< parsed expression (AVExpr*)
+ double *var_values; ///< variable values for the enable expression
+ int is_disabled; ///< the enabled state from the last expression evaluation
+
/**
* For filters which will create hardware frames, sets the device the
* filter should create them in. All other filters will ignore this field:
@@ -309,6 +375,20 @@ struct AVFilterContext {
* hardware context information.
*/
AVBufferRef *hw_device_ctx;
+
+ /**
+ * Max number of threads allowed in this filter instance.
+ * If <= 0, its value is ignored.
+ * Overrides global number of threads set per filter graph.
+ */
+ int nb_threads;
+
+ /**
+ * Ready status of the filter.
+ * A non-0 value means that the filter needs activating;
+ * a higher value suggests a more urgent activation.
+ */
+ unsigned ready;
};
/**
@@ -317,6 +397,11 @@ struct AVFilterContext {
* the pads involved. In addition, this link also contains the parameters
* which have been negotiated and agreed upon between the filter, such as
* image dimensions, format, etc.
+ *
+ * Applications must not normally access the link structure directly.
+ * Use the buffersrc and buffersink API instead.
+ * In the future, access to the header may be reserved for filters
+ * implementation.
*/
struct AVFilterLink {
AVFilterContext *src; ///< source filter
@@ -331,7 +416,7 @@ struct AVFilterLink {
int w; ///< agreed upon image width
int h; ///< agreed upon image height
AVRational sample_aspect_ratio; ///< agreed upon sample aspect ratio
- /* These two parameters apply only to audio */
+ /* These parameters apply only to audio */
uint64_t channel_layout; ///< channel layout of current buffer (see libavutil/channel_layout.h)
int sample_rate; ///< samples per second
@@ -354,9 +439,11 @@ struct AVFilterLink {
*****************************************************************
*/
/**
- * Lists of formats supported by the input and output filters respectively.
- * These lists are used for negotiating the format to actually be used,
- * which will be loaded into the format member, above, when chosen.
+ * Lists of formats and channel layouts supported by the input and output
+ * filters respectively. These lists are used for negotiating the format
+ * to actually be used, which will be loaded into the format and
+ * channel_layout members, above, when chosen.
+ *
*/
AVFilterFormats *in_formats;
AVFilterFormats *out_formats;
@@ -387,22 +474,143 @@ struct AVFilterLink {
} init_state;
/**
+ * Graph the filter belongs to.
+ */
+ struct AVFilterGraph *graph;
+
+ /**
+ * Current timestamp of the link, as defined by the most recent
+ * frame(s), in link time_base units.
+ */
+ int64_t current_pts;
+
+ /**
+ * Current timestamp of the link, as defined by the most recent
+ * frame(s), in AV_TIME_BASE units.
+ */
+ int64_t current_pts_us;
+
+ /**
+ * Index in the age array.
+ */
+ int age_index;
+
+ /**
* Frame rate of the stream on the link, or 1/0 if unknown or variable;
* if left to 0/0, will be automatically copied from the first input
* of the source filter if it exists.
*
- * Sources should set it to the real constant frame rate.
+ * Sources should set it to the best estimation of the real frame rate.
* If the source frame rate is unknown or variable, set this to 1/0.
* Filters should update it if necessary depending on their function.
* Sinks can use it to set a default output frame rate.
+ * It is similar to the r_frame_rate field in AVStream.
*/
AVRational frame_rate;
/**
+ * Buffer partially filled with samples to achieve a fixed/minimum size.
+ */
+ AVFrame *partial_buf;
+
+ /**
+ * Size of the partial buffer to allocate.
+ * Must be between min_samples and max_samples.
+ */
+ int partial_buf_size;
+
+ /**
+ * Minimum number of samples to filter at once. If filter_frame() is
+ * called with fewer samples, it will accumulate them in partial_buf.
+ * This field and the related ones must not be changed after filtering
+ * has started.
+ * If 0, all related fields are ignored.
+ */
+ int min_samples;
+
+ /**
+ * Maximum number of samples to filter at once. If filter_frame() is
+ * called with more samples, it will split them.
+ */
+ int max_samples;
+
+ /**
+ * Number of channels.
+ */
+ int channels;
+
+ /**
+ * Link processing flags.
+ */
+ unsigned flags;
+
+ /**
+ * Number of past frames sent through the link.
+ */
+ int64_t frame_count_in, frame_count_out;
+
+ /**
+ * A pointer to a FFFramePool struct.
+ */
+ void *frame_pool;
+
+ /**
+ * True if a frame is currently wanted on the output of this filter.
+ * Set when ff_request_frame() is called by the output,
+ * cleared when a frame is filtered.
+ */
+ int frame_wanted_out;
+
+ /**
* For hwaccel pixel formats, this should be a reference to the
* AVHWFramesContext describing the frames.
*/
AVBufferRef *hw_frames_ctx;
+
+#ifndef FF_INTERNAL_FIELDS
+
+ /**
+ * Internal structure members.
+ * The fields below this limit are internal for libavfilter's use
+ * and must in no way be accessed by applications.
+ */
+ char reserved[0xF000];
+
+#else /* FF_INTERNAL_FIELDS */
+
+ /**
+ * Queue of frames waiting to be filtered.
+ */
+ FFFrameQueue fifo;
+
+ /**
+ * If set, the source filter can not generate a frame as is.
+ * The goal is to avoid repeatedly calling the request_frame() method on
+ * the same link.
+ */
+ int frame_blocked_in;
+
+ /**
+ * Link input status.
+ * If not zero, all attempts of filter_frame will fail with the
+ * corresponding code.
+ */
+ int status_in;
+
+ /**
+ * Timestamp of the input status change.
+ */
+ int64_t status_in_pts;
+
+ /**
+ * Link output status.
+ * If not zero, all attempts of request_frame will fail with the
+ * corresponding code.
+ */
+ int status_out;
+
+#endif /* FF_INTERNAL_FIELDS */
+
};
/**
@@ -418,6 +626,24 @@ int avfilter_link(AVFilterContext *src, unsigned srcpad,
AVFilterContext *dst, unsigned dstpad);
/**
+ * Free the link in *link, and set its pointer to NULL.
+ */
+void avfilter_link_free(AVFilterLink **link);
+
+/**
+ * Get the number of channels of a link.
+ */
+int avfilter_link_get_channels(AVFilterLink *link);
+
+/**
+ * Set the closed field of a link.
+ * @deprecated applications are not supposed to mess with links, they should
+ * close the sinks.
+ */
+attribute_deprecated
+void avfilter_link_set_closed(AVFilterLink *link, int closed);
+
+/**
* Negotiate the media format, dimensions, etc of all inputs to a filter.
*
* @param filter the filter to negotiate the properties for its inputs
@@ -425,6 +651,15 @@ int avfilter_link(AVFilterContext *src, unsigned srcpad,
*/
int avfilter_config_links(AVFilterContext *filter);
+#define AVFILTER_CMD_FLAG_ONE 1 ///< Stop once a filter understood the command (for target=all for example), fast filters are favored automatically
+#define AVFILTER_CMD_FLAG_FAST 2 ///< Only execute command when its fast (like a video out that supports contrast adjustment in hw)
+
+/**
+ * Make the filter instance process a command.
+ * It is recommended to use avfilter_graph_send_command().
+ */
+int avfilter_process_command(AVFilterContext *filter, const char *cmd, const char *arg, char *res, int res_len, int flags);
+
/** Initialize the filter system. Register all builtin filters. */
void avfilter_register_all(void);
@@ -606,7 +841,9 @@ typedef struct AVFilterGraph {
unsigned nb_filters;
char *scale_sws_opts; ///< sws options to use for the auto-inserted scale filters
- char *resample_lavr_opts; ///< libavresample options to use for the auto-inserted resample filters
+#if FF_API_LAVR_OPTS
+ attribute_deprecated char *resample_lavr_opts; ///< libavresample options to use for the auto-inserted resample filters
+#endif
/**
* Type of multithreading allowed for filters in this graph. A combination
@@ -654,6 +891,20 @@ typedef struct AVFilterGraph {
* platform and build options.
*/
avfilter_execute_func *execute;
+
+ char *aresample_swr_opts; ///< swr options to use for the auto-inserted aresample filters, Access ONLY through AVOptions
+
+ /**
+ * Private fields
+ *
+ * The following fields are for internal use only.
+ * Their type, offset, number and semantic can change without notice.
+ */
+
+ AVFilterLink **sink_links;
+ int sink_links_count;
+
+ unsigned disable_auto_convert;
} AVFilterGraph;
/**
@@ -675,19 +926,21 @@ AVFilterGraph *avfilter_graph_alloc(void);
*
* @return the context of the newly created filter instance (note that it is
* also retrievable directly through AVFilterGraph.filters or with
- * avfilter_graph_get_filter()) on success or NULL or failure.
+ * avfilter_graph_get_filter()) on success or NULL on failure.
*/
AVFilterContext *avfilter_graph_alloc_filter(AVFilterGraph *graph,
const AVFilter *filter,
const char *name);
/**
- * Get a filter instance with name name from graph.
+ * Get a filter instance identified by instance name from graph.
*
+ * @param graph filter graph to search through.
+ * @param name filter instance name (should be unique in the graph).
* @return the pointer to the found filter instance or NULL if it
* cannot be found.
*/
-AVFilterContext *avfilter_graph_get_filter(AVFilterGraph *graph, char *name);
+AVFilterContext *avfilter_graph_get_filter(AVFilterGraph *graph, const char *name);
#if FF_API_AVFILTER_OPEN
/**
@@ -721,11 +974,26 @@ int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *fil
AVFilterGraph *graph_ctx);
/**
+ * Enable or disable automatic format conversion inside the graph.
+ *
+ * Note that format conversion can still happen inside explicitly inserted
+ * scale and aresample filters.
+ *
+ * @param flags any of the AVFILTER_AUTO_CONVERT_* constants
+ */
+void avfilter_graph_set_auto_convert(AVFilterGraph *graph, unsigned flags);
+
+enum {
+ AVFILTER_AUTO_CONVERT_ALL = 0, /**< all automatic conversions enabled */
+ AVFILTER_AUTO_CONVERT_NONE = -1, /**< all automatic conversions disabled */
+};
+
+/**
* Check validity and configure all the links and formats in the graph.
*
* @param graphctx the filter graph
* @param log_ctx context used for logging
- * @return 0 in case of success, a negative AVERROR code otherwise
+ * @return >= 0 in case of success, a negative AVERROR code otherwise
*/
int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx);
@@ -774,6 +1042,15 @@ void avfilter_inout_free(AVFilterInOut **inout);
/**
* Add a graph described by a string to a graph.
*
+ * @note The caller must provide the lists of inputs and outputs,
+ * which therefore must be known before calling the function.
+ *
+ * @note The inputs parameter describes inputs of the already existing
+ * part of the graph; i.e. from the point of view of the newly created
+ * part, they are outputs. Similarly the outputs parameter describes
+ * outputs of the already existing filters, which are provided as
+ * inputs to the parsed filters.
+ *
* @param graph the filter graph where to link the parsed graph context
* @param filters string to be parsed
* @param inputs linked list to the inputs of the graph
@@ -787,6 +1064,27 @@ int avfilter_graph_parse(AVFilterGraph *graph, const char *filters,
/**
* Add a graph described by a string to a graph.
*
+ * In the graph filters description, if the input label of the first
+ * filter is not specified, "in" is assumed; if the output label of
+ * the last filter is not specified, "out" is assumed.
+ *
+ * @param graph the filter graph where to link the parsed graph context
+ * @param filters string to be parsed
+ * @param inputs pointer to a linked list to the inputs of the graph, may be NULL.
+ * If non-NULL, *inputs is updated to contain the list of open inputs
+ * after the parsing, should be freed with avfilter_inout_free().
+ * @param outputs pointer to a linked list to the outputs of the graph, may be NULL.
+ * If non-NULL, *outputs is updated to contain the list of open outputs
+ * after the parsing, should be freed with avfilter_inout_free().
+ * @return non negative on success, a negative AVERROR code on error
+ */
+int avfilter_graph_parse_ptr(AVFilterGraph *graph, const char *filters,
+ AVFilterInOut **inputs, AVFilterInOut **outputs,
+ void *log_ctx);
+
+/**
+ * Add a graph described by a string to a graph.
+ *
* @param[in] graph the filter graph where to link the parsed graph context
* @param[in] filters string to be parsed
* @param[out] inputs a linked list of all free (unlinked) inputs of the
@@ -797,21 +1095,13 @@ int avfilter_graph_parse(AVFilterGraph *graph, const char *filters,
* caller using avfilter_inout_free().
* @return zero on success, a negative AVERROR code on error
*
- * @note the difference between avfilter_graph_parse2() and
- * avfilter_graph_parse() is that in avfilter_graph_parse(), the caller provides
- * the lists of inputs and outputs, which therefore must be known before calling
- * the function. On the other hand, avfilter_graph_parse2() \em returns the
- * inputs and outputs that are left unlinked after parsing the graph and the
- * caller then deals with them. Another difference is that in
- * avfilter_graph_parse(), the inputs parameter describes inputs of the
- * <em>already existing</em> part of the graph; i.e. from the point of view of
- * the newly created part, they are outputs. Similarly the outputs parameter
- * describes outputs of the already existing filters, which are provided as
- * inputs to the parsed filters.
- * avfilter_graph_parse2() takes the opposite approach -- it makes no reference
- * whatsoever to already existing parts of the graph and the inputs parameter
- * will on return contain inputs of the newly parsed part of the graph.
- * Analogously the outputs parameter will contain outputs of the newly created
+ * @note This function returns the inputs and outputs that are left
+ * unlinked after parsing the graph and the caller then deals with
+ * them.
+ * @note This function makes no reference whatsoever to already
+ * existing parts of the graph and the inputs parameter will on return
+ * contain inputs of the newly parsed part of the graph. Analogously
+ * the outputs parameter will contain outputs of the newly created
* filters.
*/
int avfilter_graph_parse2(AVFilterGraph *graph, const char *filters,
@@ -819,6 +1109,71 @@ int avfilter_graph_parse2(AVFilterGraph *graph, const char *filters,
AVFilterInOut **outputs);
/**
+ * Send a command to one or more filter instances.
+ *
+ * @param graph the filter graph
+ * @param target the filter(s) to which the command should be sent
+ * "all" sends to all filters
+ * otherwise it can be a filter or filter instance name
+ * which will send the command to all matching filters.
+ * @param cmd the command to send, for handling simplicity all commands must be alphanumeric only
+ * @param arg the argument for the command
+ * @param res a buffer with size res_size where the filter(s) can return a response.
+ *
+ * @returns >=0 on success otherwise an error code.
+ * AVERROR(ENOSYS) on unsupported commands
+ */
+int avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags);
+
+/**
+ * Queue a command for one or more filter instances.
+ *
+ * @param graph the filter graph
+ * @param target the filter(s) to which the command should be sent
+ * "all" sends to all filters
+ * otherwise it can be a filter or filter instance name
+ * which will send the command to all matching filters.
+ * @param cmd the command to sent, for handling simplicity all commands must be alphanumeric only
+ * @param arg the argument for the command
+ * @param ts time at which the command should be sent to the filter
+ *
+ * @note As this executes commands after this function returns, no return code
+ * from the filter is provided, also AVFILTER_CMD_FLAG_ONE is not supported.
+ */
+int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts);
+
+
+/**
+ * Dump a graph into a human-readable string representation.
+ *
+ * @param graph the graph to dump
+ * @param options formatting options; currently ignored
+ * @return a string, or NULL in case of memory allocation failure;
+ * the string must be freed using av_free
+ */
+char *avfilter_graph_dump(AVFilterGraph *graph, const char *options);
+
+/**
+ * Request a frame on the oldest sink link.
+ *
+ * If the request returns AVERROR_EOF, try the next.
+ *
+ * Note that this function is not meant to be the sole scheduling mechanism
+ * of a filtergraph, only a convenience function to help drain a filtergraph
+ * in a balanced way under normal circumstances.
+ *
+ * Also note that AVERROR_EOF does not mean that frames did not arrive on
+ * some of the sinks during the process.
+ * When there are multiple sink links, in case the requested link
+ * returns an EOF, this may cause a filter to flush pending frames
+ * which are sent to another sink link, although unrequested.
+ *
+ * @return the return value of ff_request_frame(),
+ * or AVERROR_EOF if all links returned AVERROR_EOF
+ */
+int avfilter_graph_request_oldest(AVFilterGraph *graph);
+
+/**
* @}
*/
diff --git a/libavfilter/avfiltergraph.c b/libavfilter/avfiltergraph.c
index 5053e3c37a..75bd516896 100644
--- a/libavfilter/avfiltergraph.c
+++ b/libavfilter/avfiltergraph.c
@@ -3,20 +3,20 @@
* Copyright (c) 2008 Vitor Sessak
* Copyright (c) 2007 Bobby Bingham
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -26,25 +26,33 @@
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
+#include "libavutil/bprint.h"
#include "libavutil/channel_layout.h"
-#include "libavutil/common.h"
#include "libavutil/internal.h"
-#include "libavutil/log.h"
#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+
+#define FF_INTERNAL_FIELDS 1
+#include "framequeue.h"
#include "avfilter.h"
+#include "buffersink.h"
#include "formats.h"
#include "internal.h"
#include "thread.h"
#define OFFSET(x) offsetof(AVFilterGraph, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
static const AVOption filtergraph_options[] = {
{ "thread_type", "Allowed thread types", OFFSET(thread_type), AV_OPT_TYPE_FLAGS,
{ .i64 = AVFILTER_THREAD_SLICE }, 0, INT_MAX, FLAGS, "thread_type" },
{ "slice", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVFILTER_THREAD_SLICE }, .flags = FLAGS, .unit = "thread_type" },
{ "threads", "Maximum number of threads", OFFSET(nb_threads),
AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, FLAGS },
+ {"scale_sws_opts" , "default scale filter options" , OFFSET(scale_sws_opts) ,
+ AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
+ {"aresample_swr_opts" , "default aresample filter options" , OFFSET(aresample_swr_opts) ,
+ AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
{ NULL },
};
@@ -53,6 +61,7 @@ static const AVClass filtergraph_class = {
.item_name = av_default_item_name,
.version = LIBAVUTIL_VERSION_INT,
.option = filtergraph_options,
+ .category = AV_CLASS_CATEGORY_FILTER,
};
#if !HAVE_THREADS
@@ -82,18 +91,24 @@ AVFilterGraph *avfilter_graph_alloc(void)
ret->av_class = &filtergraph_class;
av_opt_set_defaults(ret);
+ ff_framequeue_global_init(&ret->internal->frame_queues);
return ret;
}
void ff_filter_graph_remove_filter(AVFilterGraph *graph, AVFilterContext *filter)
{
- int i;
+ int i, j;
for (i = 0; i < graph->nb_filters; i++) {
if (graph->filters[i] == filter) {
FFSWAP(AVFilterContext*, graph->filters[i],
graph->filters[graph->nb_filters - 1]);
graph->nb_filters--;
+ filter->graph = NULL;
+ for (j = 0; j<filter->nb_outputs; j++)
+ if (filter->outputs[j])
+ filter->outputs[j]->graph = NULL;
+
return;
}
}
@@ -109,8 +124,13 @@ void avfilter_graph_free(AVFilterGraph **graph)
ff_graph_thread_free(*graph);
+ av_freep(&(*graph)->sink_links);
+
av_freep(&(*graph)->scale_sws_opts);
+ av_freep(&(*graph)->aresample_swr_opts);
+#if FF_API_LAVR_OPTS
av_freep(&(*graph)->resample_lavr_opts);
+#endif
av_freep(&(*graph)->filters);
av_freep(&(*graph)->internal);
av_freep(graph);
@@ -156,6 +176,11 @@ fail:
return ret;
}
+void avfilter_graph_set_auto_convert(AVFilterGraph *graph, unsigned flags)
+{
+ graph->disable_auto_convert = flags;
+}
+
AVFilterContext *avfilter_graph_alloc_filter(AVFilterGraph *graph,
const AVFilter *filter,
const char *name)
@@ -198,7 +223,7 @@ AVFilterContext *avfilter_graph_alloc_filter(AVFilterGraph *graph,
* A graph is considered valid if all its input and output pads are
* connected.
*
- * @return 0 in case of success, a negative value otherwise
+ * @return >= 0 in case of success, a negative value otherwise
*/
static int graph_check_validity(AVFilterGraph *graph, AVClass *log_ctx)
{
@@ -206,22 +231,25 @@ static int graph_check_validity(AVFilterGraph *graph, AVClass *log_ctx)
int i, j;
for (i = 0; i < graph->nb_filters; i++) {
+ const AVFilterPad *pad;
filt = graph->filters[i];
for (j = 0; j < filt->nb_inputs; j++) {
if (!filt->inputs[j] || !filt->inputs[j]->src) {
+ pad = &filt->input_pads[j];
av_log(log_ctx, AV_LOG_ERROR,
- "Input pad \"%s\" for the filter \"%s\" of type \"%s\" not connected to any source\n",
- filt->input_pads[j].name, filt->name, filt->filter->name);
+ "Input pad \"%s\" with type %s of the filter instance \"%s\" of %s not connected to any source\n",
+ pad->name, av_get_media_type_string(pad->type), filt->name, filt->filter->name);
return AVERROR(EINVAL);
}
}
for (j = 0; j < filt->nb_outputs; j++) {
if (!filt->outputs[j] || !filt->outputs[j]->dst) {
+ pad = &filt->output_pads[j];
av_log(log_ctx, AV_LOG_ERROR,
- "Output pad \"%s\" for the filter \"%s\" of type \"%s\" not connected to any destination\n",
- filt->output_pads[j].name, filt->name, filt->filter->name);
+ "Output pad \"%s\" with type %s of the filter instance \"%s\" of %s not connected to any destination\n",
+ pad->name, av_get_media_type_string(pad->type), filt->name, filt->filter->name);
return AVERROR(EINVAL);
}
}
@@ -233,7 +261,7 @@ static int graph_check_validity(AVFilterGraph *graph, AVClass *log_ctx)
/**
* Configure all the links of graphctx.
*
- * @return 0 in case of success, a negative value otherwise
+ * @return >= 0 in case of success, a negative value otherwise
*/
static int graph_config_links(AVFilterGraph *graph, AVClass *log_ctx)
{
@@ -252,7 +280,7 @@ static int graph_config_links(AVFilterGraph *graph, AVClass *log_ctx)
return 0;
}
-AVFilterContext *avfilter_graph_get_filter(AVFilterGraph *graph, char *name)
+AVFilterContext *avfilter_graph_get_filter(AVFilterGraph *graph, const char *name)
{
int i;
@@ -263,23 +291,166 @@ AVFilterContext *avfilter_graph_get_filter(AVFilterGraph *graph, char *name)
return NULL;
}
+static void sanitize_channel_layouts(void *log, AVFilterChannelLayouts *l)
+{
+ if (!l)
+ return;
+ if (l->nb_channel_layouts) {
+ if (l->all_layouts || l->all_counts)
+ av_log(log, AV_LOG_WARNING, "All layouts set on non-empty list\n");
+ l->all_layouts = l->all_counts = 0;
+ } else {
+ if (l->all_counts && !l->all_layouts)
+ av_log(log, AV_LOG_WARNING, "All counts without all layouts\n");
+ l->all_layouts = 1;
+ }
+}
+
+static int filter_query_formats(AVFilterContext *ctx)
+{
+ int ret, i;
+ AVFilterFormats *formats;
+ AVFilterChannelLayouts *chlayouts;
+ AVFilterFormats *samplerates;
+ enum AVMediaType type = ctx->inputs && ctx->inputs [0] ? ctx->inputs [0]->type :
+ ctx->outputs && ctx->outputs[0] ? ctx->outputs[0]->type :
+ AVMEDIA_TYPE_VIDEO;
+
+ if ((ret = ctx->filter->query_formats(ctx)) < 0) {
+ if (ret != AVERROR(EAGAIN))
+ av_log(ctx, AV_LOG_ERROR, "Query format failed for '%s': %s\n",
+ ctx->name, av_err2str(ret));
+ return ret;
+ }
+
+ for (i = 0; i < ctx->nb_inputs; i++)
+ sanitize_channel_layouts(ctx, ctx->inputs[i]->out_channel_layouts);
+ for (i = 0; i < ctx->nb_outputs; i++)
+ sanitize_channel_layouts(ctx, ctx->outputs[i]->in_channel_layouts);
+
+ formats = ff_all_formats(type);
+ if ((ret = ff_set_common_formats(ctx, formats)) < 0)
+ return ret;
+ if (type == AVMEDIA_TYPE_AUDIO) {
+ samplerates = ff_all_samplerates();
+ if ((ret = ff_set_common_samplerates(ctx, samplerates)) < 0)
+ return ret;
+ chlayouts = ff_all_channel_layouts();
+ if ((ret = ff_set_common_channel_layouts(ctx, chlayouts)) < 0)
+ return ret;
+ }
+ return 0;
+}
+
+static int formats_declared(AVFilterContext *f)
+{
+ int i;
+
+ for (i = 0; i < f->nb_inputs; i++) {
+ if (!f->inputs[i]->out_formats)
+ return 0;
+ if (f->inputs[i]->type == AVMEDIA_TYPE_AUDIO &&
+ !(f->inputs[i]->out_samplerates &&
+ f->inputs[i]->out_channel_layouts))
+ return 0;
+ }
+ for (i = 0; i < f->nb_outputs; i++) {
+ if (!f->outputs[i]->in_formats)
+ return 0;
+ if (f->outputs[i]->type == AVMEDIA_TYPE_AUDIO &&
+ !(f->outputs[i]->in_samplerates &&
+ f->outputs[i]->in_channel_layouts))
+ return 0;
+ }
+ return 1;
+}
+
+static AVFilterFormats *clone_filter_formats(AVFilterFormats *arg)
+{
+ AVFilterFormats *a = av_memdup(arg, sizeof(*arg));
+ if (a) {
+ a->refcount = 0;
+ a->refs = NULL;
+ a->formats = av_memdup(a->formats, sizeof(*a->formats) * a->nb_formats);
+ if (!a->formats && arg->formats)
+ av_freep(&a);
+ }
+ return a;
+}
+
+static int can_merge_formats(AVFilterFormats *a_arg,
+ AVFilterFormats *b_arg,
+ enum AVMediaType type,
+ int is_sample_rate)
+{
+ AVFilterFormats *a, *b, *ret;
+ if (a_arg == b_arg)
+ return 1;
+ a = clone_filter_formats(a_arg);
+ b = clone_filter_formats(b_arg);
+
+ if (!a || !b) {
+ if (a)
+ av_freep(&a->formats);
+ if (b)
+ av_freep(&b->formats);
+
+ av_freep(&a);
+ av_freep(&b);
+
+ return 0;
+ }
+
+ if (is_sample_rate) {
+ ret = ff_merge_samplerates(a, b);
+ } else {
+ ret = ff_merge_formats(a, b, type);
+ }
+ if (ret) {
+ av_freep(&ret->formats);
+ av_freep(&ret->refs);
+ av_freep(&ret);
+ return 1;
+ } else {
+ av_freep(&a->formats);
+ av_freep(&b->formats);
+ av_freep(&a);
+ av_freep(&b);
+ return 0;
+ }
+}
+
+/**
+ * Perform one round of query_formats() and merging formats lists on the
+ * filter graph.
+ * @return >=0 if all links formats lists could be queried and merged;
+ * AVERROR(EAGAIN) some progress was made in the queries or merging
+ * and a later call may succeed;
+ * AVERROR(EIO) (may be changed) plus a log message if no progress
+ * was made and the negotiation is stuck;
+ * a negative error code if some other error happened
+ */
static int query_formats(AVFilterGraph *graph, AVClass *log_ctx)
{
int i, j, ret;
int scaler_count = 0, resampler_count = 0;
+ int count_queried = 0; /* successful calls to query_formats() */
+ int count_merged = 0; /* successful merge of formats lists */
+ int count_already_merged = 0; /* lists already merged */
+ int count_delayed = 0; /* lists that need to be merged later */
- /* ask all the sub-filters for their supported media formats */
for (i = 0; i < graph->nb_filters; i++) {
- if (graph->filters[i]->filter->query_formats)
- ret = graph->filters[i]->filter->query_formats(graph->filters[i]);
+ AVFilterContext *f = graph->filters[i];
+ if (formats_declared(f))
+ continue;
+ if (f->filter->query_formats)
+ ret = filter_query_formats(f);
else
- ret = ff_default_query_formats(graph->filters[i]);
- if (ret < 0) {
- av_log(log_ctx, AV_LOG_ERROR,
- "Error querying formats for the filter %s (%s)\n",
- graph->filters[i]->name, graph->filters[i]->filter->name);
+ ret = ff_default_query_formats(f);
+ if (ret < 0 && ret != AVERROR(EAGAIN))
return ret;
- }
+ /* note: EAGAIN could indicate a partial success, not counted yet */
+ count_queried += ret >= 0;
}
/* go through and merge as many format lists as possible */
@@ -293,21 +464,49 @@ static int query_formats(AVFilterGraph *graph, AVClass *log_ctx)
if (!link)
continue;
- if (link->in_formats != link->out_formats &&
- !ff_merge_formats(link->in_formats,
- link->out_formats))
- convert_needed = 1;
- if (link->type == AVMEDIA_TYPE_AUDIO) {
- if (link->in_channel_layouts != link->out_channel_layouts &&
- !ff_merge_channel_layouts(link->in_channel_layouts,
- link->out_channel_layouts))
- convert_needed = 1;
- if (link->in_samplerates != link->out_samplerates &&
- !ff_merge_samplerates(link->in_samplerates,
- link->out_samplerates))
+ if (link->in_formats != link->out_formats
+ && link->in_formats && link->out_formats)
+ if (!can_merge_formats(link->in_formats, link->out_formats,
+ link->type, 0))
convert_needed = 1;
+ if (link->type == AVMEDIA_TYPE_AUDIO) {
+ if (link->in_samplerates != link->out_samplerates
+ && link->in_samplerates && link->out_samplerates)
+ if (!can_merge_formats(link->in_samplerates,
+ link->out_samplerates,
+ 0, 1))
+ convert_needed = 1;
+ }
+
+#define MERGE_DISPATCH(field, statement) \
+ if (!(link->in_ ## field && link->out_ ## field)) { \
+ count_delayed++; \
+ } else if (link->in_ ## field == link->out_ ## field) { \
+ count_already_merged++; \
+ } else if (!convert_needed) { \
+ count_merged++; \
+ statement \
}
+ if (link->type == AVMEDIA_TYPE_AUDIO) {
+ MERGE_DISPATCH(channel_layouts,
+ if (!ff_merge_channel_layouts(link->in_channel_layouts,
+ link->out_channel_layouts))
+ convert_needed = 1;
+ )
+ MERGE_DISPATCH(samplerates,
+ if (!ff_merge_samplerates(link->in_samplerates,
+ link->out_samplerates))
+ convert_needed = 1;
+ )
+ }
+ MERGE_DISPATCH(formats,
+ if (!ff_merge_formats(link->in_formats, link->out_formats,
+ link->type))
+ convert_needed = 1;
+ )
+#undef MERGE_DISPATCH
+
if (convert_needed) {
AVFilterContext *convert;
AVFilter *filter;
@@ -315,6 +514,14 @@ static int query_formats(AVFilterGraph *graph, AVClass *log_ctx)
char scale_args[256];
char inst_name[30];
+ if (graph->disable_auto_convert) {
+ av_log(log_ctx, AV_LOG_ERROR,
+ "The filters '%s' and '%s' do not have a common format "
+ "and automatic conversion is disabled.\n",
+ link->src->name, link->dst->name);
+ return AVERROR(EINVAL);
+ }
+
/* couldn't merge format lists. auto-insert conversion filter */
switch (link->type) {
case AVMEDIA_TYPE_VIDEO:
@@ -324,7 +531,7 @@ static int query_formats(AVFilterGraph *graph, AVClass *log_ctx)
return AVERROR(EINVAL);
}
- snprintf(inst_name, sizeof(inst_name), "auto-inserted scaler %d",
+ snprintf(inst_name, sizeof(inst_name), "auto_scaler_%d",
scaler_count++);
if ((ret = avfilter_graph_create_filter(&convert, filter,
@@ -333,20 +540,20 @@ static int query_formats(AVFilterGraph *graph, AVClass *log_ctx)
return ret;
break;
case AVMEDIA_TYPE_AUDIO:
- if (!(filter = avfilter_get_by_name("resample"))) {
- av_log(log_ctx, AV_LOG_ERROR, "'resample' filter "
+ if (!(filter = avfilter_get_by_name("aresample"))) {
+ av_log(log_ctx, AV_LOG_ERROR, "'aresample' filter "
"not present, cannot convert audio formats.\n");
return AVERROR(EINVAL);
}
- snprintf(inst_name, sizeof(inst_name), "auto-inserted resampler %d",
+ snprintf(inst_name, sizeof(inst_name), "auto_resampler_%d",
resampler_count++);
scale_args[0] = '\0';
- if (graph->resample_lavr_opts)
+ if (graph->aresample_swr_opts)
snprintf(scale_args, sizeof(scale_args), "%s",
- graph->resample_lavr_opts);
+ graph->aresample_swr_opts);
if ((ret = avfilter_graph_create_filter(&convert, filter,
- inst_name, scale_args,
+ inst_name, graph->aresample_swr_opts,
NULL, graph)) < 0)
return ret;
break;
@@ -357,24 +564,40 @@ static int query_formats(AVFilterGraph *graph, AVClass *log_ctx)
if ((ret = avfilter_insert_filter(link, convert, 0, 0)) < 0)
return ret;
- convert->filter->query_formats(convert);
+ if ((ret = filter_query_formats(convert)) < 0)
+ return ret;
+
inlink = convert->inputs[0];
outlink = convert->outputs[0];
- if (!ff_merge_formats( inlink->in_formats, inlink->out_formats) ||
- !ff_merge_formats(outlink->in_formats, outlink->out_formats))
- ret |= AVERROR(ENOSYS);
+ av_assert0( inlink-> in_formats->refcount > 0);
+ av_assert0( inlink->out_formats->refcount > 0);
+ av_assert0(outlink-> in_formats->refcount > 0);
+ av_assert0(outlink->out_formats->refcount > 0);
+ if (outlink->type == AVMEDIA_TYPE_AUDIO) {
+ av_assert0( inlink-> in_samplerates->refcount > 0);
+ av_assert0( inlink->out_samplerates->refcount > 0);
+ av_assert0(outlink-> in_samplerates->refcount > 0);
+ av_assert0(outlink->out_samplerates->refcount > 0);
+ av_assert0( inlink-> in_channel_layouts->refcount > 0);
+ av_assert0( inlink->out_channel_layouts->refcount > 0);
+ av_assert0(outlink-> in_channel_layouts->refcount > 0);
+ av_assert0(outlink->out_channel_layouts->refcount > 0);
+ }
+ if (!ff_merge_formats( inlink->in_formats, inlink->out_formats, inlink->type) ||
+ !ff_merge_formats(outlink->in_formats, outlink->out_formats, outlink->type))
+ ret = AVERROR(ENOSYS);
if (inlink->type == AVMEDIA_TYPE_AUDIO &&
(!ff_merge_samplerates(inlink->in_samplerates,
inlink->out_samplerates) ||
!ff_merge_channel_layouts(inlink->in_channel_layouts,
inlink->out_channel_layouts)))
- ret |= AVERROR(ENOSYS);
+ ret = AVERROR(ENOSYS);
if (outlink->type == AVMEDIA_TYPE_AUDIO &&
(!ff_merge_samplerates(outlink->in_samplerates,
outlink->out_samplerates) ||
!ff_merge_channel_layouts(outlink->in_channel_layouts,
outlink->out_channel_layouts)))
- ret |= AVERROR(ENOSYS);
+ ret = AVERROR(ENOSYS);
if (ret < 0) {
av_log(log_ctx, AV_LOG_ERROR,
@@ -386,14 +609,101 @@ static int query_formats(AVFilterGraph *graph, AVClass *log_ctx)
}
}
+ av_log(graph, AV_LOG_DEBUG, "query_formats: "
+ "%d queried, %d merged, %d already done, %d delayed\n",
+ count_queried, count_merged, count_already_merged, count_delayed);
+ if (count_delayed) {
+ AVBPrint bp;
+
+ /* if count_queried > 0, one filter at least did set its formats,
+ that will give additional information to its neighbour;
+ if count_merged > 0, one pair of formats lists at least was merged,
+ that will give additional information to all connected filters;
+ in both cases, progress was made and a new round must be done */
+ if (count_queried || count_merged)
+ return AVERROR(EAGAIN);
+ av_bprint_init(&bp, 0, AV_BPRINT_SIZE_AUTOMATIC);
+ for (i = 0; i < graph->nb_filters; i++)
+ if (!formats_declared(graph->filters[i]))
+ av_bprintf(&bp, "%s%s", bp.len ? ", " : "",
+ graph->filters[i]->name);
+ av_log(graph, AV_LOG_ERROR,
+ "The following filters could not choose their formats: %s\n"
+ "Consider inserting the (a)format filter near their input or "
+ "output.\n", bp.str);
+ return AVERROR(EIO);
+ }
return 0;
}
-static int pick_format(AVFilterLink *link)
+static int get_fmt_score(enum AVSampleFormat dst_fmt, enum AVSampleFormat src_fmt)
+{
+ int score = 0;
+
+ if (av_sample_fmt_is_planar(dst_fmt) != av_sample_fmt_is_planar(src_fmt))
+ score ++;
+
+ if (av_get_bytes_per_sample(dst_fmt) < av_get_bytes_per_sample(src_fmt)) {
+ score += 100 * (av_get_bytes_per_sample(src_fmt) - av_get_bytes_per_sample(dst_fmt));
+ }else
+ score += 10 * (av_get_bytes_per_sample(dst_fmt) - av_get_bytes_per_sample(src_fmt));
+
+ if (av_get_packed_sample_fmt(dst_fmt) == AV_SAMPLE_FMT_S32 &&
+ av_get_packed_sample_fmt(src_fmt) == AV_SAMPLE_FMT_FLT)
+ score += 20;
+
+ if (av_get_packed_sample_fmt(dst_fmt) == AV_SAMPLE_FMT_FLT &&
+ av_get_packed_sample_fmt(src_fmt) == AV_SAMPLE_FMT_S32)
+ score += 2;
+
+ return score;
+}
+
+static enum AVSampleFormat find_best_sample_fmt_of_2(enum AVSampleFormat dst_fmt1, enum AVSampleFormat dst_fmt2,
+ enum AVSampleFormat src_fmt)
+{
+ int score1, score2;
+
+ score1 = get_fmt_score(dst_fmt1, src_fmt);
+ score2 = get_fmt_score(dst_fmt2, src_fmt);
+
+ return score1 < score2 ? dst_fmt1 : dst_fmt2;
+}
+
+static int pick_format(AVFilterLink *link, AVFilterLink *ref)
{
if (!link || !link->in_formats)
return 0;
+ if (link->type == AVMEDIA_TYPE_VIDEO) {
+ if(ref && ref->type == AVMEDIA_TYPE_VIDEO){
+ int has_alpha= av_pix_fmt_desc_get(ref->format)->nb_components % 2 == 0;
+ enum AVPixelFormat best= AV_PIX_FMT_NONE;
+ int i;
+ for (i=0; i<link->in_formats->nb_formats; i++) {
+ enum AVPixelFormat p = link->in_formats->formats[i];
+ best= av_find_best_pix_fmt_of_2(best, p, ref->format, has_alpha, NULL);
+ }
+ av_log(link->src,AV_LOG_DEBUG, "picking %s out of %d ref:%s alpha:%d\n",
+ av_get_pix_fmt_name(best), link->in_formats->nb_formats,
+ av_get_pix_fmt_name(ref->format), has_alpha);
+ link->in_formats->formats[0] = best;
+ }
+ } else if (link->type == AVMEDIA_TYPE_AUDIO) {
+ if(ref && ref->type == AVMEDIA_TYPE_AUDIO){
+ enum AVSampleFormat best= AV_SAMPLE_FMT_NONE;
+ int i;
+ for (i=0; i<link->in_formats->nb_formats; i++) {
+ enum AVSampleFormat p = link->in_formats->formats[i];
+ best = find_best_sample_fmt_of_2(best, p, ref->format);
+ }
+ av_log(link->src,AV_LOG_DEBUG, "picking %s out of %d ref:%s\n",
+ av_get_sample_fmt_name(best), link->in_formats->nb_formats,
+ av_get_sample_fmt_name(ref->format));
+ link->in_formats->formats[0] = best;
+ }
+ }
+
link->in_formats->nb_formats = 1;
link->format = link->in_formats->formats[0];
@@ -407,14 +717,22 @@ static int pick_format(AVFilterLink *link)
link->in_samplerates->nb_formats = 1;
link->sample_rate = link->in_samplerates->formats[0];
- if (!link->in_channel_layouts->nb_channel_layouts) {
+ if (link->in_channel_layouts->all_layouts) {
av_log(link->src, AV_LOG_ERROR, "Cannot select channel layout for"
- "the link between filters %s and %s.\n", link->src->name,
+ " the link between filters %s and %s.\n", link->src->name,
link->dst->name);
+ if (!link->in_channel_layouts->all_counts)
+ av_log(link->src, AV_LOG_ERROR, "Unknown channel layouts not "
+ "supported, try specifying a channel layout using "
+ "'aformat=channel_layouts=something'.\n");
return AVERROR(EINVAL);
}
link->in_channel_layouts->nb_channel_layouts = 1;
link->channel_layout = link->in_channel_layouts->channel_layouts[0];
+ if ((link->channels = FF_LAYOUT2COUNT(link->channel_layout)))
+ link->channel_layout = 0;
+ else
+ link->channels = av_get_channel_layout_nb_channels(link->channel_layout);
}
ff_formats_unref(&link->in_formats);
@@ -427,7 +745,7 @@ static int pick_format(AVFilterLink *link)
return 0;
}
-#define REDUCE_FORMATS(fmt_type, list_type, list, var, nb, add_format) \
+#define REDUCE_FORMATS(fmt_type, list_type, list, var, nb, add_format, unref_format) \
do { \
for (i = 0; i < filter->nb_inputs; i++) { \
AVFilterLink *link = filter->inputs[i]; \
@@ -447,7 +765,9 @@ do { \
fmts = out_link->in_ ## list; \
\
if (!out_link->in_ ## list->nb) { \
- add_format(&out_link->in_ ##list, fmt); \
+ if ((ret = add_format(&out_link->in_ ##list, fmt)) < 0)\
+ return ret; \
+ ret = 1; \
break; \
} \
\
@@ -467,25 +787,66 @@ static int reduce_formats_on_filter(AVFilterContext *filter)
int i, j, k, ret = 0;
REDUCE_FORMATS(int, AVFilterFormats, formats, formats,
- nb_formats, ff_add_format);
+ nb_formats, ff_add_format, ff_formats_unref);
REDUCE_FORMATS(int, AVFilterFormats, samplerates, formats,
- nb_formats, ff_add_format);
- REDUCE_FORMATS(uint64_t, AVFilterChannelLayouts, channel_layouts,
- channel_layouts, nb_channel_layouts, ff_add_channel_layout);
+ nb_formats, ff_add_format, ff_formats_unref);
+
+ /* reduce channel layouts */
+ for (i = 0; i < filter->nb_inputs; i++) {
+ AVFilterLink *inlink = filter->inputs[i];
+ uint64_t fmt;
+
+ if (!inlink->out_channel_layouts ||
+ inlink->out_channel_layouts->nb_channel_layouts != 1)
+ continue;
+ fmt = inlink->out_channel_layouts->channel_layouts[0];
+
+ for (j = 0; j < filter->nb_outputs; j++) {
+ AVFilterLink *outlink = filter->outputs[j];
+ AVFilterChannelLayouts *fmts;
+
+ fmts = outlink->in_channel_layouts;
+ if (inlink->type != outlink->type || fmts->nb_channel_layouts == 1)
+ continue;
+
+ if (fmts->all_layouts &&
+ (!FF_LAYOUT2COUNT(fmt) || fmts->all_counts)) {
+ /* Turn the infinite list into a singleton */
+ fmts->all_layouts = fmts->all_counts = 0;
+ if (ff_add_channel_layout(&outlink->in_channel_layouts, fmt) < 0)
+ ret = 1;
+ break;
+ }
+
+ for (k = 0; k < outlink->in_channel_layouts->nb_channel_layouts; k++) {
+ if (fmts->channel_layouts[k] == fmt) {
+ fmts->channel_layouts[0] = fmt;
+ fmts->nb_channel_layouts = 1;
+ ret = 1;
+ break;
+ }
+ }
+ }
+ }
return ret;
}
-static void reduce_formats(AVFilterGraph *graph)
+static int reduce_formats(AVFilterGraph *graph)
{
- int i, reduced;
+ int i, reduced, ret;
do {
reduced = 0;
- for (i = 0; i < graph->nb_filters; i++)
- reduced |= reduce_formats_on_filter(graph->filters[i]);
+ for (i = 0; i < graph->nb_filters; i++) {
+ if ((ret = reduce_formats_on_filter(graph->filters[i])) < 0)
+ return ret;
+ reduced |= ret;
+ }
} while (reduced);
+
+ return 0;
}
static void swap_samplerates_on_filter(AVFilterContext *filter)
@@ -517,6 +878,8 @@ static void swap_samplerates_on_filter(AVFilterContext *filter)
for (j = 0; j < outlink->in_samplerates->nb_formats; j++) {
int diff = abs(sample_rate - outlink->in_samplerates->formats[j]);
+ av_assert0(diff < INT_MAX); // This would lead to the use of uninitialized best_diff but is only possible with invalid sample rates
+
if (diff < best_diff) {
best_diff = diff;
best_idx = j;
@@ -599,7 +962,23 @@ static void swap_channel_layouts_on_filter(AVFilterContext *filter)
int out_channels = av_get_channel_layout_nb_channels(out_chlayout);
int count_diff = out_channels - in_channels;
int matched_channels, extra_channels;
- int score = 0;
+ int score = 100000;
+
+ if (FF_LAYOUT2COUNT(in_chlayout) || FF_LAYOUT2COUNT(out_chlayout)) {
+ /* Compute score in case the input or output layout encodes
+ a channel count; in this case the score is not altered by
+ the computation afterwards, as in_chlayout and
+ out_chlayout have both been set to 0 */
+ if (FF_LAYOUT2COUNT(in_chlayout))
+ in_channels = FF_LAYOUT2COUNT(in_chlayout);
+ if (FF_LAYOUT2COUNT(out_chlayout))
+ out_channels = FF_LAYOUT2COUNT(out_chlayout);
+ score -= 10000 + FFABS(out_channels - in_channels) +
+ (in_channels > out_channels ? 10000 : 0);
+ in_chlayout = out_chlayout = 0;
+ /* Let the remaining computation run, even if the score
+ value is not altered */
+ }
/* channel substitution */
for (k = 0; k < FF_ARRAY_ELEMS(ch_subst); k++) {
@@ -722,15 +1101,50 @@ static void swap_sample_fmts(AVFilterGraph *graph)
static int pick_formats(AVFilterGraph *graph)
{
int i, j, ret;
+ int change;
+
+ do{
+ change = 0;
+ for (i = 0; i < graph->nb_filters; i++) {
+ AVFilterContext *filter = graph->filters[i];
+ if (filter->nb_inputs){
+ for (j = 0; j < filter->nb_inputs; j++){
+ if(filter->inputs[j]->in_formats && filter->inputs[j]->in_formats->nb_formats == 1) {
+ if ((ret = pick_format(filter->inputs[j], NULL)) < 0)
+ return ret;
+ change = 1;
+ }
+ }
+ }
+ if (filter->nb_outputs){
+ for (j = 0; j < filter->nb_outputs; j++){
+ if(filter->outputs[j]->in_formats && filter->outputs[j]->in_formats->nb_formats == 1) {
+ if ((ret = pick_format(filter->outputs[j], NULL)) < 0)
+ return ret;
+ change = 1;
+ }
+ }
+ }
+ if (filter->nb_inputs && filter->nb_outputs && filter->inputs[0]->format>=0) {
+ for (j = 0; j < filter->nb_outputs; j++) {
+ if(filter->outputs[j]->format<0) {
+ if ((ret = pick_format(filter->outputs[j], filter->inputs[0])) < 0)
+ return ret;
+ change = 1;
+ }
+ }
+ }
+ }
+ }while(change);
for (i = 0; i < graph->nb_filters; i++) {
AVFilterContext *filter = graph->filters[i];
for (j = 0; j < filter->nb_inputs; j++)
- if ((ret = pick_format(filter->inputs[j])) < 0)
+ if ((ret = pick_format(filter->inputs[j], NULL)) < 0)
return ret;
for (j = 0; j < filter->nb_outputs; j++)
- if ((ret = pick_format(filter->outputs[j])) < 0)
+ if ((ret = pick_format(filter->outputs[j], NULL)) < 0)
return ret;
}
return 0;
@@ -744,13 +1158,16 @@ static int graph_config_formats(AVFilterGraph *graph, AVClass *log_ctx)
int ret;
/* find supported formats from sub-filters, and merge along links */
- if ((ret = query_formats(graph, log_ctx)) < 0)
+ while ((ret = query_formats(graph, log_ctx)) == AVERROR(EAGAIN))
+ av_log(graph, AV_LOG_DEBUG, "query_formats not finished\n");
+ if (ret < 0)
return ret;
/* Once everything is merged, it's possible that we'll still have
* multiple valid media format choices. We try to minimize the amount
* of format conversion inside filters */
- reduce_formats(graph);
+ if ((ret = reduce_formats(graph)) < 0)
+ return ret;
/* for audio filters, ensure the best format, sample rate and channel layout
* is selected */
@@ -764,6 +1181,48 @@ static int graph_config_formats(AVFilterGraph *graph, AVClass *log_ctx)
return 0;
}
+static int graph_config_pointers(AVFilterGraph *graph,
+ AVClass *log_ctx)
+{
+ unsigned i, j;
+ int sink_links_count = 0, n = 0;
+ AVFilterContext *f;
+ AVFilterLink **sinks;
+
+ for (i = 0; i < graph->nb_filters; i++) {
+ f = graph->filters[i];
+ for (j = 0; j < f->nb_inputs; j++) {
+ f->inputs[j]->graph = graph;
+ f->inputs[j]->age_index = -1;
+ }
+ for (j = 0; j < f->nb_outputs; j++) {
+ f->outputs[j]->graph = graph;
+ f->outputs[j]->age_index= -1;
+ }
+ if (!f->nb_outputs) {
+ if (f->nb_inputs > INT_MAX - sink_links_count)
+ return AVERROR(EINVAL);
+ sink_links_count += f->nb_inputs;
+ }
+ }
+ sinks = av_calloc(sink_links_count, sizeof(*sinks));
+ if (!sinks)
+ return AVERROR(ENOMEM);
+ for (i = 0; i < graph->nb_filters; i++) {
+ f = graph->filters[i];
+ if (!f->nb_outputs) {
+ for (j = 0; j < f->nb_inputs; j++) {
+ sinks[n] = f->inputs[j];
+ f->inputs[j]->age_index = n++;
+ }
+ }
+ }
+ av_assert0(n == sink_links_count);
+ graph->sink_links = sinks;
+ graph->sink_links_count = sink_links_count;
+ return 0;
+}
+
static int graph_insert_fifos(AVFilterGraph *graph, AVClass *log_ctx)
{
AVFilterContext *f;
@@ -786,7 +1245,7 @@ static int graph_insert_fifos(AVFilterGraph *graph, AVClass *log_ctx)
avfilter_get_by_name("fifo") :
avfilter_get_by_name("afifo");
- snprintf(name, sizeof(name), "auto-inserted fifo %d", fifo_count++);
+ snprintf(name, sizeof(name), "auto_fifo_%d", fifo_count++);
ret = avfilter_graph_create_filter(&fifo_ctx, fifo, name, NULL,
NULL, graph);
@@ -814,6 +1273,172 @@ int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx)
return ret;
if ((ret = graph_config_links(graphctx, log_ctx)))
return ret;
+ if ((ret = graph_config_pointers(graphctx, log_ctx)))
+ return ret;
+
+ return 0;
+}
+
+int avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags)
+{
+ int i, r = AVERROR(ENOSYS);
+
+ if (!graph)
+ return r;
+
+ if ((flags & AVFILTER_CMD_FLAG_ONE) && !(flags & AVFILTER_CMD_FLAG_FAST)) {
+ r = avfilter_graph_send_command(graph, target, cmd, arg, res, res_len, flags | AVFILTER_CMD_FLAG_FAST);
+ if (r != AVERROR(ENOSYS))
+ return r;
+ }
+
+ if (res_len && res)
+ res[0] = 0;
+
+ for (i = 0; i < graph->nb_filters; i++) {
+ AVFilterContext *filter = graph->filters[i];
+ if (!strcmp(target, "all") || (filter->name && !strcmp(target, filter->name)) || !strcmp(target, filter->filter->name)) {
+ r = avfilter_process_command(filter, cmd, arg, res, res_len, flags);
+ if (r != AVERROR(ENOSYS)) {
+ if ((flags & AVFILTER_CMD_FLAG_ONE) || r < 0)
+ return r;
+ }
+ }
+ }
+
+ return r;
+}
+
+int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *command, const char *arg, int flags, double ts)
+{
+ int i;
+
+ if(!graph)
+ return 0;
+
+ for (i = 0; i < graph->nb_filters; i++) {
+ AVFilterContext *filter = graph->filters[i];
+ if(filter && (!strcmp(target, "all") || !strcmp(target, filter->name) || !strcmp(target, filter->filter->name))){
+ AVFilterCommand **queue = &filter->command_queue, *next;
+ while (*queue && (*queue)->time <= ts)
+ queue = &(*queue)->next;
+ next = *queue;
+ *queue = av_mallocz(sizeof(AVFilterCommand));
+ (*queue)->command = av_strdup(command);
+ (*queue)->arg = av_strdup(arg);
+ (*queue)->time = ts;
+ (*queue)->flags = flags;
+ (*queue)->next = next;
+ if(flags & AVFILTER_CMD_FLAG_ONE)
+ return 0;
+ }
+ }
return 0;
}
+
+static void heap_bubble_up(AVFilterGraph *graph,
+ AVFilterLink *link, int index)
+{
+ AVFilterLink **links = graph->sink_links;
+
+ av_assert0(index >= 0);
+
+ while (index) {
+ int parent = (index - 1) >> 1;
+ if (links[parent]->current_pts_us >= link->current_pts_us)
+ break;
+ links[index] = links[parent];
+ links[index]->age_index = index;
+ index = parent;
+ }
+ links[index] = link;
+ link->age_index = index;
+}
+
+static void heap_bubble_down(AVFilterGraph *graph,
+ AVFilterLink *link, int index)
+{
+ AVFilterLink **links = graph->sink_links;
+
+ av_assert0(index >= 0);
+
+ while (1) {
+ int child = 2 * index + 1;
+ if (child >= graph->sink_links_count)
+ break;
+ if (child + 1 < graph->sink_links_count &&
+ links[child + 1]->current_pts_us < links[child]->current_pts_us)
+ child++;
+ if (link->current_pts_us < links[child]->current_pts_us)
+ break;
+ links[index] = links[child];
+ links[index]->age_index = index;
+ index = child;
+ }
+ links[index] = link;
+ link->age_index = index;
+}
+
+void ff_avfilter_graph_update_heap(AVFilterGraph *graph, AVFilterLink *link)
+{
+ heap_bubble_up (graph, link, link->age_index);
+ heap_bubble_down(graph, link, link->age_index);
+}
+
+int avfilter_graph_request_oldest(AVFilterGraph *graph)
+{
+ AVFilterLink *oldest = graph->sink_links[0];
+ int64_t frame_count;
+ int r;
+
+ while (graph->sink_links_count) {
+ oldest = graph->sink_links[0];
+ if (oldest->dst->filter->activate) {
+ /* For now, buffersink is the only filter implementing activate. */
+ return av_buffersink_get_frame_flags(oldest->dst, NULL,
+ AV_BUFFERSINK_FLAG_PEEK);
+ }
+ r = ff_request_frame(oldest);
+ if (r != AVERROR_EOF)
+ break;
+ av_log(oldest->dst, AV_LOG_DEBUG, "EOF on sink link %s:%s.\n",
+ oldest->dst ? oldest->dst->name : "unknown",
+ oldest->dstpad ? oldest->dstpad->name : "unknown");
+ /* EOF: remove the link from the heap */
+ if (oldest->age_index < --graph->sink_links_count)
+ heap_bubble_down(graph, graph->sink_links[graph->sink_links_count],
+ oldest->age_index);
+ oldest->age_index = -1;
+ }
+ if (!graph->sink_links_count)
+ return AVERROR_EOF;
+ av_assert1(!oldest->dst->filter->activate);
+ av_assert1(oldest->age_index >= 0);
+ frame_count = oldest->frame_count_out;
+ while (frame_count == oldest->frame_count_out) {
+ r = ff_filter_graph_run_once(graph);
+ if (r == AVERROR(EAGAIN) &&
+ !oldest->frame_wanted_out && !oldest->frame_blocked_in &&
+ !oldest->status_in)
+ ff_request_frame(oldest);
+ else if (r < 0)
+ return r;
+ }
+ return 0;
+}
+
+int ff_filter_graph_run_once(AVFilterGraph *graph)
+{
+ AVFilterContext *filter;
+ unsigned i;
+
+ av_assert0(graph->nb_filters);
+ filter = graph->filters[0];
+ for (i = 1; i < graph->nb_filters; i++)
+ if (graph->filters[i]->ready > filter->ready)
+ filter = graph->filters[i];
+ if (!filter->ready)
+ return AVERROR(EAGAIN);
+ return ff_filter_activate(filter);
+}
diff --git a/libavfilter/avfiltergraph.h b/libavfilter/avfiltergraph.h
index 47174efc6c..b31d581ca0 100644
--- a/libavfilter/avfiltergraph.h
+++ b/libavfilter/avfiltergraph.h
@@ -2,20 +2,20 @@
* Filter graphs
* copyright (c) 2007 Bobby Bingham
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -25,5 +25,4 @@
#include "avfilter.h"
#include "libavutil/log.h"
-
#endif /* AVFILTER_AVFILTERGRAPH_H */
diff --git a/libavfilter/avfilterres.rc b/libavfilter/avfilterres.rc
new file mode 100644
index 0000000000..8be62473e2
--- /dev/null
+++ b/libavfilter/avfilterres.rc
@@ -0,0 +1,55 @@
+/*
+ * Windows resource file for libavfilter
+ *
+ * Copyright (C) 2012 James Almer
+ * Copyright (C) 2013 Tiancheng "Timothy" Gu
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <windows.h>
+#include "libavfilter/version.h"
+#include "libavutil/ffversion.h"
+#include "config.h"
+
+1 VERSIONINFO
+FILEVERSION LIBAVFILTER_VERSION_MAJOR, LIBAVFILTER_VERSION_MINOR, LIBAVFILTER_VERSION_MICRO, 0
+PRODUCTVERSION LIBAVFILTER_VERSION_MAJOR, LIBAVFILTER_VERSION_MINOR, LIBAVFILTER_VERSION_MICRO, 0
+FILEFLAGSMASK VS_FFI_FILEFLAGSMASK
+FILEOS VOS_NT_WINDOWS32
+FILETYPE VFT_DLL
+{
+ BLOCK "StringFileInfo"
+ {
+ BLOCK "040904B0"
+ {
+ VALUE "CompanyName", "FFmpeg Project"
+ VALUE "FileDescription", "FFmpeg audio/video filtering library"
+ VALUE "FileVersion", AV_STRINGIFY(LIBAVFILTER_VERSION)
+ VALUE "InternalName", "libavfilter"
+ VALUE "LegalCopyright", "Copyright (C) 2000-" AV_STRINGIFY(CONFIG_THIS_YEAR) " FFmpeg Project"
+ VALUE "OriginalFilename", "avfilter" BUILDSUF "-" AV_STRINGIFY(LIBAVFILTER_VERSION_MAJOR) SLIBSUF
+ VALUE "ProductName", "FFmpeg"
+ VALUE "ProductVersion", FFMPEG_VERSION
+ }
+ }
+
+ BLOCK "VarFileInfo"
+ {
+ VALUE "Translation", 0x0409, 0x04B0
+ }
+}
diff --git a/libavfilter/bbox.c b/libavfilter/bbox.c
new file mode 100644
index 0000000000..be9b2e6b73
--- /dev/null
+++ b/libavfilter/bbox.c
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2005 Robert Edele <yartrebo@earthlink.net>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "bbox.h"
+
+int ff_calculate_bounding_box(FFBoundingBox *bbox,
+ const uint8_t *data, int linesize, int w, int h,
+ int min_val)
+{
+ int x, y;
+ int start_x;
+ int start_y;
+ int end_x;
+ int end_y;
+ const uint8_t *line;
+
+ /* left bound */
+ for (start_x = 0; start_x < w; start_x++)
+ for (y = 0; y < h; y++)
+ if ((data[y * linesize + start_x] > min_val))
+ goto outl;
+outl:
+ if (start_x == w) /* no points found */
+ return 0;
+
+ /* right bound */
+ for (end_x = w - 1; end_x >= start_x; end_x--)
+ for (y = 0; y < h; y++)
+ if ((data[y * linesize + end_x] > min_val))
+ goto outr;
+outr:
+
+ /* top bound */
+ line = data;
+ for (start_y = 0; start_y < h; start_y++) {
+ for (x = 0; x < w; x++)
+ if (line[x] > min_val)
+ goto outt;
+ line += linesize;
+ }
+outt:
+
+ /* bottom bound */
+ line = data + (h-1)*linesize;
+ for (end_y = h - 1; end_y >= start_y; end_y--) {
+ for (x = 0; x < w; x++)
+ if (line[x] > min_val)
+ goto outb;
+ line -= linesize;
+ }
+outb:
+
+ bbox->x1 = start_x;
+ bbox->y1 = start_y;
+ bbox->x2 = end_x;
+ bbox->y2 = end_y;
+ return 1;
+}
diff --git a/libavfilter/bbox.h b/libavfilter/bbox.h
new file mode 100644
index 0000000000..eb73154c14
--- /dev/null
+++ b/libavfilter/bbox.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2005 Robert Edele <yartrebo@earthlink.net>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_BBOX_H
+#define AVFILTER_BBOX_H
+
+#include <stdint.h>
+
+typedef struct {
+ int x1, x2, y1, y2;
+} FFBoundingBox;
+
+/**
+ * Calculate the smallest rectangle that will encompass the
+ * region with values > min_val.
+ *
+ * @param bbox bounding box structure which is updated with the found values.
+ * If no pixels could be found with value > min_val, the
+ * structure is not modified.
+ * @return 1 in case at least one pixel with value > min_val was found,
+ * 0 otherwise
+ */
+int ff_calculate_bounding_box(FFBoundingBox *bbox,
+ const uint8_t *data, int linesize,
+ int w, int h, int min_val);
+
+#endif /* AVFILTER_BBOX_H */
diff --git a/libavfilter/blend.h b/libavfilter/blend.h
new file mode 100644
index 0000000000..54c4fdb6d1
--- /dev/null
+++ b/libavfilter/blend.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2013 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_BLEND_H
+#define AVFILTER_BLEND_H
+
+#include "libavutil/eval.h"
+#include "avfilter.h"
+
+enum BlendMode {
+ BLEND_UNSET = -1,
+ BLEND_NORMAL,
+ BLEND_ADDITION,
+ BLEND_AND,
+ BLEND_AVERAGE,
+ BLEND_BURN,
+ BLEND_DARKEN,
+ BLEND_DIFFERENCE,
+ BLEND_DIFFERENCE128,
+ BLEND_DIVIDE,
+ BLEND_DODGE,
+ BLEND_EXCLUSION,
+ BLEND_HARDLIGHT,
+ BLEND_LIGHTEN,
+ BLEND_MULTIPLY,
+ BLEND_NEGATION,
+ BLEND_OR,
+ BLEND_OVERLAY,
+ BLEND_PHOENIX,
+ BLEND_PINLIGHT,
+ BLEND_REFLECT,
+ BLEND_SCREEN,
+ BLEND_SOFTLIGHT,
+ BLEND_SUBTRACT,
+ BLEND_VIVIDLIGHT,
+ BLEND_XOR,
+ BLEND_HARDMIX,
+ BLEND_LINEARLIGHT,
+ BLEND_GLOW,
+ BLEND_ADDITION128,
+ BLEND_MULTIPLY128,
+ BLEND_HEAT,
+ BLEND_FREEZE,
+ BLEND_NB
+};
+
+typedef struct FilterParams {
+ enum BlendMode mode;
+ double opacity;
+ AVExpr *e;
+ char *expr_str;
+ void (*blend)(const uint8_t *top, ptrdiff_t top_linesize,
+ const uint8_t *bottom, ptrdiff_t bottom_linesize,
+ uint8_t *dst, ptrdiff_t dst_linesize,
+ ptrdiff_t width, ptrdiff_t height,
+ struct FilterParams *param, double *values, int starty);
+} FilterParams;
+
+void ff_blend_init(FilterParams *param, int is_16bit);
+void ff_blend_init_x86(FilterParams *param, int is_16bit);
+
+#endif /* AVFILTER_BLEND_H */
diff --git a/libavfilter/bufferqueue.h b/libavfilter/bufferqueue.h
new file mode 100644
index 0000000000..f5e5df2d72
--- /dev/null
+++ b/libavfilter/bufferqueue.h
@@ -0,0 +1,121 @@
+/*
+ * Generic buffer queue
+ * Copyright (c) 2012 Nicolas George
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_BUFFERQUEUE_H
+#define AVFILTER_BUFFERQUEUE_H
+
+/**
+ * FFBufQueue: simple AVFrame queue API
+ *
+ * Note: this API is not thread-safe. Concurrent access to the same queue
+ * must be protected by a mutex or any synchronization mechanism.
+ */
+
+/**
+ * Maximum size of the queue.
+ *
+ * This value can be overridden by definying it before including this
+ * header.
+ * Powers of 2 are recommended.
+ */
+#ifndef FF_BUFQUEUE_SIZE
+#define FF_BUFQUEUE_SIZE 64
+#endif
+
+#include "avfilter.h"
+#include "libavutil/avassert.h"
+
+/**
+ * Structure holding the queue
+ */
+struct FFBufQueue {
+ AVFrame *queue[FF_BUFQUEUE_SIZE];
+ unsigned short head;
+ unsigned short available; /**< number of available buffers */
+};
+
+#define BUCKET(i) queue->queue[(queue->head + (i)) % FF_BUFQUEUE_SIZE]
+
+/**
+ * Test if a buffer queue is full.
+ */
+static inline int ff_bufqueue_is_full(struct FFBufQueue *queue)
+{
+ return queue->available == FF_BUFQUEUE_SIZE;
+}
+
+/**
+ * Add a buffer to the queue.
+ *
+ * If the queue is already full, then the current last buffer is dropped
+ * (and unrefed) with a warning before adding the new buffer.
+ */
+static inline void ff_bufqueue_add(void *log, struct FFBufQueue *queue,
+ AVFrame *buf)
+{
+ if (ff_bufqueue_is_full(queue)) {
+ av_log(log, AV_LOG_WARNING, "Buffer queue overflow, dropping.\n");
+ av_frame_free(&BUCKET(--queue->available));
+ }
+ BUCKET(queue->available++) = buf;
+}
+
+/**
+ * Get a buffer from the queue without altering it.
+ *
+ * Buffer with index 0 is the first buffer in the queue.
+ * Return NULL if the queue has not enough buffers.
+ */
+static inline AVFrame *ff_bufqueue_peek(struct FFBufQueue *queue,
+ unsigned index)
+{
+ return index < queue->available ? BUCKET(index) : NULL;
+}
+
+/**
+ * Get the first buffer from the queue and remove it.
+ *
+ * Do not use on an empty queue.
+ */
+static inline AVFrame *ff_bufqueue_get(struct FFBufQueue *queue)
+{
+ AVFrame *ret = queue->queue[queue->head];
+ av_assert0(queue->available);
+ queue->available--;
+ queue->queue[queue->head] = NULL;
+ queue->head = (queue->head + 1) % FF_BUFQUEUE_SIZE;
+ return ret;
+}
+
+/**
+ * Unref and remove all buffers from the queue.
+ */
+static inline void ff_bufqueue_discard_all(struct FFBufQueue *queue)
+{
+ while (queue->available) {
+ AVFrame *buf = ff_bufqueue_get(queue);
+ av_frame_free(&buf);
+ }
+}
+
+#undef BUCKET
+
+#endif /* AVFILTER_BUFFERQUEUE_H */
diff --git a/libavfilter/buffersink.c b/libavfilter/buffersink.c
index 3b4d285ffd..0f87b5439a 100644
--- a/libavfilter/buffersink.c
+++ b/libavfilter/buffersink.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2011 Stefano Sabatini
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -23,126 +23,321 @@
* buffer sink
*/
-#include "libavutil/audio_fifo.h"
#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/internal.h"
-#include "libavutil/mathematics.h"
+#include "libavutil/opt.h"
+
+#define FF_INTERNAL_FIELDS 1
+#include "framequeue.h"
#include "audio.h"
#include "avfilter.h"
#include "buffersink.h"
+#include "filters.h"
#include "internal.h"
typedef struct BufferSinkContext {
- AVFrame *cur_frame; ///< last frame delivered on the sink
- AVAudioFifo *audio_fifo; ///< FIFO for audio samples
- int64_t next_pts; ///< interpolating audio pts
+ const AVClass *class;
+ unsigned warning_limit;
+
+ /* only used for video */
+ enum AVPixelFormat *pixel_fmts; ///< list of accepted pixel formats, must be terminated with -1
+ int pixel_fmts_size;
+
+ /* only used for audio */
+ enum AVSampleFormat *sample_fmts; ///< list of accepted sample formats, terminated by AV_SAMPLE_FMT_NONE
+ int sample_fmts_size;
+ int64_t *channel_layouts; ///< list of accepted channel layouts, terminated by -1
+ int channel_layouts_size;
+ int *channel_counts; ///< list of accepted channel counts, terminated by -1
+ int channel_counts_size;
+ int all_channel_counts;
+ int *sample_rates; ///< list of accepted sample rates, terminated by -1
+ int sample_rates_size;
+
+ AVFrame *peeked_frame;
} BufferSinkContext;
-static av_cold void uninit(AVFilterContext *ctx)
+#define NB_ITEMS(list) (list ## _size / sizeof(*list))
+#define FIFO_INIT_SIZE 8
+#define FIFO_INIT_ELEMENT_SIZE sizeof(void *)
+
+int attribute_align_arg av_buffersink_get_frame(AVFilterContext *ctx, AVFrame *frame)
{
- BufferSinkContext *sink = ctx->priv;
+ return av_buffersink_get_frame_flags(ctx, frame, 0);
+}
- if (sink->audio_fifo)
- av_audio_fifo_free(sink->audio_fifo);
+static int return_or_keep_frame(BufferSinkContext *buf, AVFrame *out, AVFrame *in, int flags)
+{
+ if ((flags & AV_BUFFERSINK_FLAG_PEEK)) {
+ buf->peeked_frame = in;
+ return out ? av_frame_ref(out, in) : 0;
+ } else {
+ av_assert1(out);
+ buf->peeked_frame = NULL;
+ av_frame_move_ref(out, in);
+ av_frame_free(&in);
+ return 0;
+ }
}
-static int filter_frame(AVFilterLink *link, AVFrame *frame)
+static int get_frame_internal(AVFilterContext *ctx, AVFrame *frame, int flags, int samples)
{
- BufferSinkContext *s = link->dst->priv;
+ BufferSinkContext *buf = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
+ int status, ret;
+ AVFrame *cur_frame;
+ int64_t pts;
+
+ if (buf->peeked_frame)
+ return return_or_keep_frame(buf, frame, buf->peeked_frame, flags);
+
+ while (1) {
+ ret = samples ? ff_inlink_consume_samples(inlink, samples, samples, &cur_frame) :
+ ff_inlink_consume_frame(inlink, &cur_frame);
+ if (ret < 0) {
+ return ret;
+ } else if (ret) {
+ /* TODO return the frame instead of copying it */
+ return return_or_keep_frame(buf, frame, cur_frame, flags);
+ } else if (ff_inlink_acknowledge_status(inlink, &status, &pts)) {
+ return status;
+ } else if ((flags & AV_BUFFERSINK_FLAG_NO_REQUEST)) {
+ return AVERROR(EAGAIN);
+ } else if (inlink->frame_wanted_out) {
+ ret = ff_filter_graph_run_once(ctx->graph);
+ if (ret < 0)
+ return ret;
+ } else {
+ ff_inlink_request_frame(inlink);
+ }
+ }
+}
- av_assert0(!s->cur_frame);
- s->cur_frame = frame;
+int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
+{
+ return get_frame_internal(ctx, frame, flags, ctx->inputs[0]->min_samples);
+}
- return 0;
+int attribute_align_arg av_buffersink_get_samples(AVFilterContext *ctx,
+ AVFrame *frame, int nb_samples)
+{
+ return get_frame_internal(ctx, frame, 0, nb_samples);
}
-int attribute_align_arg av_buffersink_get_frame(AVFilterContext *ctx,
- AVFrame *frame)
+AVBufferSinkParams *av_buffersink_params_alloc(void)
{
- BufferSinkContext *s = ctx->priv;
- AVFilterLink *link = ctx->inputs[0];
- int ret;
+ static const int pixel_fmts[] = { AV_PIX_FMT_NONE };
+ AVBufferSinkParams *params = av_malloc(sizeof(AVBufferSinkParams));
+ if (!params)
+ return NULL;
- if ((ret = ff_request_frame(link)) < 0)
- return ret;
+ params->pixel_fmts = pixel_fmts;
+ return params;
+}
- if (!s->cur_frame)
- return AVERROR(EINVAL);
+AVABufferSinkParams *av_abuffersink_params_alloc(void)
+{
+ AVABufferSinkParams *params = av_mallocz(sizeof(AVABufferSinkParams));
+
+ if (!params)
+ return NULL;
+ return params;
+}
+
+static av_cold int common_init(AVFilterContext *ctx)
+{
+ BufferSinkContext *buf = ctx->priv;
+
+ buf->warning_limit = 100;
+ return 0;
+}
- av_frame_move_ref(frame, s->cur_frame);
- av_frame_free(&s->cur_frame);
+static int activate(AVFilterContext *ctx)
+{
+ BufferSinkContext *buf = ctx->priv;
+
+ if (buf->warning_limit &&
+ ff_framequeue_queued_frames(&ctx->inputs[0]->fifo) >= buf->warning_limit) {
+ av_log(ctx, AV_LOG_WARNING,
+ "%d buffers queued in %s, something may be wrong.\n",
+ buf->warning_limit,
+ (char *)av_x_if_null(ctx->name, ctx->filter->name));
+ buf->warning_limit *= 10;
+ }
+ /* The frame is queued, the rest is up to get_frame_internal */
return 0;
}
-static int read_from_fifo(AVFilterContext *ctx, AVFrame *frame,
- int nb_samples)
+void av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size)
+{
+ AVFilterLink *inlink = ctx->inputs[0];
+
+ inlink->min_samples = inlink->max_samples =
+ inlink->partial_buf_size = frame_size;
+}
+
+#define MAKE_AVFILTERLINK_ACCESSOR(type, field) \
+type av_buffersink_get_##field(const AVFilterContext *ctx) { \
+ av_assert0(ctx->filter->activate == activate); \
+ return ctx->inputs[0]->field; \
+}
+
+MAKE_AVFILTERLINK_ACCESSOR(enum AVMediaType , type )
+MAKE_AVFILTERLINK_ACCESSOR(AVRational , time_base )
+MAKE_AVFILTERLINK_ACCESSOR(int , format )
+
+MAKE_AVFILTERLINK_ACCESSOR(AVRational , frame_rate )
+MAKE_AVFILTERLINK_ACCESSOR(int , w )
+MAKE_AVFILTERLINK_ACCESSOR(int , h )
+MAKE_AVFILTERLINK_ACCESSOR(AVRational , sample_aspect_ratio)
+
+MAKE_AVFILTERLINK_ACCESSOR(int , channels )
+MAKE_AVFILTERLINK_ACCESSOR(uint64_t , channel_layout )
+MAKE_AVFILTERLINK_ACCESSOR(int , sample_rate )
+
+MAKE_AVFILTERLINK_ACCESSOR(AVBufferRef * , hw_frames_ctx )
+
+static av_cold int vsink_init(AVFilterContext *ctx, void *opaque)
{
- BufferSinkContext *s = ctx->priv;
- AVFilterLink *link = ctx->inputs[0];
- AVFrame *tmp;
+ BufferSinkContext *buf = ctx->priv;
+ AVBufferSinkParams *params = opaque;
+ int ret;
- if (!(tmp = ff_get_audio_buffer(link, nb_samples)))
- return AVERROR(ENOMEM);
- av_audio_fifo_read(s->audio_fifo, (void**)tmp->extended_data, nb_samples);
+ if (params) {
+ if ((ret = av_opt_set_int_list(buf, "pix_fmts", params->pixel_fmts, AV_PIX_FMT_NONE, 0)) < 0)
+ return ret;
+ }
- tmp->pts = s->next_pts;
- s->next_pts += av_rescale_q(nb_samples, (AVRational){1, link->sample_rate},
- link->time_base);
+ return common_init(ctx);
+}
- av_frame_move_ref(frame, tmp);
- av_frame_free(&tmp);
+#define CHECK_LIST_SIZE(field) \
+ if (buf->field ## _size % sizeof(*buf->field)) { \
+ av_log(ctx, AV_LOG_ERROR, "Invalid size for " #field ": %d, " \
+ "should be multiple of %d\n", \
+ buf->field ## _size, (int)sizeof(*buf->field)); \
+ return AVERROR(EINVAL); \
+ }
+static int vsink_query_formats(AVFilterContext *ctx)
+{
+ BufferSinkContext *buf = ctx->priv;
+ AVFilterFormats *formats = NULL;
+ unsigned i;
+ int ret;
+
+ CHECK_LIST_SIZE(pixel_fmts)
+ if (buf->pixel_fmts_size) {
+ for (i = 0; i < NB_ITEMS(buf->pixel_fmts); i++)
+ if ((ret = ff_add_format(&formats, buf->pixel_fmts[i])) < 0)
+ return ret;
+ if ((ret = ff_set_common_formats(ctx, formats)) < 0)
+ return ret;
+ } else {
+ if ((ret = ff_default_query_formats(ctx)) < 0)
+ return ret;
+ }
return 0;
}
-int attribute_align_arg av_buffersink_get_samples(AVFilterContext *ctx,
- AVFrame *frame, int nb_samples)
+static av_cold int asink_init(AVFilterContext *ctx, void *opaque)
{
- BufferSinkContext *s = ctx->priv;
- AVFilterLink *link = ctx->inputs[0];
- int ret = 0;
-
- if (!s->audio_fifo) {
- int nb_channels = av_get_channel_layout_nb_channels(link->channel_layout);
- if (!(s->audio_fifo = av_audio_fifo_alloc(link->format, nb_channels, nb_samples)))
- return AVERROR(ENOMEM);
+ BufferSinkContext *buf = ctx->priv;
+ AVABufferSinkParams *params = opaque;
+ int ret;
+
+ if (params) {
+ if ((ret = av_opt_set_int_list(buf, "sample_fmts", params->sample_fmts, AV_SAMPLE_FMT_NONE, 0)) < 0 ||
+ (ret = av_opt_set_int_list(buf, "sample_rates", params->sample_rates, -1, 0)) < 0 ||
+ (ret = av_opt_set_int_list(buf, "channel_layouts", params->channel_layouts, -1, 0)) < 0 ||
+ (ret = av_opt_set_int_list(buf, "channel_counts", params->channel_counts, -1, 0)) < 0 ||
+ (ret = av_opt_set_int(buf, "all_channel_counts", params->all_channel_counts, 0)) < 0)
+ return ret;
}
+ return common_init(ctx);
+}
- while (ret >= 0) {
- if (av_audio_fifo_size(s->audio_fifo) >= nb_samples)
- return read_from_fifo(ctx, frame, nb_samples);
+static int asink_query_formats(AVFilterContext *ctx)
+{
+ BufferSinkContext *buf = ctx->priv;
+ AVFilterFormats *formats = NULL;
+ AVFilterChannelLayouts *layouts = NULL;
+ unsigned i;
+ int ret;
- ret = ff_request_frame(link);
- if (ret == AVERROR_EOF && av_audio_fifo_size(s->audio_fifo))
- return read_from_fifo(ctx, frame, av_audio_fifo_size(s->audio_fifo));
- else if (ret < 0)
+ CHECK_LIST_SIZE(sample_fmts)
+ CHECK_LIST_SIZE(sample_rates)
+ CHECK_LIST_SIZE(channel_layouts)
+ CHECK_LIST_SIZE(channel_counts)
+
+ if (buf->sample_fmts_size) {
+ for (i = 0; i < NB_ITEMS(buf->sample_fmts); i++)
+ if ((ret = ff_add_format(&formats, buf->sample_fmts[i])) < 0)
+ return ret;
+ if ((ret = ff_set_common_formats(ctx, formats)) < 0)
return ret;
+ }
- if (s->cur_frame->pts != AV_NOPTS_VALUE) {
- s->next_pts = s->cur_frame->pts -
- av_rescale_q(av_audio_fifo_size(s->audio_fifo),
- (AVRational){ 1, link->sample_rate },
- link->time_base);
+ if (buf->channel_layouts_size || buf->channel_counts_size ||
+ buf->all_channel_counts) {
+ for (i = 0; i < NB_ITEMS(buf->channel_layouts); i++)
+ if ((ret = ff_add_channel_layout(&layouts, buf->channel_layouts[i])) < 0)
+ return ret;
+ for (i = 0; i < NB_ITEMS(buf->channel_counts); i++)
+ if ((ret = ff_add_channel_layout(&layouts, FF_COUNT2LAYOUT(buf->channel_counts[i]))) < 0)
+ return ret;
+ if (buf->all_channel_counts) {
+ if (layouts)
+ av_log(ctx, AV_LOG_WARNING,
+ "Conflicting all_channel_counts and list in options\n");
+ else if (!(layouts = ff_all_channel_counts()))
+ return AVERROR(ENOMEM);
}
+ if ((ret = ff_set_common_channel_layouts(ctx, layouts)) < 0)
+ return ret;
+ }
- ret = av_audio_fifo_write(s->audio_fifo, (void**)s->cur_frame->extended_data,
- s->cur_frame->nb_samples);
- av_frame_free(&s->cur_frame);
+ if (buf->sample_rates_size) {
+ formats = NULL;
+ for (i = 0; i < NB_ITEMS(buf->sample_rates); i++)
+ if ((ret = ff_add_format(&formats, buf->sample_rates[i])) < 0)
+ return ret;
+ if ((ret = ff_set_common_samplerates(ctx, formats)) < 0)
+ return ret;
}
- return ret;
+ return 0;
}
+#define OFFSET(x) offsetof(BufferSinkContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption buffersink_options[] = {
+ { "pix_fmts", "set the supported pixel formats", OFFSET(pixel_fmts), AV_OPT_TYPE_BINARY, .flags = FLAGS },
+ { NULL },
+};
+#undef FLAGS
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption abuffersink_options[] = {
+ { "sample_fmts", "set the supported sample formats", OFFSET(sample_fmts), AV_OPT_TYPE_BINARY, .flags = FLAGS },
+ { "sample_rates", "set the supported sample rates", OFFSET(sample_rates), AV_OPT_TYPE_BINARY, .flags = FLAGS },
+ { "channel_layouts", "set the supported channel layouts", OFFSET(channel_layouts), AV_OPT_TYPE_BINARY, .flags = FLAGS },
+ { "channel_counts", "set the supported channel counts", OFFSET(channel_counts), AV_OPT_TYPE_BINARY, .flags = FLAGS },
+ { "all_channel_counts", "accept all channel counts", OFFSET(all_channel_counts), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS },
+ { NULL },
+};
+#undef FLAGS
+
+AVFILTER_DEFINE_CLASS(buffersink);
+AVFILTER_DEFINE_CLASS(abuffersink);
+
static const AVFilterPad avfilter_vsink_buffer_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
- .filter_frame = filter_frame,
- .needs_fifo = 1
},
{ NULL }
};
@@ -151,8 +346,11 @@ AVFilter ff_vsink_buffer = {
.name = "buffersink",
.description = NULL_IF_CONFIG_SMALL("Buffer video frames, and make them available to the end of the filter graph."),
.priv_size = sizeof(BufferSinkContext),
- .uninit = uninit,
+ .priv_class = &buffersink_class,
+ .init_opaque = vsink_init,
+ .query_formats = vsink_query_formats,
+ .activate = activate,
.inputs = avfilter_vsink_buffer_inputs,
.outputs = NULL,
};
@@ -161,8 +359,6 @@ static const AVFilterPad avfilter_asink_abuffer_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
- .filter_frame = filter_frame,
- .needs_fifo = 1
},
{ NULL }
};
@@ -170,9 +366,12 @@ static const AVFilterPad avfilter_asink_abuffer_inputs[] = {
AVFilter ff_asink_abuffer = {
.name = "abuffersink",
.description = NULL_IF_CONFIG_SMALL("Buffer audio frames, and make them available to the end of the filter graph."),
+ .priv_class = &abuffersink_class,
.priv_size = sizeof(BufferSinkContext),
- .uninit = uninit,
+ .init_opaque = asink_init,
+ .query_formats = asink_query_formats,
+ .activate = activate,
.inputs = avfilter_asink_abuffer_inputs,
.outputs = NULL,
};
diff --git a/libavfilter/buffersink.h b/libavfilter/buffersink.h
index 76f5a29806..f51fa7c1dd 100644
--- a/libavfilter/buffersink.h
+++ b/libavfilter/buffersink.h
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -22,7 +22,7 @@
/**
* @file
* @ingroup lavfi_buffersink
- * memory buffer sink API
+ * memory buffer sink API for audio and video
*/
#include "avfilter.h"
@@ -36,6 +36,96 @@
/**
* Get a frame with filtered data from sink and put it in frame.
*
+ * @param ctx pointer to a buffersink or abuffersink filter context.
+ * @param frame pointer to an allocated frame that will be filled with data.
+ * The data must be freed using av_frame_unref() / av_frame_free()
+ * @param flags a combination of AV_BUFFERSINK_FLAG_* flags
+ *
+ * @return >= 0 in for success, a negative AVERROR code for failure.
+ */
+int av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags);
+
+/**
+ * Tell av_buffersink_get_buffer_ref() to read video/samples buffer
+ * reference, but not remove it from the buffer. This is useful if you
+ * need only to read a video/samples buffer, without to fetch it.
+ */
+#define AV_BUFFERSINK_FLAG_PEEK 1
+
+/**
+ * Tell av_buffersink_get_buffer_ref() not to request a frame from its input.
+ * If a frame is already buffered, it is read (and removed from the buffer),
+ * but if no frame is present, return AVERROR(EAGAIN).
+ */
+#define AV_BUFFERSINK_FLAG_NO_REQUEST 2
+
+/**
+ * Struct to use for initializing a buffersink context.
+ */
+typedef struct {
+ const enum AVPixelFormat *pixel_fmts; ///< list of allowed pixel formats, terminated by AV_PIX_FMT_NONE
+} AVBufferSinkParams;
+
+/**
+ * Create an AVBufferSinkParams structure.
+ *
+ * Must be freed with av_free().
+ */
+AVBufferSinkParams *av_buffersink_params_alloc(void);
+
+/**
+ * Struct to use for initializing an abuffersink context.
+ */
+typedef struct {
+ const enum AVSampleFormat *sample_fmts; ///< list of allowed sample formats, terminated by AV_SAMPLE_FMT_NONE
+ const int64_t *channel_layouts; ///< list of allowed channel layouts, terminated by -1
+ const int *channel_counts; ///< list of allowed channel counts, terminated by -1
+ int all_channel_counts; ///< if not 0, accept any channel count or layout
+ int *sample_rates; ///< list of allowed sample rates, terminated by -1
+} AVABufferSinkParams;
+
+/**
+ * Create an AVABufferSinkParams structure.
+ *
+ * Must be freed with av_free().
+ */
+AVABufferSinkParams *av_abuffersink_params_alloc(void);
+
+/**
+ * Set the frame size for an audio buffer sink.
+ *
+ * All calls to av_buffersink_get_buffer_ref will return a buffer with
+ * exactly the specified number of samples, or AVERROR(EAGAIN) if there is
+ * not enough. The last buffer at EOF will be padded with 0.
+ */
+void av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size);
+
+/**
+ * @defgroup lavfi_buffersink_accessors Buffer sink accessors
+ * Get the properties of the stream
+ * @{
+ */
+
+enum AVMediaType av_buffersink_get_type (const AVFilterContext *ctx);
+AVRational av_buffersink_get_time_base (const AVFilterContext *ctx);
+int av_buffersink_get_format (const AVFilterContext *ctx);
+
+AVRational av_buffersink_get_frame_rate (const AVFilterContext *ctx);
+int av_buffersink_get_w (const AVFilterContext *ctx);
+int av_buffersink_get_h (const AVFilterContext *ctx);
+AVRational av_buffersink_get_sample_aspect_ratio (const AVFilterContext *ctx);
+
+int av_buffersink_get_channels (const AVFilterContext *ctx);
+uint64_t av_buffersink_get_channel_layout (const AVFilterContext *ctx);
+int av_buffersink_get_sample_rate (const AVFilterContext *ctx);
+
+AVBufferRef * av_buffersink_get_hw_frames_ctx (const AVFilterContext *ctx);
+
+/** @} */
+
+/**
+ * Get a frame with filtered data from sink and put it in frame.
+ *
* @param ctx pointer to a context of a buffersink or abuffersink AVFilter.
* @param frame pointer to an allocated frame that will be filled with data.
* The data must be freed using av_frame_unref() / av_frame_free()
diff --git a/libavfilter/buffersrc.c b/libavfilter/buffersrc.c
index f553508cf1..3f80d5f413 100644
--- a/libavfilter/buffersrc.c
+++ b/libavfilter/buffersrc.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2008 Vitor Sessak
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -45,19 +45,21 @@ typedef struct BufferSourceContext {
AVFifoBuffer *fifo;
AVRational time_base; ///< time_base to set in the output link
AVRational frame_rate; ///< frame_rate to set in the output link
+ unsigned nb_failed_requests;
+ unsigned warning_limit;
/* video only */
- int h, w;
+ int w, h;
enum AVPixelFormat pix_fmt;
- char *pix_fmt_str;
AVRational pixel_aspect;
+ char *sws_param;
AVBufferRef *hw_frames_ctx;
/* audio only */
int sample_rate;
enum AVSampleFormat sample_fmt;
- char *sample_fmt_str;
+ int channels;
uint64_t channel_layout;
char *channel_layout_str;
@@ -67,13 +69,12 @@ typedef struct BufferSourceContext {
#define CHECK_VIDEO_PARAM_CHANGE(s, c, width, height, format)\
if (c->w != width || c->h != height || c->pix_fmt != format) {\
- av_log(s, AV_LOG_ERROR, "Changing frame properties on the fly is not supported.\n");\
- return AVERROR(EINVAL);\
+ av_log(s, AV_LOG_INFO, "Changing frame properties on the fly is not supported by all filters.\n");\
}
-#define CHECK_AUDIO_PARAM_CHANGE(s, c, srate, ch_layout, format)\
+#define CHECK_AUDIO_PARAM_CHANGE(s, c, srate, ch_layout, ch_count, format)\
if (c->sample_fmt != format || c->sample_rate != srate ||\
- c->channel_layout != ch_layout) {\
+ c->channel_layout != ch_layout || c->channels != ch_count) {\
av_log(s, AV_LOG_ERROR, "Changing frame properties on the fly is not supported.\n");\
return AVERROR(EINVAL);\
}
@@ -136,47 +137,99 @@ int av_buffersrc_parameters_set(AVFilterContext *ctx, AVBufferSrcParameters *par
int attribute_align_arg av_buffersrc_write_frame(AVFilterContext *ctx, const AVFrame *frame)
{
- AVFrame *copy;
+ return av_buffersrc_add_frame_flags(ctx, (AVFrame *)frame,
+ AV_BUFFERSRC_FLAG_KEEP_REF);
+}
+
+int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
+{
+ return av_buffersrc_add_frame_flags(ctx, frame, 0);
+}
+
+static int av_buffersrc_add_frame_internal(AVFilterContext *ctx,
+ AVFrame *frame, int flags);
+
+int attribute_align_arg av_buffersrc_add_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
+{
+ AVFrame *copy = NULL;
int ret = 0;
+ if (frame && frame->channel_layout &&
+ av_get_channel_layout_nb_channels(frame->channel_layout) != av_frame_get_channels(frame)) {
+ av_log(ctx, AV_LOG_ERROR, "Layout indicates a different number of channels than actually present\n");
+ return AVERROR(EINVAL);
+ }
+
+ if (!(flags & AV_BUFFERSRC_FLAG_KEEP_REF) || !frame)
+ return av_buffersrc_add_frame_internal(ctx, frame, flags);
+
if (!(copy = av_frame_alloc()))
return AVERROR(ENOMEM);
ret = av_frame_ref(copy, frame);
if (ret >= 0)
- ret = av_buffersrc_add_frame(ctx, copy);
+ ret = av_buffersrc_add_frame_internal(ctx, copy, flags);
av_frame_free(&copy);
return ret;
}
-int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx,
- AVFrame *frame)
+static int push_frame(AVFilterGraph *graph)
+{
+ int ret;
+
+ while (1) {
+ ret = ff_filter_graph_run_once(graph);
+ if (ret == AVERROR(EAGAIN))
+ break;
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
+static int av_buffersrc_add_frame_internal(AVFilterContext *ctx,
+ AVFrame *frame, int flags)
{
BufferSourceContext *s = ctx->priv;
AVFrame *copy;
int refcounted, ret;
+ s->nb_failed_requests = 0;
+
if (!frame) {
s->eof = 1;
+ ff_avfilter_link_set_in_status(ctx->outputs[0], AVERROR_EOF, AV_NOPTS_VALUE);
+ if ((flags & AV_BUFFERSRC_FLAG_PUSH)) {
+ ret = push_frame(ctx->graph);
+ if (ret < 0)
+ return ret;
+ }
return 0;
} else if (s->eof)
return AVERROR(EINVAL);
refcounted = !!frame->buf[0];
+ if (!(flags & AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT)) {
+
switch (ctx->outputs[0]->type) {
case AVMEDIA_TYPE_VIDEO:
CHECK_VIDEO_PARAM_CHANGE(ctx, s, frame->width, frame->height,
frame->format);
break;
case AVMEDIA_TYPE_AUDIO:
+ /* For layouts unknown on input but known on link after negotiation. */
+ if (!frame->channel_layout)
+ frame->channel_layout = s->channel_layout;
CHECK_AUDIO_PARAM_CHANGE(ctx, s, frame->sample_rate, frame->channel_layout,
- frame->format);
+ av_frame_get_channels(frame), frame->format);
break;
default:
return AVERROR(EINVAL);
}
+ }
+
if (!av_fifo_space(s->fifo) &&
(ret = av_fifo_realloc2(s->fifo, av_fifo_size(s->fifo) +
sizeof(copy))) < 0)
@@ -202,6 +255,15 @@ int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx,
return ret;
}
+ if ((ret = ctx->output_pads[0].request_frame(ctx->outputs[0])) < 0)
+ return ret;
+
+ if ((flags & AV_BUFFERSRC_FLAG_PUSH)) {
+ ret = push_frame(ctx->graph);
+ if (ret < 0)
+ return ret;
+ }
+
return 0;
}
@@ -209,41 +271,37 @@ static av_cold int init_video(AVFilterContext *ctx)
{
BufferSourceContext *c = ctx->priv;
- if (!(c->pix_fmt_str || c->got_format_from_params) || !c->w || !c->h ||
+ if (!(c->pix_fmt != AV_PIX_FMT_NONE || c->got_format_from_params) || !c->w || !c->h ||
av_q2d(c->time_base) <= 0) {
av_log(ctx, AV_LOG_ERROR, "Invalid parameters provided.\n");
return AVERROR(EINVAL);
}
- if (c->pix_fmt_str) {
- if ((c->pix_fmt = av_get_pix_fmt(c->pix_fmt_str)) == AV_PIX_FMT_NONE) {
- char *tail;
- c->pix_fmt = strtol(c->pix_fmt_str, &tail, 10);
- if (*tail || c->pix_fmt < 0 || !av_pix_fmt_desc_get(c->pix_fmt)) {
- av_log(ctx, AV_LOG_ERROR, "Invalid pixel format string '%s'\n", c->pix_fmt_str);
- return AVERROR(EINVAL);
- }
- }
- }
-
if (!(c->fifo = av_fifo_alloc(sizeof(AVFrame*))))
return AVERROR(ENOMEM);
- av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d pixfmt:%s tb:%d/%d sar:%d/%d\n",
+ av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d pixfmt:%s tb:%d/%d fr:%d/%d sar:%d/%d sws_param:%s\n",
c->w, c->h, av_get_pix_fmt_name(c->pix_fmt),
- c->time_base.num, c->time_base.den,
- c->pixel_aspect.num, c->pixel_aspect.den);
+ c->time_base.num, c->time_base.den, c->frame_rate.num, c->frame_rate.den,
+ c->pixel_aspect.num, c->pixel_aspect.den, (char *)av_x_if_null(c->sws_param, ""));
+ c->warning_limit = 100;
return 0;
}
+unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src)
+{
+ return ((BufferSourceContext *)buffer_src->priv)->nb_failed_requests;
+}
+
#define OFFSET(x) offsetof(BufferSourceContext, x)
-#define A AV_OPT_FLAG_AUDIO_PARAM
-#define V AV_OPT_FLAG_VIDEO_PARAM
+#define A AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_AUDIO_PARAM
+#define V AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
-static const AVOption video_options[] = {
+static const AVOption buffer_options[] = {
{ "width", NULL, OFFSET(w), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, V },
+ { "video_size", NULL, OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, .flags = V },
{ "height", NULL, OFFSET(h), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, V },
- { "pix_fmt", NULL, OFFSET(pix_fmt_str), AV_OPT_TYPE_STRING, .flags = V },
+ { "pix_fmt", NULL, OFFSET(pix_fmt), AV_OPT_TYPE_PIXEL_FMT, { .i64 = AV_PIX_FMT_NONE }, .min = AV_PIX_FMT_NONE, .max = INT_MAX, .flags = V },
#if FF_API_OLD_FILTER_OPTS
/* those 4 are for compatibility with the old option passing system where each filter
* did its own parsing */
@@ -253,57 +311,61 @@ static const AVOption video_options[] = {
{ "sar_den", "deprecated, do not use", OFFSET(pixel_aspect.den), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, V },
#endif
{ "sar", "sample aspect ratio", OFFSET(pixel_aspect), AV_OPT_TYPE_RATIONAL, { .dbl = 0 }, 0, DBL_MAX, V },
+ { "pixel_aspect", "sample aspect ratio", OFFSET(pixel_aspect), AV_OPT_TYPE_RATIONAL, { .dbl = 0 }, 0, DBL_MAX, V },
{ "time_base", NULL, OFFSET(time_base), AV_OPT_TYPE_RATIONAL, { .dbl = 0 }, 0, DBL_MAX, V },
{ "frame_rate", NULL, OFFSET(frame_rate), AV_OPT_TYPE_RATIONAL, { .dbl = 0 }, 0, DBL_MAX, V },
+ { "sws_param", NULL, OFFSET(sws_param), AV_OPT_TYPE_STRING, .flags = V },
{ NULL },
};
-static const AVClass buffer_class = {
- .class_name = "buffer source",
- .item_name = av_default_item_name,
- .option = video_options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(buffer);
-static const AVOption audio_options[] = {
+static const AVOption abuffer_options[] = {
{ "time_base", NULL, OFFSET(time_base), AV_OPT_TYPE_RATIONAL, { .dbl = 0 }, 0, INT_MAX, A },
{ "sample_rate", NULL, OFFSET(sample_rate), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, A },
- { "sample_fmt", NULL, OFFSET(sample_fmt_str), AV_OPT_TYPE_STRING, .flags = A },
+ { "sample_fmt", NULL, OFFSET(sample_fmt), AV_OPT_TYPE_SAMPLE_FMT, { .i64 = AV_SAMPLE_FMT_NONE }, .min = AV_SAMPLE_FMT_NONE, .max = INT_MAX, .flags = A },
{ "channel_layout", NULL, OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, .flags = A },
+ { "channels", NULL, OFFSET(channels), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, A },
{ NULL },
};
-static const AVClass abuffer_class = {
- .class_name = "abuffer source",
- .item_name = av_default_item_name,
- .option = audio_options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(abuffer);
static av_cold int init_audio(AVFilterContext *ctx)
{
BufferSourceContext *s = ctx->priv;
int ret = 0;
- if (!(s->sample_fmt_str || s->got_format_from_params)) {
- av_log(ctx, AV_LOG_ERROR, "Sample format not provided\n");
- return AVERROR(EINVAL);
- }
- if (s->sample_fmt_str)
- s->sample_fmt = av_get_sample_fmt(s->sample_fmt_str);
-
- if (s->sample_fmt == AV_SAMPLE_FMT_NONE) {
- av_log(ctx, AV_LOG_ERROR, "Invalid sample format %s.\n",
- s->sample_fmt_str);
+ if (!(s->sample_fmt != AV_SAMPLE_FMT_NONE || s->got_format_from_params)) {
+ av_log(ctx, AV_LOG_ERROR, "Sample format was not set or was invalid\n");
return AVERROR(EINVAL);
}
- if (s->channel_layout_str)
- s->channel_layout = av_get_channel_layout(s->channel_layout_str);
+ if (s->channel_layout_str || s->channel_layout) {
+ int n;
- if (!s->channel_layout) {
- av_log(ctx, AV_LOG_ERROR, "Invalid channel layout %s.\n",
- s->channel_layout_str);
+ if (!s->channel_layout) {
+ s->channel_layout = av_get_channel_layout(s->channel_layout_str);
+ if (!s->channel_layout) {
+ av_log(ctx, AV_LOG_ERROR, "Invalid channel layout %s.\n",
+ s->channel_layout_str);
+ return AVERROR(EINVAL);
+ }
+ }
+ n = av_get_channel_layout_nb_channels(s->channel_layout);
+ if (s->channels) {
+ if (n != s->channels) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Mismatching channel count %d and layout '%s' "
+ "(%d channels)\n",
+ s->channels, s->channel_layout_str, n);
+ return AVERROR(EINVAL);
+ }
+ }
+ s->channels = n;
+ } else if (!s->channels) {
+ av_log(ctx, AV_LOG_ERROR, "Neither number of channels nor "
+ "channel layout specified\n");
return AVERROR(EINVAL);
}
@@ -313,9 +375,11 @@ static av_cold int init_audio(AVFilterContext *ctx)
if (!s->time_base.num)
s->time_base = (AVRational){1, s->sample_rate};
- av_log(ctx, AV_LOG_VERBOSE, "tb:%d/%d samplefmt:%s samplerate: %d "
- "ch layout:%s\n", s->time_base.num, s->time_base.den, s->sample_fmt_str,
+ av_log(ctx, AV_LOG_VERBOSE,
+ "tb:%d/%d samplefmt:%s samplerate:%d chlayout:%s\n",
+ s->time_base.num, s->time_base.den, av_get_sample_fmt_name(s->sample_fmt),
s->sample_rate, s->channel_layout_str);
+ s->warning_limit = 100;
return ret;
}
@@ -329,8 +393,7 @@ static av_cold void uninit(AVFilterContext *ctx)
av_frame_free(&frame);
}
av_buffer_unref(&s->hw_frames_ctx);
- av_fifo_free(s->fifo);
- s->fifo = NULL;
+ av_fifo_freep(&s->fifo);
}
static int query_formats(AVFilterContext *ctx)
@@ -339,21 +402,27 @@ static int query_formats(AVFilterContext *ctx)
AVFilterChannelLayouts *channel_layouts = NULL;
AVFilterFormats *formats = NULL;
AVFilterFormats *samplerates = NULL;
+ int ret;
switch (ctx->outputs[0]->type) {
case AVMEDIA_TYPE_VIDEO:
- ff_add_format(&formats, c->pix_fmt);
- ff_set_common_formats(ctx, formats);
+ if ((ret = ff_add_format (&formats, c->pix_fmt)) < 0 ||
+ (ret = ff_set_common_formats (ctx , formats )) < 0)
+ return ret;
break;
case AVMEDIA_TYPE_AUDIO:
- ff_add_format(&formats, c->sample_fmt);
- ff_set_common_formats(ctx, formats);
-
- ff_add_format(&samplerates, c->sample_rate);
- ff_set_common_samplerates(ctx, samplerates);
+ if ((ret = ff_add_format (&formats , c->sample_fmt )) < 0 ||
+ (ret = ff_set_common_formats (ctx , formats )) < 0 ||
+ (ret = ff_add_format (&samplerates, c->sample_rate)) < 0 ||
+ (ret = ff_set_common_samplerates (ctx , samplerates )) < 0)
+ return ret;
- ff_add_channel_layout(&channel_layouts, c->channel_layout);
- ff_set_common_channel_layouts(ctx, channel_layouts);
+ if ((ret = ff_add_channel_layout(&channel_layouts,
+ c->channel_layout ? c->channel_layout :
+ FF_COUNT2LAYOUT(c->channels))) < 0)
+ return ret;
+ if ((ret = ff_set_common_channel_layouts(ctx, channel_layouts)) < 0)
+ return ret;
break;
default:
return AVERROR(EINVAL);
@@ -379,8 +448,8 @@ static int config_props(AVFilterLink *link)
}
break;
case AVMEDIA_TYPE_AUDIO:
- link->channel_layout = c->channel_layout;
- link->sample_rate = c->sample_rate;
+ if (!c->channel_layout)
+ c->channel_layout = link->channel_layout;
break;
default:
return AVERROR(EINVAL);
@@ -395,11 +464,12 @@ static int request_frame(AVFilterLink *link)
{
BufferSourceContext *c = link->src->priv;
AVFrame *frame;
- int ret = 0;
+ int ret;
if (!av_fifo_size(c->fifo)) {
if (c->eof)
return AVERROR_EOF;
+ c->nb_failed_requests++;
return AVERROR(EAGAIN);
}
av_fifo_generic_read(c->fifo, &frame, sizeof(frame), NULL);
@@ -433,7 +503,6 @@ AVFilter ff_vsrc_buffer = {
.name = "buffer",
.description = NULL_IF_CONFIG_SMALL("Buffer video frames, and make them accessible to the filterchain."),
.priv_size = sizeof(BufferSourceContext),
- .priv_class = &buffer_class,
.query_formats = query_formats,
.init = init_video,
@@ -441,6 +510,7 @@ AVFilter ff_vsrc_buffer = {
.inputs = NULL,
.outputs = avfilter_vsrc_buffer_outputs,
+ .priv_class = &buffer_class,
};
static const AVFilterPad avfilter_asrc_abuffer_outputs[] = {
@@ -458,7 +528,6 @@ AVFilter ff_asrc_abuffer = {
.name = "abuffer",
.description = NULL_IF_CONFIG_SMALL("Buffer audio frames, and make them accessible to the filterchain."),
.priv_size = sizeof(BufferSourceContext),
- .priv_class = &abuffer_class,
.query_formats = query_formats,
.init = init_audio,
@@ -466,4 +535,5 @@ AVFilter ff_asrc_abuffer = {
.inputs = NULL,
.outputs = avfilter_asrc_abuffer_outputs,
+ .priv_class = &abuffer_class,
};
diff --git a/libavfilter/buffersrc.h b/libavfilter/buffersrc.h
index dcea3da79b..e42c78196b 100644
--- a/libavfilter/buffersrc.h
+++ b/libavfilter/buffersrc.h
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -33,6 +33,36 @@
* @{
*/
+enum {
+
+ /**
+ * Do not check for format changes.
+ */
+ AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT = 1,
+
+ /**
+ * Immediately push the frame to the output.
+ */
+ AV_BUFFERSRC_FLAG_PUSH = 4,
+
+ /**
+ * Keep a reference to the frame.
+ * If the frame if reference-counted, create a new reference; otherwise
+ * copy the frame data.
+ */
+ AV_BUFFERSRC_FLAG_KEEP_REF = 8,
+
+};
+
+/**
+ * Get the number of failed requests.
+ *
+ * A failed request is when the request_frame method is called while no
+ * frame is present in the buffer.
+ * The number is reset when a frame is added.
+ */
+unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src);
+
/**
* This structure contains the parameters describing the frames that will be
* passed to this filter.
@@ -116,7 +146,11 @@ int av_buffersrc_parameters_set(AVFilterContext *ctx, AVBufferSrcParameters *par
* copied.
*
* @return 0 on success, a negative AVERROR on error
+ *
+ * This function is equivalent to av_buffersrc_add_frame_flags() with the
+ * AV_BUFFERSRC_FLAG_KEEP_REF flag.
*/
+av_warn_unused_result
int av_buffersrc_write_frame(AVFilterContext *ctx, const AVFrame *frame);
/**
@@ -133,10 +167,34 @@ int av_buffersrc_write_frame(AVFilterContext *ctx, const AVFrame *frame);
* @note the difference between this function and av_buffersrc_write_frame() is
* that av_buffersrc_write_frame() creates a new reference to the input frame,
* while this function takes ownership of the reference passed to it.
+ *
+ * This function is equivalent to av_buffersrc_add_frame_flags() without the
+ * AV_BUFFERSRC_FLAG_KEEP_REF flag.
*/
+av_warn_unused_result
int av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame);
/**
+ * Add a frame to the buffer source.
+ *
+ * By default, if the frame is reference-counted, this function will take
+ * ownership of the reference(s) and reset the frame. This can be controlled
+ * using the flags.
+ *
+ * If this function returns an error, the input frame is not touched.
+ *
+ * @param buffer_src pointer to a buffer source context
+ * @param frame a frame, or NULL to mark EOF
+ * @param flags a combination of AV_BUFFERSRC_FLAG_*
+ * @return >= 0 in case of success, a negative AVERROR code
+ * in case of failure
+ */
+av_warn_unused_result
+int av_buffersrc_add_frame_flags(AVFilterContext *buffer_src,
+ AVFrame *frame, int flags);
+
+
+/**
* @}
*/
diff --git a/libavfilter/bwdif.h b/libavfilter/bwdif.h
new file mode 100644
index 0000000000..8b42c760a0
--- /dev/null
+++ b/libavfilter/bwdif.h
@@ -0,0 +1,72 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_BWDIF_H
+#define AVFILTER_BWDIF_H
+
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+
+enum BWDIFMode {
+ BWDIF_MODE_SEND_FRAME = 0, ///< send 1 frame for each frame
+ BWDIF_MODE_SEND_FIELD = 1, ///< send 1 frame for each field
+};
+
+enum BWDIFParity {
+ BWDIF_PARITY_TFF = 0, ///< top field first
+ BWDIF_PARITY_BFF = 1, ///< bottom field first
+ BWDIF_PARITY_AUTO = -1, ///< auto detection
+};
+
+enum BWDIFDeint {
+ BWDIF_DEINT_ALL = 0, ///< deinterlace all frames
+ BWDIF_DEINT_INTERLACED = 1, ///< only deinterlace frames marked as interlaced
+};
+
+typedef struct BWDIFContext {
+ const AVClass *class;
+
+ int mode; ///< BWDIFMode
+ int parity; ///< BWDIFParity
+ int deint; ///< BWDIFDeint
+
+ int frame_pending;
+
+ AVFrame *cur;
+ AVFrame *next;
+ AVFrame *prev;
+ AVFrame *out;
+
+ void (*filter_intra)(void *dst1, void *cur1, int w, int prefs, int mrefs,
+ int prefs3, int mrefs3, int parity, int clip_max);
+ void (*filter_line)(void *dst, void *prev, void *cur, void *next,
+ int w, int prefs, int mrefs, int prefs2, int mrefs2,
+ int prefs3, int mrefs3, int prefs4, int mrefs4,
+ int parity, int clip_max);
+ void (*filter_edge)(void *dst, void *prev, void *cur, void *next,
+ int w, int prefs, int mrefs, int prefs2, int mrefs2,
+ int parity, int clip_max, int spat);
+
+ const AVPixFmtDescriptor *csp;
+ int inter_field;
+ int eof;
+} BWDIFContext;
+
+void ff_bwdif_init_x86(BWDIFContext *bwdif);
+
+#endif /* AVFILTER_BWDIF_H */
diff --git a/libavfilter/colorspacedsp.c b/libavfilter/colorspacedsp.c
new file mode 100644
index 0000000000..b8ba5c06e4
--- /dev/null
+++ b/libavfilter/colorspacedsp.c
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2016 Ronald S. Bultje <rsbultje@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "colorspacedsp.h"
+
+/*
+ * SS_W/H stands for "subsampling_w/h"
+ * it's analogous to AVPixFmtDescriptor->log2_chroma_w/h.
+ */
+#define SS_W 0
+#define SS_H 0
+
+#define BIT_DEPTH 8
+#include "colorspacedsp_template.c"
+
+#undef BIT_DEPTH
+#define BIT_DEPTH 10
+#include "colorspacedsp_template.c"
+
+#undef BIT_DEPTH
+#define BIT_DEPTH 12
+#include "colorspacedsp_template.c"
+
+#undef SS_W
+#undef SS_H
+
+#define SS_W 1
+#define SS_H 0
+
+#undef BIT_DEPTH
+#define BIT_DEPTH 8
+#include "colorspacedsp_template.c"
+
+#undef BIT_DEPTH
+#define BIT_DEPTH 10
+#include "colorspacedsp_template.c"
+
+#undef BIT_DEPTH
+#define BIT_DEPTH 12
+#include "colorspacedsp_template.c"
+
+#undef SS_W
+#undef SS_H
+
+#define SS_W 1
+#define SS_H 1
+
+#undef BIT_DEPTH
+#define BIT_DEPTH 8
+#include "colorspacedsp_template.c"
+
+#undef BIT_DEPTH
+#define BIT_DEPTH 10
+#include "colorspacedsp_template.c"
+
+#undef BIT_DEPTH
+#define BIT_DEPTH 12
+#include "colorspacedsp_template.c"
+
+static void multiply3x3_c(int16_t *buf[3], ptrdiff_t stride,
+ int w, int h, const int16_t m[3][3][8])
+{
+ int y, x;
+ int16_t *buf0 = buf[0], *buf1 = buf[1], *buf2 = buf[2];
+
+ for (y = 0; y < h; y++) {
+ for (x = 0; x < w; x++) {
+ int v0 = buf0[x], v1 = buf1[x], v2 = buf2[x];
+
+ buf0[x] = av_clip_int16((m[0][0][0] * v0 + m[0][1][0] * v1 +
+ m[0][2][0] * v2 + 8192) >> 14);
+ buf1[x] = av_clip_int16((m[1][0][0] * v0 + m[1][1][0] * v1 +
+ m[1][2][0] * v2 + 8192) >> 14);
+ buf2[x] = av_clip_int16((m[2][0][0] * v0 + m[2][1][0] * v1 +
+ m[2][2][0] * v2 + 8192) >> 14);
+ }
+
+ buf0 += stride;
+ buf1 += stride;
+ buf2 += stride;
+ }
+}
+
+void ff_colorspacedsp_init(ColorSpaceDSPContext *dsp)
+{
+#define init_yuv2rgb_fn(bit) \
+ dsp->yuv2rgb[BPP_##bit][SS_444] = yuv2rgb_444p##bit##_c; \
+ dsp->yuv2rgb[BPP_##bit][SS_422] = yuv2rgb_422p##bit##_c; \
+ dsp->yuv2rgb[BPP_##bit][SS_420] = yuv2rgb_420p##bit##_c
+
+ init_yuv2rgb_fn( 8);
+ init_yuv2rgb_fn(10);
+ init_yuv2rgb_fn(12);
+
+#define init_rgb2yuv_fn(bit) \
+ dsp->rgb2yuv[BPP_##bit][SS_444] = rgb2yuv_444p##bit##_c; \
+ dsp->rgb2yuv[BPP_##bit][SS_422] = rgb2yuv_422p##bit##_c; \
+ dsp->rgb2yuv[BPP_##bit][SS_420] = rgb2yuv_420p##bit##_c
+
+ init_rgb2yuv_fn( 8);
+ init_rgb2yuv_fn(10);
+ init_rgb2yuv_fn(12);
+
+#define init_rgb2yuv_fsb_fn(bit) \
+ dsp->rgb2yuv_fsb[BPP_##bit][SS_444] = rgb2yuv_fsb_444p##bit##_c; \
+ dsp->rgb2yuv_fsb[BPP_##bit][SS_422] = rgb2yuv_fsb_422p##bit##_c; \
+ dsp->rgb2yuv_fsb[BPP_##bit][SS_420] = rgb2yuv_fsb_420p##bit##_c
+
+ init_rgb2yuv_fsb_fn( 8);
+ init_rgb2yuv_fsb_fn(10);
+ init_rgb2yuv_fsb_fn(12);
+
+#define init_yuv2yuv_fn(idx1, bit1, bit2) \
+ dsp->yuv2yuv[idx1][BPP_##bit2][SS_444] = yuv2yuv_444p##bit1##to##bit2##_c; \
+ dsp->yuv2yuv[idx1][BPP_##bit2][SS_422] = yuv2yuv_422p##bit1##to##bit2##_c; \
+ dsp->yuv2yuv[idx1][BPP_##bit2][SS_420] = yuv2yuv_420p##bit1##to##bit2##_c
+#define init_yuv2yuv_fns(bit1) \
+ init_yuv2yuv_fn(BPP_##bit1, bit1, 8); \
+ init_yuv2yuv_fn(BPP_##bit1, bit1, 10); \
+ init_yuv2yuv_fn(BPP_##bit1, bit1, 12)
+
+ init_yuv2yuv_fns( 8);
+ init_yuv2yuv_fns(10);
+ init_yuv2yuv_fns(12);
+
+ dsp->multiply3x3 = multiply3x3_c;
+
+ if (ARCH_X86)
+ ff_colorspacedsp_x86_init(dsp);
+}
diff --git a/libavfilter/colorspacedsp.h b/libavfilter/colorspacedsp.h
new file mode 100644
index 0000000000..a81e4f0a52
--- /dev/null
+++ b/libavfilter/colorspacedsp.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2016 Ronald S. Bultje <rsbultje@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_COLORSPACEDSP_H
+#define AVFILTER_COLORSPACEDSP_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+typedef void (*yuv2rgb_fn)(int16_t *rgb[3], ptrdiff_t rgb_stride,
+ uint8_t *yuv[3], const ptrdiff_t yuv_stride[3],
+ int w, int h, const int16_t yuv2rgb_coeffs[3][3][8],
+ const int16_t yuv_offset[8]);
+typedef void (*rgb2yuv_fn)(uint8_t *yuv[3], const ptrdiff_t yuv_stride[3],
+ int16_t *rgb[3], ptrdiff_t rgb_stride,
+ int w, int h, const int16_t rgb2yuv_coeffs[3][3][8],
+ const int16_t yuv_offset[8]);
+typedef void (*rgb2yuv_fsb_fn)(uint8_t *yuv[3], const ptrdiff_t yuv_stride[3],
+ int16_t *rgb[3], ptrdiff_t rgb_stride,
+ int w, int h, const int16_t rgb2yuv_coeffs[3][3][8],
+ const int16_t yuv_offset[8],
+ int *rnd[3][2]);
+typedef void (*yuv2yuv_fn)(uint8_t *yuv_out[3], const ptrdiff_t yuv_out_stride[3],
+ uint8_t *yuv_in[3], const ptrdiff_t yuv_in_stride[3],
+ int w, int h, const int16_t yuv2yuv_coeffs[3][3][8],
+ const int16_t yuv_offset[2][8]);
+
+enum BitDepthIndex {
+ BPP_8,
+ BPP_10,
+ BPP_12,
+ NB_BPP,
+};
+
+enum ChromaSubsamplingIndex {
+ SS_444,
+ SS_422,
+ SS_420,
+ NB_SS,
+};
+
+typedef struct ColorSpaceDSPContext {
+ /* Convert input YUV pixel buffer from a user into an internal, 15bpp array
+ * of intermediate RGB data. */
+ yuv2rgb_fn yuv2rgb[NB_BPP][NB_SS];
+ /* Convert intermediate RGB data (15bpp, internal format) into YUV data and
+ * store into user-provided output buffer */
+ rgb2yuv_fn rgb2yuv[NB_BPP][NB_SS];
+ /* Same as rgb2yuv(), but use floyd-steinberg dithering */
+ rgb2yuv_fsb_fn rgb2yuv_fsb[NB_BPP][NB_SS];
+ /* Direct yuv-to-yuv conversion (input and output are both user-provided
+ * buffers) */
+ yuv2yuv_fn yuv2yuv[NB_BPP /* in */][NB_BPP /* out */][NB_SS];
+
+ /* In-place 3x3 matrix multiplication. Input and output are both 15bpp
+ * (our internal data format) */
+ void (*multiply3x3)(int16_t *data[3], ptrdiff_t stride,
+ int w, int h, const int16_t m[3][3][8]);
+} ColorSpaceDSPContext;
+
+void ff_colorspacedsp_init(ColorSpaceDSPContext *dsp);
+
+/* internal */
+void ff_colorspacedsp_x86_init(ColorSpaceDSPContext *dsp);
+
+#endif /* AVFILTER_COLORSPACEDSP_H */
diff --git a/libavfilter/colorspacedsp_template.c b/libavfilter/colorspacedsp_template.c
new file mode 100644
index 0000000000..53ac0d7224
--- /dev/null
+++ b/libavfilter/colorspacedsp_template.c
@@ -0,0 +1,342 @@
+/*
+ * Copyright (c) 2016 Ronald S. Bultje <rsbultje@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/avassert.h"
+
+#undef avg
+#undef ss
+
+#if SS_W == 0
+#define ss 444
+#define avg(a,b,c,d) (a)
+#elif SS_H == 0
+#define ss 422
+#define avg(a,b,c,d) (((a) + (b) + 1) >> 1)
+#else
+#define ss 420
+#define avg(a,b,c,d) (((a) + (b) + (c) + (d) + 2) >> 2)
+#endif
+
+#undef fn
+#undef fn2
+#undef fn3
+#define fn3(a,b,c) a##_##c##p##b##_c
+#define fn2(a,b,c) fn3(a,b,c)
+#define fn(a) fn2(a, BIT_DEPTH, ss)
+
+#undef pixel
+#undef av_clip_pixel
+#if BIT_DEPTH == 8
+#define pixel uint8_t
+#define av_clip_pixel(x) av_clip_uint8(x)
+#else
+#define pixel uint16_t
+#define av_clip_pixel(x) av_clip_uintp2(x, BIT_DEPTH)
+#endif
+
+static void fn(yuv2rgb)(int16_t *rgb[3], ptrdiff_t rgb_stride,
+ uint8_t *_yuv[3], const ptrdiff_t yuv_stride[3],
+ int w, int h, const int16_t yuv2rgb_coeffs[3][3][8],
+ const int16_t yuv_offset[8])
+{
+ pixel **yuv = (pixel **) _yuv;
+ const pixel *yuv0 = yuv[0], *yuv1 = yuv[1], *yuv2 = yuv[2];
+ int16_t *rgb0 = rgb[0], *rgb1 = rgb[1], *rgb2 = rgb[2];
+ int y, x;
+ int cy = yuv2rgb_coeffs[0][0][0];
+ int crv = yuv2rgb_coeffs[0][2][0];
+ int cgu = yuv2rgb_coeffs[1][1][0];
+ int cgv = yuv2rgb_coeffs[1][2][0];
+ int cbu = yuv2rgb_coeffs[2][1][0];
+ const int sh = BIT_DEPTH - 1, rnd = 1 << (sh - 1);
+ const int uv_offset = 128 << (BIT_DEPTH - 8);
+
+ av_assert2(yuv2rgb_coeffs[0][1][0] == 0);
+ av_assert2(yuv2rgb_coeffs[2][2][0] == 0);
+ av_assert2(yuv2rgb_coeffs[1][0][0] == cy && yuv2rgb_coeffs[2][0][0] == cy);
+
+ w = AV_CEIL_RSHIFT(w, SS_W);
+ h = AV_CEIL_RSHIFT(h, SS_H);
+ for (y = 0; y < h; y++) {
+ for (x = 0; x < w; x++) {
+ int y00 = yuv0[x << SS_W] - yuv_offset[0];
+#if SS_W == 1
+ int y01 = yuv0[2 * x + 1] - yuv_offset[0];
+#if SS_H == 1
+ int y10 = yuv0[yuv_stride[0] / sizeof(pixel) + 2 * x] - yuv_offset[0];
+ int y11 = yuv0[yuv_stride[0] / sizeof(pixel) + 2 * x + 1] - yuv_offset[0];
+#endif
+#endif
+ int u = yuv1[x] - uv_offset, v = yuv2[x] - uv_offset;
+
+ rgb0[x << SS_W] = av_clip_int16((y00 * cy + crv * v + rnd) >> sh);
+#if SS_W == 1
+ rgb0[2 * x + 1] = av_clip_int16((y01 * cy + crv * v + rnd) >> sh);
+#if SS_H == 1
+ rgb0[2 * x + rgb_stride] = av_clip_int16((y10 * cy + crv * v + rnd) >> sh);
+ rgb0[2 * x + rgb_stride + 1] = av_clip_int16((y11 * cy + crv * v + rnd) >> sh);
+#endif
+#endif
+
+ rgb1[x << SS_W] = av_clip_int16((y00 * cy + cgu * u +
+ cgv * v + rnd) >> sh);
+#if SS_W == 1
+ rgb1[2 * x + 1] = av_clip_int16((y01 * cy + cgu * u +
+ cgv * v + rnd) >> sh);
+#if SS_H == 1
+ rgb1[2 * x + rgb_stride] = av_clip_int16((y10 * cy + cgu * u +
+ cgv * v + rnd) >> sh);
+ rgb1[2 * x + rgb_stride + 1] = av_clip_int16((y11 * cy + cgu * u +
+ cgv * v + rnd) >> sh);
+#endif
+#endif
+
+ rgb2[x << SS_W] = av_clip_int16((y00 * cy + cbu * u + rnd) >> sh);
+#if SS_W == 1
+ rgb2[2 * x + 1] = av_clip_int16((y01 * cy + cbu * u + rnd) >> sh);
+#if SS_H == 1
+ rgb2[2 * x + rgb_stride] = av_clip_int16((y10 * cy + cbu * u + rnd) >> sh);
+ rgb2[2 * x + rgb_stride + 1] = av_clip_int16((y11 * cy + cbu * u + rnd) >> sh);
+#endif
+#endif
+ }
+
+ yuv0 += (yuv_stride[0] * (1 << SS_H)) / sizeof(pixel);
+ yuv1 += yuv_stride[1] / sizeof(pixel);
+ yuv2 += yuv_stride[2] / sizeof(pixel);
+ rgb0 += rgb_stride * (1 << SS_H);
+ rgb1 += rgb_stride * (1 << SS_H);
+ rgb2 += rgb_stride * (1 << SS_H);
+ }
+}
+
+static void fn(rgb2yuv)(uint8_t *_yuv[3], const ptrdiff_t yuv_stride[3],
+ int16_t *rgb[3], ptrdiff_t s,
+ int w, int h, const int16_t rgb2yuv_coeffs[3][3][8],
+ const int16_t yuv_offset[8])
+{
+ pixel **yuv = (pixel **) _yuv;
+ pixel *yuv0 = yuv[0], *yuv1 = yuv[1], *yuv2 = yuv[2];
+ const int16_t *rgb0 = rgb[0], *rgb1 = rgb[1], *rgb2 = rgb[2];
+ int y, x;
+ const int sh = 29 - BIT_DEPTH;
+ const int rnd = 1 << (sh - 1);
+ int cry = rgb2yuv_coeffs[0][0][0];
+ int cgy = rgb2yuv_coeffs[0][1][0];
+ int cby = rgb2yuv_coeffs[0][2][0];
+ int cru = rgb2yuv_coeffs[1][0][0];
+ int cgu = rgb2yuv_coeffs[1][1][0];
+ int cburv = rgb2yuv_coeffs[1][2][0];
+ int cgv = rgb2yuv_coeffs[2][1][0];
+ int cbv = rgb2yuv_coeffs[2][2][0];
+ ptrdiff_t s0 = yuv_stride[0] / sizeof(pixel);
+ const int uv_offset = 128 << (BIT_DEPTH - 8);
+
+ av_assert2(rgb2yuv_coeffs[1][2][0] == rgb2yuv_coeffs[2][0][0]);
+ w = AV_CEIL_RSHIFT(w, SS_W);
+ h = AV_CEIL_RSHIFT(h, SS_H);
+ for (y = 0; y < h; y++) {
+ for (x = 0; x < w; x++) {
+ int r00 = rgb0[x << SS_W], g00 = rgb1[x << SS_W], b00 = rgb2[x << SS_W];
+#if SS_W == 1
+ int r01 = rgb0[x * 2 + 1], g01 = rgb1[x * 2 + 1], b01 = rgb2[x * 2 + 1];
+#if SS_H == 1
+ int r10 = rgb0[x * 2 + 0 + s], g10 = rgb1[x * 2 + 0 + s], b10 = rgb2[x * 2 + 0 + s];
+ int r11 = rgb0[x * 2 + 1 + s], g11 = rgb1[x * 2 + 1 + s], b11 = rgb2[x * 2 + 1 + s];
+#endif
+#endif
+
+ yuv0[x << SS_W] = av_clip_pixel(yuv_offset[0] +
+ ((r00 * cry + g00 * cgy +
+ b00 * cby + rnd) >> sh));
+#if SS_W == 1
+ yuv0[x * 2 + 1] = av_clip_pixel(yuv_offset[0] +
+ ((r01 * cry + g01 * cgy +
+ b01 * cby + rnd) >> sh));
+#if SS_H == 1
+ yuv0[x * 2 + 0 + s0] = av_clip_pixel(yuv_offset[0] +
+ ((r10 * cry + g10 * cgy +
+ b10 * cby + rnd) >> sh));
+ yuv0[x * 2 + 1 + s0] = av_clip_pixel(yuv_offset[0] +
+ ((r11 * cry + g11 * cgy +
+ b11 * cby + rnd) >> sh));
+#endif
+#endif
+
+ yuv1[x] = av_clip_pixel(uv_offset +
+ ((avg(r00, r01, r10, r11) * cru +
+ avg(g00, g01, g10, g11) * cgu +
+ avg(b00, b01, b10, b11) * cburv + rnd) >> sh));
+ yuv2[x] = av_clip_pixel(uv_offset +
+ ((avg(r00, r01, r10, r11) * cburv +
+ avg(g00, g01, g10, g11) * cgv +
+ avg(b00, b01, b10, b11) * cbv + rnd) >> sh));
+ }
+
+ yuv0 += s0 * (1 << SS_H);
+ yuv1 += yuv_stride[1] / sizeof(pixel);
+ yuv2 += yuv_stride[2] / sizeof(pixel);
+ rgb0 += s * (1 << SS_H);
+ rgb1 += s * (1 << SS_H);
+ rgb2 += s * (1 << SS_H);
+ }
+}
+
+/* floyd-steinberg dithering - for any mid-top pixel A in a 3x2 block of pixels:
+ * 1 A 2
+ * 3 4 5
+ * the rounding error is distributed over the neighbouring pixels:
+ * 2: 7/16th, 3: 3/16th, 4: 5/16th and 5: 1/16th
+ */
+static void fn(rgb2yuv_fsb)(uint8_t *_yuv[3], const ptrdiff_t yuv_stride[3],
+ int16_t *rgb[3], ptrdiff_t s,
+ int w, int h, const int16_t rgb2yuv_coeffs[3][3][8],
+ const int16_t yuv_offset[8],
+ int *rnd_scratch[3][2])
+{
+ pixel **yuv = (pixel **) _yuv;
+ pixel *yuv0 = yuv[0], *yuv1 = yuv[1], *yuv2 = yuv[2];
+ const int16_t *rgb0 = rgb[0], *rgb1 = rgb[1], *rgb2 = rgb[2];
+ int y, x;
+ const int sh = 29 - BIT_DEPTH;
+ const int rnd = 1 << (sh - 1);
+ int cry = rgb2yuv_coeffs[0][0][0];
+ int cgy = rgb2yuv_coeffs[0][1][0];
+ int cby = rgb2yuv_coeffs[0][2][0];
+ int cru = rgb2yuv_coeffs[1][0][0];
+ int cgu = rgb2yuv_coeffs[1][1][0];
+ int cburv = rgb2yuv_coeffs[1][2][0];
+ int cgv = rgb2yuv_coeffs[2][1][0];
+ int cbv = rgb2yuv_coeffs[2][2][0];
+ ptrdiff_t s0 = yuv_stride[0] / sizeof(pixel);
+ const int uv_offset = 128 << (BIT_DEPTH - 8);
+ unsigned mask = (1 << sh) - 1;
+
+ for (x = 0; x < w; x++) {
+ rnd_scratch[0][0][x] =
+ rnd_scratch[0][1][x] = rnd;
+ }
+ av_assert2(rgb2yuv_coeffs[1][2][0] == rgb2yuv_coeffs[2][0][0]);
+ w = AV_CEIL_RSHIFT(w, SS_W);
+ h = AV_CEIL_RSHIFT(h, SS_H);
+ for (x = 0; x < w; x++) {
+ rnd_scratch[1][0][x] =
+ rnd_scratch[1][1][x] =
+ rnd_scratch[2][0][x] =
+ rnd_scratch[2][1][x] = rnd;
+ }
+ for (y = 0; y < h; y++) {
+ for (x = 0; x < w; x++) {
+ int r00 = rgb0[x << SS_W], g00 = rgb1[x << SS_W], b00 = rgb2[x << SS_W];
+ int y00;
+#if SS_W == 1
+ int r01 = rgb0[x * 2 + 1], g01 = rgb1[x * 2 + 1], b01 = rgb2[x * 2 + 1];
+ int y01;
+#if SS_H == 1
+ int r10 = rgb0[x * 2 + 0 + s], g10 = rgb1[x * 2 + 0 + s], b10 = rgb2[x * 2 + 0 + s];
+ int r11 = rgb0[x * 2 + 1 + s], g11 = rgb1[x * 2 + 1 + s], b11 = rgb2[x * 2 + 1 + s];
+ int y10, y11;
+#endif
+#endif
+ int u, v, diff;
+
+ y00 = r00 * cry + g00 * cgy + b00 * cby + rnd_scratch[0][y & !SS_H][x << SS_W];
+ diff = (y00 & mask) - rnd;
+ yuv0[x << SS_W] = av_clip_pixel(yuv_offset[0] + (y00 >> sh));
+ rnd_scratch[0][ (y & !SS_H)][(x << SS_W) + 1] += (diff * 7 + 8) >> 4;
+ rnd_scratch[0][!(y & !SS_H)][(x << SS_W) - 1] += (diff * 3 + 8) >> 4;
+ rnd_scratch[0][!(y & !SS_H)][(x << SS_W) + 0] += (diff * 5 + 8) >> 4;
+ rnd_scratch[0][!(y & !SS_H)][(x << SS_W) + 1] += (diff * 1 + 8) >> 4;
+ rnd_scratch[0][ (y & !SS_H)][(x << SS_W) + 0] = rnd;
+#if SS_W == 1
+ y01 = r01 * cry + g01 * cgy + b01 * cby + rnd_scratch[0][y & !SS_H][x * 2 + 1];
+ diff = (y01 & mask) - rnd;
+ yuv0[x * 2 + 1] = av_clip_pixel(yuv_offset[0] + (y01 >> sh));
+ rnd_scratch[0][ (y & !SS_H)][x * 2 + 2] += (diff * 7 + 8) >> 4;
+ rnd_scratch[0][!(y & !SS_H)][x * 2 + 0] += (diff * 3 + 8) >> 4;
+ rnd_scratch[0][!(y & !SS_H)][x * 2 + 1] += (diff * 5 + 8) >> 4;
+ rnd_scratch[0][!(y & !SS_H)][x * 2 + 2] += (diff * 1 + 8) >> 4;
+ rnd_scratch[0][ (y & !SS_H)][x * 2 + 1] = rnd;
+#if SS_H == 1
+ y10 = r10 * cry + g10 * cgy + b10 * cby + rnd_scratch[0][1][x * 2 + 0];
+ diff = (y10 & mask) - rnd;
+ yuv0[x * 2 + 0 + s0] = av_clip_pixel(yuv_offset[0] + (y10 >> sh));
+ rnd_scratch[0][1][x * 2 + 1] += (diff * 7 + 8) >> 4;
+ rnd_scratch[0][0][x * 2 - 1] += (diff * 3 + 8) >> 4;
+ rnd_scratch[0][0][x * 2 + 0] += (diff * 5 + 8) >> 4;
+ rnd_scratch[0][0][x * 2 + 1] += (diff * 1 + 8) >> 4;
+ rnd_scratch[0][1][x * 2 + 0] = rnd;
+
+ y11 = r11 * cry + g11 * cgy + b11 * cby + rnd_scratch[0][1][x * 2 + 1];
+ diff = (y11 & mask) - rnd;
+ yuv0[x * 2 + 1 + s0] = av_clip_pixel(yuv_offset[0] + (y11 >> sh));
+ rnd_scratch[0][1][x * 2 + 2] += (diff * 7 + 8) >> 4;
+ rnd_scratch[0][0][x * 2 + 0] += (diff * 3 + 8) >> 4;
+ rnd_scratch[0][0][x * 2 + 1] += (diff * 5 + 8) >> 4;
+ rnd_scratch[0][0][x * 2 + 2] += (diff * 1 + 8) >> 4;
+ rnd_scratch[0][1][x * 2 + 1] = rnd;
+#endif
+#endif
+
+ u = avg(r00, r01, r10, r11) * cru +
+ avg(g00, g01, g10, g11) * cgu +
+ avg(b00, b01, b10, b11) * cburv + rnd_scratch[1][y & 1][x];
+ diff = (u & mask) - rnd;
+ yuv1[x] = av_clip_pixel(uv_offset + (u >> sh));
+ rnd_scratch[1][ (y & 1)][x + 1] += (diff * 7 + 8) >> 4;
+ rnd_scratch[1][!(y & 1)][x - 1] += (diff * 3 + 8) >> 4;
+ rnd_scratch[1][!(y & 1)][x + 0] += (diff * 5 + 8) >> 4;
+ rnd_scratch[1][!(y & 1)][x + 1] += (diff * 1 + 8) >> 4;
+ rnd_scratch[1][ (y & 1)][x + 0] = rnd;
+
+ v = avg(r00, r01, r10, r11) * cburv +
+ avg(g00, g01, g10, g11) * cgv +
+ avg(b00, b01, b10, b11) * cbv + rnd_scratch[2][y & 1][x];
+ diff = (v & mask) - rnd;
+ yuv2[x] = av_clip_pixel(uv_offset + (v >> sh));
+ rnd_scratch[2][ (y & 1)][x + 1] += (diff * 7 + 8) >> 4;
+ rnd_scratch[2][!(y & 1)][x - 1] += (diff * 3 + 8) >> 4;
+ rnd_scratch[2][!(y & 1)][x + 0] += (diff * 5 + 8) >> 4;
+ rnd_scratch[2][!(y & 1)][x + 1] += (diff * 1 + 8) >> 4;
+ rnd_scratch[2][ (y & 1)][x + 0] = rnd;
+ }
+
+ yuv0 += s0 * (1 << SS_H);
+ yuv1 += yuv_stride[1] / sizeof(pixel);
+ yuv2 += yuv_stride[2] / sizeof(pixel);
+ rgb0 += s * (1 << SS_H);
+ rgb1 += s * (1 << SS_H);
+ rgb2 += s * (1 << SS_H);
+ }
+}
+
+#undef IN_BIT_DEPTH
+#undef OUT_BIT_DEPTH
+#define OUT_BIT_DEPTH BIT_DEPTH
+#define IN_BIT_DEPTH 8
+#include "colorspacedsp_yuv2yuv_template.c"
+
+#undef IN_BIT_DEPTH
+#define IN_BIT_DEPTH 10
+#include "colorspacedsp_yuv2yuv_template.c"
+
+#undef IN_BIT_DEPTH
+#define IN_BIT_DEPTH 12
+#include "colorspacedsp_yuv2yuv_template.c"
diff --git a/libavfilter/colorspacedsp_yuv2yuv_template.c b/libavfilter/colorspacedsp_yuv2yuv_template.c
new file mode 100644
index 0000000000..3fae38c12a
--- /dev/null
+++ b/libavfilter/colorspacedsp_yuv2yuv_template.c
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2016 Ronald S. Bultje <rsbultje@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/avassert.h"
+
+#undef opixel
+#define opixel pixel
+
+#undef ipixel
+#if IN_BIT_DEPTH == 8
+#define ipixel uint8_t
+#else
+#define ipixel uint16_t
+#endif
+
+#undef fn
+#undef fn2
+#undef fn3
+#define fn3(a,b,c,d) a##_##d##p##b##to##c##_c
+#define fn2(a,b,c,d) fn3(a,b,c,d)
+#define fn(a) fn2(a, IN_BIT_DEPTH, OUT_BIT_DEPTH, ss)
+
+static void fn(yuv2yuv)(uint8_t *_dst[3], const ptrdiff_t dst_stride[3],
+ uint8_t *_src[3], const ptrdiff_t src_stride[3],
+ int w, int h, const int16_t c[3][3][8],
+ const int16_t yuv_offset[2][8])
+{
+ opixel **dst = (opixel **) _dst;
+ ipixel **src = (ipixel **) _src;
+ const ipixel *src0 = src[0], *src1 = src[1], *src2 = src[2];
+ opixel *dst0 = dst[0], *dst1 = dst[1], *dst2 = dst[2];
+ int y, x;
+ const int sh = 14 + IN_BIT_DEPTH - OUT_BIT_DEPTH;
+ const int rnd = 1 << (sh - 1);
+ int y_off_in = yuv_offset[0][0];
+ int y_off_out = yuv_offset[1][0] << sh;
+ const int uv_off_in = 128 << (IN_BIT_DEPTH - 8);
+ const int uv_off_out = rnd + (128 << (OUT_BIT_DEPTH - 8 + sh));
+ int cyy = c[0][0][0], cyu = c[0][1][0], cyv = c[0][2][0];
+ int cuu = c[1][1][0], cuv = c[1][2][0], cvu = c[2][1][0], cvv = c[2][2][0];
+
+ av_assert2(c[1][0][0] == 0);
+ av_assert2(c[2][0][0] == 0);
+ w = AV_CEIL_RSHIFT(w, SS_W);
+ h = AV_CEIL_RSHIFT(h, SS_H);
+ for (y = 0; y < h; y++) {
+ for (x = 0; x < w; x++) {
+ int y00 = src0[x << SS_W] - y_off_in;
+#if SS_W == 1
+ int y01 = src0[2 * x + 1] - y_off_in;
+#if SS_H == 1
+ int y10 = src0[src_stride[0] / sizeof(ipixel) + 2 * x] - y_off_in;
+ int y11 = src0[src_stride[0] / sizeof(ipixel) + 2 * x + 1] - y_off_in;
+#endif
+#endif
+ int u = src1[x] - uv_off_in, v = src2[x] - uv_off_in;
+ int uv_val = cyu * u + cyv * v + rnd + y_off_out;
+
+ dst0[x << SS_W] = av_clip_pixel((cyy * y00 + uv_val) >> sh);
+#if SS_W == 1
+ dst0[x * 2 + 1] = av_clip_pixel((cyy * y01 + uv_val) >> sh);
+#if SS_H == 1
+ dst0[x * 2 + 0 + dst_stride[0] / sizeof(opixel)] =
+ av_clip_pixel((cyy * y10 + uv_val) >> sh);
+ dst0[x * 2 + 1 + dst_stride[0] / sizeof(opixel)] =
+ av_clip_pixel((cyy * y11 + uv_val) >> sh);
+#endif
+#endif
+
+ dst1[x] = av_clip_pixel((u * cuu + v * cuv + uv_off_out) >> sh);
+ dst2[x] = av_clip_pixel((u * cvu + v * cvv + uv_off_out) >> sh);
+ }
+
+ dst0 += (dst_stride[0] * (1 << SS_H)) / sizeof(opixel);
+ dst1 += dst_stride[1] / sizeof(opixel);
+ dst2 += dst_stride[2] / sizeof(opixel);
+ src0 += (src_stride[0] * (1 << SS_H)) / sizeof(ipixel);
+ src1 += src_stride[1] / sizeof(ipixel);
+ src2 += src_stride[2] / sizeof(ipixel);
+ }
+}
diff --git a/libavfilter/deshake.h b/libavfilter/deshake.h
new file mode 100644
index 0000000000..becd6c248b
--- /dev/null
+++ b/libavfilter/deshake.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2013 Wei Gao <weigao@multicorewareinc.com>
+ * Copyright (C) 2013 Lenny Wang
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_DESHAKE_H
+#define AVFILTER_DESHAKE_H
+
+#include "config.h"
+#include "avfilter.h"
+#include "transform.h"
+#include "libavutil/pixelutils.h"
+#if CONFIG_OPENCL
+#include "libavutil/opencl.h"
+#endif
+
+
+enum SearchMethod {
+ EXHAUSTIVE, ///< Search all possible positions
+ SMART_EXHAUSTIVE, ///< Search most possible positions (faster)
+ SEARCH_COUNT
+};
+
+typedef struct {
+ int x; ///< Horizontal shift
+ int y; ///< Vertical shift
+} IntMotionVector;
+
+typedef struct {
+ double x; ///< Horizontal shift
+ double y; ///< Vertical shift
+} MotionVector;
+
+typedef struct {
+ MotionVector vec; ///< Motion vector
+ double angle; ///< Angle of rotation
+ double zoom; ///< Zoom percentage
+} Transform;
+
+#if CONFIG_OPENCL
+
+typedef struct {
+ cl_command_queue command_queue;
+ cl_program program;
+ cl_kernel kernel_luma;
+ cl_kernel kernel_chroma;
+ int in_plane_size[8];
+ int out_plane_size[8];
+ int plane_num;
+ cl_mem cl_inbuf;
+ size_t cl_inbuf_size;
+ cl_mem cl_outbuf;
+ size_t cl_outbuf_size;
+} DeshakeOpenclContext;
+
+#endif
+
+#define MAX_R 64
+
+typedef struct {
+ const AVClass *class;
+ int counts[2*MAX_R+1][2*MAX_R+1]; /// < Scratch buffer for motion search
+ double *angles; ///< Scratch buffer for block angles
+ unsigned angles_size;
+ AVFrame *ref; ///< Previous frame
+ int rx; ///< Maximum horizontal shift
+ int ry; ///< Maximum vertical shift
+ int edge; ///< Edge fill method
+ int blocksize; ///< Size of blocks to compare
+ int contrast; ///< Contrast threshold
+ int search; ///< Motion search method
+ av_pixelutils_sad_fn sad; ///< Sum of the absolute difference function
+ Transform last; ///< Transform from last frame
+ int refcount; ///< Number of reference frames (defines averaging window)
+ FILE *fp;
+ Transform avg;
+ int cw; ///< Crop motion search to this box
+ int ch;
+ int cx;
+ int cy;
+ char *filename; ///< Motion search detailed log filename
+ int opencl;
+#if CONFIG_OPENCL
+ DeshakeOpenclContext opencl_ctx;
+#endif
+ int (* transform)(AVFilterContext *ctx, int width, int height, int cw, int ch,
+ const float *matrix_y, const float *matrix_uv, enum InterpolateMethod interpolate,
+ enum FillMethod fill, AVFrame *in, AVFrame *out);
+} DeshakeContext;
+
+#endif /* AVFILTER_DESHAKE_H */
diff --git a/libavfilter/deshake_opencl.c b/libavfilter/deshake_opencl.c
new file mode 100644
index 0000000000..91ae7d5859
--- /dev/null
+++ b/libavfilter/deshake_opencl.c
@@ -0,0 +1,200 @@
+/*
+ * Copyright (C) 2013 Wei Gao <weigao@multicorewareinc.com>
+ * Copyright (C) 2013 Lenny Wang
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * transform input video
+ */
+
+#include "libavutil/common.h"
+#include "libavutil/dict.h"
+#include "libavutil/pixdesc.h"
+#include "deshake_opencl.h"
+#include "libavutil/opencl_internal.h"
+
+#define PLANE_NUM 3
+#define ROUND_TO_16(a) (((((a) - 1)/16)+1)*16)
+
+int ff_opencl_transform(AVFilterContext *ctx,
+ int width, int height, int cw, int ch,
+ const float *matrix_y, const float *matrix_uv,
+ enum InterpolateMethod interpolate,
+ enum FillMethod fill, AVFrame *in, AVFrame *out)
+{
+ int ret = 0;
+ cl_int status;
+ DeshakeContext *deshake = ctx->priv;
+ float4 packed_matrix_lu = {matrix_y[0], matrix_y[1], matrix_y[2], matrix_y[5]};
+ float4 packed_matrix_ch = {matrix_uv[0], matrix_uv[1], matrix_uv[2], matrix_uv[5]};
+ size_t global_worksize_lu[2] = {(size_t)ROUND_TO_16(width), (size_t)ROUND_TO_16(height)};
+ size_t global_worksize_ch[2] = {(size_t)ROUND_TO_16(cw), (size_t)(2*ROUND_TO_16(ch))};
+ size_t local_worksize[2] = {16, 16};
+ FFOpenclParam param_lu = {0};
+ FFOpenclParam param_ch = {0};
+ param_lu.ctx = param_ch.ctx = ctx;
+ param_lu.kernel = deshake->opencl_ctx.kernel_luma;
+ param_ch.kernel = deshake->opencl_ctx.kernel_chroma;
+
+ if ((unsigned int)interpolate > INTERPOLATE_BIQUADRATIC) {
+ av_log(ctx, AV_LOG_ERROR, "Selected interpolate method is invalid\n");
+ return AVERROR(EINVAL);
+ }
+ ret = avpriv_opencl_set_parameter(&param_lu,
+ FF_OPENCL_PARAM_INFO(deshake->opencl_ctx.cl_inbuf),
+ FF_OPENCL_PARAM_INFO(deshake->opencl_ctx.cl_outbuf),
+ FF_OPENCL_PARAM_INFO(packed_matrix_lu),
+ FF_OPENCL_PARAM_INFO(interpolate),
+ FF_OPENCL_PARAM_INFO(fill),
+ FF_OPENCL_PARAM_INFO(in->linesize[0]),
+ FF_OPENCL_PARAM_INFO(out->linesize[0]),
+ FF_OPENCL_PARAM_INFO(height),
+ FF_OPENCL_PARAM_INFO(width),
+ NULL);
+ if (ret < 0)
+ return ret;
+ ret = avpriv_opencl_set_parameter(&param_ch,
+ FF_OPENCL_PARAM_INFO(deshake->opencl_ctx.cl_inbuf),
+ FF_OPENCL_PARAM_INFO(deshake->opencl_ctx.cl_outbuf),
+ FF_OPENCL_PARAM_INFO(packed_matrix_ch),
+ FF_OPENCL_PARAM_INFO(interpolate),
+ FF_OPENCL_PARAM_INFO(fill),
+ FF_OPENCL_PARAM_INFO(in->linesize[0]),
+ FF_OPENCL_PARAM_INFO(out->linesize[0]),
+ FF_OPENCL_PARAM_INFO(in->linesize[1]),
+ FF_OPENCL_PARAM_INFO(out->linesize[1]),
+ FF_OPENCL_PARAM_INFO(height),
+ FF_OPENCL_PARAM_INFO(width),
+ FF_OPENCL_PARAM_INFO(ch),
+ FF_OPENCL_PARAM_INFO(cw),
+ NULL);
+ if (ret < 0)
+ return ret;
+ status = clEnqueueNDRangeKernel(deshake->opencl_ctx.command_queue,
+ deshake->opencl_ctx.kernel_luma, 2, NULL,
+ global_worksize_lu, local_worksize, 0, NULL, NULL);
+ status |= clEnqueueNDRangeKernel(deshake->opencl_ctx.command_queue,
+ deshake->opencl_ctx.kernel_chroma, 2, NULL,
+ global_worksize_ch, local_worksize, 0, NULL, NULL);
+ if (status != CL_SUCCESS) {
+ av_log(ctx, AV_LOG_ERROR, "OpenCL run kernel error occurred: %s\n", av_opencl_errstr(status));
+ return AVERROR_EXTERNAL;
+ }
+ ret = av_opencl_buffer_read_image(out->data, deshake->opencl_ctx.out_plane_size,
+ deshake->opencl_ctx.plane_num, deshake->opencl_ctx.cl_outbuf,
+ deshake->opencl_ctx.cl_outbuf_size);
+ if (ret < 0)
+ return ret;
+ return ret;
+}
+
+int ff_opencl_deshake_init(AVFilterContext *ctx)
+{
+ int ret = 0;
+ DeshakeContext *deshake = ctx->priv;
+ ret = av_opencl_init(NULL);
+ if (ret < 0)
+ return ret;
+ deshake->opencl_ctx.plane_num = PLANE_NUM;
+ deshake->opencl_ctx.command_queue = av_opencl_get_command_queue();
+ if (!deshake->opencl_ctx.command_queue) {
+ av_log(ctx, AV_LOG_ERROR, "Unable to get OpenCL command queue in filter 'deshake'\n");
+ return AVERROR(EINVAL);
+ }
+ deshake->opencl_ctx.program = av_opencl_compile("avfilter_transform", NULL);
+ if (!deshake->opencl_ctx.program) {
+ av_log(ctx, AV_LOG_ERROR, "OpenCL failed to compile program 'avfilter_transform'\n");
+ return AVERROR(EINVAL);
+ }
+ if (!deshake->opencl_ctx.kernel_luma) {
+ deshake->opencl_ctx.kernel_luma = clCreateKernel(deshake->opencl_ctx.program,
+ "avfilter_transform_luma", &ret);
+ if (ret != CL_SUCCESS) {
+ av_log(ctx, AV_LOG_ERROR, "OpenCL failed to create kernel 'avfilter_transform_luma'\n");
+ return AVERROR(EINVAL);
+ }
+ }
+ if (!deshake->opencl_ctx.kernel_chroma) {
+ deshake->opencl_ctx.kernel_chroma = clCreateKernel(deshake->opencl_ctx.program,
+ "avfilter_transform_chroma", &ret);
+ if (ret != CL_SUCCESS) {
+ av_log(ctx, AV_LOG_ERROR, "OpenCL failed to create kernel 'avfilter_transform_chroma'\n");
+ return AVERROR(EINVAL);
+ }
+ }
+ return ret;
+}
+
+void ff_opencl_deshake_uninit(AVFilterContext *ctx)
+{
+ DeshakeContext *deshake = ctx->priv;
+ av_opencl_buffer_release(&deshake->opencl_ctx.cl_inbuf);
+ av_opencl_buffer_release(&deshake->opencl_ctx.cl_outbuf);
+ clReleaseKernel(deshake->opencl_ctx.kernel_luma);
+ clReleaseKernel(deshake->opencl_ctx.kernel_chroma);
+ clReleaseProgram(deshake->opencl_ctx.program);
+ deshake->opencl_ctx.command_queue = NULL;
+ av_opencl_uninit();
+}
+
+int ff_opencl_deshake_process_inout_buf(AVFilterContext *ctx, AVFrame *in, AVFrame *out)
+{
+ int ret = 0;
+ AVFilterLink *link = ctx->inputs[0];
+ DeshakeContext *deshake = ctx->priv;
+ const int hshift = av_pix_fmt_desc_get(link->format)->log2_chroma_h;
+ int chroma_height = AV_CEIL_RSHIFT(link->h, hshift);
+
+ if ((!deshake->opencl_ctx.cl_inbuf) || (!deshake->opencl_ctx.cl_outbuf)) {
+ deshake->opencl_ctx.in_plane_size[0] = (in->linesize[0] * in->height);
+ deshake->opencl_ctx.in_plane_size[1] = (in->linesize[1] * chroma_height);
+ deshake->opencl_ctx.in_plane_size[2] = (in->linesize[2] * chroma_height);
+ deshake->opencl_ctx.out_plane_size[0] = (out->linesize[0] * out->height);
+ deshake->opencl_ctx.out_plane_size[1] = (out->linesize[1] * chroma_height);
+ deshake->opencl_ctx.out_plane_size[2] = (out->linesize[2] * chroma_height);
+ deshake->opencl_ctx.cl_inbuf_size = deshake->opencl_ctx.in_plane_size[0] +
+ deshake->opencl_ctx.in_plane_size[1] +
+ deshake->opencl_ctx.in_plane_size[2];
+ deshake->opencl_ctx.cl_outbuf_size = deshake->opencl_ctx.out_plane_size[0] +
+ deshake->opencl_ctx.out_plane_size[1] +
+ deshake->opencl_ctx.out_plane_size[2];
+ if (!deshake->opencl_ctx.cl_inbuf) {
+ ret = av_opencl_buffer_create(&deshake->opencl_ctx.cl_inbuf,
+ deshake->opencl_ctx.cl_inbuf_size,
+ CL_MEM_READ_ONLY, NULL);
+ if (ret < 0)
+ return ret;
+ }
+ if (!deshake->opencl_ctx.cl_outbuf) {
+ ret = av_opencl_buffer_create(&deshake->opencl_ctx.cl_outbuf,
+ deshake->opencl_ctx.cl_outbuf_size,
+ CL_MEM_READ_WRITE, NULL);
+ if (ret < 0)
+ return ret;
+ }
+ }
+ ret = av_opencl_buffer_write_image(deshake->opencl_ctx.cl_inbuf,
+ deshake->opencl_ctx.cl_inbuf_size,
+ 0, in->data,deshake->opencl_ctx.in_plane_size,
+ deshake->opencl_ctx.plane_num);
+ if(ret < 0)
+ return ret;
+ return ret;
+}
diff --git a/libavfilter/deshake_opencl.h b/libavfilter/deshake_opencl.h
new file mode 100644
index 0000000000..5b0a2414b8
--- /dev/null
+++ b/libavfilter/deshake_opencl.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2013 Wei Gao <weigao@multicorewareinc.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_DESHAKE_OPENCL_H
+#define AVFILTER_DESHAKE_OPENCL_H
+
+#include "deshake.h"
+
+typedef struct {
+ float x;
+ float y;
+ float z;
+ float w;
+} float4;
+
+int ff_opencl_deshake_init(AVFilterContext *ctx);
+
+void ff_opencl_deshake_uninit(AVFilterContext *ctx);
+
+int ff_opencl_deshake_process_inout_buf(AVFilterContext *ctx, AVFrame *in, AVFrame *out);
+
+int ff_opencl_transform(AVFilterContext *ctx,
+ int width, int height, int cw, int ch,
+ const float *matrix_y, const float *matrix_uv,
+ enum InterpolateMethod interpolate,
+ enum FillMethod fill, AVFrame *in, AVFrame *out);
+
+#endif /* AVFILTER_DESHAKE_OPENCL_H */
diff --git a/libavfilter/deshake_opencl_kernel.h b/libavfilter/deshake_opencl_kernel.h
new file mode 100644
index 0000000000..dd45d6f60b
--- /dev/null
+++ b/libavfilter/deshake_opencl_kernel.h
@@ -0,0 +1,225 @@
+/*
+ * Copyright (C) 2013 Wei Gao <weigao@multicorewareinc.com>
+ * Copyright (C) 2013 Lenny Wang
+ *
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_DESHAKE_OPENCL_KERNEL_H
+#define AVFILTER_DESHAKE_OPENCL_KERNEL_H
+
+#include "libavutil/opencl.h"
+
+const char *ff_kernel_deshake_opencl = AV_OPENCL_KERNEL(
+inline unsigned char pixel(global const unsigned char *src, int x, int y,
+ int w, int h,int stride, unsigned char def)
+{
+ return (x < 0 || y < 0 || x >= w || y >= h) ? def : src[x + y * stride];
+}
+
+unsigned char interpolate_nearest(float x, float y, global const unsigned char *src,
+ int width, int height, int stride, unsigned char def)
+{
+ return pixel(src, (int)(x + 0.5f), (int)(y + 0.5f), width, height, stride, def);
+}
+
+unsigned char interpolate_bilinear(float x, float y, global const unsigned char *src,
+ int width, int height, int stride, unsigned char def)
+{
+ int x_c, x_f, y_c, y_f;
+ int v1, v2, v3, v4;
+ x_f = (int)x;
+ y_f = (int)y;
+ x_c = x_f + 1;
+ y_c = y_f + 1;
+
+ if (x_f < -1 || x_f > width || y_f < -1 || y_f > height) {
+ return def;
+ } else {
+ v4 = pixel(src, x_f, y_f, width, height, stride, def);
+ v2 = pixel(src, x_c, y_f, width, height, stride, def);
+ v3 = pixel(src, x_f, y_c, width, height, stride, def);
+ v1 = pixel(src, x_c, y_c, width, height, stride, def);
+ return (v1*(x - x_f)*(y - y_f) + v2*((x - x_f)*(y_c - y)) +
+ v3*(x_c - x)*(y - y_f) + v4*((x_c - x)*(y_c - y)));
+ }
+}
+
+unsigned char interpolate_biquadratic(float x, float y, global const unsigned char *src,
+ int width, int height, int stride, unsigned char def)
+{
+ int x_c, x_f, y_c, y_f;
+ unsigned char v1, v2, v3, v4;
+ float f1, f2, f3, f4;
+ x_f = (int)x;
+ y_f = (int)y;
+ x_c = x_f + 1;
+ y_c = y_f + 1;
+
+ if (x_f < - 1 || x_f > width || y_f < -1 || y_f > height)
+ return def;
+ else {
+ v4 = pixel(src, x_f, y_f, width, height, stride, def);
+ v2 = pixel(src, x_c, y_f, width, height, stride, def);
+ v3 = pixel(src, x_f, y_c, width, height, stride, def);
+ v1 = pixel(src, x_c, y_c, width, height, stride, def);
+
+ f1 = 1 - sqrt((x_c - x) * (y_c - y));
+ f2 = 1 - sqrt((x_c - x) * (y - y_f));
+ f3 = 1 - sqrt((x - x_f) * (y_c - y));
+ f4 = 1 - sqrt((x - x_f) * (y - y_f));
+ return (v1 * f1 + v2 * f2 + v3 * f3 + v4 * f4) / (f1 + f2 + f3 + f4);
+ }
+}
+
+inline const float clipf(float a, float amin, float amax)
+{
+ if (a < amin) return amin;
+ else if (a > amax) return amax;
+ else return a;
+}
+
+inline int mirror(int v, int m)
+{
+ while ((unsigned)v > (unsigned)m) {
+ v = -v;
+ if (v < 0)
+ v += 2 * m;
+ }
+ return v;
+}
+
+kernel void avfilter_transform_luma(global unsigned char *src,
+ global unsigned char *dst,
+ float4 matrix,
+ int interpolate,
+ int fill,
+ int src_stride_lu,
+ int dst_stride_lu,
+ int height,
+ int width)
+{
+ int x = get_global_id(0);
+ int y = get_global_id(1);
+ int idx_dst = y * dst_stride_lu + x;
+ unsigned char def = 0;
+ float x_s = x * matrix.x + y * matrix.y + matrix.z;
+ float y_s = x * (-matrix.y) + y * matrix.x + matrix.w;
+
+ if (x < width && y < height) {
+ switch (fill) {
+ case 0: //FILL_BLANK
+ def = 0;
+ break;
+ case 1: //FILL_ORIGINAL
+ def = src[y*src_stride_lu + x];
+ break;
+ case 2: //FILL_CLAMP
+ y_s = clipf(y_s, 0, height - 1);
+ x_s = clipf(x_s, 0, width - 1);
+ def = src[(int)y_s * src_stride_lu + (int)x_s];
+ break;
+ case 3: //FILL_MIRROR
+ y_s = mirror(y_s, height - 1);
+ x_s = mirror(x_s, width - 1);
+ def = src[(int)y_s * src_stride_lu + (int)x_s];
+ break;
+ }
+ switch (interpolate) {
+ case 0: //INTERPOLATE_NEAREST
+ dst[idx_dst] = interpolate_nearest(x_s, y_s, src, width, height, src_stride_lu, def);
+ break;
+ case 1: //INTERPOLATE_BILINEAR
+ dst[idx_dst] = interpolate_bilinear(x_s, y_s, src, width, height, src_stride_lu, def);
+ break;
+ case 2: //INTERPOLATE_BIQUADRATIC
+ dst[idx_dst] = interpolate_biquadratic(x_s, y_s, src, width, height, src_stride_lu, def);
+ break;
+ default:
+ return;
+ }
+ }
+}
+
+kernel void avfilter_transform_chroma(global unsigned char *src,
+ global unsigned char *dst,
+ float4 matrix,
+ int interpolate,
+ int fill,
+ int src_stride_lu,
+ int dst_stride_lu,
+ int src_stride_ch,
+ int dst_stride_ch,
+ int height,
+ int width,
+ int ch,
+ int cw)
+{
+
+ int x = get_global_id(0);
+ int y = get_global_id(1);
+ int pad_ch = get_global_size(1)>>1;
+ global unsigned char *dst_u = dst + height * dst_stride_lu;
+ global unsigned char *src_u = src + height * src_stride_lu;
+ global unsigned char *dst_v = dst_u + ch * dst_stride_ch;
+ global unsigned char *src_v = src_u + ch * src_stride_ch;
+ src = y < pad_ch ? src_u : src_v;
+ dst = y < pad_ch ? dst_u : dst_v;
+ y = select(y - pad_ch, y, y < pad_ch);
+ float x_s = x * matrix.x + y * matrix.y + matrix.z;
+ float y_s = x * (-matrix.y) + y * matrix.x + matrix.w;
+ int idx_dst = y * dst_stride_ch + x;
+ unsigned char def;
+
+ if (x < cw && y < ch) {
+ switch (fill) {
+ case 0: //FILL_BLANK
+ def = 0;
+ break;
+ case 1: //FILL_ORIGINAL
+ def = src[y*src_stride_ch + x];
+ break;
+ case 2: //FILL_CLAMP
+ y_s = clipf(y_s, 0, ch - 1);
+ x_s = clipf(x_s, 0, cw - 1);
+ def = src[(int)y_s * src_stride_ch + (int)x_s];
+ break;
+ case 3: //FILL_MIRROR
+ y_s = mirror(y_s, ch - 1);
+ x_s = mirror(x_s, cw - 1);
+ def = src[(int)y_s * src_stride_ch + (int)x_s];
+ break;
+ }
+ switch (interpolate) {
+ case 0: //INTERPOLATE_NEAREST
+ dst[idx_dst] = interpolate_nearest(x_s, y_s, src, cw, ch, src_stride_ch, def);
+ break;
+ case 1: //INTERPOLATE_BILINEAR
+ dst[idx_dst] = interpolate_bilinear(x_s, y_s, src, cw, ch, src_stride_ch, def);
+ break;
+ case 2: //INTERPOLATE_BIQUADRATIC
+ dst[idx_dst] = interpolate_biquadratic(x_s, y_s, src, cw, ch, src_stride_ch, def);
+ break;
+ default:
+ return;
+ }
+ }
+}
+);
+
+#endif /* AVFILTER_DESHAKE_OPENCL_KERNEL_H */
diff --git a/libavfilter/drawutils.c b/libavfilter/drawutils.c
index e837760459..f19fc5df1d 100644
--- a/libavfilter/drawutils.c
+++ b/libavfilter/drawutils.c
@@ -1,58 +1,103 @@
/*
- * This file is part of Libav.
+ * Copyright 2011 Stefano Sabatini <stefano.sabatini-lala poste it>
+ * Copyright 2012 Nicolas George <nicolas.george normalesup org>
*
- * Libav is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <string.h>
+#include "libavutil/avassert.h"
#include "libavutil/avutil.h"
#include "libavutil/colorspace.h"
+#include "libavutil/intreadwrite.h"
#include "libavutil/mem.h"
#include "libavutil/pixdesc.h"
#include "drawutils.h"
+#include "formats.h"
enum { RED = 0, GREEN, BLUE, ALPHA };
-int ff_fill_line_with_color(uint8_t *line[4], int pixel_step[4], int w, uint8_t dst_color[4],
- enum AVPixelFormat pix_fmt, uint8_t rgba_color[4],
- int *is_packed_rgba, uint8_t rgba_map_ptr[4])
+int ff_fill_rgba_map(uint8_t *rgba_map, enum AVPixelFormat pix_fmt)
{
- uint8_t rgba_map[4] = {0};
- int i;
- const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(pix_fmt);
- int hsub = pix_desc->log2_chroma_w;
-
- *is_packed_rgba = 1;
switch (pix_fmt) {
+ case AV_PIX_FMT_0RGB:
case AV_PIX_FMT_ARGB: rgba_map[ALPHA] = 0; rgba_map[RED ] = 1; rgba_map[GREEN] = 2; rgba_map[BLUE ] = 3; break;
+ case AV_PIX_FMT_0BGR:
case AV_PIX_FMT_ABGR: rgba_map[ALPHA] = 0; rgba_map[BLUE ] = 1; rgba_map[GREEN] = 2; rgba_map[RED ] = 3; break;
+ case AV_PIX_FMT_RGB48LE:
+ case AV_PIX_FMT_RGB48BE:
+ case AV_PIX_FMT_RGBA64BE:
+ case AV_PIX_FMT_RGBA64LE:
+ case AV_PIX_FMT_RGB0:
case AV_PIX_FMT_RGBA:
case AV_PIX_FMT_RGB24: rgba_map[RED ] = 0; rgba_map[GREEN] = 1; rgba_map[BLUE ] = 2; rgba_map[ALPHA] = 3; break;
+ case AV_PIX_FMT_BGR48LE:
+ case AV_PIX_FMT_BGR48BE:
+ case AV_PIX_FMT_BGRA64BE:
+ case AV_PIX_FMT_BGRA64LE:
case AV_PIX_FMT_BGRA:
+ case AV_PIX_FMT_BGR0:
case AV_PIX_FMT_BGR24: rgba_map[BLUE ] = 0; rgba_map[GREEN] = 1; rgba_map[RED ] = 2; rgba_map[ALPHA] = 3; break;
- default:
- *is_packed_rgba = 0;
+ case AV_PIX_FMT_GBRP9LE:
+ case AV_PIX_FMT_GBRP9BE:
+ case AV_PIX_FMT_GBRP10LE:
+ case AV_PIX_FMT_GBRP10BE:
+ case AV_PIX_FMT_GBRP12LE:
+ case AV_PIX_FMT_GBRP12BE:
+ case AV_PIX_FMT_GBRP14LE:
+ case AV_PIX_FMT_GBRP14BE:
+ case AV_PIX_FMT_GBRP16LE:
+ case AV_PIX_FMT_GBRP16BE:
+ case AV_PIX_FMT_GBRAP:
+ case AV_PIX_FMT_GBRAP12LE:
+ case AV_PIX_FMT_GBRAP12BE:
+ case AV_PIX_FMT_GBRAP16LE:
+ case AV_PIX_FMT_GBRAP16BE:
+ case AV_PIX_FMT_GBRP: rgba_map[GREEN] = 0; rgba_map[BLUE ] = 1; rgba_map[RED ] = 2; rgba_map[ALPHA] = 3; break;
+ default: /* unsupported */
+ return AVERROR(EINVAL);
}
+ return 0;
+}
+
+int ff_fill_line_with_color(uint8_t *line[4], int pixel_step[4], int w, uint8_t dst_color[4],
+ enum AVPixelFormat pix_fmt, uint8_t rgba_color[4],
+ int *is_packed_rgba, uint8_t rgba_map_ptr[4])
+{
+ uint8_t rgba_map[4] = {0};
+ int i;
+ const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(pix_fmt);
+ int hsub;
+
+ av_assert0(pix_desc);
+
+ hsub = pix_desc->log2_chroma_w;
+
+ *is_packed_rgba = ff_fill_rgba_map(rgba_map, pix_fmt) >= 0;
if (*is_packed_rgba) {
pixel_step[0] = (av_get_bits_per_pixel(pix_desc))>>3;
for (i = 0; i < 4; i++)
dst_color[rgba_map[i]] = rgba_color[i];
- line[0] = av_malloc(w * pixel_step[0]);
+ line[0] = av_malloc_array(w, pixel_step[0]);
+ if (!line[0])
+ return AVERROR(ENOMEM);
for (i = 0; i < w; i++)
memcpy(line[0] + i * pixel_step[0], dst_color, pixel_step[0]);
if (rgba_map_ptr)
@@ -70,8 +115,13 @@ int ff_fill_line_with_color(uint8_t *line[4], int pixel_step[4], int w, uint8_t
int hsub1 = (plane == 1 || plane == 2) ? hsub : 0;
pixel_step[plane] = 1;
- line_size = (w >> hsub1) * pixel_step[plane];
+ line_size = AV_CEIL_RSHIFT(w, hsub1) * pixel_step[plane];
line[plane] = av_malloc(line_size);
+ if (!line[plane]) {
+ while(plane && line[plane-1])
+ av_freep(&line[--plane]);
+ return AVERROR(ENOMEM);
+ }
memset(line[plane], dst_color[plane], line_size);
}
}
@@ -89,11 +139,13 @@ void ff_draw_rectangle(uint8_t *dst[4], int dst_linesize[4],
for (plane = 0; plane < 4 && dst[plane]; plane++) {
int hsub1 = plane == 1 || plane == 2 ? hsub : 0;
int vsub1 = plane == 1 || plane == 2 ? vsub : 0;
+ int width = AV_CEIL_RSHIFT(w, hsub1);
+ int height = AV_CEIL_RSHIFT(h, vsub1);
p = dst[plane] + (y >> vsub1) * dst_linesize[plane];
- for (i = 0; i < (h >> vsub1); i++) {
+ for (i = 0; i < height; i++) {
memcpy(p + (x >> hsub1) * pixelstep[plane],
- src[plane], (w >> hsub1) * pixelstep[plane]);
+ src[plane], width * pixelstep[plane]);
p += dst_linesize[plane];
}
}
@@ -109,12 +161,573 @@ void ff_copy_rectangle(uint8_t *dst[4], int dst_linesize[4],
for (plane = 0; plane < 4 && dst[plane]; plane++) {
int hsub1 = plane == 1 || plane == 2 ? hsub : 0;
int vsub1 = plane == 1 || plane == 2 ? vsub : 0;
+ int width = AV_CEIL_RSHIFT(w, hsub1);
+ int height = AV_CEIL_RSHIFT(h, vsub1);
p = dst[plane] + (y >> vsub1) * dst_linesize[plane];
- for (i = 0; i < (h >> vsub1); i++) {
+ for (i = 0; i < height; i++) {
memcpy(p + (x >> hsub1) * pixelstep[plane],
- src[plane] + src_linesize[plane]*(i+(y2>>vsub1)), (w >> hsub1) * pixelstep[plane]);
+ src[plane] + src_linesize[plane]*(i+(y2>>vsub1)), width * pixelstep[plane]);
+ p += dst_linesize[plane];
+ }
+ }
+}
+
+int ff_draw_init(FFDrawContext *draw, enum AVPixelFormat format, unsigned flags)
+{
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(format);
+ const AVComponentDescriptor *c;
+ unsigned i, nb_planes = 0;
+ int pixelstep[MAX_PLANES] = { 0 };
+
+ if (!desc || !desc->name)
+ return AVERROR(EINVAL);
+ if (desc->flags & ~(AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_RGB | AV_PIX_FMT_FLAG_PSEUDOPAL | AV_PIX_FMT_FLAG_ALPHA))
+ return AVERROR(ENOSYS);
+ if (format == AV_PIX_FMT_P010LE || format == AV_PIX_FMT_P010BE)
+ return AVERROR(ENOSYS);
+ for (i = 0; i < desc->nb_components; i++) {
+ c = &desc->comp[i];
+ /* for now, only 8-16 bits formats */
+ if (c->depth < 8 || c->depth > 16)
+ return AVERROR(ENOSYS);
+ if (desc->flags & AV_PIX_FMT_FLAG_BE)
+ return AVERROR(ENOSYS);
+ if (c->plane >= MAX_PLANES)
+ return AVERROR(ENOSYS);
+ /* strange interleaving */
+ if (pixelstep[c->plane] != 0 &&
+ pixelstep[c->plane] != c->step)
+ return AVERROR(ENOSYS);
+ if (pixelstep[c->plane] == 6 &&
+ c->depth == 16)
+ return AVERROR(ENOSYS);
+ pixelstep[c->plane] = c->step;
+ if (pixelstep[c->plane] >= 8)
+ return AVERROR(ENOSYS);
+ nb_planes = FFMAX(nb_planes, c->plane + 1);
+ }
+ memset(draw, 0, sizeof(*draw));
+ draw->desc = desc;
+ draw->format = format;
+ draw->nb_planes = nb_planes;
+ draw->flags = flags;
+ memcpy(draw->pixelstep, pixelstep, sizeof(draw->pixelstep));
+ draw->hsub[1] = draw->hsub[2] = draw->hsub_max = desc->log2_chroma_w;
+ draw->vsub[1] = draw->vsub[2] = draw->vsub_max = desc->log2_chroma_h;
+ for (i = 0; i < (desc->nb_components - !!(desc->flags & AV_PIX_FMT_FLAG_ALPHA && !(flags & FF_DRAW_PROCESS_ALPHA))); i++)
+ draw->comp_mask[desc->comp[i].plane] |=
+ 1 << desc->comp[i].offset;
+ return 0;
+}
+
+void ff_draw_color(FFDrawContext *draw, FFDrawColor *color, const uint8_t rgba[4])
+{
+ unsigned i;
+ uint8_t rgba_map[4];
+
+ if (rgba != color->rgba)
+ memcpy(color->rgba, rgba, sizeof(color->rgba));
+ if ((draw->desc->flags & AV_PIX_FMT_FLAG_RGB) &&
+ ff_fill_rgba_map(rgba_map, draw->format) >= 0) {
+ if (draw->nb_planes == 1) {
+ for (i = 0; i < 4; i++) {
+ color->comp[0].u8[rgba_map[i]] = rgba[i];
+ if (draw->desc->comp[rgba_map[i]].depth > 8) {
+ color->comp[0].u16[rgba_map[i]] = color->comp[0].u8[rgba_map[i]] << 8;
+ }
+ }
+ } else {
+ for (i = 0; i < 4; i++) {
+ color->comp[rgba_map[i]].u8[0] = rgba[i];
+ if (draw->desc->comp[rgba_map[i]].depth > 8)
+ color->comp[rgba_map[i]].u16[0] = color->comp[rgba_map[i]].u8[0] << (draw->desc->comp[rgba_map[i]].depth - 8);
+ }
+ }
+ } else if (draw->nb_planes >= 2) {
+ /* assume YUV */
+ const AVPixFmtDescriptor *desc = draw->desc;
+ color->comp[desc->comp[0].plane].u8[desc->comp[0].offset] = RGB_TO_Y_CCIR(rgba[0], rgba[1], rgba[2]);
+ color->comp[desc->comp[1].plane].u8[desc->comp[1].offset] = RGB_TO_U_CCIR(rgba[0], rgba[1], rgba[2], 0);
+ color->comp[desc->comp[2].plane].u8[desc->comp[2].offset] = RGB_TO_V_CCIR(rgba[0], rgba[1], rgba[2], 0);
+ color->comp[3].u8[0] = rgba[3];
+#define EXPAND(compn) \
+ if (desc->comp[compn].depth > 8) \
+ color->comp[desc->comp[compn].plane].u16[desc->comp[compn].offset] = \
+ color->comp[desc->comp[compn].plane].u8[desc->comp[compn].offset] << \
+ (draw->desc->comp[compn].depth + draw->desc->comp[compn].shift - 8)
+ EXPAND(3);
+ EXPAND(2);
+ EXPAND(1);
+ EXPAND(0);
+ } else if (draw->format == AV_PIX_FMT_GRAY8 || draw->format == AV_PIX_FMT_GRAY8A) {
+ color->comp[0].u8[0] = RGB_TO_Y_CCIR(rgba[0], rgba[1], rgba[2]);
+ color->comp[1].u8[0] = rgba[3];
+ } else if (draw->format == AV_PIX_FMT_GRAY16LE || draw->format == AV_PIX_FMT_YA16LE) {
+ color->comp[0].u8[0] = RGB_TO_Y_CCIR(rgba[0], rgba[1], rgba[2]);
+ color->comp[0].u16[0] = color->comp[0].u8[0] << 8;
+ color->comp[1].u8[0] = rgba[3];
+ color->comp[1].u16[0] = color->comp[1].u8[0] << 8;
+ } else {
+ av_log(NULL, AV_LOG_WARNING,
+ "Color conversion not implemented for %s\n", draw->desc->name);
+ memset(color, 128, sizeof(*color));
+ }
+}
+
+static uint8_t *pointer_at(FFDrawContext *draw, uint8_t *data[], int linesize[],
+ int plane, int x, int y)
+{
+ return data[plane] +
+ (y >> draw->vsub[plane]) * linesize[plane] +
+ (x >> draw->hsub[plane]) * draw->pixelstep[plane];
+}
+
+void ff_copy_rectangle2(FFDrawContext *draw,
+ uint8_t *dst[], int dst_linesize[],
+ uint8_t *src[], int src_linesize[],
+ int dst_x, int dst_y, int src_x, int src_y,
+ int w, int h)
+{
+ int plane, y, wp, hp;
+ uint8_t *p, *q;
+
+ for (plane = 0; plane < draw->nb_planes; plane++) {
+ p = pointer_at(draw, src, src_linesize, plane, src_x, src_y);
+ q = pointer_at(draw, dst, dst_linesize, plane, dst_x, dst_y);
+ wp = AV_CEIL_RSHIFT(w, draw->hsub[plane]) * draw->pixelstep[plane];
+ hp = AV_CEIL_RSHIFT(h, draw->vsub[plane]);
+ for (y = 0; y < hp; y++) {
+ memcpy(q, p, wp);
+ p += src_linesize[plane];
+ q += dst_linesize[plane];
+ }
+ }
+}
+
+void ff_fill_rectangle(FFDrawContext *draw, FFDrawColor *color,
+ uint8_t *dst[], int dst_linesize[],
+ int dst_x, int dst_y, int w, int h)
+{
+ int plane, x, y, wp, hp;
+ uint8_t *p0, *p;
+ FFDrawColor color_tmp = *color;
+
+ for (plane = 0; plane < draw->nb_planes; plane++) {
+ p0 = pointer_at(draw, dst, dst_linesize, plane, dst_x, dst_y);
+ wp = AV_CEIL_RSHIFT(w, draw->hsub[plane]);
+ hp = AV_CEIL_RSHIFT(h, draw->vsub[plane]);
+ if (!hp)
+ return;
+ p = p0;
+
+ if (HAVE_BIGENDIAN && draw->desc->comp[0].depth > 8) {
+ for (x = 0; 2*x < draw->pixelstep[plane]; x++)
+ color_tmp.comp[plane].u16[x] = av_bswap16(color_tmp.comp[plane].u16[x]);
+ }
+
+ /* copy first line from color */
+ for (x = 0; x < wp; x++) {
+ memcpy(p, color_tmp.comp[plane].u8, draw->pixelstep[plane]);
+ p += draw->pixelstep[plane];
+ }
+ wp *= draw->pixelstep[plane];
+ /* copy next lines from first line */
+ p = p0 + dst_linesize[plane];
+ for (y = 1; y < hp; y++) {
+ memcpy(p, p0, wp);
p += dst_linesize[plane];
}
}
}
+
+/**
+ * Clip interval [x; x+w[ within [0; wmax[.
+ * The resulting w may be negative if the final interval is empty.
+ * dx, if not null, return the difference between in and out value of x.
+ */
+static void clip_interval(int wmax, int *x, int *w, int *dx)
+{
+ if (dx)
+ *dx = 0;
+ if (*x < 0) {
+ if (dx)
+ *dx = -*x;
+ *w += *x;
+ *x = 0;
+ }
+ if (*x + *w > wmax)
+ *w = wmax - *x;
+}
+
+/**
+ * Decompose w pixels starting at x
+ * into start + (w starting at x) + end
+ * with x and w aligned on multiples of 1<<sub.
+ */
+static void subsampling_bounds(int sub, int *x, int *w, int *start, int *end)
+{
+ int mask = (1 << sub) - 1;
+
+ *start = (-*x) & mask;
+ *x += *start;
+ *start = FFMIN(*start, *w);
+ *w -= *start;
+ *end = *w & mask;
+ *w >>= sub;
+}
+
+static int component_used(FFDrawContext *draw, int plane, int comp)
+{
+ return (draw->comp_mask[plane] >> comp) & 1;
+}
+
+/* If alpha is in the [ 0 ; 0x1010101 ] range,
+ then alpha * value is in the [ 0 ; 0xFFFFFFFF ] range,
+ and >> 24 gives a correct rounding. */
+static void blend_line(uint8_t *dst, unsigned src, unsigned alpha,
+ int dx, int w, unsigned hsub, int left, int right)
+{
+ unsigned asrc = alpha * src;
+ unsigned tau = 0x1010101 - alpha;
+ int x;
+
+ if (left) {
+ unsigned suba = (left * alpha) >> hsub;
+ *dst = (*dst * (0x1010101 - suba) + src * suba) >> 24;
+ dst += dx;
+ }
+ for (x = 0; x < w; x++) {
+ *dst = (*dst * tau + asrc) >> 24;
+ dst += dx;
+ }
+ if (right) {
+ unsigned suba = (right * alpha) >> hsub;
+ *dst = (*dst * (0x1010101 - suba) + src * suba) >> 24;
+ }
+}
+
+static void blend_line16(uint8_t *dst, unsigned src, unsigned alpha,
+ int dx, int w, unsigned hsub, int left, int right)
+{
+ unsigned asrc = alpha * src;
+ unsigned tau = 0x10001 - alpha;
+ int x;
+
+ if (left) {
+ unsigned suba = (left * alpha) >> hsub;
+ uint16_t value = AV_RL16(dst);
+ AV_WL16(dst, (value * (0x10001 - suba) + src * suba) >> 16);
+ dst += dx;
+ }
+ for (x = 0; x < w; x++) {
+ uint16_t value = AV_RL16(dst);
+ AV_WL16(dst, (value * tau + asrc) >> 16);
+ dst += dx;
+ }
+ if (right) {
+ unsigned suba = (right * alpha) >> hsub;
+ uint16_t value = AV_RL16(dst);
+ AV_WL16(dst, (value * (0x10001 - suba) + src * suba) >> 16);
+ }
+}
+
+void ff_blend_rectangle(FFDrawContext *draw, FFDrawColor *color,
+ uint8_t *dst[], int dst_linesize[],
+ int dst_w, int dst_h,
+ int x0, int y0, int w, int h)
+{
+ unsigned alpha, nb_planes, nb_comp, plane, comp;
+ int w_sub, h_sub, x_sub, y_sub, left, right, top, bottom, y;
+ uint8_t *p0, *p;
+
+ /* TODO optimize if alpha = 0xFF */
+ clip_interval(dst_w, &x0, &w, NULL);
+ clip_interval(dst_h, &y0, &h, NULL);
+ if (w <= 0 || h <= 0 || !color->rgba[3])
+ return;
+ if (draw->desc->comp[0].depth <= 8) {
+ /* 0x10203 * alpha + 2 is in the [ 2 ; 0x1010101 - 2 ] range */
+ alpha = 0x10203 * color->rgba[3] + 0x2;
+ } else {
+ /* 0x101 * alpha is in the [ 2 ; 0x1001] range */
+ alpha = 0x101 * color->rgba[3] + 0x2;
+ }
+ nb_planes = draw->nb_planes - !!(draw->desc->flags & AV_PIX_FMT_FLAG_ALPHA && !(draw->flags & FF_DRAW_PROCESS_ALPHA));
+ nb_planes += !nb_planes;
+ for (plane = 0; plane < nb_planes; plane++) {
+ nb_comp = draw->pixelstep[plane];
+ p0 = pointer_at(draw, dst, dst_linesize, plane, x0, y0);
+ w_sub = w;
+ h_sub = h;
+ x_sub = x0;
+ y_sub = y0;
+ subsampling_bounds(draw->hsub[plane], &x_sub, &w_sub, &left, &right);
+ subsampling_bounds(draw->vsub[plane], &y_sub, &h_sub, &top, &bottom);
+ for (comp = 0; comp < nb_comp; comp++) {
+ const int depth = draw->desc->comp[comp].depth;
+
+ if (!component_used(draw, plane, comp))
+ continue;
+ p = p0 + comp;
+ if (top) {
+ if (depth <= 8) {
+ blend_line(p, color->comp[plane].u8[comp], alpha >> 1,
+ draw->pixelstep[plane], w_sub,
+ draw->hsub[plane], left, right);
+ } else {
+ blend_line16(p, color->comp[plane].u16[comp], alpha >> 1,
+ draw->pixelstep[plane], w_sub,
+ draw->hsub[plane], left, right);
+ }
+ p += dst_linesize[plane];
+ }
+ if (depth <= 8) {
+ for (y = 0; y < h_sub; y++) {
+ blend_line(p, color->comp[plane].u8[comp], alpha,
+ draw->pixelstep[plane], w_sub,
+ draw->hsub[plane], left, right);
+ p += dst_linesize[plane];
+ }
+ } else {
+ for (y = 0; y < h_sub; y++) {
+ blend_line16(p, color->comp[plane].u16[comp], alpha,
+ draw->pixelstep[plane], w_sub,
+ draw->hsub[plane], left, right);
+ p += dst_linesize[plane];
+ }
+ }
+ if (bottom) {
+ if (depth <= 8) {
+ blend_line(p, color->comp[plane].u8[comp], alpha >> 1,
+ draw->pixelstep[plane], w_sub,
+ draw->hsub[plane], left, right);
+ } else {
+ blend_line16(p, color->comp[plane].u16[comp], alpha >> 1,
+ draw->pixelstep[plane], w_sub,
+ draw->hsub[plane], left, right);
+ }
+ }
+ }
+ }
+}
+
+static void blend_pixel16(uint8_t *dst, unsigned src, unsigned alpha,
+ const uint8_t *mask, int mask_linesize, int l2depth,
+ unsigned w, unsigned h, unsigned shift, unsigned xm0)
+{
+ unsigned xm, x, y, t = 0;
+ unsigned xmshf = 3 - l2depth;
+ unsigned xmmod = 7 >> l2depth;
+ unsigned mbits = (1 << (1 << l2depth)) - 1;
+ unsigned mmult = 255 / mbits;
+ uint16_t value = AV_RL16(dst);
+
+ for (y = 0; y < h; y++) {
+ xm = xm0;
+ for (x = 0; x < w; x++) {
+ t += ((mask[xm >> xmshf] >> ((~xm & xmmod) << l2depth)) & mbits)
+ * mmult;
+ xm++;
+ }
+ mask += mask_linesize;
+ }
+ alpha = (t >> shift) * alpha;
+ AV_WL16(dst, ((0x10001 - alpha) * value + alpha * src) >> 16);
+}
+
+static void blend_pixel(uint8_t *dst, unsigned src, unsigned alpha,
+ const uint8_t *mask, int mask_linesize, int l2depth,
+ unsigned w, unsigned h, unsigned shift, unsigned xm0)
+{
+ unsigned xm, x, y, t = 0;
+ unsigned xmshf = 3 - l2depth;
+ unsigned xmmod = 7 >> l2depth;
+ unsigned mbits = (1 << (1 << l2depth)) - 1;
+ unsigned mmult = 255 / mbits;
+
+ for (y = 0; y < h; y++) {
+ xm = xm0;
+ for (x = 0; x < w; x++) {
+ t += ((mask[xm >> xmshf] >> ((~xm & xmmod) << l2depth)) & mbits)
+ * mmult;
+ xm++;
+ }
+ mask += mask_linesize;
+ }
+ alpha = (t >> shift) * alpha;
+ *dst = ((0x1010101 - alpha) * *dst + alpha * src) >> 24;
+}
+
+static void blend_line_hv16(uint8_t *dst, int dst_delta,
+ unsigned src, unsigned alpha,
+ const uint8_t *mask, int mask_linesize, int l2depth, int w,
+ unsigned hsub, unsigned vsub,
+ int xm, int left, int right, int hband)
+{
+ int x;
+
+ if (left) {
+ blend_pixel16(dst, src, alpha, mask, mask_linesize, l2depth,
+ left, hband, hsub + vsub, xm);
+ dst += dst_delta;
+ xm += left;
+ }
+ for (x = 0; x < w; x++) {
+ blend_pixel16(dst, src, alpha, mask, mask_linesize, l2depth,
+ 1 << hsub, hband, hsub + vsub, xm);
+ dst += dst_delta;
+ xm += 1 << hsub;
+ }
+ if (right)
+ blend_pixel16(dst, src, alpha, mask, mask_linesize, l2depth,
+ right, hband, hsub + vsub, xm);
+}
+
+static void blend_line_hv(uint8_t *dst, int dst_delta,
+ unsigned src, unsigned alpha,
+ const uint8_t *mask, int mask_linesize, int l2depth, int w,
+ unsigned hsub, unsigned vsub,
+ int xm, int left, int right, int hband)
+{
+ int x;
+
+ if (left) {
+ blend_pixel(dst, src, alpha, mask, mask_linesize, l2depth,
+ left, hband, hsub + vsub, xm);
+ dst += dst_delta;
+ xm += left;
+ }
+ for (x = 0; x < w; x++) {
+ blend_pixel(dst, src, alpha, mask, mask_linesize, l2depth,
+ 1 << hsub, hband, hsub + vsub, xm);
+ dst += dst_delta;
+ xm += 1 << hsub;
+ }
+ if (right)
+ blend_pixel(dst, src, alpha, mask, mask_linesize, l2depth,
+ right, hband, hsub + vsub, xm);
+}
+
+void ff_blend_mask(FFDrawContext *draw, FFDrawColor *color,
+ uint8_t *dst[], int dst_linesize[], int dst_w, int dst_h,
+ const uint8_t *mask, int mask_linesize, int mask_w, int mask_h,
+ int l2depth, unsigned endianness, int x0, int y0)
+{
+ unsigned alpha, nb_planes, nb_comp, plane, comp;
+ int xm0, ym0, w_sub, h_sub, x_sub, y_sub, left, right, top, bottom, y;
+ uint8_t *p0, *p;
+ const uint8_t *m;
+
+ clip_interval(dst_w, &x0, &mask_w, &xm0);
+ clip_interval(dst_h, &y0, &mask_h, &ym0);
+ mask += ym0 * mask_linesize;
+ if (mask_w <= 0 || mask_h <= 0 || !color->rgba[3])
+ return;
+ if (draw->desc->comp[0].depth <= 8) {
+ /* alpha is in the [ 0 ; 0x10203 ] range,
+ alpha * mask is in the [ 0 ; 0x1010101 - 4 ] range */
+ alpha = (0x10307 * color->rgba[3] + 0x3) >> 8;
+ } else {
+ alpha = (0x101 * color->rgba[3] + 0x2) >> 8;
+ }
+ nb_planes = draw->nb_planes - !!(draw->desc->flags & AV_PIX_FMT_FLAG_ALPHA && !(draw->flags & FF_DRAW_PROCESS_ALPHA));
+ nb_planes += !nb_planes;
+ for (plane = 0; plane < nb_planes; plane++) {
+ nb_comp = draw->pixelstep[plane];
+ p0 = pointer_at(draw, dst, dst_linesize, plane, x0, y0);
+ w_sub = mask_w;
+ h_sub = mask_h;
+ x_sub = x0;
+ y_sub = y0;
+ subsampling_bounds(draw->hsub[plane], &x_sub, &w_sub, &left, &right);
+ subsampling_bounds(draw->vsub[plane], &y_sub, &h_sub, &top, &bottom);
+ for (comp = 0; comp < nb_comp; comp++) {
+ const int depth = draw->desc->comp[comp].depth;
+
+ if (!component_used(draw, plane, comp))
+ continue;
+ p = p0 + comp;
+ m = mask;
+ if (top) {
+ if (depth <= 8) {
+ blend_line_hv(p, draw->pixelstep[plane],
+ color->comp[plane].u8[comp], alpha,
+ m, mask_linesize, l2depth, w_sub,
+ draw->hsub[plane], draw->vsub[plane],
+ xm0, left, right, top);
+ } else {
+ blend_line_hv16(p, draw->pixelstep[plane],
+ color->comp[plane].u16[comp], alpha,
+ m, mask_linesize, l2depth, w_sub,
+ draw->hsub[plane], draw->vsub[plane],
+ xm0, left, right, top);
+ }
+ p += dst_linesize[plane];
+ m += top * mask_linesize;
+ }
+ if (depth <= 8) {
+ for (y = 0; y < h_sub; y++) {
+ blend_line_hv(p, draw->pixelstep[plane],
+ color->comp[plane].u8[comp], alpha,
+ m, mask_linesize, l2depth, w_sub,
+ draw->hsub[plane], draw->vsub[plane],
+ xm0, left, right, 1 << draw->vsub[plane]);
+ p += dst_linesize[plane];
+ m += mask_linesize << draw->vsub[plane];
+ }
+ } else {
+ for (y = 0; y < h_sub; y++) {
+ blend_line_hv16(p, draw->pixelstep[plane],
+ color->comp[plane].u16[comp], alpha,
+ m, mask_linesize, l2depth, w_sub,
+ draw->hsub[plane], draw->vsub[plane],
+ xm0, left, right, 1 << draw->vsub[plane]);
+ p += dst_linesize[plane];
+ m += mask_linesize << draw->vsub[plane];
+ }
+ }
+ if (bottom) {
+ if (depth <= 8) {
+ blend_line_hv(p, draw->pixelstep[plane],
+ color->comp[plane].u8[comp], alpha,
+ m, mask_linesize, l2depth, w_sub,
+ draw->hsub[plane], draw->vsub[plane],
+ xm0, left, right, bottom);
+ } else {
+ blend_line_hv16(p, draw->pixelstep[plane],
+ color->comp[plane].u16[comp], alpha,
+ m, mask_linesize, l2depth, w_sub,
+ draw->hsub[plane], draw->vsub[plane],
+ xm0, left, right, bottom);
+ }
+ }
+ }
+ }
+}
+
+int ff_draw_round_to_sub(FFDrawContext *draw, int sub_dir, int round_dir,
+ int value)
+{
+ unsigned shift = sub_dir ? draw->vsub_max : draw->hsub_max;
+
+ if (!shift)
+ return value;
+ if (round_dir >= 0)
+ value += round_dir ? (1 << shift) - 1 : 1 << (shift - 1);
+ return (value >> shift) << shift;
+}
+
+AVFilterFormats *ff_draw_supported_pixel_formats(unsigned flags)
+{
+ enum AVPixelFormat i;
+ FFDrawContext draw;
+ AVFilterFormats *fmts = NULL;
+ int ret;
+
+ for (i = 0; av_pix_fmt_desc_get(i); i++)
+ if (ff_draw_init(&draw, i, flags) >= 0 &&
+ (ret = ff_add_format(&fmts, i)) < 0)
+ return NULL;
+ return fmts;
+}
diff --git a/libavfilter/drawutils.h b/libavfilter/drawutils.h
index 73f482e83e..cf53635456 100644
--- a/libavfilter/drawutils.h
+++ b/libavfilter/drawutils.h
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -25,8 +25,11 @@
*/
#include <stdint.h>
+#include "avfilter.h"
#include "libavutil/pixfmt.h"
+int ff_fill_rgba_map(uint8_t *rgba_map, enum AVPixelFormat pix_fmt);
+
int ff_fill_line_with_color(uint8_t *line[4], int pixel_step[4], int w,
uint8_t dst_color[4],
enum AVPixelFormat pix_fmt, uint8_t rgba_color[4],
@@ -40,4 +43,119 @@ void ff_copy_rectangle(uint8_t *dst[4], int dst_linesize[4],
uint8_t *src[4], int src_linesize[4], int pixelstep[4],
int hsub, int vsub, int x, int y, int y2, int w, int h);
+#define MAX_PLANES 4
+
+typedef struct FFDrawContext {
+ const struct AVPixFmtDescriptor *desc;
+ enum AVPixelFormat format;
+ unsigned nb_planes;
+ int pixelstep[MAX_PLANES]; /*< offset between pixels */
+ uint8_t comp_mask[MAX_PLANES]; /*< bitmask of used non-alpha components */
+ uint8_t hsub[MAX_PLANES]; /*< horizontal subsampling */
+ uint8_t vsub[MAX_PLANES]; /*< vertical subsampling */
+ uint8_t hsub_max;
+ uint8_t vsub_max;
+ unsigned flags;
+} FFDrawContext;
+
+typedef struct FFDrawColor {
+ uint8_t rgba[4];
+ union {
+ uint32_t u32[4];
+ uint16_t u16[8];
+ uint8_t u8[16];
+ } comp[MAX_PLANES];
+} FFDrawColor;
+
+/**
+ * Process alpha pixel component.
+ */
+#define FF_DRAW_PROCESS_ALPHA 1
+
+/**
+ * Init a draw context.
+ *
+ * Only a limited number of pixel formats are supported, if format is not
+ * supported the function will return an error.
+ * flags is combination of FF_DRAW_* flags.
+ * @return 0 for success, < 0 for error
+ */
+int ff_draw_init(FFDrawContext *draw, enum AVPixelFormat format, unsigned flags);
+
+/**
+ * Prepare a color.
+ */
+void ff_draw_color(FFDrawContext *draw, FFDrawColor *color, const uint8_t rgba[4]);
+
+/**
+ * Copy a rectangle from an image to another.
+ *
+ * The coordinates must be as even as the subsampling requires.
+ */
+void ff_copy_rectangle2(FFDrawContext *draw,
+ uint8_t *dst[], int dst_linesize[],
+ uint8_t *src[], int src_linesize[],
+ int dst_x, int dst_y, int src_x, int src_y,
+ int w, int h);
+
+/**
+ * Fill a rectangle with an uniform color.
+ *
+ * The coordinates must be as even as the subsampling requires.
+ * The color needs to be inited with ff_draw_color.
+ */
+void ff_fill_rectangle(FFDrawContext *draw, FFDrawColor *color,
+ uint8_t *dst[], int dst_linesize[],
+ int dst_x, int dst_y, int w, int h);
+
+/**
+ * Blend a rectangle with an uniform color.
+ */
+void ff_blend_rectangle(FFDrawContext *draw, FFDrawColor *color,
+ uint8_t *dst[], int dst_linesize[],
+ int dst_w, int dst_h,
+ int x0, int y0, int w, int h);
+
+/**
+ * Blend an alpha mask with an uniform color.
+ *
+ * @param draw draw context
+ * @param color color for the overlay;
+ * @param dst destination image
+ * @param dst_linesize line stride of the destination
+ * @param dst_w width of the destination image
+ * @param dst_h height of the destination image
+ * @param mask mask
+ * @param mask_linesize line stride of the mask
+ * @param mask_w width of the mask
+ * @param mask_h height of the mask
+ * @param l2depth log2 of depth of the mask (0 for 1bpp, 3 for 8bpp)
+ * @param endianness bit order of the mask (0: MSB to the left)
+ * @param x0 horizontal position of the overlay
+ * @param y0 vertical position of the overlay
+ */
+void ff_blend_mask(FFDrawContext *draw, FFDrawColor *color,
+ uint8_t *dst[], int dst_linesize[], int dst_w, int dst_h,
+ const uint8_t *mask, int mask_linesize, int mask_w, int mask_h,
+ int l2depth, unsigned endianness, int x0, int y0);
+
+/**
+ * Round a dimension according to subsampling.
+ *
+ * @param draw draw context
+ * @param sub_dir 0 for horizontal, 1 for vertical
+ * @param round_dir 0 nearest, -1 round down, +1 round up
+ * @param value value to round
+ * @return the rounded value
+ */
+int ff_draw_round_to_sub(FFDrawContext *draw, int sub_dir, int round_dir,
+ int value);
+
+/**
+ * Return the list of pixel formats supported by the draw functions.
+ *
+ * The flags are the same as ff_draw_init, i.e., none currently.
+ */
+AVFilterFormats *ff_draw_supported_pixel_formats(unsigned flags);
+
#endif /* AVFILTER_DRAWUTILS_H */
diff --git a/libavfilter/dualinput.c b/libavfilter/dualinput.c
new file mode 100644
index 0000000000..44750973a6
--- /dev/null
+++ b/libavfilter/dualinput.c
@@ -0,0 +1,90 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "dualinput.h"
+#include "libavutil/timestamp.h"
+
+static int process_frame(FFFrameSync *fs)
+{
+ AVFilterContext *ctx = fs->parent;
+ FFDualInputContext *s = fs->opaque;
+ AVFrame *mainpic = NULL, *secondpic = NULL;
+ int ret = 0;
+
+ if ((ret = ff_framesync_get_frame(&s->fs, 0, &mainpic, 1)) < 0 ||
+ (ret = ff_framesync_get_frame(&s->fs, 1, &secondpic, 0)) < 0) {
+ av_frame_free(&mainpic);
+ return ret;
+ }
+ av_assert0(mainpic);
+ mainpic->pts = av_rescale_q(s->fs.pts, s->fs.time_base, ctx->outputs[0]->time_base);
+ if (secondpic && !ctx->is_disabled)
+ mainpic = s->process(ctx, mainpic, secondpic);
+ ret = ff_filter_frame(ctx->outputs[0], mainpic);
+ av_assert1(ret != AVERROR(EAGAIN));
+ return ret;
+}
+
+int ff_dualinput_init(AVFilterContext *ctx, FFDualInputContext *s)
+{
+ FFFrameSyncIn *in;
+ int ret;
+
+ if ((ret = ff_framesync_init(&s->fs, ctx, 2)) < 0)
+ return ret;
+
+ in = s->fs.in;
+ s->fs.opaque = s;
+ s->fs.on_event = process_frame;
+ in[0].time_base = ctx->inputs[0]->time_base;
+ in[1].time_base = ctx->inputs[1]->time_base;
+ in[0].sync = 2;
+ in[0].before = EXT_STOP;
+ in[0].after = EXT_INFINITY;
+ in[1].sync = 1;
+ in[1].before = EXT_NULL;
+ in[1].after = EXT_INFINITY;
+
+ if (s->shortest)
+ in[0].after = in[1].after = EXT_STOP;
+ if (!s->repeatlast) {
+ in[1].after = EXT_NULL;
+ in[1].sync = 0;
+ }
+ if (s->skip_initial_unpaired) {
+ in[1].before = EXT_STOP;
+ }
+
+ return ff_framesync_configure(&s->fs);
+}
+
+int ff_dualinput_filter_frame(FFDualInputContext *s,
+ AVFilterLink *inlink, AVFrame *in)
+{
+ return ff_framesync_filter_frame(&s->fs, inlink, in);
+}
+
+int ff_dualinput_request_frame(FFDualInputContext *s, AVFilterLink *outlink)
+{
+ return ff_framesync_request_frame(&s->fs, outlink);
+}
+
+void ff_dualinput_uninit(FFDualInputContext *s)
+{
+ ff_framesync_uninit(&s->fs);
+}
diff --git a/libavfilter/dualinput.h b/libavfilter/dualinput.h
new file mode 100644
index 0000000000..707b10c397
--- /dev/null
+++ b/libavfilter/dualinput.h
@@ -0,0 +1,46 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Double input streams helper for filters
+ */
+
+#ifndef AVFILTER_DUALINPUT_H
+#define AVFILTER_DUALINPUT_H
+
+#include <stdint.h>
+#include "bufferqueue.h"
+#include "framesync.h"
+#include "internal.h"
+
+typedef struct {
+ FFFrameSync fs;
+
+ AVFrame *(*process)(AVFilterContext *ctx, AVFrame *main, const AVFrame *second);
+ int shortest; ///< terminate stream when the second input terminates
+ int repeatlast; ///< repeat last second frame
+ int skip_initial_unpaired; ///< Skip initial frames that do not have a 2nd input
+} FFDualInputContext;
+
+int ff_dualinput_init(AVFilterContext *ctx, FFDualInputContext *s);
+int ff_dualinput_filter_frame(FFDualInputContext *s, AVFilterLink *inlink, AVFrame *in);
+int ff_dualinput_request_frame(FFDualInputContext *s, AVFilterLink *outlink);
+void ff_dualinput_uninit(FFDualInputContext *s);
+
+#endif /* AVFILTER_DUALINPUT_H */
diff --git a/libavfilter/ebur128.c b/libavfilter/ebur128.c
new file mode 100644
index 0000000000..e11008078d
--- /dev/null
+++ b/libavfilter/ebur128.c
@@ -0,0 +1,769 @@
+/*
+ * Copyright (c) 2011 Jan Kokemüller
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * This file is based on libebur128 which is available at
+ * https://github.com/jiixyj/libebur128/
+ *
+ * Libebur128 has the following copyright:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+*/
+
+#include "ebur128.h"
+
+#include <float.h>
+#include <limits.h>
+#include <math.h> /* You may have to define _USE_MATH_DEFINES if you use MSVC */
+
+#include "libavutil/common.h"
+#include "libavutil/mem.h"
+#include "libavutil/thread.h"
+
+#define CHECK_ERROR(condition, errorcode, goto_point) \
+ if ((condition)) { \
+ errcode = (errorcode); \
+ goto goto_point; \
+ }
+
+#define ALMOST_ZERO 0.000001
+
+#define RELATIVE_GATE (-10.0)
+#define RELATIVE_GATE_FACTOR pow(10.0, RELATIVE_GATE / 10.0)
+#define MINUS_20DB pow(10.0, -20.0 / 10.0)
+
+struct FFEBUR128StateInternal {
+ /** Filtered audio data (used as ring buffer). */
+ double *audio_data;
+ /** Size of audio_data array. */
+ size_t audio_data_frames;
+ /** Current index for audio_data. */
+ size_t audio_data_index;
+ /** How many frames are needed for a gating block. Will correspond to 400ms
+ * of audio at initialization, and 100ms after the first block (75% overlap
+ * as specified in the 2011 revision of BS1770). */
+ unsigned long needed_frames;
+ /** The channel map. Has as many elements as there are channels. */
+ int *channel_map;
+ /** How many samples fit in 100ms (rounded). */
+ unsigned long samples_in_100ms;
+ /** BS.1770 filter coefficients (nominator). */
+ double b[5];
+ /** BS.1770 filter coefficients (denominator). */
+ double a[5];
+ /** BS.1770 filter state. */
+ double v[5][5];
+ /** Histograms, used to calculate LRA. */
+ unsigned long *block_energy_histogram;
+ unsigned long *short_term_block_energy_histogram;
+ /** Keeps track of when a new short term block is needed. */
+ size_t short_term_frame_counter;
+ /** Maximum sample peak, one per channel */
+ double *sample_peak;
+ /** The maximum window duration in ms. */
+ unsigned long window;
+ /** Data pointer array for interleaved data */
+ void **data_ptrs;
+};
+
+static AVOnce histogram_init = AV_ONCE_INIT;
+static DECLARE_ALIGNED(32, double, histogram_energies)[1000];
+static DECLARE_ALIGNED(32, double, histogram_energy_boundaries)[1001];
+
+static void ebur128_init_filter(FFEBUR128State * st)
+{
+ int i, j;
+
+ double f0 = 1681.974450955533;
+ double G = 3.999843853973347;
+ double Q = 0.7071752369554196;
+
+ double K = tan(M_PI * f0 / (double) st->samplerate);
+ double Vh = pow(10.0, G / 20.0);
+ double Vb = pow(Vh, 0.4996667741545416);
+
+ double pb[3] = { 0.0, 0.0, 0.0 };
+ double pa[3] = { 1.0, 0.0, 0.0 };
+ double rb[3] = { 1.0, -2.0, 1.0 };
+ double ra[3] = { 1.0, 0.0, 0.0 };
+
+ double a0 = 1.0 + K / Q + K * K;
+ pb[0] = (Vh + Vb * K / Q + K * K) / a0;
+ pb[1] = 2.0 * (K * K - Vh) / a0;
+ pb[2] = (Vh - Vb * K / Q + K * K) / a0;
+ pa[1] = 2.0 * (K * K - 1.0) / a0;
+ pa[2] = (1.0 - K / Q + K * K) / a0;
+
+ f0 = 38.13547087602444;
+ Q = 0.5003270373238773;
+ K = tan(M_PI * f0 / (double) st->samplerate);
+
+ ra[1] = 2.0 * (K * K - 1.0) / (1.0 + K / Q + K * K);
+ ra[2] = (1.0 - K / Q + K * K) / (1.0 + K / Q + K * K);
+
+ st->d->b[0] = pb[0] * rb[0];
+ st->d->b[1] = pb[0] * rb[1] + pb[1] * rb[0];
+ st->d->b[2] = pb[0] * rb[2] + pb[1] * rb[1] + pb[2] * rb[0];
+ st->d->b[3] = pb[1] * rb[2] + pb[2] * rb[1];
+ st->d->b[4] = pb[2] * rb[2];
+
+ st->d->a[0] = pa[0] * ra[0];
+ st->d->a[1] = pa[0] * ra[1] + pa[1] * ra[0];
+ st->d->a[2] = pa[0] * ra[2] + pa[1] * ra[1] + pa[2] * ra[0];
+ st->d->a[3] = pa[1] * ra[2] + pa[2] * ra[1];
+ st->d->a[4] = pa[2] * ra[2];
+
+ for (i = 0; i < 5; ++i) {
+ for (j = 0; j < 5; ++j) {
+ st->d->v[i][j] = 0.0;
+ }
+ }
+}
+
+static int ebur128_init_channel_map(FFEBUR128State * st)
+{
+ size_t i;
+ st->d->channel_map =
+ (int *) av_malloc_array(st->channels, sizeof(int));
+ if (!st->d->channel_map)
+ return AVERROR(ENOMEM);
+ if (st->channels == 4) {
+ st->d->channel_map[0] = FF_EBUR128_LEFT;
+ st->d->channel_map[1] = FF_EBUR128_RIGHT;
+ st->d->channel_map[2] = FF_EBUR128_LEFT_SURROUND;
+ st->d->channel_map[3] = FF_EBUR128_RIGHT_SURROUND;
+ } else if (st->channels == 5) {
+ st->d->channel_map[0] = FF_EBUR128_LEFT;
+ st->d->channel_map[1] = FF_EBUR128_RIGHT;
+ st->d->channel_map[2] = FF_EBUR128_CENTER;
+ st->d->channel_map[3] = FF_EBUR128_LEFT_SURROUND;
+ st->d->channel_map[4] = FF_EBUR128_RIGHT_SURROUND;
+ } else {
+ for (i = 0; i < st->channels; ++i) {
+ switch (i) {
+ case 0:
+ st->d->channel_map[i] = FF_EBUR128_LEFT;
+ break;
+ case 1:
+ st->d->channel_map[i] = FF_EBUR128_RIGHT;
+ break;
+ case 2:
+ st->d->channel_map[i] = FF_EBUR128_CENTER;
+ break;
+ case 3:
+ st->d->channel_map[i] = FF_EBUR128_UNUSED;
+ break;
+ case 4:
+ st->d->channel_map[i] = FF_EBUR128_LEFT_SURROUND;
+ break;
+ case 5:
+ st->d->channel_map[i] = FF_EBUR128_RIGHT_SURROUND;
+ break;
+ default:
+ st->d->channel_map[i] = FF_EBUR128_UNUSED;
+ break;
+ }
+ }
+ }
+ return 0;
+}
+
+static inline void init_histogram(void)
+{
+ int i;
+ /* initialize static constants */
+ histogram_energy_boundaries[0] = pow(10.0, (-70.0 + 0.691) / 10.0);
+ for (i = 0; i < 1000; ++i) {
+ histogram_energies[i] =
+ pow(10.0, ((double) i / 10.0 - 69.95 + 0.691) / 10.0);
+ }
+ for (i = 1; i < 1001; ++i) {
+ histogram_energy_boundaries[i] =
+ pow(10.0, ((double) i / 10.0 - 70.0 + 0.691) / 10.0);
+ }
+}
+
+FFEBUR128State *ff_ebur128_init(unsigned int channels,
+ unsigned long samplerate,
+ unsigned long window, int mode)
+{
+ int errcode;
+ FFEBUR128State *st;
+
+ st = (FFEBUR128State *) av_malloc(sizeof(FFEBUR128State));
+ CHECK_ERROR(!st, 0, exit)
+ st->d = (struct FFEBUR128StateInternal *)
+ av_malloc(sizeof(struct FFEBUR128StateInternal));
+ CHECK_ERROR(!st->d, 0, free_state)
+ st->channels = channels;
+ errcode = ebur128_init_channel_map(st);
+ CHECK_ERROR(errcode, 0, free_internal)
+
+ st->d->sample_peak =
+ (double *) av_mallocz_array(channels, sizeof(double));
+ CHECK_ERROR(!st->d->sample_peak, 0, free_channel_map)
+
+ st->samplerate = samplerate;
+ st->d->samples_in_100ms = (st->samplerate + 5) / 10;
+ st->mode = mode;
+ if ((mode & FF_EBUR128_MODE_S) == FF_EBUR128_MODE_S) {
+ st->d->window = FFMAX(window, 3000);
+ } else if ((mode & FF_EBUR128_MODE_M) == FF_EBUR128_MODE_M) {
+ st->d->window = FFMAX(window, 400);
+ } else {
+ goto free_sample_peak;
+ }
+ st->d->audio_data_frames = st->samplerate * st->d->window / 1000;
+ if (st->d->audio_data_frames % st->d->samples_in_100ms) {
+ /* round up to multiple of samples_in_100ms */
+ st->d->audio_data_frames = st->d->audio_data_frames
+ + st->d->samples_in_100ms
+ - (st->d->audio_data_frames % st->d->samples_in_100ms);
+ }
+ st->d->audio_data =
+ (double *) av_mallocz_array(st->d->audio_data_frames,
+ st->channels * sizeof(double));
+ CHECK_ERROR(!st->d->audio_data, 0, free_sample_peak)
+
+ ebur128_init_filter(st);
+
+ st->d->block_energy_histogram =
+ av_mallocz(1000 * sizeof(unsigned long));
+ CHECK_ERROR(!st->d->block_energy_histogram, 0, free_audio_data)
+ st->d->short_term_block_energy_histogram =
+ av_mallocz(1000 * sizeof(unsigned long));
+ CHECK_ERROR(!st->d->short_term_block_energy_histogram, 0,
+ free_block_energy_histogram)
+ st->d->short_term_frame_counter = 0;
+
+ /* the first block needs 400ms of audio data */
+ st->d->needed_frames = st->d->samples_in_100ms * 4;
+ /* start at the beginning of the buffer */
+ st->d->audio_data_index = 0;
+
+ if (ff_thread_once(&histogram_init, &init_histogram) != 0)
+ goto free_short_term_block_energy_histogram;
+
+ st->d->data_ptrs = av_malloc_array(channels, sizeof(void *));
+ CHECK_ERROR(!st->d->data_ptrs, 0,
+ free_short_term_block_energy_histogram);
+
+ return st;
+
+free_short_term_block_energy_histogram:
+ av_free(st->d->short_term_block_energy_histogram);
+free_block_energy_histogram:
+ av_free(st->d->block_energy_histogram);
+free_audio_data:
+ av_free(st->d->audio_data);
+free_sample_peak:
+ av_free(st->d->sample_peak);
+free_channel_map:
+ av_free(st->d->channel_map);
+free_internal:
+ av_free(st->d);
+free_state:
+ av_free(st);
+exit:
+ return NULL;
+}
+
+void ff_ebur128_destroy(FFEBUR128State ** st)
+{
+ av_free((*st)->d->block_energy_histogram);
+ av_free((*st)->d->short_term_block_energy_histogram);
+ av_free((*st)->d->audio_data);
+ av_free((*st)->d->channel_map);
+ av_free((*st)->d->sample_peak);
+ av_free((*st)->d->data_ptrs);
+ av_free((*st)->d);
+ av_free(*st);
+ *st = NULL;
+}
+
+#define EBUR128_FILTER(type, scaling_factor) \
+static void ebur128_filter_##type(FFEBUR128State* st, const type** srcs, \
+ size_t src_index, size_t frames, \
+ int stride) { \
+ double* audio_data = st->d->audio_data + st->d->audio_data_index; \
+ size_t i, c; \
+ \
+ if ((st->mode & FF_EBUR128_MODE_SAMPLE_PEAK) == FF_EBUR128_MODE_SAMPLE_PEAK) { \
+ for (c = 0; c < st->channels; ++c) { \
+ double max = 0.0; \
+ for (i = 0; i < frames; ++i) { \
+ type v = srcs[c][src_index + i * stride]; \
+ if (v > max) { \
+ max = v; \
+ } else if (-v > max) { \
+ max = -1.0 * v; \
+ } \
+ } \
+ max /= scaling_factor; \
+ if (max > st->d->sample_peak[c]) st->d->sample_peak[c] = max; \
+ } \
+ } \
+ for (c = 0; c < st->channels; ++c) { \
+ int ci = st->d->channel_map[c] - 1; \
+ if (ci < 0) continue; \
+ else if (ci == FF_EBUR128_DUAL_MONO - 1) ci = 0; /*dual mono */ \
+ for (i = 0; i < frames; ++i) { \
+ st->d->v[ci][0] = (double) (srcs[c][src_index + i * stride] / scaling_factor) \
+ - st->d->a[1] * st->d->v[ci][1] \
+ - st->d->a[2] * st->d->v[ci][2] \
+ - st->d->a[3] * st->d->v[ci][3] \
+ - st->d->a[4] * st->d->v[ci][4]; \
+ audio_data[i * st->channels + c] = \
+ st->d->b[0] * st->d->v[ci][0] \
+ + st->d->b[1] * st->d->v[ci][1] \
+ + st->d->b[2] * st->d->v[ci][2] \
+ + st->d->b[3] * st->d->v[ci][3] \
+ + st->d->b[4] * st->d->v[ci][4]; \
+ st->d->v[ci][4] = st->d->v[ci][3]; \
+ st->d->v[ci][3] = st->d->v[ci][2]; \
+ st->d->v[ci][2] = st->d->v[ci][1]; \
+ st->d->v[ci][1] = st->d->v[ci][0]; \
+ } \
+ st->d->v[ci][4] = fabs(st->d->v[ci][4]) < DBL_MIN ? 0.0 : st->d->v[ci][4]; \
+ st->d->v[ci][3] = fabs(st->d->v[ci][3]) < DBL_MIN ? 0.0 : st->d->v[ci][3]; \
+ st->d->v[ci][2] = fabs(st->d->v[ci][2]) < DBL_MIN ? 0.0 : st->d->v[ci][2]; \
+ st->d->v[ci][1] = fabs(st->d->v[ci][1]) < DBL_MIN ? 0.0 : st->d->v[ci][1]; \
+ } \
+}
+EBUR128_FILTER(short, -((double)SHRT_MIN))
+EBUR128_FILTER(int, -((double)INT_MIN))
+EBUR128_FILTER(float, 1.0)
+EBUR128_FILTER(double, 1.0)
+
+static double ebur128_energy_to_loudness(double energy)
+{
+ return 10 * (log(energy) / log(10.0)) - 0.691;
+}
+
+static size_t find_histogram_index(double energy)
+{
+ size_t index_min = 0;
+ size_t index_max = 1000;
+ size_t index_mid;
+
+ do {
+ index_mid = (index_min + index_max) / 2;
+ if (energy >= histogram_energy_boundaries[index_mid]) {
+ index_min = index_mid;
+ } else {
+ index_max = index_mid;
+ }
+ } while (index_max - index_min != 1);
+
+ return index_min;
+}
+
+static void ebur128_calc_gating_block(FFEBUR128State * st,
+ size_t frames_per_block,
+ double *optional_output)
+{
+ size_t i, c;
+ double sum = 0.0;
+ double channel_sum;
+ for (c = 0; c < st->channels; ++c) {
+ if (st->d->channel_map[c] == FF_EBUR128_UNUSED)
+ continue;
+ channel_sum = 0.0;
+ if (st->d->audio_data_index < frames_per_block * st->channels) {
+ for (i = 0; i < st->d->audio_data_index / st->channels; ++i) {
+ channel_sum += st->d->audio_data[i * st->channels + c] *
+ st->d->audio_data[i * st->channels + c];
+ }
+ for (i = st->d->audio_data_frames -
+ (frames_per_block -
+ st->d->audio_data_index / st->channels);
+ i < st->d->audio_data_frames; ++i) {
+ channel_sum += st->d->audio_data[i * st->channels + c] *
+ st->d->audio_data[i * st->channels + c];
+ }
+ } else {
+ for (i =
+ st->d->audio_data_index / st->channels - frames_per_block;
+ i < st->d->audio_data_index / st->channels; ++i) {
+ channel_sum +=
+ st->d->audio_data[i * st->channels +
+ c] * st->d->audio_data[i *
+ st->channels +
+ c];
+ }
+ }
+ if (st->d->channel_map[c] == FF_EBUR128_Mp110 ||
+ st->d->channel_map[c] == FF_EBUR128_Mm110 ||
+ st->d->channel_map[c] == FF_EBUR128_Mp060 ||
+ st->d->channel_map[c] == FF_EBUR128_Mm060 ||
+ st->d->channel_map[c] == FF_EBUR128_Mp090 ||
+ st->d->channel_map[c] == FF_EBUR128_Mm090) {
+ channel_sum *= 1.41;
+ } else if (st->d->channel_map[c] == FF_EBUR128_DUAL_MONO) {
+ channel_sum *= 2.0;
+ }
+ sum += channel_sum;
+ }
+ sum /= (double) frames_per_block;
+ if (optional_output) {
+ *optional_output = sum;
+ } else if (sum >= histogram_energy_boundaries[0]) {
+ ++st->d->block_energy_histogram[find_histogram_index(sum)];
+ }
+}
+
+int ff_ebur128_set_channel(FFEBUR128State * st,
+ unsigned int channel_number, int value)
+{
+ if (channel_number >= st->channels) {
+ return 1;
+ }
+ if (value == FF_EBUR128_DUAL_MONO &&
+ (st->channels != 1 || channel_number != 0)) {
+ return 1;
+ }
+ st->d->channel_map[channel_number] = value;
+ return 0;
+}
+
+static int ebur128_energy_shortterm(FFEBUR128State * st, double *out);
+#define FF_EBUR128_ADD_FRAMES_PLANAR(type) \
+void ff_ebur128_add_frames_planar_##type(FFEBUR128State* st, const type** srcs, \
+ size_t frames, int stride) { \
+ size_t src_index = 0; \
+ while (frames > 0) { \
+ if (frames >= st->d->needed_frames) { \
+ ebur128_filter_##type(st, srcs, src_index, st->d->needed_frames, stride); \
+ src_index += st->d->needed_frames * stride; \
+ frames -= st->d->needed_frames; \
+ st->d->audio_data_index += st->d->needed_frames * st->channels; \
+ /* calculate the new gating block */ \
+ if ((st->mode & FF_EBUR128_MODE_I) == FF_EBUR128_MODE_I) { \
+ ebur128_calc_gating_block(st, st->d->samples_in_100ms * 4, NULL); \
+ } \
+ if ((st->mode & FF_EBUR128_MODE_LRA) == FF_EBUR128_MODE_LRA) { \
+ st->d->short_term_frame_counter += st->d->needed_frames; \
+ if (st->d->short_term_frame_counter == st->d->samples_in_100ms * 30) { \
+ double st_energy; \
+ ebur128_energy_shortterm(st, &st_energy); \
+ if (st_energy >= histogram_energy_boundaries[0]) { \
+ ++st->d->short_term_block_energy_histogram[ \
+ find_histogram_index(st_energy)]; \
+ } \
+ st->d->short_term_frame_counter = st->d->samples_in_100ms * 20; \
+ } \
+ } \
+ /* 100ms are needed for all blocks besides the first one */ \
+ st->d->needed_frames = st->d->samples_in_100ms; \
+ /* reset audio_data_index when buffer full */ \
+ if (st->d->audio_data_index == st->d->audio_data_frames * st->channels) { \
+ st->d->audio_data_index = 0; \
+ } \
+ } else { \
+ ebur128_filter_##type(st, srcs, src_index, frames, stride); \
+ st->d->audio_data_index += frames * st->channels; \
+ if ((st->mode & FF_EBUR128_MODE_LRA) == FF_EBUR128_MODE_LRA) { \
+ st->d->short_term_frame_counter += frames; \
+ } \
+ st->d->needed_frames -= frames; \
+ frames = 0; \
+ } \
+ } \
+}
+FF_EBUR128_ADD_FRAMES_PLANAR(short)
+FF_EBUR128_ADD_FRAMES_PLANAR(int)
+FF_EBUR128_ADD_FRAMES_PLANAR(float)
+FF_EBUR128_ADD_FRAMES_PLANAR(double)
+#define FF_EBUR128_ADD_FRAMES(type) \
+void ff_ebur128_add_frames_##type(FFEBUR128State* st, const type* src, \
+ size_t frames) { \
+ int i; \
+ const type **buf = (const type**)st->d->data_ptrs; \
+ for (i = 0; i < st->channels; i++) \
+ buf[i] = src + i; \
+ ff_ebur128_add_frames_planar_##type(st, buf, frames, st->channels); \
+}
+FF_EBUR128_ADD_FRAMES(short)
+FF_EBUR128_ADD_FRAMES(int)
+FF_EBUR128_ADD_FRAMES(float)
+FF_EBUR128_ADD_FRAMES(double)
+
+static int ebur128_calc_relative_threshold(FFEBUR128State **sts, size_t size,
+ double *relative_threshold)
+{
+ size_t i, j;
+ int above_thresh_counter = 0;
+ *relative_threshold = 0.0;
+
+ for (i = 0; i < size; i++) {
+ unsigned long *block_energy_histogram = sts[i]->d->block_energy_histogram;
+ for (j = 0; j < 1000; ++j) {
+ *relative_threshold += block_energy_histogram[j] * histogram_energies[j];
+ above_thresh_counter += block_energy_histogram[j];
+ }
+ }
+
+ if (above_thresh_counter != 0) {
+ *relative_threshold /= (double)above_thresh_counter;
+ *relative_threshold *= RELATIVE_GATE_FACTOR;
+ }
+
+ return above_thresh_counter;
+}
+
+static int ebur128_gated_loudness(FFEBUR128State ** sts, size_t size,
+ double *out)
+{
+ double gated_loudness = 0.0;
+ double relative_threshold;
+ size_t above_thresh_counter;
+ size_t i, j, start_index;
+
+ for (i = 0; i < size; i++)
+ if ((sts[i]->mode & FF_EBUR128_MODE_I) != FF_EBUR128_MODE_I)
+ return AVERROR(EINVAL);
+
+ if (!ebur128_calc_relative_threshold(sts, size, &relative_threshold)) {
+ *out = -HUGE_VAL;
+ return 0;
+ }
+
+ above_thresh_counter = 0;
+ if (relative_threshold < histogram_energy_boundaries[0]) {
+ start_index = 0;
+ } else {
+ start_index = find_histogram_index(relative_threshold);
+ if (relative_threshold > histogram_energies[start_index]) {
+ ++start_index;
+ }
+ }
+ for (i = 0; i < size; i++) {
+ for (j = start_index; j < 1000; ++j) {
+ gated_loudness += sts[i]->d->block_energy_histogram[j] *
+ histogram_energies[j];
+ above_thresh_counter += sts[i]->d->block_energy_histogram[j];
+ }
+ }
+ if (!above_thresh_counter) {
+ *out = -HUGE_VAL;
+ return 0;
+ }
+ gated_loudness /= (double) above_thresh_counter;
+ *out = ebur128_energy_to_loudness(gated_loudness);
+ return 0;
+}
+
+int ff_ebur128_relative_threshold(FFEBUR128State * st, double *out)
+{
+ double relative_threshold;
+
+ if ((st->mode & FF_EBUR128_MODE_I) != FF_EBUR128_MODE_I)
+ return AVERROR(EINVAL);
+
+ if (!ebur128_calc_relative_threshold(&st, 1, &relative_threshold)) {
+ *out = -70.0;
+ return 0;
+ }
+
+ *out = ebur128_energy_to_loudness(relative_threshold);
+ return 0;
+}
+
+int ff_ebur128_loudness_global(FFEBUR128State * st, double *out)
+{
+ return ebur128_gated_loudness(&st, 1, out);
+}
+
+int ff_ebur128_loudness_global_multiple(FFEBUR128State ** sts, size_t size,
+ double *out)
+{
+ return ebur128_gated_loudness(sts, size, out);
+}
+
+static int ebur128_energy_in_interval(FFEBUR128State * st,
+ size_t interval_frames, double *out)
+{
+ if (interval_frames > st->d->audio_data_frames) {
+ return AVERROR(EINVAL);
+ }
+ ebur128_calc_gating_block(st, interval_frames, out);
+ return 0;
+}
+
+static int ebur128_energy_shortterm(FFEBUR128State * st, double *out)
+{
+ return ebur128_energy_in_interval(st, st->d->samples_in_100ms * 30,
+ out);
+}
+
+int ff_ebur128_loudness_momentary(FFEBUR128State * st, double *out)
+{
+ double energy;
+ int error = ebur128_energy_in_interval(st, st->d->samples_in_100ms * 4,
+ &energy);
+ if (error) {
+ return error;
+ } else if (energy <= 0.0) {
+ *out = -HUGE_VAL;
+ return 0;
+ }
+ *out = ebur128_energy_to_loudness(energy);
+ return 0;
+}
+
+int ff_ebur128_loudness_shortterm(FFEBUR128State * st, double *out)
+{
+ double energy;
+ int error = ebur128_energy_shortterm(st, &energy);
+ if (error) {
+ return error;
+ } else if (energy <= 0.0) {
+ *out = -HUGE_VAL;
+ return 0;
+ }
+ *out = ebur128_energy_to_loudness(energy);
+ return 0;
+}
+
+int ff_ebur128_loudness_window(FFEBUR128State * st,
+ unsigned long window, double *out)
+{
+ double energy;
+ size_t interval_frames = st->samplerate * window / 1000;
+ int error = ebur128_energy_in_interval(st, interval_frames, &energy);
+ if (error) {
+ return error;
+ } else if (energy <= 0.0) {
+ *out = -HUGE_VAL;
+ return 0;
+ }
+ *out = ebur128_energy_to_loudness(energy);
+ return 0;
+}
+
+/* EBU - TECH 3342 */
+int ff_ebur128_loudness_range_multiple(FFEBUR128State ** sts, size_t size,
+ double *out)
+{
+ size_t i, j;
+ size_t stl_size;
+ double stl_power, stl_integrated;
+ /* High and low percentile energy */
+ double h_en, l_en;
+ unsigned long hist[1000] = { 0 };
+ size_t percentile_low, percentile_high;
+ size_t index;
+
+ for (i = 0; i < size; ++i) {
+ if (sts[i]) {
+ if ((sts[i]->mode & FF_EBUR128_MODE_LRA) !=
+ FF_EBUR128_MODE_LRA) {
+ return AVERROR(EINVAL);
+ }
+ }
+ }
+
+ stl_size = 0;
+ stl_power = 0.0;
+ for (i = 0; i < size; ++i) {
+ if (!sts[i])
+ continue;
+ for (j = 0; j < 1000; ++j) {
+ hist[j] += sts[i]->d->short_term_block_energy_histogram[j];
+ stl_size += sts[i]->d->short_term_block_energy_histogram[j];
+ stl_power += sts[i]->d->short_term_block_energy_histogram[j]
+ * histogram_energies[j];
+ }
+ }
+ if (!stl_size) {
+ *out = 0.0;
+ return 0;
+ }
+
+ stl_power /= stl_size;
+ stl_integrated = MINUS_20DB * stl_power;
+
+ if (stl_integrated < histogram_energy_boundaries[0]) {
+ index = 0;
+ } else {
+ index = find_histogram_index(stl_integrated);
+ if (stl_integrated > histogram_energies[index]) {
+ ++index;
+ }
+ }
+ stl_size = 0;
+ for (j = index; j < 1000; ++j) {
+ stl_size += hist[j];
+ }
+ if (!stl_size) {
+ *out = 0.0;
+ return 0;
+ }
+
+ percentile_low = (size_t) ((stl_size - 1) * 0.1 + 0.5);
+ percentile_high = (size_t) ((stl_size - 1) * 0.95 + 0.5);
+
+ stl_size = 0;
+ j = index;
+ while (stl_size <= percentile_low) {
+ stl_size += hist[j++];
+ }
+ l_en = histogram_energies[j - 1];
+ while (stl_size <= percentile_high) {
+ stl_size += hist[j++];
+ }
+ h_en = histogram_energies[j - 1];
+ *out =
+ ebur128_energy_to_loudness(h_en) -
+ ebur128_energy_to_loudness(l_en);
+ return 0;
+}
+
+int ff_ebur128_loudness_range(FFEBUR128State * st, double *out)
+{
+ return ff_ebur128_loudness_range_multiple(&st, 1, out);
+}
+
+int ff_ebur128_sample_peak(FFEBUR128State * st,
+ unsigned int channel_number, double *out)
+{
+ if ((st->mode & FF_EBUR128_MODE_SAMPLE_PEAK) !=
+ FF_EBUR128_MODE_SAMPLE_PEAK) {
+ return AVERROR(EINVAL);
+ } else if (channel_number >= st->channels) {
+ return AVERROR(EINVAL);
+ }
+ *out = st->d->sample_peak[channel_number];
+ return 0;
+}
diff --git a/libavfilter/ebur128.h b/libavfilter/ebur128.h
new file mode 100644
index 0000000000..b94cd24928
--- /dev/null
+++ b/libavfilter/ebur128.h
@@ -0,0 +1,296 @@
+/*
+ * Copyright (c) 2011 Jan Kokemüller
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * This file is based on libebur128 which is available at
+ * https://github.com/jiixyj/libebur128/
+ *
+*/
+
+#ifndef AVFILTER_EBUR128_H
+#define AVFILTER_EBUR128_H
+
+/** \file ebur128.h
+ * \brief libebur128 - a library for loudness measurement according to
+ * the EBU R128 standard.
+ */
+
+#include <stddef.h> /* for size_t */
+
+/** \enum channel
+ * Use these values when setting the channel map with ebur128_set_channel().
+ * See definitions in ITU R-REC-BS 1770-4
+ */
+enum channel {
+ FF_EBUR128_UNUSED = 0, /**< unused channel (for example LFE channel) */
+ FF_EBUR128_LEFT,
+ FF_EBUR128_Mp030 = 1, /**< itu M+030 */
+ FF_EBUR128_RIGHT,
+ FF_EBUR128_Mm030 = 2, /**< itu M-030 */
+ FF_EBUR128_CENTER,
+ FF_EBUR128_Mp000 = 3, /**< itu M+000 */
+ FF_EBUR128_LEFT_SURROUND,
+ FF_EBUR128_Mp110 = 4, /**< itu M+110 */
+ FF_EBUR128_RIGHT_SURROUND,
+ FF_EBUR128_Mm110 = 5, /**< itu M-110 */
+ FF_EBUR128_DUAL_MONO, /**< a channel that is counted twice */
+ FF_EBUR128_MpSC, /**< itu M+SC */
+ FF_EBUR128_MmSC, /**< itu M-SC */
+ FF_EBUR128_Mp060, /**< itu M+060 */
+ FF_EBUR128_Mm060, /**< itu M-060 */
+ FF_EBUR128_Mp090, /**< itu M+090 */
+ FF_EBUR128_Mm090, /**< itu M-090 */
+ FF_EBUR128_Mp135, /**< itu M+135 */
+ FF_EBUR128_Mm135, /**< itu M-135 */
+ FF_EBUR128_Mp180, /**< itu M+180 */
+ FF_EBUR128_Up000, /**< itu U+000 */
+ FF_EBUR128_Up030, /**< itu U+030 */
+ FF_EBUR128_Um030, /**< itu U-030 */
+ FF_EBUR128_Up045, /**< itu U+045 */
+ FF_EBUR128_Um045, /**< itu U-030 */
+ FF_EBUR128_Up090, /**< itu U+090 */
+ FF_EBUR128_Um090, /**< itu U-090 */
+ FF_EBUR128_Up110, /**< itu U+110 */
+ FF_EBUR128_Um110, /**< itu U-110 */
+ FF_EBUR128_Up135, /**< itu U+135 */
+ FF_EBUR128_Um135, /**< itu U-135 */
+ FF_EBUR128_Up180, /**< itu U+180 */
+ FF_EBUR128_Tp000, /**< itu T+000 */
+ FF_EBUR128_Bp000, /**< itu B+000 */
+ FF_EBUR128_Bp045, /**< itu B+045 */
+ FF_EBUR128_Bm045 /**< itu B-045 */
+};
+
+/** \enum mode
+ * Use these values in ebur128_init (or'ed). Try to use the lowest possible
+ * modes that suit your needs, as performance will be better.
+ */
+enum mode {
+ /** can call ff_ebur128_loudness_momentary */
+ FF_EBUR128_MODE_M = (1 << 0),
+ /** can call ff_ebur128_loudness_shortterm */
+ FF_EBUR128_MODE_S = (1 << 1) | FF_EBUR128_MODE_M,
+ /** can call ff_ebur128_loudness_global_* and ff_ebur128_relative_threshold */
+ FF_EBUR128_MODE_I = (1 << 2) | FF_EBUR128_MODE_M,
+ /** can call ff_ebur128_loudness_range */
+ FF_EBUR128_MODE_LRA = (1 << 3) | FF_EBUR128_MODE_S,
+ /** can call ff_ebur128_sample_peak */
+ FF_EBUR128_MODE_SAMPLE_PEAK = (1 << 4) | FF_EBUR128_MODE_M,
+};
+
+/** forward declaration of FFEBUR128StateInternal */
+struct FFEBUR128StateInternal;
+
+/** \brief Contains information about the state of a loudness measurement.
+ *
+ * You should not need to modify this struct directly.
+ */
+typedef struct {
+ int mode; /**< The current mode. */
+ unsigned int channels; /**< The number of channels. */
+ unsigned long samplerate; /**< The sample rate. */
+ struct FFEBUR128StateInternal *d; /**< Internal state. */
+} FFEBUR128State;
+
+/** \brief Initialize library state.
+ *
+ * @param channels the number of channels.
+ * @param samplerate the sample rate.
+ * @param window set the maximum window size in ms, set to 0 for auto.
+ * @param mode see the mode enum for possible values.
+ * @return an initialized library state.
+ */
+FFEBUR128State *ff_ebur128_init(unsigned int channels,
+ unsigned long samplerate,
+ unsigned long window, int mode);
+
+/** \brief Destroy library state.
+ *
+ * @param st pointer to a library state.
+ */
+void ff_ebur128_destroy(FFEBUR128State ** st);
+
+/** \brief Set channel type.
+ *
+ * The default is:
+ * - 0 -> FF_EBUR128_LEFT
+ * - 1 -> FF_EBUR128_RIGHT
+ * - 2 -> FF_EBUR128_CENTER
+ * - 3 -> FF_EBUR128_UNUSED
+ * - 4 -> FF_EBUR128_LEFT_SURROUND
+ * - 5 -> FF_EBUR128_RIGHT_SURROUND
+ *
+ * @param st library state.
+ * @param channel_number zero based channel index.
+ * @param value channel type from the "channel" enum.
+ * @return
+ * - 0 on success.
+ * - AVERROR(EINVAL) if invalid channel index.
+ */
+int ff_ebur128_set_channel(FFEBUR128State * st,
+ unsigned int channel_number, int value);
+
+/** \brief Add frames to be processed.
+ *
+ * @param st library state.
+ * @param src array of source frames. Channels must be interleaved.
+ * @param frames number of frames. Not number of samples!
+ */
+void ff_ebur128_add_frames_short(FFEBUR128State * st,
+ const short *src, size_t frames);
+/** \brief See \ref ebur128_add_frames_short */
+void ff_ebur128_add_frames_int(FFEBUR128State * st,
+ const int *src, size_t frames);
+/** \brief See \ref ebur128_add_frames_short */
+void ff_ebur128_add_frames_float(FFEBUR128State * st,
+ const float *src, size_t frames);
+/** \brief See \ref ebur128_add_frames_short */
+void ff_ebur128_add_frames_double(FFEBUR128State * st,
+ const double *src, size_t frames);
+
+/** \brief Add frames to be processed.
+ *
+ * @param st library state.
+ * @param srcs array of source frame channel data pointers
+ * @param frames number of frames. Not number of samples!
+ * @param stride number of samples to skip to for the next sample of the same channel
+ */
+void ff_ebur128_add_frames_planar_short(FFEBUR128State * st,
+ const short **srcs,
+ size_t frames, int stride);
+/** \brief See \ref ebur128_add_frames_planar_short */
+void ff_ebur128_add_frames_planar_int(FFEBUR128State * st,
+ const int **srcs,
+ size_t frames, int stride);
+/** \brief See \ref ebur128_add_frames_planar_short */
+void ff_ebur128_add_frames_planar_float(FFEBUR128State * st,
+ const float **srcs,
+ size_t frames, int stride);
+/** \brief See \ref ebur128_add_frames_planar_short */
+void ff_ebur128_add_frames_planar_double(FFEBUR128State * st,
+ const double **srcs,
+ size_t frames, int stride);
+
+/** \brief Get global integrated loudness in LUFS.
+ *
+ * @param st library state.
+ * @param out integrated loudness in LUFS. -HUGE_VAL if result is negative
+ * infinity.
+ * @return
+ * - 0 on success.
+ * - AVERROR(EINVAL) if mode "FF_EBUR128_MODE_I" has not been set.
+ */
+int ff_ebur128_loudness_global(FFEBUR128State * st, double *out);
+/** \brief Get global integrated loudness in LUFS across multiple instances.
+ *
+ * @param sts array of library states.
+ * @param size length of sts
+ * @param out integrated loudness in LUFS. -HUGE_VAL if result is negative
+ * infinity.
+ * @return
+ * - 0 on success.
+ * - AVERROR(EINVAL) if mode "FF_EBUR128_MODE_I" has not been set.
+ */
+int ff_ebur128_loudness_global_multiple(FFEBUR128State ** sts,
+ size_t size, double *out);
+
+/** \brief Get momentary loudness (last 400ms) in LUFS.
+ *
+ * @param st library state.
+ * @param out momentary loudness in LUFS. -HUGE_VAL if result is negative
+ * infinity.
+ * @return
+ * - 0 on success.
+ */
+int ff_ebur128_loudness_momentary(FFEBUR128State * st, double *out);
+/** \brief Get short-term loudness (last 3s) in LUFS.
+ *
+ * @param st library state.
+ * @param out short-term loudness in LUFS. -HUGE_VAL if result is negative
+ * infinity.
+ * @return
+ * - 0 on success.
+ * - AVERROR(EINVAL) if mode "FF_EBUR128_MODE_S" has not been set.
+ */
+int ff_ebur128_loudness_shortterm(FFEBUR128State * st, double *out);
+
+/** \brief Get loudness of the specified window in LUFS.
+ *
+ * window must not be larger than the current window set in st.
+ *
+ * @param st library state.
+ * @param window window in ms to calculate loudness.
+ * @param out loudness in LUFS. -HUGE_VAL if result is negative infinity.
+ * @return
+ * - 0 on success.
+ * - AVERROR(EINVAL) if window larger than current window in st.
+ */
+int ff_ebur128_loudness_window(FFEBUR128State * st,
+ unsigned long window, double *out);
+
+/** \brief Get loudness range (LRA) of programme in LU.
+ *
+ * Calculates loudness range according to EBU 3342.
+ *
+ * @param st library state.
+ * @param out loudness range (LRA) in LU. Will not be changed in case of
+ * error. AVERROR(EINVAL) will be returned in this case.
+ * @return
+ * - 0 on success.
+ * - AVERROR(EINVAL) if mode "FF_EBUR128_MODE_LRA" has not been set.
+ */
+int ff_ebur128_loudness_range(FFEBUR128State * st, double *out);
+/** \brief Get loudness range (LRA) in LU across multiple instances.
+ *
+ * Calculates loudness range according to EBU 3342.
+ *
+ * @param sts array of library states.
+ * @param size length of sts
+ * @param out loudness range (LRA) in LU. Will not be changed in case of
+ * error. AVERROR(EINVAL) will be returned in this case.
+ * @return
+ * - 0 on success.
+ * - AVERROR(EINVAL) if mode "FF_EBUR128_MODE_LRA" has not been set.
+ */
+int ff_ebur128_loudness_range_multiple(FFEBUR128State ** sts,
+ size_t size, double *out);
+
+/** \brief Get maximum sample peak of selected channel in float format.
+ *
+ * @param st library state
+ * @param channel_number channel to analyse
+ * @param out maximum sample peak in float format (1.0 is 0 dBFS)
+ * @return
+ * - 0 on success.
+ * - AVERROR(EINVAL) if mode "FF_EBUR128_MODE_SAMPLE_PEAK" has not been set.
+ * - AVERROR(EINVAL) if invalid channel index.
+ */
+int ff_ebur128_sample_peak(FFEBUR128State * st,
+ unsigned int channel_number, double *out);
+
+/** \brief Get relative threshold in LUFS.
+ *
+ * @param st library state
+ * @param out relative threshold in LUFS.
+ * @return
+ * - 0 on success.
+ * - AVERROR(EINVAL) if mode "FF_EBUR128_MODE_I" has not been set.
+ */
+int ff_ebur128_relative_threshold(FFEBUR128State * st, double *out);
+
+#endif /* AVFILTER_EBUR128_H */
diff --git a/libavfilter/f_bench.c b/libavfilter/f_bench.c
new file mode 100644
index 0000000000..b7b179209a
--- /dev/null
+++ b/libavfilter/f_bench.c
@@ -0,0 +1,151 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/opt.h"
+#include "libavutil/time.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+
+enum BenchAction {
+ ACTION_START,
+ ACTION_STOP,
+ NB_ACTION
+};
+
+typedef struct {
+ const AVClass *class;
+ int action;
+ int64_t max, min;
+ int64_t sum;
+ int n;
+} BenchContext;
+
+#define OFFSET(x) offsetof(BenchContext, x)
+#define DEFINE_OPTIONS(filt_name, FLAGS) \
+static const AVOption filt_name##_options[] = { \
+ { "action", "set action", OFFSET(action), AV_OPT_TYPE_INT, {.i64=ACTION_START}, 0, NB_ACTION-1, FLAGS, "action" }, \
+ { "start", "start timer", 0, AV_OPT_TYPE_CONST, {.i64=ACTION_START}, INT_MIN, INT_MAX, FLAGS, "action" }, \
+ { "stop", "stop timer", 0, AV_OPT_TYPE_CONST, {.i64=ACTION_STOP}, INT_MIN, INT_MAX, FLAGS, "action" }, \
+ { NULL } \
+}
+
+#define START_TIME_KEY "lavfi.bench.start_time"
+#define T2F(v) ((v) / 1000000.)
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ BenchContext *s = ctx->priv;
+ s->min = INT64_MAX;
+ s->max = INT64_MIN;
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ BenchContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ const int64_t t = av_gettime();
+
+ if (t < 0)
+ return ff_filter_frame(outlink, in);
+
+ if (s->action == ACTION_START) {
+ av_dict_set_int(&in->metadata, START_TIME_KEY, t, 0);
+ } else if (s->action == ACTION_STOP) {
+ AVDictionaryEntry *e = av_dict_get(in->metadata, START_TIME_KEY, NULL, 0);
+ if (e) {
+ const int64_t start = strtoll(e->value, NULL, 0);
+ const int64_t diff = t - start;
+ s->sum += diff;
+ s->n++;
+ s->min = FFMIN(s->min, diff);
+ s->max = FFMAX(s->max, diff);
+ av_log(s, AV_LOG_INFO, "t:%f avg:%f max:%f min:%f\n",
+ T2F(diff), T2F(s->sum / s->n), T2F(s->max), T2F(s->min));
+ }
+ av_dict_set(&in->metadata, START_TIME_KEY, NULL, 0);
+ }
+
+ return ff_filter_frame(outlink, in);
+}
+
+#if CONFIG_BENCH_FILTER
+DEFINE_OPTIONS(bench, AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM);
+AVFILTER_DEFINE_CLASS(bench);
+
+static const AVFilterPad bench_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad bench_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_bench = {
+ .name = "bench",
+ .description = NULL_IF_CONFIG_SMALL("Benchmark part of a filtergraph."),
+ .priv_size = sizeof(BenchContext),
+ .init = init,
+ .inputs = bench_inputs,
+ .outputs = bench_outputs,
+ .priv_class = &bench_class,
+};
+#endif /* CONFIG_BENCH_FILTER */
+
+#if CONFIG_ABENCH_FILTER
+DEFINE_OPTIONS(abench, AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_AUDIO_PARAM);
+AVFILTER_DEFINE_CLASS(abench);
+
+static const AVFilterPad abench_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad abench_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_abench = {
+ .name = "abench",
+ .description = NULL_IF_CONFIG_SMALL("Benchmark part of a filtergraph."),
+ .priv_size = sizeof(BenchContext),
+ .init = init,
+ .inputs = abench_inputs,
+ .outputs = abench_outputs,
+ .priv_class = &abench_class,
+};
+#endif /* CONFIG_ABENCH_FILTER */
diff --git a/libavfilter/f_drawgraph.c b/libavfilter/f_drawgraph.c
new file mode 100644
index 0000000000..4c705fe851
--- /dev/null
+++ b/libavfilter/f_drawgraph.c
@@ -0,0 +1,501 @@
+/*
+ * Copyright (c) 2015 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "float.h"
+
+#include "libavutil/eval.h"
+#include "libavutil/intreadwrite.h"
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+typedef struct DrawGraphContext {
+ const AVClass *class;
+
+ char *key[4];
+ float min, max;
+ char *fg_str[4];
+ AVExpr *fg_expr[4];
+ uint8_t bg[4];
+ int mode;
+ int slide;
+ int w, h;
+
+ AVFrame *out;
+ int x;
+ int prev_y[4];
+ int first;
+ float *values[4];
+ int values_size[4];
+ int nb_values;
+} DrawGraphContext;
+
+#define OFFSET(x) offsetof(DrawGraphContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption drawgraph_options[] = {
+ { "m1", "set 1st metadata key", OFFSET(key[0]), AV_OPT_TYPE_STRING, {.str=""}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "fg1", "set 1st foreground color expression", OFFSET(fg_str[0]), AV_OPT_TYPE_STRING, {.str="0xffff0000"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "m2", "set 2nd metadata key", OFFSET(key[1]), AV_OPT_TYPE_STRING, {.str=""}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "fg2", "set 2nd foreground color expression", OFFSET(fg_str[1]), AV_OPT_TYPE_STRING, {.str="0xff00ff00"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "m3", "set 3rd metadata key", OFFSET(key[2]), AV_OPT_TYPE_STRING, {.str=""}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "fg3", "set 3rd foreground color expression", OFFSET(fg_str[2]), AV_OPT_TYPE_STRING, {.str="0xffff00ff"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "m4", "set 4th metadata key", OFFSET(key[3]), AV_OPT_TYPE_STRING, {.str=""}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "fg4", "set 4th foreground color expression", OFFSET(fg_str[3]), AV_OPT_TYPE_STRING, {.str="0xffffff00"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "bg", "set background color", OFFSET(bg), AV_OPT_TYPE_COLOR, {.str="white"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "min", "set minimal value", OFFSET(min), AV_OPT_TYPE_FLOAT, {.dbl=-1.}, INT_MIN, INT_MAX, FLAGS },
+ { "max", "set maximal value", OFFSET(max), AV_OPT_TYPE_FLOAT, {.dbl=1.}, INT_MIN, INT_MAX, FLAGS },
+ { "mode", "set graph mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=2}, 0, 2, FLAGS, "mode" },
+ {"bar", "draw bars", OFFSET(mode), AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "mode"},
+ {"dot", "draw dots", OFFSET(mode), AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "mode"},
+ {"line", "draw lines", OFFSET(mode), AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, FLAGS, "mode"},
+ { "slide", "set slide mode", OFFSET(slide), AV_OPT_TYPE_INT, {.i64=0}, 0, 4, FLAGS, "slide" },
+ {"frame", "draw new frames", OFFSET(slide), AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "slide"},
+ {"replace", "replace old columns with new", OFFSET(slide), AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "slide"},
+ {"scroll", "scroll from right to left", OFFSET(slide), AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, FLAGS, "slide"},
+ {"rscroll", "scroll from left to right", OFFSET(slide), AV_OPT_TYPE_CONST, {.i64=3}, 0, 0, FLAGS, "slide"},
+ {"picture", "display graph in single frame", OFFSET(slide), AV_OPT_TYPE_CONST, {.i64=4}, 0, 0, FLAGS, "slide"},
+ { "size", "set graph size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="900x256"}, 0, 0, FLAGS },
+ { "s", "set graph size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="900x256"}, 0, 0, FLAGS },
+ { NULL }
+};
+
+static const char *const var_names[] = { "MAX", "MIN", "VAL", NULL };
+enum { VAR_MAX, VAR_MIN, VAR_VAL, VAR_VARS_NB };
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ DrawGraphContext *s = ctx->priv;
+ int ret, i;
+
+ if (s->max <= s->min) {
+ av_log(ctx, AV_LOG_ERROR, "max is same or lower than min\n");
+ return AVERROR(EINVAL);
+ }
+
+ for (i = 0; i < 4; i++) {
+ if (s->fg_str[i]) {
+ ret = av_expr_parse(&s->fg_expr[i], s->fg_str[i], var_names,
+ NULL, NULL, NULL, NULL, 0, ctx);
+
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ s->first = 1;
+
+ if (s->slide == 4) {
+ s->values[0] = av_fast_realloc(NULL, &s->values_size[0], 2000);
+ s->values[1] = av_fast_realloc(NULL, &s->values_size[1], 2000);
+ s->values[2] = av_fast_realloc(NULL, &s->values_size[2], 2000);
+ s->values[3] = av_fast_realloc(NULL, &s->values_size[3], 2000);
+
+ if (!s->values[0] || !s->values[1] ||
+ !s->values[2] || !s->values[3]) {
+ return AVERROR(ENOMEM);
+ }
+ }
+
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterLink *outlink = ctx->outputs[0];
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_RGBA,
+ AV_PIX_FMT_NONE
+ };
+ int ret;
+
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if ((ret = ff_formats_ref(fmts_list, &outlink->in_formats)) < 0)
+ return ret;
+
+ return 0;
+}
+
+static void clear_image(DrawGraphContext *s, AVFrame *out, AVFilterLink *outlink)
+{
+ int i, j;
+ int bg = AV_RN32(s->bg);
+
+ for (i = 0; i < out->height; i++)
+ for (j = 0; j < out->width; j++)
+ AV_WN32(out->data[0] + i * out->linesize[0] + j * 4, bg);
+}
+
+static inline void draw_dot(int fg, int x, int y, AVFrame *out)
+{
+ AV_WN32(out->data[0] + y * out->linesize[0] + x * 4, fg);
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ DrawGraphContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVDictionary *metadata;
+ AVDictionaryEntry *e;
+ AVFrame *out = s->out;
+ int i;
+
+ if (s->slide == 4 && s->nb_values >= s->values_size[0] / sizeof(float)) {
+ float *ptr;
+
+ ptr = av_fast_realloc(s->values[0], &s->values_size[0], s->values_size[0] * 2);
+ if (!ptr)
+ return AVERROR(ENOMEM);
+ s->values[0] = ptr;
+
+ ptr = av_fast_realloc(s->values[1], &s->values_size[1], s->values_size[1] * 2);
+ if (!ptr)
+ return AVERROR(ENOMEM);
+ s->values[1] = ptr;
+
+ ptr = av_fast_realloc(s->values[2], &s->values_size[2], s->values_size[2] * 2);
+ if (!ptr)
+ return AVERROR(ENOMEM);
+ s->values[2] = ptr;
+
+ ptr = av_fast_realloc(s->values[3], &s->values_size[3], s->values_size[3] * 2);
+ if (!ptr)
+ return AVERROR(ENOMEM);
+ s->values[3] = ptr;
+ }
+
+ if (s->slide != 4 || s->nb_values == 0) {
+ if (!s->out || s->out->width != outlink->w ||
+ s->out->height != outlink->h) {
+ av_frame_free(&s->out);
+ s->out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ out = s->out;
+ if (!s->out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+
+ clear_image(s, out, outlink);
+ }
+ av_frame_copy_props(out, in);
+ }
+
+ metadata = av_frame_get_metadata(in);
+
+ for (i = 0; i < 4; i++) {
+ double values[VAR_VARS_NB];
+ int j, y, x, old;
+ uint32_t fg, bg;
+ float vf;
+
+ if (s->slide == 4)
+ s->values[i][s->nb_values] = NAN;
+
+ e = av_dict_get(metadata, s->key[i], NULL, 0);
+ if (!e || !e->value)
+ continue;
+
+ if (sscanf(e->value, "%f", &vf) != 1)
+ continue;
+
+ vf = av_clipf(vf, s->min, s->max);
+
+ if (s->slide == 4) {
+ s->values[i][s->nb_values] = vf;
+ continue;
+ }
+
+ values[VAR_MIN] = s->min;
+ values[VAR_MAX] = s->max;
+ values[VAR_VAL] = vf;
+
+ fg = av_expr_eval(s->fg_expr[i], values, NULL);
+ bg = AV_RN32(s->bg);
+
+ if (i == 0 && (s->x >= outlink->w || s->slide == 3)) {
+ if (s->slide == 0 || s->slide == 1)
+ s->x = 0;
+
+ if (s->slide == 2) {
+ s->x = outlink->w - 1;
+ for (j = 0; j < outlink->h; j++) {
+ memmove(out->data[0] + j * out->linesize[0] ,
+ out->data[0] + j * out->linesize[0] + 4,
+ (outlink->w - 1) * 4);
+ }
+ } else if (s->slide == 3) {
+ s->x = 0;
+ for (j = 0; j < outlink->h; j++) {
+ memmove(out->data[0] + j * out->linesize[0] + 4,
+ out->data[0] + j * out->linesize[0],
+ (outlink->w - 1) * 4);
+ }
+ } else if (s->slide == 0) {
+ clear_image(s, out, outlink);
+ }
+ }
+
+ x = s->x;
+ y = (outlink->h - 1) * (1 - ((vf - s->min) / (s->max - s->min)));
+
+ switch (s->mode) {
+ case 0:
+ if (i == 0 && (s->slide > 0))
+ for (j = 0; j < outlink->h; j++)
+ draw_dot(bg, x, j, out);
+
+ old = AV_RN32(out->data[0] + y * out->linesize[0] + x * 4);
+ for (j = y; j < outlink->h; j++) {
+ if (old != bg &&
+ (AV_RN32(out->data[0] + j * out->linesize[0] + x * 4) != old) ||
+ AV_RN32(out->data[0] + FFMIN(j+1, outlink->h - 1) * out->linesize[0] + x * 4) != old) {
+ draw_dot(fg, x, j, out);
+ break;
+ }
+ draw_dot(fg, x, j, out);
+ }
+ break;
+ case 1:
+ if (i == 0 && (s->slide > 0))
+ for (j = 0; j < outlink->h; j++)
+ draw_dot(bg, x, j, out);
+ draw_dot(fg, x, y, out);
+ break;
+ case 2:
+ if (s->first) {
+ s->first = 0;
+ s->prev_y[i] = y;
+ }
+
+ if (i == 0 && (s->slide > 0)) {
+ for (j = 0; j < y; j++)
+ draw_dot(bg, x, j, out);
+ for (j = outlink->h - 1; j > y; j--)
+ draw_dot(bg, x, j, out);
+ }
+ if (y <= s->prev_y[i]) {
+ for (j = y; j <= s->prev_y[i]; j++)
+ draw_dot(fg, x, j, out);
+ } else {
+ for (j = s->prev_y[i]; j <= y; j++)
+ draw_dot(fg, x, j, out);
+ }
+ s->prev_y[i] = y;
+ break;
+ }
+ }
+
+ s->nb_values++;
+ s->x++;
+
+ av_frame_free(&in);
+
+ if (s->slide == 4)
+ return 0;
+
+ return ff_filter_frame(outlink, av_frame_clone(s->out));
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ DrawGraphContext *s = ctx->priv;
+ AVFrame *out = s->out;
+ int ret, i, k, step, l;
+
+ ret = ff_request_frame(ctx->inputs[0]);
+
+ if (s->slide == 4 && ret == AVERROR_EOF && s->nb_values > 0) {
+ s->x = l = 0;
+ step = ceil(s->nb_values / (float)s->w);
+
+ for (k = 0; k < s->nb_values; k++) {
+ for (i = 0; i < 4; i++) {
+ double values[VAR_VARS_NB];
+ int j, y, x, old;
+ uint32_t fg, bg;
+ float vf = s->values[i][k];
+
+ if (isnan(vf))
+ continue;
+
+ values[VAR_MIN] = s->min;
+ values[VAR_MAX] = s->max;
+ values[VAR_VAL] = vf;
+
+ fg = av_expr_eval(s->fg_expr[i], values, NULL);
+ bg = AV_RN32(s->bg);
+
+ x = s->x;
+ y = (outlink->h - 1) * (1 - ((vf - s->min) / (s->max - s->min)));
+
+ switch (s->mode) {
+ case 0:
+ old = AV_RN32(out->data[0] + y * out->linesize[0] + x * 4);
+ for (j = y; j < outlink->h; j++) {
+ if (old != bg &&
+ (AV_RN32(out->data[0] + j * out->linesize[0] + x * 4) != old) ||
+ AV_RN32(out->data[0] + FFMIN(j+1, outlink->h - 1) * out->linesize[0] + x * 4) != old) {
+ draw_dot(fg, x, j, out);
+ break;
+ }
+ draw_dot(fg, x, j, out);
+ }
+ break;
+ case 1:
+ draw_dot(fg, x, y, out);
+ break;
+ case 2:
+ if (s->first) {
+ s->first = 0;
+ s->prev_y[i] = y;
+ }
+
+ if (y <= s->prev_y[i]) {
+ for (j = y; j <= s->prev_y[i]; j++)
+ draw_dot(fg, x, j, out);
+ } else {
+ for (j = s->prev_y[i]; j <= y; j++)
+ draw_dot(fg, x, j, out);
+ }
+ s->prev_y[i] = y;
+ break;
+ }
+ }
+
+ l++;
+ if (l >= step) {
+ l = 0;
+ s->x++;
+ }
+ }
+
+ s->nb_values = 0;
+ out->pts = 0;
+ ret = ff_filter_frame(ctx->outputs[0], s->out);
+ }
+
+ return ret;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ DrawGraphContext *s = outlink->src->priv;
+
+ outlink->w = s->w;
+ outlink->h = s->h;
+ outlink->sample_aspect_ratio = (AVRational){1,1};
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ DrawGraphContext *s = ctx->priv;
+ int i;
+
+ for (i = 0; i < 4; i++)
+ av_expr_free(s->fg_expr[i]);
+
+ if (s->slide != 4)
+ av_frame_free(&s->out);
+
+ av_freep(&s->values[0]);
+ av_freep(&s->values[1]);
+ av_freep(&s->values[2]);
+ av_freep(&s->values[3]);
+}
+
+#if CONFIG_DRAWGRAPH_FILTER
+
+AVFILTER_DEFINE_CLASS(drawgraph);
+
+static const AVFilterPad drawgraph_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad drawgraph_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_drawgraph = {
+ .name = "drawgraph",
+ .description = NULL_IF_CONFIG_SMALL("Draw a graph using input video metadata."),
+ .priv_size = sizeof(DrawGraphContext),
+ .priv_class = &drawgraph_class,
+ .query_formats = query_formats,
+ .init = init,
+ .uninit = uninit,
+ .inputs = drawgraph_inputs,
+ .outputs = drawgraph_outputs,
+};
+
+#endif // CONFIG_DRAWGRAPH_FILTER
+
+#if CONFIG_ADRAWGRAPH_FILTER
+
+#define adrawgraph_options drawgraph_options
+AVFILTER_DEFINE_CLASS(adrawgraph);
+
+static const AVFilterPad adrawgraph_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad adrawgraph_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_avf_adrawgraph = {
+ .name = "adrawgraph",
+ .description = NULL_IF_CONFIG_SMALL("Draw a graph using input audio metadata."),
+ .priv_size = sizeof(DrawGraphContext),
+ .priv_class = &adrawgraph_class,
+ .query_formats = query_formats,
+ .init = init,
+ .uninit = uninit,
+ .inputs = adrawgraph_inputs,
+ .outputs = adrawgraph_outputs,
+};
+#endif // CONFIG_ADRAWGRAPH_FILTER
diff --git a/libavfilter/f_ebur128.c b/libavfilter/f_ebur128.c
new file mode 100644
index 0000000000..1fd85bbf79
--- /dev/null
+++ b/libavfilter/f_ebur128.c
@@ -0,0 +1,950 @@
+/*
+ * Copyright (c) 2012 Clément Bœsch
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * EBU R.128 implementation
+ * @see http://tech.ebu.ch/loudness
+ * @see https://www.youtube.com/watch?v=iuEtQqC-Sqo "EBU R128 Introduction - Florian Camerer"
+ * @todo implement start/stop/reset through filter command injection
+ * @todo support other frequencies to avoid resampling
+ */
+
+#include <math.h>
+
+#include "libavutil/avassert.h"
+#include "libavutil/avstring.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/dict.h"
+#include "libavutil/ffmath.h"
+#include "libavutil/xga_font_data.h"
+#include "libavutil/opt.h"
+#include "libavutil/timestamp.h"
+#include "libswresample/swresample.h"
+#include "audio.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+
+#define MAX_CHANNELS 63
+
+/* pre-filter coefficients */
+#define PRE_B0 1.53512485958697
+#define PRE_B1 -2.69169618940638
+#define PRE_B2 1.19839281085285
+#define PRE_A1 -1.69065929318241
+#define PRE_A2 0.73248077421585
+
+/* RLB-filter coefficients */
+#define RLB_B0 1.0
+#define RLB_B1 -2.0
+#define RLB_B2 1.0
+#define RLB_A1 -1.99004745483398
+#define RLB_A2 0.99007225036621
+
+#define ABS_THRES -70 ///< silence gate: we discard anything below this absolute (LUFS) threshold
+#define ABS_UP_THRES 10 ///< upper loud limit to consider (ABS_THRES being the minimum)
+#define HIST_GRAIN 100 ///< defines histogram precision
+#define HIST_SIZE ((ABS_UP_THRES - ABS_THRES) * HIST_GRAIN + 1)
+
+/**
+ * A histogram is an array of HIST_SIZE hist_entry storing all the energies
+ * recorded (with an accuracy of 1/HIST_GRAIN) of the loudnesses from ABS_THRES
+ * (at 0) to ABS_UP_THRES (at HIST_SIZE-1).
+ * This fixed-size system avoids the need of a list of energies growing
+ * infinitely over the time and is thus more scalable.
+ */
+struct hist_entry {
+ int count; ///< how many times the corresponding value occurred
+ double energy; ///< E = 10^((L + 0.691) / 10)
+ double loudness; ///< L = -0.691 + 10 * log10(E)
+};
+
+struct integrator {
+ double *cache[MAX_CHANNELS]; ///< window of filtered samples (N ms)
+ int cache_pos; ///< focus on the last added bin in the cache array
+ double sum[MAX_CHANNELS]; ///< sum of the last N ms filtered samples (cache content)
+ int filled; ///< 1 if the cache is completely filled, 0 otherwise
+ double rel_threshold; ///< relative threshold
+ double sum_kept_powers; ///< sum of the powers (weighted sums) above absolute threshold
+ int nb_kept_powers; ///< number of sum above absolute threshold
+ struct hist_entry *histogram; ///< histogram of the powers, used to compute LRA and I
+};
+
+struct rect { int x, y, w, h; };
+
+typedef struct {
+ const AVClass *class; ///< AVClass context for log and options purpose
+
+ /* peak metering */
+ int peak_mode; ///< enabled peak modes
+ double *true_peaks; ///< true peaks per channel
+ double *sample_peaks; ///< sample peaks per channel
+ double *true_peaks_per_frame; ///< true peaks in a frame per channel
+#if CONFIG_SWRESAMPLE
+ SwrContext *swr_ctx; ///< over-sampling context for true peak metering
+ double *swr_buf; ///< resampled audio data for true peak metering
+ int swr_linesize;
+#endif
+
+ /* video */
+ int do_video; ///< 1 if video output enabled, 0 otherwise
+ int w, h; ///< size of the video output
+ struct rect text; ///< rectangle for the LU legend on the left
+ struct rect graph; ///< rectangle for the main graph in the center
+ struct rect gauge; ///< rectangle for the gauge on the right
+ AVFrame *outpicref; ///< output picture reference, updated regularly
+ int meter; ///< select a EBU mode between +9 and +18
+ int scale_range; ///< the range of LU values according to the meter
+ int y_zero_lu; ///< the y value (pixel position) for 0 LU
+ int *y_line_ref; ///< y reference values for drawing the LU lines in the graph and the gauge
+
+ /* audio */
+ int nb_channels; ///< number of channels in the input
+ double *ch_weighting; ///< channel weighting mapping
+ int sample_count; ///< sample count used for refresh frequency, reset at refresh
+
+ /* Filter caches.
+ * The mult by 3 in the following is for X[i], X[i-1] and X[i-2] */
+ double x[MAX_CHANNELS * 3]; ///< 3 input samples cache for each channel
+ double y[MAX_CHANNELS * 3]; ///< 3 pre-filter samples cache for each channel
+ double z[MAX_CHANNELS * 3]; ///< 3 RLB-filter samples cache for each channel
+
+#define I400_BINS (48000 * 4 / 10)
+#define I3000_BINS (48000 * 3)
+ struct integrator i400; ///< 400ms integrator, used for Momentary loudness (M), and Integrated loudness (I)
+ struct integrator i3000; ///< 3s integrator, used for Short term loudness (S), and Loudness Range (LRA)
+
+ /* I and LRA specific */
+ double integrated_loudness; ///< integrated loudness in LUFS (I)
+ double loudness_range; ///< loudness range in LU (LRA)
+ double lra_low, lra_high; ///< low and high LRA values
+
+ /* misc */
+ int loglevel; ///< log level for frame logging
+ int metadata; ///< whether or not to inject loudness results in frames
+ int dual_mono; ///< whether or not to treat single channel input files as dual-mono
+ double pan_law; ///< pan law value used to calculate dual-mono measurements
+} EBUR128Context;
+
+enum {
+ PEAK_MODE_NONE = 0,
+ PEAK_MODE_SAMPLES_PEAKS = 1<<1,
+ PEAK_MODE_TRUE_PEAKS = 1<<2,
+};
+
+#define OFFSET(x) offsetof(EBUR128Context, x)
+#define A AV_OPT_FLAG_AUDIO_PARAM
+#define V AV_OPT_FLAG_VIDEO_PARAM
+#define F AV_OPT_FLAG_FILTERING_PARAM
+static const AVOption ebur128_options[] = {
+ { "video", "set video output", OFFSET(do_video), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, V|F },
+ { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "640x480"}, 0, 0, V|F },
+ { "meter", "set scale meter (+9 to +18)", OFFSET(meter), AV_OPT_TYPE_INT, {.i64 = 9}, 9, 18, V|F },
+ { "framelog", "force frame logging level", OFFSET(loglevel), AV_OPT_TYPE_INT, {.i64 = -1}, INT_MIN, INT_MAX, A|V|F, "level" },
+ { "info", "information logging level", 0, AV_OPT_TYPE_CONST, {.i64 = AV_LOG_INFO}, INT_MIN, INT_MAX, A|V|F, "level" },
+ { "verbose", "verbose logging level", 0, AV_OPT_TYPE_CONST, {.i64 = AV_LOG_VERBOSE}, INT_MIN, INT_MAX, A|V|F, "level" },
+ { "metadata", "inject metadata in the filtergraph", OFFSET(metadata), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, A|V|F },
+ { "peak", "set peak mode", OFFSET(peak_mode), AV_OPT_TYPE_FLAGS, {.i64 = PEAK_MODE_NONE}, 0, INT_MAX, A|F, "mode" },
+ { "none", "disable any peak mode", 0, AV_OPT_TYPE_CONST, {.i64 = PEAK_MODE_NONE}, INT_MIN, INT_MAX, A|F, "mode" },
+ { "sample", "enable peak-sample mode", 0, AV_OPT_TYPE_CONST, {.i64 = PEAK_MODE_SAMPLES_PEAKS}, INT_MIN, INT_MAX, A|F, "mode" },
+ { "true", "enable true-peak mode", 0, AV_OPT_TYPE_CONST, {.i64 = PEAK_MODE_TRUE_PEAKS}, INT_MIN, INT_MAX, A|F, "mode" },
+ { "dualmono", "treat mono input files as dual-mono", OFFSET(dual_mono), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, A|F },
+ { "panlaw", "set a specific pan law for dual-mono files", OFFSET(pan_law), AV_OPT_TYPE_DOUBLE, {.dbl = -3.01029995663978}, -10.0, 0.0, A|F },
+ { NULL },
+};
+
+AVFILTER_DEFINE_CLASS(ebur128);
+
+static const uint8_t graph_colors[] = {
+ 0xdd, 0x66, 0x66, // value above 0LU non reached
+ 0x66, 0x66, 0xdd, // value below 0LU non reached
+ 0x96, 0x33, 0x33, // value above 0LU reached
+ 0x33, 0x33, 0x96, // value below 0LU reached
+ 0xdd, 0x96, 0x96, // value above 0LU line non reached
+ 0x96, 0x96, 0xdd, // value below 0LU line non reached
+ 0xdd, 0x33, 0x33, // value above 0LU line reached
+ 0x33, 0x33, 0xdd, // value below 0LU line reached
+};
+
+static const uint8_t *get_graph_color(const EBUR128Context *ebur128, int v, int y)
+{
+ const int below0 = y > ebur128->y_zero_lu;
+ const int reached = y >= v;
+ const int line = ebur128->y_line_ref[y] || y == ebur128->y_zero_lu;
+ const int colorid = 4*line + 2*reached + below0;
+ return graph_colors + 3*colorid;
+}
+
+static inline int lu_to_y(const EBUR128Context *ebur128, double v)
+{
+ v += 2 * ebur128->meter; // make it in range [0;...]
+ v = av_clipf(v, 0, ebur128->scale_range); // make sure it's in the graph scale
+ v = ebur128->scale_range - v; // invert value (y=0 is on top)
+ return v * ebur128->graph.h / ebur128->scale_range; // rescale from scale range to px height
+}
+
+#define FONT8 0
+#define FONT16 1
+
+static const uint8_t font_colors[] = {
+ 0xdd, 0xdd, 0x00,
+ 0x00, 0x96, 0x96,
+};
+
+static void drawtext(AVFrame *pic, int x, int y, int ftid, const uint8_t *color, const char *fmt, ...)
+{
+ int i;
+ char buf[128] = {0};
+ const uint8_t *font;
+ int font_height;
+ va_list vl;
+
+ if (ftid == FONT16) font = avpriv_vga16_font, font_height = 16;
+ else if (ftid == FONT8) font = avpriv_cga_font, font_height = 8;
+ else return;
+
+ va_start(vl, fmt);
+ vsnprintf(buf, sizeof(buf), fmt, vl);
+ va_end(vl);
+
+ for (i = 0; buf[i]; i++) {
+ int char_y, mask;
+ uint8_t *p = pic->data[0] + y*pic->linesize[0] + (x + i*8)*3;
+
+ for (char_y = 0; char_y < font_height; char_y++) {
+ for (mask = 0x80; mask; mask >>= 1) {
+ if (font[buf[i] * font_height + char_y] & mask)
+ memcpy(p, color, 3);
+ else
+ memcpy(p, "\x00\x00\x00", 3);
+ p += 3;
+ }
+ p += pic->linesize[0] - 8*3;
+ }
+ }
+}
+
+static void drawline(AVFrame *pic, int x, int y, int len, int step)
+{
+ int i;
+ uint8_t *p = pic->data[0] + y*pic->linesize[0] + x*3;
+
+ for (i = 0; i < len; i++) {
+ memcpy(p, "\x00\xff\x00", 3);
+ p += step;
+ }
+}
+
+static int config_video_output(AVFilterLink *outlink)
+{
+ int i, x, y;
+ uint8_t *p;
+ AVFilterContext *ctx = outlink->src;
+ EBUR128Context *ebur128 = ctx->priv;
+ AVFrame *outpicref;
+
+ /* check if there is enough space to represent everything decently */
+ if (ebur128->w < 640 || ebur128->h < 480) {
+ av_log(ctx, AV_LOG_ERROR, "Video size %dx%d is too small, "
+ "minimum size is 640x480\n", ebur128->w, ebur128->h);
+ return AVERROR(EINVAL);
+ }
+ outlink->w = ebur128->w;
+ outlink->h = ebur128->h;
+
+#define PAD 8
+
+ /* configure text area position and size */
+ ebur128->text.x = PAD;
+ ebur128->text.y = 40;
+ ebur128->text.w = 3 * 8; // 3 characters
+ ebur128->text.h = ebur128->h - PAD - ebur128->text.y;
+
+ /* configure gauge position and size */
+ ebur128->gauge.w = 20;
+ ebur128->gauge.h = ebur128->text.h;
+ ebur128->gauge.x = ebur128->w - PAD - ebur128->gauge.w;
+ ebur128->gauge.y = ebur128->text.y;
+
+ /* configure graph position and size */
+ ebur128->graph.x = ebur128->text.x + ebur128->text.w + PAD;
+ ebur128->graph.y = ebur128->gauge.y;
+ ebur128->graph.w = ebur128->gauge.x - ebur128->graph.x - PAD;
+ ebur128->graph.h = ebur128->gauge.h;
+
+ /* graph and gauge share the LU-to-pixel code */
+ av_assert0(ebur128->graph.h == ebur128->gauge.h);
+
+ /* prepare the initial picref buffer */
+ av_frame_free(&ebur128->outpicref);
+ ebur128->outpicref = outpicref =
+ ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!outpicref)
+ return AVERROR(ENOMEM);
+ outlink->sample_aspect_ratio = (AVRational){1,1};
+
+ /* init y references values (to draw LU lines) */
+ ebur128->y_line_ref = av_calloc(ebur128->graph.h + 1, sizeof(*ebur128->y_line_ref));
+ if (!ebur128->y_line_ref)
+ return AVERROR(ENOMEM);
+
+ /* black background */
+ memset(outpicref->data[0], 0, ebur128->h * outpicref->linesize[0]);
+
+ /* draw LU legends */
+ drawtext(outpicref, PAD, PAD+16, FONT8, font_colors+3, " LU");
+ for (i = ebur128->meter; i >= -ebur128->meter * 2; i--) {
+ y = lu_to_y(ebur128, i);
+ x = PAD + (i < 10 && i > -10) * 8;
+ ebur128->y_line_ref[y] = i;
+ y -= 4; // -4 to center vertically
+ drawtext(outpicref, x, y + ebur128->graph.y, FONT8, font_colors+3,
+ "%c%d", i < 0 ? '-' : i > 0 ? '+' : ' ', FFABS(i));
+ }
+
+ /* draw graph */
+ ebur128->y_zero_lu = lu_to_y(ebur128, 0);
+ p = outpicref->data[0] + ebur128->graph.y * outpicref->linesize[0]
+ + ebur128->graph.x * 3;
+ for (y = 0; y < ebur128->graph.h; y++) {
+ const uint8_t *c = get_graph_color(ebur128, INT_MAX, y);
+
+ for (x = 0; x < ebur128->graph.w; x++)
+ memcpy(p + x*3, c, 3);
+ p += outpicref->linesize[0];
+ }
+
+ /* draw fancy rectangles around the graph and the gauge */
+#define DRAW_RECT(r) do { \
+ drawline(outpicref, r.x, r.y - 1, r.w, 3); \
+ drawline(outpicref, r.x, r.y + r.h, r.w, 3); \
+ drawline(outpicref, r.x - 1, r.y, r.h, outpicref->linesize[0]); \
+ drawline(outpicref, r.x + r.w, r.y, r.h, outpicref->linesize[0]); \
+} while (0)
+ DRAW_RECT(ebur128->graph);
+ DRAW_RECT(ebur128->gauge);
+
+ return 0;
+}
+
+static int config_audio_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ EBUR128Context *ebur128 = ctx->priv;
+
+ /* Force 100ms framing in case of metadata injection: the frames must have
+ * a granularity of the window overlap to be accurately exploited.
+ * As for the true peaks mode, it just simplifies the resampling buffer
+ * allocation and the lookup in it (since sample buffers differ in size, it
+ * can be more complex to integrate in the one-sample loop of
+ * filter_frame()). */
+ if (ebur128->metadata || (ebur128->peak_mode & PEAK_MODE_TRUE_PEAKS))
+ inlink->min_samples =
+ inlink->max_samples =
+ inlink->partial_buf_size = inlink->sample_rate / 10;
+ return 0;
+}
+
+static int config_audio_output(AVFilterLink *outlink)
+{
+ int i;
+ AVFilterContext *ctx = outlink->src;
+ EBUR128Context *ebur128 = ctx->priv;
+ const int nb_channels = av_get_channel_layout_nb_channels(outlink->channel_layout);
+
+#define BACK_MASK (AV_CH_BACK_LEFT |AV_CH_BACK_CENTER |AV_CH_BACK_RIGHT| \
+ AV_CH_TOP_BACK_LEFT|AV_CH_TOP_BACK_CENTER|AV_CH_TOP_BACK_RIGHT| \
+ AV_CH_SIDE_LEFT |AV_CH_SIDE_RIGHT| \
+ AV_CH_SURROUND_DIRECT_LEFT |AV_CH_SURROUND_DIRECT_RIGHT)
+
+ ebur128->nb_channels = nb_channels;
+ ebur128->ch_weighting = av_calloc(nb_channels, sizeof(*ebur128->ch_weighting));
+ if (!ebur128->ch_weighting)
+ return AVERROR(ENOMEM);
+
+ for (i = 0; i < nb_channels; i++) {
+ /* channel weighting */
+ const uint16_t chl = av_channel_layout_extract_channel(outlink->channel_layout, i);
+ if (chl & (AV_CH_LOW_FREQUENCY|AV_CH_LOW_FREQUENCY_2)) {
+ ebur128->ch_weighting[i] = 0;
+ } else if (chl & BACK_MASK) {
+ ebur128->ch_weighting[i] = 1.41;
+ } else {
+ ebur128->ch_weighting[i] = 1.0;
+ }
+
+ if (!ebur128->ch_weighting[i])
+ continue;
+
+ /* bins buffer for the two integration window (400ms and 3s) */
+ ebur128->i400.cache[i] = av_calloc(I400_BINS, sizeof(*ebur128->i400.cache[0]));
+ ebur128->i3000.cache[i] = av_calloc(I3000_BINS, sizeof(*ebur128->i3000.cache[0]));
+ if (!ebur128->i400.cache[i] || !ebur128->i3000.cache[i])
+ return AVERROR(ENOMEM);
+ }
+
+#if CONFIG_SWRESAMPLE
+ if (ebur128->peak_mode & PEAK_MODE_TRUE_PEAKS) {
+ int ret;
+
+ ebur128->swr_buf = av_malloc_array(nb_channels, 19200 * sizeof(double));
+ ebur128->true_peaks = av_calloc(nb_channels, sizeof(*ebur128->true_peaks));
+ ebur128->true_peaks_per_frame = av_calloc(nb_channels, sizeof(*ebur128->true_peaks_per_frame));
+ ebur128->swr_ctx = swr_alloc();
+ if (!ebur128->swr_buf || !ebur128->true_peaks ||
+ !ebur128->true_peaks_per_frame || !ebur128->swr_ctx)
+ return AVERROR(ENOMEM);
+
+ av_opt_set_int(ebur128->swr_ctx, "in_channel_layout", outlink->channel_layout, 0);
+ av_opt_set_int(ebur128->swr_ctx, "in_sample_rate", outlink->sample_rate, 0);
+ av_opt_set_sample_fmt(ebur128->swr_ctx, "in_sample_fmt", outlink->format, 0);
+
+ av_opt_set_int(ebur128->swr_ctx, "out_channel_layout", outlink->channel_layout, 0);
+ av_opt_set_int(ebur128->swr_ctx, "out_sample_rate", 192000, 0);
+ av_opt_set_sample_fmt(ebur128->swr_ctx, "out_sample_fmt", outlink->format, 0);
+
+ ret = swr_init(ebur128->swr_ctx);
+ if (ret < 0)
+ return ret;
+ }
+#endif
+
+ if (ebur128->peak_mode & PEAK_MODE_SAMPLES_PEAKS) {
+ ebur128->sample_peaks = av_calloc(nb_channels, sizeof(*ebur128->sample_peaks));
+ if (!ebur128->sample_peaks)
+ return AVERROR(ENOMEM);
+ }
+
+ return 0;
+}
+
+#define ENERGY(loudness) (ff_exp10(((loudness) + 0.691) / 10.))
+#define LOUDNESS(energy) (-0.691 + 10 * log10(energy))
+#define DBFS(energy) (20 * log10(energy))
+
+static struct hist_entry *get_histogram(void)
+{
+ int i;
+ struct hist_entry *h = av_calloc(HIST_SIZE, sizeof(*h));
+
+ if (!h)
+ return NULL;
+ for (i = 0; i < HIST_SIZE; i++) {
+ h[i].loudness = i / (double)HIST_GRAIN + ABS_THRES;
+ h[i].energy = ENERGY(h[i].loudness);
+ }
+ return h;
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ EBUR128Context *ebur128 = ctx->priv;
+ AVFilterPad pad;
+
+ if (ebur128->loglevel != AV_LOG_INFO &&
+ ebur128->loglevel != AV_LOG_VERBOSE) {
+ if (ebur128->do_video || ebur128->metadata)
+ ebur128->loglevel = AV_LOG_VERBOSE;
+ else
+ ebur128->loglevel = AV_LOG_INFO;
+ }
+
+ if (!CONFIG_SWRESAMPLE && (ebur128->peak_mode & PEAK_MODE_TRUE_PEAKS)) {
+ av_log(ctx, AV_LOG_ERROR,
+ "True-peak mode requires libswresample to be performed\n");
+ return AVERROR(EINVAL);
+ }
+
+ // if meter is +9 scale, scale range is from -18 LU to +9 LU (or 3*9)
+ // if meter is +18 scale, scale range is from -36 LU to +18 LU (or 3*18)
+ ebur128->scale_range = 3 * ebur128->meter;
+
+ ebur128->i400.histogram = get_histogram();
+ ebur128->i3000.histogram = get_histogram();
+ if (!ebur128->i400.histogram || !ebur128->i3000.histogram)
+ return AVERROR(ENOMEM);
+
+ ebur128->integrated_loudness = ABS_THRES;
+ ebur128->loudness_range = 0;
+
+ /* insert output pads */
+ if (ebur128->do_video) {
+ pad = (AVFilterPad){
+ .name = av_strdup("out0"),
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_video_output,
+ };
+ if (!pad.name)
+ return AVERROR(ENOMEM);
+ ff_insert_outpad(ctx, 0, &pad);
+ }
+ pad = (AVFilterPad){
+ .name = av_asprintf("out%d", ebur128->do_video),
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_audio_output,
+ };
+ if (!pad.name)
+ return AVERROR(ENOMEM);
+ ff_insert_outpad(ctx, ebur128->do_video, &pad);
+
+ /* summary */
+ av_log(ctx, AV_LOG_VERBOSE, "EBU +%d scale\n", ebur128->meter);
+
+ return 0;
+}
+
+#define HIST_POS(power) (int)(((power) - ABS_THRES) * HIST_GRAIN)
+
+/* loudness and power should be set such as loudness = -0.691 +
+ * 10*log10(power), we just avoid doing that calculus two times */
+static int gate_update(struct integrator *integ, double power,
+ double loudness, int gate_thres)
+{
+ int ipower;
+ double relative_threshold;
+ int gate_hist_pos;
+
+ /* update powers histograms by incrementing current power count */
+ ipower = av_clip(HIST_POS(loudness), 0, HIST_SIZE - 1);
+ integ->histogram[ipower].count++;
+
+ /* compute relative threshold and get its position in the histogram */
+ integ->sum_kept_powers += power;
+ integ->nb_kept_powers++;
+ relative_threshold = integ->sum_kept_powers / integ->nb_kept_powers;
+ if (!relative_threshold)
+ relative_threshold = 1e-12;
+ integ->rel_threshold = LOUDNESS(relative_threshold) + gate_thres;
+ gate_hist_pos = av_clip(HIST_POS(integ->rel_threshold), 0, HIST_SIZE - 1);
+
+ return gate_hist_pos;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
+{
+ int i, ch, idx_insample;
+ AVFilterContext *ctx = inlink->dst;
+ EBUR128Context *ebur128 = ctx->priv;
+ const int nb_channels = ebur128->nb_channels;
+ const int nb_samples = insamples->nb_samples;
+ const double *samples = (double *)insamples->data[0];
+ AVFrame *pic = ebur128->outpicref;
+
+#if CONFIG_SWRESAMPLE
+ if (ebur128->peak_mode & PEAK_MODE_TRUE_PEAKS) {
+ const double *swr_samples = ebur128->swr_buf;
+ int ret = swr_convert(ebur128->swr_ctx, (uint8_t**)&ebur128->swr_buf, 19200,
+ (const uint8_t **)insamples->data, nb_samples);
+ if (ret < 0)
+ return ret;
+ for (ch = 0; ch < nb_channels; ch++)
+ ebur128->true_peaks_per_frame[ch] = 0.0;
+ for (idx_insample = 0; idx_insample < ret; idx_insample++) {
+ for (ch = 0; ch < nb_channels; ch++) {
+ ebur128->true_peaks[ch] = FFMAX(ebur128->true_peaks[ch], fabs(*swr_samples));
+ ebur128->true_peaks_per_frame[ch] = FFMAX(ebur128->true_peaks_per_frame[ch],
+ fabs(*swr_samples));
+ swr_samples++;
+ }
+ }
+ }
+#endif
+
+ for (idx_insample = 0; idx_insample < nb_samples; idx_insample++) {
+ const int bin_id_400 = ebur128->i400.cache_pos;
+ const int bin_id_3000 = ebur128->i3000.cache_pos;
+
+#define MOVE_TO_NEXT_CACHED_ENTRY(time) do { \
+ ebur128->i##time.cache_pos++; \
+ if (ebur128->i##time.cache_pos == I##time##_BINS) { \
+ ebur128->i##time.filled = 1; \
+ ebur128->i##time.cache_pos = 0; \
+ } \
+} while (0)
+
+ MOVE_TO_NEXT_CACHED_ENTRY(400);
+ MOVE_TO_NEXT_CACHED_ENTRY(3000);
+
+ for (ch = 0; ch < nb_channels; ch++) {
+ double bin;
+
+ if (ebur128->peak_mode & PEAK_MODE_SAMPLES_PEAKS)
+ ebur128->sample_peaks[ch] = FFMAX(ebur128->sample_peaks[ch], fabs(*samples));
+
+ ebur128->x[ch * 3] = *samples++; // set X[i]
+
+ if (!ebur128->ch_weighting[ch])
+ continue;
+
+ /* Y[i] = X[i]*b0 + X[i-1]*b1 + X[i-2]*b2 - Y[i-1]*a1 - Y[i-2]*a2 */
+#define FILTER(Y, X, name) do { \
+ double *dst = ebur128->Y + ch*3; \
+ double *src = ebur128->X + ch*3; \
+ dst[2] = dst[1]; \
+ dst[1] = dst[0]; \
+ dst[0] = src[0]*name##_B0 + src[1]*name##_B1 + src[2]*name##_B2 \
+ - dst[1]*name##_A1 - dst[2]*name##_A2; \
+} while (0)
+
+ // TODO: merge both filters in one?
+ FILTER(y, x, PRE); // apply pre-filter
+ ebur128->x[ch * 3 + 2] = ebur128->x[ch * 3 + 1];
+ ebur128->x[ch * 3 + 1] = ebur128->x[ch * 3 ];
+ FILTER(z, y, RLB); // apply RLB-filter
+
+ bin = ebur128->z[ch * 3] * ebur128->z[ch * 3];
+
+ /* add the new value, and limit the sum to the cache size (400ms or 3s)
+ * by removing the oldest one */
+ ebur128->i400.sum [ch] = ebur128->i400.sum [ch] + bin - ebur128->i400.cache [ch][bin_id_400];
+ ebur128->i3000.sum[ch] = ebur128->i3000.sum[ch] + bin - ebur128->i3000.cache[ch][bin_id_3000];
+
+ /* override old cache entry with the new value */
+ ebur128->i400.cache [ch][bin_id_400 ] = bin;
+ ebur128->i3000.cache[ch][bin_id_3000] = bin;
+ }
+
+ /* For integrated loudness, gating blocks are 400ms long with 75%
+ * overlap (see BS.1770-2 p5), so a re-computation is needed each 100ms
+ * (4800 samples at 48kHz). */
+ if (++ebur128->sample_count == 4800) {
+ double loudness_400, loudness_3000;
+ double power_400 = 1e-12, power_3000 = 1e-12;
+ AVFilterLink *outlink = ctx->outputs[0];
+ const int64_t pts = insamples->pts +
+ av_rescale_q(idx_insample, (AVRational){ 1, inlink->sample_rate },
+ outlink->time_base);
+
+ ebur128->sample_count = 0;
+
+#define COMPUTE_LOUDNESS(m, time) do { \
+ if (ebur128->i##time.filled) { \
+ /* weighting sum of the last <time> ms */ \
+ for (ch = 0; ch < nb_channels; ch++) \
+ power_##time += ebur128->ch_weighting[ch] * ebur128->i##time.sum[ch]; \
+ power_##time /= I##time##_BINS; \
+ } \
+ loudness_##time = LOUDNESS(power_##time); \
+} while (0)
+
+ COMPUTE_LOUDNESS(M, 400);
+ COMPUTE_LOUDNESS(S, 3000);
+
+ /* Integrated loudness */
+#define I_GATE_THRES -10 // initially defined to -8 LU in the first EBU standard
+
+ if (loudness_400 >= ABS_THRES) {
+ double integrated_sum = 0;
+ int nb_integrated = 0;
+ int gate_hist_pos = gate_update(&ebur128->i400, power_400,
+ loudness_400, I_GATE_THRES);
+
+ /* compute integrated loudness by summing the histogram values
+ * above the relative threshold */
+ for (i = gate_hist_pos; i < HIST_SIZE; i++) {
+ const int nb_v = ebur128->i400.histogram[i].count;
+ nb_integrated += nb_v;
+ integrated_sum += nb_v * ebur128->i400.histogram[i].energy;
+ }
+ if (nb_integrated) {
+ ebur128->integrated_loudness = LOUDNESS(integrated_sum / nb_integrated);
+ /* dual-mono correction */
+ if (nb_channels == 1 && ebur128->dual_mono) {
+ ebur128->integrated_loudness -= ebur128->pan_law;
+ }
+ }
+ }
+
+ /* LRA */
+#define LRA_GATE_THRES -20
+#define LRA_LOWER_PRC 10
+#define LRA_HIGHER_PRC 95
+
+ /* XXX: example code in EBU 3342 is ">=" but formula in BS.1770
+ * specs is ">" */
+ if (loudness_3000 >= ABS_THRES) {
+ int nb_powers = 0;
+ int gate_hist_pos = gate_update(&ebur128->i3000, power_3000,
+ loudness_3000, LRA_GATE_THRES);
+
+ for (i = gate_hist_pos; i < HIST_SIZE; i++)
+ nb_powers += ebur128->i3000.histogram[i].count;
+ if (nb_powers) {
+ int n, nb_pow;
+
+ /* get lower loudness to consider */
+ n = 0;
+ nb_pow = LRA_LOWER_PRC * nb_powers / 100. + 0.5;
+ for (i = gate_hist_pos; i < HIST_SIZE; i++) {
+ n += ebur128->i3000.histogram[i].count;
+ if (n >= nb_pow) {
+ ebur128->lra_low = ebur128->i3000.histogram[i].loudness;
+ break;
+ }
+ }
+
+ /* get higher loudness to consider */
+ n = nb_powers;
+ nb_pow = LRA_HIGHER_PRC * nb_powers / 100. + 0.5;
+ for (i = HIST_SIZE - 1; i >= 0; i--) {
+ n -= ebur128->i3000.histogram[i].count;
+ if (n < nb_pow) {
+ ebur128->lra_high = ebur128->i3000.histogram[i].loudness;
+ break;
+ }
+ }
+
+ // XXX: show low & high on the graph?
+ ebur128->loudness_range = ebur128->lra_high - ebur128->lra_low;
+ }
+ }
+
+ /* dual-mono correction */
+ if (nb_channels == 1 && ebur128->dual_mono) {
+ loudness_400 -= ebur128->pan_law;
+ loudness_3000 -= ebur128->pan_law;
+ }
+
+#define LOG_FMT "M:%6.1f S:%6.1f I:%6.1f LUFS LRA:%6.1f LU"
+
+ /* push one video frame */
+ if (ebur128->do_video) {
+ int x, y, ret;
+ uint8_t *p;
+
+ const int y_loudness_lu_graph = lu_to_y(ebur128, loudness_3000 + 23);
+ const int y_loudness_lu_gauge = lu_to_y(ebur128, loudness_400 + 23);
+
+ /* draw the graph using the short-term loudness */
+ p = pic->data[0] + ebur128->graph.y*pic->linesize[0] + ebur128->graph.x*3;
+ for (y = 0; y < ebur128->graph.h; y++) {
+ const uint8_t *c = get_graph_color(ebur128, y_loudness_lu_graph, y);
+
+ memmove(p, p + 3, (ebur128->graph.w - 1) * 3);
+ memcpy(p + (ebur128->graph.w - 1) * 3, c, 3);
+ p += pic->linesize[0];
+ }
+
+ /* draw the gauge using the momentary loudness */
+ p = pic->data[0] + ebur128->gauge.y*pic->linesize[0] + ebur128->gauge.x*3;
+ for (y = 0; y < ebur128->gauge.h; y++) {
+ const uint8_t *c = get_graph_color(ebur128, y_loudness_lu_gauge, y);
+
+ for (x = 0; x < ebur128->gauge.w; x++)
+ memcpy(p + x*3, c, 3);
+ p += pic->linesize[0];
+ }
+
+ /* draw textual info */
+ drawtext(pic, PAD, PAD - PAD/2, FONT16, font_colors,
+ LOG_FMT " ", // padding to erase trailing characters
+ loudness_400, loudness_3000,
+ ebur128->integrated_loudness, ebur128->loudness_range);
+
+ /* set pts and push frame */
+ pic->pts = pts;
+ ret = ff_filter_frame(outlink, av_frame_clone(pic));
+ if (ret < 0)
+ return ret;
+ }
+
+ if (ebur128->metadata) { /* happens only once per filter_frame call */
+ char metabuf[128];
+#define META_PREFIX "lavfi.r128."
+
+#define SET_META(name, var) do { \
+ snprintf(metabuf, sizeof(metabuf), "%.3f", var); \
+ av_dict_set(&insamples->metadata, name, metabuf, 0); \
+} while (0)
+
+#define SET_META_PEAK(name, ptype) do { \
+ if (ebur128->peak_mode & PEAK_MODE_ ## ptype ## _PEAKS) { \
+ char key[64]; \
+ for (ch = 0; ch < nb_channels; ch++) { \
+ snprintf(key, sizeof(key), \
+ META_PREFIX AV_STRINGIFY(name) "_peaks_ch%d", ch); \
+ SET_META(key, ebur128->name##_peaks[ch]); \
+ } \
+ } \
+} while (0)
+
+ SET_META(META_PREFIX "M", loudness_400);
+ SET_META(META_PREFIX "S", loudness_3000);
+ SET_META(META_PREFIX "I", ebur128->integrated_loudness);
+ SET_META(META_PREFIX "LRA", ebur128->loudness_range);
+ SET_META(META_PREFIX "LRA.low", ebur128->lra_low);
+ SET_META(META_PREFIX "LRA.high", ebur128->lra_high);
+
+ SET_META_PEAK(sample, SAMPLES);
+ SET_META_PEAK(true, TRUE);
+ }
+
+ av_log(ctx, ebur128->loglevel, "t: %-10s " LOG_FMT,
+ av_ts2timestr(pts, &outlink->time_base),
+ loudness_400, loudness_3000,
+ ebur128->integrated_loudness, ebur128->loudness_range);
+
+#define PRINT_PEAKS(str, sp, ptype) do { \
+ if (ebur128->peak_mode & PEAK_MODE_ ## ptype ## _PEAKS) { \
+ av_log(ctx, ebur128->loglevel, " " str ":"); \
+ for (ch = 0; ch < nb_channels; ch++) \
+ av_log(ctx, ebur128->loglevel, " %5.1f", DBFS(sp[ch])); \
+ av_log(ctx, ebur128->loglevel, " dBFS"); \
+ } \
+} while (0)
+
+ PRINT_PEAKS("SPK", ebur128->sample_peaks, SAMPLES);
+ PRINT_PEAKS("FTPK", ebur128->true_peaks_per_frame, TRUE);
+ PRINT_PEAKS("TPK", ebur128->true_peaks, TRUE);
+ av_log(ctx, ebur128->loglevel, "\n");
+ }
+ }
+
+ return ff_filter_frame(ctx->outputs[ebur128->do_video], insamples);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ EBUR128Context *ebur128 = ctx->priv;
+ AVFilterFormats *formats;
+ AVFilterChannelLayouts *layouts;
+ AVFilterLink *inlink = ctx->inputs[0];
+ AVFilterLink *outlink = ctx->outputs[0];
+ int ret;
+
+ static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_NONE };
+ static const int input_srate[] = {48000, -1}; // ITU-R BS.1770 provides coeff only for 48kHz
+ static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_RGB24, AV_PIX_FMT_NONE };
+
+ /* set optional output video format */
+ if (ebur128->do_video) {
+ formats = ff_make_format_list(pix_fmts);
+ if ((ret = ff_formats_ref(formats, &outlink->in_formats)) < 0)
+ return ret;
+ outlink = ctx->outputs[1];
+ }
+
+ /* set input and output audio formats
+ * Note: ff_set_common_* functions are not used because they affect all the
+ * links, and thus break the video format negotiation */
+ formats = ff_make_format_list(sample_fmts);
+ if ((ret = ff_formats_ref(formats, &inlink->out_formats)) < 0 ||
+ (ret = ff_formats_ref(formats, &outlink->in_formats)) < 0)
+ return ret;
+
+ layouts = ff_all_channel_layouts();
+ if ((ret = ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts)) < 0 ||
+ (ret = ff_channel_layouts_ref(layouts, &outlink->in_channel_layouts)) < 0)
+ return ret;
+
+ formats = ff_make_format_list(input_srate);
+ if ((ret = ff_formats_ref(formats, &inlink->out_samplerates)) < 0 ||
+ (ret = ff_formats_ref(formats, &outlink->in_samplerates)) < 0)
+ return ret;
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ int i;
+ EBUR128Context *ebur128 = ctx->priv;
+
+ /* dual-mono correction */
+ if (ebur128->nb_channels == 1 && ebur128->dual_mono) {
+ ebur128->i400.rel_threshold -= ebur128->pan_law;
+ ebur128->i3000.rel_threshold -= ebur128->pan_law;
+ ebur128->lra_low -= ebur128->pan_law;
+ ebur128->lra_high -= ebur128->pan_law;
+ }
+
+ av_log(ctx, AV_LOG_INFO, "Summary:\n\n"
+ " Integrated loudness:\n"
+ " I: %5.1f LUFS\n"
+ " Threshold: %5.1f LUFS\n\n"
+ " Loudness range:\n"
+ " LRA: %5.1f LU\n"
+ " Threshold: %5.1f LUFS\n"
+ " LRA low: %5.1f LUFS\n"
+ " LRA high: %5.1f LUFS",
+ ebur128->integrated_loudness, ebur128->i400.rel_threshold,
+ ebur128->loudness_range, ebur128->i3000.rel_threshold,
+ ebur128->lra_low, ebur128->lra_high);
+
+#define PRINT_PEAK_SUMMARY(str, sp, ptype) do { \
+ int ch; \
+ double maxpeak; \
+ maxpeak = 0.0; \
+ if (ebur128->peak_mode & PEAK_MODE_ ## ptype ## _PEAKS) { \
+ for (ch = 0; ch < ebur128->nb_channels; ch++) \
+ maxpeak = FFMAX(maxpeak, sp[ch]); \
+ av_log(ctx, AV_LOG_INFO, "\n\n " str " peak:\n" \
+ " Peak: %5.1f dBFS", \
+ DBFS(maxpeak)); \
+ } \
+} while (0)
+
+ PRINT_PEAK_SUMMARY("Sample", ebur128->sample_peaks, SAMPLES);
+ PRINT_PEAK_SUMMARY("True", ebur128->true_peaks, TRUE);
+ av_log(ctx, AV_LOG_INFO, "\n");
+
+ av_freep(&ebur128->y_line_ref);
+ av_freep(&ebur128->ch_weighting);
+ av_freep(&ebur128->true_peaks);
+ av_freep(&ebur128->sample_peaks);
+ av_freep(&ebur128->true_peaks_per_frame);
+ av_freep(&ebur128->i400.histogram);
+ av_freep(&ebur128->i3000.histogram);
+ for (i = 0; i < ebur128->nb_channels; i++) {
+ av_freep(&ebur128->i400.cache[i]);
+ av_freep(&ebur128->i3000.cache[i]);
+ }
+ for (i = 0; i < ctx->nb_outputs; i++)
+ av_freep(&ctx->output_pads[i].name);
+ av_frame_free(&ebur128->outpicref);
+#if CONFIG_SWRESAMPLE
+ av_freep(&ebur128->swr_buf);
+ swr_free(&ebur128->swr_ctx);
+#endif
+}
+
+static const AVFilterPad ebur128_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ .config_props = config_audio_input,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_ebur128 = {
+ .name = "ebur128",
+ .description = NULL_IF_CONFIG_SMALL("EBU R128 scanner."),
+ .priv_size = sizeof(EBUR128Context),
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = ebur128_inputs,
+ .outputs = NULL,
+ .priv_class = &ebur128_class,
+ .flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
+};
diff --git a/libavfilter/f_interleave.c b/libavfilter/f_interleave.c
new file mode 100644
index 0000000000..b9192e9b14
--- /dev/null
+++ b/libavfilter/f_interleave.c
@@ -0,0 +1,261 @@
+/*
+ * Copyright (c) 2013 Stefano Sabatini
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * audio and video interleaver
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/avstring.h"
+#include "libavutil/opt.h"
+
+#define FF_INTERNAL_FIELDS 1
+#include "framequeue.h"
+
+#include "avfilter.h"
+#include "bufferqueue.h"
+#include "formats.h"
+#include "internal.h"
+#include "audio.h"
+#include "video.h"
+
+typedef struct {
+ const AVClass *class;
+ int nb_inputs;
+ struct FFBufQueue *queues;
+} InterleaveContext;
+
+#define OFFSET(x) offsetof(InterleaveContext, x)
+
+#define DEFINE_OPTIONS(filt_name, flags_) \
+static const AVOption filt_name##_options[] = { \
+ { "nb_inputs", "set number of inputs", OFFSET(nb_inputs), AV_OPT_TYPE_INT, {.i64 = 2}, 1, INT_MAX, .flags = flags_ }, \
+ { "n", "set number of inputs", OFFSET(nb_inputs), AV_OPT_TYPE_INT, {.i64 = 2}, 1, INT_MAX, .flags = flags_ }, \
+ { NULL } \
+}
+
+inline static int push_frame(AVFilterContext *ctx)
+{
+ InterleaveContext *s = ctx->priv;
+ AVFrame *frame;
+ int i, queue_idx = -1;
+ int64_t pts_min = INT64_MAX;
+
+ /* look for oldest frame */
+ for (i = 0; i < ctx->nb_inputs; i++) {
+ struct FFBufQueue *q = &s->queues[i];
+
+ if (!q->available && !ctx->inputs[i]->status_out)
+ return 0;
+ if (q->available) {
+ frame = ff_bufqueue_peek(q, 0);
+ if (frame->pts < pts_min) {
+ pts_min = frame->pts;
+ queue_idx = i;
+ }
+ }
+ }
+
+ /* all inputs are closed */
+ if (queue_idx < 0)
+ return AVERROR_EOF;
+
+ frame = ff_bufqueue_get(&s->queues[queue_idx]);
+ av_log(ctx, AV_LOG_DEBUG, "queue:%d -> frame time:%f\n",
+ queue_idx, frame->pts * av_q2d(AV_TIME_BASE_Q));
+ return ff_filter_frame(ctx->outputs[0], frame);
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
+{
+ AVFilterContext *ctx = inlink->dst;
+ InterleaveContext *s = ctx->priv;
+ unsigned in_no = FF_INLINK_IDX(inlink);
+
+ if (frame->pts == AV_NOPTS_VALUE) {
+ av_log(ctx, AV_LOG_WARNING,
+ "NOPTS value for input frame cannot be accepted, frame discarded\n");
+ av_frame_free(&frame);
+ return AVERROR_INVALIDDATA;
+ }
+
+ /* queue frame */
+ frame->pts = av_rescale_q(frame->pts, inlink->time_base, AV_TIME_BASE_Q);
+ av_log(ctx, AV_LOG_DEBUG, "frame pts:%f -> queue idx:%d available:%d\n",
+ frame->pts * av_q2d(AV_TIME_BASE_Q), in_no, s->queues[in_no].available);
+ ff_bufqueue_add(ctx, &s->queues[in_no], frame);
+
+ return push_frame(ctx);
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ InterleaveContext *s = ctx->priv;
+ const AVFilterPad *outpad = &ctx->filter->outputs[0];
+ int i;
+
+ s->queues = av_calloc(s->nb_inputs, sizeof(s->queues[0]));
+ if (!s->queues)
+ return AVERROR(ENOMEM);
+
+ for (i = 0; i < s->nb_inputs; i++) {
+ AVFilterPad inpad = { 0 };
+
+ inpad.name = av_asprintf("input%d", i);
+ if (!inpad.name)
+ return AVERROR(ENOMEM);
+ inpad.type = outpad->type;
+ inpad.filter_frame = filter_frame;
+
+ switch (outpad->type) {
+ case AVMEDIA_TYPE_VIDEO:
+ inpad.get_video_buffer = ff_null_get_video_buffer; break;
+ case AVMEDIA_TYPE_AUDIO:
+ inpad.get_audio_buffer = ff_null_get_audio_buffer; break;
+ default:
+ av_assert0(0);
+ }
+ ff_insert_inpad(ctx, i, &inpad);
+ }
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ InterleaveContext *s = ctx->priv;
+ int i;
+
+ for (i = 0; i < ctx->nb_inputs; i++) {
+ ff_bufqueue_discard_all(&s->queues[i]);
+ av_freep(&s->queues[i]);
+ av_freep(&ctx->input_pads[i].name);
+ }
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ AVFilterLink *inlink0 = ctx->inputs[0];
+ int i;
+
+ if (outlink->type == AVMEDIA_TYPE_VIDEO) {
+ outlink->time_base = AV_TIME_BASE_Q;
+ outlink->w = inlink0->w;
+ outlink->h = inlink0->h;
+ outlink->sample_aspect_ratio = inlink0->sample_aspect_ratio;
+ outlink->format = inlink0->format;
+ outlink->frame_rate = (AVRational) {1, 0};
+ for (i = 1; i < ctx->nb_inputs; i++) {
+ AVFilterLink *inlink = ctx->inputs[i];
+
+ if (outlink->w != inlink->w ||
+ outlink->h != inlink->h ||
+ outlink->sample_aspect_ratio.num != inlink->sample_aspect_ratio.num ||
+ outlink->sample_aspect_ratio.den != inlink->sample_aspect_ratio.den) {
+ av_log(ctx, AV_LOG_ERROR, "Parameters for input link %s "
+ "(size %dx%d, SAR %d:%d) do not match the corresponding "
+ "output link parameters (%dx%d, SAR %d:%d)\n",
+ ctx->input_pads[i].name, inlink->w, inlink->h,
+ inlink->sample_aspect_ratio.num,
+ inlink->sample_aspect_ratio.den,
+ outlink->w, outlink->h,
+ outlink->sample_aspect_ratio.num,
+ outlink->sample_aspect_ratio.den);
+ return AVERROR(EINVAL);
+ }
+ }
+ }
+ return 0;
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ InterleaveContext *s = ctx->priv;
+ int i, ret;
+
+ for (i = 0; i < ctx->nb_inputs; i++) {
+ if (!s->queues[i].available && !ctx->inputs[i]->status_out) {
+ ret = ff_request_frame(ctx->inputs[i]);
+ if (ret != AVERROR_EOF)
+ return ret;
+ }
+ }
+
+ return push_frame(ctx);
+}
+
+#if CONFIG_INTERLEAVE_FILTER
+
+DEFINE_OPTIONS(interleave, AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM);
+AVFILTER_DEFINE_CLASS(interleave);
+
+static const AVFilterPad interleave_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_interleave = {
+ .name = "interleave",
+ .description = NULL_IF_CONFIG_SMALL("Temporally interleave video inputs."),
+ .priv_size = sizeof(InterleaveContext),
+ .init = init,
+ .uninit = uninit,
+ .outputs = interleave_outputs,
+ .priv_class = &interleave_class,
+ .flags = AVFILTER_FLAG_DYNAMIC_INPUTS,
+};
+
+#endif
+
+#if CONFIG_AINTERLEAVE_FILTER
+
+DEFINE_OPTIONS(ainterleave, AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM);
+AVFILTER_DEFINE_CLASS(ainterleave);
+
+static const AVFilterPad ainterleave_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_output,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_ainterleave = {
+ .name = "ainterleave",
+ .description = NULL_IF_CONFIG_SMALL("Temporally interleave audio inputs."),
+ .priv_size = sizeof(InterleaveContext),
+ .init = init,
+ .uninit = uninit,
+ .outputs = ainterleave_outputs,
+ .priv_class = &ainterleave_class,
+ .flags = AVFILTER_FLAG_DYNAMIC_INPUTS,
+};
+
+#endif
diff --git a/libavfilter/f_loop.c b/libavfilter/f_loop.c
new file mode 100644
index 0000000000..5a3280772e
--- /dev/null
+++ b/libavfilter/f_loop.c
@@ -0,0 +1,382 @@
+/*
+ * Copyright (c) 2016 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/audio_fifo.h"
+#include "libavutil/avassert.h"
+#include "libavutil/fifo.h"
+#include "libavutil/internal.h"
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#include "audio.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+typedef struct LoopContext {
+ const AVClass *class;
+
+ AVAudioFifo *fifo;
+ AVAudioFifo *left;
+ AVFrame **frames;
+ int nb_frames;
+ int current_frame;
+ int64_t start_pts;
+ int64_t duration;
+ int64_t current_sample;
+ int64_t nb_samples;
+ int64_t ignored_samples;
+
+ int loop;
+ int64_t size;
+ int64_t start;
+ int64_t pts;
+} LoopContext;
+
+#define AFLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+#define VFLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+#define OFFSET(x) offsetof(LoopContext, x)
+
+#if CONFIG_ALOOP_FILTER
+
+static int aconfig_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ LoopContext *s = ctx->priv;
+
+ s->fifo = av_audio_fifo_alloc(inlink->format, inlink->channels, 8192);
+ s->left = av_audio_fifo_alloc(inlink->format, inlink->channels, 8192);
+ if (!s->fifo || !s->left)
+ return AVERROR(ENOMEM);
+
+ return 0;
+}
+
+static av_cold void auninit(AVFilterContext *ctx)
+{
+ LoopContext *s = ctx->priv;
+
+ av_audio_fifo_free(s->fifo);
+ av_audio_fifo_free(s->left);
+}
+
+static int push_samples(AVFilterContext *ctx, int nb_samples)
+{
+ AVFilterLink *outlink = ctx->outputs[0];
+ LoopContext *s = ctx->priv;
+ AVFrame *out;
+ int ret, i = 0;
+
+ while (s->loop != 0 && i < nb_samples) {
+ out = ff_get_audio_buffer(outlink, FFMIN(nb_samples, s->nb_samples - s->current_sample));
+ if (!out)
+ return AVERROR(ENOMEM);
+ ret = av_audio_fifo_peek_at(s->fifo, (void **)out->extended_data, out->nb_samples, s->current_sample);
+ if (ret < 0) {
+ av_frame_free(&out);
+ return ret;
+ }
+ out->pts = s->pts;
+ out->nb_samples = ret;
+ s->pts += out->nb_samples;
+ i += out->nb_samples;
+ s->current_sample += out->nb_samples;
+
+ ret = ff_filter_frame(outlink, out);
+ if (ret < 0)
+ return ret;
+
+ if (s->current_sample >= s->nb_samples) {
+ s->current_sample = 0;
+
+ if (s->loop > 0)
+ s->loop--;
+ }
+ }
+
+ return ret;
+}
+
+static int afilter_frame(AVFilterLink *inlink, AVFrame *frame)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ LoopContext *s = ctx->priv;
+ int ret = 0;
+
+ if (s->ignored_samples + frame->nb_samples > s->start && s->size > 0 && s->loop != 0) {
+ if (s->nb_samples < s->size) {
+ int written = FFMIN(frame->nb_samples, s->size - s->nb_samples);
+ int drain = 0;
+
+ ret = av_audio_fifo_write(s->fifo, (void **)frame->extended_data, written);
+ if (ret < 0)
+ return ret;
+ if (!s->nb_samples) {
+ drain = FFMAX(0, s->start - s->ignored_samples);
+ s->pts = frame->pts;
+ av_audio_fifo_drain(s->fifo, drain);
+ s->pts += s->start - s->ignored_samples;
+ }
+ s->nb_samples += ret - drain;
+ drain = frame->nb_samples - written;
+ if (s->nb_samples == s->size && drain > 0) {
+ int ret2;
+
+ ret2 = av_audio_fifo_write(s->left, (void **)frame->extended_data, frame->nb_samples);
+ if (ret2 < 0)
+ return ret2;
+ av_audio_fifo_drain(s->left, drain);
+ }
+ frame->nb_samples = ret;
+ s->pts += ret;
+ ret = ff_filter_frame(outlink, frame);
+ } else {
+ int nb_samples = frame->nb_samples;
+
+ av_frame_free(&frame);
+ ret = push_samples(ctx, nb_samples);
+ }
+ } else {
+ s->ignored_samples += frame->nb_samples;
+ frame->pts = s->pts;
+ s->pts += frame->nb_samples;
+ ret = ff_filter_frame(outlink, frame);
+ }
+
+ return ret;
+}
+
+static int arequest_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ LoopContext *s = ctx->priv;
+ int ret = 0;
+
+ if ((!s->size) ||
+ (s->nb_samples < s->size) ||
+ (s->nb_samples >= s->size && s->loop == 0)) {
+ int nb_samples = av_audio_fifo_size(s->left);
+
+ if (s->loop == 0 && nb_samples > 0) {
+ AVFrame *out;
+
+ out = ff_get_audio_buffer(outlink, nb_samples);
+ if (!out)
+ return AVERROR(ENOMEM);
+ av_audio_fifo_read(s->left, (void **)out->extended_data, nb_samples);
+ out->pts = s->pts;
+ s->pts += nb_samples;
+ ret = ff_filter_frame(outlink, out);
+ if (ret < 0)
+ return ret;
+ }
+ ret = ff_request_frame(ctx->inputs[0]);
+ } else {
+ ret = push_samples(ctx, 1024);
+ }
+
+ if (ret == AVERROR_EOF && s->nb_samples > 0 && s->loop != 0) {
+ ret = push_samples(ctx, outlink->sample_rate);
+ }
+
+ return ret;
+}
+
+static const AVOption aloop_options[] = {
+ { "loop", "number of loops", OFFSET(loop), AV_OPT_TYPE_INT, {.i64 = 0 }, -1, INT_MAX, AFLAGS },
+ { "size", "max number of samples to loop", OFFSET(size), AV_OPT_TYPE_INT64, {.i64 = 0 }, 0, INT32_MAX, AFLAGS },
+ { "start", "set the loop start sample", OFFSET(start), AV_OPT_TYPE_INT64, {.i64 = 0 }, 0, INT64_MAX, AFLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(aloop);
+
+static const AVFilterPad ainputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = afilter_frame,
+ .config_props = aconfig_input,
+ },
+ { NULL }
+};
+
+static const AVFilterPad aoutputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .request_frame = arequest_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_aloop = {
+ .name = "aloop",
+ .description = NULL_IF_CONFIG_SMALL("Loop audio samples."),
+ .priv_size = sizeof(LoopContext),
+ .priv_class = &aloop_class,
+ .uninit = auninit,
+ .inputs = ainputs,
+ .outputs = aoutputs,
+};
+#endif /* CONFIG_ALOOP_FILTER */
+
+#if CONFIG_LOOP_FILTER
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ LoopContext *s = ctx->priv;
+
+ s->frames = av_calloc(s->size, sizeof(*s->frames));
+ if (!s->frames)
+ return AVERROR(ENOMEM);
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ LoopContext *s = ctx->priv;
+ int i;
+
+ for (i = 0; i < s->nb_frames; i++)
+ av_frame_free(&s->frames[i]);
+
+ av_freep(&s->frames);
+ s->nb_frames = 0;
+}
+
+static int push_frame(AVFilterContext *ctx)
+{
+ AVFilterLink *outlink = ctx->outputs[0];
+ LoopContext *s = ctx->priv;
+ int64_t pts;
+ int ret;
+
+ AVFrame *out = av_frame_clone(s->frames[s->current_frame]);
+
+ if (!out)
+ return AVERROR(ENOMEM);
+ out->pts += s->duration - s->start_pts;
+ pts = out->pts + av_frame_get_pkt_duration(out);
+ ret = ff_filter_frame(outlink, out);
+ s->current_frame++;
+
+ if (s->current_frame >= s->nb_frames) {
+ s->duration = pts;
+ s->current_frame = 0;
+
+ if (s->loop > 0)
+ s->loop--;
+ }
+
+ return ret;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ LoopContext *s = ctx->priv;
+ int ret = 0;
+
+ if (inlink->frame_count_out >= s->start && s->size > 0 && s->loop != 0) {
+ if (s->nb_frames < s->size) {
+ if (!s->nb_frames)
+ s->start_pts = frame->pts;
+ s->frames[s->nb_frames] = av_frame_clone(frame);
+ if (!s->frames[s->nb_frames]) {
+ av_frame_free(&frame);
+ return AVERROR(ENOMEM);
+ }
+ s->nb_frames++;
+ s->duration = frame->pts + av_frame_get_pkt_duration(frame);
+ ret = ff_filter_frame(outlink, frame);
+ } else {
+ av_frame_free(&frame);
+ ret = push_frame(ctx);
+ }
+ } else {
+ frame->pts += s->duration;
+ ret = ff_filter_frame(outlink, frame);
+ }
+
+ return ret;
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ LoopContext *s = ctx->priv;
+ int ret = 0;
+
+ if ((!s->size) ||
+ (s->nb_frames < s->size) ||
+ (s->nb_frames >= s->size && s->loop == 0)) {
+ ret = ff_request_frame(ctx->inputs[0]);
+ } else {
+ ret = push_frame(ctx);
+ }
+
+ if (ret == AVERROR_EOF && s->nb_frames > 0 && s->loop != 0) {
+ ret = push_frame(ctx);
+ }
+
+ return ret;
+}
+
+static const AVOption loop_options[] = {
+ { "loop", "number of loops", OFFSET(loop), AV_OPT_TYPE_INT, {.i64 = 0 }, -1, INT_MAX, VFLAGS },
+ { "size", "max number of frames to loop", OFFSET(size), AV_OPT_TYPE_INT64, {.i64 = 0 }, 0, INT16_MAX, VFLAGS },
+ { "start", "set the loop start frame", OFFSET(start), AV_OPT_TYPE_INT64, {.i64 = 0 }, 0, INT64_MAX, VFLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(loop);
+
+static const AVFilterPad inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_loop = {
+ .name = "loop",
+ .description = NULL_IF_CONFIG_SMALL("Loop video frames."),
+ .priv_size = sizeof(LoopContext),
+ .priv_class = &loop_class,
+ .init = init,
+ .uninit = uninit,
+ .inputs = inputs,
+ .outputs = outputs,
+};
+#endif /* CONFIG_LOOP_FILTER */
diff --git a/libavfilter/f_metadata.c b/libavfilter/f_metadata.c
new file mode 100644
index 0000000000..1f613ecb56
--- /dev/null
+++ b/libavfilter/f_metadata.c
@@ -0,0 +1,415 @@
+/*
+ * Copyright (c) 2016 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * filter for manipulating frame metadata
+ */
+
+#include <float.h>
+
+#include "libavutil/avassert.h"
+#include "libavutil/avstring.h"
+#include "libavutil/eval.h"
+#include "libavutil/internal.h"
+#include "libavutil/opt.h"
+#include "libavutil/timestamp.h"
+#include "libavformat/avio.h"
+#include "avfilter.h"
+#include "audio.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+enum MetadataMode {
+ METADATA_SELECT,
+ METADATA_ADD,
+ METADATA_MODIFY,
+ METADATA_DELETE,
+ METADATA_PRINT,
+ METADATA_NB
+};
+
+enum MetadataFunction {
+ METADATAF_SAME_STR,
+ METADATAF_STARTS_WITH,
+ METADATAF_LESS,
+ METADATAF_EQUAL,
+ METADATAF_GREATER,
+ METADATAF_EXPR,
+ METADATAF_NB
+};
+
+static const char *const var_names[] = {
+ "VALUE1",
+ "VALUE2",
+ NULL
+};
+
+enum var_name {
+ VAR_VALUE1,
+ VAR_VALUE2,
+ VAR_VARS_NB
+};
+
+typedef struct MetadataContext {
+ const AVClass *class;
+
+ int mode;
+ char *key;
+ char *value;
+ int function;
+
+ char *expr_str;
+ AVExpr *expr;
+ double var_values[VAR_VARS_NB];
+
+ AVIOContext* avio_context;
+ char *file_str;
+
+ int (*compare)(struct MetadataContext *s,
+ const char *value1, const char *value2);
+ void (*print)(AVFilterContext *ctx, const char *msg, ...) av_printf_format(2, 3);
+} MetadataContext;
+
+#define OFFSET(x) offsetof(MetadataContext, x)
+#define DEFINE_OPTIONS(filt_name, FLAGS) \
+static const AVOption filt_name##_options[] = { \
+ { "mode", "set a mode of operation", OFFSET(mode), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, METADATA_NB-1, FLAGS, "mode" }, \
+ { "select", "select frame", 0, AV_OPT_TYPE_CONST, {.i64 = METADATA_SELECT }, 0, 0, FLAGS, "mode" }, \
+ { "add", "add new metadata", 0, AV_OPT_TYPE_CONST, {.i64 = METADATA_ADD }, 0, 0, FLAGS, "mode" }, \
+ { "modify", "modify metadata", 0, AV_OPT_TYPE_CONST, {.i64 = METADATA_MODIFY }, 0, 0, FLAGS, "mode" }, \
+ { "delete", "delete metadata", 0, AV_OPT_TYPE_CONST, {.i64 = METADATA_DELETE }, 0, 0, FLAGS, "mode" }, \
+ { "print", "print metadata", 0, AV_OPT_TYPE_CONST, {.i64 = METADATA_PRINT }, 0, 0, FLAGS, "mode" }, \
+ { "key", "set metadata key", OFFSET(key), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, FLAGS }, \
+ { "value", "set metadata value", OFFSET(value), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, FLAGS }, \
+ { "function", "function for comparing values", OFFSET(function), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, METADATAF_NB-1, FLAGS, "function" }, \
+ { "same_str", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = METADATAF_SAME_STR }, 0, 3, FLAGS, "function" }, \
+ { "starts_with", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = METADATAF_STARTS_WITH }, 0, 0, FLAGS, "function" }, \
+ { "less", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = METADATAF_LESS }, 0, 3, FLAGS, "function" }, \
+ { "equal", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = METADATAF_EQUAL }, 0, 3, FLAGS, "function" }, \
+ { "greater", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = METADATAF_GREATER }, 0, 3, FLAGS, "function" }, \
+ { "expr", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = METADATAF_EXPR }, 0, 3, FLAGS, "function" }, \
+ { "expr", "set expression for expr function", OFFSET(expr_str), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, FLAGS }, \
+ { "file", "set file where to print metadata information", OFFSET(file_str), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, FLAGS }, \
+ { NULL } \
+}
+
+static int same_str(MetadataContext *s, const char *value1, const char *value2)
+{
+ return !strcmp(value1, value2);
+}
+
+static int starts_with(MetadataContext *s, const char *value1, const char *value2)
+{
+ return !strncmp(value1, value2, strlen(value2));
+}
+
+static int equal(MetadataContext *s, const char *value1, const char *value2)
+{
+ float f1, f2;
+
+ if (sscanf(value1, "%f", &f1) + sscanf(value2, "%f", &f2) != 2)
+ return 0;
+
+ return fabsf(f1 - f2) < FLT_EPSILON;
+}
+
+static int less(MetadataContext *s, const char *value1, const char *value2)
+{
+ float f1, f2;
+
+ if (sscanf(value1, "%f", &f1) + sscanf(value2, "%f", &f2) != 2)
+ return 0;
+
+ return (f1 - f2) < FLT_EPSILON;
+}
+
+static int greater(MetadataContext *s, const char *value1, const char *value2)
+{
+ float f1, f2;
+
+ if (sscanf(value1, "%f", &f1) + sscanf(value2, "%f", &f2) != 2)
+ return 0;
+
+ return (f2 - f1) < FLT_EPSILON;
+}
+
+static int parse_expr(MetadataContext *s, const char *value1, const char *value2)
+{
+ double f1, f2;
+
+ if (sscanf(value1, "%lf", &f1) + sscanf(value2, "%lf", &f2) != 2)
+ return 0;
+
+ s->var_values[VAR_VALUE1] = f1;
+ s->var_values[VAR_VALUE2] = f2;
+
+ return av_expr_eval(s->expr, s->var_values, NULL);
+}
+
+static void print_log(AVFilterContext *ctx, const char *msg, ...)
+{
+ va_list argument_list;
+
+ va_start(argument_list, msg);
+ if (msg)
+ av_vlog(ctx, AV_LOG_INFO, msg, argument_list);
+ va_end(argument_list);
+}
+
+static void print_file(AVFilterContext *ctx, const char *msg, ...)
+{
+ MetadataContext *s = ctx->priv;
+ va_list argument_list;
+
+ va_start(argument_list, msg);
+ if (msg) {
+ char buf[128];
+ vsnprintf(buf, sizeof(buf), msg, argument_list);
+ avio_write(s->avio_context, buf, av_strnlen(buf, sizeof(buf)));
+ }
+ va_end(argument_list);
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ MetadataContext *s = ctx->priv;
+ int ret;
+
+ if (!s->key && s->mode != METADATA_PRINT && s->mode != METADATA_DELETE) {
+ av_log(ctx, AV_LOG_WARNING, "Metadata key must be set\n");
+ return AVERROR(EINVAL);
+ }
+
+ if ((s->mode == METADATA_MODIFY ||
+ s->mode == METADATA_ADD) && !s->value) {
+ av_log(ctx, AV_LOG_WARNING, "Missing metadata value\n");
+ return AVERROR(EINVAL);
+ }
+
+ switch (s->function) {
+ case METADATAF_SAME_STR:
+ s->compare = same_str;
+ break;
+ case METADATAF_STARTS_WITH:
+ s->compare = starts_with;
+ break;
+ case METADATAF_LESS:
+ s->compare = less;
+ break;
+ case METADATAF_EQUAL:
+ s->compare = equal;
+ break;
+ case METADATAF_GREATER:
+ s->compare = greater;
+ break;
+ case METADATAF_EXPR:
+ s->compare = parse_expr;
+ break;
+ default:
+ av_assert0(0);
+ };
+
+ if (s->function == METADATAF_EXPR) {
+ if (!s->expr_str) {
+ av_log(ctx, AV_LOG_WARNING, "expr option not set\n");
+ return AVERROR(EINVAL);
+ }
+ if ((ret = av_expr_parse(&s->expr, s->expr_str,
+ var_names, NULL, NULL, NULL, NULL, 0, ctx)) < 0) {
+ av_log(ctx, AV_LOG_ERROR, "Error while parsing expression '%s'\n", s->expr_str);
+ return ret;
+ }
+ }
+
+ if (s->mode == METADATA_PRINT && s->file_str) {
+ s->print = print_file;
+ } else {
+ s->print = print_log;
+ }
+
+ s->avio_context = NULL;
+ if (s->file_str) {
+ if (!strcmp("-", s->file_str)) {
+ ret = avio_open(&s->avio_context, "pipe:1", AVIO_FLAG_WRITE);
+ } else {
+ ret = avio_open(&s->avio_context, s->file_str, AVIO_FLAG_WRITE);
+ }
+
+ if (ret < 0) {
+ char buf[128];
+ av_strerror(ret, buf, sizeof(buf));
+ av_log(ctx, AV_LOG_ERROR, "Could not open %s: %s\n",
+ s->file_str, buf);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ MetadataContext *s = ctx->priv;
+
+ if (s->avio_context) {
+ avio_closep(&s->avio_context);
+ }
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ MetadataContext *s = ctx->priv;
+ AVDictionary **metadata = avpriv_frame_get_metadatap(frame);
+ AVDictionaryEntry *e;
+
+ if (!*metadata)
+ return ff_filter_frame(outlink, frame);
+
+ e = av_dict_get(*metadata, !s->key ? "" : s->key, NULL,
+ !s->key ? AV_DICT_IGNORE_SUFFIX: 0);
+
+ switch (s->mode) {
+ case METADATA_SELECT:
+ if (!s->value && e && e->value) {
+ return ff_filter_frame(outlink, frame);
+ } else if (s->value && e && e->value &&
+ s->compare(s, e->value, s->value)) {
+ return ff_filter_frame(outlink, frame);
+ }
+ break;
+ case METADATA_ADD:
+ if (e && e->value) {
+ ;
+ } else {
+ av_dict_set(metadata, s->key, s->value, 0);
+ }
+ return ff_filter_frame(outlink, frame);
+ break;
+ case METADATA_MODIFY:
+ if (e && e->value) {
+ av_dict_set(metadata, s->key, s->value, 0);
+ }
+ return ff_filter_frame(outlink, frame);
+ break;
+ case METADATA_PRINT:
+ if (!s->key && e) {
+ s->print(ctx, "frame:%-4"PRId64" pts:%-7s pts_time:%-7s\n",
+ inlink->frame_count_out, av_ts2str(frame->pts), av_ts2timestr(frame->pts, &inlink->time_base));
+ s->print(ctx, "%s=%s\n", e->key, e->value);
+ while ((e = av_dict_get(*metadata, "", e, AV_DICT_IGNORE_SUFFIX)) != NULL) {
+ s->print(ctx, "%s=%s\n", e->key, e->value);
+ }
+ } else if (e && e->value && (!s->value || (e->value && s->compare(s, e->value, s->value)))) {
+ s->print(ctx, "frame:%-4"PRId64" pts:%-7s pts_time:%-7s\n",
+ inlink->frame_count_out, av_ts2str(frame->pts), av_ts2timestr(frame->pts, &inlink->time_base));
+ s->print(ctx, "%s=%s\n", s->key, e->value);
+ }
+ return ff_filter_frame(outlink, frame);
+ break;
+ case METADATA_DELETE:
+ if (!s->key) {
+ av_dict_free(metadata);
+ } else if (e && e->value && (!s->value || s->compare(s, e->value, s->value))) {
+ av_dict_set(metadata, s->key, NULL, 0);
+ }
+ return ff_filter_frame(outlink, frame);
+ break;
+ default:
+ av_assert0(0);
+ };
+
+ av_frame_free(&frame);
+
+ return 0;
+}
+
+#if CONFIG_AMETADATA_FILTER
+
+DEFINE_OPTIONS(ametadata, AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM);
+AVFILTER_DEFINE_CLASS(ametadata);
+
+static const AVFilterPad ainputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad aoutputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_ametadata = {
+ .name = "ametadata",
+ .description = NULL_IF_CONFIG_SMALL("Manipulate audio frame metadata."),
+ .priv_size = sizeof(MetadataContext),
+ .priv_class = &ametadata_class,
+ .init = init,
+ .uninit = uninit,
+ .inputs = ainputs,
+ .outputs = aoutputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+};
+#endif /* CONFIG_AMETADATA_FILTER */
+
+#if CONFIG_METADATA_FILTER
+
+DEFINE_OPTIONS(metadata, AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM);
+AVFILTER_DEFINE_CLASS(metadata);
+
+static const AVFilterPad inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_metadata = {
+ .name = "metadata",
+ .description = NULL_IF_CONFIG_SMALL("Manipulate video frame metadata."),
+ .priv_size = sizeof(MetadataContext),
+ .priv_class = &metadata_class,
+ .init = init,
+ .uninit = uninit,
+ .inputs = inputs,
+ .outputs = outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+};
+#endif /* CONFIG_METADATA_FILTER */
diff --git a/libavfilter/f_perms.c b/libavfilter/f_perms.c
new file mode 100644
index 0000000000..40b8811149
--- /dev/null
+++ b/libavfilter/f_perms.c
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 2013 Clément Bœsch
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/lfg.h"
+#include "libavutil/opt.h"
+#include "libavutil/random_seed.h"
+#include "audio.h"
+#include "video.h"
+
+enum mode {
+ MODE_NONE,
+ MODE_RO,
+ MODE_RW,
+ MODE_TOGGLE,
+ MODE_RANDOM,
+ NB_MODES
+};
+
+typedef struct {
+ const AVClass *class;
+ AVLFG lfg;
+ int64_t random_seed;
+ int mode;
+} PermsContext;
+
+#define OFFSET(x) offsetof(PermsContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption options[] = {
+ { "mode", "select permissions mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64 = MODE_NONE}, MODE_NONE, NB_MODES-1, FLAGS, "mode" },
+ { "none", "do nothing", 0, AV_OPT_TYPE_CONST, {.i64 = MODE_NONE}, INT_MIN, INT_MAX, FLAGS, "mode" },
+ { "ro", "set all output frames read-only", 0, AV_OPT_TYPE_CONST, {.i64 = MODE_RO}, INT_MIN, INT_MAX, FLAGS, "mode" },
+ { "rw", "set all output frames writable", 0, AV_OPT_TYPE_CONST, {.i64 = MODE_RW}, INT_MIN, INT_MAX, FLAGS, "mode" },
+ { "toggle", "switch permissions", 0, AV_OPT_TYPE_CONST, {.i64 = MODE_TOGGLE}, INT_MIN, INT_MAX, FLAGS, "mode" },
+ { "random", "set permissions randomly", 0, AV_OPT_TYPE_CONST, {.i64 = MODE_RANDOM}, INT_MIN, INT_MAX, FLAGS, "mode" },
+ { "seed", "set the seed for the random mode", OFFSET(random_seed), AV_OPT_TYPE_INT64, {.i64 = -1}, -1, UINT32_MAX, FLAGS },
+ { NULL }
+};
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ PermsContext *s = ctx->priv;
+
+ if (s->mode == MODE_RANDOM) {
+ uint32_t seed;
+
+ if (s->random_seed == -1)
+ s->random_seed = av_get_random_seed();
+ seed = s->random_seed;
+ av_log(ctx, AV_LOG_INFO, "random seed: 0x%08x\n", seed);
+ av_lfg_init(&s->lfg, seed);
+ }
+
+ return 0;
+}
+
+enum perm { RO, RW };
+static const char * const perm_str[2] = { "RO", "RW" };
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
+{
+ int ret;
+ AVFilterContext *ctx = inlink->dst;
+ PermsContext *s = ctx->priv;
+ AVFrame *out = frame;
+ enum perm in_perm = av_frame_is_writable(frame) ? RW : RO;
+ enum perm out_perm;
+
+ switch (s->mode) {
+ case MODE_TOGGLE: out_perm = in_perm == RO ? RW : RO; break;
+ case MODE_RANDOM: out_perm = av_lfg_get(&s->lfg) & 1 ? RW : RO; break;
+ case MODE_RO: out_perm = RO; break;
+ case MODE_RW: out_perm = RW; break;
+ default: out_perm = in_perm; break;
+ }
+
+ av_log(ctx, AV_LOG_VERBOSE, "%s -> %s%s\n",
+ perm_str[in_perm], perm_str[out_perm],
+ in_perm == out_perm ? " (no-op)" : "");
+
+ if (in_perm == RO && out_perm == RW) {
+ if ((ret = av_frame_make_writable(frame)) < 0)
+ return ret;
+ } else if (in_perm == RW && out_perm == RO) {
+ out = av_frame_clone(frame);
+ if (!out)
+ return AVERROR(ENOMEM);
+ }
+
+ ret = ff_filter_frame(ctx->outputs[0], out);
+
+ if (in_perm == RW && out_perm == RO)
+ av_frame_free(&frame);
+ return ret;
+}
+
+#if CONFIG_APERMS_FILTER
+
+#define aperms_options options
+AVFILTER_DEFINE_CLASS(aperms);
+
+static const AVFilterPad aperms_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad aperms_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_aperms = {
+ .name = "aperms",
+ .description = NULL_IF_CONFIG_SMALL("Set permissions for the output audio frame."),
+ .init = init,
+ .priv_size = sizeof(PermsContext),
+ .inputs = aperms_inputs,
+ .outputs = aperms_outputs,
+ .priv_class = &aperms_class,
+};
+#endif /* CONFIG_APERMS_FILTER */
+
+#if CONFIG_PERMS_FILTER
+
+#define perms_options options
+AVFILTER_DEFINE_CLASS(perms);
+
+static const AVFilterPad perms_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad perms_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_perms = {
+ .name = "perms",
+ .description = NULL_IF_CONFIG_SMALL("Set permissions for the output video frame."),
+ .init = init,
+ .priv_size = sizeof(PermsContext),
+ .inputs = perms_inputs,
+ .outputs = perms_outputs,
+ .priv_class = &perms_class,
+};
+#endif /* CONFIG_PERMS_FILTER */
diff --git a/libavfilter/f_realtime.c b/libavfilter/f_realtime.c
new file mode 100644
index 0000000000..171c16aaaa
--- /dev/null
+++ b/libavfilter/f_realtime.c
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2015 Nicolas George
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/opt.h"
+#include "libavutil/time.h"
+#include "avfilter.h"
+#include "internal.h"
+
+typedef struct RealtimeContext {
+ const AVClass *class;
+ int64_t delta;
+ int64_t limit;
+ unsigned inited;
+} RealtimeContext;
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
+{
+ AVFilterContext *ctx = inlink->dst;
+ RealtimeContext *s = ctx->priv;
+
+ if (frame->pts != AV_NOPTS_VALUE) {
+ int64_t pts = av_rescale_q(frame->pts, inlink->time_base, AV_TIME_BASE_Q);
+ int64_t now = av_gettime_relative();
+ int64_t sleep = pts - now + s->delta;
+ if (!s->inited) {
+ s->inited = 1;
+ sleep = 0;
+ s->delta = now - pts;
+ }
+ if (sleep > s->limit || sleep < -s->limit) {
+ av_log(ctx, AV_LOG_WARNING,
+ "time discontinuity detected: %"PRIi64" us, resetting\n",
+ sleep);
+ sleep = 0;
+ s->delta = now - pts;
+ }
+ if (sleep > 0) {
+ av_log(ctx, AV_LOG_DEBUG, "sleeping %"PRIi64" us\n", sleep);
+ for (; sleep > 600000000; sleep -= 600000000)
+ av_usleep(600000000);
+ av_usleep(sleep);
+ }
+ }
+ return ff_filter_frame(inlink->dst->outputs[0], frame);
+}
+
+#define OFFSET(x) offsetof(RealtimeContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
+static const AVOption options[] = {
+ { "limit", "sleep time limit", OFFSET(limit), AV_OPT_TYPE_DURATION, { .i64 = 2000000 }, 0, INT64_MAX, FLAGS },
+ { NULL }
+};
+
+#if CONFIG_REALTIME_FILTER
+#define realtime_options options
+AVFILTER_DEFINE_CLASS(realtime);
+
+static const AVFilterPad avfilter_vf_realtime_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad avfilter_vf_realtime_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_realtime = {
+ .name = "realtime",
+ .description = NULL_IF_CONFIG_SMALL("Slow down filtering to match realtime."),
+ .priv_size = sizeof(RealtimeContext),
+ .priv_class = &realtime_class,
+ .inputs = avfilter_vf_realtime_inputs,
+ .outputs = avfilter_vf_realtime_outputs,
+};
+#endif /* CONFIG_REALTIME_FILTER */
+
+#if CONFIG_AREALTIME_FILTER
+
+#define arealtime_options options
+AVFILTER_DEFINE_CLASS(arealtime);
+
+static const AVFilterPad arealtime_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad arealtime_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_arealtime = {
+ .name = "arealtime",
+ .description = NULL_IF_CONFIG_SMALL("Slow down filtering to match realtime."),
+ .priv_size = sizeof(RealtimeContext),
+ .priv_class = &arealtime_class,
+ .inputs = arealtime_inputs,
+ .outputs = arealtime_outputs,
+};
+#endif /* CONFIG_AREALTIME_FILTER */
diff --git a/libavfilter/f_reverse.c b/libavfilter/f_reverse.c
new file mode 100644
index 0000000000..5bf71b38ed
--- /dev/null
+++ b/libavfilter/f_reverse.c
@@ -0,0 +1,251 @@
+/*
+ * Copyright (c) 2015 Derek Buitenhuis
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+#define DEFAULT_LENGTH 300
+
+typedef struct ReverseContext {
+ int nb_frames;
+ AVFrame **frames;
+ unsigned int frames_size;
+ unsigned int pts_size;
+ int64_t *pts;
+ int flush_idx;
+} ReverseContext;
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ ReverseContext *s = ctx->priv;
+
+ s->pts = av_fast_realloc(NULL, &s->pts_size,
+ DEFAULT_LENGTH * sizeof(*(s->pts)));
+ if (!s->pts)
+ return AVERROR(ENOMEM);
+
+ s->frames = av_fast_realloc(NULL, &s->frames_size,
+ DEFAULT_LENGTH * sizeof(*(s->frames)));
+ if (!s->frames) {
+ av_freep(&s->pts);
+ return AVERROR(ENOMEM);
+ }
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ ReverseContext *s = ctx->priv;
+
+ av_freep(&s->pts);
+ av_freep(&s->frames);
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ ReverseContext *s = ctx->priv;
+ void *ptr;
+
+ if (s->nb_frames + 1 > s->pts_size / sizeof(*(s->pts))) {
+ ptr = av_fast_realloc(s->pts, &s->pts_size, s->pts_size * 2);
+ if (!ptr)
+ return AVERROR(ENOMEM);
+ s->pts = ptr;
+ }
+
+ if (s->nb_frames + 1 > s->frames_size / sizeof(*(s->frames))) {
+ ptr = av_fast_realloc(s->frames, &s->frames_size, s->frames_size * 2);
+ if (!ptr)
+ return AVERROR(ENOMEM);
+ s->frames = ptr;
+ }
+
+ s->frames[s->nb_frames] = in;
+ s->pts[s->nb_frames] = in->pts;
+ s->nb_frames++;
+
+ return 0;
+}
+
+#if CONFIG_REVERSE_FILTER
+
+static int request_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ ReverseContext *s = ctx->priv;
+ int ret;
+
+ ret = ff_request_frame(ctx->inputs[0]);
+
+ if (ret == AVERROR_EOF && s->nb_frames > 0) {
+ AVFrame *out = s->frames[s->nb_frames - 1];
+ out->pts = s->pts[s->flush_idx++];
+ ret = ff_filter_frame(outlink, out);
+ s->nb_frames--;
+ }
+
+ return ret;
+}
+
+static const AVFilterPad reverse_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad reverse_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_reverse = {
+ .name = "reverse",
+ .description = NULL_IF_CONFIG_SMALL("Reverse a clip."),
+ .priv_size = sizeof(ReverseContext),
+ .init = init,
+ .uninit = uninit,
+ .inputs = reverse_inputs,
+ .outputs = reverse_outputs,
+};
+
+#endif /* CONFIG_REVERSE_FILTER */
+
+#if CONFIG_AREVERSE_FILTER
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *formats;
+ AVFilterChannelLayouts *layouts;
+ int ret;
+
+ layouts = ff_all_channel_counts();
+ if (!layouts)
+ return AVERROR(ENOMEM);
+ ret = ff_set_common_channel_layouts(ctx, layouts);
+ if (ret < 0)
+ return ret;
+
+ ret = ff_set_common_formats(ctx, ff_planar_sample_fmts());
+ if (ret < 0)
+ return ret;
+
+ formats = ff_all_samplerates();
+ if (!formats)
+ return AVERROR(ENOMEM);
+ return ff_set_common_samplerates(ctx, formats);
+}
+
+static int areverse_request_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ ReverseContext *s = ctx->priv;
+ int ret, p, i, j;
+
+ ret = ff_request_frame(ctx->inputs[0]);
+
+ if (ret == AVERROR_EOF && s->nb_frames > 0) {
+ AVFrame *out = s->frames[s->nb_frames - 1];
+ out->pts = s->pts[s->flush_idx++];
+
+ for (p = 0; p < outlink->channels; p++) {
+ switch (outlink->format) {
+ case AV_SAMPLE_FMT_U8P: {
+ uint8_t *dst = (uint8_t *)out->extended_data[p];
+ for (i = 0, j = out->nb_samples - 1; i < j; i++, j--)
+ FFSWAP(uint8_t, dst[i], dst[j]);
+ }
+ break;
+ case AV_SAMPLE_FMT_S16P: {
+ int16_t *dst = (int16_t *)out->extended_data[p];
+ for (i = 0, j = out->nb_samples - 1; i < j; i++, j--)
+ FFSWAP(int16_t, dst[i], dst[j]);
+ }
+ break;
+ case AV_SAMPLE_FMT_S32P: {
+ int32_t *dst = (int32_t *)out->extended_data[p];
+ for (i = 0, j = out->nb_samples - 1; i < j; i++, j--)
+ FFSWAP(int32_t, dst[i], dst[j]);
+ }
+ break;
+ case AV_SAMPLE_FMT_FLTP: {
+ float *dst = (float *)out->extended_data[p];
+ for (i = 0, j = out->nb_samples - 1; i < j; i++, j--)
+ FFSWAP(float, dst[i], dst[j]);
+ }
+ break;
+ case AV_SAMPLE_FMT_DBLP: {
+ double *dst = (double *)out->extended_data[p];
+ for (i = 0, j = out->nb_samples - 1; i < j; i++, j--)
+ FFSWAP(double, dst[i], dst[j]);
+ }
+ break;
+ }
+ }
+
+ ret = ff_filter_frame(outlink, out);
+ s->nb_frames--;
+ }
+
+ return ret;
+}
+
+static const AVFilterPad areverse_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ .needs_writable = 1,
+ },
+ { NULL }
+};
+
+static const AVFilterPad areverse_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .request_frame = areverse_request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_areverse = {
+ .name = "areverse",
+ .description = NULL_IF_CONFIG_SMALL("Reverse an audio clip."),
+ .query_formats = query_formats,
+ .priv_size = sizeof(ReverseContext),
+ .init = init,
+ .uninit = uninit,
+ .inputs = areverse_inputs,
+ .outputs = areverse_outputs,
+};
+
+#endif /* CONFIG_AREVERSE_FILTER */
diff --git a/libavfilter/f_select.c b/libavfilter/f_select.c
new file mode 100644
index 0000000000..03c1c0f3f3
--- /dev/null
+++ b/libavfilter/f_select.c
@@ -0,0 +1,532 @@
+/*
+ * Copyright (c) 2011 Stefano Sabatini
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * filter for selecting which frame passes in the filterchain
+ */
+
+#include "libavutil/avstring.h"
+#include "libavutil/eval.h"
+#include "libavutil/fifo.h"
+#include "libavutil/internal.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixelutils.h"
+#include "avfilter.h"
+#include "audio.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+static const char *const var_names[] = {
+ "TB", ///< timebase
+
+ "pts", ///< original pts in the file of the frame
+ "start_pts", ///< first PTS in the stream, expressed in TB units
+ "prev_pts", ///< previous frame PTS
+ "prev_selected_pts", ///< previous selected frame PTS
+
+ "t", ///< timestamp expressed in seconds
+ "start_t", ///< first PTS in the stream, expressed in seconds
+ "prev_t", ///< previous frame time
+ "prev_selected_t", ///< previously selected time
+
+ "pict_type", ///< the type of picture in the movie
+ "I",
+ "P",
+ "B",
+ "S",
+ "SI",
+ "SP",
+ "BI",
+ "PICT_TYPE_I",
+ "PICT_TYPE_P",
+ "PICT_TYPE_B",
+ "PICT_TYPE_S",
+ "PICT_TYPE_SI",
+ "PICT_TYPE_SP",
+ "PICT_TYPE_BI",
+
+ "interlace_type", ///< the frame interlace type
+ "PROGRESSIVE",
+ "TOPFIRST",
+ "BOTTOMFIRST",
+
+ "consumed_samples_n",///< number of samples consumed by the filter (only audio)
+ "samples_n", ///< number of samples in the current frame (only audio)
+ "sample_rate", ///< sample rate (only audio)
+
+ "n", ///< frame number (starting from zero)
+ "selected_n", ///< selected frame number (starting from zero)
+ "prev_selected_n", ///< number of the last selected frame
+
+ "key", ///< tell if the frame is a key frame
+ "pos", ///< original position in the file of the frame
+
+ "scene",
+
+ "concatdec_select", ///< frame is within the interval set by the concat demuxer
+
+ NULL
+};
+
+enum var_name {
+ VAR_TB,
+
+ VAR_PTS,
+ VAR_START_PTS,
+ VAR_PREV_PTS,
+ VAR_PREV_SELECTED_PTS,
+
+ VAR_T,
+ VAR_START_T,
+ VAR_PREV_T,
+ VAR_PREV_SELECTED_T,
+
+ VAR_PICT_TYPE,
+ VAR_I,
+ VAR_P,
+ VAR_B,
+ VAR_S,
+ VAR_SI,
+ VAR_SP,
+ VAR_BI,
+ VAR_PICT_TYPE_I,
+ VAR_PICT_TYPE_P,
+ VAR_PICT_TYPE_B,
+ VAR_PICT_TYPE_S,
+ VAR_PICT_TYPE_SI,
+ VAR_PICT_TYPE_SP,
+ VAR_PICT_TYPE_BI,
+
+ VAR_INTERLACE_TYPE,
+ VAR_INTERLACE_TYPE_P,
+ VAR_INTERLACE_TYPE_T,
+ VAR_INTERLACE_TYPE_B,
+
+ VAR_CONSUMED_SAMPLES_N,
+ VAR_SAMPLES_N,
+ VAR_SAMPLE_RATE,
+
+ VAR_N,
+ VAR_SELECTED_N,
+ VAR_PREV_SELECTED_N,
+
+ VAR_KEY,
+ VAR_POS,
+
+ VAR_SCENE,
+
+ VAR_CONCATDEC_SELECT,
+
+ VAR_VARS_NB
+};
+
+typedef struct SelectContext {
+ const AVClass *class;
+ char *expr_str;
+ AVExpr *expr;
+ double var_values[VAR_VARS_NB];
+ int do_scene_detect; ///< 1 if the expression requires scene detection variables, 0 otherwise
+ av_pixelutils_sad_fn sad; ///< Sum of the absolute difference function (scene detect only)
+ double prev_mafd; ///< previous MAFD (scene detect only)
+ AVFrame *prev_picref; ///< previous frame (scene detect only)
+ double select;
+ int select_out; ///< mark the selected output pad index
+ int nb_outputs;
+} SelectContext;
+
+#define OFFSET(x) offsetof(SelectContext, x)
+#define DEFINE_OPTIONS(filt_name, FLAGS) \
+static const AVOption filt_name##_options[] = { \
+ { "expr", "set an expression to use for selecting frames", OFFSET(expr_str), AV_OPT_TYPE_STRING, { .str = "1" }, .flags=FLAGS }, \
+ { "e", "set an expression to use for selecting frames", OFFSET(expr_str), AV_OPT_TYPE_STRING, { .str = "1" }, .flags=FLAGS }, \
+ { "outputs", "set the number of outputs", OFFSET(nb_outputs), AV_OPT_TYPE_INT, {.i64 = 1}, 1, INT_MAX, .flags=FLAGS }, \
+ { "n", "set the number of outputs", OFFSET(nb_outputs), AV_OPT_TYPE_INT, {.i64 = 1}, 1, INT_MAX, .flags=FLAGS }, \
+ { NULL } \
+}
+
+static int request_frame(AVFilterLink *outlink);
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ SelectContext *select = ctx->priv;
+ int i, ret;
+
+ if ((ret = av_expr_parse(&select->expr, select->expr_str,
+ var_names, NULL, NULL, NULL, NULL, 0, ctx)) < 0) {
+ av_log(ctx, AV_LOG_ERROR, "Error while parsing expression '%s'\n",
+ select->expr_str);
+ return ret;
+ }
+ select->do_scene_detect = !!strstr(select->expr_str, "scene");
+
+ for (i = 0; i < select->nb_outputs; i++) {
+ AVFilterPad pad = { 0 };
+
+ pad.name = av_asprintf("output%d", i);
+ if (!pad.name)
+ return AVERROR(ENOMEM);
+ pad.type = ctx->filter->inputs[0].type;
+ pad.request_frame = request_frame;
+ ff_insert_outpad(ctx, i, &pad);
+ }
+
+ return 0;
+}
+
+#define INTERLACE_TYPE_P 0
+#define INTERLACE_TYPE_T 1
+#define INTERLACE_TYPE_B 2
+
+static int config_input(AVFilterLink *inlink)
+{
+ SelectContext *select = inlink->dst->priv;
+
+ select->var_values[VAR_N] = 0.0;
+ select->var_values[VAR_SELECTED_N] = 0.0;
+
+ select->var_values[VAR_TB] = av_q2d(inlink->time_base);
+
+ select->var_values[VAR_PREV_PTS] = NAN;
+ select->var_values[VAR_PREV_SELECTED_PTS] = NAN;
+ select->var_values[VAR_PREV_SELECTED_T] = NAN;
+ select->var_values[VAR_PREV_T] = NAN;
+ select->var_values[VAR_START_PTS] = NAN;
+ select->var_values[VAR_START_T] = NAN;
+
+ select->var_values[VAR_I] = AV_PICTURE_TYPE_I;
+ select->var_values[VAR_P] = AV_PICTURE_TYPE_P;
+ select->var_values[VAR_B] = AV_PICTURE_TYPE_B;
+ select->var_values[VAR_SI] = AV_PICTURE_TYPE_SI;
+ select->var_values[VAR_SP] = AV_PICTURE_TYPE_SP;
+ select->var_values[VAR_BI] = AV_PICTURE_TYPE_BI;
+ select->var_values[VAR_PICT_TYPE_I] = AV_PICTURE_TYPE_I;
+ select->var_values[VAR_PICT_TYPE_P] = AV_PICTURE_TYPE_P;
+ select->var_values[VAR_PICT_TYPE_B] = AV_PICTURE_TYPE_B;
+ select->var_values[VAR_PICT_TYPE_SI] = AV_PICTURE_TYPE_SI;
+ select->var_values[VAR_PICT_TYPE_SP] = AV_PICTURE_TYPE_SP;
+ select->var_values[VAR_PICT_TYPE_BI] = AV_PICTURE_TYPE_BI;
+
+ select->var_values[VAR_INTERLACE_TYPE_P] = INTERLACE_TYPE_P;
+ select->var_values[VAR_INTERLACE_TYPE_T] = INTERLACE_TYPE_T;
+ select->var_values[VAR_INTERLACE_TYPE_B] = INTERLACE_TYPE_B;
+
+ select->var_values[VAR_PICT_TYPE] = NAN;
+ select->var_values[VAR_INTERLACE_TYPE] = NAN;
+ select->var_values[VAR_SCENE] = NAN;
+ select->var_values[VAR_CONSUMED_SAMPLES_N] = NAN;
+ select->var_values[VAR_SAMPLES_N] = NAN;
+
+ select->var_values[VAR_SAMPLE_RATE] =
+ inlink->type == AVMEDIA_TYPE_AUDIO ? inlink->sample_rate : NAN;
+
+ if (select->do_scene_detect) {
+ select->sad = av_pixelutils_get_sad_fn(3, 3, 2, select); // 8x8 both sources aligned
+ if (!select->sad)
+ return AVERROR(EINVAL);
+ }
+ return 0;
+}
+
+static double get_scene_score(AVFilterContext *ctx, AVFrame *frame)
+{
+ double ret = 0;
+ SelectContext *select = ctx->priv;
+ AVFrame *prev_picref = select->prev_picref;
+
+ if (prev_picref &&
+ frame->height == prev_picref->height &&
+ frame->width == prev_picref->width) {
+ int x, y, nb_sad = 0;
+ int64_t sad = 0;
+ double mafd, diff;
+ uint8_t *p1 = frame->data[0];
+ uint8_t *p2 = prev_picref->data[0];
+ const int p1_linesize = frame->linesize[0];
+ const int p2_linesize = prev_picref->linesize[0];
+
+ for (y = 0; y < frame->height - 7; y += 8) {
+ for (x = 0; x < frame->width*3 - 7; x += 8) {
+ sad += select->sad(p1 + x, p1_linesize, p2 + x, p2_linesize);
+ nb_sad += 8 * 8;
+ }
+ p1 += 8 * p1_linesize;
+ p2 += 8 * p2_linesize;
+ }
+ emms_c();
+ mafd = nb_sad ? (double)sad / nb_sad : 0;
+ diff = fabs(mafd - select->prev_mafd);
+ ret = av_clipf(FFMIN(mafd, diff) / 100., 0, 1);
+ select->prev_mafd = mafd;
+ av_frame_free(&prev_picref);
+ }
+ select->prev_picref = av_frame_clone(frame);
+ return ret;
+}
+
+static double get_concatdec_select(AVFrame *frame, int64_t pts)
+{
+ AVDictionary *metadata = av_frame_get_metadata(frame);
+ AVDictionaryEntry *start_time_entry = av_dict_get(metadata, "lavf.concatdec.start_time", NULL, 0);
+ AVDictionaryEntry *duration_entry = av_dict_get(metadata, "lavf.concatdec.duration", NULL, 0);
+ if (start_time_entry) {
+ int64_t start_time = strtoll(start_time_entry->value, NULL, 10);
+ if (pts >= start_time) {
+ if (duration_entry) {
+ int64_t duration = strtoll(duration_entry->value, NULL, 10);
+ if (pts < start_time + duration)
+ return -1;
+ else
+ return 0;
+ }
+ return -1;
+ }
+ return 0;
+ }
+ return NAN;
+}
+
+#define D2TS(d) (isnan(d) ? AV_NOPTS_VALUE : (int64_t)(d))
+#define TS2D(ts) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts))
+
+static void select_frame(AVFilterContext *ctx, AVFrame *frame)
+{
+ SelectContext *select = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
+ double res;
+
+ if (isnan(select->var_values[VAR_START_PTS]))
+ select->var_values[VAR_START_PTS] = TS2D(frame->pts);
+ if (isnan(select->var_values[VAR_START_T]))
+ select->var_values[VAR_START_T] = TS2D(frame->pts) * av_q2d(inlink->time_base);
+
+ select->var_values[VAR_N ] = inlink->frame_count_out;
+ select->var_values[VAR_PTS] = TS2D(frame->pts);
+ select->var_values[VAR_T ] = TS2D(frame->pts) * av_q2d(inlink->time_base);
+ select->var_values[VAR_POS] = av_frame_get_pkt_pos(frame) == -1 ? NAN : av_frame_get_pkt_pos(frame);
+ select->var_values[VAR_KEY] = frame->key_frame;
+ select->var_values[VAR_CONCATDEC_SELECT] = get_concatdec_select(frame, av_rescale_q(frame->pts, inlink->time_base, AV_TIME_BASE_Q));
+
+ switch (inlink->type) {
+ case AVMEDIA_TYPE_AUDIO:
+ select->var_values[VAR_SAMPLES_N] = frame->nb_samples;
+ break;
+
+ case AVMEDIA_TYPE_VIDEO:
+ select->var_values[VAR_INTERLACE_TYPE] =
+ !frame->interlaced_frame ? INTERLACE_TYPE_P :
+ frame->top_field_first ? INTERLACE_TYPE_T : INTERLACE_TYPE_B;
+ select->var_values[VAR_PICT_TYPE] = frame->pict_type;
+ if (select->do_scene_detect) {
+ char buf[32];
+ select->var_values[VAR_SCENE] = get_scene_score(ctx, frame);
+ // TODO: document metadata
+ snprintf(buf, sizeof(buf), "%f", select->var_values[VAR_SCENE]);
+ av_dict_set(avpriv_frame_get_metadatap(frame), "lavfi.scene_score", buf, 0);
+ }
+ break;
+ }
+
+ select->select = res = av_expr_eval(select->expr, select->var_values, NULL);
+ av_log(inlink->dst, AV_LOG_DEBUG,
+ "n:%f pts:%f t:%f key:%d",
+ select->var_values[VAR_N],
+ select->var_values[VAR_PTS],
+ select->var_values[VAR_T],
+ frame->key_frame);
+
+ switch (inlink->type) {
+ case AVMEDIA_TYPE_VIDEO:
+ av_log(inlink->dst, AV_LOG_DEBUG, " interlace_type:%c pict_type:%c scene:%f",
+ (!frame->interlaced_frame) ? 'P' :
+ frame->top_field_first ? 'T' : 'B',
+ av_get_picture_type_char(frame->pict_type),
+ select->var_values[VAR_SCENE]);
+ break;
+ case AVMEDIA_TYPE_AUDIO:
+ av_log(inlink->dst, AV_LOG_DEBUG, " samples_n:%d consumed_samples_n:%f",
+ frame->nb_samples,
+ select->var_values[VAR_CONSUMED_SAMPLES_N]);
+ break;
+ }
+
+ if (res == 0) {
+ select->select_out = -1; /* drop */
+ } else if (isnan(res) || res < 0) {
+ select->select_out = 0; /* first output */
+ } else {
+ select->select_out = FFMIN(ceilf(res)-1, select->nb_outputs-1); /* other outputs */
+ }
+
+ av_log(inlink->dst, AV_LOG_DEBUG, " -> select:%f select_out:%d\n", res, select->select_out);
+
+ if (res) {
+ select->var_values[VAR_PREV_SELECTED_N] = select->var_values[VAR_N];
+ select->var_values[VAR_PREV_SELECTED_PTS] = select->var_values[VAR_PTS];
+ select->var_values[VAR_PREV_SELECTED_T] = select->var_values[VAR_T];
+ select->var_values[VAR_SELECTED_N] += 1.0;
+ if (inlink->type == AVMEDIA_TYPE_AUDIO)
+ select->var_values[VAR_CONSUMED_SAMPLES_N] += frame->nb_samples;
+ }
+
+ select->var_values[VAR_PREV_PTS] = select->var_values[VAR_PTS];
+ select->var_values[VAR_PREV_T] = select->var_values[VAR_T];
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
+{
+ AVFilterContext *ctx = inlink->dst;
+ SelectContext *select = ctx->priv;
+
+ select_frame(ctx, frame);
+ if (select->select)
+ return ff_filter_frame(ctx->outputs[select->select_out], frame);
+
+ av_frame_free(&frame);
+ return 0;
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ AVFilterLink *inlink = outlink->src->inputs[0];
+ int ret = ff_request_frame(inlink);
+ return ret;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ SelectContext *select = ctx->priv;
+ int i;
+
+ av_expr_free(select->expr);
+ select->expr = NULL;
+
+ for (i = 0; i < ctx->nb_outputs; i++)
+ av_freep(&ctx->output_pads[i].name);
+
+ if (select->do_scene_detect) {
+ av_frame_free(&select->prev_picref);
+ }
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ SelectContext *select = ctx->priv;
+
+ if (!select->do_scene_detect) {
+ return ff_default_query_formats(ctx);
+ } else {
+ int ret;
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
+ AV_PIX_FMT_NONE
+ };
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ ret = ff_set_common_formats(ctx, fmts_list);
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
+#if CONFIG_ASELECT_FILTER
+
+DEFINE_OPTIONS(aselect, AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM);
+AVFILTER_DEFINE_CLASS(aselect);
+
+static av_cold int aselect_init(AVFilterContext *ctx)
+{
+ SelectContext *select = ctx->priv;
+ int ret;
+
+ if ((ret = init(ctx)) < 0)
+ return ret;
+
+ if (select->do_scene_detect) {
+ av_log(ctx, AV_LOG_ERROR, "Scene detection is ignored in aselect filter\n");
+ return AVERROR(EINVAL);
+ }
+
+ return 0;
+}
+
+static const AVFilterPad avfilter_af_aselect_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_aselect = {
+ .name = "aselect",
+ .description = NULL_IF_CONFIG_SMALL("Select audio frames to pass in output."),
+ .init = aselect_init,
+ .uninit = uninit,
+ .priv_size = sizeof(SelectContext),
+ .inputs = avfilter_af_aselect_inputs,
+ .priv_class = &aselect_class,
+ .flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
+};
+#endif /* CONFIG_ASELECT_FILTER */
+
+#if CONFIG_SELECT_FILTER
+
+DEFINE_OPTIONS(select, AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM);
+AVFILTER_DEFINE_CLASS(select);
+
+static av_cold int select_init(AVFilterContext *ctx)
+{
+ int ret;
+
+ if ((ret = init(ctx)) < 0)
+ return ret;
+
+ return 0;
+}
+
+static const AVFilterPad avfilter_vf_select_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_select = {
+ .name = "select",
+ .description = NULL_IF_CONFIG_SMALL("Select video frames to pass in output."),
+ .init = select_init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .priv_size = sizeof(SelectContext),
+ .priv_class = &select_class,
+ .inputs = avfilter_vf_select_inputs,
+ .flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
+};
+#endif /* CONFIG_SELECT_FILTER */
diff --git a/libavfilter/f_sendcmd.c b/libavfilter/f_sendcmd.c
new file mode 100644
index 0000000000..522d6adb90
--- /dev/null
+++ b/libavfilter/f_sendcmd.c
@@ -0,0 +1,587 @@
+/*
+ * Copyright (c) 2012 Stefano Sabatini
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * send commands filter
+ */
+
+#include "libavutil/avstring.h"
+#include "libavutil/bprint.h"
+#include "libavutil/file.h"
+#include "libavutil/opt.h"
+#include "libavutil/parseutils.h"
+#include "avfilter.h"
+#include "internal.h"
+#include "avfiltergraph.h"
+#include "audio.h"
+#include "video.h"
+
+#define COMMAND_FLAG_ENTER 1
+#define COMMAND_FLAG_LEAVE 2
+
+static inline char *make_command_flags_str(AVBPrint *pbuf, int flags)
+{
+ static const char * const flag_strings[] = { "enter", "leave" };
+ int i, is_first = 1;
+
+ av_bprint_init(pbuf, 0, AV_BPRINT_SIZE_AUTOMATIC);
+ for (i = 0; i < FF_ARRAY_ELEMS(flag_strings); i++) {
+ if (flags & 1<<i) {
+ if (!is_first)
+ av_bprint_chars(pbuf, '+', 1);
+ av_bprintf(pbuf, "%s", flag_strings[i]);
+ is_first = 0;
+ }
+ }
+
+ return pbuf->str;
+}
+
+typedef struct {
+ int flags;
+ char *target, *command, *arg;
+ int index;
+} Command;
+
+typedef struct {
+ int64_t start_ts; ///< start timestamp expressed as microseconds units
+ int64_t end_ts; ///< end timestamp expressed as microseconds units
+ int index; ///< unique index for these interval commands
+ Command *commands;
+ int nb_commands;
+ int enabled; ///< current time detected inside this interval
+} Interval;
+
+typedef struct {
+ const AVClass *class;
+ Interval *intervals;
+ int nb_intervals;
+
+ char *commands_filename;
+ char *commands_str;
+} SendCmdContext;
+
+#define OFFSET(x) offsetof(SendCmdContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption options[] = {
+ { "commands", "set commands", OFFSET(commands_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
+ { "c", "set commands", OFFSET(commands_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
+ { "filename", "set commands file", OFFSET(commands_filename), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
+ { "f", "set commands file", OFFSET(commands_filename), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
+ { NULL }
+};
+
+#define SPACES " \f\t\n\r"
+
+static void skip_comments(const char **buf)
+{
+ while (**buf) {
+ /* skip leading spaces */
+ *buf += strspn(*buf, SPACES);
+ if (**buf != '#')
+ break;
+
+ (*buf)++;
+
+ /* skip comment until the end of line */
+ *buf += strcspn(*buf, "\n");
+ if (**buf)
+ (*buf)++;
+ }
+}
+
+#define COMMAND_DELIMS " \f\t\n\r,;"
+
+static int parse_command(Command *cmd, int cmd_count, int interval_count,
+ const char **buf, void *log_ctx)
+{
+ int ret;
+
+ memset(cmd, 0, sizeof(Command));
+ cmd->index = cmd_count;
+
+ /* format: [FLAGS] target command arg */
+ *buf += strspn(*buf, SPACES);
+
+ /* parse flags */
+ if (**buf == '[') {
+ (*buf)++; /* skip "[" */
+
+ while (**buf) {
+ int len = strcspn(*buf, "|+]");
+
+ if (!strncmp(*buf, "enter", strlen("enter"))) cmd->flags |= COMMAND_FLAG_ENTER;
+ else if (!strncmp(*buf, "leave", strlen("leave"))) cmd->flags |= COMMAND_FLAG_LEAVE;
+ else {
+ char flag_buf[64];
+ av_strlcpy(flag_buf, *buf, sizeof(flag_buf));
+ av_log(log_ctx, AV_LOG_ERROR,
+ "Unknown flag '%s' in interval #%d, command #%d\n",
+ flag_buf, interval_count, cmd_count);
+ return AVERROR(EINVAL);
+ }
+ *buf += len;
+ if (**buf == ']')
+ break;
+ if (!strspn(*buf, "+|")) {
+ av_log(log_ctx, AV_LOG_ERROR,
+ "Invalid flags char '%c' in interval #%d, command #%d\n",
+ **buf, interval_count, cmd_count);
+ return AVERROR(EINVAL);
+ }
+ if (**buf)
+ (*buf)++;
+ }
+
+ if (**buf != ']') {
+ av_log(log_ctx, AV_LOG_ERROR,
+ "Missing flag terminator or extraneous data found at the end of flags "
+ "in interval #%d, command #%d\n", interval_count, cmd_count);
+ return AVERROR(EINVAL);
+ }
+ (*buf)++; /* skip "]" */
+ } else {
+ cmd->flags = COMMAND_FLAG_ENTER;
+ }
+
+ *buf += strspn(*buf, SPACES);
+ cmd->target = av_get_token(buf, COMMAND_DELIMS);
+ if (!cmd->target || !cmd->target[0]) {
+ av_log(log_ctx, AV_LOG_ERROR,
+ "No target specified in interval #%d, command #%d\n",
+ interval_count, cmd_count);
+ ret = AVERROR(EINVAL);
+ goto fail;
+ }
+
+ *buf += strspn(*buf, SPACES);
+ cmd->command = av_get_token(buf, COMMAND_DELIMS);
+ if (!cmd->command || !cmd->command[0]) {
+ av_log(log_ctx, AV_LOG_ERROR,
+ "No command specified in interval #%d, command #%d\n",
+ interval_count, cmd_count);
+ ret = AVERROR(EINVAL);
+ goto fail;
+ }
+
+ *buf += strspn(*buf, SPACES);
+ cmd->arg = av_get_token(buf, COMMAND_DELIMS);
+
+ return 1;
+
+fail:
+ av_freep(&cmd->target);
+ av_freep(&cmd->command);
+ av_freep(&cmd->arg);
+ return ret;
+}
+
+static int parse_commands(Command **cmds, int *nb_cmds, int interval_count,
+ const char **buf, void *log_ctx)
+{
+ int cmd_count = 0;
+ int ret, n = 0;
+ AVBPrint pbuf;
+
+ *cmds = NULL;
+ *nb_cmds = 0;
+
+ while (**buf) {
+ Command cmd;
+
+ if ((ret = parse_command(&cmd, cmd_count, interval_count, buf, log_ctx)) < 0)
+ return ret;
+ cmd_count++;
+
+ /* (re)allocate commands array if required */
+ if (*nb_cmds == n) {
+ n = FFMAX(16, 2*n); /* first allocation = 16, or double the number */
+ *cmds = av_realloc_f(*cmds, n, 2*sizeof(Command));
+ if (!*cmds) {
+ av_log(log_ctx, AV_LOG_ERROR,
+ "Could not (re)allocate command array\n");
+ return AVERROR(ENOMEM);
+ }
+ }
+
+ (*cmds)[(*nb_cmds)++] = cmd;
+
+ *buf += strspn(*buf, SPACES);
+ if (**buf && **buf != ';' && **buf != ',') {
+ av_log(log_ctx, AV_LOG_ERROR,
+ "Missing separator or extraneous data found at the end of "
+ "interval #%d, in command #%d\n",
+ interval_count, cmd_count);
+ av_log(log_ctx, AV_LOG_ERROR,
+ "Command was parsed as: flags:[%s] target:%s command:%s arg:%s\n",
+ make_command_flags_str(&pbuf, cmd.flags), cmd.target, cmd.command, cmd.arg);
+ return AVERROR(EINVAL);
+ }
+ if (**buf == ';')
+ break;
+ if (**buf == ',')
+ (*buf)++;
+ }
+
+ return 0;
+}
+
+#define DELIMS " \f\t\n\r,;"
+
+static int parse_interval(Interval *interval, int interval_count,
+ const char **buf, void *log_ctx)
+{
+ char *intervalstr;
+ int ret;
+
+ *buf += strspn(*buf, SPACES);
+ if (!**buf)
+ return 0;
+
+ /* reset data */
+ memset(interval, 0, sizeof(Interval));
+ interval->index = interval_count;
+
+ /* format: INTERVAL COMMANDS */
+
+ /* parse interval */
+ intervalstr = av_get_token(buf, DELIMS);
+ if (intervalstr && intervalstr[0]) {
+ char *start, *end;
+
+ start = av_strtok(intervalstr, "-", &end);
+ if (!start) {
+ ret = AVERROR(EINVAL);
+ av_log(log_ctx, AV_LOG_ERROR,
+ "Invalid interval specification '%s' in interval #%d\n",
+ intervalstr, interval_count);
+ goto end;
+ }
+ if ((ret = av_parse_time(&interval->start_ts, start, 1)) < 0) {
+ av_log(log_ctx, AV_LOG_ERROR,
+ "Invalid start time specification '%s' in interval #%d\n",
+ start, interval_count);
+ goto end;
+ }
+
+ if (end) {
+ if ((ret = av_parse_time(&interval->end_ts, end, 1)) < 0) {
+ av_log(log_ctx, AV_LOG_ERROR,
+ "Invalid end time specification '%s' in interval #%d\n",
+ end, interval_count);
+ goto end;
+ }
+ } else {
+ interval->end_ts = INT64_MAX;
+ }
+ if (interval->end_ts < interval->start_ts) {
+ av_log(log_ctx, AV_LOG_ERROR,
+ "Invalid end time '%s' in interval #%d: "
+ "cannot be lesser than start time '%s'\n",
+ end, interval_count, start);
+ ret = AVERROR(EINVAL);
+ goto end;
+ }
+ } else {
+ av_log(log_ctx, AV_LOG_ERROR,
+ "No interval specified for interval #%d\n", interval_count);
+ ret = AVERROR(EINVAL);
+ goto end;
+ }
+
+ /* parse commands */
+ ret = parse_commands(&interval->commands, &interval->nb_commands,
+ interval_count, buf, log_ctx);
+
+end:
+ av_free(intervalstr);
+ return ret;
+}
+
+static int parse_intervals(Interval **intervals, int *nb_intervals,
+ const char *buf, void *log_ctx)
+{
+ int interval_count = 0;
+ int ret, n = 0;
+
+ *intervals = NULL;
+ *nb_intervals = 0;
+
+ if (!buf)
+ return 0;
+
+ while (1) {
+ Interval interval;
+
+ skip_comments(&buf);
+ if (!(*buf))
+ break;
+
+ if ((ret = parse_interval(&interval, interval_count, &buf, log_ctx)) < 0)
+ return ret;
+
+ buf += strspn(buf, SPACES);
+ if (*buf) {
+ if (*buf != ';') {
+ av_log(log_ctx, AV_LOG_ERROR,
+ "Missing terminator or extraneous data found at the end of interval #%d\n",
+ interval_count);
+ return AVERROR(EINVAL);
+ }
+ buf++; /* skip ';' */
+ }
+ interval_count++;
+
+ /* (re)allocate commands array if required */
+ if (*nb_intervals == n) {
+ n = FFMAX(16, 2*n); /* first allocation = 16, or double the number */
+ *intervals = av_realloc_f(*intervals, n, 2*sizeof(Interval));
+ if (!*intervals) {
+ av_log(log_ctx, AV_LOG_ERROR,
+ "Could not (re)allocate intervals array\n");
+ return AVERROR(ENOMEM);
+ }
+ }
+
+ (*intervals)[(*nb_intervals)++] = interval;
+ }
+
+ return 0;
+}
+
+static int cmp_intervals(const void *a, const void *b)
+{
+ const Interval *i1 = a;
+ const Interval *i2 = b;
+ return 2 * FFDIFFSIGN(i1->start_ts, i2->start_ts) + FFDIFFSIGN(i1->index, i2->index);
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ SendCmdContext *s = ctx->priv;
+ int ret, i, j;
+
+ if ((!!s->commands_filename + !!s->commands_str) != 1) {
+ av_log(ctx, AV_LOG_ERROR,
+ "One and only one of the filename or commands options must be specified\n");
+ return AVERROR(EINVAL);
+ }
+
+ if (s->commands_filename) {
+ uint8_t *file_buf, *buf;
+ size_t file_bufsize;
+ ret = av_file_map(s->commands_filename,
+ &file_buf, &file_bufsize, 0, ctx);
+ if (ret < 0)
+ return ret;
+
+ /* create a 0-terminated string based on the read file */
+ buf = av_malloc(file_bufsize + 1);
+ if (!buf) {
+ av_file_unmap(file_buf, file_bufsize);
+ return AVERROR(ENOMEM);
+ }
+ memcpy(buf, file_buf, file_bufsize);
+ buf[file_bufsize] = 0;
+ av_file_unmap(file_buf, file_bufsize);
+ s->commands_str = buf;
+ }
+
+ if ((ret = parse_intervals(&s->intervals, &s->nb_intervals,
+ s->commands_str, ctx)) < 0)
+ return ret;
+
+ if (s->nb_intervals == 0) {
+ av_log(ctx, AV_LOG_ERROR, "No commands were specified\n");
+ return AVERROR(EINVAL);
+ }
+
+ qsort(s->intervals, s->nb_intervals, sizeof(Interval), cmp_intervals);
+
+ av_log(ctx, AV_LOG_DEBUG, "Parsed commands:\n");
+ for (i = 0; i < s->nb_intervals; i++) {
+ AVBPrint pbuf;
+ Interval *interval = &s->intervals[i];
+ av_log(ctx, AV_LOG_VERBOSE, "start_time:%f end_time:%f index:%d\n",
+ (double)interval->start_ts/1000000, (double)interval->end_ts/1000000, interval->index);
+ for (j = 0; j < interval->nb_commands; j++) {
+ Command *cmd = &interval->commands[j];
+ av_log(ctx, AV_LOG_VERBOSE,
+ " [%s] target:%s command:%s arg:%s index:%d\n",
+ make_command_flags_str(&pbuf, cmd->flags), cmd->target, cmd->command, cmd->arg, cmd->index);
+ }
+ }
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ SendCmdContext *s = ctx->priv;
+ int i, j;
+
+ for (i = 0; i < s->nb_intervals; i++) {
+ Interval *interval = &s->intervals[i];
+ for (j = 0; j < interval->nb_commands; j++) {
+ Command *cmd = &interval->commands[j];
+ av_freep(&cmd->target);
+ av_freep(&cmd->command);
+ av_freep(&cmd->arg);
+ }
+ av_freep(&interval->commands);
+ }
+ av_freep(&s->intervals);
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *ref)
+{
+ AVFilterContext *ctx = inlink->dst;
+ SendCmdContext *s = ctx->priv;
+ int64_t ts;
+ int i, j, ret;
+
+ if (ref->pts == AV_NOPTS_VALUE)
+ goto end;
+
+ ts = av_rescale_q(ref->pts, inlink->time_base, AV_TIME_BASE_Q);
+
+#define WITHIN_INTERVAL(ts, start_ts, end_ts) ((ts) >= (start_ts) && (ts) < (end_ts))
+
+ for (i = 0; i < s->nb_intervals; i++) {
+ Interval *interval = &s->intervals[i];
+ int flags = 0;
+
+ if (!interval->enabled && WITHIN_INTERVAL(ts, interval->start_ts, interval->end_ts)) {
+ flags += COMMAND_FLAG_ENTER;
+ interval->enabled = 1;
+ }
+ if (interval->enabled && !WITHIN_INTERVAL(ts, interval->start_ts, interval->end_ts)) {
+ flags += COMMAND_FLAG_LEAVE;
+ interval->enabled = 0;
+ }
+
+ if (flags) {
+ AVBPrint pbuf;
+ av_log(ctx, AV_LOG_VERBOSE,
+ "[%s] interval #%d start_ts:%f end_ts:%f ts:%f\n",
+ make_command_flags_str(&pbuf, flags), interval->index,
+ (double)interval->start_ts/1000000, (double)interval->end_ts/1000000,
+ (double)ts/1000000);
+
+ for (j = 0; flags && j < interval->nb_commands; j++) {
+ Command *cmd = &interval->commands[j];
+ char buf[1024];
+
+ if (cmd->flags & flags) {
+ av_log(ctx, AV_LOG_VERBOSE,
+ "Processing command #%d target:%s command:%s arg:%s\n",
+ cmd->index, cmd->target, cmd->command, cmd->arg);
+ ret = avfilter_graph_send_command(inlink->graph,
+ cmd->target, cmd->command, cmd->arg,
+ buf, sizeof(buf),
+ AVFILTER_CMD_FLAG_ONE);
+ av_log(ctx, AV_LOG_VERBOSE,
+ "Command reply for command #%d: ret:%s res:%s\n",
+ cmd->index, av_err2str(ret), buf);
+ }
+ }
+ }
+ }
+
+end:
+ switch (inlink->type) {
+ case AVMEDIA_TYPE_VIDEO:
+ case AVMEDIA_TYPE_AUDIO:
+ return ff_filter_frame(inlink->dst->outputs[0], ref);
+ }
+
+ return AVERROR(ENOSYS);
+}
+
+#if CONFIG_SENDCMD_FILTER
+
+#define sendcmd_options options
+AVFILTER_DEFINE_CLASS(sendcmd);
+
+static const AVFilterPad sendcmd_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad sendcmd_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_sendcmd = {
+ .name = "sendcmd",
+ .description = NULL_IF_CONFIG_SMALL("Send commands to filters."),
+ .init = init,
+ .uninit = uninit,
+ .priv_size = sizeof(SendCmdContext),
+ .inputs = sendcmd_inputs,
+ .outputs = sendcmd_outputs,
+ .priv_class = &sendcmd_class,
+};
+
+#endif
+
+#if CONFIG_ASENDCMD_FILTER
+
+#define asendcmd_options options
+AVFILTER_DEFINE_CLASS(asendcmd);
+
+static const AVFilterPad asendcmd_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad asendcmd_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_asendcmd = {
+ .name = "asendcmd",
+ .description = NULL_IF_CONFIG_SMALL("Send commands to filters."),
+ .init = init,
+ .uninit = uninit,
+ .priv_size = sizeof(SendCmdContext),
+ .inputs = asendcmd_inputs,
+ .outputs = asendcmd_outputs,
+ .priv_class = &asendcmd_class,
+};
+
+#endif
diff --git a/libavfilter/f_sidedata.c b/libavfilter/f_sidedata.c
new file mode 100644
index 0000000000..45d246b732
--- /dev/null
+++ b/libavfilter/f_sidedata.c
@@ -0,0 +1,180 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * filter for manipulating frame side data
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/internal.h"
+#include "libavutil/frame.h"
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+
+enum SideDataMode {
+ SIDEDATA_SELECT,
+ SIDEDATA_DELETE,
+ SIDEDATA_NB
+};
+
+typedef struct SideDataContext {
+ const AVClass *class;
+
+ int mode;
+ enum AVFrameSideDataType type;
+} SideDataContext;
+
+#define OFFSET(x) offsetof(SideDataContext, x)
+#define DEFINE_OPTIONS(filt_name, FLAGS) \
+static const AVOption filt_name##_options[] = { \
+ { "mode", "set a mode of operation", OFFSET(mode), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, SIDEDATA_NB-1, FLAGS, "mode" }, \
+ { "select", "select frame", 0, AV_OPT_TYPE_CONST, {.i64 = SIDEDATA_SELECT }, 0, 0, FLAGS, "mode" }, \
+ { "delete", "delete side data", 0, AV_OPT_TYPE_CONST, {.i64 = SIDEDATA_DELETE }, 0, 0, FLAGS, "mode" }, \
+ { "type", "set side data type", OFFSET(type), AV_OPT_TYPE_INT, {.i64 = -1 }, -1, INT_MAX, FLAGS, "type" }, \
+ { "PANSCAN", "", 0, AV_OPT_TYPE_CONST, {.i64 = AV_FRAME_DATA_PANSCAN }, 0, 0, FLAGS, "type" }, \
+ { "A53_CC", "", 0, AV_OPT_TYPE_CONST, {.i64 = AV_FRAME_DATA_A53_CC }, 0, 0, FLAGS, "type" }, \
+ { "STEREO3D", "", 0, AV_OPT_TYPE_CONST, {.i64 = AV_FRAME_DATA_STEREO3D }, 0, 0, FLAGS, "type" }, \
+ { "MATRIXENCODING", "", 0, AV_OPT_TYPE_CONST, {.i64 = AV_FRAME_DATA_MATRIXENCODING }, 0, 0, FLAGS, "type" }, \
+ { "DOWNMIX_INFO", "", 0, AV_OPT_TYPE_CONST, {.i64 = AV_FRAME_DATA_DOWNMIX_INFO }, 0, 0, FLAGS, "type" }, \
+ { "REPLAYGAIN", "", 0, AV_OPT_TYPE_CONST, {.i64 = AV_FRAME_DATA_REPLAYGAIN }, 0, 0, FLAGS, "type" }, \
+ { "DISPLAYMATRIX", "", 0, AV_OPT_TYPE_CONST, {.i64 = AV_FRAME_DATA_DISPLAYMATRIX }, 0, 0, FLAGS, "type" }, \
+ { "AFD", "", 0, AV_OPT_TYPE_CONST, {.i64 = AV_FRAME_DATA_AFD }, 0, 0, FLAGS, "type" }, \
+ { "MOTION_VECTORS", "", 0, AV_OPT_TYPE_CONST, {.i64 = AV_FRAME_DATA_MOTION_VECTORS }, 0, 0, FLAGS, "type" }, \
+ { "SKIP_SAMPLES", "", 0, AV_OPT_TYPE_CONST, {.i64 = AV_FRAME_DATA_SKIP_SAMPLES }, 0, 0, FLAGS, "type" }, \
+ { "AUDIO_SERVICE_TYPE", "", 0, AV_OPT_TYPE_CONST, {.i64 = AV_FRAME_DATA_AUDIO_SERVICE_TYPE }, 0, 0, FLAGS, "type" }, \
+ { "MASTERING_DISPLAY_METADATA", "", 0, AV_OPT_TYPE_CONST, {.i64 = AV_FRAME_DATA_MASTERING_DISPLAY_METADATA }, 0, 0, FLAGS, "type" }, \
+ { "GOP_TIMECODE", "", 0, AV_OPT_TYPE_CONST, {.i64 = AV_FRAME_DATA_GOP_TIMECODE }, 0, 0, FLAGS, "type" }, \
+ { NULL } \
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ SideDataContext *s = ctx->priv;
+
+ if (s->type == -1 && s->mode != SIDEDATA_DELETE) {
+ av_log(ctx, AV_LOG_ERROR, "Side data type must be set\n");
+ return AVERROR(EINVAL);
+ }
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ SideDataContext *s = ctx->priv;
+ AVFrameSideData *sd = NULL;
+
+ if (s->type != -1)
+ sd = av_frame_get_side_data(frame, s->type);
+
+ switch (s->mode) {
+ case SIDEDATA_SELECT:
+ if (sd) {
+ return ff_filter_frame(outlink, frame);
+ }
+ break;
+ case SIDEDATA_DELETE:
+ if (s->type == -1) {
+ while (frame->nb_side_data)
+ av_frame_remove_side_data(frame, frame->side_data[0]->type);
+ } else if (sd) {
+ av_frame_remove_side_data(frame, s->type);
+ }
+ return ff_filter_frame(outlink, frame);
+ break;
+ default:
+ av_assert0(0);
+ };
+
+ av_frame_free(&frame);
+
+ return 0;
+}
+
+#if CONFIG_ASIDEDATA_FILTER
+
+DEFINE_OPTIONS(asidedata, AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM);
+AVFILTER_DEFINE_CLASS(asidedata);
+
+static const AVFilterPad ainputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad aoutputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_asidedata = {
+ .name = "asidedata",
+ .description = NULL_IF_CONFIG_SMALL("Manipulate audio frame side data."),
+ .priv_size = sizeof(SideDataContext),
+ .priv_class = &asidedata_class,
+ .init = init,
+ .inputs = ainputs,
+ .outputs = aoutputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+};
+#endif /* CONFIG_ASIDEDATA_FILTER */
+
+#if CONFIG_SIDEDATA_FILTER
+
+DEFINE_OPTIONS(sidedata, AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM);
+AVFILTER_DEFINE_CLASS(sidedata);
+
+static const AVFilterPad inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_sidedata = {
+ .name = "sidedata",
+ .description = NULL_IF_CONFIG_SMALL("Manipulate video frame side data."),
+ .priv_size = sizeof(SideDataContext),
+ .priv_class = &sidedata_class,
+ .init = init,
+ .inputs = inputs,
+ .outputs = outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+};
+#endif /* CONFIG_SIDEDATA_FILTER */
diff --git a/libavfilter/f_streamselect.c b/libavfilter/f_streamselect.c
new file mode 100644
index 0000000000..1a517bfc95
--- /dev/null
+++ b/libavfilter/f_streamselect.c
@@ -0,0 +1,353 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/avstring.h"
+#include "libavutil/internal.h"
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#include "audio.h"
+#include "formats.h"
+#include "framesync.h"
+#include "internal.h"
+#include "video.h"
+
+typedef struct StreamSelectContext {
+ const AVClass *class;
+ int nb_inputs;
+ char *map_str;
+ int *map;
+ int nb_map;
+ int is_audio;
+ int64_t *last_pts;
+ AVFrame **frames;
+ FFFrameSync fs;
+} StreamSelectContext;
+
+#define OFFSET(x) offsetof(StreamSelectContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
+static const AVOption streamselect_options[] = {
+ { "inputs", "number of input streams", OFFSET(nb_inputs), AV_OPT_TYPE_INT, {.i64=2}, 2, INT_MAX, .flags=FLAGS },
+ { "map", "input indexes to remap to outputs", OFFSET(map_str), AV_OPT_TYPE_STRING, {.str=NULL}, .flags=FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(streamselect);
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ StreamSelectContext *s = inlink->dst->priv;
+ return ff_framesync_filter_frame(&s->fs, inlink, in);
+}
+
+static int process_frame(FFFrameSync *fs)
+{
+ AVFilterContext *ctx = fs->parent;
+ StreamSelectContext *s = fs->opaque;
+ AVFrame **in = s->frames;
+ int i, j, ret = 0;
+
+ for (i = 0; i < ctx->nb_inputs; i++) {
+ if ((ret = ff_framesync_get_frame(&s->fs, i, &in[i], 0)) < 0)
+ return ret;
+ }
+
+ for (j = 0; j < ctx->nb_inputs; j++) {
+ for (i = 0; i < s->nb_map; i++) {
+ if (s->map[i] == j) {
+ AVFrame *out;
+
+ if (s->is_audio && s->last_pts[j] == in[j]->pts &&
+ ctx->outputs[i]->frame_count_in > 0)
+ continue;
+ out = av_frame_clone(in[j]);
+ if (!out)
+ return AVERROR(ENOMEM);
+
+ out->pts = av_rescale_q(s->fs.pts, s->fs.time_base, ctx->outputs[i]->time_base);
+ s->last_pts[j] = in[j]->pts;
+ ret = ff_filter_frame(ctx->outputs[i], out);
+ if (ret < 0)
+ return ret;
+ }
+ }
+ }
+
+ return ret;
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ StreamSelectContext *s = outlink->src->priv;
+ return ff_framesync_request_frame(&s->fs, outlink);
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ StreamSelectContext *s = ctx->priv;
+ const int outlink_idx = FF_OUTLINK_IDX(outlink);
+ const int inlink_idx = s->map[outlink_idx];
+ AVFilterLink *inlink = ctx->inputs[inlink_idx];
+ FFFrameSyncIn *in;
+ int i, ret;
+
+ av_log(ctx, AV_LOG_VERBOSE, "config output link %d "
+ "with settings from input link %d\n",
+ outlink_idx, inlink_idx);
+
+ switch (outlink->type) {
+ case AVMEDIA_TYPE_VIDEO:
+ outlink->w = inlink->w;
+ outlink->h = inlink->h;
+ outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
+ outlink->frame_rate = inlink->frame_rate;
+ break;
+ case AVMEDIA_TYPE_AUDIO:
+ outlink->sample_rate = inlink->sample_rate;
+ outlink->channels = inlink->channels;
+ outlink->channel_layout = inlink->channel_layout;
+ break;
+ }
+
+ outlink->time_base = inlink->time_base;
+ outlink->format = inlink->format;
+
+ if (s->fs.opaque == s)
+ return 0;
+
+ if ((ret = ff_framesync_init(&s->fs, ctx, ctx->nb_inputs)) < 0)
+ return ret;
+
+ in = s->fs.in;
+ s->fs.opaque = s;
+ s->fs.on_event = process_frame;
+
+ for (i = 0; i < ctx->nb_inputs; i++) {
+ in[i].time_base = ctx->inputs[i]->time_base;
+ in[i].sync = 1;
+ in[i].before = EXT_STOP;
+ in[i].after = EXT_STOP;
+ }
+
+ s->frames = av_calloc(ctx->nb_inputs, sizeof(*s->frames));
+ if (!s->frames)
+ return AVERROR(ENOMEM);
+
+ return ff_framesync_configure(&s->fs);
+}
+
+static int parse_definition(AVFilterContext *ctx, int nb_pads, void *filter_frame, int is_audio)
+{
+ const int is_input = !!filter_frame;
+ const char *padtype = is_input ? "in" : "out";
+ int i = 0, ret = 0;
+
+ for (i = 0; i < nb_pads; i++) {
+ AVFilterPad pad = { 0 };
+
+ pad.type = is_audio ? AVMEDIA_TYPE_AUDIO : AVMEDIA_TYPE_VIDEO;
+
+ pad.name = av_asprintf("%sput%d", padtype, i);
+ if (!pad.name)
+ return AVERROR(ENOMEM);
+
+ av_log(ctx, AV_LOG_DEBUG, "Add %s pad %s\n", padtype, pad.name);
+
+ if (is_input) {
+ pad.filter_frame = filter_frame;
+ ret = ff_insert_inpad(ctx, i, &pad);
+ } else {
+ pad.config_props = config_output;
+ pad.request_frame = request_frame;
+ ret = ff_insert_outpad(ctx, i, &pad);
+ }
+
+ if (ret < 0) {
+ av_freep(&pad.name);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int parse_mapping(AVFilterContext *ctx, const char *map)
+{
+ StreamSelectContext *s = ctx->priv;
+ int *new_map;
+ int new_nb_map = 0;
+
+ if (!map) {
+ av_log(ctx, AV_LOG_ERROR, "mapping definition is not set\n");
+ return AVERROR(EINVAL);
+ }
+
+ new_map = av_calloc(s->nb_inputs, sizeof(*new_map));
+ if (!new_map)
+ return AVERROR(ENOMEM);
+
+ while (1) {
+ char *p;
+ const int n = strtol(map, &p, 0);
+
+ av_log(ctx, AV_LOG_DEBUG, "n=%d map=%p p=%p\n", n, map, p);
+
+ if (map == p)
+ break;
+ map = p;
+
+ if (new_nb_map >= s->nb_inputs) {
+ av_log(ctx, AV_LOG_ERROR, "Unable to map more than the %d "
+ "input pads available\n", s->nb_inputs);
+ av_free(new_map);
+ return AVERROR(EINVAL);
+ }
+
+ if (n < 0 || n >= ctx->nb_inputs) {
+ av_log(ctx, AV_LOG_ERROR, "Input stream index %d doesn't exist "
+ "(there is only %d input streams defined)\n",
+ n, s->nb_inputs);
+ av_free(new_map);
+ return AVERROR(EINVAL);
+ }
+
+ av_log(ctx, AV_LOG_VERBOSE, "Map input stream %d to output stream %d\n", n, new_nb_map);
+ new_map[new_nb_map++] = n;
+ }
+
+ if (!new_nb_map) {
+ av_log(ctx, AV_LOG_ERROR, "invalid mapping\n");
+ av_free(new_map);
+ return AVERROR(EINVAL);
+ }
+
+ av_freep(&s->map);
+ s->map = new_map;
+ s->nb_map = new_nb_map;
+
+ av_log(ctx, AV_LOG_VERBOSE, "%d map set\n", s->nb_map);
+
+ return 0;
+}
+
+static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
+ char *res, int res_len, int flags)
+{
+ if (!strcmp(cmd, "map")) {
+ int ret = parse_mapping(ctx, args);
+
+ if (ret < 0)
+ return ret;
+ return avfilter_config_links(ctx);
+ }
+ return AVERROR(ENOSYS);
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ StreamSelectContext *s = ctx->priv;
+ int ret, nb_outputs = 0;
+ char *map = s->map_str;
+
+ if (!strcmp(ctx->filter->name, "astreamselect"))
+ s->is_audio = 1;
+
+ for (; map;) {
+ char *p;
+
+ strtol(map, &p, 0);
+ if (map == p)
+ break;
+ nb_outputs++;
+ map = p;
+ }
+
+ s->last_pts = av_calloc(s->nb_inputs, sizeof(*s->last_pts));
+ if (!s->last_pts)
+ return AVERROR(ENOMEM);
+
+ if ((ret = parse_definition(ctx, s->nb_inputs, filter_frame, s->is_audio)) < 0 ||
+ (ret = parse_definition(ctx, nb_outputs, NULL, s->is_audio)) < 0)
+ return ret;
+
+ av_log(ctx, AV_LOG_DEBUG, "Configured with %d inpad and %d outpad\n",
+ ctx->nb_inputs, ctx->nb_outputs);
+
+ return parse_mapping(ctx, s->map_str);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ StreamSelectContext *s = ctx->priv;
+
+ av_freep(&s->last_pts);
+ av_freep(&s->map);
+ av_freep(&s->frames);
+ ff_framesync_uninit(&s->fs);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *formats, *rates = NULL;
+ AVFilterChannelLayouts *layouts = NULL;
+ int ret, i;
+
+ for (i = 0; i < ctx->nb_inputs; i++) {
+ formats = ff_all_formats(ctx->inputs[i]->type);
+ if ((ret = ff_set_common_formats(ctx, formats)) < 0)
+ return ret;
+
+ if (ctx->inputs[i]->type == AVMEDIA_TYPE_AUDIO) {
+ rates = ff_all_samplerates();
+ if ((ret = ff_set_common_samplerates(ctx, rates)) < 0)
+ return ret;
+ layouts = ff_all_channel_counts();
+ if ((ret = ff_set_common_channel_layouts(ctx, layouts)) < 0)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+AVFilter ff_vf_streamselect = {
+ .name = "streamselect",
+ .description = NULL_IF_CONFIG_SMALL("Select video streams"),
+ .init = init,
+ .query_formats = query_formats,
+ .process_command = process_command,
+ .uninit = uninit,
+ .priv_size = sizeof(StreamSelectContext),
+ .priv_class = &streamselect_class,
+ .flags = AVFILTER_FLAG_DYNAMIC_INPUTS | AVFILTER_FLAG_DYNAMIC_OUTPUTS,
+};
+
+#define astreamselect_options streamselect_options
+AVFILTER_DEFINE_CLASS(astreamselect);
+
+AVFilter ff_af_astreamselect = {
+ .name = "astreamselect",
+ .description = NULL_IF_CONFIG_SMALL("Select audio streams"),
+ .init = init,
+ .query_formats = query_formats,
+ .process_command = process_command,
+ .uninit = uninit,
+ .priv_size = sizeof(StreamSelectContext),
+ .priv_class = &astreamselect_class,
+ .flags = AVFILTER_FLAG_DYNAMIC_INPUTS | AVFILTER_FLAG_DYNAMIC_OUTPUTS,
+};
diff --git a/libavfilter/f_zmq.c b/libavfilter/f_zmq.c
new file mode 100644
index 0000000000..d6c3c65da4
--- /dev/null
+++ b/libavfilter/f_zmq.c
@@ -0,0 +1,275 @@
+/*
+ * Copyright (c) 2013 Stefano Sabatini
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * receive commands through libzeromq and broker them to filters
+ */
+
+#include <zmq.h>
+#include "libavutil/avstring.h"
+#include "libavutil/bprint.h"
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#include "internal.h"
+#include "avfiltergraph.h"
+#include "audio.h"
+#include "video.h"
+
+typedef struct {
+ const AVClass *class;
+ void *zmq;
+ void *responder;
+ char *bind_address;
+ int command_count;
+} ZMQContext;
+
+#define OFFSET(x) offsetof(ZMQContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption options[] = {
+ { "bind_address", "set bind address", OFFSET(bind_address), AV_OPT_TYPE_STRING, {.str = "tcp://*:5555"}, 0, 0, FLAGS },
+ { "b", "set bind address", OFFSET(bind_address), AV_OPT_TYPE_STRING, {.str = "tcp://*:5555"}, 0, 0, FLAGS },
+ { NULL }
+};
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ ZMQContext *zmq = ctx->priv;
+
+ zmq->zmq = zmq_ctx_new();
+ if (!zmq->zmq) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Could not create ZMQ context: %s\n", zmq_strerror(errno));
+ return AVERROR_EXTERNAL;
+ }
+
+ zmq->responder = zmq_socket(zmq->zmq, ZMQ_REP);
+ if (!zmq->responder) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Could not create ZMQ socket: %s\n", zmq_strerror(errno));
+ return AVERROR_EXTERNAL;
+ }
+
+ if (zmq_bind(zmq->responder, zmq->bind_address) == -1) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Could not bind ZMQ socket to address '%s': %s\n",
+ zmq->bind_address, zmq_strerror(errno));
+ return AVERROR_EXTERNAL;
+ }
+
+ zmq->command_count = -1;
+ return 0;
+}
+
+static void av_cold uninit(AVFilterContext *ctx)
+{
+ ZMQContext *zmq = ctx->priv;
+
+ zmq_close(zmq->responder);
+ zmq_ctx_destroy(zmq->zmq);
+}
+
+typedef struct {
+ char *target, *command, *arg;
+} Command;
+
+#define SPACES " \f\t\n\r"
+
+static int parse_command(Command *cmd, const char *command_str, void *log_ctx)
+{
+ const char **buf = &command_str;
+
+ cmd->target = av_get_token(buf, SPACES);
+ if (!cmd->target || !cmd->target[0]) {
+ av_log(log_ctx, AV_LOG_ERROR,
+ "No target specified in command '%s'\n", command_str);
+ return AVERROR(EINVAL);
+ }
+
+ cmd->command = av_get_token(buf, SPACES);
+ if (!cmd->command || !cmd->command[0]) {
+ av_log(log_ctx, AV_LOG_ERROR,
+ "No command specified in command '%s'\n", command_str);
+ return AVERROR(EINVAL);
+ }
+
+ cmd->arg = av_get_token(buf, SPACES);
+ return 0;
+}
+
+static int recv_msg(AVFilterContext *ctx, char **buf, int *buf_size)
+{
+ ZMQContext *zmq = ctx->priv;
+ zmq_msg_t msg;
+ int ret = 0;
+
+ if (zmq_msg_init(&msg) == -1) {
+ av_log(ctx, AV_LOG_WARNING,
+ "Could not initialize receive message: %s\n", zmq_strerror(errno));
+ return AVERROR_EXTERNAL;
+ }
+
+ if (zmq_msg_recv(&msg, zmq->responder, ZMQ_DONTWAIT) == -1) {
+ if (errno != EAGAIN)
+ av_log(ctx, AV_LOG_WARNING,
+ "Could not receive message: %s\n", zmq_strerror(errno));
+ ret = AVERROR_EXTERNAL;
+ goto end;
+ }
+
+ *buf_size = zmq_msg_size(&msg) + 1;
+ *buf = av_malloc(*buf_size);
+ if (!*buf) {
+ ret = AVERROR(ENOMEM);
+ goto end;
+ }
+ memcpy(*buf, zmq_msg_data(&msg), *buf_size);
+ (*buf)[*buf_size-1] = 0;
+
+end:
+ zmq_msg_close(&msg);
+ return ret;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *ref)
+{
+ AVFilterContext *ctx = inlink->dst;
+ ZMQContext *zmq = ctx->priv;
+
+ while (1) {
+ char cmd_buf[1024];
+ char *recv_buf, *send_buf;
+ int recv_buf_size;
+ Command cmd = {0};
+ int ret;
+
+ /* receive command */
+ if (recv_msg(ctx, &recv_buf, &recv_buf_size) < 0)
+ break;
+ zmq->command_count++;
+
+ /* parse command */
+ if (parse_command(&cmd, recv_buf, ctx) < 0) {
+ av_log(ctx, AV_LOG_ERROR, "Could not parse command #%d\n", zmq->command_count);
+ goto end;
+ }
+
+ /* process command */
+ av_log(ctx, AV_LOG_VERBOSE,
+ "Processing command #%d target:%s command:%s arg:%s\n",
+ zmq->command_count, cmd.target, cmd.command, cmd.arg);
+ ret = avfilter_graph_send_command(inlink->graph,
+ cmd.target, cmd.command, cmd.arg,
+ cmd_buf, sizeof(cmd_buf),
+ AVFILTER_CMD_FLAG_ONE);
+ send_buf = av_asprintf("%d %s%s%s",
+ -ret, av_err2str(ret), cmd_buf[0] ? "\n" : "", cmd_buf);
+ if (!send_buf) {
+ ret = AVERROR(ENOMEM);
+ goto end;
+ }
+ av_log(ctx, AV_LOG_VERBOSE,
+ "Sending command reply for command #%d:\n%s\n",
+ zmq->command_count, send_buf);
+ if (zmq_send(zmq->responder, send_buf, strlen(send_buf), 0) == -1)
+ av_log(ctx, AV_LOG_ERROR, "Failed to send reply for command #%d: %s\n",
+ zmq->command_count, zmq_strerror(ret));
+
+ end:
+ av_freep(&send_buf);
+ av_freep(&recv_buf);
+ recv_buf_size = 0;
+ av_freep(&cmd.target);
+ av_freep(&cmd.command);
+ av_freep(&cmd.arg);
+ }
+
+ return ff_filter_frame(ctx->outputs[0], ref);
+}
+
+#if CONFIG_ZMQ_FILTER
+
+#define zmq_options options
+AVFILTER_DEFINE_CLASS(zmq);
+
+static const AVFilterPad zmq_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad zmq_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_zmq = {
+ .name = "zmq",
+ .description = NULL_IF_CONFIG_SMALL("Receive commands through ZMQ and broker them to filters."),
+ .init = init,
+ .uninit = uninit,
+ .priv_size = sizeof(ZMQContext),
+ .inputs = zmq_inputs,
+ .outputs = zmq_outputs,
+ .priv_class = &zmq_class,
+};
+
+#endif
+
+#if CONFIG_AZMQ_FILTER
+
+#define azmq_options options
+AVFILTER_DEFINE_CLASS(azmq);
+
+static const AVFilterPad azmq_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad azmq_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_azmq = {
+ .name = "azmq",
+ .description = NULL_IF_CONFIG_SMALL("Receive commands through ZMQ and broker them to filters."),
+ .init = init,
+ .uninit = uninit,
+ .priv_size = sizeof(ZMQContext),
+ .inputs = azmq_inputs,
+ .outputs = azmq_outputs,
+ .priv_class = &azmq_class,
+};
+
+#endif
diff --git a/libavfilter/fifo.c b/libavfilter/fifo.c
index a414585ece..abfbba10bb 100644
--- a/libavfilter/fifo.c
+++ b/libavfilter/fifo.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2007 Bobby Bingham
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -104,7 +104,7 @@ static void queue_pop(FifoContext *s)
static void buffer_offset(AVFilterLink *link, AVFrame *frame,
int offset)
{
- int nb_channels = av_get_channel_layout_nb_channels(link->channel_layout);
+ int nb_channels = link->channels;
int planar = av_sample_fmt_is_planar(link->format);
int planes = planar ? nb_channels : 1;
int block_align = av_get_bytes_per_sample(link->format) * (planar ? 1 : nb_channels);
@@ -129,7 +129,7 @@ static void buffer_offset(AVFilterLink *link, AVFrame *frame,
static int calc_ptr_alignment(AVFrame *frame)
{
int planes = av_sample_fmt_is_planar(frame->format) ?
- av_get_channel_layout_nb_channels(frame->channel_layout) : 1;
+ av_frame_get_channels(frame) : 1;
int min_align = 128;
int p;
@@ -170,7 +170,7 @@ static int return_audio_frame(AVFilterContext *ctx)
buffer_offset(link, head, link->request_samples);
}
} else {
- int nb_channels = av_get_channel_layout_nb_channels(link->channel_layout);
+ int nb_channels = link->channels;
if (!s->out) {
s->out = ff_get_audio_buffer(link, link->request_samples);
@@ -201,6 +201,8 @@ static int return_audio_frame(AVFilterContext *ctx)
break;
} else if (ret < 0)
return ret;
+ if (!s->root.next)
+ return 0;
}
head = s->root.next->frame;
@@ -236,6 +238,8 @@ static int request_frame(AVFilterLink *outlink)
return return_audio_frame(outlink->src);
return ret;
}
+ if (!fifo->root.next)
+ return 0;
}
if (outlink->request_samples) {
@@ -252,7 +256,6 @@ static const AVFilterPad avfilter_vf_fifo_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
- .get_video_buffer = ff_null_get_video_buffer,
.filter_frame = add_to_queue,
},
{ NULL }
@@ -284,7 +287,6 @@ static const AVFilterPad avfilter_af_afifo_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
- .get_audio_buffer = ff_null_get_audio_buffer,
.filter_frame = add_to_queue,
},
{ NULL }
diff --git a/libavfilter/filters.h b/libavfilter/filters.h
new file mode 100644
index 0000000000..2c78d60e62
--- /dev/null
+++ b/libavfilter/filters.h
@@ -0,0 +1,137 @@
+/*
+ * Filters implementation helper functions
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_FILTERS_H
+#define AVFILTER_FILTERS_H
+
+/**
+ * Filters implementation helper functions
+ */
+
+#include "avfilter.h"
+
+/**
+ * Mark a filter ready and schedule it for activation.
+ *
+ * This is automatically done when something happens to the filter (queued
+ * frame, status change, request on output).
+ * Filters implementing the activate callback can call it directly to
+ * perform one more round of processing later.
+ * It is also useful for filters reacting to external or asynchronous
+ * events.
+ */
+void ff_filter_set_ready(AVFilterContext *filter, unsigned priority);
+
+/**
+ * Process the commands queued in the link up to the time of the frame.
+ * Commands will trigger the process_command() callback.
+ * @return >= 0 or AVERROR code.
+ */
+int ff_inlink_process_commands(AVFilterLink *link, const AVFrame *frame);
+
+/**
+ * Evaluate the timeline expression of the link for the time and properties
+ * of the frame.
+ * @return >0 if enabled, 0 if disabled
+ * @note It does not update link->dst->is_disabled.
+ */
+int ff_inlink_evaluate_timeline_at_frame(AVFilterLink *link, const AVFrame *frame);
+
+/**
+ * Test if a frame is available on the link.
+ * @return >0 if a frame is available
+ */
+int ff_inlink_check_available_frame(AVFilterLink *link);
+
+/**
+ * Test if enough samples are available on the link.
+ * @return >0 if enough samples are available
+ * @note on EOF and error, min becomes 1
+ */
+int ff_inlink_check_available_samples(AVFilterLink *link, unsigned min);
+
+/**
+ * Take a frame from the link's FIFO and update the link's stats.
+ *
+ * If ff_inlink_check_available_frame() was previously called, the
+ * preferred way of expressing it is "av_assert1(ret);" immediately after
+ * ff_inlink_consume_frame(). Negative error codes must still be checked.
+ *
+ * @note May trigger process_command() and/or update is_disabled.
+ * @return >0 if a frame is available,
+ * 0 and set rframe to NULL if no frame available,
+ * or AVERROR code
+ */
+int ff_inlink_consume_frame(AVFilterLink *link, AVFrame **rframe);
+
+/**
+ * Take samples from the link's FIFO and update the link's stats.
+ *
+ * If ff_inlink_check_available_samples() was previously called, the
+ * preferred way of expressing it is "av_assert1(ret);" immediately after
+ * ff_inlink_consume_samples(). Negative error codes must still be checked.
+ *
+ * @note May trigger process_command() and/or update is_disabled.
+ * @return >0 if a frame is available,
+ * 0 and set rframe to NULL if no frame available,
+ * or AVERROR code
+ */
+int ff_inlink_consume_samples(AVFilterLink *link, unsigned min, unsigned max,
+ AVFrame **rframe);
+
+/**
+ * Make sure a frame is writable.
+ * This is similar to av_frame_make_writable() except it uses the link's
+ * buffer allocation callback, and therefore allows direct rendering.
+ */
+int ff_inlink_make_frame_writable(AVFilterLink *link, AVFrame **rframe);
+
+/**
+ * Test and acknowledge the change of status on the link.
+ *
+ * Status means EOF or an error condition; a change from the normal (0)
+ * status to a non-zero status can be queued in a filter's input link, it
+ * becomes relevant after the frames queued in the link's FIFO are
+ * processed. This function tests if frames are still queued and if a queued
+ * status change has not yet been processed. In that case it performs basic
+ * treatment (updating the link's timestamp) and returns a positive value to
+ * let the filter do its own treatments (flushing...).
+ *
+ * Filters implementing the activate callback should call this function when
+ * they think it might succeed (usually after checking unsuccessfully for a
+ * queued frame).
+ * Filters implementing the filter_frame and request_frame callbacks do not
+ * need to call that since the same treatment happens in ff_filter_frame().
+ *
+ * @param[out] rstatus new or current status
+ * @param[out] rpts current timestamp of the link in link time base
+ * @return >0 if status changed, <0 if status already acked, 0 otherwise
+ */
+int ff_inlink_acknowledge_status(AVFilterLink *link, int *rstatus, int64_t *rpts);
+
+/**
+ * Mark that a frame is wanted on the link.
+ * Unlike ff_filter_frame(), it must not be called when the link has a
+ * non-zero status, and thus does not acknowledge it.
+ * Also it cannot fail.
+ */
+void ff_inlink_request_frame(AVFilterLink *link);
+
+#endif /* AVFILTER_FILTERS_H */
diff --git a/libavfilter/formats.c b/libavfilter/formats.c
index 7b5a93c325..d4de862237 100644
--- a/libavfilter/formats.c
+++ b/libavfilter/formats.c
@@ -2,29 +2,35 @@
* Filter layer - format negotiation
* Copyright (c) 2007 Bobby Bingham
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include "libavutil/avassert.h"
+#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
+#include "libavutil/eval.h"
#include "libavutil/pixdesc.h"
+#include "libavutil/parseutils.h"
#include "avfilter.h"
#include "internal.h"
#include "formats.h"
+#define KNOWN(l) (!FF_LAYOUT2COUNT(l)) /* for readability */
+
/**
* Add all refs from a to ret and destroy a.
*/
@@ -33,8 +39,8 @@ do { \
type ***tmp; \
int i; \
\
- if (!(tmp = av_realloc(ret->refs, \
- sizeof(*tmp) * (ret->refcount + a->refcount)))) \
+ if (!(tmp = av_realloc_array(ret->refs, ret->refcount + a->refcount, \
+ sizeof(*tmp)))) \
goto fail; \
ret->refs = tmp; \
\
@@ -60,15 +66,21 @@ do {
goto fail; \
\
if (count) { \
- if (!(ret->fmts = av_malloc(sizeof(*ret->fmts) * count))) \
+ if (!(ret->fmts = av_malloc_array(count, sizeof(*ret->fmts)))) \
goto fail; \
for (i = 0; i < a->nb; i++) \
for (j = 0; j < b->nb; j++) \
- if (a->fmts[i] == b->fmts[j]) \
+ if (a->fmts[i] == b->fmts[j]) { \
+ if(k >= FFMIN(a->nb, b->nb)){ \
+ av_log(NULL, AV_LOG_ERROR, "Duplicate formats in avfilter_merge_formats() detected\n"); \
+ av_free(ret->fmts); \
+ av_free(ret); \
+ return NULL; \
+ } \
ret->fmts[k++] = a->fmts[i]; \
- \
- ret->nb = k; \
+ } \
} \
+ ret->nb = k; \
/* check that there was at least one common format */ \
if (!ret->nb) \
goto fail; \
@@ -77,13 +89,41 @@ do {
MERGE_REF(ret, b, fmts, type, fail); \
} while (0)
-AVFilterFormats *ff_merge_formats(AVFilterFormats *a, AVFilterFormats *b)
+AVFilterFormats *ff_merge_formats(AVFilterFormats *a, AVFilterFormats *b,
+ enum AVMediaType type)
{
AVFilterFormats *ret = NULL;
+ int i, j;
+ int alpha1=0, alpha2=0;
+ int chroma1=0, chroma2=0;
if (a == b)
return a;
+ /* Do not lose chroma or alpha in merging.
+ It happens if both lists have formats with chroma (resp. alpha), but
+ the only formats in common do not have it (e.g. YUV+gray vs.
+ RGB+gray): in that case, the merging would select the gray format,
+ possibly causing a lossy conversion elsewhere in the graph.
+ To avoid that, pretend that there are no common formats to force the
+ insertion of a conversion filter. */
+ if (type == AVMEDIA_TYPE_VIDEO)
+ for (i = 0; i < a->nb_formats; i++)
+ for (j = 0; j < b->nb_formats; j++) {
+ const AVPixFmtDescriptor *adesc = av_pix_fmt_desc_get(a->formats[i]);
+ const AVPixFmtDescriptor *bdesc = av_pix_fmt_desc_get(b->formats[j]);
+ alpha2 |= adesc->flags & bdesc->flags & AV_PIX_FMT_FLAG_ALPHA;
+ chroma2|= adesc->nb_components > 1 && bdesc->nb_components > 1;
+ if (a->formats[i] == b->formats[j]) {
+ alpha1 |= adesc->flags & AV_PIX_FMT_FLAG_ALPHA;
+ chroma1|= adesc->nb_components > 1;
+ }
+ }
+
+ // If chroma or alpha can be lost through merging then do not merge
+ if (alpha2 > alpha1 || chroma2 > chroma1)
+ return NULL;
+
MERGE_FORMATS(ret, a, b, formats, nb_formats, AVFilterFormats, fail);
return ret;
@@ -127,21 +167,81 @@ AVFilterChannelLayouts *ff_merge_channel_layouts(AVFilterChannelLayouts *a,
AVFilterChannelLayouts *b)
{
AVFilterChannelLayouts *ret = NULL;
+ unsigned a_all = a->all_layouts + a->all_counts;
+ unsigned b_all = b->all_layouts + b->all_counts;
+ int ret_max, ret_nb = 0, i, j, round;
if (a == b) return a;
- if (a->nb_channel_layouts && b->nb_channel_layouts) {
- MERGE_FORMATS(ret, a, b, channel_layouts, nb_channel_layouts,
- AVFilterChannelLayouts, fail);
- } else if (a->nb_channel_layouts) {
- MERGE_REF(a, b, channel_layouts, AVFilterChannelLayouts, fail);
- ret = a;
- } else {
+ /* Put the most generic set in a, to avoid doing everything twice */
+ if (a_all < b_all) {
+ FFSWAP(AVFilterChannelLayouts *, a, b);
+ FFSWAP(unsigned, a_all, b_all);
+ }
+ if (a_all) {
+ if (a_all == 1 && !b_all) {
+ /* keep only known layouts in b; works also for b_all = 1 */
+ for (i = j = 0; i < b->nb_channel_layouts; i++)
+ if (KNOWN(b->channel_layouts[i]))
+ b->channel_layouts[j++] = b->channel_layouts[i];
+ /* Not optimal: the unknown layouts of b may become known after
+ another merge. */
+ if (!j)
+ return NULL;
+ b->nb_channel_layouts = j;
+ }
MERGE_REF(b, a, channel_layouts, AVFilterChannelLayouts, fail);
- ret = b;
+ return b;
+ }
+
+ ret_max = a->nb_channel_layouts + b->nb_channel_layouts;
+ if (!(ret = av_mallocz(sizeof(*ret))) ||
+ !(ret->channel_layouts = av_malloc_array(ret_max,
+ sizeof(*ret->channel_layouts))))
+ goto fail;
+
+ /* a[known] intersect b[known] */
+ for (i = 0; i < a->nb_channel_layouts; i++) {
+ if (!KNOWN(a->channel_layouts[i]))
+ continue;
+ for (j = 0; j < b->nb_channel_layouts; j++) {
+ if (a->channel_layouts[i] == b->channel_layouts[j]) {
+ ret->channel_layouts[ret_nb++] = a->channel_layouts[i];
+ a->channel_layouts[i] = b->channel_layouts[j] = 0;
+ }
+ }
+ }
+ /* 1st round: a[known] intersect b[generic]
+ 2nd round: a[generic] intersect b[known] */
+ for (round = 0; round < 2; round++) {
+ for (i = 0; i < a->nb_channel_layouts; i++) {
+ uint64_t fmt = a->channel_layouts[i], bfmt;
+ if (!fmt || !KNOWN(fmt))
+ continue;
+ bfmt = FF_COUNT2LAYOUT(av_get_channel_layout_nb_channels(fmt));
+ for (j = 0; j < b->nb_channel_layouts; j++)
+ if (b->channel_layouts[j] == bfmt)
+ ret->channel_layouts[ret_nb++] = a->channel_layouts[i];
+ }
+ /* 1st round: swap to prepare 2nd round; 2nd round: put it back */
+ FFSWAP(AVFilterChannelLayouts *, a, b);
+ }
+ /* a[generic] intersect b[generic] */
+ for (i = 0; i < a->nb_channel_layouts; i++) {
+ if (KNOWN(a->channel_layouts[i]))
+ continue;
+ for (j = 0; j < b->nb_channel_layouts; j++)
+ if (a->channel_layouts[i] == b->channel_layouts[j])
+ ret->channel_layouts[ret_nb++] = a->channel_layouts[i];
}
+ ret->nb_channel_layouts = ret_nb;
+ if (!ret->nb_channel_layouts)
+ goto fail;
+ MERGE_REF(ret, a, channel_layouts, AVFilterChannelLayouts, fail);
+ MERGE_REF(ret, b, channel_layouts, AVFilterChannelLayouts, fail);
return ret;
+
fail:
if (ret) {
av_freep(&ret->refs);
@@ -155,64 +255,96 @@ int ff_fmt_is_in(int fmt, const int *fmts)
{
const int *p;
- for (p = fmts; *p != AV_PIX_FMT_NONE; p++) {
+ for (p = fmts; *p != -1; p++) {
if (fmt == *p)
return 1;
}
return 0;
}
+#define MAKE_FORMAT_LIST(type, field, count_field) \
+ type *formats; \
+ int count = 0; \
+ if (fmts) \
+ for (count = 0; fmts[count] != -1; count++) \
+ ; \
+ formats = av_mallocz(sizeof(*formats)); \
+ if (!formats) \
+ return NULL; \
+ formats->count_field = count; \
+ if (count) { \
+ formats->field = av_malloc_array(count, sizeof(*formats->field)); \
+ if (!formats->field) { \
+ av_freep(&formats); \
+ return NULL; \
+ } \
+ }
+
AVFilterFormats *ff_make_format_list(const int *fmts)
{
- AVFilterFormats *formats;
- int count;
+ MAKE_FORMAT_LIST(AVFilterFormats, formats, nb_formats);
+ while (count--)
+ formats->formats[count] = fmts[count];
- for (count = 0; fmts[count] != -1; count++)
- ;
+ return formats;
+}
- formats = av_mallocz(sizeof(*formats));
- if (!formats)
- return NULL;
- if (count) {
- formats->formats = av_malloc(sizeof(*formats->formats) * count);
- if (!formats->formats) {
- av_freep(&formats);
- return NULL;
- }
- }
- formats->nb_formats = count;
- memcpy(formats->formats, fmts, sizeof(*formats->formats) * count);
+AVFilterChannelLayouts *ff_make_formatu64_list(const uint64_t *fmts)
+{
+ MAKE_FORMAT_LIST(AVFilterChannelLayouts,
+ channel_layouts, nb_channel_layouts);
+ if (count)
+ memcpy(formats->channel_layouts, fmts,
+ sizeof(*formats->channel_layouts) * count);
return formats;
}
-#define ADD_FORMAT(f, fmt, type, list, nb) \
+AVFilterChannelLayouts *avfilter_make_format64_list(const int64_t *fmts)
+{
+ MAKE_FORMAT_LIST(AVFilterChannelLayouts,
+ channel_layouts, nb_channel_layouts);
+ if (count)
+ memcpy(formats->channel_layouts, fmts,
+ sizeof(*formats->channel_layouts) * count);
+
+ return formats;
+}
+
+#define ADD_FORMAT(f, fmt, unref_fn, type, list, nb) \
do { \
type *fmts; \
+ void *oldf = *f; \
\
- if (!(*f) && !(*f = av_mallocz(sizeof(**f)))) \
+ if (!(*f) && !(*f = av_mallocz(sizeof(**f)))) { \
+ unref_fn(f); \
return AVERROR(ENOMEM); \
+ } \
\
- fmts = av_realloc((*f)->list, \
- sizeof(*(*f)->list) * ((*f)->nb + 1));\
+ fmts = av_realloc_array((*f)->list, (*f)->nb + 1, \
+ sizeof(*(*f)->list)); \
if (!fmts) { \
- av_freep(&f); \
+ unref_fn(f); \
+ if (!oldf) \
+ av_freep(f); \
return AVERROR(ENOMEM); \
} \
\
(*f)->list = fmts; \
(*f)->list[(*f)->nb++] = fmt; \
- return 0; \
} while (0)
-int ff_add_format(AVFilterFormats **avff, int fmt)
+int ff_add_format(AVFilterFormats **avff, int64_t fmt)
{
- ADD_FORMAT(avff, fmt, int, formats, nb_formats);
+ ADD_FORMAT(avff, fmt, ff_formats_unref, int, formats, nb_formats);
+ return 0;
}
int ff_add_channel_layout(AVFilterChannelLayouts **l, uint64_t channel_layout)
{
- ADD_FORMAT(l, channel_layout, uint64_t, channel_layouts, nb_channel_layouts);
+ av_assert1(!(*l && (*l)->all_layouts));
+ ADD_FORMAT(l, channel_layout, ff_channel_layouts_unref, uint64_t, channel_layouts, nb_channel_layouts);
+ return 0;
}
AVFilterFormats *ff_all_formats(enum AVMediaType type)
@@ -222,12 +354,14 @@ AVFilterFormats *ff_all_formats(enum AVMediaType type)
if (type == AVMEDIA_TYPE_VIDEO) {
const AVPixFmtDescriptor *desc = NULL;
while ((desc = av_pix_fmt_desc_next(desc))) {
- ff_add_format(&ret, av_pix_fmt_desc_get_id(desc));
+ if (ff_add_format(&ret, av_pix_fmt_desc_get_id(desc)) < 0)
+ return NULL;
}
} else if (type == AVMEDIA_TYPE_AUDIO) {
enum AVSampleFormat fmt = 0;
while (av_get_sample_fmt_name(fmt)) {
- ff_add_format(&ret, fmt);
+ if (ff_add_format(&ret, fmt) < 0)
+ return NULL;
fmt++;
}
}
@@ -235,14 +369,25 @@ AVFilterFormats *ff_all_formats(enum AVMediaType type)
return ret;
}
+const int64_t avfilter_all_channel_layouts[] = {
+#include "all_channel_layouts.inc"
+ -1
+};
+
+// AVFilterFormats *avfilter_make_all_channel_layouts(void)
+// {
+// return avfilter_make_format64_list(avfilter_all_channel_layouts);
+// }
+
AVFilterFormats *ff_planar_sample_fmts(void)
{
AVFilterFormats *ret = NULL;
int fmt;
- for (fmt = 0; fmt < AV_SAMPLE_FMT_NB; fmt++)
+ for (fmt = 0; av_get_bytes_per_sample(fmt)>0; fmt++)
if (av_sample_fmt_is_planar(fmt))
- ff_add_format(&ret, fmt);
+ if (ff_add_format(&ret, fmt) < 0)
+ return NULL;
return ret;
}
@@ -256,26 +401,45 @@ AVFilterFormats *ff_all_samplerates(void)
AVFilterChannelLayouts *ff_all_channel_layouts(void)
{
AVFilterChannelLayouts *ret = av_mallocz(sizeof(*ret));
+ if (!ret)
+ return NULL;
+ ret->all_layouts = 1;
return ret;
}
-#define FORMATS_REF(f, ref) \
-do { \
- *ref = f; \
- f->refs = av_realloc(f->refs, sizeof(*f->refs) * ++f->refcount); \
- if (!f->refs) \
- return; \
- f->refs[f->refcount-1] = ref; \
-} while (0)
+AVFilterChannelLayouts *ff_all_channel_counts(void)
+{
+ AVFilterChannelLayouts *ret = av_mallocz(sizeof(*ret));
+ if (!ret)
+ return NULL;
+ ret->all_layouts = ret->all_counts = 1;
+ return ret;
+}
+
+#define FORMATS_REF(f, ref, unref_fn) \
+ void *tmp; \
+ \
+ if (!f || !ref) \
+ return AVERROR(ENOMEM); \
+ \
+ tmp = av_realloc_array(f->refs, sizeof(*f->refs), f->refcount + 1); \
+ if (!tmp) { \
+ unref_fn(&f); \
+ return AVERROR(ENOMEM); \
+ } \
+ f->refs = tmp; \
+ f->refs[f->refcount++] = ref; \
+ *ref = f; \
+ return 0
-void ff_channel_layouts_ref(AVFilterChannelLayouts *f, AVFilterChannelLayouts **ref)
+int ff_channel_layouts_ref(AVFilterChannelLayouts *f, AVFilterChannelLayouts **ref)
{
- FORMATS_REF(f, ref);
+ FORMATS_REF(f, ref, ff_channel_layouts_unref);
}
-void ff_formats_ref(AVFilterFormats *f, AVFilterFormats **ref)
+int ff_formats_ref(AVFilterFormats *f, AVFilterFormats **ref)
{
- FORMATS_REF(f, ref);
+ FORMATS_REF(f, ref, ff_formats_unref);
}
#define FIND_REF_INDEX(ref, idx) \
@@ -292,7 +456,7 @@ do { \
do { \
int idx = -1; \
\
- if (!*ref) \
+ if (!*ref || !(*ref)->refs) \
return; \
\
FIND_REF_INDEX(ref, idx); \
@@ -343,19 +507,33 @@ void ff_formats_changeref(AVFilterFormats **oldref, AVFilterFormats **newref)
FORMATS_CHANGEREF(oldref, newref);
}
-#define SET_COMMON_FORMATS(ctx, fmts, in_fmts, out_fmts, ref, list) \
-{ \
+#define SET_COMMON_FORMATS(ctx, fmts, in_fmts, out_fmts, ref_fn, unref_fn, list) \
int count = 0, i; \
\
+ if (!fmts) \
+ return AVERROR(ENOMEM); \
+ \
for (i = 0; i < ctx->nb_inputs; i++) { \
- if (ctx->inputs[i]) { \
- ref(fmts, &ctx->inputs[i]->out_fmts); \
+ if (ctx->inputs[i] && !ctx->inputs[i]->out_fmts) { \
+ int ret = ref_fn(fmts, &ctx->inputs[i]->out_fmts); \
+ if (ret < 0) { \
+ unref_fn(&fmts); \
+ av_freep(&fmts->list); \
+ av_freep(&fmts); \
+ return ret; \
+ } \
count++; \
} \
} \
for (i = 0; i < ctx->nb_outputs; i++) { \
- if (ctx->outputs[i]) { \
- ref(fmts, &ctx->outputs[i]->in_fmts); \
+ if (ctx->outputs[i] && !ctx->outputs[i]->in_fmts) { \
+ int ret = ref_fn(fmts, &ctx->outputs[i]->in_fmts); \
+ if (ret < 0) { \
+ unref_fn(&fmts); \
+ av_freep(&fmts->list); \
+ av_freep(&fmts); \
+ return ret; \
+ } \
count++; \
} \
} \
@@ -365,20 +543,21 @@ void ff_formats_changeref(AVFilterFormats **oldref, AVFilterFormats **newref)
av_freep(&fmts->refs); \
av_freep(&fmts); \
} \
-}
+ \
+ return 0;
-void ff_set_common_channel_layouts(AVFilterContext *ctx,
- AVFilterChannelLayouts *layouts)
+int ff_set_common_channel_layouts(AVFilterContext *ctx,
+ AVFilterChannelLayouts *layouts)
{
SET_COMMON_FORMATS(ctx, layouts, in_channel_layouts, out_channel_layouts,
- ff_channel_layouts_ref, channel_layouts);
+ ff_channel_layouts_ref, ff_channel_layouts_unref, channel_layouts);
}
-void ff_set_common_samplerates(AVFilterContext *ctx,
- AVFilterFormats *samplerates)
+int ff_set_common_samplerates(AVFilterContext *ctx,
+ AVFilterFormats *samplerates)
{
SET_COMMON_FORMATS(ctx, samplerates, in_samplerates, out_samplerates,
- ff_formats_ref, formats);
+ ff_formats_ref, ff_formats_unref, formats);
}
/**
@@ -386,23 +565,125 @@ void ff_set_common_samplerates(AVFilterContext *ctx,
* formats. If there are no links hooked to this filter, the list of formats is
* freed.
*/
-void ff_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats)
+int ff_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats)
{
SET_COMMON_FORMATS(ctx, formats, in_formats, out_formats,
- ff_formats_ref, formats);
+ ff_formats_ref, ff_formats_unref, formats);
}
-int ff_default_query_formats(AVFilterContext *ctx)
+static int default_query_formats_common(AVFilterContext *ctx,
+ AVFilterChannelLayouts *(layouts)(void))
{
+ int ret;
enum AVMediaType type = ctx->inputs && ctx->inputs [0] ? ctx->inputs [0]->type :
ctx->outputs && ctx->outputs[0] ? ctx->outputs[0]->type :
AVMEDIA_TYPE_VIDEO;
- ff_set_common_formats(ctx, ff_all_formats(type));
+ ret = ff_set_common_formats(ctx, ff_all_formats(type));
+ if (ret < 0)
+ return ret;
if (type == AVMEDIA_TYPE_AUDIO) {
- ff_set_common_channel_layouts(ctx, ff_all_channel_layouts());
- ff_set_common_samplerates(ctx, ff_all_samplerates());
+ ret = ff_set_common_channel_layouts(ctx, layouts());
+ if (ret < 0)
+ return ret;
+ ret = ff_set_common_samplerates(ctx, ff_all_samplerates());
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+int ff_default_query_formats(AVFilterContext *ctx)
+{
+ return default_query_formats_common(ctx, ff_all_channel_counts);
+}
+
+int ff_query_formats_all_layouts(AVFilterContext *ctx)
+{
+ return default_query_formats_common(ctx, ff_all_channel_layouts);
+}
+
+/* internal functions for parsing audio format arguments */
+
+int ff_parse_pixel_format(enum AVPixelFormat *ret, const char *arg, void *log_ctx)
+{
+ char *tail;
+ int pix_fmt = av_get_pix_fmt(arg);
+ if (pix_fmt == AV_PIX_FMT_NONE) {
+ pix_fmt = strtol(arg, &tail, 0);
+ if (*tail || !av_pix_fmt_desc_get(pix_fmt)) {
+ av_log(log_ctx, AV_LOG_ERROR, "Invalid pixel format '%s'\n", arg);
+ return AVERROR(EINVAL);
+ }
+ }
+ *ret = pix_fmt;
+ return 0;
+}
+
+int ff_parse_sample_format(int *ret, const char *arg, void *log_ctx)
+{
+ char *tail;
+ int sfmt = av_get_sample_fmt(arg);
+ if (sfmt == AV_SAMPLE_FMT_NONE) {
+ sfmt = strtol(arg, &tail, 0);
+ if (*tail || av_get_bytes_per_sample(sfmt)<=0) {
+ av_log(log_ctx, AV_LOG_ERROR, "Invalid sample format '%s'\n", arg);
+ return AVERROR(EINVAL);
+ }
+ }
+ *ret = sfmt;
+ return 0;
+}
+
+int ff_parse_time_base(AVRational *ret, const char *arg, void *log_ctx)
+{
+ AVRational r;
+ if(av_parse_ratio(&r, arg, INT_MAX, 0, log_ctx) < 0 ||r.num<=0 ||r.den<=0) {
+ av_log(log_ctx, AV_LOG_ERROR, "Invalid time base '%s'\n", arg);
+ return AVERROR(EINVAL);
+ }
+ *ret = r;
+ return 0;
+}
+
+int ff_parse_sample_rate(int *ret, const char *arg, void *log_ctx)
+{
+ char *tail;
+ double srate = av_strtod(arg, &tail);
+ if (*tail || srate < 1 || (int)srate != srate || srate > INT_MAX) {
+ av_log(log_ctx, AV_LOG_ERROR, "Invalid sample rate '%s'\n", arg);
+ return AVERROR(EINVAL);
+ }
+ *ret = srate;
+ return 0;
+}
+
+int ff_parse_channel_layout(int64_t *ret, int *nret, const char *arg,
+ void *log_ctx)
+{
+ char *tail;
+ int64_t chlayout;
+ int nb_channels;
+
+ if (av_get_extended_channel_layout(arg, &chlayout, &nb_channels) < 0) {
+ /* [TEMPORARY 2016-12 -> 2017-12]*/
+ nb_channels = strtol(arg, &tail, 10);
+ if (!errno && *tail == 'c' && *(tail + 1) == '\0' && nb_channels > 0 && nb_channels < 64) {
+ chlayout = 0;
+ av_log(log_ctx, AV_LOG_WARNING, "Deprecated channel count specification '%s'. This will stop working in releases made in 2018 and after.\n", arg);
+ } else {
+ av_log(log_ctx, AV_LOG_ERROR, "Invalid channel layout '%s'\n", arg);
+ return AVERROR(EINVAL);
+ }
+ }
+ if (!chlayout && !nret) {
+ av_log(log_ctx, AV_LOG_ERROR, "Unknown channel layout '%s' is not supported.\n", arg);
+ return AVERROR(EINVAL);
}
+ *ret = chlayout;
+ if (nret)
+ *nret = nb_channels;
return 0;
}
diff --git a/libavfilter/formats.h b/libavfilter/formats.h
index b273f8aa03..870809b5a0 100644
--- a/libavfilter/formats.h
+++ b/libavfilter/formats.h
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -69,15 +69,46 @@ struct AVFilterFormats {
struct AVFilterFormats ***refs; ///< references to this list
};
+/**
+ * A list of supported channel layouts.
+ *
+ * The list works the same as AVFilterFormats, except for the following
+ * differences:
+ * - A list with all_layouts = 1 means all channel layouts with a known
+ * disposition; nb_channel_layouts must then be 0.
+ * - A list with all_counts = 1 means all channel counts, with a known or
+ * unknown disposition; nb_channel_layouts must then be 0 and all_layouts 1.
+ * - The list must not contain a layout with a known disposition and a
+ * channel count with unknown disposition with the same number of channels
+ * (e.g. AV_CH_LAYOUT_STEREO and FF_COUNT2LAYOUT(2).
+ */
typedef struct AVFilterChannelLayouts {
uint64_t *channel_layouts; ///< list of channel layouts
int nb_channel_layouts; ///< number of channel layouts
+ char all_layouts; ///< accept any known channel layout
+ char all_counts; ///< accept any channel layout or count
unsigned refcount; ///< number of references to this list
struct AVFilterChannelLayouts ***refs; ///< references to this list
} AVFilterChannelLayouts;
/**
+ * Encode a channel count as a channel layout.
+ * FF_COUNT2LAYOUT(c) means any channel layout with c channels, with a known
+ * or unknown disposition.
+ * The result is only valid inside AVFilterChannelLayouts and immediately
+ * related functions.
+ */
+#define FF_COUNT2LAYOUT(c) (0x8000000000000000ULL | (c))
+
+/**
+ * Decode a channel count encoded as a channel layout.
+ * Return 0 if the channel layout was a real one.
+ */
+#define FF_LAYOUT2COUNT(l) (((l) & 0x8000000000000000ULL) ? \
+ (int)((l) & 0x7FFFFFFF) : 0)
+
+/**
* Return a channel layouts/samplerates list which contains the intersection of
* the layouts/samplerates of a and b. Also, all the references of a, all the
* references of b, and a and b themselves will be deallocated.
@@ -92,35 +123,57 @@ AVFilterFormats *ff_merge_samplerates(AVFilterFormats *a,
/**
* Construct an empty AVFilterChannelLayouts/AVFilterFormats struct --
- * representing any channel layout/sample rate.
+ * representing any channel layout (with known disposition)/sample rate.
*/
+av_warn_unused_result
AVFilterChannelLayouts *ff_all_channel_layouts(void);
+
+av_warn_unused_result
AVFilterFormats *ff_all_samplerates(void);
/**
+ * Construct an AVFilterChannelLayouts coding for any channel layout, with
+ * known or unknown disposition.
+ */
+av_warn_unused_result
+AVFilterChannelLayouts *ff_all_channel_counts(void);
+
+av_warn_unused_result
+AVFilterChannelLayouts *avfilter_make_format64_list(const int64_t *fmts);
+
+av_warn_unused_result
+AVFilterChannelLayouts *ff_make_formatu64_list(const uint64_t *fmts);
+
+
+/**
* A helper for query_formats() which sets all links to the same list of channel
* layouts/sample rates. If there are no links hooked to this filter, the list
* is freed.
*/
-void ff_set_common_channel_layouts(AVFilterContext *ctx,
- AVFilterChannelLayouts *layouts);
-void ff_set_common_samplerates(AVFilterContext *ctx,
- AVFilterFormats *samplerates);
+av_warn_unused_result
+int ff_set_common_channel_layouts(AVFilterContext *ctx,
+ AVFilterChannelLayouts *layouts);
+av_warn_unused_result
+int ff_set_common_samplerates(AVFilterContext *ctx,
+ AVFilterFormats *samplerates);
/**
* A helper for query_formats() which sets all links to the same list of
* formats. If there are no links hooked to this filter, the list of formats is
* freed.
*/
-void ff_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats);
+av_warn_unused_result
+int ff_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats);
+av_warn_unused_result
int ff_add_channel_layout(AVFilterChannelLayouts **l, uint64_t channel_layout);
/**
* Add *ref as a new reference to f.
*/
-void ff_channel_layouts_ref(AVFilterChannelLayouts *f,
- AVFilterChannelLayouts **ref);
+av_warn_unused_result
+int ff_channel_layouts_ref(AVFilterChannelLayouts *f,
+ AVFilterChannelLayouts **ref);
/**
* Remove a reference to a channel layouts list.
@@ -130,8 +183,16 @@ void ff_channel_layouts_unref(AVFilterChannelLayouts **ref);
void ff_channel_layouts_changeref(AVFilterChannelLayouts **oldref,
AVFilterChannelLayouts **newref);
+av_warn_unused_result
int ff_default_query_formats(AVFilterContext *ctx);
+ /**
+ * Set the formats list to all known channel layouts. This function behaves
+ * like ff_default_query_formats(), except it only accepts known channel
+ * layouts. It should only be used with audio filters.
+ */
+av_warn_unused_result
+int ff_query_formats_all_layouts(AVFilterContext *ctx);
/**
* Create a list of supported formats. This is intended for use in
@@ -140,6 +201,7 @@ int ff_default_query_formats(AVFilterContext *ctx);
* @param fmts list of media formats, terminated by -1
* @return the format list, with no existing references
*/
+av_warn_unused_result
AVFilterFormats *ff_make_format_list(const int *fmts);
/**
@@ -150,16 +212,19 @@ AVFilterFormats *ff_make_format_list(const int *fmts);
* @return a non negative value in case of success, or a negative
* value corresponding to an AVERROR code in case of error
*/
-int ff_add_format(AVFilterFormats **avff, int fmt);
+av_warn_unused_result
+int ff_add_format(AVFilterFormats **avff, int64_t fmt);
/**
- * Return a list of all formats supported by Libav for the given media type.
+ * Return a list of all formats supported by FFmpeg for the given media type.
*/
+av_warn_unused_result
AVFilterFormats *ff_all_formats(enum AVMediaType type);
/**
* Construct a formats list containing all planar sample formats.
*/
+av_warn_unused_result
AVFilterFormats *ff_planar_sample_fmts(void);
/**
@@ -170,7 +235,8 @@ AVFilterFormats *ff_planar_sample_fmts(void);
* If a and b do not share any common formats, neither is modified, and NULL
* is returned.
*/
-AVFilterFormats *ff_merge_formats(AVFilterFormats *a, AVFilterFormats *b);
+AVFilterFormats *ff_merge_formats(AVFilterFormats *a, AVFilterFormats *b,
+ enum AVMediaType type);
/**
* Add *ref as a new reference to formats.
@@ -184,7 +250,8 @@ AVFilterFormats *ff_merge_formats(AVFilterFormats *a, AVFilterFormats *b);
* | |____| | | |____|
* |________| |________________________
*/
-void ff_formats_ref(AVFilterFormats *formats, AVFilterFormats **ref);
+av_warn_unused_result
+int ff_formats_ref(AVFilterFormats *formats, AVFilterFormats **ref);
/**
* If *ref is non-NULL, remove *ref as a reference to the format list
diff --git a/libavfilter/framepool.c b/libavfilter/framepool.c
new file mode 100644
index 0000000000..e1f1e2cc41
--- /dev/null
+++ b/libavfilter/framepool.c
@@ -0,0 +1,296 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * Copyright (c) 2015 Matthieu Bouron <matthieu.bouron stupeflix.com>
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "framepool.h"
+#include "libavutil/avassert.h"
+#include "libavutil/avutil.h"
+#include "libavutil/buffer.h"
+#include "libavutil/frame.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/mem.h"
+#include "libavutil/pixfmt.h"
+
+struct FFFramePool {
+
+ enum AVMediaType type;
+
+ /* video */
+ int width;
+ int height;
+
+ /* audio */
+ int planes;
+ int channels;
+ int nb_samples;
+
+ /* common */
+ int format;
+ int align;
+ int linesize[4];
+ AVBufferPool *pools[4];
+
+};
+
+FFFramePool *ff_frame_pool_video_init(AVBufferRef* (*alloc)(int size),
+ int width,
+ int height,
+ enum AVPixelFormat format,
+ int align)
+{
+ int i, ret;
+ FFFramePool *pool;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(format);
+
+ if (!desc)
+ return NULL;
+
+ pool = av_mallocz(sizeof(FFFramePool));
+ if (!pool)
+ return NULL;
+
+ pool->type = AVMEDIA_TYPE_VIDEO;
+ pool->width = width;
+ pool->height = height;
+ pool->format = format;
+ pool->align = align;
+
+ if ((ret = av_image_check_size(width, height, 0, NULL)) < 0) {
+ goto fail;
+ }
+
+ if (!pool->linesize[0]) {
+ for(i = 1; i <= align; i += i) {
+ ret = av_image_fill_linesizes(pool->linesize, pool->format,
+ FFALIGN(pool->width, i));
+ if (ret < 0) {
+ goto fail;
+ }
+ if (!(pool->linesize[0] & (pool->align - 1)))
+ break;
+ }
+
+ for (i = 0; i < 4 && pool->linesize[i]; i++) {
+ pool->linesize[i] = FFALIGN(pool->linesize[i], pool->align);
+ }
+ }
+
+ for (i = 0; i < 4 && pool->linesize[i]; i++) {
+ int h = FFALIGN(pool->height, 32);
+ if (i == 1 || i == 2)
+ h = AV_CEIL_RSHIFT(h, desc->log2_chroma_h);
+
+ pool->pools[i] = av_buffer_pool_init(pool->linesize[i] * h + 16 + 16 - 1,
+ alloc);
+ if (!pool->pools[i])
+ goto fail;
+ }
+
+ if (desc->flags & AV_PIX_FMT_FLAG_PAL ||
+ desc->flags & AV_PIX_FMT_FLAG_PSEUDOPAL) {
+ pool->pools[1] = av_buffer_pool_init(AVPALETTE_SIZE, alloc);
+ if (!pool->pools[1])
+ goto fail;
+ }
+
+ return pool;
+
+fail:
+ ff_frame_pool_uninit(&pool);
+ return NULL;
+}
+
+FFFramePool *ff_frame_pool_audio_init(AVBufferRef* (*alloc)(int size),
+ int channels,
+ int nb_samples,
+ enum AVSampleFormat format,
+ int align)
+{
+ int ret, planar;
+ FFFramePool *pool;
+
+ pool = av_mallocz(sizeof(FFFramePool));
+ if (!pool)
+ return NULL;
+
+ planar = av_sample_fmt_is_planar(format);
+
+ pool->type = AVMEDIA_TYPE_AUDIO;
+ pool->planes = planar ? channels : 1;
+ pool->channels = channels;
+ pool->nb_samples = nb_samples;
+ pool->format = format;
+ pool->align = align;
+
+ ret = av_samples_get_buffer_size(&pool->linesize[0], channels,
+ nb_samples, format, 0);
+ if (ret < 0)
+ goto fail;
+
+ pool->pools[0] = av_buffer_pool_init(pool->linesize[0], NULL);
+ if (!pool->pools[0])
+ goto fail;
+
+ return pool;
+
+fail:
+ ff_frame_pool_uninit(&pool);
+ return NULL;
+}
+
+int ff_frame_pool_get_video_config(FFFramePool *pool,
+ int *width,
+ int *height,
+ enum AVPixelFormat *format,
+ int *align)
+{
+ if (!pool)
+ return AVERROR(EINVAL);
+
+ av_assert0(pool->type == AVMEDIA_TYPE_VIDEO);
+
+ *width = pool->width;
+ *height = pool->height;
+ *format = pool->format;
+ *align = pool->align;
+
+ return 0;
+}
+
+int ff_frame_pool_get_audio_config(FFFramePool *pool,
+ int *channels,
+ int *nb_samples,
+ enum AVSampleFormat *format,
+ int *align)
+{
+ if (!pool)
+ return AVERROR(EINVAL);
+
+ av_assert0(pool->type == AVMEDIA_TYPE_AUDIO);
+
+ *channels = pool->channels;
+ *nb_samples = pool->nb_samples;
+ *format = pool->format;
+ *align = pool->align;
+
+ return 0;
+}
+
+AVFrame *ff_frame_pool_get(FFFramePool *pool)
+{
+ int i;
+ AVFrame *frame;
+ const AVPixFmtDescriptor *desc;
+
+ frame = av_frame_alloc();
+ if (!frame) {
+ return NULL;
+ }
+
+ switch(pool->type) {
+ case AVMEDIA_TYPE_VIDEO:
+ desc = av_pix_fmt_desc_get(pool->format);
+ if (!desc) {
+ goto fail;
+ }
+
+ frame->width = pool->width;
+ frame->height = pool->height;
+ frame->format = pool->format;
+
+ for (i = 0; i < 4; i++) {
+ frame->linesize[i] = pool->linesize[i];
+ if (!pool->pools[i])
+ break;
+
+ frame->buf[i] = av_buffer_pool_get(pool->pools[i]);
+ if (!frame->buf[i])
+ goto fail;
+
+ frame->data[i] = frame->buf[i]->data;
+ }
+
+ if (desc->flags & AV_PIX_FMT_FLAG_PAL ||
+ desc->flags & AV_PIX_FMT_FLAG_PSEUDOPAL) {
+ enum AVPixelFormat format =
+ pool->format == AV_PIX_FMT_PAL8 ? AV_PIX_FMT_BGR8 : pool->format;
+
+ av_assert0(frame->data[1] != NULL);
+ if (avpriv_set_systematic_pal2((uint32_t *)frame->data[1], format) < 0)
+ goto fail;
+ }
+
+ frame->extended_data = frame->data;
+ break;
+ case AVMEDIA_TYPE_AUDIO:
+ frame->nb_samples = pool->nb_samples;
+ av_frame_set_channels(frame, pool->channels);
+ frame->format = pool->format;
+ frame->linesize[0] = pool->linesize[0];
+
+ if (pool->planes > AV_NUM_DATA_POINTERS) {
+ frame->extended_data = av_mallocz_array(pool->planes,
+ sizeof(*frame->extended_data));
+ frame->nb_extended_buf = pool->planes - AV_NUM_DATA_POINTERS;
+ frame->extended_buf = av_mallocz_array(frame->nb_extended_buf,
+ sizeof(*frame->extended_buf));
+ if (!frame->extended_data || !frame->extended_buf)
+ goto fail;
+ } else {
+ frame->extended_data = frame->data;
+ av_assert0(frame->nb_extended_buf == 0);
+ }
+
+ for (i = 0; i < FFMIN(pool->planes, AV_NUM_DATA_POINTERS); i++) {
+ frame->buf[i] = av_buffer_pool_get(pool->pools[0]);
+ if (!frame->buf[i])
+ goto fail;
+ frame->extended_data[i] = frame->data[i] = frame->buf[i]->data;
+ }
+ for (i = 0; i < frame->nb_extended_buf; i++) {
+ frame->extended_buf[i] = av_buffer_pool_get(pool->pools[0]);
+ if (!frame->extended_buf[i])
+ goto fail;
+ frame->extended_data[i + AV_NUM_DATA_POINTERS] = frame->extended_buf[i]->data;
+ }
+
+ break;
+ default:
+ av_assert0(0);
+ }
+
+ return frame;
+fail:
+ av_frame_free(&frame);
+ return NULL;
+}
+
+void ff_frame_pool_uninit(FFFramePool **pool)
+{
+ int i;
+
+ if (!pool || !*pool)
+ return;
+
+ for (i = 0; i < 4; i++) {
+ av_buffer_pool_uninit(&(*pool)->pools[i]);
+ }
+
+ av_freep(pool);
+}
diff --git a/libavfilter/framepool.h b/libavfilter/framepool.h
new file mode 100644
index 0000000000..e5560e4c6e
--- /dev/null
+++ b/libavfilter/framepool.h
@@ -0,0 +1,118 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * Copyright (c) 2015 Matthieu Bouron <matthieu.bouron stupeflix.com>
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_FRAMEPOOL_H
+#define AVFILTER_FRAMEPOOL_H
+
+#include "libavutil/buffer.h"
+#include "libavutil/frame.h"
+
+/**
+ * Frame pool. This structure is opaque and not meant to be accessed
+ * directly. It is allocated with ff_frame_pool_init() and freed with
+ * ff_frame_pool_uninit().
+ */
+typedef struct FFFramePool FFFramePool;
+
+/**
+ * Allocate and initialize a video frame pool.
+ *
+ * @param alloc a function that will be used to allocate new frame buffers when
+ * the pool is empty. May be NULL, then the default allocator will be used
+ * (av_buffer_alloc()).
+ * @param width width of each frame in this pool
+ * @param height height of each frame in this pool
+ * @param format format of each frame in this pool
+ * @param align buffers alignement of each frame in this pool
+ * @return newly created video frame pool on success, NULL on error.
+ */
+FFFramePool *ff_frame_pool_video_init(AVBufferRef* (*alloc)(int size),
+ int width,
+ int height,
+ enum AVPixelFormat format,
+ int align);
+
+/**
+ * Allocate and initialize an audio frame pool.
+ *
+ * @param alloc a function that will be used to allocate new frame buffers when
+ * the pool is empty. May be NULL, then the default allocator will be used
+ * (av_buffer_alloc()).
+ * @param channels channels of each frame in this pool
+ * @param nb_samples number of samples of each frame in this pool
+ * @param format format of each frame in this pool
+ * @param align buffers alignement of each frame in this pool
+ * @return newly created audio frame pool on success, NULL on error.
+ */
+FFFramePool *ff_frame_pool_audio_init(AVBufferRef* (*alloc)(int size),
+ int channels,
+ int samples,
+ enum AVSampleFormat format,
+ int align);
+
+/**
+ * Deallocate the frame pool. It is safe to call this function while
+ * some of the allocated frame are still in use.
+ *
+ * @param pool pointer to the frame pool to be freed. It will be set to NULL.
+ */
+void ff_frame_pool_uninit(FFFramePool **pool);
+
+/**
+ * Get the video frame pool configuration.
+ *
+ * @param width width of each frame in this pool
+ * @param height height of each frame in this pool
+ * @param format format of each frame in this pool
+ * @param align buffers alignement of each frame in this pool
+ * @return 0 on success, a negative AVERROR otherwise.
+ */
+int ff_frame_pool_get_video_config(FFFramePool *pool,
+ int *width,
+ int *height,
+ enum AVPixelFormat *format,
+ int *align);
+
+/**
+ * Get the audio frame pool configuration.
+ *
+ * @param channels channels of each frame in this pool
+ * @param nb_samples number of samples of each frame in this pool
+ * @param format format of each frame in this pool
+ * @param align buffers alignement of each frame in this pool
+ * @return 0 on success, a negative AVERROR otherwise.
+ */
+int ff_frame_pool_get_audio_config(FFFramePool *pool,
+ int *channels,
+ int *nb_samples,
+ enum AVSampleFormat *format,
+ int *align);
+
+
+/**
+ * Allocate a new AVFrame, reussing old buffers from the pool when available.
+ * This function may be called simultaneously from multiple threads.
+ *
+ * @return a new AVFrame on success, NULL on error.
+ */
+AVFrame *ff_frame_pool_get(FFFramePool *pool);
+
+
+#endif /* AVFILTER_FRAMEPOOL_H */
diff --git a/libavfilter/framequeue.c b/libavfilter/framequeue.c
new file mode 100644
index 0000000000..26bfa49967
--- /dev/null
+++ b/libavfilter/framequeue.c
@@ -0,0 +1,150 @@
+/*
+ * Generic frame queue
+ * Copyright (c) 2016 Nicolas George
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/avassert.h"
+#include "framequeue.h"
+
+static inline FFFrameBucket *bucket(FFFrameQueue *fq, size_t idx)
+{
+ return &fq->queue[(fq->tail + idx) & (fq->allocated - 1)];
+}
+
+void ff_framequeue_global_init(FFFrameQueueGlobal *fqg)
+{
+}
+
+static void check_consistency(FFFrameQueue *fq)
+{
+#if defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
+ uint64_t nb_samples = 0;
+ size_t i;
+
+ av_assert0(fq->queued == fq->total_frames_head - fq->total_frames_tail);
+ for (i = 0; i < fq->queued; i++)
+ nb_samples += bucket(fq, i)->frame->nb_samples;
+ av_assert0(nb_samples == fq->total_samples_head - fq->total_samples_tail);
+#endif
+}
+
+void ff_framequeue_init(FFFrameQueue *fq, FFFrameQueueGlobal *fqg)
+{
+ fq->queue = &fq->first_bucket;
+ fq->allocated = 1;
+}
+
+void ff_framequeue_free(FFFrameQueue *fq)
+{
+ while (fq->queued) {
+ AVFrame *frame = ff_framequeue_take(fq);
+ av_frame_free(&frame);
+ }
+ if (fq->queue != &fq->first_bucket)
+ av_freep(&fq->queue);
+}
+
+int ff_framequeue_add(FFFrameQueue *fq, AVFrame *frame)
+{
+ FFFrameBucket *b;
+
+ check_consistency(fq);
+ if (fq->queued == fq->allocated) {
+ if (fq->allocated == 1) {
+ size_t na = 8;
+ FFFrameBucket *nq = av_realloc_array(NULL, na, sizeof(*nq));
+ if (!nq)
+ return AVERROR(ENOMEM);
+ nq[0] = fq->queue[0];
+ fq->queue = nq;
+ fq->allocated = na;
+ } else {
+ size_t na = fq->allocated << 1;
+ FFFrameBucket *nq = av_realloc_array(fq->queue, na, sizeof(*nq));
+ if (!nq)
+ return AVERROR(ENOMEM);
+ if (fq->tail + fq->queued > fq->allocated)
+ memmove(nq + fq->allocated, nq,
+ (fq->tail + fq->queued - fq->allocated) * sizeof(*nq));
+ fq->queue = nq;
+ fq->allocated = na;
+ }
+ }
+ b = bucket(fq, fq->queued);
+ b->frame = frame;
+ fq->queued++;
+ fq->total_frames_head++;
+ fq->total_samples_head += frame->nb_samples;
+ check_consistency(fq);
+ return 0;
+}
+
+AVFrame *ff_framequeue_take(FFFrameQueue *fq)
+{
+ FFFrameBucket *b;
+
+ check_consistency(fq);
+ av_assert1(fq->queued);
+ b = bucket(fq, 0);
+ fq->queued--;
+ fq->tail++;
+ fq->tail &= fq->allocated - 1;
+ fq->total_frames_tail++;
+ fq->total_samples_tail += b->frame->nb_samples;
+ check_consistency(fq);
+ return b->frame;
+}
+
+AVFrame *ff_framequeue_peek(FFFrameQueue *fq, size_t idx)
+{
+ FFFrameBucket *b;
+
+ check_consistency(fq);
+ av_assert1(idx < fq->queued);
+ b = bucket(fq, idx);
+ check_consistency(fq);
+ return b->frame;
+}
+
+void ff_framequeue_skip_samples(FFFrameQueue *fq, size_t samples, AVRational time_base)
+{
+ FFFrameBucket *b;
+ size_t bytes;
+ int planar, planes, i;
+
+ check_consistency(fq);
+ av_assert1(fq->queued);
+ b = bucket(fq, 0);
+ av_assert1(samples < b->frame->nb_samples);
+ planar = av_sample_fmt_is_planar(b->frame->format);
+ planes = planar ? b->frame->channels : 1;
+ bytes = samples * av_get_bytes_per_sample(b->frame->format);
+ if (!planar)
+ bytes *= b->frame->channels;
+ if (b->frame->pts != AV_NOPTS_VALUE)
+ b->frame->pts += av_rescale_q(samples, av_make_q(1, b->frame->sample_rate), time_base);
+ b->frame->nb_samples -= samples;
+ b->frame->linesize[0] -= bytes;
+ for (i = 0; i < planes; i++)
+ b->frame->extended_data[i] += bytes;
+ for (i = 0; i < planes && i < AV_NUM_DATA_POINTERS; i++)
+ b->frame->data[i] = b->frame->extended_data[i];
+ fq->total_samples_tail += samples;
+ ff_framequeue_update_peeked(fq, 0);
+}
diff --git a/libavfilter/framequeue.h b/libavfilter/framequeue.h
new file mode 100644
index 0000000000..5aa2c725a7
--- /dev/null
+++ b/libavfilter/framequeue.h
@@ -0,0 +1,173 @@
+/*
+ * Generic frame queue
+ * Copyright (c) 2016 Nicolas George
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_FRAMEQUEUE_H
+#define AVFILTER_FRAMEQUEUE_H
+
+/**
+ * FFFrameQueue: simple AVFrame queue API
+ *
+ * Note: this API is not thread-safe. Concurrent access to the same queue
+ * must be protected by a mutex or any synchronization mechanism.
+ */
+
+#include "libavutil/frame.h"
+
+typedef struct FFFrameBucket {
+ AVFrame *frame;
+} FFFrameBucket;
+
+/**
+ * Structure to hold global options and statistics for frame queues.
+ *
+ * This structure is intended to allow implementing global control of the
+ * frame queues, including memory consumption caps.
+ *
+ * It is currently empty.
+ */
+typedef struct FFFrameQueueGlobal {
+ char dummy; /* C does not allow empty structs */
+} FFFrameQueueGlobal;
+
+/**
+ * Queue of AVFrame pointers.
+ */
+typedef struct FFFrameQueue {
+
+ /**
+ * Array of allocated buckets, used as a circular buffer.
+ */
+ FFFrameBucket *queue;
+
+ /**
+ * Size of the array of buckets.
+ */
+ size_t allocated;
+
+ /**
+ * Tail of the queue.
+ * It is the index in the array of the next frame to take.
+ */
+ size_t tail;
+
+ /**
+ * Number of currently queued frames.
+ */
+ size_t queued;
+
+ /**
+ * Pre-allocated bucket for queues of size 1.
+ */
+ FFFrameBucket first_bucket;
+
+ /**
+ * Total number of frames entered in the queue.
+ */
+ uint64_t total_frames_head;
+
+ /**
+ * Total number of frames dequeued from the queue.
+ * queued = total_frames_head - total_frames_tail
+ */
+ uint64_t total_frames_tail;
+
+ /**
+ * Total number of samples entered in the queue.
+ */
+ uint64_t total_samples_head;
+
+ /**
+ * Total number of samples dequeued from the queue.
+ * queued_samples = total_samples_head - total_samples_tail
+ */
+ uint64_t total_samples_tail;
+
+} FFFrameQueue;
+
+/**
+ * Init a global structure.
+ */
+void ff_framequeue_global_init(FFFrameQueueGlobal *fqg);
+
+/**
+ * Init a frame queue and attach it to a global structure.
+ */
+void ff_framequeue_init(FFFrameQueue *fq, FFFrameQueueGlobal *fqg);
+
+/**
+ * Free the queue and all queued frames.
+ */
+void ff_framequeue_free(FFFrameQueue *fq);
+
+/**
+ * Add a frame.
+ * @return >=0 or an AVERROR code.
+ */
+int ff_framequeue_add(FFFrameQueue *fq, AVFrame *frame);
+
+/**
+ * Take the first frame in the queue.
+ * Must not be used with empty queues.
+ */
+AVFrame *ff_framequeue_take(FFFrameQueue *fq);
+
+/**
+ * Access a frame in the queue, without removing it.
+ * The first frame is numbered 0; the designated frame must exist.
+ */
+AVFrame *ff_framequeue_peek(FFFrameQueue *fq, size_t idx);
+
+/**
+ * Get the number of queued frames.
+ */
+static inline size_t ff_framequeue_queued_frames(const FFFrameQueue *fq)
+{
+ return fq->queued;
+}
+
+/**
+ * Get the number of queued samples.
+ */
+static inline uint64_t ff_framequeue_queued_samples(const FFFrameQueue *fq)
+{
+ return fq->total_samples_head - fq->total_samples_tail;
+}
+
+/**
+ * Update the statistics after a frame accessed using ff_framequeue_peek()
+ * was modified.
+ * Currently used only as a marker.
+ */
+static inline void ff_framequeue_update_peeked(FFFrameQueue *fq, size_t idx)
+{
+}
+
+/**
+ * Skip samples from the first frame in the queue.
+ *
+ * This function must be used when the first frame was accessed using
+ * ff_framequeue_peek() and samples were consumed from it.
+ * It adapts the data pointers and timestamps of the head frame to account
+ * for the skipped samples.
+ */
+void ff_framequeue_skip_samples(FFFrameQueue *fq, size_t samples, AVRational time_base);
+
+#endif /* AVFILTER_FRAMEQUEUE_H */
diff --git a/libavfilter/framesync.c b/libavfilter/framesync.c
new file mode 100644
index 0000000000..eb05d66a86
--- /dev/null
+++ b/libavfilter/framesync.c
@@ -0,0 +1,343 @@
+/*
+ * Copyright (c) 2013 Nicolas George
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#define FF_INTERNAL_FIELDS 1
+#include "framequeue.h"
+
+#include "libavutil/avassert.h"
+#include "avfilter.h"
+#include "bufferqueue.h"
+#include "framesync.h"
+#include "internal.h"
+
+#define OFFSET(member) offsetof(FFFrameSync, member)
+
+static const char *framesync_name(void *ptr)
+{
+ return "framesync";
+}
+
+static const AVClass framesync_class = {
+ .version = LIBAVUTIL_VERSION_INT,
+ .class_name = "framesync",
+ .item_name = framesync_name,
+ .category = AV_CLASS_CATEGORY_FILTER,
+ .option = NULL,
+ .parent_log_context_offset = OFFSET(parent),
+};
+
+enum {
+ STATE_BOF,
+ STATE_RUN,
+ STATE_EOF,
+};
+
+int ff_framesync_init(FFFrameSync *fs, void *parent, unsigned nb_in)
+{
+ fs->class = &framesync_class;
+ fs->parent = parent;
+ fs->nb_in = nb_in;
+
+ fs->in = av_calloc(nb_in, sizeof(*fs->in));
+ if (!fs->in)
+ return AVERROR(ENOMEM);
+ return 0;
+}
+
+static void framesync_sync_level_update(FFFrameSync *fs)
+{
+ unsigned i, level = 0;
+
+ for (i = 0; i < fs->nb_in; i++)
+ if (fs->in[i].state != STATE_EOF)
+ level = FFMAX(level, fs->in[i].sync);
+ av_assert0(level <= fs->sync_level);
+ if (level < fs->sync_level)
+ av_log(fs, AV_LOG_VERBOSE, "Sync level %u\n", level);
+ if (level)
+ fs->sync_level = level;
+ else
+ fs->eof = 1;
+}
+
+int ff_framesync_configure(FFFrameSync *fs)
+{
+ unsigned i;
+ int64_t gcd, lcm;
+
+ if (!fs->time_base.num) {
+ for (i = 0; i < fs->nb_in; i++) {
+ if (fs->in[i].sync) {
+ if (fs->time_base.num) {
+ gcd = av_gcd(fs->time_base.den, fs->in[i].time_base.den);
+ lcm = (fs->time_base.den / gcd) * fs->in[i].time_base.den;
+ if (lcm < AV_TIME_BASE / 2) {
+ fs->time_base.den = lcm;
+ fs->time_base.num = av_gcd(fs->time_base.num,
+ fs->in[i].time_base.num);
+ } else {
+ fs->time_base.num = 1;
+ fs->time_base.den = AV_TIME_BASE;
+ break;
+ }
+ } else {
+ fs->time_base = fs->in[i].time_base;
+ }
+ }
+ }
+ if (!fs->time_base.num) {
+ av_log(fs, AV_LOG_ERROR, "Impossible to set time base\n");
+ return AVERROR(EINVAL);
+ }
+ av_log(fs, AV_LOG_VERBOSE, "Selected %d/%d time base\n",
+ fs->time_base.num, fs->time_base.den);
+ }
+
+ for (i = 0; i < fs->nb_in; i++)
+ fs->in[i].pts = fs->in[i].pts_next = AV_NOPTS_VALUE;
+ fs->sync_level = UINT_MAX;
+ framesync_sync_level_update(fs);
+
+ return 0;
+}
+
+static void framesync_advance(FFFrameSync *fs)
+{
+ int latest;
+ unsigned i;
+ int64_t pts;
+
+ if (fs->eof)
+ return;
+ while (!fs->frame_ready) {
+ latest = -1;
+ for (i = 0; i < fs->nb_in; i++) {
+ if (!fs->in[i].have_next) {
+ if (latest < 0 || fs->in[i].pts < fs->in[latest].pts)
+ latest = i;
+ }
+ }
+ if (latest >= 0) {
+ fs->in_request = latest;
+ break;
+ }
+
+ pts = fs->in[0].pts_next;
+ for (i = 1; i < fs->nb_in; i++)
+ if (fs->in[i].pts_next < pts)
+ pts = fs->in[i].pts_next;
+ if (pts == INT64_MAX) {
+ fs->eof = 1;
+ break;
+ }
+ for (i = 0; i < fs->nb_in; i++) {
+ if (fs->in[i].pts_next == pts ||
+ (fs->in[i].before == EXT_INFINITY &&
+ fs->in[i].state == STATE_BOF)) {
+ av_frame_free(&fs->in[i].frame);
+ fs->in[i].frame = fs->in[i].frame_next;
+ fs->in[i].pts = fs->in[i].pts_next;
+ fs->in[i].frame_next = NULL;
+ fs->in[i].pts_next = AV_NOPTS_VALUE;
+ fs->in[i].have_next = 0;
+ fs->in[i].state = fs->in[i].frame ? STATE_RUN : STATE_EOF;
+ if (fs->in[i].sync == fs->sync_level && fs->in[i].frame)
+ fs->frame_ready = 1;
+ if (fs->in[i].state == STATE_EOF &&
+ fs->in[i].after == EXT_STOP)
+ fs->eof = 1;
+ }
+ }
+ if (fs->eof)
+ fs->frame_ready = 0;
+ if (fs->frame_ready)
+ for (i = 0; i < fs->nb_in; i++)
+ if ((fs->in[i].state == STATE_BOF &&
+ fs->in[i].before == EXT_STOP))
+ fs->frame_ready = 0;
+ fs->pts = pts;
+ }
+}
+
+static int64_t framesync_pts_extrapolate(FFFrameSync *fs, unsigned in,
+ int64_t pts)
+{
+ /* Possible enhancement: use the link's frame rate */
+ return pts + 1;
+}
+
+static void framesync_inject_frame(FFFrameSync *fs, unsigned in, AVFrame *frame)
+{
+ int64_t pts;
+
+ av_assert0(!fs->in[in].have_next);
+ if (frame) {
+ pts = av_rescale_q(frame->pts, fs->in[in].time_base, fs->time_base);
+ frame->pts = pts;
+ } else {
+ pts = fs->in[in].state != STATE_RUN || fs->in[in].after == EXT_INFINITY
+ ? INT64_MAX : framesync_pts_extrapolate(fs, in, fs->in[in].pts);
+ fs->in[in].sync = 0;
+ framesync_sync_level_update(fs);
+ }
+ fs->in[in].frame_next = frame;
+ fs->in[in].pts_next = pts;
+ fs->in[in].have_next = 1;
+}
+
+int ff_framesync_add_frame(FFFrameSync *fs, unsigned in, AVFrame *frame)
+{
+ av_assert1(in < fs->nb_in);
+ if (!fs->in[in].have_next)
+ framesync_inject_frame(fs, in, frame);
+ else
+ ff_bufqueue_add(fs, &fs->in[in].queue, frame);
+ return 0;
+}
+
+void ff_framesync_next(FFFrameSync *fs)
+{
+ unsigned i;
+
+ av_assert0(!fs->frame_ready);
+ for (i = 0; i < fs->nb_in; i++)
+ if (!fs->in[i].have_next && fs->in[i].queue.available)
+ framesync_inject_frame(fs, i, ff_bufqueue_get(&fs->in[i].queue));
+ fs->frame_ready = 0;
+ framesync_advance(fs);
+}
+
+void ff_framesync_drop(FFFrameSync *fs)
+{
+ fs->frame_ready = 0;
+}
+
+int ff_framesync_get_frame(FFFrameSync *fs, unsigned in, AVFrame **rframe,
+ unsigned get)
+{
+ AVFrame *frame;
+ unsigned need_copy = 0, i;
+ int64_t pts_next;
+ int ret;
+
+ if (!fs->in[in].frame) {
+ *rframe = NULL;
+ return 0;
+ }
+ frame = fs->in[in].frame;
+ if (get) {
+ /* Find out if we need to copy the frame: is there another sync
+ stream, and do we know if its current frame will outlast this one? */
+ pts_next = fs->in[in].have_next ? fs->in[in].pts_next : INT64_MAX;
+ for (i = 0; i < fs->nb_in && !need_copy; i++)
+ if (i != in && fs->in[i].sync &&
+ (!fs->in[i].have_next || fs->in[i].pts_next < pts_next))
+ need_copy = 1;
+ if (need_copy) {
+ if (!(frame = av_frame_clone(frame)))
+ return AVERROR(ENOMEM);
+ if ((ret = av_frame_make_writable(frame)) < 0) {
+ av_frame_free(&frame);
+ return ret;
+ }
+ } else {
+ fs->in[in].frame = NULL;
+ }
+ fs->frame_ready = 0;
+ }
+ *rframe = frame;
+ return 0;
+}
+
+void ff_framesync_uninit(FFFrameSync *fs)
+{
+ unsigned i;
+
+ for (i = 0; i < fs->nb_in; i++) {
+ av_frame_free(&fs->in[i].frame);
+ av_frame_free(&fs->in[i].frame_next);
+ ff_bufqueue_discard_all(&fs->in[i].queue);
+ }
+
+ av_freep(&fs->in);
+}
+
+int ff_framesync_process_frame(FFFrameSync *fs, unsigned all)
+{
+ int ret, count = 0;
+
+ av_assert0(fs->on_event);
+ while (1) {
+ ff_framesync_next(fs);
+ if (fs->eof || !fs->frame_ready)
+ break;
+ if ((ret = fs->on_event(fs)) < 0)
+ return ret;
+ ff_framesync_drop(fs);
+ count++;
+ if (!all)
+ break;
+ }
+ if (!count && fs->eof)
+ return AVERROR_EOF;
+ return count;
+}
+
+int ff_framesync_filter_frame(FFFrameSync *fs, AVFilterLink *inlink,
+ AVFrame *in)
+{
+ int ret;
+
+ if ((ret = ff_framesync_process_frame(fs, 1)) < 0)
+ return ret;
+ if ((ret = ff_framesync_add_frame(fs, FF_INLINK_IDX(inlink), in)) < 0)
+ return ret;
+ if ((ret = ff_framesync_process_frame(fs, 0)) < 0)
+ return ret;
+ return 0;
+}
+
+int ff_framesync_request_frame(FFFrameSync *fs, AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ int input, ret, i;
+
+ if ((ret = ff_framesync_process_frame(fs, 0)) < 0)
+ return ret;
+ if (ret > 0)
+ return 0;
+ if (fs->eof)
+ return AVERROR_EOF;
+ input = fs->in_request;
+ /* Detect status change early */
+ for (i = 0; i < fs->nb_in; i++)
+ if (!ff_framequeue_queued_frames(&ctx->inputs[i]->fifo) &&
+ ctx->inputs[i]->status_in && !ctx->inputs[i]->status_out)
+ input = i;
+ ret = ff_request_frame(ctx->inputs[input]);
+ if (ret == AVERROR_EOF) {
+ if ((ret = ff_framesync_add_frame(fs, input, NULL)) < 0)
+ return ret;
+ if ((ret = ff_framesync_process_frame(fs, 0)) < 0)
+ return ret;
+ ret = 0;
+ }
+ return ret;
+}
diff --git a/libavfilter/framesync.h b/libavfilter/framesync.h
new file mode 100644
index 0000000000..7ba99d5d86
--- /dev/null
+++ b/libavfilter/framesync.h
@@ -0,0 +1,297 @@
+/*
+ * Copyright (c) 2013 Nicolas George
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_FRAMESYNC_H
+#define AVFILTER_FRAMESYNC_H
+
+#include "bufferqueue.h"
+
+/*
+ * TODO
+ * Callback-based API similar to dualinput.
+ * Export convenient options.
+ */
+
+/**
+ * This API is intended as a helper for filters that have several video
+ * input and need to combine them somehow. If the inputs have different or
+ * variable frame rate, getting the input frames to match requires a rather
+ * complex logic and a few user-tunable options.
+ *
+ * In this API, when a set of synchronized input frames is ready to be
+ * procesed is called a frame event. Frame event can be generated in
+ * response to input frames on any or all inputs and the handling of
+ * situations where some stream extend beyond the beginning or the end of
+ * others can be configured.
+ *
+ * The basic working of this API is the following:
+ *
+ * - When a frame is available on any input, add it using
+ * ff_framesync_add_frame().
+ *
+ * - When a frame event is ready to be processed (i.e. after adding a frame
+ * or when requested on input):
+ * - call ff_framesync_next();
+ * - if fs->frame_ready is true, process the frames;
+ * - call ff_framesync_drop().
+ */
+
+/**
+ * Stream extrapolation mode
+ *
+ * Describe how the frames of a stream are extrapolated before the first one
+ * and after EOF to keep sync with possibly longer other streams.
+ */
+enum FFFrameSyncExtMode {
+
+ /**
+ * Completely stop all streams with this one.
+ */
+ EXT_STOP,
+
+ /**
+ * Ignore this stream and continue processing the other ones.
+ */
+ EXT_NULL,
+
+ /**
+ * Extend the frame to infinity.
+ */
+ EXT_INFINITY,
+};
+
+/**
+ * Input stream structure
+ */
+typedef struct FFFrameSyncIn {
+
+ /**
+ * Queue of incoming AVFrame, and NULL to mark EOF
+ */
+ struct FFBufQueue queue;
+
+ /**
+ * Extrapolation mode for timestamps before the first frame
+ */
+ enum FFFrameSyncExtMode before;
+
+ /**
+ * Extrapolation mode for timestamps after the last frame
+ */
+ enum FFFrameSyncExtMode after;
+
+ /**
+ * Time base for the incoming frames
+ */
+ AVRational time_base;
+
+ /**
+ * Current frame, may be NULL before the first one or after EOF
+ */
+ AVFrame *frame;
+
+ /**
+ * Next frame, for internal use
+ */
+ AVFrame *frame_next;
+
+ /**
+ * PTS of the current frame
+ */
+ int64_t pts;
+
+ /**
+ * PTS of the next frame, for internal use
+ */
+ int64_t pts_next;
+
+ /**
+ * Boolean flagging the next frame, for internal use
+ */
+ uint8_t have_next;
+
+ /**
+ * State: before first, in stream or after EOF, for internal use
+ */
+ uint8_t state;
+
+ /**
+ * Synchronization level: frames on input at the highest sync level will
+ * generate output frame events.
+ *
+ * For example, if inputs #0 and #1 have sync level 2 and input #2 has
+ * sync level 1, then a frame on either input #0 or #1 will generate a
+ * frame event, but not a frame on input #2 until both inputs #0 and #1
+ * have reached EOF.
+ *
+ * If sync is 0, no frame event will be generated.
+ */
+ unsigned sync;
+
+} FFFrameSyncIn;
+
+/**
+ * Frame sync structure.
+ */
+typedef struct FFFrameSync {
+ const AVClass *class;
+ void *parent;
+
+ /**
+ * Number of input streams
+ */
+ unsigned nb_in;
+
+ /**
+ * Time base for the output events
+ */
+ AVRational time_base;
+
+ /**
+ * Timestamp of the current event
+ */
+ int64_t pts;
+
+ /**
+ * Callback called when a frame event is ready
+ */
+ int (*on_event)(struct FFFrameSync *fs);
+
+ /**
+ * Opaque pointer, not used by the API
+ */
+ void *opaque;
+
+ /**
+ * Index of the input that requires a request
+ */
+ unsigned in_request;
+
+ /**
+ * Synchronization level: only inputs with the same sync level are sync
+ * sources.
+ */
+ unsigned sync_level;
+
+ /**
+ * Flag indicating that a frame event is ready
+ */
+ uint8_t frame_ready;
+
+ /**
+ * Flag indicating that output has reached EOF.
+ */
+ uint8_t eof;
+
+ /**
+ * Pointer to array of inputs.
+ */
+ FFFrameSyncIn *in;
+
+} FFFrameSync;
+
+/**
+ * Initialize a frame sync structure.
+ *
+ * The entire structure is expected to be already set to 0.
+ *
+ * @param fs frame sync structure to initialize
+ * @param parent parent object, used for logging
+ * @param nb_in number of inputs
+ * @return >= 0 for success or a negative error code
+ */
+int ff_framesync_init(FFFrameSync *fs, void *parent, unsigned nb_in);
+
+/**
+ * Configure a frame sync structure.
+ *
+ * Must be called after all options are set but before all use.
+ *
+ * @return >= 0 for success or a negative error code
+ */
+int ff_framesync_configure(FFFrameSync *fs);
+
+/**
+ * Free all memory currently allocated.
+ */
+void ff_framesync_uninit(FFFrameSync *fs);
+
+/**
+ * Add a frame to an input
+ *
+ * Typically called from the filter_frame() method.
+ *
+ * @param fs frame sync structure
+ * @param in index of the input
+ * @param frame input frame, or NULL for EOF
+ */
+int ff_framesync_add_frame(FFFrameSync *fs, unsigned in, AVFrame *frame);
+
+/**
+ * Prepare the next frame event.
+ *
+ * The status of the operation can be found in fs->frame_ready and fs->eof.
+ */
+void ff_framesync_next(FFFrameSync *fs);
+
+/**
+ * Drop the current frame event.
+ */
+void ff_framesync_drop(FFFrameSync *fs);
+
+/**
+ * Get the current frame in an input.
+ *
+ * @param fs frame sync structure
+ * @param in index of the input
+ * @param rframe used to return the current frame (or NULL)
+ * @param get if not zero, the calling code needs to get ownership of
+ * the returned frame; the current frame will either be
+ * duplicated or removed from the framesync structure
+ */
+int ff_framesync_get_frame(FFFrameSync *fs, unsigned in, AVFrame **rframe,
+ unsigned get);
+
+/**
+ * Process one or several frame using the on_event callback.
+ *
+ * @return number of frames processed or negative error code
+ */
+int ff_framesync_process_frame(FFFrameSync *fs, unsigned all);
+
+
+/**
+ * Accept a frame on a filter input.
+ *
+ * This function can be the complete implementation of all filter_frame
+ * methods of a filter using framesync.
+ */
+int ff_framesync_filter_frame(FFFrameSync *fs, AVFilterLink *inlink,
+ AVFrame *in);
+
+/**
+ * Request a frame on the filter output.
+ *
+ * This function can be the complete implementation of all filter_frame
+ * methods of a filter using framesync if it has only one output.
+ */
+int ff_framesync_request_frame(FFFrameSync *fs, AVFilterLink *outlink);
+
+#endif /* AVFILTER_FRAMESYNC_H */
diff --git a/libavfilter/generate_wave_table.c b/libavfilter/generate_wave_table.c
new file mode 100644
index 0000000000..6cd80228b1
--- /dev/null
+++ b/libavfilter/generate_wave_table.c
@@ -0,0 +1,82 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdint.h>
+#include "libavutil/avassert.h"
+#include "avfilter.h"
+#include "generate_wave_table.h"
+
+void ff_generate_wave_table(enum WaveType wave_type,
+ enum AVSampleFormat sample_fmt,
+ void *table, int table_size,
+ double min, double max, double phase)
+{
+ uint32_t i, phase_offset = phase / M_PI / 2 * table_size + 0.5;
+
+ for (i = 0; i < table_size; i++) {
+ uint32_t point = (i + phase_offset) % table_size;
+ double d;
+
+ switch (wave_type) {
+ case WAVE_SIN:
+ d = (sin((double)point / table_size * 2 * M_PI) + 1) / 2;
+ break;
+ case WAVE_TRI:
+ d = (double)point * 2 / table_size;
+ switch (4 * point / table_size) {
+ case 0: d = d + 0.5; break;
+ case 1:
+ case 2: d = 1.5 - d; break;
+ case 3: d = d - 1.5; break;
+ }
+ break;
+ default:
+ av_assert0(0);
+ }
+
+ d = d * (max - min) + min;
+ switch (sample_fmt) {
+ case AV_SAMPLE_FMT_FLT: {
+ float *fp = (float *)table;
+ *fp++ = (float)d;
+ table = fp;
+ continue; }
+ case AV_SAMPLE_FMT_DBL: {
+ double *dp = (double *)table;
+ *dp++ = d;
+ table = dp;
+ continue; }
+ }
+
+ d += d < 0 ? -0.5 : 0.5;
+ switch (sample_fmt) {
+ case AV_SAMPLE_FMT_S16: {
+ int16_t *sp = table;
+ *sp++ = (int16_t)d;
+ table = sp;
+ continue; }
+ case AV_SAMPLE_FMT_S32: {
+ int32_t *ip = table;
+ *ip++ = (int32_t)d;
+ table = ip;
+ continue; }
+ default:
+ av_assert0(0);
+ }
+ }
+}
diff --git a/libavfilter/generate_wave_table.h b/libavfilter/generate_wave_table.h
new file mode 100644
index 0000000000..5fe297e94d
--- /dev/null
+++ b/libavfilter/generate_wave_table.h
@@ -0,0 +1,35 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_GENERATE_WAVE_TABLE_H
+#define AVFILTER_GENERATE_WAVE_TABLE_H
+
+#include "libavutil/samplefmt.h"
+
+enum WaveType {
+ WAVE_SIN,
+ WAVE_TRI,
+ WAVE_NB,
+};
+
+void ff_generate_wave_table(enum WaveType wave_type,
+ enum AVSampleFormat sample_fmt,
+ void *table, int table_size,
+ double min, double max, double phase);
+
+#endif /* AVFILTER_GENERATE_WAVE_TABLE_H */
diff --git a/libavfilter/gradfun.h b/libavfilter/gradfun.h
index f6f7311d1d..eb1f1eb090 100644
--- a/libavfilter/gradfun.h
+++ b/libavfilter/gradfun.h
@@ -2,20 +2,20 @@
* Copyright (c) 2010 Nolan Lum <nol888@gmail.com>
* Copyright (c) 2009 Loren Merritt <lorenm@u.washington.edu>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -35,13 +35,13 @@ typedef struct GradFunContext {
int chroma_r; ///< blur radius for the chroma planes
uint16_t *buf; ///< holds image data for blur algorithm passed into filter.
/// DSP functions.
- void (*filter_line) (uint8_t *dst, uint8_t *src, uint16_t *dc, int width, int thresh, const uint16_t *dithers);
- void (*blur_line) (uint16_t *dc, uint16_t *buf, uint16_t *buf1, uint8_t *src, int src_linesize, int width);
+ void (*filter_line) (uint8_t *dst, const uint8_t *src, const uint16_t *dc, int width, int thresh, const uint16_t *dithers);
+ void (*blur_line) (uint16_t *dc, uint16_t *buf, const uint16_t *buf1, const uint8_t *src, int src_linesize, int width);
} GradFunContext;
void ff_gradfun_init_x86(GradFunContext *gf);
-void ff_gradfun_filter_line_c(uint8_t *dst, uint8_t *src, uint16_t *dc, int width, int thresh, const uint16_t *dithers);
-void ff_gradfun_blur_line_c(uint16_t *dc, uint16_t *buf, uint16_t *buf1, uint8_t *src, int src_linesize, int width);
+void ff_gradfun_filter_line_c(uint8_t *dst, const uint8_t *src, const uint16_t *dc, int width, int thresh, const uint16_t *dithers);
+void ff_gradfun_blur_line_c(uint16_t *dc, uint16_t *buf, const uint16_t *buf1, const uint8_t *src, int src_linesize, int width);
#endif /* AVFILTER_GRADFUN_H */
diff --git a/libavfilter/graphdump.c b/libavfilter/graphdump.c
new file mode 100644
index 0000000000..531bb571aa
--- /dev/null
+++ b/libavfilter/graphdump.c
@@ -0,0 +1,166 @@
+/*
+ * Filter graphs to bad ASCII-art
+ * Copyright (c) 2012 Nicolas George
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <string.h>
+
+#include "libavutil/channel_layout.h"
+#include "libavutil/bprint.h"
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+#include "avfiltergraph.h"
+#include "internal.h"
+
+static int print_link_prop(AVBPrint *buf, AVFilterLink *link)
+{
+ char *format;
+ char layout[64];
+ AVBPrint dummy_buffer = { 0 };
+
+ if (!buf)
+ buf = &dummy_buffer;
+ switch (link->type) {
+ case AVMEDIA_TYPE_VIDEO:
+ format = av_x_if_null(av_get_pix_fmt_name(link->format), "?");
+ av_bprintf(buf, "[%dx%d %d:%d %s]", link->w, link->h,
+ link->sample_aspect_ratio.num,
+ link->sample_aspect_ratio.den,
+ format);
+ break;
+
+ case AVMEDIA_TYPE_AUDIO:
+ av_get_channel_layout_string(layout, sizeof(layout),
+ link->channels, link->channel_layout);
+ format = av_x_if_null(av_get_sample_fmt_name(link->format), "?");
+ av_bprintf(buf, "[%dHz %s:%s]",
+ (int)link->sample_rate, format, layout);
+ break;
+
+ default:
+ av_bprintf(buf, "?");
+ break;
+ }
+ return buf->len;
+}
+
+static void avfilter_graph_dump_to_buf(AVBPrint *buf, AVFilterGraph *graph)
+{
+ unsigned i, j, x, e;
+
+ for (i = 0; i < graph->nb_filters; i++) {
+ AVFilterContext *filter = graph->filters[i];
+ unsigned max_src_name = 0, max_dst_name = 0;
+ unsigned max_in_name = 0, max_out_name = 0;
+ unsigned max_in_fmt = 0, max_out_fmt = 0;
+ unsigned width, height, in_indent;
+ unsigned lname = strlen(filter->name);
+ unsigned ltype = strlen(filter->filter->name);
+
+ for (j = 0; j < filter->nb_inputs; j++) {
+ AVFilterLink *l = filter->inputs[j];
+ unsigned ln = strlen(l->src->name) + 1 + strlen(l->srcpad->name);
+ max_src_name = FFMAX(max_src_name, ln);
+ max_in_name = FFMAX(max_in_name, strlen(l->dstpad->name));
+ max_in_fmt = FFMAX(max_in_fmt, print_link_prop(NULL, l));
+ }
+ for (j = 0; j < filter->nb_outputs; j++) {
+ AVFilterLink *l = filter->outputs[j];
+ unsigned ln = strlen(l->dst->name) + 1 + strlen(l->dstpad->name);
+ max_dst_name = FFMAX(max_dst_name, ln);
+ max_out_name = FFMAX(max_out_name, strlen(l->srcpad->name));
+ max_out_fmt = FFMAX(max_out_fmt, print_link_prop(NULL, l));
+ }
+ in_indent = max_src_name + max_in_name + max_in_fmt;
+ in_indent += in_indent ? 4 : 0;
+ width = FFMAX(lname + 2, ltype + 4);
+ height = FFMAX3(2, filter->nb_inputs, filter->nb_outputs);
+ av_bprint_chars(buf, ' ', in_indent);
+ av_bprintf(buf, "+");
+ av_bprint_chars(buf, '-', width);
+ av_bprintf(buf, "+\n");
+ for (j = 0; j < height; j++) {
+ unsigned in_no = j - (height - filter->nb_inputs ) / 2;
+ unsigned out_no = j - (height - filter->nb_outputs) / 2;
+
+ /* Input link */
+ if (in_no < filter->nb_inputs) {
+ AVFilterLink *l = filter->inputs[in_no];
+ e = buf->len + max_src_name + 2;
+ av_bprintf(buf, "%s:%s", l->src->name, l->srcpad->name);
+ av_bprint_chars(buf, '-', e - buf->len);
+ e = buf->len + max_in_fmt + 2 +
+ max_in_name - strlen(l->dstpad->name);
+ print_link_prop(buf, l);
+ av_bprint_chars(buf, '-', e - buf->len);
+ av_bprintf(buf, "%s", l->dstpad->name);
+ } else {
+ av_bprint_chars(buf, ' ', in_indent);
+ }
+
+ /* Filter */
+ av_bprintf(buf, "|");
+ if (j == (height - 2) / 2) {
+ x = (width - lname) / 2;
+ av_bprintf(buf, "%*s%-*s", x, "", width - x, filter->name);
+ } else if (j == (height - 2) / 2 + 1) {
+ x = (width - ltype - 2) / 2;
+ av_bprintf(buf, "%*s(%s)%*s", x, "", filter->filter->name,
+ width - ltype - 2 - x, "");
+ } else {
+ av_bprint_chars(buf, ' ', width);
+ }
+ av_bprintf(buf, "|");
+
+ /* Output link */
+ if (out_no < filter->nb_outputs) {
+ AVFilterLink *l = filter->outputs[out_no];
+ unsigned ln = strlen(l->dst->name) + 1 +
+ strlen(l->dstpad->name);
+ e = buf->len + max_out_name + 2;
+ av_bprintf(buf, "%s", l->srcpad->name);
+ av_bprint_chars(buf, '-', e - buf->len);
+ e = buf->len + max_out_fmt + 2 +
+ max_dst_name - ln;
+ print_link_prop(buf, l);
+ av_bprint_chars(buf, '-', e - buf->len);
+ av_bprintf(buf, "%s:%s", l->dst->name, l->dstpad->name);
+ }
+ av_bprintf(buf, "\n");
+ }
+ av_bprint_chars(buf, ' ', in_indent);
+ av_bprintf(buf, "+");
+ av_bprint_chars(buf, '-', width);
+ av_bprintf(buf, "+\n");
+ av_bprintf(buf, "\n");
+ }
+}
+
+char *avfilter_graph_dump(AVFilterGraph *graph, const char *options)
+{
+ AVBPrint buf;
+ char *dump;
+
+ av_bprint_init(&buf, 0, 0);
+ avfilter_graph_dump_to_buf(&buf, graph);
+ av_bprint_init(&buf, buf.len + 1, buf.len + 1);
+ avfilter_graph_dump_to_buf(&buf, graph);
+ av_bprint_finalize(&buf, &dump);
+ return dump;
+}
diff --git a/libavfilter/graphparser.c b/libavfilter/graphparser.c
index da499b52c9..04b4272e05 100644
--- a/libavfilter/graphparser.c
+++ b/libavfilter/graphparser.c
@@ -3,20 +3,20 @@
* Copyright (c) 2008 Vitor Sessak
* Copyright (c) 2007 Bobby Bingham
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -27,7 +27,7 @@
#include "libavutil/mem.h"
#include "avfilter.h"
-#define WHITESPACES " \n\t"
+#define WHITESPACES " \n\t\r"
/**
* Link two filters together.
@@ -81,41 +81,6 @@ static char *parse_link_name(const char **buf, void *log_ctx)
return name;
}
-#define TMP_ARGS_SIZE 256
-
-static void append_sws_flags(const char **args, const char *sws_opts, char *tmp)
-{
- int nb_opts = 0;
- const char *separator = ":";
- const char *opt = *args;
-
- if (strstr(*args, "flags"))
- return;
-
- if (strstr(*args, "="))
- separator = ":flags=";
-
- while ((opt = strstr(opt, ":")) && *opt) {
- av_log(NULL, AV_LOG_INFO, "opts '%s' \n", opt);
- if (nb_opts > 2) {
- return;
- }
- nb_opts++;
- opt++;
- }
-
- opt = strstr(sws_opts, "flags=");
- if (opt && strlen(opt) > 6)
- opt += 6;
- else
- opt = sws_opts;
-
- snprintf(tmp, TMP_ARGS_SIZE, "%s%s%s",
- *args, separator, opt);
-
- *args = tmp;
-}
-
/**
* Create an instance of a filter, initialize and insert it in the
* filtergraph in *ctx.
@@ -126,17 +91,17 @@ static void append_sws_flags(const char **args, const char *sws_opts, char *tmp)
* @param filt_name the name of the filter to create
* @param args the arguments provided to the filter during its initialization
* @param log_ctx the log context to use
- * @return 0 in case of success, a negative AVERROR code otherwise
+ * @return >= 0 in case of success, a negative AVERROR code otherwise
*/
static int create_filter(AVFilterContext **filt_ctx, AVFilterGraph *ctx, int index,
const char *filt_name, const char *args, void *log_ctx)
{
AVFilter *filt;
char inst_name[30];
- char tmp_args[TMP_ARGS_SIZE];
+ char *tmp_args = NULL;
int ret;
- snprintf(inst_name, sizeof(inst_name), "Parsed filter %d %s", index, filt_name);
+ snprintf(inst_name, sizeof(inst_name), "Parsed_%s_%d", filt_name, index);
filt = avfilter_get_by_name(filt_name);
@@ -153,9 +118,16 @@ static int create_filter(AVFilterContext **filt_ctx, AVFilterGraph *ctx, int ind
return AVERROR(ENOMEM);
}
- if (!strcmp(filt_name, "scale") && args &&
+ if (!strcmp(filt_name, "scale") && (!args || !strstr(args, "flags")) &&
ctx->scale_sws_opts) {
- append_sws_flags(&args, ctx->scale_sws_opts, tmp_args);
+ if (args) {
+ tmp_args = av_asprintf("%s:%s",
+ args, ctx->scale_sws_opts);
+ if (!tmp_args)
+ return AVERROR(ENOMEM);
+ args = tmp_args;
+ } else
+ args = ctx->scale_sws_opts;
}
ret = avfilter_init_str(*filt_ctx, args);
@@ -166,10 +138,11 @@ static int create_filter(AVFilterContext **filt_ctx, AVFilterGraph *ctx, int ind
av_log(log_ctx, AV_LOG_ERROR, " with args '%s'", args);
av_log(log_ctx, AV_LOG_ERROR, "\n");
avfilter_free(*filt_ctx);
- return ret;
+ *filt_ctx = NULL;
}
- return 0;
+ av_free(tmp_args);
+ return ret;
}
/**
@@ -186,18 +159,18 @@ static int create_filter(AVFilterContext **filt_ctx, AVFilterGraph *ctx, int ind
* @param index an index which is assigned to the created filter
* instance, and which is supposed to be unique for each filter
* instance added to the filtergraph
- * @return 0 in case of success, a negative AVERROR code otherwise
+ * @return >= 0 in case of success, a negative AVERROR code otherwise
*/
static int parse_filter(AVFilterContext **filt_ctx, const char **buf, AVFilterGraph *graph,
int index, void *log_ctx)
{
char *opts = NULL;
- char *name = av_get_token(buf, "=,;[\n");
+ char *name = av_get_token(buf, "=,;[");
int ret;
if (**buf == '=') {
(*buf)++;
- opts = av_get_token(buf, "[],;\n");
+ opts = av_get_token(buf, "[],;");
}
ret = create_filter(filt_ctx, graph, index, name, opts, log_ctx);
@@ -273,8 +246,8 @@ static int link_filter_inouts(AVFilterContext *filt_ctx,
if (p->filter_ctx) {
ret = link_filter(p->filter_ctx, p->pad_idx, filt_ctx, pad, log_ctx);
- av_free(p->name);
- av_free(p);
+ av_freep(&p->name);
+ av_freep(&p);
if (ret < 0)
return ret;
} else {
@@ -376,12 +349,12 @@ static int parse_outputs(const char **buf, AVFilterInOut **curr_inputs,
av_free(name);
return ret;
}
- av_free(match->name);
- av_free(name);
- av_free(match);
- av_free(input);
+ av_freep(&match->name);
+ av_freep(&name);
+ av_freep(&match);
+ av_freep(&input);
} else {
- /* Not in the list, so add the first input as a open_output */
+ /* Not in the list, so add the first input as an open_output */
input->name = name;
insert_inout(open_outputs, input);
}
@@ -419,7 +392,7 @@ int avfilter_graph_parse2(AVFilterGraph *graph, const char *filters,
AVFilterInOut **inputs,
AVFilterInOut **outputs)
{
- int index = 0, ret;
+ int index = 0, ret = 0;
char chr = 0;
AVFilterInOut *curr_inputs = NULL, *open_inputs = NULL, *open_outputs = NULL;
@@ -434,18 +407,17 @@ int avfilter_graph_parse2(AVFilterGraph *graph, const char *filters,
filters += strspn(filters, WHITESPACES);
if ((ret = parse_inputs(&filters, &curr_inputs, &open_outputs, graph)) < 0)
- goto fail;
-
+ goto end;
if ((ret = parse_filter(&filter, &filters, graph, index, graph)) < 0)
- goto fail;
+ goto end;
if ((ret = link_filter_inouts(filter, &curr_inputs, &open_inputs, graph)) < 0)
- goto fail;
+ goto end;
if ((ret = parse_outputs(&filters, &curr_inputs, &open_inputs, &open_outputs,
graph)) < 0)
- goto fail;
+ goto end;
filters += strspn(filters, WHITESPACES);
chr = *filters++;
@@ -460,16 +432,17 @@ int avfilter_graph_parse2(AVFilterGraph *graph, const char *filters,
"Unable to parse graph description substring: \"%s\"\n",
filters - 1);
ret = AVERROR(EINVAL);
- goto fail;
+ goto end;
}
append_inout(&open_outputs, &curr_inputs);
+
*inputs = open_inputs;
*outputs = open_outputs;
return 0;
- fail:
+ fail:end:
while (graph->nb_filters)
avfilter_free(graph->filters[0]);
av_freep(&graph->filters);
@@ -545,3 +518,87 @@ int avfilter_graph_parse(AVFilterGraph *graph, const char *filters,
avfilter_inout_free(&open_outputs);
return ret;
}
+
+int avfilter_graph_parse_ptr(AVFilterGraph *graph, const char *filters,
+ AVFilterInOut **open_inputs_ptr, AVFilterInOut **open_outputs_ptr,
+ void *log_ctx)
+{
+ int index = 0, ret = 0;
+ char chr = 0;
+
+ AVFilterInOut *curr_inputs = NULL;
+ AVFilterInOut *open_inputs = open_inputs_ptr ? *open_inputs_ptr : NULL;
+ AVFilterInOut *open_outputs = open_outputs_ptr ? *open_outputs_ptr : NULL;
+
+ if ((ret = parse_sws_flags(&filters, graph)) < 0)
+ goto end;
+
+ do {
+ AVFilterContext *filter;
+ const char *filterchain = filters;
+ filters += strspn(filters, WHITESPACES);
+
+ if ((ret = parse_inputs(&filters, &curr_inputs, &open_outputs, log_ctx)) < 0)
+ goto end;
+
+ if ((ret = parse_filter(&filter, &filters, graph, index, log_ctx)) < 0)
+ goto end;
+
+ if (filter->nb_inputs == 1 && !curr_inputs && !index) {
+ /* First input pad, assume it is "[in]" if not specified */
+ const char *tmp = "[in]";
+ if ((ret = parse_inputs(&tmp, &curr_inputs, &open_outputs, log_ctx)) < 0)
+ goto end;
+ }
+
+ if ((ret = link_filter_inouts(filter, &curr_inputs, &open_inputs, log_ctx)) < 0)
+ goto end;
+
+ if ((ret = parse_outputs(&filters, &curr_inputs, &open_inputs, &open_outputs,
+ log_ctx)) < 0)
+ goto end;
+
+ filters += strspn(filters, WHITESPACES);
+ chr = *filters++;
+
+ if (chr == ';' && curr_inputs) {
+ av_log(log_ctx, AV_LOG_ERROR,
+ "Invalid filterchain containing an unlabelled output pad: \"%s\"\n",
+ filterchain);
+ ret = AVERROR(EINVAL);
+ goto end;
+ }
+ index++;
+ } while (chr == ',' || chr == ';');
+
+ if (chr) {
+ av_log(log_ctx, AV_LOG_ERROR,
+ "Unable to parse graph description substring: \"%s\"\n",
+ filters - 1);
+ ret = AVERROR(EINVAL);
+ goto end;
+ }
+
+ if (curr_inputs) {
+ /* Last output pad, assume it is "[out]" if not specified */
+ const char *tmp = "[out]";
+ if ((ret = parse_outputs(&tmp, &curr_inputs, &open_inputs, &open_outputs,
+ log_ctx)) < 0)
+ goto end;
+ }
+
+end:
+ /* clear open_in/outputs only if not passed as parameters */
+ if (open_inputs_ptr) *open_inputs_ptr = open_inputs;
+ else avfilter_inout_free(&open_inputs);
+ if (open_outputs_ptr) *open_outputs_ptr = open_outputs;
+ else avfilter_inout_free(&open_outputs);
+ avfilter_inout_free(&curr_inputs);
+
+ if (ret < 0) {
+ while (graph->nb_filters)
+ avfilter_free(graph->filters[0]);
+ av_freep(&graph->filters);
+ }
+ return ret;
+}
diff --git a/libavfilter/hermite.h b/libavfilter/hermite.h
new file mode 100644
index 0000000000..fc1c0c613e
--- /dev/null
+++ b/libavfilter/hermite.h
@@ -0,0 +1,45 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_HERMITE_H
+#define AVFILTER_HERMITE_H
+
+static inline double hermite_interpolation(double x, double x0, double x1,
+ double p0, double p1,
+ double m0, double m1)
+{
+ double width = x1 - x0;
+ double t = (x - x0) / width;
+ double t2, t3;
+ double ct0, ct1, ct2, ct3;
+
+ m0 *= width;
+ m1 *= width;
+
+ t2 = t*t;
+ t3 = t2*t;
+ ct0 = p0;
+ ct1 = m0;
+
+ ct2 = -3 * p0 - 2 * m0 + 3 * p1 - m1;
+ ct3 = 2 * p0 + m0 - 2 * p1 + m1;
+
+ return ct3 * t3 + ct2 * t2 + ct1 * t + ct0;
+}
+
+#endif /* AVFILTER_HERMITE_H */
diff --git a/libavfilter/interlace.h b/libavfilter/interlace.h
index fa571bd6eb..da073aeba3 100644
--- a/libavfilter/interlace.h
+++ b/libavfilter/interlace.h
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or modify
+ * FFmpeg is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
- * with Libav; if not, write to the Free Software Foundation, Inc.,
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
@@ -49,7 +49,6 @@ typedef struct InterlaceContext {
enum ScanMode scan; // top or bottom field first scanning
int lowpass; // enable or disable low pass filtering
AVFrame *cur, *next; // the two frames from which the new one is obtained
- int got_output; // signal an output frame is ready to request_frame()
void (*lowpass_line)(uint8_t *dstp, ptrdiff_t linesize, const uint8_t *srcp,
const uint8_t *srcp_above, const uint8_t *srcp_below);
} InterlaceContext;
diff --git a/libavfilter/internal.h b/libavfilter/internal.h
index 202c2c018d..460d75eb83 100644
--- a/libavfilter/internal.h
+++ b/libavfilter/internal.h
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -26,8 +26,27 @@
#include "libavutil/internal.h"
#include "avfilter.h"
+#include "avfiltergraph.h"
+#include "formats.h"
+#include "framepool.h"
+#include "framequeue.h"
#include "thread.h"
#include "version.h"
+#include "video.h"
+#include "libavcodec/avcodec.h"
+
+typedef struct AVFilterCommand {
+ double time; ///< time expressed in seconds
+ char *command; ///< command
+ char *arg; ///< optional argument for the command
+ int flags;
+ struct AVFilterCommand *next;
+} AVFilterCommand;
+
+/**
+ * Update the position of a link in the age heap.
+ */
+void ff_avfilter_graph_update_heap(AVFilterGraph *graph, AVFilterLink *link);
/**
* A filter pad used for either input or output.
@@ -47,7 +66,7 @@ struct AVFilterPad {
/**
* Callback function to get a video buffer. If NULL, the filter system will
- * use avfilter_default_get_video_buffer().
+ * use ff_default_get_video_buffer().
*
* Input video pads only.
*/
@@ -55,7 +74,7 @@ struct AVFilterPad {
/**
* Callback function to get an audio buffer. If NULL, the filter system will
- * use avfilter_default_get_audio_buffer().
+ * use ff_default_get_audio_buffer().
*
* Input audio pads only.
*/
@@ -68,7 +87,7 @@ struct AVFilterPad {
* Input pads only.
*
* @return >= 0 on success, a negative AVERROR on error. This function
- * must ensure that samplesref is properly unreferenced on error if it
+ * must ensure that frame is properly unreferenced on error if it
* hasn't been passed on to another filter.
*/
int (*filter_frame)(AVFilterLink *link, AVFrame *frame);
@@ -85,9 +104,9 @@ struct AVFilterPad {
int (*poll_frame)(AVFilterLink *link);
/**
- * Frame request callback. A call to this should result in at least one
- * frame being output over the given link. This should return zero on
- * success, and another value on error.
+ * Frame request callback. A call to this should result in some progress
+ * towards producing output over the given link. This should return zero
+ * on success, and another value on error.
*
* Output pads only.
*/
@@ -129,18 +148,120 @@ struct AVFilterPad {
struct AVFilterGraphInternal {
void *thread;
avfilter_execute_func *thread_execute;
+ FFFrameQueueGlobal frame_queues;
};
struct AVFilterInternal {
avfilter_execute_func *execute;
};
-/** Tell is a format is contained in the provided list terminated by -1. */
+/**
+ * Tell if an integer is contained in the provided -1-terminated list of integers.
+ * This is useful for determining (for instance) if an AVPixelFormat is in an
+ * array of supported formats.
+ *
+ * @param fmt provided format
+ * @param fmts -1-terminated list of formats
+ * @return 1 if present, 0 if absent
+ */
int ff_fmt_is_in(int fmt, const int *fmts);
-#define FF_DPRINTF_START(ctx, func) av_log(NULL, AV_LOG_TRACE, "%-16s: ", #func)
+/* Functions to parse audio format arguments */
+
+/**
+ * Parse a pixel format.
+ *
+ * @param ret pixel format pointer to where the value should be written
+ * @param arg string to parse
+ * @param log_ctx log context
+ * @return >= 0 in case of success, a negative AVERROR code on error
+ */
+av_warn_unused_result
+int ff_parse_pixel_format(enum AVPixelFormat *ret, const char *arg, void *log_ctx);
+
+/**
+ * Parse a sample rate.
+ *
+ * @param ret unsigned integer pointer to where the value should be written
+ * @param arg string to parse
+ * @param log_ctx log context
+ * @return >= 0 in case of success, a negative AVERROR code on error
+ */
+av_warn_unused_result
+int ff_parse_sample_rate(int *ret, const char *arg, void *log_ctx);
+
+/**
+ * Parse a time base.
+ *
+ * @param ret unsigned AVRational pointer to where the value should be written
+ * @param arg string to parse
+ * @param log_ctx log context
+ * @return >= 0 in case of success, a negative AVERROR code on error
+ */
+av_warn_unused_result
+int ff_parse_time_base(AVRational *ret, const char *arg, void *log_ctx);
+
+/**
+ * Parse a sample format name or a corresponding integer representation.
+ *
+ * @param ret integer pointer to where the value should be written
+ * @param arg string to parse
+ * @param log_ctx log context
+ * @return >= 0 in case of success, a negative AVERROR code on error
+ */
+av_warn_unused_result
+int ff_parse_sample_format(int *ret, const char *arg, void *log_ctx);
+
+/**
+ * Parse a channel layout or a corresponding integer representation.
+ *
+ * @param ret 64bit integer pointer to where the value should be written.
+ * @param nret integer pointer to the number of channels;
+ * if not NULL, then unknown channel layouts are accepted
+ * @param arg string to parse
+ * @param log_ctx log context
+ * @return >= 0 in case of success, a negative AVERROR code on error
+ */
+av_warn_unused_result
+int ff_parse_channel_layout(int64_t *ret, int *nret, const char *arg,
+ void *log_ctx);
+
+void ff_update_link_current_pts(AVFilterLink *link, int64_t pts);
-void ff_dlog_link(void *ctx, AVFilterLink *link, int end);
+/**
+ * Set the status field of a link from the source filter.
+ * The pts should reflect the timestamp of the status change,
+ * in link time base and relative to the frames timeline.
+ * In particular, for AVERROR_EOF, it should reflect the
+ * end time of the last frame.
+ */
+void ff_avfilter_link_set_in_status(AVFilterLink *link, int status, int64_t pts);
+
+/**
+ * Set the status field of a link from the destination filter.
+ * The pts should probably be left unset (AV_NOPTS_VALUE).
+ */
+void ff_avfilter_link_set_out_status(AVFilterLink *link, int status, int64_t pts);
+
+void ff_command_queue_pop(AVFilterContext *filter);
+
+/* misc trace functions */
+
+/* #define FF_AVFILTER_TRACE */
+
+#ifdef FF_AVFILTER_TRACE
+# define ff_tlog(pctx, ...) av_log(pctx, AV_LOG_DEBUG, __VA_ARGS__)
+#else
+# define ff_tlog(pctx, ...) do { if (0) av_log(pctx, AV_LOG_DEBUG, __VA_ARGS__); } while (0)
+#endif
+
+#define FF_TPRINTF_START(ctx, func) ff_tlog(NULL, "%-16s: ", #func)
+
+char *ff_get_ref_perms_string(char *buf, size_t buf_size, int perms);
+
+void ff_tlog_ref(void *ctx, AVFrame *ref, int end);
+
+void ff_tlog_link(void *ctx, AVFilterLink *link, int end);
/**
* Insert a new pad.
@@ -154,24 +275,25 @@ void ff_dlog_link(void *ctx, AVFilterLink *link, int end);
* @param pads Pointer to the pointer to the beginning of the list of pads
* @param links Pointer to the pointer to the beginning of the list of links
* @param newpad The new pad to add. A copy is made when adding.
+ * @return >= 0 in case of success, a negative AVERROR code on error
*/
-void ff_insert_pad(unsigned idx, unsigned *count, size_t padidx_off,
+int ff_insert_pad(unsigned idx, unsigned *count, size_t padidx_off,
AVFilterPad **pads, AVFilterLink ***links,
AVFilterPad *newpad);
/** Insert a new input pad for the filter. */
-static inline void ff_insert_inpad(AVFilterContext *f, unsigned index,
+static inline int ff_insert_inpad(AVFilterContext *f, unsigned index,
AVFilterPad *p)
{
- ff_insert_pad(index, &f->nb_inputs, offsetof(AVFilterLink, dstpad),
+ return ff_insert_pad(index, &f->nb_inputs, offsetof(AVFilterLink, dstpad),
&f->input_pads, &f->inputs, p);
}
/** Insert a new output pad for the filter. */
-static inline void ff_insert_outpad(AVFilterContext *f, unsigned index,
+static inline int ff_insert_outpad(AVFilterContext *f, unsigned index,
AVFilterPad *p)
{
- ff_insert_pad(index, &f->nb_outputs, offsetof(AVFilterLink, srcpad),
+ return ff_insert_pad(index, &f->nb_outputs, offsetof(AVFilterLink, srcpad),
&f->output_pads, &f->outputs, p);
}
@@ -187,11 +309,53 @@ int ff_poll_frame(AVFilterLink *link);
/**
* Request an input frame from the filter at the other end of the link.
*
+ * This function must not be used by filters using the activate callback,
+ * use ff_link_set_frame_wanted() instead.
+ *
+ * The input filter may pass the request on to its inputs, fulfill the
+ * request from an internal buffer or any other means specific to its function.
+ *
+ * When the end of a stream is reached AVERROR_EOF is returned and no further
+ * frames are returned after that.
+ *
+ * When a filter is unable to output a frame for example due to its sources
+ * being unable to do so or because it depends on external means pushing data
+ * into it then AVERROR(EAGAIN) is returned.
+ * It is important that a AVERROR(EAGAIN) return is returned all the way to the
+ * caller (generally eventually a user application) as this step may (but does
+ * not have to be) necessary to provide the input with the next frame.
+ *
+ * If a request is successful then some progress has been made towards
+ * providing a frame on the link (through ff_filter_frame()). A filter that
+ * needs several frames to produce one is allowed to return success if one
+ * more frame has been processed but no output has been produced yet. A
+ * filter is also allowed to simply forward a success return value.
+ *
* @param link the input link
* @return zero on success
+ * AVERROR_EOF on end of file
+ * AVERROR(EAGAIN) if the previous filter cannot output a frame
+ * currently and can neither guarantee that EOF has been reached.
*/
int ff_request_frame(AVFilterLink *link);
+#define AVFILTER_DEFINE_CLASS(fname) \
+ static const AVClass fname##_class = { \
+ .class_name = #fname, \
+ .item_name = av_default_item_name, \
+ .option = fname##_options, \
+ .version = LIBAVUTIL_VERSION_INT, \
+ .category = AV_CLASS_CATEGORY_FILTER, \
+ }
+
+/**
+ * Find the index of a link.
+ *
+ * I.e. find i such that link == ctx->(in|out)puts[i]
+ */
+#define FF_INLINK_IDX(link) ((int)((link)->dstpad - (link)->dst->input_pads))
+#define FF_OUTLINK_IDX(link) ((int)((link)->srcpad - (link)->src->output_pads))
+
/**
* Send a frame of data to the next filter.
*
@@ -215,9 +379,38 @@ int ff_filter_frame(AVFilterLink *link, AVFrame *frame);
*/
AVFilterContext *ff_filter_alloc(const AVFilter *filter, const char *inst_name);
+int ff_filter_activate(AVFilterContext *filter);
+
/**
* Remove a filter from a graph;
*/
void ff_filter_graph_remove_filter(AVFilterGraph *graph, AVFilterContext *filter);
+/**
+ * Run one round of processing on a filter graph.
+ */
+int ff_filter_graph_run_once(AVFilterGraph *graph);
+
+/**
+ * Normalize the qscale factor
+ * FIXME the H264 qscale is a log based scale, mpeg1/2 is not, the code below
+ * cannot be optimal
+ */
+static inline int ff_norm_qscale(int qscale, int type)
+{
+ switch (type) {
+ case FF_QSCALE_TYPE_MPEG1: return qscale;
+ case FF_QSCALE_TYPE_MPEG2: return qscale >> 1;
+ case FF_QSCALE_TYPE_H264: return qscale >> 2;
+ case FF_QSCALE_TYPE_VP56: return (63 - qscale + 2) >> 2;
+ }
+ return qscale;
+}
+
+/**
+ * Get number of threads for current filter instance.
+ * This number is always same or less than graph->nb_threads.
+ */
+int ff_filter_get_nb_threads(AVFilterContext *ctx);
+
#endif /* AVFILTER_INTERNAL_H */
diff --git a/libavfilter/lavfutils.c b/libavfilter/lavfutils.c
new file mode 100644
index 0000000000..706badf63d
--- /dev/null
+++ b/libavfilter/lavfutils.c
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2012 Stefano Sabatini <stefasab gmail com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/imgutils.h"
+#include "lavfutils.h"
+
+int ff_load_image(uint8_t *data[4], int linesize[4],
+ int *w, int *h, enum AVPixelFormat *pix_fmt,
+ const char *filename, void *log_ctx)
+{
+ AVInputFormat *iformat = NULL;
+ AVFormatContext *format_ctx = NULL;
+ AVCodec *codec;
+ AVCodecContext *codec_ctx;
+ AVFrame *frame;
+ int frame_decoded, ret = 0;
+ AVPacket pkt;
+ AVDictionary *opt=NULL;
+
+ av_init_packet(&pkt);
+
+ av_register_all();
+
+ iformat = av_find_input_format("image2");
+ if ((ret = avformat_open_input(&format_ctx, filename, iformat, NULL)) < 0) {
+ av_log(log_ctx, AV_LOG_ERROR,
+ "Failed to open input file '%s'\n", filename);
+ return ret;
+ }
+
+ if ((ret = avformat_find_stream_info(format_ctx, NULL)) < 0) {
+ av_log(log_ctx, AV_LOG_ERROR, "Find stream info failed\n");
+ return ret;
+ }
+
+ codec_ctx = format_ctx->streams[0]->codec;
+ codec = avcodec_find_decoder(codec_ctx->codec_id);
+ if (!codec) {
+ av_log(log_ctx, AV_LOG_ERROR, "Failed to find codec\n");
+ ret = AVERROR(EINVAL);
+ goto end;
+ }
+
+ av_dict_set(&opt, "thread_type", "slice", 0);
+ if ((ret = avcodec_open2(codec_ctx, codec, &opt)) < 0) {
+ av_log(log_ctx, AV_LOG_ERROR, "Failed to open codec\n");
+ goto end;
+ }
+
+ if (!(frame = av_frame_alloc()) ) {
+ av_log(log_ctx, AV_LOG_ERROR, "Failed to alloc frame\n");
+ ret = AVERROR(ENOMEM);
+ goto end;
+ }
+
+ ret = av_read_frame(format_ctx, &pkt);
+ if (ret < 0) {
+ av_log(log_ctx, AV_LOG_ERROR, "Failed to read frame from file\n");
+ goto end;
+ }
+
+ ret = avcodec_decode_video2(codec_ctx, frame, &frame_decoded, &pkt);
+ if (ret < 0 || !frame_decoded) {
+ av_log(log_ctx, AV_LOG_ERROR, "Failed to decode image from file\n");
+ if (ret >= 0)
+ ret = -1;
+ goto end;
+ }
+
+ *w = frame->width;
+ *h = frame->height;
+ *pix_fmt = frame->format;
+
+ if ((ret = av_image_alloc(data, linesize, *w, *h, *pix_fmt, 16)) < 0)
+ goto end;
+ ret = 0;
+
+ av_image_copy(data, linesize, (const uint8_t **)frame->data, frame->linesize, *pix_fmt, *w, *h);
+
+end:
+ av_packet_unref(&pkt);
+ avcodec_close(codec_ctx);
+ avformat_close_input(&format_ctx);
+ av_frame_free(&frame);
+ av_dict_free(&opt);
+
+ if (ret < 0)
+ av_log(log_ctx, AV_LOG_ERROR, "Error loading image file '%s'\n", filename);
+ return ret;
+}
diff --git a/libavfilter/lavfutils.h b/libavfilter/lavfutils.h
new file mode 100644
index 0000000000..2d5308f79c
--- /dev/null
+++ b/libavfilter/lavfutils.h
@@ -0,0 +1,43 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Miscellaneous utilities which make use of the libavformat library
+ */
+
+#ifndef AVFILTER_LAVFUTILS_H
+#define AVFILTER_LAVFUTILS_H
+
+#include "libavformat/avformat.h"
+
+/**
+ * Load image from filename and put the resulting image in data.
+ *
+ * @param w pointer to the width of the loaded image
+ * @param h pointer to the height of the loaded image
+ * @param pix_fmt pointer to the pixel format of the loaded image
+ * @param filename the name of the image file to load
+ * @param log_ctx log context
+ * @return >= 0 in case of success, a negative error code otherwise.
+ */
+int ff_load_image(uint8_t *data[4], int linesize[4],
+ int *w, int *h, enum AVPixelFormat *pix_fmt,
+ const char *filename, void *log_ctx);
+
+#endif /* AVFILTER_LAVFUTILS_H */
diff --git a/libavfilter/log2_tab.c b/libavfilter/log2_tab.c
new file mode 100644
index 0000000000..47a1df03b7
--- /dev/null
+++ b/libavfilter/log2_tab.c
@@ -0,0 +1 @@
+#include "libavutil/log2_tab.c"
diff --git a/libavfilter/lswsutils.c b/libavfilter/lswsutils.c
new file mode 100644
index 0000000000..ebb4f93be0
--- /dev/null
+++ b/libavfilter/lswsutils.c
@@ -0,0 +1,50 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/imgutils.h"
+#include "lswsutils.h"
+
+int ff_scale_image(uint8_t *dst_data[4], int dst_linesize[4],
+ int dst_w, int dst_h, enum AVPixelFormat dst_pix_fmt,
+ uint8_t * const src_data[4], int src_linesize[4],
+ int src_w, int src_h, enum AVPixelFormat src_pix_fmt,
+ void *log_ctx)
+{
+ int ret;
+ struct SwsContext *sws_ctx = sws_getContext(src_w, src_h, src_pix_fmt,
+ dst_w, dst_h, dst_pix_fmt,
+ 0, NULL, NULL, NULL);
+ if (!sws_ctx) {
+ av_log(log_ctx, AV_LOG_ERROR,
+ "Impossible to create scale context for the conversion "
+ "fmt:%s s:%dx%d -> fmt:%s s:%dx%d\n",
+ av_get_pix_fmt_name(src_pix_fmt), src_w, src_h,
+ av_get_pix_fmt_name(dst_pix_fmt), dst_w, dst_h);
+ ret = AVERROR(EINVAL);
+ goto end;
+ }
+
+ if ((ret = av_image_alloc(dst_data, dst_linesize, dst_w, dst_h, dst_pix_fmt, 16)) < 0)
+ goto end;
+ ret = 0;
+ sws_scale(sws_ctx, (const uint8_t * const*)src_data, src_linesize, 0, src_h, dst_data, dst_linesize);
+
+end:
+ sws_freeContext(sws_ctx);
+ return ret;
+}
diff --git a/libavfilter/lswsutils.h b/libavfilter/lswsutils.h
new file mode 100644
index 0000000000..f5f5320247
--- /dev/null
+++ b/libavfilter/lswsutils.h
@@ -0,0 +1,38 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Miscellaneous utilities which make use of the libswscale library
+ */
+
+#ifndef AVFILTER_LSWSUTILS_H
+#define AVFILTER_LSWSUTILS_H
+
+#include "libswscale/swscale.h"
+
+/**
+ * Scale image using libswscale.
+ */
+int ff_scale_image(uint8_t *dst_data[4], int dst_linesize[4],
+ int dst_w, int dst_h, enum AVPixelFormat dst_pix_fmt,
+ uint8_t *const src_data[4], int src_linesize[4],
+ int src_w, int src_h, enum AVPixelFormat src_pix_fmt,
+ void *log_ctx);
+
+#endif /* AVFILTER_LSWSUTILS_H */
diff --git a/libavfilter/maskedmerge.h b/libavfilter/maskedmerge.h
new file mode 100644
index 0000000000..a8c7551bad
--- /dev/null
+++ b/libavfilter/maskedmerge.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2015 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_MASKEDMERGE_H
+#define AVFILTER_MASKEDMERGE_H
+
+#include "avfilter.h"
+#include "framesync.h"
+
+typedef struct MaskedMergeContext {
+ const AVClass *class;
+ int width[4], height[4];
+ int nb_planes;
+ int planes;
+ int half, depth;
+ FFFrameSync fs;
+
+ void (*maskedmerge)(const uint8_t *bsrc, const uint8_t *osrc,
+ const uint8_t *msrc, uint8_t *dst,
+ ptrdiff_t blinesize, ptrdiff_t olinesize,
+ ptrdiff_t mlinesize, ptrdiff_t dlinesize,
+ int w, int h,
+ int half, int shift);
+} MaskedMergeContext;
+
+void ff_maskedmerge_init_x86(MaskedMergeContext *s);
+
+#endif /* AVFILTER_MASKEDMERGE_H */
diff --git a/libavfilter/motion_estimation.c b/libavfilter/motion_estimation.c
new file mode 100644
index 0000000000..0f9ba21784
--- /dev/null
+++ b/libavfilter/motion_estimation.c
@@ -0,0 +1,432 @@
+/**
+ * Copyright (c) 2016 Davinder Singh (DSM_) <ds.mudhar<@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "motion_estimation.h"
+
+static const int8_t sqr1[8][2] = {{ 0,-1}, { 0, 1}, {-1, 0}, { 1, 0}, {-1,-1}, {-1, 1}, { 1,-1}, { 1, 1}};
+static const int8_t dia1[4][2] = {{-1, 0}, { 0,-1}, { 1, 0}, { 0, 1}};
+static const int8_t dia2[8][2] = {{-2, 0}, {-1,-1}, { 0,-2}, { 1,-1}, { 2, 0}, { 1, 1}, { 0, 2}, {-1, 1}};
+static const int8_t hex2[6][2] = {{-2, 0}, {-1,-2}, {-1, 2}, { 1,-2}, { 1, 2}, { 2, 0}};
+static const int8_t hex4[16][2] = {{-4,-2}, {-4,-1}, {-4, 0}, {-4, 1}, {-4, 2},
+ { 4,-2}, { 4,-1}, { 4, 0}, { 4, 1}, { 4, 2},
+ {-2, 3}, { 0, 4}, { 2, 3}, {-2,-3}, { 0,-4}, { 2,-3}};
+
+#define COST_MV(x, y)\
+do {\
+ cost = me_ctx->get_cost(me_ctx, x_mb, y_mb, x, y);\
+ if (cost < cost_min) {\
+ cost_min = cost;\
+ mv[0] = x;\
+ mv[1] = y;\
+ }\
+} while(0)
+
+#define COST_P_MV(x, y)\
+if (x >= x_min && x <= x_max && y >= y_min && y <= y_max)\
+ COST_MV(x, y);
+
+void ff_me_init_context(AVMotionEstContext *me_ctx, int mb_size, int search_param,
+ int width, int height, int x_min, int x_max, int y_min, int y_max)
+{
+ me_ctx->width = width;
+ me_ctx->height = height;
+ me_ctx->mb_size = mb_size;
+ me_ctx->search_param = search_param;
+ me_ctx->get_cost = &ff_me_cmp_sad;
+ me_ctx->x_min = x_min;
+ me_ctx->x_max = x_max;
+ me_ctx->y_min = y_min;
+ me_ctx->y_max = y_max;
+}
+
+uint64_t ff_me_cmp_sad(AVMotionEstContext *me_ctx, int x_mb, int y_mb, int x_mv, int y_mv)
+{
+ const int linesize = me_ctx->linesize;
+ uint8_t *data_ref = me_ctx->data_ref;
+ uint8_t *data_cur = me_ctx->data_cur;
+ uint64_t sad = 0;
+ int i, j;
+
+ data_ref += y_mv * linesize;
+ data_cur += y_mb * linesize;
+
+ for (j = 0; j < me_ctx->mb_size; j++)
+ for (i = 0; i < me_ctx->mb_size; i++)
+ sad += FFABS(data_ref[x_mv + i + j * linesize] - data_cur[x_mb + i + j * linesize]);
+
+ return sad;
+}
+
+uint64_t ff_me_search_esa(AVMotionEstContext *me_ctx, int x_mb, int y_mb, int *mv)
+{
+ int x, y;
+ int x_min = FFMAX(me_ctx->x_min, x_mb - me_ctx->search_param);
+ int y_min = FFMAX(me_ctx->y_min, y_mb - me_ctx->search_param);
+ int x_max = FFMIN(x_mb + me_ctx->search_param, me_ctx->x_max);
+ int y_max = FFMIN(y_mb + me_ctx->search_param, me_ctx->y_max);
+ uint64_t cost, cost_min;
+
+ if (!(cost_min = me_ctx->get_cost(me_ctx, x_mb, y_mb, x_mb, y_mb)))
+ return cost_min;
+
+ for (y = y_min; y <= y_max; y++)
+ for (x = x_min; x <= x_max; x++)
+ COST_MV(x, y);
+
+ return cost_min;
+}
+
+uint64_t ff_me_search_tss(AVMotionEstContext *me_ctx, int x_mb, int y_mb, int *mv)
+{
+ int x, y;
+ int x_min = FFMAX(me_ctx->x_min, x_mb - me_ctx->search_param);
+ int y_min = FFMAX(me_ctx->y_min, y_mb - me_ctx->search_param);
+ int x_max = FFMIN(x_mb + me_ctx->search_param, me_ctx->x_max);
+ int y_max = FFMIN(y_mb + me_ctx->search_param, me_ctx->y_max);
+ uint64_t cost, cost_min;
+ int step = ROUNDED_DIV(me_ctx->search_param, 2);
+ int i;
+
+ mv[0] = x_mb;
+ mv[1] = y_mb;
+
+ if (!(cost_min = me_ctx->get_cost(me_ctx, x_mb, y_mb, x_mb, y_mb)))
+ return cost_min;
+
+ do {
+ x = mv[0];
+ y = mv[1];
+
+ for (i = 0; i < 8; i++)
+ COST_P_MV(x + sqr1[i][0] * step, y + sqr1[i][1] * step);
+
+ step = step >> 1;
+
+ } while (step > 0);
+
+ return cost_min;
+}
+
+uint64_t ff_me_search_tdls(AVMotionEstContext *me_ctx, int x_mb, int y_mb, int *mv)
+{
+ int x, y;
+ int x_min = FFMAX(me_ctx->x_min, x_mb - me_ctx->search_param);
+ int y_min = FFMAX(me_ctx->y_min, y_mb - me_ctx->search_param);
+ int x_max = FFMIN(x_mb + me_ctx->search_param, me_ctx->x_max);
+ int y_max = FFMIN(y_mb + me_ctx->search_param, me_ctx->y_max);
+ uint64_t cost, cost_min;
+ int step = ROUNDED_DIV(me_ctx->search_param, 2);
+ int i;
+
+ mv[0] = x_mb;
+ mv[1] = y_mb;
+
+ if (!(cost_min = me_ctx->get_cost(me_ctx, x_mb, y_mb, x_mb, y_mb)))
+ return cost_min;
+
+ do {
+ x = mv[0];
+ y = mv[1];
+
+ for (i = 0; i < 4; i++)
+ COST_P_MV(x + dia1[i][0] * step, y + dia1[i][1] * step);
+
+ if (x == mv[0] && y == mv[1])
+ step = step >> 1;
+
+ } while (step > 0);
+
+ return cost_min;
+}
+
+uint64_t ff_me_search_ntss(AVMotionEstContext *me_ctx, int x_mb, int y_mb, int *mv)
+{
+ int x, y;
+ int x_min = FFMAX(me_ctx->x_min, x_mb - me_ctx->search_param);
+ int y_min = FFMAX(me_ctx->y_min, y_mb - me_ctx->search_param);
+ int x_max = FFMIN(x_mb + me_ctx->search_param, me_ctx->x_max);
+ int y_max = FFMIN(y_mb + me_ctx->search_param, me_ctx->y_max);
+ uint64_t cost, cost_min;
+ int step = ROUNDED_DIV(me_ctx->search_param, 2);
+ int first_step = 1;
+ int i;
+
+ mv[0] = x_mb;
+ mv[1] = y_mb;
+
+ if (!(cost_min = me_ctx->get_cost(me_ctx, x_mb, y_mb, x_mb, y_mb)))
+ return cost_min;
+
+ do {
+ x = mv[0];
+ y = mv[1];
+
+ for (i = 0; i < 8; i++)
+ COST_P_MV(x + sqr1[i][0] * step, y + sqr1[i][1] * step);
+
+ /* addition to TSS in NTSS */
+ if (first_step) {
+
+ for (i = 0; i < 8; i++)
+ COST_P_MV(x + sqr1[i][0], y + sqr1[i][1]);
+
+ if (x == mv[0] && y == mv[1])
+ return cost_min;
+
+ if (FFABS(x - mv[0]) <= 1 && FFABS(y - mv[1]) <= 1) {
+ x = mv[0];
+ y = mv[1];
+
+ for (i = 0; i < 8; i++)
+ COST_P_MV(x + sqr1[i][0], y + sqr1[i][1]);
+ return cost_min;
+ }
+
+ first_step = 0;
+ }
+
+ step = step >> 1;
+
+ } while (step > 0);
+
+ return cost_min;
+}
+
+uint64_t ff_me_search_fss(AVMotionEstContext *me_ctx, int x_mb, int y_mb, int *mv)
+{
+ int x, y;
+ int x_min = FFMAX(me_ctx->x_min, x_mb - me_ctx->search_param);
+ int y_min = FFMAX(me_ctx->y_min, y_mb - me_ctx->search_param);
+ int x_max = FFMIN(x_mb + me_ctx->search_param, me_ctx->x_max);
+ int y_max = FFMIN(y_mb + me_ctx->search_param, me_ctx->y_max);
+ uint64_t cost, cost_min;
+ int step = 2;
+ int i;
+
+ mv[0] = x_mb;
+ mv[1] = y_mb;
+
+ if (!(cost_min = me_ctx->get_cost(me_ctx, x_mb, y_mb, x_mb, y_mb)))
+ return cost_min;
+
+ do {
+ x = mv[0];
+ y = mv[1];
+
+ for (i = 0; i < 8; i++)
+ COST_P_MV(x + sqr1[i][0] * step, y + sqr1[i][1] * step);
+
+ if (x == mv[0] && y == mv[1])
+ step = step >> 1;
+
+ } while (step > 0);
+
+ return cost_min;
+}
+
+uint64_t ff_me_search_ds(AVMotionEstContext *me_ctx, int x_mb, int y_mb, int *mv)
+{
+ int x, y;
+ int x_min = FFMAX(me_ctx->x_min, x_mb - me_ctx->search_param);
+ int y_min = FFMAX(me_ctx->y_min, y_mb - me_ctx->search_param);
+ int x_max = FFMIN(x_mb + me_ctx->search_param, me_ctx->x_max);
+ int y_max = FFMIN(y_mb + me_ctx->search_param, me_ctx->y_max);
+ uint64_t cost, cost_min;
+ int i;
+ av_unused int dir_x, dir_y;
+
+ if (!(cost_min = me_ctx->get_cost(me_ctx, x_mb, y_mb, x_mb, y_mb)))
+ return cost_min;
+
+ x = x_mb; y = y_mb;
+ dir_x = dir_y = 0;
+
+ do {
+ x = mv[0];
+ y = mv[1];
+
+#if 1
+ for (i = 0; i < 8; i++)
+ COST_P_MV(x + dia2[i][0], y + dia2[i][1]);
+#else
+ /* this version skips previously examined 3 or 5 locations based on prev origin */
+ if (dir_x <= 0)
+ COST_P_MV(x - 2, y);
+ if (dir_x <= 0 && dir_y <= 0)
+ COST_P_MV(x - 1, y - 1);
+ if (dir_y <= 0)
+ COST_P_MV(x, y - 2);
+ if (dir_x >= 0 && dir_y <= 0)
+ COST_P_MV(x + 1, y - 1);
+ if (dir_x >= 0)
+ COST_P_MV(x + 2, y);
+ if (dir_x >= 0 && dir_y >= 0)
+ COST_P_MV(x + 1, y + 1);
+ if (dir_y >= 0)
+ COST_P_MV(x, y + 2);
+ if (dir_x <= 0 && dir_y >= 0)
+ COST_P_MV(x - 1, y + 1);
+
+ dir_x = mv[0] - x;
+ dir_y = mv[1] - y;
+#endif
+
+ } while (x != mv[0] || y != mv[1]);
+
+ for (i = 0; i < 4; i++)
+ COST_P_MV(x + dia1[i][0], y + dia1[i][1]);
+
+ return cost_min;
+}
+
+uint64_t ff_me_search_hexbs(AVMotionEstContext *me_ctx, int x_mb, int y_mb, int *mv)
+{
+ int x, y;
+ int x_min = FFMAX(me_ctx->x_min, x_mb - me_ctx->search_param);
+ int y_min = FFMAX(me_ctx->y_min, y_mb - me_ctx->search_param);
+ int x_max = FFMIN(x_mb + me_ctx->search_param, me_ctx->x_max);
+ int y_max = FFMIN(y_mb + me_ctx->search_param, me_ctx->y_max);
+ uint64_t cost, cost_min;
+ int i;
+
+ if (!(cost_min = me_ctx->get_cost(me_ctx, x_mb, y_mb, x_mb, y_mb)))
+ return cost_min;
+
+ do {
+ x = mv[0];
+ y = mv[1];
+
+ for (i = 0; i < 6; i++)
+ COST_P_MV(x + hex2[i][0], y + hex2[i][1]);
+
+ } while (x != mv[0] || y != mv[1]);
+
+ for (i = 0; i < 4; i++)
+ COST_P_MV(x + dia1[i][0], y + dia1[i][1]);
+
+ return cost_min;
+}
+
+/* two subsets of predictors are used
+ me->pred_x|y is set to median of current frame's left, top, top-right
+ set 1: me->preds[0] has: (0, 0), left, top, top-right, collocated block in prev frame
+ set 2: me->preds[1] has: accelerator mv, top, left, right, bottom adj mb of prev frame
+*/
+uint64_t ff_me_search_epzs(AVMotionEstContext *me_ctx, int x_mb, int y_mb, int *mv)
+{
+ int x, y;
+ int x_min = FFMAX(me_ctx->x_min, x_mb - me_ctx->search_param);
+ int y_min = FFMAX(me_ctx->y_min, y_mb - me_ctx->search_param);
+ int x_max = FFMIN(x_mb + me_ctx->search_param, me_ctx->x_max);
+ int y_max = FFMIN(y_mb + me_ctx->search_param, me_ctx->y_max);
+ uint64_t cost, cost_min;
+ int i;
+
+ AVMotionEstPredictor *preds = me_ctx->preds;
+
+ cost_min = UINT64_MAX;
+
+ COST_P_MV(x_mb + me_ctx->pred_x, y_mb + me_ctx->pred_y);
+
+ for (i = 0; i < preds[0].nb; i++)
+ COST_P_MV(x_mb + preds[0].mvs[i][0], y_mb + preds[0].mvs[i][1]);
+
+ for (i = 0; i < preds[1].nb; i++)
+ COST_P_MV(x_mb + preds[1].mvs[i][0], y_mb + preds[1].mvs[i][1]);
+
+ do {
+ x = mv[0];
+ y = mv[1];
+
+ for (i = 0; i < 4; i++)
+ COST_P_MV(x + dia1[i][0], y + dia1[i][1]);
+
+ } while (x != mv[0] || y != mv[1]);
+
+ return cost_min;
+}
+
+/* required predictor order: median, (0,0), left, top, top-right
+ rules when mb not available:
+ replace left with (0, 0)
+ replace top-right with top-left
+ replace top two with left
+ repeated can be skipped, if no predictors are used, set me_ctx->pred to (0,0)
+*/
+uint64_t ff_me_search_umh(AVMotionEstContext *me_ctx, int x_mb, int y_mb, int *mv)
+{
+ int x, y;
+ int x_min = FFMAX(me_ctx->x_min, x_mb - me_ctx->search_param);
+ int y_min = FFMAX(me_ctx->y_min, y_mb - me_ctx->search_param);
+ int x_max = FFMIN(x_mb + me_ctx->search_param, me_ctx->x_max);
+ int y_max = FFMIN(y_mb + me_ctx->search_param, me_ctx->y_max);
+ uint64_t cost, cost_min;
+ int d, i;
+ int end_x, end_y;
+
+ AVMotionEstPredictor *pred = &me_ctx->preds[0];
+
+ cost_min = UINT64_MAX;
+
+ COST_P_MV(x_mb + me_ctx->pred_x, y_mb + me_ctx->pred_y);
+
+ for (i = 0; i < pred->nb; i++)
+ COST_P_MV(x_mb + pred->mvs[i][0], y_mb + pred->mvs[i][1]);
+
+ // Unsymmetrical-cross Search
+ x = mv[0];
+ y = mv[1];
+ for (d = 1; d <= me_ctx->search_param; d += 2) {
+ COST_P_MV(x - d, y);
+ COST_P_MV(x + d, y);
+ if (d <= me_ctx->search_param / 2) {
+ COST_P_MV(x, y - d);
+ COST_P_MV(x, y + d);
+ }
+ }
+
+ // Uneven Multi-Hexagon-Grid Search
+ end_x = FFMIN(mv[0] + 2, x_max);
+ end_y = FFMIN(mv[1] + 2, y_max);
+ for (y = FFMAX(y_min, mv[1] - 2); y <= end_y; y++)
+ for (x = FFMAX(x_min, mv[0] - 2); x <= end_x; x++)
+ COST_P_MV(x, y);
+
+ x = mv[0];
+ y = mv[1];
+ for (d = 1; d <= me_ctx->search_param / 4; d++)
+ for (i = 1; i < 16; i++)
+ COST_P_MV(x + hex4[i][0] * d, y + hex4[i][1] * d);
+
+ // Extended Hexagon-based Search
+ do {
+ x = mv[0];
+ y = mv[1];
+
+ for (i = 0; i < 6; i++)
+ COST_P_MV(x + hex2[i][0], y + hex2[i][1]);
+
+ } while (x != mv[0] || y != mv[1]);
+
+ for (i = 0; i < 4; i++)
+ COST_P_MV(x + dia1[i][0], y + dia1[i][1]);
+
+ return cost_min;
+}
diff --git a/libavfilter/motion_estimation.h b/libavfilter/motion_estimation.h
new file mode 100644
index 0000000000..6ae29dd9e3
--- /dev/null
+++ b/libavfilter/motion_estimation.h
@@ -0,0 +1,87 @@
+/**
+ * Copyright (c) 2016 Davinder Singh (DSM_) <ds.mudhar<@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_MOTION_ESTIMATION_H
+#define AVFILTER_MOTION_ESTIMATION_H
+
+#include "libavutil/avutil.h"
+
+#define AV_ME_METHOD_ESA 1
+#define AV_ME_METHOD_TSS 2
+#define AV_ME_METHOD_TDLS 3
+#define AV_ME_METHOD_NTSS 4
+#define AV_ME_METHOD_FSS 5
+#define AV_ME_METHOD_DS 6
+#define AV_ME_METHOD_HEXBS 7
+#define AV_ME_METHOD_EPZS 8
+#define AV_ME_METHOD_UMH 9
+
+typedef struct AVMotionEstPredictor {
+ int mvs[10][2];
+ int nb;
+} AVMotionEstPredictor;
+
+typedef struct AVMotionEstContext {
+ uint8_t *data_cur, *data_ref;
+ int linesize;
+
+ int mb_size;
+ int search_param;
+
+ int width;
+ int height;
+
+ int x_min;
+ int x_max;
+ int y_min;
+ int y_max;
+
+ int pred_x; ///< median predictor x
+ int pred_y; ///< median predictor y
+ AVMotionEstPredictor preds[2];
+
+ uint64_t (*get_cost)(struct AVMotionEstContext *me_ctx, int x_mb, int y_mb,
+ int mv_x, int mv_y);
+} AVMotionEstContext;
+
+void ff_me_init_context(AVMotionEstContext *me_ctx, int mb_size, int search_param,
+ int width, int height, int x_min, int x_max, int y_min, int y_max);
+
+uint64_t ff_me_cmp_sad(AVMotionEstContext *me_ctx, int x_mb, int y_mb, int x_mv, int y_mv);
+
+uint64_t ff_me_search_esa(AVMotionEstContext *me_ctx, int x_mb, int y_mb, int *mv);
+
+uint64_t ff_me_search_tss(AVMotionEstContext *me_ctx, int x_mb, int y_mb, int *mv);
+
+uint64_t ff_me_search_tdls(AVMotionEstContext *me_ctx, int x_mb, int y_mb, int *mv);
+
+uint64_t ff_me_search_ntss(AVMotionEstContext *me_ctx, int x_mb, int y_mb, int *mv);
+
+uint64_t ff_me_search_fss(AVMotionEstContext *me_ctx, int x_mb, int y_mb, int *mv);
+
+uint64_t ff_me_search_ds(AVMotionEstContext *me_ctx, int x_mb, int y_mb, int *mv);
+
+uint64_t ff_me_search_hexbs(AVMotionEstContext *me_ctx, int x_mb, int y_mb, int *mv);
+
+uint64_t ff_me_search_epzs(AVMotionEstContext *me_ctx, int x_mb, int y_mb, int *mv);
+
+uint64_t ff_me_search_umh(AVMotionEstContext *me_ctx, int x_mb, int y_mb, int *mv);
+
+#endif /* AVFILTER_MOTION_ESTIMATION_H */
diff --git a/libavfilter/opencl_allkernels.c b/libavfilter/opencl_allkernels.c
new file mode 100644
index 0000000000..6d80fa8598
--- /dev/null
+++ b/libavfilter/opencl_allkernels.c
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2013 Wei Gao <weigao@multicorewareinc.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "opencl_allkernels.h"
+#if CONFIG_OPENCL
+#include "libavutil/opencl.h"
+#include "deshake_opencl_kernel.h"
+#include "unsharp_opencl_kernel.h"
+#endif
+
+#define OPENCL_REGISTER_KERNEL_CODE(X, x) \
+ { \
+ if (CONFIG_##X##_FILTER) { \
+ av_opencl_register_kernel_code(ff_kernel_##x##_opencl); \
+ } \
+ }
+
+void ff_opencl_register_filter_kernel_code_all(void)
+{
+ #if CONFIG_OPENCL
+ OPENCL_REGISTER_KERNEL_CODE(DESHAKE, deshake);
+ OPENCL_REGISTER_KERNEL_CODE(UNSHARP, unsharp);
+ #endif
+}
diff --git a/libavfilter/opencl_allkernels.h b/libavfilter/opencl_allkernels.h
new file mode 100644
index 0000000000..57b650d27b
--- /dev/null
+++ b/libavfilter/opencl_allkernels.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2013 Wei Gao <weigao@multicorewareinc.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_OPENCL_ALLKERNELS_H
+#define AVFILTER_OPENCL_ALLKERNELS_H
+
+#include "avfilter.h"
+#include "config.h"
+
+void ff_opencl_register_filter_kernel_code_all(void);
+
+#endif /* AVFILTER_OPENCL_ALLKERNELS_H */
diff --git a/libavfilter/psnr.h b/libavfilter/psnr.h
new file mode 100644
index 0000000000..bbc4541135
--- /dev/null
+++ b/libavfilter/psnr.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2015 Ronald S. Bultje <rsbultje@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_PSNR_H
+#define AVFILTER_PSNR_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+typedef struct PSNRDSPContext {
+ uint64_t (*sse_line)(const uint8_t *buf, const uint8_t *ref, int w);
+} PSNRDSPContext;
+
+void ff_psnr_init_x86(PSNRDSPContext *dsp, int bpp);
+
+#endif /* AVFILTER_PSNR_H */
diff --git a/libavfilter/pthread.c b/libavfilter/pthread.c
index 2ebcc9753a..ccb915eae5 100644
--- a/libavfilter/pthread.c
+++ b/libavfilter/pthread.c
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -26,17 +26,12 @@
#include "libavutil/common.h"
#include "libavutil/cpu.h"
#include "libavutil/mem.h"
+#include "libavutil/thread.h"
#include "avfilter.h"
#include "internal.h"
#include "thread.h"
-#if HAVE_PTHREADS
-#include <pthread.h>
-#elif HAVE_W32THREADS
-#include "compat/w32pthreads.h"
-#endif
-
typedef struct ThreadContext {
AVFilterGraph *graph;
@@ -156,7 +151,6 @@ static int thread_init_internal(ThreadContext *c, int nb_threads)
if (!nb_threads) {
int nb_cpus = av_cpu_count();
- av_log(c->graph, AV_LOG_DEBUG, "Detected %d logical cores.\n", nb_cpus);
// use number of cores + 1 as thread count if there is more than one
if (nb_cpus > 1)
nb_threads = nb_cpus + 1;
@@ -168,7 +162,7 @@ static int thread_init_internal(ThreadContext *c, int nb_threads)
return 1;
c->nb_threads = nb_threads;
- c->workers = av_mallocz(sizeof(*c->workers) * nb_threads);
+ c->workers = av_mallocz_array(sizeof(*c->workers), nb_threads);
if (!c->workers)
return AVERROR(ENOMEM);
diff --git a/libavfilter/removegrain.h b/libavfilter/removegrain.h
new file mode 100644
index 0000000000..f3f102889f
--- /dev/null
+++ b/libavfilter/removegrain.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2015 Paul B Mahol
+ * Copyright (c) 2015 James Darnley
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_REMOVEGRAIN_H
+#define AVFILTER_REMOVEGRAIN_H
+
+#include "avfilter.h"
+
+typedef struct RemoveGrainContext {
+ const AVClass *class;
+
+ int mode[4];
+
+ int nb_planes;
+ int planewidth[4];
+ int planeheight[4];
+ int skip_even;
+ int skip_odd;
+
+ int (*rg[4])(int c, int a1, int a2, int a3, int a4, int a5, int a6, int a7, int a8);
+
+ void (*fl[4])(uint8_t *dst, uint8_t *src, ptrdiff_t stride, int pixels);
+} RemoveGrainContext;
+
+void ff_removegrain_init_x86(RemoveGrainContext *rg);
+
+#endif /* AVFILTER_REMOVEGRAIN_H */
diff --git a/libavfilter/scale.c b/libavfilter/scale.c
new file mode 100644
index 0000000000..50cd442849
--- /dev/null
+++ b/libavfilter/scale.c
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2007 Bobby Bingham
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdint.h>
+#include "scale.h"
+#include "libavutil/eval.h"
+#include "libavutil/mathematics.h"
+#include "libavutil/pixdesc.h"
+
+static const char *const var_names[] = {
+ "PI",
+ "PHI",
+ "E",
+ "in_w", "iw",
+ "in_h", "ih",
+ "out_w", "ow",
+ "out_h", "oh",
+ "a",
+ "sar",
+ "dar",
+ "hsub",
+ "vsub",
+ "ohsub",
+ "ovsub",
+ NULL
+};
+
+enum var_name {
+ VAR_PI,
+ VAR_PHI,
+ VAR_E,
+ VAR_IN_W, VAR_IW,
+ VAR_IN_H, VAR_IH,
+ VAR_OUT_W, VAR_OW,
+ VAR_OUT_H, VAR_OH,
+ VAR_A,
+ VAR_SAR,
+ VAR_DAR,
+ VAR_HSUB,
+ VAR_VSUB,
+ VAR_OHSUB,
+ VAR_OVSUB,
+ VARS_NB
+};
+
+int ff_scale_eval_dimensions(void *log_ctx,
+ const char *w_expr, const char *h_expr,
+ AVFilterLink *inlink, AVFilterLink *outlink,
+ int *ret_w, int *ret_h)
+{
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ const AVPixFmtDescriptor *out_desc = av_pix_fmt_desc_get(outlink->format);
+ const char *expr;
+ int w, h;
+ int factor_w, factor_h;
+ int eval_w, eval_h;
+ int ret;
+ double var_values[VARS_NB], res;
+
+ var_values[VAR_PI] = M_PI;
+ var_values[VAR_PHI] = M_PHI;
+ var_values[VAR_E] = M_E;
+ var_values[VAR_IN_W] = var_values[VAR_IW] = inlink->w;
+ var_values[VAR_IN_H] = var_values[VAR_IH] = inlink->h;
+ var_values[VAR_OUT_W] = var_values[VAR_OW] = NAN;
+ var_values[VAR_OUT_H] = var_values[VAR_OH] = NAN;
+ var_values[VAR_A] = (double) inlink->w / inlink->h;
+ var_values[VAR_SAR] = inlink->sample_aspect_ratio.num ?
+ (double) inlink->sample_aspect_ratio.num / inlink->sample_aspect_ratio.den : 1;
+ var_values[VAR_DAR] = var_values[VAR_A] * var_values[VAR_SAR];
+ var_values[VAR_HSUB] = 1 << desc->log2_chroma_w;
+ var_values[VAR_VSUB] = 1 << desc->log2_chroma_h;
+ var_values[VAR_OHSUB] = 1 << out_desc->log2_chroma_w;
+ var_values[VAR_OVSUB] = 1 << out_desc->log2_chroma_h;
+
+ /* evaluate width and height */
+ av_expr_parse_and_eval(&res, (expr = w_expr),
+ var_names, var_values,
+ NULL, NULL, NULL, NULL, NULL, 0, log_ctx);
+ eval_w = var_values[VAR_OUT_W] = var_values[VAR_OW] = res;
+
+ if ((ret = av_expr_parse_and_eval(&res, (expr = h_expr),
+ var_names, var_values,
+ NULL, NULL, NULL, NULL, NULL, 0, log_ctx)) < 0)
+ goto fail;
+ eval_h = var_values[VAR_OUT_H] = var_values[VAR_OH] = res;
+ /* evaluate again the width, as it may depend on the output height */
+ if ((ret = av_expr_parse_and_eval(&res, (expr = w_expr),
+ var_names, var_values,
+ NULL, NULL, NULL, NULL, NULL, 0, log_ctx)) < 0)
+ goto fail;
+ eval_w = res;
+
+ w = eval_w;
+ h = eval_h;
+
+ /* Check if it is requested that the result has to be divisible by a some
+ * factor (w or h = -n with n being the factor). */
+ factor_w = 1;
+ factor_h = 1;
+ if (w < -1) {
+ factor_w = -w;
+ }
+ if (h < -1) {
+ factor_h = -h;
+ }
+
+ if (w < 0 && h < 0)
+ eval_w = eval_h = 0;
+
+ if (!(w = eval_w))
+ w = inlink->w;
+ if (!(h = eval_h))
+ h = inlink->h;
+
+ /* Make sure that the result is divisible by the factor we determined
+ * earlier. If no factor was set, it is nothing will happen as the default
+ * factor is 1 */
+ if (w < 0)
+ w = av_rescale(h, inlink->w, inlink->h * factor_w) * factor_w;
+ if (h < 0)
+ h = av_rescale(w, inlink->h, inlink->w * factor_h) * factor_h;
+
+ *ret_w = w;
+ *ret_h = h;
+
+ return 0;
+
+fail:
+ av_log(log_ctx, AV_LOG_ERROR,
+ "Error when evaluating the expression '%s'.\n"
+ "Maybe the expression for out_w:'%s' or for out_h:'%s' is self-referencing.\n",
+ expr, w_expr, h_expr);
+ return ret;
+}
diff --git a/libavfilter/scale.h b/libavfilter/scale.h
new file mode 100644
index 0000000000..dfe67d0be0
--- /dev/null
+++ b/libavfilter/scale.h
@@ -0,0 +1,28 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_SCALE_H
+#define AVFILTER_SCALE_H
+
+#include "avfilter.h"
+
+int ff_scale_eval_dimensions(void *ctx,
+ const char *w_expr, const char *h_expr,
+ AVFilterLink *inlink, AVFilterLink *outlink,
+ int *ret_w, int *ret_h);
+#endif
diff --git a/libavfilter/setpts.c b/libavfilter/setpts.c
index 1c262b73e7..2ccca28e9a 100644
--- a/libavfilter/setpts.c
+++ b/libavfilter/setpts.c
@@ -2,20 +2,20 @@
* Copyright (c) 2010 Stefano Sabatini
* Copyright (c) 2008 Victor Paesa
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -31,25 +31,27 @@
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/time.h"
-
#include "audio.h"
#include "avfilter.h"
#include "internal.h"
#include "video.h"
-#include "config.h"
-
static const char *const var_names[] = {
- "E", ///< Euler number
"FRAME_RATE", ///< defined only for constant frame-rate video
"INTERLACED", ///< tell if the current frame is interlaced
"N", ///< frame / sample number (starting at zero)
- "PHI", ///< golden ratio
- "PI", ///< Greek pi
+ "NB_CONSUMED_SAMPLES", ///< number of samples consumed by the filter (only audio)
+ "NB_SAMPLES", ///< number of samples in the current frame (only audio)
+ "POS", ///< original position in the file of the frame
"PREV_INPTS", ///< previous input PTS
+ "PREV_INT", ///< previous input time in seconds
"PREV_OUTPTS", ///< previous output PTS
+ "PREV_OUTT", ///< previous output time in seconds
"PTS", ///< original pts in the file of the frame
+ "SAMPLE_RATE", ///< sample rate (only audio)
"STARTPTS", ///< PTS at start of movie
+ "STARTT", ///< time at start of movie
+ "T", ///< original time in the file of the frame
"TB", ///< timebase
"RTCTIME", ///< wallclock (RTC) time in micro seconds
"RTCSTART", ///< wallclock (RTC) time at the start of the movie in micro seconds
@@ -59,16 +61,21 @@ static const char *const var_names[] = {
};
enum var_name {
- VAR_E,
VAR_FRAME_RATE,
VAR_INTERLACED,
VAR_N,
- VAR_PHI,
- VAR_PI,
+ VAR_NB_CONSUMED_SAMPLES,
+ VAR_NB_SAMPLES,
+ VAR_POS,
VAR_PREV_INPTS,
+ VAR_PREV_INT,
VAR_PREV_OUTPTS,
+ VAR_PREV_OUTT,
VAR_PTS,
+ VAR_SAMPLE_RATE,
VAR_STARTPTS,
+ VAR_STARTT,
+ VAR_T,
VAR_TB,
VAR_RTCTIME,
VAR_RTCSTART,
@@ -82,6 +89,7 @@ typedef struct SetPTSContext {
char *expr_str;
AVExpr *expr;
double var_values[VAR_VARS_NB];
+ enum AVMediaType type;
} SetPTSContext;
static av_cold int init(AVFilterContext *ctx)
@@ -95,41 +103,55 @@ static av_cold int init(AVFilterContext *ctx)
return ret;
}
- setpts->var_values[VAR_E] = M_E;
setpts->var_values[VAR_N] = 0.0;
setpts->var_values[VAR_S] = 0.0;
- setpts->var_values[VAR_PHI] = M_PHI;
- setpts->var_values[VAR_PI] = M_PI;
setpts->var_values[VAR_PREV_INPTS] = NAN;
+ setpts->var_values[VAR_PREV_INT] = NAN;
setpts->var_values[VAR_PREV_OUTPTS] = NAN;
+ setpts->var_values[VAR_PREV_OUTT] = NAN;
setpts->var_values[VAR_STARTPTS] = NAN;
+ setpts->var_values[VAR_STARTT] = NAN;
return 0;
}
static int config_input(AVFilterLink *inlink)
{
- SetPTSContext *setpts = inlink->dst->priv;
+ AVFilterContext *ctx = inlink->dst;
+ SetPTSContext *setpts = ctx->priv;
+ setpts->type = inlink->type;
setpts->var_values[VAR_TB] = av_q2d(inlink->time_base);
setpts->var_values[VAR_RTCSTART] = av_gettime();
- if (inlink->type == AVMEDIA_TYPE_AUDIO) {
- setpts->var_values[VAR_SR] = inlink->sample_rate;
- }
+ setpts->var_values[VAR_SR] =
+ setpts->var_values[VAR_SAMPLE_RATE] =
+ setpts->type == AVMEDIA_TYPE_AUDIO ? inlink->sample_rate : NAN;
setpts->var_values[VAR_FRAME_RATE] = inlink->frame_rate.num &&
inlink->frame_rate.den ?
av_q2d(inlink->frame_rate) : NAN;
- // Indicate the output can be variable framerate.
- inlink->frame_rate = (AVRational){1, 0};
-
- av_log(inlink->src, AV_LOG_VERBOSE, "TB:%f\n", setpts->var_values[VAR_TB]);
+ av_log(inlink->src, AV_LOG_VERBOSE, "TB:%f FRAME_RATE:%f SAMPLE_RATE:%f\n",
+ setpts->var_values[VAR_TB],
+ setpts->var_values[VAR_FRAME_RATE],
+ setpts->var_values[VAR_SAMPLE_RATE]);
return 0;
}
#define D2TS(d) (isnan(d) ? AV_NOPTS_VALUE : (int64_t)(d))
#define TS2D(ts) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts))
+#define TS2T(ts, tb) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts)*av_q2d(tb))
+
+#define BUF_SIZE 64
+
+static inline char *double2int64str(char *buf, double v)
+{
+ if (isnan(v)) snprintf(buf, BUF_SIZE, "nan");
+ else snprintf(buf, BUF_SIZE, "%"PRId64, (int64_t)v);
+ return buf;
+}
+
+#define d2istr(v) double2int64str((char[BUF_SIZE]){0}, v)
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
@@ -137,27 +159,43 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
int64_t in_pts = frame->pts;
double d;
- if (isnan(setpts->var_values[VAR_STARTPTS]))
+ if (isnan(setpts->var_values[VAR_STARTPTS])) {
setpts->var_values[VAR_STARTPTS] = TS2D(frame->pts);
-
+ setpts->var_values[VAR_STARTT ] = TS2T(frame->pts, inlink->time_base);
+ }
setpts->var_values[VAR_PTS ] = TS2D(frame->pts);
+ setpts->var_values[VAR_T ] = TS2T(frame->pts, inlink->time_base);
+ setpts->var_values[VAR_POS ] = av_frame_get_pkt_pos(frame) == -1 ? NAN : av_frame_get_pkt_pos(frame);
setpts->var_values[VAR_RTCTIME ] = av_gettime();
if (inlink->type == AVMEDIA_TYPE_VIDEO) {
setpts->var_values[VAR_INTERLACED] = frame->interlaced_frame;
- } else {
+ } else if (inlink->type == AVMEDIA_TYPE_AUDIO) {
setpts->var_values[VAR_S] = frame->nb_samples;
+ setpts->var_values[VAR_NB_SAMPLES] = frame->nb_samples;
}
d = av_expr_eval(setpts->expr, setpts->var_values, NULL);
frame->pts = D2TS(d);
av_log(inlink->dst, AV_LOG_TRACE,
- "n:%"PRId64" interlaced:%d pts:%"PRId64" t:%f -> pts:%"PRId64" t:%f\n",
+ "N:%"PRId64" PTS:%s T:%f POS:%s",
(int64_t)setpts->var_values[VAR_N],
- (int)setpts->var_values[VAR_INTERLACED],
- in_pts, in_pts * av_q2d(inlink->time_base),
- frame->pts, frame->pts * av_q2d(inlink->time_base));
+ d2istr(setpts->var_values[VAR_PTS]),
+ setpts->var_values[VAR_T],
+ d2istr(setpts->var_values[VAR_POS]));
+ switch (inlink->type) {
+ case AVMEDIA_TYPE_VIDEO:
+ av_log(inlink->dst, AV_LOG_TRACE, " INTERLACED:%"PRId64,
+ (int64_t)setpts->var_values[VAR_INTERLACED]);
+ break;
+ case AVMEDIA_TYPE_AUDIO:
+ av_log(inlink->dst, AV_LOG_TRACE, " NB_SAMPLES:%"PRId64" NB_CONSUMED_SAMPLES:%"PRId64,
+ (int64_t)setpts->var_values[VAR_NB_SAMPLES],
+ (int64_t)setpts->var_values[VAR_NB_CONSUMED_SAMPLES]);
+ break;
+ }
+ av_log(inlink->dst, AV_LOG_TRACE, " -> PTS:%s T:%f\n", d2istr(d), TS2T(d, inlink->time_base));
if (inlink->type == AVMEDIA_TYPE_VIDEO) {
setpts->var_values[VAR_N] += 1.0;
@@ -166,7 +204,12 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
}
setpts->var_values[VAR_PREV_INPTS ] = TS2D(in_pts);
+ setpts->var_values[VAR_PREV_INT ] = TS2T(in_pts, inlink->time_base);
setpts->var_values[VAR_PREV_OUTPTS] = TS2D(frame->pts);
+ setpts->var_values[VAR_PREV_OUTT] = TS2T(frame->pts, inlink->time_base);
+ if (setpts->type == AVMEDIA_TYPE_AUDIO) {
+ setpts->var_values[VAR_NB_CONSUMED_SAMPLES] += frame->nb_samples;
+ }
return ff_filter_frame(inlink->dst->outputs[0], frame);
}
@@ -178,27 +221,22 @@ static av_cold void uninit(AVFilterContext *ctx)
}
#define OFFSET(x) offsetof(SetPTSContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_AUDIO_PARAM
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
static const AVOption options[] = {
{ "expr", "Expression determining the frame timestamp", OFFSET(expr_str), AV_OPT_TYPE_STRING, { .str = "PTS" }, .flags = FLAGS },
- { NULL },
+ { NULL }
};
#if CONFIG_SETPTS_FILTER
-static const AVClass setpts_class = {
- .class_name = "setpts",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+#define setpts_options options
+AVFILTER_DEFINE_CLASS(setpts);
static const AVFilterPad avfilter_vf_setpts_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .get_video_buffer = ff_null_get_video_buffer,
- .config_props = config_input,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
},
{ NULL }
};
@@ -223,23 +261,19 @@ AVFilter ff_vf_setpts = {
.inputs = avfilter_vf_setpts_inputs,
.outputs = avfilter_vf_setpts_outputs,
};
-#endif
+#endif /* CONFIG_SETPTS_FILTER */
#if CONFIG_ASETPTS_FILTER
-static const AVClass asetpts_class = {
- .class_name = "asetpts",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+
+#define asetpts_options options
+AVFILTER_DEFINE_CLASS(asetpts);
static const AVFilterPad asetpts_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_AUDIO,
- .get_audio_buffer = ff_null_get_audio_buffer,
- .config_props = config_input,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
},
{ NULL }
};
@@ -257,11 +291,9 @@ AVFilter ff_af_asetpts = {
.description = NULL_IF_CONFIG_SMALL("Set PTS for the output audio frame."),
.init = init,
.uninit = uninit,
-
- .priv_size = sizeof(SetPTSContext),
- .priv_class = &asetpts_class,
-
- .inputs = asetpts_inputs,
- .outputs = asetpts_outputs,
+ .priv_size = sizeof(SetPTSContext),
+ .priv_class = &asetpts_class,
+ .inputs = asetpts_inputs,
+ .outputs = asetpts_outputs,
};
-#endif
+#endif /* CONFIG_ASETPTS_FILTER */
diff --git a/libavfilter/settb.c b/libavfilter/settb.c
index 169037f321..83616c1361 100644
--- a/libavfilter/settb.c
+++ b/libavfilter/settb.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2010 Stefano Sabatini
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -38,9 +38,6 @@
#include "video.h"
static const char *const var_names[] = {
- "E",
- "PHI",
- "PI",
"AVTB", /* default timebase 1/AV_TIME_BASE */
"intb", /* input timebase */
"sr", /* sample rate */
@@ -48,9 +45,6 @@ static const char *const var_names[] = {
};
enum var_name {
- VAR_E,
- VAR_PHI,
- VAR_PI,
VAR_AVTB,
VAR_INTB,
VAR_SR,
@@ -63,6 +57,16 @@ typedef struct SetTBContext {
double var_values[VAR_VARS_NB];
} SetTBContext;
+#define OFFSET(x) offsetof(SetTBContext, x)
+#define DEFINE_OPTIONS(filt_name, filt_type) \
+static const AVOption filt_name##_options[] = { \
+ { "expr", "set expression determining the output timebase", OFFSET(tb_expr), AV_OPT_TYPE_STRING, {.str="intb"}, \
+ .flags=AV_OPT_FLAG_##filt_type##_PARAM|AV_OPT_FLAG_FILTERING_PARAM }, \
+ { "tb", "set expression determining the output timebase", OFFSET(tb_expr), AV_OPT_TYPE_STRING, {.str="intb"}, \
+ .flags=AV_OPT_FLAG_##filt_type##_PARAM|AV_OPT_FLAG_FILTERING_PARAM }, \
+ { NULL } \
+}
+
static int config_output_props(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
@@ -72,9 +76,6 @@ static int config_output_props(AVFilterLink *outlink)
int ret;
double res;
- settb->var_values[VAR_E] = M_E;
- settb->var_values[VAR_PHI] = M_PHI;
- settb->var_values[VAR_PI] = M_PI;
settb->var_values[VAR_AVTB] = av_q2d(AV_TIME_BASE_Q);
settb->var_values[VAR_INTB] = av_q2d(inlink->time_base);
settb->var_values[VAR_SR] = inlink->sample_rate;
@@ -119,27 +120,16 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
return ff_filter_frame(outlink, frame);
}
-#define OFFSET(x) offsetof(SetTBContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_AUDIO_PARAM
-static const AVOption options[] = {
- { "expr", "Expression determining the output timebase", OFFSET(tb_expr), AV_OPT_TYPE_STRING, { .str = "intb" }, .flags = FLAGS },
- { NULL },
-};
-
#if CONFIG_SETTB_FILTER
-static const AVClass settb_class = {
- .class_name = "settb",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+
+DEFINE_OPTIONS(settb, VIDEO);
+AVFILTER_DEFINE_CLASS(settb);
static const AVFilterPad avfilter_vf_settb_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .get_video_buffer = ff_null_get_video_buffer,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
},
{ NULL }
};
@@ -154,31 +144,24 @@ static const AVFilterPad avfilter_vf_settb_outputs[] = {
};
AVFilter ff_vf_settb = {
- .name = "settb",
+ .name = "settb",
.description = NULL_IF_CONFIG_SMALL("Set timebase for the video output link."),
-
- .priv_size = sizeof(SetTBContext),
- .priv_class = &settb_class,
-
- .inputs = avfilter_vf_settb_inputs,
-
- .outputs = avfilter_vf_settb_outputs,
+ .priv_size = sizeof(SetTBContext),
+ .priv_class = &settb_class,
+ .inputs = avfilter_vf_settb_inputs,
+ .outputs = avfilter_vf_settb_outputs,
};
#endif /* CONFIG_SETTB_FILTER */
#if CONFIG_ASETTB_FILTER
-static const AVClass asettb_class = {
- .class_name = "asettb",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+
+DEFINE_OPTIONS(asettb, AUDIO);
+AVFILTER_DEFINE_CLASS(asettb);
static const AVFilterPad avfilter_af_asettb_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
- .get_audio_buffer = ff_null_get_audio_buffer,
.filter_frame = filter_frame,
},
{ NULL }
diff --git a/libavfilter/signature.h b/libavfilter/signature.h
new file mode 100644
index 0000000000..2659c8790e
--- /dev/null
+++ b/libavfilter/signature.h
@@ -0,0 +1,569 @@
+/*
+ * Copyright (c) 2017 Gerion Entrup
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/**
+ * @file
+ * MPEG-7 video signature calculation and lookup filter
+ */
+
+#ifndef AVFILTER_SIGNATURE_H
+#define AVFILTER_SIGNATURE_H
+
+#include <float.h>
+#include "libavutil/common.h"
+#include "libavutil/opt.h"
+#include "libavutil/timestamp.h"
+#include "avfilter.h"
+#include "internal.h"
+
+#define ELEMENT_COUNT 10
+#define SIGELEM_SIZE 380
+#define DIFFELEM_SIZE 348 /* SIGELEM_SIZE - elem_a1 - elem_a2 */
+#define COARSE_SIZE 90
+
+enum lookup_mode {
+ MODE_OFF,
+ MODE_FULL,
+ MODE_FAST,
+ NB_LOOKUP_MODE
+};
+
+enum formats {
+ FORMAT_BINARY,
+ FORMAT_XML,
+ NB_FORMATS
+};
+
+typedef struct {
+ uint8_t x;
+ uint8_t y;
+} Point;
+
+typedef struct {
+ Point up;
+ Point to;
+} Block;
+
+typedef struct {
+ int av_elem; /* average element category */
+ short left_count; /* count of blocks that will be added together */
+ short block_count; /* count of blocks per element */
+ short elem_count;
+ const Block* blocks;
+} ElemCat;
+
+typedef struct FineSignature {
+ struct FineSignature* next;
+ struct FineSignature* prev;
+ uint64_t pts;
+ uint32_t index; /* needed for xmlexport */
+ uint8_t confidence;
+ uint8_t words[5];
+ uint8_t framesig[SIGELEM_SIZE/5];
+} FineSignature;
+
+typedef struct CoarseSignature {
+ uint8_t data[5][31]; /* 5 words with min. 243 bit */
+ struct FineSignature* first; /* associated Finesignatures */
+ struct FineSignature* last;
+ struct CoarseSignature* next;
+} CoarseSignature;
+
+/* lookup types */
+typedef struct MatchingInfo {
+ double meandist;
+ double framerateratio; /* second/first */
+ int score;
+ int offset;
+ int matchframes; /* number of matching frames */
+ int whole;
+ struct FineSignature* first;
+ struct FineSignature* second;
+ struct MatchingInfo* next;
+} MatchingInfo;
+
+typedef struct {
+ AVRational time_base;
+ /* needed for xml_export */
+ int w; /* height */
+ int h; /* width */
+
+ /* overflow protection */
+ int divide;
+
+ FineSignature* finesiglist;
+ FineSignature* curfinesig;
+
+ CoarseSignature* coarsesiglist;
+ CoarseSignature* coarseend; /* needed for xml export */
+ /* helpers to store the alternating signatures */
+ CoarseSignature* curcoarsesig1;
+ CoarseSignature* curcoarsesig2;
+
+ int coarsecount; /* counter from 0 to 89 */
+ int midcoarse; /* whether it is a coarsesignature beginning from 45 + i * 90 */
+ uint32_t lastindex; /* helper to store amount of frames */
+
+ int exported; /* boolean whether stream already exported */
+} StreamContext;
+
+typedef struct {
+ const AVClass *class;
+ /* input parameters */
+ int mode;
+ int nb_inputs;
+ char *filename;
+ int format;
+ int thworddist;
+ int thcomposdist;
+ int thl1;
+ int thdi;
+ int thit;
+ /* end input parameters */
+
+ uint8_t l1distlut[243*242/2]; /* 243 + 242 + 241 ... */
+ StreamContext* streamcontexts;
+} SignatureContext;
+
+
+static const Block elem_a1_data[] = {
+ {{ 0, 0},{ 7, 7}},
+ {{ 8, 0},{15, 7}},
+ {{ 0, 8},{ 7,15}},
+ {{ 8, 8},{15,15}},
+ {{16, 0},{23, 7}},
+ {{24, 0},{31, 7}},
+ {{16, 8},{23,15}},
+ {{24, 8},{31,15}},
+ {{ 0,16},{ 7,23}},
+ {{ 8,16},{15,23}},
+ {{ 0,24},{ 7,31}},
+ {{ 8,24},{15,31}},
+ {{16,16},{23,23}},
+ {{24,16},{31,23}},
+ {{16,24},{23,31}},
+ {{24,24},{31,31}},
+ {{ 0, 0},{15,15}},
+ {{16, 0},{31,15}},
+ {{ 0,16},{15,31}},
+ {{16,16},{31,31}}
+};
+static const ElemCat elem_a1 = { 1, 1, 1, 20, elem_a1_data };
+
+static const Block elem_a2_data[] = {
+ {{ 2, 2},{ 9, 9}},
+ {{12, 2},{19, 9}},
+ {{22, 2},{29, 9}},
+ {{ 2,12},{ 9,19}},
+ {{12,12},{19,19}},
+ {{22,12},{29,19}},
+ {{ 2,22},{ 9,29}},
+ {{12,22},{19,29}},
+ {{22,22},{29,29}},
+ {{ 9, 9},{22,22}},
+ {{ 6, 6},{25,25}},
+ {{ 3, 3},{28,28}}
+};
+static const ElemCat elem_a2 = { 1, 1, 1, 12, elem_a2_data };
+
+static const Block elem_d1_data[] = {
+ {{ 0, 0},{ 1, 3}},{{ 2, 0},{ 3, 3}},
+ {{ 4, 0},{ 7, 1}},{{ 4, 2},{ 7, 3}},
+ {{ 0, 6},{ 3, 7}},{{ 0, 4},{ 3, 5}},
+ {{ 6, 4},{ 7, 7}},{{ 4, 4},{ 5, 7}},
+ {{ 8, 0},{ 9, 3}},{{10, 0},{11, 3}},
+ {{12, 0},{15, 1}},{{12, 2},{15, 3}},
+ {{ 8, 6},{11, 7}},{{ 8, 4},{11, 5}},
+ {{14, 4},{15, 7}},{{12, 4},{13, 7}},
+ {{ 0, 8},{ 1,11}},{{ 2, 8},{ 3,11}},
+ {{ 4, 8},{ 7, 9}},{{ 4,10},{ 7,11}},
+ {{ 0,14},{ 3,15}},{{ 0,12},{ 3,13}},
+ {{ 6,12},{ 7,15}},{{ 4,12},{ 5,15}},
+ {{ 8, 8},{ 9,11}},{{10, 8},{11,11}},
+ {{12, 8},{15, 9}},{{12,10},{15,11}},
+ {{ 8,14},{11,15}},{{ 8,12},{11,13}},
+ {{14,12},{15,15}},{{12,12},{13,15}},
+ {{16, 0},{19, 1}},{{16, 2},{19, 3}},
+ {{22, 0},{23, 3}},{{20, 0},{21, 3}},
+ {{16, 4},{17, 7}},{{18, 4},{19, 7}},
+ {{20, 6},{23, 7}},{{20, 4},{23, 5}},
+ {{24, 0},{27, 1}},{{24, 2},{27, 3}},
+ {{30, 0},{31, 3}},{{28, 0},{29, 3}},
+ {{24, 4},{25, 7}},{{26, 4},{27, 7}},
+ {{28, 6},{31, 7}},{{28, 4},{31, 5}},
+ {{16, 8},{19, 9}},{{16,10},{19,11}},
+ {{22, 8},{23,11}},{{20, 8},{21,11}},
+ {{16,12},{17,15}},{{18,12},{19,15}},
+ {{20,14},{23,15}},{{20,12},{23,13}},
+ {{24, 8},{27, 9}},{{24,10},{27,11}},
+ {{30, 8},{31,11}},{{28, 8},{29,11}},
+ {{24,12},{25,15}},{{26,12},{27,15}},
+ {{28,14},{31,15}},{{28,12},{31,13}},
+ {{ 0,16},{ 3,17}},{{ 0,18},{ 3,19}},
+ {{ 6,16},{ 7,19}},{{ 4,16},{ 5,19}},
+ {{ 0,20},{ 1,23}},{{ 2,20},{ 3,23}},
+ {{ 4,22},{ 7,23}},{{ 4,20},{ 7,21}},
+ {{ 8,16},{11,17}},{{ 8,18},{11,19}},
+ {{14,16},{15,19}},{{12,16},{13,19}},
+ {{ 8,20},{ 9,23}},{{10,20},{11,23}},
+ {{12,22},{15,23}},{{12,20},{15,21}},
+ {{ 0,24},{ 3,25}},{{ 0,26},{ 3,27}},
+ {{ 6,24},{ 7,27}},{{ 4,24},{ 5,27}},
+ {{ 0,28},{ 1,31}},{{ 2,28},{ 3,31}},
+ {{ 4,30},{ 7,31}},{{ 4,28},{ 7,29}},
+ {{ 8,24},{11,25}},{{ 8,26},{11,27}},
+ {{14,24},{15,27}},{{12,24},{13,27}},
+ {{ 8,28},{ 9,31}},{{10,28},{11,31}},
+ {{12,30},{15,31}},{{12,28},{15,29}},
+ {{16,16},{17,19}},{{18,16},{19,19}},
+ {{20,16},{23,17}},{{20,18},{23,19}},
+ {{16,22},{19,23}},{{16,20},{19,21}},
+ {{22,20},{23,23}},{{20,20},{21,23}},
+ {{24,16},{25,19}},{{26,16},{27,19}},
+ {{28,16},{31,17}},{{28,18},{31,19}},
+ {{24,22},{27,23}},{{24,20},{27,21}},
+ {{30,20},{31,23}},{{28,20},{29,23}},
+ {{16,24},{17,27}},{{18,24},{19,27}},
+ {{20,24},{23,25}},{{20,26},{23,27}},
+ {{16,30},{19,31}},{{16,28},{19,29}},
+ {{22,28},{23,31}},{{20,28},{21,31}},
+ {{24,24},{25,27}},{{26,24},{27,27}},
+ {{28,24},{31,25}},{{28,26},{31,27}},
+ {{24,30},{27,31}},{{24,28},{27,29}},
+ {{30,28},{31,31}},{{28,28},{29,31}},
+ {{ 2, 2},{ 3, 5}},{{ 4, 2},{ 5, 5}},
+ {{ 6, 2},{ 9, 3}},{{ 6, 4},{ 9, 5}},
+ {{ 2, 8},{ 5, 9}},{{ 2, 6},{ 5, 7}},
+ {{ 8, 6},{ 9, 9}},{{ 6, 6},{ 7, 9}},
+ {{12, 2},{13, 5}},{{14, 2},{15, 5}},
+ {{16, 2},{19, 3}},{{16, 4},{19, 5}},
+ {{12, 8},{15, 9}},{{12, 6},{15, 7}},
+ {{18, 6},{19, 9}},{{16, 6},{17, 9}},
+ {{22, 2},{23, 5}},{{24, 2},{25, 5}},
+ {{26, 2},{29, 3}},{{26, 4},{29, 5}},
+ {{22, 8},{25, 9}},{{22, 6},{25, 7}},
+ {{28, 6},{29, 9}},{{26, 6},{27, 9}},
+ {{ 2,12},{ 3,15}},{{ 4,12},{ 5,15}},
+ {{ 6,12},{ 9,13}},{{ 6,14},{ 9,15}},
+ {{ 2,18},{ 5,19}},{{ 2,16},{ 5,17}},
+ {{ 8,16},{ 9,19}},{{ 6,16},{ 7,19}},
+ {{12,12},{15,13}},{{12,14},{15,15}},
+ {{16,12},{19,13}},{{16,14},{19,15}},
+ {{12,18},{15,19}},{{12,16},{15,17}},
+ {{16,18},{19,19}},{{16,16},{19,17}},
+ {{22,12},{23,15}},{{24,12},{25,15}},
+ {{26,12},{29,13}},{{26,14},{29,15}},
+ {{22,18},{25,19}},{{22,16},{25,17}},
+ {{28,16},{29,19}},{{26,16},{27,19}},
+ {{ 2,22},{ 3,25}},{{ 4,22},{ 5,25}},
+ {{ 6,22},{ 9,23}},{{ 6,24},{ 9,25}},
+ {{ 2,28},{ 5,29}},{{ 2,26},{ 5,27}},
+ {{ 8,26},{ 9,29}},{{ 6,26},{ 7,29}},
+ {{12,22},{13,25}},{{14,22},{15,25}},
+ {{16,22},{19,23}},{{16,24},{19,25}},
+ {{12,28},{15,29}},{{12,26},{15,27}},
+ {{18,26},{19,29}},{{16,26},{17,29}},
+ {{22,22},{23,25}},{{24,22},{25,25}},
+ {{26,22},{29,23}},{{26,24},{29,25}},
+ {{22,28},{25,29}},{{22,26},{25,27}},
+ {{28,26},{29,29}},{{26,26},{27,29}},
+ {{ 7, 7},{10, 8}},{{ 7, 9},{10,10}},
+ {{11, 7},{12,10}},{{13, 7},{14,10}},
+ {{ 7,11},{ 8,14}},{{ 9,11},{10,14}},
+ {{11,11},{14,12}},{{11,13},{14,14}},
+ {{17, 7},{20, 8}},{{17, 9},{20,10}},
+ {{21, 7},{22,10}},{{23, 7},{24,10}},
+ {{17,11},{18,14}},{{19,11},{20,14}},
+ {{21,11},{24,12}},{{21,13},{24,14}},
+ {{ 7,17},{10,18}},{{ 7,19},{10,20}},
+ {{11,17},{12,20}},{{13,17},{14,20}},
+ {{ 7,21},{ 8,24}},{{ 9,21},{10,24}},
+ {{11,21},{14,22}},{{11,23},{14,24}},
+ {{17,17},{20,18}},{{17,19},{20,20}},
+ {{21,17},{22,20}},{{23,17},{24,20}},
+ {{17,21},{18,24}},{{19,21},{20,24}},
+ {{21,21},{24,22}},{{21,23},{24,24}}
+};
+static const ElemCat elem_d1 = { 0, 1, 2, 116, elem_d1_data };
+
+static const Block elem_d2_data[] = {
+ {{ 0, 0},{ 3, 3}},{{ 4, 4},{ 7, 7}},{{ 4, 0},{ 7, 3}},{{ 0, 4},{ 3, 7}},
+ {{ 8, 0},{11, 3}},{{12, 4},{15, 7}},{{12, 0},{15, 3}},{{ 8, 4},{11, 7}},
+ {{16, 0},{19, 3}},{{20, 4},{23, 7}},{{20, 0},{23, 3}},{{16, 4},{19, 7}},
+ {{24, 0},{27, 3}},{{28, 4},{31, 7}},{{28, 0},{31, 3}},{{24, 4},{27, 7}},
+ {{ 0, 8},{ 3,11}},{{ 4,12},{ 7,15}},{{ 4, 8},{ 7,11}},{{ 0,12},{ 3,15}},
+ {{ 8, 8},{11,11}},{{12,12},{15,15}},{{12, 8},{15,11}},{{ 8,12},{11,15}},
+ {{16, 8},{19,11}},{{20,12},{23,15}},{{20, 8},{23,11}},{{16,12},{19,15}},
+ {{24, 8},{27,11}},{{28,12},{31,15}},{{28, 8},{31,11}},{{24,12},{27,15}},
+ {{ 0,16},{ 3,19}},{{ 4,20},{ 7,23}},{{ 4,16},{ 7,19}},{{ 0,20},{ 3,23}},
+ {{ 8,16},{11,19}},{{12,20},{15,23}},{{12,16},{15,19}},{{ 8,20},{11,23}},
+ {{16,16},{19,19}},{{20,20},{23,23}},{{20,16},{23,19}},{{16,20},{19,23}},
+ {{24,16},{27,19}},{{28,20},{31,23}},{{28,16},{31,19}},{{24,20},{27,23}},
+ {{ 0,24},{ 3,27}},{{ 4,28},{ 7,31}},{{ 4,24},{ 7,27}},{{ 0,28},{ 3,31}},
+ {{ 8,24},{11,27}},{{12,28},{15,31}},{{12,24},{15,27}},{{ 8,28},{11,31}},
+ {{16,24},{19,27}},{{20,28},{23,31}},{{20,24},{23,27}},{{16,28},{19,31}},
+ {{24,24},{27,27}},{{28,28},{31,31}},{{28,24},{31,27}},{{24,28},{27,31}},
+ {{ 4, 4},{ 7, 7}},{{ 8, 8},{11,11}},{{ 8, 4},{11, 7}},{{ 4, 8},{ 7,11}},
+ {{12, 4},{15, 7}},{{16, 8},{19,11}},{{16, 4},{19, 7}},{{12, 8},{15,11}},
+ {{20, 4},{23, 7}},{{24, 8},{27,11}},{{24, 4},{27, 7}},{{20, 8},{23,11}},
+ {{ 4,12},{ 7,15}},{{ 8,16},{11,19}},{{ 8,12},{11,15}},{{ 4,16},{ 7,19}},
+ {{12,12},{15,15}},{{16,16},{19,19}},{{16,12},{19,15}},{{12,16},{15,19}},
+ {{20,12},{23,15}},{{24,16},{27,19}},{{24,12},{27,15}},{{20,16},{23,19}},
+ {{ 4,20},{ 7,23}},{{ 8,24},{11,27}},{{ 8,20},{11,23}},{{ 4,24},{ 7,27}},
+ {{12,20},{15,23}},{{16,24},{19,27}},{{16,20},{19,23}},{{12,24},{15,27}},
+ {{20,20},{23,23}},{{24,24},{27,27}},{{24,20},{27,23}},{{20,24},{23,27}}
+};
+static const ElemCat elem_d2 = { 0, 2, 4, 25, elem_d2_data };
+
+static const Block elem_d3_data[] = {
+ {{ 1, 1},{10,10}},{{11, 1},{20,10}},
+ {{ 1, 1},{10,10}},{{21, 1},{30,10}},
+ {{ 1, 1},{10,10}},{{ 1,11},{10,20}},
+ {{ 1, 1},{10,10}},{{11,11},{20,20}},
+ {{ 1, 1},{10,10}},{{21,11},{30,20}},
+ {{ 1, 1},{10,10}},{{ 1,21},{10,30}},
+ {{ 1, 1},{10,10}},{{11,21},{20,30}},
+ {{ 1, 1},{10,10}},{{21,21},{30,30}},
+ {{11, 1},{20,10}},{{21, 1},{30,10}},
+ {{11, 1},{20,10}},{{ 1,11},{10,20}},
+ {{11, 1},{20,10}},{{11,11},{20,20}},
+ {{11, 1},{20,10}},{{21,11},{30,20}},
+ {{11, 1},{20,10}},{{ 1,21},{10,30}},
+ {{11, 1},{20,10}},{{11,21},{20,30}},
+ {{11, 1},{20,10}},{{21,21},{30,30}},
+ {{21, 1},{30,10}},{{ 1,11},{10,20}},
+ {{21, 1},{30,10}},{{11,11},{20,20}},
+ {{21, 1},{30,10}},{{21,11},{30,20}},
+ {{21, 1},{30,10}},{{ 1,21},{10,30}},
+ {{21, 1},{30,10}},{{11,21},{20,30}},
+ {{21, 1},{30,10}},{{21,21},{30,30}},
+ {{ 1,11},{10,20}},{{11,11},{20,20}},
+ {{ 1,11},{10,20}},{{21,11},{30,20}},
+ {{ 1,11},{10,20}},{{ 1,21},{10,30}},
+ {{ 1,11},{10,20}},{{11,21},{20,30}},
+ {{ 1,11},{10,20}},{{21,21},{30,30}},
+ {{11,11},{20,20}},{{21,11},{30,20}},
+ {{11,11},{20,20}},{{ 1,21},{10,30}},
+ {{11,11},{20,20}},{{11,21},{20,30}},
+ {{11,11},{20,20}},{{21,21},{30,30}},
+ {{21,11},{30,20}},{{ 1,21},{10,30}},
+ {{21,11},{30,20}},{{11,21},{20,30}},
+ {{21,11},{30,20}},{{21,21},{30,30}},
+ {{ 1,21},{10,30}},{{11,21},{20,30}},
+ {{ 1,21},{10,30}},{{21,21},{30,30}},
+ {{11,21},{20,30}},{{21,21},{30,30}}
+};
+static const ElemCat elem_d3 = { 0, 1, 2, 36, elem_d3_data };
+
+static const Block elem_d4_data[] = {
+ {{ 7,13},{12,18}},{{19,13},{24,18}},
+ {{13, 7},{18,12}},{{13,19},{18,24}},
+ {{ 7, 7},{12,12}},{{19,19},{24,24}},
+ {{19, 7},{24,12}},{{ 7,19},{12,24}},
+ {{13, 7},{18,12}},{{19,13},{24,18}},
+ {{19,13},{24,18}},{{13,19},{18,24}},
+ {{13,19},{18,24}},{{ 7,13},{12,18}},
+ {{ 7,13},{12,18}},{{13, 7},{18,12}},
+ {{ 7, 7},{12,12}},{{19, 7},{24,12}},
+ {{19, 7},{24,12}},{{19,19},{24,24}},
+ {{19,19},{24,24}},{{ 7,19},{12,24}},
+ {{ 7,19},{12,24}},{{ 7, 7},{12,12}},
+ {{13,13},{18,18}},{{13, 1},{18, 6}},
+ {{13,13},{18,18}},{{25,13},{30,18}},
+ {{13,13},{18,18}},{{13,25},{18,30}},
+ {{13,13},{18,18}},{{ 1,13},{ 6,18}},
+ {{13, 1},{18, 6}},{{13,25},{18,30}},
+ {{ 1,13},{ 6,18}},{{25,13},{30,18}},
+ {{ 7, 1},{12, 6}},{{19, 1},{24, 6}},
+ {{ 7,25},{12,30}},{{19,25},{24,30}},
+ {{ 1, 7},{ 6,12}},{{ 1,19},{ 6,24}},
+ {{25, 7},{30,12}},{{25,19},{30,24}},
+ {{ 7, 1},{12, 6}},{{ 1, 7},{ 6,12}},
+ {{19, 1},{24, 6}},{{25, 7},{30,12}},
+ {{25,19},{30,24}},{{19,25},{24,30}},
+ {{ 1,19},{ 6,24}},{{ 7,25},{12,30}},
+ {{ 1, 1},{ 6, 6}},{{25, 1},{30, 6}},
+ {{25, 1},{30, 6}},{{25,25},{30,30}},
+ {{25,25},{30,30}},{{ 1,25},{ 6,30}},
+ {{ 1,25},{ 6,30}},{{ 1, 1},{ 6, 6}}
+};
+static const ElemCat elem_d4 = { 0, 1, 2, 30, elem_d4_data };
+
+static const Block elem_d5_data[] = {
+ {{ 1, 1},{10, 3}},{{ 1, 4},{ 3, 7}},{{ 8, 4},{10, 7}},{{ 1, 8},{10,10}},{{ 4, 4},{ 7, 7}},
+ {{11, 1},{20, 3}},{{11, 4},{13, 7}},{{18, 4},{20, 7}},{{11, 8},{20,10}},{{14, 4},{17, 7}},
+ {{21, 1},{30, 3}},{{21, 4},{23, 7}},{{28, 4},{30, 7}},{{21, 8},{30,10}},{{24, 4},{27, 7}},
+ {{ 1,11},{10,13}},{{ 1,14},{ 3,17}},{{ 8,14},{10,17}},{{ 1,18},{10,20}},{{ 4,14},{ 7,17}},
+ {{11,11},{20,13}},{{11,14},{13,17}},{{18,14},{20,17}},{{11,18},{20,20}},{{14,14},{17,17}},
+ {{21,11},{30,13}},{{21,14},{23,17}},{{28,14},{30,17}},{{21,18},{30,20}},{{24,14},{27,17}},
+ {{ 1,21},{10,23}},{{ 1,24},{ 3,27}},{{ 8,24},{10,27}},{{ 1,28},{10,30}},{{ 4,24},{ 7,27}},
+ {{11,21},{20,23}},{{11,24},{13,27}},{{18,24},{20,27}},{{11,28},{20,30}},{{14,24},{17,27}},
+ {{21,21},{30,23}},{{21,24},{23,27}},{{28,24},{30,27}},{{21,28},{30,30}},{{24,24},{27,27}},
+ {{ 6, 6},{15, 8}},{{ 6, 9},{ 8,12}},{{13, 9},{15,12}},{{ 6,13},{15,15}},{{ 9, 9},{12,12}},
+ {{16, 6},{25, 8}},{{16, 9},{18,12}},{{23, 9},{25,12}},{{16,13},{25,15}},{{19, 9},{22,12}},
+ {{ 6,16},{15,18}},{{ 6,19},{ 8,22}},{{13,19},{15,22}},{{ 6,23},{15,25}},{{ 9,19},{12,22}},
+ {{16,16},{25,18}},{{16,19},{18,22}},{{23,19},{25,22}},{{16,23},{25,25}},{{19,19},{22,22}},
+ {{ 6, 1},{15, 3}},{{ 6, 4},{ 8, 7}},{{13, 4},{15, 7}},{{ 6, 8},{15,10}},{{ 9, 4},{12, 7}},
+ {{16, 1},{25, 3}},{{16, 4},{18, 7}},{{23, 4},{25, 7}},{{16, 8},{25,10}},{{19, 4},{22, 7}},
+ {{ 1, 6},{10, 8}},{{ 1, 9},{ 3,12}},{{ 8, 9},{10,12}},{{ 1,13},{10,15}},{{ 4, 9},{ 7,12}},
+ {{11, 6},{20, 8}},{{11, 9},{13,12}},{{18, 9},{20,12}},{{11,13},{20,15}},{{14, 9},{17,12}},
+ {{21, 6},{30, 8}},{{21, 9},{23,12}},{{28, 9},{30,12}},{{21,13},{30,15}},{{24, 9},{27,12}},
+ {{ 6,11},{15,13}},{{ 6,14},{ 8,17}},{{13,14},{15,17}},{{ 6,18},{15,20}},{{ 9,14},{12,17}},
+ {{16,11},{25,13}},{{16,14},{18,17}},{{23,14},{25,17}},{{16,18},{25,20}},{{19,14},{22,17}},
+ {{ 1,16},{10,18}},{{ 1,19},{ 3,22}},{{ 8,19},{10,22}},{{ 1,23},{10,25}},{{ 4,19},{ 7,22}},
+ {{11,16},{20,18}},{{11,19},{13,22}},{{18,19},{20,22}},{{11,23},{20,25}},{{14,19},{17,22}},
+ {{21,16},{30,18}},{{21,19},{23,22}},{{28,19},{30,22}},{{21,23},{30,25}},{{24,19},{27,22}},
+ {{ 6,21},{15,23}},{{ 6,24},{ 8,27}},{{13,24},{15,27}},{{ 6,28},{15,30}},{{ 9,24},{12,27}},
+ {{16,21},{25,23}},{{16,24},{18,27}},{{23,24},{25,27}},{{16,28},{25,30}},{{19,24},{22,27}},
+ {{ 2, 2},{14, 6}},{{ 2, 7},{ 6, 9}},{{10, 7},{14, 9}},{{ 2,10},{14,14}},{{ 7, 7},{ 9, 9}},
+ {{ 7, 2},{19, 6}},{{ 7, 7},{11, 9}},{{15, 7},{19, 9}},{{ 7,10},{19,14}},{{12, 7},{14, 9}},
+ {{12, 2},{24, 6}},{{12, 7},{16, 9}},{{20, 7},{24, 9}},{{12,10},{24,14}},{{17, 7},{19, 9}},
+ {{17, 2},{29, 6}},{{17, 7},{21, 9}},{{25, 7},{29, 9}},{{17,10},{29,14}},{{22, 7},{24, 9}},
+ {{ 2, 7},{14,11}},{{ 2,12},{ 6,14}},{{10,12},{14,14}},{{ 2,15},{14,19}},{{ 7,12},{ 9,14}},
+ {{ 7, 7},{19,11}},{{ 7,12},{11,14}},{{15,12},{19,14}},{{ 7,15},{19,19}},{{12,12},{14,14}},
+ {{12, 7},{24,11}},{{12,12},{16,14}},{{20,12},{24,14}},{{12,15},{24,19}},{{17,12},{19,14}},
+ {{17, 7},{29,11}},{{17,12},{21,14}},{{25,12},{29,14}},{{17,15},{29,19}},{{22,12},{24,14}},
+ {{ 2,12},{14,16}},{{ 2,17},{ 6,19}},{{10,17},{14,19}},{{ 2,20},{14,24}},{{ 7,17},{ 9,19}},
+ {{ 7,12},{19,16}},{{ 7,17},{11,19}},{{15,17},{19,19}},{{ 7,20},{19,24}},{{12,17},{14,19}},
+ {{12,12},{24,16}},{{12,17},{16,19}},{{20,17},{24,19}},{{12,20},{24,24}},{{17,17},{19,19}},
+ {{17,12},{29,16}},{{17,17},{21,19}},{{25,17},{29,19}},{{17,20},{29,24}},{{22,17},{24,19}},
+ {{ 2,17},{14,21}},{{ 2,22},{ 6,24}},{{10,22},{14,24}},{{ 2,25},{14,29}},{{ 7,22},{ 9,24}},
+ {{ 7,17},{19,21}},{{ 7,22},{11,24}},{{15,22},{19,24}},{{ 7,25},{19,29}},{{12,22},{14,24}},
+ {{12,17},{24,21}},{{12,22},{16,24}},{{20,22},{24,24}},{{12,25},{24,29}},{{17,22},{19,24}},
+ {{17,17},{29,21}},{{17,22},{21,24}},{{25,22},{29,24}},{{17,25},{29,29}},{{22,22},{24,24}},
+ {{ 8, 3},{13, 4}},{{ 8, 5},{ 9, 6}},{{12, 5},{13, 6}},{{ 8, 7},{13, 8}},{{10, 5},{11, 6}},
+ {{13, 3},{18, 4}},{{13, 5},{14, 6}},{{17, 5},{18, 6}},{{13, 7},{18, 8}},{{15, 5},{16, 6}},
+ {{18, 3},{23, 4}},{{18, 5},{19, 6}},{{22, 5},{23, 6}},{{18, 7},{23, 8}},{{20, 5},{21, 6}},
+ {{ 3, 8},{ 8, 9}},{{ 3,10},{ 4,11}},{{ 7,10},{ 8,11}},{{ 3,12},{ 8,13}},{{ 5,10},{ 6,11}},
+ {{ 8, 8},{13, 9}},{{ 8,10},{ 9,11}},{{12,10},{13,11}},{{ 8,12},{13,13}},{{10,10},{11,11}},
+ {{13, 8},{18, 9}},{{13,10},{14,11}},{{17,10},{18,11}},{{13,12},{18,13}},{{15,10},{16,11}},
+ {{18, 8},{23, 9}},{{18,10},{19,11}},{{22,10},{23,11}},{{18,12},{23,13}},{{20,10},{21,11}},
+ {{23, 8},{28, 9}},{{23,10},{24,11}},{{27,10},{28,11}},{{23,12},{28,13}},{{25,10},{26,11}},
+ {{ 3,13},{ 8,14}},{{ 3,15},{ 4,16}},{{ 7,15},{ 8,16}},{{ 3,17},{ 8,18}},{{ 5,15},{ 6,16}},
+ {{ 8,13},{13,14}},{{ 8,15},{ 9,16}},{{12,15},{13,16}},{{ 8,17},{13,18}},{{10,15},{11,16}},
+ {{13,13},{18,14}},{{13,15},{14,16}},{{17,15},{18,16}},{{13,17},{18,18}},{{15,15},{16,16}},
+ {{18,13},{23,14}},{{18,15},{19,16}},{{22,15},{23,16}},{{18,17},{23,18}},{{20,15},{21,16}},
+ {{23,13},{28,14}},{{23,15},{24,16}},{{27,15},{28,16}},{{23,17},{28,18}},{{25,15},{26,16}},
+ {{ 3,18},{ 8,19}},{{ 3,20},{ 4,21}},{{ 7,20},{ 8,21}},{{ 3,22},{ 8,23}},{{ 5,20},{ 6,21}},
+ {{ 8,18},{13,19}},{{ 8,20},{ 9,21}},{{12,20},{13,21}},{{ 8,22},{13,23}},{{10,20},{11,21}},
+ {{13,18},{18,19}},{{13,20},{14,21}},{{17,20},{18,21}},{{13,22},{18,23}},{{15,20},{16,21}},
+ {{18,18},{23,19}},{{18,20},{19,21}},{{22,20},{23,21}},{{18,22},{23,23}},{{20,20},{21,21}},
+ {{23,18},{28,19}},{{23,20},{24,21}},{{27,20},{28,21}},{{23,22},{28,23}},{{25,20},{26,21}},
+ {{ 8,23},{13,24}},{{ 8,25},{ 9,26}},{{12,25},{13,26}},{{ 8,27},{13,28}},{{10,25},{11,26}},
+ {{13,23},{18,24}},{{13,25},{14,26}},{{17,25},{18,26}},{{13,27},{18,28}},{{15,25},{16,26}},
+ {{18,23},{23,24}},{{18,25},{19,26}},{{22,25},{23,26}},{{18,27},{23,28}},{{20,25},{21,26}}
+};
+static const ElemCat elem_d5 = { 0, 4, 5, 62, elem_d5_data };
+
+static const Block elem_d6_data[] = {
+ {{ 3, 5},{12,10}},{{ 5, 3},{10,12}},
+ {{11, 5},{20,10}},{{13, 3},{18,12}},
+ {{19, 5},{28,10}},{{21, 3},{26,12}},
+ {{ 3,13},{12,18}},{{ 5,11},{10,20}},
+ {{11,13},{20,18}},{{13,11},{18,20}},
+ {{19,13},{28,18}},{{21,11},{26,20}},
+ {{ 3,21},{12,26}},{{ 5,19},{10,28}},
+ {{11,21},{20,26}},{{13,19},{18,28}},
+ {{19,21},{28,26}},{{21,19},{26,28}}
+};
+static const ElemCat elem_d6 = { 0, 1, 2, 9, elem_d6_data };
+
+static const Block elem_d7_data[] = {
+ {{ 0, 4},{ 3, 7}},{{ 8, 4},{11, 7}},{{ 4, 4},{ 7, 7}},
+ {{ 4, 0},{ 7, 3}},{{ 4, 8},{ 7,11}},{{ 4, 4},{ 7, 7}},
+ {{ 5, 4},{ 8, 7}},{{13, 4},{16, 7}},{{ 9, 4},{12, 7}},
+ {{ 9, 0},{12, 3}},{{ 9, 8},{12,11}},{{ 9, 4},{12, 7}},
+ {{10, 4},{13, 7}},{{18, 4},{21, 7}},{{14, 4},{17, 7}},
+ {{14, 0},{17, 3}},{{14, 8},{17,11}},{{14, 4},{17, 7}},
+ {{15, 4},{18, 7}},{{23, 4},{26, 7}},{{19, 4},{22, 7}},
+ {{19, 0},{22, 3}},{{19, 8},{22,11}},{{19, 4},{22, 7}},
+ {{20, 4},{23, 7}},{{28, 4},{31, 7}},{{24, 4},{27, 7}},
+ {{24, 0},{27, 3}},{{24, 8},{27,11}},{{24, 4},{27, 7}},
+ {{ 0, 9},{ 3,12}},{{ 8, 9},{11,12}},{{ 4, 9},{ 7,12}},
+ {{ 4, 5},{ 7, 8}},{{ 4,13},{ 7,16}},{{ 4, 9},{ 7,12}},
+ {{ 5, 9},{ 8,12}},{{13, 9},{16,12}},{{ 9, 9},{12,12}},
+ {{ 9, 5},{12, 8}},{{ 9,13},{12,16}},{{ 9, 9},{12,12}},
+ {{10, 9},{13,12}},{{18, 9},{21,12}},{{14, 9},{17,12}},
+ {{14, 5},{17, 8}},{{14,13},{17,16}},{{14, 9},{17,12}},
+ {{15, 9},{18,12}},{{23, 9},{26,12}},{{19, 9},{22,12}},
+ {{19, 5},{22, 8}},{{19,13},{22,16}},{{19, 9},{22,12}},
+ {{20, 9},{23,12}},{{28, 9},{31,12}},{{24, 9},{27,12}},
+ {{24, 5},{27, 8}},{{24,13},{27,16}},{{24, 9},{27,12}},
+ {{ 0,14},{ 3,17}},{{ 8,14},{11,17}},{{ 4,14},{ 7,17}},
+ {{ 4,10},{ 7,13}},{{ 4,18},{ 7,21}},{{ 4,14},{ 7,17}},
+ {{ 5,14},{ 8,17}},{{13,14},{16,17}},{{ 9,14},{12,17}},
+ {{ 9,10},{12,13}},{{ 9,18},{12,21}},{{ 9,14},{12,17}},
+ {{10,14},{13,17}},{{18,14},{21,17}},{{14,14},{17,17}},
+ {{14,10},{17,13}},{{14,18},{17,21}},{{14,14},{17,17}},
+ {{15,14},{18,17}},{{23,14},{26,17}},{{19,14},{22,17}},
+ {{19,10},{22,13}},{{19,18},{22,21}},{{19,14},{22,17}},
+ {{20,14},{23,17}},{{28,14},{31,17}},{{24,14},{27,17}},
+ {{24,10},{27,13}},{{24,18},{27,21}},{{24,14},{27,17}},
+ {{ 0,19},{ 3,22}},{{ 8,19},{11,22}},{{ 4,19},{ 7,22}},
+ {{ 4,15},{ 7,18}},{{ 4,23},{ 7,26}},{{ 4,19},{ 7,22}},
+ {{ 5,19},{ 8,22}},{{13,19},{16,22}},{{ 9,19},{12,22}},
+ {{ 9,15},{12,18}},{{ 9,23},{12,26}},{{ 9,19},{12,22}},
+ {{10,19},{13,22}},{{18,19},{21,22}},{{14,19},{17,22}},
+ {{14,15},{17,18}},{{14,23},{17,26}},{{14,19},{17,22}},
+ {{15,19},{18,22}},{{23,19},{26,22}},{{19,19},{22,22}},
+ {{19,15},{22,18}},{{19,23},{22,26}},{{19,19},{22,22}},
+ {{20,19},{23,22}},{{28,19},{31,22}},{{24,19},{27,22}},
+ {{24,15},{27,18}},{{24,23},{27,26}},{{24,19},{27,22}},
+ {{ 0,24},{ 3,27}},{{ 8,24},{11,27}},{{ 4,24},{ 7,27}},
+ {{ 4,20},{ 7,23}},{{ 4,28},{ 7,31}},{{ 4,24},{ 7,27}},
+ {{ 5,24},{ 8,27}},{{13,24},{16,27}},{{ 9,24},{12,27}},
+ {{ 9,20},{12,23}},{{ 9,28},{12,31}},{{ 9,24},{12,27}},
+ {{10,24},{13,27}},{{18,24},{21,27}},{{14,24},{17,27}},
+ {{14,20},{17,23}},{{14,28},{17,31}},{{14,24},{17,27}},
+ {{15,24},{18,27}},{{23,24},{26,27}},{{19,24},{22,27}},
+ {{19,20},{22,23}},{{19,28},{22,31}},{{19,24},{22,27}},
+ {{20,24},{23,27}},{{28,24},{31,27}},{{24,24},{27,27}},
+ {{24,20},{27,23}},{{24,28},{27,31}},{{24,24},{27,27}}
+};
+static const ElemCat elem_d7 = { 0, 2, 3, 50, elem_d7_data };
+
+static const Block elem_d8_data[] = {
+ {{ 0, 0},{ 7, 3}},{{ 0, 4},{ 7, 7}},
+ {{ 8, 0},{11, 7}},{{12, 0},{15, 7}},
+ {{ 0, 8},{ 3,15}},{{ 4, 8},{ 7,15}},
+ {{ 8, 8},{15,11}},{{ 8,12},{15,15}},
+ {{16, 0},{19, 7}},{{20, 0},{23, 7}},
+ {{24, 0},{31, 3}},{{24, 4},{31, 7}},
+ {{16, 8},{23,11}},{{16,12},{23,15}},
+ {{24, 8},{27,15}},{{28, 8},{31,15}},
+ {{ 0,16},{ 3,23}},{{ 4,16},{ 7,23}},
+ {{ 8,16},{15,19}},{{ 8,20},{15,23}},
+ {{ 0,24},{ 7,27}},{{ 0,28},{ 7,31}},
+ {{ 8,24},{11,31}},{{12,24},{15,31}},
+ {{16,16},{23,19}},{{16,20},{23,23}},
+ {{24,16},{27,23}},{{28,16},{31,23}},
+ {{16,24},{19,31}},{{20,24},{23,31}},
+ {{24,24},{31,27}},{{24,28},{31,31}},
+ {{ 0, 0},{ 7,15}},{{ 8, 0},{15,15}},
+ {{16, 0},{31, 7}},{{16, 8},{31,15}},
+ {{ 0,16},{15,23}},{{ 0,24},{15,31}},
+ {{16,16},{23,31}},{{24,16},{31,31}}
+};
+static const ElemCat elem_d8 = { 0, 1, 2, 20, elem_d8_data };
+
+static const ElemCat* elements[ELEMENT_COUNT] = { &elem_a1, &elem_a2,
+ &elem_d1, &elem_d2, &elem_d3, &elem_d4,
+ &elem_d5, &elem_d6, &elem_d7, &elem_d8 };
+#endif /* AVFILTER_SIGNATURE_H */
diff --git a/libavfilter/signature_lookup.c b/libavfilter/signature_lookup.c
new file mode 100644
index 0000000000..5bc2904409
--- /dev/null
+++ b/libavfilter/signature_lookup.c
@@ -0,0 +1,573 @@
+/*
+ * Copyright (c) 2017 Gerion Entrup
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/**
+ * @file
+ * MPEG-7 video signature calculation and lookup filter
+ */
+
+#include "signature.h"
+
+#define HOUGH_MAX_OFFSET 90
+#define MAX_FRAMERATE 60
+
+#define DIR_PREV 0
+#define DIR_NEXT 1
+#define DIR_PREV_END 2
+#define DIR_NEXT_END 3
+
+#define STATUS_NULL 0
+#define STATUS_END_REACHED 1
+#define STATUS_BEGIN_REACHED 2
+
+static void fill_l1distlut(uint8_t lut[])
+{
+ int i, j, tmp_i, tmp_j,count;
+ uint8_t dist;
+
+ for (i = 0, count = 0; i < 242; i++) {
+ for (j = i + 1; j < 243; j++, count++) {
+ /* ternary distance between i and j */
+ dist = 0;
+ tmp_i = i; tmp_j = j;
+ do {
+ dist += FFABS((tmp_j % 3) - (tmp_i % 3));
+ tmp_j /= 3;
+ tmp_i /= 3;
+ } while (tmp_i > 0 || tmp_j > 0);
+ lut[count] = dist;
+ }
+ }
+}
+
+static unsigned int intersection_word(const uint8_t *first, const uint8_t *second)
+{
+ unsigned int val=0,i;
+ for (i = 0; i < 28; i += 4) {
+ val += av_popcount( (first[i] & second[i] ) << 24 |
+ (first[i+1] & second[i+1]) << 16 |
+ (first[i+2] & second[i+2]) << 8 |
+ (first[i+3] & second[i+3]) );
+ }
+ val += av_popcount( (first[28] & second[28]) << 16 |
+ (first[29] & second[29]) << 8 |
+ (first[30] & second[30]) );
+ return val;
+}
+
+static unsigned int union_word(const uint8_t *first, const uint8_t *second)
+{
+ unsigned int val=0,i;
+ for (i = 0; i < 28; i += 4) {
+ val += av_popcount( (first[i] | second[i] ) << 24 |
+ (first[i+1] | second[i+1]) << 16 |
+ (first[i+2] | second[i+2]) << 8 |
+ (first[i+3] | second[i+3]) );
+ }
+ val += av_popcount( (first[28] | second[28]) << 16 |
+ (first[29] | second[29]) << 8 |
+ (first[30] | second[30]) );
+ return val;
+}
+
+static unsigned int get_l1dist(AVFilterContext *ctx, SignatureContext *sc, const uint8_t *first, const uint8_t *second)
+{
+ unsigned int i;
+ unsigned int dist = 0;
+ uint8_t f, s;
+
+ for (i = 0; i < SIGELEM_SIZE/5; i++) {
+ if (first[i] != second[i]) {
+ f = first[i];
+ s = second[i];
+ if (f > s) {
+ /* little variation of gauss sum formula */
+ dist += sc->l1distlut[243*242/2 - (243-s)*(242-s)/2 + f - s - 1];
+ } else {
+ dist += sc->l1distlut[243*242/2 - (243-f)*(242-f)/2 + s - f - 1];
+ }
+ }
+ }
+ return dist;
+}
+
+/**
+ * calculates the jaccard distance and evaluates a pair of coarse signatures as good
+ * @return 0 if pair is bad, 1 otherwise
+ */
+static int get_jaccarddist(SignatureContext *sc, CoarseSignature *first, CoarseSignature *second)
+{
+ int jaccarddist, i, composdist = 0, cwthcount = 0;
+ for (i = 0; i < 5; i++) {
+ if ((jaccarddist = intersection_word(first->data[i], second->data[i])) > 0) {
+ jaccarddist /= union_word(first->data[i], second->data[i]);
+ }
+ if (jaccarddist >= sc->thworddist) {
+ if (++cwthcount > 2) {
+ /* more than half (5/2) of distances are too wide */
+ return 0;
+ }
+ }
+ composdist += jaccarddist;
+ if (composdist > sc->thcomposdist) {
+ return 0;
+ }
+ }
+ return 1;
+}
+
+/**
+ * step through the coarsesignatures as long as a good candidate is found
+ * @return 0 if no candidate is found, 1 otherwise
+ */
+static int find_next_coarsecandidate(SignatureContext *sc, CoarseSignature *secondstart, CoarseSignature **first, CoarseSignature **second, int start)
+{
+ /* go one coarsesignature foreword */
+ if (!start) {
+ if ((*second)->next) {
+ *second = (*second)->next;
+ } else if ((*first)->next) {
+ *second = secondstart;
+ *first = (*first)->next;
+ } else {
+ return 0;
+ }
+ }
+
+ while (1) {
+ if (get_jaccarddist(sc, *first, *second))
+ return 1;
+
+ /* next signature */
+ if ((*second)->next) {
+ *second = (*second)->next;
+ } else if ((*first)->next) {
+ *second = secondstart;
+ *first = (*first)->next;
+ } else {
+ return 0;
+ }
+ }
+}
+
+/**
+ * compares framesignatures and sorts out signatures with a l1 distance above a given threshold.
+ * Then tries to find out offset and differences between framerates with a hough transformation
+ */
+static MatchingInfo* get_matching_parameters(AVFilterContext *ctx, SignatureContext *sc, FineSignature *first, FineSignature *second)
+{
+ FineSignature *f, *s;
+ size_t i, j, k, l, hmax = 0, score;
+ int framerate, offset, l1dist;
+ double m;
+ MatchingInfo *cands = NULL, *c = NULL;
+
+ struct {
+ uint8_t size;
+ unsigned int dist;
+ FineSignature *a;
+ uint8_t b_pos[COARSE_SIZE];
+ FineSignature *b[COARSE_SIZE];
+ } pairs[COARSE_SIZE];
+
+ typedef struct {
+ int dist;
+ size_t score;
+ FineSignature *a;
+ FineSignature *b;
+ } hspace_elem;
+
+ /* houghspace */
+ hspace_elem** hspace = av_malloc_array(MAX_FRAMERATE, sizeof(hspace_elem *));
+
+ /* initialize houghspace */
+ for (i = 0; i < MAX_FRAMERATE; i++) {
+ hspace[i] = av_malloc_array(2 * HOUGH_MAX_OFFSET + 1, sizeof(hspace_elem));
+ for (j = 0; j < HOUGH_MAX_OFFSET; j++) {
+ hspace[i][j].score = 0;
+ hspace[i][j].dist = 99999;
+ }
+ }
+
+ /* l1 distances */
+ for (i = 0, f = first; i < COARSE_SIZE && f->next; i++, f = f->next) {
+ pairs[i].size = 0;
+ pairs[i].dist = 99999;
+ pairs[i].a = f;
+ for (j = 0, s = second; j < COARSE_SIZE && s->next; j++, s = s->next) {
+ /* l1 distance of finesignature */
+ l1dist = get_l1dist(ctx, sc, f->framesig, s->framesig);
+ if (l1dist < sc->thl1) {
+ if (l1dist < pairs[i].dist) {
+ pairs[i].size = 1;
+ pairs[i].dist = l1dist;
+ pairs[i].b_pos[0] = j;
+ pairs[i].b[0] = s;
+ } else if (l1dist == pairs[i].dist) {
+ pairs[i].b[pairs[i].size] = s;
+ pairs[i].b_pos[pairs[i].size] = j;
+ pairs[i].size++;
+ }
+ }
+ }
+ }
+ /* last incomplete coarsesignature */
+ if (f->next == NULL) {
+ for (; i < COARSE_SIZE; i++) {
+ pairs[i].size = 0;
+ pairs[i].dist = 99999;
+ }
+ }
+
+ /* hough transformation */
+ for (i = 0; i < COARSE_SIZE; i++) {
+ for (j = 0; j < pairs[i].size; j++) {
+ for (k = i + 1; k < COARSE_SIZE; k++) {
+ for (l = 0; l < pairs[k].size; l++) {
+ if (pairs[i].b[j] != pairs[k].b[l]) {
+ /* linear regression */
+ m = (pairs[k].b_pos[l]-pairs[i].b_pos[j]) / (k-i); /* good value between 0.0 - 2.0 */
+ framerate = (int) m*30 + 0.5; /* round up to 0 - 60 */
+ if (framerate>0 && framerate <= MAX_FRAMERATE) {
+ offset = pairs[i].b_pos[j] - ((int) m*i + 0.5); /* only second part has to be rounded up */
+ if (offset > -HOUGH_MAX_OFFSET && offset < HOUGH_MAX_OFFSET) {
+ if (pairs[i].dist < pairs[k].dist) {
+ if (pairs[i].dist < hspace[framerate-1][offset+HOUGH_MAX_OFFSET].dist) {
+ hspace[framerate-1][offset+HOUGH_MAX_OFFSET].dist = pairs[i].dist;
+ hspace[framerate-1][offset+HOUGH_MAX_OFFSET].a = pairs[i].a;
+ hspace[framerate-1][offset+HOUGH_MAX_OFFSET].b = pairs[i].b[j];
+ }
+ } else {
+ if (pairs[k].dist < hspace[framerate-1][offset+HOUGH_MAX_OFFSET].dist) {
+ hspace[framerate-1][offset+HOUGH_MAX_OFFSET].dist = pairs[k].dist;
+ hspace[framerate-1][offset+HOUGH_MAX_OFFSET].a = pairs[k].a;
+ hspace[framerate-1][offset+HOUGH_MAX_OFFSET].b = pairs[k].b[l];
+ }
+ }
+
+ score = hspace[framerate-1][offset+HOUGH_MAX_OFFSET].score + 1;
+ if (score > hmax )
+ hmax = score;
+ hspace[framerate-1][offset+HOUGH_MAX_OFFSET].score = score;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ if (hmax > 0) {
+ hmax = (int) (0.7*hmax);
+ for (i = 0; i < MAX_FRAMERATE; i++) {
+ for (j = 0; j < HOUGH_MAX_OFFSET; j++) {
+ if (hmax < hspace[i][j].score) {
+ if (c == NULL) {
+ c = av_malloc(sizeof(MatchingInfo));
+ if (!c)
+ av_log(ctx, AV_LOG_FATAL, "Could not allocate memory");
+ cands = c;
+ } else {
+ c->next = av_malloc(sizeof(MatchingInfo));
+ if (!c->next)
+ av_log(ctx, AV_LOG_FATAL, "Could not allocate memory");
+ c = c->next;
+ }
+ c->framerateratio = (i+1.0) / 30;
+ c->score = hspace[i][j].score;
+ c->offset = j-90;
+ c->first = hspace[i][j].a;
+ c->second = hspace[i][j].b;
+ c->next = NULL;
+
+ /* not used */
+ c->meandist = 0;
+ c->matchframes = 0;
+ c->whole = 0;
+ }
+ }
+ }
+ }
+ for (i = 0; i < MAX_FRAMERATE; i++) {
+ av_freep(&hspace[i]);
+ }
+ av_freep(&hspace);
+ return cands;
+}
+
+static int iterate_frame(double frr, FineSignature **a, FineSignature **b, int fcount, int *bcount, int dir)
+{
+ int step;
+
+ /* between 1 and 2, because frr is between 1 and 2 */
+ step = ((int) 0.5 + fcount * frr) /* current frame */
+ -((int) 0.5 + (fcount-1) * frr);/* last frame */
+
+ if (dir == DIR_NEXT) {
+ if (frr >= 1.0) {
+ if ((*a)->next) {
+ *a = (*a)->next;
+ } else {
+ return DIR_NEXT_END;
+ }
+
+ if (step == 1) {
+ if ((*b)->next) {
+ *b = (*b)->next;
+ (*bcount)++;
+ } else {
+ return DIR_NEXT_END;
+ }
+ } else {
+ if ((*b)->next && (*b)->next->next) {
+ *b = (*b)->next->next;
+ (*bcount)++;
+ } else {
+ return DIR_NEXT_END;
+ }
+ }
+ } else {
+ if ((*b)->next) {
+ *b = (*b)->next;
+ (*bcount)++;
+ } else {
+ return DIR_NEXT_END;
+ }
+
+ if (step == 1) {
+ if ((*a)->next) {
+ *a = (*a)->next;
+ } else {
+ return DIR_NEXT_END;
+ }
+ } else {
+ if ((*a)->next && (*a)->next->next) {
+ *a = (*a)->next->next;
+ } else {
+ return DIR_NEXT_END;
+ }
+ }
+ }
+ return DIR_NEXT;
+ } else {
+ if (frr >= 1.0) {
+ if ((*a)->prev) {
+ *a = (*a)->prev;
+ } else {
+ return DIR_PREV_END;
+ }
+
+ if (step == 1) {
+ if ((*b)->prev) {
+ *b = (*b)->prev;
+ (*bcount)++;
+ } else {
+ return DIR_PREV_END;
+ }
+ } else {
+ if ((*b)->prev && (*b)->prev->prev) {
+ *b = (*b)->prev->prev;
+ (*bcount)++;
+ } else {
+ return DIR_PREV_END;
+ }
+ }
+ } else {
+ if ((*b)->prev) {
+ *b = (*b)->prev;
+ (*bcount)++;
+ } else {
+ return DIR_PREV_END;
+ }
+
+ if (step == 1) {
+ if ((*a)->prev) {
+ *a = (*a)->prev;
+ } else {
+ return DIR_PREV_END;
+ }
+ } else {
+ if ((*a)->prev && (*a)->prev->prev) {
+ *a = (*a)->prev->prev;
+ } else {
+ return DIR_PREV_END;
+ }
+ }
+ }
+ return DIR_PREV;
+ }
+}
+
+static MatchingInfo evaluate_parameters(AVFilterContext *ctx, SignatureContext *sc, MatchingInfo *infos, MatchingInfo bestmatch, int mode)
+{
+ int dist, distsum = 0, bcount = 1, dir = DIR_NEXT;
+ int fcount = 0, goodfcount = 0, gooda = 0, goodb = 0;
+ double meandist, minmeandist = bestmatch.meandist;
+ int tolerancecount = 0;
+ FineSignature *a, *b, *aprev, *bprev;
+ int status = STATUS_NULL;
+
+ for (; infos != NULL; infos = infos->next) {
+ a = infos->first;
+ b = infos->second;
+ while (1) {
+ dist = get_l1dist(ctx, sc, a->framesig, b->framesig);
+
+ if (dist > sc->thl1) {
+ if (a->confidence >= 1 || b->confidence >= 1) {
+ /* bad frame (because high different information) */
+ tolerancecount++;
+ }
+
+ if (tolerancecount > 2) {
+ a = aprev;
+ b = bprev;
+ if (dir == DIR_NEXT) {
+ /* turn around */
+ a = infos->first;
+ b = infos->second;
+ dir = DIR_PREV;
+ } else {
+ break;
+ }
+ }
+ } else {
+ /* good frame */
+ distsum += dist;
+ goodfcount++;
+ tolerancecount=0;
+
+ aprev = a;
+ bprev = b;
+
+ if (a->confidence < 1) gooda++;
+ if (b->confidence < 1) goodb++;
+ }
+
+ fcount++;
+
+ dir = iterate_frame(infos->framerateratio, &a, &b, fcount, &bcount, dir);
+ if (dir == DIR_NEXT_END) {
+ status = STATUS_END_REACHED;
+ a = infos->first;
+ b = infos->second;
+ dir = iterate_frame(infos->framerateratio, &a, &b, fcount, &bcount, DIR_PREV);
+ }
+
+ if (dir == DIR_PREV_END) {
+ status |= STATUS_BEGIN_REACHED;
+ break;
+ }
+
+ if (sc->thdi != 0 && bcount >= sc->thdi) {
+ break; /* enough frames found */
+ }
+ }
+
+ if (bcount < sc->thdi)
+ continue; /* matching sequence is too short */
+ if ((double) goodfcount / (double) fcount < sc->thit)
+ continue;
+ if ((double) goodfcount*0.5 < FFMAX(gooda, goodb))
+ continue;
+
+ meandist = (double) goodfcount / (double) distsum;
+
+ if (meandist < minmeandist ||
+ status == STATUS_END_REACHED | STATUS_BEGIN_REACHED ||
+ mode == MODE_FAST){
+ minmeandist = meandist;
+ /* bestcandidate in this iteration */
+ bestmatch.meandist = meandist;
+ bestmatch.matchframes = bcount;
+ bestmatch.framerateratio = infos->framerateratio;
+ bestmatch.score = infos->score;
+ bestmatch.offset = infos->offset;
+ bestmatch.first = infos->first;
+ bestmatch.second = infos->second;
+ bestmatch.whole = 0; /* will be set to true later */
+ bestmatch.next = NULL;
+ }
+
+ /* whole sequence is automatically best match */
+ if (status == (STATUS_END_REACHED | STATUS_BEGIN_REACHED)) {
+ bestmatch.whole = 1;
+ break;
+ }
+
+ /* first matching sequence is enough, finding the best one is not necessary */
+ if (mode == MODE_FAST) {
+ break;
+ }
+ }
+ return bestmatch;
+}
+
+static void sll_free(MatchingInfo *sll)
+{
+ void *tmp;
+ while (sll) {
+ tmp = sll;
+ sll = sll->next;
+ av_freep(&tmp);
+ }
+}
+
+static MatchingInfo lookup_signatures(AVFilterContext *ctx, SignatureContext *sc, StreamContext *first, StreamContext *second, int mode)
+{
+ CoarseSignature *cs, *cs2;
+ MatchingInfo *infos;
+ MatchingInfo bestmatch;
+ MatchingInfo *i;
+
+ cs = first->coarsesiglist;
+ cs2 = second->coarsesiglist;
+
+ /* score of bestmatch is 0, if no match is found */
+ bestmatch.score = 0;
+ bestmatch.meandist = 99999;
+ bestmatch.whole = 0;
+
+ fill_l1distlut(sc->l1distlut);
+
+ /* stage 1: coarsesignature matching */
+ if (find_next_coarsecandidate(sc, second->coarsesiglist, &cs, &cs2, 1) == 0)
+ return bestmatch; /* no candidate found */
+ do {
+ av_log(ctx, AV_LOG_DEBUG, "Stage 1: got coarsesignature pair. indices of first frame: %d and %d\n", cs->first->index, cs2->first->index);
+ /* stage 2: l1-distance and hough-transform */
+ av_log(ctx, AV_LOG_DEBUG, "Stage 2: calculate matching parameters\n");
+ infos = get_matching_parameters(ctx, sc, cs->first, cs2->first);
+ if (av_log_get_level() == AV_LOG_DEBUG) {
+ for (i = infos; i != NULL; i = i->next) {
+ av_log(ctx, AV_LOG_DEBUG, "Stage 2: matching pair at %d and %d, ratio %f, offset %d\n", i->first->index, i->second->index, i->framerateratio, i->offset);
+ }
+ }
+ /* stage 3: evaluation */
+ av_log(ctx, AV_LOG_DEBUG, "Stage 3: evaluate\n");
+ if (infos) {
+ bestmatch = evaluate_parameters(ctx, sc, infos, bestmatch, mode);
+ av_log(ctx, AV_LOG_DEBUG, "Stage 3: best matching pair at %d and %d, ratio %f, offset %d, score %d, %d frames matching\n", bestmatch.first->index, bestmatch.second->index, bestmatch.framerateratio, bestmatch.offset, bestmatch.score, bestmatch.matchframes);
+ sll_free(infos);
+ }
+ } while (find_next_coarsecandidate(sc, second->coarsesiglist, &cs, &cs2, 0) && !bestmatch.whole);
+ return bestmatch;
+
+}
diff --git a/libavfilter/split.c b/libavfilter/split.c
index 41395e7f48..b85a221353 100644
--- a/libavfilter/split.c
+++ b/libavfilter/split.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2007 Bobby Bingham
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -30,8 +30,12 @@
#include "libavutil/mem.h"
#include "libavutil/opt.h"
+#define FF_INTERNAL_FIELDS 1
+#include "framequeue.h"
+
#include "avfilter.h"
#include "audio.h"
+#include "formats.h"
#include "internal.h"
#include "video.h"
@@ -52,6 +56,8 @@ static av_cold int split_init(AVFilterContext *ctx)
snprintf(name, sizeof(name), "output%d", i);
pad.type = ctx->filter->inputs[0].type;
pad.name = av_strdup(name);
+ if (!pad.name)
+ return AVERROR(ENOMEM);
ff_insert_outpad(ctx, i, &pad);
}
@@ -70,10 +76,14 @@ static av_cold void split_uninit(AVFilterContext *ctx)
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
AVFilterContext *ctx = inlink->dst;
- int i, ret = 0;
+ int i, ret = AVERROR_EOF;
for (i = 0; i < ctx->nb_outputs; i++) {
- AVFrame *buf_out = av_frame_clone(frame);
+ AVFrame *buf_out;
+
+ if (ctx->outputs[i]->status_in)
+ continue;
+ buf_out = av_frame_clone(frame);
if (!buf_out) {
ret = AVERROR(ENOMEM);
break;
@@ -88,58 +98,44 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
}
#define OFFSET(x) offsetof(SplitContext, x)
-#define FLAGS AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_VIDEO_PARAM
+#define FLAGS (AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM)
static const AVOption options[] = {
- { "outputs", "Number of outputs", OFFSET(nb_outputs), AV_OPT_TYPE_INT, { .i64 = 2 }, 1, INT_MAX, FLAGS },
- { NULL },
+ { "outputs", "set number of outputs", OFFSET(nb_outputs), AV_OPT_TYPE_INT, { .i64 = 2 }, 1, INT_MAX, FLAGS },
+ { NULL }
};
-static const AVClass split_class = {
- .class_name = "split",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+#define split_options options
+AVFILTER_DEFINE_CLASS(split);
-static const AVClass asplit_class = {
- .class_name = "asplit",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+#define asplit_options options
+AVFILTER_DEFINE_CLASS(asplit);
static const AVFilterPad avfilter_vf_split_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .get_video_buffer = ff_null_get_video_buffer,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
},
{ NULL }
};
AVFilter ff_vf_split = {
- .name = "split",
+ .name = "split",
.description = NULL_IF_CONFIG_SMALL("Pass on the input to N video outputs."),
-
- .priv_size = sizeof(SplitContext),
- .priv_class = &split_class,
-
- .init = split_init,
- .uninit = split_uninit,
-
- .inputs = avfilter_vf_split_inputs,
- .outputs = NULL,
-
- .flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
+ .priv_size = sizeof(SplitContext),
+ .priv_class = &split_class,
+ .init = split_init,
+ .uninit = split_uninit,
+ .inputs = avfilter_vf_split_inputs,
+ .outputs = NULL,
+ .flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
};
static const AVFilterPad avfilter_af_asplit_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_AUDIO,
- .get_audio_buffer = ff_null_get_audio_buffer,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
},
{ NULL }
};
@@ -147,15 +143,11 @@ static const AVFilterPad avfilter_af_asplit_inputs[] = {
AVFilter ff_af_asplit = {
.name = "asplit",
.description = NULL_IF_CONFIG_SMALL("Pass on the audio input to N audio outputs."),
-
- .priv_size = sizeof(SplitContext),
- .priv_class = &asplit_class,
-
- .init = split_init,
- .uninit = split_uninit,
-
- .inputs = avfilter_af_asplit_inputs,
- .outputs = NULL,
-
- .flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
+ .priv_size = sizeof(SplitContext),
+ .priv_class = &asplit_class,
+ .init = split_init,
+ .uninit = split_uninit,
+ .inputs = avfilter_af_asplit_inputs,
+ .outputs = NULL,
+ .flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
};
diff --git a/libavfilter/src_movie.c b/libavfilter/src_movie.c
new file mode 100644
index 0000000000..23dcb7ba2e
--- /dev/null
+++ b/libavfilter/src_movie.c
@@ -0,0 +1,691 @@
+/*
+ * Copyright (c) 2010 Stefano Sabatini
+ * Copyright (c) 2008 Victor Paesa
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * movie video source
+ *
+ * @todo use direct rendering (no allocation of a new frame)
+ * @todo support a PTS correction mechanism
+ */
+
+#include <float.h>
+#include <stdint.h>
+
+#include "libavutil/attributes.h"
+#include "libavutil/avstring.h"
+#include "libavutil/avassert.h"
+#include "libavutil/opt.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/internal.h"
+#include "libavutil/timestamp.h"
+#include "libavformat/avformat.h"
+#include "audio.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+typedef struct MovieStream {
+ AVStream *st;
+ AVCodecContext *codec_ctx;
+ int done;
+ int64_t discontinuity_threshold;
+ int64_t last_pts;
+} MovieStream;
+
+typedef struct MovieContext {
+ /* common A/V fields */
+ const AVClass *class;
+ int64_t seek_point; ///< seekpoint in microseconds
+ double seek_point_d;
+ char *format_name;
+ char *file_name;
+ char *stream_specs; /**< user-provided list of streams, separated by + */
+ int stream_index; /**< for compatibility */
+ int loop_count;
+ int64_t discontinuity_threshold;
+ int64_t ts_offset;
+
+ AVFormatContext *format_ctx;
+ int eof;
+ AVPacket pkt, pkt0;
+
+ int max_stream_index; /**< max stream # actually used for output */
+ MovieStream *st; /**< array of all streams, one per output */
+ int *out_index; /**< stream number -> output number map, or -1 */
+} MovieContext;
+
+#define OFFSET(x) offsetof(MovieContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption movie_options[]= {
+ { "filename", NULL, OFFSET(file_name), AV_OPT_TYPE_STRING, .flags = FLAGS },
+ { "format_name", "set format name", OFFSET(format_name), AV_OPT_TYPE_STRING, .flags = FLAGS },
+ { "f", "set format name", OFFSET(format_name), AV_OPT_TYPE_STRING, .flags = FLAGS },
+ { "stream_index", "set stream index", OFFSET(stream_index), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS },
+ { "si", "set stream index", OFFSET(stream_index), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS },
+ { "seek_point", "set seekpoint (seconds)", OFFSET(seek_point_d), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, 0, (INT64_MAX-1) / 1000000, FLAGS },
+ { "sp", "set seekpoint (seconds)", OFFSET(seek_point_d), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, 0, (INT64_MAX-1) / 1000000, FLAGS },
+ { "streams", "set streams", OFFSET(stream_specs), AV_OPT_TYPE_STRING, {.str = 0}, CHAR_MAX, CHAR_MAX, FLAGS },
+ { "s", "set streams", OFFSET(stream_specs), AV_OPT_TYPE_STRING, {.str = 0}, CHAR_MAX, CHAR_MAX, FLAGS },
+ { "loop", "set loop count", OFFSET(loop_count), AV_OPT_TYPE_INT, {.i64 = 1}, 0, INT_MAX, FLAGS },
+ { "discontinuity", "set discontinuity threshold", OFFSET(discontinuity_threshold), AV_OPT_TYPE_DURATION, {.i64 = 0}, 0, INT64_MAX, FLAGS },
+ { NULL },
+};
+
+static int movie_config_output_props(AVFilterLink *outlink);
+static int movie_request_frame(AVFilterLink *outlink);
+
+static AVStream *find_stream(void *log, AVFormatContext *avf, const char *spec)
+{
+ int i, ret, already = 0, stream_id = -1;
+ char type_char[2], dummy;
+ AVStream *found = NULL;
+ enum AVMediaType type;
+
+ ret = sscanf(spec, "d%1[av]%d%c", type_char, &stream_id, &dummy);
+ if (ret >= 1 && ret <= 2) {
+ type = type_char[0] == 'v' ? AVMEDIA_TYPE_VIDEO : AVMEDIA_TYPE_AUDIO;
+ ret = av_find_best_stream(avf, type, stream_id, -1, NULL, 0);
+ if (ret < 0) {
+ av_log(log, AV_LOG_ERROR, "No %s stream with index '%d' found\n",
+ av_get_media_type_string(type), stream_id);
+ return NULL;
+ }
+ return avf->streams[ret];
+ }
+ for (i = 0; i < avf->nb_streams; i++) {
+ ret = avformat_match_stream_specifier(avf, avf->streams[i], spec);
+ if (ret < 0) {
+ av_log(log, AV_LOG_ERROR,
+ "Invalid stream specifier \"%s\"\n", spec);
+ return NULL;
+ }
+ if (!ret)
+ continue;
+ if (avf->streams[i]->discard != AVDISCARD_ALL) {
+ already++;
+ continue;
+ }
+ if (found) {
+ av_log(log, AV_LOG_WARNING,
+ "Ambiguous stream specifier \"%s\", using #%d\n", spec, i);
+ break;
+ }
+ found = avf->streams[i];
+ }
+ if (!found) {
+ av_log(log, AV_LOG_WARNING, "Stream specifier \"%s\" %s\n", spec,
+ already ? "matched only already used streams" :
+ "did not match any stream");
+ return NULL;
+ }
+ if (found->codecpar->codec_type != AVMEDIA_TYPE_VIDEO &&
+ found->codecpar->codec_type != AVMEDIA_TYPE_AUDIO) {
+ av_log(log, AV_LOG_ERROR, "Stream specifier \"%s\" matched a %s stream,"
+ "currently unsupported by libavfilter\n", spec,
+ av_get_media_type_string(found->codecpar->codec_type));
+ return NULL;
+ }
+ return found;
+}
+
+static int open_stream(void *log, MovieStream *st)
+{
+ AVCodec *codec;
+ int ret;
+
+ codec = avcodec_find_decoder(st->st->codecpar->codec_id);
+ if (!codec) {
+ av_log(log, AV_LOG_ERROR, "Failed to find any codec\n");
+ return AVERROR(EINVAL);
+ }
+
+ st->codec_ctx = avcodec_alloc_context3(codec);
+ if (!st->codec_ctx)
+ return AVERROR(ENOMEM);
+
+ ret = avcodec_parameters_to_context(st->codec_ctx, st->st->codecpar);
+ if (ret < 0)
+ return ret;
+
+ st->codec_ctx->refcounted_frames = 1;
+
+ if ((ret = avcodec_open2(st->codec_ctx, codec, NULL)) < 0) {
+ av_log(log, AV_LOG_ERROR, "Failed to open codec\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int guess_channel_layout(MovieStream *st, int st_index, void *log_ctx)
+{
+ AVCodecParameters *dec_par = st->st->codecpar;
+ char buf[256];
+ int64_t chl = av_get_default_channel_layout(dec_par->channels);
+
+ if (!chl) {
+ av_log(log_ctx, AV_LOG_ERROR,
+ "Channel layout is not set in stream %d, and could not "
+ "be guessed from the number of channels (%d)\n",
+ st_index, dec_par->channels);
+ return AVERROR(EINVAL);
+ }
+
+ av_get_channel_layout_string(buf, sizeof(buf), dec_par->channels, chl);
+ av_log(log_ctx, AV_LOG_WARNING,
+ "Channel layout is not set in output stream %d, "
+ "guessed channel layout is '%s'\n",
+ st_index, buf);
+ dec_par->channel_layout = chl;
+ return 0;
+}
+
+static av_cold int movie_common_init(AVFilterContext *ctx)
+{
+ MovieContext *movie = ctx->priv;
+ AVInputFormat *iformat = NULL;
+ int64_t timestamp;
+ int nb_streams = 1, ret, i;
+ char default_streams[16], *stream_specs, *spec, *cursor;
+ char name[16];
+ AVStream *st;
+
+ if (!movie->file_name) {
+ av_log(ctx, AV_LOG_ERROR, "No filename provided!\n");
+ return AVERROR(EINVAL);
+ }
+
+ movie->seek_point = movie->seek_point_d * 1000000 + 0.5;
+
+ stream_specs = movie->stream_specs;
+ if (!stream_specs) {
+ snprintf(default_streams, sizeof(default_streams), "d%c%d",
+ !strcmp(ctx->filter->name, "amovie") ? 'a' : 'v',
+ movie->stream_index);
+ stream_specs = default_streams;
+ }
+ for (cursor = stream_specs; *cursor; cursor++)
+ if (*cursor == '+')
+ nb_streams++;
+
+ if (movie->loop_count != 1 && nb_streams != 1) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Loop with several streams is currently unsupported\n");
+ return AVERROR_PATCHWELCOME;
+ }
+
+ av_register_all();
+
+ // Try to find the movie format (container)
+ iformat = movie->format_name ? av_find_input_format(movie->format_name) : NULL;
+
+ movie->format_ctx = NULL;
+ if ((ret = avformat_open_input(&movie->format_ctx, movie->file_name, iformat, NULL)) < 0) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Failed to avformat_open_input '%s'\n", movie->file_name);
+ return ret;
+ }
+ if ((ret = avformat_find_stream_info(movie->format_ctx, NULL)) < 0)
+ av_log(ctx, AV_LOG_WARNING, "Failed to find stream info\n");
+
+ // if seeking requested, we execute it
+ if (movie->seek_point > 0) {
+ timestamp = movie->seek_point;
+ // add the stream start time, should it exist
+ if (movie->format_ctx->start_time != AV_NOPTS_VALUE) {
+ if (timestamp > 0 && movie->format_ctx->start_time > INT64_MAX - timestamp) {
+ av_log(ctx, AV_LOG_ERROR,
+ "%s: seek value overflow with start_time:%"PRId64" seek_point:%"PRId64"\n",
+ movie->file_name, movie->format_ctx->start_time, movie->seek_point);
+ return AVERROR(EINVAL);
+ }
+ timestamp += movie->format_ctx->start_time;
+ }
+ if ((ret = av_seek_frame(movie->format_ctx, -1, timestamp, AVSEEK_FLAG_BACKWARD)) < 0) {
+ av_log(ctx, AV_LOG_ERROR, "%s: could not seek to position %"PRId64"\n",
+ movie->file_name, timestamp);
+ return ret;
+ }
+ }
+
+ for (i = 0; i < movie->format_ctx->nb_streams; i++)
+ movie->format_ctx->streams[i]->discard = AVDISCARD_ALL;
+
+ movie->st = av_calloc(nb_streams, sizeof(*movie->st));
+ if (!movie->st)
+ return AVERROR(ENOMEM);
+
+ for (i = 0; i < nb_streams; i++) {
+ spec = av_strtok(stream_specs, "+", &cursor);
+ if (!spec)
+ return AVERROR_BUG;
+ stream_specs = NULL; /* for next strtok */
+ st = find_stream(ctx, movie->format_ctx, spec);
+ if (!st)
+ return AVERROR(EINVAL);
+ st->discard = AVDISCARD_DEFAULT;
+ movie->st[i].st = st;
+ movie->max_stream_index = FFMAX(movie->max_stream_index, st->index);
+ movie->st[i].discontinuity_threshold =
+ av_rescale_q(movie->discontinuity_threshold, AV_TIME_BASE_Q, st->time_base);
+ }
+ if (av_strtok(NULL, "+", &cursor))
+ return AVERROR_BUG;
+
+ movie->out_index = av_calloc(movie->max_stream_index + 1,
+ sizeof(*movie->out_index));
+ if (!movie->out_index)
+ return AVERROR(ENOMEM);
+ for (i = 0; i <= movie->max_stream_index; i++)
+ movie->out_index[i] = -1;
+ for (i = 0; i < nb_streams; i++) {
+ AVFilterPad pad = { 0 };
+ movie->out_index[movie->st[i].st->index] = i;
+ snprintf(name, sizeof(name), "out%d", i);
+ pad.type = movie->st[i].st->codecpar->codec_type;
+ pad.name = av_strdup(name);
+ if (!pad.name)
+ return AVERROR(ENOMEM);
+ pad.config_props = movie_config_output_props;
+ pad.request_frame = movie_request_frame;
+ ff_insert_outpad(ctx, i, &pad);
+ if ( movie->st[i].st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO &&
+ !movie->st[i].st->codecpar->channel_layout) {
+ ret = guess_channel_layout(&movie->st[i], i, ctx);
+ if (ret < 0)
+ return ret;
+ }
+ ret = open_stream(ctx, &movie->st[i]);
+ if (ret < 0)
+ return ret;
+ }
+
+ av_log(ctx, AV_LOG_VERBOSE, "seek_point:%"PRIi64" format_name:%s file_name:%s stream_index:%d\n",
+ movie->seek_point, movie->format_name, movie->file_name,
+ movie->stream_index);
+
+ return 0;
+}
+
+static av_cold void movie_uninit(AVFilterContext *ctx)
+{
+ MovieContext *movie = ctx->priv;
+ int i;
+
+ for (i = 0; i < ctx->nb_outputs; i++) {
+ av_freep(&ctx->output_pads[i].name);
+ if (movie->st[i].st)
+ avcodec_free_context(&movie->st[i].codec_ctx);
+ }
+ av_freep(&movie->st);
+ av_freep(&movie->out_index);
+ if (movie->format_ctx)
+ avformat_close_input(&movie->format_ctx);
+}
+
+static int movie_query_formats(AVFilterContext *ctx)
+{
+ MovieContext *movie = ctx->priv;
+ int list[] = { 0, -1 };
+ int64_t list64[] = { 0, -1 };
+ int i, ret;
+
+ for (i = 0; i < ctx->nb_outputs; i++) {
+ MovieStream *st = &movie->st[i];
+ AVCodecParameters *c = st->st->codecpar;
+ AVFilterLink *outlink = ctx->outputs[i];
+
+ switch (c->codec_type) {
+ case AVMEDIA_TYPE_VIDEO:
+ list[0] = c->format;
+ if ((ret = ff_formats_ref(ff_make_format_list(list), &outlink->in_formats)) < 0)
+ return ret;
+ break;
+ case AVMEDIA_TYPE_AUDIO:
+ list[0] = c->format;
+ if ((ret = ff_formats_ref(ff_make_format_list(list), &outlink->in_formats)) < 0)
+ return ret;
+ list[0] = c->sample_rate;
+ if ((ret = ff_formats_ref(ff_make_format_list(list), &outlink->in_samplerates)) < 0)
+ return ret;
+ list64[0] = c->channel_layout;
+ if ((ret = ff_channel_layouts_ref(avfilter_make_format64_list(list64),
+ &outlink->in_channel_layouts)) < 0)
+ return ret;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int movie_config_output_props(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ MovieContext *movie = ctx->priv;
+ unsigned out_id = FF_OUTLINK_IDX(outlink);
+ MovieStream *st = &movie->st[out_id];
+ AVCodecParameters *c = st->st->codecpar;
+
+ outlink->time_base = st->st->time_base;
+
+ switch (c->codec_type) {
+ case AVMEDIA_TYPE_VIDEO:
+ outlink->w = c->width;
+ outlink->h = c->height;
+ outlink->frame_rate = st->st->r_frame_rate;
+ break;
+ case AVMEDIA_TYPE_AUDIO:
+ break;
+ }
+
+ return 0;
+}
+
+static char *describe_frame_to_str(char *dst, size_t dst_size,
+ AVFrame *frame, enum AVMediaType frame_type,
+ AVFilterLink *link)
+{
+ switch (frame_type) {
+ case AVMEDIA_TYPE_VIDEO:
+ snprintf(dst, dst_size,
+ "video pts:%s time:%s size:%dx%d aspect:%d/%d",
+ av_ts2str(frame->pts), av_ts2timestr(frame->pts, &link->time_base),
+ frame->width, frame->height,
+ frame->sample_aspect_ratio.num,
+ frame->sample_aspect_ratio.den);
+ break;
+ case AVMEDIA_TYPE_AUDIO:
+ snprintf(dst, dst_size,
+ "audio pts:%s time:%s samples:%d",
+ av_ts2str(frame->pts), av_ts2timestr(frame->pts, &link->time_base),
+ frame->nb_samples);
+ break;
+ default:
+ snprintf(dst, dst_size, "%s BUG", av_get_media_type_string(frame_type));
+ break;
+ }
+ return dst;
+}
+
+static int rewind_file(AVFilterContext *ctx)
+{
+ MovieContext *movie = ctx->priv;
+ int64_t timestamp = movie->seek_point;
+ int ret, i;
+
+ if (movie->format_ctx->start_time != AV_NOPTS_VALUE)
+ timestamp += movie->format_ctx->start_time;
+ ret = av_seek_frame(movie->format_ctx, -1, timestamp, AVSEEK_FLAG_BACKWARD);
+ if (ret < 0) {
+ av_log(ctx, AV_LOG_ERROR, "Unable to loop: %s\n", av_err2str(ret));
+ movie->loop_count = 1; /* do not try again */
+ return ret;
+ }
+
+ for (i = 0; i < ctx->nb_outputs; i++) {
+ avcodec_flush_buffers(movie->st[i].codec_ctx);
+ movie->st[i].done = 0;
+ }
+ movie->eof = 0;
+ return 0;
+}
+
+/**
+ * Try to push a frame to the requested output.
+ *
+ * @param ctx filter context
+ * @param out_id number of output where a frame is wanted;
+ * if the frame is read from file, used to set the return value;
+ * if the codec is being flushed, flush the corresponding stream
+ * @return 1 if a frame was pushed on the requested output,
+ * 0 if another attempt is possible,
+ * <0 AVERROR code
+ */
+static int movie_push_frame(AVFilterContext *ctx, unsigned out_id)
+{
+ MovieContext *movie = ctx->priv;
+ AVPacket *pkt = &movie->pkt;
+ enum AVMediaType frame_type;
+ MovieStream *st;
+ int ret, got_frame = 0, pkt_out_id;
+ AVFilterLink *outlink;
+ AVFrame *frame;
+
+ if (!pkt->size) {
+ if (movie->eof) {
+ if (movie->st[out_id].done) {
+ if (movie->loop_count != 1) {
+ ret = rewind_file(ctx);
+ if (ret < 0)
+ return ret;
+ movie->loop_count -= movie->loop_count > 1;
+ av_log(ctx, AV_LOG_VERBOSE, "Stream finished, looping.\n");
+ return 0; /* retry */
+ }
+ return AVERROR_EOF;
+ }
+ pkt->stream_index = movie->st[out_id].st->index;
+ /* packet is already ready for flushing */
+ } else {
+ ret = av_read_frame(movie->format_ctx, &movie->pkt0);
+ if (ret < 0) {
+ av_init_packet(&movie->pkt0); /* ready for flushing */
+ *pkt = movie->pkt0;
+ if (ret == AVERROR_EOF) {
+ movie->eof = 1;
+ return 0; /* start flushing */
+ }
+ return ret;
+ }
+ *pkt = movie->pkt0;
+ }
+ }
+
+ pkt_out_id = pkt->stream_index > movie->max_stream_index ? -1 :
+ movie->out_index[pkt->stream_index];
+ if (pkt_out_id < 0) {
+ av_packet_unref(&movie->pkt0);
+ pkt->size = 0; /* ready for next run */
+ pkt->data = NULL;
+ return 0;
+ }
+ st = &movie->st[pkt_out_id];
+ outlink = ctx->outputs[pkt_out_id];
+
+ frame = av_frame_alloc();
+ if (!frame)
+ return AVERROR(ENOMEM);
+
+ frame_type = st->st->codecpar->codec_type;
+ switch (frame_type) {
+ case AVMEDIA_TYPE_VIDEO:
+ ret = avcodec_decode_video2(st->codec_ctx, frame, &got_frame, pkt);
+ break;
+ case AVMEDIA_TYPE_AUDIO:
+ ret = avcodec_decode_audio4(st->codec_ctx, frame, &got_frame, pkt);
+ break;
+ default:
+ ret = AVERROR(ENOSYS);
+ break;
+ }
+ if (ret < 0) {
+ av_log(ctx, AV_LOG_WARNING, "Decode error: %s\n", av_err2str(ret));
+ av_frame_free(&frame);
+ av_packet_unref(&movie->pkt0);
+ movie->pkt.size = 0;
+ movie->pkt.data = NULL;
+ return 0;
+ }
+ if (!ret || st->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
+ ret = pkt->size;
+
+ pkt->data += ret;
+ pkt->size -= ret;
+ if (pkt->size <= 0) {
+ av_packet_unref(&movie->pkt0);
+ pkt->size = 0; /* ready for next run */
+ pkt->data = NULL;
+ }
+ if (!got_frame) {
+ if (!ret)
+ st->done = 1;
+ av_frame_free(&frame);
+ return 0;
+ }
+
+ frame->pts = av_frame_get_best_effort_timestamp(frame);
+ if (frame->pts != AV_NOPTS_VALUE) {
+ if (movie->ts_offset)
+ frame->pts += av_rescale_q_rnd(movie->ts_offset, AV_TIME_BASE_Q, outlink->time_base, AV_ROUND_UP);
+ if (st->discontinuity_threshold) {
+ if (st->last_pts != AV_NOPTS_VALUE) {
+ int64_t diff = frame->pts - st->last_pts;
+ if (diff < 0 || diff > st->discontinuity_threshold) {
+ av_log(ctx, AV_LOG_VERBOSE, "Discontinuity in stream:%d diff:%"PRId64"\n", pkt_out_id, diff);
+ movie->ts_offset += av_rescale_q_rnd(-diff, outlink->time_base, AV_TIME_BASE_Q, AV_ROUND_UP);
+ frame->pts -= diff;
+ }
+ }
+ }
+ st->last_pts = frame->pts;
+ }
+ ff_dlog(ctx, "movie_push_frame(): file:'%s' %s\n", movie->file_name,
+ describe_frame_to_str((char[1024]){0}, 1024, frame, frame_type, outlink));
+
+ if (st->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
+ if (frame->format != outlink->format) {
+ av_log(ctx, AV_LOG_ERROR, "Format changed %s -> %s, discarding frame\n",
+ av_get_pix_fmt_name(outlink->format),
+ av_get_pix_fmt_name(frame->format)
+ );
+ av_frame_free(&frame);
+ return 0;
+ }
+ }
+ ret = ff_filter_frame(outlink, frame);
+
+ if (ret < 0)
+ return ret;
+ return pkt_out_id == out_id;
+}
+
+static int movie_request_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ unsigned out_id = FF_OUTLINK_IDX(outlink);
+ int ret;
+
+ while (1) {
+ ret = movie_push_frame(ctx, out_id);
+ if (ret)
+ return FFMIN(ret, 0);
+ }
+}
+
+static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
+ char *res, int res_len, int flags)
+{
+ MovieContext *movie = ctx->priv;
+ int ret = AVERROR(ENOSYS);
+
+ if (!strcmp(cmd, "seek")) {
+ int idx, flags, i;
+ int64_t ts;
+ char tail[2];
+
+ if (sscanf(args, "%i|%"SCNi64"|%i %1s", &idx, &ts, &flags, tail) != 3)
+ return AVERROR(EINVAL);
+
+ ret = av_seek_frame(movie->format_ctx, idx, ts, flags);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < ctx->nb_outputs; i++) {
+ avcodec_flush_buffers(movie->st[i].codec_ctx);
+ movie->st[i].done = 0;
+ }
+ return ret;
+ } else if (!strcmp(cmd, "get_duration")) {
+ int print_len;
+ char tail[2];
+
+ if (!res || res_len <= 0)
+ return AVERROR(EINVAL);
+
+ if (args && sscanf(args, "%1s", tail) == 1)
+ return AVERROR(EINVAL);
+
+ print_len = snprintf(res, res_len, "%"PRId64, movie->format_ctx->duration);
+ if (print_len < 0 || print_len >= res_len)
+ return AVERROR(EINVAL);
+
+ return 0;
+ }
+
+ return ret;
+}
+
+#if CONFIG_MOVIE_FILTER
+
+AVFILTER_DEFINE_CLASS(movie);
+
+AVFilter ff_avsrc_movie = {
+ .name = "movie",
+ .description = NULL_IF_CONFIG_SMALL("Read from a movie source."),
+ .priv_size = sizeof(MovieContext),
+ .priv_class = &movie_class,
+ .init = movie_common_init,
+ .uninit = movie_uninit,
+ .query_formats = movie_query_formats,
+
+ .inputs = NULL,
+ .outputs = NULL,
+ .flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
+ .process_command = process_command
+};
+
+#endif /* CONFIG_MOVIE_FILTER */
+
+#if CONFIG_AMOVIE_FILTER
+
+#define amovie_options movie_options
+AVFILTER_DEFINE_CLASS(amovie);
+
+AVFilter ff_avsrc_amovie = {
+ .name = "amovie",
+ .description = NULL_IF_CONFIG_SMALL("Read audio from a movie source."),
+ .priv_size = sizeof(MovieContext),
+ .init = movie_common_init,
+ .uninit = movie_uninit,
+ .query_formats = movie_query_formats,
+
+ .inputs = NULL,
+ .outputs = NULL,
+ .priv_class = &amovie_class,
+ .flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
+ .process_command = process_command,
+};
+
+#endif /* CONFIG_AMOVIE_FILTER */
diff --git a/libavfilter/ssim.h b/libavfilter/ssim.h
new file mode 100644
index 0000000000..ac0395a22a
--- /dev/null
+++ b/libavfilter/ssim.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2015 Ronald S. Bultje <rsbultje@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_SSIM_H
+#define AVFILTER_SSIM_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+typedef struct SSIMDSPContext {
+ void (*ssim_4x4_line)(const uint8_t *buf, ptrdiff_t buf_stride,
+ const uint8_t *ref, ptrdiff_t ref_stride,
+ int (*sums)[4], int w);
+ float (*ssim_end_line)(const int (*sum0)[4], const int (*sum1)[4], int w);
+} SSIMDSPContext;
+
+void ff_ssim_init_x86(SSIMDSPContext *dsp);
+
+#endif /* AVFILTER_SSIM_H */
diff --git a/libavfilter/stereo3d.h b/libavfilter/stereo3d.h
new file mode 100644
index 0000000000..54611d1286
--- /dev/null
+++ b/libavfilter/stereo3d.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2015 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_STEREO3D_H
+#define AVFILTER_STEREO3D_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+typedef struct Stereo3DDSPContext {
+ void (*anaglyph)(uint8_t *dst, uint8_t *lsrc, uint8_t *rsrc,
+ ptrdiff_t dst_linesize, ptrdiff_t l_linesize, ptrdiff_t r_linesize,
+ int width, int height,
+ const int *ana_matrix_r, const int *ana_matrix_g, const int *ana_matrix_b);
+} Stereo3DDSPContext;
+
+void ff_stereo3d_init_x86(Stereo3DDSPContext *dsp);
+
+#endif /* AVFILTER_STEREO3D_H */
diff --git a/libavfilter/tests/.gitignore b/libavfilter/tests/.gitignore
index a55b3866b3..65ef86f2e5 100644
--- a/libavfilter/tests/.gitignore
+++ b/libavfilter/tests/.gitignore
@@ -1 +1,4 @@
+/drawutils
/filtfmts
+/formats
+/integral
diff --git a/libavfilter/tests/drawutils.c b/libavfilter/tests/drawutils.c
new file mode 100644
index 0000000000..7fe53ddf31
--- /dev/null
+++ b/libavfilter/tests/drawutils.c
@@ -0,0 +1,56 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdio.h>
+
+#include "libavutil/pixdesc.h"
+#include "libavfilter/drawutils.h"
+
+int main(void)
+{
+ enum AVPixelFormat f;
+ const AVPixFmtDescriptor *desc;
+ FFDrawContext draw;
+ FFDrawColor color;
+ int r, i;
+
+ for (f = 0; av_pix_fmt_desc_get(f); f++) {
+ desc = av_pix_fmt_desc_get(f);
+ if (!desc->name)
+ continue;
+ printf("Testing %s...%*s", desc->name,
+ (int)(16 - strlen(desc->name)), "");
+ r = ff_draw_init(&draw, f, 0);
+ if (r < 0) {
+ char buf[128];
+ av_strerror(r, buf, sizeof(buf));
+ printf("no: %s\n", buf);
+ continue;
+ }
+ ff_draw_color(&draw, &color, (uint8_t[]) { 1, 0, 0, 1 });
+ for (i = 0; i < sizeof(color); i++)
+ if (((uint8_t *)&color)[i] != 128)
+ break;
+ if (i == sizeof(color)) {
+ printf("fallback color\n");
+ continue;
+ }
+ printf("ok\n");
+ }
+ return 0;
+}
diff --git a/libavfilter/tests/filtfmts.c b/libavfilter/tests/filtfmts.c
index cc04654222..199d74d7a9 100644
--- a/libavfilter/tests/filtfmts.c
+++ b/libavfilter/tests/filtfmts.c
@@ -1,30 +1,75 @@
/*
* Copyright (c) 2009 Stefano Sabatini
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdio.h>
+#include "libavutil/channel_layout.h"
#include "libavutil/mem.h"
#include "libavutil/pixdesc.h"
+#include "libavutil/samplefmt.h"
+
+#define FF_INTERNAL_FIELDS 1
+#include "libavfilter/framequeue.h"
#include "libavfilter/avfilter.h"
#include "libavfilter/formats.h"
+#include "libavfilter/internal.h"
+
+static void print_formats(AVFilterContext *filter_ctx)
+{
+ int i, j;
+
+#define PRINT_FMTS(inout, outin, INOUT) \
+ for (i = 0; i < filter_ctx->nb_##inout##puts; i++) { \
+ if (filter_ctx->inout##puts[i]->type == AVMEDIA_TYPE_VIDEO) { \
+ AVFilterFormats *fmts = \
+ filter_ctx->inout##puts[i]->outin##_formats; \
+ for (j = 0; j < fmts->nb_formats; j++) \
+ if(av_get_pix_fmt_name(fmts->formats[j])) \
+ printf(#INOUT "PUT[%d] %s: fmt:%s\n", \
+ i, avfilter_pad_get_name(filter_ctx->inout##put_pads, i), \
+ av_get_pix_fmt_name(fmts->formats[j])); \
+ } else if (filter_ctx->inout##puts[i]->type == AVMEDIA_TYPE_AUDIO) { \
+ AVFilterFormats *fmts; \
+ AVFilterChannelLayouts *layouts; \
+ \
+ fmts = filter_ctx->inout##puts[i]->outin##_formats; \
+ for (j = 0; j < fmts->nb_formats; j++) \
+ printf(#INOUT "PUT[%d] %s: fmt:%s\n", \
+ i, avfilter_pad_get_name(filter_ctx->inout##put_pads, i), \
+ av_get_sample_fmt_name(fmts->formats[j])); \
+ \
+ layouts = filter_ctx->inout##puts[i]->outin##_channel_layouts; \
+ for (j = 0; j < layouts->nb_channel_layouts; j++) { \
+ char buf[256]; \
+ av_get_channel_layout_string(buf, sizeof(buf), -1, \
+ layouts->channel_layouts[j]); \
+ printf(#INOUT "PUT[%d] %s: chlayout:%s\n", \
+ i, avfilter_pad_get_name(filter_ctx->inout##put_pads, i), buf); \
+ } \
+ } \
+ } \
+
+ PRINT_FMTS(in, out, IN);
+ PRINT_FMTS(out, in, OUT);
+}
int main(int argc, char **argv)
{
@@ -33,17 +78,18 @@ int main(int argc, char **argv)
AVFilterGraph *graph_ctx;
const char *filter_name;
const char *filter_args = NULL;
- int i, j, ret = 0;
+ int i;
+ int ret = 0;
av_log_set_level(AV_LOG_DEBUG);
- if (!argv[1]) {
+ if (argc < 2) {
fprintf(stderr, "Missing filter name as argument\n");
return 1;
}
filter_name = argv[1];
- if (argv[2])
+ if (argc > 2)
filter_args = argv[2];
/* allocate graph */
@@ -79,7 +125,7 @@ int main(int argc, char **argv)
ret = 1;
goto fail;
}
- link->type = avfilter_pad_get_type(filter_ctx->filter->inputs, i);
+ link->type = avfilter_pad_get_type(filter_ctx->input_pads, i);
filter_ctx->inputs[i] = link;
}
for (i = 0; i < filter_ctx->nb_outputs; i++) {
@@ -89,7 +135,7 @@ int main(int argc, char **argv)
ret = 1;
goto fail;
}
- link->type = avfilter_pad_get_type(filter_ctx->filter->outputs, i);
+ link->type = avfilter_pad_get_type(filter_ctx->output_pads, i);
filter_ctx->outputs[i] = link;
}
@@ -98,23 +144,7 @@ int main(int argc, char **argv)
else
ff_default_query_formats(filter_ctx);
- /* print the supported formats in input */
- for (i = 0; i < filter_ctx->nb_inputs; i++) {
- AVFilterFormats *fmts = filter_ctx->inputs[i]->out_formats;
- for (j = 0; j < fmts->nb_formats; j++)
- printf("INPUT[%d] %s: %s\n",
- i, avfilter_pad_get_name(filter_ctx->filter->inputs, i),
- av_get_pix_fmt_name(fmts->formats[j]));
- }
-
- /* print the supported formats in output */
- for (i = 0; i < filter_ctx->nb_outputs; i++) {
- AVFilterFormats *fmts = filter_ctx->outputs[i]->in_formats;
- for (j = 0; j < fmts->nb_formats; j++)
- printf("OUTPUT[%d] %s: %s\n",
- i, avfilter_pad_get_name(filter_ctx->filter->outputs, i),
- av_get_pix_fmt_name(fmts->formats[j]));
- }
+ print_formats(filter_ctx);
fail:
avfilter_free(filter_ctx);
diff --git a/libavfilter/tests/formats.c b/libavfilter/tests/formats.c
new file mode 100644
index 0000000000..5450742b68
--- /dev/null
+++ b/libavfilter/tests/formats.c
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2007 Bobby Bingham
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavfilter/formats.c"
+
+#undef printf
+
+int main(void)
+{
+ const int64_t *cl;
+ char buf[512];
+ int i;
+ const char *teststrings[] ={
+ "blah",
+ "1",
+ "2",
+ "-1",
+ "60",
+ "65",
+ "1c",
+ "2c",
+ "-1c",
+ "60c",
+ "65c",
+ "2C",
+ "60C",
+ "65C",
+ "5.1",
+ "stereo",
+ "1+1+1+1",
+ "1c+1c+1c+1c",
+ "2c+1c",
+ "0x3",
+ };
+
+ for (cl = avfilter_all_channel_layouts; *cl != -1; cl++) {
+ av_get_channel_layout_string(buf, sizeof(buf), -1, *cl);
+ printf("%s\n", buf);
+ }
+
+ for ( i = 0; i<FF_ARRAY_ELEMS(teststrings); i++) {
+ int64_t layout = -1;
+ int count = -1;
+ int ret;
+ ret = ff_parse_channel_layout(&layout, &count, teststrings[i], NULL);
+
+ printf ("%d = ff_parse_channel_layout(%016"PRIX64", %2d, %s);\n", ret ? -1 : 0, layout, count, teststrings[i]);
+ }
+
+ return 0;
+}
diff --git a/libavfilter/tests/integral.c b/libavfilter/tests/integral.c
new file mode 100644
index 0000000000..049fefae83
--- /dev/null
+++ b/libavfilter/tests/integral.c
@@ -0,0 +1,90 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavfilter/vf_nlmeans.c"
+
+static void display_integral(const uint32_t *ii, int w, int h, int lz_32)
+{
+ int x, y;
+
+ for (y = 0; y < h; y++) {
+ for (x = 0; x < w; x++)
+ printf(" %7x", ii[y*lz_32 + x]);
+ printf("\n");
+ }
+ printf("---------------\n");
+}
+
+int main(void)
+{
+ int ret = 0, xoff, yoff;
+
+ // arbitrary test source of size 6x4 and linesize=8
+ const int w = 6, h = 5, lz = 8;
+ static const uint8_t src[] = {
+ 0xb0, 0x71, 0xfb, 0xd8, 0x01, 0xd9, /***/ 0x01, 0x02,
+ 0x51, 0x8e, 0x41, 0x0f, 0x84, 0x58, /***/ 0x03, 0x04,
+ 0xc7, 0x8d, 0x07, 0x70, 0x5c, 0x47, /***/ 0x05, 0x06,
+ 0x09, 0x4e, 0xfc, 0x74, 0x8f, 0x9a, /***/ 0x07, 0x08,
+ 0x60, 0x8e, 0x20, 0xaa, 0x95, 0x7d, /***/ 0x09, 0x0a,
+ };
+
+ const int e = 3;
+ const int ii_w = w+e*2, ii_h = h+e*2;
+
+ // align to 4 the linesize, "+1" is for the space of the left 0-column
+ const int ii_lz_32 = ((ii_w + 1) + 3) & ~3;
+
+ // "+1" is for the space of the top 0-line
+ uint32_t *ii = av_mallocz_array(ii_h + 1, ii_lz_32 * sizeof(*ii));
+ uint32_t *ii2 = av_mallocz_array(ii_h + 1, ii_lz_32 * sizeof(*ii2));
+
+ uint32_t *ii_start = ii + ii_lz_32 + 1; // skip top 0-line and left 0-column
+ uint32_t *ii_start2 = ii2 + ii_lz_32 + 1; // skip top 0-line and left 0-column
+
+ if (!ii || !ii2)
+ return -1;
+
+ for (yoff = -e; yoff <= e; yoff++) {
+ for (xoff = -e; xoff <= e; xoff++) {
+ printf("xoff=%d yoff=%d\n", xoff, yoff);
+
+ compute_ssd_integral_image(ii_start, ii_lz_32,
+ src, lz, xoff, yoff, e, w, h);
+ display_integral(ii_start, ii_w, ii_h, ii_lz_32);
+
+ compute_unsafe_ssd_integral_image(ii_start2, ii_lz_32,
+ 0, 0,
+ src, lz,
+ xoff, yoff, e, w, h,
+ ii_w, ii_h);
+ display_integral(ii_start2, ii_w, ii_h, ii_lz_32);
+
+ if (memcmp(ii, ii2, (ii_h+1) * ii_lz_32 * sizeof(*ii))) {
+ printf("Integral mismatch\n");
+ ret = 1;
+ goto end;
+ }
+ }
+ }
+
+end:
+ av_freep(&ii);
+ av_freep(&ii2);
+ return ret;
+}
diff --git a/libavfilter/thread.h b/libavfilter/thread.h
index 42b7cafcf3..c709f17a33 100644
--- a/libavfilter/thread.h
+++ b/libavfilter/thread.h
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/libavfilter/tinterlace.h b/libavfilter/tinterlace.h
new file mode 100644
index 0000000000..3b703e7b21
--- /dev/null
+++ b/libavfilter/tinterlace.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2011 Stefano Sabatini
+ * Copyright (c) 2010 Baptiste Coudurier
+ * Copyright (c) 2003 Michael Zucchi <notzed@ximian.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/**
+ * @file
+ * temporal field interlace filter, ported from MPlayer/libmpcodecs
+ */
+#ifndef AVFILTER_TINTERLACE_H
+#define AVFILTER_TINTERLACE_H
+
+#include "libavutil/opt.h"
+#include "avfilter.h"
+
+enum TInterlaceMode {
+ MODE_MERGE = 0,
+ MODE_DROP_EVEN,
+ MODE_DROP_ODD,
+ MODE_PAD,
+ MODE_INTERLEAVE_TOP,
+ MODE_INTERLEAVE_BOTTOM,
+ MODE_INTERLACEX2,
+ MODE_MERGEX2,
+ MODE_NB,
+};
+
+typedef struct {
+ const AVClass *class;
+ int mode; ///< TInterlaceMode, interlace mode selected
+ AVRational preout_time_base;
+ int flags; ///< flags affecting interlacing algorithm
+ int frame; ///< number of the output frame
+ int vsub; ///< chroma vertical subsampling
+ AVFrame *cur;
+ AVFrame *next;
+ uint8_t *black_data[4]; ///< buffer used to fill padded lines
+ int black_linesize[4];
+ void (*lowpass_line)(uint8_t *dstp, ptrdiff_t width, const uint8_t *srcp,
+ const uint8_t *srcp_above, const uint8_t *srcp_below);
+} TInterlaceContext;
+
+void ff_tinterlace_init_x86(TInterlaceContext *interlace);
+
+#endif /* AVFILTER_TINTERLACE_H */
diff --git a/libavfilter/transform.c b/libavfilter/transform.c
new file mode 100644
index 0000000000..f92fc4d42f
--- /dev/null
+++ b/libavfilter/transform.c
@@ -0,0 +1,191 @@
+/*
+ * Copyright (C) 2010 Georg Martius <georg.martius@web.de>
+ * Copyright (C) 2010 Daniel G. Taylor <dan@programmer-art.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * transform input video
+ */
+
+#include "libavutil/common.h"
+#include "libavutil/avassert.h"
+
+#include "transform.h"
+
+#define INTERPOLATE_METHOD(name) \
+ static uint8_t name(float x, float y, const uint8_t *src, \
+ int width, int height, int stride, uint8_t def)
+
+#define PIXEL(img, x, y, w, h, stride, def) \
+ ((x) < 0 || (y) < 0) ? (def) : \
+ (((x) >= (w) || (y) >= (h)) ? (def) : \
+ img[(x) + (y) * (stride)])
+
+/**
+ * Nearest neighbor interpolation
+ */
+INTERPOLATE_METHOD(interpolate_nearest)
+{
+ return PIXEL(src, (int)(x + 0.5), (int)(y + 0.5), width, height, stride, def);
+}
+
+/**
+ * Bilinear interpolation
+ */
+INTERPOLATE_METHOD(interpolate_bilinear)
+{
+ int x_c, x_f, y_c, y_f;
+ int v1, v2, v3, v4;
+
+ if (x < -1 || x > width || y < -1 || y > height) {
+ return def;
+ } else {
+ x_f = (int)x;
+ x_c = x_f + 1;
+
+ y_f = (int)y;
+ y_c = y_f + 1;
+
+ v1 = PIXEL(src, x_c, y_c, width, height, stride, def);
+ v2 = PIXEL(src, x_c, y_f, width, height, stride, def);
+ v3 = PIXEL(src, x_f, y_c, width, height, stride, def);
+ v4 = PIXEL(src, x_f, y_f, width, height, stride, def);
+
+ return (v1*(x - x_f)*(y - y_f) + v2*((x - x_f)*(y_c - y)) +
+ v3*(x_c - x)*(y - y_f) + v4*((x_c - x)*(y_c - y)));
+ }
+}
+
+/**
+ * Biquadratic interpolation
+ */
+INTERPOLATE_METHOD(interpolate_biquadratic)
+{
+ int x_c, x_f, y_c, y_f;
+ uint8_t v1, v2, v3, v4;
+ float f1, f2, f3, f4;
+
+ if (x < - 1 || x > width || y < -1 || y > height)
+ return def;
+ else {
+ x_f = (int)x;
+ x_c = x_f + 1;
+ y_f = (int)y;
+ y_c = y_f + 1;
+
+ v1 = PIXEL(src, x_c, y_c, width, height, stride, def);
+ v2 = PIXEL(src, x_c, y_f, width, height, stride, def);
+ v3 = PIXEL(src, x_f, y_c, width, height, stride, def);
+ v4 = PIXEL(src, x_f, y_f, width, height, stride, def);
+
+ f1 = 1 - sqrt((x_c - x) * (y_c - y));
+ f2 = 1 - sqrt((x_c - x) * (y - y_f));
+ f3 = 1 - sqrt((x - x_f) * (y_c - y));
+ f4 = 1 - sqrt((x - x_f) * (y - y_f));
+ return (v1 * f1 + v2 * f2 + v3 * f3 + v4 * f4) / (f1 + f2 + f3 + f4);
+ }
+}
+
+void avfilter_get_matrix(float x_shift, float y_shift, float angle, float zoom, float *matrix) {
+ matrix[0] = zoom * cos(angle);
+ matrix[1] = -sin(angle);
+ matrix[2] = x_shift;
+ matrix[3] = -matrix[1];
+ matrix[4] = matrix[0];
+ matrix[5] = y_shift;
+ matrix[6] = 0;
+ matrix[7] = 0;
+ matrix[8] = 1;
+}
+
+void avfilter_add_matrix(const float *m1, const float *m2, float *result)
+{
+ int i;
+ for (i = 0; i < 9; i++)
+ result[i] = m1[i] + m2[i];
+}
+
+void avfilter_sub_matrix(const float *m1, const float *m2, float *result)
+{
+ int i;
+ for (i = 0; i < 9; i++)
+ result[i] = m1[i] - m2[i];
+}
+
+void avfilter_mul_matrix(const float *m1, float scalar, float *result)
+{
+ int i;
+ for (i = 0; i < 9; i++)
+ result[i] = m1[i] * scalar;
+}
+
+int avfilter_transform(const uint8_t *src, uint8_t *dst,
+ int src_stride, int dst_stride,
+ int width, int height, const float *matrix,
+ enum InterpolateMethod interpolate,
+ enum FillMethod fill)
+{
+ int x, y;
+ float x_s, y_s;
+ uint8_t def = 0;
+ uint8_t (*func)(float, float, const uint8_t *, int, int, int, uint8_t) = NULL;
+
+ switch(interpolate) {
+ case INTERPOLATE_NEAREST:
+ func = interpolate_nearest;
+ break;
+ case INTERPOLATE_BILINEAR:
+ func = interpolate_bilinear;
+ break;
+ case INTERPOLATE_BIQUADRATIC:
+ func = interpolate_biquadratic;
+ break;
+ default:
+ return AVERROR(EINVAL);
+ }
+
+ for (y = 0; y < height; y++) {
+ for(x = 0; x < width; x++) {
+ x_s = x * matrix[0] + y * matrix[1] + matrix[2];
+ y_s = x * matrix[3] + y * matrix[4] + matrix[5];
+
+ switch(fill) {
+ case FILL_ORIGINAL:
+ def = src[y * src_stride + x];
+ break;
+ case FILL_CLAMP:
+ y_s = av_clipf(y_s, 0, height - 1);
+ x_s = av_clipf(x_s, 0, width - 1);
+ def = src[(int)y_s * src_stride + (int)x_s];
+ break;
+ case FILL_MIRROR:
+ x_s = avpriv_mirror(x_s, width-1);
+ y_s = avpriv_mirror(y_s, height-1);
+
+ av_assert2(x_s >= 0 && y_s >= 0);
+ av_assert2(x_s < width && y_s < height);
+ def = src[(int)y_s * src_stride + (int)x_s];
+ }
+
+ dst[y * dst_stride + x] = func(x_s, y_s, src, width, height, src_stride, def);
+ }
+ }
+ return 0;
+}
diff --git a/libavfilter/transform.h b/libavfilter/transform.h
new file mode 100644
index 0000000000..07436bfccb
--- /dev/null
+++ b/libavfilter/transform.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2010 Georg Martius <georg.martius@web.de>
+ * Copyright (C) 2010 Daniel G. Taylor <dan@programmer-art.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_TRANSFORM_H
+#define AVFILTER_TRANSFORM_H
+
+#include <stdint.h>
+
+/**
+ * @file
+ * transform input video
+ *
+ * All matrices are defined as a single 9-item block of contiguous memory. For
+ * example, the identity matrix would be:
+ *
+ * float *matrix = {1, 0, 0,
+ * 0, 1, 0,
+ * 0, 0, 1};
+ */
+
+enum InterpolateMethod {
+ INTERPOLATE_NEAREST, //< Nearest-neighbor (fast)
+ INTERPOLATE_BILINEAR, //< Bilinear
+ INTERPOLATE_BIQUADRATIC, //< Biquadratic (best)
+ INTERPOLATE_COUNT, //< Number of interpolation methods
+};
+
+// Shortcuts for the fastest and best interpolation methods
+#define INTERPOLATE_DEFAULT INTERPOLATE_BILINEAR
+#define INTERPOLATE_FAST INTERPOLATE_NEAREST
+#define INTERPOLATE_BEST INTERPOLATE_BIQUADRATIC
+
+enum FillMethod {
+ FILL_BLANK, //< Fill zeroes at blank locations
+ FILL_ORIGINAL, //< Original image at blank locations
+ FILL_CLAMP, //< Extruded edge value at blank locations
+ FILL_MIRROR, //< Mirrored edge at blank locations
+ FILL_COUNT, //< Number of edge fill methods
+};
+
+// Shortcuts for fill methods
+#define FILL_DEFAULT FILL_ORIGINAL
+
+/**
+ * Get an affine transformation matrix from a given translation, rotation, and
+ * zoom factor. The matrix will look like:
+ *
+ * [ zoom * cos(angle), -sin(angle), x_shift,
+ * sin(angle), zoom * cos(angle), y_shift,
+ * 0, 0, 1 ]
+ *
+ * @param x_shift horizontal translation
+ * @param y_shift vertical translation
+ * @param angle rotation in radians
+ * @param zoom scale percent (1.0 = 100%)
+ * @param matrix 9-item affine transformation matrix
+ */
+void avfilter_get_matrix(float x_shift, float y_shift, float angle, float zoom, float *matrix);
+
+/**
+ * Add two matrices together. result = m1 + m2.
+ *
+ * @param m1 9-item transformation matrix
+ * @param m2 9-item transformation matrix
+ * @param result 9-item transformation matrix
+ */
+void avfilter_add_matrix(const float *m1, const float *m2, float *result);
+
+/**
+ * Subtract one matrix from another. result = m1 - m2.
+ *
+ * @param m1 9-item transformation matrix
+ * @param m2 9-item transformation matrix
+ * @param result 9-item transformation matrix
+ */
+void avfilter_sub_matrix(const float *m1, const float *m2, float *result);
+
+/**
+ * Multiply a matrix by a scalar value. result = m1 * scalar.
+ *
+ * @param m1 9-item transformation matrix
+ * @param scalar a number
+ * @param result 9-item transformation matrix
+ */
+void avfilter_mul_matrix(const float *m1, float scalar, float *result);
+
+/**
+ * Do an affine transformation with the given interpolation method. This
+ * multiplies each vector [x,y,1] by the matrix and then interpolates to
+ * get the final value.
+ *
+ * @param src source image
+ * @param dst destination image
+ * @param src_stride source image line size in bytes
+ * @param dst_stride destination image line size in bytes
+ * @param width image width in pixels
+ * @param height image height in pixels
+ * @param matrix 9-item affine transformation matrix
+ * @param interpolate pixel interpolation method
+ * @param fill edge fill method
+ * @return negative on error
+ */
+int avfilter_transform(const uint8_t *src, uint8_t *dst,
+ int src_stride, int dst_stride,
+ int width, int height, const float *matrix,
+ enum InterpolateMethod interpolate,
+ enum FillMethod fill);
+
+#endif /* AVFILTER_TRANSFORM_H */
diff --git a/libavfilter/trim.c b/libavfilter/trim.c
index 2b57540460..1dbbabbb93 100644
--- a/libavfilter/trim.c
+++ b/libavfilter/trim.c
@@ -1,23 +1,21 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
-#include <float.h>
-#include <math.h>
#include <stdint.h>
#include "config.h"
@@ -40,8 +38,8 @@ typedef struct TrimContext {
/*
* AVOptions
*/
- double duration;
- double start_time, end_time;
+ int64_t duration;
+ int64_t start_time, end_time;
int64_t start_frame, end_frame;
/*
* in the link timebase for video,
@@ -70,10 +68,9 @@ typedef struct TrimContext {
int64_t next_pts;
int eof;
- int got_output;
} TrimContext;
-static int init(AVFilterContext *ctx)
+static av_cold int init(AVFilterContext *ctx)
{
TrimContext *s = ctx->priv;
@@ -89,52 +86,38 @@ static int config_input(AVFilterLink *inlink)
AVRational tb = (inlink->type == AVMEDIA_TYPE_VIDEO) ?
inlink->time_base : (AVRational){ 1, inlink->sample_rate };
- if (s->start_time != DBL_MAX) {
- int64_t start_pts = lrintf(s->start_time / av_q2d(tb));
+ if (s->start_time != INT64_MAX) {
+ int64_t start_pts = av_rescale_q(s->start_time, AV_TIME_BASE_Q, tb);
if (s->start_pts == AV_NOPTS_VALUE || start_pts < s->start_pts)
s->start_pts = start_pts;
}
- if (s->end_time != DBL_MAX) {
- int64_t end_pts = lrintf(s->end_time / av_q2d(tb));
+ if (s->end_time != INT64_MAX) {
+ int64_t end_pts = av_rescale_q(s->end_time, AV_TIME_BASE_Q, tb);
if (s->end_pts == AV_NOPTS_VALUE || end_pts > s->end_pts)
s->end_pts = end_pts;
}
if (s->duration)
- s->duration_tb = lrintf(s->duration / av_q2d(tb));
-
- return 0;
-}
-
-static int request_frame(AVFilterLink *outlink)
-{
- AVFilterContext *ctx = outlink->src;
- TrimContext *s = ctx->priv;
- int ret;
-
- s->got_output = 0;
- while (!s->got_output) {
- if (s->eof)
- return AVERROR_EOF;
-
- ret = ff_request_frame(ctx->inputs[0]);
- if (ret < 0)
- return ret;
- }
+ s->duration_tb = av_rescale_q(s->duration, AV_TIME_BASE_Q, tb);
return 0;
}
#define OFFSET(x) offsetof(TrimContext, x)
#define COMMON_OPTS \
- { "start", "Timestamp in seconds of the first frame that " \
- "should be passed", OFFSET(start_time), AV_OPT_TYPE_DOUBLE, { .dbl = DBL_MAX }, -DBL_MAX, DBL_MAX, FLAGS }, \
- { "end", "Timestamp in seconds of the first frame that " \
- "should be dropped again", OFFSET(end_time), AV_OPT_TYPE_DOUBLE, { .dbl = DBL_MAX }, -DBL_MAX, DBL_MAX, FLAGS }, \
+ { "start", "Timestamp of the first frame that " \
+ "should be passed", OFFSET(start_time), AV_OPT_TYPE_DURATION, { .i64 = INT64_MAX }, INT64_MIN, INT64_MAX, FLAGS }, \
+ { "starti", "Timestamp of the first frame that " \
+ "should be passed", OFFSET(start_time), AV_OPT_TYPE_DURATION, { .i64 = INT64_MAX }, INT64_MIN, INT64_MAX, FLAGS }, \
+ { "end", "Timestamp of the first frame that " \
+ "should be dropped again", OFFSET(end_time), AV_OPT_TYPE_DURATION, { .i64 = INT64_MAX }, INT64_MIN, INT64_MAX, FLAGS }, \
+ { "endi", "Timestamp of the first frame that " \
+ "should be dropped again", OFFSET(end_time), AV_OPT_TYPE_DURATION, { .i64 = INT64_MAX }, INT64_MIN, INT64_MAX, FLAGS }, \
{ "start_pts", "Timestamp of the first frame that should be " \
" passed", OFFSET(start_pts), AV_OPT_TYPE_INT64, { .i64 = AV_NOPTS_VALUE }, INT64_MIN, INT64_MAX, FLAGS }, \
{ "end_pts", "Timestamp of the first frame that should be " \
"dropped again", OFFSET(end_pts), AV_OPT_TYPE_INT64, { .i64 = AV_NOPTS_VALUE }, INT64_MIN, INT64_MAX, FLAGS }, \
- { "duration", "Maximum duration of the output in seconds", OFFSET(duration), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, 0, DBL_MAX, FLAGS },
+ { "duration", "Maximum duration of the output", OFFSET(duration), AV_OPT_TYPE_DURATION, { .i64 = 0 }, 0, INT64_MAX, FLAGS }, \
+ { "durationi", "Maximum duration of the output", OFFSET(duration), AV_OPT_TYPE_DURATION, { .i64 = 0 }, 0, INT64_MAX, FLAGS },
#if CONFIG_TRIM_FILTER
@@ -178,12 +161,12 @@ static int trim_filter_frame(AVFilterLink *inlink, AVFrame *frame)
if (drop) {
s->eof = 1;
+ ff_avfilter_link_set_out_status(inlink, AVERROR_EOF, AV_NOPTS_VALUE);
goto drop;
}
}
s->nb_frames++;
- s->got_output = 1;
return ff_filter_frame(ctx->outputs[0], frame);
@@ -193,23 +176,18 @@ drop:
return 0;
}
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
static const AVOption trim_options[] = {
COMMON_OPTS
{ "start_frame", "Number of the first frame that should be passed "
"to the output", OFFSET(start_frame), AV_OPT_TYPE_INT64, { .i64 = -1 }, -1, INT64_MAX, FLAGS },
{ "end_frame", "Number of the first frame that should be dropped "
"again", OFFSET(end_frame), AV_OPT_TYPE_INT64, { .i64 = INT64_MAX }, 0, INT64_MAX, FLAGS },
- { NULL },
+ { NULL }
};
#undef FLAGS
-static const AVClass trim_class = {
- .class_name = "trim",
- .item_name = av_default_item_name,
- .option = trim_options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(trim);
static const AVFilterPad trim_inputs[] = {
{
@@ -223,9 +201,8 @@ static const AVFilterPad trim_inputs[] = {
static const AVFilterPad trim_outputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .request_frame = request_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
@@ -233,12 +210,9 @@ static const AVFilterPad trim_outputs[] = {
AVFilter ff_vf_trim = {
.name = "trim",
.description = NULL_IF_CONFIG_SMALL("Pick one continuous section from the input, drop the rest."),
-
.init = init,
-
.priv_size = sizeof(TrimContext),
.priv_class = &trim_class,
-
.inputs = trim_inputs,
.outputs = trim_outputs,
};
@@ -249,7 +223,7 @@ static int atrim_filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
AVFilterContext *ctx = inlink->dst;
TrimContext *s = ctx->priv;
- int64_t start_sample, end_sample = frame->nb_samples;
+ int64_t start_sample, end_sample;
int64_t pts;
int drop;
@@ -318,6 +292,7 @@ static int atrim_filter_frame(AVFilterLink *inlink, AVFrame *frame)
if (drop) {
s->eof = 1;
+ ff_avfilter_link_set_out_status(inlink, AVERROR_EOF, AV_NOPTS_VALUE);
goto drop;
}
}
@@ -325,7 +300,7 @@ static int atrim_filter_frame(AVFilterLink *inlink, AVFrame *frame)
s->nb_samples += frame->nb_samples;
start_sample = FFMAX(0, start_sample);
end_sample = FFMIN(frame->nb_samples, end_sample);
- av_assert0(start_sample < end_sample);
+ av_assert0(start_sample < end_sample || (start_sample == end_sample && !frame->nb_samples));
if (start_sample) {
AVFrame *out = ff_get_audio_buffer(ctx->outputs[0], end_sample - start_sample);
@@ -336,7 +311,7 @@ static int atrim_filter_frame(AVFilterLink *inlink, AVFrame *frame)
av_frame_copy_props(out, frame);
av_samples_copy(out->extended_data, frame->extended_data, 0, start_sample,
- out->nb_samples, av_get_channel_layout_nb_channels(frame->channel_layout),
+ out->nb_samples, inlink->channels,
frame->format);
if (out->pts != AV_NOPTS_VALUE)
out->pts += av_rescale_q(start_sample, (AVRational){ 1, out->sample_rate },
@@ -347,7 +322,6 @@ static int atrim_filter_frame(AVFilterLink *inlink, AVFrame *frame)
} else
frame->nb_samples = end_sample;
- s->got_output = 1;
return ff_filter_frame(ctx->outputs[0], frame);
drop:
@@ -356,23 +330,18 @@ drop:
return 0;
}
-#define FLAGS AV_OPT_FLAG_AUDIO_PARAM
+#define FLAGS AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
static const AVOption atrim_options[] = {
COMMON_OPTS
{ "start_sample", "Number of the first audio sample that should be "
"passed to the output", OFFSET(start_sample), AV_OPT_TYPE_INT64, { .i64 = -1 }, -1, INT64_MAX, FLAGS },
{ "end_sample", "Number of the first audio sample that should be "
"dropped again", OFFSET(end_sample), AV_OPT_TYPE_INT64, { .i64 = INT64_MAX }, 0, INT64_MAX, FLAGS },
- { NULL },
+ { NULL }
};
#undef FLAGS
-static const AVClass atrim_class = {
- .class_name = "atrim",
- .item_name = av_default_item_name,
- .option = atrim_options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(atrim);
static const AVFilterPad atrim_inputs[] = {
{
@@ -386,9 +355,8 @@ static const AVFilterPad atrim_inputs[] = {
static const AVFilterPad atrim_outputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_AUDIO,
- .request_frame = request_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
},
{ NULL }
};
@@ -396,12 +364,9 @@ static const AVFilterPad atrim_outputs[] = {
AVFilter ff_af_atrim = {
.name = "atrim",
.description = NULL_IF_CONFIG_SMALL("Pick one continuous section from the input, drop the rest."),
-
.init = init,
-
.priv_size = sizeof(TrimContext),
.priv_class = &atrim_class,
-
.inputs = atrim_inputs,
.outputs = atrim_outputs,
};
diff --git a/libavfilter/unsharp.h b/libavfilter/unsharp.h
new file mode 100644
index 0000000000..fc651c0654
--- /dev/null
+++ b/libavfilter/unsharp.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2013 Wei Gao <weigao@multicorewareinc.com>
+ * Copyright (C) 2013 Lenny Wang
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_UNSHARP_H
+#define AVFILTER_UNSHARP_H
+
+#include "config.h"
+#include "avfilter.h"
+#if CONFIG_OPENCL
+#include "libavutil/opencl.h"
+#endif
+
+#define MIN_MATRIX_SIZE 3
+#define MAX_MATRIX_SIZE 63
+
+#if CONFIG_OPENCL
+
+typedef struct {
+ cl_command_queue command_queue;
+ cl_program program;
+ cl_kernel kernel_default;
+ cl_kernel kernel_luma;
+ cl_kernel kernel_chroma;
+ cl_mem cl_luma_mask;
+ cl_mem cl_chroma_mask;
+ cl_mem cl_luma_mask_x;
+ cl_mem cl_chroma_mask_x;
+ cl_mem cl_luma_mask_y;
+ cl_mem cl_chroma_mask_y;
+ int in_plane_size[8];
+ int out_plane_size[8];
+ int plane_num;
+ cl_mem cl_inbuf;
+ size_t cl_inbuf_size;
+ cl_mem cl_outbuf;
+ size_t cl_outbuf_size;
+ int use_fast_kernels;
+} UnsharpOpenclContext;
+
+#endif
+
+typedef struct UnsharpFilterParam {
+ int msize_x; ///< matrix width
+ int msize_y; ///< matrix height
+ int amount; ///< effect amount
+ int steps_x; ///< horizontal step count
+ int steps_y; ///< vertical step count
+ int scalebits; ///< bits to shift pixel
+ int32_t halfscale; ///< amount to add to pixel
+ uint32_t *sc[MAX_MATRIX_SIZE - 1]; ///< finite state machine storage
+} UnsharpFilterParam;
+
+typedef struct UnsharpContext {
+ const AVClass *class;
+ int lmsize_x, lmsize_y, cmsize_x, cmsize_y;
+ float lamount, camount;
+ UnsharpFilterParam luma; ///< luma parameters (width, height, amount)
+ UnsharpFilterParam chroma; ///< chroma parameters (width, height, amount)
+ int hsub, vsub;
+ int opencl;
+#if CONFIG_OPENCL
+ UnsharpOpenclContext opencl_ctx;
+#endif
+ int (* apply_unsharp)(AVFilterContext *ctx, AVFrame *in, AVFrame *out);
+} UnsharpContext;
+
+#endif /* AVFILTER_UNSHARP_H */
diff --git a/libavfilter/unsharp_opencl.c b/libavfilter/unsharp_opencl.c
new file mode 100644
index 0000000000..d84920c590
--- /dev/null
+++ b/libavfilter/unsharp_opencl.c
@@ -0,0 +1,422 @@
+/*
+ * Copyright (C) 2013 Wei Gao <weigao@multicorewareinc.com>
+ * Copyright (C) 2013 Lenny Wang
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * unsharp input video
+ */
+
+#include "unsharp_opencl.h"
+#include "libavutil/common.h"
+#include "libavutil/opencl_internal.h"
+
+#define PLANE_NUM 3
+#define ROUND_TO_16(a) (((((a) - 1)/16)+1)*16)
+
+static inline void add_mask_counter(uint32_t *dst, uint32_t *counter1, uint32_t *counter2, int len)
+{
+ int i;
+ for (i = 0; i < len; i++) {
+ dst[i] = counter1[i] + counter2[i];
+ }
+}
+
+static int compute_mask(int step, uint32_t *mask)
+{
+ int i, z, ret = 0;
+ int counter_size = sizeof(uint32_t) * (2 * step + 1);
+ uint32_t *temp1_counter, *temp2_counter, **counter;
+ temp1_counter = av_mallocz(counter_size);
+ if (!temp1_counter) {
+ ret = AVERROR(ENOMEM);
+ goto end;
+ }
+ temp2_counter = av_mallocz(counter_size);
+ if (!temp2_counter) {
+ ret = AVERROR(ENOMEM);
+ goto end;
+ }
+ counter = av_mallocz_array(2 * step + 1, sizeof(uint32_t *));
+ if (!counter) {
+ ret = AVERROR(ENOMEM);
+ goto end;
+ }
+ for (i = 0; i < 2 * step + 1; i++) {
+ counter[i] = av_mallocz(counter_size);
+ if (!counter[i]) {
+ ret = AVERROR(ENOMEM);
+ goto end;
+ }
+ }
+ for (i = 0; i < 2 * step + 1; i++) {
+ memset(temp1_counter, 0, counter_size);
+ temp1_counter[i] = 1;
+ for (z = 0; z < step * 2; z += 2) {
+ add_mask_counter(temp2_counter, counter[z], temp1_counter, step * 2);
+ memcpy(counter[z], temp1_counter, counter_size);
+ add_mask_counter(temp1_counter, counter[z + 1], temp2_counter, step * 2);
+ memcpy(counter[z + 1], temp2_counter, counter_size);
+ }
+ }
+ memcpy(mask, temp1_counter, counter_size);
+end:
+ av_freep(&temp1_counter);
+ av_freep(&temp2_counter);
+ for (i = 0; i < 2 * step + 1; i++) {
+ av_freep(&counter[i]);
+ }
+ av_freep(&counter);
+ return ret;
+}
+
+static int copy_separable_masks(cl_mem cl_mask_x, cl_mem cl_mask_y, int step_x, int step_y)
+{
+ int ret = 0;
+ uint32_t *mask_x, *mask_y;
+ size_t size_mask_x = sizeof(uint32_t) * (2 * step_x + 1);
+ size_t size_mask_y = sizeof(uint32_t) * (2 * step_y + 1);
+ mask_x = av_mallocz_array(2 * step_x + 1, sizeof(uint32_t));
+ if (!mask_x) {
+ ret = AVERROR(ENOMEM);
+ goto end;
+ }
+ mask_y = av_mallocz_array(2 * step_y + 1, sizeof(uint32_t));
+ if (!mask_y) {
+ ret = AVERROR(ENOMEM);
+ goto end;
+ }
+
+ ret = compute_mask(step_x, mask_x);
+ if (ret < 0)
+ goto end;
+ ret = compute_mask(step_y, mask_y);
+ if (ret < 0)
+ goto end;
+
+ ret = av_opencl_buffer_write(cl_mask_x, (uint8_t *)mask_x, size_mask_x);
+ ret = av_opencl_buffer_write(cl_mask_y, (uint8_t *)mask_y, size_mask_y);
+end:
+ av_freep(&mask_x);
+ av_freep(&mask_y);
+
+ return ret;
+}
+
+static int generate_mask(AVFilterContext *ctx)
+{
+ cl_mem masks[4];
+ cl_mem mask_matrix[2];
+ int i, ret = 0, step_x[2], step_y[2];
+
+ UnsharpContext *unsharp = ctx->priv;
+ mask_matrix[0] = unsharp->opencl_ctx.cl_luma_mask;
+ mask_matrix[1] = unsharp->opencl_ctx.cl_chroma_mask;
+ masks[0] = unsharp->opencl_ctx.cl_luma_mask_x;
+ masks[1] = unsharp->opencl_ctx.cl_luma_mask_y;
+ masks[2] = unsharp->opencl_ctx.cl_chroma_mask_x;
+ masks[3] = unsharp->opencl_ctx.cl_chroma_mask_y;
+ step_x[0] = unsharp->luma.steps_x;
+ step_x[1] = unsharp->chroma.steps_x;
+ step_y[0] = unsharp->luma.steps_y;
+ step_y[1] = unsharp->chroma.steps_y;
+
+ /* use default kernel if any matrix dim larger than 8 due to limited local mem size */
+ if (step_x[0]>8 || step_x[1]>8 || step_y[0]>8 || step_y[1]>8)
+ unsharp->opencl_ctx.use_fast_kernels = 0;
+ else
+ unsharp->opencl_ctx.use_fast_kernels = 1;
+
+ if (!masks[0] || !masks[1] || !masks[2] || !masks[3]) {
+ av_log(ctx, AV_LOG_ERROR, "Luma mask and chroma mask should not be NULL\n");
+ return AVERROR(EINVAL);
+ }
+ if (!mask_matrix[0] || !mask_matrix[1]) {
+ av_log(ctx, AV_LOG_ERROR, "Luma mask and chroma mask should not be NULL\n");
+ return AVERROR(EINVAL);
+ }
+ for (i = 0; i < 2; i++) {
+ ret = copy_separable_masks(masks[2*i], masks[2*i+1], step_x[i], step_y[i]);
+ if (ret < 0)
+ return ret;
+ }
+ return ret;
+}
+
+int ff_opencl_apply_unsharp(AVFilterContext *ctx, AVFrame *in, AVFrame *out)
+{
+ int ret;
+ AVFilterLink *link = ctx->inputs[0];
+ UnsharpContext *unsharp = ctx->priv;
+ cl_int status;
+ FFOpenclParam kernel1 = {0};
+ FFOpenclParam kernel2 = {0};
+ int width = link->w;
+ int height = link->h;
+ int cw = AV_CEIL_RSHIFT(link->w, unsharp->hsub);
+ int ch = AV_CEIL_RSHIFT(link->h, unsharp->vsub);
+ size_t globalWorkSize1d = width * height + 2 * ch * cw;
+ size_t globalWorkSize2dLuma[2];
+ size_t globalWorkSize2dChroma[2];
+ size_t localWorkSize2d[2] = {16, 16};
+
+ if (unsharp->opencl_ctx.use_fast_kernels) {
+ globalWorkSize2dLuma[0] = (size_t)ROUND_TO_16(width);
+ globalWorkSize2dLuma[1] = (size_t)ROUND_TO_16(height);
+ globalWorkSize2dChroma[0] = (size_t)ROUND_TO_16(cw);
+ globalWorkSize2dChroma[1] = (size_t)(2*ROUND_TO_16(ch));
+
+ kernel1.ctx = ctx;
+ kernel1.kernel = unsharp->opencl_ctx.kernel_luma;
+ ret = avpriv_opencl_set_parameter(&kernel1,
+ FF_OPENCL_PARAM_INFO(unsharp->opencl_ctx.cl_inbuf),
+ FF_OPENCL_PARAM_INFO(unsharp->opencl_ctx.cl_outbuf),
+ FF_OPENCL_PARAM_INFO(unsharp->opencl_ctx.cl_luma_mask_x),
+ FF_OPENCL_PARAM_INFO(unsharp->opencl_ctx.cl_luma_mask_y),
+ FF_OPENCL_PARAM_INFO(unsharp->luma.amount),
+ FF_OPENCL_PARAM_INFO(unsharp->luma.scalebits),
+ FF_OPENCL_PARAM_INFO(unsharp->luma.halfscale),
+ FF_OPENCL_PARAM_INFO(in->linesize[0]),
+ FF_OPENCL_PARAM_INFO(out->linesize[0]),
+ FF_OPENCL_PARAM_INFO(width),
+ FF_OPENCL_PARAM_INFO(height),
+ NULL);
+ if (ret < 0)
+ return ret;
+
+ kernel2.ctx = ctx;
+ kernel2.kernel = unsharp->opencl_ctx.kernel_chroma;
+ ret = avpriv_opencl_set_parameter(&kernel2,
+ FF_OPENCL_PARAM_INFO(unsharp->opencl_ctx.cl_inbuf),
+ FF_OPENCL_PARAM_INFO(unsharp->opencl_ctx.cl_outbuf),
+ FF_OPENCL_PARAM_INFO(unsharp->opencl_ctx.cl_chroma_mask_x),
+ FF_OPENCL_PARAM_INFO(unsharp->opencl_ctx.cl_chroma_mask_y),
+ FF_OPENCL_PARAM_INFO(unsharp->chroma.amount),
+ FF_OPENCL_PARAM_INFO(unsharp->chroma.scalebits),
+ FF_OPENCL_PARAM_INFO(unsharp->chroma.halfscale),
+ FF_OPENCL_PARAM_INFO(in->linesize[0]),
+ FF_OPENCL_PARAM_INFO(in->linesize[1]),
+ FF_OPENCL_PARAM_INFO(out->linesize[0]),
+ FF_OPENCL_PARAM_INFO(out->linesize[1]),
+ FF_OPENCL_PARAM_INFO(link->w),
+ FF_OPENCL_PARAM_INFO(link->h),
+ FF_OPENCL_PARAM_INFO(cw),
+ FF_OPENCL_PARAM_INFO(ch),
+ NULL);
+ if (ret < 0)
+ return ret;
+ status = clEnqueueNDRangeKernel(unsharp->opencl_ctx.command_queue,
+ unsharp->opencl_ctx.kernel_luma, 2, NULL,
+ globalWorkSize2dLuma, localWorkSize2d, 0, NULL, NULL);
+ status |=clEnqueueNDRangeKernel(unsharp->opencl_ctx.command_queue,
+ unsharp->opencl_ctx.kernel_chroma, 2, NULL,
+ globalWorkSize2dChroma, localWorkSize2d, 0, NULL, NULL);
+ if (status != CL_SUCCESS) {
+ av_log(ctx, AV_LOG_ERROR, "OpenCL run kernel error occurred: %s\n", av_opencl_errstr(status));
+ return AVERROR_EXTERNAL;
+ }
+ } else { /* use default kernel */
+ kernel1.ctx = ctx;
+ kernel1.kernel = unsharp->opencl_ctx.kernel_default;
+
+ ret = avpriv_opencl_set_parameter(&kernel1,
+ FF_OPENCL_PARAM_INFO(unsharp->opencl_ctx.cl_inbuf),
+ FF_OPENCL_PARAM_INFO(unsharp->opencl_ctx.cl_outbuf),
+ FF_OPENCL_PARAM_INFO(unsharp->opencl_ctx.cl_luma_mask),
+ FF_OPENCL_PARAM_INFO(unsharp->opencl_ctx.cl_chroma_mask),
+ FF_OPENCL_PARAM_INFO(unsharp->luma.amount),
+ FF_OPENCL_PARAM_INFO(unsharp->chroma.amount),
+ FF_OPENCL_PARAM_INFO(unsharp->luma.steps_x),
+ FF_OPENCL_PARAM_INFO(unsharp->luma.steps_y),
+ FF_OPENCL_PARAM_INFO(unsharp->chroma.steps_x),
+ FF_OPENCL_PARAM_INFO(unsharp->chroma.steps_y),
+ FF_OPENCL_PARAM_INFO(unsharp->luma.scalebits),
+ FF_OPENCL_PARAM_INFO(unsharp->chroma.scalebits),
+ FF_OPENCL_PARAM_INFO(unsharp->luma.halfscale),
+ FF_OPENCL_PARAM_INFO(unsharp->chroma.halfscale),
+ FF_OPENCL_PARAM_INFO(in->linesize[0]),
+ FF_OPENCL_PARAM_INFO(in->linesize[1]),
+ FF_OPENCL_PARAM_INFO(out->linesize[0]),
+ FF_OPENCL_PARAM_INFO(out->linesize[1]),
+ FF_OPENCL_PARAM_INFO(link->h),
+ FF_OPENCL_PARAM_INFO(link->w),
+ FF_OPENCL_PARAM_INFO(ch),
+ FF_OPENCL_PARAM_INFO(cw),
+ NULL);
+ if (ret < 0)
+ return ret;
+ status = clEnqueueNDRangeKernel(unsharp->opencl_ctx.command_queue,
+ unsharp->opencl_ctx.kernel_default, 1, NULL,
+ &globalWorkSize1d, NULL, 0, NULL, NULL);
+ if (status != CL_SUCCESS) {
+ av_log(ctx, AV_LOG_ERROR, "OpenCL run kernel error occurred: %s\n", av_opencl_errstr(status));
+ return AVERROR_EXTERNAL;
+ }
+ }
+ //blocking map is suffficient, no need for clFinish
+ //clFinish(unsharp->opencl_ctx.command_queue);
+
+ return av_opencl_buffer_read_image(out->data, unsharp->opencl_ctx.out_plane_size,
+ unsharp->opencl_ctx.plane_num, unsharp->opencl_ctx.cl_outbuf,
+ unsharp->opencl_ctx.cl_outbuf_size);
+}
+
+int ff_opencl_unsharp_init(AVFilterContext *ctx)
+{
+ int ret = 0;
+ char build_opts[96];
+ UnsharpContext *unsharp = ctx->priv;
+ ret = av_opencl_init(NULL);
+ if (ret < 0)
+ return ret;
+ ret = av_opencl_buffer_create(&unsharp->opencl_ctx.cl_luma_mask,
+ sizeof(uint32_t) * (2 * unsharp->luma.steps_x + 1) * (2 * unsharp->luma.steps_y + 1),
+ CL_MEM_READ_ONLY, NULL);
+ if (ret < 0)
+ return ret;
+ ret = av_opencl_buffer_create(&unsharp->opencl_ctx.cl_chroma_mask,
+ sizeof(uint32_t) * (2 * unsharp->chroma.steps_x + 1) * (2 * unsharp->chroma.steps_y + 1),
+ CL_MEM_READ_ONLY, NULL);
+ // separable filters
+ if (ret < 0)
+ return ret;
+ ret = av_opencl_buffer_create(&unsharp->opencl_ctx.cl_luma_mask_x,
+ sizeof(uint32_t) * (2 * unsharp->luma.steps_x + 1),
+ CL_MEM_READ_ONLY, NULL);
+ if (ret < 0)
+ return ret;
+ ret = av_opencl_buffer_create(&unsharp->opencl_ctx.cl_luma_mask_y,
+ sizeof(uint32_t) * (2 * unsharp->luma.steps_y + 1),
+ CL_MEM_READ_ONLY, NULL);
+ if (ret < 0)
+ return ret;
+ ret = av_opencl_buffer_create(&unsharp->opencl_ctx.cl_chroma_mask_x,
+ sizeof(uint32_t) * (2 * unsharp->chroma.steps_x + 1),
+ CL_MEM_READ_ONLY, NULL);
+ if (ret < 0)
+ return ret;
+ ret = av_opencl_buffer_create(&unsharp->opencl_ctx.cl_chroma_mask_y,
+ sizeof(uint32_t) * (2 * unsharp->chroma.steps_y + 1),
+ CL_MEM_READ_ONLY, NULL);
+ if (ret < 0)
+ return ret;
+ ret = generate_mask(ctx);
+ if (ret < 0)
+ return ret;
+ unsharp->opencl_ctx.plane_num = PLANE_NUM;
+ unsharp->opencl_ctx.command_queue = av_opencl_get_command_queue();
+ if (!unsharp->opencl_ctx.command_queue) {
+ av_log(ctx, AV_LOG_ERROR, "Unable to get OpenCL command queue in filter 'unsharp'\n");
+ return AVERROR(EINVAL);
+ }
+ snprintf(build_opts, 96, "-D LU_RADIUS_X=%d -D LU_RADIUS_Y=%d -D CH_RADIUS_X=%d -D CH_RADIUS_Y=%d",
+ 2*unsharp->luma.steps_x+1, 2*unsharp->luma.steps_y+1, 2*unsharp->chroma.steps_x+1, 2*unsharp->chroma.steps_y+1);
+ unsharp->opencl_ctx.program = av_opencl_compile("unsharp", build_opts);
+ if (!unsharp->opencl_ctx.program) {
+ av_log(ctx, AV_LOG_ERROR, "OpenCL failed to compile program 'unsharp'\n");
+ return AVERROR(EINVAL);
+ }
+ if (unsharp->opencl_ctx.use_fast_kernels) {
+ if (!unsharp->opencl_ctx.kernel_luma) {
+ unsharp->opencl_ctx.kernel_luma = clCreateKernel(unsharp->opencl_ctx.program, "unsharp_luma", &ret);
+ if (ret != CL_SUCCESS) {
+ av_log(ctx, AV_LOG_ERROR, "OpenCL failed to create kernel 'unsharp_luma'\n");
+ return ret;
+ }
+ }
+ if (!unsharp->opencl_ctx.kernel_chroma) {
+ unsharp->opencl_ctx.kernel_chroma = clCreateKernel(unsharp->opencl_ctx.program, "unsharp_chroma", &ret);
+ if (ret < 0) {
+ av_log(ctx, AV_LOG_ERROR, "OpenCL failed to create kernel 'unsharp_chroma'\n");
+ return ret;
+ }
+ }
+ }
+ else {
+ if (!unsharp->opencl_ctx.kernel_default) {
+ unsharp->opencl_ctx.kernel_default = clCreateKernel(unsharp->opencl_ctx.program, "unsharp_default", &ret);
+ if (ret < 0) {
+ av_log(ctx, AV_LOG_ERROR, "OpenCL failed to create kernel 'unsharp_default'\n");
+ return ret;
+ }
+ }
+ }
+ return ret;
+}
+
+void ff_opencl_unsharp_uninit(AVFilterContext *ctx)
+{
+ UnsharpContext *unsharp = ctx->priv;
+ av_opencl_buffer_release(&unsharp->opencl_ctx.cl_inbuf);
+ av_opencl_buffer_release(&unsharp->opencl_ctx.cl_outbuf);
+ av_opencl_buffer_release(&unsharp->opencl_ctx.cl_luma_mask);
+ av_opencl_buffer_release(&unsharp->opencl_ctx.cl_chroma_mask);
+ av_opencl_buffer_release(&unsharp->opencl_ctx.cl_luma_mask_x);
+ av_opencl_buffer_release(&unsharp->opencl_ctx.cl_chroma_mask_x);
+ av_opencl_buffer_release(&unsharp->opencl_ctx.cl_luma_mask_y);
+ av_opencl_buffer_release(&unsharp->opencl_ctx.cl_chroma_mask_y);
+ clReleaseKernel(unsharp->opencl_ctx.kernel_default);
+ clReleaseKernel(unsharp->opencl_ctx.kernel_luma);
+ clReleaseKernel(unsharp->opencl_ctx.kernel_chroma);
+ clReleaseProgram(unsharp->opencl_ctx.program);
+ unsharp->opencl_ctx.command_queue = NULL;
+ av_opencl_uninit();
+}
+
+int ff_opencl_unsharp_process_inout_buf(AVFilterContext *ctx, AVFrame *in, AVFrame *out)
+{
+ int ret = 0;
+ AVFilterLink *link = ctx->inputs[0];
+ UnsharpContext *unsharp = ctx->priv;
+ int ch = AV_CEIL_RSHIFT(link->h, unsharp->vsub);
+
+ if ((!unsharp->opencl_ctx.cl_inbuf) || (!unsharp->opencl_ctx.cl_outbuf)) {
+ unsharp->opencl_ctx.in_plane_size[0] = (in->linesize[0] * in->height);
+ unsharp->opencl_ctx.in_plane_size[1] = (in->linesize[1] * ch);
+ unsharp->opencl_ctx.in_plane_size[2] = (in->linesize[2] * ch);
+ unsharp->opencl_ctx.out_plane_size[0] = (out->linesize[0] * out->height);
+ unsharp->opencl_ctx.out_plane_size[1] = (out->linesize[1] * ch);
+ unsharp->opencl_ctx.out_plane_size[2] = (out->linesize[2] * ch);
+ unsharp->opencl_ctx.cl_inbuf_size = unsharp->opencl_ctx.in_plane_size[0] +
+ unsharp->opencl_ctx.in_plane_size[1] +
+ unsharp->opencl_ctx.in_plane_size[2];
+ unsharp->opencl_ctx.cl_outbuf_size = unsharp->opencl_ctx.out_plane_size[0] +
+ unsharp->opencl_ctx.out_plane_size[1] +
+ unsharp->opencl_ctx.out_plane_size[2];
+ if (!unsharp->opencl_ctx.cl_inbuf) {
+ ret = av_opencl_buffer_create(&unsharp->opencl_ctx.cl_inbuf,
+ unsharp->opencl_ctx.cl_inbuf_size,
+ CL_MEM_READ_ONLY, NULL);
+ if (ret < 0)
+ return ret;
+ }
+ if (!unsharp->opencl_ctx.cl_outbuf) {
+ ret = av_opencl_buffer_create(&unsharp->opencl_ctx.cl_outbuf,
+ unsharp->opencl_ctx.cl_outbuf_size,
+ CL_MEM_READ_WRITE, NULL);
+ if (ret < 0)
+ return ret;
+ }
+ }
+ return av_opencl_buffer_write_image(unsharp->opencl_ctx.cl_inbuf,
+ unsharp->opencl_ctx.cl_inbuf_size,
+ 0, in->data, unsharp->opencl_ctx.in_plane_size,
+ unsharp->opencl_ctx.plane_num);
+}
diff --git a/libavfilter/unsharp_opencl.h b/libavfilter/unsharp_opencl.h
new file mode 100644
index 0000000000..3aefab62e0
--- /dev/null
+++ b/libavfilter/unsharp_opencl.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2013 Wei Gao <weigao@multicorewareinc.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_UNSHARP_OPENCL_H
+#define AVFILTER_UNSHARP_OPENCL_H
+
+#include "unsharp.h"
+
+int ff_opencl_unsharp_init(AVFilterContext *ctx);
+
+void ff_opencl_unsharp_uninit(AVFilterContext *ctx);
+
+int ff_opencl_unsharp_process_inout_buf(AVFilterContext *ctx, AVFrame *in, AVFrame *out);
+
+int ff_opencl_apply_unsharp(AVFilterContext *ctx, AVFrame *in, AVFrame *out);
+
+#endif /* AVFILTER_UNSHARP_OPENCL_H */
diff --git a/libavfilter/unsharp_opencl_kernel.h b/libavfilter/unsharp_opencl_kernel.h
new file mode 100644
index 0000000000..307d0f1814
--- /dev/null
+++ b/libavfilter/unsharp_opencl_kernel.h
@@ -0,0 +1,342 @@
+/*
+ * Copyright (C) 2013 Wei Gao <weigao@multicorewareinc.com>
+ * Copyright (C) 2013 Lenny Wang
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_UNSHARP_OPENCL_KERNEL_H
+#define AVFILTER_UNSHARP_OPENCL_KERNEL_H
+
+#include "libavutil/opencl.h"
+
+const char *ff_kernel_unsharp_opencl = AV_OPENCL_KERNEL(
+inline unsigned char clip_uint8(int a)
+{
+ if (a & (~0xFF))
+ return (-a)>>31;
+ else
+ return a;
+}
+
+kernel void unsharp_luma(
+ global unsigned char *src,
+ global unsigned char *dst,
+ global int *mask_x,
+ global int *mask_y,
+ int amount,
+ int scalebits,
+ int halfscale,
+ int src_stride,
+ int dst_stride,
+ int width,
+ int height)
+{
+ int2 threadIdx, blockIdx, globalIdx;
+ threadIdx.x = get_local_id(0);
+ threadIdx.y = get_local_id(1);
+ blockIdx.x = get_group_id(0);
+ blockIdx.y = get_group_id(1);
+ globalIdx.x = get_global_id(0);
+ globalIdx.y = get_global_id(1);
+
+ if (!amount) {
+ if (globalIdx.x < width && globalIdx.y < height)
+ dst[globalIdx.x + globalIdx.y*dst_stride] = src[globalIdx.x + globalIdx.y*src_stride];
+ return;
+ }
+
+ local unsigned int l[32][32];
+ local unsigned int lcx[LU_RADIUS_X];
+ local unsigned int lcy[LU_RADIUS_Y];
+ int indexIx, indexIy, i, j;
+
+ //load up tile: actual workspace + halo of 8 points in x and y \n
+ for(i = 0; i <= 1; i++) {
+ indexIy = -8 + (blockIdx.y + i) * 16 + threadIdx.y;
+ indexIy = indexIy < 0 ? 0 : indexIy;
+ indexIy = indexIy >= height ? height - 1: indexIy;
+ for(j = 0; j <= 1; j++) {
+ indexIx = -8 + (blockIdx.x + j) * 16 + threadIdx.x;
+ indexIx = indexIx < 0 ? 0 : indexIx;
+ indexIx = indexIx >= width ? width - 1: indexIx;
+ l[i*16 + threadIdx.y][j*16 + threadIdx.x] = src[indexIy*src_stride + indexIx];
+ }
+ }
+
+ int indexL = threadIdx.y*16 + threadIdx.x;
+ if (indexL < LU_RADIUS_X)
+ lcx[indexL] = mask_x[indexL];
+ if (indexL < LU_RADIUS_Y)
+ lcy[indexL] = mask_y[indexL];
+ barrier(CLK_LOCAL_MEM_FENCE);
+
+ //needed for unsharp mask application in the end \n
+ int orig_value = (int)l[threadIdx.y + 8][threadIdx.x + 8];
+
+ int idx, idy, maskIndex;
+ int temp[2] = {0};
+ int steps_x = (LU_RADIUS_X-1)/2;
+ int steps_y = (LU_RADIUS_Y-1)/2;
+
+ // compute the actual workspace + left&right halos \n
+ \n#pragma unroll\n
+ for (j = 0; j <=1; j++) {
+ //extra work to cover left and right halos \n
+ idx = 16*j + threadIdx.x;
+ \n#pragma unroll\n
+ for (i = -steps_y; i <= steps_y; i++) {
+ idy = 8 + i + threadIdx.y;
+ maskIndex = (i + steps_y);
+ temp[j] += (int)l[idy][idx] * lcy[maskIndex];
+ }
+ }
+ barrier(CLK_LOCAL_MEM_FENCE);
+ //save results from the vertical filter in local memory \n
+ idy = 8 + threadIdx.y;
+ \n#pragma unroll\n
+ for (j = 0; j <=1; j++) {
+ idx = 16*j + threadIdx.x;
+ l[idy][idx] = temp[j];
+ }
+ barrier(CLK_LOCAL_MEM_FENCE);
+
+ //compute results with the horizontal filter \n
+ int sum = 0;
+ idy = 8 + threadIdx.y;
+ \n#pragma unroll\n
+ for (j = -steps_x; j <= steps_x; j++) {
+ idx = 8 + j + threadIdx.x;
+ maskIndex = j + steps_x;
+ sum += (int)l[idy][idx] * lcx[maskIndex];
+ }
+
+ int res = orig_value + (((orig_value - (int)((sum + halfscale) >> scalebits)) * amount) >> 16);
+
+ if (globalIdx.x < width && globalIdx.y < height)
+ dst[globalIdx.x + globalIdx.y*dst_stride] = clip_uint8(res);
+}
+
+kernel void unsharp_chroma(
+ global unsigned char *src_y,
+ global unsigned char *dst_y,
+ global int *mask_x,
+ global int *mask_y,
+ int amount,
+ int scalebits,
+ int halfscale,
+ int src_stride_lu,
+ int src_stride_ch,
+ int dst_stride_lu,
+ int dst_stride_ch,
+ int width,
+ int height,
+ int cw,
+ int ch)
+{
+ global unsigned char *dst_u = dst_y + height * dst_stride_lu;
+ global unsigned char *dst_v = dst_u + ch * dst_stride_ch;
+ global unsigned char *src_u = src_y + height * src_stride_lu;
+ global unsigned char *src_v = src_u + ch * src_stride_ch;
+ int2 threadIdx, blockIdx, globalIdx;
+ threadIdx.x = get_local_id(0);
+ threadIdx.y = get_local_id(1);
+ blockIdx.x = get_group_id(0);
+ blockIdx.y = get_group_id(1);
+ globalIdx.x = get_global_id(0);
+ globalIdx.y = get_global_id(1);
+ int padch = get_global_size(1)/2;
+ global unsigned char *src = globalIdx.y>=padch ? src_v : src_u;
+ global unsigned char *dst = globalIdx.y>=padch ? dst_v : dst_u;
+
+ blockIdx.y = globalIdx.y>=padch ? blockIdx.y - get_num_groups(1)/2 : blockIdx.y;
+ globalIdx.y = globalIdx.y>=padch ? globalIdx.y - padch : globalIdx.y;
+
+ if (!amount) {
+ if (globalIdx.x < cw && globalIdx.y < ch)
+ dst[globalIdx.x + globalIdx.y*dst_stride_ch] = src[globalIdx.x + globalIdx.y*src_stride_ch];
+ return;
+ }
+
+ local unsigned int l[32][32];
+ local unsigned int lcx[CH_RADIUS_X];
+ local unsigned int lcy[CH_RADIUS_Y];
+ int indexIx, indexIy, i, j;
+ for(i = 0; i <= 1; i++) {
+ indexIy = -8 + (blockIdx.y + i) * 16 + threadIdx.y;
+ indexIy = indexIy < 0 ? 0 : indexIy;
+ indexIy = indexIy >= ch ? ch - 1: indexIy;
+ for(j = 0; j <= 1; j++) {
+ indexIx = -8 + (blockIdx.x + j) * 16 + threadIdx.x;
+ indexIx = indexIx < 0 ? 0 : indexIx;
+ indexIx = indexIx >= cw ? cw - 1: indexIx;
+ l[i*16 + threadIdx.y][j*16 + threadIdx.x] = src[indexIy * src_stride_ch + indexIx];
+ }
+ }
+
+ int indexL = threadIdx.y*16 + threadIdx.x;
+ if (indexL < CH_RADIUS_X)
+ lcx[indexL] = mask_x[indexL];
+ if (indexL < CH_RADIUS_Y)
+ lcy[indexL] = mask_y[indexL];
+ barrier(CLK_LOCAL_MEM_FENCE);
+
+ int orig_value = (int)l[threadIdx.y + 8][threadIdx.x + 8];
+
+ int idx, idy, maskIndex;
+ int steps_x = CH_RADIUS_X/2;
+ int steps_y = CH_RADIUS_Y/2;
+ int temp[2] = {0,0};
+
+ \n#pragma unroll\n
+ for (j = 0; j <= 1; j++) {
+ idx = 16*j + threadIdx.x;
+ \n#pragma unroll\n
+ for (i = -steps_y; i <= steps_y; i++) {
+ idy = 8 + i + threadIdx.y;
+ maskIndex = i + steps_y;
+ temp[j] += (int)l[idy][idx] * lcy[maskIndex];
+ }
+ }
+
+ barrier(CLK_LOCAL_MEM_FENCE);
+ idy = 8 + threadIdx.y;
+ \n#pragma unroll\n
+ for (j = 0; j <= 1; j++) {
+ idx = 16*j + threadIdx.x;
+ l[idy][idx] = temp[j];
+ }
+ barrier(CLK_LOCAL_MEM_FENCE);
+
+ //compute results with the horizontal filter \n
+ int sum = 0;
+ idy = 8 + threadIdx.y;
+ \n#pragma unroll\n
+ for (j = -steps_x; j <= steps_x; j++) {
+ idx = 8 + j + threadIdx.x;
+ maskIndex = j + steps_x;
+ sum += (int)l[idy][idx] * lcx[maskIndex];
+ }
+
+ int res = orig_value + (((orig_value - (int)((sum + halfscale) >> scalebits)) * amount) >> 16);
+
+ if (globalIdx.x < cw && globalIdx.y < ch)
+ dst[globalIdx.x + globalIdx.y*dst_stride_ch] = clip_uint8(res);
+}
+
+kernel void unsharp_default(global unsigned char *src,
+ global unsigned char *dst,
+ const global unsigned int *mask_lu,
+ const global unsigned int *mask_ch,
+ int amount_lu,
+ int amount_ch,
+ int step_x_lu,
+ int step_y_lu,
+ int step_x_ch,
+ int step_y_ch,
+ int scalebits_lu,
+ int scalebits_ch,
+ int halfscale_lu,
+ int halfscale_ch,
+ int src_stride_lu,
+ int src_stride_ch,
+ int dst_stride_lu,
+ int dst_stride_ch,
+ int height,
+ int width,
+ int ch,
+ int cw)
+{
+ global unsigned char *dst_y = dst;
+ global unsigned char *dst_u = dst_y + height * dst_stride_lu;
+ global unsigned char *dst_v = dst_u + ch * dst_stride_ch;
+
+ global unsigned char *src_y = src;
+ global unsigned char *src_u = src_y + height * src_stride_lu;
+ global unsigned char *src_v = src_u + ch * src_stride_ch;
+
+ global unsigned char *temp_dst;
+ global unsigned char *temp_src;
+ const global unsigned int *temp_mask;
+ int global_id = get_global_id(0);
+ int i, j, x, y, temp_src_stride, temp_dst_stride, temp_height, temp_width, temp_steps_x, temp_steps_y,
+ temp_amount, temp_scalebits, temp_halfscale, sum, idx_x, idx_y, temp, res;
+ if (global_id < width * height) {
+ y = global_id / width;
+ x = global_id % width;
+ temp_dst = dst_y;
+ temp_src = src_y;
+ temp_src_stride = src_stride_lu;
+ temp_dst_stride = dst_stride_lu;
+ temp_height = height;
+ temp_width = width;
+ temp_steps_x = step_x_lu;
+ temp_steps_y = step_y_lu;
+ temp_mask = mask_lu;
+ temp_amount = amount_lu;
+ temp_scalebits = scalebits_lu;
+ temp_halfscale = halfscale_lu;
+ } else if ((global_id >= width * height) && (global_id < width * height + ch * cw)) {
+ y = (global_id - width * height) / cw;
+ x = (global_id - width * height) % cw;
+ temp_dst = dst_u;
+ temp_src = src_u;
+ temp_src_stride = src_stride_ch;
+ temp_dst_stride = dst_stride_ch;
+ temp_height = ch;
+ temp_width = cw;
+ temp_steps_x = step_x_ch;
+ temp_steps_y = step_y_ch;
+ temp_mask = mask_ch;
+ temp_amount = amount_ch;
+ temp_scalebits = scalebits_ch;
+ temp_halfscale = halfscale_ch;
+ } else {
+ y = (global_id - width * height - ch * cw) / cw;
+ x = (global_id - width * height - ch * cw) % cw;
+ temp_dst = dst_v;
+ temp_src = src_v;
+ temp_src_stride = src_stride_ch;
+ temp_dst_stride = dst_stride_ch;
+ temp_height = ch;
+ temp_width = cw;
+ temp_steps_x = step_x_ch;
+ temp_steps_y = step_y_ch;
+ temp_mask = mask_ch;
+ temp_amount = amount_ch;
+ temp_scalebits = scalebits_ch;
+ temp_halfscale = halfscale_ch;
+ }
+ if (temp_amount) {
+ sum = 0;
+ for (j = 0; j <= 2 * temp_steps_y; j++) {
+ idx_y = (y - temp_steps_y + j) <= 0 ? 0 : (y - temp_steps_y + j) >= temp_height ? temp_height-1 : y - temp_steps_y + j;
+ for (i = 0; i <= 2 * temp_steps_x; i++) {
+ idx_x = (x - temp_steps_x + i) <= 0 ? 0 : (x - temp_steps_x + i) >= temp_width ? temp_width-1 : x - temp_steps_x + i;
+ sum += temp_mask[i + j * (2 * temp_steps_x + 1)] * temp_src[idx_x + idx_y * temp_src_stride];
+ }
+ }
+ temp = (int)temp_src[x + y * temp_src_stride];
+ res = temp + (((temp - (int)((sum + temp_halfscale) >> temp_scalebits)) * temp_amount) >> 16);
+ temp_dst[x + y * temp_dst_stride] = clip_uint8(res);
+ } else {
+ temp_dst[x + y * temp_dst_stride] = temp_src[x + y * temp_src_stride];
+ }
+}
+);
+
+#endif /* AVFILTER_UNSHARP_OPENCL_KERNEL_H */
diff --git a/libavfilter/vaf_spectrumsynth.c b/libavfilter/vaf_spectrumsynth.c
new file mode 100644
index 0000000000..ed191a3e34
--- /dev/null
+++ b/libavfilter/vaf_spectrumsynth.c
@@ -0,0 +1,543 @@
+/*
+ * Copyright (c) 2016 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * SpectrumSynth filter
+ * @todo support float pixel format
+ */
+
+#include "libavcodec/avfft.h"
+#include "libavutil/avassert.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/ffmath.h"
+#include "libavutil/opt.h"
+#include "libavutil/parseutils.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "audio.h"
+#include "video.h"
+#include "internal.h"
+#include "window_func.h"
+
+enum MagnitudeScale { LINEAR, LOG, NB_SCALES };
+enum SlideMode { REPLACE, SCROLL, FULLFRAME, RSCROLL, NB_SLIDES };
+enum Orientation { VERTICAL, HORIZONTAL, NB_ORIENTATIONS };
+
+typedef struct SpectrumSynthContext {
+ const AVClass *class;
+ int sample_rate;
+ int channels;
+ int scale;
+ int sliding;
+ int win_func;
+ float overlap;
+ int orientation;
+
+ AVFrame *magnitude, *phase;
+ FFTContext *fft; ///< Fast Fourier Transform context
+ int fft_bits; ///< number of bits (FFT window size = 1<<fft_bits)
+ FFTComplex **fft_data; ///< bins holder for each (displayed) channels
+ int win_size;
+ int size;
+ int nb_freq;
+ int hop_size;
+ int start, end;
+ int xpos;
+ int xend;
+ int64_t pts;
+ float factor;
+ AVFrame *buffer;
+ float *window_func_lut; ///< Window function LUT
+} SpectrumSynthContext;
+
+#define OFFSET(x) offsetof(SpectrumSynthContext, x)
+#define A AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_AUDIO_PARAM
+#define V AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption spectrumsynth_options[] = {
+ { "sample_rate", "set sample rate", OFFSET(sample_rate), AV_OPT_TYPE_INT, {.i64 = 44100}, 15, INT_MAX, A },
+ { "channels", "set channels", OFFSET(channels), AV_OPT_TYPE_INT, {.i64 = 1}, 1, 8, A },
+ { "scale", "set input amplitude scale", OFFSET(scale), AV_OPT_TYPE_INT, {.i64 = LOG}, 0, NB_SCALES-1, V, "scale" },
+ { "lin", "linear", 0, AV_OPT_TYPE_CONST, {.i64=LINEAR}, 0, 0, V, "scale" },
+ { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=LOG}, 0, 0, V, "scale" },
+ { "slide", "set input sliding mode", OFFSET(sliding), AV_OPT_TYPE_INT, {.i64 = FULLFRAME}, 0, NB_SLIDES-1, V, "slide" },
+ { "replace", "consume old columns with new", 0, AV_OPT_TYPE_CONST, {.i64=REPLACE}, 0, 0, V, "slide" },
+ { "scroll", "consume only most right column", 0, AV_OPT_TYPE_CONST, {.i64=SCROLL}, 0, 0, V, "slide" },
+ { "fullframe", "consume full frames", 0, AV_OPT_TYPE_CONST, {.i64=FULLFRAME}, 0, 0, V, "slide" },
+ { "rscroll", "consume only most left column", 0, AV_OPT_TYPE_CONST, {.i64=RSCROLL}, 0, 0, V, "slide" },
+ { "win_func", "set window function", OFFSET(win_func), AV_OPT_TYPE_INT, {.i64 = 0}, 0, NB_WFUNC-1, A, "win_func" },
+ { "rect", "Rectangular", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_RECT}, 0, 0, A, "win_func" },
+ { "bartlett", "Bartlett", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BARTLETT}, 0, 0, A, "win_func" },
+ { "hann", "Hann", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HANNING}, 0, 0, A, "win_func" },
+ { "hanning", "Hanning", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HANNING}, 0, 0, A, "win_func" },
+ { "hamming", "Hamming", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HAMMING}, 0, 0, A, "win_func" },
+ { "sine", "Sine", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_SINE}, 0, 0, A, "win_func" },
+ { "overlap", "set window overlap", OFFSET(overlap), AV_OPT_TYPE_FLOAT, {.dbl=1}, 0, 1, A },
+ { "orientation", "set orientation", OFFSET(orientation), AV_OPT_TYPE_INT, {.i64=VERTICAL}, 0, NB_ORIENTATIONS-1, V, "orientation" },
+ { "vertical", NULL, 0, AV_OPT_TYPE_CONST, {.i64=VERTICAL}, 0, 0, V, "orientation" },
+ { "horizontal", NULL, 0, AV_OPT_TYPE_CONST, {.i64=HORIZONTAL}, 0, 0, V, "orientation" },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(spectrumsynth);
+
+static int query_formats(AVFilterContext *ctx)
+{
+ SpectrumSynthContext *s = ctx->priv;
+ AVFilterFormats *formats = NULL;
+ AVFilterChannelLayouts *layout = NULL;
+ AVFilterLink *magnitude = ctx->inputs[0];
+ AVFilterLink *phase = ctx->inputs[1];
+ AVFilterLink *outlink = ctx->outputs[0];
+ static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_NONE };
+ static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY16,
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUVJ444P,
+ AV_PIX_FMT_YUV444P16, AV_PIX_FMT_NONE };
+ int ret, sample_rates[] = { 48000, -1 };
+
+ formats = ff_make_format_list(sample_fmts);
+ if ((ret = ff_formats_ref (formats, &outlink->in_formats )) < 0 ||
+ (ret = ff_add_channel_layout (&layout, FF_COUNT2LAYOUT(s->channels))) < 0 ||
+ (ret = ff_channel_layouts_ref (layout , &outlink->in_channel_layouts)) < 0)
+ return ret;
+
+ sample_rates[0] = s->sample_rate;
+ formats = ff_make_format_list(sample_rates);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ if ((ret = ff_formats_ref(formats, &outlink->in_samplerates)) < 0)
+ return ret;
+
+ formats = ff_make_format_list(pix_fmts);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ if ((ret = ff_formats_ref(formats, &magnitude->out_formats)) < 0)
+ return ret;
+
+ formats = ff_make_format_list(pix_fmts);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ if ((ret = ff_formats_ref(formats, &phase->out_formats)) < 0)
+ return ret;
+
+ return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ SpectrumSynthContext *s = ctx->priv;
+ int width = ctx->inputs[0]->w;
+ int height = ctx->inputs[0]->h;
+ AVRational time_base = ctx->inputs[0]->time_base;
+ AVRational frame_rate = ctx->inputs[0]->frame_rate;
+ int i, ch, fft_bits;
+ float factor, overlap;
+
+ outlink->sample_rate = s->sample_rate;
+ outlink->time_base = (AVRational){1, s->sample_rate};
+
+ if (width != ctx->inputs[1]->w ||
+ height != ctx->inputs[1]->h) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Magnitude and Phase sizes differ (%dx%d vs %dx%d).\n",
+ width, height,
+ ctx->inputs[1]->w, ctx->inputs[1]->h);
+ return AVERROR_INVALIDDATA;
+ } else if (av_cmp_q(time_base, ctx->inputs[1]->time_base) != 0) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Magnitude and Phase time bases differ (%d/%d vs %d/%d).\n",
+ time_base.num, time_base.den,
+ ctx->inputs[1]->time_base.num,
+ ctx->inputs[1]->time_base.den);
+ return AVERROR_INVALIDDATA;
+ } else if (av_cmp_q(frame_rate, ctx->inputs[1]->frame_rate) != 0) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Magnitude and Phase framerates differ (%d/%d vs %d/%d).\n",
+ frame_rate.num, frame_rate.den,
+ ctx->inputs[1]->frame_rate.num,
+ ctx->inputs[1]->frame_rate.den);
+ return AVERROR_INVALIDDATA;
+ }
+
+ s->size = s->orientation == VERTICAL ? height / s->channels : width / s->channels;
+ s->xend = s->orientation == VERTICAL ? width : height;
+
+ for (fft_bits = 1; 1 << fft_bits < 2 * s->size; fft_bits++);
+
+ s->win_size = 1 << fft_bits;
+ s->nb_freq = 1 << (fft_bits - 1);
+
+ s->fft = av_fft_init(fft_bits, 1);
+ if (!s->fft) {
+ av_log(ctx, AV_LOG_ERROR, "Unable to create FFT context. "
+ "The window size might be too high.\n");
+ return AVERROR(EINVAL);
+ }
+ s->fft_data = av_calloc(s->channels, sizeof(*s->fft_data));
+ if (!s->fft_data)
+ return AVERROR(ENOMEM);
+ for (ch = 0; ch < s->channels; ch++) {
+ s->fft_data[ch] = av_calloc(s->win_size, sizeof(**s->fft_data));
+ if (!s->fft_data[ch])
+ return AVERROR(ENOMEM);
+ }
+
+ s->buffer = ff_get_audio_buffer(outlink, s->win_size * 2);
+ if (!s->buffer)
+ return AVERROR(ENOMEM);
+
+ /* pre-calc windowing function */
+ s->window_func_lut = av_realloc_f(s->window_func_lut, s->win_size,
+ sizeof(*s->window_func_lut));
+ if (!s->window_func_lut)
+ return AVERROR(ENOMEM);
+ ff_generate_window_func(s->window_func_lut, s->win_size, s->win_func, &overlap);
+ if (s->overlap == 1)
+ s->overlap = overlap;
+ s->hop_size = (1 - s->overlap) * s->win_size;
+ for (factor = 0, i = 0; i < s->win_size; i++) {
+ factor += s->window_func_lut[i] * s->window_func_lut[i];
+ }
+ s->factor = (factor / s->win_size) / FFMAX(1 / (1 - s->overlap) - 1, 1);
+
+ return 0;
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ SpectrumSynthContext *s = ctx->priv;
+ int ret;
+
+ if (!s->magnitude) {
+ ret = ff_request_frame(ctx->inputs[0]);
+ if (ret < 0)
+ return ret;
+ }
+ if (!s->phase) {
+ ret = ff_request_frame(ctx->inputs[1]);
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
+static void read16_fft_bin(SpectrumSynthContext *s,
+ int x, int y, int f, int ch)
+{
+ const int m_linesize = s->magnitude->linesize[0];
+ const int p_linesize = s->phase->linesize[0];
+ const uint16_t *m = (uint16_t *)(s->magnitude->data[0] + y * m_linesize);
+ const uint16_t *p = (uint16_t *)(s->phase->data[0] + y * p_linesize);
+ float magnitude, phase;
+
+ switch (s->scale) {
+ case LINEAR:
+ magnitude = m[x] / (double)UINT16_MAX;
+ break;
+ case LOG:
+ magnitude = ff_exp10(((m[x] / (double)UINT16_MAX) - 1.) * 6.);
+ break;
+ default:
+ av_assert0(0);
+ }
+ phase = ((p[x] / (double)UINT16_MAX) * 2. - 1.) * M_PI;
+
+ s->fft_data[ch][f].re = magnitude * cos(phase);
+ s->fft_data[ch][f].im = magnitude * sin(phase);
+}
+
+static void read8_fft_bin(SpectrumSynthContext *s,
+ int x, int y, int f, int ch)
+{
+ const int m_linesize = s->magnitude->linesize[0];
+ const int p_linesize = s->phase->linesize[0];
+ const uint8_t *m = (uint8_t *)(s->magnitude->data[0] + y * m_linesize);
+ const uint8_t *p = (uint8_t *)(s->phase->data[0] + y * p_linesize);
+ float magnitude, phase;
+
+ switch (s->scale) {
+ case LINEAR:
+ magnitude = m[x] / (double)UINT8_MAX;
+ break;
+ case LOG:
+ magnitude = ff_exp10(((m[x] / (double)UINT8_MAX) - 1.) * 6.);
+ break;
+ default:
+ av_assert0(0);
+ }
+ phase = ((p[x] / (double)UINT8_MAX) * 2. - 1.) * M_PI;
+
+ s->fft_data[ch][f].re = magnitude * cos(phase);
+ s->fft_data[ch][f].im = magnitude * sin(phase);
+}
+
+static void read_fft_data(AVFilterContext *ctx, int x, int h, int ch)
+{
+ SpectrumSynthContext *s = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
+ int start = h * (s->channels - ch) - 1;
+ int end = h * (s->channels - ch - 1);
+ int y, f;
+
+ switch (s->orientation) {
+ case VERTICAL:
+ switch (inlink->format) {
+ case AV_PIX_FMT_YUV444P16:
+ case AV_PIX_FMT_GRAY16:
+ for (y = start, f = 0; y >= end; y--, f++) {
+ read16_fft_bin(s, x, y, f, ch);
+ }
+ break;
+ case AV_PIX_FMT_YUVJ444P:
+ case AV_PIX_FMT_YUV444P:
+ case AV_PIX_FMT_GRAY8:
+ for (y = start, f = 0; y >= end; y--, f++) {
+ read8_fft_bin(s, x, y, f, ch);
+ }
+ break;
+ }
+ break;
+ case HORIZONTAL:
+ switch (inlink->format) {
+ case AV_PIX_FMT_YUV444P16:
+ case AV_PIX_FMT_GRAY16:
+ for (y = end, f = 0; y <= start; y++, f++) {
+ read16_fft_bin(s, y, x, f, ch);
+ }
+ break;
+ case AV_PIX_FMT_YUVJ444P:
+ case AV_PIX_FMT_YUV444P:
+ case AV_PIX_FMT_GRAY8:
+ for (y = end, f = 0; y <= start; y++, f++) {
+ read8_fft_bin(s, y, x, f, ch);
+ }
+ break;
+ }
+ break;
+ }
+}
+
+static void synth_window(AVFilterContext *ctx, int x)
+{
+ SpectrumSynthContext *s = ctx->priv;
+ const int h = s->size;
+ int nb = s->win_size;
+ int y, f, ch;
+
+ for (ch = 0; ch < s->channels; ch++) {
+ read_fft_data(ctx, x, h, ch);
+
+ for (y = h; y <= s->nb_freq; y++) {
+ s->fft_data[ch][y].re = 0;
+ s->fft_data[ch][y].im = 0;
+ }
+
+ for (y = s->nb_freq + 1, f = s->nb_freq - 1; y < nb; y++, f--) {
+ s->fft_data[ch][y].re = s->fft_data[ch][f].re;
+ s->fft_data[ch][y].im = -s->fft_data[ch][f].im;
+ }
+
+ av_fft_permute(s->fft, s->fft_data[ch]);
+ av_fft_calc(s->fft, s->fft_data[ch]);
+ }
+}
+
+static int try_push_frame(AVFilterContext *ctx, int x)
+{
+ SpectrumSynthContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ const float factor = s->factor;
+ int ch, n, i, ret;
+ int start, end;
+ AVFrame *out;
+
+ synth_window(ctx, x);
+
+ for (ch = 0; ch < s->channels; ch++) {
+ float *buf = (float *)s->buffer->extended_data[ch];
+ int j, k;
+
+ start = s->start;
+ end = s->end;
+ k = end;
+ for (i = 0, j = start; j < k && i < s->win_size; i++, j++) {
+ buf[j] += s->fft_data[ch][i].re;
+ }
+
+ for (; i < s->win_size; i++, j++) {
+ buf[j] = s->fft_data[ch][i].re;
+ }
+
+ start += s->hop_size;
+ end = j;
+
+ if (start >= s->win_size) {
+ start -= s->win_size;
+ end -= s->win_size;
+
+ if (ch == s->channels - 1) {
+ float *dst;
+ int c;
+
+ out = ff_get_audio_buffer(outlink, s->win_size);
+ if (!out) {
+ av_frame_free(&s->magnitude);
+ av_frame_free(&s->phase);
+ return AVERROR(ENOMEM);
+ }
+
+ out->pts = s->pts;
+ s->pts += s->win_size;
+ for (c = 0; c < s->channels; c++) {
+ dst = (float *)out->extended_data[c];
+ buf = (float *)s->buffer->extended_data[c];
+
+ for (n = 0; n < s->win_size; n++) {
+ dst[n] = buf[n] * factor;
+ }
+ memmove(buf, buf + s->win_size, s->win_size * 4);
+ }
+
+ ret = ff_filter_frame(outlink, out);
+ if (ret < 0)
+ return ret;
+ }
+ }
+ }
+
+ s->start = start;
+ s->end = end;
+
+ return 0;
+}
+
+static int try_push_frames(AVFilterContext *ctx)
+{
+ SpectrumSynthContext *s = ctx->priv;
+ int ret, x;
+
+ if (!(s->magnitude && s->phase))
+ return 0;
+
+ switch (s->sliding) {
+ case REPLACE:
+ ret = try_push_frame(ctx, s->xpos);
+ s->xpos++;
+ if (s->xpos >= s->xend)
+ s->xpos = 0;
+ break;
+ case SCROLL:
+ s->xpos = s->xend - 1;
+ ret = try_push_frame(ctx, s->xpos);
+ break;
+ case RSCROLL:
+ s->xpos = 0;
+ ret = try_push_frame(ctx, s->xpos);
+ break;
+ case FULLFRAME:
+ for (x = 0; x < s->xend; x++) {
+ ret = try_push_frame(ctx, x);
+ if (ret < 0)
+ break;
+ }
+ break;
+ default:
+ av_assert0(0);
+ }
+
+ av_frame_free(&s->magnitude);
+ av_frame_free(&s->phase);
+ return ret;
+}
+
+static int filter_frame_magnitude(AVFilterLink *inlink, AVFrame *magnitude)
+{
+ AVFilterContext *ctx = inlink->dst;
+ SpectrumSynthContext *s = ctx->priv;
+
+ s->magnitude = magnitude;
+ return try_push_frames(ctx);
+}
+
+static int filter_frame_phase(AVFilterLink *inlink, AVFrame *phase)
+{
+ AVFilterContext *ctx = inlink->dst;
+ SpectrumSynthContext *s = ctx->priv;
+
+ s->phase = phase;
+ return try_push_frames(ctx);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ SpectrumSynthContext *s = ctx->priv;
+ int i;
+
+ av_frame_free(&s->magnitude);
+ av_frame_free(&s->phase);
+ av_frame_free(&s->buffer);
+ av_fft_end(s->fft);
+ if (s->fft_data) {
+ for (i = 0; i < s->channels; i++)
+ av_freep(&s->fft_data[i]);
+ }
+ av_freep(&s->fft_data);
+ av_freep(&s->window_func_lut);
+}
+
+static const AVFilterPad spectrumsynth_inputs[] = {
+ {
+ .name = "magnitude",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame_magnitude,
+ .needs_fifo = 1,
+ },
+ {
+ .name = "phase",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame_phase,
+ .needs_fifo = 1,
+ },
+ { NULL }
+};
+
+static const AVFilterPad spectrumsynth_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_output,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_vaf_spectrumsynth = {
+ .name = "spectrumsynth",
+ .description = NULL_IF_CONFIG_SMALL("Convert input spectrum videos to audio output."),
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .priv_size = sizeof(SpectrumSynthContext),
+ .inputs = spectrumsynth_inputs,
+ .outputs = spectrumsynth_outputs,
+ .priv_class = &spectrumsynth_class,
+};
diff --git a/libavfilter/version.h b/libavfilter/version.h
index febfc8fa1d..a61ca32712 100644
--- a/libavfilter/version.h
+++ b/libavfilter/version.h
@@ -1,20 +1,20 @@
/*
* Version macros.
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -29,9 +29,9 @@
#include "libavutil/version.h"
-#define LIBAVFILTER_VERSION_MAJOR 6
-#define LIBAVFILTER_VERSION_MINOR 7
-#define LIBAVFILTER_VERSION_MICRO 0
+#define LIBAVFILTER_VERSION_MAJOR 6
+#define LIBAVFILTER_VERSION_MINOR 78
+#define LIBAVFILTER_VERSION_MICRO 100
#define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \
LIBAVFILTER_VERSION_MINOR, \
@@ -52,6 +52,9 @@
#ifndef FF_API_OLD_FILTER_OPTS
#define FF_API_OLD_FILTER_OPTS (LIBAVFILTER_VERSION_MAJOR < 7)
#endif
+#ifndef FF_API_OLD_FILTER_OPTS_ERROR
+#define FF_API_OLD_FILTER_OPTS_ERROR (LIBAVFILTER_VERSION_MAJOR < 7)
+#endif
#ifndef FF_API_AVFILTER_OPEN
#define FF_API_AVFILTER_OPEN (LIBAVFILTER_VERSION_MAJOR < 7)
#endif
@@ -64,5 +67,8 @@
#ifndef FF_API_NOCONST_GET_NAME
#define FF_API_NOCONST_GET_NAME (LIBAVFILTER_VERSION_MAJOR < 7)
#endif
+#ifndef FF_API_LAVR_OPTS
+#define FF_API_LAVR_OPTS (LIBAVFILTER_VERSION_MAJOR < 7)
+#endif
#endif /* AVFILTER_VERSION_H */
diff --git a/libavfilter/vf_alphamerge.c b/libavfilter/vf_alphamerge.c
new file mode 100644
index 0000000000..a8a8d56824
--- /dev/null
+++ b/libavfilter/vf_alphamerge.c
@@ -0,0 +1,217 @@
+/*
+ * Copyright (c) 2012 Steven Robertson
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * copy an alpha component from another video's luma
+ */
+
+#include <string.h>
+
+#include "libavutil/pixfmt.h"
+#include "avfilter.h"
+#include "bufferqueue.h"
+#include "drawutils.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+enum { Y, U, V, A };
+
+typedef struct {
+ int is_packed_rgb;
+ uint8_t rgba_map[4];
+ struct FFBufQueue queue_main;
+ struct FFBufQueue queue_alpha;
+} AlphaMergeContext;
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ AlphaMergeContext *merge = ctx->priv;
+ ff_bufqueue_discard_all(&merge->queue_main);
+ ff_bufqueue_discard_all(&merge->queue_alpha);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat main_fmts[] = {
+ AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA420P,
+ AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA, AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR,
+ AV_PIX_FMT_NONE
+ };
+ static const enum AVPixelFormat alpha_fmts[] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE };
+ AVFilterFormats *main_formats = NULL, *alpha_formats = NULL;
+ int ret;
+
+ if (!(main_formats = ff_make_format_list(main_fmts)) ||
+ !(alpha_formats = ff_make_format_list(alpha_fmts))) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+ if ((ret = ff_formats_ref(main_formats , &ctx->inputs[0]->out_formats)) < 0 ||
+ (ret = ff_formats_ref(alpha_formats, &ctx->inputs[1]->out_formats)) < 0 ||
+ (ret = ff_formats_ref(main_formats , &ctx->outputs[0]->in_formats)) < 0)
+ goto fail;
+ return 0;
+fail:
+ if (main_formats)
+ av_freep(&main_formats->formats);
+ av_freep(&main_formats);
+ if (alpha_formats)
+ av_freep(&alpha_formats->formats);
+ av_freep(&alpha_formats);
+ return ret;
+}
+
+static int config_input_main(AVFilterLink *inlink)
+{
+ AlphaMergeContext *merge = inlink->dst->priv;
+ merge->is_packed_rgb =
+ ff_fill_rgba_map(merge->rgba_map, inlink->format) >= 0;
+ return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ AVFilterLink *mainlink = ctx->inputs[0];
+ AVFilterLink *alphalink = ctx->inputs[1];
+ if (mainlink->w != alphalink->w || mainlink->h != alphalink->h) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Input frame sizes do not match (%dx%d vs %dx%d).\n",
+ mainlink->w, mainlink->h,
+ alphalink->w, alphalink->h);
+ return AVERROR(EINVAL);
+ }
+
+ outlink->w = mainlink->w;
+ outlink->h = mainlink->h;
+ outlink->time_base = mainlink->time_base;
+ outlink->sample_aspect_ratio = mainlink->sample_aspect_ratio;
+ outlink->frame_rate = mainlink->frame_rate;
+ return 0;
+}
+
+static void draw_frame(AVFilterContext *ctx,
+ AVFrame *main_buf,
+ AVFrame *alpha_buf)
+{
+ AlphaMergeContext *merge = ctx->priv;
+ int h = main_buf->height;
+
+ if (merge->is_packed_rgb) {
+ int x, y;
+ uint8_t *pin, *pout;
+ for (y = 0; y < h; y++) {
+ pin = alpha_buf->data[0] + y * alpha_buf->linesize[0];
+ pout = main_buf->data[0] + y * main_buf->linesize[0] + merge->rgba_map[A];
+ for (x = 0; x < main_buf->width; x++) {
+ *pout = *pin;
+ pin += 1;
+ pout += 4;
+ }
+ }
+ } else {
+ int y;
+ const int main_linesize = main_buf->linesize[A];
+ const int alpha_linesize = alpha_buf->linesize[Y];
+ for (y = 0; y < h && y < alpha_buf->height; y++) {
+ memcpy(main_buf->data[A] + y * main_linesize,
+ alpha_buf->data[Y] + y * alpha_linesize,
+ FFMIN(main_linesize, alpha_linesize));
+ }
+ }
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AlphaMergeContext *merge = ctx->priv;
+
+ int ret = 0;
+ int is_alpha = (inlink == ctx->inputs[1]);
+ struct FFBufQueue *queue =
+ (is_alpha ? &merge->queue_alpha : &merge->queue_main);
+ ff_bufqueue_add(ctx, queue, buf);
+
+ do {
+ AVFrame *main_buf, *alpha_buf;
+
+ if (!ff_bufqueue_peek(&merge->queue_main, 0) ||
+ !ff_bufqueue_peek(&merge->queue_alpha, 0)) break;
+
+ main_buf = ff_bufqueue_get(&merge->queue_main);
+ alpha_buf = ff_bufqueue_get(&merge->queue_alpha);
+
+ draw_frame(ctx, main_buf, alpha_buf);
+ ret = ff_filter_frame(ctx->outputs[0], main_buf);
+ av_frame_free(&alpha_buf);
+ } while (ret >= 0);
+ return ret;
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ AlphaMergeContext *merge = ctx->priv;
+ int in, ret;
+
+ in = ff_bufqueue_peek(&merge->queue_main, 0) ? 1 : 0;
+ ret = ff_request_frame(ctx->inputs[in]);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+static const AVFilterPad alphamerge_inputs[] = {
+ {
+ .name = "main",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_input_main,
+ .filter_frame = filter_frame,
+ .needs_writable = 1,
+ },{
+ .name = "alpha",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad alphamerge_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_alphamerge = {
+ .name = "alphamerge",
+ .description = NULL_IF_CONFIG_SMALL("Copy the luma value of the second "
+ "input into the alpha channel of the first input."),
+ .uninit = uninit,
+ .priv_size = sizeof(AlphaMergeContext),
+ .query_formats = query_formats,
+ .inputs = alphamerge_inputs,
+ .outputs = alphamerge_outputs,
+};
diff --git a/libavfilter/vf_aspect.c b/libavfilter/vf_aspect.c
index 2c2821318e..bf30824851 100644
--- a/libavfilter/vf_aspect.c
+++ b/libavfilter/vf_aspect.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2010 Bobby Bingham
-
- * This file is part of Libav.
*
- * Libav is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -37,9 +37,6 @@
#include "video.h"
static const char *const var_names[] = {
- "PI",
- "PHI",
- "E",
"w",
"h",
"a", "dar",
@@ -50,9 +47,6 @@ static const char *const var_names[] = {
};
enum var_name {
- VAR_PI,
- VAR_PHI,
- VAR_E,
VAR_W,
VAR_H,
VAR_A, VAR_DAR,
@@ -66,26 +60,35 @@ typedef struct AspectContext {
const AVClass *class;
AVRational dar;
AVRational sar;
+ int max;
#if FF_API_OLD_FILTER_OPTS
- float aspect_num, aspect_den;
+ float aspect_den;
#endif
char *ratio_expr;
} AspectContext;
-#if FF_API_OLD_FILTER_OPTS
static av_cold int init(AVFilterContext *ctx)
{
+#if FF_API_OLD_FILTER_OPTS
AspectContext *s = ctx->priv;
+ int ret;
- if (s->aspect_num > 0 && s->aspect_den > 0) {
- av_log(ctx, AV_LOG_WARNING, "This syntax is deprecated, use "
- "dar=<number> or dar=num/den.\n");
- s->sar = s->dar = av_d2q(s->aspect_num / s->aspect_den, INT_MAX);
+ if (s->ratio_expr && s->aspect_den > 0) {
+ double num;
+ av_log(ctx, AV_LOG_WARNING,
+ "num:den syntax is deprecated, please use num/den or named options instead\n");
+ ret = av_expr_parse_and_eval(&num, s->ratio_expr, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, 0, ctx);
+ if (ret < 0) {
+ av_log(ctx, AV_LOG_ERROR, "Unable to parse ratio numerator \"%s\"\n", s->ratio_expr);
+ return AVERROR(EINVAL);
+ }
+ s->sar = s->dar = av_d2q(num / s->aspect_den, s->max);
}
+#endif
return 0;
}
-#endif
static int filter_frame(AVFilterLink *link, AVFrame *frame)
{
@@ -96,7 +99,16 @@ static int filter_frame(AVFilterLink *link, AVFrame *frame)
}
#define OFFSET(x) offsetof(AspectContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static inline void compute_dar(AVRational *dar, AVRational sar, int w, int h)
+{
+ if (sar.num && sar.den) {
+ av_reduce(&dar->num, &dar->den, sar.num * w, sar.den * h, INT_MAX);
+ } else {
+ av_reduce(&dar->num, &dar->den, w, h, INT_MAX);
+ }
+}
static int get_aspect_ratio(AVFilterLink *inlink, AVRational *aspect_ratio)
{
@@ -106,9 +118,6 @@ static int get_aspect_ratio(AVFilterLink *inlink, AVRational *aspect_ratio)
double var_values[VARS_NB], res;
int ret;
- var_values[VAR_PI] = M_PI;
- var_values[VAR_PHI] = M_PHI;
- var_values[VAR_E] = M_E;
var_values[VAR_W] = inlink->w;
var_values[VAR_H] = inlink->h;
var_values[VAR_A] = (double) inlink->w / inlink->h;
@@ -119,27 +128,39 @@ static int get_aspect_ratio(AVFilterLink *inlink, AVRational *aspect_ratio)
var_values[VAR_VSUB] = 1 << desc->log2_chroma_h;
/* evaluate new aspect ratio*/
- if ((ret = av_expr_parse_and_eval(&res, s->ratio_expr,
+ ret = av_expr_parse_and_eval(&res, s->ratio_expr,
var_names, var_values,
- NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0) {
- av_log(NULL, AV_LOG_ERROR,
+ NULL, NULL, NULL, NULL, NULL, 0, ctx);
+ if (ret < 0) {
+ ret = av_parse_ratio(aspect_ratio, s->ratio_expr, s->max, 0, ctx);
+ } else
+ *aspect_ratio = av_d2q(res, s->max);
+
+ if (ret < 0) {
+ av_log(ctx, AV_LOG_ERROR,
"Error when evaluating the expression '%s'\n", s->ratio_expr);
return ret;
}
- *aspect_ratio = av_d2q(res, INT_MAX);
+ if (aspect_ratio->num < 0 || aspect_ratio->den <= 0) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Invalid string '%s' for aspect ratio\n", s->ratio_expr);
+ return AVERROR(EINVAL);
+ }
return 0;
}
#if CONFIG_SETDAR_FILTER
-/* for setdar filter, convert from frame aspect ratio to pixel aspect ratio */
+
static int setdar_config_props(AVFilterLink *inlink)
{
AspectContext *s = inlink->dst->priv;
AVRational dar;
+ AVRational old_dar;
+ AVRational old_sar = inlink->sample_aspect_ratio;
int ret;
#if FF_API_OLD_FILTER_OPTS
- if (!(s->aspect_num > 0 && s->aspect_den > 0)) {
+ if (!(s->ratio_expr && s->aspect_den > 0)) {
#endif
if ((ret = get_aspect_ratio(inlink, &s->dar)))
return ret;
@@ -150,7 +171,7 @@ static int setdar_config_props(AVFilterLink *inlink)
if (s->dar.num && s->dar.den) {
av_reduce(&s->sar.num, &s->sar.den,
s->dar.num * inlink->h,
- s->dar.den * inlink->w, 100);
+ s->dar.den * inlink->w, INT_MAX);
inlink->sample_aspect_ratio = s->sar;
dar = s->dar;
} else {
@@ -158,36 +179,33 @@ static int setdar_config_props(AVFilterLink *inlink)
dar = (AVRational){ inlink->w, inlink->h };
}
- av_log(inlink->dst, AV_LOG_VERBOSE, "w:%d h:%d -> dar:%d/%d sar:%d/%d\n",
- inlink->w, inlink->h, dar.num, dar.den,
- inlink->sample_aspect_ratio.num, inlink->sample_aspect_ratio.den);
+ compute_dar(&old_dar, old_sar, inlink->w, inlink->h);
+ av_log(inlink->dst, AV_LOG_VERBOSE, "w:%d h:%d dar:%d/%d sar:%d/%d -> dar:%d/%d sar:%d/%d\n",
+ inlink->w, inlink->h, old_dar.num, old_dar.den, old_sar.num, old_sar.den,
+ dar.num, dar.den, inlink->sample_aspect_ratio.num, inlink->sample_aspect_ratio.den);
return 0;
}
static const AVOption setdar_options[] = {
+ { "dar", "set display aspect ratio", OFFSET(ratio_expr), AV_OPT_TYPE_STRING, { .str = "0" }, .flags = FLAGS },
+ { "ratio", "set display aspect ratio", OFFSET(ratio_expr), AV_OPT_TYPE_STRING, { .str = "0" }, .flags = FLAGS },
+ { "r", "set display aspect ratio", OFFSET(ratio_expr), AV_OPT_TYPE_STRING, { .str = "0" }, .flags = FLAGS },
#if FF_API_OLD_FILTER_OPTS
- { "dar_num", NULL, OFFSET(aspect_num), AV_OPT_TYPE_FLOAT, { .dbl = 0 }, 0, FLT_MAX, FLAGS },
{ "dar_den", NULL, OFFSET(aspect_den), AV_OPT_TYPE_FLOAT, { .dbl = 0 }, 0, FLT_MAX, FLAGS },
#endif
- { "dar", "display aspect ratio", OFFSET(ratio_expr), AV_OPT_TYPE_STRING, { .str = "1" }, .flags = FLAGS },
- { NULL },
+ { "max", "set max value for nominator or denominator in the ratio", OFFSET(max), AV_OPT_TYPE_INT, {.i64=100}, 1, INT_MAX, FLAGS },
+ { NULL }
};
-static const AVClass setdar_class = {
- .class_name = "setdar",
- .item_name = av_default_item_name,
- .option = setdar_options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(setdar);
static const AVFilterPad avfilter_vf_setdar_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .config_props = setdar_config_props,
- .get_video_buffer = ff_null_get_video_buffer,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = setdar_config_props,
+ .filter_frame = filter_frame,
},
{ NULL }
};
@@ -201,31 +219,28 @@ static const AVFilterPad avfilter_vf_setdar_outputs[] = {
};
AVFilter ff_vf_setdar = {
- .name = "setdar",
+ .name = "setdar",
.description = NULL_IF_CONFIG_SMALL("Set the frame display aspect ratio."),
-
-#if FF_API_OLD_FILTER_OPTS
- .init = init,
-#endif
-
- .priv_size = sizeof(AspectContext),
- .priv_class = &setdar_class,
-
- .inputs = avfilter_vf_setdar_inputs,
-
- .outputs = avfilter_vf_setdar_outputs,
+ .init = init,
+ .priv_size = sizeof(AspectContext),
+ .priv_class = &setdar_class,
+ .inputs = avfilter_vf_setdar_inputs,
+ .outputs = avfilter_vf_setdar_outputs,
};
+
#endif /* CONFIG_SETDAR_FILTER */
#if CONFIG_SETSAR_FILTER
-/* for setdar filter, convert from frame aspect ratio to pixel aspect ratio */
+
static int setsar_config_props(AVFilterLink *inlink)
{
AspectContext *s = inlink->dst->priv;
+ AVRational old_sar = inlink->sample_aspect_ratio;
+ AVRational old_dar, dar;
int ret;
#if FF_API_OLD_FILTER_OPTS
- if (!(s->aspect_num > 0 && s->aspect_den > 0)) {
+ if (!(s->ratio_expr && s->aspect_den > 0)) {
#endif
if ((ret = get_aspect_ratio(inlink, &s->sar)))
return ret;
@@ -235,32 +250,34 @@ static int setsar_config_props(AVFilterLink *inlink)
inlink->sample_aspect_ratio = s->sar;
+ compute_dar(&old_dar, old_sar, inlink->w, inlink->h);
+ compute_dar(&dar, s->sar, inlink->w, inlink->h);
+ av_log(inlink->dst, AV_LOG_VERBOSE, "w:%d h:%d sar:%d/%d dar:%d/%d -> sar:%d/%d dar:%d/%d\n",
+ inlink->w, inlink->h, old_sar.num, old_sar.den, old_dar.num, old_dar.den,
+ inlink->sample_aspect_ratio.num, inlink->sample_aspect_ratio.den, dar.num, dar.den);
+
return 0;
}
static const AVOption setsar_options[] = {
+ { "sar", "set sample (pixel) aspect ratio", OFFSET(ratio_expr), AV_OPT_TYPE_STRING, { .str = "0" }, .flags = FLAGS },
+ { "ratio", "set sample (pixel) aspect ratio", OFFSET(ratio_expr), AV_OPT_TYPE_STRING, { .str = "0" }, .flags = FLAGS },
+ { "r", "set sample (pixel) aspect ratio", OFFSET(ratio_expr), AV_OPT_TYPE_STRING, { .str = "0" }, .flags = FLAGS },
#if FF_API_OLD_FILTER_OPTS
- { "sar_num", NULL, OFFSET(aspect_num), AV_OPT_TYPE_FLOAT, { .dbl = 0 }, 0, FLT_MAX, FLAGS },
{ "sar_den", NULL, OFFSET(aspect_den), AV_OPT_TYPE_FLOAT, { .dbl = 0 }, 0, FLT_MAX, FLAGS },
#endif
- { "sar", "sample (pixel) aspect ratio", OFFSET(ratio_expr), AV_OPT_TYPE_STRING, { .str = "1" }, .flags = FLAGS },
- { NULL },
+ { "max", "set max value for nominator or denominator in the ratio", OFFSET(max), AV_OPT_TYPE_INT, {.i64=100}, 1, INT_MAX, FLAGS },
+ { NULL }
};
-static const AVClass setsar_class = {
- .class_name = "setsar",
- .item_name = av_default_item_name,
- .option = setsar_options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(setsar);
static const AVFilterPad avfilter_vf_setsar_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .config_props = setsar_config_props,
- .get_video_buffer = ff_null_get_video_buffer,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = setsar_config_props,
+ .filter_frame = filter_frame,
},
{ NULL }
};
@@ -274,18 +291,13 @@ static const AVFilterPad avfilter_vf_setsar_outputs[] = {
};
AVFilter ff_vf_setsar = {
- .name = "setsar",
+ .name = "setsar",
.description = NULL_IF_CONFIG_SMALL("Set the pixel sample aspect ratio."),
-
-#if FF_API_OLD_FILTER_OPTS
- .init = init,
-#endif
-
- .priv_size = sizeof(AspectContext),
- .priv_class = &setsar_class,
-
- .inputs = avfilter_vf_setsar_inputs,
-
- .outputs = avfilter_vf_setsar_outputs,
+ .init = init,
+ .priv_size = sizeof(AspectContext),
+ .priv_class = &setsar_class,
+ .inputs = avfilter_vf_setsar_inputs,
+ .outputs = avfilter_vf_setsar_outputs,
};
+
#endif /* CONFIG_SETSAR_FILTER */
diff --git a/libavfilter/vf_atadenoise.c b/libavfilter/vf_atadenoise.c
new file mode 100644
index 0000000000..bf75d53d92
--- /dev/null
+++ b/libavfilter/vf_atadenoise.c
@@ -0,0 +1,434 @@
+/*
+ * Copyright (c) 2015 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Adaptive Temporal Averaging Denoiser,
+ * based on paper "Video Denoising Based on Adaptive Temporal Averaging" by
+ * David Bartovčak and Miroslav Vrankić
+ */
+
+#include "libavutil/imgutils.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+
+#define FF_BUFQUEUE_SIZE 129
+#include "bufferqueue.h"
+
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+#define SIZE FF_BUFQUEUE_SIZE
+
+typedef struct ATADenoiseContext {
+ const AVClass *class;
+
+ float fthra[4], fthrb[4];
+ int thra[4], thrb[4];
+
+ int planes;
+ int nb_planes;
+ int planewidth[4];
+ int planeheight[4];
+
+ struct FFBufQueue q;
+ void *data[4][SIZE];
+ int linesize[4][SIZE];
+ int size, mid;
+ int available;
+
+ int (*filter_slice)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs);
+} ATADenoiseContext;
+
+#define OFFSET(x) offsetof(ATADenoiseContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption atadenoise_options[] = {
+ { "0a", "set threshold A for 1st plane", OFFSET(fthra[0]), AV_OPT_TYPE_FLOAT, {.dbl=0.02}, 0, 0.3, FLAGS },
+ { "0b", "set threshold B for 1st plane", OFFSET(fthrb[0]), AV_OPT_TYPE_FLOAT, {.dbl=0.04}, 0, 5.0, FLAGS },
+ { "1a", "set threshold A for 2nd plane", OFFSET(fthra[1]), AV_OPT_TYPE_FLOAT, {.dbl=0.02}, 0, 0.3, FLAGS },
+ { "1b", "set threshold B for 2nd plane", OFFSET(fthrb[1]), AV_OPT_TYPE_FLOAT, {.dbl=0.04}, 0, 5.0, FLAGS },
+ { "2a", "set threshold A for 3rd plane", OFFSET(fthra[2]), AV_OPT_TYPE_FLOAT, {.dbl=0.02}, 0, 0.3, FLAGS },
+ { "2b", "set threshold B for 3rd plane", OFFSET(fthrb[2]), AV_OPT_TYPE_FLOAT, {.dbl=0.04}, 0, 5.0, FLAGS },
+ { "s", "set how many frames to use", OFFSET(size), AV_OPT_TYPE_INT, {.i64=9}, 5, SIZE, FLAGS },
+ { "p", "set what planes to filter", OFFSET(planes), AV_OPT_TYPE_FLAGS, {.i64=7}, 0, 15, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(atadenoise);
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pixel_fmts[] = {
+ AV_PIX_FMT_GRAY8,
+ AV_PIX_FMT_GRAY16,
+ AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
+ AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P,
+ AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P,
+ AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ444P,
+ AV_PIX_FMT_YUVJ411P,
+ AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
+ AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
+ AV_PIX_FMT_YUV440P10,
+ AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV420P12,
+ AV_PIX_FMT_YUV440P12,
+ AV_PIX_FMT_YUV444P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV420P14,
+ AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
+ AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
+ AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16,
+ AV_PIX_FMT_NONE
+ };
+ AVFilterFormats *formats = ff_make_format_list(pixel_fmts);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, formats);
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ ATADenoiseContext *s = ctx->priv;
+
+ if (!(s->size & 1)) {
+ av_log(ctx, AV_LOG_ERROR, "size %d is invalid. Must be an odd value.\n", s->size);
+ return AVERROR(EINVAL);
+ }
+ s->mid = s->size / 2 + 1;
+
+ return 0;
+}
+
+typedef struct ThreadData {
+ AVFrame *in, *out;
+} ThreadData;
+
+static int filter_slice8(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
+{
+ ATADenoiseContext *s = ctx->priv;
+ ThreadData *td = arg;
+ AVFrame *in = td->in;
+ AVFrame *out = td->out;
+ const int size = s->size;
+ const int mid = s->mid;
+ int p, x, y, i, j;
+
+ for (p = 0; p < s->nb_planes; p++) {
+ const int h = s->planeheight[p];
+ const int w = s->planewidth[p];
+ const int slice_start = (h * jobnr) / nb_jobs;
+ const int slice_end = (h * (jobnr+1)) / nb_jobs;
+ const uint8_t *src = in->data[p] + slice_start * in->linesize[p];
+ uint8_t *dst = out->data[p] + slice_start * out->linesize[p];
+ const int thra = s->thra[p];
+ const int thrb = s->thrb[p];
+ const uint8_t **data = (const uint8_t **)s->data[p];
+ const int *linesize = (const int *)s->linesize[p];
+ const uint8_t *srcf[SIZE];
+
+ if (!((1 << p) & s->planes)) {
+ av_image_copy_plane(dst, out->linesize[p], src, in->linesize[p],
+ w, slice_end - slice_start);
+ continue;
+ }
+
+ for (i = 0; i < size; i++)
+ srcf[i] = data[i] + slice_start * linesize[i];
+
+ for (y = slice_start; y < slice_end; y++) {
+ for (x = 0; x < w; x++) {
+ const int srcx = src[x];
+ unsigned lsumdiff = 0, rsumdiff = 0;
+ unsigned ldiff, rdiff;
+ unsigned sum = srcx;
+ int l = 0, r = 0;
+ int srcjx, srcix;
+
+ for (j = mid - 1, i = mid + 1; j >= 0 && i < size; j--, i++) {
+ srcjx = srcf[j][x];
+
+ ldiff = FFABS(srcx - srcjx);
+ lsumdiff += ldiff;
+ if (ldiff > thra ||
+ lsumdiff > thrb)
+ break;
+ l++;
+ sum += srcjx;
+
+ srcix = srcf[i][x];
+
+ rdiff = FFABS(srcx - srcix);
+ rsumdiff += rdiff;
+ if (rdiff > thra ||
+ rsumdiff > thrb)
+ break;
+ r++;
+ sum += srcix;
+ }
+
+ dst[x] = sum / (r + l + 1);
+ }
+
+ dst += out->linesize[p];
+ src += in->linesize[p];
+
+ for (i = 0; i < size; i++)
+ srcf[i] += linesize[i];
+ }
+ }
+
+ return 0;
+}
+
+static int filter_slice16(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
+{
+ ATADenoiseContext *s = ctx->priv;
+ ThreadData *td = arg;
+ AVFrame *in = td->in;
+ AVFrame *out = td->out;
+ const int size = s->size;
+ const int mid = s->mid;
+ int p, x, y, i, j;
+
+ for (p = 0; p < s->nb_planes; p++) {
+ const int h = s->planeheight[p];
+ const int w = s->planewidth[p];
+ const int slice_start = (h * jobnr) / nb_jobs;
+ const int slice_end = (h * (jobnr+1)) / nb_jobs;
+ const uint16_t *src = (uint16_t *)(in->data[p] + slice_start * in->linesize[p]);
+ uint16_t *dst = (uint16_t *)(out->data[p] + slice_start * out->linesize[p]);
+ const int thra = s->thra[p];
+ const int thrb = s->thrb[p];
+ const uint8_t **data = (const uint8_t **)s->data[p];
+ const int *linesize = (const int *)s->linesize[p];
+ const uint16_t *srcf[SIZE];
+
+ if (!((1 << p) & s->planes)) {
+ av_image_copy_plane((uint8_t *)dst, out->linesize[p], (uint8_t *)src, in->linesize[p],
+ w * 2, slice_end - slice_start);
+ continue;
+ }
+
+ for (i = 0; i < s->size; i++)
+ srcf[i] = (const uint16_t *)(data[i] + slice_start * linesize[i]);
+
+ for (y = slice_start; y < slice_end; y++) {
+ for (x = 0; x < w; x++) {
+ const int srcx = src[x];
+ unsigned lsumdiff = 0, rsumdiff = 0;
+ unsigned ldiff, rdiff;
+ unsigned sum = srcx;
+ int l = 0, r = 0;
+ int srcjx, srcix;
+
+ for (j = mid - 1, i = mid + 1; j >= 0 && i < size; j--, i++) {
+ srcjx = srcf[j][x];
+
+ ldiff = FFABS(srcx - srcjx);
+ lsumdiff += ldiff;
+ if (ldiff > thra ||
+ lsumdiff > thrb)
+ break;
+ l++;
+ sum += srcjx;
+
+ srcix = srcf[i][x];
+
+ rdiff = FFABS(srcx - srcix);
+ rsumdiff += rdiff;
+ if (rdiff > thra ||
+ rsumdiff > thrb)
+ break;
+ r++;
+ sum += srcix;
+ }
+
+ dst[x] = sum / (r + l + 1);
+ }
+
+ dst += out->linesize[p] / 2;
+ src += in->linesize[p] / 2;
+
+ for (i = 0; i < size; i++)
+ srcf[i] += linesize[i] / 2;
+ }
+ }
+
+ return 0;
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ AVFilterContext *ctx = inlink->dst;
+ ATADenoiseContext *s = ctx->priv;
+ int depth;
+
+ s->nb_planes = desc->nb_components;
+
+ s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
+ s->planeheight[0] = s->planeheight[3] = inlink->h;
+ s->planewidth[1] = s->planewidth[2] = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
+ s->planewidth[0] = s->planewidth[3] = inlink->w;
+
+ depth = desc->comp[0].depth;
+ if (depth == 8)
+ s->filter_slice = filter_slice8;
+ else
+ s->filter_slice = filter_slice16;
+
+ s->thra[0] = s->fthra[0] * (1 << depth) - 1;
+ s->thra[1] = s->fthra[1] * (1 << depth) - 1;
+ s->thra[2] = s->fthra[2] * (1 << depth) - 1;
+ s->thrb[0] = s->fthrb[0] * (1 << depth) - 1;
+ s->thrb[1] = s->fthrb[1] * (1 << depth) - 1;
+ s->thrb[2] = s->fthrb[2] * (1 << depth) - 1;
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ ATADenoiseContext *s = ctx->priv;
+ AVFrame *out, *in;
+ int i;
+
+ if (s->q.available != s->size) {
+ if (s->q.available < s->mid) {
+ for (i = 0; i < s->mid; i++) {
+ out = av_frame_clone(buf);
+ if (!out) {
+ av_frame_free(&buf);
+ return AVERROR(ENOMEM);
+ }
+ ff_bufqueue_add(ctx, &s->q, out);
+ }
+ }
+ if (s->q.available < s->size) {
+ ff_bufqueue_add(ctx, &s->q, buf);
+ s->available++;
+ }
+ return 0;
+ }
+
+ in = ff_bufqueue_peek(&s->q, s->mid);
+
+ if (!ctx->is_disabled) {
+ ThreadData td;
+
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&buf);
+ return AVERROR(ENOMEM);
+ }
+
+ for (i = 0; i < s->size; i++) {
+ AVFrame *frame = ff_bufqueue_peek(&s->q, i);
+
+ s->data[0][i] = frame->data[0];
+ s->data[1][i] = frame->data[1];
+ s->data[2][i] = frame->data[2];
+ s->linesize[0][i] = frame->linesize[0];
+ s->linesize[1][i] = frame->linesize[1];
+ s->linesize[2][i] = frame->linesize[2];
+ }
+
+ td.in = in; td.out = out;
+ ctx->internal->execute(ctx, s->filter_slice, &td, NULL,
+ FFMIN3(s->planeheight[1],
+ s->planeheight[2],
+ ff_filter_get_nb_threads(ctx)));
+ av_frame_copy_props(out, in);
+ } else {
+ out = av_frame_clone(in);
+ if (!out) {
+ av_frame_free(&buf);
+ return AVERROR(ENOMEM);
+ }
+ }
+
+ in = ff_bufqueue_get(&s->q);
+ av_frame_free(&in);
+ ff_bufqueue_add(ctx, &s->q, buf);
+
+ return ff_filter_frame(outlink, out);
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ ATADenoiseContext *s = ctx->priv;
+ int ret = 0;
+
+ ret = ff_request_frame(ctx->inputs[0]);
+
+ if (ret == AVERROR_EOF && !ctx->is_disabled && s->available) {
+ AVFrame *buf = av_frame_clone(ff_bufqueue_peek(&s->q, s->available));
+ if (!buf)
+ return AVERROR(ENOMEM);
+
+ ret = filter_frame(ctx->inputs[0], buf);
+ s->available--;
+ }
+
+ return ret;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ ATADenoiseContext *s = ctx->priv;
+
+ ff_bufqueue_discard_all(&s->q);
+}
+
+static const AVFilterPad inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ { NULL }
+};
+
+static const AVFilterPad outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_atadenoise = {
+ .name = "atadenoise",
+ .description = NULL_IF_CONFIG_SMALL("Apply an Adaptive Temporal Averaging Denoiser."),
+ .priv_size = sizeof(ATADenoiseContext),
+ .priv_class = &atadenoise_class,
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = inputs,
+ .outputs = outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
+};
diff --git a/libavfilter/vf_avgblur.c b/libavfilter/vf_avgblur.c
new file mode 100644
index 0000000000..2bb2ab1ed5
--- /dev/null
+++ b/libavfilter/vf_avgblur.c
@@ -0,0 +1,326 @@
+/*
+ * Copyright (c) 2016 Paul B Mahol
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "libavutil/imgutils.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+typedef struct AverageBlurContext {
+ const AVClass *class;
+
+ int radius;
+ int radiusV;
+ int planes;
+
+ int depth;
+ int planewidth[4];
+ int planeheight[4];
+ float *buffer;
+ int nb_planes;
+
+ int (*filter_horizontally)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs);
+ int (*filter_vertically)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs);
+} AverageBlurContext;
+
+#define OFFSET(x) offsetof(AverageBlurContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption avgblur_options[] = {
+ { "sizeX", "set horizontal size", OFFSET(radius), AV_OPT_TYPE_INT, {.i64=1}, 1, 1024, FLAGS },
+ { "planes", "set planes to filter", OFFSET(planes), AV_OPT_TYPE_INT, {.i64=0xF}, 0, 0xF, FLAGS },
+ { "sizeY", "set vertical size", OFFSET(radiusV), AV_OPT_TYPE_INT, {.i64=0}, 0, 1024, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(avgblur);
+
+typedef struct ThreadData {
+ int height;
+ int width;
+ uint8_t *ptr;
+ int linesize;
+} ThreadData;
+
+#define HORIZONTAL_FILTER(name, type) \
+static int filter_horizontally_##name(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)\
+{ \
+ AverageBlurContext *s = ctx->priv; \
+ ThreadData *td = arg; \
+ const int height = td->height; \
+ const int width = td->width; \
+ const int slice_start = (height * jobnr ) / nb_jobs; \
+ const int slice_end = (height * (jobnr+1)) / nb_jobs; \
+ const int radius = FFMIN(s->radius, width / 2); \
+ const int linesize = td->linesize / sizeof(type); \
+ float *buffer = s->buffer; \
+ const type *src; \
+ float *ptr; \
+ int y, x; \
+ \
+ /* Filter horizontally along each row */ \
+ for (y = slice_start; y < slice_end; y++) { \
+ float acc = 0; \
+ int count = 0; \
+ \
+ src = (const type *)td->ptr + linesize * y; \
+ ptr = buffer + width * y; \
+ \
+ for (x = 0; x < radius; x++) { \
+ acc += src[x]; \
+ } \
+ count += radius; \
+ \
+ for (x = 0; x <= radius; x++) { \
+ acc += src[x + radius]; \
+ count++; \
+ ptr[x] = acc / count; \
+ } \
+ \
+ for (; x < width - radius; x++) { \
+ acc += src[x + radius] - src[x - radius - 1]; \
+ ptr[x] = acc / count; \
+ } \
+ \
+ for (; x < width; x++) { \
+ acc -= src[x - radius]; \
+ count--; \
+ ptr[x] = acc / count; \
+ } \
+ } \
+ \
+ return 0; \
+}
+
+HORIZONTAL_FILTER(8, uint8_t)
+HORIZONTAL_FILTER(16, uint16_t)
+
+#define VERTICAL_FILTER(name, type) \
+static int filter_vertically_##name(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs) \
+{ \
+ AverageBlurContext *s = ctx->priv; \
+ ThreadData *td = arg; \
+ const int height = td->height; \
+ const int width = td->width; \
+ const int slice_start = (width * jobnr ) / nb_jobs; \
+ const int slice_end = (width * (jobnr+1)) / nb_jobs; \
+ const int radius = FFMIN(s->radiusV, height / 2); \
+ const int linesize = td->linesize / sizeof(type); \
+ type *buffer = (type *)td->ptr; \
+ const float *src; \
+ type *ptr; \
+ int i, x; \
+ \
+ /* Filter vertically along each column */ \
+ for (x = slice_start; x < slice_end; x++) { \
+ float acc = 0; \
+ int count = 0; \
+ \
+ ptr = buffer + x; \
+ src = s->buffer + x; \
+ \
+ for (i = 0; i < radius; i++) { \
+ acc += src[0]; \
+ src += width; \
+ } \
+ count += radius; \
+ \
+ src = s->buffer + x; \
+ ptr = buffer + x; \
+ for (i = 0; i <= radius; i++) { \
+ acc += src[(i + radius) * width]; \
+ count++; \
+ ptr[i * linesize] = acc / count; \
+ } \
+ \
+ for (; i < height - radius; i++) { \
+ acc += src[(i + radius) * width] - src[(i - radius - 1) * width]; \
+ ptr[i * linesize] = acc / count; \
+ } \
+ \
+ for (; i < height; i++) { \
+ acc -= src[(i - radius) * width]; \
+ count--; \
+ ptr[i * linesize] = acc / count; \
+ } \
+ } \
+ \
+ return 0; \
+}
+
+VERTICAL_FILTER(8, uint8_t)
+VERTICAL_FILTER(16, uint16_t)
+
+static int config_input(AVFilterLink *inlink)
+{
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ AverageBlurContext *s = inlink->dst->priv;
+
+ s->depth = desc->comp[0].depth;
+ s->planewidth[1] = s->planewidth[2] = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
+ s->planewidth[0] = s->planewidth[3] = inlink->w;
+ s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
+ s->planeheight[0] = s->planeheight[3] = inlink->h;
+
+ s->nb_planes = av_pix_fmt_count_planes(inlink->format);
+
+ s->buffer = av_malloc_array(inlink->w, inlink->h * sizeof(*s->buffer));
+ if (!s->buffer)
+ return AVERROR(ENOMEM);
+
+ if (s->radiusV <= 0) {
+ s->radiusV = s->radius;
+ }
+
+ if (s->depth == 8) {
+ s->filter_horizontally = filter_horizontally_8;
+ s->filter_vertically = filter_vertically_8;
+ } else {
+ s->filter_horizontally = filter_horizontally_16;
+ s->filter_vertically = filter_vertically_16;
+ }
+
+ return 0;
+}
+
+static void averageiir2d(AVFilterContext *ctx, AVFrame *in, AVFrame *out, int plane)
+{
+ AverageBlurContext *s = ctx->priv;
+ const int width = s->planewidth[plane];
+ const int height = s->planeheight[plane];
+ const int nb_threads = ff_filter_get_nb_threads(ctx);
+ ThreadData td;
+
+ td.width = width;
+ td.height = height;
+ td.ptr = in->data[plane];
+ td.linesize = in->linesize[plane];
+ ctx->internal->execute(ctx, s->filter_horizontally, &td, NULL, FFMIN(height, nb_threads));
+ td.ptr = out->data[plane];
+ td.linesize = out->linesize[plane];
+ ctx->internal->execute(ctx, s->filter_vertically, &td, NULL, FFMIN(width, nb_threads));
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P,
+ AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
+ AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
+ AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
+ AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
+ AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV440P12,
+ AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV444P14,
+ AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
+ AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9,
+ AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10,
+ AV_PIX_FMT_YUVA420P16, AV_PIX_FMT_YUVA422P16, AV_PIX_FMT_YUVA444P16,
+ AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
+ AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16,
+ AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRAP12, AV_PIX_FMT_GBRAP16,
+ AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY16,
+ AV_PIX_FMT_NONE
+ };
+
+ return ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AverageBlurContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFrame *out;
+ int plane;
+
+ if (av_frame_is_writable(in)) {
+ out = in;
+ } else {
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+ }
+
+ for (plane = 0; plane < s->nb_planes; plane++) {
+ const int height = s->planeheight[plane];
+ const int width = s->planewidth[plane];
+
+ if (!(s->planes & (1 << plane))) {
+ if (out != in)
+ av_image_copy_plane(out->data[plane], out->linesize[plane],
+ in->data[plane], in->linesize[plane],
+ width * ((s->depth + 7) / 8), height);
+ continue;
+ }
+
+ averageiir2d(ctx, in, out, plane);
+ }
+
+ if (out != in)
+ av_frame_free(&in);
+ return ff_filter_frame(outlink, out);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ AverageBlurContext *s = ctx->priv;
+
+ av_freep(&s->buffer);
+}
+
+static const AVFilterPad avgblur_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad avgblur_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_avgblur = {
+ .name = "avgblur",
+ .description = NULL_IF_CONFIG_SMALL("Apply Average Blur filter."),
+ .priv_size = sizeof(AverageBlurContext),
+ .priv_class = &avgblur_class,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = avgblur_inputs,
+ .outputs = avgblur_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
+};
diff --git a/libavfilter/vf_bbox.c b/libavfilter/vf_bbox.c
new file mode 100644
index 0000000000..86054b2483
--- /dev/null
+++ b/libavfilter/vf_bbox.c
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2012 Stefano Sabatini
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * bounding box detection filter
+ */
+
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/timestamp.h"
+#include "avfilter.h"
+#include "bbox.h"
+#include "internal.h"
+
+typedef struct {
+ const AVClass *class;
+ int min_val;
+} BBoxContext;
+
+#define OFFSET(x) offsetof(BBoxContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption bbox_options[] = {
+ { "min_val", "set minimum luminance value for bounding box", OFFSET(min_val), AV_OPT_TYPE_INT, { .i64 = 16 }, 0, 254, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(bbox);
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_YUV444P,
+ AV_PIX_FMT_YUV440P,
+ AV_PIX_FMT_YUV422P,
+ AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_NONE,
+ };
+
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+#define SET_META(key, value) \
+ av_dict_set_int(metadata, key, value, 0);
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
+{
+ AVFilterContext *ctx = inlink->dst;
+ BBoxContext *bbox = ctx->priv;
+ FFBoundingBox box;
+ int has_bbox, w, h;
+
+ has_bbox =
+ ff_calculate_bounding_box(&box,
+ frame->data[0], frame->linesize[0],
+ inlink->w, inlink->h, bbox->min_val);
+ w = box.x2 - box.x1 + 1;
+ h = box.y2 - box.y1 + 1;
+
+ av_log(ctx, AV_LOG_INFO,
+ "n:%"PRId64" pts:%s pts_time:%s", inlink->frame_count_out,
+ av_ts2str(frame->pts), av_ts2timestr(frame->pts, &inlink->time_base));
+
+ if (has_bbox) {
+ AVDictionary **metadata = avpriv_frame_get_metadatap(frame);
+
+ SET_META("lavfi.bbox.x1", box.x1)
+ SET_META("lavfi.bbox.x2", box.x2)
+ SET_META("lavfi.bbox.y1", box.y1)
+ SET_META("lavfi.bbox.y2", box.y2)
+ SET_META("lavfi.bbox.w", w)
+ SET_META("lavfi.bbox.h", h)
+
+ av_log(ctx, AV_LOG_INFO,
+ " x1:%d x2:%d y1:%d y2:%d w:%d h:%d"
+ " crop=%d:%d:%d:%d drawbox=%d:%d:%d:%d",
+ box.x1, box.x2, box.y1, box.y2, w, h,
+ w, h, box.x1, box.y1, /* crop params */
+ box.x1, box.y1, w, h); /* drawbox params */
+ }
+ av_log(ctx, AV_LOG_INFO, "\n");
+
+ return ff_filter_frame(inlink->dst->outputs[0], frame);
+}
+
+static const AVFilterPad bbox_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad bbox_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_bbox = {
+ .name = "bbox",
+ .description = NULL_IF_CONFIG_SMALL("Compute bounding box for each frame."),
+ .priv_size = sizeof(BBoxContext),
+ .priv_class = &bbox_class,
+ .query_formats = query_formats,
+ .inputs = bbox_inputs,
+ .outputs = bbox_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+};
diff --git a/libavfilter/vf_bitplanenoise.c b/libavfilter/vf_bitplanenoise.c
new file mode 100644
index 0000000000..82318a7061
--- /dev/null
+++ b/libavfilter/vf_bitplanenoise.c
@@ -0,0 +1,226 @@
+/*
+ * Copyright (c) 2016 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/opt.h"
+#include "libavutil/imgutils.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+typedef struct BPNContext {
+ const AVClass *class;
+
+ int bitplane;
+ int filter;
+
+ int nb_planes;
+ int planeheight[4];
+ int planewidth[4];
+ int depth;
+} BPNContext;
+
+#define OFFSET(x) offsetof(BPNContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption bitplanenoise_options[] = {
+ { "bitplane", "set bit plane to use for measuring noise", OFFSET(bitplane), AV_OPT_TYPE_INT, {.i64=1}, 1, 16, FLAGS},
+ { "filter", "show noisy pixels", OFFSET(filter), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(bitplanenoise);
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pixfmts[] = {
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_YUV440P,
+ AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ411P,
+ AV_PIX_FMT_YUVJ440P,
+ AV_PIX_FMT_YUV444P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV420P9,
+ AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV420P10,
+ AV_PIX_FMT_YUV440P10,
+ AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV420P12,
+ AV_PIX_FMT_YUV440P12,
+ AV_PIX_FMT_YUV444P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV420P14,
+ AV_PIX_FMT_YUV444P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV420P16,
+ AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
+ AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16,
+ AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY10, AV_PIX_FMT_GRAY12, AV_PIX_FMT_GRAY16,
+ AV_PIX_FMT_NONE
+ };
+
+ AVFilterFormats *formats = ff_make_format_list(pixfmts);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, formats);
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ AVFilterContext *ctx = inlink->dst;
+ BPNContext *s = ctx->priv;
+
+ s->nb_planes = desc->nb_components;
+
+ s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
+ s->planeheight[0] = s->planeheight[3] = inlink->h;
+ s->planewidth[1] = s->planewidth[2] = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
+ s->planewidth[0] = s->planewidth[3] = inlink->w;
+
+ s->depth = desc->comp[0].depth;
+
+ return 0;
+}
+
+#define CHECK_BIT(x, a, b, c) { \
+ bit = (((val[(x)] & mask) == (val[(x) + (a)] & mask)) + \
+ ((val[(x)] & mask) == (val[(x) + (b)] & mask)) + \
+ ((val[(x)] & mask) == (val[(x) + (c)] & mask))) > 1; \
+ if (dst) \
+ dst[(x)] = factor * bit; \
+ stats[plane] += bit; }
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ BPNContext *s = ctx->priv;
+ const int mask = (1 << (s->bitplane - 1));
+ const int factor = (1 << s->depth) - 1;
+ float stats[4] = { 0 };
+ char metabuf[128];
+ int plane, y, x, bit;
+ AVFrame *out = s->filter ? NULL : in;
+
+ if (!out) {
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+ }
+
+ if (s->depth <= 8) {
+ for (plane = 0; plane < s->nb_planes; plane++) {
+ const int linesize = in->linesize[plane];
+ const int dlinesize = out->linesize[plane];
+ uint8_t *val = in->data[plane];
+ uint8_t *dst = s->filter ? out->data[plane]: NULL;
+
+ for (y = 0; y < s->planeheight[plane] - 1; y++) {
+ CHECK_BIT(0, 1, 1 + linesize, linesize)
+
+ for (x = 1; x < s->planewidth[plane] - 1; x++) {
+ CHECK_BIT(x, -1, 1, linesize)
+ }
+
+ CHECK_BIT(x, -1, -1 + linesize, linesize)
+
+ val += linesize;
+ if (dst)
+ dst += dlinesize;
+ }
+
+ CHECK_BIT(0, 1, 1 - linesize, -linesize)
+
+ for (x = 1; x < s->planewidth[plane] - 1; x++) {
+ CHECK_BIT(x, -1, 1, -linesize)
+ }
+
+ CHECK_BIT(x, -1, -1 - linesize, -linesize)
+ }
+ } else {
+ for (plane = 0; plane < s->nb_planes; plane++) {
+ const int linesize = in->linesize[plane] / 2;
+ const int dlinesize = out->linesize[plane] / 2;
+ uint16_t *val = (uint16_t *)in->data[plane];
+ uint16_t *dst = s->filter ? (uint16_t *)out->data[plane] : NULL;
+
+ val = (uint16_t *)in->data[plane];
+ for (y = 0; y < s->planeheight[plane] - 1; y++) {
+ CHECK_BIT(0, 1, 1 + linesize, linesize)
+
+ for (x = 1; x < s->planewidth[plane] - 1; x++) {
+ CHECK_BIT(x, -1, 1, linesize)
+ }
+
+ CHECK_BIT(x, -1, -1 + linesize, linesize)
+
+ val += linesize;
+ if (dst)
+ dst += dlinesize;
+ }
+
+ CHECK_BIT(0, 1, 1 - linesize, -linesize)
+
+ for (x = 1; x < s->planewidth[plane] - 1; x++) {
+ CHECK_BIT(x, -1, 1, -linesize)
+ }
+
+ CHECK_BIT(x, -1, -1 -linesize, -linesize)
+ }
+ }
+
+ for (plane = 0; plane < s->nb_planes; plane++) {
+ char key[32];
+
+ stats[plane] /= s->planewidth[plane] * s->planeheight[plane];
+ snprintf(key, sizeof(key), "lavfi.bitplanenoise.%d.%d", plane, s->bitplane);
+ snprintf(metabuf, sizeof(metabuf), "%f", 1. - 2.* fabs((stats[plane] - 0.5)));
+ av_dict_set(&out->metadata, key, metabuf, 0);
+ }
+
+ if (out != in)
+ av_frame_free(&in);
+
+ return ff_filter_frame(outlink, out);
+}
+
+static const AVFilterPad inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ { NULL }
+};
+
+static const AVFilterPad outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_bitplanenoise = {
+ .name = "bitplanenoise",
+ .description = NULL_IF_CONFIG_SMALL("Measure bit plane noise."),
+ .priv_size = sizeof(BPNContext),
+ .query_formats = query_formats,
+ .inputs = inputs,
+ .outputs = outputs,
+ .priv_class = &bitplanenoise_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+};
diff --git a/libavfilter/vf_blackdetect.c b/libavfilter/vf_blackdetect.c
new file mode 100644
index 0000000000..0f6adf49ed
--- /dev/null
+++ b/libavfilter/vf_blackdetect.c
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2012 Stefano Sabatini
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Video black detector, loosely based on blackframe with extended
+ * syntax and features
+ */
+
+#include <float.h>
+#include "libavutil/opt.h"
+#include "libavutil/timestamp.h"
+#include "avfilter.h"
+#include "internal.h"
+
+typedef struct {
+ const AVClass *class;
+ double black_min_duration_time; ///< minimum duration of detected black, in seconds
+ int64_t black_min_duration; ///< minimum duration of detected black, expressed in timebase units
+ int64_t black_start; ///< pts start time of the first black picture
+ int64_t black_end; ///< pts end time of the last black picture
+ int64_t last_picref_pts; ///< pts of the last input picture
+ int black_started;
+
+ double picture_black_ratio_th;
+ double pixel_black_th;
+ unsigned int pixel_black_th_i;
+
+ unsigned int nb_black_pixels; ///< number of black pixels counted so far
+} BlackDetectContext;
+
+#define OFFSET(x) offsetof(BlackDetectContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption blackdetect_options[] = {
+ { "d", "set minimum detected black duration in seconds", OFFSET(black_min_duration_time), AV_OPT_TYPE_DOUBLE, {.dbl=2}, 0, DBL_MAX, FLAGS },
+ { "black_min_duration", "set minimum detected black duration in seconds", OFFSET(black_min_duration_time), AV_OPT_TYPE_DOUBLE, {.dbl=2}, 0, DBL_MAX, FLAGS },
+ { "picture_black_ratio_th", "set the picture black ratio threshold", OFFSET(picture_black_ratio_th), AV_OPT_TYPE_DOUBLE, {.dbl=.98}, 0, 1, FLAGS },
+ { "pic_th", "set the picture black ratio threshold", OFFSET(picture_black_ratio_th), AV_OPT_TYPE_DOUBLE, {.dbl=.98}, 0, 1, FLAGS },
+ { "pixel_black_th", "set the pixel black threshold", OFFSET(pixel_black_th), AV_OPT_TYPE_DOUBLE, {.dbl=.10}, 0, 1, FLAGS },
+ { "pix_th", "set the pixel black threshold", OFFSET(pixel_black_th), AV_OPT_TYPE_DOUBLE, {.dbl=.10}, 0, 1, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(blackdetect);
+
+#define YUVJ_FORMATS \
+ AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P
+
+static const enum AVPixelFormat yuvj_formats[] = {
+ YUVJ_FORMATS, AV_PIX_FMT_NONE
+};
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_GRAY8,
+ AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
+ AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P,
+ AV_PIX_FMT_NV12, AV_PIX_FMT_NV21,
+ YUVJ_FORMATS,
+ AV_PIX_FMT_NONE
+ };
+
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ BlackDetectContext *blackdetect = ctx->priv;
+
+ blackdetect->black_min_duration =
+ blackdetect->black_min_duration_time / av_q2d(inlink->time_base);
+
+ blackdetect->pixel_black_th_i = ff_fmt_is_in(inlink->format, yuvj_formats) ?
+ // luminance_minimum_value + pixel_black_th * luminance_range_size
+ blackdetect->pixel_black_th * 255 :
+ 16 + blackdetect->pixel_black_th * (235 - 16);
+
+ av_log(blackdetect, AV_LOG_VERBOSE,
+ "black_min_duration:%s pixel_black_th:%f pixel_black_th_i:%d picture_black_ratio_th:%f\n",
+ av_ts2timestr(blackdetect->black_min_duration, &inlink->time_base),
+ blackdetect->pixel_black_th, blackdetect->pixel_black_th_i,
+ blackdetect->picture_black_ratio_th);
+ return 0;
+}
+
+static void check_black_end(AVFilterContext *ctx)
+{
+ BlackDetectContext *blackdetect = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
+
+ if ((blackdetect->black_end - blackdetect->black_start) >= blackdetect->black_min_duration) {
+ av_log(blackdetect, AV_LOG_INFO,
+ "black_start:%s black_end:%s black_duration:%s\n",
+ av_ts2timestr(blackdetect->black_start, &inlink->time_base),
+ av_ts2timestr(blackdetect->black_end, &inlink->time_base),
+ av_ts2timestr(blackdetect->black_end - blackdetect->black_start, &inlink->time_base));
+ }
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ BlackDetectContext *blackdetect = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
+ int ret = ff_request_frame(inlink);
+
+ if (ret == AVERROR_EOF && blackdetect->black_started) {
+ // FIXME: black_end should be set to last_picref_pts + last_picref_duration
+ blackdetect->black_end = blackdetect->last_picref_pts;
+ check_black_end(ctx);
+ }
+ return ret;
+}
+
+// TODO: document metadata
+static int filter_frame(AVFilterLink *inlink, AVFrame *picref)
+{
+ AVFilterContext *ctx = inlink->dst;
+ BlackDetectContext *blackdetect = ctx->priv;
+ double picture_black_ratio = 0;
+ const uint8_t *p = picref->data[0];
+ int x, i;
+
+ for (i = 0; i < inlink->h; i++) {
+ for (x = 0; x < inlink->w; x++)
+ blackdetect->nb_black_pixels += p[x] <= blackdetect->pixel_black_th_i;
+ p += picref->linesize[0];
+ }
+
+ picture_black_ratio = (double)blackdetect->nb_black_pixels / (inlink->w * inlink->h);
+
+ av_log(ctx, AV_LOG_DEBUG,
+ "frame:%"PRId64" picture_black_ratio:%f pts:%s t:%s type:%c\n",
+ inlink->frame_count_out, picture_black_ratio,
+ av_ts2str(picref->pts), av_ts2timestr(picref->pts, &inlink->time_base),
+ av_get_picture_type_char(picref->pict_type));
+
+ if (picture_black_ratio >= blackdetect->picture_black_ratio_th) {
+ if (!blackdetect->black_started) {
+ /* black starts here */
+ blackdetect->black_started = 1;
+ blackdetect->black_start = picref->pts;
+ av_dict_set(avpriv_frame_get_metadatap(picref), "lavfi.black_start",
+ av_ts2timestr(blackdetect->black_start, &inlink->time_base), 0);
+ }
+ } else if (blackdetect->black_started) {
+ /* black ends here */
+ blackdetect->black_started = 0;
+ blackdetect->black_end = picref->pts;
+ check_black_end(ctx);
+ av_dict_set(avpriv_frame_get_metadatap(picref), "lavfi.black_end",
+ av_ts2timestr(blackdetect->black_end, &inlink->time_base), 0);
+ }
+
+ blackdetect->last_picref_pts = picref->pts;
+ blackdetect->nb_black_pixels = 0;
+ return ff_filter_frame(inlink->dst->outputs[0], picref);
+}
+
+static const AVFilterPad blackdetect_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad blackdetect_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_blackdetect = {
+ .name = "blackdetect",
+ .description = NULL_IF_CONFIG_SMALL("Detect video intervals that are (almost) black."),
+ .priv_size = sizeof(BlackDetectContext),
+ .query_formats = query_formats,
+ .inputs = blackdetect_inputs,
+ .outputs = blackdetect_outputs,
+ .priv_class = &blackdetect_class,
+};
diff --git a/libavfilter/vf_blackframe.c b/libavfilter/vf_blackframe.c
index 8cbcc005a4..9fe2a42942 100644
--- a/libavfilter/vf_blackframe.c
+++ b/libavfilter/vf_blackframe.c
@@ -4,20 +4,20 @@
* Copyright (c) 2006 Julian Hall
* Copyright (c) 2002-2003 Brian J. Murrell
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or modify
+ * FFmpeg is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
- * with Libav; if not, write to the Free Software Foundation, Inc.,
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
@@ -32,7 +32,6 @@
#include "libavutil/internal.h"
#include "libavutil/opt.h"
-
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
@@ -44,6 +43,7 @@ typedef struct BlackFrameContext {
int bthresh; ///< black threshold
unsigned int frame; ///< frame number
unsigned int nblack; ///< number of black pixels counted so far
+ unsigned int last_keyframe; ///< frame number of the last received key-frame
} BlackFrameContext;
static int query_formats(AVFilterContext *ctx)
@@ -54,10 +54,16 @@ static int query_formats(AVFilterContext *ctx)
AV_PIX_FMT_NONE
};
- ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
- return 0;
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
}
+#define SET_META(key, format, value) \
+ snprintf(buf, sizeof(buf), format, value); \
+ av_dict_set(metadata, key, buf, 0)
+
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
AVFilterContext *ctx = inlink->dst;
@@ -65,6 +71,8 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
int x, i;
int pblack = 0;
uint8_t *p = frame->data[0];
+ AVDictionary **metadata;
+ char buf[32];
for (i = 0; i < frame->height; i++) {
for (x = 0; x < inlink->w; x++)
@@ -72,11 +80,21 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
p += frame->linesize[0];
}
+ if (frame->key_frame)
+ s->last_keyframe = s->frame;
+
pblack = s->nblack * 100 / (inlink->w * inlink->h);
- if (pblack >= s->bamount)
- av_log(ctx, AV_LOG_INFO, "frame:%u pblack:%u pts:%"PRId64" t:%f\n",
+ if (pblack >= s->bamount) {
+ metadata = avpriv_frame_get_metadatap(frame);
+
+ av_log(ctx, AV_LOG_INFO, "frame:%u pblack:%u pts:%"PRId64" t:%f "
+ "type:%c last_keyframe:%d\n",
s->frame, pblack, frame->pts,
- frame->pts == AV_NOPTS_VALUE ? -1 : frame->pts * av_q2d(inlink->time_base));
+ frame->pts == AV_NOPTS_VALUE ? -1 : frame->pts * av_q2d(inlink->time_base),
+ av_get_picture_type_char(frame->pict_type), s->last_keyframe);
+
+ SET_META("lavfi.blackframe.pblack", "%u", pblack);
+ }
s->frame++;
s->nblack = 0;
@@ -84,28 +102,24 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
}
#define OFFSET(x) offsetof(BlackFrameContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
-static const AVOption options[] = {
- { "amount", "Percentage of the pixels that have to be below the threshold "
- "for the frame to be considered black.", OFFSET(bamount), AV_OPT_TYPE_INT, { .i64 = 98 }, 0, 100, FLAGS },
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+static const AVOption blackframe_options[] = {
+ { "amount", "percentage of the pixels that have to be below the threshold "
+ "for the frame to be considered black", OFFSET(bamount), AV_OPT_TYPE_INT, { .i64 = 98 }, 0, 100, FLAGS },
{ "threshold", "threshold below which a pixel value is considered black",
- OFFSET(bthresh), AV_OPT_TYPE_INT, { .i64 = 32 }, 0, INT_MAX, FLAGS },
- { NULL },
+ OFFSET(bthresh), AV_OPT_TYPE_INT, { .i64 = 32 }, 0, 255, FLAGS },
+ { "thresh", "threshold below which a pixel value is considered black",
+ OFFSET(bthresh), AV_OPT_TYPE_INT, { .i64 = 32 }, 0, 255, FLAGS },
+ { NULL }
};
-static const AVClass blackframe_class = {
- .class_name = "blackframe",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(blackframe);
static const AVFilterPad avfilter_vf_blackframe_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .get_video_buffer = ff_null_get_video_buffer,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
},
{ NULL }
};
@@ -119,15 +133,11 @@ static const AVFilterPad avfilter_vf_blackframe_outputs[] = {
};
AVFilter ff_vf_blackframe = {
- .name = "blackframe",
- .description = NULL_IF_CONFIG_SMALL("Detect frames that are (almost) black."),
-
- .priv_size = sizeof(BlackFrameContext),
- .priv_class = &blackframe_class,
-
+ .name = "blackframe",
+ .description = NULL_IF_CONFIG_SMALL("Detect frames that are (almost) black."),
+ .priv_size = sizeof(BlackFrameContext),
+ .priv_class = &blackframe_class,
.query_formats = query_formats,
-
- .inputs = avfilter_vf_blackframe_inputs,
-
- .outputs = avfilter_vf_blackframe_outputs,
+ .inputs = avfilter_vf_blackframe_inputs,
+ .outputs = avfilter_vf_blackframe_outputs,
};
diff --git a/libavfilter/vf_blend.c b/libavfilter/vf_blend.c
new file mode 100644
index 0000000000..a3235e684b
--- /dev/null
+++ b/libavfilter/vf_blend.c
@@ -0,0 +1,675 @@
+/*
+ * Copyright (c) 2013 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/imgutils.h"
+#include "libavutil/eval.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixfmt.h"
+#include "avfilter.h"
+#include "bufferqueue.h"
+#include "formats.h"
+#include "internal.h"
+#include "dualinput.h"
+#include "video.h"
+#include "blend.h"
+
+#define TOP 0
+#define BOTTOM 1
+
+typedef struct BlendContext {
+ const AVClass *class;
+ FFDualInputContext dinput;
+ int hsub, vsub; ///< chroma subsampling values
+ int nb_planes;
+ char *all_expr;
+ enum BlendMode all_mode;
+ double all_opacity;
+
+ FilterParams params[4];
+ int tblend;
+ AVFrame *prev_frame; /* only used with tblend */
+} BlendContext;
+
+static const char *const var_names[] = { "X", "Y", "W", "H", "SW", "SH", "T", "N", "A", "B", "TOP", "BOTTOM", NULL };
+enum { VAR_X, VAR_Y, VAR_W, VAR_H, VAR_SW, VAR_SH, VAR_T, VAR_N, VAR_A, VAR_B, VAR_TOP, VAR_BOTTOM, VAR_VARS_NB };
+
+typedef struct ThreadData {
+ const AVFrame *top, *bottom;
+ AVFrame *dst;
+ AVFilterLink *inlink;
+ int plane;
+ int w, h;
+ FilterParams *param;
+} ThreadData;
+
+#define COMMON_OPTIONS \
+ { "c0_mode", "set component #0 blend mode", OFFSET(params[0].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, "mode"},\
+ { "c1_mode", "set component #1 blend mode", OFFSET(params[1].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, "mode"},\
+ { "c2_mode", "set component #2 blend mode", OFFSET(params[2].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, "mode"},\
+ { "c3_mode", "set component #3 blend mode", OFFSET(params[3].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, "mode"},\
+ { "all_mode", "set blend mode for all components", OFFSET(all_mode), AV_OPT_TYPE_INT, {.i64=-1},-1, BLEND_NB-1, FLAGS, "mode"},\
+ { "addition", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_ADDITION}, 0, 0, FLAGS, "mode" },\
+ { "addition128", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_ADDITION128}, 0, 0, FLAGS, "mode" },\
+ { "and", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_AND}, 0, 0, FLAGS, "mode" },\
+ { "average", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_AVERAGE}, 0, 0, FLAGS, "mode" },\
+ { "burn", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_BURN}, 0, 0, FLAGS, "mode" },\
+ { "darken", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DARKEN}, 0, 0, FLAGS, "mode" },\
+ { "difference", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DIFFERENCE}, 0, 0, FLAGS, "mode" },\
+ { "difference128", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DIFFERENCE128}, 0, 0, FLAGS, "mode" },\
+ { "divide", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DIVIDE}, 0, 0, FLAGS, "mode" },\
+ { "dodge", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DODGE}, 0, 0, FLAGS, "mode" },\
+ { "exclusion", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_EXCLUSION}, 0, 0, FLAGS, "mode" },\
+ { "freeze", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_FREEZE}, 0, 0, FLAGS, "mode" },\
+ { "glow", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_GLOW}, 0, 0, FLAGS, "mode" },\
+ { "hardlight", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_HARDLIGHT}, 0, 0, FLAGS, "mode" },\
+ { "hardmix", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_HARDMIX}, 0, 0, FLAGS, "mode" },\
+ { "heat", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_HEAT}, 0, 0, FLAGS, "mode" },\
+ { "lighten", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_LIGHTEN}, 0, 0, FLAGS, "mode" },\
+ { "linearlight","", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_LINEARLIGHT},0, 0, FLAGS, "mode" },\
+ { "multiply", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_MULTIPLY}, 0, 0, FLAGS, "mode" },\
+ { "multiply128","", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_MULTIPLY128},0, 0, FLAGS, "mode" },\
+ { "negation", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_NEGATION}, 0, 0, FLAGS, "mode" },\
+ { "normal", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_NORMAL}, 0, 0, FLAGS, "mode" },\
+ { "or", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_OR}, 0, 0, FLAGS, "mode" },\
+ { "overlay", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_OVERLAY}, 0, 0, FLAGS, "mode" },\
+ { "phoenix", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_PHOENIX}, 0, 0, FLAGS, "mode" },\
+ { "pinlight", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_PINLIGHT}, 0, 0, FLAGS, "mode" },\
+ { "reflect", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_REFLECT}, 0, 0, FLAGS, "mode" },\
+ { "screen", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_SCREEN}, 0, 0, FLAGS, "mode" },\
+ { "softlight", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_SOFTLIGHT}, 0, 0, FLAGS, "mode" },\
+ { "subtract", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_SUBTRACT}, 0, 0, FLAGS, "mode" },\
+ { "vividlight", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_VIVIDLIGHT}, 0, 0, FLAGS, "mode" },\
+ { "xor", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_XOR}, 0, 0, FLAGS, "mode" },\
+ { "c0_expr", "set color component #0 expression", OFFSET(params[0].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },\
+ { "c1_expr", "set color component #1 expression", OFFSET(params[1].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },\
+ { "c2_expr", "set color component #2 expression", OFFSET(params[2].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },\
+ { "c3_expr", "set color component #3 expression", OFFSET(params[3].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },\
+ { "all_expr", "set expression for all color components", OFFSET(all_expr), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },\
+ { "c0_opacity", "set color component #0 opacity", OFFSET(params[0].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },\
+ { "c1_opacity", "set color component #1 opacity", OFFSET(params[1].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },\
+ { "c2_opacity", "set color component #2 opacity", OFFSET(params[2].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },\
+ { "c3_opacity", "set color component #3 opacity", OFFSET(params[3].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },\
+ { "all_opacity", "set opacity for all color components", OFFSET(all_opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS}
+
+#define OFFSET(x) offsetof(BlendContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption blend_options[] = {
+ COMMON_OPTIONS,
+ { "shortest", "force termination when the shortest input terminates", OFFSET(dinput.shortest), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS },
+ { "repeatlast", "repeat last bottom frame", OFFSET(dinput.repeatlast), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(blend);
+
+#define COPY(src) \
+static void blend_copy ## src(const uint8_t *top, ptrdiff_t top_linesize, \
+ const uint8_t *bottom, ptrdiff_t bottom_linesize,\
+ uint8_t *dst, ptrdiff_t dst_linesize, \
+ ptrdiff_t width, ptrdiff_t height, \
+ FilterParams *param, double *values, int starty) \
+{ \
+ av_image_copy_plane(dst, dst_linesize, src, src ## _linesize, \
+ width, height); \
+}
+
+COPY(top)
+COPY(bottom)
+
+#undef COPY
+
+static void blend_normal_8bit(const uint8_t *top, ptrdiff_t top_linesize,
+ const uint8_t *bottom, ptrdiff_t bottom_linesize,
+ uint8_t *dst, ptrdiff_t dst_linesize,
+ ptrdiff_t width, ptrdiff_t height,
+ FilterParams *param, double *values, int starty)
+{
+ const double opacity = param->opacity;
+ int i, j;
+
+ for (i = 0; i < height; i++) {
+ for (j = 0; j < width; j++) {
+ dst[j] = top[j] * opacity + bottom[j] * (1. - opacity);
+ }
+ dst += dst_linesize;
+ top += top_linesize;
+ bottom += bottom_linesize;
+ }
+}
+
+static void blend_normal_16bit(const uint8_t *_top, ptrdiff_t top_linesize,
+ const uint8_t *_bottom, ptrdiff_t bottom_linesize,
+ uint8_t *_dst, ptrdiff_t dst_linesize,
+ ptrdiff_t width, ptrdiff_t height,
+ FilterParams *param, double *values, int starty)
+{
+ const uint16_t *top = (uint16_t*)_top;
+ const uint16_t *bottom = (uint16_t*)_bottom;
+ uint16_t *dst = (uint16_t*)_dst;
+ const double opacity = param->opacity;
+ int i, j;
+ dst_linesize /= 2;
+ top_linesize /= 2;
+ bottom_linesize /= 2;
+
+ for (i = 0; i < height; i++) {
+ for (j = 0; j < width; j++) {
+ dst[j] = top[j] * opacity + bottom[j] * (1. - opacity);
+ }
+ dst += dst_linesize;
+ top += top_linesize;
+ bottom += bottom_linesize;
+ }
+}
+
+#define DEFINE_BLEND8(name, expr) \
+static void blend_## name##_8bit(const uint8_t *top, ptrdiff_t top_linesize, \
+ const uint8_t *bottom, ptrdiff_t bottom_linesize, \
+ uint8_t *dst, ptrdiff_t dst_linesize, \
+ ptrdiff_t width, ptrdiff_t height, \
+ FilterParams *param, double *values, int starty) \
+{ \
+ double opacity = param->opacity; \
+ int i, j; \
+ \
+ for (i = 0; i < height; i++) { \
+ for (j = 0; j < width; j++) { \
+ dst[j] = top[j] + ((expr) - top[j]) * opacity; \
+ } \
+ dst += dst_linesize; \
+ top += top_linesize; \
+ bottom += bottom_linesize; \
+ } \
+}
+
+#define DEFINE_BLEND16(name, expr) \
+static void blend_## name##_16bit(const uint8_t *_top, ptrdiff_t top_linesize, \
+ const uint8_t *_bottom, ptrdiff_t bottom_linesize, \
+ uint8_t *_dst, ptrdiff_t dst_linesize, \
+ ptrdiff_t width, ptrdiff_t height, \
+ FilterParams *param, double *values, int starty) \
+{ \
+ const uint16_t *top = (uint16_t*)_top; \
+ const uint16_t *bottom = (uint16_t*)_bottom; \
+ uint16_t *dst = (uint16_t*)_dst; \
+ double opacity = param->opacity; \
+ int i, j; \
+ dst_linesize /= 2; \
+ top_linesize /= 2; \
+ bottom_linesize /= 2; \
+ \
+ for (i = 0; i < height; i++) { \
+ for (j = 0; j < width; j++) { \
+ dst[j] = top[j] + ((expr) - top[j]) * opacity; \
+ } \
+ dst += dst_linesize; \
+ top += top_linesize; \
+ bottom += bottom_linesize; \
+ } \
+}
+
+#define A top[j]
+#define B bottom[j]
+
+#define MULTIPLY(x, a, b) ((x) * (((a) * (b)) / 255))
+#define SCREEN(x, a, b) (255 - (x) * ((255 - (a)) * (255 - (b)) / 255))
+#define BURN(a, b) (((a) == 0) ? (a) : FFMAX(0, 255 - ((255 - (b)) << 8) / (a)))
+#define DODGE(a, b) (((a) == 255) ? (a) : FFMIN(255, (((b) << 8) / (255 - (a)))))
+
+DEFINE_BLEND8(addition, FFMIN(255, A + B))
+DEFINE_BLEND8(addition128, av_clip_uint8(A + B - 128))
+DEFINE_BLEND8(average, (A + B) / 2)
+DEFINE_BLEND8(subtract, FFMAX(0, A - B))
+DEFINE_BLEND8(multiply, MULTIPLY(1, A, B))
+DEFINE_BLEND8(multiply128,av_clip_uint8((A - 128) * B / 32. + 128))
+DEFINE_BLEND8(negation, 255 - FFABS(255 - A - B))
+DEFINE_BLEND8(difference, FFABS(A - B))
+DEFINE_BLEND8(difference128, av_clip_uint8(128 + A - B))
+DEFINE_BLEND8(screen, SCREEN(1, A, B))
+DEFINE_BLEND8(overlay, (A < 128) ? MULTIPLY(2, A, B) : SCREEN(2, A, B))
+DEFINE_BLEND8(hardlight, (B < 128) ? MULTIPLY(2, B, A) : SCREEN(2, B, A))
+DEFINE_BLEND8(hardmix, (A < (255 - B)) ? 0: 255)
+DEFINE_BLEND8(heat, (A == 0) ? 0 : 255 - FFMIN(((255 - B) * (255 - B)) / A, 255))
+DEFINE_BLEND8(freeze, (B == 0) ? 0 : 255 - FFMIN(((255 - A) * (255 - A)) / B, 255))
+DEFINE_BLEND8(darken, FFMIN(A, B))
+DEFINE_BLEND8(lighten, FFMAX(A, B))
+DEFINE_BLEND8(divide, av_clip_uint8(B == 0 ? 255 : 255 * A / B))
+DEFINE_BLEND8(dodge, DODGE(A, B))
+DEFINE_BLEND8(burn, BURN(A, B))
+DEFINE_BLEND8(softlight, (A > 127) ? B + (255 - B) * (A - 127.5) / 127.5 * (0.5 - fabs(B - 127.5) / 255): B - B * ((127.5 - A) / 127.5) * (0.5 - fabs(B - 127.5)/255))
+DEFINE_BLEND8(exclusion, A + B - 2 * A * B / 255)
+DEFINE_BLEND8(pinlight, (B < 128) ? FFMIN(A, 2 * B) : FFMAX(A, 2 * (B - 128)))
+DEFINE_BLEND8(phoenix, FFMIN(A, B) - FFMAX(A, B) + 255)
+DEFINE_BLEND8(reflect, (B == 255) ? B : FFMIN(255, (A * A / (255 - B))))
+DEFINE_BLEND8(glow, (A == 255) ? A : FFMIN(255, (B * B / (255 - A))))
+DEFINE_BLEND8(and, A & B)
+DEFINE_BLEND8(or, A | B)
+DEFINE_BLEND8(xor, A ^ B)
+DEFINE_BLEND8(vividlight, (A < 128) ? BURN(2 * A, B) : DODGE(2 * (A - 128), B))
+DEFINE_BLEND8(linearlight,av_clip_uint8((B < 128) ? B + 2 * A - 255 : B + 2 * (A - 128)))
+
+#undef MULTIPLY
+#undef SCREEN
+#undef BURN
+#undef DODGE
+
+#define MULTIPLY(x, a, b) ((x) * (((a) * (b)) / 65535))
+#define SCREEN(x, a, b) (65535 - (x) * ((65535 - (a)) * (65535 - (b)) / 65535))
+#define BURN(a, b) (((a) == 0) ? (a) : FFMAX(0, 65535 - ((65535 - (b)) << 16) / (a)))
+#define DODGE(a, b) (((a) == 65535) ? (a) : FFMIN(65535, (((b) << 16) / (65535 - (a)))))
+
+DEFINE_BLEND16(addition, FFMIN(65535, A + B))
+DEFINE_BLEND16(addition128, av_clip_uint16(A + B - 32768))
+DEFINE_BLEND16(average, (A + B) / 2)
+DEFINE_BLEND16(subtract, FFMAX(0, A - B))
+DEFINE_BLEND16(multiply, MULTIPLY(1, A, B))
+DEFINE_BLEND16(multiply128, av_clip_uint16((A - 32768) * B / 8192. + 32768))
+DEFINE_BLEND16(negation, 65535 - FFABS(65535 - A - B))
+DEFINE_BLEND16(difference, FFABS(A - B))
+DEFINE_BLEND16(difference128, av_clip_uint16(32768 + A - B))
+DEFINE_BLEND16(screen, SCREEN(1, A, B))
+DEFINE_BLEND16(overlay, (A < 32768) ? MULTIPLY(2, A, B) : SCREEN(2, A, B))
+DEFINE_BLEND16(hardlight, (B < 32768) ? MULTIPLY(2, B, A) : SCREEN(2, B, A))
+DEFINE_BLEND16(hardmix, (A < (65535 - B)) ? 0: 65535)
+DEFINE_BLEND16(heat, (A == 0) ? 0 : 65535 - FFMIN(((65535 - B) * (65535 - B)) / A, 65535))
+DEFINE_BLEND16(freeze, (B == 0) ? 0 : 65535 - FFMIN(((65535 - A) * (65535 - A)) / B, 65535))
+DEFINE_BLEND16(darken, FFMIN(A, B))
+DEFINE_BLEND16(lighten, FFMAX(A, B))
+DEFINE_BLEND16(divide, av_clip_uint16(B == 0 ? 65535 : 65535 * A / B))
+DEFINE_BLEND16(dodge, DODGE(A, B))
+DEFINE_BLEND16(burn, BURN(A, B))
+DEFINE_BLEND16(softlight, (A > 32767) ? B + (65535 - B) * (A - 32767.5) / 32767.5 * (0.5 - fabs(B - 32767.5) / 65535): B - B * ((32767.5 - A) / 32767.5) * (0.5 - fabs(B - 32767.5)/65535))
+DEFINE_BLEND16(exclusion, A + B - 2 * A * B / 65535)
+DEFINE_BLEND16(pinlight, (B < 32768) ? FFMIN(A, 2 * B) : FFMAX(A, 2 * (B - 32768)))
+DEFINE_BLEND16(phoenix, FFMIN(A, B) - FFMAX(A, B) + 65535)
+DEFINE_BLEND16(reflect, (B == 65535) ? B : FFMIN(65535, (A * A / (65535 - B))))
+DEFINE_BLEND16(glow, (A == 65535) ? A : FFMIN(65535, (B * B / (65535 - A))))
+DEFINE_BLEND16(and, A & B)
+DEFINE_BLEND16(or, A | B)
+DEFINE_BLEND16(xor, A ^ B)
+DEFINE_BLEND16(vividlight, (A < 32768) ? BURN(2 * A, B) : DODGE(2 * (A - 32768), B))
+DEFINE_BLEND16(linearlight,av_clip_uint16((B < 32768) ? B + 2 * A - 65535 : B + 2 * (A - 32768)))
+
+#define DEFINE_BLEND_EXPR(type, name, div) \
+static void blend_expr_## name(const uint8_t *_top, ptrdiff_t top_linesize, \
+ const uint8_t *_bottom, ptrdiff_t bottom_linesize, \
+ uint8_t *_dst, ptrdiff_t dst_linesize, \
+ ptrdiff_t width, ptrdiff_t height, \
+ FilterParams *param, double *values, int starty) \
+{ \
+ const type *top = (type*)_top; \
+ const type *bottom = (type*)_bottom; \
+ type *dst = (type*)_dst; \
+ AVExpr *e = param->e; \
+ int y, x; \
+ dst_linesize /= div; \
+ top_linesize /= div; \
+ bottom_linesize /= div; \
+ \
+ for (y = 0; y < height; y++) { \
+ values[VAR_Y] = y + starty; \
+ for (x = 0; x < width; x++) { \
+ values[VAR_X] = x; \
+ values[VAR_TOP] = values[VAR_A] = top[x]; \
+ values[VAR_BOTTOM] = values[VAR_B] = bottom[x]; \
+ dst[x] = av_expr_eval(e, values, NULL); \
+ } \
+ dst += dst_linesize; \
+ top += top_linesize; \
+ bottom += bottom_linesize; \
+ } \
+}
+
+DEFINE_BLEND_EXPR(uint8_t, 8bit, 1)
+DEFINE_BLEND_EXPR(uint16_t, 16bit, 2)
+
+static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
+{
+ ThreadData *td = arg;
+ int slice_start = (td->h * jobnr ) / nb_jobs;
+ int slice_end = (td->h * (jobnr+1)) / nb_jobs;
+ int height = slice_end - slice_start;
+ const uint8_t *top = td->top->data[td->plane];
+ const uint8_t *bottom = td->bottom->data[td->plane];
+ uint8_t *dst = td->dst->data[td->plane];
+ double values[VAR_VARS_NB];
+
+ values[VAR_N] = td->inlink->frame_count_out;
+ values[VAR_T] = td->dst->pts == AV_NOPTS_VALUE ? NAN : td->dst->pts * av_q2d(td->inlink->time_base);
+ values[VAR_W] = td->w;
+ values[VAR_H] = td->h;
+ values[VAR_SW] = td->w / (double)td->dst->width;
+ values[VAR_SH] = td->h / (double)td->dst->height;
+
+ td->param->blend(top + slice_start * td->top->linesize[td->plane],
+ td->top->linesize[td->plane],
+ bottom + slice_start * td->bottom->linesize[td->plane],
+ td->bottom->linesize[td->plane],
+ dst + slice_start * td->dst->linesize[td->plane],
+ td->dst->linesize[td->plane],
+ td->w, height, td->param, &values[0], slice_start);
+ return 0;
+}
+
+static AVFrame *blend_frame(AVFilterContext *ctx, AVFrame *top_buf,
+ const AVFrame *bottom_buf)
+{
+ BlendContext *s = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFrame *dst_buf;
+ int plane;
+
+ dst_buf = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!dst_buf)
+ return top_buf;
+ av_frame_copy_props(dst_buf, top_buf);
+
+ for (plane = 0; plane < s->nb_planes; plane++) {
+ int hsub = plane == 1 || plane == 2 ? s->hsub : 0;
+ int vsub = plane == 1 || plane == 2 ? s->vsub : 0;
+ int outw = AV_CEIL_RSHIFT(dst_buf->width, hsub);
+ int outh = AV_CEIL_RSHIFT(dst_buf->height, vsub);
+ FilterParams *param = &s->params[plane];
+ ThreadData td = { .top = top_buf, .bottom = bottom_buf, .dst = dst_buf,
+ .w = outw, .h = outh, .param = param, .plane = plane,
+ .inlink = inlink };
+
+ ctx->internal->execute(ctx, filter_slice, &td, NULL, FFMIN(outh, ff_filter_get_nb_threads(ctx)));
+ }
+
+ if (!s->tblend)
+ av_frame_free(&top_buf);
+
+ return dst_buf;
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ BlendContext *s = ctx->priv;
+
+ s->tblend = !strcmp(ctx->filter->name, "tblend");
+
+ s->dinput.process = blend_frame;
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA420P,
+ AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ422P,AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ411P,
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP, AV_PIX_FMT_GRAY8,
+ AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
+ AV_PIX_FMT_YUVA420P16, AV_PIX_FMT_YUVA422P16, AV_PIX_FMT_YUVA444P16,
+ AV_PIX_FMT_GBRP16, AV_PIX_FMT_GRAY16,
+ AV_PIX_FMT_NONE
+ };
+
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ BlendContext *s = ctx->priv;
+ int i;
+
+ ff_dualinput_uninit(&s->dinput);
+ av_frame_free(&s->prev_frame);
+
+ for (i = 0; i < FF_ARRAY_ELEMS(s->params); i++)
+ av_expr_free(s->params[i].e);
+}
+
+void ff_blend_init(FilterParams *param, int is_16bit)
+{
+ switch (param->mode) {
+ case BLEND_ADDITION: param->blend = is_16bit ? blend_addition_16bit : blend_addition_8bit; break;
+ case BLEND_ADDITION128: param->blend = is_16bit ? blend_addition128_16bit : blend_addition128_8bit; break;
+ case BLEND_AND: param->blend = is_16bit ? blend_and_16bit : blend_and_8bit; break;
+ case BLEND_AVERAGE: param->blend = is_16bit ? blend_average_16bit : blend_average_8bit; break;
+ case BLEND_BURN: param->blend = is_16bit ? blend_burn_16bit : blend_burn_8bit; break;
+ case BLEND_DARKEN: param->blend = is_16bit ? blend_darken_16bit : blend_darken_8bit; break;
+ case BLEND_DIFFERENCE: param->blend = is_16bit ? blend_difference_16bit : blend_difference_8bit; break;
+ case BLEND_DIFFERENCE128: param->blend = is_16bit ? blend_difference128_16bit: blend_difference128_8bit; break;
+ case BLEND_DIVIDE: param->blend = is_16bit ? blend_divide_16bit : blend_divide_8bit; break;
+ case BLEND_DODGE: param->blend = is_16bit ? blend_dodge_16bit : blend_dodge_8bit; break;
+ case BLEND_EXCLUSION: param->blend = is_16bit ? blend_exclusion_16bit : blend_exclusion_8bit; break;
+ case BLEND_FREEZE: param->blend = is_16bit ? blend_freeze_16bit : blend_freeze_8bit; break;
+ case BLEND_GLOW: param->blend = is_16bit ? blend_glow_16bit : blend_glow_8bit; break;
+ case BLEND_HARDLIGHT: param->blend = is_16bit ? blend_hardlight_16bit : blend_hardlight_8bit; break;
+ case BLEND_HARDMIX: param->blend = is_16bit ? blend_hardmix_16bit : blend_hardmix_8bit; break;
+ case BLEND_HEAT: param->blend = is_16bit ? blend_heat_16bit : blend_heat_8bit; break;
+ case BLEND_LIGHTEN: param->blend = is_16bit ? blend_lighten_16bit : blend_lighten_8bit; break;
+ case BLEND_LINEARLIGHT:param->blend = is_16bit ? blend_linearlight_16bit: blend_linearlight_8bit;break;
+ case BLEND_MULTIPLY: param->blend = is_16bit ? blend_multiply_16bit : blend_multiply_8bit; break;
+ case BLEND_MULTIPLY128:param->blend = is_16bit ? blend_multiply128_16bit: blend_multiply128_8bit;break;
+ case BLEND_NEGATION: param->blend = is_16bit ? blend_negation_16bit : blend_negation_8bit; break;
+ case BLEND_NORMAL: param->blend = param->opacity == 1 ? blend_copytop :
+ param->opacity == 0 ? blend_copybottom :
+ is_16bit ? blend_normal_16bit : blend_normal_8bit; break;
+ case BLEND_OR: param->blend = is_16bit ? blend_or_16bit : blend_or_8bit; break;
+ case BLEND_OVERLAY: param->blend = is_16bit ? blend_overlay_16bit : blend_overlay_8bit; break;
+ case BLEND_PHOENIX: param->blend = is_16bit ? blend_phoenix_16bit : blend_phoenix_8bit; break;
+ case BLEND_PINLIGHT: param->blend = is_16bit ? blend_pinlight_16bit : blend_pinlight_8bit; break;
+ case BLEND_REFLECT: param->blend = is_16bit ? blend_reflect_16bit : blend_reflect_8bit; break;
+ case BLEND_SCREEN: param->blend = is_16bit ? blend_screen_16bit : blend_screen_8bit; break;
+ case BLEND_SOFTLIGHT: param->blend = is_16bit ? blend_softlight_16bit : blend_softlight_8bit; break;
+ case BLEND_SUBTRACT: param->blend = is_16bit ? blend_subtract_16bit : blend_subtract_8bit; break;
+ case BLEND_VIVIDLIGHT: param->blend = is_16bit ? blend_vividlight_16bit : blend_vividlight_8bit; break;
+ case BLEND_XOR: param->blend = is_16bit ? blend_xor_16bit : blend_xor_8bit; break;
+ }
+
+ if (param->opacity == 0 && param->mode != BLEND_NORMAL) {
+ param->blend = blend_copytop;
+ }
+
+ if (ARCH_X86)
+ ff_blend_init_x86(param, is_16bit);
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ AVFilterLink *toplink = ctx->inputs[TOP];
+ BlendContext *s = ctx->priv;
+ const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(toplink->format);
+ int ret, plane, is_16bit;
+
+ if (!s->tblend) {
+ AVFilterLink *bottomlink = ctx->inputs[BOTTOM];
+
+ if (toplink->format != bottomlink->format) {
+ av_log(ctx, AV_LOG_ERROR, "inputs must be of same pixel format\n");
+ return AVERROR(EINVAL);
+ }
+ if (toplink->w != bottomlink->w ||
+ toplink->h != bottomlink->h ||
+ toplink->sample_aspect_ratio.num != bottomlink->sample_aspect_ratio.num ||
+ toplink->sample_aspect_ratio.den != bottomlink->sample_aspect_ratio.den) {
+ av_log(ctx, AV_LOG_ERROR, "First input link %s parameters "
+ "(size %dx%d, SAR %d:%d) do not match the corresponding "
+ "second input link %s parameters (%dx%d, SAR %d:%d)\n",
+ ctx->input_pads[TOP].name, toplink->w, toplink->h,
+ toplink->sample_aspect_ratio.num,
+ toplink->sample_aspect_ratio.den,
+ ctx->input_pads[BOTTOM].name, bottomlink->w, bottomlink->h,
+ bottomlink->sample_aspect_ratio.num,
+ bottomlink->sample_aspect_ratio.den);
+ return AVERROR(EINVAL);
+ }
+ }
+
+ outlink->w = toplink->w;
+ outlink->h = toplink->h;
+ outlink->time_base = toplink->time_base;
+ outlink->sample_aspect_ratio = toplink->sample_aspect_ratio;
+ outlink->frame_rate = toplink->frame_rate;
+
+ s->hsub = pix_desc->log2_chroma_w;
+ s->vsub = pix_desc->log2_chroma_h;
+
+ is_16bit = pix_desc->comp[0].depth == 16;
+ s->nb_planes = av_pix_fmt_count_planes(toplink->format);
+
+ if (!s->tblend)
+ if ((ret = ff_dualinput_init(ctx, &s->dinput)) < 0)
+ return ret;
+
+ for (plane = 0; plane < FF_ARRAY_ELEMS(s->params); plane++) {
+ FilterParams *param = &s->params[plane];
+
+ if (s->all_mode >= 0)
+ param->mode = s->all_mode;
+ if (s->all_opacity < 1)
+ param->opacity = s->all_opacity;
+
+ ff_blend_init(param, is_16bit);
+
+ if (s->all_expr && !param->expr_str) {
+ param->expr_str = av_strdup(s->all_expr);
+ if (!param->expr_str)
+ return AVERROR(ENOMEM);
+ }
+ if (param->expr_str) {
+ ret = av_expr_parse(&param->e, param->expr_str, var_names,
+ NULL, NULL, NULL, NULL, 0, ctx);
+ if (ret < 0)
+ return ret;
+ param->blend = is_16bit? blend_expr_16bit : blend_expr_8bit;
+ }
+ }
+
+ return 0;
+}
+
+#if CONFIG_BLEND_FILTER
+
+static int request_frame(AVFilterLink *outlink)
+{
+ BlendContext *s = outlink->src->priv;
+ return ff_dualinput_request_frame(&s->dinput, outlink);
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
+{
+ BlendContext *s = inlink->dst->priv;
+ return ff_dualinput_filter_frame(&s->dinput, inlink, buf);
+}
+
+static const AVFilterPad blend_inputs[] = {
+ {
+ .name = "top",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },{
+ .name = "bottom",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad blend_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_blend = {
+ .name = "blend",
+ .description = NULL_IF_CONFIG_SMALL("Blend two video frames into each other."),
+ .init = init,
+ .uninit = uninit,
+ .priv_size = sizeof(BlendContext),
+ .query_formats = query_formats,
+ .inputs = blend_inputs,
+ .outputs = blend_outputs,
+ .priv_class = &blend_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
+};
+
+#endif
+
+#if CONFIG_TBLEND_FILTER
+
+static int tblend_filter_frame(AVFilterLink *inlink, AVFrame *frame)
+{
+ BlendContext *s = inlink->dst->priv;
+ AVFilterLink *outlink = inlink->dst->outputs[0];
+
+ if (s->prev_frame) {
+ AVFrame *out = blend_frame(inlink->dst, frame, s->prev_frame);
+ av_frame_free(&s->prev_frame);
+ s->prev_frame = frame;
+ return ff_filter_frame(outlink, out);
+ }
+ s->prev_frame = frame;
+ return 0;
+}
+
+static const AVOption tblend_options[] = {
+ COMMON_OPTIONS,
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(tblend);
+
+static const AVFilterPad tblend_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = tblend_filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad tblend_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_tblend = {
+ .name = "tblend",
+ .description = NULL_IF_CONFIG_SMALL("Blend successive frames."),
+ .priv_size = sizeof(BlendContext),
+ .priv_class = &tblend_class,
+ .query_formats = query_formats,
+ .init = init,
+ .uninit = uninit,
+ .inputs = tblend_inputs,
+ .outputs = tblend_outputs,
+ .flags = AVFILTER_FLAG_SLICE_THREADS,
+};
+
+#endif
diff --git a/libavfilter/vf_boxblur.c b/libavfilter/vf_boxblur.c
index 4cbfe2cc0f..8e43986846 100644
--- a/libavfilter/vf_boxblur.c
+++ b/libavfilter/vf_boxblur.c
@@ -2,20 +2,20 @@
* Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at>
* Copyright (c) 2011 Stefano Sabatini
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or modify
+ * FFmpeg is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
- * with Libav; if not, write to the Free Software Foundation, Inc.,
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
@@ -58,6 +58,7 @@ enum var_name {
typedef struct FilterParam {
int radius;
int power;
+ char *radius_expr;
} FilterParam;
typedef struct BoxBlurContext {
@@ -65,9 +66,6 @@ typedef struct BoxBlurContext {
FilterParam luma_param;
FilterParam chroma_param;
FilterParam alpha_param;
- char *luma_radius_expr;
- char *chroma_radius_expr;
- char *alpha_radius_expr;
int hsub, vsub;
int radius[4];
@@ -84,23 +82,27 @@ static av_cold int init(AVFilterContext *ctx)
{
BoxBlurContext *s = ctx->priv;
- if (!s->luma_radius_expr) {
+ if (!s->luma_param.radius_expr) {
av_log(ctx, AV_LOG_ERROR, "Luma radius expression is not set.\n");
return AVERROR(EINVAL);
}
- if (!s->chroma_radius_expr) {
- s->chroma_radius_expr = av_strdup(s->luma_radius_expr);
- if (!s->chroma_radius_expr)
+ /* fill missing params */
+ if (!s->chroma_param.radius_expr) {
+ s->chroma_param.radius_expr = av_strdup(s->luma_param.radius_expr);
+ if (!s->chroma_param.radius_expr)
return AVERROR(ENOMEM);
- s->chroma_param.power = s->luma_param.power;
}
- if (!s->alpha_radius_expr) {
- s->alpha_radius_expr = av_strdup(s->luma_radius_expr);
- if (!s->alpha_radius_expr)
+ if (s->chroma_param.power < 0)
+ s->chroma_param.power = s->luma_param.power;
+
+ if (!s->alpha_param.radius_expr) {
+ s->alpha_param.radius_expr = av_strdup(s->luma_param.radius_expr);
+ if (!s->alpha_param.radius_expr)
return AVERROR(ENOMEM);
- s->alpha_param.power = s->luma_param.power;
}
+ if (s->alpha_param.power < 0)
+ s->alpha_param.power = s->luma_param.power;
return 0;
}
@@ -115,17 +117,19 @@ static av_cold void uninit(AVFilterContext *ctx)
static int query_formats(AVFilterContext *ctx)
{
- enum AVPixelFormat pix_fmts[] = {
- AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P,
- AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUVA420P,
- AV_PIX_FMT_YUV440P, AV_PIX_FMT_GRAY8,
- AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
- AV_PIX_FMT_YUVJ440P,
- AV_PIX_FMT_NONE
- };
-
- ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
- return 0;
+ AVFilterFormats *formats = NULL;
+ int fmt, ret;
+
+ for (fmt = 0; av_pix_fmt_desc_get(fmt); fmt++) {
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
+ if (!(desc->flags & (AV_PIX_FMT_FLAG_HWACCEL | AV_PIX_FMT_FLAG_BITSTREAM | AV_PIX_FMT_FLAG_PAL)) &&
+ (desc->flags & AV_PIX_FMT_FLAG_PLANAR || desc->nb_components == 1) &&
+ (!(desc->flags & AV_PIX_FMT_FLAG_BE) == !HAVE_BIGENDIAN || desc->comp[0].depth == 8) &&
+ (ret = ff_add_format(&formats, fmt)) < 0)
+ return ret;
+ }
+
+ return ff_set_common_formats(ctx, formats);
}
static int config_input(AVFilterLink *inlink)
@@ -139,14 +143,9 @@ static int config_input(AVFilterLink *inlink)
char *expr;
int ret;
- av_freep(&s->temp[0]);
- av_freep(&s->temp[1]);
- if (!(s->temp[0] = av_malloc(FFMAX(w, h))))
- return AVERROR(ENOMEM);
- if (!(s->temp[1] = av_malloc(FFMAX(w, h)))) {
- av_freep(&s->temp[0]);
+ if (!(s->temp[0] = av_malloc(2*FFMAX(w, h))) ||
+ !(s->temp[1] = av_malloc(2*FFMAX(w, h))))
return AVERROR(ENOMEM);
- }
s->hsub = desc->log2_chroma_w;
s->vsub = desc->log2_chroma_h;
@@ -159,7 +158,7 @@ static int config_input(AVFilterLink *inlink)
var_values[VAR_VSUB] = 1<<s->vsub;
#define EVAL_RADIUS_EXPR(comp) \
- expr = s->comp##_radius_expr; \
+ expr = s->comp##_param.radius_expr; \
ret = av_expr_parse_and_eval(&res, expr, var_names, var_values, \
NULL, NULL, NULL, NULL, NULL, 0, ctx); \
s->comp##_param.radius = res; \
@@ -172,7 +171,7 @@ static int config_input(AVFilterLink *inlink)
EVAL_RADIUS_EXPR(chroma);
EVAL_RADIUS_EXPR(alpha);
- av_log(ctx, AV_LOG_DEBUG,
+ av_log(ctx, AV_LOG_VERBOSE,
"luma_radius:%d luma_power:%d "
"chroma_radius:%d chroma_power:%d "
"alpha_radius:%d alpha_power:%d "
@@ -205,75 +204,97 @@ static int config_input(AVFilterLink *inlink)
return 0;
}
-static inline void blur(uint8_t *dst, int dst_step, const uint8_t *src, int src_step,
- int len, int radius)
-{
- /* Naive boxblur would sum source pixels from x-radius .. x+radius
- * for destination pixel x. That would be O(radius*width).
- * If you now look at what source pixels represent 2 consecutive
- * output pixels, then you see they are almost identical and only
- * differ by 2 pixels, like:
- * src0 111111111
- * dst0 1
- * src1 111111111
- * dst1 1
- * src0-src1 1 -1
- * so when you know one output pixel you can find the next by just adding
- * and subtracting 1 input pixel.
- * The following code adopts this faster variant.
- */
- const int length = radius*2 + 1;
- const int inv = ((1<<16) + length/2)/length;
- int x, sum = 0;
-
- for (x = 0; x < radius; x++)
- sum += src[x*src_step]<<1;
- sum += src[radius*src_step];
-
- for (x = 0; x <= radius; x++) {
- sum += src[(radius+x)*src_step] - src[(radius-x)*src_step];
- dst[x*dst_step] = (sum*inv + (1<<15))>>16;
- }
+/* Naive boxblur would sum source pixels from x-radius .. x+radius
+ * for destination pixel x. That would be O(radius*width).
+ * If you now look at what source pixels represent 2 consecutive
+ * output pixels, then you see they are almost identical and only
+ * differ by 2 pixels, like:
+ * src0 111111111
+ * dst0 1
+ * src1 111111111
+ * dst1 1
+ * src0-src1 1 -1
+ * so when you know one output pixel you can find the next by just adding
+ * and subtracting 1 input pixel.
+ * The following code adopts this faster variant.
+ */
+#define BLUR(type, depth) \
+static inline void blur ## depth(type *dst, int dst_step, const type *src, \
+ int src_step, int len, int radius) \
+{ \
+ const int length = radius*2 + 1; \
+ const int inv = ((1<<16) + length/2)/length; \
+ int x, sum = src[radius*src_step]; \
+ \
+ for (x = 0; x < radius; x++) \
+ sum += src[x*src_step]<<1; \
+ \
+ sum = sum*inv + (1<<15); \
+ \
+ for (x = 0; x <= radius; x++) { \
+ sum += (src[(radius+x)*src_step] - src[(radius-x)*src_step])*inv; \
+ dst[x*dst_step] = sum>>16; \
+ } \
+ \
+ for (; x < len-radius; x++) { \
+ sum += (src[(radius+x)*src_step] - src[(x-radius-1)*src_step])*inv; \
+ dst[x*dst_step] = sum >>16; \
+ } \
+ \
+ for (; x < len; x++) { \
+ sum += (src[(2*len-radius-x-1)*src_step] - src[(x-radius-1)*src_step])*inv; \
+ dst[x*dst_step] = sum>>16; \
+ } \
+}
- for (; x < len-radius; x++) {
- sum += src[(radius+x)*src_step] - src[(x-radius-1)*src_step];
- dst[x*dst_step] = (sum*inv + (1<<15))>>16;
- }
+BLUR(uint8_t, 8)
+BLUR(uint16_t, 16)
- for (; x < len; x++) {
- sum += src[(2*len-radius-x-1)*src_step] - src[(x-radius-1)*src_step];
- dst[x*dst_step] = (sum*inv + (1<<15))>>16;
- }
+#undef BLUR
+
+static inline void blur(uint8_t *dst, int dst_step, const uint8_t *src, int src_step,
+ int len, int radius, int pixsize)
+{
+ if (pixsize == 1) blur8 (dst, dst_step , src, src_step , len, radius);
+ else blur16((uint16_t*)dst, dst_step>>1, (const uint16_t*)src, src_step>>1, len, radius);
}
static inline void blur_power(uint8_t *dst, int dst_step, const uint8_t *src, int src_step,
- int len, int radius, int power, uint8_t *temp[2])
+ int len, int radius, int power, uint8_t *temp[2], int pixsize)
{
uint8_t *a = temp[0], *b = temp[1];
if (radius && power) {
- blur(a, 1, src, src_step, len, radius);
+ blur(a, pixsize, src, src_step, len, radius, pixsize);
for (; power > 2; power--) {
uint8_t *c;
- blur(b, 1, a, 1, len, radius);
+ blur(b, pixsize, a, pixsize, len, radius, pixsize);
c = a; a = b; b = c;
}
if (power > 1) {
- blur(dst, dst_step, a, 1, len, radius);
+ blur(dst, dst_step, a, pixsize, len, radius, pixsize);
} else {
int i;
- for (i = 0; i < len; i++)
- dst[i*dst_step] = a[i];
+ if (pixsize == 1) {
+ for (i = 0; i < len; i++)
+ dst[i*dst_step] = a[i];
+ } else
+ for (i = 0; i < len; i++)
+ *(uint16_t*)(dst + i*dst_step) = ((uint16_t*)a)[i];
}
} else {
int i;
- for (i = 0; i < len; i++)
- dst[i*dst_step] = src[i*src_step];
+ if (pixsize == 1) {
+ for (i = 0; i < len; i++)
+ dst[i*dst_step] = src[i*src_step];
+ } else
+ for (i = 0; i < len; i++)
+ *(uint16_t*)(dst + i*dst_step) = *(uint16_t*)(src + i*src_step);
}
}
static void hblur(uint8_t *dst, int dst_linesize, const uint8_t *src, int src_linesize,
- int w, int h, int radius, int power, uint8_t *temp[2])
+ int w, int h, int radius, int power, uint8_t *temp[2], int pixsize)
{
int y;
@@ -281,12 +302,12 @@ static void hblur(uint8_t *dst, int dst_linesize, const uint8_t *src, int src_li
return;
for (y = 0; y < h; y++)
- blur_power(dst + y*dst_linesize, 1, src + y*src_linesize, 1,
- w, radius, power, temp);
+ blur_power(dst + y*dst_linesize, pixsize, src + y*src_linesize, pixsize,
+ w, radius, power, temp, pixsize);
}
static void vblur(uint8_t *dst, int dst_linesize, const uint8_t *src, int src_linesize,
- int w, int h, int radius, int power, uint8_t *temp[2])
+ int w, int h, int radius, int power, uint8_t *temp[2], int pixsize)
{
int x;
@@ -294,8 +315,8 @@ static void vblur(uint8_t *dst, int dst_linesize, const uint8_t *src, int src_li
return;
for (x = 0; x < w; x++)
- blur_power(dst + x, dst_linesize, src + x, src_linesize,
- h, radius, power, temp);
+ blur_power(dst + x*pixsize, dst_linesize, src + x*pixsize, src_linesize,
+ h, radius, power, temp, pixsize);
}
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
@@ -305,9 +326,12 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
AVFilterLink *outlink = inlink->dst->outputs[0];
AVFrame *out;
int plane;
- int cw = inlink->w >> s->hsub, ch = in->height >> s->vsub;
+ int cw = AV_CEIL_RSHIFT(inlink->w, s->hsub), ch = AV_CEIL_RSHIFT(in->height, s->vsub);
int w[4] = { inlink->w, cw, cw, inlink->w };
int h[4] = { in->height, ch, ch, in->height };
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ const int depth = desc->comp[0].depth;
+ const int pixsize = (depth+7)/8;
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
@@ -316,17 +340,17 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
}
av_frame_copy_props(out, in);
- for (plane = 0; in->data[plane] && plane < 4; plane++)
+ for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++)
hblur(out->data[plane], out->linesize[plane],
in ->data[plane], in ->linesize[plane],
w[plane], h[plane], s->radius[plane], s->power[plane],
- s->temp);
+ s->temp, pixsize);
- for (plane = 0; in->data[plane] && plane < 4; plane++)
+ for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++)
vblur(out->data[plane], out->linesize[plane],
out->data[plane], out->linesize[plane],
w[plane], h[plane], s->radius[plane], s->power[plane],
- s->temp);
+ s->temp, pixsize);
av_frame_free(&in);
@@ -334,27 +358,29 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
}
#define OFFSET(x) offsetof(BoxBlurContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
-static const AVOption options[] = {
- { "luma_radius", "Radius of the luma blurring box", OFFSET(luma_radius_expr), AV_OPT_TYPE_STRING, .flags = FLAGS },
- { "luma_power", "How many times should the boxblur be applied to luma",
- OFFSET(luma_param.power), AV_OPT_TYPE_INT, { .i64 = 1 }, 0, INT_MAX, FLAGS },
- { "chroma_radius", "Radius of the chroma blurring box", OFFSET(chroma_radius_expr), AV_OPT_TYPE_STRING, .flags = FLAGS },
- { "chroma_power", "How many times should the boxblur be applied to chroma",
- OFFSET(chroma_param.power), AV_OPT_TYPE_INT, { .i64 = 1 }, 0, INT_MAX, FLAGS },
- { "alpha_radius", "Radius of the alpha blurring box", OFFSET(alpha_radius_expr), AV_OPT_TYPE_STRING, .flags = FLAGS },
- { "alpha_power", "How many times should the boxblur be applied to alpha",
- OFFSET(alpha_param.power), AV_OPT_TYPE_INT, { .i64 = 1 }, 0, INT_MAX, FLAGS },
- { NULL },
-};
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption boxblur_options[] = {
+ { "luma_radius", "Radius of the luma blurring box", OFFSET(luma_param.radius_expr), AV_OPT_TYPE_STRING, {.str="2"}, .flags = FLAGS },
+ { "lr", "Radius of the luma blurring box", OFFSET(luma_param.radius_expr), AV_OPT_TYPE_STRING, {.str="2"}, .flags = FLAGS },
+ { "luma_power", "How many times should the boxblur be applied to luma", OFFSET(luma_param.power), AV_OPT_TYPE_INT, {.i64=2}, 0, INT_MAX, .flags = FLAGS },
+ { "lp", "How many times should the boxblur be applied to luma", OFFSET(luma_param.power), AV_OPT_TYPE_INT, {.i64=2}, 0, INT_MAX, .flags = FLAGS },
+
+ { "chroma_radius", "Radius of the chroma blurring box", OFFSET(chroma_param.radius_expr), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
+ { "cr", "Radius of the chroma blurring box", OFFSET(chroma_param.radius_expr), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
+ { "chroma_power", "How many times should the boxblur be applied to chroma", OFFSET(chroma_param.power), AV_OPT_TYPE_INT, {.i64=-1}, -1, INT_MAX, .flags = FLAGS },
+ { "cp", "How many times should the boxblur be applied to chroma", OFFSET(chroma_param.power), AV_OPT_TYPE_INT, {.i64=-1}, -1, INT_MAX, .flags = FLAGS },
-static const AVClass boxblur_class = {
- .class_name = "boxblur",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
+ { "alpha_radius", "Radius of the alpha blurring box", OFFSET(alpha_param.radius_expr), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
+ { "ar", "Radius of the alpha blurring box", OFFSET(alpha_param.radius_expr), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
+ { "alpha_power", "How many times should the boxblur be applied to alpha", OFFSET(alpha_param.power), AV_OPT_TYPE_INT, {.i64=-1}, -1, INT_MAX, .flags = FLAGS },
+ { "ap", "How many times should the boxblur be applied to alpha", OFFSET(alpha_param.power), AV_OPT_TYPE_INT, {.i64=-1}, -1, INT_MAX, .flags = FLAGS },
+
+ { NULL }
};
+AVFILTER_DEFINE_CLASS(boxblur);
+
static const AVFilterPad avfilter_vf_boxblur_inputs[] = {
{
.name = "default",
@@ -381,7 +407,7 @@ AVFilter ff_vf_boxblur = {
.init = init,
.uninit = uninit,
.query_formats = query_formats,
-
- .inputs = avfilter_vf_boxblur_inputs,
- .outputs = avfilter_vf_boxblur_outputs,
+ .inputs = avfilter_vf_boxblur_inputs,
+ .outputs = avfilter_vf_boxblur_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
diff --git a/libavfilter/vf_bwdif.c b/libavfilter/vf_bwdif.c
new file mode 100644
index 0000000000..b691983611
--- /dev/null
+++ b/libavfilter/vf_bwdif.c
@@ -0,0 +1,584 @@
+/*
+ * BobWeaver Deinterlacing Filter
+ * Copyright (C) 2016 Thomas Mundt <loudmax@yahoo.de>
+ *
+ * Based on YADIF (Yet Another Deinterlacing Filter)
+ * Copyright (C) 2006-2011 Michael Niedermayer <michaelni@gmx.at>
+ * 2010 James Darnley <james.darnley@gmail.com>
+ *
+ * With use of Weston 3 Field Deinterlacing Filter algorithm
+ * Copyright (C) 2012 British Broadcasting Corporation, All Rights Reserved
+ * Author of de-interlace algorithm: Jim Easterbrook for BBC R&D
+ * Based on the process described by Martin Weston for BBC R&D
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/common.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/imgutils.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+#include "bwdif.h"
+
+/*
+ * Filter coefficients coef_lf and coef_hf taken from BBC PH-2071 (Weston 3 Field Deinterlacer).
+ * Used when there is spatial and temporal interpolation.
+ * Filter coefficients coef_sp are used when there is spatial interpolation only.
+ * Adjusted for matching visual sharpness impression of spatial and temporal interpolation.
+ */
+static const uint16_t coef_lf[2] = { 4309, 213 };
+static const uint16_t coef_hf[3] = { 5570, 3801, 1016 };
+static const uint16_t coef_sp[2] = { 5077, 981 };
+
+typedef struct ThreadData {
+ AVFrame *frame;
+ int plane;
+ int w, h;
+ int parity;
+ int tff;
+} ThreadData;
+
+#define FILTER_INTRA() \
+ for (x = 0; x < w; x++) { \
+ interpol = (coef_sp[0] * (cur[mrefs] + cur[prefs]) - coef_sp[1] * (cur[mrefs3] + cur[prefs3])) >> 13; \
+ dst[0] = av_clip(interpol, 0, clip_max); \
+ \
+ dst++; \
+ cur++; \
+ }
+
+#define FILTER1() \
+ for (x = 0; x < w; x++) { \
+ int c = cur[mrefs]; \
+ int d = (prev2[0] + next2[0]) >> 1; \
+ int e = cur[prefs]; \
+ int temporal_diff0 = FFABS(prev2[0] - next2[0]); \
+ int temporal_diff1 =(FFABS(prev[mrefs] - c) + FFABS(prev[prefs] - e)) >> 1; \
+ int temporal_diff2 =(FFABS(next[mrefs] - c) + FFABS(next[prefs] - e)) >> 1; \
+ int diff = FFMAX3(temporal_diff0 >> 1, temporal_diff1, temporal_diff2); \
+ \
+ if (!diff) { \
+ dst[0] = d; \
+ } else {
+
+#define SPAT_CHECK() \
+ int b = ((prev2[mrefs2] + next2[mrefs2]) >> 1) - c; \
+ int f = ((prev2[prefs2] + next2[prefs2]) >> 1) - e; \
+ int dc = d - c; \
+ int de = d - e; \
+ int max = FFMAX3(de, dc, FFMIN(b, f)); \
+ int min = FFMIN3(de, dc, FFMAX(b, f)); \
+ diff = FFMAX3(diff, min, -max);
+
+#define FILTER_LINE() \
+ SPAT_CHECK() \
+ if (FFABS(c - e) > temporal_diff0) { \
+ interpol = (((coef_hf[0] * (prev2[0] + next2[0]) \
+ - coef_hf[1] * (prev2[mrefs2] + next2[mrefs2] + prev2[prefs2] + next2[prefs2]) \
+ + coef_hf[2] * (prev2[mrefs4] + next2[mrefs4] + prev2[prefs4] + next2[prefs4])) >> 2) \
+ + coef_lf[0] * (c + e) - coef_lf[1] * (cur[mrefs3] + cur[prefs3])) >> 13; \
+ } else { \
+ interpol = (coef_sp[0] * (c + e) - coef_sp[1] * (cur[mrefs3] + cur[prefs3])) >> 13; \
+ }
+
+#define FILTER_EDGE() \
+ if (spat) { \
+ SPAT_CHECK() \
+ } \
+ interpol = (c + e) >> 1;
+
+#define FILTER2() \
+ if (interpol > d + diff) \
+ interpol = d + diff; \
+ else if (interpol < d - diff) \
+ interpol = d - diff; \
+ \
+ dst[0] = av_clip(interpol, 0, clip_max); \
+ } \
+ \
+ dst++; \
+ cur++; \
+ prev++; \
+ next++; \
+ prev2++; \
+ next2++; \
+ }
+
+static void filter_intra(void *dst1, void *cur1, int w, int prefs, int mrefs,
+ int prefs3, int mrefs3, int parity, int clip_max)
+{
+ uint8_t *dst = dst1;
+ uint8_t *cur = cur1;
+ int interpol, x;
+
+ FILTER_INTRA()
+}
+
+static void filter_line_c(void *dst1, void *prev1, void *cur1, void *next1,
+ int w, int prefs, int mrefs, int prefs2, int mrefs2,
+ int prefs3, int mrefs3, int prefs4, int mrefs4,
+ int parity, int clip_max)
+{
+ uint8_t *dst = dst1;
+ uint8_t *prev = prev1;
+ uint8_t *cur = cur1;
+ uint8_t *next = next1;
+ uint8_t *prev2 = parity ? prev : cur ;
+ uint8_t *next2 = parity ? cur : next;
+ int interpol, x;
+
+ FILTER1()
+ FILTER_LINE()
+ FILTER2()
+}
+
+static void filter_edge(void *dst1, void *prev1, void *cur1, void *next1,
+ int w, int prefs, int mrefs, int prefs2, int mrefs2,
+ int parity, int clip_max, int spat)
+{
+ uint8_t *dst = dst1;
+ uint8_t *prev = prev1;
+ uint8_t *cur = cur1;
+ uint8_t *next = next1;
+ uint8_t *prev2 = parity ? prev : cur ;
+ uint8_t *next2 = parity ? cur : next;
+ int interpol, x;
+
+ FILTER1()
+ FILTER_EDGE()
+ FILTER2()
+}
+
+static void filter_intra_16bit(void *dst1, void *cur1, int w, int prefs, int mrefs,
+ int prefs3, int mrefs3, int parity, int clip_max)
+{
+ uint16_t *dst = dst1;
+ uint16_t *cur = cur1;
+ int interpol, x;
+
+ FILTER_INTRA()
+}
+
+static void filter_line_c_16bit(void *dst1, void *prev1, void *cur1, void *next1,
+ int w, int prefs, int mrefs, int prefs2, int mrefs2,
+ int prefs3, int mrefs3, int prefs4, int mrefs4,
+ int parity, int clip_max)
+{
+ uint16_t *dst = dst1;
+ uint16_t *prev = prev1;
+ uint16_t *cur = cur1;
+ uint16_t *next = next1;
+ uint16_t *prev2 = parity ? prev : cur ;
+ uint16_t *next2 = parity ? cur : next;
+ int interpol, x;
+
+ FILTER1()
+ FILTER_LINE()
+ FILTER2()
+}
+
+static void filter_edge_16bit(void *dst1, void *prev1, void *cur1, void *next1,
+ int w, int prefs, int mrefs, int prefs2, int mrefs2,
+ int parity, int clip_max, int spat)
+{
+ uint16_t *dst = dst1;
+ uint16_t *prev = prev1;
+ uint16_t *cur = cur1;
+ uint16_t *next = next1;
+ uint16_t *prev2 = parity ? prev : cur ;
+ uint16_t *next2 = parity ? cur : next;
+ int interpol, x;
+
+ FILTER1()
+ FILTER_EDGE()
+ FILTER2()
+}
+
+static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
+{
+ BWDIFContext *s = ctx->priv;
+ ThreadData *td = arg;
+ int linesize = s->cur->linesize[td->plane];
+ int clip_max = (1 << (s->csp->comp[td->plane].depth)) - 1;
+ int df = (s->csp->comp[td->plane].depth + 7) / 8;
+ int refs = linesize / df;
+ int slice_start = (td->h * jobnr ) / nb_jobs;
+ int slice_end = (td->h * (jobnr+1)) / nb_jobs;
+ int y;
+
+ for (y = slice_start; y < slice_end; y++) {
+ if ((y ^ td->parity) & 1) {
+ uint8_t *prev = &s->prev->data[td->plane][y * linesize];
+ uint8_t *cur = &s->cur ->data[td->plane][y * linesize];
+ uint8_t *next = &s->next->data[td->plane][y * linesize];
+ uint8_t *dst = &td->frame->data[td->plane][y * td->frame->linesize[td->plane]];
+ if (!s->inter_field) {
+ s->filter_intra(dst, cur, td->w, (y + df) < td->h ? refs : -refs,
+ y > (df - 1) ? -refs : refs,
+ (y + 3*df) < td->h ? 3 * refs : -refs,
+ y > (3*df - 1) ? -3 * refs : refs,
+ td->parity ^ td->tff, clip_max);
+ } else if ((y < 4) || ((y + 5) > td->h)) {
+ s->filter_edge(dst, prev, cur, next, td->w,
+ (y + df) < td->h ? refs : -refs,
+ y > (df - 1) ? -refs : refs,
+ refs << 1, -(refs << 1),
+ td->parity ^ td->tff, clip_max,
+ (y < 2) || ((y + 3) > td->h) ? 0 : 1);
+ } else {
+ s->filter_line(dst, prev, cur, next, td->w,
+ refs, -refs, refs << 1, -(refs << 1),
+ 3 * refs, -3 * refs, refs << 2, -(refs << 2),
+ td->parity ^ td->tff, clip_max);
+ }
+ } else {
+ memcpy(&td->frame->data[td->plane][y * td->frame->linesize[td->plane]],
+ &s->cur->data[td->plane][y * linesize], td->w * df);
+ }
+ }
+ return 0;
+}
+
+static void filter(AVFilterContext *ctx, AVFrame *dstpic,
+ int parity, int tff)
+{
+ BWDIFContext *bwdif = ctx->priv;
+ ThreadData td = { .frame = dstpic, .parity = parity, .tff = tff };
+ int i;
+
+ for (i = 0; i < bwdif->csp->nb_components; i++) {
+ int w = dstpic->width;
+ int h = dstpic->height;
+
+ if (i == 1 || i == 2) {
+ w = AV_CEIL_RSHIFT(w, bwdif->csp->log2_chroma_w);
+ h = AV_CEIL_RSHIFT(h, bwdif->csp->log2_chroma_h);
+ }
+
+ td.w = w;
+ td.h = h;
+ td.plane = i;
+
+ ctx->internal->execute(ctx, filter_slice, &td, NULL, FFMIN(h, ff_filter_get_nb_threads(ctx)));
+ }
+ if (!bwdif->inter_field) {
+ bwdif->inter_field = 1;
+ }
+
+ emms_c();
+}
+
+static int return_frame(AVFilterContext *ctx, int is_second)
+{
+ BWDIFContext *bwdif = ctx->priv;
+ AVFilterLink *link = ctx->outputs[0];
+ int tff, ret;
+
+ if (bwdif->parity == -1) {
+ tff = bwdif->cur->interlaced_frame ?
+ bwdif->cur->top_field_first : 1;
+ } else {
+ tff = bwdif->parity ^ 1;
+ }
+
+ if (is_second) {
+ bwdif->out = ff_get_video_buffer(link, link->w, link->h);
+ if (!bwdif->out)
+ return AVERROR(ENOMEM);
+
+ av_frame_copy_props(bwdif->out, bwdif->cur);
+ bwdif->out->interlaced_frame = 0;
+ if (bwdif->inter_field < 0)
+ bwdif->inter_field = 0;
+ }
+
+ filter(ctx, bwdif->out, tff ^ !is_second, tff);
+
+ if (is_second) {
+ int64_t cur_pts = bwdif->cur->pts;
+ int64_t next_pts = bwdif->next->pts;
+
+ if (next_pts != AV_NOPTS_VALUE && cur_pts != AV_NOPTS_VALUE) {
+ bwdif->out->pts = cur_pts + next_pts;
+ } else {
+ bwdif->out->pts = AV_NOPTS_VALUE;
+ }
+ }
+ ret = ff_filter_frame(ctx->outputs[0], bwdif->out);
+
+ bwdif->frame_pending = (bwdif->mode&1) && !is_second;
+ return ret;
+}
+
+static int checkstride(BWDIFContext *bwdif, const AVFrame *a, const AVFrame *b)
+{
+ int i;
+ for (i = 0; i < bwdif->csp->nb_components; i++)
+ if (a->linesize[i] != b->linesize[i])
+ return 1;
+ return 0;
+}
+
+static void fixstride(AVFilterLink *link, AVFrame *f)
+{
+ AVFrame *dst = ff_default_get_video_buffer(link, f->width, f->height);
+ if(!dst)
+ return;
+ av_frame_copy_props(dst, f);
+ av_image_copy(dst->data, dst->linesize,
+ (const uint8_t **)f->data, f->linesize,
+ dst->format, dst->width, dst->height);
+ av_frame_unref(f);
+ av_frame_move_ref(f, dst);
+ av_frame_free(&dst);
+}
+
+static int filter_frame(AVFilterLink *link, AVFrame *frame)
+{
+ AVFilterContext *ctx = link->dst;
+ BWDIFContext *bwdif = ctx->priv;
+
+ av_assert0(frame);
+
+ if (bwdif->frame_pending)
+ return_frame(ctx, 1);
+
+ if (bwdif->prev)
+ av_frame_free(&bwdif->prev);
+ bwdif->prev = bwdif->cur;
+ bwdif->cur = bwdif->next;
+ bwdif->next = frame;
+
+ if (!bwdif->cur) {
+ bwdif->cur = av_frame_clone(bwdif->next);
+ if (!bwdif->cur)
+ return AVERROR(ENOMEM);
+ bwdif->inter_field = 0;
+ }
+
+ if (checkstride(bwdif, bwdif->next, bwdif->cur)) {
+ av_log(ctx, AV_LOG_VERBOSE, "Reallocating frame due to differing stride\n");
+ fixstride(link, bwdif->next);
+ }
+ if (checkstride(bwdif, bwdif->next, bwdif->cur))
+ fixstride(link, bwdif->cur);
+ if (bwdif->prev && checkstride(bwdif, bwdif->next, bwdif->prev))
+ fixstride(link, bwdif->prev);
+ if (checkstride(bwdif, bwdif->next, bwdif->cur) || (bwdif->prev && checkstride(bwdif, bwdif->next, bwdif->prev))) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to reallocate frame\n");
+ return -1;
+ }
+
+ if (!bwdif->prev)
+ return 0;
+
+ if ((bwdif->deint && !bwdif->cur->interlaced_frame) ||
+ ctx->is_disabled ||
+ (bwdif->deint && !bwdif->prev->interlaced_frame && bwdif->prev->repeat_pict) ||
+ (bwdif->deint && !bwdif->next->interlaced_frame && bwdif->next->repeat_pict)
+ ) {
+ bwdif->out = av_frame_clone(bwdif->cur);
+ if (!bwdif->out)
+ return AVERROR(ENOMEM);
+
+ av_frame_free(&bwdif->prev);
+ if (bwdif->out->pts != AV_NOPTS_VALUE)
+ bwdif->out->pts *= 2;
+ return ff_filter_frame(ctx->outputs[0], bwdif->out);
+ }
+
+ bwdif->out = ff_get_video_buffer(ctx->outputs[0], link->w, link->h);
+ if (!bwdif->out)
+ return AVERROR(ENOMEM);
+
+ av_frame_copy_props(bwdif->out, bwdif->cur);
+ bwdif->out->interlaced_frame = 0;
+
+ if (bwdif->out->pts != AV_NOPTS_VALUE)
+ bwdif->out->pts *= 2;
+
+ return return_frame(ctx, 0);
+}
+
+static int request_frame(AVFilterLink *link)
+{
+ AVFilterContext *ctx = link->src;
+ BWDIFContext *bwdif = ctx->priv;
+ int ret;
+
+ if (bwdif->frame_pending) {
+ return_frame(ctx, 1);
+ return 0;
+ }
+
+ if (bwdif->eof)
+ return AVERROR_EOF;
+
+ ret = ff_request_frame(link->src->inputs[0]);
+
+ if (ret == AVERROR_EOF && bwdif->cur) {
+ AVFrame *next = av_frame_clone(bwdif->next);
+
+ if (!next)
+ return AVERROR(ENOMEM);
+
+ bwdif->inter_field = -1;
+ next->pts = bwdif->next->pts * 2 - bwdif->cur->pts;
+
+ filter_frame(link->src->inputs[0], next);
+ bwdif->eof = 1;
+ } else if (ret < 0) {
+ return ret;
+ }
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ BWDIFContext *bwdif = ctx->priv;
+
+ av_frame_free(&bwdif->prev);
+ av_frame_free(&bwdif->cur );
+ av_frame_free(&bwdif->next);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P,
+ AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUVJ420P,
+ AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ444P,
+ AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
+ AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
+ AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12,
+ AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV444P14,
+ AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
+ AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P,
+ AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9,
+ AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10,
+ AV_PIX_FMT_YUVA420P16, AV_PIX_FMT_YUVA422P16, AV_PIX_FMT_YUVA444P16,
+ AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
+ AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16,
+ AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRAP16,
+ AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY16,
+ AV_PIX_FMT_NONE
+ };
+
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static int config_props(AVFilterLink *link)
+{
+ AVFilterContext *ctx = link->src;
+ BWDIFContext *s = link->src->priv;
+
+ link->time_base.num = link->src->inputs[0]->time_base.num;
+ link->time_base.den = link->src->inputs[0]->time_base.den * 2;
+ link->w = link->src->inputs[0]->w;
+ link->h = link->src->inputs[0]->h;
+
+ if(s->mode&1)
+ link->frame_rate = av_mul_q(link->src->inputs[0]->frame_rate, (AVRational){2,1});
+
+ if (link->w < 3 || link->h < 3) {
+ av_log(ctx, AV_LOG_ERROR, "Video of less than 3 columns or lines is not supported\n");
+ return AVERROR(EINVAL);
+ }
+
+ s->csp = av_pix_fmt_desc_get(link->format);
+ if (s->csp->comp[0].depth > 8) {
+ s->filter_intra = filter_intra_16bit;
+ s->filter_line = filter_line_c_16bit;
+ s->filter_edge = filter_edge_16bit;
+ } else {
+ s->filter_intra = filter_intra;
+ s->filter_line = filter_line_c;
+ s->filter_edge = filter_edge;
+ }
+
+ if (ARCH_X86)
+ ff_bwdif_init_x86(s);
+
+ return 0;
+}
+
+
+#define OFFSET(x) offsetof(BWDIFContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+#define CONST(name, help, val, unit) { name, help, 0, AV_OPT_TYPE_CONST, {.i64=val}, INT_MIN, INT_MAX, FLAGS, unit }
+
+static const AVOption bwdif_options[] = {
+ { "mode", "specify the interlacing mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=BWDIF_MODE_SEND_FIELD}, 0, 1, FLAGS, "mode"},
+ CONST("send_frame", "send one frame for each frame", BWDIF_MODE_SEND_FRAME, "mode"),
+ CONST("send_field", "send one frame for each field", BWDIF_MODE_SEND_FIELD, "mode"),
+
+ { "parity", "specify the assumed picture field parity", OFFSET(parity), AV_OPT_TYPE_INT, {.i64=BWDIF_PARITY_AUTO}, -1, 1, FLAGS, "parity" },
+ CONST("tff", "assume top field first", BWDIF_PARITY_TFF, "parity"),
+ CONST("bff", "assume bottom field first", BWDIF_PARITY_BFF, "parity"),
+ CONST("auto", "auto detect parity", BWDIF_PARITY_AUTO, "parity"),
+
+ { "deint", "specify which frames to deinterlace", OFFSET(deint), AV_OPT_TYPE_INT, {.i64=BWDIF_DEINT_ALL}, 0, 1, FLAGS, "deint" },
+ CONST("all", "deinterlace all frames", BWDIF_DEINT_ALL, "deint"),
+ CONST("interlaced", "only deinterlace frames marked as interlaced", BWDIF_DEINT_INTERLACED, "deint"),
+
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(bwdif);
+
+static const AVFilterPad avfilter_vf_bwdif_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad avfilter_vf_bwdif_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .request_frame = request_frame,
+ .config_props = config_props,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_bwdif = {
+ .name = "bwdif",
+ .description = NULL_IF_CONFIG_SMALL("Deinterlace the input image."),
+ .priv_size = sizeof(BWDIFContext),
+ .priv_class = &bwdif_class,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = avfilter_vf_bwdif_inputs,
+ .outputs = avfilter_vf_bwdif_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
+};
diff --git a/libavfilter/vf_chromakey.c b/libavfilter/vf_chromakey.c
new file mode 100644
index 0000000000..88414783bc
--- /dev/null
+++ b/libavfilter/vf_chromakey.c
@@ -0,0 +1,208 @@
+/*
+ * Copyright (c) 2015 Timo Rothenpieler <timo@rothenpieler.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/opt.h"
+#include "libavutil/imgutils.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+typedef struct ChromakeyContext {
+ const AVClass *class;
+
+ uint8_t chromakey_rgba[4];
+ uint8_t chromakey_uv[2];
+
+ float similarity;
+ float blend;
+
+ int is_yuv;
+
+ int hsub_log2;
+ int vsub_log2;
+} ChromakeyContext;
+
+static uint8_t do_chromakey_pixel(ChromakeyContext *ctx, uint8_t u[9], uint8_t v[9])
+{
+ double diff = 0.0;
+ int du, dv, i;
+
+ for (i = 0; i < 9; ++i) {
+ du = (int)u[i] - ctx->chromakey_uv[0];
+ dv = (int)v[i] - ctx->chromakey_uv[1];
+
+ diff += sqrt((du * du + dv * dv) / (255.0 * 255.0));
+ }
+
+ diff /= 9.0;
+
+ if (ctx->blend > 0.0001) {
+ return av_clipd((diff - ctx->similarity) / ctx->blend, 0.0, 1.0) * 255.0;
+ } else {
+ return (diff > ctx->similarity) ? 255 : 0;
+ }
+}
+
+static av_always_inline void get_pixel_uv(AVFrame *frame, int hsub_log2, int vsub_log2, int x, int y, uint8_t *u, uint8_t *v)
+{
+ if (x < 0 || x >= frame->width || y < 0 || y >= frame->height)
+ return;
+
+ x >>= hsub_log2;
+ y >>= vsub_log2;
+
+ *u = frame->data[1][frame->linesize[1] * y + x];
+ *v = frame->data[2][frame->linesize[2] * y + x];
+}
+
+static int do_chromakey_slice(AVFilterContext *avctx, void *arg, int jobnr, int nb_jobs)
+{
+ AVFrame *frame = arg;
+
+ const int slice_start = (frame->height * jobnr) / nb_jobs;
+ const int slice_end = (frame->height * (jobnr + 1)) / nb_jobs;
+
+ ChromakeyContext *ctx = avctx->priv;
+
+ int x, y, xo, yo;
+ uint8_t u[9], v[9];
+
+ memset(u, ctx->chromakey_uv[0], sizeof(u));
+ memset(v, ctx->chromakey_uv[1], sizeof(v));
+
+ for (y = slice_start; y < slice_end; ++y) {
+ for (x = 0; x < frame->width; ++x) {
+ for (yo = 0; yo < 3; ++yo) {
+ for (xo = 0; xo < 3; ++xo) {
+ get_pixel_uv(frame, ctx->hsub_log2, ctx->vsub_log2, x + xo - 1, y + yo - 1, &u[yo * 3 + xo], &v[yo * 3 + xo]);
+ }
+ }
+
+ frame->data[3][frame->linesize[3] * y + x] = do_chromakey_pixel(ctx, u, v);
+ }
+ }
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *link, AVFrame *frame)
+{
+ AVFilterContext *avctx = link->dst;
+ int res;
+
+ if (res = avctx->internal->execute(avctx, do_chromakey_slice, frame, NULL, FFMIN(frame->height, ff_filter_get_nb_threads(avctx))))
+ return res;
+
+ return ff_filter_frame(avctx->outputs[0], frame);
+}
+
+#define FIXNUM(x) lrint((x) * (1 << 10))
+#define RGB_TO_U(rgb) (((- FIXNUM(0.16874) * rgb[0] - FIXNUM(0.33126) * rgb[1] + FIXNUM(0.50000) * rgb[2] + (1 << 9) - 1) >> 10) + 128)
+#define RGB_TO_V(rgb) ((( FIXNUM(0.50000) * rgb[0] - FIXNUM(0.41869) * rgb[1] - FIXNUM(0.08131) * rgb[2] + (1 << 9) - 1) >> 10) + 128)
+
+static av_cold int initialize_chromakey(AVFilterContext *avctx)
+{
+ ChromakeyContext *ctx = avctx->priv;
+
+ if (ctx->is_yuv) {
+ ctx->chromakey_uv[0] = ctx->chromakey_rgba[1];
+ ctx->chromakey_uv[1] = ctx->chromakey_rgba[2];
+ } else {
+ ctx->chromakey_uv[0] = RGB_TO_U(ctx->chromakey_rgba);
+ ctx->chromakey_uv[1] = RGB_TO_V(ctx->chromakey_rgba);
+ }
+
+ return 0;
+}
+
+static av_cold int query_formats(AVFilterContext *avctx)
+{
+ static const enum AVPixelFormat pixel_fmts[] = {
+ AV_PIX_FMT_YUVA420P,
+ AV_PIX_FMT_YUVA422P,
+ AV_PIX_FMT_YUVA444P,
+ AV_PIX_FMT_NONE
+ };
+
+ AVFilterFormats *formats = NULL;
+
+ formats = ff_make_format_list(pixel_fmts);
+ if (!formats)
+ return AVERROR(ENOMEM);
+
+ return ff_set_common_formats(avctx, formats);
+}
+
+static av_cold int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *avctx = inlink->dst;
+ ChromakeyContext *ctx = avctx->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+
+ ctx->hsub_log2 = desc->log2_chroma_w;
+ ctx->vsub_log2 = desc->log2_chroma_h;
+
+ return 0;
+}
+
+static const AVFilterPad chromakey_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .needs_writable = 1,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ { NULL }
+};
+
+static const AVFilterPad chromakey_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+#define OFFSET(x) offsetof(ChromakeyContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption chromakey_options[] = {
+ { "color", "set the chromakey key color", OFFSET(chromakey_rgba), AV_OPT_TYPE_COLOR, { .str = "black" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "similarity", "set the chromakey similarity value", OFFSET(similarity), AV_OPT_TYPE_FLOAT, { .dbl = 0.01 }, 0.01, 1.0, FLAGS },
+ { "blend", "set the chromakey key blend value", OFFSET(blend), AV_OPT_TYPE_FLOAT, { .dbl = 0.0 }, 0.0, 1.0, FLAGS },
+ { "yuv", "color parameter is in yuv instead of rgb", OFFSET(is_yuv), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(chromakey);
+
+AVFilter ff_vf_chromakey = {
+ .name = "chromakey",
+ .description = NULL_IF_CONFIG_SMALL("Turns a certain color into transparency. Operates on YUV colors."),
+ .priv_size = sizeof(ChromakeyContext),
+ .priv_class = &chromakey_class,
+ .init = initialize_chromakey,
+ .query_formats = query_formats,
+ .inputs = chromakey_inputs,
+ .outputs = chromakey_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
+};
diff --git a/libavfilter/vf_ciescope.c b/libavfilter/vf_ciescope.c
new file mode 100644
index 0000000000..7c0cfed061
--- /dev/null
+++ b/libavfilter/vf_ciescope.c
@@ -0,0 +1,1512 @@
+/*
+ * Copyright (c) 2000 John Walker
+ * Copyright (c) 2016 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/intreadwrite.h"
+#include "libavutil/opt.h"
+#include "libavutil/parseutils.h"
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+enum CieSystem {
+ XYY,
+ UCS,
+ LUV,
+ NB_CIE
+};
+
+enum ColorsSystems {
+ NTSCsystem,
+ EBUsystem,
+ SMPTEsystem,
+ SMPTE240Msystem,
+ APPLEsystem,
+ wRGBsystem,
+ CIE1931system,
+ Rec709system,
+ Rec2020system,
+ NB_CS
+};
+
+typedef struct CiescopeContext {
+ const AVClass *class;
+ int color_system;
+ unsigned gamuts;
+ int size;
+ int show_white;
+ int correct_gamma;
+ int cie;
+ float intensity;
+ float contrast;
+ int background;
+
+ double log2lin[65536];
+ double igamma;
+ double i[3][3];
+ double m[3][3];
+ AVFrame *f;
+ void (*filter)(AVFilterContext *ctx, AVFrame *in, double *cx, double *cy, int x, int y);
+} CiescopeContext;
+
+#define OFFSET(x) offsetof(CiescopeContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption ciescope_options[] = {
+ { "system", "set color system", OFFSET(color_system), AV_OPT_TYPE_INT, {.i64=Rec709system}, 0, NB_CS-1, FLAGS, "system" },
+ { "ntsc", "NTSC 1953 Y'I'O' (ITU-R BT.470 System M)", 0, AV_OPT_TYPE_CONST, {.i64=NTSCsystem}, 0, 0, FLAGS, "system" },
+ { "470m", "NTSC 1953 Y'I'O' (ITU-R BT.470 System M)", 0, AV_OPT_TYPE_CONST, {.i64=NTSCsystem}, 0, 0, FLAGS, "system" },
+ { "ebu", "EBU Y'U'V' (PAL/SECAM) (ITU-R BT.470 System B, G)", 0, AV_OPT_TYPE_CONST, {.i64=EBUsystem}, 0, 0, FLAGS, "system" },
+ { "470bg", "EBU Y'U'V' (PAL/SECAM) (ITU-R BT.470 System B, G)", 0, AV_OPT_TYPE_CONST, {.i64=EBUsystem}, 0, 0, FLAGS, "system" },
+ { "smpte", "SMPTE-C RGB", 0, AV_OPT_TYPE_CONST, {.i64=SMPTEsystem}, 0, 0, FLAGS, "system" },
+ { "240m", "SMPTE-240M Y'PbPr", 0, AV_OPT_TYPE_CONST, {.i64=SMPTE240Msystem},0, 0, FLAGS, "system" },
+ { "apple", "Apple RGB", 0, AV_OPT_TYPE_CONST, {.i64=APPLEsystem}, 0, 0, FLAGS, "system" },
+ { "widergb", "Adobe Wide Gamut RGB", 0, AV_OPT_TYPE_CONST, {.i64=wRGBsystem}, 0, 0, FLAGS, "system" },
+ { "cie1931", "CIE 1931 RGB", 0, AV_OPT_TYPE_CONST, {.i64=CIE1931system}, 0, 0, FLAGS, "system" },
+ { "hdtv", "ITU.BT-709 Y'CbCr", 0, AV_OPT_TYPE_CONST, {.i64=Rec709system}, 0, 0, FLAGS, "system" },
+ { "rec709", "ITU.BT-709 Y'CbCr", 0, AV_OPT_TYPE_CONST, {.i64=Rec709system}, 0, 0, FLAGS, "system" },
+ { "uhdtv", "ITU-R.BT-2020", 0, AV_OPT_TYPE_CONST, {.i64=Rec2020system}, 0, 0, FLAGS, "system" },
+ { "rec2020", "ITU-R.BT-2020", 0, AV_OPT_TYPE_CONST, {.i64=Rec2020system}, 0, 0, FLAGS, "system" },
+ { "cie", "set cie system", OFFSET(cie), AV_OPT_TYPE_INT, {.i64=XYY}, 0, NB_CIE-1, FLAGS, "cie" },
+ { "xyy", "CIE 1931 xyY", 0, AV_OPT_TYPE_CONST, {.i64=XYY}, 0, 0, FLAGS, "cie" },
+ { "ucs", "CIE 1960 UCS", 0, AV_OPT_TYPE_CONST, {.i64=UCS}, 0, 0, FLAGS, "cie" },
+ { "luv", "CIE 1976 Luv", 0, AV_OPT_TYPE_CONST, {.i64=LUV}, 0, 0, FLAGS, "cie" },
+ { "gamuts", "set what gamuts to draw", OFFSET(gamuts), AV_OPT_TYPE_FLAGS, {.i64=0}, 0, 0xFFF, FLAGS, "gamuts" },
+ { "ntsc", NULL, 0, AV_OPT_TYPE_CONST, {.i64=1<<NTSCsystem}, 0, 0, FLAGS, "gamuts" },
+ { "470m", NULL, 0, AV_OPT_TYPE_CONST, {.i64=1<<NTSCsystem}, 0, 0, FLAGS, "gamuts" },
+ { "ebu", NULL, 0, AV_OPT_TYPE_CONST, {.i64=1<<EBUsystem}, 0, 0, FLAGS, "gamuts" },
+ { "470bg", NULL, 0, AV_OPT_TYPE_CONST, {.i64=1<<EBUsystem}, 0, 0, FLAGS, "gamuts" },
+ { "smpte", NULL, 0, AV_OPT_TYPE_CONST, {.i64=1<<SMPTEsystem}, 0, 0, FLAGS, "gamuts" },
+ { "240m", NULL, 0, AV_OPT_TYPE_CONST, {.i64=1<<SMPTE240Msystem}, 0, 0, FLAGS, "gamuts" },
+ { "apple", NULL, 0, AV_OPT_TYPE_CONST, {.i64=1<<APPLEsystem}, 0, 0, FLAGS, "gamuts" },
+ { "widergb", NULL, 0, AV_OPT_TYPE_CONST, {.i64=1<<wRGBsystem}, 0, 0, FLAGS, "gamuts" },
+ { "cie1931", NULL, 0, AV_OPT_TYPE_CONST, {.i64=1<<CIE1931system}, 0, 0, FLAGS, "gamuts" },
+ { "hdtv", NULL, 0, AV_OPT_TYPE_CONST, {.i64=1<<Rec709system}, 0, 0, FLAGS, "gamuts" },
+ { "rec709", NULL, 0, AV_OPT_TYPE_CONST, {.i64=1<<Rec709system}, 0, 0, FLAGS, "gamuts" },
+ { "uhdtv", NULL, 0, AV_OPT_TYPE_CONST, {.i64=1<<Rec2020system}, 0, 0, FLAGS, "gamuts" },
+ { "rec2020", NULL, 0, AV_OPT_TYPE_CONST, {.i64=1<<Rec2020system}, 0, 0, FLAGS, "gamuts" },
+ { "size", "set ciescope size", OFFSET(size), AV_OPT_TYPE_INT, {.i64=512}, 256, 8192, FLAGS },
+ { "s", "set ciescope size", OFFSET(size), AV_OPT_TYPE_INT, {.i64=512}, 256, 8192, FLAGS },
+ { "intensity", "set ciescope intensity", OFFSET(intensity), AV_OPT_TYPE_FLOAT, {.dbl=0.001}, 0, 1, FLAGS },
+ { "i", "set ciescope intensity", OFFSET(intensity), AV_OPT_TYPE_FLOAT, {.dbl=0.001}, 0, 1, FLAGS },
+ { "contrast", NULL, OFFSET(contrast), AV_OPT_TYPE_FLOAT, {.dbl=0.75}, 0, 1, FLAGS },
+ { "corrgamma", NULL, OFFSET(correct_gamma), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, FLAGS },
+ { "showwhite", NULL, OFFSET(show_white), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS },
+ { "gamma", NULL, OFFSET(igamma), AV_OPT_TYPE_DOUBLE, {.dbl=2.6}, 0.1, 6, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(ciescope);
+
+static const enum AVPixelFormat in_pix_fmts[] = {
+ AV_PIX_FMT_RGB24,
+ AV_PIX_FMT_RGBA,
+ AV_PIX_FMT_RGB48,
+ AV_PIX_FMT_RGBA64,
+ AV_PIX_FMT_XYZ12,
+ AV_PIX_FMT_NONE
+};
+
+static const enum AVPixelFormat out_pix_fmts[] = {
+ AV_PIX_FMT_RGBA64,
+ AV_PIX_FMT_NONE
+};
+
+static int query_formats(AVFilterContext *ctx)
+{
+ int ret;
+
+ if ((ret = ff_formats_ref(ff_make_format_list(in_pix_fmts), &ctx->inputs[0]->out_formats)) < 0)
+ return ret;
+
+ if ((ret = ff_formats_ref(ff_make_format_list(out_pix_fmts), &ctx->outputs[0]->in_formats)) < 0)
+ return ret;
+
+ return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ CiescopeContext *s = outlink->src->priv;
+
+ outlink->h = outlink->w = s->size;
+ outlink->sample_aspect_ratio = (AVRational){1,1};
+
+ return 0;
+}
+
+/* A color system is defined by the CIE x and y coordinates of its
+ three primary illuminants and the x and y coordinates of the white
+ point. */
+
+struct ColorSystem {
+ double xRed, yRed, /* Red primary illuminant */
+ xGreen, yGreen, /* Green primary illuminant */
+ xBlue, yBlue, /* Blue primary illuminant */
+ xWhite, yWhite, /* White point */
+ gamma; /* gamma of nonlinear correction */
+};
+
+static float const spectral_chromaticity[][3] = {
+ { 0.175560, 0.005294, 0.819146 },
+ { 0.175483, 0.005286, 0.819231 },
+ { 0.175400, 0.005279, 0.819321 },
+ { 0.175317, 0.005271, 0.819412 },
+ { 0.175237, 0.005263, 0.819500 },
+ { 0.175161, 0.005256, 0.819582 },
+ { 0.175088, 0.005247, 0.819665 },
+ { 0.175015, 0.005236, 0.819749 },
+ { 0.174945, 0.005226, 0.819829 },
+ { 0.174880, 0.005221, 0.819899 },
+ { 0.174821, 0.005221, 0.819959 },
+ { 0.174770, 0.005229, 0.820001 },
+ { 0.174722, 0.005238, 0.820040 },
+ { 0.174665, 0.005236, 0.820098 },
+ { 0.174595, 0.005218, 0.820187 },
+ { 0.174510, 0.005182, 0.820309 },
+ { 0.174409, 0.005127, 0.820464 },
+ { 0.174308, 0.005068, 0.820624 },
+ { 0.174222, 0.005017, 0.820761 },
+ { 0.174156, 0.004981, 0.820863 },
+ { 0.174112, 0.004964, 0.820924 },
+ { 0.174088, 0.004964, 0.820948 },
+ { 0.174073, 0.004973, 0.820955 },
+ { 0.174057, 0.004982, 0.820961 },
+ { 0.174036, 0.004986, 0.820978 },
+ { 0.174008, 0.004981, 0.821012 },
+ { 0.173972, 0.004964, 0.821064 },
+ { 0.173932, 0.004943, 0.821125 },
+ { 0.173889, 0.004926, 0.821185 },
+ { 0.173845, 0.004916, 0.821239 },
+ { 0.173801, 0.004915, 0.821284 },
+ { 0.173754, 0.004925, 0.821321 },
+ { 0.173705, 0.004937, 0.821358 },
+ { 0.173655, 0.004944, 0.821401 },
+ { 0.173606, 0.004940, 0.821454 },
+ { 0.173560, 0.004923, 0.821517 },
+ { 0.173514, 0.004895, 0.821590 },
+ { 0.173468, 0.004865, 0.821667 },
+ { 0.173424, 0.004836, 0.821740 },
+ { 0.173380, 0.004813, 0.821807 },
+ { 0.173337, 0.004797, 0.821866 },
+ { 0.173291, 0.004786, 0.821923 },
+ { 0.173238, 0.004779, 0.821983 },
+ { 0.173174, 0.004775, 0.822051 },
+ { 0.173101, 0.004774, 0.822125 },
+ { 0.173021, 0.004775, 0.822204 },
+ { 0.172934, 0.004781, 0.822285 },
+ { 0.172843, 0.004791, 0.822366 },
+ { 0.172751, 0.004799, 0.822450 },
+ { 0.172662, 0.004802, 0.822536 },
+ { 0.172577, 0.004799, 0.822624 },
+ { 0.172489, 0.004795, 0.822715 },
+ { 0.172396, 0.004796, 0.822808 },
+ { 0.172296, 0.004803, 0.822901 },
+ { 0.172192, 0.004815, 0.822993 },
+ { 0.172087, 0.004833, 0.823081 },
+ { 0.171982, 0.004855, 0.823163 },
+ { 0.171871, 0.004889, 0.823240 },
+ { 0.171741, 0.004939, 0.823319 },
+ { 0.171587, 0.005010, 0.823402 },
+ { 0.171407, 0.005102, 0.823490 },
+ { 0.171206, 0.005211, 0.823583 },
+ { 0.170993, 0.005334, 0.823674 },
+ { 0.170771, 0.005470, 0.823759 },
+ { 0.170541, 0.005621, 0.823838 },
+ { 0.170301, 0.005789, 0.823911 },
+ { 0.170050, 0.005974, 0.823976 },
+ { 0.169786, 0.006177, 0.824037 },
+ { 0.169505, 0.006398, 0.824097 },
+ { 0.169203, 0.006639, 0.824158 },
+ { 0.168878, 0.006900, 0.824222 },
+ { 0.168525, 0.007184, 0.824291 },
+ { 0.168146, 0.007491, 0.824363 },
+ { 0.167746, 0.007821, 0.824433 },
+ { 0.167328, 0.008175, 0.824496 },
+ { 0.166895, 0.008556, 0.824549 },
+ { 0.166446, 0.008964, 0.824589 },
+ { 0.165977, 0.009402, 0.824622 },
+ { 0.165483, 0.009865, 0.824652 },
+ { 0.164963, 0.010351, 0.824687 },
+ { 0.164412, 0.010858, 0.824731 },
+ { 0.163828, 0.011385, 0.824787 },
+ { 0.163210, 0.011937, 0.824853 },
+ { 0.162552, 0.012520, 0.824928 },
+ { 0.161851, 0.013137, 0.825011 },
+ { 0.161105, 0.013793, 0.825102 },
+ { 0.160310, 0.014491, 0.825199 },
+ { 0.159466, 0.015232, 0.825302 },
+ { 0.158573, 0.016015, 0.825412 },
+ { 0.157631, 0.016840, 0.825529 },
+ { 0.156641, 0.017705, 0.825654 },
+ { 0.155605, 0.018609, 0.825786 },
+ { 0.154525, 0.019556, 0.825920 },
+ { 0.153397, 0.020554, 0.826049 },
+ { 0.152219, 0.021612, 0.826169 },
+ { 0.150985, 0.022740, 0.826274 },
+ { 0.149691, 0.023950, 0.826359 },
+ { 0.148337, 0.025247, 0.826416 },
+ { 0.146928, 0.026635, 0.826437 },
+ { 0.145468, 0.028118, 0.826413 },
+ { 0.143960, 0.029703, 0.826337 },
+ { 0.142405, 0.031394, 0.826201 },
+ { 0.140796, 0.033213, 0.825991 },
+ { 0.139121, 0.035201, 0.825679 },
+ { 0.137364, 0.037403, 0.825233 },
+ { 0.135503, 0.039879, 0.824618 },
+ { 0.133509, 0.042692, 0.823798 },
+ { 0.131371, 0.045876, 0.822753 },
+ { 0.129086, 0.049450, 0.821464 },
+ { 0.126662, 0.053426, 0.819912 },
+ { 0.124118, 0.057803, 0.818079 },
+ { 0.121469, 0.062588, 0.815944 },
+ { 0.118701, 0.067830, 0.813468 },
+ { 0.115807, 0.073581, 0.810612 },
+ { 0.112776, 0.079896, 0.807328 },
+ { 0.109594, 0.086843, 0.803563 },
+ { 0.106261, 0.094486, 0.799253 },
+ { 0.102776, 0.102864, 0.794360 },
+ { 0.099128, 0.112007, 0.788865 },
+ { 0.095304, 0.121945, 0.782751 },
+ { 0.091294, 0.132702, 0.776004 },
+ { 0.087082, 0.144317, 0.768601 },
+ { 0.082680, 0.156866, 0.760455 },
+ { 0.078116, 0.170420, 0.751464 },
+ { 0.073437, 0.185032, 0.741531 },
+ { 0.068706, 0.200723, 0.730571 },
+ { 0.063993, 0.217468, 0.718539 },
+ { 0.059316, 0.235254, 0.705430 },
+ { 0.054667, 0.254096, 0.691238 },
+ { 0.050031, 0.274002, 0.675967 },
+ { 0.045391, 0.294976, 0.659633 },
+ { 0.040757, 0.316981, 0.642262 },
+ { 0.036195, 0.339900, 0.623905 },
+ { 0.031756, 0.363598, 0.604646 },
+ { 0.027494, 0.387921, 0.584584 },
+ { 0.023460, 0.412703, 0.563837 },
+ { 0.019705, 0.437756, 0.542539 },
+ { 0.016268, 0.462955, 0.520777 },
+ { 0.013183, 0.488207, 0.498610 },
+ { 0.010476, 0.513404, 0.476120 },
+ { 0.008168, 0.538423, 0.453409 },
+ { 0.006285, 0.563068, 0.430647 },
+ { 0.004875, 0.587116, 0.408008 },
+ { 0.003982, 0.610447, 0.385570 },
+ { 0.003636, 0.633011, 0.363352 },
+ { 0.003859, 0.654823, 0.341318 },
+ { 0.004646, 0.675898, 0.319456 },
+ { 0.006011, 0.696120, 0.297869 },
+ { 0.007988, 0.715342, 0.276670 },
+ { 0.010603, 0.733413, 0.255984 },
+ { 0.013870, 0.750186, 0.235943 },
+ { 0.017766, 0.765612, 0.216622 },
+ { 0.022244, 0.779630, 0.198126 },
+ { 0.027273, 0.792104, 0.180623 },
+ { 0.032820, 0.802926, 0.164254 },
+ { 0.038852, 0.812016, 0.149132 },
+ { 0.045328, 0.819391, 0.135281 },
+ { 0.052177, 0.825164, 0.122660 },
+ { 0.059326, 0.829426, 0.111249 },
+ { 0.066716, 0.832274, 0.101010 },
+ { 0.074302, 0.833803, 0.091894 },
+ { 0.082053, 0.834090, 0.083856 },
+ { 0.089942, 0.833289, 0.076769 },
+ { 0.097940, 0.831593, 0.070468 },
+ { 0.106021, 0.829178, 0.064801 },
+ { 0.114161, 0.826207, 0.059632 },
+ { 0.122347, 0.822770, 0.054882 },
+ { 0.130546, 0.818928, 0.050526 },
+ { 0.138702, 0.814774, 0.046523 },
+ { 0.146773, 0.810395, 0.042832 },
+ { 0.154722, 0.805864, 0.039414 },
+ { 0.162535, 0.801238, 0.036226 },
+ { 0.170237, 0.796519, 0.033244 },
+ { 0.177850, 0.791687, 0.030464 },
+ { 0.185391, 0.786728, 0.027881 },
+ { 0.192876, 0.781629, 0.025495 },
+ { 0.200309, 0.776399, 0.023292 },
+ { 0.207690, 0.771055, 0.021255 },
+ { 0.215030, 0.765595, 0.019375 },
+ { 0.222337, 0.760020, 0.017643 },
+ { 0.229620, 0.754329, 0.016051 },
+ { 0.236885, 0.748524, 0.014591 },
+ { 0.244133, 0.742614, 0.013253 },
+ { 0.251363, 0.736606, 0.012031 },
+ { 0.258578, 0.730507, 0.010916 },
+ { 0.265775, 0.724324, 0.009901 },
+ { 0.272958, 0.718062, 0.008980 },
+ { 0.280129, 0.711725, 0.008146 },
+ { 0.287292, 0.705316, 0.007391 },
+ { 0.294450, 0.698842, 0.006708 },
+ { 0.301604, 0.692308, 0.006088 },
+ { 0.308760, 0.685712, 0.005528 },
+ { 0.315914, 0.679063, 0.005022 },
+ { 0.323066, 0.672367, 0.004566 },
+ { 0.330216, 0.665628, 0.004156 },
+ { 0.337363, 0.658848, 0.003788 },
+ { 0.344513, 0.652028, 0.003459 },
+ { 0.351664, 0.645172, 0.003163 },
+ { 0.358814, 0.638287, 0.002899 },
+ { 0.365959, 0.631379, 0.002662 },
+ { 0.373102, 0.624451, 0.002448 },
+ { 0.380244, 0.617502, 0.002254 },
+ { 0.387379, 0.610542, 0.002079 },
+ { 0.394507, 0.603571, 0.001922 },
+ { 0.401626, 0.596592, 0.001782 },
+ { 0.408736, 0.589607, 0.001657 },
+ { 0.415836, 0.582618, 0.001546 },
+ { 0.422921, 0.575631, 0.001448 },
+ { 0.429989, 0.568649, 0.001362 },
+ { 0.437036, 0.561676, 0.001288 },
+ { 0.444062, 0.554714, 0.001224 },
+ { 0.451065, 0.547766, 0.001169 },
+ { 0.458041, 0.540837, 0.001123 },
+ { 0.464986, 0.533930, 0.001084 },
+ { 0.471899, 0.527051, 0.001051 },
+ { 0.478775, 0.520202, 0.001023 },
+ { 0.485612, 0.513389, 0.001000 },
+ { 0.492405, 0.506615, 0.000980 },
+ { 0.499151, 0.499887, 0.000962 },
+ { 0.505845, 0.493211, 0.000944 },
+ { 0.512486, 0.486591, 0.000923 },
+ { 0.519073, 0.480029, 0.000899 },
+ { 0.525600, 0.473527, 0.000872 },
+ { 0.532066, 0.467091, 0.000843 },
+ { 0.538463, 0.460725, 0.000812 },
+ { 0.544787, 0.454434, 0.000779 },
+ { 0.551031, 0.448225, 0.000744 },
+ { 0.557193, 0.442099, 0.000708 },
+ { 0.563269, 0.436058, 0.000673 },
+ { 0.569257, 0.430102, 0.000641 },
+ { 0.575151, 0.424232, 0.000616 },
+ { 0.580953, 0.418447, 0.000601 },
+ { 0.586650, 0.412758, 0.000591 },
+ { 0.592225, 0.407190, 0.000586 },
+ { 0.597658, 0.401762, 0.000580 },
+ { 0.602933, 0.396497, 0.000571 },
+ { 0.608035, 0.391409, 0.000556 },
+ { 0.612977, 0.386486, 0.000537 },
+ { 0.617779, 0.381706, 0.000516 },
+ { 0.622459, 0.377047, 0.000493 },
+ { 0.627037, 0.372491, 0.000472 },
+ { 0.631521, 0.368026, 0.000453 },
+ { 0.635900, 0.363665, 0.000435 },
+ { 0.640156, 0.359428, 0.000416 },
+ { 0.644273, 0.355331, 0.000396 },
+ { 0.648233, 0.351395, 0.000372 },
+ { 0.652028, 0.347628, 0.000344 },
+ { 0.655669, 0.344018, 0.000313 },
+ { 0.659166, 0.340553, 0.000281 },
+ { 0.662528, 0.337221, 0.000251 },
+ { 0.665764, 0.334011, 0.000226 },
+ { 0.668874, 0.330919, 0.000207 },
+ { 0.671859, 0.327947, 0.000194 },
+ { 0.674720, 0.325095, 0.000185 },
+ { 0.677459, 0.322362, 0.000179 },
+ { 0.680079, 0.319747, 0.000174 },
+ { 0.682582, 0.317249, 0.000170 },
+ { 0.684971, 0.314863, 0.000167 },
+ { 0.687250, 0.312586, 0.000164 },
+ { 0.689426, 0.310414, 0.000160 },
+ { 0.691504, 0.308342, 0.000154 },
+ { 0.693490, 0.306366, 0.000145 },
+ { 0.695389, 0.304479, 0.000133 },
+ { 0.697206, 0.302675, 0.000119 },
+ { 0.698944, 0.300950, 0.000106 },
+ { 0.700606, 0.299301, 0.000093 },
+ { 0.702193, 0.297725, 0.000083 },
+ { 0.703709, 0.296217, 0.000074 },
+ { 0.705163, 0.294770, 0.000067 },
+ { 0.706563, 0.293376, 0.000061 },
+ { 0.707918, 0.292027, 0.000055 },
+ { 0.709231, 0.290719, 0.000050 },
+ { 0.710500, 0.289453, 0.000047 },
+ { 0.711724, 0.288232, 0.000044 },
+ { 0.712901, 0.287057, 0.000041 },
+ { 0.714032, 0.285929, 0.000040 },
+ { 0.715117, 0.284845, 0.000038 },
+ { 0.716159, 0.283804, 0.000036 },
+ { 0.717159, 0.282806, 0.000035 },
+ { 0.718116, 0.281850, 0.000034 },
+ { 0.719033, 0.280935, 0.000032 },
+ { 0.719912, 0.280058, 0.000030 },
+ { 0.720753, 0.279219, 0.000028 },
+ { 0.721555, 0.278420, 0.000026 },
+ { 0.722315, 0.277662, 0.000023 },
+ { 0.723032, 0.276948, 0.000020 },
+ { 0.723702, 0.276282, 0.000016 },
+ { 0.724328, 0.275660, 0.000012 },
+ { 0.724914, 0.275078, 0.000007 },
+ { 0.725467, 0.274530, 0.000003 },
+ { 0.725992, 0.274008, 0.000000 },
+ { 0.726495, 0.273505, 0.000000 },
+ { 0.726975, 0.273025, 0.000000 },
+ { 0.727432, 0.272568, 0.000000 },
+ { 0.727864, 0.272136, 0.000000 },
+ { 0.728272, 0.271728, 0.000000 },
+ { 0.728656, 0.271344, 0.000000 },
+ { 0.729020, 0.270980, 0.000000 },
+ { 0.729361, 0.270639, 0.000000 },
+ { 0.729678, 0.270322, 0.000000 },
+ { 0.729969, 0.270031, 0.000000 },
+ { 0.730234, 0.269766, 0.000000 },
+ { 0.730474, 0.269526, 0.000000 },
+ { 0.730693, 0.269307, 0.000000 },
+ { 0.730896, 0.269104, 0.000000 },
+ { 0.731089, 0.268911, 0.000000 },
+ { 0.731280, 0.268720, 0.000000 },
+ { 0.731467, 0.268533, 0.000000 },
+ { 0.731650, 0.268350, 0.000000 },
+ { 0.731826, 0.268174, 0.000000 },
+ { 0.731993, 0.268007, 0.000000 },
+ { 0.732150, 0.267850, 0.000000 },
+ { 0.732300, 0.267700, 0.000000 },
+ { 0.732443, 0.267557, 0.000000 },
+ { 0.732581, 0.267419, 0.000000 },
+ { 0.732719, 0.267281, 0.000000 },
+ { 0.732859, 0.267141, 0.000000 },
+ { 0.733000, 0.267000, 0.000000 },
+ { 0.733142, 0.266858, 0.000000 },
+ { 0.733281, 0.266719, 0.000000 },
+ { 0.733417, 0.266583, 0.000000 },
+ { 0.733551, 0.266449, 0.000000 },
+ { 0.733683, 0.266317, 0.000000 },
+ { 0.733813, 0.266187, 0.000000 },
+ { 0.733936, 0.266064, 0.000000 },
+ { 0.734047, 0.265953, 0.000000 },
+ { 0.734143, 0.265857, 0.000000 },
+ { 0.734221, 0.265779, 0.000000 },
+ { 0.734286, 0.265714, 0.000000 },
+ { 0.734341, 0.265659, 0.000000 },
+ { 0.734390, 0.265610, 0.000000 },
+ { 0.734438, 0.265562, 0.000000 },
+ { 0.734482, 0.265518, 0.000000 },
+ { 0.734523, 0.265477, 0.000000 },
+ { 0.734560, 0.265440, 0.000000 },
+ { 0.734592, 0.265408, 0.000000 },
+ { 0.734621, 0.265379, 0.000000 },
+ { 0.734649, 0.265351, 0.000000 },
+ { 0.734673, 0.265327, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+ { 0.734690, 0.265310, 0.000000 },
+};
+
+
+/* Standard white point chromaticities. */
+
+#define C 0.310063, 0.316158
+#define E 1.0/3.0, 1.0/3.0
+#define D50 0.34570, 0.3585
+#define D65 0.312713, 0.329016
+
+/* Gamma of nonlinear correction.
+ See Charles Poynton's ColorFAQ Item 45 and GammaFAQ Item 6 at
+ http://www.inforamp.net/~poynton/ColorFAQ.html
+ http://www.inforamp.net/~poynton/GammaFAQ.html
+*/
+
+#define GAMMA_REC709 0. /* Rec. 709 */
+
+static const struct ColorSystem color_systems[] = {
+ [NTSCsystem] = {
+ 0.67, 0.33, 0.21, 0.71, 0.14, 0.08,
+ C, GAMMA_REC709
+ },
+ [EBUsystem] = {
+ 0.64, 0.33, 0.29, 0.60, 0.15, 0.06,
+ D65, GAMMA_REC709
+ },
+ [SMPTEsystem] = {
+ 0.630, 0.340, 0.310, 0.595, 0.155, 0.070,
+ D65, GAMMA_REC709
+ },
+ [SMPTE240Msystem] = {
+ 0.670, 0.330, 0.210, 0.710, 0.150, 0.060,
+ D65, GAMMA_REC709
+ },
+ [APPLEsystem] = {
+ 0.625, 0.340, 0.280, 0.595, 0.115, 0.070,
+ D65, GAMMA_REC709
+ },
+ [wRGBsystem] = {
+ 0.7347, 0.2653, 0.1152, 0.8264, 0.1566, 0.0177,
+ D50, GAMMA_REC709
+ },
+ [CIE1931system] = {
+ 0.7347, 0.2653, 0.2738, 0.7174, 0.1666, 0.0089,
+ E, GAMMA_REC709
+ },
+ [Rec709system] = {
+ 0.64, 0.33, 0.30, 0.60, 0.15, 0.06,
+ D65, GAMMA_REC709
+ },
+ [Rec2020system] = {
+ 0.708, 0.292, 0.170, 0.797, 0.131, 0.046,
+ D65, GAMMA_REC709
+ },
+};
+
+/*
+static struct ColorSystem CustomSystem = {
+ "Custom",
+ 0.64, 0.33, 0.30, 0.60, 0.15, 0.06,
+ D65, GAMMA_REC709
+};
+*/
+
+static void
+uv_to_xy(double const u,
+ double const v,
+ double * const xc,
+ double * const yc)
+{
+/*
+ Given 1970 coordinates u, v, determine 1931 chromaticities x, y
+*/
+ *xc = 3*u / (2*u - 8*v + 4);
+ *yc = 2*v / (2*u - 8*v + 4);
+}
+
+static void
+upvp_to_xy(double const up,
+ double const vp,
+ double * const xc,
+ double * const yc)
+{
+/*
+ Given 1976 coordinates u', v', determine 1931 chromaticities x, y
+*/
+ *xc = 9*up / (6*up - 16*vp + 12);
+ *yc = 4*vp / (6*up - 16*vp + 12);
+}
+
+static void
+xy_to_upvp(double xc,
+ double yc,
+ double * const up,
+ double * const vp)
+{
+/*
+ Given 1931 chromaticities x, y, determine 1976 coordinates u', v'
+*/
+ *up = 4*xc / (- 2*xc + 12*yc + 3);
+ *vp = 9*yc / (- 2*xc + 12*yc + 3);
+}
+
+static void
+xy_to_uv(double xc,
+ double yc,
+ double * const u,
+ double * const v)
+{
+/*
+ Given 1931 chromaticities x, y, determine 1960 coordinates u, v
+*/
+ *u = 4*xc / (- 2*xc + 12*yc + 3);
+ *v = 6*yc / (- 2*xc + 12*yc + 3);
+}
+
+static void
+xyz_to_rgb(const double m[3][3],
+ double xc, double yc, double zc,
+ double * const r, double * const g, double * const b)
+{
+ *r = m[0][0]*xc + m[0][1]*yc + m[0][2]*zc;
+ *g = m[1][0]*xc + m[1][1]*yc + m[1][2]*zc;
+ *b = m[2][0]*xc + m[2][1]*yc + m[2][2]*zc;
+}
+
+static void invert_matrix3x3(double in[3][3], double out[3][3])
+{
+ double m00 = in[0][0], m01 = in[0][1], m02 = in[0][2],
+ m10 = in[1][0], m11 = in[1][1], m12 = in[1][2],
+ m20 = in[2][0], m21 = in[2][1], m22 = in[2][2];
+ int i, j;
+ double det;
+
+ out[0][0] = (m11 * m22 - m21 * m12);
+ out[0][1] = -(m01 * m22 - m21 * m02);
+ out[0][2] = (m01 * m12 - m11 * m02);
+ out[1][0] = -(m10 * m22 - m20 * m12);
+ out[1][1] = (m00 * m22 - m20 * m02);
+ out[1][2] = -(m00 * m12 - m10 * m02);
+ out[2][0] = (m10 * m21 - m20 * m11);
+ out[2][1] = -(m00 * m21 - m20 * m01);
+ out[2][2] = (m00 * m11 - m10 * m01);
+
+ det = m00 * out[0][0] + m10 * out[0][1] + m20 * out[0][2];
+ det = 1.0 / det;
+
+ for (i = 0; i < 3; i++) {
+ for (j = 0; j < 3; j++)
+ out[i][j] *= det;
+ }
+}
+
+static void get_rgb2xyz_matrix(struct ColorSystem system, double m[3][3])
+{
+ double S[3], X[4], Z[4];
+ int i;
+
+ X[0] = system.xRed / system.yRed;
+ X[1] = system.xGreen / system.yGreen;
+ X[2] = system.xBlue / system.yBlue;
+ X[3] = system.xWhite / system.yWhite;
+
+ Z[0] = (1 - system.xRed - system.yRed) / system.yRed;
+ Z[1] = (1 - system.xGreen - system.yGreen) / system.yGreen;
+ Z[2] = (1 - system.xBlue - system.yBlue) / system.yBlue;
+ Z[3] = (1 - system.xWhite - system.yWhite) / system.yWhite;
+
+ for (i = 0; i < 3; i++) {
+ m[0][i] = X[i];
+ m[1][i] = 1;
+ m[2][i] = Z[i];
+ }
+
+ invert_matrix3x3(m, m);
+
+ for (i = 0; i < 3; i++)
+ S[i] = m[i][0] * X[3] + m[i][1] * 1 + m[i][2] * Z[3];
+
+ for (i = 0; i < 3; i++) {
+ m[0][i] = S[i] * X[i];
+ m[1][i] = S[i] * 1;
+ m[2][i] = S[i] * Z[i];
+ }
+}
+
+static void
+rgb_to_xy(double rc,
+ double gc,
+ double bc,
+ double * const x,
+ double * const y,
+ double * const z,
+ const double m[3][3])
+{
+ double sum;
+
+ *x = m[0][0] * rc + m[0][1] * gc + m[0][2] * bc;
+ *y = m[1][0] * rc + m[1][1] * gc + m[1][2] * bc;
+ *z = m[2][0] * rc + m[2][1] * gc + m[2][2] * bc;
+
+ sum = *x + *y + *z;
+
+ *x = *x / sum;
+ *y = *y / sum;
+}
+
+static int
+constrain_rgb(double * const r,
+ double * const g,
+ double * const b)
+{
+/*----------------------------------------------------------------------------
+ If the requested RGB shade contains a negative weight for one of
+ the primaries, it lies outside the color gamut accessible from
+ the given triple of primaries. Desaturate it by adding white,
+ equal quantities of R, G, and B, enough to make RGB all positive.
+-----------------------------------------------------------------------------*/
+ double w;
+
+ /* Amount of white needed is w = - min(0, *r, *g, *b) */
+ w = (0 < *r) ? 0 : *r;
+ w = (w < *g) ? w : *g;
+ w = (w < *b) ? w : *b;
+ w = - w;
+
+ /* Add just enough white to make r, g, b all positive. */
+ if (w > 0) {
+ *r += w; *g += w; *b += w;
+
+ return 1; /* Color modified to fit RGB gamut */
+ }
+
+ return 0; /* Color within RGB gamut */
+}
+
+static void
+gamma_correct(const struct ColorSystem * const cs,
+ double * const c)
+{
+/*----------------------------------------------------------------------------
+ Transform linear RGB values to nonlinear RGB values.
+
+ Rec. 709 is ITU-R Recommendation BT. 709 (1990)
+ ``Basic Parameter Values for the HDTV Standard for the Studio and for
+ International Programme Exchange'', formerly CCIR Rec. 709.
+
+ For details see
+ http://www.inforamp.net/~poynton/ColorFAQ.html
+ http://www.inforamp.net/~poynton/GammaFAQ.html
+-----------------------------------------------------------------------------*/
+ double gamma;
+ double cc;
+
+ gamma = cs->gamma;
+
+ if (gamma == 0.) {
+ /* Rec. 709 gamma correction. */
+ cc = 0.018;
+ if (*c < cc) {
+ *c *= (1.099 * pow(cc, 0.45) - 0.099) / cc;
+ } else {
+ *c = 1.099 * pow(*c, 0.45) - 0.099;
+ }
+ } else {
+ /* Nonlinear color = (Linear color)^(1/gamma) */
+ *c = pow(*c, 1./gamma);
+ }
+}
+
+
+
+static void
+gamma_correct_rgb(const struct ColorSystem * const cs,
+ double * const r,
+ double * const g,
+ double * const b)
+{
+ gamma_correct(cs, r);
+ gamma_correct(cs, g);
+ gamma_correct(cs, b);
+}
+
+/* Sz(X) is the displacement in pixels of a displacement of X normalized
+ distance units. (A normalized distance unit is 1/512 of the smaller
+ dimension of the canvas)
+*/
+#define Sz(x) (((x) * (int)FFMIN(w, h)) / 512)
+
+static void
+monochrome_color_location(double waveLength, int w, int h,
+ int cie, int *xP, int *yP)
+{
+ const int ix = waveLength - 360;
+ const double pX = spectral_chromaticity[ix][0];
+ const double pY = spectral_chromaticity[ix][1];
+ const double pZ = spectral_chromaticity[ix][2];
+ const double px = pX / (pX + pY + pZ);
+ const double py = pY / (pX + pY + pZ);
+
+ if (cie == LUV) {
+ double up, vp;
+
+ xy_to_upvp(px, py, &up, &vp);
+ *xP = up * (w - 1);
+ *yP = (h - 1) - vp * (h - 1);
+ } else if (cie == UCS) {
+ double u, v;
+
+ xy_to_uv(px, py, &u, &v);
+ *xP = u * (w - 1);
+ *yP = (h - 1) - v * (h - 1);
+ } else if (cie == XYY) {
+ *xP = px * (w - 1);
+ *yP = (h - 1) - py * (h - 1);
+ } else {
+ av_assert0(0);
+ }
+}
+
+static void
+find_tongue(uint16_t* const pixels,
+ int const w,
+ int const linesize,
+ int const row,
+ int * const presentP,
+ int * const leftEdgeP,
+ int * const rightEdgeP)
+{
+ int i;
+
+ for (i = 0; i < w && pixels[row * linesize + i * 4 + 0] == 0; i++)
+ ;
+
+ if (i >= w) {
+ *presentP = 0;
+ } else {
+ int j;
+ int const leftEdge = i;
+
+ *presentP = 1;
+
+ for (j = w - 1; j >= leftEdge && pixels[row * linesize + j * 4 + 0] == 0; j--)
+ ;
+
+ *rightEdgeP = j;
+ *leftEdgeP = leftEdge;
+ }
+}
+
+static void draw_line(uint16_t *const pixels, int linesize,
+ int x0, int y0, int x1, int y1,
+ int w, int h,
+ const uint16_t *const rgbcolor)
+{
+ int dx = FFABS(x1 - x0), sx = x0 < x1 ? 1 : -1;
+ int dy = FFABS(y1 - y0), sy = y0 < y1 ? 1 : -1;
+ int err = (dx > dy ? dx : -dy) / 2, e2;
+
+ for (;;) {
+ pixels[y0 * linesize + x0 * 4 + 0] = rgbcolor[0];
+ pixels[y0 * linesize + x0 * 4 + 1] = rgbcolor[1];
+ pixels[y0 * linesize + x0 * 4 + 2] = rgbcolor[2];
+ pixels[y0 * linesize + x0 * 4 + 3] = rgbcolor[3];
+
+ if (x0 == x1 && y0 == y1)
+ break;
+
+ e2 = err;
+
+ if (e2 >-dx) {
+ err -= dy;
+ x0 += sx;
+ }
+
+ if (e2 < dy) {
+ err += dx;
+ y0 += sy;
+ }
+ }
+}
+
+static void draw_rline(uint16_t *const pixels, int linesize,
+ int x0, int y0, int x1, int y1,
+ int w, int h)
+{
+ int dx = FFABS(x1 - x0), sx = x0 < x1 ? 1 : -1;
+ int dy = FFABS(y1 - y0), sy = y0 < y1 ? 1 : -1;
+ int err = (dx > dy ? dx : -dy) / 2, e2;
+
+ for (;;) {
+ pixels[y0 * linesize + x0 * 4 + 0] = 65535 - pixels[y0 * linesize + x0 * 4 + 0];
+ pixels[y0 * linesize + x0 * 4 + 1] = 65535 - pixels[y0 * linesize + x0 * 4 + 1];
+ pixels[y0 * linesize + x0 * 4 + 2] = 65535 - pixels[y0 * linesize + x0 * 4 + 2];
+ pixels[y0 * linesize + x0 * 4 + 3] = 65535;
+
+ if (x0 == x1 && y0 == y1)
+ break;
+
+ e2 = err;
+
+ if (e2 >-dx) {
+ err -= dy;
+ x0 += sx;
+ }
+
+ if (e2 < dy) {
+ err += dx;
+ y0 += sy;
+ }
+ }
+}
+
+static void
+tongue_outline(uint16_t* const pixels,
+ int const linesize,
+ int const w,
+ int const h,
+ uint16_t const maxval,
+ int const cie)
+{
+ const uint16_t rgbcolor[4] = { maxval, maxval, maxval, maxval };
+ int wavelength;
+ int lx, ly;
+ int fx, fy;
+
+ for (wavelength = 360; wavelength <= 830; wavelength++) {
+ int icx, icy;
+
+ monochrome_color_location(wavelength, w, h, cie,
+ &icx, &icy);
+
+ if (wavelength > 360)
+ draw_line(pixels, linesize, lx, ly, icx, icy, w, h, rgbcolor);
+ else {
+ fx = icx;
+ fy = icy;
+ }
+ lx = icx;
+ ly = icy;
+ }
+ draw_line(pixels, linesize, lx, ly, fx, fy, w, h, rgbcolor);
+}
+
+static void
+fill_in_tongue(uint16_t* const pixels,
+ int const linesize,
+ int const w,
+ int const h,
+ uint16_t const maxval,
+ const struct ColorSystem * const cs,
+ double const m[3][3],
+ int const cie,
+ int const correct_gamma,
+ float const contrast)
+{
+ int y;
+
+ /* Scan the image line by line and fill the tongue outline
+ with the RGB values determined by the color system for the x-y
+ co-ordinates within the tongue.
+ */
+
+ for (y = 0; y < h; ++y) {
+ int present; /* There is some tongue on this line */
+ int leftEdge; /* x position of leftmost pixel in tongue on this line */
+ int rightEdge; /* same, but rightmost */
+
+ find_tongue(pixels, w, linesize, y, &present, &leftEdge, &rightEdge);
+
+ if (present) {
+ int x;
+
+ for (x = leftEdge; x <= rightEdge; ++x) {
+ double cx, cy, cz, jr, jg, jb, jmax;
+ int r, g, b, mx = maxval;
+
+ if (cie == LUV) {
+ double up, vp;
+ up = ((double) x) / (w - 1);
+ vp = 1.0 - ((double) y) / (h - 1);
+ upvp_to_xy(up, vp, &cx, &cy);
+ cz = 1.0 - (cx + cy);
+ } else if (cie == UCS) {
+ double u, v;
+ u = ((double) x) / (w - 1);
+ v = 1.0 - ((double) y) / (h - 1);
+ uv_to_xy(u, v, &cx, &cy);
+ cz = 1.0 - (cx + cy);
+ } else if (cie == XYY) {
+ cx = ((double) x) / (w - 1);
+ cy = 1.0 - ((double) y) / (h - 1);
+ cz = 1.0 - (cx + cy);
+ } else {
+ av_assert0(0);
+ }
+
+ xyz_to_rgb(m, cx, cy, cz, &jr, &jg, &jb);
+
+ /* Check whether the requested color is within the
+ gamut achievable with the given color system. If
+ not, draw it in a reduced intensity, interpolated
+ by desaturation to the closest within-gamut color. */
+
+ if (constrain_rgb(&jr, &jg, &jb))
+ mx *= contrast;
+
+ jmax = FFMAX3(jr, jg, jb);
+ if (jmax > 0) {
+ jr = jr / jmax;
+ jg = jg / jmax;
+ jb = jb / jmax;
+ }
+ /* gamma correct from linear rgb to nonlinear rgb. */
+ if (correct_gamma)
+ gamma_correct_rgb(cs, &jr, &jg, &jb);
+ r = mx * jr;
+ g = mx * jg;
+ b = mx * jb;
+ pixels[y * linesize + x * 4 + 0] = r;
+ pixels[y * linesize + x * 4 + 1] = g;
+ pixels[y * linesize + x * 4 + 2] = b;
+ pixels[y * linesize + x * 4 + 3] = 65535;
+ }
+ }
+ }
+}
+
+static void
+plot_white_point(uint16_t* pixels,
+ int const linesize,
+ int const w,
+ int const h,
+ int const maxval,
+ int const color_system,
+ int const cie)
+{
+ const struct ColorSystem *cs = &color_systems[color_system];
+ int wx, wy;
+
+ if (cie == LUV) {
+ double wup, wvp;
+ xy_to_upvp(cs->xWhite, cs->yWhite, &wup, &wvp);
+ wx = wup;
+ wy = wvp;
+ wx = (w - 1) * wup;
+ wy = (h - 1) - ((int) ((h - 1) * wvp));
+ } else if (cie == UCS) {
+ double wu, wv;
+ xy_to_uv(cs->xWhite, cs->yWhite, &wu, &wv);
+ wx = wu;
+ wy = wv;
+ wx = (w - 1) * wu;
+ wy = (h - 1) - ((int) ((h - 1) * wv));
+ } else if (cie == XYY) {
+ wx = (w - 1) * cs->xWhite;
+ wy = (h - 1) - ((int) ((h - 1) * cs->yWhite));
+ } else {
+ av_assert0(0);
+ }
+
+ draw_rline(pixels, linesize,
+ wx + Sz(3), wy, wx + Sz(10), wy,
+ w, h);
+ draw_rline(pixels, linesize,
+ wx - Sz(3), wy, wx - Sz(10), wy,
+ w, h);
+ draw_rline(pixels, linesize,
+ wx, wy + Sz(3), wx, wy + Sz(10),
+ w, h);
+ draw_rline(pixels, linesize,
+ wx, wy - Sz(3), wx, wy - Sz(10),
+ w, h);
+}
+
+static int draw_background(AVFilterContext *ctx)
+{
+ CiescopeContext *s = ctx->priv;
+ const struct ColorSystem *cs = &color_systems[s->color_system];
+ AVFilterLink *outlink = ctx->outputs[0];
+ int w = s->size;
+ int h = s->size;
+ uint16_t *pixels;
+
+ if ((s->f = ff_get_video_buffer(outlink, outlink->w, outlink->h)) == NULL)
+ return AVERROR(ENOMEM);
+ pixels = (uint16_t *)s->f->data[0];
+
+ tongue_outline(pixels, s->f->linesize[0] / 2, w, h, 65535, s->cie);
+
+ fill_in_tongue(pixels, s->f->linesize[0] / 2, w, h, 65535, cs, (const double (*)[3])s->i, s->cie,
+ s->correct_gamma, s->contrast);
+
+ return 0;
+}
+
+static void filter_rgb48(AVFilterContext *ctx, AVFrame *in, double *cx, double *cy, int x, int y)
+{
+ CiescopeContext *s = ctx->priv;
+ const uint16_t* src = (const uint16_t*)(in->data[0] + in->linesize[0] * y + x * 6);
+ double r = src[0] / 65535.;
+ double g = src[1] / 65535.;
+ double b = src[2] / 65535.;
+ double cz;
+
+ rgb_to_xy(r, g, b, cx, cy, &cz, (const double (*)[3])s->m);
+}
+
+static void filter_rgba64(AVFilterContext *ctx, AVFrame *in, double *cx, double *cy, int x, int y)
+{
+ CiescopeContext *s = ctx->priv;
+ const uint16_t* src = (const uint16_t*)(in->data[0] + in->linesize[0] * y + x * 8);
+ double r = src[0] / 65535.;
+ double g = src[1] / 65535.;
+ double b = src[2] / 65535.;
+ double cz;
+
+ rgb_to_xy(r, g, b, cx, cy, &cz, (const double (*)[3])s->m);
+}
+
+static void filter_rgb24(AVFilterContext *ctx, AVFrame *in, double *cx, double *cy, int x, int y)
+{
+ CiescopeContext *s = ctx->priv;
+ const uint8_t* src = in->data[0] + in->linesize[0] * y + x * 3;
+ double r = src[0] / 255.;
+ double g = src[1] / 255.;
+ double b = src[2] / 255.;
+ double cz;
+
+ rgb_to_xy(r, g, b, cx, cy, &cz, (const double (*)[3])s->m);
+}
+
+static void filter_rgba(AVFilterContext *ctx, AVFrame *in, double *cx, double *cy, int x, int y)
+{
+ CiescopeContext *s = ctx->priv;
+ const uint8_t* src = in->data[0] + in->linesize[0] * y + x * 4;
+ double r = src[0] / 255.;
+ double g = src[1] / 255.;
+ double b = src[2] / 255.;
+ double cz;
+
+ rgb_to_xy(r, g, b, cx, cy, &cz, (const double (*)[3])s->m);
+}
+
+static void filter_xyz(AVFilterContext *ctx, AVFrame *in, double *cx, double *cy, int x, int y)
+{
+ CiescopeContext *s = ctx->priv;
+ const uint16_t* src = (uint16_t *)(in->data[0] + in->linesize[0] * y + x * 6);
+ double lx = s->log2lin[src[0]];
+ double ly = s->log2lin[src[1]];
+ double lz = s->log2lin[src[2]];
+ double sum = lx + ly + lz;
+
+ if (sum == 0)
+ sum = 1;
+ *cx = lx / sum;
+ *cy = ly / sum;
+}
+
+static void plot_gamuts(uint16_t *pixels, int linesize, int w, int h,
+ int cie, int gamuts)
+{
+ int i;
+
+ for (i = 0; i < NB_CS; i++) {
+ const struct ColorSystem *cs = &color_systems[i];
+ int rx, ry, gx, gy, bx, by;
+
+ if (!((1 << i) & gamuts))
+ continue;
+ if (cie == LUV) {
+ double wup, wvp;
+ xy_to_upvp(cs->xRed, cs->yRed, &wup, &wvp);
+ rx = (w - 1) * wup;
+ ry = (h - 1) - ((int) ((h - 1) * wvp));
+ xy_to_upvp(cs->xGreen, cs->yGreen, &wup, &wvp);
+ gx = (w - 1) * wup;
+ gy = (h - 1) - ((int) ((h - 1) * wvp));
+ xy_to_upvp(cs->xBlue, cs->yBlue, &wup, &wvp);
+ bx = (w - 1) * wup;
+ by = (h - 1) - ((int) ((h - 1) * wvp));
+ } else if (cie == UCS) {
+ double wu, wv;
+ xy_to_uv(cs->xRed, cs->yRed, &wu, &wv);
+ rx = (w - 1) * wu;
+ ry = (h - 1) - ((int) ((h - 1) * wv));
+ xy_to_uv(cs->xGreen, cs->yGreen, &wu, &wv);
+ gx = (w - 1) * wu;
+ gy = (h - 1) - ((int) ((h - 1) * wv));
+ xy_to_uv(cs->xBlue, cs->yBlue, &wu, &wv);
+ bx = (w - 1) * wu;
+ by = (h - 1) - ((int) ((h - 1) * wv));
+ } else if (cie == XYY) {
+ rx = (w - 1) * cs->xRed;
+ ry = (h - 1) - ((int) ((h - 1) * cs->yRed));
+ gx = (w - 1) * cs->xGreen;
+ gy = (h - 1) - ((int) ((h - 1) * cs->yGreen));
+ bx = (w - 1) * cs->xBlue;
+ by = (h - 1) - ((int) ((h - 1) * cs->yBlue));
+ } else {
+ av_assert0(0);
+ }
+
+ draw_rline(pixels, linesize, rx, ry, gx, gy, w, h);
+ draw_rline(pixels, linesize, gx, gy, bx, by, w, h);
+ draw_rline(pixels, linesize, bx, by, rx, ry, w, h);
+ }
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ CiescopeContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ int i = s->intensity * 65535;
+ int w = outlink->w;
+ int h = outlink->h;
+ AVFrame *out;
+ int ret, x, y;
+
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ out->pts = in->pts;
+
+ if (!s->background) {
+ ret = draw_background(ctx);
+ if (ret < 0) {
+ av_frame_free(&out);
+ return ret;
+ }
+ s->background = 1;
+ }
+ for (y = 0; y < outlink->h; y++) {
+ memset(out->data[0] + y * out->linesize[0], 0, outlink->w * 8);
+ }
+
+ for (y = 0; y < in->height; y++) {
+ for (x = 0; x < in->width; x++) {
+ double cx, cy;
+ uint16_t *dst;
+ int wx, wy;
+
+ s->filter(ctx, in, &cx, &cy, x, y);
+
+ if (s->cie == LUV) {
+ double up, vp;
+ xy_to_upvp(cx, cy, &up, &vp);
+ cx = up;
+ cy = vp;
+ } else if (s->cie == UCS) {
+ double u, v;
+ xy_to_uv(cx, cy, &u, &v);
+ cx = u;
+ cy = v;
+ }
+
+ wx = (w - 1) * cx;
+ wy = (h - 1) - ((h - 1) * cy);
+
+ if (wx < 0 || wx >= w ||
+ wy < 0 || wy >= h)
+ continue;
+
+ dst = (uint16_t *)(out->data[0] + wy * out->linesize[0] + wx * 8 + 0);
+ dst[0] = FFMIN(dst[0] + i, 65535);
+ dst[1] = FFMIN(dst[1] + i, 65535);
+ dst[2] = FFMIN(dst[2] + i, 65535);
+ dst[3] = 65535;
+ }
+ }
+
+ for (y = 0; y < outlink->h; y++) {
+ uint16_t *dst = (uint16_t *)(out->data[0] + y * out->linesize[0]);
+ const uint16_t *src = (const uint16_t *)(s->f->data[0] + y * s->f->linesize[0]);
+ for (x = 0; x < outlink->w; x++) {
+ const int xx = x * 4;
+ if (dst[xx + 3] == 0) {
+ dst[xx + 0] = src[xx + 0];
+ dst[xx + 1] = src[xx + 1];
+ dst[xx + 2] = src[xx + 2];
+ dst[xx + 3] = src[xx + 3];
+ }
+ }
+ }
+
+ if (s->show_white)
+ plot_white_point((uint16_t *)out->data[0], out->linesize[0] / 2,
+ outlink->w, outlink->h, 65535,
+ s->color_system, s->cie);
+
+ plot_gamuts((uint16_t *)out->data[0], out->linesize[0] / 2,
+ outlink->w, outlink->h,
+ s->cie, s->gamuts);
+
+ av_frame_free(&in);
+ return ff_filter_frame(outlink, out);
+}
+
+static void av_cold uninit(AVFilterContext *ctx)
+{
+ CiescopeContext *s = ctx->priv;
+
+ av_frame_free(&s->f);
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ CiescopeContext *s = inlink->dst->priv;
+ int i;
+
+ get_rgb2xyz_matrix(color_systems[s->color_system], s->m);
+ invert_matrix3x3(s->m, s->i);
+
+ switch (inlink->format) {
+ case AV_PIX_FMT_RGB24:
+ s->filter = filter_rgb24;
+ break;
+ case AV_PIX_FMT_RGBA:
+ s->filter = filter_rgba;
+ break;
+ case AV_PIX_FMT_RGB48:
+ s->filter = filter_rgb48;
+ break;
+ case AV_PIX_FMT_RGBA64:
+ s->filter = filter_rgba64;
+ break;
+ case AV_PIX_FMT_XYZ12:
+ s->filter = filter_xyz;
+ for (i = 0; i < 65536; i++)
+ s->log2lin[i] = pow(i / 65535., s->igamma) * 65535.;
+ break;
+ default:
+ av_assert0(0);
+ }
+
+ return 0;
+}
+
+static const AVFilterPad inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ { NULL }
+};
+
+static const AVFilterPad outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_ciescope = {
+ .name = "ciescope",
+ .description = NULL_IF_CONFIG_SMALL("Video CIE scope."),
+ .priv_size = sizeof(CiescopeContext),
+ .priv_class = &ciescope_class,
+ .query_formats = query_formats,
+ .uninit = uninit,
+ .inputs = inputs,
+ .outputs = outputs,
+};
diff --git a/libavfilter/vf_codecview.c b/libavfilter/vf_codecview.c
new file mode 100644
index 0000000000..dc3397316d
--- /dev/null
+++ b/libavfilter/vf_codecview.c
@@ -0,0 +1,322 @@
+/*
+ * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (c) 2014 Clément Bœsch <u pkh me>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Codec debug viewer filter.
+ *
+ * All the MV drawing code from Michael Niedermayer is extracted from
+ * libavcodec/mpegvideo.c.
+ *
+ * TODO: segmentation
+ */
+
+#include "libavutil/imgutils.h"
+#include "libavutil/motion_vector.h"
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#include "internal.h"
+
+#define MV_P_FOR (1<<0)
+#define MV_B_FOR (1<<1)
+#define MV_B_BACK (1<<2)
+#define MV_TYPE_FOR (1<<0)
+#define MV_TYPE_BACK (1<<1)
+#define FRAME_TYPE_I (1<<0)
+#define FRAME_TYPE_P (1<<1)
+#define FRAME_TYPE_B (1<<2)
+
+typedef struct {
+ const AVClass *class;
+ unsigned mv;
+ unsigned frame_type;
+ unsigned mv_type;
+ int hsub, vsub;
+ int qp;
+} CodecViewContext;
+
+#define OFFSET(x) offsetof(CodecViewContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+#define CONST(name, help, val, unit) { name, help, 0, AV_OPT_TYPE_CONST, {.i64=val}, 0, 0, FLAGS, unit }
+
+static const AVOption codecview_options[] = {
+ { "mv", "set motion vectors to visualize", OFFSET(mv), AV_OPT_TYPE_FLAGS, {.i64=0}, 0, INT_MAX, FLAGS, "mv" },
+ CONST("pf", "forward predicted MVs of P-frames", MV_P_FOR, "mv"),
+ CONST("bf", "forward predicted MVs of B-frames", MV_B_FOR, "mv"),
+ CONST("bb", "backward predicted MVs of B-frames", MV_B_BACK, "mv"),
+ { "qp", NULL, OFFSET(qp), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, .flags = FLAGS },
+ { "mv_type", "set motion vectors type", OFFSET(mv_type), AV_OPT_TYPE_FLAGS, {.i64=0}, 0, INT_MAX, FLAGS, "mv_type" },
+ { "mvt", "set motion vectors type", OFFSET(mv_type), AV_OPT_TYPE_FLAGS, {.i64=0}, 0, INT_MAX, FLAGS, "mv_type" },
+ CONST("fp", "forward predicted MVs", MV_TYPE_FOR, "mv_type"),
+ CONST("bp", "backward predicted MVs", MV_TYPE_BACK, "mv_type"),
+ { "frame_type", "set frame types to visualize motion vectors of", OFFSET(frame_type), AV_OPT_TYPE_FLAGS, {.i64=0}, 0, INT_MAX, FLAGS, "frame_type" },
+ { "ft", "set frame types to visualize motion vectors of", OFFSET(frame_type), AV_OPT_TYPE_FLAGS, {.i64=0}, 0, INT_MAX, FLAGS, "frame_type" },
+ CONST("if", "I-frames", FRAME_TYPE_I, "frame_type"),
+ CONST("pf", "P-frames", FRAME_TYPE_P, "frame_type"),
+ CONST("bf", "B-frames", FRAME_TYPE_B, "frame_type"),
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(codecview);
+
+static int query_formats(AVFilterContext *ctx)
+{
+ // TODO: we can probably add way more pixel formats without any other
+ // changes; anything with 8-bit luma in first plane should be working
+ static const enum AVPixelFormat pix_fmts[] = {AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE};
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static int clip_line(int *sx, int *sy, int *ex, int *ey, int maxx)
+{
+ if(*sx > *ex)
+ return clip_line(ex, ey, sx, sy, maxx);
+
+ if (*sx < 0) {
+ if (*ex < 0)
+ return 1;
+ *sy = *ey + (*sy - *ey) * (int64_t)*ex / (*ex - *sx);
+ *sx = 0;
+ }
+
+ if (*ex > maxx) {
+ if (*sx > maxx)
+ return 1;
+ *ey = *sy + (*ey - *sy) * (int64_t)(maxx - *sx) / (*ex - *sx);
+ *ex = maxx;
+ }
+ return 0;
+}
+
+/**
+ * Draw a line from (ex, ey) -> (sx, sy).
+ * @param w width of the image
+ * @param h height of the image
+ * @param stride stride/linesize of the image
+ * @param color color of the arrow
+ */
+static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
+ int w, int h, int stride, int color)
+{
+ int x, y, fr, f;
+
+ if (clip_line(&sx, &sy, &ex, &ey, w - 1))
+ return;
+ if (clip_line(&sy, &sx, &ey, &ex, h - 1))
+ return;
+
+ sx = av_clip(sx, 0, w - 1);
+ sy = av_clip(sy, 0, h - 1);
+ ex = av_clip(ex, 0, w - 1);
+ ey = av_clip(ey, 0, h - 1);
+
+ buf[sy * stride + sx] += color;
+
+ if (FFABS(ex - sx) > FFABS(ey - sy)) {
+ if (sx > ex) {
+ FFSWAP(int, sx, ex);
+ FFSWAP(int, sy, ey);
+ }
+ buf += sx + sy * stride;
+ ex -= sx;
+ f = ((ey - sy) << 16) / ex;
+ for (x = 0; x <= ex; x++) {
+ y = (x * f) >> 16;
+ fr = (x * f) & 0xFFFF;
+ buf[ y * stride + x] += (color * (0x10000 - fr)) >> 16;
+ if(fr) buf[(y + 1) * stride + x] += (color * fr ) >> 16;
+ }
+ } else {
+ if (sy > ey) {
+ FFSWAP(int, sx, ex);
+ FFSWAP(int, sy, ey);
+ }
+ buf += sx + sy * stride;
+ ey -= sy;
+ if (ey)
+ f = ((ex - sx) << 16) / ey;
+ else
+ f = 0;
+ for(y= 0; y <= ey; y++){
+ x = (y*f) >> 16;
+ fr = (y*f) & 0xFFFF;
+ buf[y * stride + x ] += (color * (0x10000 - fr)) >> 16;
+ if(fr) buf[y * stride + x + 1] += (color * fr ) >> 16;
+ }
+ }
+}
+
+/**
+ * Draw an arrow from (ex, ey) -> (sx, sy).
+ * @param w width of the image
+ * @param h height of the image
+ * @param stride stride/linesize of the image
+ * @param color color of the arrow
+ */
+static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
+ int ey, int w, int h, int stride, int color, int tail, int direction)
+{
+ int dx,dy;
+
+ if (direction) {
+ FFSWAP(int, sx, ex);
+ FFSWAP(int, sy, ey);
+ }
+
+ sx = av_clip(sx, -100, w + 100);
+ sy = av_clip(sy, -100, h + 100);
+ ex = av_clip(ex, -100, w + 100);
+ ey = av_clip(ey, -100, h + 100);
+
+ dx = ex - sx;
+ dy = ey - sy;
+
+ if (dx * dx + dy * dy > 3 * 3) {
+ int rx = dx + dy;
+ int ry = -dx + dy;
+ int length = sqrt((rx * rx + ry * ry) << 8);
+
+ // FIXME subpixel accuracy
+ rx = ROUNDED_DIV(rx * 3 << 4, length);
+ ry = ROUNDED_DIV(ry * 3 << 4, length);
+
+ if (tail) {
+ rx = -rx;
+ ry = -ry;
+ }
+
+ draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
+ draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
+ }
+ draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
+{
+ AVFilterContext *ctx = inlink->dst;
+ CodecViewContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+
+ if (s->qp) {
+ int qstride, qp_type;
+ int8_t *qp_table = av_frame_get_qp_table(frame, &qstride, &qp_type);
+
+ if (qp_table) {
+ int x, y;
+ const int w = AV_CEIL_RSHIFT(frame->width, s->hsub);
+ const int h = AV_CEIL_RSHIFT(frame->height, s->vsub);
+ uint8_t *pu = frame->data[1];
+ uint8_t *pv = frame->data[2];
+ const int lzu = frame->linesize[1];
+ const int lzv = frame->linesize[2];
+
+ for (y = 0; y < h; y++) {
+ for (x = 0; x < w; x++) {
+ const int qp = ff_norm_qscale(qp_table[(y >> 3) * qstride + (x >> 3)], qp_type) * 128/31;
+ pu[x] = pv[x] = qp;
+ }
+ pu += lzu;
+ pv += lzv;
+ }
+ }
+ }
+
+ if (s->mv || s->mv_type) {
+ AVFrameSideData *sd = av_frame_get_side_data(frame, AV_FRAME_DATA_MOTION_VECTORS);
+ if (sd) {
+ int i;
+ const AVMotionVector *mvs = (const AVMotionVector *)sd->data;
+ const int is_iframe = (s->frame_type & FRAME_TYPE_I) && frame->pict_type == AV_PICTURE_TYPE_I;
+ const int is_pframe = (s->frame_type & FRAME_TYPE_P) && frame->pict_type == AV_PICTURE_TYPE_P;
+ const int is_bframe = (s->frame_type & FRAME_TYPE_B) && frame->pict_type == AV_PICTURE_TYPE_B;
+
+ for (i = 0; i < sd->size / sizeof(*mvs); i++) {
+ const AVMotionVector *mv = &mvs[i];
+ const int direction = mv->source > 0;
+
+ if (s->mv_type) {
+ const int is_fp = direction == 0 && (s->mv_type & MV_TYPE_FOR);
+ const int is_bp = direction == 1 && (s->mv_type & MV_TYPE_BACK);
+
+ if ((!s->frame_type && (is_fp || is_bp)) ||
+ is_iframe && is_fp || is_iframe && is_bp ||
+ is_pframe && is_fp ||
+ is_bframe && is_fp || is_bframe && is_bp)
+ draw_arrow(frame->data[0], mv->dst_x, mv->dst_y, mv->src_x, mv->src_y,
+ frame->width, frame->height, frame->linesize[0],
+ 100, 0, direction);
+ } else if (s->mv)
+ if ((direction == 0 && (s->mv & MV_P_FOR) && frame->pict_type == AV_PICTURE_TYPE_P) ||
+ (direction == 0 && (s->mv & MV_B_FOR) && frame->pict_type == AV_PICTURE_TYPE_B) ||
+ (direction == 1 && (s->mv & MV_B_BACK) && frame->pict_type == AV_PICTURE_TYPE_B))
+ draw_arrow(frame->data[0], mv->dst_x, mv->dst_y, mv->src_x, mv->src_y,
+ frame->width, frame->height, frame->linesize[0],
+ 100, 0, direction);
+ }
+ }
+ }
+
+ return ff_filter_frame(outlink, frame);
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ CodecViewContext *s = ctx->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+
+ s->hsub = desc->log2_chroma_w;
+ s->vsub = desc->log2_chroma_h;
+ return 0;
+}
+
+static const AVFilterPad codecview_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ .needs_writable = 1,
+ },
+ { NULL }
+};
+
+static const AVFilterPad codecview_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_codecview = {
+ .name = "codecview",
+ .description = NULL_IF_CONFIG_SMALL("Visualize information about some codecs."),
+ .priv_size = sizeof(CodecViewContext),
+ .query_formats = query_formats,
+ .inputs = codecview_inputs,
+ .outputs = codecview_outputs,
+ .priv_class = &codecview_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+};
diff --git a/libavfilter/vf_colorbalance.c b/libavfilter/vf_colorbalance.c
new file mode 100644
index 0000000000..e37f1995ca
--- /dev/null
+++ b/libavfilter/vf_colorbalance.c
@@ -0,0 +1,214 @@
+/*
+ * Copyright (c) 2013 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+#include "drawutils.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+#define R 0
+#define G 1
+#define B 2
+#define A 3
+
+typedef struct {
+ double shadows;
+ double midtones;
+ double highlights;
+} Range;
+
+typedef struct {
+ const AVClass *class;
+ Range cyan_red;
+ Range magenta_green;
+ Range yellow_blue;
+
+ uint8_t lut[3][256];
+
+ uint8_t rgba_map[4];
+ int step;
+} ColorBalanceContext;
+
+#define OFFSET(x) offsetof(ColorBalanceContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption colorbalance_options[] = {
+ { "rs", "set red shadows", OFFSET(cyan_red.shadows), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -1, 1, FLAGS },
+ { "gs", "set green shadows", OFFSET(magenta_green.shadows), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -1, 1, FLAGS },
+ { "bs", "set blue shadows", OFFSET(yellow_blue.shadows), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -1, 1, FLAGS },
+ { "rm", "set red midtones", OFFSET(cyan_red.midtones), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -1, 1, FLAGS },
+ { "gm", "set green midtones", OFFSET(magenta_green.midtones), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -1, 1, FLAGS },
+ { "bm", "set blue midtones", OFFSET(yellow_blue.midtones), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -1, 1, FLAGS },
+ { "rh", "set red highlights", OFFSET(cyan_red.highlights), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -1, 1, FLAGS },
+ { "gh", "set green highlights", OFFSET(magenta_green.highlights), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -1, 1, FLAGS },
+ { "bh", "set blue highlights", OFFSET(yellow_blue.highlights), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -1, 1, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(colorbalance);
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
+ AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA,
+ AV_PIX_FMT_ABGR, AV_PIX_FMT_ARGB,
+ AV_PIX_FMT_0BGR, AV_PIX_FMT_0RGB,
+ AV_PIX_FMT_RGB0, AV_PIX_FMT_BGR0,
+ AV_PIX_FMT_NONE
+ };
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ ColorBalanceContext *s = ctx->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(outlink->format);
+ double *shadows, *midtones, *highlights, *buffer;
+ int i, r, g, b;
+
+ buffer = av_malloc(256 * 3 * sizeof(*buffer));
+ if (!buffer)
+ return AVERROR(ENOMEM);
+
+ shadows = buffer + 256 * 0;
+ midtones = buffer + 256 * 1;
+ highlights = buffer + 256 * 2;
+
+ for (i = 0; i < 256; i++) {
+ double low = av_clipd((i - 85.0) / -64.0 + 0.5, 0, 1) * 178.5;
+ double mid = av_clipd((i - 85.0) / 64.0 + 0.5, 0, 1) *
+ av_clipd((i + 85.0 - 255.0) / -64.0 + 0.5, 0, 1) * 178.5;
+
+ shadows[i] = low;
+ midtones[i] = mid;
+ highlights[255 - i] = low;
+ }
+
+ for (i = 0; i < 256; i++) {
+ r = g = b = i;
+
+ r = av_clip_uint8(r + s->cyan_red.shadows * shadows[r]);
+ r = av_clip_uint8(r + s->cyan_red.midtones * midtones[r]);
+ r = av_clip_uint8(r + s->cyan_red.highlights * highlights[r]);
+
+ g = av_clip_uint8(g + s->magenta_green.shadows * shadows[g]);
+ g = av_clip_uint8(g + s->magenta_green.midtones * midtones[g]);
+ g = av_clip_uint8(g + s->magenta_green.highlights * highlights[g]);
+
+ b = av_clip_uint8(b + s->yellow_blue.shadows * shadows[b]);
+ b = av_clip_uint8(b + s->yellow_blue.midtones * midtones[b]);
+ b = av_clip_uint8(b + s->yellow_blue.highlights * highlights[b]);
+
+ s->lut[R][i] = r;
+ s->lut[G][i] = g;
+ s->lut[B][i] = b;
+ }
+
+ av_free(buffer);
+
+ ff_fill_rgba_map(s->rgba_map, outlink->format);
+ s->step = av_get_padded_bits_per_pixel(desc) >> 3;
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ ColorBalanceContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ const uint8_t roffset = s->rgba_map[R];
+ const uint8_t goffset = s->rgba_map[G];
+ const uint8_t boffset = s->rgba_map[B];
+ const uint8_t aoffset = s->rgba_map[A];
+ const int step = s->step;
+ const uint8_t *srcrow = in->data[0];
+ uint8_t *dstrow;
+ AVFrame *out;
+ int i, j;
+
+ if (av_frame_is_writable(in)) {
+ out = in;
+ } else {
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+ }
+
+ dstrow = out->data[0];
+ for (i = 0; i < outlink->h; i++) {
+ const uint8_t *src = srcrow;
+ uint8_t *dst = dstrow;
+
+ for (j = 0; j < outlink->w * step; j += step) {
+ dst[j + roffset] = s->lut[R][src[j + roffset]];
+ dst[j + goffset] = s->lut[G][src[j + goffset]];
+ dst[j + boffset] = s->lut[B][src[j + boffset]];
+ if (in != out && step == 4)
+ dst[j + aoffset] = src[j + aoffset];
+ }
+
+ srcrow += in->linesize[0];
+ dstrow += out->linesize[0];
+ }
+
+ if (in != out)
+ av_frame_free(&in);
+ return ff_filter_frame(ctx->outputs[0], out);
+}
+
+static const AVFilterPad colorbalance_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad colorbalance_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_colorbalance = {
+ .name = "colorbalance",
+ .description = NULL_IF_CONFIG_SMALL("Adjust the color balance."),
+ .priv_size = sizeof(ColorBalanceContext),
+ .priv_class = &colorbalance_class,
+ .query_formats = query_formats,
+ .inputs = colorbalance_inputs,
+ .outputs = colorbalance_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+};
diff --git a/libavfilter/vf_colorchannelmixer.c b/libavfilter/vf_colorchannelmixer.c
new file mode 100644
index 0000000000..cda972dd00
--- /dev/null
+++ b/libavfilter/vf_colorchannelmixer.c
@@ -0,0 +1,362 @@
+/*
+ * Copyright (c) 2013 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#include "drawutils.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+#define R 0
+#define G 1
+#define B 2
+#define A 3
+
+typedef struct {
+ const AVClass *class;
+ double rr, rg, rb, ra;
+ double gr, gg, gb, ga;
+ double br, bg, bb, ba;
+ double ar, ag, ab, aa;
+
+ int *lut[4][4];
+
+ int *buffer;
+
+ uint8_t rgba_map[4];
+} ColorChannelMixerContext;
+
+#define OFFSET(x) offsetof(ColorChannelMixerContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption colorchannelmixer_options[] = {
+ { "rr", "set the red gain for the red channel", OFFSET(rr), AV_OPT_TYPE_DOUBLE, {.dbl=1}, -2, 2, FLAGS },
+ { "rg", "set the green gain for the red channel", OFFSET(rg), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -2, 2, FLAGS },
+ { "rb", "set the blue gain for the red channel", OFFSET(rb), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -2, 2, FLAGS },
+ { "ra", "set the alpha gain for the red channel", OFFSET(ra), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -2, 2, FLAGS },
+ { "gr", "set the red gain for the green channel", OFFSET(gr), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -2, 2, FLAGS },
+ { "gg", "set the green gain for the green channel", OFFSET(gg), AV_OPT_TYPE_DOUBLE, {.dbl=1}, -2, 2, FLAGS },
+ { "gb", "set the blue gain for the green channel", OFFSET(gb), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -2, 2, FLAGS },
+ { "ga", "set the alpha gain for the green channel", OFFSET(ga), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -2, 2, FLAGS },
+ { "br", "set the red gain for the blue channel", OFFSET(br), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -2, 2, FLAGS },
+ { "bg", "set the green gain for the blue channel", OFFSET(bg), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -2, 2, FLAGS },
+ { "bb", "set the blue gain for the blue channel", OFFSET(bb), AV_OPT_TYPE_DOUBLE, {.dbl=1}, -2, 2, FLAGS },
+ { "ba", "set the alpha gain for the blue channel", OFFSET(ba), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -2, 2, FLAGS },
+ { "ar", "set the red gain for the alpha channel", OFFSET(ar), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -2, 2, FLAGS },
+ { "ag", "set the green gain for the alpha channel", OFFSET(ag), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -2, 2, FLAGS },
+ { "ab", "set the blue gain for the alpha channel", OFFSET(ab), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -2, 2, FLAGS },
+ { "aa", "set the alpha gain for the alpha channel", OFFSET(aa), AV_OPT_TYPE_DOUBLE, {.dbl=1}, -2, 2, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(colorchannelmixer);
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
+ AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA,
+ AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR,
+ AV_PIX_FMT_0RGB, AV_PIX_FMT_0BGR,
+ AV_PIX_FMT_RGB0, AV_PIX_FMT_BGR0,
+ AV_PIX_FMT_RGB48, AV_PIX_FMT_BGR48,
+ AV_PIX_FMT_RGBA64, AV_PIX_FMT_BGRA64,
+ AV_PIX_FMT_NONE
+ };
+
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ ColorChannelMixerContext *s = ctx->priv;
+ int i, j, size, *buffer;
+
+ ff_fill_rgba_map(s->rgba_map, outlink->format);
+
+ switch (outlink->format) {
+ case AV_PIX_FMT_RGB48:
+ case AV_PIX_FMT_BGR48:
+ case AV_PIX_FMT_RGBA64:
+ case AV_PIX_FMT_BGRA64:
+ size = 65536;
+ break;
+ default:
+ size = 256;
+ }
+
+ s->buffer = buffer = av_malloc(16 * size * sizeof(*s->buffer));
+ if (!s->buffer)
+ return AVERROR(ENOMEM);
+
+ for (i = 0; i < 4; i++)
+ for (j = 0; j < 4; j++, buffer += size)
+ s->lut[i][j] = buffer;
+
+ for (i = 0; i < size; i++) {
+ s->lut[R][R][i] = lrint(i * s->rr);
+ s->lut[R][G][i] = lrint(i * s->rg);
+ s->lut[R][B][i] = lrint(i * s->rb);
+ s->lut[R][A][i] = lrint(i * s->ra);
+
+ s->lut[G][R][i] = lrint(i * s->gr);
+ s->lut[G][G][i] = lrint(i * s->gg);
+ s->lut[G][B][i] = lrint(i * s->gb);
+ s->lut[G][A][i] = lrint(i * s->ga);
+
+ s->lut[B][R][i] = lrint(i * s->br);
+ s->lut[B][G][i] = lrint(i * s->bg);
+ s->lut[B][B][i] = lrint(i * s->bb);
+ s->lut[B][A][i] = lrint(i * s->ba);
+
+ s->lut[A][R][i] = lrint(i * s->ar);
+ s->lut[A][G][i] = lrint(i * s->ag);
+ s->lut[A][B][i] = lrint(i * s->ab);
+ s->lut[A][A][i] = lrint(i * s->aa);
+ }
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ ColorChannelMixerContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ const uint8_t roffset = s->rgba_map[R];
+ const uint8_t goffset = s->rgba_map[G];
+ const uint8_t boffset = s->rgba_map[B];
+ const uint8_t aoffset = s->rgba_map[A];
+ const uint8_t *srcrow = in->data[0];
+ uint8_t *dstrow;
+ AVFrame *out;
+ int i, j;
+
+ if (av_frame_is_writable(in)) {
+ out = in;
+ } else {
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+ }
+
+ dstrow = out->data[0];
+ switch (outlink->format) {
+ case AV_PIX_FMT_BGR24:
+ case AV_PIX_FMT_RGB24:
+ for (i = 0; i < outlink->h; i++) {
+ const uint8_t *src = srcrow;
+ uint8_t *dst = dstrow;
+
+ for (j = 0; j < outlink->w * 3; j += 3) {
+ const uint8_t rin = src[j + roffset];
+ const uint8_t gin = src[j + goffset];
+ const uint8_t bin = src[j + boffset];
+
+ dst[j + roffset] = av_clip_uint8(s->lut[R][R][rin] +
+ s->lut[R][G][gin] +
+ s->lut[R][B][bin]);
+ dst[j + goffset] = av_clip_uint8(s->lut[G][R][rin] +
+ s->lut[G][G][gin] +
+ s->lut[G][B][bin]);
+ dst[j + boffset] = av_clip_uint8(s->lut[B][R][rin] +
+ s->lut[B][G][gin] +
+ s->lut[B][B][bin]);
+ }
+
+ srcrow += in->linesize[0];
+ dstrow += out->linesize[0];
+ }
+ break;
+ case AV_PIX_FMT_0BGR:
+ case AV_PIX_FMT_0RGB:
+ case AV_PIX_FMT_BGR0:
+ case AV_PIX_FMT_RGB0:
+ for (i = 0; i < outlink->h; i++) {
+ const uint8_t *src = srcrow;
+ uint8_t *dst = dstrow;
+
+ for (j = 0; j < outlink->w * 4; j += 4) {
+ const uint8_t rin = src[j + roffset];
+ const uint8_t gin = src[j + goffset];
+ const uint8_t bin = src[j + boffset];
+
+ dst[j + roffset] = av_clip_uint8(s->lut[R][R][rin] +
+ s->lut[R][G][gin] +
+ s->lut[R][B][bin]);
+ dst[j + goffset] = av_clip_uint8(s->lut[G][R][rin] +
+ s->lut[G][G][gin] +
+ s->lut[G][B][bin]);
+ dst[j + boffset] = av_clip_uint8(s->lut[B][R][rin] +
+ s->lut[B][G][gin] +
+ s->lut[B][B][bin]);
+ if (in != out)
+ dst[j + aoffset] = 0;
+ }
+
+ srcrow += in->linesize[0];
+ dstrow += out->linesize[0];
+ }
+ break;
+ case AV_PIX_FMT_ABGR:
+ case AV_PIX_FMT_ARGB:
+ case AV_PIX_FMT_BGRA:
+ case AV_PIX_FMT_RGBA:
+ for (i = 0; i < outlink->h; i++) {
+ const uint8_t *src = srcrow;
+ uint8_t *dst = dstrow;
+
+ for (j = 0; j < outlink->w * 4; j += 4) {
+ const uint8_t rin = src[j + roffset];
+ const uint8_t gin = src[j + goffset];
+ const uint8_t bin = src[j + boffset];
+ const uint8_t ain = src[j + aoffset];
+
+ dst[j + roffset] = av_clip_uint8(s->lut[R][R][rin] +
+ s->lut[R][G][gin] +
+ s->lut[R][B][bin] +
+ s->lut[R][A][ain]);
+ dst[j + goffset] = av_clip_uint8(s->lut[G][R][rin] +
+ s->lut[G][G][gin] +
+ s->lut[G][B][bin] +
+ s->lut[G][A][ain]);
+ dst[j + boffset] = av_clip_uint8(s->lut[B][R][rin] +
+ s->lut[B][G][gin] +
+ s->lut[B][B][bin] +
+ s->lut[B][A][ain]);
+ dst[j + aoffset] = av_clip_uint8(s->lut[A][R][rin] +
+ s->lut[A][G][gin] +
+ s->lut[A][B][bin] +
+ s->lut[A][A][ain]);
+ }
+
+ srcrow += in->linesize[0];
+ dstrow += out->linesize[0];
+ }
+ break;
+ case AV_PIX_FMT_BGR48:
+ case AV_PIX_FMT_RGB48:
+ for (i = 0; i < outlink->h; i++) {
+ const uint16_t *src = (const uint16_t *)srcrow;
+ uint16_t *dst = (uint16_t *)dstrow;
+
+ for (j = 0; j < outlink->w * 3; j += 3) {
+ const uint16_t rin = src[j + roffset];
+ const uint16_t gin = src[j + goffset];
+ const uint16_t bin = src[j + boffset];
+
+ dst[j + roffset] = av_clip_uint16(s->lut[R][R][rin] +
+ s->lut[R][G][gin] +
+ s->lut[R][B][bin]);
+ dst[j + goffset] = av_clip_uint16(s->lut[G][R][rin] +
+ s->lut[G][G][gin] +
+ s->lut[G][B][bin]);
+ dst[j + boffset] = av_clip_uint16(s->lut[B][R][rin] +
+ s->lut[B][G][gin] +
+ s->lut[B][B][bin]);
+ }
+
+ srcrow += in->linesize[0];
+ dstrow += out->linesize[0];
+ }
+ break;
+ case AV_PIX_FMT_BGRA64:
+ case AV_PIX_FMT_RGBA64:
+ for (i = 0; i < outlink->h; i++) {
+ const uint16_t *src = (const uint16_t *)srcrow;
+ uint16_t *dst = (uint16_t *)dstrow;
+
+ for (j = 0; j < outlink->w * 4; j += 4) {
+ const uint16_t rin = src[j + roffset];
+ const uint16_t gin = src[j + goffset];
+ const uint16_t bin = src[j + boffset];
+ const uint16_t ain = src[j + aoffset];
+
+ dst[j + roffset] = av_clip_uint16(s->lut[R][R][rin] +
+ s->lut[R][G][gin] +
+ s->lut[R][B][bin] +
+ s->lut[R][A][ain]);
+ dst[j + goffset] = av_clip_uint16(s->lut[G][R][rin] +
+ s->lut[G][G][gin] +
+ s->lut[G][B][bin] +
+ s->lut[G][A][ain]);
+ dst[j + boffset] = av_clip_uint16(s->lut[B][R][rin] +
+ s->lut[B][G][gin] +
+ s->lut[B][B][bin] +
+ s->lut[B][A][ain]);
+ dst[j + aoffset] = av_clip_uint16(s->lut[A][R][rin] +
+ s->lut[A][G][gin] +
+ s->lut[A][B][bin] +
+ s->lut[A][A][ain]);
+ }
+
+ srcrow += in->linesize[0];
+ dstrow += out->linesize[0];
+ }
+ }
+
+ if (in != out)
+ av_frame_free(&in);
+ return ff_filter_frame(ctx->outputs[0], out);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ ColorChannelMixerContext *s = ctx->priv;
+
+ av_freep(&s->buffer);
+}
+
+static const AVFilterPad colorchannelmixer_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad colorchannelmixer_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_colorchannelmixer = {
+ .name = "colorchannelmixer",
+ .description = NULL_IF_CONFIG_SMALL("Adjust colors by mixing color channels."),
+ .priv_size = sizeof(ColorChannelMixerContext),
+ .priv_class = &colorchannelmixer_class,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = colorchannelmixer_inputs,
+ .outputs = colorchannelmixer_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+};
diff --git a/libavfilter/vf_colorkey.c b/libavfilter/vf_colorkey.c
new file mode 100644
index 0000000000..3d65e59d42
--- /dev/null
+++ b/libavfilter/vf_colorkey.c
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2015 Timo Rothenpieler <timo@rothenpieler.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/opt.h"
+#include "libavutil/imgutils.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+typedef struct ColorkeyContext {
+ const AVClass *class;
+
+ /* color offsets rgba */
+ int co[4];
+
+ uint8_t colorkey_rgba[4];
+ float similarity;
+ float blend;
+} ColorkeyContext;
+
+static uint8_t do_colorkey_pixel(ColorkeyContext *ctx, uint8_t r, uint8_t g, uint8_t b)
+{
+ int dr = (int)r - ctx->colorkey_rgba[0];
+ int dg = (int)g - ctx->colorkey_rgba[1];
+ int db = (int)b - ctx->colorkey_rgba[2];
+
+ double diff = sqrt((dr * dr + dg * dg + db * db) / (255.0 * 255.0));
+
+ if (ctx->blend > 0.0001) {
+ return av_clipd((diff - ctx->similarity) / ctx->blend, 0.0, 1.0) * 255.0;
+ } else {
+ return (diff > ctx->similarity) ? 255 : 0;
+ }
+}
+
+static int do_colorkey_slice(AVFilterContext *avctx, void *arg, int jobnr, int nb_jobs)
+{
+ AVFrame *frame = arg;
+
+ const int slice_start = (frame->height * jobnr) / nb_jobs;
+ const int slice_end = (frame->height * (jobnr + 1)) / nb_jobs;
+
+ ColorkeyContext *ctx = avctx->priv;
+
+ int o, x, y;
+
+ for (y = slice_start; y < slice_end; ++y) {
+ for (x = 0; x < frame->width; ++x) {
+ o = frame->linesize[0] * y + x * 4;
+
+ frame->data[0][o + ctx->co[3]] =
+ do_colorkey_pixel(ctx,
+ frame->data[0][o + ctx->co[0]],
+ frame->data[0][o + ctx->co[1]],
+ frame->data[0][o + ctx->co[2]]);
+ }
+ }
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *link, AVFrame *frame)
+{
+ AVFilterContext *avctx = link->dst;
+ int res;
+
+ if (res = av_frame_make_writable(frame))
+ return res;
+
+ if (res = avctx->internal->execute(avctx, do_colorkey_slice, frame, NULL, FFMIN(frame->height, ff_filter_get_nb_threads(avctx))))
+ return res;
+
+ return ff_filter_frame(avctx->outputs[0], frame);
+}
+
+static av_cold int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *avctx = outlink->src;
+ ColorkeyContext *ctx = avctx->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(outlink->format);
+ int i;
+
+ outlink->w = avctx->inputs[0]->w;
+ outlink->h = avctx->inputs[0]->h;
+ outlink->time_base = avctx->inputs[0]->time_base;
+
+ for (i = 0; i < 4; ++i)
+ ctx->co[i] = desc->comp[i].offset;
+
+ return 0;
+}
+
+static av_cold int query_formats(AVFilterContext *avctx)
+{
+ static const enum AVPixelFormat pixel_fmts[] = {
+ AV_PIX_FMT_ARGB,
+ AV_PIX_FMT_RGBA,
+ AV_PIX_FMT_ABGR,
+ AV_PIX_FMT_BGRA,
+ AV_PIX_FMT_NONE
+ };
+
+ AVFilterFormats *formats = NULL;
+
+ formats = ff_make_format_list(pixel_fmts);
+ if (!formats)
+ return AVERROR(ENOMEM);
+
+ return ff_set_common_formats(avctx, formats);
+}
+
+static const AVFilterPad colorkey_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad colorkey_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ },
+ { NULL }
+};
+
+#define OFFSET(x) offsetof(ColorkeyContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption colorkey_options[] = {
+ { "color", "set the colorkey key color", OFFSET(colorkey_rgba), AV_OPT_TYPE_COLOR, { .str = "black" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "similarity", "set the colorkey similarity value", OFFSET(similarity), AV_OPT_TYPE_FLOAT, { .dbl = 0.01 }, 0.01, 1.0, FLAGS },
+ { "blend", "set the colorkey key blend value", OFFSET(blend), AV_OPT_TYPE_FLOAT, { .dbl = 0.0 }, 0.0, 1.0, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(colorkey);
+
+AVFilter ff_vf_colorkey = {
+ .name = "colorkey",
+ .description = NULL_IF_CONFIG_SMALL("Turns a certain color into transparency. Operates on RGB colors."),
+ .priv_size = sizeof(ColorkeyContext),
+ .priv_class = &colorkey_class,
+ .query_formats = query_formats,
+ .inputs = colorkey_inputs,
+ .outputs = colorkey_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
+};
diff --git a/libavfilter/vf_colorlevels.c b/libavfilter/vf_colorlevels.c
new file mode 100644
index 0000000000..dedbe30d19
--- /dev/null
+++ b/libavfilter/vf_colorlevels.c
@@ -0,0 +1,256 @@
+/*
+ * Copyright (c) 2013 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/imgutils.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+#include "drawutils.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+#define R 0
+#define G 1
+#define B 2
+#define A 3
+
+typedef struct {
+ double in_min, in_max;
+ double out_min, out_max;
+} Range;
+
+typedef struct {
+ const AVClass *class;
+ Range range[4];
+ int nb_comp;
+ int bpp;
+ int step;
+ uint8_t rgba_map[4];
+ int linesize;
+} ColorLevelsContext;
+
+#define OFFSET(x) offsetof(ColorLevelsContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption colorlevels_options[] = {
+ { "rimin", "set input red black point", OFFSET(range[R].in_min), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -1, 1, FLAGS },
+ { "gimin", "set input green black point", OFFSET(range[G].in_min), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -1, 1, FLAGS },
+ { "bimin", "set input blue black point", OFFSET(range[B].in_min), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -1, 1, FLAGS },
+ { "aimin", "set input alpha black point", OFFSET(range[A].in_min), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -1, 1, FLAGS },
+ { "rimax", "set input red white point", OFFSET(range[R].in_max), AV_OPT_TYPE_DOUBLE, {.dbl=1}, -1, 1, FLAGS },
+ { "gimax", "set input green white point", OFFSET(range[G].in_max), AV_OPT_TYPE_DOUBLE, {.dbl=1}, -1, 1, FLAGS },
+ { "bimax", "set input blue white point", OFFSET(range[B].in_max), AV_OPT_TYPE_DOUBLE, {.dbl=1}, -1, 1, FLAGS },
+ { "aimax", "set input alpha white point", OFFSET(range[A].in_max), AV_OPT_TYPE_DOUBLE, {.dbl=1}, -1, 1, FLAGS },
+ { "romin", "set output red black point", OFFSET(range[R].out_min), AV_OPT_TYPE_DOUBLE, {.dbl=0}, 0, 1, FLAGS },
+ { "gomin", "set output green black point", OFFSET(range[G].out_min), AV_OPT_TYPE_DOUBLE, {.dbl=0}, 0, 1, FLAGS },
+ { "bomin", "set output blue black point", OFFSET(range[B].out_min), AV_OPT_TYPE_DOUBLE, {.dbl=0}, 0, 1, FLAGS },
+ { "aomin", "set output alpha black point", OFFSET(range[A].out_min), AV_OPT_TYPE_DOUBLE, {.dbl=0}, 0, 1, FLAGS },
+ { "romax", "set output red white point", OFFSET(range[R].out_max), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },
+ { "gomax", "set output green white point", OFFSET(range[G].out_max), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },
+ { "bomax", "set output blue white point", OFFSET(range[B].out_max), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },
+ { "aomax", "set output alpha white point", OFFSET(range[A].out_max), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(colorlevels);
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_0RGB, AV_PIX_FMT_0BGR,
+ AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR,
+ AV_PIX_FMT_RGB0, AV_PIX_FMT_BGR0,
+ AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
+ AV_PIX_FMT_RGB48, AV_PIX_FMT_BGR48,
+ AV_PIX_FMT_RGBA64, AV_PIX_FMT_BGRA64,
+ AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA,
+ AV_PIX_FMT_NONE
+ };
+
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ ColorLevelsContext *s = ctx->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+
+ s->nb_comp = desc->nb_components;
+ s->bpp = desc->comp[0].depth >> 3;
+ s->step = (av_get_padded_bits_per_pixel(desc) >> 3) / s->bpp;
+ s->linesize = inlink->w * s->step;
+ ff_fill_rgba_map(s->rgba_map, inlink->format);
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ ColorLevelsContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ const int step = s->step;
+ AVFrame *out;
+ int x, y, i;
+
+ if (av_frame_is_writable(in)) {
+ out = in;
+ } else {
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+ }
+
+ switch (s->bpp) {
+ case 1:
+ for (i = 0; i < s->nb_comp; i++) {
+ Range *r = &s->range[i];
+ const uint8_t offset = s->rgba_map[i];
+ const uint8_t *srcrow = in->data[0];
+ uint8_t *dstrow = out->data[0];
+ int imin = lrint(r->in_min * UINT8_MAX);
+ int imax = lrint(r->in_max * UINT8_MAX);
+ int omin = lrint(r->out_min * UINT8_MAX);
+ int omax = lrint(r->out_max * UINT8_MAX);
+ double coeff;
+
+ if (imin < 0) {
+ imin = UINT8_MAX;
+ for (y = 0; y < inlink->h; y++) {
+ const uint8_t *src = srcrow;
+
+ for (x = 0; x < s->linesize; x += step)
+ imin = FFMIN(imin, src[x + offset]);
+ srcrow += in->linesize[0];
+ }
+ }
+ if (imax < 0) {
+ srcrow = in->data[0];
+ imax = 0;
+ for (y = 0; y < inlink->h; y++) {
+ const uint8_t *src = srcrow;
+
+ for (x = 0; x < s->linesize; x += step)
+ imax = FFMAX(imax, src[x + offset]);
+ srcrow += in->linesize[0];
+ }
+ }
+
+ srcrow = in->data[0];
+ coeff = (omax - omin) / (double)(imax - imin);
+ for (y = 0; y < inlink->h; y++) {
+ const uint8_t *src = srcrow;
+ uint8_t *dst = dstrow;
+
+ for (x = 0; x < s->linesize; x += step)
+ dst[x + offset] = av_clip_uint8((src[x + offset] - imin) * coeff + omin);
+ dstrow += out->linesize[0];
+ srcrow += in->linesize[0];
+ }
+ }
+ break;
+ case 2:
+ for (i = 0; i < s->nb_comp; i++) {
+ Range *r = &s->range[i];
+ const uint8_t offset = s->rgba_map[i];
+ const uint8_t *srcrow = in->data[0];
+ uint8_t *dstrow = out->data[0];
+ int imin = lrint(r->in_min * UINT16_MAX);
+ int imax = lrint(r->in_max * UINT16_MAX);
+ int omin = lrint(r->out_min * UINT16_MAX);
+ int omax = lrint(r->out_max * UINT16_MAX);
+ double coeff;
+
+ if (imin < 0) {
+ imin = UINT16_MAX;
+ for (y = 0; y < inlink->h; y++) {
+ const uint16_t *src = (const uint16_t *)srcrow;
+
+ for (x = 0; x < s->linesize; x += step)
+ imin = FFMIN(imin, src[x + offset]);
+ srcrow += in->linesize[0];
+ }
+ }
+ if (imax < 0) {
+ srcrow = in->data[0];
+ imax = 0;
+ for (y = 0; y < inlink->h; y++) {
+ const uint16_t *src = (const uint16_t *)srcrow;
+
+ for (x = 0; x < s->linesize; x += step)
+ imax = FFMAX(imax, src[x + offset]);
+ srcrow += in->linesize[0];
+ }
+ }
+
+ srcrow = in->data[0];
+ coeff = (omax - omin) / (double)(imax - imin);
+ for (y = 0; y < inlink->h; y++) {
+ const uint16_t *src = (const uint16_t*)srcrow;
+ uint16_t *dst = (uint16_t *)dstrow;
+
+ for (x = 0; x < s->linesize; x += step)
+ dst[x + offset] = av_clip_uint16((src[x + offset] - imin) * coeff + omin);
+ dstrow += out->linesize[0];
+ srcrow += in->linesize[0];
+ }
+ }
+ }
+
+ if (in != out)
+ av_frame_free(&in);
+ return ff_filter_frame(outlink, out);
+}
+
+static const AVFilterPad colorlevels_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ { NULL }
+};
+
+static const AVFilterPad colorlevels_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_colorlevels = {
+ .name = "colorlevels",
+ .description = NULL_IF_CONFIG_SMALL("Adjust the color levels."),
+ .priv_size = sizeof(ColorLevelsContext),
+ .priv_class = &colorlevels_class,
+ .query_formats = query_formats,
+ .inputs = colorlevels_inputs,
+ .outputs = colorlevels_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+};
diff --git a/libavfilter/vf_colormatrix.c b/libavfilter/vf_colormatrix.c
new file mode 100644
index 0000000000..d237baa7b9
--- /dev/null
+++ b/libavfilter/vf_colormatrix.c
@@ -0,0 +1,520 @@
+/*
+ * ColorMatrix v2.2 for Avisynth 2.5.x
+ *
+ * Copyright (C) 2006-2007 Kevin Stone
+ *
+ * ColorMatrix 1.x is Copyright (C) Wilbert Dijkhof
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * OUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ * License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * ColorMatrix 2.0 is based on the original ColorMatrix filter by Wilbert
+ * Dijkhof. It adds the ability to convert between any of: Rec.709, FCC,
+ * Rec.601, and SMPTE 240M. It also makes pre and post clipping optional,
+ * adds an option to use scaled or non-scaled coefficients, and more...
+ */
+
+#include <float.h>
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/avstring.h"
+
+#define NS(n) ((n) < 0 ? (int)((n)*65536.0-0.5+DBL_EPSILON) : (int)((n)*65536.0+0.5))
+#define CB(n) av_clip_uint8(n)
+
+static const double yuv_coeff_luma[5][3] = {
+ { +0.7152, +0.0722, +0.2126 }, // Rec.709 (0)
+ { +0.5900, +0.1100, +0.3000 }, // FCC (1)
+ { +0.5870, +0.1140, +0.2990 }, // Rec.601 (ITU-R BT.470-2/SMPTE 170M) (2)
+ { +0.7010, +0.0870, +0.2120 }, // SMPTE 240M (3)
+ { +0.6780, +0.0593, +0.2627 }, // Rec.2020 (4)
+};
+
+enum ColorMode {
+ COLOR_MODE_NONE = -1,
+ COLOR_MODE_BT709,
+ COLOR_MODE_FCC,
+ COLOR_MODE_BT601,
+ COLOR_MODE_SMPTE240M,
+ COLOR_MODE_BT2020,
+ COLOR_MODE_COUNT
+};
+
+typedef struct {
+ const AVClass *class;
+ int yuv_convert[25][3][3];
+ int interlaced;
+ int source, dest; ///< ColorMode
+ int mode;
+ int hsub, vsub;
+} ColorMatrixContext;
+
+typedef struct ThreadData {
+ AVFrame *dst;
+ const AVFrame *src;
+ int c2;
+ int c3;
+ int c4;
+ int c5;
+ int c6;
+ int c7;
+} ThreadData;
+
+#define OFFSET(x) offsetof(ColorMatrixContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption colormatrix_options[] = {
+ { "src", "set source color matrix", OFFSET(source), AV_OPT_TYPE_INT, {.i64=COLOR_MODE_NONE}, COLOR_MODE_NONE, COLOR_MODE_COUNT-1, .flags=FLAGS, .unit="color_mode" },
+ { "dst", "set destination color matrix", OFFSET(dest), AV_OPT_TYPE_INT, {.i64=COLOR_MODE_NONE}, COLOR_MODE_NONE, COLOR_MODE_COUNT-1, .flags=FLAGS, .unit="color_mode" },
+ { "bt709", "set BT.709 colorspace", 0, AV_OPT_TYPE_CONST, {.i64=COLOR_MODE_BT709}, .flags=FLAGS, .unit="color_mode" },
+ { "fcc", "set FCC colorspace ", 0, AV_OPT_TYPE_CONST, {.i64=COLOR_MODE_FCC}, .flags=FLAGS, .unit="color_mode" },
+ { "bt601", "set BT.601 colorspace", 0, AV_OPT_TYPE_CONST, {.i64=COLOR_MODE_BT601}, .flags=FLAGS, .unit="color_mode" },
+ { "bt470", "set BT.470 colorspace", 0, AV_OPT_TYPE_CONST, {.i64=COLOR_MODE_BT601}, .flags=FLAGS, .unit="color_mode" },
+ { "bt470bg", "set BT.470 colorspace", 0, AV_OPT_TYPE_CONST, {.i64=COLOR_MODE_BT601}, .flags=FLAGS, .unit="color_mode" },
+ { "smpte170m", "set SMTPE-170M colorspace", 0, AV_OPT_TYPE_CONST, {.i64=COLOR_MODE_BT601}, .flags=FLAGS, .unit="color_mode" },
+ { "smpte240m", "set SMPTE-240M colorspace", 0, AV_OPT_TYPE_CONST, {.i64=COLOR_MODE_SMPTE240M}, .flags=FLAGS, .unit="color_mode" },
+ { "bt2020", "set BT.2020 colorspace", 0, AV_OPT_TYPE_CONST, {.i64=COLOR_MODE_BT2020}, .flags=FLAGS, .unit="color_mode" },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(colormatrix);
+
+#define ma m[0][0]
+#define mb m[0][1]
+#define mc m[0][2]
+#define md m[1][0]
+#define me m[1][1]
+#define mf m[1][2]
+#define mg m[2][0]
+#define mh m[2][1]
+#define mi m[2][2]
+
+#define ima im[0][0]
+#define imb im[0][1]
+#define imc im[0][2]
+#define imd im[1][0]
+#define ime im[1][1]
+#define imf im[1][2]
+#define img im[2][0]
+#define imh im[2][1]
+#define imi im[2][2]
+
+static void inverse3x3(double im[3][3], double m[3][3])
+{
+ double det = ma * (me * mi - mf * mh) - mb * (md * mi - mf * mg) + mc * (md * mh - me * mg);
+ det = 1.0 / det;
+ ima = det * (me * mi - mf * mh);
+ imb = det * (mc * mh - mb * mi);
+ imc = det * (mb * mf - mc * me);
+ imd = det * (mf * mg - md * mi);
+ ime = det * (ma * mi - mc * mg);
+ imf = det * (mc * md - ma * mf);
+ img = det * (md * mh - me * mg);
+ imh = det * (mb * mg - ma * mh);
+ imi = det * (ma * me - mb * md);
+}
+
+static void solve_coefficients(double cm[3][3], double rgb[3][3], double yuv[3][3])
+{
+ int i, j;
+ for (i = 0; i < 3; i++)
+ for (j = 0; j < 3; j++)
+ cm[i][j] = yuv[i][0] * rgb[0][j] + yuv[i][1] * rgb[1][j] + yuv[i][2] * rgb[2][j];
+}
+
+static void calc_coefficients(AVFilterContext *ctx)
+{
+ ColorMatrixContext *color = ctx->priv;
+ double yuv_coeff[5][3][3];
+ double rgb_coeffd[5][3][3];
+ double yuv_convertd[25][3][3];
+ double bscale, rscale;
+ int v = 0;
+ int i, j, k;
+ for (i = 0; i < 5; i++) {
+ yuv_coeff[i][0][0] = yuv_coeff_luma[i][0];
+ yuv_coeff[i][0][1] = yuv_coeff_luma[i][1];
+ yuv_coeff[i][0][2] = yuv_coeff_luma[i][2];
+ bscale = 0.5 / (yuv_coeff[i][0][1] - 1.0);
+ rscale = 0.5 / (yuv_coeff[i][0][2] - 1.0);
+ yuv_coeff[i][1][0] = bscale * yuv_coeff[i][0][0];
+ yuv_coeff[i][1][1] = 0.5;
+ yuv_coeff[i][1][2] = bscale * yuv_coeff[i][0][2];
+ yuv_coeff[i][2][0] = rscale * yuv_coeff[i][0][0];
+ yuv_coeff[i][2][1] = rscale * yuv_coeff[i][0][1];
+ yuv_coeff[i][2][2] = 0.5;
+ }
+ for (i = 0; i < 5; i++)
+ inverse3x3(rgb_coeffd[i], yuv_coeff[i]);
+ for (i = 0; i < 5; i++) {
+ for (j = 0; j < 5; j++) {
+ solve_coefficients(yuv_convertd[v], rgb_coeffd[i], yuv_coeff[j]);
+ for (k = 0; k < 3; k++) {
+ color->yuv_convert[v][k][0] = NS(yuv_convertd[v][k][0]);
+ color->yuv_convert[v][k][1] = NS(yuv_convertd[v][k][1]);
+ color->yuv_convert[v][k][2] = NS(yuv_convertd[v][k][2]);
+ }
+ if (color->yuv_convert[v][0][0] != 65536 || color->yuv_convert[v][1][0] != 0 ||
+ color->yuv_convert[v][2][0] != 0) {
+ av_log(ctx, AV_LOG_ERROR, "error calculating conversion coefficients\n");
+ }
+ v++;
+ }
+ }
+}
+
+static const char * const color_modes[] = {"bt709", "fcc", "bt601", "smpte240m", "bt2020"};
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ ColorMatrixContext *color = ctx->priv;
+
+ if (color->dest == COLOR_MODE_NONE) {
+ av_log(ctx, AV_LOG_ERROR, "Unspecified destination color space\n");
+ return AVERROR(EINVAL);
+ }
+
+ if (color->source == color->dest) {
+ av_log(ctx, AV_LOG_ERROR, "Source and destination color space must not be identical\n");
+ return AVERROR(EINVAL);
+ }
+
+ calc_coefficients(ctx);
+
+ return 0;
+}
+
+static int process_slice_uyvy422(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
+{
+ const ThreadData *td = arg;
+ const AVFrame *src = td->src;
+ AVFrame *dst = td->dst;
+ const int height = src->height;
+ const int width = src->width*2;
+ const int src_pitch = src->linesize[0];
+ const int dst_pitch = dst->linesize[0];
+ const int slice_start = (height * jobnr ) / nb_jobs;
+ const int slice_end = (height * (jobnr+1)) / nb_jobs;
+ const unsigned char *srcp = src->data[0] + slice_start * src_pitch;
+ unsigned char *dstp = dst->data[0] + slice_start * dst_pitch;
+ const int c2 = td->c2;
+ const int c3 = td->c3;
+ const int c4 = td->c4;
+ const int c5 = td->c5;
+ const int c6 = td->c6;
+ const int c7 = td->c7;
+ int x, y;
+
+ for (y = slice_start; y < slice_end; y++) {
+ for (x = 0; x < width; x += 4) {
+ const int u = srcp[x + 0] - 128;
+ const int v = srcp[x + 2] - 128;
+ const int uvval = c2 * u + c3 * v + 1081344;
+ dstp[x + 0] = CB((c4 * u + c5 * v + 8421376) >> 16);
+ dstp[x + 1] = CB((65536 * (srcp[x + 1] - 16) + uvval) >> 16);
+ dstp[x + 2] = CB((c6 * u + c7 * v + 8421376) >> 16);
+ dstp[x + 3] = CB((65536 * (srcp[x + 3] - 16) + uvval) >> 16);
+ }
+ srcp += src_pitch;
+ dstp += dst_pitch;
+ }
+
+ return 0;
+}
+
+static int process_slice_yuv444p(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
+{
+ const ThreadData *td = arg;
+ const AVFrame *src = td->src;
+ AVFrame *dst = td->dst;
+ const int height = src->height;
+ const int width = src->width;
+ const int slice_start = (height * jobnr ) / nb_jobs;
+ const int slice_end = (height * (jobnr+1)) / nb_jobs;
+ const int src_pitchY = src->linesize[0];
+ const int src_pitchUV = src->linesize[1];
+ const unsigned char *srcpU = src->data[1] + slice_start * src_pitchUV;
+ const unsigned char *srcpV = src->data[2] + slice_start * src_pitchUV;
+ const unsigned char *srcpY = src->data[0] + slice_start * src_pitchY;
+ const int dst_pitchY = dst->linesize[0];
+ const int dst_pitchUV = dst->linesize[1];
+ unsigned char *dstpU = dst->data[1] + slice_start * dst_pitchUV;
+ unsigned char *dstpV = dst->data[2] + slice_start * dst_pitchUV;
+ unsigned char *dstpY = dst->data[0] + slice_start * dst_pitchY;
+ const int c2 = td->c2;
+ const int c3 = td->c3;
+ const int c4 = td->c4;
+ const int c5 = td->c5;
+ const int c6 = td->c6;
+ const int c7 = td->c7;
+ int x, y;
+
+ for (y = slice_start; y < slice_end; y++) {
+ for (x = 0; x < width; x++) {
+ const int u = srcpU[x] - 128;
+ const int v = srcpV[x] - 128;
+ const int uvval = c2 * u + c3 * v + 1081344;
+ dstpY[x] = CB((65536 * (srcpY[x] - 16) + uvval) >> 16);
+ dstpU[x] = CB((c4 * u + c5 * v + 8421376) >> 16);
+ dstpV[x] = CB((c6 * u + c7 * v + 8421376) >> 16);
+ }
+ srcpY += src_pitchY;
+ dstpY += dst_pitchY;
+ srcpU += src_pitchUV;
+ srcpV += src_pitchUV;
+ dstpU += dst_pitchUV;
+ dstpV += dst_pitchUV;
+ }
+
+ return 0;
+}
+
+static int process_slice_yuv422p(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
+{
+ const ThreadData *td = arg;
+ const AVFrame *src = td->src;
+ AVFrame *dst = td->dst;
+ const int height = src->height;
+ const int width = src->width;
+ const int slice_start = (height * jobnr ) / nb_jobs;
+ const int slice_end = (height * (jobnr+1)) / nb_jobs;
+ const int src_pitchY = src->linesize[0];
+ const int src_pitchUV = src->linesize[1];
+ const unsigned char *srcpU = src->data[1] + slice_start * src_pitchUV;
+ const unsigned char *srcpV = src->data[2] + slice_start * src_pitchUV;
+ const unsigned char *srcpY = src->data[0] + slice_start * src_pitchY;
+ const int dst_pitchY = dst->linesize[0];
+ const int dst_pitchUV = dst->linesize[1];
+ unsigned char *dstpU = dst->data[1] + slice_start * dst_pitchUV;
+ unsigned char *dstpV = dst->data[2] + slice_start * dst_pitchUV;
+ unsigned char *dstpY = dst->data[0] + slice_start * dst_pitchY;
+ const int c2 = td->c2;
+ const int c3 = td->c3;
+ const int c4 = td->c4;
+ const int c5 = td->c5;
+ const int c6 = td->c6;
+ const int c7 = td->c7;
+ int x, y;
+
+ for (y = slice_start; y < slice_end; y++) {
+ for (x = 0; x < width; x += 2) {
+ const int u = srcpU[x >> 1] - 128;
+ const int v = srcpV[x >> 1] - 128;
+ const int uvval = c2 * u + c3 * v + 1081344;
+ dstpY[x + 0] = CB((65536 * (srcpY[x + 0] - 16) + uvval) >> 16);
+ dstpY[x + 1] = CB((65536 * (srcpY[x + 1] - 16) + uvval) >> 16);
+ dstpU[x >> 1] = CB((c4 * u + c5 * v + 8421376) >> 16);
+ dstpV[x >> 1] = CB((c6 * u + c7 * v + 8421376) >> 16);
+ }
+ srcpY += src_pitchY;
+ dstpY += dst_pitchY;
+ srcpU += src_pitchUV;
+ srcpV += src_pitchUV;
+ dstpU += dst_pitchUV;
+ dstpV += dst_pitchUV;
+ }
+
+ return 0;
+}
+
+static int process_slice_yuv420p(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
+{
+ const ThreadData *td = arg;
+ const AVFrame *src = td->src;
+ AVFrame *dst = td->dst;
+ const int height = FFALIGN(src->height, 2) >> 1;
+ const int width = src->width;
+ const int slice_start = ((height * jobnr ) / nb_jobs) << 1;
+ const int slice_end = ((height * (jobnr+1)) / nb_jobs) << 1;
+ const int src_pitchY = src->linesize[0];
+ const int src_pitchUV = src->linesize[1];
+ const int dst_pitchY = dst->linesize[0];
+ const int dst_pitchUV = dst->linesize[1];
+ const unsigned char *srcpY = src->data[0] + src_pitchY * slice_start;
+ const unsigned char *srcpU = src->data[1] + src_pitchUV * (slice_start >> 1);
+ const unsigned char *srcpV = src->data[2] + src_pitchUV * (slice_start >> 1);
+ const unsigned char *srcpN = src->data[0] + src_pitchY * (slice_start + 1);
+ unsigned char *dstpU = dst->data[1] + dst_pitchUV * (slice_start >> 1);
+ unsigned char *dstpV = dst->data[2] + dst_pitchUV * (slice_start >> 1);
+ unsigned char *dstpY = dst->data[0] + dst_pitchY * slice_start;
+ unsigned char *dstpN = dst->data[0] + dst_pitchY * (slice_start + 1);
+ const int c2 = td->c2;
+ const int c3 = td->c3;
+ const int c4 = td->c4;
+ const int c5 = td->c5;
+ const int c6 = td->c6;
+ const int c7 = td->c7;
+ int x, y;
+
+ for (y = slice_start; y < slice_end; y += 2) {
+ for (x = 0; x < width; x += 2) {
+ const int u = srcpU[x >> 1] - 128;
+ const int v = srcpV[x >> 1] - 128;
+ const int uvval = c2 * u + c3 * v + 1081344;
+ dstpY[x + 0] = CB((65536 * (srcpY[x + 0] - 16) + uvval) >> 16);
+ dstpY[x + 1] = CB((65536 * (srcpY[x + 1] - 16) + uvval) >> 16);
+ dstpN[x + 0] = CB((65536 * (srcpN[x + 0] - 16) + uvval) >> 16);
+ dstpN[x + 1] = CB((65536 * (srcpN[x + 1] - 16) + uvval) >> 16);
+ dstpU[x >> 1] = CB((c4 * u + c5 * v + 8421376) >> 16);
+ dstpV[x >> 1] = CB((c6 * u + c7 * v + 8421376) >> 16);
+ }
+ srcpY += src_pitchY << 1;
+ dstpY += dst_pitchY << 1;
+ srcpN += src_pitchY << 1;
+ dstpN += dst_pitchY << 1;
+ srcpU += src_pitchUV;
+ srcpV += src_pitchUV;
+ dstpU += dst_pitchUV;
+ dstpV += dst_pitchUV;
+ }
+
+ return 0;
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ ColorMatrixContext *color = ctx->priv;
+ const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
+
+ color->hsub = pix_desc->log2_chroma_w;
+ color->vsub = pix_desc->log2_chroma_h;
+
+ av_log(ctx, AV_LOG_VERBOSE, "%s -> %s\n",
+ color_modes[color->source], color_modes[color->dest]);
+
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV444P,
+ AV_PIX_FMT_YUV422P,
+ AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_UYVY422,
+ AV_PIX_FMT_NONE
+ };
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static int filter_frame(AVFilterLink *link, AVFrame *in)
+{
+ AVFilterContext *ctx = link->dst;
+ ColorMatrixContext *color = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFrame *out;
+ ThreadData td = {0};
+
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+
+ if (color->source == COLOR_MODE_NONE) {
+ enum AVColorSpace cs = av_frame_get_colorspace(in);
+ enum ColorMode source;
+
+ switch(cs) {
+ case AVCOL_SPC_BT709 : source = COLOR_MODE_BT709 ; break;
+ case AVCOL_SPC_FCC : source = COLOR_MODE_FCC ; break;
+ case AVCOL_SPC_SMPTE240M : source = COLOR_MODE_SMPTE240M ; break;
+ case AVCOL_SPC_BT470BG : source = COLOR_MODE_BT601 ; break;
+ case AVCOL_SPC_SMPTE170M : source = COLOR_MODE_BT601 ; break;
+ case AVCOL_SPC_BT2020_NCL: source = COLOR_MODE_BT2020 ; break;
+ case AVCOL_SPC_BT2020_CL : source = COLOR_MODE_BT2020 ; break;
+ default :
+ av_log(ctx, AV_LOG_ERROR, "Input frame does not specify a supported colorspace, and none has been specified as source either\n");
+ av_frame_free(&out);
+ return AVERROR(EINVAL);
+ }
+ color->mode = source * 5 + color->dest;
+ } else
+ color->mode = color->source * 5 + color->dest;
+
+ switch(color->dest) {
+ case COLOR_MODE_BT709 : av_frame_set_colorspace(out, AVCOL_SPC_BT709) ; break;
+ case COLOR_MODE_FCC : av_frame_set_colorspace(out, AVCOL_SPC_FCC) ; break;
+ case COLOR_MODE_SMPTE240M: av_frame_set_colorspace(out, AVCOL_SPC_SMPTE240M) ; break;
+ case COLOR_MODE_BT601 : av_frame_set_colorspace(out, AVCOL_SPC_BT470BG) ; break;
+ case COLOR_MODE_BT2020 : av_frame_set_colorspace(out, AVCOL_SPC_BT2020_NCL); break;
+ }
+
+ td.src = in;
+ td.dst = out;
+ td.c2 = color->yuv_convert[color->mode][0][1];
+ td.c3 = color->yuv_convert[color->mode][0][2];
+ td.c4 = color->yuv_convert[color->mode][1][1];
+ td.c5 = color->yuv_convert[color->mode][1][2];
+ td.c6 = color->yuv_convert[color->mode][2][1];
+ td.c7 = color->yuv_convert[color->mode][2][2];
+
+ if (in->format == AV_PIX_FMT_YUV444P)
+ ctx->internal->execute(ctx, process_slice_yuv444p, &td, NULL,
+ FFMIN(in->height, ff_filter_get_nb_threads(ctx)));
+ else if (in->format == AV_PIX_FMT_YUV422P)
+ ctx->internal->execute(ctx, process_slice_yuv422p, &td, NULL,
+ FFMIN(in->height, ff_filter_get_nb_threads(ctx)));
+ else if (in->format == AV_PIX_FMT_YUV420P)
+ ctx->internal->execute(ctx, process_slice_yuv420p, &td, NULL,
+ FFMIN(in->height / 2, ff_filter_get_nb_threads(ctx)));
+ else
+ ctx->internal->execute(ctx, process_slice_uyvy422, &td, NULL,
+ FFMIN(in->height, ff_filter_get_nb_threads(ctx)));
+
+ av_frame_free(&in);
+ return ff_filter_frame(outlink, out);
+}
+
+static const AVFilterPad colormatrix_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad colormatrix_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_colormatrix = {
+ .name = "colormatrix",
+ .description = NULL_IF_CONFIG_SMALL("Convert color matrix."),
+ .priv_size = sizeof(ColorMatrixContext),
+ .init = init,
+ .query_formats = query_formats,
+ .inputs = colormatrix_inputs,
+ .outputs = colormatrix_outputs,
+ .priv_class = &colormatrix_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
+};
diff --git a/libavfilter/vf_colorspace.c b/libavfilter/vf_colorspace.c
new file mode 100644
index 0000000000..0024505a44
--- /dev/null
+++ b/libavfilter/vf_colorspace.c
@@ -0,0 +1,1197 @@
+/*
+ * Copyright (c) 2016 Ronald S. Bultje <rsbultje@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/*
+ * @file
+ * Convert between colorspaces.
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/pixfmt.h"
+
+#include "avfilter.h"
+#include "colorspacedsp.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+enum DitherMode {
+ DITHER_NONE,
+ DITHER_FSB,
+ DITHER_NB,
+};
+
+enum Colorspace {
+ CS_UNSPECIFIED,
+ CS_BT470M,
+ CS_BT470BG,
+ CS_BT601_6_525,
+ CS_BT601_6_625,
+ CS_BT709,
+ CS_SMPTE170M,
+ CS_SMPTE240M,
+ CS_BT2020,
+ CS_NB,
+};
+
+enum Whitepoint {
+ WP_D65,
+ WP_C,
+ WP_DCI,
+ WP_NB,
+};
+
+enum WhitepointAdaptation {
+ WP_ADAPT_BRADFORD,
+ WP_ADAPT_VON_KRIES,
+ NB_WP_ADAPT_NON_IDENTITY,
+ WP_ADAPT_IDENTITY = NB_WP_ADAPT_NON_IDENTITY,
+ NB_WP_ADAPT,
+};
+
+static const enum AVColorTransferCharacteristic default_trc[CS_NB + 1] = {
+ [CS_UNSPECIFIED] = AVCOL_TRC_UNSPECIFIED,
+ [CS_BT470M] = AVCOL_TRC_GAMMA22,
+ [CS_BT470BG] = AVCOL_TRC_GAMMA28,
+ [CS_BT601_6_525] = AVCOL_TRC_SMPTE170M,
+ [CS_BT601_6_625] = AVCOL_TRC_SMPTE170M,
+ [CS_BT709] = AVCOL_TRC_BT709,
+ [CS_SMPTE170M] = AVCOL_TRC_SMPTE170M,
+ [CS_SMPTE240M] = AVCOL_TRC_SMPTE240M,
+ [CS_BT2020] = AVCOL_TRC_BT2020_10,
+ [CS_NB] = AVCOL_TRC_UNSPECIFIED,
+};
+
+static const enum AVColorPrimaries default_prm[CS_NB + 1] = {
+ [CS_UNSPECIFIED] = AVCOL_PRI_UNSPECIFIED,
+ [CS_BT470M] = AVCOL_PRI_BT470M,
+ [CS_BT470BG] = AVCOL_PRI_BT470BG,
+ [CS_BT601_6_525] = AVCOL_PRI_SMPTE170M,
+ [CS_BT601_6_625] = AVCOL_PRI_BT470BG,
+ [CS_BT709] = AVCOL_PRI_BT709,
+ [CS_SMPTE170M] = AVCOL_PRI_SMPTE170M,
+ [CS_SMPTE240M] = AVCOL_PRI_SMPTE240M,
+ [CS_BT2020] = AVCOL_PRI_BT2020,
+ [CS_NB] = AVCOL_PRI_UNSPECIFIED,
+};
+
+static const enum AVColorSpace default_csp[CS_NB + 1] = {
+ [CS_UNSPECIFIED] = AVCOL_SPC_UNSPECIFIED,
+ [CS_BT470M] = AVCOL_SPC_SMPTE170M,
+ [CS_BT470BG] = AVCOL_SPC_BT470BG,
+ [CS_BT601_6_525] = AVCOL_SPC_SMPTE170M,
+ [CS_BT601_6_625] = AVCOL_SPC_BT470BG,
+ [CS_BT709] = AVCOL_SPC_BT709,
+ [CS_SMPTE170M] = AVCOL_SPC_SMPTE170M,
+ [CS_SMPTE240M] = AVCOL_SPC_SMPTE240M,
+ [CS_BT2020] = AVCOL_SPC_BT2020_NCL,
+ [CS_NB] = AVCOL_SPC_UNSPECIFIED,
+};
+
+struct ColorPrimaries {
+ enum Whitepoint wp;
+ double xr, yr, xg, yg, xb, yb;
+};
+
+struct TransferCharacteristics {
+ double alpha, beta, gamma, delta;
+};
+
+struct LumaCoefficients {
+ double cr, cg, cb;
+};
+
+struct WhitepointCoefficients {
+ double xw, yw;
+};
+
+typedef struct ColorSpaceContext {
+ const AVClass *class;
+
+ ColorSpaceDSPContext dsp;
+
+ enum Colorspace user_all, user_iall;
+ enum AVColorSpace in_csp, out_csp, user_csp, user_icsp;
+ enum AVColorRange in_rng, out_rng, user_rng, user_irng;
+ enum AVColorTransferCharacteristic in_trc, out_trc, user_trc, user_itrc;
+ enum AVColorPrimaries in_prm, out_prm, user_prm, user_iprm;
+ enum AVPixelFormat in_format, user_format;
+ int fast_mode;
+ enum DitherMode dither;
+ enum WhitepointAdaptation wp_adapt;
+
+ int16_t *rgb[3];
+ ptrdiff_t rgb_stride;
+ unsigned rgb_sz;
+ int *dither_scratch[3][2], *dither_scratch_base[3][2];
+
+ const struct ColorPrimaries *in_primaries, *out_primaries;
+ int lrgb2lrgb_passthrough;
+ DECLARE_ALIGNED(16, int16_t, lrgb2lrgb_coeffs)[3][3][8];
+
+ const struct TransferCharacteristics *in_txchr, *out_txchr;
+ int rgb2rgb_passthrough;
+ int16_t *lin_lut, *delin_lut;
+
+ const struct LumaCoefficients *in_lumacoef, *out_lumacoef;
+ int yuv2yuv_passthrough, yuv2yuv_fastmode;
+ DECLARE_ALIGNED(16, int16_t, yuv2rgb_coeffs)[3][3][8];
+ DECLARE_ALIGNED(16, int16_t, rgb2yuv_coeffs)[3][3][8];
+ DECLARE_ALIGNED(16, int16_t, yuv2yuv_coeffs)[3][3][8];
+ DECLARE_ALIGNED(16, int16_t, yuv_offset)[2 /* in, out */][8];
+ yuv2rgb_fn yuv2rgb;
+ rgb2yuv_fn rgb2yuv;
+ rgb2yuv_fsb_fn rgb2yuv_fsb;
+ yuv2yuv_fn yuv2yuv;
+ double yuv2rgb_dbl_coeffs[3][3], rgb2yuv_dbl_coeffs[3][3];
+ int in_y_rng, in_uv_rng, out_y_rng, out_uv_rng;
+
+ int did_warn_range;
+} ColorSpaceContext;
+
+// FIXME deal with odd width/heights
+// FIXME faster linearize/delinearize implementation (integer pow)
+// FIXME bt2020cl support (linearization between yuv/rgb step instead of between rgb/xyz)
+// FIXME test that the values in (de)lin_lut don't exceed their container storage
+// type size (only useful if we keep the LUT and don't move to fast integer pow)
+// FIXME dithering if bitdepth goes down?
+// FIXME bitexact for fate integration?
+
+static const double ycgco_matrix[3][3] =
+{
+ { 0.25, 0.5, 0.25 },
+ { -0.25, 0.5, -0.25 },
+ { 0.5, 0, -0.5 },
+};
+
+/*
+ * All constants explained in e.g. https://linuxtv.org/downloads/v4l-dvb-apis/ch02s06.html
+ * The older ones (bt470bg/m) are also explained in their respective ITU docs
+ * (e.g. https://www.itu.int/dms_pubrec/itu-r/rec/bt/R-REC-BT.470-5-199802-S!!PDF-E.pdf)
+ * whereas the newer ones can typically be copied directly from wikipedia :)
+ */
+static const struct LumaCoefficients luma_coefficients[AVCOL_SPC_NB] = {
+ [AVCOL_SPC_FCC] = { 0.30, 0.59, 0.11 },
+ [AVCOL_SPC_BT470BG] = { 0.299, 0.587, 0.114 },
+ [AVCOL_SPC_SMPTE170M] = { 0.299, 0.587, 0.114 },
+ [AVCOL_SPC_BT709] = { 0.2126, 0.7152, 0.0722 },
+ [AVCOL_SPC_SMPTE240M] = { 0.212, 0.701, 0.087 },
+ [AVCOL_SPC_YCOCG] = { 0.25, 0.5, 0.25 },
+ [AVCOL_SPC_BT2020_NCL] = { 0.2627, 0.6780, 0.0593 },
+ [AVCOL_SPC_BT2020_CL] = { 0.2627, 0.6780, 0.0593 },
+};
+
+static const struct LumaCoefficients *get_luma_coefficients(enum AVColorSpace csp)
+{
+ const struct LumaCoefficients *coeffs;
+
+ if (csp >= AVCOL_SPC_NB)
+ return NULL;
+ coeffs = &luma_coefficients[csp];
+ if (!coeffs->cr)
+ return NULL;
+
+ return coeffs;
+}
+
+static void fill_rgb2yuv_table(const struct LumaCoefficients *coeffs,
+ double rgb2yuv[3][3])
+{
+ double bscale, rscale;
+
+ // special ycgco matrix
+ if (coeffs->cr == 0.25 && coeffs->cg == 0.5 && coeffs->cb == 0.25) {
+ memcpy(rgb2yuv, ycgco_matrix, sizeof(double) * 9);
+ return;
+ }
+
+ rgb2yuv[0][0] = coeffs->cr;
+ rgb2yuv[0][1] = coeffs->cg;
+ rgb2yuv[0][2] = coeffs->cb;
+ bscale = 0.5 / (coeffs->cb - 1.0);
+ rscale = 0.5 / (coeffs->cr - 1.0);
+ rgb2yuv[1][0] = bscale * coeffs->cr;
+ rgb2yuv[1][1] = bscale * coeffs->cg;
+ rgb2yuv[1][2] = 0.5;
+ rgb2yuv[2][0] = 0.5;
+ rgb2yuv[2][1] = rscale * coeffs->cg;
+ rgb2yuv[2][2] = rscale * coeffs->cb;
+}
+
+// FIXME I'm pretty sure gamma22/28 also have a linear toe slope, but I can't
+// find any actual tables that document their real values...
+// See http://www.13thmonkey.org/~boris/gammacorrection/ first graph why it matters
+static const struct TransferCharacteristics transfer_characteristics[AVCOL_TRC_NB] = {
+ [AVCOL_TRC_BT709] = { 1.099, 0.018, 0.45, 4.5 },
+ [AVCOL_TRC_GAMMA22] = { 1.0, 0.0, 1.0 / 2.2, 0.0 },
+ [AVCOL_TRC_GAMMA28] = { 1.0, 0.0, 1.0 / 2.8, 0.0 },
+ [AVCOL_TRC_SMPTE170M] = { 1.099, 0.018, 0.45, 4.5 },
+ [AVCOL_TRC_SMPTE240M] = { 1.1115, 0.0228, 0.45, 4.0 },
+ [AVCOL_TRC_IEC61966_2_1] = { 1.055, 0.0031308, 1.0 / 2.4, 12.92 },
+ [AVCOL_TRC_IEC61966_2_4] = { 1.099, 0.018, 0.45, 4.5 },
+ [AVCOL_TRC_BT2020_10] = { 1.099, 0.018, 0.45, 4.5 },
+ [AVCOL_TRC_BT2020_12] = { 1.0993, 0.0181, 0.45, 4.5 },
+};
+
+static const struct TransferCharacteristics *
+ get_transfer_characteristics(enum AVColorTransferCharacteristic trc)
+{
+ const struct TransferCharacteristics *coeffs;
+
+ if (trc >= AVCOL_TRC_NB)
+ return NULL;
+ coeffs = &transfer_characteristics[trc];
+ if (!coeffs->alpha)
+ return NULL;
+
+ return coeffs;
+}
+
+static const struct WhitepointCoefficients whitepoint_coefficients[WP_NB] = {
+ [WP_D65] = { 0.3127, 0.3290 },
+ [WP_C] = { 0.3100, 0.3160 },
+ [WP_DCI] = { 0.3140, 0.3510 },
+};
+
+static const struct ColorPrimaries color_primaries[AVCOL_PRI_NB] = {
+ [AVCOL_PRI_BT709] = { WP_D65, 0.640, 0.330, 0.300, 0.600, 0.150, 0.060 },
+ [AVCOL_PRI_BT470M] = { WP_C, 0.670, 0.330, 0.210, 0.710, 0.140, 0.080 },
+ [AVCOL_PRI_BT470BG] = { WP_D65, 0.640, 0.330, 0.290, 0.600, 0.150, 0.060,},
+ [AVCOL_PRI_SMPTE170M] = { WP_D65, 0.630, 0.340, 0.310, 0.595, 0.155, 0.070 },
+ [AVCOL_PRI_SMPTE240M] = { WP_D65, 0.630, 0.340, 0.310, 0.595, 0.155, 0.070 },
+ [AVCOL_PRI_SMPTE431] = { WP_DCI, 0.680, 0.320, 0.265, 0.690, 0.150, 0.060 },
+ [AVCOL_PRI_SMPTE432] = { WP_D65, 0.680, 0.320, 0.265, 0.690, 0.150, 0.060 },
+ [AVCOL_PRI_FILM] = { WP_C, 0.681, 0.319, 0.243, 0.692, 0.145, 0.049 },
+ [AVCOL_PRI_BT2020] = { WP_D65, 0.708, 0.292, 0.170, 0.797, 0.131, 0.046 },
+};
+
+static const struct ColorPrimaries *get_color_primaries(enum AVColorPrimaries prm)
+{
+ const struct ColorPrimaries *coeffs;
+
+ if (prm >= AVCOL_PRI_NB)
+ return NULL;
+ coeffs = &color_primaries[prm];
+ if (!coeffs->xr)
+ return NULL;
+
+ return coeffs;
+}
+
+static void invert_matrix3x3(const double in[3][3], double out[3][3])
+{
+ double m00 = in[0][0], m01 = in[0][1], m02 = in[0][2],
+ m10 = in[1][0], m11 = in[1][1], m12 = in[1][2],
+ m20 = in[2][0], m21 = in[2][1], m22 = in[2][2];
+ int i, j;
+ double det;
+
+ out[0][0] = (m11 * m22 - m21 * m12);
+ out[0][1] = -(m01 * m22 - m21 * m02);
+ out[0][2] = (m01 * m12 - m11 * m02);
+ out[1][0] = -(m10 * m22 - m20 * m12);
+ out[1][1] = (m00 * m22 - m20 * m02);
+ out[1][2] = -(m00 * m12 - m10 * m02);
+ out[2][0] = (m10 * m21 - m20 * m11);
+ out[2][1] = -(m00 * m21 - m20 * m01);
+ out[2][2] = (m00 * m11 - m10 * m01);
+
+ det = m00 * out[0][0] + m10 * out[0][1] + m20 * out[0][2];
+ det = 1.0 / det;
+
+ for (i = 0; i < 3; i++) {
+ for (j = 0; j < 3; j++)
+ out[i][j] *= det;
+ }
+}
+
+static int fill_gamma_table(ColorSpaceContext *s)
+{
+ int n;
+ double in_alpha = s->in_txchr->alpha, in_beta = s->in_txchr->beta;
+ double in_gamma = s->in_txchr->gamma, in_delta = s->in_txchr->delta;
+ double in_ialpha = 1.0 / in_alpha, in_igamma = 1.0 / in_gamma, in_idelta = 1.0 / in_delta;
+ double out_alpha = s->out_txchr->alpha, out_beta = s->out_txchr->beta;
+ double out_gamma = s->out_txchr->gamma, out_delta = s->out_txchr->delta;
+
+ s->lin_lut = av_malloc(sizeof(*s->lin_lut) * 32768 * 2);
+ if (!s->lin_lut)
+ return AVERROR(ENOMEM);
+ s->delin_lut = &s->lin_lut[32768];
+ for (n = 0; n < 32768; n++) {
+ double v = (n - 2048.0) / 28672.0, d, l;
+
+ // delinearize
+ if (v <= -out_beta) {
+ d = -out_alpha * pow(-v, out_gamma) + (out_alpha - 1.0);
+ } else if (v < out_beta) {
+ d = out_delta * v;
+ } else {
+ d = out_alpha * pow(v, out_gamma) - (out_alpha - 1.0);
+ }
+ s->delin_lut[n] = av_clip_int16(lrint(d * 28672.0));
+
+ // linearize
+ if (v <= -in_beta) {
+ l = -pow((1.0 - in_alpha - v) * in_ialpha, in_igamma);
+ } else if (v < in_beta) {
+ l = v * in_idelta;
+ } else {
+ l = pow((v + in_alpha - 1.0) * in_ialpha, in_igamma);
+ }
+ s->lin_lut[n] = av_clip_int16(lrint(l * 28672.0));
+ }
+
+ return 0;
+}
+
+/*
+ * see e.g. http://www.brucelindbloom.com/index.html?Eqn_RGB_XYZ_Matrix.html
+ */
+static void fill_rgb2xyz_table(const struct ColorPrimaries *coeffs,
+ double rgb2xyz[3][3])
+{
+ const struct WhitepointCoefficients *wp = &whitepoint_coefficients[coeffs->wp];
+ double i[3][3], sr, sg, sb, zw;
+
+ rgb2xyz[0][0] = coeffs->xr / coeffs->yr;
+ rgb2xyz[0][1] = coeffs->xg / coeffs->yg;
+ rgb2xyz[0][2] = coeffs->xb / coeffs->yb;
+ rgb2xyz[1][0] = rgb2xyz[1][1] = rgb2xyz[1][2] = 1.0;
+ rgb2xyz[2][0] = (1.0 - coeffs->xr - coeffs->yr) / coeffs->yr;
+ rgb2xyz[2][1] = (1.0 - coeffs->xg - coeffs->yg) / coeffs->yg;
+ rgb2xyz[2][2] = (1.0 - coeffs->xb - coeffs->yb) / coeffs->yb;
+ invert_matrix3x3(rgb2xyz, i);
+ zw = 1.0 - wp->xw - wp->yw;
+ sr = i[0][0] * wp->xw + i[0][1] * wp->yw + i[0][2] * zw;
+ sg = i[1][0] * wp->xw + i[1][1] * wp->yw + i[1][2] * zw;
+ sb = i[2][0] * wp->xw + i[2][1] * wp->yw + i[2][2] * zw;
+ rgb2xyz[0][0] *= sr;
+ rgb2xyz[0][1] *= sg;
+ rgb2xyz[0][2] *= sb;
+ rgb2xyz[1][0] *= sr;
+ rgb2xyz[1][1] *= sg;
+ rgb2xyz[1][2] *= sb;
+ rgb2xyz[2][0] *= sr;
+ rgb2xyz[2][1] *= sg;
+ rgb2xyz[2][2] *= sb;
+}
+
+static void mul3x3(double dst[3][3], const double src1[3][3], const double src2[3][3])
+{
+ int m, n;
+
+ for (m = 0; m < 3; m++)
+ for (n = 0; n < 3; n++)
+ dst[m][n] = src2[m][0] * src1[0][n] +
+ src2[m][1] * src1[1][n] +
+ src2[m][2] * src1[2][n];
+}
+
+/*
+ * See http://www.brucelindbloom.com/index.html?Eqn_ChromAdapt.html
+ * This function uses the Bradford mechanism.
+ */
+static void fill_whitepoint_conv_table(double out[3][3], enum WhitepointAdaptation wp_adapt,
+ enum Whitepoint src, enum Whitepoint dst)
+{
+ static const double ma_tbl[NB_WP_ADAPT_NON_IDENTITY][3][3] = {
+ [WP_ADAPT_BRADFORD] = {
+ { 0.8951, 0.2664, -0.1614 },
+ { -0.7502, 1.7135, 0.0367 },
+ { 0.0389, -0.0685, 1.0296 },
+ }, [WP_ADAPT_VON_KRIES] = {
+ { 0.40024, 0.70760, -0.08081 },
+ { -0.22630, 1.16532, 0.04570 },
+ { 0.00000, 0.00000, 0.91822 },
+ },
+ };
+ const double (*ma)[3] = ma_tbl[wp_adapt];
+ const struct WhitepointCoefficients *wp_src = &whitepoint_coefficients[src];
+ double zw_src = 1.0 - wp_src->xw - wp_src->yw;
+ const struct WhitepointCoefficients *wp_dst = &whitepoint_coefficients[dst];
+ double zw_dst = 1.0 - wp_dst->xw - wp_dst->yw;
+ double mai[3][3], fac[3][3], tmp[3][3];
+ double rs, gs, bs, rd, gd, bd;
+
+ invert_matrix3x3(ma, mai);
+ rs = ma[0][0] * wp_src->xw + ma[0][1] * wp_src->yw + ma[0][2] * zw_src;
+ gs = ma[1][0] * wp_src->xw + ma[1][1] * wp_src->yw + ma[1][2] * zw_src;
+ bs = ma[2][0] * wp_src->xw + ma[2][1] * wp_src->yw + ma[2][2] * zw_src;
+ rd = ma[0][0] * wp_dst->xw + ma[0][1] * wp_dst->yw + ma[0][2] * zw_dst;
+ gd = ma[1][0] * wp_dst->xw + ma[1][1] * wp_dst->yw + ma[1][2] * zw_dst;
+ bd = ma[2][0] * wp_dst->xw + ma[2][1] * wp_dst->yw + ma[2][2] * zw_dst;
+ fac[0][0] = rd / rs;
+ fac[1][1] = gd / gs;
+ fac[2][2] = bd / bs;
+ fac[0][1] = fac[0][2] = fac[1][0] = fac[1][2] = fac[2][0] = fac[2][1] = 0.0;
+ mul3x3(tmp, ma, fac);
+ mul3x3(out, tmp, mai);
+}
+
+static void apply_lut(int16_t *buf[3], ptrdiff_t stride,
+ int w, int h, const int16_t *lut)
+{
+ int y, x, n;
+
+ for (n = 0; n < 3; n++) {
+ int16_t *data = buf[n];
+
+ for (y = 0; y < h; y++) {
+ for (x = 0; x < w; x++)
+ data[x] = lut[av_clip_uintp2(2048 + data[x], 15)];
+
+ data += stride;
+ }
+ }
+}
+
+struct ThreadData {
+ AVFrame *in, *out;
+ ptrdiff_t in_linesize[3], out_linesize[3];
+ int in_ss_h, out_ss_h;
+};
+
+static int convert(AVFilterContext *ctx, void *data, int job_nr, int n_jobs)
+{
+ struct ThreadData *td = data;
+ ColorSpaceContext *s = ctx->priv;
+ uint8_t *in_data[3], *out_data[3];
+ int16_t *rgb[3];
+ int h_in = (td->in->height + 1) >> 1;
+ int h1 = 2 * (job_nr * h_in / n_jobs), h2 = 2 * ((job_nr + 1) * h_in / n_jobs);
+ int w = td->in->width, h = h2 - h1;
+
+ in_data[0] = td->in->data[0] + td->in_linesize[0] * h1;
+ in_data[1] = td->in->data[1] + td->in_linesize[1] * (h1 >> td->in_ss_h);
+ in_data[2] = td->in->data[2] + td->in_linesize[2] * (h1 >> td->in_ss_h);
+ out_data[0] = td->out->data[0] + td->out_linesize[0] * h1;
+ out_data[1] = td->out->data[1] + td->out_linesize[1] * (h1 >> td->out_ss_h);
+ out_data[2] = td->out->data[2] + td->out_linesize[2] * (h1 >> td->out_ss_h);
+ rgb[0] = s->rgb[0] + s->rgb_stride * h1;
+ rgb[1] = s->rgb[1] + s->rgb_stride * h1;
+ rgb[2] = s->rgb[2] + s->rgb_stride * h1;
+
+ // FIXME for simd, also make sure we do pictures with negative stride
+ // top-down so we don't overwrite lines with padding of data before it
+ // in the same buffer (same as swscale)
+
+ if (s->yuv2yuv_fastmode) {
+ // FIXME possibly use a fast mode in case only the y range changes?
+ // since in that case, only the diagonal entries in yuv2yuv_coeffs[]
+ // are non-zero
+ s->yuv2yuv(out_data, td->out_linesize, in_data, td->in_linesize, w, h,
+ s->yuv2yuv_coeffs, s->yuv_offset);
+ } else {
+ // FIXME maybe (for caching effciency) do pipeline per-line instead of
+ // full buffer per function? (Or, since yuv2rgb requires 2 lines: per
+ // 2 lines, for yuv420.)
+ /*
+ * General design:
+ * - yuv2rgb converts from whatever range the input was ([16-235/240] or
+ * [0,255] or the 10/12bpp equivalents thereof) to an integer version
+ * of RGB in psuedo-restricted 15+sign bits. That means that the float
+ * range [0.0,1.0] is in [0,28762], and the remainder of the int16_t
+ * range is used for overflow/underflow outside the representable
+ * range of this RGB type. rgb2yuv is the exact opposite.
+ * - gamma correction is done using a LUT since that appears to work
+ * fairly fast.
+ * - If the input is chroma-subsampled (420/422), the yuv2rgb conversion
+ * (or rgb2yuv conversion) uses nearest-neighbour sampling to read
+ * read chroma pixels at luma resolution. If you want some more fancy
+ * filter, you can use swscale to convert to yuv444p.
+ * - all coefficients are 14bit (so in the [-2.0,2.0] range).
+ */
+ s->yuv2rgb(rgb, s->rgb_stride, in_data, td->in_linesize, w, h,
+ s->yuv2rgb_coeffs, s->yuv_offset[0]);
+ if (!s->rgb2rgb_passthrough) {
+ apply_lut(rgb, s->rgb_stride, w, h, s->lin_lut);
+ if (!s->lrgb2lrgb_passthrough)
+ s->dsp.multiply3x3(rgb, s->rgb_stride, w, h, s->lrgb2lrgb_coeffs);
+ apply_lut(rgb, s->rgb_stride, w, h, s->delin_lut);
+ }
+ if (s->dither == DITHER_FSB) {
+ s->rgb2yuv_fsb(out_data, td->out_linesize, rgb, s->rgb_stride, w, h,
+ s->rgb2yuv_coeffs, s->yuv_offset[1], s->dither_scratch);
+ } else {
+ s->rgb2yuv(out_data, td->out_linesize, rgb, s->rgb_stride, w, h,
+ s->rgb2yuv_coeffs, s->yuv_offset[1]);
+ }
+ }
+
+ return 0;
+}
+
+static int get_range_off(AVFilterContext *ctx, int *off,
+ int *y_rng, int *uv_rng,
+ enum AVColorRange rng, int depth)
+{
+ switch (rng) {
+ case AVCOL_RANGE_UNSPECIFIED: {
+ ColorSpaceContext *s = ctx->priv;
+
+ if (!s->did_warn_range) {
+ av_log(ctx, AV_LOG_WARNING, "Input range not set, assuming tv/mpeg\n");
+ s->did_warn_range = 1;
+ }
+ }
+ // fall-through
+ case AVCOL_RANGE_MPEG:
+ *off = 16 << (depth - 8);
+ *y_rng = 219 << (depth - 8);
+ *uv_rng = 224 << (depth - 8);
+ break;
+ case AVCOL_RANGE_JPEG:
+ *off = 0;
+ *y_rng = *uv_rng = (256 << (depth - 8)) - 1;
+ break;
+ default:
+ return AVERROR(EINVAL);
+ }
+
+ return 0;
+}
+
+static int create_filtergraph(AVFilterContext *ctx,
+ const AVFrame *in, const AVFrame *out)
+{
+ ColorSpaceContext *s = ctx->priv;
+ const AVPixFmtDescriptor *in_desc = av_pix_fmt_desc_get(in->format);
+ const AVPixFmtDescriptor *out_desc = av_pix_fmt_desc_get(out->format);
+ int emms = 0, m, n, o, res, fmt_identical, redo_yuv2rgb = 0, redo_rgb2yuv = 0;
+
+#define supported_depth(d) ((d) == 8 || (d) == 10 || (d) == 12)
+#define supported_subsampling(lcw, lch) \
+ (((lcw) == 0 && (lch) == 0) || ((lcw) == 1 && (lch) == 0) || ((lcw) == 1 && (lch) == 1))
+#define supported_format(d) \
+ ((d) != NULL && (d)->nb_components == 3 && \
+ !((d)->flags & AV_PIX_FMT_FLAG_RGB) && \
+ supported_depth((d)->comp[0].depth) && \
+ supported_subsampling((d)->log2_chroma_w, (d)->log2_chroma_h))
+
+ if (!supported_format(in_desc)) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Unsupported input format %d (%s) or bitdepth (%d)\n",
+ in->format, av_get_pix_fmt_name(in->format),
+ in_desc ? in_desc->comp[0].depth : -1);
+ return AVERROR(EINVAL);
+ }
+ if (!supported_format(out_desc)) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Unsupported output format %d (%s) or bitdepth (%d)\n",
+ out->format, av_get_pix_fmt_name(out->format),
+ out_desc ? out_desc->comp[0].depth : -1);
+ return AVERROR(EINVAL);
+ }
+
+ if (in->color_primaries != s->in_prm) s->in_primaries = NULL;
+ if (out->color_primaries != s->out_prm) s->out_primaries = NULL;
+ if (in->color_trc != s->in_trc) s->in_txchr = NULL;
+ if (out->color_trc != s->out_trc) s->out_txchr = NULL;
+ if (in->colorspace != s->in_csp ||
+ in->color_range != s->in_rng) s->in_lumacoef = NULL;
+ if (out->colorspace != s->out_csp ||
+ out->color_range != s->out_rng) s->out_lumacoef = NULL;
+
+ if (!s->out_primaries || !s->in_primaries) {
+ s->in_prm = in->color_primaries;
+ if (s->user_iall != CS_UNSPECIFIED)
+ s->in_prm = default_prm[FFMIN(s->user_iall, CS_NB)];
+ if (s->user_iprm != AVCOL_PRI_UNSPECIFIED)
+ s->in_prm = s->user_iprm;
+ s->in_primaries = get_color_primaries(s->in_prm);
+ if (!s->in_primaries) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Unsupported input primaries %d (%s)\n",
+ s->in_prm, av_color_primaries_name(s->in_prm));
+ return AVERROR(EINVAL);
+ }
+ s->out_prm = out->color_primaries;
+ s->out_primaries = get_color_primaries(s->out_prm);
+ if (!s->out_primaries) {
+ if (s->out_prm == AVCOL_PRI_UNSPECIFIED) {
+ if (s->user_all == CS_UNSPECIFIED) {
+ av_log(ctx, AV_LOG_ERROR, "Please specify output primaries\n");
+ } else {
+ av_log(ctx, AV_LOG_ERROR,
+ "Unsupported output color property %d\n", s->user_all);
+ }
+ } else {
+ av_log(ctx, AV_LOG_ERROR,
+ "Unsupported output primaries %d (%s)\n",
+ s->out_prm, av_color_primaries_name(s->out_prm));
+ }
+ return AVERROR(EINVAL);
+ }
+ s->lrgb2lrgb_passthrough = !memcmp(s->in_primaries, s->out_primaries,
+ sizeof(*s->in_primaries));
+ if (!s->lrgb2lrgb_passthrough) {
+ double rgb2xyz[3][3], xyz2rgb[3][3], rgb2rgb[3][3];
+
+ fill_rgb2xyz_table(s->out_primaries, rgb2xyz);
+ invert_matrix3x3(rgb2xyz, xyz2rgb);
+ fill_rgb2xyz_table(s->in_primaries, rgb2xyz);
+ if (s->out_primaries->wp != s->in_primaries->wp &&
+ s->wp_adapt != WP_ADAPT_IDENTITY) {
+ double wpconv[3][3], tmp[3][3];
+
+ fill_whitepoint_conv_table(wpconv, s->wp_adapt, s->in_primaries->wp,
+ s->out_primaries->wp);
+ mul3x3(tmp, rgb2xyz, wpconv);
+ mul3x3(rgb2rgb, tmp, xyz2rgb);
+ } else {
+ mul3x3(rgb2rgb, rgb2xyz, xyz2rgb);
+ }
+ for (m = 0; m < 3; m++)
+ for (n = 0; n < 3; n++) {
+ s->lrgb2lrgb_coeffs[m][n][0] = lrint(16384.0 * rgb2rgb[m][n]);
+ for (o = 1; o < 8; o++)
+ s->lrgb2lrgb_coeffs[m][n][o] = s->lrgb2lrgb_coeffs[m][n][0];
+ }
+
+ emms = 1;
+ }
+ }
+
+ if (!s->in_txchr) {
+ av_freep(&s->lin_lut);
+ s->in_trc = in->color_trc;
+ if (s->user_iall != CS_UNSPECIFIED)
+ s->in_trc = default_trc[FFMIN(s->user_iall, CS_NB)];
+ if (s->user_itrc != AVCOL_TRC_UNSPECIFIED)
+ s->in_trc = s->user_itrc;
+ s->in_txchr = get_transfer_characteristics(s->in_trc);
+ if (!s->in_txchr) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Unsupported input transfer characteristics %d (%s)\n",
+ s->in_trc, av_color_transfer_name(s->in_trc));
+ return AVERROR(EINVAL);
+ }
+ }
+
+ if (!s->out_txchr) {
+ av_freep(&s->lin_lut);
+ s->out_trc = out->color_trc;
+ s->out_txchr = get_transfer_characteristics(s->out_trc);
+ if (!s->out_txchr) {
+ if (s->out_trc == AVCOL_TRC_UNSPECIFIED) {
+ if (s->user_all == CS_UNSPECIFIED) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Please specify output transfer characteristics\n");
+ } else {
+ av_log(ctx, AV_LOG_ERROR,
+ "Unsupported output color property %d\n", s->user_all);
+ }
+ } else {
+ av_log(ctx, AV_LOG_ERROR,
+ "Unsupported output transfer characteristics %d (%s)\n",
+ s->out_trc, av_color_transfer_name(s->out_trc));
+ }
+ return AVERROR(EINVAL);
+ }
+ }
+
+ s->rgb2rgb_passthrough = s->fast_mode || (s->lrgb2lrgb_passthrough &&
+ !memcmp(s->in_txchr, s->out_txchr, sizeof(*s->in_txchr)));
+ if (!s->rgb2rgb_passthrough && !s->lin_lut) {
+ res = fill_gamma_table(s);
+ if (res < 0)
+ return res;
+ emms = 1;
+ }
+
+ if (!s->in_lumacoef) {
+ s->in_csp = in->colorspace;
+ if (s->user_iall != CS_UNSPECIFIED)
+ s->in_csp = default_csp[FFMIN(s->user_iall, CS_NB)];
+ if (s->user_icsp != AVCOL_SPC_UNSPECIFIED)
+ s->in_csp = s->user_icsp;
+ s->in_rng = in->color_range;
+ if (s->user_irng != AVCOL_RANGE_UNSPECIFIED)
+ s->in_rng = s->user_irng;
+ s->in_lumacoef = get_luma_coefficients(s->in_csp);
+ if (!s->in_lumacoef) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Unsupported input colorspace %d (%s)\n",
+ s->in_csp, av_color_space_name(s->in_csp));
+ return AVERROR(EINVAL);
+ }
+ redo_yuv2rgb = 1;
+ }
+
+ if (!s->out_lumacoef) {
+ s->out_csp = out->colorspace;
+ s->out_rng = out->color_range;
+ s->out_lumacoef = get_luma_coefficients(s->out_csp);
+ if (!s->out_lumacoef) {
+ if (s->out_csp == AVCOL_SPC_UNSPECIFIED) {
+ if (s->user_all == CS_UNSPECIFIED) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Please specify output transfer characteristics\n");
+ } else {
+ av_log(ctx, AV_LOG_ERROR,
+ "Unsupported output color property %d\n", s->user_all);
+ }
+ } else {
+ av_log(ctx, AV_LOG_ERROR,
+ "Unsupported output transfer characteristics %d (%s)\n",
+ s->out_csp, av_color_space_name(s->out_csp));
+ }
+ return AVERROR(EINVAL);
+ }
+ redo_rgb2yuv = 1;
+ }
+
+ fmt_identical = in_desc->log2_chroma_h == out_desc->log2_chroma_h &&
+ in_desc->log2_chroma_w == out_desc->log2_chroma_w;
+ s->yuv2yuv_fastmode = s->rgb2rgb_passthrough && fmt_identical;
+ s->yuv2yuv_passthrough = s->yuv2yuv_fastmode && s->in_rng == s->out_rng &&
+ !memcmp(s->in_lumacoef, s->out_lumacoef,
+ sizeof(*s->in_lumacoef)) &&
+ in_desc->comp[0].depth == out_desc->comp[0].depth;
+ if (!s->yuv2yuv_passthrough) {
+ if (redo_yuv2rgb) {
+ double rgb2yuv[3][3], (*yuv2rgb)[3] = s->yuv2rgb_dbl_coeffs;
+ int off, bits, in_rng;
+
+ res = get_range_off(ctx, &off, &s->in_y_rng, &s->in_uv_rng,
+ s->in_rng, in_desc->comp[0].depth);
+ if (res < 0) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Unsupported input color range %d (%s)\n",
+ s->in_rng, av_color_range_name(s->in_rng));
+ return res;
+ }
+ for (n = 0; n < 8; n++)
+ s->yuv_offset[0][n] = off;
+ fill_rgb2yuv_table(s->in_lumacoef, rgb2yuv);
+ invert_matrix3x3(rgb2yuv, yuv2rgb);
+ bits = 1 << (in_desc->comp[0].depth - 1);
+ for (n = 0; n < 3; n++) {
+ for (in_rng = s->in_y_rng, m = 0; m < 3; m++, in_rng = s->in_uv_rng) {
+ s->yuv2rgb_coeffs[n][m][0] = lrint(28672 * bits * yuv2rgb[n][m] / in_rng);
+ for (o = 1; o < 8; o++)
+ s->yuv2rgb_coeffs[n][m][o] = s->yuv2rgb_coeffs[n][m][0];
+ }
+ }
+ av_assert2(s->yuv2rgb_coeffs[0][1][0] == 0);
+ av_assert2(s->yuv2rgb_coeffs[2][2][0] == 0);
+ av_assert2(s->yuv2rgb_coeffs[0][0][0] == s->yuv2rgb_coeffs[1][0][0]);
+ av_assert2(s->yuv2rgb_coeffs[0][0][0] == s->yuv2rgb_coeffs[2][0][0]);
+ s->yuv2rgb = s->dsp.yuv2rgb[(in_desc->comp[0].depth - 8) >> 1]
+ [in_desc->log2_chroma_h + in_desc->log2_chroma_w];
+ emms = 1;
+ }
+
+ if (redo_rgb2yuv) {
+ double (*rgb2yuv)[3] = s->rgb2yuv_dbl_coeffs;
+ int off, out_rng, bits;
+
+ res = get_range_off(ctx, &off, &s->out_y_rng, &s->out_uv_rng,
+ s->out_rng, out_desc->comp[0].depth);
+ if (res < 0) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Unsupported output color range %d (%s)\n",
+ s->out_rng, av_color_range_name(s->out_rng));
+ return res;
+ }
+ for (n = 0; n < 8; n++)
+ s->yuv_offset[1][n] = off;
+ fill_rgb2yuv_table(s->out_lumacoef, rgb2yuv);
+ bits = 1 << (29 - out_desc->comp[0].depth);
+ for (out_rng = s->out_y_rng, n = 0; n < 3; n++, out_rng = s->out_uv_rng) {
+ for (m = 0; m < 3; m++) {
+ s->rgb2yuv_coeffs[n][m][0] = lrint(bits * out_rng * rgb2yuv[n][m] / 28672);
+ for (o = 1; o < 8; o++)
+ s->rgb2yuv_coeffs[n][m][o] = s->rgb2yuv_coeffs[n][m][0];
+ }
+ }
+ av_assert2(s->rgb2yuv_coeffs[1][2][0] == s->rgb2yuv_coeffs[2][0][0]);
+ s->rgb2yuv = s->dsp.rgb2yuv[(out_desc->comp[0].depth - 8) >> 1]
+ [out_desc->log2_chroma_h + out_desc->log2_chroma_w];
+ s->rgb2yuv_fsb = s->dsp.rgb2yuv_fsb[(out_desc->comp[0].depth - 8) >> 1]
+ [out_desc->log2_chroma_h + out_desc->log2_chroma_w];
+ emms = 1;
+ }
+
+ if (s->yuv2yuv_fastmode && (redo_yuv2rgb || redo_rgb2yuv)) {
+ int idepth = in_desc->comp[0].depth, odepth = out_desc->comp[0].depth;
+ double (*rgb2yuv)[3] = s->rgb2yuv_dbl_coeffs;
+ double (*yuv2rgb)[3] = s->yuv2rgb_dbl_coeffs;
+ double yuv2yuv[3][3];
+ int in_rng, out_rng;
+
+ mul3x3(yuv2yuv, yuv2rgb, rgb2yuv);
+ for (out_rng = s->out_y_rng, m = 0; m < 3; m++, out_rng = s->out_uv_rng) {
+ for (in_rng = s->in_y_rng, n = 0; n < 3; n++, in_rng = s->in_uv_rng) {
+ s->yuv2yuv_coeffs[m][n][0] =
+ lrint(16384 * yuv2yuv[m][n] * out_rng * (1 << idepth) /
+ (in_rng * (1 << odepth)));
+ for (o = 1; o < 8; o++)
+ s->yuv2yuv_coeffs[m][n][o] = s->yuv2yuv_coeffs[m][n][0];
+ }
+ }
+ av_assert2(s->yuv2yuv_coeffs[1][0][0] == 0);
+ av_assert2(s->yuv2yuv_coeffs[2][0][0] == 0);
+ s->yuv2yuv = s->dsp.yuv2yuv[(idepth - 8) >> 1][(odepth - 8) >> 1]
+ [in_desc->log2_chroma_h + in_desc->log2_chroma_w];
+ }
+ }
+
+ if (emms)
+ emms_c();
+
+ return 0;
+}
+
+static int init(AVFilterContext *ctx)
+{
+ ColorSpaceContext *s = ctx->priv;
+
+ ff_colorspacedsp_init(&s->dsp);
+
+ return 0;
+}
+
+static void uninit(AVFilterContext *ctx)
+{
+ ColorSpaceContext *s = ctx->priv;
+
+ av_freep(&s->rgb[0]);
+ av_freep(&s->rgb[1]);
+ av_freep(&s->rgb[2]);
+ s->rgb_sz = 0;
+ av_freep(&s->dither_scratch_base[0][0]);
+ av_freep(&s->dither_scratch_base[0][1]);
+ av_freep(&s->dither_scratch_base[1][0]);
+ av_freep(&s->dither_scratch_base[1][1]);
+ av_freep(&s->dither_scratch_base[2][0]);
+ av_freep(&s->dither_scratch_base[2][1]);
+
+ av_freep(&s->lin_lut);
+}
+
+static int filter_frame(AVFilterLink *link, AVFrame *in)
+{
+ AVFilterContext *ctx = link->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ ColorSpaceContext *s = ctx->priv;
+ // FIXME if yuv2yuv_passthrough, don't get a new buffer but use the
+ // input one if it is writable *OR* the actual literal values of in_*
+ // and out_* are identical (not just their respective properties)
+ AVFrame *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ int res;
+ ptrdiff_t rgb_stride = FFALIGN(in->width * sizeof(int16_t), 32);
+ unsigned rgb_sz = rgb_stride * in->height;
+ struct ThreadData td;
+
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ res = av_frame_copy_props(out, in);
+ if (res < 0) {
+ av_frame_free(&in);
+ return res;
+ }
+
+ out->color_primaries = s->user_prm == AVCOL_PRI_UNSPECIFIED ?
+ default_prm[FFMIN(s->user_all, CS_NB)] : s->user_prm;
+ if (s->user_trc == AVCOL_TRC_UNSPECIFIED) {
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(out->format);
+
+ out->color_trc = default_trc[FFMIN(s->user_all, CS_NB)];
+ if (out->color_trc == AVCOL_TRC_BT2020_10 && desc && desc->comp[0].depth >= 12)
+ out->color_trc = AVCOL_TRC_BT2020_12;
+ } else {
+ out->color_trc = s->user_trc;
+ }
+ out->colorspace = s->user_csp == AVCOL_SPC_UNSPECIFIED ?
+ default_csp[FFMIN(s->user_all, CS_NB)] : s->user_csp;
+ out->color_range = s->user_rng == AVCOL_RANGE_UNSPECIFIED ?
+ in->color_range : s->user_rng;
+ if (rgb_sz != s->rgb_sz) {
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(out->format);
+ int uvw = in->width >> desc->log2_chroma_w;
+
+ av_freep(&s->rgb[0]);
+ av_freep(&s->rgb[1]);
+ av_freep(&s->rgb[2]);
+ s->rgb_sz = 0;
+ av_freep(&s->dither_scratch_base[0][0]);
+ av_freep(&s->dither_scratch_base[0][1]);
+ av_freep(&s->dither_scratch_base[1][0]);
+ av_freep(&s->dither_scratch_base[1][1]);
+ av_freep(&s->dither_scratch_base[2][0]);
+ av_freep(&s->dither_scratch_base[2][1]);
+
+ s->rgb[0] = av_malloc(rgb_sz);
+ s->rgb[1] = av_malloc(rgb_sz);
+ s->rgb[2] = av_malloc(rgb_sz);
+ s->dither_scratch_base[0][0] =
+ av_malloc(sizeof(*s->dither_scratch_base[0][0]) * (in->width + 4));
+ s->dither_scratch_base[0][1] =
+ av_malloc(sizeof(*s->dither_scratch_base[0][1]) * (in->width + 4));
+ s->dither_scratch_base[1][0] =
+ av_malloc(sizeof(*s->dither_scratch_base[1][0]) * (uvw + 4));
+ s->dither_scratch_base[1][1] =
+ av_malloc(sizeof(*s->dither_scratch_base[1][1]) * (uvw + 4));
+ s->dither_scratch_base[2][0] =
+ av_malloc(sizeof(*s->dither_scratch_base[2][0]) * (uvw + 4));
+ s->dither_scratch_base[2][1] =
+ av_malloc(sizeof(*s->dither_scratch_base[2][1]) * (uvw + 4));
+ s->dither_scratch[0][0] = &s->dither_scratch_base[0][0][1];
+ s->dither_scratch[0][1] = &s->dither_scratch_base[0][1][1];
+ s->dither_scratch[1][0] = &s->dither_scratch_base[1][0][1];
+ s->dither_scratch[1][1] = &s->dither_scratch_base[1][1][1];
+ s->dither_scratch[2][0] = &s->dither_scratch_base[2][0][1];
+ s->dither_scratch[2][1] = &s->dither_scratch_base[2][1][1];
+ if (!s->rgb[0] || !s->rgb[1] || !s->rgb[2] ||
+ !s->dither_scratch_base[0][0] || !s->dither_scratch_base[0][1] ||
+ !s->dither_scratch_base[1][0] || !s->dither_scratch_base[1][1] ||
+ !s->dither_scratch_base[2][0] || !s->dither_scratch_base[2][1]) {
+ uninit(ctx);
+ return AVERROR(ENOMEM);
+ }
+ s->rgb_sz = rgb_sz;
+ }
+ res = create_filtergraph(ctx, in, out);
+ if (res < 0)
+ return res;
+ s->rgb_stride = rgb_stride / sizeof(int16_t);
+ td.in = in;
+ td.out = out;
+ td.in_linesize[0] = in->linesize[0];
+ td.in_linesize[1] = in->linesize[1];
+ td.in_linesize[2] = in->linesize[2];
+ td.out_linesize[0] = out->linesize[0];
+ td.out_linesize[1] = out->linesize[1];
+ td.out_linesize[2] = out->linesize[2];
+ td.in_ss_h = av_pix_fmt_desc_get(in->format)->log2_chroma_h;
+ td.out_ss_h = av_pix_fmt_desc_get(out->format)->log2_chroma_h;
+ if (s->yuv2yuv_passthrough) {
+ res = av_frame_copy(out, in);
+ if (res < 0)
+ return res;
+ } else {
+ ctx->internal->execute(ctx, convert, &td, NULL,
+ FFMIN((in->height + 1) >> 1, ff_filter_get_nb_threads(ctx)));
+ }
+ av_frame_free(&in);
+
+ return ff_filter_frame(outlink, out);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P,
+ AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
+ AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12,
+ AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P,
+ AV_PIX_FMT_NONE
+ };
+ int res;
+ ColorSpaceContext *s = ctx->priv;
+ AVFilterFormats *formats = ff_make_format_list(pix_fmts);
+
+ if (!formats)
+ return AVERROR(ENOMEM);
+ if (s->user_format == AV_PIX_FMT_NONE)
+ return ff_set_common_formats(ctx, formats);
+ res = ff_formats_ref(formats, &ctx->inputs[0]->out_formats);
+ if (res < 0)
+ return res;
+ formats = NULL;
+ res = ff_add_format(&formats, s->user_format);
+ if (res < 0)
+ return res;
+
+ return ff_formats_ref(formats, &ctx->outputs[0]->in_formats);
+}
+
+static int config_props(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->dst;
+ AVFilterLink *inlink = outlink->src->inputs[0];
+
+ if (inlink->w % 2 || inlink->h % 2) {
+ av_log(ctx, AV_LOG_ERROR, "Invalid odd size (%dx%d)\n",
+ inlink->w, inlink->h);
+ return AVERROR_PATCHWELCOME;
+ }
+
+ outlink->w = inlink->w;
+ outlink->h = inlink->h;
+ outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
+ outlink->time_base = inlink->time_base;
+
+ return 0;
+}
+
+#define OFFSET(x) offsetof(ColorSpaceContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM
+#define ENUM(x, y, z) { x, "", 0, AV_OPT_TYPE_CONST, { .i64 = y }, INT_MIN, INT_MAX, FLAGS, z }
+
+static const AVOption colorspace_options[] = {
+ { "all", "Set all color properties together",
+ OFFSET(user_all), AV_OPT_TYPE_INT, { .i64 = CS_UNSPECIFIED },
+ CS_UNSPECIFIED, CS_NB - 1, FLAGS, "all" },
+ ENUM("bt470m", CS_BT470M, "all"),
+ ENUM("bt470bg", CS_BT470BG, "all"),
+ ENUM("bt601-6-525", CS_BT601_6_525, "all"),
+ ENUM("bt601-6-625", CS_BT601_6_625, "all"),
+ ENUM("bt709", CS_BT709, "all"),
+ ENUM("smpte170m", CS_SMPTE170M, "all"),
+ ENUM("smpte240m", CS_SMPTE240M, "all"),
+ ENUM("bt2020", CS_BT2020, "all"),
+
+ { "space", "Output colorspace",
+ OFFSET(user_csp), AV_OPT_TYPE_INT, { .i64 = AVCOL_SPC_UNSPECIFIED },
+ AVCOL_SPC_RGB, AVCOL_SPC_NB - 1, FLAGS, "csp"},
+ ENUM("bt709", AVCOL_SPC_BT709, "csp"),
+ ENUM("fcc", AVCOL_SPC_FCC, "csp"),
+ ENUM("bt470bg", AVCOL_SPC_BT470BG, "csp"),
+ ENUM("smpte170m", AVCOL_SPC_SMPTE170M, "csp"),
+ ENUM("smpte240m", AVCOL_SPC_SMPTE240M, "csp"),
+ ENUM("ycgco", AVCOL_SPC_YCGCO, "csp"),
+ ENUM("bt2020ncl", AVCOL_SPC_BT2020_NCL, "csp"),
+
+ { "range", "Output color range",
+ OFFSET(user_rng), AV_OPT_TYPE_INT, { .i64 = AVCOL_RANGE_UNSPECIFIED },
+ AVCOL_RANGE_UNSPECIFIED, AVCOL_RANGE_NB - 1, FLAGS, "rng" },
+ ENUM("tv", AVCOL_RANGE_MPEG, "rng"),
+ ENUM("mpeg", AVCOL_RANGE_MPEG, "rng"),
+ ENUM("pc", AVCOL_RANGE_JPEG, "rng"),
+ ENUM("jpeg", AVCOL_RANGE_JPEG, "rng"),
+
+ { "primaries", "Output color primaries",
+ OFFSET(user_prm), AV_OPT_TYPE_INT, { .i64 = AVCOL_PRI_UNSPECIFIED },
+ AVCOL_PRI_RESERVED0, AVCOL_PRI_NB - 1, FLAGS, "prm" },
+ ENUM("bt709", AVCOL_PRI_BT709, "prm"),
+ ENUM("bt470m", AVCOL_PRI_BT470M, "prm"),
+ ENUM("bt470bg", AVCOL_PRI_BT470BG, "prm"),
+ ENUM("smpte170m", AVCOL_PRI_SMPTE170M, "prm"),
+ ENUM("smpte240m", AVCOL_PRI_SMPTE240M, "prm"),
+ ENUM("film", AVCOL_PRI_FILM, "prm"),
+ ENUM("smpte431", AVCOL_PRI_SMPTE431, "prm"),
+ ENUM("smpte432", AVCOL_PRI_SMPTE432, "prm"),
+ ENUM("bt2020", AVCOL_PRI_BT2020, "prm"),
+
+ { "trc", "Output transfer characteristics",
+ OFFSET(user_trc), AV_OPT_TYPE_INT, { .i64 = AVCOL_TRC_UNSPECIFIED },
+ AVCOL_TRC_RESERVED0, AVCOL_TRC_NB - 1, FLAGS, "trc" },
+ ENUM("bt709", AVCOL_TRC_BT709, "trc"),
+ ENUM("bt470m", AVCOL_TRC_GAMMA22, "trc"),
+ ENUM("gamma22", AVCOL_TRC_GAMMA22, "trc"),
+ ENUM("bt470bg", AVCOL_TRC_GAMMA28, "trc"),
+ ENUM("gamma28", AVCOL_TRC_GAMMA28, "trc"),
+ ENUM("smpte170m", AVCOL_TRC_SMPTE170M, "trc"),
+ ENUM("smpte240m", AVCOL_TRC_SMPTE240M, "trc"),
+ ENUM("srgb", AVCOL_TRC_IEC61966_2_1, "trc"),
+ ENUM("iec61966-2-1", AVCOL_TRC_IEC61966_2_1, "trc"),
+ ENUM("xvycc", AVCOL_TRC_IEC61966_2_4, "trc"),
+ ENUM("iec61966-2-4", AVCOL_TRC_IEC61966_2_4, "trc"),
+ ENUM("bt2020-10", AVCOL_TRC_BT2020_10, "trc"),
+ ENUM("bt2020-12", AVCOL_TRC_BT2020_12, "trc"),
+
+ { "format", "Output pixel format",
+ OFFSET(user_format), AV_OPT_TYPE_INT, { .i64 = AV_PIX_FMT_NONE },
+ AV_PIX_FMT_NONE, AV_PIX_FMT_GBRAP12LE, FLAGS, "fmt" },
+ ENUM("yuv420p", AV_PIX_FMT_YUV420P, "fmt"),
+ ENUM("yuv420p10", AV_PIX_FMT_YUV420P10, "fmt"),
+ ENUM("yuv420p12", AV_PIX_FMT_YUV420P12, "fmt"),
+ ENUM("yuv422p", AV_PIX_FMT_YUV422P, "fmt"),
+ ENUM("yuv422p10", AV_PIX_FMT_YUV422P10, "fmt"),
+ ENUM("yuv422p12", AV_PIX_FMT_YUV422P12, "fmt"),
+ ENUM("yuv444p", AV_PIX_FMT_YUV444P, "fmt"),
+ ENUM("yuv444p10", AV_PIX_FMT_YUV444P10, "fmt"),
+ ENUM("yuv444p12", AV_PIX_FMT_YUV444P12, "fmt"),
+
+ { "fast", "Ignore primary chromaticity and gamma correction",
+ OFFSET(fast_mode), AV_OPT_TYPE_BOOL, { .i64 = 0 },
+ 0, 1, FLAGS },
+
+ { "dither", "Dithering mode",
+ OFFSET(dither), AV_OPT_TYPE_INT, { .i64 = DITHER_NONE },
+ DITHER_NONE, DITHER_NB - 1, FLAGS, "dither" },
+ ENUM("none", DITHER_NONE, "dither"),
+ ENUM("fsb", DITHER_FSB, "dither"),
+
+ { "wpadapt", "Whitepoint adaptation method",
+ OFFSET(wp_adapt), AV_OPT_TYPE_INT, { .i64 = WP_ADAPT_BRADFORD },
+ WP_ADAPT_BRADFORD, NB_WP_ADAPT - 1, FLAGS, "wpadapt" },
+ ENUM("bradford", WP_ADAPT_BRADFORD, "wpadapt"),
+ ENUM("vonkries", WP_ADAPT_VON_KRIES, "wpadapt"),
+ ENUM("identity", WP_ADAPT_IDENTITY, "wpadapt"),
+
+ { "iall", "Set all input color properties together",
+ OFFSET(user_iall), AV_OPT_TYPE_INT, { .i64 = CS_UNSPECIFIED },
+ CS_UNSPECIFIED, CS_NB - 1, FLAGS, "all" },
+ { "ispace", "Input colorspace",
+ OFFSET(user_icsp), AV_OPT_TYPE_INT, { .i64 = AVCOL_SPC_UNSPECIFIED },
+ AVCOL_PRI_RESERVED0, AVCOL_PRI_NB - 1, FLAGS, "csp" },
+ { "irange", "Input color range",
+ OFFSET(user_irng), AV_OPT_TYPE_INT, { .i64 = AVCOL_RANGE_UNSPECIFIED },
+ AVCOL_RANGE_UNSPECIFIED, AVCOL_RANGE_NB - 1, FLAGS, "rng" },
+ { "iprimaries", "Input color primaries",
+ OFFSET(user_iprm), AV_OPT_TYPE_INT, { .i64 = AVCOL_PRI_UNSPECIFIED },
+ AVCOL_PRI_RESERVED0, AVCOL_PRI_NB - 1, FLAGS, "prm" },
+ { "itrc", "Input transfer characteristics",
+ OFFSET(user_itrc), AV_OPT_TYPE_INT, { .i64 = AVCOL_TRC_UNSPECIFIED },
+ AVCOL_TRC_RESERVED0, AVCOL_TRC_NB - 1, FLAGS, "trc" },
+
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(colorspace);
+
+static const AVFilterPad inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_props,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_colorspace = {
+ .name = "colorspace",
+ .description = NULL_IF_CONFIG_SMALL("Convert between colorspaces."),
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .priv_size = sizeof(ColorSpaceContext),
+ .priv_class = &colorspace_class,
+ .inputs = inputs,
+ .outputs = outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
+};
diff --git a/libavfilter/vf_convolution.c b/libavfilter/vf_convolution.c
new file mode 100644
index 0000000000..41e92497c3
--- /dev/null
+++ b/libavfilter/vf_convolution.c
@@ -0,0 +1,847 @@
+/*
+ * Copyright (c) 2012-2013 Oka Motofumi (chikuzen.mo at gmail dot com)
+ * Copyright (c) 2015 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/avstring.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+typedef struct ConvolutionContext {
+ const AVClass *class;
+
+ char *matrix_str[4];
+ float rdiv[4];
+ float bias[4];
+ float scale;
+ float delta;
+ int planes;
+
+ int size[4];
+ int depth;
+ int bpc;
+ int bstride;
+ uint8_t *buffer;
+ uint8_t **bptrs;
+ int nb_planes;
+ int nb_threads;
+ int planewidth[4];
+ int planeheight[4];
+ int matrix[4][25];
+ int matrix_length[4];
+ int copy[4];
+
+ int (*filter[4])(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs);
+} ConvolutionContext;
+
+#define OFFSET(x) offsetof(ConvolutionContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption convolution_options[] = {
+ { "0m", "set matrix for 1st plane", OFFSET(matrix_str[0]), AV_OPT_TYPE_STRING, {.str="0 0 0 0 1 0 0 0 0"}, 0, 0, FLAGS },
+ { "1m", "set matrix for 2nd plane", OFFSET(matrix_str[1]), AV_OPT_TYPE_STRING, {.str="0 0 0 0 1 0 0 0 0"}, 0, 0, FLAGS },
+ { "2m", "set matrix for 3rd plane", OFFSET(matrix_str[2]), AV_OPT_TYPE_STRING, {.str="0 0 0 0 1 0 0 0 0"}, 0, 0, FLAGS },
+ { "3m", "set matrix for 4th plane", OFFSET(matrix_str[3]), AV_OPT_TYPE_STRING, {.str="0 0 0 0 1 0 0 0 0"}, 0, 0, FLAGS },
+ { "0rdiv", "set rdiv for 1st plane", OFFSET(rdiv[0]), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, 0.0, INT_MAX, FLAGS},
+ { "1rdiv", "set rdiv for 2nd plane", OFFSET(rdiv[1]), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, 0.0, INT_MAX, FLAGS},
+ { "2rdiv", "set rdiv for 3rd plane", OFFSET(rdiv[2]), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, 0.0, INT_MAX, FLAGS},
+ { "3rdiv", "set rdiv for 4th plane", OFFSET(rdiv[3]), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, 0.0, INT_MAX, FLAGS},
+ { "0bias", "set bias for 1st plane", OFFSET(bias[0]), AV_OPT_TYPE_FLOAT, {.dbl=0.0}, 0.0, INT_MAX, FLAGS},
+ { "1bias", "set bias for 2nd plane", OFFSET(bias[1]), AV_OPT_TYPE_FLOAT, {.dbl=0.0}, 0.0, INT_MAX, FLAGS},
+ { "2bias", "set bias for 3rd plane", OFFSET(bias[2]), AV_OPT_TYPE_FLOAT, {.dbl=0.0}, 0.0, INT_MAX, FLAGS},
+ { "3bias", "set bias for 4th plane", OFFSET(bias[3]), AV_OPT_TYPE_FLOAT, {.dbl=0.0}, 0.0, INT_MAX, FLAGS},
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(convolution);
+
+static const int same3x3[9] = {0, 0, 0,
+ 0, 1, 0,
+ 0, 0, 0};
+
+static const int same5x5[25] = {0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0,
+ 0, 0, 1, 0, 0,
+ 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0};
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P,
+ AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
+ AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
+ AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
+ AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
+ AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV440P12,
+ AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV444P14,
+ AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
+ AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9,
+ AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10,
+ AV_PIX_FMT_YUVA420P16, AV_PIX_FMT_YUVA422P16, AV_PIX_FMT_YUVA444P16,
+ AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
+ AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16,
+ AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRAP12, AV_PIX_FMT_GBRAP16,
+ AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY16,
+ AV_PIX_FMT_NONE
+ };
+
+ return ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+}
+
+static inline void line_copy8(uint8_t *line, const uint8_t *srcp, int width, int mergin)
+{
+ int i;
+
+ memcpy(line, srcp, width);
+
+ for (i = mergin; i > 0; i--) {
+ line[-i] = line[i];
+ line[width - 1 + i] = line[width - 1 - i];
+ }
+}
+
+static inline void line_copy16(uint16_t *line, const uint16_t *srcp, int width, int mergin)
+{
+ int i;
+
+ memcpy(line, srcp, width * 2);
+
+ for (i = mergin; i > 0; i--) {
+ line[-i] = line[i];
+ line[width - 1 + i] = line[width - 1 - i];
+ }
+}
+
+typedef struct ThreadData {
+ AVFrame *in, *out;
+ int plane;
+} ThreadData;
+
+static int filter16_prewitt(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
+{
+ ConvolutionContext *s = ctx->priv;
+ ThreadData *td = arg;
+ AVFrame *in = td->in;
+ AVFrame *out = td->out;
+ const int plane = td->plane;
+ const int peak = (1 << s->depth) - 1;
+ const int stride = in->linesize[plane] / 2;
+ const int bstride = s->bstride;
+ const int height = s->planeheight[plane];
+ const int width = s->planewidth[plane];
+ const int slice_start = (height * jobnr) / nb_jobs;
+ const int slice_end = (height * (jobnr+1)) / nb_jobs;
+ const uint16_t *src = (const uint16_t *)in->data[plane] + slice_start * stride;
+ uint16_t *dst = (uint16_t *)out->data[plane] + slice_start * (out->linesize[plane] / 2);
+ const float scale = s->scale;
+ const float delta = s->delta;
+ uint16_t *p0 = (uint16_t *)s->bptrs[jobnr] + 16;
+ uint16_t *p1 = p0 + bstride;
+ uint16_t *p2 = p1 + bstride;
+ uint16_t *orig = p0, *end = p2;
+ int y, x;
+
+ line_copy16(p0, src + stride * (slice_start == 0 ? 1 : -1), width, 1);
+ line_copy16(p1, src, width, 1);
+
+ for (y = slice_start; y < slice_end; y++) {
+ src += stride * (y < height - 1 ? 1 : -1);
+ line_copy16(p2, src, width, 1);
+
+ for (x = 0; x < width; x++) {
+ int suma = p0[x - 1] * -1 +
+ p0[x] * -1 +
+ p0[x + 1] * -1 +
+ p2[x - 1] * 1 +
+ p2[x] * 1 +
+ p2[x + 1] * 1;
+ int sumb = p0[x - 1] * -1 +
+ p0[x + 1] * 1 +
+ p1[x - 1] * -1 +
+ p1[x + 1] * 1 +
+ p2[x - 1] * -1 +
+ p2[x + 1] * 1;
+
+ dst[x] = av_clip(sqrt(suma*suma + sumb*sumb) * scale + delta, 0, peak);
+ }
+
+ p0 = p1;
+ p1 = p2;
+ p2 = (p2 == end) ? orig: p2 + bstride;
+ dst += out->linesize[plane] / 2;
+ }
+
+ return 0;
+}
+
+static int filter16_sobel(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
+{
+ ConvolutionContext *s = ctx->priv;
+ ThreadData *td = arg;
+ AVFrame *in = td->in;
+ AVFrame *out = td->out;
+ const int plane = td->plane;
+ const int peak = (1 << s->depth) - 1;
+ const int stride = in->linesize[plane] / 2;
+ const int bstride = s->bstride;
+ const int height = s->planeheight[plane];
+ const int width = s->planewidth[plane];
+ const int slice_start = (height * jobnr) / nb_jobs;
+ const int slice_end = (height * (jobnr+1)) / nb_jobs;
+ const uint16_t *src = (const uint16_t *)in->data[plane] + slice_start * stride;
+ uint16_t *dst = (uint16_t *)out->data[plane] + slice_start * (out->linesize[plane] / 2);
+ const float scale = s->scale;
+ const float delta = s->delta;
+ uint16_t *p0 = (uint16_t *)s->bptrs[jobnr] + 16;
+ uint16_t *p1 = p0 + bstride;
+ uint16_t *p2 = p1 + bstride;
+ uint16_t *orig = p0, *end = p2;
+ int y, x;
+
+ line_copy16(p0, src + stride * (slice_start == 0 ? 1 : -1), width, 1);
+ line_copy16(p1, src, width, 1);
+
+ for (y = slice_start; y < slice_end; y++) {
+ src += stride * (y < height - 1 ? 1 : -1);
+ line_copy16(p2, src, width, 1);
+
+ for (x = 0; x < width; x++) {
+ int suma = p0[x - 1] * -1 +
+ p0[x] * -2 +
+ p0[x + 1] * -1 +
+ p2[x - 1] * 1 +
+ p2[x] * 2 +
+ p2[x + 1] * 1;
+ int sumb = p0[x - 1] * -1 +
+ p0[x + 1] * 1 +
+ p1[x - 1] * -2 +
+ p1[x + 1] * 2 +
+ p2[x - 1] * -1 +
+ p2[x + 1] * 1;
+
+ dst[x] = av_clip(sqrt(suma*suma + sumb*sumb) * scale + delta, 0, peak);
+ }
+
+ p0 = p1;
+ p1 = p2;
+ p2 = (p2 == end) ? orig: p2 + bstride;
+ dst += out->linesize[plane] / 2;
+ }
+
+ return 0;
+}
+
+static int filter_prewitt(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
+{
+ ConvolutionContext *s = ctx->priv;
+ ThreadData *td = arg;
+ AVFrame *in = td->in;
+ AVFrame *out = td->out;
+ const int plane = td->plane;
+ const int stride = in->linesize[plane];
+ const int bstride = s->bstride;
+ const int height = s->planeheight[plane];
+ const int width = s->planewidth[plane];
+ const int slice_start = (height * jobnr) / nb_jobs;
+ const int slice_end = (height * (jobnr+1)) / nb_jobs;
+ const uint8_t *src = in->data[plane] + slice_start * stride;
+ uint8_t *dst = out->data[plane] + slice_start * out->linesize[plane];
+ const float scale = s->scale;
+ const float delta = s->delta;
+ uint8_t *p0 = s->bptrs[jobnr] + 16;
+ uint8_t *p1 = p0 + bstride;
+ uint8_t *p2 = p1 + bstride;
+ uint8_t *orig = p0, *end = p2;
+ int y, x;
+
+ line_copy8(p0, src + stride * (slice_start == 0 ? 1 : -1), width, 1);
+ line_copy8(p1, src, width, 1);
+
+ for (y = slice_start; y < slice_end; y++) {
+ src += stride * (y < height - 1 ? 1 : -1);
+ line_copy8(p2, src, width, 1);
+
+ for (x = 0; x < width; x++) {
+ int suma = p0[x - 1] * -1 +
+ p0[x] * -1 +
+ p0[x + 1] * -1 +
+ p2[x - 1] * 1 +
+ p2[x] * 1 +
+ p2[x + 1] * 1;
+ int sumb = p0[x - 1] * -1 +
+ p0[x + 1] * 1 +
+ p1[x - 1] * -1 +
+ p1[x + 1] * 1 +
+ p2[x - 1] * -1 +
+ p2[x + 1] * 1;
+
+ dst[x] = av_clip_uint8(sqrt(suma*suma + sumb*sumb) * scale + delta);
+ }
+
+ p0 = p1;
+ p1 = p2;
+ p2 = (p2 == end) ? orig: p2 + bstride;
+ dst += out->linesize[plane];
+ }
+
+ return 0;
+}
+
+static int filter_sobel(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
+{
+ ConvolutionContext *s = ctx->priv;
+ ThreadData *td = arg;
+ AVFrame *in = td->in;
+ AVFrame *out = td->out;
+ const int plane = td->plane;
+ const int stride = in->linesize[plane];
+ const int bstride = s->bstride;
+ const int height = s->planeheight[plane];
+ const int width = s->planewidth[plane];
+ const int slice_start = (height * jobnr) / nb_jobs;
+ const int slice_end = (height * (jobnr+1)) / nb_jobs;
+ const uint8_t *src = in->data[plane] + slice_start * stride;
+ uint8_t *dst = out->data[plane] + slice_start * out->linesize[plane];
+ const float scale = s->scale;
+ const float delta = s->delta;
+ uint8_t *p0 = s->bptrs[jobnr] + 16;
+ uint8_t *p1 = p0 + bstride;
+ uint8_t *p2 = p1 + bstride;
+ uint8_t *orig = p0, *end = p2;
+ int y, x;
+
+ line_copy8(p0, src + stride * (slice_start == 0 ? 1 : -1), width, 1);
+ line_copy8(p1, src, width, 1);
+
+ for (y = slice_start; y < slice_end; y++) {
+ src += stride * (y < height - 1 ? 1 : -1);
+ line_copy8(p2, src, width, 1);
+
+ for (x = 0; x < width; x++) {
+ int suma = p0[x - 1] * -1 +
+ p0[x] * -2 +
+ p0[x + 1] * -1 +
+ p2[x - 1] * 1 +
+ p2[x] * 2 +
+ p2[x + 1] * 1;
+ int sumb = p0[x - 1] * -1 +
+ p0[x + 1] * 1 +
+ p1[x - 1] * -2 +
+ p1[x + 1] * 2 +
+ p2[x - 1] * -1 +
+ p2[x + 1] * 1;
+
+ dst[x] = av_clip_uint8(sqrt(suma*suma + sumb*sumb) * scale + delta);
+ }
+
+ p0 = p1;
+ p1 = p2;
+ p2 = (p2 == end) ? orig: p2 + bstride;
+ dst += out->linesize[plane];
+ }
+
+ return 0;
+}
+
+static int filter16_3x3(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
+{
+ ConvolutionContext *s = ctx->priv;
+ ThreadData *td = arg;
+ AVFrame *in = td->in;
+ AVFrame *out = td->out;
+ const int plane = td->plane;
+ const int peak = (1 << s->depth) - 1;
+ const int stride = in->linesize[plane] / 2;
+ const int bstride = s->bstride;
+ const int height = s->planeheight[plane];
+ const int width = s->planewidth[plane];
+ const int slice_start = (height * jobnr) / nb_jobs;
+ const int slice_end = (height * (jobnr+1)) / nb_jobs;
+ const uint16_t *src = (const uint16_t *)in->data[plane] + slice_start * stride;
+ uint16_t *dst = (uint16_t *)out->data[plane] + slice_start * (out->linesize[plane] / 2);
+ uint16_t *p0 = (uint16_t *)s->bptrs[jobnr] + 16;
+ uint16_t *p1 = p0 + bstride;
+ uint16_t *p2 = p1 + bstride;
+ uint16_t *orig = p0, *end = p2;
+ const int *matrix = s->matrix[plane];
+ const float rdiv = s->rdiv[plane];
+ const float bias = s->bias[plane];
+ int y, x;
+
+ line_copy16(p0, src + stride * (slice_start == 0 ? 1 : -1), width, 1);
+ line_copy16(p1, src, width, 1);
+
+ for (y = slice_start; y < slice_end; y++) {
+ src += stride * (y < height - 1 ? 1 : -1);
+ line_copy16(p2, src, width, 1);
+
+ for (x = 0; x < width; x++) {
+ int sum = p0[x - 1] * matrix[0] +
+ p0[x] * matrix[1] +
+ p0[x + 1] * matrix[2] +
+ p1[x - 1] * matrix[3] +
+ p1[x] * matrix[4] +
+ p1[x + 1] * matrix[5] +
+ p2[x - 1] * matrix[6] +
+ p2[x] * matrix[7] +
+ p2[x + 1] * matrix[8];
+ sum = (int)(sum * rdiv + bias + 0.5f);
+ dst[x] = av_clip(sum, 0, peak);
+ }
+
+ p0 = p1;
+ p1 = p2;
+ p2 = (p2 == end) ? orig: p2 + bstride;
+ dst += out->linesize[plane] / 2;
+ }
+
+ return 0;
+}
+
+static int filter16_5x5(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
+{
+ ConvolutionContext *s = ctx->priv;
+ ThreadData *td = arg;
+ AVFrame *in = td->in;
+ AVFrame *out = td->out;
+ const int plane = td->plane;
+ const int peak = (1 << s->depth) - 1;
+ const int stride = in->linesize[plane] / 2;
+ const int bstride = s->bstride;
+ const int height = s->planeheight[plane];
+ const int width = s->planewidth[plane];
+ const int slice_start = (height * jobnr) / nb_jobs;
+ const int slice_end = (height * (jobnr+1)) / nb_jobs;
+ const uint16_t *src = (const uint16_t *)in->data[plane] + slice_start * stride;
+ uint16_t *dst = (uint16_t *)out->data[plane] + slice_start * (out->linesize[plane] / 2);
+ uint16_t *p0 = (uint16_t *)s->bptrs[jobnr] + 16;
+ uint16_t *p1 = p0 + bstride;
+ uint16_t *p2 = p1 + bstride;
+ uint16_t *p3 = p2 + bstride;
+ uint16_t *p4 = p3 + bstride;
+ uint16_t *orig = p0, *end = p4;
+ const int *matrix = s->matrix[plane];
+ float rdiv = s->rdiv[plane];
+ float bias = s->bias[plane];
+ int y, x, i;
+
+ line_copy16(p0, src + 2 * stride * (slice_start < 2 ? 1 : -1), width, 2);
+ line_copy16(p1, src + stride * (slice_start == 0 ? 1 : -1), width, 2);
+ line_copy16(p2, src, width, 2);
+ src += stride;
+ line_copy16(p3, src, width, 2);
+
+ for (y = slice_start; y < slice_end; y++) {
+ uint16_t *array[] = {
+ p0 - 2, p0 - 1, p0, p0 + 1, p0 + 2,
+ p1 - 2, p1 - 1, p1, p1 + 1, p1 + 2,
+ p2 - 2, p2 - 1, p2, p2 + 1, p2 + 2,
+ p3 - 2, p3 - 1, p3, p3 + 1, p3 + 2,
+ p4 - 2, p4 - 1, p4, p4 + 1, p4 + 2
+ };
+
+ src += stride * (y < height - 2 ? 1 : -1);
+ line_copy16(p4, src, width, 2);
+
+ for (x = 0; x < width; x++) {
+ int sum = 0;
+
+ for (i = 0; i < 25; i++) {
+ sum += *(array[i] + x) * matrix[i];
+ }
+ sum = (int)(sum * rdiv + bias + 0.5f);
+ dst[x] = av_clip(sum, 0, peak);
+ }
+
+ p0 = p1;
+ p1 = p2;
+ p2 = p3;
+ p3 = p4;
+ p4 = (p4 == end) ? orig: p4 + bstride;
+ dst += out->linesize[plane] / 2;
+ }
+
+ return 0;
+}
+
+static int filter_3x3(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
+{
+ ConvolutionContext *s = ctx->priv;
+ ThreadData *td = arg;
+ AVFrame *in = td->in;
+ AVFrame *out = td->out;
+ const int plane = td->plane;
+ const int stride = in->linesize[plane];
+ const int bstride = s->bstride;
+ const int height = s->planeheight[plane];
+ const int width = s->planewidth[plane];
+ const int slice_start = (height * jobnr) / nb_jobs;
+ const int slice_end = (height * (jobnr+1)) / nb_jobs;
+ const uint8_t *src = in->data[plane] + slice_start * stride;
+ uint8_t *dst = out->data[plane] + slice_start * out->linesize[plane];
+ uint8_t *p0 = s->bptrs[jobnr] + 16;
+ uint8_t *p1 = p0 + bstride;
+ uint8_t *p2 = p1 + bstride;
+ uint8_t *orig = p0, *end = p2;
+ const int *matrix = s->matrix[plane];
+ const float rdiv = s->rdiv[plane];
+ const float bias = s->bias[plane];
+ int y, x;
+
+ line_copy8(p0, src + stride * (slice_start == 0 ? 1 : -1), width, 1);
+ line_copy8(p1, src, width, 1);
+
+ for (y = slice_start; y < slice_end; y++) {
+ src += stride * (y < height - 1 ? 1 : -1);
+ line_copy8(p2, src, width, 1);
+
+ for (x = 0; x < width; x++) {
+ int sum = p0[x - 1] * matrix[0] +
+ p0[x] * matrix[1] +
+ p0[x + 1] * matrix[2] +
+ p1[x - 1] * matrix[3] +
+ p1[x] * matrix[4] +
+ p1[x + 1] * matrix[5] +
+ p2[x - 1] * matrix[6] +
+ p2[x] * matrix[7] +
+ p2[x + 1] * matrix[8];
+ sum = (int)(sum * rdiv + bias + 0.5f);
+ dst[x] = av_clip_uint8(sum);
+ }
+
+ p0 = p1;
+ p1 = p2;
+ p2 = (p2 == end) ? orig: p2 + bstride;
+ dst += out->linesize[plane];
+ }
+
+ return 0;
+}
+
+static int filter_5x5(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
+{
+ ConvolutionContext *s = ctx->priv;
+ ThreadData *td = arg;
+ AVFrame *in = td->in;
+ AVFrame *out = td->out;
+ const int plane = td->plane;
+ const int stride = in->linesize[plane];
+ const int bstride = s->bstride;
+ const int height = s->planeheight[plane];
+ const int width = s->planewidth[plane];
+ const int slice_start = (height * jobnr) / nb_jobs;
+ const int slice_end = (height * (jobnr+1)) / nb_jobs;
+ const uint8_t *src = in->data[plane] + slice_start * stride;
+ uint8_t *dst = out->data[plane] + slice_start * out->linesize[plane];
+ uint8_t *p0 = s->bptrs[jobnr] + 16;
+ uint8_t *p1 = p0 + bstride;
+ uint8_t *p2 = p1 + bstride;
+ uint8_t *p3 = p2 + bstride;
+ uint8_t *p4 = p3 + bstride;
+ uint8_t *orig = p0, *end = p4;
+ const int *matrix = s->matrix[plane];
+ float rdiv = s->rdiv[plane];
+ float bias = s->bias[plane];
+ int y, x, i;
+
+ line_copy8(p0, src + 2 * stride * (slice_start < 2 ? 1 : -1), width, 2);
+ line_copy8(p1, src + stride * (slice_start == 0 ? 1 : -1), width, 2);
+ line_copy8(p2, src, width, 2);
+ src += stride;
+ line_copy8(p3, src, width, 2);
+
+
+ for (y = slice_start; y < slice_end; y++) {
+ uint8_t *array[] = {
+ p0 - 2, p0 - 1, p0, p0 + 1, p0 + 2,
+ p1 - 2, p1 - 1, p1, p1 + 1, p1 + 2,
+ p2 - 2, p2 - 1, p2, p2 + 1, p2 + 2,
+ p3 - 2, p3 - 1, p3, p3 + 1, p3 + 2,
+ p4 - 2, p4 - 1, p4, p4 + 1, p4 + 2
+ };
+
+ src += stride * (y < height - 2 ? 1 : -1);
+ line_copy8(p4, src, width, 2);
+
+ for (x = 0; x < width; x++) {
+ int sum = 0;
+
+ for (i = 0; i < 25; i++) {
+ sum += *(array[i] + x) * matrix[i];
+ }
+ sum = (int)(sum * rdiv + bias + 0.5f);
+ dst[x] = av_clip_uint8(sum);
+ }
+
+ p0 = p1;
+ p1 = p2;
+ p2 = p3;
+ p3 = p4;
+ p4 = (p4 == end) ? orig: p4 + bstride;
+ dst += out->linesize[plane];
+ }
+
+ return 0;
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ ConvolutionContext *s = ctx->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ int p;
+
+ s->depth = desc->comp[0].depth;
+
+ s->planewidth[1] = s->planewidth[2] = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
+ s->planewidth[0] = s->planewidth[3] = inlink->w;
+ s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
+ s->planeheight[0] = s->planeheight[3] = inlink->h;
+
+ s->nb_planes = av_pix_fmt_count_planes(inlink->format);
+ s->nb_threads = ff_filter_get_nb_threads(ctx);
+ s->bptrs = av_calloc(s->nb_threads, sizeof(*s->bptrs));
+ if (!s->bptrs)
+ return AVERROR(ENOMEM);
+
+ s->bstride = s->planewidth[0] + 32;
+ s->bpc = (s->depth + 7) / 8;
+ s->buffer = av_malloc_array(5 * s->bstride * s->nb_threads, s->bpc);
+ if (!s->buffer)
+ return AVERROR(ENOMEM);
+
+ for (p = 0; p < s->nb_threads; p++) {
+ s->bptrs[p] = s->buffer + 5 * s->bstride * s->bpc * p;
+ }
+
+ if (!strcmp(ctx->filter->name, "convolution")) {
+ if (s->depth > 8) {
+ for (p = 0; p < s->nb_planes; p++) {
+ if (s->size[p] == 3)
+ s->filter[p] = filter16_3x3;
+ else if (s->size[p] == 5)
+ s->filter[p] = filter16_5x5;
+ }
+ }
+ } else if (!strcmp(ctx->filter->name, "prewitt")) {
+ if (s->depth > 8)
+ for (p = 0; p < s->nb_planes; p++)
+ s->filter[p] = filter16_prewitt;
+ } else if (!strcmp(ctx->filter->name, "sobel")) {
+ if (s->depth > 8)
+ for (p = 0; p < s->nb_planes; p++)
+ s->filter[p] = filter16_sobel;
+ }
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ ConvolutionContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFrame *out;
+ int plane;
+
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+
+ for (plane = 0; plane < s->nb_planes; plane++) {
+ ThreadData td;
+
+ if (s->copy[plane]) {
+ av_image_copy_plane(out->data[plane], out->linesize[plane],
+ in->data[plane], in->linesize[plane],
+ s->planewidth[plane] * s->bpc,
+ s->planeheight[plane]);
+ continue;
+ }
+
+ td.in = in;
+ td.out = out;
+ td.plane = plane;
+ ctx->internal->execute(ctx, s->filter[plane], &td, NULL, FFMIN(s->planeheight[plane], s->nb_threads));
+ }
+
+ av_frame_free(&in);
+ return ff_filter_frame(outlink, out);
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ ConvolutionContext *s = ctx->priv;
+ int i;
+
+ if (!strcmp(ctx->filter->name, "convolution")) {
+ for (i = 0; i < 4; i++) {
+ int *matrix = (int *)s->matrix[i];
+ char *p, *arg, *saveptr = NULL;
+
+ p = s->matrix_str[i];
+ while (s->matrix_length[i] < 25) {
+ if (!(arg = av_strtok(p, " ", &saveptr)))
+ break;
+
+ p = NULL;
+ sscanf(arg, "%d", &matrix[s->matrix_length[i]]);
+ s->matrix_length[i]++;
+ }
+
+ if (s->matrix_length[i] == 9) {
+ s->size[i] = 3;
+ if (!memcmp(matrix, same3x3, sizeof(same3x3)))
+ s->copy[i] = 1;
+ else
+ s->filter[i] = filter_3x3;
+ } else if (s->matrix_length[i] == 25) {
+ s->size[i] = 5;
+ if (!memcmp(matrix, same5x5, sizeof(same5x5)))
+ s->copy[i] = 1;
+ else
+ s->filter[i] = filter_5x5;
+ } else {
+ return AVERROR(EINVAL);
+ }
+ }
+ } else if (!strcmp(ctx->filter->name, "prewitt")) {
+ for (i = 0; i < 4; i++) {
+ if ((1 << i) & s->planes)
+ s->filter[i] = filter_prewitt;
+ else
+ s->copy[i] = 1;
+ }
+ } else if (!strcmp(ctx->filter->name, "sobel")) {
+ for (i = 0; i < 4; i++) {
+ if ((1 << i) & s->planes)
+ s->filter[i] = filter_sobel;
+ else
+ s->copy[i] = 1;
+ }
+ }
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ ConvolutionContext *s = ctx->priv;
+
+ av_freep(&s->bptrs);
+ av_freep(&s->buffer);
+}
+
+static const AVFilterPad convolution_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad convolution_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+#if CONFIG_CONVOLUTION_FILTER
+
+AVFilter ff_vf_convolution = {
+ .name = "convolution",
+ .description = NULL_IF_CONFIG_SMALL("Apply convolution filter."),
+ .priv_size = sizeof(ConvolutionContext),
+ .priv_class = &convolution_class,
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = convolution_inputs,
+ .outputs = convolution_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
+};
+
+#endif /* CONFIG_CONVOLUTION_FILTER */
+
+#if CONFIG_PREWITT_FILTER
+
+static const AVOption prewitt_options[] = {
+ { "planes", "set planes to filter", OFFSET(planes), AV_OPT_TYPE_INT, {.i64=15}, 0, 15, FLAGS},
+ { "scale", "set scale", OFFSET(scale), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, 0.0, 65535, FLAGS},
+ { "delta", "set delta", OFFSET(delta), AV_OPT_TYPE_FLOAT, {.dbl=0}, -65535, 65535, FLAGS},
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(prewitt);
+
+AVFilter ff_vf_prewitt = {
+ .name = "prewitt",
+ .description = NULL_IF_CONFIG_SMALL("Apply prewitt operator."),
+ .priv_size = sizeof(ConvolutionContext),
+ .priv_class = &prewitt_class,
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = convolution_inputs,
+ .outputs = convolution_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
+};
+
+#endif /* CONFIG_PREWITT_FILTER */
+
+#if CONFIG_SOBEL_FILTER
+
+static const AVOption sobel_options[] = {
+ { "planes", "set planes to filter", OFFSET(planes), AV_OPT_TYPE_INT, {.i64=15}, 0, 15, FLAGS},
+ { "scale", "set scale", OFFSET(scale), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, 0.0, 65535, FLAGS},
+ { "delta", "set delta", OFFSET(delta), AV_OPT_TYPE_FLOAT, {.dbl=0}, -65535, 65535, FLAGS},
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(sobel);
+
+AVFilter ff_vf_sobel = {
+ .name = "sobel",
+ .description = NULL_IF_CONFIG_SMALL("Apply sobel operator."),
+ .priv_size = sizeof(ConvolutionContext),
+ .priv_class = &sobel_class,
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = convolution_inputs,
+ .outputs = convolution_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
+};
+
+#endif /* CONFIG_SOBEL_FILTER */
diff --git a/libavfilter/vf_copy.c b/libavfilter/vf_copy.c
index 5e60f2082e..b0159cff00 100644
--- a/libavfilter/vf_copy.c
+++ b/libavfilter/vf_copy.c
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -27,6 +27,23 @@
#include "internal.h"
#include "video.h"
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *formats = NULL;
+ int fmt;
+
+ for (fmt = 0; av_pix_fmt_desc_get(fmt); fmt++) {
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
+ int ret;
+ if (desc->flags & AV_PIX_FMT_FLAG_HWACCEL)
+ continue;
+ if ((ret = ff_add_format(&formats, fmt)) < 0)
+ return ret;
+ }
+
+ return ff_set_common_formats(ctx, formats);
+}
+
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
AVFilterLink *outlink = inlink->dst->outputs[0];
@@ -44,10 +61,9 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
static const AVFilterPad avfilter_vf_copy_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .get_video_buffer = ff_null_get_video_buffer,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
},
{ NULL }
};
@@ -61,9 +77,9 @@ static const AVFilterPad avfilter_vf_copy_outputs[] = {
};
AVFilter ff_vf_copy = {
- .name = "copy",
+ .name = "copy",
.description = NULL_IF_CONFIG_SMALL("Copy the input video unchanged to the output."),
-
- .inputs = avfilter_vf_copy_inputs,
- .outputs = avfilter_vf_copy_outputs,
+ .inputs = avfilter_vf_copy_inputs,
+ .outputs = avfilter_vf_copy_outputs,
+ .query_formats = query_formats,
};
diff --git a/libavfilter/vf_coreimage.m b/libavfilter/vf_coreimage.m
new file mode 100644
index 0000000000..9c8db02858
--- /dev/null
+++ b/libavfilter/vf_coreimage.m
@@ -0,0 +1,688 @@
+/*
+ * Copyright (c) 2016 Thilo Borgmann
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Video processing based on Apple's CoreImage API
+ */
+
+#import <QuartzCore/CoreImage.h>
+#import <AppKit/AppKit.h>
+
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+#include "libavutil/internal.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+
+typedef struct CoreImageContext {
+ const AVClass *class;
+
+ int is_video_source; ///< filter is used as video source
+
+ int w, h; ///< video size
+ AVRational sar; ///< sample aspect ratio
+ AVRational frame_rate; ///< video frame rate
+ AVRational time_base; ///< stream time base
+ int64_t duration; ///< duration expressed in microseconds
+ int64_t pts; ///< increasing presentation time stamp
+ AVFrame *picref; ///< cached reference containing the painted picture
+
+ CFTypeRef glctx; ///< OpenGL context
+ CGContextRef cgctx; ///< Bitmap context for image copy
+ CFTypeRef input_image; ///< Input image container for passing into Core Image API
+ CGColorSpaceRef color_space; ///< Common color space for input image and cgcontext
+ int bits_per_component; ///< Shared bpc for input-output operation
+
+ char *filter_string; ///< The complete user provided filter definition
+ CFTypeRef *filters; ///< CIFilter object for all requested filters
+ int num_filters; ///< Amount of filters in *filters
+
+ char *output_rect; ///< Rectangle to be filled with filter intput
+ int list_filters; ///< Option used to list all available filters including generators
+ int list_generators; ///< Option used to list all available generators
+} CoreImageContext;
+
+static int config_output(AVFilterLink *link)
+{
+ CoreImageContext *ctx = link->src->priv;
+
+ link->w = ctx->w;
+ link->h = ctx->h;
+ link->sample_aspect_ratio = ctx->sar;
+ link->frame_rate = ctx->frame_rate;
+ link->time_base = ctx->time_base;
+
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format);
+ ctx->bits_per_component = av_get_bits_per_pixel(desc) / desc->nb_components;
+
+ return 0;
+}
+
+/** Determine image properties from input link of filter chain.
+ */
+static int config_input(AVFilterLink *link)
+{
+ CoreImageContext *ctx = link->dst->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format);
+ ctx->bits_per_component = av_get_bits_per_pixel(desc) / desc->nb_components;
+
+ return 0;
+}
+
+/** Print a list of all available filters including options and respective value ranges and defaults.
+ */
+static void list_filters(CoreImageContext *ctx)
+{
+ // querying filters and attributes
+ NSArray *filter_categories = nil;
+
+ if (ctx->list_generators && !ctx->list_filters) {
+ filter_categories = [NSArray arrayWithObjects:kCICategoryGenerator, nil];
+ }
+
+ NSArray *filter_names = [CIFilter filterNamesInCategories:filter_categories];
+ NSEnumerator *filters = [filter_names objectEnumerator];
+
+ NSString *filter_name;
+ while (filter_name = [filters nextObject]) {
+ av_log(ctx, AV_LOG_INFO, "Filter: %s\n", [filter_name UTF8String]);
+ NSString *input;
+
+ CIFilter *filter = [CIFilter filterWithName:filter_name];
+ NSDictionary *filter_attribs = [filter attributes]; // <nsstring, id>
+ NSArray *filter_inputs = [filter inputKeys]; // <nsstring>
+
+ for (input in filter_inputs) {
+ NSDictionary *input_attribs = [filter_attribs valueForKey:input];
+ NSString *input_class = [input_attribs valueForKey:kCIAttributeClass];
+ if ([input_class isEqualToString:@"NSNumber"]) {
+ NSNumber *value_default = [input_attribs valueForKey:kCIAttributeDefault];
+ NSNumber *value_min = [input_attribs valueForKey:kCIAttributeSliderMin];
+ NSNumber *value_max = [input_attribs valueForKey:kCIAttributeSliderMax];
+
+ av_log(ctx, AV_LOG_INFO, "\tOption: %s\t[%s]\t[%s %s][%s]\n",
+ [input UTF8String],
+ [input_class UTF8String],
+ [[value_min stringValue] UTF8String],
+ [[value_max stringValue] UTF8String],
+ [[value_default stringValue] UTF8String]);
+ } else {
+ av_log(ctx, AV_LOG_INFO, "\tOption: %s\t[%s]\n",
+ [input UTF8String],
+ [input_class UTF8String]);
+ }
+ }
+ }
+}
+
+static int query_formats(AVFilterContext *fctx)
+{
+ static const enum AVPixelFormat inout_fmts_rgb[] = {
+ AV_PIX_FMT_ARGB,
+ AV_PIX_FMT_NONE
+ };
+
+ AVFilterFormats *inout_formats;
+ int ret;
+
+ if (!(inout_formats = ff_make_format_list(inout_fmts_rgb))) {
+ return AVERROR(ENOMEM);
+ }
+
+ if ((ret = ff_formats_ref(inout_formats, &fctx->inputs[0]->out_formats)) < 0 ||
+ (ret = ff_formats_ref(inout_formats, &fctx->outputs[0]->in_formats)) < 0) {
+ return ret;
+ }
+
+ return 0;
+}
+
+static int query_formats_src(AVFilterContext *fctx)
+{
+ static const enum AVPixelFormat inout_fmts_rgb[] = {
+ AV_PIX_FMT_ARGB,
+ AV_PIX_FMT_NONE
+ };
+
+ AVFilterFormats *inout_formats;
+ int ret;
+
+ if (!(inout_formats = ff_make_format_list(inout_fmts_rgb))) {
+ return AVERROR(ENOMEM);
+ }
+
+ if ((ret = ff_formats_ref(inout_formats, &fctx->outputs[0]->in_formats)) < 0) {
+ return ret;
+ }
+
+ return 0;
+}
+
+static int apply_filter(CoreImageContext *ctx, AVFilterLink *link, AVFrame *frame)
+{
+ int i;
+
+ // (re-)initialize input image
+ const CGSize frame_size = {
+ frame->width,
+ frame->height
+ };
+
+ NSData *data = [NSData dataWithBytesNoCopy:frame->data[0]
+ length:frame->height*frame->linesize[0]
+ freeWhenDone:NO];
+
+ CIImage *ret = [(__bridge CIImage*)ctx->input_image initWithBitmapData:data
+ bytesPerRow:frame->linesize[0]
+ size:frame_size
+ format:kCIFormatARGB8
+ colorSpace:ctx->color_space]; //kCGColorSpaceGenericRGB
+ if (!ret) {
+ av_log(ctx, AV_LOG_ERROR, "Input image could not be initialized.\n");
+ return AVERROR_EXTERNAL;
+ }
+
+ CIFilter *filter = NULL;
+ CIImage *filter_input = (__bridge CIImage*)ctx->input_image;
+ CIImage *filter_output = NULL;
+
+ // successively apply all filters
+ for (i = 0; i < ctx->num_filters; i++) {
+ if (i) {
+ // set filter input to previous filter output
+ filter_input = [(__bridge CIImage*)ctx->filters[i-1] valueForKey:kCIOutputImageKey];
+ CGRect out_rect = [filter_input extent];
+ if (out_rect.size.width > frame->width || out_rect.size.height > frame->height) {
+ // do not keep padded image regions after filtering
+ out_rect.origin.x = 0.0f;
+ out_rect.origin.y = 0.0f;
+ out_rect.size.width = frame->width;
+ out_rect.size.height = frame->height;
+ }
+ filter_input = [filter_input imageByCroppingToRect:out_rect];
+ }
+
+ filter = (__bridge CIFilter*)ctx->filters[i];
+
+ // do not set input image for the first filter if used as video source
+ if (!ctx->is_video_source || i) {
+ @try {
+ [filter setValue:filter_input forKey:kCIInputImageKey];
+ } @catch (NSException *exception) {
+ if (![[exception name] isEqualToString:NSUndefinedKeyException]) {
+ av_log(ctx, AV_LOG_ERROR, "An error occurred: %s.", [exception.reason UTF8String]);
+ return AVERROR_EXTERNAL;
+ } else {
+ av_log(ctx, AV_LOG_WARNING, "Selected filter does not accept an input image.\n");
+ }
+ }
+ }
+ }
+
+ // get output of last filter
+ filter_output = [filter valueForKey:kCIOutputImageKey];
+
+ if (!filter_output) {
+ av_log(ctx, AV_LOG_ERROR, "Filter output not available.\n");
+ return AVERROR_EXTERNAL;
+ }
+
+ // do not keep padded image regions after filtering
+ CGRect out_rect = [filter_output extent];
+ if (out_rect.size.width > frame->width || out_rect.size.height > frame->height) {
+ av_log(ctx, AV_LOG_DEBUG, "Cropping output image.\n");
+ out_rect.origin.x = 0.0f;
+ out_rect.origin.y = 0.0f;
+ out_rect.size.width = frame->width;
+ out_rect.size.height = frame->height;
+ }
+
+ CGImageRef out = [(__bridge CIContext*)ctx->glctx createCGImage:filter_output
+ fromRect:out_rect];
+
+ if (!out) {
+ av_log(ctx, AV_LOG_ERROR, "Cannot create valid output image.\n");
+ }
+
+ // create bitmap context on the fly for rendering into current frame->data[]
+ if (ctx->cgctx) {
+ CGContextRelease(ctx->cgctx);
+ ctx->cgctx = NULL;
+ }
+ size_t out_width = CGImageGetWidth(out);
+ size_t out_height = CGImageGetHeight(out);
+
+ if (out_width > frame->width || out_height > frame->height) { // this might result in segfault
+ av_log(ctx, AV_LOG_WARNING, "Output image has unexpected size: %lux%lu (expected: %ix%i). This may crash...\n",
+ out_width, out_height, frame->width, frame->height);
+ }
+ ctx->cgctx = CGBitmapContextCreate(frame->data[0],
+ frame->width,
+ frame->height,
+ ctx->bits_per_component,
+ frame->linesize[0],
+ ctx->color_space,
+ (uint32_t)kCGImageAlphaPremultipliedFirst); // ARGB
+ if (!ctx->cgctx) {
+ av_log(ctx, AV_LOG_ERROR, "CGBitmap context cannot be created.\n");
+ return AVERROR_EXTERNAL;
+ }
+
+ // copy ("draw") the output image into the frame data
+ CGRect rect = {{0,0},{frame->width, frame->height}};
+ if (ctx->output_rect) {
+ @try {
+ NSString *tmp_string = [NSString stringWithUTF8String:ctx->output_rect];
+ NSRect tmp = NSRectFromString(tmp_string);
+ rect = NSRectToCGRect(tmp);
+ } @catch (NSException *exception) {
+ av_log(ctx, AV_LOG_ERROR, "An error occurred: %s.", [exception.reason UTF8String]);
+ return AVERROR_EXTERNAL;
+ }
+ if (rect.size.width == 0.0f) {
+ av_log(ctx, AV_LOG_WARNING, "Width of output rect is zero.\n");
+ }
+ if (rect.size.height == 0.0f) {
+ av_log(ctx, AV_LOG_WARNING, "Height of output rect is zero.\n");
+ }
+ }
+
+ CGContextDrawImage(ctx->cgctx, rect, out);
+
+ return ff_filter_frame(link, frame);
+}
+
+/** Apply all valid filters successively to the input image.
+ * The final output image is copied from the GPU by "drawing" using a bitmap context.
+ */
+static int filter_frame(AVFilterLink *link, AVFrame *frame)
+{
+ return apply_filter(link->dst->priv, link->dst->outputs[0], frame);
+}
+
+static int request_frame(AVFilterLink *link)
+{
+ CoreImageContext *ctx = link->src->priv;
+ AVFrame *frame;
+
+ if (ctx->duration >= 0 &&
+ av_rescale_q(ctx->pts, ctx->time_base, AV_TIME_BASE_Q) >= ctx->duration) {
+ return AVERROR_EOF;
+ }
+
+ if (!ctx->picref) {
+ ctx->picref = ff_get_video_buffer(link, ctx->w, ctx->h);
+ if (!ctx->picref) {
+ return AVERROR(ENOMEM);
+ }
+ }
+
+ frame = av_frame_clone(ctx->picref);
+ if (!frame) {
+ return AVERROR(ENOMEM);
+ }
+
+ frame->pts = ctx->pts;
+ frame->key_frame = 1;
+ frame->interlaced_frame = 0;
+ frame->pict_type = AV_PICTURE_TYPE_I;
+ frame->sample_aspect_ratio = ctx->sar;
+
+ ctx->pts++;
+
+ return apply_filter(ctx, link, frame);
+}
+
+/** Set an option of the given filter to the provided key-value pair.
+ */
+static void set_option(CoreImageContext *ctx, CIFilter *filter, const char *key, const char *value)
+{
+ NSString *input_key = [NSString stringWithUTF8String:key];
+ NSString *input_val = [NSString stringWithUTF8String:value];
+
+ NSDictionary *filter_attribs = [filter attributes]; // <nsstring, id>
+ NSDictionary *input_attribs = [filter_attribs valueForKey:input_key];
+
+ NSString *input_class = [input_attribs valueForKey:kCIAttributeClass];
+ NSString *input_type = [input_attribs valueForKey:kCIAttributeType];
+
+ if (!input_attribs) {
+ av_log(ctx, AV_LOG_WARNING, "Skipping unknown option: \"%s\".\n",
+ [input_key UTF8String]); // [[filter name] UTF8String]) not currently defined...
+ return;
+ }
+
+ av_log(ctx, AV_LOG_DEBUG, "key: %s, val: %s, #attribs: %lu, class: %s, type: %s\n",
+ [input_key UTF8String],
+ [input_val UTF8String],
+ input_attribs ? (unsigned long)[input_attribs count] : -1,
+ [input_class UTF8String],
+ [input_type UTF8String]);
+
+ if ([input_class isEqualToString:@"NSNumber"]) {
+ float input = input_val.floatValue;
+ NSNumber *max_value = [input_attribs valueForKey:kCIAttributeSliderMax];
+ NSNumber *min_value = [input_attribs valueForKey:kCIAttributeSliderMin];
+ NSNumber *used_value = nil;
+
+#define CLAMP_WARNING do { \
+av_log(ctx, AV_LOG_WARNING, "Value of \"%f\" for option \"%s\" is out of range [%f %f], clamping to \"%f\".\n", \
+ input, \
+ [input_key UTF8String], \
+ min_value.floatValue, \
+ max_value.floatValue, \
+ used_value.floatValue); \
+} while(0)
+ if (input > max_value.floatValue) {
+ used_value = max_value;
+ CLAMP_WARNING;
+ } else if (input < min_value.floatValue) {
+ used_value = min_value;
+ CLAMP_WARNING;
+ } else {
+ used_value = [NSNumber numberWithFloat:input];
+ }
+
+ [filter setValue:used_value forKey:input_key];
+ } else if ([input_class isEqualToString:@"CIVector"]) {
+ CIVector *input = [CIVector vectorWithString:input_val];
+
+ if (!input) {
+ av_log(ctx, AV_LOG_WARNING, "Skipping invalid CIVctor description: \"%s\".\n",
+ [input_val UTF8String]);
+ return;
+ }
+
+ [filter setValue:input forKey:input_key];
+ } else if ([input_class isEqualToString:@"CIColor"]) {
+ CIColor *input = [CIColor colorWithString:input_val];
+
+ if (!input) {
+ av_log(ctx, AV_LOG_WARNING, "Skipping invalid CIColor description: \"%s\".\n",
+ [input_val UTF8String]);
+ return;
+ }
+
+ [filter setValue:input forKey:input_key];
+ } else if ([input_class isEqualToString:@"NSString"]) { // set display name as string with latin1 encoding
+ [filter setValue:input_val forKey:input_key];
+ } else if ([input_class isEqualToString:@"NSData"]) { // set display name as string with latin1 encoding
+ NSData *input = [NSData dataWithBytes:(const void*)[input_val cStringUsingEncoding:NSISOLatin1StringEncoding]
+ length:[input_val lengthOfBytesUsingEncoding:NSISOLatin1StringEncoding]];
+
+ if (!input) {
+ av_log(ctx, AV_LOG_WARNING, "Skipping invalid NSData description: \"%s\".\n",
+ [input_val UTF8String]);
+ return;
+ }
+
+ [filter setValue:input forKey:input_key];
+ } else {
+ av_log(ctx, AV_LOG_WARNING, "Skipping unsupported option class: \"%s\".\n",
+ [input_class UTF8String]);
+ avpriv_report_missing_feature(ctx, "Handling of some option classes");
+ return;
+ }
+}
+
+/** Create a filter object by a given name and set all options to defaults.
+ * Overwrite any option given by the user to the provided value in filter_options.
+ */
+static CIFilter* create_filter(CoreImageContext *ctx, const char *filter_name, AVDictionary *filter_options)
+{
+ // create filter object
+ CIFilter *filter = [CIFilter filterWithName:[NSString stringWithUTF8String:filter_name]];
+
+ // set default options
+ [filter setDefaults];
+
+ // set user options
+ if (filter_options) {
+ AVDictionaryEntry *o = NULL;
+ while ((o = av_dict_get(filter_options, "", o, AV_DICT_IGNORE_SUFFIX))) {
+ set_option(ctx, filter, o->key, o->value);
+ }
+ }
+
+ return filter;
+}
+
+static av_cold int init(AVFilterContext *fctx)
+{
+ CoreImageContext *ctx = fctx->priv;
+ AVDictionary *filter_dict = NULL;
+ AVDictionaryEntry *f = NULL;
+ AVDictionaryEntry *o = NULL;
+ int ret;
+ int i;
+
+ if (ctx->list_filters || ctx->list_generators) {
+ list_filters(ctx);
+ return AVERROR_EXIT;
+ }
+
+ if (ctx->filter_string) {
+ // parse filter string (filter=name@opt=val@opt2=val2#name2@opt3=val3) for filters separated by #
+ av_log(ctx, AV_LOG_DEBUG, "Filter_string: %s\n", ctx->filter_string);
+ ret = av_dict_parse_string(&filter_dict, ctx->filter_string, "@", "#", AV_DICT_MULTIKEY); // parse filter_name:all_filter_options
+ if (ret) {
+ av_log(ctx, AV_LOG_ERROR, "Parsing of filters failed.\n");
+ return AVERROR(EIO);
+ }
+ ctx->num_filters = av_dict_count(filter_dict);
+ av_log(ctx, AV_LOG_DEBUG, "Filter count: %i\n", ctx->num_filters);
+
+ // allocate CIFilter array
+ ctx->filters = av_mallocz_array(ctx->num_filters, sizeof(CIFilter*));
+ if (!ctx->filters) {
+ av_log(ctx, AV_LOG_ERROR, "Could not allocate filter array.\n");
+ return AVERROR(ENOMEM);
+ }
+
+ // parse filters for option key-value pairs (opt=val@opt2=val2) separated by @
+ i = 0;
+ while ((f = av_dict_get(filter_dict, "", f, AV_DICT_IGNORE_SUFFIX))) {
+ AVDictionary *filter_options = NULL;
+
+ if (strncmp(f->value, "default", 7)) { // not default
+ ret = av_dict_parse_string(&filter_options, f->value, "=", "@", 0); // parse option_name:option_value
+ if (ret) {
+ av_log(ctx, AV_LOG_ERROR, "Parsing of filter options for \"%s\" failed.\n", f->key);
+ return AVERROR(EIO);
+ }
+ }
+
+ if (av_log_get_level() >= AV_LOG_DEBUG) {
+ av_log(ctx, AV_LOG_DEBUG, "Creating filter %i: \"%s\":\n", i, f->key);
+ if (!filter_options) {
+ av_log(ctx, AV_LOG_DEBUG, "\tusing default options\n");
+ } else {
+ while ((o = av_dict_get(filter_options, "", o, AV_DICT_IGNORE_SUFFIX))) {
+ av_log(ctx, AV_LOG_DEBUG, "\t%s: %s\n", o->key, o->value);
+ }
+ }
+ }
+
+ ctx->filters[i] = CFBridgingRetain(create_filter(ctx, f->key, filter_options));
+ if (!ctx->filters[i]) {
+ av_log(ctx, AV_LOG_ERROR, "Could not create filter \"%s\".\n", f->key);
+ return AVERROR(EINVAL);
+ }
+
+ i++;
+ }
+ } else {
+ av_log(ctx, AV_LOG_ERROR, "No filters specified.\n");
+ return AVERROR(EINVAL);
+ }
+
+ // create GPU context on OSX
+ const NSOpenGLPixelFormatAttribute attr[] = {
+ NSOpenGLPFAAccelerated,
+ NSOpenGLPFANoRecovery,
+ NSOpenGLPFAColorSize, 32,
+ 0
+ };
+
+ NSOpenGLPixelFormat *pixel_format = [[NSOpenGLPixelFormat alloc] initWithAttributes:(void *)&attr];
+ ctx->color_space = CGColorSpaceCreateWithName(kCGColorSpaceGenericRGB);
+ ctx->glctx = CFBridgingRetain([CIContext contextWithCGLContext:CGLGetCurrentContext()
+ pixelFormat:[pixel_format CGLPixelFormatObj]
+ colorSpace:ctx->color_space
+ options:nil]);
+
+ if (!ctx->glctx) {
+ av_log(ctx, AV_LOG_ERROR, "CIContext not created.\n");
+ return AVERROR_EXTERNAL;
+ }
+
+ // Creating an empty input image as input container for the context
+ ctx->input_image = CFBridgingRetain([CIImage emptyImage]);
+
+ return 0;
+}
+
+static av_cold int init_src(AVFilterContext *fctx)
+{
+ CoreImageContext *ctx = fctx->priv;
+
+ ctx->is_video_source = 1;
+ ctx->time_base = av_inv_q(ctx->frame_rate);
+ ctx->pts = 0;
+
+ return init(fctx);
+}
+
+static av_cold void uninit(AVFilterContext *fctx)
+{
+#define SafeCFRelease(ptr) do { \
+ if (ptr) { \
+ CFRelease(ptr); \
+ ptr = NULL; \
+ } \
+} while (0)
+
+ CoreImageContext *ctx = fctx->priv;
+
+ SafeCFRelease(ctx->glctx);
+ SafeCFRelease(ctx->cgctx);
+ SafeCFRelease(ctx->color_space);
+ SafeCFRelease(ctx->input_image);
+
+ if (ctx->filters) {
+ for (int i = 0; i < ctx->num_filters; i++) {
+ SafeCFRelease(ctx->filters[i]);
+ }
+ av_freep(&ctx->filters);
+ }
+
+ av_frame_free(&ctx->picref);
+}
+
+static const AVFilterPad vf_coreimage_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ { NULL }
+};
+
+static const AVFilterPad vf_coreimage_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+static const AVFilterPad vsrc_coreimagesrc_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .request_frame = request_frame,
+ .config_props = config_output,
+ },
+ { NULL }
+};
+
+#define OFFSET(x) offsetof(CoreImageContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+#define GENERATOR_OPTIONS \
+ {"size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "320x240"}, 0, 0, FLAGS}, \
+ {"s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "320x240"}, 0, 0, FLAGS}, \
+ {"rate", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, FLAGS}, \
+ {"r", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, FLAGS}, \
+ {"duration", "set video duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = -1}, -1, INT64_MAX, FLAGS}, \
+ {"d", "set video duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = -1}, -1, INT64_MAX, FLAGS}, \
+ {"sar", "set video sample aspect ratio", OFFSET(sar), AV_OPT_TYPE_RATIONAL, {.dbl = 1}, 0, INT_MAX, FLAGS},
+
+#define FILTER_OPTIONS \
+ {"list_filters", "list available filters", OFFSET(list_filters), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, .flags = FLAGS}, \
+ {"list_generators", "list available generators", OFFSET(list_generators), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, .flags = FLAGS}, \
+ {"filter", "names and options of filters to apply", OFFSET(filter_string), AV_OPT_TYPE_STRING, {.str = NULL}, .flags = FLAGS}, \
+ {"output_rect", "output rectangle within output image", OFFSET(output_rect), AV_OPT_TYPE_STRING, {.str = NULL}, .flags = FLAGS},
+
+
+// definitions for coreimage video filter
+static const AVOption coreimage_options[] = {
+ FILTER_OPTIONS
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(coreimage);
+
+AVFilter ff_vf_coreimage = {
+ .name = "coreimage",
+ .description = NULL_IF_CONFIG_SMALL("Video filtering using CoreImage API."),
+ .init = init,
+ .uninit = uninit,
+ .priv_size = sizeof(CoreImageContext),
+ .priv_class = &coreimage_class,
+ .inputs = vf_coreimage_inputs,
+ .outputs = vf_coreimage_outputs,
+ .query_formats = query_formats,
+};
+
+// definitions for coreimagesrc video source
+static const AVOption coreimagesrc_options[] = {
+ GENERATOR_OPTIONS
+ FILTER_OPTIONS
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(coreimagesrc);
+
+AVFilter ff_vsrc_coreimagesrc = {
+ .name = "coreimagesrc",
+ .description = NULL_IF_CONFIG_SMALL("Video source using image generators of CoreImage API."),
+ .init = init_src,
+ .uninit = uninit,
+ .priv_size = sizeof(CoreImageContext),
+ .priv_class = &coreimagesrc_class,
+ .inputs = NULL,
+ .outputs = vsrc_coreimagesrc_outputs,
+ .query_formats = query_formats_src,
+};
diff --git a/libavfilter/vf_cover_rect.c b/libavfilter/vf_cover_rect.c
new file mode 100644
index 0000000000..f7f61038e3
--- /dev/null
+++ b/libavfilter/vf_cover_rect.c
@@ -0,0 +1,260 @@
+/*
+ * Copyright (c) 2014-2015 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/**
+ * @todo switch to dualinput
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/opt.h"
+#include "internal.h"
+
+#include "lavfutils.h"
+
+enum mode {
+ MODE_COVER,
+ MODE_BLUR,
+ NB_MODES
+};
+
+typedef struct CoverContext {
+ AVClass *class;
+ int mode;
+ char *cover_filename;
+ AVFrame *cover_frame;
+ int width, height;
+} CoverContext;
+
+#define OFFSET(x) offsetof(CoverContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption cover_rect_options[] = {
+ { "cover", "cover bitmap filename", OFFSET(cover_filename), AV_OPT_TYPE_STRING, {.str = NULL}, .flags = FLAGS },
+ { "mode", "set removal mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64 = MODE_BLUR}, 0, NB_MODES - 1, FLAGS, "mode" },
+ { "cover", "cover area with bitmap", 0, AV_OPT_TYPE_CONST, {.i64 = MODE_COVER}, INT_MIN, INT_MAX, FLAGS, "mode" },
+ { "blur", "blur area", 0, AV_OPT_TYPE_CONST, {.i64 = MODE_BLUR}, INT_MIN, INT_MAX, FLAGS, "mode" },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(cover_rect);
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_YUVJ420P,
+ AV_PIX_FMT_NONE
+ };
+
+ return ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ return 0;
+}
+
+static void cover_rect(CoverContext *cover, AVFrame *in, int offx, int offy)
+{
+ int x, y, p;
+
+ for (p = 0; p < 3; p++) {
+ uint8_t *data = in->data[p] + (offx>>!!p) + (offy>>!!p) * in->linesize[p];
+ const uint8_t *src = cover->cover_frame->data[p];
+ int w = AV_CEIL_RSHIFT(cover->cover_frame->width , !!p);
+ int h = AV_CEIL_RSHIFT(cover->cover_frame->height, !!p);
+ for (y = 0; y < h; y++) {
+ for (x = 0; x < w; x++) {
+ data[x] = src[x];
+ }
+ data += in->linesize[p];
+ src += cover->cover_frame->linesize[p];
+ }
+ }
+}
+static void blur(CoverContext *cover, AVFrame *in, int offx, int offy)
+{
+ int x, y, p;
+
+ for (p=0; p<3; p++) {
+ int ox = offx>>!!p;
+ int oy = offy>>!!p;
+ int stride = in->linesize[p];
+ uint8_t *data = in->data[p] + ox + oy * stride;
+ int w = AV_CEIL_RSHIFT(cover->width , !!p);
+ int h = AV_CEIL_RSHIFT(cover->height, !!p);
+ int iw = AV_CEIL_RSHIFT(in->width , !!p);
+ int ih = AV_CEIL_RSHIFT(in->height, !!p);
+ for (y = 0; y < h; y++) {
+ for (x = 0; x < w; x++) {
+ int c = 0;
+ int s = 0;
+ if (ox) {
+ int scale = 65536 / (x + 1);
+ s += data[-1 + y*stride] * scale;
+ c += scale;
+ }
+ if (oy) {
+ int scale = 65536 / (y + 1);
+ s += data[x - stride] * scale;
+ c += scale;
+ }
+ if (ox + w < iw) {
+ int scale = 65536 / (w - x);
+ s += data[w + y*stride] * scale;
+ c += scale;
+ }
+ if (oy + h < ih) {
+ int scale = 65536 / (h - y);
+ s += data[x + h*stride] * scale;
+ c += scale;
+ }
+ data[x + y*stride] = c ? (s + (c>>1)) / c : 0;
+ }
+ }
+ }
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ CoverContext *cover = ctx->priv;
+ AVDictionaryEntry *ex, *ey, *ew, *eh;
+ int x = -1, y = -1, w = -1, h = -1;
+ char *xendptr = NULL, *yendptr = NULL, *wendptr = NULL, *hendptr = NULL;
+
+ ex = av_dict_get(in->metadata, "lavfi.rect.x", NULL, AV_DICT_MATCH_CASE);
+ ey = av_dict_get(in->metadata, "lavfi.rect.y", NULL, AV_DICT_MATCH_CASE);
+ ew = av_dict_get(in->metadata, "lavfi.rect.w", NULL, AV_DICT_MATCH_CASE);
+ eh = av_dict_get(in->metadata, "lavfi.rect.h", NULL, AV_DICT_MATCH_CASE);
+ if (ex && ey && ew && eh) {
+ x = strtol(ex->value, &xendptr, 10);
+ y = strtol(ey->value, &yendptr, 10);
+ w = strtol(ew->value, &wendptr, 10);
+ h = strtol(eh->value, &hendptr, 10);
+ }
+
+ if (!xendptr || *xendptr || !yendptr || *yendptr ||
+ !wendptr || *wendptr || !hendptr || !hendptr
+ ) {
+ return ff_filter_frame(ctx->outputs[0], in);
+ }
+
+ if (x < 0) {
+ w += x;
+ x = 0;
+ }
+ if (y < 0) {
+ h += y;
+ y = 0;
+ }
+ w = FFMIN(w, in->width - x);
+ h = FFMIN(h, in->height - y);
+
+ if (w > in->width || h > in->height || w <= 0 || h <= 0)
+ return AVERROR(EINVAL);
+
+ if (cover->cover_frame) {
+ if (w != cover->cover_frame->width || h != cover->cover_frame->height)
+ return AVERROR(EINVAL);
+ }
+
+ cover->width = w;
+ cover->height = h;
+
+ x = av_clip(x, 0, in->width - w);
+ y = av_clip(y, 0, in->height - h);
+
+ av_frame_make_writable(in);
+
+ if (cover->mode == MODE_BLUR) {
+ blur (cover, in, x, y);
+ } else {
+ cover_rect(cover, in, x, y);
+ }
+ return ff_filter_frame(ctx->outputs[0], in);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ CoverContext *cover = ctx->priv;
+
+ if (cover->cover_frame)
+ av_freep(&cover->cover_frame->data[0]);
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ CoverContext *cover = ctx->priv;
+ int ret;
+
+ if (cover->mode == MODE_COVER) {
+ if (!cover->cover_filename) {
+ av_log(ctx, AV_LOG_ERROR, "cover filename not set\n");
+ return AVERROR(EINVAL);
+ }
+
+ cover->cover_frame = av_frame_alloc();
+ if (!cover->cover_frame)
+ return AVERROR(ENOMEM);
+
+ if ((ret = ff_load_image(cover->cover_frame->data, cover->cover_frame->linesize,
+ &cover->cover_frame->width, &cover->cover_frame->height,
+ &cover->cover_frame->format, cover->cover_filename, ctx)) < 0)
+ return ret;
+
+ if (cover->cover_frame->format != AV_PIX_FMT_YUV420P && cover->cover_frame->format != AV_PIX_FMT_YUVJ420P) {
+ av_log(ctx, AV_LOG_ERROR, "cover image is not a YUV420 image\n");
+ return AVERROR(EINVAL);
+ }
+ }
+
+ return 0;
+}
+
+static const AVFilterPad cover_rect_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad cover_rect_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_cover_rect = {
+ .name = "cover_rect",
+ .description = NULL_IF_CONFIG_SMALL("Find and cover a user specified object."),
+ .priv_size = sizeof(CoverContext),
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = cover_rect_inputs,
+ .outputs = cover_rect_outputs,
+ .priv_class = &cover_rect_class,
+};
diff --git a/libavfilter/vf_crop.c b/libavfilter/vf_crop.c
index 4122d52f88..85ea892d01 100644
--- a/libavfilter/vf_crop.c
+++ b/libavfilter/vf_crop.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2007 Bobby Bingham
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -38,13 +38,15 @@
#include "libavutil/opt.h"
static const char *const var_names[] = {
- "E",
- "PHI",
- "PI",
"in_w", "iw", ///< width of the input video
"in_h", "ih", ///< height of the input video
"out_w", "ow", ///< width of the cropped video
"out_h", "oh", ///< height of the cropped video
+ "a",
+ "sar",
+ "dar",
+ "hsub",
+ "vsub",
"x",
"y",
"n", ///< number of frame
@@ -54,16 +56,19 @@ static const char *const var_names[] = {
};
enum var_name {
- VAR_E,
- VAR_PHI,
- VAR_PI,
VAR_IN_W, VAR_IW,
VAR_IN_H, VAR_IH,
VAR_OUT_W, VAR_OW,
VAR_OUT_H, VAR_OH,
+ VAR_A,
+ VAR_SAR,
+ VAR_DAR,
+ VAR_HSUB,
+ VAR_VSUB,
VAR_X,
VAR_Y,
VAR_N,
+ VAR_POS,
VAR_T,
VAR_VARS_NB
};
@@ -75,44 +80,31 @@ typedef struct CropContext {
int w; ///< width of the cropped area
int h; ///< height of the cropped area
+ AVRational out_sar; ///< output sample aspect ratio
+ int keep_aspect; ///< keep display aspect ratio when cropping
+ int exact; ///< exact cropping, for subsampled formats
+
int max_step[4]; ///< max pixel step for each plane, expressed as a number of bytes
int hsub, vsub; ///< chroma subsampling
- char *x_expr, *y_expr, *ow_expr, *oh_expr;
+ char *x_expr, *y_expr, *w_expr, *h_expr;
AVExpr *x_pexpr, *y_pexpr; /* parsed expressions for x and y */
double var_values[VAR_VARS_NB];
} CropContext;
static int query_formats(AVFilterContext *ctx)
{
- static const enum AVPixelFormat pix_fmts[] = {
- AV_PIX_FMT_RGB48BE, AV_PIX_FMT_RGB48LE,
- AV_PIX_FMT_BGR48BE, AV_PIX_FMT_BGR48LE,
- AV_PIX_FMT_ARGB, AV_PIX_FMT_RGBA,
- AV_PIX_FMT_ABGR, AV_PIX_FMT_BGRA,
- AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
- AV_PIX_FMT_RGB565BE, AV_PIX_FMT_RGB565LE,
- AV_PIX_FMT_RGB555BE, AV_PIX_FMT_RGB555LE,
- AV_PIX_FMT_BGR565BE, AV_PIX_FMT_BGR565LE,
- AV_PIX_FMT_BGR555BE, AV_PIX_FMT_BGR555LE,
- AV_PIX_FMT_GRAY16BE, AV_PIX_FMT_GRAY16LE,
- AV_PIX_FMT_YUV420P16LE, AV_PIX_FMT_YUV420P16BE,
- AV_PIX_FMT_YUV422P16LE, AV_PIX_FMT_YUV422P16BE,
- AV_PIX_FMT_YUV444P16LE, AV_PIX_FMT_YUV444P16BE,
- AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P,
- AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P,
- AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
- AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P,
- AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ440P,
- AV_PIX_FMT_YUVA420P,
- AV_PIX_FMT_RGB8, AV_PIX_FMT_BGR8,
- AV_PIX_FMT_RGB4_BYTE, AV_PIX_FMT_BGR4_BYTE,
- AV_PIX_FMT_PAL8, AV_PIX_FMT_GRAY8,
- AV_PIX_FMT_NONE
- };
-
- ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ AVFilterFormats *formats = NULL;
+ int fmt, ret;
+
+ for (fmt = 0; av_pix_fmt_desc_get(fmt); fmt++) {
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
+ if (!(desc->flags & (AV_PIX_FMT_FLAG_HWACCEL | AV_PIX_FMT_FLAG_BITSTREAM)) &&
+ !((desc->log2_chroma_w || desc->log2_chroma_h) && !(desc->flags & AV_PIX_FMT_FLAG_PLANAR)) &&
+ (ret = ff_add_format(&formats, fmt)) < 0)
+ return ret;
+ }
- return 0;
+ return ff_set_common_formats(ctx, formats);
}
static av_cold void uninit(AVFilterContext *ctx)
@@ -135,7 +127,7 @@ static inline int normalize_double(int *n, double d)
*n = d > INT_MAX ? INT_MAX : INT_MIN;
ret = AVERROR(EINVAL);
} else
- *n = round(d);
+ *n = lrint(d);
return ret;
}
@@ -149,34 +141,37 @@ static int config_input(AVFilterLink *link)
const char *expr;
double res;
- s->var_values[VAR_E] = M_E;
- s->var_values[VAR_PHI] = M_PHI;
- s->var_values[VAR_PI] = M_PI;
s->var_values[VAR_IN_W] = s->var_values[VAR_IW] = ctx->inputs[0]->w;
s->var_values[VAR_IN_H] = s->var_values[VAR_IH] = ctx->inputs[0]->h;
+ s->var_values[VAR_A] = (float) link->w / link->h;
+ s->var_values[VAR_SAR] = link->sample_aspect_ratio.num ? av_q2d(link->sample_aspect_ratio) : 1;
+ s->var_values[VAR_DAR] = s->var_values[VAR_A] * s->var_values[VAR_SAR];
+ s->var_values[VAR_HSUB] = 1<<pix_desc->log2_chroma_w;
+ s->var_values[VAR_VSUB] = 1<<pix_desc->log2_chroma_h;
s->var_values[VAR_X] = NAN;
s->var_values[VAR_Y] = NAN;
s->var_values[VAR_OUT_W] = s->var_values[VAR_OW] = NAN;
s->var_values[VAR_OUT_H] = s->var_values[VAR_OH] = NAN;
s->var_values[VAR_N] = 0;
s->var_values[VAR_T] = NAN;
+ s->var_values[VAR_POS] = NAN;
av_image_fill_max_pixsteps(s->max_step, NULL, pix_desc);
s->hsub = pix_desc->log2_chroma_w;
s->vsub = pix_desc->log2_chroma_h;
- if ((ret = av_expr_parse_and_eval(&res, (expr = s->ow_expr),
+ if ((ret = av_expr_parse_and_eval(&res, (expr = s->w_expr),
var_names, s->var_values,
NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
goto fail_expr;
s->var_values[VAR_OUT_W] = s->var_values[VAR_OW] = res;
- if ((ret = av_expr_parse_and_eval(&res, (expr = s->oh_expr),
+ if ((ret = av_expr_parse_and_eval(&res, (expr = s->h_expr),
var_names, s->var_values,
NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
goto fail_expr;
s->var_values[VAR_OUT_H] = s->var_values[VAR_OH] = res;
/* evaluate again ow as it may depend on oh */
- if ((ret = av_expr_parse_and_eval(&res, (expr = s->ow_expr),
+ if ((ret = av_expr_parse_and_eval(&res, (expr = s->w_expr),
var_names, s->var_values,
NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
goto fail_expr;
@@ -187,11 +182,14 @@ static int config_input(AVFilterLink *link)
av_log(ctx, AV_LOG_ERROR,
"Too big value or invalid expression for out_w/ow or out_h/oh. "
"Maybe the expression for out_w:'%s' or for out_h:'%s' is self-referencing.\n",
- s->ow_expr, s->oh_expr);
+ s->w_expr, s->h_expr);
return AVERROR(EINVAL);
}
- s->w &= ~((1 << s->hsub) - 1);
- s->h &= ~((1 << s->vsub) - 1);
+
+ if (!s->exact) {
+ s->w &= ~((1 << s->hsub) - 1);
+ s->h &= ~((1 << s->vsub) - 1);
+ }
av_expr_free(s->x_pexpr);
av_expr_free(s->y_pexpr);
@@ -202,8 +200,17 @@ static int config_input(AVFilterLink *link)
NULL, NULL, NULL, NULL, 0, ctx)) < 0)
return AVERROR(EINVAL);
- av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d -> w:%d h:%d\n",
- link->w, link->h, s->w, s->h);
+ if (s->keep_aspect) {
+ AVRational dar = av_mul_q(link->sample_aspect_ratio,
+ (AVRational){ link->w, link->h });
+ av_reduce(&s->out_sar.num, &s->out_sar.den,
+ dar.num * s->h, dar.den * s->w, INT_MAX);
+ } else
+ s->out_sar = link->sample_aspect_ratio;
+
+ av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d sar:%d/%d -> w:%d h:%d sar:%d/%d\n",
+ link->w, link->h, link->sample_aspect_ratio.num, link->sample_aspect_ratio.den,
+ s->w, s->h, s->out_sar.num, s->out_sar.den);
if (s->w <= 0 || s->h <= 0 ||
s->w > link->w || s->h > link->h) {
@@ -216,8 +223,10 @@ static int config_input(AVFilterLink *link)
/* set default, required in the case the first computed value for x/y is NAN */
s->x = (link->w - s->w) / 2;
s->y = (link->h - s->h) / 2;
- s->x &= ~((1 << s->hsub) - 1);
- s->y &= ~((1 << s->vsub) - 1);
+ if (!s->exact) {
+ s->x &= ~((1 << s->hsub) - 1);
+ s->y &= ~((1 << s->vsub) - 1);
+ }
return 0;
fail_expr:
@@ -231,6 +240,7 @@ static int config_output(AVFilterLink *link)
link->w = s->w;
link->h = s->h;
+ link->sample_aspect_ratio = s->out_sar;
return 0;
}
@@ -245,8 +255,11 @@ static int filter_frame(AVFilterLink *link, AVFrame *frame)
frame->width = s->w;
frame->height = s->h;
+ s->var_values[VAR_N] = link->frame_count_out;
s->var_values[VAR_T] = frame->pts == AV_NOPTS_VALUE ?
NAN : frame->pts * av_q2d(link->time_base);
+ s->var_values[VAR_POS] = av_frame_get_pkt_pos(frame) == -1 ?
+ NAN : av_frame_get_pkt_pos(frame);
s->var_values[VAR_X] = av_expr_eval(s->x_pexpr, s->var_values, NULL);
s->var_values[VAR_Y] = av_expr_eval(s->y_pexpr, s->var_values, NULL);
s->var_values[VAR_X] = av_expr_eval(s->x_pexpr, s->var_values, NULL);
@@ -262,12 +275,14 @@ static int filter_frame(AVFilterLink *link, AVFrame *frame)
s->x = link->w - s->w;
if ((unsigned)s->y + (unsigned)s->h > link->h)
s->y = link->h - s->h;
- s->x &= ~((1 << s->hsub) - 1);
- s->y &= ~((1 << s->vsub) - 1);
+ if (!s->exact) {
+ s->x &= ~((1 << s->hsub) - 1);
+ s->y &= ~((1 << s->vsub) - 1);
+ }
- av_log(ctx, AV_LOG_TRACE, "n:%d t:%f x:%d y:%d x+w:%d y+h:%d\n",
- (int)s->var_values[VAR_N], s->var_values[VAR_T], s->x,
- s->y, s->x+s->w, s->y+s->h);
+ av_log(ctx, AV_LOG_TRACE, "n:%d t:%f pos:%f x:%d y:%d x+w:%d y+h:%d\n",
+ (int)s->var_values[VAR_N], s->var_values[VAR_T], s->var_values[VAR_POS],
+ s->x, s->y, s->x+s->w, s->y+s->h);
frame->data[0] += s->y * frame->linesize[0];
frame->data[0] += s->x * s->max_step[0];
@@ -287,37 +302,68 @@ static int filter_frame(AVFilterLink *link, AVFrame *frame)
frame->data[3] += s->x * s->max_step[3];
}
- s->var_values[VAR_N] += 1.0;
-
return ff_filter_frame(link->dst->outputs[0], frame);
}
+static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
+ char *res, int res_len, int flags)
+{
+ CropContext *s = ctx->priv;
+ int ret;
+
+ if ( !strcmp(cmd, "out_w") || !strcmp(cmd, "w")
+ || !strcmp(cmd, "out_h") || !strcmp(cmd, "h")
+ || !strcmp(cmd, "x") || !strcmp(cmd, "y")) {
+
+ int old_x = s->x;
+ int old_y = s->y;
+ int old_w = s->w;
+ int old_h = s->h;
+
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFilterLink *inlink = ctx->inputs[0];
+
+ av_opt_set(s, cmd, args, 0);
+
+ if ((ret = config_input(inlink)) < 0) {
+ s->x = old_x;
+ s->y = old_y;
+ s->w = old_w;
+ s->h = old_h;
+ return ret;
+ }
+
+ ret = config_output(outlink);
+
+ } else
+ ret = AVERROR(ENOSYS);
+
+ return ret;
+}
+
#define OFFSET(x) offsetof(CropContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
-static const AVOption options[] = {
- { "out_w", "Output video width", OFFSET(ow_expr), AV_OPT_TYPE_STRING, { .str = "iw" }, .flags = FLAGS },
- { "out_h", "Output video height", OFFSET(oh_expr), AV_OPT_TYPE_STRING, { .str = "ih" }, .flags = FLAGS },
- { "x", "Horizontal position in the input video of the left edge of the cropped output video",
- OFFSET(x_expr), AV_OPT_TYPE_STRING, { .str = "(in_w - out_w) / 2" }, .flags = FLAGS },
- { "y", "Vertical position in the input video of the top edge of the cropped output video",
- OFFSET(y_expr), AV_OPT_TYPE_STRING, { .str = "(in_h - out_h) / 2" }, .flags = FLAGS },
- { NULL },
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption crop_options[] = {
+ { "out_w", "set the width crop area expression", OFFSET(w_expr), AV_OPT_TYPE_STRING, {.str = "iw"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "w", "set the width crop area expression", OFFSET(w_expr), AV_OPT_TYPE_STRING, {.str = "iw"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "out_h", "set the height crop area expression", OFFSET(h_expr), AV_OPT_TYPE_STRING, {.str = "ih"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "h", "set the height crop area expression", OFFSET(h_expr), AV_OPT_TYPE_STRING, {.str = "ih"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "x", "set the x crop area expression", OFFSET(x_expr), AV_OPT_TYPE_STRING, {.str = "(in_w-out_w)/2"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "y", "set the y crop area expression", OFFSET(y_expr), AV_OPT_TYPE_STRING, {.str = "(in_h-out_h)/2"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "keep_aspect", "keep aspect ratio", OFFSET(keep_aspect), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS },
+ { "exact", "do exact cropping", OFFSET(exact), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS },
+ { NULL }
};
-static const AVClass crop_class = {
- .class_name = "crop",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(crop);
static const AVFilterPad avfilter_vf_crop_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .filter_frame = filter_frame,
- .get_video_buffer = ff_null_get_video_buffer,
- .config_props = config_input,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
},
{ NULL }
};
@@ -332,15 +378,13 @@ static const AVFilterPad avfilter_vf_crop_outputs[] = {
};
AVFilter ff_vf_crop = {
- .name = "crop",
- .description = NULL_IF_CONFIG_SMALL("Crop the input video to width:height:x:y."),
-
- .priv_size = sizeof(CropContext),
- .priv_class = &crop_class,
-
- .query_formats = query_formats,
- .uninit = uninit,
-
- .inputs = avfilter_vf_crop_inputs,
- .outputs = avfilter_vf_crop_outputs,
+ .name = "crop",
+ .description = NULL_IF_CONFIG_SMALL("Crop the input video."),
+ .priv_size = sizeof(CropContext),
+ .priv_class = &crop_class,
+ .query_formats = query_formats,
+ .uninit = uninit,
+ .inputs = avfilter_vf_crop_inputs,
+ .outputs = avfilter_vf_crop_outputs,
+ .process_command = process_command,
};
diff --git a/libavfilter/vf_cropdetect.c b/libavfilter/vf_cropdetect.c
index 14c26c748f..4a89875502 100644
--- a/libavfilter/vf_cropdetect.c
+++ b/libavfilter/vf_cropdetect.c
@@ -1,19 +1,19 @@
/*
* Copyright (c) 2002 A'rpi
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or modify
+ * FFmpeg is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
- * with Libav; if not, write to the Free Software Foundation, Inc.,
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
@@ -23,8 +23,6 @@
* Ported from MPlayer libmpcodecs/vf_cropdetect.c.
*/
-#include <stdio.h>
-
#include "libavutil/imgutils.h"
#include "libavutil/internal.h"
#include "libavutil/opt.h"
@@ -37,11 +35,12 @@
typedef struct CropDetectContext {
const AVClass *class;
int x1, y1, x2, y2;
- int limit;
+ float limit;
int round;
int reset_count;
int frame_nb;
int max_pixsteps[4];
+ int max_outliers;
} CropDetectContext;
static int query_formats(AVFilterContext *ctx)
@@ -51,28 +50,66 @@ static int query_formats(AVFilterContext *ctx)
AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVJ422P,
AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUVJ444P,
AV_PIX_FMT_YUV411P, AV_PIX_FMT_GRAY8,
+ AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_YUV420P9 , AV_PIX_FMT_YUV422P9 , AV_PIX_FMT_YUV444P9,
+ AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
+ AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12,
+ AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV444P14,
+ AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
AV_PIX_FMT_NV12, AV_PIX_FMT_NV21,
+ AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
+ AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA,
AV_PIX_FMT_NONE
};
- ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
- return 0;
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
}
static int checkline(void *ctx, const unsigned char *src, int stride, int len, int bpp)
{
int total = 0;
int div = len;
+ const uint16_t *src16 = (const uint16_t *)src;
switch (bpp) {
case 1:
+ while (len >= 8) {
+ total += src[ 0] + src[ stride] + src[2*stride] + src[3*stride]
+ + src[4*stride] + src[5*stride] + src[6*stride] + src[7*stride];
+ src += 8*stride;
+ len -= 8;
+ }
while (--len >= 0) {
total += src[0];
src += stride;
}
break;
+ case 2:
+ stride >>= 1;
+ while (len >= 8) {
+ total += src16[ 0] + src16[ stride] + src16[2*stride] + src16[3*stride]
+ + src16[4*stride] + src16[5*stride] + src16[6*stride] + src16[7*stride];
+ src16 += 8*stride;
+ len -= 8;
+ }
+ while (--len >= 0) {
+ total += src16[0];
+ src16 += stride;
+ }
+ break;
case 3:
case 4:
+ while (len >= 4) {
+ total += src[0] + src[1 ] + src[2 ]
+ + src[ stride] + src[1+ stride] + src[2+ stride]
+ + src[2*stride] + src[1+2*stride] + src[2+2*stride]
+ + src[3*stride] + src[1+3*stride] + src[2+3*stride];
+ src += 4*stride;
+ len -= 4;
+ }
while (--len >= 0) {
total += src[0] + src[1] + src[2];
src += stride;
@@ -92,7 +129,7 @@ static av_cold int init(AVFilterContext *ctx)
s->frame_nb = -2;
- av_log(ctx, AV_LOG_VERBOSE, "limit:%d round:%d reset_count:%d\n",
+ av_log(ctx, AV_LOG_VERBOSE, "limit:%f round:%d reset_count:%d\n",
s->limit, s->round, s->reset_count);
return 0;
@@ -102,9 +139,12 @@ static int config_input(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->dst;
CropDetectContext *s = ctx->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+
+ av_image_fill_max_pixsteps(s->max_pixsteps, NULL, desc);
- av_image_fill_max_pixsteps(s->max_pixsteps, NULL,
- av_pix_fmt_desc_get(inlink->format));
+ if (s->limit < 1.0)
+ s->limit *= (1 << desc->comp[0].depth) - 1;
s->x1 = inlink->w - 1;
s->y1 = inlink->h - 1;
@@ -114,15 +154,23 @@ static int config_input(AVFilterLink *inlink)
return 0;
}
+#define SET_META(key, value) \
+ av_dict_set_int(metadata, key, value, 0)
+
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
AVFilterContext *ctx = inlink->dst;
CropDetectContext *s = ctx->priv;
int bpp = s->max_pixsteps[0];
int w, h, x, y, shrink_by;
+ AVDictionary **metadata;
+ int outliers, last_y;
+ int limit = lrint(s->limit);
// ignore first 2 frames - they may be empty
if (++s->frame_nb > 0) {
+ metadata = avpriv_frame_get_metadatap(frame);
+
// Reset the crop area every reset_count frames, if reset_count is > 0
if (s->reset_count > 0 && s->frame_nb > s->reset_count) {
s->x1 = frame->width - 1;
@@ -132,33 +180,23 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
s->frame_nb = 1;
}
- for (y = 0; y < s->y1; y++) {
- if (checkline(ctx, frame->data[0] + frame->linesize[0] * y, bpp, frame->width, bpp) > s->limit) {
- s->y1 = y;
- break;
- }
+#define FIND(DST, FROM, NOEND, INC, STEP0, STEP1, LEN) \
+ outliers = 0;\
+ for (last_y = y = FROM; NOEND; y = y INC) {\
+ if (checkline(ctx, frame->data[0] + STEP0 * y, STEP1, LEN, bpp) > limit) {\
+ if (++outliers > s->max_outliers) { \
+ DST = last_y;\
+ break;\
+ }\
+ } else\
+ last_y = y INC;\
}
- for (y = frame->height - 1; y > s->y2; y--) {
- if (checkline(ctx, frame->data[0] + frame->linesize[0] * y, bpp, frame->width, bpp) > s->limit) {
- s->y2 = y;
- break;
- }
- }
+ FIND(s->y1, 0, y < s->y1, +1, frame->linesize[0], bpp, frame->width);
+ FIND(s->y2, frame->height - 1, y > FFMAX(s->y2, s->y1), -1, frame->linesize[0], bpp, frame->width);
+ FIND(s->x1, 0, y < s->x1, +1, bpp, frame->linesize[0], frame->height);
+ FIND(s->x2, frame->width - 1, y > FFMAX(s->x2, s->x1), -1, bpp, frame->linesize[0], frame->height);
- for (y = 0; y < s->x1; y++) {
- if (checkline(ctx, frame->data[0] + bpp*y, frame->linesize[0], frame->height, bpp) > s->limit) {
- s->x1 = y;
- break;
- }
- }
-
- for (y = frame->width - 1; y > s->x2; y--) {
- if (checkline(ctx, frame->data[0] + bpp*y, frame->linesize[0], frame->height, bpp) > s->limit) {
- s->x2 = y;
- break;
- }
- }
// round x and y (up), important for yuv colorspaces
// make sure they stay rounded!
@@ -183,6 +221,15 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
h -= shrink_by;
y += (shrink_by/2 + 1) & ~1;
+ SET_META("lavfi.cropdetect.x1", s->x1);
+ SET_META("lavfi.cropdetect.x2", s->x2);
+ SET_META("lavfi.cropdetect.y1", s->y1);
+ SET_META("lavfi.cropdetect.y2", s->y2);
+ SET_META("lavfi.cropdetect.w", w);
+ SET_META("lavfi.cropdetect.h", h);
+ SET_META("lavfi.cropdetect.x", x);
+ SET_META("lavfi.cropdetect.y", y);
+
av_log(ctx, AV_LOG_INFO,
"x1:%d x2:%d y1:%d y2:%d w:%d h:%d x:%d y:%d pts:%"PRId64" t:%f crop=%d:%d:%d:%d\n",
s->x1, s->x2, s->y1, s->y2, w, h, x, y, frame->pts,
@@ -194,28 +241,25 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
}
#define OFFSET(x) offsetof(CropDetectContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
-static const AVOption options[] = {
- { "limit", "Threshold below which the pixel is considered black", OFFSET(limit), AV_OPT_TYPE_INT, { .i64 = 24 }, 0, INT_MAX, FLAGS },
- { "round", "Value by which the width/height should be divisible", OFFSET(round), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, FLAGS },
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption cropdetect_options[] = {
+ { "limit", "Threshold below which the pixel is considered black", OFFSET(limit), AV_OPT_TYPE_FLOAT, { .dbl = 24.0/255 }, 0, 65535, FLAGS },
+ { "round", "Value by which the width/height should be divisible", OFFSET(round), AV_OPT_TYPE_INT, { .i64 = 16 }, 0, INT_MAX, FLAGS },
{ "reset", "Recalculate the crop area after this many frames", OFFSET(reset_count), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, FLAGS },
- { NULL },
+ { "reset_count", "Recalculate the crop area after this many frames",OFFSET(reset_count),AV_OPT_TYPE_INT,{ .i64 = 0 }, 0, INT_MAX, FLAGS },
+ { "max_outliers", "Threshold count of outliers", OFFSET(max_outliers),AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, FLAGS },
+ { NULL }
};
-static const AVClass cropdetect_class = {
- .class_name = "cropdetect",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(cropdetect);
static const AVFilterPad avfilter_vf_cropdetect_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .config_props = config_input,
- .get_video_buffer = ff_null_get_video_buffer,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
},
{ NULL }
};
@@ -229,16 +273,13 @@ static const AVFilterPad avfilter_vf_cropdetect_outputs[] = {
};
AVFilter ff_vf_cropdetect = {
- .name = "cropdetect",
- .description = NULL_IF_CONFIG_SMALL("Auto-detect crop size."),
-
- .priv_size = sizeof(CropDetectContext),
- .priv_class = &cropdetect_class,
- .init = init,
-
+ .name = "cropdetect",
+ .description = NULL_IF_CONFIG_SMALL("Auto-detect crop size."),
+ .priv_size = sizeof(CropDetectContext),
+ .priv_class = &cropdetect_class,
+ .init = init,
.query_formats = query_formats,
-
- .inputs = avfilter_vf_cropdetect_inputs,
-
- .outputs = avfilter_vf_cropdetect_outputs,
+ .inputs = avfilter_vf_cropdetect_inputs,
+ .outputs = avfilter_vf_cropdetect_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
diff --git a/libavfilter/vf_curves.c b/libavfilter/vf_curves.c
new file mode 100644
index 0000000000..69ec1084bb
--- /dev/null
+++ b/libavfilter/vf_curves.c
@@ -0,0 +1,696 @@
+/*
+ * Copyright (c) 2013 Clément Bœsch
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/opt.h"
+#include "libavutil/bprint.h"
+#include "libavutil/eval.h"
+#include "libavutil/file.h"
+#include "libavutil/intreadwrite.h"
+#include "libavutil/avassert.h"
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+#include "drawutils.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+#define R 0
+#define G 1
+#define B 2
+#define A 3
+
+struct keypoint {
+ double x, y;
+ struct keypoint *next;
+};
+
+#define NB_COMP 3
+
+enum preset {
+ PRESET_NONE,
+ PRESET_COLOR_NEGATIVE,
+ PRESET_CROSS_PROCESS,
+ PRESET_DARKER,
+ PRESET_INCREASE_CONTRAST,
+ PRESET_LIGHTER,
+ PRESET_LINEAR_CONTRAST,
+ PRESET_MEDIUM_CONTRAST,
+ PRESET_NEGATIVE,
+ PRESET_STRONG_CONTRAST,
+ PRESET_VINTAGE,
+ NB_PRESETS,
+};
+
+typedef struct {
+ const AVClass *class;
+ int preset;
+ char *comp_points_str[NB_COMP + 1];
+ char *comp_points_str_all;
+ uint16_t *graph[NB_COMP + 1];
+ int lut_size;
+ char *psfile;
+ uint8_t rgba_map[4];
+ int step;
+ char *plot_filename;
+ int is_16bit;
+} CurvesContext;
+
+typedef struct ThreadData {
+ AVFrame *in, *out;
+} ThreadData;
+
+#define OFFSET(x) offsetof(CurvesContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption curves_options[] = {
+ { "preset", "select a color curves preset", OFFSET(preset), AV_OPT_TYPE_INT, {.i64=PRESET_NONE}, PRESET_NONE, NB_PRESETS-1, FLAGS, "preset_name" },
+ { "none", NULL, 0, AV_OPT_TYPE_CONST, {.i64=PRESET_NONE}, INT_MIN, INT_MAX, FLAGS, "preset_name" },
+ { "color_negative", NULL, 0, AV_OPT_TYPE_CONST, {.i64=PRESET_COLOR_NEGATIVE}, INT_MIN, INT_MAX, FLAGS, "preset_name" },
+ { "cross_process", NULL, 0, AV_OPT_TYPE_CONST, {.i64=PRESET_CROSS_PROCESS}, INT_MIN, INT_MAX, FLAGS, "preset_name" },
+ { "darker", NULL, 0, AV_OPT_TYPE_CONST, {.i64=PRESET_DARKER}, INT_MIN, INT_MAX, FLAGS, "preset_name" },
+ { "increase_contrast", NULL, 0, AV_OPT_TYPE_CONST, {.i64=PRESET_INCREASE_CONTRAST}, INT_MIN, INT_MAX, FLAGS, "preset_name" },
+ { "lighter", NULL, 0, AV_OPT_TYPE_CONST, {.i64=PRESET_LIGHTER}, INT_MIN, INT_MAX, FLAGS, "preset_name" },
+ { "linear_contrast", NULL, 0, AV_OPT_TYPE_CONST, {.i64=PRESET_LINEAR_CONTRAST}, INT_MIN, INT_MAX, FLAGS, "preset_name" },
+ { "medium_contrast", NULL, 0, AV_OPT_TYPE_CONST, {.i64=PRESET_MEDIUM_CONTRAST}, INT_MIN, INT_MAX, FLAGS, "preset_name" },
+ { "negative", NULL, 0, AV_OPT_TYPE_CONST, {.i64=PRESET_NEGATIVE}, INT_MIN, INT_MAX, FLAGS, "preset_name" },
+ { "strong_contrast", NULL, 0, AV_OPT_TYPE_CONST, {.i64=PRESET_STRONG_CONTRAST}, INT_MIN, INT_MAX, FLAGS, "preset_name" },
+ { "vintage", NULL, 0, AV_OPT_TYPE_CONST, {.i64=PRESET_VINTAGE}, INT_MIN, INT_MAX, FLAGS, "preset_name" },
+ { "master","set master points coordinates",OFFSET(comp_points_str[NB_COMP]), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
+ { "m", "set master points coordinates",OFFSET(comp_points_str[NB_COMP]), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
+ { "red", "set red points coordinates", OFFSET(comp_points_str[0]), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
+ { "r", "set red points coordinates", OFFSET(comp_points_str[0]), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
+ { "green", "set green points coordinates", OFFSET(comp_points_str[1]), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
+ { "g", "set green points coordinates", OFFSET(comp_points_str[1]), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
+ { "blue", "set blue points coordinates", OFFSET(comp_points_str[2]), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
+ { "b", "set blue points coordinates", OFFSET(comp_points_str[2]), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
+ { "all", "set points coordinates for all components", OFFSET(comp_points_str_all), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
+ { "psfile", "set Photoshop curves file name", OFFSET(psfile), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
+ { "plot", "save Gnuplot script of the curves in specified file", OFFSET(plot_filename), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(curves);
+
+static const struct {
+ const char *r;
+ const char *g;
+ const char *b;
+ const char *master;
+} curves_presets[] = {
+ [PRESET_COLOR_NEGATIVE] = {
+ "0.129/1 0.466/0.498 0.725/0",
+ "0.109/1 0.301/0.498 0.517/0",
+ "0.098/1 0.235/0.498 0.423/0",
+ },
+ [PRESET_CROSS_PROCESS] = {
+ "0/0 0.25/0.156 0.501/0.501 0.686/0.745 1/1",
+ "0/0 0.25/0.188 0.38/0.501 0.745/0.815 1/0.815",
+ "0/0 0.231/0.094 0.709/0.874 1/1",
+ },
+ [PRESET_DARKER] = { .master = "0/0 0.5/0.4 1/1" },
+ [PRESET_INCREASE_CONTRAST] = { .master = "0/0 0.149/0.066 0.831/0.905 0.905/0.98 1/1" },
+ [PRESET_LIGHTER] = { .master = "0/0 0.4/0.5 1/1" },
+ [PRESET_LINEAR_CONTRAST] = { .master = "0/0 0.305/0.286 0.694/0.713 1/1" },
+ [PRESET_MEDIUM_CONTRAST] = { .master = "0/0 0.286/0.219 0.639/0.643 1/1" },
+ [PRESET_NEGATIVE] = { .master = "0/1 1/0" },
+ [PRESET_STRONG_CONTRAST] = { .master = "0/0 0.301/0.196 0.592/0.6 0.686/0.737 1/1" },
+ [PRESET_VINTAGE] = {
+ "0/0.11 0.42/0.51 1/0.95",
+ "0/0 0.50/0.48 1/1",
+ "0/0.22 0.49/0.44 1/0.8",
+ }
+};
+
+static struct keypoint *make_point(double x, double y, struct keypoint *next)
+{
+ struct keypoint *point = av_mallocz(sizeof(*point));
+
+ if (!point)
+ return NULL;
+ point->x = x;
+ point->y = y;
+ point->next = next;
+ return point;
+}
+
+static int parse_points_str(AVFilterContext *ctx, struct keypoint **points, const char *s,
+ int lut_size)
+{
+ char *p = (char *)s; // strtod won't alter the string
+ struct keypoint *last = NULL;
+ const int scale = lut_size - 1;
+
+ /* construct a linked list based on the key points string */
+ while (p && *p) {
+ struct keypoint *point = make_point(0, 0, NULL);
+ if (!point)
+ return AVERROR(ENOMEM);
+ point->x = av_strtod(p, &p); if (p && *p) p++;
+ point->y = av_strtod(p, &p); if (p && *p) p++;
+ if (point->x < 0 || point->x > 1 || point->y < 0 || point->y > 1) {
+ av_log(ctx, AV_LOG_ERROR, "Invalid key point coordinates (%f;%f), "
+ "x and y must be in the [0;1] range.\n", point->x, point->y);
+ return AVERROR(EINVAL);
+ }
+ if (!*points)
+ *points = point;
+ if (last) {
+ if ((int)(last->x * scale) >= (int)(point->x * scale)) {
+ av_log(ctx, AV_LOG_ERROR, "Key point coordinates (%f;%f) "
+ "and (%f;%f) are too close from each other or not "
+ "strictly increasing on the x-axis\n",
+ last->x, last->y, point->x, point->y);
+ return AVERROR(EINVAL);
+ }
+ last->next = point;
+ }
+ last = point;
+ }
+
+ if (*points && !(*points)->next) {
+ av_log(ctx, AV_LOG_WARNING, "Only one point (at (%f;%f)) is defined, "
+ "this is unlikely to behave as you expect. You probably want"
+ "at least 2 points.",
+ (*points)->x, (*points)->y);
+ }
+
+ return 0;
+}
+
+static int get_nb_points(const struct keypoint *d)
+{
+ int n = 0;
+ while (d) {
+ n++;
+ d = d->next;
+ }
+ return n;
+}
+
+/**
+ * Natural cubic spline interpolation
+ * Finding curves using Cubic Splines notes by Steven Rauch and John Stockie.
+ * @see http://people.math.sfu.ca/~stockie/teaching/macm316/notes/splines.pdf
+ */
+
+#define CLIP(v) (nbits == 8 ? av_clip_uint8(v) : av_clip_uint16(v))
+
+static inline int interpolate(void *log_ctx, uint16_t *y,
+ const struct keypoint *points, int nbits)
+{
+ int i, ret = 0;
+ const struct keypoint *point = points;
+ double xprev = 0;
+ const int lut_size = 1<<nbits;
+ const int scale = lut_size - 1;
+
+ double (*matrix)[3];
+ double *h, *r;
+ const int n = get_nb_points(points); // number of splines
+
+ if (n == 0) {
+ for (i = 0; i < lut_size; i++)
+ y[i] = i;
+ return 0;
+ }
+
+ if (n == 1) {
+ for (i = 0; i < lut_size; i++)
+ y[i] = CLIP(point->y * scale);
+ return 0;
+ }
+
+ matrix = av_calloc(n, sizeof(*matrix));
+ h = av_malloc((n - 1) * sizeof(*h));
+ r = av_calloc(n, sizeof(*r));
+
+ if (!matrix || !h || !r) {
+ ret = AVERROR(ENOMEM);
+ goto end;
+ }
+
+ /* h(i) = x(i+1) - x(i) */
+ i = -1;
+ for (point = points; point; point = point->next) {
+ if (i != -1)
+ h[i] = point->x - xprev;
+ xprev = point->x;
+ i++;
+ }
+
+ /* right-side of the polynomials, will be modified to contains the solution */
+ point = points;
+ for (i = 1; i < n - 1; i++) {
+ const double yp = point->y;
+ const double yc = point->next->y;
+ const double yn = point->next->next->y;
+ r[i] = 6 * ((yn-yc)/h[i] - (yc-yp)/h[i-1]);
+ point = point->next;
+ }
+
+#define BD 0 /* sub diagonal (below main) */
+#define MD 1 /* main diagonal (center) */
+#define AD 2 /* sup diagonal (above main) */
+
+ /* left side of the polynomials into a tridiagonal matrix. */
+ matrix[0][MD] = matrix[n - 1][MD] = 1;
+ for (i = 1; i < n - 1; i++) {
+ matrix[i][BD] = h[i-1];
+ matrix[i][MD] = 2 * (h[i-1] + h[i]);
+ matrix[i][AD] = h[i];
+ }
+
+ /* tridiagonal solving of the linear system */
+ for (i = 1; i < n; i++) {
+ const double den = matrix[i][MD] - matrix[i][BD] * matrix[i-1][AD];
+ const double k = den ? 1./den : 1.;
+ matrix[i][AD] *= k;
+ r[i] = (r[i] - matrix[i][BD] * r[i - 1]) * k;
+ }
+ for (i = n - 2; i >= 0; i--)
+ r[i] = r[i] - matrix[i][AD] * r[i + 1];
+
+ point = points;
+
+ /* left padding */
+ for (i = 0; i < (int)(point->x * scale); i++)
+ y[i] = CLIP(point->y * scale);
+
+ /* compute the graph with x=[x0..xN] */
+ i = 0;
+ av_assert0(point->next); // always at least 2 key points
+ while (point->next) {
+ const double yc = point->y;
+ const double yn = point->next->y;
+
+ const double a = yc;
+ const double b = (yn-yc)/h[i] - h[i]*r[i]/2. - h[i]*(r[i+1]-r[i])/6.;
+ const double c = r[i] / 2.;
+ const double d = (r[i+1] - r[i]) / (6.*h[i]);
+
+ int x;
+ const int x_start = point->x * scale;
+ const int x_end = point->next->x * scale;
+
+ av_assert0(x_start >= 0 && x_start < lut_size &&
+ x_end >= 0 && x_end < lut_size);
+
+ for (x = x_start; x <= x_end; x++) {
+ const double xx = (x - x_start) * 1./scale;
+ const double yy = a + b*xx + c*xx*xx + d*xx*xx*xx;
+ y[x] = CLIP(yy * scale);
+ av_log(log_ctx, AV_LOG_DEBUG, "f(%f)=%f -> y[%d]=%d\n", xx, yy, x, y[x]);
+ }
+
+ point = point->next;
+ i++;
+ }
+
+ /* right padding */
+ for (i = (int)(point->x * scale); i < lut_size; i++)
+ y[i] = CLIP(point->y * scale);
+
+end:
+ av_free(matrix);
+ av_free(h);
+ av_free(r);
+ return ret;
+}
+
+#define DECLARE_INTERPOLATE_FUNC(nbits) \
+static int interpolate##nbits(void *log_ctx, uint16_t *y, \
+ const struct keypoint *points) \
+{ \
+ return interpolate(log_ctx, y, points, nbits); \
+}
+
+DECLARE_INTERPOLATE_FUNC(8)
+DECLARE_INTERPOLATE_FUNC(16)
+
+static int parse_psfile(AVFilterContext *ctx, const char *fname)
+{
+ CurvesContext *curves = ctx->priv;
+ uint8_t *buf;
+ size_t size;
+ int i, ret, av_unused(version), nb_curves;
+ AVBPrint ptstr;
+ static const int comp_ids[] = {3, 0, 1, 2};
+
+ av_bprint_init(&ptstr, 0, AV_BPRINT_SIZE_AUTOMATIC);
+
+ ret = av_file_map(fname, &buf, &size, 0, NULL);
+ if (ret < 0)
+ return ret;
+
+#define READ16(dst) do { \
+ if (size < 2) { \
+ ret = AVERROR_INVALIDDATA; \
+ goto end; \
+ } \
+ dst = AV_RB16(buf); \
+ buf += 2; \
+ size -= 2; \
+} while (0)
+
+ READ16(version);
+ READ16(nb_curves);
+ for (i = 0; i < FFMIN(nb_curves, FF_ARRAY_ELEMS(comp_ids)); i++) {
+ int nb_points, n;
+ av_bprint_clear(&ptstr);
+ READ16(nb_points);
+ for (n = 0; n < nb_points; n++) {
+ int y, x;
+ READ16(y);
+ READ16(x);
+ av_bprintf(&ptstr, "%f/%f ", x / 255., y / 255.);
+ }
+ if (*ptstr.str) {
+ char **pts = &curves->comp_points_str[comp_ids[i]];
+ if (!*pts) {
+ *pts = av_strdup(ptstr.str);
+ av_log(ctx, AV_LOG_DEBUG, "curves %d (intid=%d) [%d points]: [%s]\n",
+ i, comp_ids[i], nb_points, *pts);
+ if (!*pts) {
+ ret = AVERROR(ENOMEM);
+ goto end;
+ }
+ }
+ }
+ }
+end:
+ av_bprint_finalize(&ptstr, NULL);
+ av_file_unmap(buf, size);
+ return ret;
+}
+
+static int dump_curves(const char *fname, uint16_t *graph[NB_COMP + 1],
+ struct keypoint *comp_points[NB_COMP + 1],
+ int lut_size)
+{
+ int i;
+ AVBPrint buf;
+ const double scale = 1. / (lut_size - 1);
+ static const char * const colors[] = { "red", "green", "blue", "#404040", };
+ FILE *f = av_fopen_utf8(fname, "w");
+
+ av_assert0(FF_ARRAY_ELEMS(colors) == NB_COMP + 1);
+
+ if (!f) {
+ int ret = AVERROR(errno);
+ av_log(NULL, AV_LOG_ERROR, "Cannot open file '%s' for writing: %s\n",
+ fname, av_err2str(ret));
+ return ret;
+ }
+
+ av_bprint_init(&buf, 0, AV_BPRINT_SIZE_UNLIMITED);
+
+ av_bprintf(&buf, "set xtics 0.1\n");
+ av_bprintf(&buf, "set ytics 0.1\n");
+ av_bprintf(&buf, "set size square\n");
+ av_bprintf(&buf, "set grid\n");
+
+ for (i = 0; i < FF_ARRAY_ELEMS(colors); i++) {
+ av_bprintf(&buf, "%s'-' using 1:2 with lines lc '%s' title ''",
+ i ? ", " : "plot ", colors[i]);
+ if (comp_points[i])
+ av_bprintf(&buf, ", '-' using 1:2 with points pointtype 3 lc '%s' title ''",
+ colors[i]);
+ }
+ av_bprintf(&buf, "\n");
+
+ for (i = 0; i < FF_ARRAY_ELEMS(colors); i++) {
+ int x;
+
+ /* plot generated values */
+ for (x = 0; x < lut_size; x++)
+ av_bprintf(&buf, "%f %f\n", x * scale, graph[i][x] * scale);
+ av_bprintf(&buf, "e\n");
+
+ /* plot user knots */
+ if (comp_points[i]) {
+ const struct keypoint *point = comp_points[i];
+
+ while (point) {
+ av_bprintf(&buf, "%f %f\n", point->x, point->y);
+ point = point->next;
+ }
+ av_bprintf(&buf, "e\n");
+ }
+ }
+
+ fwrite(buf.str, 1, buf.len, f);
+ fclose(f);
+ av_bprint_finalize(&buf, NULL);
+ return 0;
+}
+
+static av_cold int curves_init(AVFilterContext *ctx)
+{
+ int i, ret;
+ CurvesContext *curves = ctx->priv;
+ char **pts = curves->comp_points_str;
+ const char *allp = curves->comp_points_str_all;
+
+ //if (!allp && curves->preset != PRESET_NONE && curves_presets[curves->preset].all)
+ // allp = curves_presets[curves->preset].all;
+
+ if (allp) {
+ for (i = 0; i < NB_COMP; i++) {
+ if (!pts[i])
+ pts[i] = av_strdup(allp);
+ if (!pts[i])
+ return AVERROR(ENOMEM);
+ }
+ }
+
+ if (curves->psfile) {
+ ret = parse_psfile(ctx, curves->psfile);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (curves->preset != PRESET_NONE) {
+#define SET_COMP_IF_NOT_SET(n, name) do { \
+ if (!pts[n] && curves_presets[curves->preset].name) { \
+ pts[n] = av_strdup(curves_presets[curves->preset].name); \
+ if (!pts[n]) \
+ return AVERROR(ENOMEM); \
+ } \
+} while (0)
+ SET_COMP_IF_NOT_SET(0, r);
+ SET_COMP_IF_NOT_SET(1, g);
+ SET_COMP_IF_NOT_SET(2, b);
+ SET_COMP_IF_NOT_SET(3, master);
+ }
+
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
+ AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA,
+ AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR,
+ AV_PIX_FMT_0RGB, AV_PIX_FMT_0BGR,
+ AV_PIX_FMT_RGB0, AV_PIX_FMT_BGR0,
+ AV_PIX_FMT_RGB48, AV_PIX_FMT_BGR48,
+ AV_PIX_FMT_RGBA64, AV_PIX_FMT_BGRA64,
+ AV_PIX_FMT_NONE
+ };
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ int i, j, ret;
+ AVFilterContext *ctx = inlink->dst;
+ CurvesContext *curves = ctx->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ char **pts = curves->comp_points_str;
+ struct keypoint *comp_points[NB_COMP + 1] = {0};
+
+ ff_fill_rgba_map(curves->rgba_map, inlink->format);
+ curves->is_16bit = desc->comp[0].depth > 8;
+ curves->lut_size = curves->is_16bit ? 1<<16 : 1<<8;
+ curves->step = av_get_padded_bits_per_pixel(desc) >> (3 + curves->is_16bit);
+
+ for (i = 0; i < NB_COMP + 1; i++) {
+ curves->graph[i] = av_mallocz_array(curves->lut_size, sizeof(*curves->graph[0]));
+ if (!curves->graph[i])
+ return AVERROR(ENOMEM);
+ ret = parse_points_str(ctx, comp_points + i, curves->comp_points_str[i], curves->lut_size);
+ if (ret < 0)
+ return ret;
+ if (curves->is_16bit) ret = interpolate16(ctx, curves->graph[i], comp_points[i]);
+ else ret = interpolate8(ctx, curves->graph[i], comp_points[i]);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (pts[NB_COMP]) {
+ for (i = 0; i < NB_COMP; i++)
+ for (j = 0; j < curves->lut_size; j++)
+ curves->graph[i][j] = curves->graph[NB_COMP][curves->graph[i][j]];
+ }
+
+ if (av_log_get_level() >= AV_LOG_VERBOSE) {
+ for (i = 0; i < NB_COMP; i++) {
+ const struct keypoint *point = comp_points[i];
+ av_log(ctx, AV_LOG_VERBOSE, "#%d points:", i);
+ while (point) {
+ av_log(ctx, AV_LOG_VERBOSE, " (%f;%f)", point->x, point->y);
+ point = point->next;
+ }
+ }
+ }
+
+ if (curves->plot_filename)
+ dump_curves(curves->plot_filename, curves->graph, comp_points, curves->lut_size);
+
+ for (i = 0; i < NB_COMP + 1; i++) {
+ struct keypoint *point = comp_points[i];
+ while (point) {
+ struct keypoint *next = point->next;
+ av_free(point);
+ point = next;
+ }
+ }
+
+ return 0;
+}
+
+static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
+{
+ int x, y;
+ const CurvesContext *curves = ctx->priv;
+ const ThreadData *td = arg;
+ const AVFrame *in = td->in;
+ const AVFrame *out = td->out;
+ const int direct = out == in;
+ const int step = curves->step;
+ const uint8_t r = curves->rgba_map[R];
+ const uint8_t g = curves->rgba_map[G];
+ const uint8_t b = curves->rgba_map[B];
+ const uint8_t a = curves->rgba_map[A];
+ const int slice_start = (in->height * jobnr ) / nb_jobs;
+ const int slice_end = (in->height * (jobnr+1)) / nb_jobs;
+
+ if (curves->is_16bit) {
+ for (y = slice_start; y < slice_end; y++) {
+ uint16_t *dstp = ( uint16_t *)(out->data[0] + y * out->linesize[0]);
+ const uint16_t *srcp = (const uint16_t *)(in ->data[0] + y * in->linesize[0]);
+
+ for (x = 0; x < in->width * step; x += step) {
+ dstp[x + r] = curves->graph[R][srcp[x + r]];
+ dstp[x + g] = curves->graph[G][srcp[x + g]];
+ dstp[x + b] = curves->graph[B][srcp[x + b]];
+ if (!direct && step == 4)
+ dstp[x + a] = srcp[x + a];
+ }
+ }
+ } else {
+ uint8_t *dst = out->data[0] + slice_start * out->linesize[0];
+ const uint8_t *src = in->data[0] + slice_start * in->linesize[0];
+
+ for (y = slice_start; y < slice_end; y++) {
+ for (x = 0; x < in->width * step; x += step) {
+ dst[x + r] = curves->graph[R][src[x + r]];
+ dst[x + g] = curves->graph[G][src[x + g]];
+ dst[x + b] = curves->graph[B][src[x + b]];
+ if (!direct && step == 4)
+ dst[x + a] = src[x + a];
+ }
+ dst += out->linesize[0];
+ src += in ->linesize[0];
+ }
+ }
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFrame *out;
+ ThreadData td;
+
+ if (av_frame_is_writable(in)) {
+ out = in;
+ } else {
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+ }
+
+ td.in = in;
+ td.out = out;
+ ctx->internal->execute(ctx, filter_slice, &td, NULL, FFMIN(outlink->h, ff_filter_get_nb_threads(ctx)));
+
+ if (out != in)
+ av_frame_free(&in);
+
+ return ff_filter_frame(outlink, out);
+}
+
+static av_cold void curves_uninit(AVFilterContext *ctx)
+{
+ int i;
+ CurvesContext *curves = ctx->priv;
+
+ for (i = 0; i < NB_COMP + 1; i++)
+ av_freep(&curves->graph[i]);
+}
+
+static const AVFilterPad curves_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ { NULL }
+};
+
+static const AVFilterPad curves_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_curves = {
+ .name = "curves",
+ .description = NULL_IF_CONFIG_SMALL("Adjust components curves."),
+ .priv_size = sizeof(CurvesContext),
+ .init = curves_init,
+ .uninit = curves_uninit,
+ .query_formats = query_formats,
+ .inputs = curves_inputs,
+ .outputs = curves_outputs,
+ .priv_class = &curves_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
+};
diff --git a/libavfilter/vf_datascope.c b/libavfilter/vf_datascope.c
new file mode 100644
index 0000000000..01a5d992b0
--- /dev/null
+++ b/libavfilter/vf_datascope.c
@@ -0,0 +1,421 @@
+/*
+ * Copyright (c) 2016 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/intreadwrite.h"
+#include "libavutil/opt.h"
+#include "libavutil/parseutils.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/xga_font_data.h"
+#include "avfilter.h"
+#include "drawutils.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+typedef struct DatascopeContext {
+ const AVClass *class;
+ int ow, oh;
+ int x, y;
+ int mode;
+ int axis;
+ float opacity;
+
+ int nb_planes;
+ int nb_comps;
+ int chars;
+ FFDrawContext draw;
+ FFDrawColor yellow;
+ FFDrawColor white;
+ FFDrawColor black;
+ FFDrawColor gray;
+
+ void (*pick_color)(FFDrawContext *draw, FFDrawColor *color, AVFrame *in, int x, int y, int *value);
+ void (*reverse_color)(FFDrawContext *draw, FFDrawColor *color, FFDrawColor *reverse);
+ int (*filter)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs);
+} DatascopeContext;
+
+#define OFFSET(x) offsetof(DatascopeContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption datascope_options[] = {
+ { "size", "set output size", OFFSET(ow), AV_OPT_TYPE_IMAGE_SIZE, {.str="hd720"}, 0, 0, FLAGS },
+ { "s", "set output size", OFFSET(ow), AV_OPT_TYPE_IMAGE_SIZE, {.str="hd720"}, 0, 0, FLAGS },
+ { "x", "set x offset", OFFSET(x), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, FLAGS },
+ { "y", "set y offset", OFFSET(y), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, FLAGS },
+ { "mode", "set scope mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=0}, 0, 2, FLAGS, "mode" },
+ { "mono", NULL, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "mode" },
+ { "color", NULL, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "mode" },
+ { "color2", NULL, 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, FLAGS, "mode" },
+ { "axis", "draw column/row numbers", OFFSET(axis), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS },
+ { "opacity", "set background opacity", OFFSET(opacity), AV_OPT_TYPE_FLOAT, {.dbl=0.75}, 0, 1, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(datascope);
+
+static int query_formats(AVFilterContext *ctx)
+{
+ return ff_set_common_formats(ctx, ff_draw_supported_pixel_formats(0));
+}
+
+static void draw_text(DatascopeContext *s, AVFrame *frame, FFDrawColor *color,
+ int x0, int y0, const uint8_t *text, int vertical)
+{
+ int x = x0;
+
+ for (; *text; text++) {
+ if (*text == '\n') {
+ x = x0;
+ y0 += 8;
+ continue;
+ }
+ ff_blend_mask(&s->draw, color, frame->data, frame->linesize,
+ frame->width, frame->height,
+ avpriv_cga_font + *text * 8, 1, 8, 8, 0, 0, x, y0);
+ if (vertical) {
+ x = x0;
+ y0 += 8;
+ } else {
+ x += 8;
+ }
+ }
+}
+
+static void pick_color8(FFDrawContext *draw, FFDrawColor *color, AVFrame *in, int x, int y, int *value)
+{
+ int p, i;
+
+ color->rgba[3] = 255;
+ for (p = 0; p < draw->nb_planes; p++) {
+ if (draw->nb_planes == 1) {
+ for (i = 0; i < 4; i++) {
+ value[i] = in->data[0][y * in->linesize[0] + x * draw->pixelstep[0] + i];
+ color->comp[0].u8[i] = value[i];
+ }
+ } else {
+ value[p] = in->data[p][(y >> draw->vsub[p]) * in->linesize[p] + (x >> draw->hsub[p])];
+ color->comp[p].u8[0] = value[p];
+ }
+ }
+}
+
+static void pick_color16(FFDrawContext *draw, FFDrawColor *color, AVFrame *in, int x, int y, int *value)
+{
+ int p, i;
+
+ color->rgba[3] = 255;
+ for (p = 0; p < draw->nb_planes; p++) {
+ if (draw->nb_planes == 1) {
+ for (i = 0; i < 4; i++) {
+ value[i] = AV_RL16(in->data[0] + y * in->linesize[0] + x * draw->pixelstep[0] + i * 2);
+ color->comp[0].u16[i] = value[i];
+ }
+ } else {
+ value[p] = AV_RL16(in->data[p] + (y >> draw->vsub[p]) * in->linesize[p] + (x >> draw->hsub[p]) * 2);
+ color->comp[p].u16[0] = value[p];
+ }
+ }
+}
+
+static void reverse_color8(FFDrawContext *draw, FFDrawColor *color, FFDrawColor *reverse)
+{
+ int p;
+
+ reverse->rgba[3] = 255;
+ for (p = 0; p < draw->nb_planes; p++) {
+ reverse->comp[p].u8[0] = color->comp[p].u8[0] > 127 ? 0 : 255;
+ reverse->comp[p].u8[1] = color->comp[p].u8[1] > 127 ? 0 : 255;
+ reverse->comp[p].u8[2] = color->comp[p].u8[2] > 127 ? 0 : 255;
+ }
+}
+
+static void reverse_color16(FFDrawContext *draw, FFDrawColor *color, FFDrawColor *reverse)
+{
+ int p;
+
+ reverse->rgba[3] = 255;
+ for (p = 0; p < draw->nb_planes; p++) {
+ const unsigned max = (1 << draw->desc->comp[p].depth) - 1;
+ const unsigned mid = (max + 1) / 2;
+
+ reverse->comp[p].u16[0] = color->comp[p].u16[0] > mid ? 0 : max;
+ reverse->comp[p].u16[1] = color->comp[p].u16[1] > mid ? 0 : max;
+ reverse->comp[p].u16[2] = color->comp[p].u16[2] > mid ? 0 : max;
+ }
+}
+
+typedef struct ThreadData {
+ AVFrame *in, *out;
+ int xoff, yoff;
+} ThreadData;
+
+static int filter_color2(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
+{
+ DatascopeContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFilterLink *inlink = ctx->inputs[0];
+ ThreadData *td = arg;
+ AVFrame *in = td->in;
+ AVFrame *out = td->out;
+ const int xoff = td->xoff;
+ const int yoff = td->yoff;
+ const int P = FFMAX(s->nb_planes, s->nb_comps);
+ const int C = s->chars;
+ const int W = (outlink->w - xoff) / (C * 10);
+ const int H = (outlink->h - yoff) / (P * 12);
+ const char *format[2] = {"%02X\n", "%04X\n"};
+ const int slice_start = (W * jobnr) / nb_jobs;
+ const int slice_end = (W * (jobnr+1)) / nb_jobs;
+ int x, y, p;
+
+ for (y = 0; y < H && (y + s->y < inlink->h); y++) {
+ for (x = slice_start; x < slice_end && (x + s->x < inlink->w); x++) {
+ FFDrawColor color = { { 0 } };
+ FFDrawColor reverse = { { 0 } };
+ int value[4] = { 0 };
+
+ s->pick_color(&s->draw, &color, in, x + s->x, y + s->y, value);
+ s->reverse_color(&s->draw, &color, &reverse);
+ ff_fill_rectangle(&s->draw, &color, out->data, out->linesize,
+ xoff + x * C * 10, yoff + y * P * 12, C * 10, P * 12);
+
+ for (p = 0; p < P; p++) {
+ char text[256];
+
+ snprintf(text, sizeof(text), format[C>>2], value[p]);
+ draw_text(s, out, &reverse, xoff + x * C * 10 + 2, yoff + y * P * 12 + p * 10 + 2, text, 0);
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int filter_color(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
+{
+ DatascopeContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFilterLink *inlink = ctx->inputs[0];
+ ThreadData *td = arg;
+ AVFrame *in = td->in;
+ AVFrame *out = td->out;
+ const int xoff = td->xoff;
+ const int yoff = td->yoff;
+ const int P = FFMAX(s->nb_planes, s->nb_comps);
+ const int C = s->chars;
+ const int W = (outlink->w - xoff) / (C * 10);
+ const int H = (outlink->h - yoff) / (P * 12);
+ const char *format[2] = {"%02X\n", "%04X\n"};
+ const int slice_start = (W * jobnr) / nb_jobs;
+ const int slice_end = (W * (jobnr+1)) / nb_jobs;
+ int x, y, p;
+
+ for (y = 0; y < H && (y + s->y < inlink->h); y++) {
+ for (x = slice_start; x < slice_end && (x + s->x < inlink->w); x++) {
+ FFDrawColor color = { { 0 } };
+ int value[4] = { 0 };
+
+ s->pick_color(&s->draw, &color, in, x + s->x, y + s->y, value);
+
+ for (p = 0; p < P; p++) {
+ char text[256];
+
+ snprintf(text, sizeof(text), format[C>>2], value[p]);
+ draw_text(s, out, &color, xoff + x * C * 10 + 2, yoff + y * P * 12 + p * 10 + 2, text, 0);
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int filter_mono(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
+{
+ DatascopeContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFilterLink *inlink = ctx->inputs[0];
+ ThreadData *td = arg;
+ AVFrame *in = td->in;
+ AVFrame *out = td->out;
+ const int xoff = td->xoff;
+ const int yoff = td->yoff;
+ const int P = FFMAX(s->nb_planes, s->nb_comps);
+ const int C = s->chars;
+ const int W = (outlink->w - xoff) / (C * 10);
+ const int H = (outlink->h - yoff) / (P * 12);
+ const char *format[2] = {"%02X\n", "%04X\n"};
+ const int slice_start = (W * jobnr) / nb_jobs;
+ const int slice_end = (W * (jobnr+1)) / nb_jobs;
+ int x, y, p;
+
+ for (y = 0; y < H && (y + s->y < inlink->h); y++) {
+ for (x = slice_start; x < slice_end && (x + s->x < inlink->w); x++) {
+ FFDrawColor color = { { 0 } };
+ int value[4] = { 0 };
+
+ s->pick_color(&s->draw, &color, in, x + s->x, y + s->y, value);
+ for (p = 0; p < P; p++) {
+ char text[256];
+
+ snprintf(text, sizeof(text), format[C>>2], value[p]);
+ draw_text(s, out, &s->white, xoff + x * C * 10 + 2, yoff + y * P * 12 + p * 10 + 2, text, 0);
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ DatascopeContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ ThreadData td = { 0 };
+ int ymaxlen = 0;
+ int xmaxlen = 0;
+ AVFrame *out;
+
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ out->pts = in->pts;
+
+ ff_fill_rectangle(&s->draw, &s->black, out->data, out->linesize,
+ 0, 0, outlink->w, outlink->h);
+
+ if (s->axis) {
+ const int P = FFMAX(s->nb_planes, s->nb_comps);
+ const int C = s->chars;
+ int Y = outlink->h / (P * 12);
+ int X = outlink->w / (C * 10);
+ char text[256] = { 0 };
+ int x, y;
+
+ snprintf(text, sizeof(text), "%d", s->y + Y);
+ ymaxlen = strlen(text);
+ ymaxlen *= 10;
+ snprintf(text, sizeof(text), "%d", s->x + X);
+ xmaxlen = strlen(text);
+ xmaxlen *= 10;
+
+ Y = (outlink->h - xmaxlen) / (P * 12);
+ X = (outlink->w - ymaxlen) / (C * 10);
+
+ for (y = 0; y < Y; y++) {
+ snprintf(text, sizeof(text), "%d", s->y + y);
+
+ ff_fill_rectangle(&s->draw, &s->gray, out->data, out->linesize,
+ 0, xmaxlen + y * P * 12 + (P + 1) * P - 2, ymaxlen, 10);
+
+ draw_text(s, out, &s->yellow, 2, xmaxlen + y * P * 12 + (P + 1) * P, text, 0);
+ }
+
+ for (x = 0; x < X; x++) {
+ snprintf(text, sizeof(text), "%d", s->x + x);
+
+ ff_fill_rectangle(&s->draw, &s->gray, out->data, out->linesize,
+ ymaxlen + x * C * 10 + 2 * C - 2, 0, 10, xmaxlen);
+
+ draw_text(s, out, &s->yellow, ymaxlen + x * C * 10 + 2 * C, 2, text, 1);
+ }
+ }
+
+ td.in = in; td.out = out, td.yoff = xmaxlen, td.xoff = ymaxlen;
+ ctx->internal->execute(ctx, s->filter, &td, NULL, FFMIN(ff_filter_get_nb_threads(ctx), FFMAX(outlink->w / 20, 1)));
+
+ av_frame_free(&in);
+ return ff_filter_frame(outlink, out);
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ DatascopeContext *s = inlink->dst->priv;
+ uint8_t alpha = s->opacity * 255;
+
+ s->nb_planes = av_pix_fmt_count_planes(inlink->format);
+ ff_draw_init(&s->draw, inlink->format, 0);
+ ff_draw_color(&s->draw, &s->white, (uint8_t[]){ 255, 255, 255, 255} );
+ ff_draw_color(&s->draw, &s->black, (uint8_t[]){ 0, 0, 0, alpha} );
+ ff_draw_color(&s->draw, &s->yellow, (uint8_t[]){ 255, 255, 0, 255} );
+ ff_draw_color(&s->draw, &s->gray, (uint8_t[]){ 77, 77, 77, 255} );
+ s->chars = (s->draw.desc->comp[0].depth + 7) / 8 * 2;
+ s->nb_comps = s->draw.desc->nb_components;
+
+ switch (s->mode) {
+ case 0: s->filter = filter_mono; break;
+ case 1: s->filter = filter_color; break;
+ case 2: s->filter = filter_color2; break;
+ }
+
+ if (s->draw.desc->comp[0].depth <= 8) {
+ s->pick_color = pick_color8;
+ s->reverse_color = reverse_color8;
+ } else {
+ s->pick_color = pick_color16;
+ s->reverse_color = reverse_color16;
+ }
+
+ return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ DatascopeContext *s = outlink->src->priv;
+
+ outlink->h = s->oh;
+ outlink->w = s->ow;
+ outlink->sample_aspect_ratio = (AVRational){1,1};
+
+ return 0;
+}
+
+static const AVFilterPad inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ { NULL }
+};
+
+static const AVFilterPad outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_datascope = {
+ .name = "datascope",
+ .description = NULL_IF_CONFIG_SMALL("Video data analysis."),
+ .priv_size = sizeof(DatascopeContext),
+ .priv_class = &datascope_class,
+ .query_formats = query_formats,
+ .inputs = inputs,
+ .outputs = outputs,
+ .flags = AVFILTER_FLAG_SLICE_THREADS,
+};
diff --git a/libavfilter/vf_dctdnoiz.c b/libavfilter/vf_dctdnoiz.c
new file mode 100644
index 0000000000..62763bf349
--- /dev/null
+++ b/libavfilter/vf_dctdnoiz.c
@@ -0,0 +1,778 @@
+/*
+ * Copyright (c) 2013-2014 Clément Bœsch
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * A simple, relatively efficient and slow DCT image denoiser.
+ *
+ * @see http://www.ipol.im/pub/art/2011/ys-dct/
+ *
+ * The DCT factorization used is based on "Fast and numerically stable
+ * algorithms for discrete cosine transforms" from Gerlind Plonkaa & Manfred
+ * Tasche (DOI: 10.1016/j.laa.2004.07.015).
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/eval.h"
+#include "libavutil/opt.h"
+#include "internal.h"
+
+static const char *const var_names[] = { "c", NULL };
+enum { VAR_C, VAR_VARS_NB };
+
+#define MAX_THREADS 8
+
+typedef struct DCTdnoizContext {
+ const AVClass *class;
+
+ /* coefficient factor expression */
+ char *expr_str;
+ AVExpr *expr[MAX_THREADS];
+ double var_values[MAX_THREADS][VAR_VARS_NB];
+
+ int nb_threads;
+ int pr_width, pr_height; // width and height to process
+ float sigma; // used when no expression are st
+ float th; // threshold (3*sigma)
+ float *cbuf[2][3]; // two planar rgb color buffers
+ float *slices[MAX_THREADS]; // slices buffers (1 slice buffer per thread)
+ float *weights; // dct coeff are cumulated with overlapping; these values are used for averaging
+ int p_linesize; // line sizes for color and weights
+ int overlap; // number of block overlapping pixels
+ int step; // block step increment (blocksize - overlap)
+ int n; // 1<<n is the block size
+ int bsize; // block size, 1<<n
+ void (*filter_freq_func)(struct DCTdnoizContext *s,
+ const float *src, int src_linesize,
+ float *dst, int dst_linesize,
+ int thread_id);
+ void (*color_decorrelation)(float **dst, int dst_linesize,
+ const uint8_t *src, int src_linesize,
+ int w, int h);
+ void (*color_correlation)(uint8_t *dst, int dst_linesize,
+ float **src, int src_linesize,
+ int w, int h);
+} DCTdnoizContext;
+
+#define MIN_NBITS 3 /* blocksize = 1<<3 = 8 */
+#define MAX_NBITS 4 /* blocksize = 1<<4 = 16 */
+#define DEFAULT_NBITS 3
+
+#define OFFSET(x) offsetof(DCTdnoizContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption dctdnoiz_options[] = {
+ { "sigma", "set noise sigma constant", OFFSET(sigma), AV_OPT_TYPE_FLOAT, {.dbl=0}, 0, 999, .flags = FLAGS },
+ { "s", "set noise sigma constant", OFFSET(sigma), AV_OPT_TYPE_FLOAT, {.dbl=0}, 0, 999, .flags = FLAGS },
+ { "overlap", "set number of block overlapping pixels", OFFSET(overlap), AV_OPT_TYPE_INT, {.i64=-1}, -1, (1<<MAX_NBITS)-1, .flags = FLAGS },
+ { "expr", "set coefficient factor expression", OFFSET(expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
+ { "e", "set coefficient factor expression", OFFSET(expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
+ { "n", "set the block size, expressed in bits", OFFSET(n), AV_OPT_TYPE_INT, {.i64=DEFAULT_NBITS}, MIN_NBITS, MAX_NBITS, .flags = FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(dctdnoiz);
+
+static void av_always_inline fdct8_1d(float *dst, const float *src,
+ int dst_stridea, int dst_strideb,
+ int src_stridea, int src_strideb)
+{
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ const float x00 = src[0*src_stridea] + src[7*src_stridea];
+ const float x01 = src[1*src_stridea] + src[6*src_stridea];
+ const float x02 = src[2*src_stridea] + src[5*src_stridea];
+ const float x03 = src[3*src_stridea] + src[4*src_stridea];
+ const float x04 = src[0*src_stridea] - src[7*src_stridea];
+ const float x05 = src[1*src_stridea] - src[6*src_stridea];
+ const float x06 = src[2*src_stridea] - src[5*src_stridea];
+ const float x07 = src[3*src_stridea] - src[4*src_stridea];
+ const float x08 = x00 + x03;
+ const float x09 = x01 + x02;
+ const float x0a = x00 - x03;
+ const float x0b = x01 - x02;
+ const float x0c = 1.38703984532215f*x04 + 0.275899379282943f*x07;
+ const float x0d = 1.17587560241936f*x05 + 0.785694958387102f*x06;
+ const float x0e = -0.785694958387102f*x05 + 1.17587560241936f*x06;
+ const float x0f = 0.275899379282943f*x04 - 1.38703984532215f*x07;
+ const float x10 = 0.353553390593274f * (x0c - x0d);
+ const float x11 = 0.353553390593274f * (x0e - x0f);
+ dst[0*dst_stridea] = 0.353553390593274f * (x08 + x09);
+ dst[1*dst_stridea] = 0.353553390593274f * (x0c + x0d);
+ dst[2*dst_stridea] = 0.461939766255643f*x0a + 0.191341716182545f*x0b;
+ dst[3*dst_stridea] = 0.707106781186547f * (x10 - x11);
+ dst[4*dst_stridea] = 0.353553390593274f * (x08 - x09);
+ dst[5*dst_stridea] = 0.707106781186547f * (x10 + x11);
+ dst[6*dst_stridea] = 0.191341716182545f*x0a - 0.461939766255643f*x0b;
+ dst[7*dst_stridea] = 0.353553390593274f * (x0e + x0f);
+ dst += dst_strideb;
+ src += src_strideb;
+ }
+}
+
+static void av_always_inline idct8_1d(float *dst, const float *src,
+ int dst_stridea, int dst_strideb,
+ int src_stridea, int src_strideb,
+ int add)
+{
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ const float x00 = 1.4142135623731f *src[0*src_stridea];
+ const float x01 = 1.38703984532215f *src[1*src_stridea] + 0.275899379282943f*src[7*src_stridea];
+ const float x02 = 1.30656296487638f *src[2*src_stridea] + 0.541196100146197f*src[6*src_stridea];
+ const float x03 = 1.17587560241936f *src[3*src_stridea] + 0.785694958387102f*src[5*src_stridea];
+ const float x04 = 1.4142135623731f *src[4*src_stridea];
+ const float x05 = -0.785694958387102f*src[3*src_stridea] + 1.17587560241936f*src[5*src_stridea];
+ const float x06 = 0.541196100146197f*src[2*src_stridea] - 1.30656296487638f*src[6*src_stridea];
+ const float x07 = -0.275899379282943f*src[1*src_stridea] + 1.38703984532215f*src[7*src_stridea];
+ const float x09 = x00 + x04;
+ const float x0a = x01 + x03;
+ const float x0b = 1.4142135623731f*x02;
+ const float x0c = x00 - x04;
+ const float x0d = x01 - x03;
+ const float x0e = 0.353553390593274f * (x09 - x0b);
+ const float x0f = 0.353553390593274f * (x0c + x0d);
+ const float x10 = 0.353553390593274f * (x0c - x0d);
+ const float x11 = 1.4142135623731f*x06;
+ const float x12 = x05 + x07;
+ const float x13 = x05 - x07;
+ const float x14 = 0.353553390593274f * (x11 + x12);
+ const float x15 = 0.353553390593274f * (x11 - x12);
+ const float x16 = 0.5f*x13;
+ dst[0*dst_stridea] = (add ? dst[ 0*dst_stridea] : 0) + 0.25f * (x09 + x0b) + 0.353553390593274f*x0a;
+ dst[1*dst_stridea] = (add ? dst[ 1*dst_stridea] : 0) + 0.707106781186547f * (x0f + x15);
+ dst[2*dst_stridea] = (add ? dst[ 2*dst_stridea] : 0) + 0.707106781186547f * (x0f - x15);
+ dst[3*dst_stridea] = (add ? dst[ 3*dst_stridea] : 0) + 0.707106781186547f * (x0e + x16);
+ dst[4*dst_stridea] = (add ? dst[ 4*dst_stridea] : 0) + 0.707106781186547f * (x0e - x16);
+ dst[5*dst_stridea] = (add ? dst[ 5*dst_stridea] : 0) + 0.707106781186547f * (x10 - x14);
+ dst[6*dst_stridea] = (add ? dst[ 6*dst_stridea] : 0) + 0.707106781186547f * (x10 + x14);
+ dst[7*dst_stridea] = (add ? dst[ 7*dst_stridea] : 0) + 0.25f * (x09 + x0b) - 0.353553390593274f*x0a;
+ dst += dst_strideb;
+ src += src_strideb;
+ }
+}
+
+
+static void av_always_inline fdct16_1d(float *dst, const float *src,
+ int dst_stridea, int dst_strideb,
+ int src_stridea, int src_strideb)
+{
+ int i;
+
+ for (i = 0; i < 16; i++) {
+ const float x00 = src[ 0*src_stridea] + src[15*src_stridea];
+ const float x01 = src[ 1*src_stridea] + src[14*src_stridea];
+ const float x02 = src[ 2*src_stridea] + src[13*src_stridea];
+ const float x03 = src[ 3*src_stridea] + src[12*src_stridea];
+ const float x04 = src[ 4*src_stridea] + src[11*src_stridea];
+ const float x05 = src[ 5*src_stridea] + src[10*src_stridea];
+ const float x06 = src[ 6*src_stridea] + src[ 9*src_stridea];
+ const float x07 = src[ 7*src_stridea] + src[ 8*src_stridea];
+ const float x08 = src[ 0*src_stridea] - src[15*src_stridea];
+ const float x09 = src[ 1*src_stridea] - src[14*src_stridea];
+ const float x0a = src[ 2*src_stridea] - src[13*src_stridea];
+ const float x0b = src[ 3*src_stridea] - src[12*src_stridea];
+ const float x0c = src[ 4*src_stridea] - src[11*src_stridea];
+ const float x0d = src[ 5*src_stridea] - src[10*src_stridea];
+ const float x0e = src[ 6*src_stridea] - src[ 9*src_stridea];
+ const float x0f = src[ 7*src_stridea] - src[ 8*src_stridea];
+ const float x10 = x00 + x07;
+ const float x11 = x01 + x06;
+ const float x12 = x02 + x05;
+ const float x13 = x03 + x04;
+ const float x14 = x00 - x07;
+ const float x15 = x01 - x06;
+ const float x16 = x02 - x05;
+ const float x17 = x03 - x04;
+ const float x18 = x10 + x13;
+ const float x19 = x11 + x12;
+ const float x1a = x10 - x13;
+ const float x1b = x11 - x12;
+ const float x1c = 1.38703984532215f*x14 + 0.275899379282943f*x17;
+ const float x1d = 1.17587560241936f*x15 + 0.785694958387102f*x16;
+ const float x1e = -0.785694958387102f*x15 + 1.17587560241936f *x16;
+ const float x1f = 0.275899379282943f*x14 - 1.38703984532215f *x17;
+ const float x20 = 0.25f * (x1c - x1d);
+ const float x21 = 0.25f * (x1e - x1f);
+ const float x22 = 1.40740373752638f *x08 + 0.138617169199091f*x0f;
+ const float x23 = 1.35331800117435f *x09 + 0.410524527522357f*x0e;
+ const float x24 = 1.24722501298667f *x0a + 0.666655658477747f*x0d;
+ const float x25 = 1.09320186700176f *x0b + 0.897167586342636f*x0c;
+ const float x26 = -0.897167586342636f*x0b + 1.09320186700176f *x0c;
+ const float x27 = 0.666655658477747f*x0a - 1.24722501298667f *x0d;
+ const float x28 = -0.410524527522357f*x09 + 1.35331800117435f *x0e;
+ const float x29 = 0.138617169199091f*x08 - 1.40740373752638f *x0f;
+ const float x2a = x22 + x25;
+ const float x2b = x23 + x24;
+ const float x2c = x22 - x25;
+ const float x2d = x23 - x24;
+ const float x2e = 0.25f * (x2a - x2b);
+ const float x2f = 0.326640741219094f*x2c + 0.135299025036549f*x2d;
+ const float x30 = 0.135299025036549f*x2c - 0.326640741219094f*x2d;
+ const float x31 = x26 + x29;
+ const float x32 = x27 + x28;
+ const float x33 = x26 - x29;
+ const float x34 = x27 - x28;
+ const float x35 = 0.25f * (x31 - x32);
+ const float x36 = 0.326640741219094f*x33 + 0.135299025036549f*x34;
+ const float x37 = 0.135299025036549f*x33 - 0.326640741219094f*x34;
+ dst[ 0*dst_stridea] = 0.25f * (x18 + x19);
+ dst[ 1*dst_stridea] = 0.25f * (x2a + x2b);
+ dst[ 2*dst_stridea] = 0.25f * (x1c + x1d);
+ dst[ 3*dst_stridea] = 0.707106781186547f * (x2f - x37);
+ dst[ 4*dst_stridea] = 0.326640741219094f*x1a + 0.135299025036549f*x1b;
+ dst[ 5*dst_stridea] = 0.707106781186547f * (x2f + x37);
+ dst[ 6*dst_stridea] = 0.707106781186547f * (x20 - x21);
+ dst[ 7*dst_stridea] = 0.707106781186547f * (x2e + x35);
+ dst[ 8*dst_stridea] = 0.25f * (x18 - x19);
+ dst[ 9*dst_stridea] = 0.707106781186547f * (x2e - x35);
+ dst[10*dst_stridea] = 0.707106781186547f * (x20 + x21);
+ dst[11*dst_stridea] = 0.707106781186547f * (x30 - x36);
+ dst[12*dst_stridea] = 0.135299025036549f*x1a - 0.326640741219094f*x1b;
+ dst[13*dst_stridea] = 0.707106781186547f * (x30 + x36);
+ dst[14*dst_stridea] = 0.25f * (x1e + x1f);
+ dst[15*dst_stridea] = 0.25f * (x31 + x32);
+ dst += dst_strideb;
+ src += src_strideb;
+ }
+}
+
+static void av_always_inline idct16_1d(float *dst, const float *src,
+ int dst_stridea, int dst_strideb,
+ int src_stridea, int src_strideb,
+ int add)
+{
+ int i;
+
+ for (i = 0; i < 16; i++) {
+ const float x00 = 1.4142135623731f *src[ 0*src_stridea];
+ const float x01 = 1.40740373752638f *src[ 1*src_stridea] + 0.138617169199091f*src[15*src_stridea];
+ const float x02 = 1.38703984532215f *src[ 2*src_stridea] + 0.275899379282943f*src[14*src_stridea];
+ const float x03 = 1.35331800117435f *src[ 3*src_stridea] + 0.410524527522357f*src[13*src_stridea];
+ const float x04 = 1.30656296487638f *src[ 4*src_stridea] + 0.541196100146197f*src[12*src_stridea];
+ const float x05 = 1.24722501298667f *src[ 5*src_stridea] + 0.666655658477747f*src[11*src_stridea];
+ const float x06 = 1.17587560241936f *src[ 6*src_stridea] + 0.785694958387102f*src[10*src_stridea];
+ const float x07 = 1.09320186700176f *src[ 7*src_stridea] + 0.897167586342636f*src[ 9*src_stridea];
+ const float x08 = 1.4142135623731f *src[ 8*src_stridea];
+ const float x09 = -0.897167586342636f*src[ 7*src_stridea] + 1.09320186700176f*src[ 9*src_stridea];
+ const float x0a = 0.785694958387102f*src[ 6*src_stridea] - 1.17587560241936f*src[10*src_stridea];
+ const float x0b = -0.666655658477747f*src[ 5*src_stridea] + 1.24722501298667f*src[11*src_stridea];
+ const float x0c = 0.541196100146197f*src[ 4*src_stridea] - 1.30656296487638f*src[12*src_stridea];
+ const float x0d = -0.410524527522357f*src[ 3*src_stridea] + 1.35331800117435f*src[13*src_stridea];
+ const float x0e = 0.275899379282943f*src[ 2*src_stridea] - 1.38703984532215f*src[14*src_stridea];
+ const float x0f = -0.138617169199091f*src[ 1*src_stridea] + 1.40740373752638f*src[15*src_stridea];
+ const float x12 = x00 + x08;
+ const float x13 = x01 + x07;
+ const float x14 = x02 + x06;
+ const float x15 = x03 + x05;
+ const float x16 = 1.4142135623731f*x04;
+ const float x17 = x00 - x08;
+ const float x18 = x01 - x07;
+ const float x19 = x02 - x06;
+ const float x1a = x03 - x05;
+ const float x1d = x12 + x16;
+ const float x1e = x13 + x15;
+ const float x1f = 1.4142135623731f*x14;
+ const float x20 = x12 - x16;
+ const float x21 = x13 - x15;
+ const float x22 = 0.25f * (x1d - x1f);
+ const float x23 = 0.25f * (x20 + x21);
+ const float x24 = 0.25f * (x20 - x21);
+ const float x25 = 1.4142135623731f*x17;
+ const float x26 = 1.30656296487638f*x18 + 0.541196100146197f*x1a;
+ const float x27 = 1.4142135623731f*x19;
+ const float x28 = -0.541196100146197f*x18 + 1.30656296487638f*x1a;
+ const float x29 = 0.176776695296637f * (x25 + x27) + 0.25f*x26;
+ const float x2a = 0.25f * (x25 - x27);
+ const float x2b = 0.176776695296637f * (x25 + x27) - 0.25f*x26;
+ const float x2c = 0.353553390593274f*x28;
+ const float x1b = 0.707106781186547f * (x2a - x2c);
+ const float x1c = 0.707106781186547f * (x2a + x2c);
+ const float x2d = 1.4142135623731f*x0c;
+ const float x2e = x0b + x0d;
+ const float x2f = x0a + x0e;
+ const float x30 = x09 + x0f;
+ const float x31 = x09 - x0f;
+ const float x32 = x0a - x0e;
+ const float x33 = x0b - x0d;
+ const float x37 = 1.4142135623731f*x2d;
+ const float x38 = 1.30656296487638f*x2e + 0.541196100146197f*x30;
+ const float x39 = 1.4142135623731f*x2f;
+ const float x3a = -0.541196100146197f*x2e + 1.30656296487638f*x30;
+ const float x3b = 0.176776695296637f * (x37 + x39) + 0.25f*x38;
+ const float x3c = 0.25f * (x37 - x39);
+ const float x3d = 0.176776695296637f * (x37 + x39) - 0.25f*x38;
+ const float x3e = 0.353553390593274f*x3a;
+ const float x34 = 0.707106781186547f * (x3c - x3e);
+ const float x35 = 0.707106781186547f * (x3c + x3e);
+ const float x3f = 1.4142135623731f*x32;
+ const float x40 = x31 + x33;
+ const float x41 = x31 - x33;
+ const float x42 = 0.25f * (x3f + x40);
+ const float x43 = 0.25f * (x3f - x40);
+ const float x44 = 0.353553390593274f*x41;
+ dst[ 0*dst_stridea] = (add ? dst[ 0*dst_stridea] : 0) + 0.176776695296637f * (x1d + x1f) + 0.25f*x1e;
+ dst[ 1*dst_stridea] = (add ? dst[ 1*dst_stridea] : 0) + 0.707106781186547f * (x29 + x3d);
+ dst[ 2*dst_stridea] = (add ? dst[ 2*dst_stridea] : 0) + 0.707106781186547f * (x29 - x3d);
+ dst[ 3*dst_stridea] = (add ? dst[ 3*dst_stridea] : 0) + 0.707106781186547f * (x23 - x43);
+ dst[ 4*dst_stridea] = (add ? dst[ 4*dst_stridea] : 0) + 0.707106781186547f * (x23 + x43);
+ dst[ 5*dst_stridea] = (add ? dst[ 5*dst_stridea] : 0) + 0.707106781186547f * (x1b - x35);
+ dst[ 6*dst_stridea] = (add ? dst[ 6*dst_stridea] : 0) + 0.707106781186547f * (x1b + x35);
+ dst[ 7*dst_stridea] = (add ? dst[ 7*dst_stridea] : 0) + 0.707106781186547f * (x22 + x44);
+ dst[ 8*dst_stridea] = (add ? dst[ 8*dst_stridea] : 0) + 0.707106781186547f * (x22 - x44);
+ dst[ 9*dst_stridea] = (add ? dst[ 9*dst_stridea] : 0) + 0.707106781186547f * (x1c + x34);
+ dst[10*dst_stridea] = (add ? dst[10*dst_stridea] : 0) + 0.707106781186547f * (x1c - x34);
+ dst[11*dst_stridea] = (add ? dst[11*dst_stridea] : 0) + 0.707106781186547f * (x24 + x42);
+ dst[12*dst_stridea] = (add ? dst[12*dst_stridea] : 0) + 0.707106781186547f * (x24 - x42);
+ dst[13*dst_stridea] = (add ? dst[13*dst_stridea] : 0) + 0.707106781186547f * (x2b - x3b);
+ dst[14*dst_stridea] = (add ? dst[14*dst_stridea] : 0) + 0.707106781186547f * (x2b + x3b);
+ dst[15*dst_stridea] = (add ? dst[15*dst_stridea] : 0) + 0.176776695296637f * (x1d + x1f) - 0.25f*x1e;
+ dst += dst_strideb;
+ src += src_strideb;
+ }
+}
+
+#define DEF_FILTER_FREQ_FUNCS(bsize) \
+static av_always_inline void filter_freq_##bsize(const float *src, int src_linesize, \
+ float *dst, int dst_linesize, \
+ AVExpr *expr, double *var_values, \
+ int sigma_th) \
+{ \
+ unsigned i; \
+ DECLARE_ALIGNED(32, float, tmp_block1)[bsize * bsize]; \
+ DECLARE_ALIGNED(32, float, tmp_block2)[bsize * bsize]; \
+ \
+ /* forward DCT */ \
+ fdct##bsize##_1d(tmp_block1, src, 1, bsize, 1, src_linesize); \
+ fdct##bsize##_1d(tmp_block2, tmp_block1, bsize, 1, bsize, 1); \
+ \
+ for (i = 0; i < bsize*bsize; i++) { \
+ float *b = &tmp_block2[i]; \
+ /* frequency filtering */ \
+ if (expr) { \
+ var_values[VAR_C] = fabsf(*b); \
+ *b *= av_expr_eval(expr, var_values, NULL); \
+ } else { \
+ if (fabsf(*b) < sigma_th) \
+ *b = 0; \
+ } \
+ } \
+ \
+ /* inverse DCT */ \
+ idct##bsize##_1d(tmp_block1, tmp_block2, 1, bsize, 1, bsize, 0); \
+ idct##bsize##_1d(dst, tmp_block1, dst_linesize, 1, bsize, 1, 1); \
+} \
+ \
+static void filter_freq_sigma_##bsize(DCTdnoizContext *s, \
+ const float *src, int src_linesize, \
+ float *dst, int dst_linesize, int thread_id) \
+{ \
+ filter_freq_##bsize(src, src_linesize, dst, dst_linesize, NULL, NULL, s->th); \
+} \
+ \
+static void filter_freq_expr_##bsize(DCTdnoizContext *s, \
+ const float *src, int src_linesize, \
+ float *dst, int dst_linesize, int thread_id) \
+{ \
+ filter_freq_##bsize(src, src_linesize, dst, dst_linesize, \
+ s->expr[thread_id], s->var_values[thread_id], 0); \
+}
+
+DEF_FILTER_FREQ_FUNCS(8)
+DEF_FILTER_FREQ_FUNCS(16)
+
+#define DCT3X3_0_0 0.5773502691896258f /* 1/sqrt(3) */
+#define DCT3X3_0_1 0.5773502691896258f /* 1/sqrt(3) */
+#define DCT3X3_0_2 0.5773502691896258f /* 1/sqrt(3) */
+#define DCT3X3_1_0 0.7071067811865475f /* 1/sqrt(2) */
+#define DCT3X3_1_2 -0.7071067811865475f /* -1/sqrt(2) */
+#define DCT3X3_2_0 0.4082482904638631f /* 1/sqrt(6) */
+#define DCT3X3_2_1 -0.8164965809277261f /* -2/sqrt(6) */
+#define DCT3X3_2_2 0.4082482904638631f /* 1/sqrt(6) */
+
+static av_always_inline void color_decorrelation(float **dst, int dst_linesize,
+ const uint8_t *src, int src_linesize,
+ int w, int h,
+ int r, int g, int b)
+{
+ int x, y;
+ float *dstp_r = dst[0];
+ float *dstp_g = dst[1];
+ float *dstp_b = dst[2];
+
+ for (y = 0; y < h; y++) {
+ const uint8_t *srcp = src;
+
+ for (x = 0; x < w; x++) {
+ dstp_r[x] = srcp[r] * DCT3X3_0_0 + srcp[g] * DCT3X3_0_1 + srcp[b] * DCT3X3_0_2;
+ dstp_g[x] = srcp[r] * DCT3X3_1_0 + srcp[b] * DCT3X3_1_2;
+ dstp_b[x] = srcp[r] * DCT3X3_2_0 + srcp[g] * DCT3X3_2_1 + srcp[b] * DCT3X3_2_2;
+ srcp += 3;
+ }
+ src += src_linesize;
+ dstp_r += dst_linesize;
+ dstp_g += dst_linesize;
+ dstp_b += dst_linesize;
+ }
+}
+
+static av_always_inline void color_correlation(uint8_t *dst, int dst_linesize,
+ float **src, int src_linesize,
+ int w, int h,
+ int r, int g, int b)
+{
+ int x, y;
+ const float *src_r = src[0];
+ const float *src_g = src[1];
+ const float *src_b = src[2];
+
+ for (y = 0; y < h; y++) {
+ uint8_t *dstp = dst;
+
+ for (x = 0; x < w; x++) {
+ dstp[r] = av_clip_uint8(src_r[x] * DCT3X3_0_0 + src_g[x] * DCT3X3_1_0 + src_b[x] * DCT3X3_2_0);
+ dstp[g] = av_clip_uint8(src_r[x] * DCT3X3_0_1 + src_b[x] * DCT3X3_2_1);
+ dstp[b] = av_clip_uint8(src_r[x] * DCT3X3_0_2 + src_g[x] * DCT3X3_1_2 + src_b[x] * DCT3X3_2_2);
+ dstp += 3;
+ }
+ dst += dst_linesize;
+ src_r += src_linesize;
+ src_g += src_linesize;
+ src_b += src_linesize;
+ }
+}
+
+#define DECLARE_COLOR_FUNCS(name, r, g, b) \
+static void color_decorrelation_##name(float **dst, int dst_linesize, \
+ const uint8_t *src, int src_linesize, \
+ int w, int h) \
+{ \
+ color_decorrelation(dst, dst_linesize, src, src_linesize, w, h, r, g, b); \
+} \
+ \
+static void color_correlation_##name(uint8_t *dst, int dst_linesize, \
+ float **src, int src_linesize, \
+ int w, int h) \
+{ \
+ color_correlation(dst, dst_linesize, src, src_linesize, w, h, r, g, b); \
+}
+
+DECLARE_COLOR_FUNCS(rgb, 0, 1, 2)
+DECLARE_COLOR_FUNCS(bgr, 2, 1, 0)
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ DCTdnoizContext *s = ctx->priv;
+ int i, x, y, bx, by, linesize, *iweights, max_slice_h, slice_h;
+ const int bsize = 1 << s->n;
+
+ switch (inlink->format) {
+ case AV_PIX_FMT_BGR24:
+ s->color_decorrelation = color_decorrelation_bgr;
+ s->color_correlation = color_correlation_bgr;
+ break;
+ case AV_PIX_FMT_RGB24:
+ s->color_decorrelation = color_decorrelation_rgb;
+ s->color_correlation = color_correlation_rgb;
+ break;
+ default:
+ av_assert0(0);
+ }
+
+ s->pr_width = inlink->w - (inlink->w - bsize) % s->step;
+ s->pr_height = inlink->h - (inlink->h - bsize) % s->step;
+ if (s->pr_width != inlink->w)
+ av_log(ctx, AV_LOG_WARNING, "The last %d horizontal pixels won't be denoised\n",
+ inlink->w - s->pr_width);
+ if (s->pr_height != inlink->h)
+ av_log(ctx, AV_LOG_WARNING, "The last %d vertical pixels won't be denoised\n",
+ inlink->h - s->pr_height);
+
+ max_slice_h = s->pr_height / ((s->bsize - 1) * 2);
+ s->nb_threads = FFMIN3(MAX_THREADS, ff_filter_get_nb_threads(ctx), max_slice_h);
+ av_log(ctx, AV_LOG_DEBUG, "threads: [max=%d hmax=%d user=%d] => %d\n",
+ MAX_THREADS, max_slice_h, ff_filter_get_nb_threads(ctx), s->nb_threads);
+
+ s->p_linesize = linesize = FFALIGN(s->pr_width, 32);
+ for (i = 0; i < 2; i++) {
+ s->cbuf[i][0] = av_malloc_array(linesize * s->pr_height, sizeof(*s->cbuf[i][0]));
+ s->cbuf[i][1] = av_malloc_array(linesize * s->pr_height, sizeof(*s->cbuf[i][1]));
+ s->cbuf[i][2] = av_malloc_array(linesize * s->pr_height, sizeof(*s->cbuf[i][2]));
+ if (!s->cbuf[i][0] || !s->cbuf[i][1] || !s->cbuf[i][2])
+ return AVERROR(ENOMEM);
+ }
+
+ /* eval expressions are probably not thread safe when the eval internal
+ * state can be changed (typically through load & store operations) */
+ if (s->expr_str) {
+ for (i = 0; i < s->nb_threads; i++) {
+ int ret = av_expr_parse(&s->expr[i], s->expr_str, var_names,
+ NULL, NULL, NULL, NULL, 0, ctx);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ /* each slice will need to (pre & re)process the top and bottom block of
+ * the previous one in in addition to its processing area. This is because
+ * each pixel is averaged by all the surrounding blocks */
+ slice_h = (int)ceilf(s->pr_height / (float)s->nb_threads) + (s->bsize - 1) * 2;
+ for (i = 0; i < s->nb_threads; i++) {
+ s->slices[i] = av_malloc_array(linesize, slice_h * sizeof(*s->slices[i]));
+ if (!s->slices[i])
+ return AVERROR(ENOMEM);
+ }
+
+ s->weights = av_malloc(s->pr_height * linesize * sizeof(*s->weights));
+ if (!s->weights)
+ return AVERROR(ENOMEM);
+ iweights = av_calloc(s->pr_height, linesize * sizeof(*iweights));
+ if (!iweights)
+ return AVERROR(ENOMEM);
+ for (y = 0; y < s->pr_height - bsize + 1; y += s->step)
+ for (x = 0; x < s->pr_width - bsize + 1; x += s->step)
+ for (by = 0; by < bsize; by++)
+ for (bx = 0; bx < bsize; bx++)
+ iweights[(y + by)*linesize + x + bx]++;
+ for (y = 0; y < s->pr_height; y++)
+ for (x = 0; x < s->pr_width; x++)
+ s->weights[y*linesize + x] = 1. / iweights[y*linesize + x];
+ av_free(iweights);
+
+ return 0;
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ DCTdnoizContext *s = ctx->priv;
+
+ s->bsize = 1 << s->n;
+ if (s->overlap == -1)
+ s->overlap = s->bsize - 1;
+
+ if (s->overlap > s->bsize - 1) {
+ av_log(s, AV_LOG_ERROR, "Overlap value can not except %d "
+ "with a block size of %dx%d\n",
+ s->bsize - 1, s->bsize, s->bsize);
+ return AVERROR(EINVAL);
+ }
+
+ if (s->expr_str) {
+ switch (s->n) {
+ case 3: s->filter_freq_func = filter_freq_expr_8; break;
+ case 4: s->filter_freq_func = filter_freq_expr_16; break;
+ default: av_assert0(0);
+ }
+ } else {
+ switch (s->n) {
+ case 3: s->filter_freq_func = filter_freq_sigma_8; break;
+ case 4: s->filter_freq_func = filter_freq_sigma_16; break;
+ default: av_assert0(0);
+ }
+ }
+
+ s->th = s->sigma * 3.;
+ s->step = s->bsize - s->overlap;
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_BGR24, AV_PIX_FMT_RGB24,
+ AV_PIX_FMT_NONE
+ };
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+typedef struct ThreadData {
+ float *src, *dst;
+} ThreadData;
+
+static int filter_slice(AVFilterContext *ctx,
+ void *arg, int jobnr, int nb_jobs)
+{
+ int x, y;
+ DCTdnoizContext *s = ctx->priv;
+ const ThreadData *td = arg;
+ const int w = s->pr_width;
+ const int h = s->pr_height;
+ const int slice_start = (h * jobnr ) / nb_jobs;
+ const int slice_end = (h * (jobnr+1)) / nb_jobs;
+ const int slice_start_ctx = FFMAX(slice_start - s->bsize + 1, 0);
+ const int slice_end_ctx = FFMIN(slice_end, h - s->bsize + 1);
+ const int slice_h = slice_end_ctx - slice_start_ctx;
+ const int src_linesize = s->p_linesize;
+ const int dst_linesize = s->p_linesize;
+ const int slice_linesize = s->p_linesize;
+ float *dst;
+ const float *src = td->src + slice_start_ctx * src_linesize;
+ const float *weights = s->weights + slice_start * dst_linesize;
+ float *slice = s->slices[jobnr];
+
+ // reset block sums
+ memset(slice, 0, (slice_h + s->bsize - 1) * dst_linesize * sizeof(*slice));
+
+ // block dct sums
+ for (y = 0; y < slice_h; y += s->step) {
+ for (x = 0; x < w - s->bsize + 1; x += s->step)
+ s->filter_freq_func(s, src + x, src_linesize,
+ slice + x, slice_linesize,
+ jobnr);
+ src += s->step * src_linesize;
+ slice += s->step * slice_linesize;
+ }
+
+ // average blocks
+ slice = s->slices[jobnr] + (slice_start - slice_start_ctx) * slice_linesize;
+ dst = td->dst + slice_start * dst_linesize;
+ for (y = slice_start; y < slice_end; y++) {
+ for (x = 0; x < w; x++)
+ dst[x] = slice[x] * weights[x];
+ slice += slice_linesize;
+ dst += dst_linesize;
+ weights += dst_linesize;
+ }
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ DCTdnoizContext *s = ctx->priv;
+ AVFilterLink *outlink = inlink->dst->outputs[0];
+ int direct, plane;
+ AVFrame *out;
+
+ if (av_frame_is_writable(in)) {
+ direct = 1;
+ out = in;
+ } else {
+ direct = 0;
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+ }
+
+ s->color_decorrelation(s->cbuf[0], s->p_linesize,
+ in->data[0], in->linesize[0],
+ s->pr_width, s->pr_height);
+ for (plane = 0; plane < 3; plane++) {
+ ThreadData td = {
+ .src = s->cbuf[0][plane],
+ .dst = s->cbuf[1][plane],
+ };
+ ctx->internal->execute(ctx, filter_slice, &td, NULL, s->nb_threads);
+ }
+ s->color_correlation(out->data[0], out->linesize[0],
+ s->cbuf[1], s->p_linesize,
+ s->pr_width, s->pr_height);
+
+ if (!direct) {
+ int y;
+ uint8_t *dst = out->data[0];
+ const uint8_t *src = in->data[0];
+ const int dst_linesize = out->linesize[0];
+ const int src_linesize = in->linesize[0];
+ const int hpad = (inlink->w - s->pr_width) * 3;
+ const int vpad = (inlink->h - s->pr_height);
+
+ if (hpad) {
+ uint8_t *dstp = dst + s->pr_width * 3;
+ const uint8_t *srcp = src + s->pr_width * 3;
+
+ for (y = 0; y < s->pr_height; y++) {
+ memcpy(dstp, srcp, hpad);
+ dstp += dst_linesize;
+ srcp += src_linesize;
+ }
+ }
+ if (vpad) {
+ uint8_t *dstp = dst + s->pr_height * dst_linesize;
+ const uint8_t *srcp = src + s->pr_height * src_linesize;
+
+ for (y = 0; y < vpad; y++) {
+ memcpy(dstp, srcp, inlink->w * 3);
+ dstp += dst_linesize;
+ srcp += src_linesize;
+ }
+ }
+
+ av_frame_free(&in);
+ }
+
+ return ff_filter_frame(outlink, out);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ int i;
+ DCTdnoizContext *s = ctx->priv;
+
+ av_freep(&s->weights);
+ for (i = 0; i < 2; i++) {
+ av_freep(&s->cbuf[i][0]);
+ av_freep(&s->cbuf[i][1]);
+ av_freep(&s->cbuf[i][2]);
+ }
+ for (i = 0; i < s->nb_threads; i++) {
+ av_freep(&s->slices[i]);
+ av_expr_free(s->expr[i]);
+ }
+}
+
+static const AVFilterPad dctdnoiz_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ { NULL }
+};
+
+static const AVFilterPad dctdnoiz_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_dctdnoiz = {
+ .name = "dctdnoiz",
+ .description = NULL_IF_CONFIG_SMALL("Denoise frames using 2D DCT."),
+ .priv_size = sizeof(DCTdnoizContext),
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = dctdnoiz_inputs,
+ .outputs = dctdnoiz_outputs,
+ .priv_class = &dctdnoiz_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
+};
diff --git a/libavfilter/vf_deband.c b/libavfilter/vf_deband.c
new file mode 100644
index 0000000000..713e80b049
--- /dev/null
+++ b/libavfilter/vf_deband.c
@@ -0,0 +1,470 @@
+/*
+ * Copyright (c) 2015 Niklas Haas
+ * Copyright (c) 2015 Paul B Mahol
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+#include "internal.h"
+#include "video.h"
+
+typedef struct DebandContext {
+ const AVClass *class;
+
+ int coupling;
+ float threshold[4];
+ int range;
+ int blur;
+ float direction;
+
+ int nb_components;
+ int planewidth[4];
+ int planeheight[4];
+ int shift[2];
+ int thr[4];
+
+ int *x_pos;
+ int *y_pos;
+
+ int (*deband)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs);
+} DebandContext;
+
+#define OFFSET(x) offsetof(DebandContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption deband_options[] = {
+ { "1thr", "set 1st plane threshold", OFFSET(threshold[0]), AV_OPT_TYPE_FLOAT, {.dbl=0.02}, 0.00003, 0.5, FLAGS },
+ { "2thr", "set 2nd plane threshold", OFFSET(threshold[1]), AV_OPT_TYPE_FLOAT, {.dbl=0.02}, 0.00003, 0.5, FLAGS },
+ { "3thr", "set 3rd plane threshold", OFFSET(threshold[2]), AV_OPT_TYPE_FLOAT, {.dbl=0.02}, 0.00003, 0.5, FLAGS },
+ { "4thr", "set 4th plane threshold", OFFSET(threshold[3]), AV_OPT_TYPE_FLOAT, {.dbl=0.02}, 0.00003, 0.5, FLAGS },
+ { "range", "set range", OFFSET(range), AV_OPT_TYPE_INT, {.i64=16}, INT_MIN, INT_MAX, FLAGS },
+ { "r", "set range", OFFSET(range), AV_OPT_TYPE_INT, {.i64=16}, INT_MIN, INT_MAX, FLAGS },
+ { "direction", "set direction", OFFSET(direction), AV_OPT_TYPE_FLOAT, {.dbl=2*M_PI},-2*M_PI, 2*M_PI, FLAGS },
+ { "d", "set direction", OFFSET(direction), AV_OPT_TYPE_FLOAT, {.dbl=2*M_PI},-2*M_PI, 2*M_PI, FLAGS },
+ { "blur", "set blur", OFFSET(blur), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, FLAGS },
+ { "b", "set blur", OFFSET(blur), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, FLAGS },
+ { "coupling", "set plane coupling", OFFSET(coupling), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS },
+ { "c", "set plane coupling", OFFSET(coupling), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(deband);
+
+static int query_formats(AVFilterContext *ctx)
+{
+ DebandContext *s = ctx->priv;
+
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY16,
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
+ AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
+ AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUVJ440P,
+ AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P,
+ AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
+ AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9,
+ AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10,
+ AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12,
+ AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV444P14,
+ AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP,
+ AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
+ AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14,
+ AV_PIX_FMT_GBRP16, AV_PIX_FMT_GBRAP16,
+ AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
+ AV_PIX_FMT_YUVA420P16, AV_PIX_FMT_YUVA422P16, AV_PIX_FMT_YUVA444P16,
+ AV_PIX_FMT_NONE
+ };
+
+ static const enum AVPixelFormat cpix_fmts[] = {
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUVJ444P,
+ AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUV444P9,
+ AV_PIX_FMT_YUVA444P9, AV_PIX_FMT_YUVA444P10,
+ AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV444P14,
+ AV_PIX_FMT_YUV444P16, AV_PIX_FMT_YUVA444P16,
+ AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP,
+ AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
+ AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14,
+ AV_PIX_FMT_GBRP16, AV_PIX_FMT_GBRAP16,
+ AV_PIX_FMT_NONE
+ };
+
+ AVFilterFormats *fmts_list = ff_make_format_list(s->coupling ? cpix_fmts : pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static float frand(int x, int y)
+{
+ const float r = sinf(x * 12.9898 + y * 78.233) * 43758.545;
+
+ return r - floorf(r);
+}
+
+static int inline get_avg(int ref0, int ref1, int ref2, int ref3)
+{
+ return (ref0 + ref1 + ref2 + ref3) / 4;
+}
+
+typedef struct ThreadData {
+ AVFrame *in, *out;
+} ThreadData;
+
+static int deband_8_c(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
+{
+ DebandContext *s = ctx->priv;
+ ThreadData *td = arg;
+ AVFrame *in = td->in;
+ AVFrame *out = td->out;
+ int x, y, p;
+
+ for (p = 0; p < s->nb_components; p++) {
+ const uint8_t *src_ptr = (const uint8_t *)in->data[p];
+ uint8_t *dst_ptr = (uint8_t *)out->data[p];
+ const int dst_linesize = out->linesize[p];
+ const int src_linesize = in->linesize[p];
+ const int thr = s->thr[p];
+ const int start = (s->planeheight[p] * jobnr ) / nb_jobs;
+ const int end = (s->planeheight[p] * (jobnr+1)) / nb_jobs;
+ const int w = s->planewidth[p] - 1;
+ const int h = s->planeheight[p] - 1;
+
+ for (y = start; y < end; y++) {
+ const int pos = y * s->planewidth[0];
+
+ for (x = 0; x < s->planewidth[p]; x++) {
+ const int x_pos = s->x_pos[pos + x];
+ const int y_pos = s->y_pos[pos + x];
+ const int ref0 = src_ptr[av_clip(y + y_pos, 0, h) * src_linesize + av_clip(x + x_pos, 0, w)];
+ const int ref1 = src_ptr[av_clip(y + -y_pos, 0, h) * src_linesize + av_clip(x + x_pos, 0, w)];
+ const int ref2 = src_ptr[av_clip(y + -y_pos, 0, h) * src_linesize + av_clip(x + -x_pos, 0, w)];
+ const int ref3 = src_ptr[av_clip(y + y_pos, 0, h) * src_linesize + av_clip(x + -x_pos, 0, w)];
+ const int src0 = src_ptr[y * src_linesize + x];
+
+ if (s->blur) {
+ const int avg = get_avg(ref0, ref1, ref2, ref3);
+ const int diff = FFABS(src0 - avg);
+
+ dst_ptr[y * dst_linesize + x] = diff < thr ? avg : src0;
+ } else {
+ dst_ptr[y * dst_linesize + x] = (FFABS(src0 - ref0) < thr) &&
+ (FFABS(src0 - ref1) < thr) &&
+ (FFABS(src0 - ref2) < thr) &&
+ (FFABS(src0 - ref3) < thr) ? get_avg(ref0, ref1, ref2, ref3) : src0;
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int deband_8_coupling_c(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
+{
+ DebandContext *s = ctx->priv;
+ ThreadData *td = arg;
+ AVFrame *in = td->in;
+ AVFrame *out = td->out;
+ const int start = (s->planeheight[0] * jobnr ) / nb_jobs;
+ const int end = (s->planeheight[0] * (jobnr+1)) / nb_jobs;
+ int x, y, p;
+
+ for (y = start; y < end; y++) {
+ const int pos = y * s->planewidth[0];
+
+ for (x = 0; x < s->planewidth[0]; x++) {
+ const int x_pos = s->x_pos[pos + x];
+ const int y_pos = s->y_pos[pos + x];
+ int avg[4], cmp[4] = { 0 }, src[4];
+
+ for (p = 0; p < s->nb_components; p++) {
+ const uint8_t *src_ptr = (const uint8_t *)in->data[p];
+ const int src_linesize = in->linesize[p];
+ const int thr = s->thr[p];
+ const int w = s->planewidth[p] - 1;
+ const int h = s->planeheight[p] - 1;
+ const int ref0 = src_ptr[av_clip(y + y_pos, 0, h) * src_linesize + av_clip(x + x_pos, 0, w)];
+ const int ref1 = src_ptr[av_clip(y + -y_pos, 0, h) * src_linesize + av_clip(x + x_pos, 0, w)];
+ const int ref2 = src_ptr[av_clip(y + -y_pos, 0, h) * src_linesize + av_clip(x + -x_pos, 0, w)];
+ const int ref3 = src_ptr[av_clip(y + y_pos, 0, h) * src_linesize + av_clip(x + -x_pos, 0, w)];
+ const int src0 = src_ptr[y * src_linesize + x];
+
+ src[p] = src0;
+ avg[p] = get_avg(ref0, ref1, ref2, ref3);
+
+ if (s->blur) {
+ cmp[p] = FFABS(src0 - avg[p]) < thr;
+ } else {
+ cmp[p] = (FFABS(src0 - ref0) < thr) &&
+ (FFABS(src0 - ref1) < thr) &&
+ (FFABS(src0 - ref2) < thr) &&
+ (FFABS(src0 - ref3) < thr);
+ }
+ }
+
+ for (p = 0; p < s->nb_components; p++)
+ if (!cmp[p])
+ break;
+ if (p == s->nb_components) {
+ for (p = 0; p < s->nb_components; p++) {
+ const int dst_linesize = out->linesize[p];
+
+ out->data[p][y * dst_linesize + x] = avg[p];
+ }
+ } else {
+ for (p = 0; p < s->nb_components; p++) {
+ const int dst_linesize = out->linesize[p];
+
+ out->data[p][y * dst_linesize + x] = src[p];
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int deband_16_coupling_c(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
+{
+ DebandContext *s = ctx->priv;
+ ThreadData *td = arg;
+ AVFrame *in = td->in;
+ AVFrame *out = td->out;
+ const int start = (s->planeheight[0] * jobnr ) / nb_jobs;
+ const int end = (s->planeheight[0] * (jobnr+1)) / nb_jobs;
+ int x, y, p, z;
+
+ for (y = start; y < end; y++) {
+ const int pos = y * s->planewidth[0];
+
+ for (x = 0; x < s->planewidth[0]; x++) {
+ const int x_pos = s->x_pos[pos + x];
+ const int y_pos = s->y_pos[pos + x];
+ int avg[4], cmp[4] = { 0 }, src[4];
+
+ for (p = 0; p < s->nb_components; p++) {
+ const uint16_t *src_ptr = (const uint16_t *)in->data[p];
+ const int src_linesize = in->linesize[p] / 2;
+ const int thr = s->thr[p];
+ const int w = s->planewidth[p] - 1;
+ const int h = s->planeheight[p] - 1;
+ const int ref0 = src_ptr[av_clip(y + y_pos, 0, h) * src_linesize + av_clip(x + x_pos, 0, w)];
+ const int ref1 = src_ptr[av_clip(y + -y_pos, 0, h) * src_linesize + av_clip(x + x_pos, 0, w)];
+ const int ref2 = src_ptr[av_clip(y + -y_pos, 0, h) * src_linesize + av_clip(x + -x_pos, 0, w)];
+ const int ref3 = src_ptr[av_clip(y + y_pos, 0, h) * src_linesize + av_clip(x + -x_pos, 0, w)];
+ const int src0 = src_ptr[y * src_linesize + x];
+
+ src[p] = src0;
+ avg[p] = get_avg(ref0, ref1, ref2, ref3);
+
+ if (s->blur) {
+ cmp[p] = FFABS(src0 - avg[p]) < thr;
+ } else {
+ cmp[p] = (FFABS(src0 - ref0) < thr) &&
+ (FFABS(src0 - ref1) < thr) &&
+ (FFABS(src0 - ref2) < thr) &&
+ (FFABS(src0 - ref3) < thr);
+ }
+ }
+
+ for (z = 0; z < s->nb_components; z++)
+ if (!cmp[z])
+ break;
+ if (z == s->nb_components) {
+ for (p = 0; p < s->nb_components; p++) {
+ const int dst_linesize = out->linesize[p] / 2;
+ uint16_t *dst = (uint16_t *)out->data[p] + y * dst_linesize + x;
+
+ dst[0] = avg[p];
+ }
+ } else {
+ for (p = 0; p < s->nb_components; p++) {
+ const int dst_linesize = out->linesize[p] / 2;
+ uint16_t *dst = (uint16_t *)out->data[p] + y * dst_linesize + x;
+
+ dst[0] = src[p];
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int deband_16_c(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
+{
+ DebandContext *s = ctx->priv;
+ ThreadData *td = arg;
+ AVFrame *in = td->in;
+ AVFrame *out = td->out;
+ int x, y, p;
+
+ for (p = 0; p < s->nb_components; p++) {
+ const uint16_t *src_ptr = (const uint16_t *)in->data[p];
+ uint16_t *dst_ptr = (uint16_t *)out->data[p];
+ const int dst_linesize = out->linesize[p] / 2;
+ const int src_linesize = in->linesize[p] / 2;
+ const int thr = s->thr[p];
+ const int start = (s->planeheight[p] * jobnr ) / nb_jobs;
+ const int end = (s->planeheight[p] * (jobnr+1)) / nb_jobs;
+ const int w = s->planewidth[p] - 1;
+ const int h = s->planeheight[p] - 1;
+
+ for (y = start; y < end; y++) {
+ const int pos = y * s->planewidth[0];
+
+ for (x = 0; x < s->planewidth[p]; x++) {
+ const int x_pos = s->x_pos[pos + x];
+ const int y_pos = s->y_pos[pos + x];
+ const int ref0 = src_ptr[av_clip(y + y_pos, 0, h) * src_linesize + av_clip(x + x_pos, 0, w)];
+ const int ref1 = src_ptr[av_clip(y + -y_pos, 0, h) * src_linesize + av_clip(x + x_pos, 0, w)];
+ const int ref2 = src_ptr[av_clip(y + -y_pos, 0, h) * src_linesize + av_clip(x + -x_pos, 0, w)];
+ const int ref3 = src_ptr[av_clip(y + y_pos, 0, h) * src_linesize + av_clip(x + -x_pos, 0, w)];
+ const int src0 = src_ptr[y * src_linesize + x];
+
+ if (s->blur) {
+ const int avg = get_avg(ref0, ref1, ref2, ref3);
+ const int diff = FFABS(src0 - avg);
+
+ dst_ptr[y * dst_linesize + x] = diff < thr ? avg : src0;
+ } else {
+ dst_ptr[y * dst_linesize + x] = (FFABS(src0 - ref0) < thr) &&
+ (FFABS(src0 - ref1) < thr) &&
+ (FFABS(src0 - ref2) < thr) &&
+ (FFABS(src0 - ref3) < thr) ? get_avg(ref0, ref1, ref2, ref3) : src0;
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ AVFilterContext *ctx = inlink->dst;
+ DebandContext *s = ctx->priv;
+ const float direction = s->direction;
+ const int range = s->range;
+ int x, y;
+
+ s->nb_components = desc->nb_components;
+
+ s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
+ s->planeheight[0] = s->planeheight[3] = inlink->h;
+ s->planewidth[1] = s->planewidth[2] = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
+ s->planewidth[0] = s->planewidth[3] = inlink->w;
+ s->shift[0] = desc->log2_chroma_w;
+ s->shift[1] = desc->log2_chroma_h;
+
+ if (s->coupling)
+ s->deband = desc->comp[0].depth > 8 ? deband_16_coupling_c : deband_8_coupling_c;
+ else
+ s->deband = desc->comp[0].depth > 8 ? deband_16_c : deband_8_c;
+
+ s->thr[0] = ((1 << desc->comp[0].depth) - 1) * s->threshold[0];
+ s->thr[1] = ((1 << desc->comp[1].depth) - 1) * s->threshold[1];
+ s->thr[2] = ((1 << desc->comp[2].depth) - 1) * s->threshold[2];
+ s->thr[3] = ((1 << desc->comp[3].depth) - 1) * s->threshold[3];
+
+ s->x_pos = av_malloc(s->planewidth[0] * s->planeheight[0] * sizeof(*s->x_pos));
+ s->y_pos = av_malloc(s->planewidth[0] * s->planeheight[0] * sizeof(*s->y_pos));
+ if (!s->x_pos || !s->y_pos)
+ return AVERROR(ENOMEM);
+
+ for (y = 0; y < s->planeheight[0]; y++) {
+ for (x = 0; x < s->planewidth[0]; x++) {
+ const float r = frand(x, y);
+ const float dir = direction < 0 ? -direction : r * direction;
+ const int dist = range < 0 ? -range : r * range;
+
+ s->x_pos[y * s->planewidth[0] + x] = cosf(dir) * dist;
+ s->y_pos[y * s->planewidth[0] + x] = sinf(dir) * dist;
+ }
+ }
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ DebandContext *s = ctx->priv;
+ AVFrame *out;
+ ThreadData td;
+
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+
+ td.in = in; td.out = out;
+ ctx->internal->execute(ctx, s->deband, &td, NULL, FFMIN3(s->planeheight[1],
+ s->planeheight[2],
+ ff_filter_get_nb_threads(ctx)));
+
+ av_frame_free(&in);
+ return ff_filter_frame(outlink, out);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ DebandContext *s = ctx->priv;
+
+ av_freep(&s->x_pos);
+ av_freep(&s->y_pos);
+}
+
+static const AVFilterPad avfilter_vf_deband_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad avfilter_vf_deband_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_deband = {
+ .name = "deband",
+ .description = NULL_IF_CONFIG_SMALL("Debands video."),
+ .priv_size = sizeof(DebandContext),
+ .priv_class = &deband_class,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = avfilter_vf_deband_inputs,
+ .outputs = avfilter_vf_deband_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
+};
diff --git a/libavfilter/vf_decimate.c b/libavfilter/vf_decimate.c
new file mode 100644
index 0000000000..1fb242a3ae
--- /dev/null
+++ b/libavfilter/vf_decimate.c
@@ -0,0 +1,410 @@
+/*
+ * Copyright (c) 2012 Fredrik Mellbin
+ * Copyright (c) 2013 Clément Bœsch
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/timestamp.h"
+#include "avfilter.h"
+#include "internal.h"
+
+#define INPUT_MAIN 0
+#define INPUT_CLEANSRC 1
+
+struct qitem {
+ AVFrame *frame;
+ int64_t maxbdiff;
+ int64_t totdiff;
+};
+
+typedef struct {
+ const AVClass *class;
+ struct qitem *queue; ///< window of cycle frames and the associated data diff
+ int fid; ///< current frame id in the queue
+ int filled; ///< 1 if the queue is filled, 0 otherwise
+ AVFrame *last; ///< last frame from the previous queue
+ AVFrame **clean_src; ///< frame queue for the clean source
+ int got_frame[2]; ///< frame request flag for each input stream
+ AVRational ts_unit; ///< timestamp units for the output frames
+ int64_t start_pts; ///< base for output timestamps
+ uint32_t eof; ///< bitmask for end of stream
+ int hsub, vsub; ///< chroma subsampling values
+ int depth;
+ int nxblocks, nyblocks;
+ int bdiffsize;
+ int64_t *bdiffs;
+
+ /* options */
+ int cycle;
+ double dupthresh_flt;
+ double scthresh_flt;
+ int64_t dupthresh;
+ int64_t scthresh;
+ int blockx, blocky;
+ int ppsrc;
+ int chroma;
+} DecimateContext;
+
+#define OFFSET(x) offsetof(DecimateContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption decimate_options[] = {
+ { "cycle", "set the number of frame from which one will be dropped", OFFSET(cycle), AV_OPT_TYPE_INT, {.i64 = 5}, 2, 25, FLAGS },
+ { "dupthresh", "set duplicate threshold", OFFSET(dupthresh_flt), AV_OPT_TYPE_DOUBLE, {.dbl = 1.1}, 0, 100, FLAGS },
+ { "scthresh", "set scene change threshold", OFFSET(scthresh_flt), AV_OPT_TYPE_DOUBLE, {.dbl = 15.0}, 0, 100, FLAGS },
+ { "blockx", "set the size of the x-axis blocks used during metric calculations", OFFSET(blockx), AV_OPT_TYPE_INT, {.i64 = 32}, 4, 1<<9, FLAGS },
+ { "blocky", "set the size of the y-axis blocks used during metric calculations", OFFSET(blocky), AV_OPT_TYPE_INT, {.i64 = 32}, 4, 1<<9, FLAGS },
+ { "ppsrc", "mark main input as a pre-processed input and activate clean source input stream", OFFSET(ppsrc), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS },
+ { "chroma", "set whether or not chroma is considered in the metric calculations", OFFSET(chroma), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(decimate);
+
+static void calc_diffs(const DecimateContext *dm, struct qitem *q,
+ const AVFrame *f1, const AVFrame *f2)
+{
+ int64_t maxdiff = -1;
+ int64_t *bdiffs = dm->bdiffs;
+ int plane, i, j;
+
+ memset(bdiffs, 0, dm->bdiffsize * sizeof(*bdiffs));
+
+ for (plane = 0; plane < (dm->chroma && f1->data[2] ? 3 : 1); plane++) {
+ int x, y, xl;
+ const int linesize1 = f1->linesize[plane];
+ const int linesize2 = f2->linesize[plane];
+ const uint8_t *f1p = f1->data[plane];
+ const uint8_t *f2p = f2->data[plane];
+ int width = plane ? AV_CEIL_RSHIFT(f1->width, dm->hsub) : f1->width;
+ int height = plane ? AV_CEIL_RSHIFT(f1->height, dm->vsub) : f1->height;
+ int hblockx = dm->blockx / 2;
+ int hblocky = dm->blocky / 2;
+
+ if (plane) {
+ hblockx >>= dm->hsub;
+ hblocky >>= dm->vsub;
+ }
+
+ for (y = 0; y < height; y++) {
+ int ydest = y / hblocky;
+ int xdest = 0;
+
+#define CALC_DIFF(nbits) do { \
+ for (x = 0; x < width; x += hblockx) { \
+ int64_t acc = 0; \
+ int m = FFMIN(width, x + hblockx); \
+ for (xl = x; xl < m; xl++) \
+ acc += abs(((const uint##nbits##_t *)f1p)[xl] - \
+ ((const uint##nbits##_t *)f2p)[xl]); \
+ bdiffs[ydest * dm->nxblocks + xdest] += acc; \
+ xdest++; \
+ } \
+} while (0)
+ if (dm->depth == 8) CALC_DIFF(8);
+ else CALC_DIFF(16);
+
+ f1p += linesize1;
+ f2p += linesize2;
+ }
+ }
+
+ for (i = 0; i < dm->nyblocks - 1; i++) {
+ for (j = 0; j < dm->nxblocks - 1; j++) {
+ int64_t tmp = bdiffs[ i * dm->nxblocks + j ]
+ + bdiffs[ i * dm->nxblocks + j + 1]
+ + bdiffs[(i + 1) * dm->nxblocks + j ]
+ + bdiffs[(i + 1) * dm->nxblocks + j + 1];
+ if (tmp > maxdiff)
+ maxdiff = tmp;
+ }
+ }
+
+ q->totdiff = 0;
+ for (i = 0; i < dm->bdiffsize; i++)
+ q->totdiff += bdiffs[i];
+ q->maxbdiff = maxdiff;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ int scpos = -1, duppos = -1;
+ int drop = INT_MIN, i, lowest = 0, ret;
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ DecimateContext *dm = ctx->priv;
+ AVFrame *prv;
+
+ /* update frames queue(s) */
+ if (FF_INLINK_IDX(inlink) == INPUT_MAIN) {
+ dm->queue[dm->fid].frame = in;
+ dm->got_frame[INPUT_MAIN] = 1;
+ } else {
+ dm->clean_src[dm->fid] = in;
+ dm->got_frame[INPUT_CLEANSRC] = 1;
+ }
+ if (!dm->got_frame[INPUT_MAIN] || (dm->ppsrc && !dm->got_frame[INPUT_CLEANSRC]))
+ return 0;
+ dm->got_frame[INPUT_MAIN] = dm->got_frame[INPUT_CLEANSRC] = 0;
+
+ if (dm->ppsrc)
+ in = dm->clean_src[dm->fid];
+
+ if (in) {
+ /* update frame metrics */
+ prv = dm->fid ? (dm->ppsrc ? dm->clean_src[dm->fid - 1] : dm->queue[dm->fid - 1].frame) : dm->last;
+ if (!prv) {
+ dm->queue[dm->fid].maxbdiff = INT64_MAX;
+ dm->queue[dm->fid].totdiff = INT64_MAX;
+ } else {
+ calc_diffs(dm, &dm->queue[dm->fid], prv, in);
+ }
+ if (++dm->fid != dm->cycle)
+ return 0;
+ av_frame_free(&dm->last);
+ dm->last = av_frame_clone(in);
+ dm->fid = 0;
+
+ /* we have a complete cycle, select the frame to drop */
+ lowest = 0;
+ for (i = 0; i < dm->cycle; i++) {
+ if (dm->queue[i].totdiff > dm->scthresh)
+ scpos = i;
+ if (dm->queue[i].maxbdiff < dm->queue[lowest].maxbdiff)
+ lowest = i;
+ }
+ if (dm->queue[lowest].maxbdiff < dm->dupthresh)
+ duppos = lowest;
+ drop = scpos >= 0 && duppos < 0 ? scpos : lowest;
+ }
+
+ /* metrics debug */
+ if (av_log_get_level() >= AV_LOG_DEBUG) {
+ av_log(ctx, AV_LOG_DEBUG, "1/%d frame drop:\n", dm->cycle);
+ for (i = 0; i < dm->cycle && dm->queue[i].frame; i++) {
+ av_log(ctx, AV_LOG_DEBUG," #%d: totdiff=%08"PRIx64" maxbdiff=%08"PRIx64"%s%s%s%s\n",
+ i + 1, dm->queue[i].totdiff, dm->queue[i].maxbdiff,
+ i == scpos ? " sc" : "",
+ i == duppos ? " dup" : "",
+ i == lowest ? " lowest" : "",
+ i == drop ? " [DROP]" : "");
+ }
+ }
+
+ /* push all frames except the drop */
+ ret = 0;
+ for (i = 0; i < dm->cycle && dm->queue[i].frame; i++) {
+ if (i == drop) {
+ if (dm->ppsrc)
+ av_frame_free(&dm->clean_src[i]);
+ av_frame_free(&dm->queue[i].frame);
+ } else {
+ AVFrame *frame = dm->queue[i].frame;
+ if (frame->pts != AV_NOPTS_VALUE && dm->start_pts == AV_NOPTS_VALUE)
+ dm->start_pts = frame->pts;
+ if (dm->ppsrc) {
+ av_frame_free(&frame);
+ frame = dm->clean_src[i];
+ }
+ frame->pts = av_rescale_q(outlink->frame_count_in, dm->ts_unit, (AVRational){1,1}) +
+ (dm->start_pts == AV_NOPTS_VALUE ? 0 : dm->start_pts);
+ ret = ff_filter_frame(outlink, frame);
+ if (ret < 0)
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ int max_value;
+ AVFilterContext *ctx = inlink->dst;
+ DecimateContext *dm = ctx->priv;
+ const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
+ const int w = inlink->w;
+ const int h = inlink->h;
+
+ dm->hsub = pix_desc->log2_chroma_w;
+ dm->vsub = pix_desc->log2_chroma_h;
+ dm->depth = pix_desc->comp[0].depth;
+ max_value = (1 << dm->depth) - 1;
+ dm->scthresh = (int64_t)(((int64_t)max_value * w * h * dm->scthresh_flt) / 100);
+ dm->dupthresh = (int64_t)(((int64_t)max_value * dm->blockx * dm->blocky * dm->dupthresh_flt) / 100);
+ dm->nxblocks = (w + dm->blockx/2 - 1) / (dm->blockx/2);
+ dm->nyblocks = (h + dm->blocky/2 - 1) / (dm->blocky/2);
+ dm->bdiffsize = dm->nxblocks * dm->nyblocks;
+ dm->bdiffs = av_malloc_array(dm->bdiffsize, sizeof(*dm->bdiffs));
+ dm->queue = av_calloc(dm->cycle, sizeof(*dm->queue));
+
+ if (!dm->bdiffs || !dm->queue)
+ return AVERROR(ENOMEM);
+
+ if (dm->ppsrc) {
+ dm->clean_src = av_calloc(dm->cycle, sizeof(*dm->clean_src));
+ if (!dm->clean_src)
+ return AVERROR(ENOMEM);
+ }
+
+ return 0;
+}
+
+static av_cold int decimate_init(AVFilterContext *ctx)
+{
+ DecimateContext *dm = ctx->priv;
+ AVFilterPad pad = {
+ .name = av_strdup("main"),
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ };
+
+ if (!pad.name)
+ return AVERROR(ENOMEM);
+ ff_insert_inpad(ctx, INPUT_MAIN, &pad);
+
+ if (dm->ppsrc) {
+ pad.name = av_strdup("clean_src");
+ pad.config_props = NULL;
+ if (!pad.name)
+ return AVERROR(ENOMEM);
+ ff_insert_inpad(ctx, INPUT_CLEANSRC, &pad);
+ }
+
+ if ((dm->blockx & (dm->blockx - 1)) ||
+ (dm->blocky & (dm->blocky - 1))) {
+ av_log(ctx, AV_LOG_ERROR, "blockx and blocky settings must be power of two\n");
+ return AVERROR(EINVAL);
+ }
+
+ dm->start_pts = AV_NOPTS_VALUE;
+
+ return 0;
+}
+
+static av_cold void decimate_uninit(AVFilterContext *ctx)
+{
+ int i;
+ DecimateContext *dm = ctx->priv;
+
+ av_frame_free(&dm->last);
+ av_freep(&dm->bdiffs);
+ av_freep(&dm->queue);
+ av_freep(&dm->clean_src);
+ for (i = 0; i < ctx->nb_inputs; i++)
+ av_freep(&ctx->input_pads[i].name);
+}
+
+static int request_inlink(AVFilterContext *ctx, int lid)
+{
+ int ret = 0;
+ DecimateContext *dm = ctx->priv;
+
+ if (!dm->got_frame[lid]) {
+ AVFilterLink *inlink = ctx->inputs[lid];
+ ret = ff_request_frame(inlink);
+ if (ret == AVERROR_EOF) { // flushing
+ dm->eof |= 1 << lid;
+ ret = filter_frame(inlink, NULL);
+ }
+ }
+ return ret;
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ int ret;
+ AVFilterContext *ctx = outlink->src;
+ DecimateContext *dm = ctx->priv;
+ const uint32_t eof_mask = 1<<INPUT_MAIN | dm->ppsrc<<INPUT_CLEANSRC;
+
+ if ((dm->eof & eof_mask) == eof_mask) // flush done?
+ return AVERROR_EOF;
+ if ((ret = request_inlink(ctx, INPUT_MAIN)) < 0)
+ return ret;
+ if (dm->ppsrc && (ret = request_inlink(ctx, INPUT_CLEANSRC)) < 0)
+ return ret;
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+#define PF_NOALPHA(suf) AV_PIX_FMT_YUV420##suf, AV_PIX_FMT_YUV422##suf, AV_PIX_FMT_YUV444##suf
+#define PF_ALPHA(suf) AV_PIX_FMT_YUVA420##suf, AV_PIX_FMT_YUVA422##suf, AV_PIX_FMT_YUVA444##suf
+#define PF(suf) PF_NOALPHA(suf), PF_ALPHA(suf)
+ PF(P), PF(P9), PF(P10), PF_NOALPHA(P12), PF_NOALPHA(P14), PF(P16),
+ AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY16,
+ AV_PIX_FMT_NONE
+ };
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ DecimateContext *dm = ctx->priv;
+ const AVFilterLink *inlink =
+ ctx->inputs[dm->ppsrc ? INPUT_CLEANSRC : INPUT_MAIN];
+ AVRational fps = inlink->frame_rate;
+
+ if (!fps.num || !fps.den) {
+ av_log(ctx, AV_LOG_ERROR, "The input needs a constant frame rate; "
+ "current rate of %d/%d is invalid\n", fps.num, fps.den);
+ return AVERROR(EINVAL);
+ }
+ fps = av_mul_q(fps, (AVRational){dm->cycle - 1, dm->cycle});
+ av_log(ctx, AV_LOG_VERBOSE, "FPS: %d/%d -> %d/%d\n",
+ inlink->frame_rate.num, inlink->frame_rate.den, fps.num, fps.den);
+ outlink->time_base = inlink->time_base;
+ outlink->frame_rate = fps;
+ outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
+ outlink->w = inlink->w;
+ outlink->h = inlink->h;
+ dm->ts_unit = av_inv_q(av_mul_q(fps, outlink->time_base));
+ return 0;
+}
+
+static const AVFilterPad decimate_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .request_frame = request_frame,
+ .config_props = config_output,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_decimate = {
+ .name = "decimate",
+ .description = NULL_IF_CONFIG_SMALL("Decimate frames (post field matching filter)."),
+ .init = decimate_init,
+ .uninit = decimate_uninit,
+ .priv_size = sizeof(DecimateContext),
+ .query_formats = query_formats,
+ .outputs = decimate_outputs,
+ .priv_class = &decimate_class,
+ .flags = AVFILTER_FLAG_DYNAMIC_INPUTS,
+};
diff --git a/libavfilter/vf_deinterlace_qsv.c b/libavfilter/vf_deinterlace_qsv.c
index b26a900c4f..e7491e10df 100644
--- a/libavfilter/vf_deinterlace_qsv.c
+++ b/libavfilter/vf_deinterlace_qsv.c
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -74,7 +74,6 @@ typedef struct QSVDeintContext {
int64_t last_pts;
- int got_output_frame;
int eof;
} QSVDeintContext;
@@ -110,8 +109,10 @@ static int qsvdeint_query_formats(AVFilterContext *ctx)
AV_PIX_FMT_QSV, AV_PIX_FMT_NONE,
};
AVFilterFormats *pix_fmts = ff_make_format_list(pixel_formats);
+ int ret;
- ff_set_common_formats(ctx, pix_fmts);
+ if ((ret = ff_set_common_formats(ctx, pix_fmts)) < 0)
+ return ret;
return 0;
}
@@ -523,14 +524,8 @@ static int qsvdeint_filter_frame(AVFilterLink *link, AVFrame *in)
static int qsvdeint_request_frame(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
- QSVDeintContext *s = ctx->priv;
- int ret = 0;
- s->got_output_frame = 0;
- while (ret >= 0 && !s->got_output_frame)
- ret = ff_request_frame(ctx->inputs[0]);
-
- return ret;
+ return ff_request_frame(ctx->inputs[0]);
}
#define OFFSET(x) offsetof(QSVDeintContext, x)
diff --git a/libavfilter/vf_deinterlace_vaapi.c b/libavfilter/vf_deinterlace_vaapi.c
new file mode 100644
index 0000000000..032e65b225
--- /dev/null
+++ b/libavfilter/vf_deinterlace_vaapi.c
@@ -0,0 +1,634 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <string.h>
+
+#include <va/va.h>
+#include <va/va_vpp.h>
+
+#include "libavutil/avassert.h"
+#include "libavutil/hwcontext.h"
+#include "libavutil/hwcontext_vaapi.h"
+#include "libavutil/mem.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+#define MAX_REFERENCES 8
+
+typedef struct DeintVAAPIContext {
+ const AVClass *class;
+
+ AVVAAPIDeviceContext *hwctx;
+ AVBufferRef *device_ref;
+
+ int mode;
+
+ int valid_ids;
+ VAConfigID va_config;
+ VAContextID va_context;
+
+ AVBufferRef *input_frames_ref;
+ AVHWFramesContext *input_frames;
+
+ AVBufferRef *output_frames_ref;
+ AVHWFramesContext *output_frames;
+ int output_height;
+ int output_width;
+
+ VAProcFilterCapDeinterlacing
+ deint_caps[VAProcDeinterlacingCount];
+ int nb_deint_caps;
+ VAProcPipelineCaps pipeline_caps;
+
+ int queue_depth;
+ int queue_count;
+ AVFrame *frame_queue[MAX_REFERENCES];
+
+ VABufferID filter_buffer;
+} DeintVAAPIContext;
+
+static const char *deint_vaapi_mode_name(int mode)
+{
+ switch (mode) {
+#define D(name) case VAProcDeinterlacing ## name: return #name
+ D(Bob);
+ D(Weave);
+ D(MotionAdaptive);
+ D(MotionCompensated);
+#undef D
+ default:
+ return "Invalid";
+ }
+}
+
+static int deint_vaapi_query_formats(AVFilterContext *avctx)
+{
+ enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_VAAPI, AV_PIX_FMT_NONE,
+ };
+ int err;
+
+ if ((err = ff_formats_ref(ff_make_format_list(pix_fmts),
+ &avctx->inputs[0]->out_formats)) < 0)
+ return err;
+ if ((err = ff_formats_ref(ff_make_format_list(pix_fmts),
+ &avctx->outputs[0]->in_formats)) < 0)
+ return err;
+
+ return 0;
+}
+
+static int deint_vaapi_pipeline_uninit(AVFilterContext *avctx)
+{
+ DeintVAAPIContext *ctx = avctx->priv;
+ int i;
+
+ for (i = 0; i < ctx->queue_count; i++)
+ av_frame_free(&ctx->frame_queue[i]);
+ ctx->queue_count = 0;
+
+ if (ctx->filter_buffer != VA_INVALID_ID) {
+ vaDestroyBuffer(ctx->hwctx->display, ctx->filter_buffer);
+ ctx->filter_buffer = VA_INVALID_ID;
+ }
+
+ if (ctx->va_context != VA_INVALID_ID) {
+ vaDestroyContext(ctx->hwctx->display, ctx->va_context);
+ ctx->va_context = VA_INVALID_ID;
+ }
+
+ if (ctx->va_config != VA_INVALID_ID) {
+ vaDestroyConfig(ctx->hwctx->display, ctx->va_config);
+ ctx->va_config = VA_INVALID_ID;
+ }
+
+ av_buffer_unref(&ctx->device_ref);
+ ctx->hwctx = NULL;
+
+ return 0;
+}
+
+static int deint_vaapi_config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *avctx = inlink->dst;
+ DeintVAAPIContext *ctx = avctx->priv;
+
+ deint_vaapi_pipeline_uninit(avctx);
+
+ if (!inlink->hw_frames_ctx) {
+ av_log(avctx, AV_LOG_ERROR, "A hardware frames reference is "
+ "required to associate the processing device.\n");
+ return AVERROR(EINVAL);
+ }
+
+ ctx->input_frames_ref = av_buffer_ref(inlink->hw_frames_ctx);
+ ctx->input_frames = (AVHWFramesContext*)ctx->input_frames_ref->data;
+
+ return 0;
+}
+
+static int deint_vaapi_build_filter_params(AVFilterContext *avctx)
+{
+ DeintVAAPIContext *ctx = avctx->priv;
+ VAStatus vas;
+ VAProcFilterParameterBufferDeinterlacing params;
+ int i;
+
+ ctx->nb_deint_caps = VAProcDeinterlacingCount;
+ vas = vaQueryVideoProcFilterCaps(ctx->hwctx->display,
+ ctx->va_context,
+ VAProcFilterDeinterlacing,
+ &ctx->deint_caps,
+ &ctx->nb_deint_caps);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to query deinterlacing "
+ "caps: %d (%s).\n", vas, vaErrorStr(vas));
+ return AVERROR(EIO);
+ }
+
+ if (ctx->mode == VAProcDeinterlacingNone) {
+ for (i = 0; i < ctx->nb_deint_caps; i++) {
+ if (ctx->deint_caps[i].type > ctx->mode)
+ ctx->mode = ctx->deint_caps[i].type;
+ }
+ av_log(avctx, AV_LOG_VERBOSE, "Picking %d (%s) as default "
+ "deinterlacing mode.\n", ctx->mode,
+ deint_vaapi_mode_name(ctx->mode));
+ } else {
+ for (i = 0; i < ctx->nb_deint_caps; i++) {
+ if (ctx->deint_caps[i].type == ctx->mode)
+ break;
+ }
+ if (i >= ctx->nb_deint_caps) {
+ av_log(avctx, AV_LOG_ERROR, "Deinterlacing mode %d (%s) is "
+ "not supported.\n", ctx->mode,
+ deint_vaapi_mode_name(ctx->mode));
+ }
+ }
+
+ params.type = VAProcFilterDeinterlacing;
+ params.algorithm = ctx->mode;
+ params.flags = 0;
+
+ av_assert0(ctx->filter_buffer == VA_INVALID_ID);
+ vas = vaCreateBuffer(ctx->hwctx->display, ctx->va_context,
+ VAProcFilterParameterBufferType,
+ sizeof(params), 1, &params,
+ &ctx->filter_buffer);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to create deinterlace "
+ "parameter buffer: %d (%s).\n", vas, vaErrorStr(vas));
+ return AVERROR(EIO);
+ }
+
+ vas = vaQueryVideoProcPipelineCaps(ctx->hwctx->display,
+ ctx->va_context,
+ &ctx->filter_buffer, 1,
+ &ctx->pipeline_caps);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to query pipeline "
+ "caps: %d (%s).\n", vas, vaErrorStr(vas));
+ return AVERROR(EIO);
+ }
+
+ ctx->queue_depth = ctx->pipeline_caps.num_backward_references +
+ ctx->pipeline_caps.num_forward_references + 1;
+ if (ctx->queue_depth > MAX_REFERENCES) {
+ av_log(avctx, AV_LOG_ERROR, "Pipeline requires too many "
+ "references (%u forward, %u back).\n",
+ ctx->pipeline_caps.num_forward_references,
+ ctx->pipeline_caps.num_backward_references);
+ return AVERROR(ENOSYS);
+ }
+
+ return 0;
+}
+
+static int deint_vaapi_config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *avctx = outlink->src;
+ DeintVAAPIContext *ctx = avctx->priv;
+ AVVAAPIHWConfig *hwconfig = NULL;
+ AVHWFramesConstraints *constraints = NULL;
+ AVVAAPIFramesContext *va_frames;
+ VAStatus vas;
+ int err;
+
+ deint_vaapi_pipeline_uninit(avctx);
+
+ av_assert0(ctx->input_frames);
+ ctx->device_ref = av_buffer_ref(ctx->input_frames->device_ref);
+ ctx->hwctx = ((AVHWDeviceContext*)ctx->device_ref->data)->hwctx;
+
+ ctx->output_width = ctx->input_frames->width;
+ ctx->output_height = ctx->input_frames->height;
+
+ av_assert0(ctx->va_config == VA_INVALID_ID);
+ vas = vaCreateConfig(ctx->hwctx->display, VAProfileNone,
+ VAEntrypointVideoProc, 0, 0, &ctx->va_config);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to create processing pipeline "
+ "config: %d (%s).\n", vas, vaErrorStr(vas));
+ err = AVERROR(EIO);
+ goto fail;
+ }
+
+ hwconfig = av_hwdevice_hwconfig_alloc(ctx->device_ref);
+ if (!hwconfig) {
+ err = AVERROR(ENOMEM);
+ goto fail;
+ }
+ hwconfig->config_id = ctx->va_config;
+
+ constraints = av_hwdevice_get_hwframe_constraints(ctx->device_ref,
+ hwconfig);
+ if (!constraints) {
+ err = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ if (ctx->output_width < constraints->min_width ||
+ ctx->output_height < constraints->min_height ||
+ ctx->output_width > constraints->max_width ||
+ ctx->output_height > constraints->max_height) {
+ av_log(avctx, AV_LOG_ERROR, "Hardware does not support "
+ "deinterlacing to size %dx%d "
+ "(constraints: width %d-%d height %d-%d).\n",
+ ctx->output_width, ctx->output_height,
+ constraints->min_width, constraints->max_width,
+ constraints->min_height, constraints->max_height);
+ err = AVERROR(EINVAL);
+ goto fail;
+ }
+
+ ctx->output_frames_ref = av_hwframe_ctx_alloc(ctx->device_ref);
+ if (!ctx->output_frames_ref) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to create HW frame context "
+ "for output.\n");
+ err = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ ctx->output_frames = (AVHWFramesContext*)ctx->output_frames_ref->data;
+
+ ctx->output_frames->format = AV_PIX_FMT_VAAPI;
+ ctx->output_frames->sw_format = ctx->input_frames->sw_format;
+ ctx->output_frames->width = ctx->output_width;
+ ctx->output_frames->height = ctx->output_height;
+
+ // The number of output frames we need is determined by what follows
+ // the filter. If it's an encoder with complex frame reference
+ // structures then this could be very high.
+ ctx->output_frames->initial_pool_size = 10;
+
+ err = av_hwframe_ctx_init(ctx->output_frames_ref);
+ if (err < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to initialise VAAPI frame "
+ "context for output: %d\n", err);
+ goto fail;
+ }
+
+ va_frames = ctx->output_frames->hwctx;
+
+ av_assert0(ctx->va_context == VA_INVALID_ID);
+ vas = vaCreateContext(ctx->hwctx->display, ctx->va_config,
+ ctx->output_width, ctx->output_height, 0,
+ va_frames->surface_ids, va_frames->nb_surfaces,
+ &ctx->va_context);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to create processing pipeline "
+ "context: %d (%s).\n", vas, vaErrorStr(vas));
+ err = AVERROR(EIO);
+ goto fail;
+ }
+
+ err = deint_vaapi_build_filter_params(avctx);
+ if (err < 0)
+ goto fail;
+
+ outlink->w = ctx->output_width;
+ outlink->h = ctx->output_height;
+
+ outlink->hw_frames_ctx = av_buffer_ref(ctx->output_frames_ref);
+ if (!outlink->hw_frames_ctx) {
+ err = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ av_freep(&hwconfig);
+ av_hwframe_constraints_free(&constraints);
+ return 0;
+
+fail:
+ av_buffer_unref(&ctx->output_frames_ref);
+ av_freep(&hwconfig);
+ av_hwframe_constraints_free(&constraints);
+ return err;
+}
+
+static int vaapi_proc_colour_standard(enum AVColorSpace av_cs)
+{
+ switch(av_cs) {
+#define CS(av, va) case AVCOL_SPC_ ## av: return VAProcColorStandard ## va;
+ CS(BT709, BT709);
+ CS(BT470BG, BT470BG);
+ CS(SMPTE170M, SMPTE170M);
+ CS(SMPTE240M, SMPTE240M);
+#undef CS
+ default:
+ return VAProcColorStandardNone;
+ }
+}
+
+static int deint_vaapi_filter_frame(AVFilterLink *inlink, AVFrame *input_frame)
+{
+ AVFilterContext *avctx = inlink->dst;
+ AVFilterLink *outlink = avctx->outputs[0];
+ DeintVAAPIContext *ctx = avctx->priv;
+ AVFrame *output_frame = NULL;
+ VASurfaceID input_surface, output_surface;
+ VASurfaceID backward_references[MAX_REFERENCES];
+ VASurfaceID forward_references[MAX_REFERENCES];
+ VAProcPipelineParameterBuffer params;
+ VAProcFilterParameterBufferDeinterlacing *filter_params;
+ VARectangle input_region;
+ VABufferID params_id;
+ VAStatus vas;
+ void *filter_params_addr = NULL;
+ int err, i;
+
+ av_log(avctx, AV_LOG_DEBUG, "Filter input: %s, %ux%u (%"PRId64").\n",
+ av_get_pix_fmt_name(input_frame->format),
+ input_frame->width, input_frame->height, input_frame->pts);
+
+ if (ctx->queue_count < ctx->queue_depth) {
+ ctx->frame_queue[ctx->queue_count++] = input_frame;
+ if (ctx->queue_count < ctx->queue_depth) {
+ // Need more reference surfaces before we can continue.
+ return 0;
+ }
+ } else {
+ av_frame_free(&ctx->frame_queue[0]);
+ for (i = 0; i + 1 < ctx->queue_count; i++)
+ ctx->frame_queue[i] = ctx->frame_queue[i + 1];
+ ctx->frame_queue[i] = input_frame;
+ }
+
+ input_frame =
+ ctx->frame_queue[ctx->pipeline_caps.num_backward_references];
+ input_surface = (VASurfaceID)(uintptr_t)input_frame->data[3];
+ for (i = 0; i < ctx->pipeline_caps.num_backward_references; i++)
+ backward_references[i] = (VASurfaceID)(uintptr_t)
+ ctx->frame_queue[ctx->pipeline_caps.num_backward_references -
+ i - 1]->data[3];
+ for (i = 0; i < ctx->pipeline_caps.num_forward_references; i++)
+ forward_references[i] = (VASurfaceID)(uintptr_t)
+ ctx->frame_queue[ctx->pipeline_caps.num_backward_references +
+ i + 1]->data[3];
+
+ av_log(avctx, AV_LOG_DEBUG, "Using surface %#x for "
+ "deinterlace input.\n", input_surface);
+ av_log(avctx, AV_LOG_DEBUG, "Backward references:");
+ for (i = 0; i < ctx->pipeline_caps.num_backward_references; i++)
+ av_log(avctx, AV_LOG_DEBUG, " %#x", backward_references[i]);
+ av_log(avctx, AV_LOG_DEBUG, "\n");
+ av_log(avctx, AV_LOG_DEBUG, "Forward references:");
+ for (i = 0; i < ctx->pipeline_caps.num_forward_references; i++)
+ av_log(avctx, AV_LOG_DEBUG, " %#x", forward_references[i]);
+ av_log(avctx, AV_LOG_DEBUG, "\n");
+
+ output_frame = av_frame_alloc();
+ if (!output_frame) {
+ err = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ err = av_hwframe_get_buffer(ctx->output_frames_ref,
+ output_frame, 0);
+ if (err < 0) {
+ err = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ output_surface = (VASurfaceID)(uintptr_t)output_frame->data[3];
+ av_log(avctx, AV_LOG_DEBUG, "Using surface %#x for "
+ "deinterlace output.\n", output_surface);
+
+ memset(&params, 0, sizeof(params));
+
+ input_region = (VARectangle) {
+ .x = 0,
+ .y = 0,
+ .width = input_frame->width,
+ .height = input_frame->height,
+ };
+
+ params.surface = input_surface;
+ params.surface_region = &input_region;
+ params.surface_color_standard = vaapi_proc_colour_standard(
+ av_frame_get_colorspace(input_frame));
+
+ params.output_region = NULL;
+ params.output_background_color = 0xff000000;
+ params.output_color_standard = params.surface_color_standard;
+
+ params.pipeline_flags = 0;
+ params.filter_flags = VA_FRAME_PICTURE;
+
+ vas = vaMapBuffer(ctx->hwctx->display, ctx->filter_buffer,
+ &filter_params_addr);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to map filter parameter "
+ "buffer: %d (%s).\n", vas, vaErrorStr(vas));
+ err = AVERROR(EIO);
+ goto fail;
+ }
+ filter_params = filter_params_addr;
+ filter_params->flags = 0;
+ if (input_frame->interlaced_frame && !input_frame->top_field_first)
+ filter_params->flags |= VA_DEINTERLACING_BOTTOM_FIELD_FIRST;
+ filter_params_addr = NULL;
+ vas = vaUnmapBuffer(ctx->hwctx->display, ctx->filter_buffer);
+ if (vas != VA_STATUS_SUCCESS)
+ av_log(avctx, AV_LOG_ERROR, "Failed to unmap filter parameter "
+ "buffer: %d (%s).\n", vas, vaErrorStr(vas));
+
+ params.filters = &ctx->filter_buffer;
+ params.num_filters = 1;
+
+ params.forward_references = forward_references;
+ params.num_forward_references =
+ ctx->pipeline_caps.num_forward_references;
+ params.backward_references = backward_references;
+ params.num_backward_references =
+ ctx->pipeline_caps.num_backward_references;
+
+ vas = vaBeginPicture(ctx->hwctx->display,
+ ctx->va_context, output_surface);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to attach new picture: "
+ "%d (%s).\n", vas, vaErrorStr(vas));
+ err = AVERROR(EIO);
+ goto fail;
+ }
+
+ vas = vaCreateBuffer(ctx->hwctx->display, ctx->va_context,
+ VAProcPipelineParameterBufferType,
+ sizeof(params), 1, &params, &params_id);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to create parameter buffer: "
+ "%d (%s).\n", vas, vaErrorStr(vas));
+ err = AVERROR(EIO);
+ goto fail_after_begin;
+ }
+ av_log(avctx, AV_LOG_DEBUG, "Pipeline parameter buffer is %#x.\n",
+ params_id);
+
+ vas = vaRenderPicture(ctx->hwctx->display, ctx->va_context,
+ &params_id, 1);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to render parameter buffer: "
+ "%d (%s).\n", vas, vaErrorStr(vas));
+ err = AVERROR(EIO);
+ goto fail_after_begin;
+ }
+
+ vas = vaEndPicture(ctx->hwctx->display, ctx->va_context);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to start picture processing: "
+ "%d (%s).\n", vas, vaErrorStr(vas));
+ err = AVERROR(EIO);
+ goto fail_after_render;
+ }
+
+ if (ctx->hwctx->driver_quirks &
+ AV_VAAPI_DRIVER_QUIRK_RENDER_PARAM_BUFFERS) {
+ vas = vaDestroyBuffer(ctx->hwctx->display, params_id);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to free parameter buffer: "
+ "%d (%s).\n", vas, vaErrorStr(vas));
+ // And ignore.
+ }
+ }
+
+ err = av_frame_copy_props(output_frame, input_frame);
+ if (err < 0)
+ goto fail;
+
+ av_log(avctx, AV_LOG_DEBUG, "Filter output: %s, %ux%u (%"PRId64").\n",
+ av_get_pix_fmt_name(output_frame->format),
+ output_frame->width, output_frame->height, output_frame->pts);
+
+ return ff_filter_frame(outlink, output_frame);
+
+fail_after_begin:
+ vaRenderPicture(ctx->hwctx->display, ctx->va_context, &params_id, 1);
+fail_after_render:
+ vaEndPicture(ctx->hwctx->display, ctx->va_context);
+fail:
+ if (filter_params_addr)
+ vaUnmapBuffer(ctx->hwctx->display, ctx->filter_buffer);
+ av_frame_free(&output_frame);
+ return err;
+}
+
+static av_cold int deint_vaapi_init(AVFilterContext *avctx)
+{
+ DeintVAAPIContext *ctx = avctx->priv;
+
+ ctx->va_config = VA_INVALID_ID;
+ ctx->va_context = VA_INVALID_ID;
+ ctx->filter_buffer = VA_INVALID_ID;
+ ctx->valid_ids = 1;
+
+ return 0;
+}
+
+static av_cold void deint_vaapi_uninit(AVFilterContext *avctx)
+{
+ DeintVAAPIContext *ctx = avctx->priv;
+
+ if (ctx->valid_ids)
+ deint_vaapi_pipeline_uninit(avctx);
+
+ av_buffer_unref(&ctx->input_frames_ref);
+ av_buffer_unref(&ctx->output_frames_ref);
+ av_buffer_unref(&ctx->device_ref);
+}
+
+#define OFFSET(x) offsetof(DeintVAAPIContext, x)
+#define FLAGS (AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM)
+static const AVOption deint_vaapi_options[] = {
+ { "mode", "Deinterlacing mode",
+ OFFSET(mode), AV_OPT_TYPE_INT, { .i64 = VAProcDeinterlacingNone },
+ VAProcDeinterlacingNone, VAProcDeinterlacingCount - 1, FLAGS, "mode" },
+ { "default", "Use the highest-numbered (and therefore possibly most advanced) deinterlacing algorithm",
+ 0, AV_OPT_TYPE_CONST, { .i64 = VAProcDeinterlacingNone }, .unit = "mode" },
+ { "bob", "Use the bob deinterlacing algorithm",
+ 0, AV_OPT_TYPE_CONST, { .i64 = VAProcDeinterlacingBob }, .unit = "mode" },
+ { "weave", "Use the weave deinterlacing algorithm",
+ 0, AV_OPT_TYPE_CONST, { .i64 = VAProcDeinterlacingWeave }, .unit = "mode" },
+ { "motion_adaptive", "Use the motion adaptive deinterlacing algorithm",
+ 0, AV_OPT_TYPE_CONST, { .i64 = VAProcDeinterlacingMotionAdaptive }, .unit = "mode" },
+ { "motion_compensated", "Use the motion compensated deinterlacing algorithm",
+ 0, AV_OPT_TYPE_CONST, { .i64 = VAProcDeinterlacingMotionCompensated }, .unit = "mode" },
+ { NULL },
+};
+
+static const AVClass deint_vaapi_class = {
+ .class_name = "deinterlace_vaapi",
+ .item_name = av_default_item_name,
+ .option = deint_vaapi_options,
+ .version = LIBAVUTIL_VERSION_INT,
+};
+
+static const AVFilterPad deint_vaapi_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = &deint_vaapi_filter_frame,
+ .config_props = &deint_vaapi_config_input,
+ },
+ { NULL }
+};
+
+static const AVFilterPad deint_vaapi_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = &deint_vaapi_config_output,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_deinterlace_vaapi = {
+ .name = "deinterlace_vaapi",
+ .description = NULL_IF_CONFIG_SMALL("Deinterlacing of VAAPI surfaces"),
+ .priv_size = sizeof(DeintVAAPIContext),
+ .init = &deint_vaapi_init,
+ .uninit = &deint_vaapi_uninit,
+ .query_formats = &deint_vaapi_query_formats,
+ .inputs = deint_vaapi_inputs,
+ .outputs = deint_vaapi_outputs,
+ .priv_class = &deint_vaapi_class,
+};
diff --git a/libavfilter/vf_dejudder.c b/libavfilter/vf_dejudder.c
new file mode 100644
index 0000000000..c4d7b6bb6c
--- /dev/null
+++ b/libavfilter/vf_dejudder.c
@@ -0,0 +1,187 @@
+/*
+ * Copyright (c) 2014 Nicholas Robbins
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * remove judder in video stream
+ *
+ * Algorithm:
+ * - If the old packets had PTS of old_pts[i]. Replace these with new
+ * value based on the running average of the last n=cycle frames. So
+ *
+ * new_pts[i] = Sum(k=i-n+1, i, old_pts[k])/n
+ * + (old_pts[i]-old_pts[i-n])*(n-1)/2n
+ *
+ * For any repeating pattern of length n of judder this will produce
+ * an even progression of PTS's.
+ *
+ * - In order to avoid calculating this sum ever frame, a running tally
+ * is maintained in ctx->new_pts. Each frame the new term at the start
+ * of the sum is added, the one and the end is removed, and the offset
+ * terms (second line in formula above) are recalculated.
+ *
+ * - To aid in this a ringbuffer of the last n-2 PTS's is maintained in
+ * ctx->ringbuff. With the indices of the first two and last two entries
+ * stored in i1, i2, i3, & i4.
+ *
+ * - To ensure that the new PTS's are integers, time_base is divided
+ * by 2n. This removes the division in the new_pts calculation.
+ *
+ * - frame_rate is also multiplied by 2n to allow the frames to fall
+ * where they may in what may now be a VFR output. This produces more
+ * even output then setting frame_rate=1/0 in practice.
+ */
+
+#include "libavutil/opt.h"
+#include "libavutil/mathematics.h"
+#include "avfilter.h"
+#include "internal.h"
+#include "video.h"
+
+typedef struct {
+ const AVClass *class;
+ int64_t *ringbuff;
+ int i1, i2, i3, i4;
+ int64_t new_pts;
+ int start_count;
+
+ /* options */
+ int cycle;
+} DejudderContext;
+
+#define OFFSET(x) offsetof(DejudderContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption dejudder_options[] = {
+ {"cycle", "set the length of the cycle to use for dejuddering",
+ OFFSET(cycle), AV_OPT_TYPE_INT, {.i64 = 4}, 2, 240, .flags = FLAGS},
+ {NULL}
+};
+
+AVFILTER_DEFINE_CLASS(dejudder);
+
+static int config_out_props(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ DejudderContext *s = ctx->priv;
+ AVFilterLink *inlink = outlink->src->inputs[0];
+
+ outlink->time_base = av_mul_q(inlink->time_base, av_make_q(1, 2 * s->cycle));
+ outlink->frame_rate = av_mul_q(inlink->frame_rate, av_make_q(2 * s->cycle, 1));
+
+ av_log(ctx, AV_LOG_VERBOSE, "cycle:%d\n", s->cycle);
+
+ return 0;
+}
+
+static av_cold int dejudder_init(AVFilterContext *ctx)
+{
+ DejudderContext *s = ctx->priv;
+
+ s->ringbuff = av_mallocz_array(s->cycle+2, sizeof(*s->ringbuff));
+ if (!s->ringbuff)
+ return AVERROR(ENOMEM);
+
+ s->new_pts = 0;
+ s->i1 = 0;
+ s->i2 = 1;
+ s->i3 = 2;
+ s->i4 = 3;
+ s->start_count = s->cycle + 2;
+
+ return 0;
+}
+
+static av_cold void dejudder_uninit(AVFilterContext *ctx)
+{
+ DejudderContext *s = ctx->priv;
+
+ av_freep(&(s->ringbuff));
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
+{
+ int k;
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ DejudderContext *s = ctx->priv;
+ int64_t *judbuff = s->ringbuff;
+ int64_t next_pts = frame->pts;
+ int64_t offset;
+
+ if (next_pts == AV_NOPTS_VALUE)
+ return ff_filter_frame(outlink, frame);
+
+ if (s->start_count) {
+ s->start_count--;
+ s->new_pts = next_pts * 2 * s->cycle;
+ } else {
+ if (next_pts < judbuff[s->i2]) {
+ offset = next_pts + judbuff[s->i3] - judbuff[s->i4] - judbuff[s->i1];
+ for (k = 0; k < s->cycle + 2; k++)
+ judbuff[k] += offset;
+ }
+ s->new_pts += (s->cycle - 1) * (judbuff[s->i3] - judbuff[s->i1])
+ + (s->cycle + 1) * (next_pts - judbuff[s->i4]);
+ }
+
+ judbuff[s->i2] = next_pts;
+ s->i1 = s->i2;
+ s->i2 = s->i3;
+ s->i3 = s->i4;
+ s->i4 = (s->i4 + 1) % (s->cycle + 2);
+
+ frame->pts = s->new_pts;
+
+ for (k = 0; k < s->cycle + 2; k++)
+ av_log(ctx, AV_LOG_DEBUG, "%"PRId64"\t", judbuff[k]);
+ av_log(ctx, AV_LOG_DEBUG, "next=%"PRId64", new=%"PRId64"\n", next_pts, frame->pts);
+
+ return ff_filter_frame(outlink, frame);
+}
+
+static const AVFilterPad dejudder_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad dejudder_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_out_props,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_dejudder = {
+ .name = "dejudder",
+ .description = NULL_IF_CONFIG_SMALL("Remove judder produced by pullup."),
+ .priv_size = sizeof(DejudderContext),
+ .priv_class = &dejudder_class,
+ .inputs = dejudder_inputs,
+ .outputs = dejudder_outputs,
+ .init = dejudder_init,
+ .uninit = dejudder_uninit,
+};
diff --git a/libavfilter/vf_delogo.c b/libavfilter/vf_delogo.c
index dc58078d04..065d093641 100644
--- a/libavfilter/vf_delogo.c
+++ b/libavfilter/vf_delogo.c
@@ -1,28 +1,30 @@
/*
* Copyright (c) 2002 Jindrich Makovicka <makovick@gmail.com>
* Copyright (c) 2011 Stefano Sabatini
+ * Copyright (c) 2013, 2015 Jean Delvare <jdelvare@suse.com>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or modify
+ * FFmpeg is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
- * with Libav; if not, write to the Free Software Foundation, Inc.,
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
/**
* @file
* A very simple tv station logo remover
- * Ported from MPlayer libmpcodecs/vf_delogo.c.
+ * Originally imported from MPlayer libmpcodecs/vf_delogo.c,
+ * the algorithm was later improved.
*/
#include "libavutil/common.h"
@@ -35,8 +37,8 @@
#include "video.h"
/**
- * Apply a simple delogo algorithm to the image in dst and put the
- * result in src.
+ * Apply a simple delogo algorithm to the image in src and put the
+ * result in dst.
*
* The algorithm is only applied to the region specified by the logo
* parameters.
@@ -54,15 +56,16 @@
*/
static void apply_delogo(uint8_t *dst, int dst_linesize,
uint8_t *src, int src_linesize,
- int w, int h,
+ int w, int h, AVRational sar,
int logo_x, int logo_y, int logo_w, int logo_h,
- int band, int show, int direct)
+ unsigned int band, int show, int direct)
{
int x, y;
- int interp, dist;
+ uint64_t interp, weightl, weightr, weightt, weightb, weight;
uint8_t *xdst, *xsrc;
uint8_t *topleft, *botleft, *topright;
+ unsigned int left_sample, right_sample;
int xclipl, xclipr, yclipt, yclipb;
int logo_x1, logo_x2, logo_y1, logo_y2;
@@ -72,13 +75,13 @@ static void apply_delogo(uint8_t *dst, int dst_linesize,
yclipb = FFMAX(logo_y+logo_h-h, 0);
logo_x1 = logo_x + xclipl;
- logo_x2 = logo_x + logo_w - xclipr;
+ logo_x2 = logo_x + logo_w - xclipr - 1;
logo_y1 = logo_y + yclipt;
- logo_y2 = logo_y + logo_h - yclipb;
+ logo_y2 = logo_y + logo_h - yclipb - 1;
- topleft = src+logo_y1 * src_linesize+logo_x1;
- topright = src+logo_y1 * src_linesize+logo_x2-1;
- botleft = src+(logo_y2-1) * src_linesize+logo_x1;
+ topleft = src+logo_y1 * src_linesize+logo_x1;
+ topright = src+logo_y1 * src_linesize+logo_x2;
+ botleft = src+logo_y2 * src_linesize+logo_x1;
if (!direct)
av_image_copy_plane(dst, dst_linesize, src, src_linesize, w, h);
@@ -86,29 +89,51 @@ static void apply_delogo(uint8_t *dst, int dst_linesize,
dst += (logo_y1 + 1) * dst_linesize;
src += (logo_y1 + 1) * src_linesize;
- for (y = logo_y1+1; y < logo_y2-1; y++) {
+ for (y = logo_y1+1; y < logo_y2; y++) {
+ left_sample = topleft[src_linesize*(y-logo_y1)] +
+ topleft[src_linesize*(y-logo_y1-1)] +
+ topleft[src_linesize*(y-logo_y1+1)];
+ right_sample = topright[src_linesize*(y-logo_y1)] +
+ topright[src_linesize*(y-logo_y1-1)] +
+ topright[src_linesize*(y-logo_y1+1)];
+
for (x = logo_x1+1,
xdst = dst+logo_x1+1,
- xsrc = src+logo_x1+1; x < logo_x2-1; x++, xdst++, xsrc++) {
- interp = (topleft[src_linesize*(y-logo_y -yclipt)] +
- topleft[src_linesize*(y-logo_y-1-yclipt)] +
- topleft[src_linesize*(y-logo_y+1-yclipt)]) * (logo_w-(x-logo_x))/logo_w
- + (topright[src_linesize*(y-logo_y-yclipt)] +
- topright[src_linesize*(y-logo_y-1-yclipt)] +
- topright[src_linesize*(y-logo_y+1-yclipt)]) * (x-logo_x)/logo_w
- + (topleft[x-logo_x-xclipl] +
- topleft[x-logo_x-1-xclipl] +
- topleft[x-logo_x+1-xclipl]) * (logo_h-(y-logo_y))/logo_h
- + (botleft[x-logo_x-xclipl] +
- botleft[x-logo_x-1-xclipl] +
- botleft[x-logo_x+1-xclipl]) * (y-logo_y)/logo_h;
- interp /= 6;
+ xsrc = src+logo_x1+1; x < logo_x2; x++, xdst++, xsrc++) {
+
+ if (show && (y == logo_y1+1 || y == logo_y2-1 ||
+ x == logo_x1+1 || x == logo_x2-1)) {
+ *xdst = 0;
+ continue;
+ }
+
+ /* Weighted interpolation based on relative distances, taking SAR into account */
+ weightl = (uint64_t) (logo_x2-x) * (y-logo_y1) * (logo_y2-y) * sar.den;
+ weightr = (uint64_t)(x-logo_x1) * (y-logo_y1) * (logo_y2-y) * sar.den;
+ weightt = (uint64_t)(x-logo_x1) * (logo_x2-x) * (logo_y2-y) * sar.num;
+ weightb = (uint64_t)(x-logo_x1) * (logo_x2-x) * (y-logo_y1) * sar.num;
+
+ interp =
+ left_sample * weightl
+ +
+ right_sample * weightr
+ +
+ (topleft[x-logo_x1] +
+ topleft[x-logo_x1-1] +
+ topleft[x-logo_x1+1]) * weightt
+ +
+ (botleft[x-logo_x1] +
+ botleft[x-logo_x1-1] +
+ botleft[x-logo_x1+1]) * weightb;
+ weight = (weightl + weightr + weightt + weightb) * 3U;
+ interp = ROUNDED_DIV(interp, weight);
if (y >= logo_y+band && y < logo_y+logo_h-band &&
x >= logo_x+band && x < logo_x+logo_w-band) {
*xdst = interp;
} else {
- dist = 0;
+ unsigned dist = 0;
+
if (x < logo_x+band)
dist = FFMAX(dist, logo_x-x+band);
else if (x >= logo_x+logo_w-band)
@@ -120,8 +145,6 @@ static void apply_delogo(uint8_t *dst, int dst_linesize,
dist = FFMAX(dist, y-(logo_y+logo_h-1-band));
*xdst = (*xsrc*dist + interp*(band-dist))/band;
- if (show && (dist == band-1))
- *xdst = 0;
}
}
@@ -136,41 +159,36 @@ typedef struct DelogoContext {
} DelogoContext;
#define OFFSET(x) offsetof(DelogoContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
static const AVOption delogo_options[]= {
{ "x", "set logo x position", OFFSET(x), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS },
{ "y", "set logo y position", OFFSET(y), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS },
{ "w", "set logo width", OFFSET(w), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS },
{ "h", "set logo height", OFFSET(h), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS },
- { "band", "set delogo area band size", OFFSET(band), AV_OPT_TYPE_INT, { .i64 = 4 }, -1, INT_MAX, FLAGS },
- { "t", "set delogo area band size", OFFSET(band), AV_OPT_TYPE_INT, { .i64 = 4 }, -1, INT_MAX, FLAGS },
- { "show", "show delogo area", OFFSET(show), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, FLAGS },
- { NULL },
+#if LIBAVFILTER_VERSION_MAJOR < 7
+ /* Actual default value for band/t is 1, set in init */
+ { "band", "set delogo area band size", OFFSET(band), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, FLAGS },
+ { "t", "set delogo area band size", OFFSET(band), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, FLAGS },
+#endif
+ { "show", "show delogo area", OFFSET(show), AV_OPT_TYPE_BOOL,{ .i64 = 0 }, 0, 1, FLAGS },
+ { NULL }
};
-static const char *delogo_get_name(void *ctx)
-{
- return "delogo";
-}
-
-static const AVClass delogo_class = {
- .class_name = "DelogoContext",
- .item_name = delogo_get_name,
- .option = delogo_options,
-};
+AVFILTER_DEFINE_CLASS(delogo);
static int query_formats(AVFilterContext *ctx)
{
- enum AVPixelFormat pix_fmts[] = {
+ static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P,
AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
AV_PIX_FMT_YUVA420P, AV_PIX_FMT_GRAY8,
AV_PIX_FMT_NONE
};
-
- ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
- return 0;
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
}
static av_cold int init(AVFilterContext *ctx)
@@ -187,10 +205,17 @@ static av_cold int init(AVFilterContext *ctx)
CHECK_UNSET_OPT(w);
CHECK_UNSET_OPT(h);
- if (s->show)
- s->band = 4;
-
- av_log(ctx, AV_LOG_DEBUG, "x:%d y:%d, w:%d h:%d band:%d show:%d\n",
+#if LIBAVFILTER_VERSION_MAJOR < 7
+ if (s->band == 0) { /* Unset, use default */
+ av_log(ctx, AV_LOG_WARNING, "Note: default band value was changed from 4 to 1.\n");
+ s->band = 1;
+ } else if (s->band != 1) {
+ av_log(ctx, AV_LOG_WARNING, "Option band is deprecated.\n");
+ }
+#else
+ s->band = 1;
+#endif
+ av_log(ctx, AV_LOG_VERBOSE, "x:%d y:%d, w:%d h:%d band:%d show:%d\n",
s->x, s->y, s->w, s->h, s->band, s->show);
s->w += s->band*2;
@@ -201,6 +226,20 @@ static av_cold int init(AVFilterContext *ctx)
return 0;
}
+static int config_input(AVFilterLink *inlink)
+{
+ DelogoContext *s = inlink->dst->priv;
+
+ /* Check whether the logo area fits in the frame */
+ if (s->x + (s->band - 1) < 0 || s->x + s->w - (s->band*2 - 2) > inlink->w ||
+ s->y + (s->band - 1) < 0 || s->y + s->h - (s->band*2 - 2) > inlink->h) {
+ av_log(s, AV_LOG_ERROR, "Logo area is outside of the frame.\n");
+ return AVERROR(EINVAL);
+ }
+
+ return 0;
+}
+
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
DelogoContext *s = inlink->dst->priv;
@@ -211,6 +250,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
int vsub0 = desc->log2_chroma_h;
int direct = 0;
int plane;
+ AVRational sar;
if (av_frame_is_writable(in)) {
direct = 1;
@@ -223,19 +263,26 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
}
av_frame_copy_props(out, in);
- out->width = outlink->w;
- out->height = outlink->h;
}
- for (plane = 0; plane < 4 && in->data[plane]; plane++) {
+ sar = in->sample_aspect_ratio;
+ /* Assume square pixels if SAR is unknown */
+ if (!sar.num)
+ sar.num = sar.den = 1;
+
+ for (plane = 0; plane < desc->nb_components; plane++) {
int hsub = plane == 1 || plane == 2 ? hsub0 : 0;
int vsub = plane == 1 || plane == 2 ? vsub0 : 0;
apply_delogo(out->data[plane], out->linesize[plane],
in ->data[plane], in ->linesize[plane],
- inlink->w>>hsub, inlink->h>>vsub,
- s->x>>hsub, s->y>>vsub,
- s->w>>hsub, s->h>>vsub,
+ AV_CEIL_RSHIFT(inlink->w, hsub),
+ AV_CEIL_RSHIFT(inlink->h, vsub),
+ sar, s->x>>hsub, s->y>>vsub,
+ /* Up and left borders were rounded down, inject lost bits
+ * into width and height to avoid error accumulation */
+ AV_CEIL_RSHIFT(s->w + (s->x & ((1<<hsub)-1)), hsub),
+ AV_CEIL_RSHIFT(s->h + (s->y & ((1<<vsub)-1)), vsub),
s->band>>FFMIN(hsub, vsub),
s->show, direct);
}
@@ -248,10 +295,10 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
static const AVFilterPad avfilter_vf_delogo_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .get_video_buffer = ff_null_get_video_buffer,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
},
{ NULL }
};
@@ -271,7 +318,7 @@ AVFilter ff_vf_delogo = {
.priv_class = &delogo_class,
.init = init,
.query_formats = query_formats,
-
- .inputs = avfilter_vf_delogo_inputs,
- .outputs = avfilter_vf_delogo_outputs,
+ .inputs = avfilter_vf_delogo_inputs,
+ .outputs = avfilter_vf_delogo_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
diff --git a/libavfilter/vf_deshake.c b/libavfilter/vf_deshake.c
new file mode 100644
index 0000000000..64b48c6d02
--- /dev/null
+++ b/libavfilter/vf_deshake.c
@@ -0,0 +1,579 @@
+/*
+ * Copyright (C) 2010 Georg Martius <georg.martius@web.de>
+ * Copyright (C) 2010 Daniel G. Taylor <dan@programmer-art.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * fast deshake / depan video filter
+ *
+ * SAD block-matching motion compensation to fix small changes in
+ * horizontal and/or vertical shift. This filter helps remove camera shake
+ * from hand-holding a camera, bumping a tripod, moving on a vehicle, etc.
+ *
+ * Algorithm:
+ * - For each frame with one previous reference frame
+ * - For each block in the frame
+ * - If contrast > threshold then find likely motion vector
+ * - For all found motion vectors
+ * - Find most common, store as global motion vector
+ * - Find most likely rotation angle
+ * - Transform image along global motion
+ *
+ * TODO:
+ * - Fill frame edges based on previous/next reference frames
+ * - Fill frame edges by stretching image near the edges?
+ * - Can this be done quickly and look decent?
+ *
+ * Dark Shikari links to http://wiki.videolan.org/SoC_x264_2010#GPU_Motion_Estimation_2
+ * for an algorithm similar to what could be used here to get the gmv
+ * It requires only a couple diamond searches + fast downscaling
+ *
+ * Special thanks to Jason Kotenko for his help with the algorithm and my
+ * inability to see simple errors in C code.
+ */
+
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+#include "libavutil/common.h"
+#include "libavutil/mem.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/qsort.h"
+
+#include "deshake.h"
+#include "deshake_opencl.h"
+
+#define OFFSET(x) offsetof(DeshakeContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption deshake_options[] = {
+ { "x", "set x for the rectangular search area", OFFSET(cx), AV_OPT_TYPE_INT, {.i64=-1}, -1, INT_MAX, .flags = FLAGS },
+ { "y", "set y for the rectangular search area", OFFSET(cy), AV_OPT_TYPE_INT, {.i64=-1}, -1, INT_MAX, .flags = FLAGS },
+ { "w", "set width for the rectangular search area", OFFSET(cw), AV_OPT_TYPE_INT, {.i64=-1}, -1, INT_MAX, .flags = FLAGS },
+ { "h", "set height for the rectangular search area", OFFSET(ch), AV_OPT_TYPE_INT, {.i64=-1}, -1, INT_MAX, .flags = FLAGS },
+ { "rx", "set x for the rectangular search area", OFFSET(rx), AV_OPT_TYPE_INT, {.i64=16}, 0, MAX_R, .flags = FLAGS },
+ { "ry", "set y for the rectangular search area", OFFSET(ry), AV_OPT_TYPE_INT, {.i64=16}, 0, MAX_R, .flags = FLAGS },
+ { "edge", "set edge mode", OFFSET(edge), AV_OPT_TYPE_INT, {.i64=FILL_MIRROR}, FILL_BLANK, FILL_COUNT-1, FLAGS, "edge"},
+ { "blank", "fill zeroes at blank locations", 0, AV_OPT_TYPE_CONST, {.i64=FILL_BLANK}, INT_MIN, INT_MAX, FLAGS, "edge" },
+ { "original", "original image at blank locations", 0, AV_OPT_TYPE_CONST, {.i64=FILL_ORIGINAL}, INT_MIN, INT_MAX, FLAGS, "edge" },
+ { "clamp", "extruded edge value at blank locations", 0, AV_OPT_TYPE_CONST, {.i64=FILL_CLAMP}, INT_MIN, INT_MAX, FLAGS, "edge" },
+ { "mirror", "mirrored edge at blank locations", 0, AV_OPT_TYPE_CONST, {.i64=FILL_MIRROR}, INT_MIN, INT_MAX, FLAGS, "edge" },
+ { "blocksize", "set motion search blocksize", OFFSET(blocksize), AV_OPT_TYPE_INT, {.i64=8}, 4, 128, .flags = FLAGS },
+ { "contrast", "set contrast threshold for blocks", OFFSET(contrast), AV_OPT_TYPE_INT, {.i64=125}, 1, 255, .flags = FLAGS },
+ { "search", "set search strategy", OFFSET(search), AV_OPT_TYPE_INT, {.i64=EXHAUSTIVE}, EXHAUSTIVE, SEARCH_COUNT-1, FLAGS, "smode" },
+ { "exhaustive", "exhaustive search", 0, AV_OPT_TYPE_CONST, {.i64=EXHAUSTIVE}, INT_MIN, INT_MAX, FLAGS, "smode" },
+ { "less", "less exhaustive search", 0, AV_OPT_TYPE_CONST, {.i64=SMART_EXHAUSTIVE}, INT_MIN, INT_MAX, FLAGS, "smode" },
+ { "filename", "set motion search detailed log file name", OFFSET(filename), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
+ { "opencl", "use OpenCL filtering capabilities", OFFSET(opencl), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, .flags = FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(deshake);
+
+static int cmp(const void *a, const void *b)
+{
+ return FFDIFFSIGN(*(const double *)a, *(const double *)b);
+}
+
+/**
+ * Cleaned mean (cuts off 20% of values to remove outliers and then averages)
+ */
+static double clean_mean(double *values, int count)
+{
+ double mean = 0;
+ int cut = count / 5;
+ int x;
+
+ AV_QSORT(values, count, double, cmp);
+
+ for (x = cut; x < count - cut; x++) {
+ mean += values[x];
+ }
+
+ return mean / (count - cut * 2);
+}
+
+/**
+ * Find the most likely shift in motion between two frames for a given
+ * macroblock. Test each block against several shifts given by the rx
+ * and ry attributes. Searches using a simple matrix of those shifts and
+ * chooses the most likely shift by the smallest difference in blocks.
+ */
+static void find_block_motion(DeshakeContext *deshake, uint8_t *src1,
+ uint8_t *src2, int cx, int cy, int stride,
+ IntMotionVector *mv)
+{
+ int x, y;
+ int diff;
+ int smallest = INT_MAX;
+ int tmp, tmp2;
+
+ #define CMP(i, j) deshake->sad(src1 + cy * stride + cx, stride,\
+ src2 + (j) * stride + (i), stride)
+
+ if (deshake->search == EXHAUSTIVE) {
+ // Compare every possible position - this is sloooow!
+ for (y = -deshake->ry; y <= deshake->ry; y++) {
+ for (x = -deshake->rx; x <= deshake->rx; x++) {
+ diff = CMP(cx - x, cy - y);
+ if (diff < smallest) {
+ smallest = diff;
+ mv->x = x;
+ mv->y = y;
+ }
+ }
+ }
+ } else if (deshake->search == SMART_EXHAUSTIVE) {
+ // Compare every other possible position and find the best match
+ for (y = -deshake->ry + 1; y < deshake->ry; y += 2) {
+ for (x = -deshake->rx + 1; x < deshake->rx; x += 2) {
+ diff = CMP(cx - x, cy - y);
+ if (diff < smallest) {
+ smallest = diff;
+ mv->x = x;
+ mv->y = y;
+ }
+ }
+ }
+
+ // Hone in on the specific best match around the match we found above
+ tmp = mv->x;
+ tmp2 = mv->y;
+
+ for (y = tmp2 - 1; y <= tmp2 + 1; y++) {
+ for (x = tmp - 1; x <= tmp + 1; x++) {
+ if (x == tmp && y == tmp2)
+ continue;
+
+ diff = CMP(cx - x, cy - y);
+ if (diff < smallest) {
+ smallest = diff;
+ mv->x = x;
+ mv->y = y;
+ }
+ }
+ }
+ }
+
+ if (smallest > 512) {
+ mv->x = -1;
+ mv->y = -1;
+ }
+ emms_c();
+ //av_log(NULL, AV_LOG_ERROR, "%d\n", smallest);
+ //av_log(NULL, AV_LOG_ERROR, "Final: (%d, %d) = %d x %d\n", cx, cy, mv->x, mv->y);
+}
+
+/**
+ * Find the contrast of a given block. When searching for global motion we
+ * really only care about the high contrast blocks, so using this method we
+ * can actually skip blocks we don't care much about.
+ */
+static int block_contrast(uint8_t *src, int x, int y, int stride, int blocksize)
+{
+ int highest = 0;
+ int lowest = 255;
+ int i, j, pos;
+
+ for (i = 0; i <= blocksize * 2; i++) {
+ // We use a width of 16 here to match the sad function
+ for (j = 0; j <= 15; j++) {
+ pos = (y - i) * stride + (x - j);
+ if (src[pos] < lowest)
+ lowest = src[pos];
+ else if (src[pos] > highest) {
+ highest = src[pos];
+ }
+ }
+ }
+
+ return highest - lowest;
+}
+
+/**
+ * Find the rotation for a given block.
+ */
+static double block_angle(int x, int y, int cx, int cy, IntMotionVector *shift)
+{
+ double a1, a2, diff;
+
+ a1 = atan2(y - cy, x - cx);
+ a2 = atan2(y - cy + shift->y, x - cx + shift->x);
+
+ diff = a2 - a1;
+
+ return (diff > M_PI) ? diff - 2 * M_PI :
+ (diff < -M_PI) ? diff + 2 * M_PI :
+ diff;
+}
+
+/**
+ * Find the estimated global motion for a scene given the most likely shift
+ * for each block in the frame. The global motion is estimated to be the
+ * same as the motion from most blocks in the frame, so if most blocks
+ * move one pixel to the right and two pixels down, this would yield a
+ * motion vector (1, -2).
+ */
+static void find_motion(DeshakeContext *deshake, uint8_t *src1, uint8_t *src2,
+ int width, int height, int stride, Transform *t)
+{
+ int x, y;
+ IntMotionVector mv = {0, 0};
+ int count_max_value = 0;
+ int contrast;
+
+ int pos;
+ int center_x = 0, center_y = 0;
+ double p_x, p_y;
+
+ av_fast_malloc(&deshake->angles, &deshake->angles_size, width * height / (16 * deshake->blocksize) * sizeof(*deshake->angles));
+
+ // Reset counts to zero
+ for (x = 0; x < deshake->rx * 2 + 1; x++) {
+ for (y = 0; y < deshake->ry * 2 + 1; y++) {
+ deshake->counts[x][y] = 0;
+ }
+ }
+
+ pos = 0;
+ // Find motion for every block and store the motion vector in the counts
+ for (y = deshake->ry; y < height - deshake->ry - (deshake->blocksize * 2); y += deshake->blocksize * 2) {
+ // We use a width of 16 here to match the sad function
+ for (x = deshake->rx; x < width - deshake->rx - 16; x += 16) {
+ // If the contrast is too low, just skip this block as it probably
+ // won't be very useful to us.
+ contrast = block_contrast(src2, x, y, stride, deshake->blocksize);
+ if (contrast > deshake->contrast) {
+ //av_log(NULL, AV_LOG_ERROR, "%d\n", contrast);
+ find_block_motion(deshake, src1, src2, x, y, stride, &mv);
+ if (mv.x != -1 && mv.y != -1) {
+ deshake->counts[mv.x + deshake->rx][mv.y + deshake->ry] += 1;
+ if (x > deshake->rx && y > deshake->ry)
+ deshake->angles[pos++] = block_angle(x, y, 0, 0, &mv);
+
+ center_x += mv.x;
+ center_y += mv.y;
+ }
+ }
+ }
+ }
+
+ if (pos) {
+ center_x /= pos;
+ center_y /= pos;
+ t->angle = clean_mean(deshake->angles, pos);
+ if (t->angle < 0.001)
+ t->angle = 0;
+ } else {
+ t->angle = 0;
+ }
+
+ // Find the most common motion vector in the frame and use it as the gmv
+ for (y = deshake->ry * 2; y >= 0; y--) {
+ for (x = 0; x < deshake->rx * 2 + 1; x++) {
+ //av_log(NULL, AV_LOG_ERROR, "%5d ", deshake->counts[x][y]);
+ if (deshake->counts[x][y] > count_max_value) {
+ t->vec.x = x - deshake->rx;
+ t->vec.y = y - deshake->ry;
+ count_max_value = deshake->counts[x][y];
+ }
+ }
+ //av_log(NULL, AV_LOG_ERROR, "\n");
+ }
+
+ p_x = (center_x - width / 2.0);
+ p_y = (center_y - height / 2.0);
+ t->vec.x += (cos(t->angle)-1)*p_x - sin(t->angle)*p_y;
+ t->vec.y += sin(t->angle)*p_x + (cos(t->angle)-1)*p_y;
+
+ // Clamp max shift & rotation?
+ t->vec.x = av_clipf(t->vec.x, -deshake->rx * 2, deshake->rx * 2);
+ t->vec.y = av_clipf(t->vec.y, -deshake->ry * 2, deshake->ry * 2);
+ t->angle = av_clipf(t->angle, -0.1, 0.1);
+
+ //av_log(NULL, AV_LOG_ERROR, "%d x %d\n", avg->x, avg->y);
+}
+
+static int deshake_transform_c(AVFilterContext *ctx,
+ int width, int height, int cw, int ch,
+ const float *matrix_y, const float *matrix_uv,
+ enum InterpolateMethod interpolate,
+ enum FillMethod fill, AVFrame *in, AVFrame *out)
+{
+ int i = 0, ret = 0;
+ const float *matrixs[3];
+ int plane_w[3], plane_h[3];
+ matrixs[0] = matrix_y;
+ matrixs[1] = matrixs[2] = matrix_uv;
+ plane_w[0] = width;
+ plane_w[1] = plane_w[2] = cw;
+ plane_h[0] = height;
+ plane_h[1] = plane_h[2] = ch;
+
+ for (i = 0; i < 3; i++) {
+ // Transform the luma and chroma planes
+ ret = avfilter_transform(in->data[i], out->data[i], in->linesize[i], out->linesize[i],
+ plane_w[i], plane_h[i], matrixs[i], interpolate, fill);
+ if (ret < 0)
+ return ret;
+ }
+ return ret;
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ int ret;
+ DeshakeContext *deshake = ctx->priv;
+
+ deshake->sad = av_pixelutils_get_sad_fn(4, 4, 1, deshake); // 16x16, 2nd source unaligned
+ if (!deshake->sad)
+ return AVERROR(EINVAL);
+
+ deshake->refcount = 20; // XXX: add to options?
+ deshake->blocksize /= 2;
+ deshake->blocksize = av_clip(deshake->blocksize, 4, 128);
+
+ if (deshake->rx % 16) {
+ av_log(ctx, AV_LOG_ERROR, "rx must be a multiple of 16\n");
+ return AVERROR_PATCHWELCOME;
+ }
+
+ if (deshake->filename)
+ deshake->fp = fopen(deshake->filename, "w");
+ if (deshake->fp)
+ fwrite("Ori x, Avg x, Fin x, Ori y, Avg y, Fin y, Ori angle, Avg angle, Fin angle, Ori zoom, Avg zoom, Fin zoom\n", sizeof(char), 104, deshake->fp);
+
+ // Quadword align left edge of box for MMX code, adjust width if necessary
+ // to keep right margin
+ if (deshake->cx > 0) {
+ deshake->cw += deshake->cx - (deshake->cx & ~15);
+ deshake->cx &= ~15;
+ }
+ deshake->transform = deshake_transform_c;
+ if (!CONFIG_OPENCL && deshake->opencl) {
+ av_log(ctx, AV_LOG_ERROR, "OpenCL support was not enabled in this build, cannot be selected\n");
+ return AVERROR(EINVAL);
+ }
+
+ if (CONFIG_OPENCL && deshake->opencl) {
+ deshake->transform = ff_opencl_transform;
+ ret = ff_opencl_deshake_init(ctx);
+ if (ret < 0)
+ return ret;
+ }
+ av_log(ctx, AV_LOG_VERBOSE, "cx: %d, cy: %d, cw: %d, ch: %d, rx: %d, ry: %d, edge: %d blocksize: %d contrast: %d search: %d\n",
+ deshake->cx, deshake->cy, deshake->cw, deshake->ch,
+ deshake->rx, deshake->ry, deshake->edge, deshake->blocksize * 2, deshake->contrast, deshake->search);
+
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P,
+ AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_NONE
+ };
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static int config_props(AVFilterLink *link)
+{
+ DeshakeContext *deshake = link->dst->priv;
+
+ deshake->ref = NULL;
+ deshake->last.vec.x = 0;
+ deshake->last.vec.y = 0;
+ deshake->last.angle = 0;
+ deshake->last.zoom = 0;
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ DeshakeContext *deshake = ctx->priv;
+ if (CONFIG_OPENCL && deshake->opencl) {
+ ff_opencl_deshake_uninit(ctx);
+ }
+ av_frame_free(&deshake->ref);
+ av_freep(&deshake->angles);
+ deshake->angles_size = 0;
+ if (deshake->fp)
+ fclose(deshake->fp);
+}
+
+static int filter_frame(AVFilterLink *link, AVFrame *in)
+{
+ DeshakeContext *deshake = link->dst->priv;
+ AVFilterLink *outlink = link->dst->outputs[0];
+ AVFrame *out;
+ Transform t = {{0},0}, orig = {{0},0};
+ float matrix_y[9], matrix_uv[9];
+ float alpha = 2.0 / deshake->refcount;
+ char tmp[256];
+ int ret = 0;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format);
+ const int chroma_width = AV_CEIL_RSHIFT(link->w, desc->log2_chroma_w);
+ const int chroma_height = AV_CEIL_RSHIFT(link->h, desc->log2_chroma_h);
+
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+
+ if (CONFIG_OPENCL && deshake->opencl) {
+ ret = ff_opencl_deshake_process_inout_buf(link->dst,in, out);
+ if (ret < 0)
+ goto fail;
+ }
+
+ if (deshake->cx < 0 || deshake->cy < 0 || deshake->cw < 0 || deshake->ch < 0) {
+ // Find the most likely global motion for the current frame
+ find_motion(deshake, (deshake->ref == NULL) ? in->data[0] : deshake->ref->data[0], in->data[0], link->w, link->h, in->linesize[0], &t);
+ } else {
+ uint8_t *src1 = (deshake->ref == NULL) ? in->data[0] : deshake->ref->data[0];
+ uint8_t *src2 = in->data[0];
+
+ deshake->cx = FFMIN(deshake->cx, link->w);
+ deshake->cy = FFMIN(deshake->cy, link->h);
+
+ if ((unsigned)deshake->cx + (unsigned)deshake->cw > link->w) deshake->cw = link->w - deshake->cx;
+ if ((unsigned)deshake->cy + (unsigned)deshake->ch > link->h) deshake->ch = link->h - deshake->cy;
+
+ // Quadword align right margin
+ deshake->cw &= ~15;
+
+ src1 += deshake->cy * in->linesize[0] + deshake->cx;
+ src2 += deshake->cy * in->linesize[0] + deshake->cx;
+
+ find_motion(deshake, src1, src2, deshake->cw, deshake->ch, in->linesize[0], &t);
+ }
+
+
+ // Copy transform so we can output it later to compare to the smoothed value
+ orig.vec.x = t.vec.x;
+ orig.vec.y = t.vec.y;
+ orig.angle = t.angle;
+ orig.zoom = t.zoom;
+
+ // Generate a one-sided moving exponential average
+ deshake->avg.vec.x = alpha * t.vec.x + (1.0 - alpha) * deshake->avg.vec.x;
+ deshake->avg.vec.y = alpha * t.vec.y + (1.0 - alpha) * deshake->avg.vec.y;
+ deshake->avg.angle = alpha * t.angle + (1.0 - alpha) * deshake->avg.angle;
+ deshake->avg.zoom = alpha * t.zoom + (1.0 - alpha) * deshake->avg.zoom;
+
+ // Remove the average from the current motion to detect the motion that
+ // is not on purpose, just as jitter from bumping the camera
+ t.vec.x -= deshake->avg.vec.x;
+ t.vec.y -= deshake->avg.vec.y;
+ t.angle -= deshake->avg.angle;
+ t.zoom -= deshake->avg.zoom;
+
+ // Invert the motion to undo it
+ t.vec.x *= -1;
+ t.vec.y *= -1;
+ t.angle *= -1;
+
+ // Write statistics to file
+ if (deshake->fp) {
+ snprintf(tmp, 256, "%f, %f, %f, %f, %f, %f, %f, %f, %f, %f, %f, %f\n", orig.vec.x, deshake->avg.vec.x, t.vec.x, orig.vec.y, deshake->avg.vec.y, t.vec.y, orig.angle, deshake->avg.angle, t.angle, orig.zoom, deshake->avg.zoom, t.zoom);
+ fwrite(tmp, sizeof(char), strlen(tmp), deshake->fp);
+ }
+
+ // Turn relative current frame motion into absolute by adding it to the
+ // last absolute motion
+ t.vec.x += deshake->last.vec.x;
+ t.vec.y += deshake->last.vec.y;
+ t.angle += deshake->last.angle;
+ t.zoom += deshake->last.zoom;
+
+ // Shrink motion by 10% to keep things centered in the camera frame
+ t.vec.x *= 0.9;
+ t.vec.y *= 0.9;
+ t.angle *= 0.9;
+
+ // Store the last absolute motion information
+ deshake->last.vec.x = t.vec.x;
+ deshake->last.vec.y = t.vec.y;
+ deshake->last.angle = t.angle;
+ deshake->last.zoom = t.zoom;
+
+ // Generate a luma transformation matrix
+ avfilter_get_matrix(t.vec.x, t.vec.y, t.angle, 1.0 + t.zoom / 100.0, matrix_y);
+ // Generate a chroma transformation matrix
+ avfilter_get_matrix(t.vec.x / (link->w / chroma_width), t.vec.y / (link->h / chroma_height), t.angle, 1.0 + t.zoom / 100.0, matrix_uv);
+ // Transform the luma and chroma planes
+ ret = deshake->transform(link->dst, link->w, link->h, chroma_width, chroma_height,
+ matrix_y, matrix_uv, INTERPOLATE_BILINEAR, deshake->edge, in, out);
+
+ // Cleanup the old reference frame
+ av_frame_free(&deshake->ref);
+
+ if (ret < 0)
+ goto fail;
+
+ // Store the current frame as the reference frame for calculating the
+ // motion of the next frame
+ deshake->ref = in;
+
+ return ff_filter_frame(outlink, out);
+fail:
+ av_frame_free(&out);
+ return ret;
+}
+
+static const AVFilterPad deshake_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_props,
+ },
+ { NULL }
+};
+
+static const AVFilterPad deshake_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_deshake = {
+ .name = "deshake",
+ .description = NULL_IF_CONFIG_SMALL("Stabilize shaky video."),
+ .priv_size = sizeof(DeshakeContext),
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = deshake_inputs,
+ .outputs = deshake_outputs,
+ .priv_class = &deshake_class,
+};
diff --git a/libavfilter/vf_detelecine.c b/libavfilter/vf_detelecine.c
new file mode 100644
index 0000000000..0d5f88df77
--- /dev/null
+++ b/libavfilter/vf_detelecine.c
@@ -0,0 +1,386 @@
+/*
+ * Copyright (c) 2015 Himangi Saraogi <himangi774@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file detelecine filter.
+ */
+
+
+#include "libavutil/avstring.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+typedef struct {
+ const AVClass *class;
+ int first_field;
+ char *pattern;
+ int start_frame;
+ int init_len;
+ unsigned int pattern_pos;
+ unsigned int nskip_fields;
+ int64_t start_time;
+
+ AVRational pts;
+ AVRational ts_unit;
+ int occupied;
+
+ int nb_planes;
+ int planeheight[4];
+ int stride[4];
+
+ AVFrame *frame[2];
+ AVFrame *temp;
+} DetelecineContext;
+
+#define OFFSET(x) offsetof(DetelecineContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption detelecine_options[] = {
+ {"first_field", "select first field", OFFSET(first_field), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS, "field"},
+ {"top", "select top field first", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "field"},
+ {"t", "select top field first", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "field"},
+ {"bottom", "select bottom field first", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "field"},
+ {"b", "select bottom field first", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "field"},
+ {"pattern", "pattern that describe for how many fields a frame is to be displayed", OFFSET(pattern), AV_OPT_TYPE_STRING, {.str="23"}, 0, 0, FLAGS},
+ {"start_frame", "position of first frame with respect to the pattern if stream is cut", OFFSET(start_frame), AV_OPT_TYPE_INT, {.i64=0}, 0, 13, FLAGS},
+ {NULL}
+};
+
+AVFILTER_DEFINE_CLASS(detelecine);
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ DetelecineContext *s = ctx->priv;
+ const char *p;
+ int max = 0;
+ int sum = 0;
+
+ if (!strlen(s->pattern)) {
+ av_log(ctx, AV_LOG_ERROR, "No pattern provided.\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ for (p = s->pattern; *p; p++) {
+ if (!av_isdigit(*p)) {
+ av_log(ctx, AV_LOG_ERROR, "Provided pattern includes non-numeric characters.\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ sum += *p - '0';
+ max = FFMAX(*p - '0', max);
+ s->pts.num += *p - '0';
+ s->pts.den += 2;
+ }
+
+ if (s->start_frame >= sum) {
+ av_log(ctx, AV_LOG_ERROR, "Provided start_frame is too big.\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ s->nskip_fields = 0;
+ s->pattern_pos = 0;
+ s->start_time = AV_NOPTS_VALUE;
+ s->init_len = 0;
+
+ if (s->start_frame != 0) {
+ int nfields = 0;
+ for (p = s->pattern; *p; p++) {
+ nfields += *p - '0';
+ s->pattern_pos++;
+ if (nfields >= 2*s->start_frame) {
+ s->init_len = nfields - 2*s->start_frame;
+ break;
+ }
+ }
+ }
+
+ av_log(ctx, AV_LOG_INFO, "Detelecine pattern %s removes up to %d frames per frame, pts advance factor: %d/%d\n",
+ s->pattern, (max + 1) / 2, s->pts.num, s->pts.den);
+
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *pix_fmts = NULL;
+ int fmt, ret;
+
+ for (fmt = 0; av_pix_fmt_desc_get(fmt); fmt++) {
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
+ if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL ||
+ desc->flags & AV_PIX_FMT_FLAG_PAL ||
+ desc->flags & AV_PIX_FMT_FLAG_BITSTREAM) &&
+ (ret = ff_add_format(&pix_fmts, fmt)) < 0)
+ return ret;
+ }
+
+ return ff_set_common_formats(ctx, pix_fmts);
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ DetelecineContext *s = inlink->dst->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ int ret;
+
+ s->temp = ff_get_video_buffer(inlink, inlink->w, inlink->h);
+ if (!s->temp)
+ return AVERROR(ENOMEM);
+
+ s->frame[0] = ff_get_video_buffer(inlink, inlink->w, inlink->h);
+ if (!s->frame[0])
+ return AVERROR(ENOMEM);
+
+ s->frame[1] = ff_get_video_buffer(inlink, inlink->w, inlink->h);
+ if (!s->frame[1])
+ return AVERROR(ENOMEM);
+
+ if ((ret = av_image_fill_linesizes(s->stride, inlink->format, inlink->w)) < 0)
+ return ret;
+
+ s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
+ s->planeheight[0] = s->planeheight[3] = inlink->h;
+
+ s->nb_planes = av_pix_fmt_count_planes(inlink->format);
+
+ return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ DetelecineContext *s = ctx->priv;
+ const AVFilterLink *inlink = ctx->inputs[0];
+ AVRational fps = inlink->frame_rate;
+
+ if (!fps.num || !fps.den) {
+ av_log(ctx, AV_LOG_ERROR, "The input needs a constant frame rate; "
+ "current rate of %d/%d is invalid\n", fps.num, fps.den);
+ return AVERROR(EINVAL);
+ }
+ fps = av_mul_q(fps, av_inv_q(s->pts));
+ av_log(ctx, AV_LOG_VERBOSE, "FPS: %d/%d -> %d/%d\n",
+ inlink->frame_rate.num, inlink->frame_rate.den, fps.num, fps.den);
+
+ outlink->frame_rate = fps;
+ outlink->time_base = av_mul_q(inlink->time_base, s->pts);
+ av_log(ctx, AV_LOG_VERBOSE, "TB: %d/%d -> %d/%d\n",
+ inlink->time_base.num, inlink->time_base.den, outlink->time_base.num, outlink->time_base.den);
+
+ s->ts_unit = av_inv_q(av_mul_q(fps, outlink->time_base));
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ DetelecineContext *s = ctx->priv;
+ int i, len = 0, ret = 0, out = 0;
+
+ if (s->start_time == AV_NOPTS_VALUE)
+ s->start_time = inpicref->pts;
+
+ if (s->nskip_fields >= 2) {
+ s->nskip_fields -= 2;
+ return 0;
+ } else if (s->nskip_fields >= 1) {
+ for (i = 0; i < s->nb_planes; i++) {
+ av_image_copy_plane(s->temp->data[i], s->temp->linesize[i],
+ inpicref->data[i], inpicref->linesize[i],
+ s->stride[i],
+ s->planeheight[i]);
+ }
+ s->occupied = 1;
+ s->nskip_fields--;
+ return 0;
+ }
+
+ if (s->nskip_fields == 0) {
+ len = s->init_len;
+ s->init_len = 0;
+ while(!len && s->pattern[s->pattern_pos]) {
+ len = s->pattern[s->pattern_pos] - '0';
+ s->pattern_pos++;
+ }
+
+ if (!s->pattern[s->pattern_pos])
+ s->pattern_pos = 0;
+
+ if(!len) { // do not output any field as the entire pattern is zero
+ av_frame_free(&inpicref);
+ return 0;
+ }
+
+ if (len == 1 && s->occupied) {
+ s->occupied = 0;
+ // output THIS image as-is
+ for (i = 0; i < s->nb_planes; i++)
+ av_image_copy_plane(s->frame[out]->data[i], s->frame[out]->linesize[i],
+ s->temp->data[i], s->temp->linesize[i],
+ s->stride[i],
+ s->planeheight[i]);
+ len = 0;
+ while(!len && s->pattern[s->pattern_pos]) {
+ len = s->pattern[s->pattern_pos] - '0';
+ s->pattern_pos++;
+ }
+
+ if (!s->pattern[s->pattern_pos])
+ s->pattern_pos = 0;
+
+ s->occupied = 0;
+ ++out;
+ }
+
+ if (s->occupied) {
+ for (i = 0; i < s->nb_planes; i++) {
+ // fill in the EARLIER field from the new pic
+ av_image_copy_plane(s->frame[out]->data[i] + s->frame[out]->linesize[i] * s->first_field,
+ s->frame[out]->linesize[i] * 2,
+ inpicref->data[i] + inpicref->linesize[i] * s->first_field,
+ inpicref->linesize[i] * 2,
+ s->stride[i],
+ (s->planeheight[i] - s->first_field + 1) / 2);
+ // fill in the LATER field from the buffered pic
+ av_image_copy_plane(s->frame[out]->data[i] + s->frame[out]->linesize[i] * !s->first_field,
+ s->frame[out]->linesize[i] * 2,
+ s->temp->data[i] + s->temp->linesize[i] * !s->first_field,
+ s->temp->linesize[i] * 2,
+ s->stride[i],
+ (s->planeheight[i] - !s->first_field + 1) / 2);
+ }
+
+ s->occupied = 0;
+ if (len <= 2) {
+ for (i = 0; i < s->nb_planes; i++) {
+ av_image_copy_plane(s->temp->data[i], s->temp->linesize[i],
+ inpicref->data[i], inpicref->linesize[i],
+ s->stride[i],
+ s->planeheight[i]);
+ }
+ s->occupied = 1;
+ }
+ ++out;
+ len = (len >= 3) ? len - 3 : 0;
+ } else {
+ if (len >= 2) {
+ // output THIS image as-is
+ for (i = 0; i < s->nb_planes; i++)
+ av_image_copy_plane(s->frame[out]->data[i], s->frame[out]->linesize[i],
+ inpicref->data[i], inpicref->linesize[i],
+ s->stride[i],
+ s->planeheight[i]);
+ len -= 2;
+ ++out;
+ } else if (len == 1) {
+ // output THIS image as-is
+ for (i = 0; i < s->nb_planes; i++)
+ av_image_copy_plane(s->frame[out]->data[i], s->frame[out]->linesize[i],
+ inpicref->data[i], inpicref->linesize[i],
+ s->stride[i],
+ s->planeheight[i]);
+
+ for (i = 0; i < s->nb_planes; i++) {
+ av_image_copy_plane(s->temp->data[i], s->temp->linesize[i],
+ inpicref->data[i], inpicref->linesize[i],
+ s->stride[i],
+ s->planeheight[i]);
+ }
+ s->occupied = 1;
+
+ len--;
+ ++out;
+ }
+ }
+
+ if (len == 1 && s->occupied)
+ {
+ len--;
+ s->occupied = 0;
+ }
+ }
+ s->nskip_fields = len;
+
+ for (i = 0; i < out; ++i) {
+ AVFrame *frame = av_frame_clone(s->frame[i]);
+
+ if (!frame) {
+ av_frame_free(&inpicref);
+ return AVERROR(ENOMEM);
+ }
+
+ av_frame_copy_props(frame, inpicref);
+ frame->pts = ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time) +
+ av_rescale(outlink->frame_count_in, s->ts_unit.num,
+ s->ts_unit.den);
+ ret = ff_filter_frame(outlink, frame);
+ }
+
+ av_frame_free(&inpicref);
+
+ return ret;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ DetelecineContext *s = ctx->priv;
+
+ av_frame_free(&s->temp);
+ av_frame_free(&s->frame[0]);
+ av_frame_free(&s->frame[1]);
+}
+
+static const AVFilterPad detelecine_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ { NULL }
+};
+
+static const AVFilterPad detelecine_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_detelecine = {
+ .name = "detelecine",
+ .description = NULL_IF_CONFIG_SMALL("Apply an inverse telecine pattern."),
+ .priv_size = sizeof(DetelecineContext),
+ .priv_class = &detelecine_class,
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = detelecine_inputs,
+ .outputs = detelecine_outputs,
+};
diff --git a/libavfilter/vf_displace.c b/libavfilter/vf_displace.c
new file mode 100644
index 0000000000..9daa0c9ddb
--- /dev/null
+++ b/libavfilter/vf_displace.c
@@ -0,0 +1,395 @@
+/*
+ * Copyright (c) 2013 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/imgutils.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "framesync.h"
+#include "internal.h"
+#include "video.h"
+
+enum EdgeMode {
+ EDGE_BLANK,
+ EDGE_SMEAR,
+ EDGE_WRAP,
+ EDGE_NB
+};
+
+typedef struct DisplaceContext {
+ const AVClass *class;
+ int width[4], height[4];
+ enum EdgeMode edge;
+ int nb_planes;
+ int nb_components;
+ int step;
+ uint8_t blank[4];
+ FFFrameSync fs;
+
+ void (*displace)(struct DisplaceContext *s, const AVFrame *in,
+ const AVFrame *xpic, const AVFrame *ypic, AVFrame *out);
+} DisplaceContext;
+
+#define OFFSET(x) offsetof(DisplaceContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption displace_options[] = {
+ { "edge", "set edge mode", OFFSET(edge), AV_OPT_TYPE_INT, {.i64=EDGE_SMEAR}, 0, EDGE_NB-1, FLAGS, "edge" },
+ { "blank", "", 0, AV_OPT_TYPE_CONST, {.i64=EDGE_BLANK}, 0, 0, FLAGS, "edge" },
+ { "smear", "", 0, AV_OPT_TYPE_CONST, {.i64=EDGE_SMEAR}, 0, 0, FLAGS, "edge" },
+ { "wrap" , "", 0, AV_OPT_TYPE_CONST, {.i64=EDGE_WRAP}, 0, 0, FLAGS, "edge" },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(displace);
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P,
+ AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
+ AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
+ AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
+ AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR, AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA,
+ AV_PIX_FMT_0RGB, AV_PIX_FMT_0BGR, AV_PIX_FMT_RGB0, AV_PIX_FMT_BGR0,
+ AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP,
+ AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE
+ };
+
+ return ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+}
+
+static void displace_planar(DisplaceContext *s, const AVFrame *in,
+ const AVFrame *xpic, const AVFrame *ypic,
+ AVFrame *out)
+{
+ int plane, x, y;
+
+ for (plane = 0; plane < s->nb_planes; plane++) {
+ const int h = s->height[plane];
+ const int w = s->width[plane];
+ const int dlinesize = out->linesize[plane];
+ const int slinesize = in->linesize[plane];
+ const int xlinesize = xpic->linesize[plane];
+ const int ylinesize = ypic->linesize[plane];
+ const uint8_t *src = in->data[plane];
+ const uint8_t *ysrc = ypic->data[plane];
+ const uint8_t *xsrc = xpic->data[plane];
+ uint8_t *dst = out->data[plane];
+ const uint8_t blank = s->blank[plane];
+
+ for (y = 0; y < h; y++) {
+ switch (s->edge) {
+ case EDGE_BLANK:
+ for (x = 0; x < w; x++) {
+ int Y = y + ysrc[x] - 128;
+ int X = x + xsrc[x] - 128;
+
+ if (Y < 0 || Y >= h || X < 0 || X >= w)
+ dst[x] = blank;
+ else
+ dst[x] = src[Y * slinesize + X];
+ }
+ break;
+ case EDGE_SMEAR:
+ for (x = 0; x < w; x++) {
+ int Y = av_clip(y + ysrc[x] - 128, 0, h - 1);
+ int X = av_clip(x + xsrc[x] - 128, 0, w - 1);
+ dst[x] = src[Y * slinesize + X];
+ }
+ break;
+ case EDGE_WRAP:
+ for (x = 0; x < w; x++) {
+ int Y = (y + ysrc[x] - 128) % h;
+ int X = (x + xsrc[x] - 128) % w;
+
+ if (Y < 0)
+ Y += h;
+ if (X < 0)
+ X += w;
+ dst[x] = src[Y * slinesize + X];
+ }
+ break;
+ }
+
+ ysrc += ylinesize;
+ xsrc += xlinesize;
+ dst += dlinesize;
+ }
+ }
+}
+
+static void displace_packed(DisplaceContext *s, const AVFrame *in,
+ const AVFrame *xpic, const AVFrame *ypic,
+ AVFrame *out)
+{
+ const int step = s->step;
+ const int h = s->height[0];
+ const int w = s->width[0];
+ const int dlinesize = out->linesize[0];
+ const int slinesize = in->linesize[0];
+ const int xlinesize = xpic->linesize[0];
+ const int ylinesize = ypic->linesize[0];
+ const uint8_t *src = in->data[0];
+ const uint8_t *ysrc = ypic->data[0];
+ const uint8_t *xsrc = xpic->data[0];
+ const uint8_t *blank = s->blank;
+ uint8_t *dst = out->data[0];
+ int c, x, y;
+
+ for (y = 0; y < h; y++) {
+ switch (s->edge) {
+ case EDGE_BLANK:
+ for (x = 0; x < w; x++) {
+ for (c = 0; c < s->nb_components; c++) {
+ int Y = y + (ysrc[x * step + c] - 128);
+ int X = x + (xsrc[x * step + c] - 128);
+
+ if (Y < 0 || Y >= h || X < 0 || X >= w)
+ dst[x * step + c] = blank[c];
+ else
+ dst[x * step + c] = src[Y * slinesize + X * step + c];
+ }
+ }
+ break;
+ case EDGE_SMEAR:
+ for (x = 0; x < w; x++) {
+ for (c = 0; c < s->nb_components; c++) {
+ int Y = av_clip(y + (ysrc[x * step + c] - 128), 0, h - 1);
+ int X = av_clip(x + (xsrc[x * step + c] - 128), 0, w - 1);
+
+ dst[x * step + c] = src[Y * slinesize + X * step + c];
+ }
+ }
+ break;
+ case EDGE_WRAP:
+ for (x = 0; x < w; x++) {
+ for (c = 0; c < s->nb_components; c++) {
+ int Y = (y + (ysrc[x * step + c] - 128)) % h;
+ int X = (x + (xsrc[x * step + c] - 128)) % w;
+
+ if (Y < 0)
+ Y += h;
+ if (X < 0)
+ X += w;
+ dst[x * step + c] = src[Y * slinesize + X * step + c];
+ }
+ }
+ break;
+ }
+
+ ysrc += ylinesize;
+ xsrc += xlinesize;
+ dst += dlinesize;
+ }
+}
+
+static int process_frame(FFFrameSync *fs)
+{
+ AVFilterContext *ctx = fs->parent;
+ DisplaceContext *s = fs->opaque;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFrame *out, *in, *xpic, *ypic;
+ int ret;
+
+ if ((ret = ff_framesync_get_frame(&s->fs, 0, &in, 0)) < 0 ||
+ (ret = ff_framesync_get_frame(&s->fs, 1, &xpic, 0)) < 0 ||
+ (ret = ff_framesync_get_frame(&s->fs, 2, &ypic, 0)) < 0)
+ return ret;
+
+ if (ctx->is_disabled) {
+ out = av_frame_clone(in);
+ if (!out)
+ return AVERROR(ENOMEM);
+ } else {
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out)
+ return AVERROR(ENOMEM);
+ av_frame_copy_props(out, in);
+
+ s->displace(s, in, xpic, ypic, out);
+ }
+ out->pts = av_rescale_q(in->pts, s->fs.time_base, outlink->time_base);
+
+ return ff_filter_frame(outlink, out);
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ DisplaceContext *s = ctx->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ int vsub, hsub;
+
+ s->nb_planes = av_pix_fmt_count_planes(inlink->format);
+ s->nb_components = desc->nb_components;
+
+ if (s->nb_planes > 1 || s->nb_components == 1)
+ s->displace = displace_planar;
+ else
+ s->displace = displace_packed;
+
+ if (!(desc->flags & AV_PIX_FMT_FLAG_RGB)) {
+ s->blank[1] = s->blank[2] = 128;
+ s->blank[0] = 16;
+ }
+
+ s->step = av_get_padded_bits_per_pixel(desc) >> 3;
+ hsub = desc->log2_chroma_w;
+ vsub = desc->log2_chroma_h;
+ s->height[1] = s->height[2] = AV_CEIL_RSHIFT(inlink->h, vsub);
+ s->height[0] = s->height[3] = inlink->h;
+ s->width[1] = s->width[2] = AV_CEIL_RSHIFT(inlink->w, hsub);
+ s->width[0] = s->width[3] = inlink->w;
+
+ return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ DisplaceContext *s = ctx->priv;
+ AVFilterLink *srclink = ctx->inputs[0];
+ AVFilterLink *xlink = ctx->inputs[1];
+ AVFilterLink *ylink = ctx->inputs[2];
+ FFFrameSyncIn *in;
+ int ret;
+
+ if (srclink->format != xlink->format ||
+ srclink->format != ylink->format) {
+ av_log(ctx, AV_LOG_ERROR, "inputs must be of same pixel format\n");
+ return AVERROR(EINVAL);
+ }
+ if (srclink->w != xlink->w ||
+ srclink->h != xlink->h ||
+ srclink->sample_aspect_ratio.num != xlink->sample_aspect_ratio.num ||
+ srclink->sample_aspect_ratio.den != xlink->sample_aspect_ratio.den ||
+ srclink->w != ylink->w ||
+ srclink->h != ylink->h ||
+ srclink->sample_aspect_ratio.num != ylink->sample_aspect_ratio.num ||
+ srclink->sample_aspect_ratio.den != ylink->sample_aspect_ratio.den) {
+ av_log(ctx, AV_LOG_ERROR, "First input link %s parameters "
+ "(size %dx%d, SAR %d:%d) do not match the corresponding "
+ "second input link %s parameters (%dx%d, SAR %d:%d) "
+ "and/or third input link %s parameters (%dx%d, SAR %d:%d)\n",
+ ctx->input_pads[0].name, srclink->w, srclink->h,
+ srclink->sample_aspect_ratio.num,
+ srclink->sample_aspect_ratio.den,
+ ctx->input_pads[1].name, xlink->w, xlink->h,
+ xlink->sample_aspect_ratio.num,
+ xlink->sample_aspect_ratio.den,
+ ctx->input_pads[2].name, ylink->w, ylink->h,
+ ylink->sample_aspect_ratio.num,
+ ylink->sample_aspect_ratio.den);
+ return AVERROR(EINVAL);
+ }
+
+ outlink->w = srclink->w;
+ outlink->h = srclink->h;
+ outlink->time_base = srclink->time_base;
+ outlink->sample_aspect_ratio = srclink->sample_aspect_ratio;
+ outlink->frame_rate = srclink->frame_rate;
+
+ ret = ff_framesync_init(&s->fs, ctx, 3);
+ if (ret < 0)
+ return ret;
+
+ in = s->fs.in;
+ in[0].time_base = srclink->time_base;
+ in[1].time_base = xlink->time_base;
+ in[2].time_base = ylink->time_base;
+ in[0].sync = 2;
+ in[0].before = EXT_STOP;
+ in[0].after = EXT_STOP;
+ in[1].sync = 1;
+ in[1].before = EXT_NULL;
+ in[1].after = EXT_INFINITY;
+ in[2].sync = 1;
+ in[2].before = EXT_NULL;
+ in[2].after = EXT_INFINITY;
+ s->fs.opaque = s;
+ s->fs.on_event = process_frame;
+
+ return ff_framesync_configure(&s->fs);
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
+{
+ DisplaceContext *s = inlink->dst->priv;
+ return ff_framesync_filter_frame(&s->fs, inlink, buf);
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ DisplaceContext *s = outlink->src->priv;
+ return ff_framesync_request_frame(&s->fs, outlink);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ DisplaceContext *s = ctx->priv;
+
+ ff_framesync_uninit(&s->fs);
+}
+
+static const AVFilterPad displace_inputs[] = {
+ {
+ .name = "source",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ {
+ .name = "xmap",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ {
+ .name = "ymap",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad displace_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_displace = {
+ .name = "displace",
+ .description = NULL_IF_CONFIG_SMALL("Displace pixels."),
+ .priv_size = sizeof(DisplaceContext),
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = displace_inputs,
+ .outputs = displace_outputs,
+ .priv_class = &displace_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
+};
diff --git a/libavfilter/vf_drawbox.c b/libavfilter/vf_drawbox.c
index ab14af2e12..88bb9ae5c0 100644
--- a/libavfilter/vf_drawbox.c
+++ b/libavfilter/vf_drawbox.c
@@ -1,32 +1,34 @@
/*
* Copyright (c) 2008 Affine Systems, Inc (Michael Sullivan, Bobby Impollonia)
+ * Copyright (c) 2013 Andrey Utkin <andrey.krieger.utkin gmail com>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
- * Box drawing filter. Also a nice template for a filter that needs to
- * write in the input frame.
+ * Box and grid drawing filters. Also a nice template for a filter
+ * that needs to write in the input frame.
*/
#include "libavutil/colorspace.h"
#include "libavutil/common.h"
#include "libavutil/opt.h"
+#include "libavutil/eval.h"
#include "libavutil/pixdesc.h"
#include "libavutil/parseutils.h"
#include "avfilter.h"
@@ -34,62 +36,175 @@
#include "internal.h"
#include "video.h"
+static const char *const var_names[] = {
+ "dar",
+ "hsub", "vsub",
+ "in_h", "ih", ///< height of the input video
+ "in_w", "iw", ///< width of the input video
+ "sar",
+ "x",
+ "y",
+ "h", ///< height of the rendered box
+ "w", ///< width of the rendered box
+ "t",
+ "max",
+ NULL
+};
+
enum { Y, U, V, A };
+enum var_name {
+ VAR_DAR,
+ VAR_HSUB, VAR_VSUB,
+ VAR_IN_H, VAR_IH,
+ VAR_IN_W, VAR_IW,
+ VAR_SAR,
+ VAR_X,
+ VAR_Y,
+ VAR_H,
+ VAR_W,
+ VAR_T,
+ VAR_MAX,
+ VARS_NB
+};
+
typedef struct DrawBoxContext {
const AVClass *class;
- int x, y, w_opt, h_opt, w, h;
+ int x, y, w, h;
+ int thickness;
char *color_str;
unsigned char yuv_color[4];
+ int invert_color; ///< invert luma color
int vsub, hsub; ///< chroma subsampling
+ char *x_expr, *y_expr; ///< expression for x and y
+ char *w_expr, *h_expr; ///< expression for width and height
+ char *t_expr; ///< expression for thickness
+ int have_alpha;
} DrawBoxContext;
+static const int NUM_EXPR_EVALS = 5;
+
static av_cold int init(AVFilterContext *ctx)
{
DrawBoxContext *s = ctx->priv;
uint8_t rgba_color[4];
- if (av_parse_color(rgba_color, s->color_str, -1, ctx) < 0)
+ if (!strcmp(s->color_str, "invert"))
+ s->invert_color = 1;
+ else if (av_parse_color(rgba_color, s->color_str, -1, ctx) < 0)
return AVERROR(EINVAL);
- s->yuv_color[Y] = RGB_TO_Y_CCIR(rgba_color[0], rgba_color[1], rgba_color[2]);
- s->yuv_color[U] = RGB_TO_U_CCIR(rgba_color[0], rgba_color[1], rgba_color[2], 0);
- s->yuv_color[V] = RGB_TO_V_CCIR(rgba_color[0], rgba_color[1], rgba_color[2], 0);
- s->yuv_color[A] = rgba_color[3];
+ if (!s->invert_color) {
+ s->yuv_color[Y] = RGB_TO_Y_CCIR(rgba_color[0], rgba_color[1], rgba_color[2]);
+ s->yuv_color[U] = RGB_TO_U_CCIR(rgba_color[0], rgba_color[1], rgba_color[2], 0);
+ s->yuv_color[V] = RGB_TO_V_CCIR(rgba_color[0], rgba_color[1], rgba_color[2], 0);
+ s->yuv_color[A] = rgba_color[3];
+ }
return 0;
}
static int query_formats(AVFilterContext *ctx)
{
- enum AVPixelFormat pix_fmts[] = {
+ static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P,
AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUVJ440P,
+ AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P,
AV_PIX_FMT_NONE
};
-
- ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
- return 0;
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
}
static int config_input(AVFilterLink *inlink)
{
- DrawBoxContext *s = inlink->dst->priv;
+ AVFilterContext *ctx = inlink->dst;
+ DrawBoxContext *s = ctx->priv;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ double var_values[VARS_NB], res;
+ char *expr;
+ int ret;
+ int i;
s->hsub = desc->log2_chroma_w;
s->vsub = desc->log2_chroma_h;
+ s->have_alpha = desc->flags & AV_PIX_FMT_FLAG_ALPHA;
+
+ var_values[VAR_IN_H] = var_values[VAR_IH] = inlink->h;
+ var_values[VAR_IN_W] = var_values[VAR_IW] = inlink->w;
+ var_values[VAR_SAR] = inlink->sample_aspect_ratio.num ? av_q2d(inlink->sample_aspect_ratio) : 1;
+ var_values[VAR_DAR] = (double)inlink->w / inlink->h * var_values[VAR_SAR];
+ var_values[VAR_HSUB] = s->hsub;
+ var_values[VAR_VSUB] = s->vsub;
+ var_values[VAR_X] = NAN;
+ var_values[VAR_Y] = NAN;
+ var_values[VAR_H] = NAN;
+ var_values[VAR_W] = NAN;
+ var_values[VAR_T] = NAN;
+
+ for (i = 0; i <= NUM_EXPR_EVALS; i++) {
+ /* evaluate expressions, fail on last iteration */
+ var_values[VAR_MAX] = inlink->w;
+ if ((ret = av_expr_parse_and_eval(&res, (expr = s->x_expr),
+ var_names, var_values,
+ NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0 && i == NUM_EXPR_EVALS)
+ goto fail;
+ s->x = var_values[VAR_X] = res;
+
+ var_values[VAR_MAX] = inlink->h;
+ if ((ret = av_expr_parse_and_eval(&res, (expr = s->y_expr),
+ var_names, var_values,
+ NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0 && i == NUM_EXPR_EVALS)
+ goto fail;
+ s->y = var_values[VAR_Y] = res;
+
+ var_values[VAR_MAX] = inlink->w - s->x;
+ if ((ret = av_expr_parse_and_eval(&res, (expr = s->w_expr),
+ var_names, var_values,
+ NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0 && i == NUM_EXPR_EVALS)
+ goto fail;
+ s->w = var_values[VAR_W] = res;
+
+ var_values[VAR_MAX] = inlink->h - s->y;
+ if ((ret = av_expr_parse_and_eval(&res, (expr = s->h_expr),
+ var_names, var_values,
+ NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0 && i == NUM_EXPR_EVALS)
+ goto fail;
+ s->h = var_values[VAR_H] = res;
+
+ var_values[VAR_MAX] = INT_MAX;
+ if ((ret = av_expr_parse_and_eval(&res, (expr = s->t_expr),
+ var_names, var_values,
+ NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0 && i == NUM_EXPR_EVALS)
+ goto fail;
+ s->thickness = var_values[VAR_T] = res;
+ }
+
+ /* if w or h are zero, use the input w/h */
+ s->w = (s->w > 0) ? s->w : inlink->w;
+ s->h = (s->h > 0) ? s->h : inlink->h;
- s->w = (s->w_opt > 0) ? s->w_opt : inlink->w;
- s->h = (s->h_opt > 0) ? s->h_opt : inlink->h;
+ /* sanity check width and height */
+ if (s->w < 0 || s->h < 0) {
+ av_log(ctx, AV_LOG_ERROR, "Size values less than 0 are not acceptable.\n");
+ return AVERROR(EINVAL);
+ }
- av_log(inlink->dst, AV_LOG_VERBOSE, "x:%d y:%d w:%d h:%d color:0x%02X%02X%02X%02X\n",
- s->w, s->y, s->w, s->h,
+ av_log(ctx, AV_LOG_VERBOSE, "x:%d y:%d w:%d h:%d color:0x%02X%02X%02X%02X\n",
+ s->x, s->y, s->w, s->h,
s->yuv_color[Y], s->yuv_color[U], s->yuv_color[V], s->yuv_color[A]);
return 0;
+
+fail:
+ av_log(ctx, AV_LOG_ERROR,
+ "Error when evaluating the expression '%s'.\n",
+ expr);
+ return ret;
}
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
@@ -98,21 +213,56 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
int plane, x, y, xb = s->x, yb = s->y;
unsigned char *row[4];
- for (y = FFMAX(yb, 0); y < frame->height && y < (yb + s->h); y++) {
- row[0] = frame->data[0] + y * frame->linesize[0];
+ if (s->have_alpha) {
+ for (y = FFMAX(yb, 0); y < frame->height && y < (yb + s->h); y++) {
+ row[0] = frame->data[0] + y * frame->linesize[0];
+ row[3] = frame->data[3] + y * frame->linesize[3];
- for (plane = 1; plane < 3; plane++)
- row[plane] = frame->data[plane] +
- frame->linesize[plane] * (y >> s->vsub);
+ for (plane = 1; plane < 3; plane++)
+ row[plane] = frame->data[plane] +
+ frame->linesize[plane] * (y >> s->vsub);
- for (x = FFMAX(xb, 0); x < (xb + s->w) && x < frame->width; x++) {
- double alpha = (double)s->yuv_color[A] / 255;
+ if (s->invert_color) {
+ for (x = FFMAX(xb, 0); x < xb + s->w && x < frame->width; x++)
+ if ((y - yb < s->thickness) || (yb + s->h - 1 - y < s->thickness) ||
+ (x - xb < s->thickness) || (xb + s->w - 1 - x < s->thickness))
+ row[0][x] = 0xff - row[0][x];
+ } else {
+ for (x = FFMAX(xb, 0); x < xb + s->w && x < frame->width; x++) {
+ if ((y - yb < s->thickness) || (yb + s->h - 1 - y < s->thickness) ||
+ (x - xb < s->thickness) || (xb + s->w - 1 - x < s->thickness)) {
+ row[0][x ] = s->yuv_color[Y];
+ row[1][x >> s->hsub] = s->yuv_color[U];
+ row[2][x >> s->hsub] = s->yuv_color[V];
+ row[3][x ] = s->yuv_color[A];
+ }
+ }
+ }
+ }
+ } else {
+ for (y = FFMAX(yb, 0); y < frame->height && y < (yb + s->h); y++) {
+ row[0] = frame->data[0] + y * frame->linesize[0];
- if ((y - yb < 3) || (yb + s->h - y < 4) ||
- (x - xb < 3) || (xb + s->w - x < 4)) {
- row[0][x ] = (1 - alpha) * row[0][x ] + alpha * s->yuv_color[Y];
- row[1][x >> s->hsub] = (1 - alpha) * row[1][x >> s->hsub] + alpha * s->yuv_color[U];
- row[2][x >> s->hsub] = (1 - alpha) * row[2][x >> s->hsub] + alpha * s->yuv_color[V];
+ for (plane = 1; plane < 3; plane++)
+ row[plane] = frame->data[plane] +
+ frame->linesize[plane] * (y >> s->vsub);
+
+ if (s->invert_color) {
+ for (x = FFMAX(xb, 0); x < xb + s->w && x < frame->width; x++)
+ if ((y - yb < s->thickness) || (yb + s->h - 1 - y < s->thickness) ||
+ (x - xb < s->thickness) || (xb + s->w - 1 - x < s->thickness))
+ row[0][x] = 0xff - row[0][x];
+ } else {
+ for (x = FFMAX(xb, 0); x < xb + s->w && x < frame->width; x++) {
+ double alpha = (double)s->yuv_color[A] / 255;
+
+ if ((y - yb < s->thickness) || (yb + s->h - 1 - y < s->thickness) ||
+ (x - xb < s->thickness) || (xb + s->w - 1 - x < s->thickness)) {
+ row[0][x ] = (1 - alpha) * row[0][x ] + alpha * s->yuv_color[Y];
+ row[1][x >> s->hsub] = (1 - alpha) * row[1][x >> s->hsub] + alpha * s->yuv_color[U];
+ row[2][x >> s->hsub] = (1 - alpha) * row[2][x >> s->hsub] + alpha * s->yuv_color[V];
+ }
+ }
}
}
}
@@ -121,36 +271,38 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
}
#define OFFSET(x) offsetof(DrawBoxContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
-static const AVOption options[] = {
- { "x", "Horizontal position of the left box edge", OFFSET(x), AV_OPT_TYPE_INT, { .i64 = 0 }, INT_MIN, INT_MAX, FLAGS },
- { "y", "Vertical position of the top box edge", OFFSET(y), AV_OPT_TYPE_INT, { .i64 = 0 }, INT_MIN, INT_MAX, FLAGS },
- { "width", "Width of the box", OFFSET(w_opt), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, FLAGS },
- { "height", "Height of the box", OFFSET(h_opt), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, FLAGS },
- { "color", "Color of the box", OFFSET(color_str), AV_OPT_TYPE_STRING, { .str = "black" }, .flags = FLAGS },
- { NULL },
-};
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+#if CONFIG_DRAWBOX_FILTER
-static const AVClass drawbox_class = {
- .class_name = "drawbox",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
+static const AVOption drawbox_options[] = {
+ { "x", "set horizontal position of the left box edge", OFFSET(x_expr), AV_OPT_TYPE_STRING, { .str="0" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "y", "set vertical position of the top box edge", OFFSET(y_expr), AV_OPT_TYPE_STRING, { .str="0" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "width", "set width of the box", OFFSET(w_expr), AV_OPT_TYPE_STRING, { .str="0" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "w", "set width of the box", OFFSET(w_expr), AV_OPT_TYPE_STRING, { .str="0" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "height", "set height of the box", OFFSET(h_expr), AV_OPT_TYPE_STRING, { .str="0" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "h", "set height of the box", OFFSET(h_expr), AV_OPT_TYPE_STRING, { .str="0" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "color", "set color of the box", OFFSET(color_str), AV_OPT_TYPE_STRING, { .str = "black" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "c", "set color of the box", OFFSET(color_str), AV_OPT_TYPE_STRING, { .str = "black" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "thickness", "set the box thickness", OFFSET(t_expr), AV_OPT_TYPE_STRING, { .str="3" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "t", "set the box thickness", OFFSET(t_expr), AV_OPT_TYPE_STRING, { .str="3" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { NULL }
};
-static const AVFilterPad avfilter_vf_drawbox_inputs[] = {
+AVFILTER_DEFINE_CLASS(drawbox);
+
+static const AVFilterPad drawbox_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .config_props = config_input,
- .get_video_buffer = ff_null_get_video_buffer,
- .filter_frame = filter_frame,
- .needs_writable = 1,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ .needs_writable = 1,
},
{ NULL }
};
-static const AVFilterPad avfilter_vf_drawbox_outputs[] = {
+static const AVFilterPad drawbox_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
@@ -159,13 +311,147 @@ static const AVFilterPad avfilter_vf_drawbox_outputs[] = {
};
AVFilter ff_vf_drawbox = {
- .name = "drawbox",
- .description = NULL_IF_CONFIG_SMALL("Draw a colored box on the input video."),
- .priv_size = sizeof(DrawBoxContext),
- .priv_class = &drawbox_class,
- .init = init,
-
- .query_formats = query_formats,
- .inputs = avfilter_vf_drawbox_inputs,
- .outputs = avfilter_vf_drawbox_outputs,
+ .name = "drawbox",
+ .description = NULL_IF_CONFIG_SMALL("Draw a colored box on the input video."),
+ .priv_size = sizeof(DrawBoxContext),
+ .priv_class = &drawbox_class,
+ .init = init,
+ .query_formats = query_formats,
+ .inputs = drawbox_inputs,
+ .outputs = drawbox_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
+#endif /* CONFIG_DRAWBOX_FILTER */
+
+#if CONFIG_DRAWGRID_FILTER
+static av_pure av_always_inline int pixel_belongs_to_grid(DrawBoxContext *drawgrid, int x, int y)
+{
+ // x is horizontal (width) coord,
+ // y is vertical (height) coord
+ int x_modulo;
+ int y_modulo;
+
+ // Abstract from the offset
+ x -= drawgrid->x;
+ y -= drawgrid->y;
+
+ x_modulo = x % drawgrid->w;
+ y_modulo = y % drawgrid->h;
+
+ // If x or y got negative, fix values to preserve logics
+ if (x_modulo < 0)
+ x_modulo += drawgrid->w;
+ if (y_modulo < 0)
+ y_modulo += drawgrid->h;
+
+ return x_modulo < drawgrid->thickness // Belongs to vertical line
+ || y_modulo < drawgrid->thickness; // Belongs to horizontal line
+}
+
+static int drawgrid_filter_frame(AVFilterLink *inlink, AVFrame *frame)
+{
+ DrawBoxContext *drawgrid = inlink->dst->priv;
+ int plane, x, y;
+ uint8_t *row[4];
+
+ if (drawgrid->have_alpha) {
+ for (y = 0; y < frame->height; y++) {
+ row[0] = frame->data[0] + y * frame->linesize[0];
+ row[3] = frame->data[3] + y * frame->linesize[3];
+
+ for (plane = 1; plane < 3; plane++)
+ row[plane] = frame->data[plane] +
+ frame->linesize[plane] * (y >> drawgrid->vsub);
+
+ if (drawgrid->invert_color) {
+ for (x = 0; x < frame->width; x++)
+ if (pixel_belongs_to_grid(drawgrid, x, y))
+ row[0][x] = 0xff - row[0][x];
+ } else {
+ for (x = 0; x < frame->width; x++) {
+ if (pixel_belongs_to_grid(drawgrid, x, y)) {
+ row[0][x ] = drawgrid->yuv_color[Y];
+ row[1][x >> drawgrid->hsub] = drawgrid->yuv_color[U];
+ row[2][x >> drawgrid->hsub] = drawgrid->yuv_color[V];
+ row[3][x ] = drawgrid->yuv_color[A];
+ }
+ }
+ }
+ }
+ } else {
+ for (y = 0; y < frame->height; y++) {
+ row[0] = frame->data[0] + y * frame->linesize[0];
+
+ for (plane = 1; plane < 3; plane++)
+ row[plane] = frame->data[plane] +
+ frame->linesize[plane] * (y >> drawgrid->vsub);
+
+ if (drawgrid->invert_color) {
+ for (x = 0; x < frame->width; x++)
+ if (pixel_belongs_to_grid(drawgrid, x, y))
+ row[0][x] = 0xff - row[0][x];
+ } else {
+ for (x = 0; x < frame->width; x++) {
+ double alpha = (double)drawgrid->yuv_color[A] / 255;
+
+ if (pixel_belongs_to_grid(drawgrid, x, y)) {
+ row[0][x ] = (1 - alpha) * row[0][x ] + alpha * drawgrid->yuv_color[Y];
+ row[1][x >> drawgrid->hsub] = (1 - alpha) * row[1][x >> drawgrid->hsub] + alpha * drawgrid->yuv_color[U];
+ row[2][x >> drawgrid->hsub] = (1 - alpha) * row[2][x >> drawgrid->hsub] + alpha * drawgrid->yuv_color[V];
+ }
+ }
+ }
+ }
+ }
+
+ return ff_filter_frame(inlink->dst->outputs[0], frame);
+}
+
+static const AVOption drawgrid_options[] = {
+ { "x", "set horizontal offset", OFFSET(x_expr), AV_OPT_TYPE_STRING, { .str="0" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "y", "set vertical offset", OFFSET(y_expr), AV_OPT_TYPE_STRING, { .str="0" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "width", "set width of grid cell", OFFSET(w_expr), AV_OPT_TYPE_STRING, { .str="0" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "w", "set width of grid cell", OFFSET(w_expr), AV_OPT_TYPE_STRING, { .str="0" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "height", "set height of grid cell", OFFSET(h_expr), AV_OPT_TYPE_STRING, { .str="0" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "h", "set height of grid cell", OFFSET(h_expr), AV_OPT_TYPE_STRING, { .str="0" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "color", "set color of the grid", OFFSET(color_str), AV_OPT_TYPE_STRING, { .str = "black" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "c", "set color of the grid", OFFSET(color_str), AV_OPT_TYPE_STRING, { .str = "black" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "thickness", "set grid line thickness", OFFSET(t_expr), AV_OPT_TYPE_STRING, {.str="1"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "t", "set grid line thickness", OFFSET(t_expr), AV_OPT_TYPE_STRING, {.str="1"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(drawgrid);
+
+static const AVFilterPad drawgrid_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_input,
+ .filter_frame = drawgrid_filter_frame,
+ .needs_writable = 1,
+ },
+ { NULL }
+};
+
+static const AVFilterPad drawgrid_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_drawgrid = {
+ .name = "drawgrid",
+ .description = NULL_IF_CONFIG_SMALL("Draw a colored grid on the input video."),
+ .priv_size = sizeof(DrawBoxContext),
+ .priv_class = &drawgrid_class,
+ .init = init,
+ .query_formats = query_formats,
+ .inputs = drawgrid_inputs,
+ .outputs = drawgrid_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+};
+
+#endif /* CONFIG_DRAWGRID_FILTER */
diff --git a/libavfilter/vf_drawtext.c b/libavfilter/vf_drawtext.c
index e36cfa2c00..bcbe2d9106 100644
--- a/libavfilter/vf_drawtext.c
+++ b/libavfilter/vf_drawtext.c
@@ -3,20 +3,20 @@
* Copyright (c) 2010 S.N. Hemanth Meenakshisundaram
* Copyright (c) 2003 Gustavo Sverzut Barbieri <gsbarbieri@yahoo.com.br>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -28,25 +28,30 @@
#include "config.h"
-#include <sys/types.h>
+#if HAVE_SYS_TIME_H
#include <sys/time.h>
+#endif
+#include <sys/types.h>
#include <sys/stat.h>
#include <time.h>
+#if HAVE_UNISTD_H
#include <unistd.h>
+#endif
+#include <fenv.h>
#if CONFIG_LIBFONTCONFIG
#include <fontconfig/fontconfig.h>
#endif
-#include "libavutil/colorspace.h"
+#include "libavutil/avstring.h"
+#include "libavutil/bprint.h"
#include "libavutil/common.h"
#include "libavutil/file.h"
#include "libavutil/eval.h"
#include "libavutil/opt.h"
-#include "libavutil/mathematics.h"
#include "libavutil/random_seed.h"
#include "libavutil/parseutils.h"
-#include "libavutil/pixdesc.h"
+#include "libavutil/timecode.h"
#include "libavutil/time_internal.h"
#include "libavutil/tree.h"
#include "libavutil/lfg.h"
@@ -56,22 +61,33 @@
#include "internal.h"
#include "video.h"
+#if CONFIG_LIBFRIBIDI
+#include <fribidi.h>
+#endif
+
#include <ft2build.h>
#include FT_FREETYPE_H
#include FT_GLYPH_H
+#include FT_STROKER_H
static const char *const var_names[] = {
- "E",
- "PHI",
- "PI",
- "main_w", "W", ///< width of the main video
- "main_h", "H", ///< height of the main video
- "text_w", "w", ///< width of the overlay text
- "text_h", "h", ///< height of the overlay text
+ "dar",
+ "hsub", "vsub",
+ "line_h", "lh", ///< line height, same as max_glyph_h
+ "main_h", "h", "H", ///< height of the input video
+ "main_w", "w", "W", ///< width of the input video
+ "max_glyph_a", "ascent", ///< max glyph ascent
+ "max_glyph_d", "descent", ///< min glyph descent
+ "max_glyph_h", ///< max glyph height
+ "max_glyph_w", ///< max glyph width
+ "n", ///< number of frame
+ "sar",
+ "t", ///< timestamp expressed in seconds
+ "text_h", "th", ///< height of the rendered text
+ "text_w", "tw", ///< width of the rendered text
"x",
"y",
- "n", ///< number of processed frames
- "t", ///< timestamp expressed in seconds
+ "pict_type",
NULL
};
@@ -92,99 +108,142 @@ static const eval_func2 fun2[] = {
};
enum var_name {
- VAR_E,
- VAR_PHI,
- VAR_PI,
- VAR_MAIN_W, VAR_MW,
- VAR_MAIN_H, VAR_MH,
- VAR_TEXT_W, VAR_TW,
+ VAR_DAR,
+ VAR_HSUB, VAR_VSUB,
+ VAR_LINE_H, VAR_LH,
+ VAR_MAIN_H, VAR_h, VAR_H,
+ VAR_MAIN_W, VAR_w, VAR_W,
+ VAR_MAX_GLYPH_A, VAR_ASCENT,
+ VAR_MAX_GLYPH_D, VAR_DESCENT,
+ VAR_MAX_GLYPH_H,
+ VAR_MAX_GLYPH_W,
+ VAR_N,
+ VAR_SAR,
+ VAR_T,
VAR_TEXT_H, VAR_TH,
+ VAR_TEXT_W, VAR_TW,
VAR_X,
VAR_Y,
- VAR_N,
- VAR_T,
+ VAR_PICT_TYPE,
VAR_VARS_NB
};
+enum expansion_mode {
+ EXP_NONE,
+ EXP_NORMAL,
+ EXP_STRFTIME,
+};
+
typedef struct DrawTextContext {
const AVClass *class;
+ int exp_mode; ///< expansion mode to use for the text
+ int reinit; ///< tells if the filter is being reinited
#if CONFIG_LIBFONTCONFIG
uint8_t *font; ///< font to be used
#endif
uint8_t *fontfile; ///< font to be used
uint8_t *text; ///< text to be drawn
- uint8_t *expanded_text; ///< used to contain the strftime()-expanded text
- size_t expanded_text_size; ///< size in bytes of the expanded_text buffer
+ AVBPrint expanded_text; ///< used to contain the expanded text
+ uint8_t *fontcolor_expr; ///< fontcolor expression to evaluate
+ AVBPrint expanded_fontcolor; ///< used to contain the expanded fontcolor spec
int ft_load_flags; ///< flags used for loading fonts, see FT_LOAD_*
FT_Vector *positions; ///< positions for each element in the text
size_t nb_positions; ///< number of elements of positions array
char *textfile; ///< file with text to be drawn
- int x, y; ///< position to start drawing text
- int w, h; ///< dimension of the text block
+ int x; ///< x position to start drawing text
+ int y; ///< y position to start drawing text
+ int max_glyph_w; ///< max glyph width
+ int max_glyph_h; ///< max glyph height
int shadowx, shadowy;
+ int borderw; ///< border width
unsigned int fontsize; ///< font size to use
- char *fontcolor_string; ///< font color as string
- char *boxcolor_string; ///< box color as string
- char *shadowcolor_string; ///< shadow color as string
- uint8_t fontcolor[4]; ///< foreground color
- uint8_t boxcolor[4]; ///< background color
- uint8_t shadowcolor[4]; ///< shadow color
- uint8_t fontcolor_rgba[4]; ///< foreground color in RGBA
- uint8_t boxcolor_rgba[4]; ///< background color in RGBA
- uint8_t shadowcolor_rgba[4]; ///< shadow color in RGBA
+ int line_spacing; ///< lines spacing in pixels
short int draw_box; ///< draw box around text - true or false
+ int boxborderw; ///< box border width
int use_kerning; ///< font kerning is used - true/false
int tabsize; ///< tab size
int fix_bounds; ///< do we let it go out of frame bounds - t/f
+ FFDrawContext dc;
+ FFDrawColor fontcolor; ///< foreground color
+ FFDrawColor shadowcolor; ///< shadow color
+ FFDrawColor bordercolor; ///< border color
+ FFDrawColor boxcolor; ///< background color
+
FT_Library library; ///< freetype font library handle
FT_Face face; ///< freetype font face handle
+ FT_Stroker stroker; ///< freetype stroker handle
struct AVTreeNode *glyphs; ///< rendered glyphs, stored using the UTF-32 char code
- int hsub, vsub; ///< chroma subsampling values
- int is_packed_rgb;
- int pixel_step[4]; ///< distance in bytes between the component of each pixel
- uint8_t rgba_map[4]; ///< map RGBA offsets to the positions in the packed RGBA format
- uint8_t *box_line[4]; ///< line used for filling the box background
- char *x_expr, *y_expr;
+ char *x_expr; ///< expression for x position
+ char *y_expr; ///< expression for y position
AVExpr *x_pexpr, *y_pexpr; ///< parsed expressions for x and y
+ int64_t basetime; ///< base pts time in the real world for display
double var_values[VAR_VARS_NB];
- char *d_expr;
- AVExpr *d_pexpr;
- int draw; ///< set to zero to prevent drawing
char *a_expr;
AVExpr *a_pexpr;
int alpha;
AVLFG prng; ///< random
+ char *tc_opt_string; ///< specified timecode option string
+ AVRational tc_rate; ///< frame rate for timecode
+ AVTimecode tc; ///< timecode context
+ int tc24hmax; ///< 1 if timecode is wrapped to 24 hours, 0 otherwise
+ int reload; ///< reload text file for each frame
+ int start_number; ///< starting frame number for n/frame_num var
+#if CONFIG_LIBFRIBIDI
+ int text_shaping; ///< 1 to shape the text before drawing it
+#endif
+ AVDictionary *metadata;
} DrawTextContext;
#define OFFSET(x) offsetof(DrawTextContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
static const AVOption drawtext_options[]= {
+ {"fontfile", "set font file", OFFSET(fontfile), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS},
+ {"text", "set text", OFFSET(text), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS},
+ {"textfile", "set text file", OFFSET(textfile), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS},
+ {"fontcolor", "set foreground color", OFFSET(fontcolor.rgba), AV_OPT_TYPE_COLOR, {.str="black"}, CHAR_MIN, CHAR_MAX, FLAGS},
+ {"fontcolor_expr", "set foreground color expression", OFFSET(fontcolor_expr), AV_OPT_TYPE_STRING, {.str=""}, CHAR_MIN, CHAR_MAX, FLAGS},
+ {"boxcolor", "set box color", OFFSET(boxcolor.rgba), AV_OPT_TYPE_COLOR, {.str="white"}, CHAR_MIN, CHAR_MAX, FLAGS},
+ {"bordercolor", "set border color", OFFSET(bordercolor.rgba), AV_OPT_TYPE_COLOR, {.str="black"}, CHAR_MIN, CHAR_MAX, FLAGS},
+ {"shadowcolor", "set shadow color", OFFSET(shadowcolor.rgba), AV_OPT_TYPE_COLOR, {.str="black"}, CHAR_MIN, CHAR_MAX, FLAGS},
+ {"box", "set box", OFFSET(draw_box), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1 , FLAGS},
+ {"boxborderw", "set box border width", OFFSET(boxborderw), AV_OPT_TYPE_INT, {.i64=0}, INT_MIN, INT_MAX , FLAGS},
+ {"line_spacing", "set line spacing in pixels", OFFSET(line_spacing), AV_OPT_TYPE_INT, {.i64=0}, INT_MIN, INT_MAX,FLAGS},
+ {"fontsize", "set font size", OFFSET(fontsize), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX , FLAGS},
+ {"x", "set x expression", OFFSET(x_expr), AV_OPT_TYPE_STRING, {.str="0"}, CHAR_MIN, CHAR_MAX, FLAGS},
+ {"y", "set y expression", OFFSET(y_expr), AV_OPT_TYPE_STRING, {.str="0"}, CHAR_MIN, CHAR_MAX, FLAGS},
+ {"shadowx", "set shadow x offset", OFFSET(shadowx), AV_OPT_TYPE_INT, {.i64=0}, INT_MIN, INT_MAX , FLAGS},
+ {"shadowy", "set shadow y offset", OFFSET(shadowy), AV_OPT_TYPE_INT, {.i64=0}, INT_MIN, INT_MAX , FLAGS},
+ {"borderw", "set border width", OFFSET(borderw), AV_OPT_TYPE_INT, {.i64=0}, INT_MIN, INT_MAX , FLAGS},
+ {"tabsize", "set tab size", OFFSET(tabsize), AV_OPT_TYPE_INT, {.i64=4}, 0, INT_MAX , FLAGS},
+ {"basetime", "set base time", OFFSET(basetime), AV_OPT_TYPE_INT64, {.i64=AV_NOPTS_VALUE}, INT64_MIN, INT64_MAX , FLAGS},
#if CONFIG_LIBFONTCONFIG
{ "font", "Font name", OFFSET(font), AV_OPT_TYPE_STRING, { .str = "Sans" }, .flags = FLAGS },
#endif
- { "fontfile", NULL, OFFSET(fontfile), AV_OPT_TYPE_STRING, .flags = FLAGS },
- { "text", NULL, OFFSET(text), AV_OPT_TYPE_STRING, .flags = FLAGS },
- { "textfile", NULL, OFFSET(textfile), AV_OPT_TYPE_STRING, .flags = FLAGS },
- { "fontcolor", NULL, OFFSET(fontcolor_string), AV_OPT_TYPE_STRING, { .str = "black" }, .flags = FLAGS },
- { "boxcolor", NULL, OFFSET(boxcolor_string), AV_OPT_TYPE_STRING, { .str = "white" }, .flags = FLAGS },
- { "shadowcolor", NULL, OFFSET(shadowcolor_string), AV_OPT_TYPE_STRING, { .str = "black" }, .flags = FLAGS },
- { "box", NULL, OFFSET(draw_box), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, FLAGS },
- { "fontsize", NULL, OFFSET(fontsize), AV_OPT_TYPE_INT, { .i64 = 16 }, 1, 1024, FLAGS },
- { "x", NULL, OFFSET(x_expr), AV_OPT_TYPE_STRING, { .str = "0" }, .flags = FLAGS },
- { "y", NULL, OFFSET(y_expr), AV_OPT_TYPE_STRING, { .str = "0" }, .flags = FLAGS },
- { "shadowx", NULL, OFFSET(shadowx), AV_OPT_TYPE_INT, { .i64 = 0 }, INT_MIN, INT_MAX, FLAGS },
- { "shadowy", NULL, OFFSET(shadowy), AV_OPT_TYPE_INT, { .i64 = 0 }, INT_MIN, INT_MAX, FLAGS },
- { "tabsize", NULL, OFFSET(tabsize), AV_OPT_TYPE_INT, { .i64 = 4 }, 0, INT_MAX, FLAGS },
- { "draw", "if false do not draw", OFFSET(d_expr), AV_OPT_TYPE_STRING, { .str = "1" }, .flags = FLAGS },
+
+ {"expansion", "set the expansion mode", OFFSET(exp_mode), AV_OPT_TYPE_INT, {.i64=EXP_NORMAL}, 0, 2, FLAGS, "expansion"},
+ {"none", "set no expansion", OFFSET(exp_mode), AV_OPT_TYPE_CONST, {.i64=EXP_NONE}, 0, 0, FLAGS, "expansion"},
+ {"normal", "set normal expansion", OFFSET(exp_mode), AV_OPT_TYPE_CONST, {.i64=EXP_NORMAL}, 0, 0, FLAGS, "expansion"},
+ {"strftime", "set strftime expansion (deprecated)", OFFSET(exp_mode), AV_OPT_TYPE_CONST, {.i64=EXP_STRFTIME}, 0, 0, FLAGS, "expansion"},
+
+ {"timecode", "set initial timecode", OFFSET(tc_opt_string), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS},
+ {"tc24hmax", "set 24 hours max (timecode only)", OFFSET(tc24hmax), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
+ {"timecode_rate", "set rate (timecode only)", OFFSET(tc_rate), AV_OPT_TYPE_RATIONAL, {.dbl=0}, 0, INT_MAX, FLAGS},
+ {"r", "set rate (timecode only)", OFFSET(tc_rate), AV_OPT_TYPE_RATIONAL, {.dbl=0}, 0, INT_MAX, FLAGS},
+ {"rate", "set rate (timecode only)", OFFSET(tc_rate), AV_OPT_TYPE_RATIONAL, {.dbl=0}, 0, INT_MAX, FLAGS},
+ {"reload", "reload text file for each frame", OFFSET(reload), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
{ "alpha", "apply alpha while rendering", OFFSET(a_expr), AV_OPT_TYPE_STRING, { .str = "1" }, .flags = FLAGS },
- { "fix_bounds", "if true, check and fix text coords to avoid clipping",
- OFFSET(fix_bounds), AV_OPT_TYPE_INT, { .i64 = 1 }, 0, 1, FLAGS },
+ {"fix_bounds", "check and fix text coords to avoid clipping", OFFSET(fix_bounds), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, FLAGS},
+ {"start_number", "start frame number for n/frame_num variable", OFFSET(start_number), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, FLAGS},
+
+#if CONFIG_LIBFRIBIDI
+ {"text_shaping", "attempt to shape text before drawing", OFFSET(text_shaping), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, FLAGS},
+#endif
/* FT_LOAD_* flags */
- { "ft_load_flags", "set font loading flags for libfreetype", OFFSET(ft_load_flags), AV_OPT_TYPE_FLAGS, { .i64 = FT_LOAD_DEFAULT | FT_LOAD_RENDER}, 0, INT_MAX, FLAGS, "ft_load_flags" },
+ { "ft_load_flags", "set font loading flags for libfreetype", OFFSET(ft_load_flags), AV_OPT_TYPE_FLAGS, { .i64 = FT_LOAD_DEFAULT }, 0, INT_MAX, FLAGS, "ft_load_flags" },
{ "default", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FT_LOAD_DEFAULT }, .flags = FLAGS, .unit = "ft_load_flags" },
{ "no_scale", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FT_LOAD_NO_SCALE }, .flags = FLAGS, .unit = "ft_load_flags" },
{ "no_hinting", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FT_LOAD_NO_HINTING }, .flags = FLAGS, .unit = "ft_load_flags" },
@@ -200,45 +259,38 @@ static const AVOption drawtext_options[]= {
{ "monochrome", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FT_LOAD_MONOCHROME }, .flags = FLAGS, .unit = "ft_load_flags" },
{ "linear_design", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FT_LOAD_LINEAR_DESIGN }, .flags = FLAGS, .unit = "ft_load_flags" },
{ "no_autohint", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FT_LOAD_NO_AUTOHINT }, .flags = FLAGS, .unit = "ft_load_flags" },
- { NULL},
+ { NULL }
};
-static const char *drawtext_get_name(void *ctx)
-{
- return "drawtext";
-}
-
-static const AVClass drawtext_class = {
- "DrawTextContext",
- drawtext_get_name,
- drawtext_options
-};
+AVFILTER_DEFINE_CLASS(drawtext);
#undef __FTERRORS_H__
#define FT_ERROR_START_LIST {
#define FT_ERRORDEF(e, v, s) { (e), (s) },
#define FT_ERROR_END_LIST { 0, NULL } };
-struct ft_error
+static const struct ft_error
{
int err;
const char *err_msg;
-} static ft_errors[] =
+} ft_errors[] =
#include FT_ERRORS_H
#define FT_ERRMSG(e) ft_errors[e].err_msg
typedef struct Glyph {
- FT_Glyph *glyph;
+ FT_Glyph glyph;
+ FT_Glyph border_glyph;
uint32_t code;
FT_Bitmap bitmap; ///< array holding bitmaps of font
+ FT_Bitmap border_bitmap; ///< array holding bitmaps of font border
FT_BBox bbox;
int advance;
int bitmap_left;
int bitmap_top;
} Glyph;
-static int glyph_cmp(void *key, const void *b)
+static int glyph_cmp(const void *key, const void *b)
{
const Glyph *a = key, *bb = b;
int64_t diff = (int64_t)a->code - (int64_t)bb->code;
@@ -251,6 +303,7 @@ static int glyph_cmp(void *key, const void *b)
static int load_glyph(AVFilterContext *ctx, Glyph **glyph_ptr, uint32_t code)
{
DrawTextContext *s = ctx->priv;
+ FT_BitmapGlyph bitmapglyph;
Glyph *glyph;
struct AVTreeNode *node = NULL;
int ret;
@@ -259,26 +312,40 @@ static int load_glyph(AVFilterContext *ctx, Glyph **glyph_ptr, uint32_t code)
if (FT_Load_Char(s->face, code, s->ft_load_flags))
return AVERROR(EINVAL);
- /* save glyph */
- if (!(glyph = av_mallocz(sizeof(*glyph))) ||
- !(glyph->glyph = av_mallocz(sizeof(*glyph->glyph)))) {
+ glyph = av_mallocz(sizeof(*glyph));
+ if (!glyph) {
ret = AVERROR(ENOMEM);
goto error;
}
glyph->code = code;
- if (FT_Get_Glyph(s->face->glyph, glyph->glyph)) {
+ if (FT_Get_Glyph(s->face->glyph, &glyph->glyph)) {
ret = AVERROR(EINVAL);
goto error;
}
+ if (s->borderw) {
+ glyph->border_glyph = glyph->glyph;
+ if (FT_Glyph_StrokeBorder(&glyph->border_glyph, s->stroker, 0, 0) ||
+ FT_Glyph_To_Bitmap(&glyph->border_glyph, FT_RENDER_MODE_NORMAL, 0, 1)) {
+ ret = AVERROR_EXTERNAL;
+ goto error;
+ }
+ bitmapglyph = (FT_BitmapGlyph) glyph->border_glyph;
+ glyph->border_bitmap = bitmapglyph->bitmap;
+ }
+ if (FT_Glyph_To_Bitmap(&glyph->glyph, FT_RENDER_MODE_NORMAL, 0, 1)) {
+ ret = AVERROR_EXTERNAL;
+ goto error;
+ }
+ bitmapglyph = (FT_BitmapGlyph) glyph->glyph;
- glyph->bitmap = s->face->glyph->bitmap;
- glyph->bitmap_left = s->face->glyph->bitmap_left;
- glyph->bitmap_top = s->face->glyph->bitmap_top;
+ glyph->bitmap = bitmapglyph->bitmap;
+ glyph->bitmap_left = bitmapglyph->left;
+ glyph->bitmap_top = bitmapglyph->top;
glyph->advance = s->face->glyph->advance.x >> 6;
/* measure text height to calculate text_height (or the maximum text height) */
- FT_Glyph_Get_CBox(*glyph->glyph, ft_glyph_bbox_pixels, &glyph->bbox);
+ FT_Glyph_Get_CBox(glyph->glyph, ft_glyph_bbox_pixels, &glyph->bbox);
/* cache the newly created glyph */
if (!(node = av_tree_node_alloc())) {
@@ -294,148 +361,284 @@ static int load_glyph(AVFilterContext *ctx, Glyph **glyph_ptr, uint32_t code)
error:
if (glyph)
av_freep(&glyph->glyph);
+
av_freep(&glyph);
av_freep(&node);
return ret;
}
-static int parse_font(AVFilterContext *ctx)
+static int load_font_file(AVFilterContext *ctx, const char *path, int index)
{
DrawTextContext *s = ctx->priv;
+ int err;
+
+ err = FT_New_Face(s->library, path, index, &s->face);
+ if (err) {
#if !CONFIG_LIBFONTCONFIG
- if (!s->fontfile) {
- av_log(ctx, AV_LOG_ERROR, "No font filename provided\n");
+ av_log(ctx, AV_LOG_ERROR, "Could not load font \"%s\": %s\n",
+ s->fontfile, FT_ERRMSG(err));
+#endif
return AVERROR(EINVAL);
}
-
return 0;
-#else
+}
+
+#if CONFIG_LIBFONTCONFIG
+static int load_font_fontconfig(AVFilterContext *ctx)
+{
+ DrawTextContext *s = ctx->priv;
+ FcConfig *fontconfig;
FcPattern *pat, *best;
FcResult result = FcResultMatch;
-
- FcBool fc_bool;
- FcChar8* fc_string;
+ FcChar8 *filename;
+ int index;
+ double size;
int err = AVERROR(ENOENT);
- if (s->fontfile)
- return 0;
-
- if (!FcInit())
+ fontconfig = FcInitLoadConfigAndFonts();
+ if (!fontconfig) {
+ av_log(ctx, AV_LOG_ERROR, "impossible to init fontconfig\n");
return AVERROR_UNKNOWN;
-
- if (!(pat = FcPatternCreate()))
- return AVERROR(ENOMEM);
+ }
+ pat = FcNameParse(s->fontfile ? s->fontfile :
+ (uint8_t *)(intptr_t)"default");
+ if (!pat) {
+ av_log(ctx, AV_LOG_ERROR, "could not parse fontconfig pat");
+ return AVERROR(EINVAL);
+ }
FcPatternAddString(pat, FC_FAMILY, s->font);
- FcPatternAddBool(pat, FC_OUTLINE, FcTrue);
- FcPatternAddDouble(pat, FC_SIZE, (double)s->fontsize);
+ if (s->fontsize)
+ FcPatternAddDouble(pat, FC_SIZE, (double)s->fontsize);
FcDefaultSubstitute(pat);
- if (!FcConfigSubstitute(NULL, pat, FcMatchPattern)) {
+ if (!FcConfigSubstitute(fontconfig, pat, FcMatchPattern)) {
+ av_log(ctx, AV_LOG_ERROR, "could not substitue fontconfig options"); /* very unlikely */
FcPatternDestroy(pat);
return AVERROR(ENOMEM);
}
- best = FcFontMatch(NULL, pat, &result);
+ best = FcFontMatch(fontconfig, pat, &result);
FcPatternDestroy(pat);
- if (!best || result == FcResultNoMatch) {
- av_log(ctx, AV_LOG_ERROR,
- "Cannot find a valid font for the family %s\n",
- s->font);
- goto fail;
- }
-
- if (FcPatternGetBool(best, FC_OUTLINE, 0, &fc_bool) != FcResultMatch ||
- !fc_bool) {
- av_log(ctx, AV_LOG_ERROR, "Outline not available for %s\n",
+ if (!best || result != FcResultMatch) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Cannot find a valid font for the family %s\n",
s->font);
goto fail;
}
- if (FcPatternGetString(best, FC_FAMILY, 0, &fc_string) != FcResultMatch) {
- av_log(ctx, AV_LOG_ERROR, "No matches for %s\n",
- s->font);
- goto fail;
+ if (
+ FcPatternGetInteger(best, FC_INDEX, 0, &index ) != FcResultMatch ||
+ FcPatternGetDouble (best, FC_SIZE, 0, &size ) != FcResultMatch) {
+ av_log(ctx, AV_LOG_ERROR, "impossible to find font information");
+ return AVERROR(EINVAL);
}
- if (FcPatternGetString(best, FC_FILE, 0, &fc_string) != FcResultMatch) {
+ if (FcPatternGetString(best, FC_FILE, 0, &filename) != FcResultMatch) {
av_log(ctx, AV_LOG_ERROR, "No file path for %s\n",
s->font);
goto fail;
}
- s->fontfile = av_strdup(fc_string);
- if (!s->fontfile)
- err = AVERROR(ENOMEM);
- else
- err = 0;
+ av_log(ctx, AV_LOG_INFO, "Using \"%s\"\n", filename);
+ if (!s->fontsize)
+ s->fontsize = size + 0.5;
+ err = load_font_file(ctx, filename, index);
+ if (err)
+ return err;
+ FcConfigDestroy(fontconfig);
fail:
FcPatternDestroy(best);
return err;
+}
+#endif
+
+static int load_font(AVFilterContext *ctx)
+{
+ DrawTextContext *s = ctx->priv;
+ int err;
+
+ /* load the face, and set up the encoding, which is by default UTF-8 */
+ err = load_font_file(ctx, s->fontfile, 0);
+ if (!err)
+ return 0;
+#if CONFIG_LIBFONTCONFIG
+ err = load_font_fontconfig(ctx);
+ if (!err)
+ return 0;
#endif
+ return err;
+}
+
+static int load_textfile(AVFilterContext *ctx)
+{
+ DrawTextContext *s = ctx->priv;
+ int err;
+ uint8_t *textbuf;
+ uint8_t *tmp;
+ size_t textbuf_size;
+
+ if ((err = av_file_map(s->textfile, &textbuf, &textbuf_size, 0, ctx)) < 0) {
+ av_log(ctx, AV_LOG_ERROR,
+ "The text file '%s' could not be read or is empty\n",
+ s->textfile);
+ return err;
+ }
+
+ if (textbuf_size > SIZE_MAX - 1 || !(tmp = av_realloc(s->text, textbuf_size + 1))) {
+ av_file_unmap(textbuf, textbuf_size);
+ return AVERROR(ENOMEM);
+ }
+ s->text = tmp;
+ memcpy(s->text, textbuf, textbuf_size);
+ s->text[textbuf_size] = 0;
+ av_file_unmap(textbuf, textbuf_size);
+
+ return 0;
+}
+
+static inline int is_newline(uint32_t c)
+{
+ return c == '\n' || c == '\r' || c == '\f' || c == '\v';
}
+#if CONFIG_LIBFRIBIDI
+static int shape_text(AVFilterContext *ctx)
+{
+ DrawTextContext *s = ctx->priv;
+ uint8_t *tmp;
+ int ret = AVERROR(ENOMEM);
+ static const FriBidiFlags flags = FRIBIDI_FLAGS_DEFAULT |
+ FRIBIDI_FLAGS_ARABIC;
+ FriBidiChar *unicodestr = NULL;
+ FriBidiStrIndex len;
+ FriBidiParType direction = FRIBIDI_PAR_LTR;
+ FriBidiStrIndex line_start = 0;
+ FriBidiStrIndex line_end = 0;
+ FriBidiLevel *embedding_levels = NULL;
+ FriBidiArabicProp *ar_props = NULL;
+ FriBidiCharType *bidi_types = NULL;
+ FriBidiStrIndex i,j;
+
+ len = strlen(s->text);
+ if (!(unicodestr = av_malloc_array(len, sizeof(*unicodestr)))) {
+ goto out;
+ }
+ len = fribidi_charset_to_unicode(FRIBIDI_CHAR_SET_UTF8,
+ s->text, len, unicodestr);
+
+ bidi_types = av_malloc_array(len, sizeof(*bidi_types));
+ if (!bidi_types) {
+ goto out;
+ }
+
+ fribidi_get_bidi_types(unicodestr, len, bidi_types);
+
+ embedding_levels = av_malloc_array(len, sizeof(*embedding_levels));
+ if (!embedding_levels) {
+ goto out;
+ }
+
+ if (!fribidi_get_par_embedding_levels(bidi_types, len, &direction,
+ embedding_levels)) {
+ goto out;
+ }
+
+ ar_props = av_malloc_array(len, sizeof(*ar_props));
+ if (!ar_props) {
+ goto out;
+ }
+
+ fribidi_get_joining_types(unicodestr, len, ar_props);
+ fribidi_join_arabic(bidi_types, len, embedding_levels, ar_props);
+ fribidi_shape(flags, embedding_levels, len, ar_props, unicodestr);
+
+ for (line_end = 0, line_start = 0; line_end < len; line_end++) {
+ if (is_newline(unicodestr[line_end]) || line_end == len - 1) {
+ if (!fribidi_reorder_line(flags, bidi_types,
+ line_end - line_start + 1, line_start,
+ direction, embedding_levels, unicodestr,
+ NULL)) {
+ goto out;
+ }
+ line_start = line_end + 1;
+ }
+ }
+
+ /* Remove zero-width fill chars put in by libfribidi */
+ for (i = 0, j = 0; i < len; i++)
+ if (unicodestr[i] != FRIBIDI_CHAR_FILL)
+ unicodestr[j++] = unicodestr[i];
+ len = j;
+
+ if (!(tmp = av_realloc(s->text, (len * 4 + 1) * sizeof(*s->text)))) {
+ /* Use len * 4, as a unicode character can be up to 4 bytes in UTF-8 */
+ goto out;
+ }
+
+ s->text = tmp;
+ len = fribidi_unicode_to_charset(FRIBIDI_CHAR_SET_UTF8,
+ unicodestr, len, s->text);
+ ret = 0;
+
+out:
+ av_free(unicodestr);
+ av_free(embedding_levels);
+ av_free(ar_props);
+ av_free(bidi_types);
+ return ret;
+}
+#endif
+
static av_cold int init(AVFilterContext *ctx)
{
int err;
DrawTextContext *s = ctx->priv;
Glyph *glyph;
- if ((err = parse_font(ctx)) < 0)
- return err;
+ if (!s->fontfile && !CONFIG_LIBFONTCONFIG) {
+ av_log(ctx, AV_LOG_ERROR, "No font filename provided\n");
+ return AVERROR(EINVAL);
+ }
if (s->textfile) {
- uint8_t *textbuf;
- size_t textbuf_size;
-
if (s->text) {
av_log(ctx, AV_LOG_ERROR,
"Both text and text file provided. Please provide only one\n");
return AVERROR(EINVAL);
}
- if ((err = av_file_map(s->textfile, &textbuf, &textbuf_size, 0, ctx)) < 0) {
- av_log(ctx, AV_LOG_ERROR,
- "The text file '%s' could not be read or is empty\n",
- s->textfile);
+ if ((err = load_textfile(ctx)) < 0)
return err;
- }
-
- if (textbuf_size > SIZE_MAX - 1 ||
- !(s->text = av_malloc(textbuf_size + 1))) {
- av_file_unmap(textbuf, textbuf_size);
- return AVERROR(ENOMEM);
- }
- memcpy(s->text, textbuf, textbuf_size);
- s->text[textbuf_size] = 0;
- av_file_unmap(textbuf, textbuf_size);
}
- if (!s->text) {
- av_log(ctx, AV_LOG_ERROR,
- "Either text or a valid file must be provided\n");
- return AVERROR(EINVAL);
- }
+ if (s->reload && !s->textfile)
+ av_log(ctx, AV_LOG_WARNING, "No file to reload\n");
- if ((err = av_parse_color(s->fontcolor_rgba, s->fontcolor_string, -1, ctx))) {
- av_log(ctx, AV_LOG_ERROR,
- "Invalid font color '%s'\n", s->fontcolor_string);
- return err;
+ if (s->tc_opt_string) {
+ int ret = av_timecode_init_from_string(&s->tc, s->tc_rate,
+ s->tc_opt_string, ctx);
+ if (ret < 0)
+ return ret;
+ if (s->tc24hmax)
+ s->tc.flags |= AV_TIMECODE_FLAG_24HOURSMAX;
+ if (!s->text)
+ s->text = av_strdup("");
}
- if ((err = av_parse_color(s->boxcolor_rgba, s->boxcolor_string, -1, ctx))) {
+ if (!s->text) {
av_log(ctx, AV_LOG_ERROR,
- "Invalid box color '%s'\n", s->boxcolor_string);
- return err;
+ "Either text, a valid file or a timecode must be provided\n");
+ return AVERROR(EINVAL);
}
- if ((err = av_parse_color(s->shadowcolor_rgba, s->shadowcolor_string, -1, ctx))) {
- av_log(ctx, AV_LOG_ERROR,
- "Invalid shadow color '%s'\n", s->shadowcolor_string);
- return err;
- }
+#if CONFIG_LIBFRIBIDI
+ if (s->text_shaping)
+ if ((err = shape_text(ctx)) < 0)
+ return err;
+#endif
if ((err = FT_Init_FreeType(&(s->library)))) {
av_log(ctx, AV_LOG_ERROR,
@@ -443,51 +646,59 @@ static av_cold int init(AVFilterContext *ctx)
return AVERROR(EINVAL);
}
- /* load the face, and set up the encoding, which is by default UTF-8 */
- if ((err = FT_New_Face(s->library, s->fontfile, 0, &s->face))) {
- av_log(ctx, AV_LOG_ERROR, "Could not load fontface from file '%s': %s\n",
- s->fontfile, FT_ERRMSG(err));
- return AVERROR(EINVAL);
- }
+ err = load_font(ctx);
+ if (err)
+ return err;
+ if (!s->fontsize)
+ s->fontsize = 16;
if ((err = FT_Set_Pixel_Sizes(s->face, 0, s->fontsize))) {
av_log(ctx, AV_LOG_ERROR, "Could not set font size to %d pixels: %s\n",
s->fontsize, FT_ERRMSG(err));
return AVERROR(EINVAL);
}
+ if (s->borderw) {
+ if (FT_Stroker_New(s->library, &s->stroker)) {
+ av_log(ctx, AV_LOG_ERROR, "Coult not init FT stroker\n");
+ return AVERROR_EXTERNAL;
+ }
+ FT_Stroker_Set(s->stroker, s->borderw << 6, FT_STROKER_LINECAP_ROUND,
+ FT_STROKER_LINEJOIN_ROUND, 0);
+ }
+
s->use_kerning = FT_HAS_KERNING(s->face);
/* load the fallback glyph with code 0 */
load_glyph(ctx, NULL, 0);
/* set the tabsize in pixels */
- if ((err = load_glyph(ctx, &glyph, ' ') < 0)) {
+ if ((err = load_glyph(ctx, &glyph, ' ')) < 0) {
av_log(ctx, AV_LOG_ERROR, "Could not set tabsize.\n");
return err;
}
s->tabsize *= glyph->advance;
+ if (s->exp_mode == EXP_STRFTIME &&
+ (strchr(s->text, '%') || strchr(s->text, '\\')))
+ av_log(ctx, AV_LOG_WARNING, "expansion=strftime is deprecated.\n");
+
+ av_bprint_init(&s->expanded_text, 0, AV_BPRINT_SIZE_UNLIMITED);
+ av_bprint_init(&s->expanded_fontcolor, 0, AV_BPRINT_SIZE_UNLIMITED);
+
return 0;
}
static int query_formats(AVFilterContext *ctx)
{
- static const enum AVPixelFormat pix_fmts[] = {
- AV_PIX_FMT_ARGB, AV_PIX_FMT_RGBA,
- AV_PIX_FMT_ABGR, AV_PIX_FMT_BGRA,
- AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
- AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV444P,
- AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV411P,
- AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
- AV_PIX_FMT_NONE
- };
-
- ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
- return 0;
+ return ff_set_common_formats(ctx, ff_draw_supported_pixel_formats(0));
}
static int glyph_enu_free(void *opaque, void *elem)
{
+ Glyph *glyph = elem;
+
+ FT_Done_Glyph(glyph->glyph);
+ FT_Done_Glyph(glyph->border_glyph);
av_free(elem);
return 0;
}
@@ -495,344 +706,387 @@ static int glyph_enu_free(void *opaque, void *elem)
static av_cold void uninit(AVFilterContext *ctx)
{
DrawTextContext *s = ctx->priv;
- int i;
av_expr_free(s->x_pexpr);
av_expr_free(s->y_pexpr);
- av_expr_free(s->d_pexpr);
- s->x_pexpr = s->y_pexpr = s->d_pexpr = NULL;
- av_freep(&s->expanded_text);
+ av_expr_free(s->a_pexpr);
+ s->x_pexpr = s->y_pexpr = s->a_pexpr = NULL;
av_freep(&s->positions);
+ s->nb_positions = 0;
+
+
av_tree_enumerate(s->glyphs, NULL, NULL, glyph_enu_free);
av_tree_destroy(s->glyphs);
- s->glyphs = 0;
+ s->glyphs = NULL;
+
FT_Done_Face(s->face);
+ FT_Stroker_Done(s->stroker);
FT_Done_FreeType(s->library);
- for (i = 0; i < 4; i++) {
- av_freep(&s->box_line[i]);
- s->pixel_step[i] = 0;
- }
-
+ av_bprint_finalize(&s->expanded_text, NULL);
+ av_bprint_finalize(&s->expanded_fontcolor, NULL);
}
-static inline int is_newline(uint32_t c)
+static int config_input(AVFilterLink *inlink)
{
- return c == '\n' || c == '\r' || c == '\f' || c == '\v';
-}
+ AVFilterContext *ctx = inlink->dst;
+ DrawTextContext *s = ctx->priv;
+ int ret;
-static int expand_strftime(DrawTextContext *s)
-{
- struct tm ltime;
- time_t now = time(0);
- uint8_t *buf = s->expanded_text;
- int buf_size = s->expanded_text_size;
+ ff_draw_init(&s->dc, inlink->format, FF_DRAW_PROCESS_ALPHA);
+ ff_draw_color(&s->dc, &s->fontcolor, s->fontcolor.rgba);
+ ff_draw_color(&s->dc, &s->shadowcolor, s->shadowcolor.rgba);
+ ff_draw_color(&s->dc, &s->bordercolor, s->bordercolor.rgba);
+ ff_draw_color(&s->dc, &s->boxcolor, s->boxcolor.rgba);
+
+ s->var_values[VAR_w] = s->var_values[VAR_W] = s->var_values[VAR_MAIN_W] = inlink->w;
+ s->var_values[VAR_h] = s->var_values[VAR_H] = s->var_values[VAR_MAIN_H] = inlink->h;
+ s->var_values[VAR_SAR] = inlink->sample_aspect_ratio.num ? av_q2d(inlink->sample_aspect_ratio) : 1;
+ s->var_values[VAR_DAR] = (double)inlink->w / inlink->h * s->var_values[VAR_SAR];
+ s->var_values[VAR_HSUB] = 1 << s->dc.hsub_max;
+ s->var_values[VAR_VSUB] = 1 << s->dc.vsub_max;
+ s->var_values[VAR_X] = NAN;
+ s->var_values[VAR_Y] = NAN;
+ s->var_values[VAR_T] = NAN;
- if (!buf)
- buf_size = 2 * strlen(s->text) + 1;
+ av_lfg_init(&s->prng, av_get_random_seed());
- localtime_r(&now, &ltime);
+ av_expr_free(s->x_pexpr);
+ av_expr_free(s->y_pexpr);
+ av_expr_free(s->a_pexpr);
+ s->x_pexpr = s->y_pexpr = s->a_pexpr = NULL;
- while ((buf = av_realloc(buf, buf_size))) {
- *buf = 1;
- if (strftime(buf, buf_size, s->text, &ltime) != 0 || *buf == 0)
- break;
- buf_size *= 2;
- }
+ if ((ret = av_expr_parse(&s->x_pexpr, s->x_expr, var_names,
+ NULL, NULL, fun2_names, fun2, 0, ctx)) < 0 ||
+ (ret = av_expr_parse(&s->y_pexpr, s->y_expr, var_names,
+ NULL, NULL, fun2_names, fun2, 0, ctx)) < 0 ||
+ (ret = av_expr_parse(&s->a_pexpr, s->a_expr, var_names,
+ NULL, NULL, fun2_names, fun2, 0, ctx)) < 0)
- if (!buf)
- return AVERROR(ENOMEM);
- s->expanded_text = buf;
- s->expanded_text_size = buf_size;
+ return AVERROR(EINVAL);
return 0;
}
-static int dtext_prepare_text(AVFilterContext *ctx)
+static int command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
{
DrawTextContext *s = ctx->priv;
- uint32_t code = 0, prev_code = 0;
- int x = 0, y = 0, i = 0, ret;
- int text_height, baseline;
- char *text;
- uint8_t *p;
- int str_w = 0, len;
- int y_min = 32000, y_max = -32000;
- FT_Vector delta;
- Glyph *glyph = NULL, *prev_glyph = NULL;
- Glyph dummy = { 0 };
- int width = ctx->inputs[0]->w;
- int height = ctx->inputs[0]->h;
-
- ret = expand_strftime(s);
- if (ret < 0)
- return ret;
-
- text = s->expanded_text ? s->expanded_text : s->text;
-
- if ((len = strlen(text)) > s->nb_positions) {
- FT_Vector *p = av_realloc(s->positions,
- len * sizeof(*s->positions));
- if (!p) {
- av_freep(s->positions);
- s->nb_positions = 0;
- return AVERROR(ENOMEM);
- } else {
- s->positions = p;
- s->nb_positions = len;
- }
- }
-
- /* load and cache glyphs */
- for (i = 0, p = text; *p; i++) {
- GET_UTF8(code, *p++, continue;);
-
- /* get glyph */
- dummy.code = code;
- glyph = av_tree_find(s->glyphs, &dummy, glyph_cmp, NULL);
- if (!glyph) {
- ret = load_glyph(ctx, &glyph, code);
- if (ret)
- return ret;
- }
- y_min = FFMIN(glyph->bbox.yMin, y_min);
- y_max = FFMAX(glyph->bbox.yMax, y_max);
+ if (!strcmp(cmd, "reinit")) {
+ int ret;
+ uninit(ctx);
+ s->reinit = 1;
+ if ((ret = av_set_options_string(ctx, arg, "=", ":")) < 0)
+ return ret;
+ if ((ret = init(ctx)) < 0)
+ return ret;
+ return config_input(ctx->inputs[0]);
}
- text_height = y_max - y_min;
- baseline = y_max;
- /* compute and save position for each glyph */
- glyph = NULL;
- for (i = 0, p = text; *p; i++) {
- GET_UTF8(code, *p++, continue;);
+ return AVERROR(ENOSYS);
+}
- /* skip the \n in the sequence \r\n */
- if (prev_code == '\r' && code == '\n')
- continue;
+static int func_pict_type(AVFilterContext *ctx, AVBPrint *bp,
+ char *fct, unsigned argc, char **argv, int tag)
+{
+ DrawTextContext *s = ctx->priv;
- prev_code = code;
- if (is_newline(code)) {
- str_w = FFMAX(str_w, x - s->x);
- y += text_height;
- x = 0;
- continue;
- }
+ av_bprintf(bp, "%c", av_get_picture_type_char(s->var_values[VAR_PICT_TYPE]));
+ return 0;
+}
- /* get glyph */
- prev_glyph = glyph;
- dummy.code = code;
- glyph = av_tree_find(s->glyphs, &dummy, glyph_cmp, NULL);
+static int func_pts(AVFilterContext *ctx, AVBPrint *bp,
+ char *fct, unsigned argc, char **argv, int tag)
+{
+ DrawTextContext *s = ctx->priv;
+ const char *fmt;
+ double pts = s->var_values[VAR_T];
+ int ret;
- /* kerning */
- if (s->use_kerning && prev_glyph && glyph->code) {
- FT_Get_Kerning(s->face, prev_glyph->code, glyph->code,
- ft_kerning_default, &delta);
- x += delta.x >> 6;
+ fmt = argc >= 1 ? argv[0] : "flt";
+ if (argc >= 2) {
+ int64_t delta;
+ if ((ret = av_parse_time(&delta, argv[1], 1)) < 0) {
+ av_log(ctx, AV_LOG_ERROR, "Invalid delta '%s'\n", argv[1]);
+ return ret;
}
-
- if (x + glyph->bbox.xMax >= width) {
- str_w = FFMAX(str_w, x);
- y += text_height;
- x = 0;
+ pts += (double)delta / AV_TIME_BASE;
+ }
+ if (!strcmp(fmt, "flt")) {
+ av_bprintf(bp, "%.6f", pts);
+ } else if (!strcmp(fmt, "hms")) {
+ if (isnan(pts)) {
+ av_bprintf(bp, " ??:??:??.???");
+ } else {
+ int64_t ms = llrint(pts * 1000);
+ char sign = ' ';
+ if (ms < 0) {
+ sign = '-';
+ ms = -ms;
+ }
+ av_bprintf(bp, "%c%02d:%02d:%02d.%03d", sign,
+ (int)(ms / (60 * 60 * 1000)),
+ (int)(ms / (60 * 1000)) % 60,
+ (int)(ms / 1000) % 60,
+ (int)(ms % 1000));
}
-
- /* save position */
- s->positions[i].x = x + glyph->bitmap_left;
- s->positions[i].y = y - glyph->bitmap_top + baseline;
- if (code == '\t') x = (x / s->tabsize + 1)*s->tabsize;
- else x += glyph->advance;
+ } else if (!strcmp(fmt, "localtime") ||
+ !strcmp(fmt, "gmtime")) {
+ struct tm tm;
+ time_t ms = (time_t)pts;
+ const char *timefmt = argc >= 3 ? argv[2] : "%Y-%m-%d %H:%M:%S";
+ if (!strcmp(fmt, "localtime"))
+ localtime_r(&ms, &tm);
+ else
+ gmtime_r(&ms, &tm);
+ av_bprint_strftime(bp, timefmt, &tm);
+ } else {
+ av_log(ctx, AV_LOG_ERROR, "Invalid format '%s'\n", fmt);
+ return AVERROR(EINVAL);
}
+ return 0;
+}
- str_w = FFMIN(width - 1, FFMAX(str_w, x));
- y = FFMIN(y + text_height, height - 1);
+static int func_frame_num(AVFilterContext *ctx, AVBPrint *bp,
+ char *fct, unsigned argc, char **argv, int tag)
+{
+ DrawTextContext *s = ctx->priv;
- s->w = str_w;
- s->var_values[VAR_TEXT_W] = s->var_values[VAR_TW] = s->w;
- s->h = y;
- s->var_values[VAR_TEXT_H] = s->var_values[VAR_TH] = s->h;
+ av_bprintf(bp, "%d", (int)s->var_values[VAR_N]);
+ return 0;
+}
+static int func_metadata(AVFilterContext *ctx, AVBPrint *bp,
+ char *fct, unsigned argc, char **argv, int tag)
+{
+ DrawTextContext *s = ctx->priv;
+ AVDictionaryEntry *e = av_dict_get(s->metadata, argv[0], NULL, 0);
+
+ if (e && e->value)
+ av_bprintf(bp, "%s", e->value);
+ else if (argc >= 2)
+ av_bprintf(bp, "%s", argv[1]);
return 0;
}
+static int func_strftime(AVFilterContext *ctx, AVBPrint *bp,
+ char *fct, unsigned argc, char **argv, int tag)
+{
+ const char *fmt = argc ? argv[0] : "%Y-%m-%d %H:%M:%S";
+ time_t now;
+ struct tm tm;
-static int config_input(AVFilterLink *inlink)
+ time(&now);
+ if (tag == 'L')
+ localtime_r(&now, &tm);
+ else
+ tm = *gmtime_r(&now, &tm);
+ av_bprint_strftime(bp, fmt, &tm);
+ return 0;
+}
+
+static int func_eval_expr(AVFilterContext *ctx, AVBPrint *bp,
+ char *fct, unsigned argc, char **argv, int tag)
{
- AVFilterContext *ctx = inlink->dst;
DrawTextContext *s = ctx->priv;
- const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
+ double res;
int ret;
- s->hsub = pix_desc->log2_chroma_w;
- s->vsub = pix_desc->log2_chroma_h;
+ ret = av_expr_parse_and_eval(&res, argv[0], var_names, s->var_values,
+ NULL, NULL, fun2_names, fun2,
+ &s->prng, 0, ctx);
+ if (ret < 0)
+ av_log(ctx, AV_LOG_ERROR,
+ "Expression '%s' for the expr text expansion function is not valid\n",
+ argv[0]);
+ else
+ av_bprintf(bp, "%f", res);
- s->var_values[VAR_E ] = M_E;
- s->var_values[VAR_PHI] = M_PHI;
- s->var_values[VAR_PI ] = M_PI;
+ return ret;
+}
- s->var_values[VAR_MAIN_W] =
- s->var_values[VAR_MW] = ctx->inputs[0]->w;
- s->var_values[VAR_MAIN_H] =
- s->var_values[VAR_MH] = ctx->inputs[0]->h;
+static int func_eval_expr_int_format(AVFilterContext *ctx, AVBPrint *bp,
+ char *fct, unsigned argc, char **argv, int tag)
+{
+ DrawTextContext *s = ctx->priv;
+ double res;
+ int intval;
+ int ret;
+ unsigned int positions = 0;
+ char fmt_str[30] = "%";
+
+ /*
+ * argv[0] expression to be converted to `int`
+ * argv[1] format: 'x', 'X', 'd' or 'u'
+ * argv[2] positions printed (optional)
+ */
+
+ ret = av_expr_parse_and_eval(&res, argv[0], var_names, s->var_values,
+ NULL, NULL, fun2_names, fun2,
+ &s->prng, 0, ctx);
+ if (ret < 0) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Expression '%s' for the expr text expansion function is not valid\n",
+ argv[0]);
+ return ret;
+ }
- s->var_values[VAR_X] = 0;
- s->var_values[VAR_Y] = 0;
- s->var_values[VAR_T] = NAN;
+ if (!strchr("xXdu", argv[1][0])) {
+ av_log(ctx, AV_LOG_ERROR, "Invalid format '%c' specified,"
+ " allowed values: 'x', 'X', 'd', 'u'\n", argv[1][0]);
+ return AVERROR(EINVAL);
+ }
- av_lfg_init(&s->prng, av_get_random_seed());
+ if (argc == 3) {
+ ret = sscanf(argv[2], "%u", &positions);
+ if (ret != 1) {
+ av_log(ctx, AV_LOG_ERROR, "expr_int_format(): Invalid number of positions"
+ " to print: '%s'\n", argv[2]);
+ return AVERROR(EINVAL);
+ }
+ }
- av_expr_free(s->x_pexpr);
- av_expr_free(s->y_pexpr);
- av_expr_free(s->d_pexpr);
- s->x_pexpr = s->y_pexpr = s->d_pexpr = NULL;
- if ((ret = av_expr_parse(&s->x_pexpr, s->x_expr, var_names,
- NULL, NULL, fun2_names, fun2, 0, ctx)) < 0 ||
- (ret = av_expr_parse(&s->y_pexpr, s->y_expr, var_names,
- NULL, NULL, fun2_names, fun2, 0, ctx)) < 0 ||
- (ret = av_expr_parse(&s->d_pexpr, s->d_expr, var_names,
- NULL, NULL, fun2_names, fun2, 0, ctx)) < 0 ||
- (ret = av_expr_parse(&s->a_pexpr, s->a_expr, var_names,
- NULL, NULL, fun2_names, fun2, 0, ctx)) < 0)
+ feclearexcept(FE_ALL_EXCEPT);
+ intval = res;
+ if ((ret = fetestexcept(FE_INVALID|FE_OVERFLOW|FE_UNDERFLOW))) {
+ av_log(ctx, AV_LOG_ERROR, "Conversion of floating-point result to int failed. Control register: 0x%08x. Conversion result: %d\n", ret, intval);
return AVERROR(EINVAL);
+ }
- if ((ret =
- ff_fill_line_with_color(s->box_line, s->pixel_step,
- inlink->w, s->boxcolor,
- inlink->format, s->boxcolor_rgba,
- &s->is_packed_rgb, s->rgba_map)) < 0)
- return ret;
+ if (argc == 3)
+ av_strlcatf(fmt_str, sizeof(fmt_str), "0%u", positions);
+ av_strlcatf(fmt_str, sizeof(fmt_str), "%c", argv[1][0]);
- if (!s->is_packed_rgb) {
- uint8_t *rgba = s->fontcolor_rgba;
- s->fontcolor[0] = RGB_TO_Y_CCIR(rgba[0], rgba[1], rgba[2]);
- s->fontcolor[1] = RGB_TO_U_CCIR(rgba[0], rgba[1], rgba[2], 0);
- s->fontcolor[2] = RGB_TO_V_CCIR(rgba[0], rgba[1], rgba[2], 0);
- s->fontcolor[3] = rgba[3];
- rgba = s->shadowcolor_rgba;
- s->shadowcolor[0] = RGB_TO_Y_CCIR(rgba[0], rgba[1], rgba[2]);
- s->shadowcolor[1] = RGB_TO_U_CCIR(rgba[0], rgba[1], rgba[2], 0);
- s->shadowcolor[2] = RGB_TO_V_CCIR(rgba[0], rgba[1], rgba[2], 0);
- s->shadowcolor[3] = rgba[3];
- }
+ av_log(ctx, AV_LOG_DEBUG, "Formatting value %f (expr '%s') with spec '%s'\n",
+ res, argv[0], fmt_str);
- s->draw = 1;
+ av_bprintf(bp, fmt_str, intval);
- return dtext_prepare_text(ctx);
+ return 0;
}
-#define GET_BITMAP_VAL(r, c) \
- bitmap->pixel_mode == FT_PIXEL_MODE_MONO ? \
- (bitmap->buffer[(r) * bitmap->pitch + ((c)>>3)] & (0x80 >> ((c)&7))) * 255 : \
- bitmap->buffer[(r) * bitmap->pitch + (c)]
-
-#define SET_PIXEL_YUV(frame, yuva_color, val, x, y, hsub, vsub) { \
- luma_pos = ((x) ) + ((y) ) * frame->linesize[0]; \
- alpha = yuva_color[3] * alpha_mul * (val) * 129 / 255; \
- frame->data[0][luma_pos] = (alpha * yuva_color[0] + (255*255*129 - alpha) * frame->data[0][luma_pos] ) >> 23; \
- if (((x) & ((1<<(hsub)) - 1)) == 0 && ((y) & ((1<<(vsub)) - 1)) == 0) {\
- chroma_pos1 = ((x) >> (hsub)) + ((y) >> (vsub)) * frame->linesize[1]; \
- chroma_pos2 = ((x) >> (hsub)) + ((y) >> (vsub)) * frame->linesize[2]; \
- frame->data[1][chroma_pos1] = (alpha * yuva_color[1] + (255*255*129 - alpha) * frame->data[1][chroma_pos1]) >> 23; \
- frame->data[2][chroma_pos2] = (alpha * yuva_color[2] + (255*255*129 - alpha) * frame->data[2][chroma_pos2]) >> 23; \
- }\
-}
+static const struct drawtext_function {
+ const char *name;
+ unsigned argc_min, argc_max;
+ int tag; /**< opaque argument to func */
+ int (*func)(AVFilterContext *, AVBPrint *, char *, unsigned, char **, int);
+} functions[] = {
+ { "expr", 1, 1, 0, func_eval_expr },
+ { "e", 1, 1, 0, func_eval_expr },
+ { "expr_int_format", 2, 3, 0, func_eval_expr_int_format },
+ { "eif", 2, 3, 0, func_eval_expr_int_format },
+ { "pict_type", 0, 0, 0, func_pict_type },
+ { "pts", 0, 3, 0, func_pts },
+ { "gmtime", 0, 1, 'G', func_strftime },
+ { "localtime", 0, 1, 'L', func_strftime },
+ { "frame_num", 0, 0, 0, func_frame_num },
+ { "n", 0, 0, 0, func_frame_num },
+ { "metadata", 1, 2, 0, func_metadata },
+};
-static inline int draw_glyph_yuv(AVFrame *frame, FT_Bitmap *bitmap, unsigned int x,
- unsigned int y, unsigned int width, unsigned int height,
- const uint8_t yuva_color[4], int hsub, int vsub,
- int alpha_mul)
+static int eval_function(AVFilterContext *ctx, AVBPrint *bp, char *fct,
+ unsigned argc, char **argv)
{
- int r, c, alpha;
- unsigned int luma_pos, chroma_pos1, chroma_pos2;
- uint8_t src_val;
-
- for (r = 0; r < bitmap->rows && r+y < height; r++) {
- for (c = 0; c < bitmap->width && c+x < width; c++) {
- /* get intensity value in the glyph bitmap (source) */
- src_val = GET_BITMAP_VAL(r, c);
- if (!src_val)
- continue;
-
- SET_PIXEL_YUV(frame, yuva_color, src_val, c+x, y+r, hsub, vsub);
+ unsigned i;
+
+ for (i = 0; i < FF_ARRAY_ELEMS(functions); i++) {
+ if (strcmp(fct, functions[i].name))
+ continue;
+ if (argc < functions[i].argc_min) {
+ av_log(ctx, AV_LOG_ERROR, "%%{%s} requires at least %d arguments\n",
+ fct, functions[i].argc_min);
+ return AVERROR(EINVAL);
}
+ if (argc > functions[i].argc_max) {
+ av_log(ctx, AV_LOG_ERROR, "%%{%s} requires at most %d arguments\n",
+ fct, functions[i].argc_max);
+ return AVERROR(EINVAL);
+ }
+ break;
}
-
- return 0;
-}
-
-#define SET_PIXEL_RGB(frame, rgba_color, val, x, y, pixel_step, r_off, g_off, b_off, a_off) { \
- p = frame->data[0] + (x) * pixel_step + ((y) * frame->linesize[0]); \
- alpha = rgba_color[3] * alpha_mul * (val) * 129 / 255; \
- *(p+r_off) = (alpha * rgba_color[0] + (255*255*129 - alpha) * *(p+r_off)) >> 23; \
- *(p+g_off) = (alpha * rgba_color[1] + (255*255*129 - alpha) * *(p+g_off)) >> 23; \
- *(p+b_off) = (alpha * rgba_color[2] + (255*255*129 - alpha) * *(p+b_off)) >> 23; \
+ if (i >= FF_ARRAY_ELEMS(functions)) {
+ av_log(ctx, AV_LOG_ERROR, "%%{%s} is not known\n", fct);
+ return AVERROR(EINVAL);
+ }
+ return functions[i].func(ctx, bp, fct, argc, argv, functions[i].tag);
}
-static inline int draw_glyph_rgb(AVFrame *frame, FT_Bitmap *bitmap,
- unsigned int x, unsigned int y,
- unsigned int width, unsigned int height, int pixel_step,
- const uint8_t rgba_color[4], const uint8_t rgba_map[4],
- int alpha_mul)
+static int expand_function(AVFilterContext *ctx, AVBPrint *bp, char **rtext)
{
- int r, c, alpha;
- uint8_t *p;
- uint8_t src_val;
-
- for (r = 0; r < bitmap->rows && r+y < height; r++) {
- for (c = 0; c < bitmap->width && c+x < width; c++) {
- /* get intensity value in the glyph bitmap (source) */
- src_val = GET_BITMAP_VAL(r, c);
- if (!src_val)
- continue;
+ const char *text = *rtext;
+ char *argv[16] = { NULL };
+ unsigned argc = 0, i;
+ int ret;
- SET_PIXEL_RGB(frame, rgba_color, src_val, c+x, y+r, pixel_step,
- rgba_map[0], rgba_map[1], rgba_map[2], rgba_map[3]);
+ if (*text != '{') {
+ av_log(ctx, AV_LOG_ERROR, "Stray %% near '%s'\n", text);
+ return AVERROR(EINVAL);
+ }
+ text++;
+ while (1) {
+ if (!(argv[argc++] = av_get_token(&text, ":}"))) {
+ ret = AVERROR(ENOMEM);
+ goto end;
+ }
+ if (!*text) {
+ av_log(ctx, AV_LOG_ERROR, "Unterminated %%{} near '%s'\n", *rtext);
+ ret = AVERROR(EINVAL);
+ goto end;
}
+ if (argc == FF_ARRAY_ELEMS(argv))
+ av_freep(&argv[--argc]); /* error will be caught later */
+ if (*text == '}')
+ break;
+ text++;
}
- return 0;
+ if ((ret = eval_function(ctx, bp, argv[0], argc - 1, argv + 1)) < 0)
+ goto end;
+ ret = 0;
+ *rtext = (char *)text + 1;
+
+end:
+ for (i = 0; i < argc; i++)
+ av_freep(&argv[i]);
+ return ret;
}
-static inline void drawbox(AVFrame *frame, unsigned int x, unsigned int y,
- unsigned int width, unsigned int height,
- uint8_t *line[4], int pixel_step[4], uint8_t color[4],
- int hsub, int vsub, int is_rgba_packed, uint8_t rgba_map[4],
- int alpha_mul)
+static int expand_text(AVFilterContext *ctx, char *text, AVBPrint *bp)
{
- int i, j, alpha;
-
- if (color[3] != 0xFF || alpha_mul != 0xFF) {
- if (is_rgba_packed) {
- uint8_t *p;
- for (j = 0; j < height; j++)
- for (i = 0; i < width; i++)
- SET_PIXEL_RGB(frame, color, 255, i+x, y+j, pixel_step[0],
- rgba_map[0], rgba_map[1], rgba_map[2], rgba_map[3]);
+ int ret;
+
+ av_bprint_clear(bp);
+ while (*text) {
+ if (*text == '\\' && text[1]) {
+ av_bprint_chars(bp, text[1], 1);
+ text += 2;
+ } else if (*text == '%') {
+ text++;
+ if ((ret = expand_function(ctx, bp, &text)) < 0)
+ return ret;
} else {
- unsigned int luma_pos, chroma_pos1, chroma_pos2;
- for (j = 0; j < height; j++)
- for (i = 0; i < width; i++)
- SET_PIXEL_YUV(frame, color, 255, i+x, y+j, hsub, vsub);
+ av_bprint_chars(bp, *text, 1);
+ text++;
}
- } else {
- ff_draw_rectangle(frame->data, frame->linesize,
- line, pixel_step, hsub, vsub,
- x, y, width, height);
}
+ if (!av_bprint_is_complete(bp))
+ return AVERROR(ENOMEM);
+ return 0;
}
static int draw_glyphs(DrawTextContext *s, AVFrame *frame,
int width, int height,
- const uint8_t rgbcolor[4], const uint8_t yuvcolor[4],
- int x, int y)
+ FFDrawColor *color,
+ int x, int y, int borderw)
{
- char *text = s->expanded_text;
+ char *text = s->expanded_text.str;
uint32_t code = 0;
- int i;
+ int i, x1, y1;
uint8_t *p;
Glyph *glyph = NULL;
for (i = 0, p = text; *p; i++) {
+ FT_Bitmap bitmap;
Glyph dummy = { 0 };
GET_UTF8(code, *p++, continue;);
@@ -841,71 +1095,34 @@ static int draw_glyphs(DrawTextContext *s, AVFrame *frame,
continue;
dummy.code = code;
- glyph = av_tree_find(s->glyphs, &dummy, (void *)glyph_cmp, NULL);
+ glyph = av_tree_find(s->glyphs, &dummy, glyph_cmp, NULL);
+
+ bitmap = borderw ? glyph->border_bitmap : glyph->bitmap;
if (glyph->bitmap.pixel_mode != FT_PIXEL_MODE_MONO &&
glyph->bitmap.pixel_mode != FT_PIXEL_MODE_GRAY)
return AVERROR(EINVAL);
- if (s->is_packed_rgb) {
- draw_glyph_rgb(frame, &glyph->bitmap,
- s->positions[i].x+x, s->positions[i].y+y, width, height,
- s->pixel_step[0], rgbcolor, s->rgba_map, s->alpha);
- } else {
- draw_glyph_yuv(frame, &glyph->bitmap,
- s->positions[i].x+x, s->positions[i].y+y, width, height,
- yuvcolor, s->hsub, s->vsub, s->alpha);
- }
- }
-
- return 0;
-}
-
-static int draw_text(AVFilterContext *ctx, AVFrame *frame,
- int width, int height)
-{
- DrawTextContext *s = ctx->priv;
- int ret;
+ x1 = s->positions[i].x+s->x+x - borderw;
+ y1 = s->positions[i].y+s->y+y - borderw;
- /* draw box */
- if (s->draw_box)
- drawbox(frame, s->x, s->y, s->w, s->h,
- s->box_line, s->pixel_step, s->boxcolor,
- s->hsub, s->vsub, s->is_packed_rgb,
- s->rgba_map, s->alpha);
-
- if (s->shadowx || s->shadowy) {
- if ((ret = draw_glyphs(s, frame, width, height,
- s->shadowcolor_rgba,
- s->shadowcolor,
- s->x + s->shadowx,
- s->y + s->shadowy)) < 0)
- return ret;
+ ff_blend_mask(&s->dc, color,
+ frame->data, frame->linesize, width, height,
+ bitmap.buffer, bitmap.pitch,
+ bitmap.width, bitmap.rows,
+ bitmap.pixel_mode == FT_PIXEL_MODE_MONO ? 0 : 3,
+ 0, x1, y1);
}
- if ((ret = draw_glyphs(s, frame, width, height,
- s->fontcolor_rgba,
- s->fontcolor,
- s->x,
- s->y)) < 0)
- return ret;
-
return 0;
}
-static inline int normalize_double(int *n, double d)
-{
- int ret = 0;
- if (isnan(d)) {
- ret = AVERROR(EINVAL);
- } else if (d > INT_MAX || d < INT_MIN) {
- *n = d > INT_MAX ? INT_MAX : INT_MIN;
- ret = AVERROR(EINVAL);
- } else
- *n = round(d);
-
- return ret;
+static void update_color_with_alpha(DrawTextContext *s, FFDrawColor *color, const FFDrawColor incolor)
+{
+ *color = incolor;
+ color->rgba[3] = (color->rgba[3] * s->alpha) / 255;
+ ff_draw_color(&s->dc, color, color->rgba);
}
static void update_alpha(DrawTextContext *s)
@@ -923,66 +1140,239 @@ static void update_alpha(DrawTextContext *s)
s->alpha = 256 * alpha;
}
-static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
+static int draw_text(AVFilterContext *ctx, AVFrame *frame,
+ int width, int height)
{
- AVFilterContext *ctx = inlink->dst;
DrawTextContext *s = ctx->priv;
- int ret = 0;
+ AVFilterLink *inlink = ctx->inputs[0];
- if ((ret = dtext_prepare_text(ctx)) < 0) {
- av_log(ctx, AV_LOG_ERROR, "Can't draw text\n");
- av_frame_free(&frame);
- return ret;
+ uint32_t code = 0, prev_code = 0;
+ int x = 0, y = 0, i = 0, ret;
+ int max_text_line_w = 0, len;
+ int box_w, box_h;
+ char *text;
+ uint8_t *p;
+ int y_min = 32000, y_max = -32000;
+ int x_min = 32000, x_max = -32000;
+ FT_Vector delta;
+ Glyph *glyph = NULL, *prev_glyph = NULL;
+ Glyph dummy = { 0 };
+
+ time_t now = time(0);
+ struct tm ltime;
+ AVBPrint *bp = &s->expanded_text;
+
+ FFDrawColor fontcolor;
+ FFDrawColor shadowcolor;
+ FFDrawColor bordercolor;
+ FFDrawColor boxcolor;
+
+ av_bprint_clear(bp);
+
+ if(s->basetime != AV_NOPTS_VALUE)
+ now= frame->pts*av_q2d(ctx->inputs[0]->time_base) + s->basetime/1000000;
+
+ switch (s->exp_mode) {
+ case EXP_NONE:
+ av_bprintf(bp, "%s", s->text);
+ break;
+ case EXP_NORMAL:
+ if ((ret = expand_text(ctx, s->text, &s->expanded_text)) < 0)
+ return ret;
+ break;
+ case EXP_STRFTIME:
+ localtime_r(&now, &ltime);
+ av_bprint_strftime(bp, s->text, &ltime);
+ break;
}
- s->var_values[VAR_T] = frame->pts == AV_NOPTS_VALUE ?
- NAN : frame->pts * av_q2d(inlink->time_base);
- s->var_values[VAR_X] =
- av_expr_eval(s->x_pexpr, s->var_values, &s->prng);
- s->var_values[VAR_Y] =
- av_expr_eval(s->y_pexpr, s->var_values, &s->prng);
- s->var_values[VAR_X] =
- av_expr_eval(s->x_pexpr, s->var_values, &s->prng);
+ if (s->tc_opt_string) {
+ char tcbuf[AV_TIMECODE_STR_SIZE];
+ av_timecode_make_string(&s->tc, tcbuf, inlink->frame_count_out);
+ av_bprint_clear(bp);
+ av_bprintf(bp, "%s%s", s->text, tcbuf);
+ }
+
+ if (!av_bprint_is_complete(bp))
+ return AVERROR(ENOMEM);
+ text = s->expanded_text.str;
+ if ((len = s->expanded_text.len) > s->nb_positions) {
+ if (!(s->positions =
+ av_realloc(s->positions, len*sizeof(*s->positions))))
+ return AVERROR(ENOMEM);
+ s->nb_positions = len;
+ }
+
+ if (s->fontcolor_expr[0]) {
+ /* If expression is set, evaluate and replace the static value */
+ av_bprint_clear(&s->expanded_fontcolor);
+ if ((ret = expand_text(ctx, s->fontcolor_expr, &s->expanded_fontcolor)) < 0)
+ return ret;
+ if (!av_bprint_is_complete(&s->expanded_fontcolor))
+ return AVERROR(ENOMEM);
+ av_log(s, AV_LOG_DEBUG, "Evaluated fontcolor is '%s'\n", s->expanded_fontcolor.str);
+ ret = av_parse_color(s->fontcolor.rgba, s->expanded_fontcolor.str, -1, s);
+ if (ret)
+ return ret;
+ ff_draw_color(&s->dc, &s->fontcolor, s->fontcolor.rgba);
+ }
+
+ x = 0;
+ y = 0;
+
+ /* load and cache glyphs */
+ for (i = 0, p = text; *p; i++) {
+ GET_UTF8(code, *p++, continue;);
+
+ /* get glyph */
+ dummy.code = code;
+ glyph = av_tree_find(s->glyphs, &dummy, glyph_cmp, NULL);
+ if (!glyph) {
+ ret = load_glyph(ctx, &glyph, code);
+ if (ret < 0)
+ return ret;
+ }
+
+ y_min = FFMIN(glyph->bbox.yMin, y_min);
+ y_max = FFMAX(glyph->bbox.yMax, y_max);
+ x_min = FFMIN(glyph->bbox.xMin, x_min);
+ x_max = FFMAX(glyph->bbox.xMax, x_max);
+ }
+ s->max_glyph_h = y_max - y_min;
+ s->max_glyph_w = x_max - x_min;
+
+ /* compute and save position for each glyph */
+ glyph = NULL;
+ for (i = 0, p = text; *p; i++) {
+ GET_UTF8(code, *p++, continue;);
+
+ /* skip the \n in the sequence \r\n */
+ if (prev_code == '\r' && code == '\n')
+ continue;
- s->draw = av_expr_eval(s->d_pexpr, s->var_values, &s->prng);
+ prev_code = code;
+ if (is_newline(code)) {
+
+ max_text_line_w = FFMAX(max_text_line_w, x);
+ y += s->max_glyph_h + s->line_spacing;
+ x = 0;
+ continue;
+ }
+
+ /* get glyph */
+ prev_glyph = glyph;
+ dummy.code = code;
+ glyph = av_tree_find(s->glyphs, &dummy, glyph_cmp, NULL);
+
+ /* kerning */
+ if (s->use_kerning && prev_glyph && glyph->code) {
+ FT_Get_Kerning(s->face, prev_glyph->code, glyph->code,
+ ft_kerning_default, &delta);
+ x += delta.x >> 6;
+ }
+
+ /* save position */
+ s->positions[i].x = x + glyph->bitmap_left;
+ s->positions[i].y = y - glyph->bitmap_top + y_max;
+ if (code == '\t') x = (x / s->tabsize + 1)*s->tabsize;
+ else x += glyph->advance;
+ }
+
+ max_text_line_w = FFMAX(x, max_text_line_w);
+
+ s->var_values[VAR_TW] = s->var_values[VAR_TEXT_W] = max_text_line_w;
+ s->var_values[VAR_TH] = s->var_values[VAR_TEXT_H] = y + s->max_glyph_h;
+
+ s->var_values[VAR_MAX_GLYPH_W] = s->max_glyph_w;
+ s->var_values[VAR_MAX_GLYPH_H] = s->max_glyph_h;
+ s->var_values[VAR_MAX_GLYPH_A] = s->var_values[VAR_ASCENT ] = y_max;
+ s->var_values[VAR_MAX_GLYPH_D] = s->var_values[VAR_DESCENT] = y_min;
+
+ s->var_values[VAR_LINE_H] = s->var_values[VAR_LH] = s->max_glyph_h;
+
+ s->x = s->var_values[VAR_X] = av_expr_eval(s->x_pexpr, s->var_values, &s->prng);
+ s->y = s->var_values[VAR_Y] = av_expr_eval(s->y_pexpr, s->var_values, &s->prng);
+ s->x = s->var_values[VAR_X] = av_expr_eval(s->x_pexpr, s->var_values, &s->prng);
update_alpha(s);
+ update_color_with_alpha(s, &fontcolor , s->fontcolor );
+ update_color_with_alpha(s, &shadowcolor, s->shadowcolor);
+ update_color_with_alpha(s, &bordercolor, s->bordercolor);
+ update_color_with_alpha(s, &boxcolor , s->boxcolor );
- normalize_double(&s->x, s->var_values[VAR_X]);
- normalize_double(&s->y, s->var_values[VAR_Y]);
+ box_w = FFMIN(width - 1 , max_text_line_w);
+ box_h = FFMIN(height - 1, y + s->max_glyph_h);
- if (s->fix_bounds) {
- if (s->x < 0) s->x = 0;
- if (s->y < 0) s->y = 0;
- if ((unsigned)s->x + (unsigned)s->w > inlink->w)
- s->x = inlink->w - s->w;
- if ((unsigned)s->y + (unsigned)s->h > inlink->h)
- s->y = inlink->h - s->h;
+ /* draw box */
+ if (s->draw_box)
+ ff_blend_rectangle(&s->dc, &boxcolor,
+ frame->data, frame->linesize, width, height,
+ s->x - s->boxborderw, s->y - s->boxborderw,
+ box_w + s->boxborderw * 2, box_h + s->boxborderw * 2);
+
+ if (s->shadowx || s->shadowy) {
+ if ((ret = draw_glyphs(s, frame, width, height,
+ &shadowcolor, s->shadowx, s->shadowy, 0)) < 0)
+ return ret;
}
- s->x &= ~((1 << s->hsub) - 1);
- s->y &= ~((1 << s->vsub) - 1);
+ if (s->borderw) {
+ if ((ret = draw_glyphs(s, frame, width, height,
+ &bordercolor, 0, 0, s->borderw)) < 0)
+ return ret;
+ }
+ if ((ret = draw_glyphs(s, frame, width, height,
+ &fontcolor, 0, 0, 0)) < 0)
+ return ret;
- av_log(ctx, AV_LOG_TRACE, "n:%d t:%f x:%d y:%d x+w:%d y+h:%d\n",
- (int)s->var_values[VAR_N], s->var_values[VAR_T],
- s->x, s->y, s->x+s->w, s->y+s->h);
+ return 0;
+}
- if (s->draw)
- draw_text(inlink->dst, frame, frame->width, frame->height);
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ DrawTextContext *s = ctx->priv;
+ int ret;
+
+ if (s->reload) {
+ if ((ret = load_textfile(ctx)) < 0) {
+ av_frame_free(&frame);
+ return ret;
+ }
+#if CONFIG_LIBFRIBIDI
+ if (s->text_shaping)
+ if ((ret = shape_text(ctx)) < 0) {
+ av_frame_free(&frame);
+ return ret;
+ }
+#endif
+ }
+
+ s->var_values[VAR_N] = inlink->frame_count_out + s->start_number;
+ s->var_values[VAR_T] = frame->pts == AV_NOPTS_VALUE ?
+ NAN : frame->pts * av_q2d(inlink->time_base);
- s->var_values[VAR_N] += 1.0;
+ s->var_values[VAR_PICT_TYPE] = frame->pict_type;
+ s->metadata = av_frame_get_metadata(frame);
- return ff_filter_frame(inlink->dst->outputs[0], frame);
+ draw_text(ctx, frame, frame->width, frame->height);
+
+ av_log(ctx, AV_LOG_DEBUG, "n:%d t:%f text_w:%d text_h:%d x:%d y:%d\n",
+ (int)s->var_values[VAR_N], s->var_values[VAR_T],
+ (int)s->var_values[VAR_TEXT_W], (int)s->var_values[VAR_TEXT_H],
+ s->x, s->y);
+
+ return ff_filter_frame(outlink, frame);
}
static const AVFilterPad avfilter_vf_drawtext_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .get_video_buffer = ff_null_get_video_buffer,
- .filter_frame = filter_frame,
- .config_props = config_input,
- .needs_writable = 1,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ .needs_writable = 1,
},
{ NULL }
};
@@ -1003,7 +1393,8 @@ AVFilter ff_vf_drawtext = {
.init = init,
.uninit = uninit,
.query_formats = query_formats,
-
- .inputs = avfilter_vf_drawtext_inputs,
- .outputs = avfilter_vf_drawtext_outputs,
+ .inputs = avfilter_vf_drawtext_inputs,
+ .outputs = avfilter_vf_drawtext_outputs,
+ .process_command = command,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
diff --git a/libavfilter/vf_edgedetect.c b/libavfilter/vf_edgedetect.c
new file mode 100644
index 0000000000..ac88e02a11
--- /dev/null
+++ b/libavfilter/vf_edgedetect.c
@@ -0,0 +1,399 @@
+/*
+ * Copyright (c) 2012-2014 Clément Bœsch <u pkh me>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Edge detection filter
+ *
+ * @see https://en.wikipedia.org/wiki/Canny_edge_detector
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+enum FilterMode {
+ MODE_WIRES,
+ MODE_COLORMIX,
+ NB_MODE
+};
+
+struct plane_info {
+ uint8_t *tmpbuf;
+ uint16_t *gradients;
+ char *directions;
+};
+
+typedef struct {
+ const AVClass *class;
+ struct plane_info planes[3];
+ int nb_planes;
+ double low, high;
+ uint8_t low_u8, high_u8;
+ int mode;
+} EdgeDetectContext;
+
+#define OFFSET(x) offsetof(EdgeDetectContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption edgedetect_options[] = {
+ { "high", "set high threshold", OFFSET(high), AV_OPT_TYPE_DOUBLE, {.dbl=50/255.}, 0, 1, FLAGS },
+ { "low", "set low threshold", OFFSET(low), AV_OPT_TYPE_DOUBLE, {.dbl=20/255.}, 0, 1, FLAGS },
+ { "mode", "set mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=MODE_WIRES}, 0, NB_MODE-1, FLAGS, "mode" },
+ { "wires", "white/gray wires on black", 0, AV_OPT_TYPE_CONST, {.i64=MODE_WIRES}, INT_MIN, INT_MAX, FLAGS, "mode" },
+ { "colormix", "mix colors", 0, AV_OPT_TYPE_CONST, {.i64=MODE_COLORMIX}, INT_MIN, INT_MAX, FLAGS, "mode" },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(edgedetect);
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ EdgeDetectContext *edgedetect = ctx->priv;
+
+ edgedetect->low_u8 = edgedetect->low * 255. + .5;
+ edgedetect->high_u8 = edgedetect->high * 255. + .5;
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ const EdgeDetectContext *edgedetect = ctx->priv;
+ static const enum AVPixelFormat wires_pix_fmts[] = {AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE};
+ static const enum AVPixelFormat colormix_pix_fmts[] = {AV_PIX_FMT_GBRP, AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE};
+ AVFilterFormats *fmts_list;
+ const enum AVPixelFormat *pix_fmts = NULL;
+
+ if (edgedetect->mode == MODE_WIRES) {
+ pix_fmts = wires_pix_fmts;
+ } else if (edgedetect->mode == MODE_COLORMIX) {
+ pix_fmts = colormix_pix_fmts;
+ } else {
+ av_assert0(0);
+ }
+ fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static int config_props(AVFilterLink *inlink)
+{
+ int p;
+ AVFilterContext *ctx = inlink->dst;
+ EdgeDetectContext *edgedetect = ctx->priv;
+
+ edgedetect->nb_planes = inlink->format == AV_PIX_FMT_GRAY8 ? 1 : 3;
+ for (p = 0; p < edgedetect->nb_planes; p++) {
+ struct plane_info *plane = &edgedetect->planes[p];
+
+ plane->tmpbuf = av_malloc(inlink->w * inlink->h);
+ plane->gradients = av_calloc(inlink->w * inlink->h, sizeof(*plane->gradients));
+ plane->directions = av_malloc(inlink->w * inlink->h);
+ if (!plane->tmpbuf || !plane->gradients || !plane->directions)
+ return AVERROR(ENOMEM);
+ }
+ return 0;
+}
+
+static void gaussian_blur(AVFilterContext *ctx, int w, int h,
+ uint8_t *dst, int dst_linesize,
+ const uint8_t *src, int src_linesize)
+{
+ int i, j;
+
+ memcpy(dst, src, w); dst += dst_linesize; src += src_linesize;
+ memcpy(dst, src, w); dst += dst_linesize; src += src_linesize;
+ for (j = 2; j < h - 2; j++) {
+ dst[0] = src[0];
+ dst[1] = src[1];
+ for (i = 2; i < w - 2; i++) {
+ /* Gaussian mask of size 5x5 with sigma = 1.4 */
+ dst[i] = ((src[-2*src_linesize + i-2] + src[2*src_linesize + i-2]) * 2
+ + (src[-2*src_linesize + i-1] + src[2*src_linesize + i-1]) * 4
+ + (src[-2*src_linesize + i ] + src[2*src_linesize + i ]) * 5
+ + (src[-2*src_linesize + i+1] + src[2*src_linesize + i+1]) * 4
+ + (src[-2*src_linesize + i+2] + src[2*src_linesize + i+2]) * 2
+
+ + (src[ -src_linesize + i-2] + src[ src_linesize + i-2]) * 4
+ + (src[ -src_linesize + i-1] + src[ src_linesize + i-1]) * 9
+ + (src[ -src_linesize + i ] + src[ src_linesize + i ]) * 12
+ + (src[ -src_linesize + i+1] + src[ src_linesize + i+1]) * 9
+ + (src[ -src_linesize + i+2] + src[ src_linesize + i+2]) * 4
+
+ + src[i-2] * 5
+ + src[i-1] * 12
+ + src[i ] * 15
+ + src[i+1] * 12
+ + src[i+2] * 5) / 159;
+ }
+ dst[i ] = src[i ];
+ dst[i + 1] = src[i + 1];
+
+ dst += dst_linesize;
+ src += src_linesize;
+ }
+ memcpy(dst, src, w); dst += dst_linesize; src += src_linesize;
+ memcpy(dst, src, w);
+}
+
+enum {
+ DIRECTION_45UP,
+ DIRECTION_45DOWN,
+ DIRECTION_HORIZONTAL,
+ DIRECTION_VERTICAL,
+};
+
+static int get_rounded_direction(int gx, int gy)
+{
+ /* reference angles:
+ * tan( pi/8) = sqrt(2)-1
+ * tan(3pi/8) = sqrt(2)+1
+ * Gy/Gx is the tangent of the angle (theta), so Gy/Gx is compared against
+ * <ref-angle>, or more simply Gy against <ref-angle>*Gx
+ *
+ * Gx and Gy bounds = [-1020;1020], using 16-bit arithmetic:
+ * round((sqrt(2)-1) * (1<<16)) = 27146
+ * round((sqrt(2)+1) * (1<<16)) = 158218
+ */
+ if (gx) {
+ int tanpi8gx, tan3pi8gx;
+
+ if (gx < 0)
+ gx = -gx, gy = -gy;
+ gy <<= 16;
+ tanpi8gx = 27146 * gx;
+ tan3pi8gx = 158218 * gx;
+ if (gy > -tan3pi8gx && gy < -tanpi8gx) return DIRECTION_45UP;
+ if (gy > -tanpi8gx && gy < tanpi8gx) return DIRECTION_HORIZONTAL;
+ if (gy > tanpi8gx && gy < tan3pi8gx) return DIRECTION_45DOWN;
+ }
+ return DIRECTION_VERTICAL;
+}
+
+static void sobel(int w, int h,
+ uint16_t *dst, int dst_linesize,
+ int8_t *dir, int dir_linesize,
+ const uint8_t *src, int src_linesize)
+{
+ int i, j;
+
+ for (j = 1; j < h - 1; j++) {
+ dst += dst_linesize;
+ dir += dir_linesize;
+ src += src_linesize;
+ for (i = 1; i < w - 1; i++) {
+ const int gx =
+ -1*src[-src_linesize + i-1] + 1*src[-src_linesize + i+1]
+ -2*src[ i-1] + 2*src[ i+1]
+ -1*src[ src_linesize + i-1] + 1*src[ src_linesize + i+1];
+ const int gy =
+ -1*src[-src_linesize + i-1] + 1*src[ src_linesize + i-1]
+ -2*src[-src_linesize + i ] + 2*src[ src_linesize + i ]
+ -1*src[-src_linesize + i+1] + 1*src[ src_linesize + i+1];
+
+ dst[i] = FFABS(gx) + FFABS(gy);
+ dir[i] = get_rounded_direction(gx, gy);
+ }
+ }
+}
+
+static void non_maximum_suppression(int w, int h,
+ uint8_t *dst, int dst_linesize,
+ const int8_t *dir, int dir_linesize,
+ const uint16_t *src, int src_linesize)
+{
+ int i, j;
+
+#define COPY_MAXIMA(ay, ax, by, bx) do { \
+ if (src[i] > src[(ay)*src_linesize + i+(ax)] && \
+ src[i] > src[(by)*src_linesize + i+(bx)]) \
+ dst[i] = av_clip_uint8(src[i]); \
+} while (0)
+
+ for (j = 1; j < h - 1; j++) {
+ dst += dst_linesize;
+ dir += dir_linesize;
+ src += src_linesize;
+ for (i = 1; i < w - 1; i++) {
+ switch (dir[i]) {
+ case DIRECTION_45UP: COPY_MAXIMA( 1, -1, -1, 1); break;
+ case DIRECTION_45DOWN: COPY_MAXIMA(-1, -1, 1, 1); break;
+ case DIRECTION_HORIZONTAL: COPY_MAXIMA( 0, -1, 0, 1); break;
+ case DIRECTION_VERTICAL: COPY_MAXIMA(-1, 0, 1, 0); break;
+ }
+ }
+ }
+}
+
+static void double_threshold(int low, int high, int w, int h,
+ uint8_t *dst, int dst_linesize,
+ const uint8_t *src, int src_linesize)
+{
+ int i, j;
+
+ for (j = 0; j < h; j++) {
+ for (i = 0; i < w; i++) {
+ if (src[i] > high) {
+ dst[i] = src[i];
+ continue;
+ }
+
+ if ((!i || i == w - 1 || !j || j == h - 1) &&
+ src[i] > low &&
+ (src[-src_linesize + i-1] > high ||
+ src[-src_linesize + i ] > high ||
+ src[-src_linesize + i+1] > high ||
+ src[ i-1] > high ||
+ src[ i+1] > high ||
+ src[ src_linesize + i-1] > high ||
+ src[ src_linesize + i ] > high ||
+ src[ src_linesize + i+1] > high))
+ dst[i] = src[i];
+ else
+ dst[i] = 0;
+ }
+ dst += dst_linesize;
+ src += src_linesize;
+ }
+}
+
+static void color_mix(int w, int h,
+ uint8_t *dst, int dst_linesize,
+ const uint8_t *src, int src_linesize)
+{
+ int i, j;
+
+ for (j = 0; j < h; j++) {
+ for (i = 0; i < w; i++)
+ dst[i] = (dst[i] + src[i]) >> 1;
+ dst += dst_linesize;
+ src += src_linesize;
+ }
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ EdgeDetectContext *edgedetect = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ int p, direct = 0;
+ AVFrame *out;
+
+ if (edgedetect->mode != MODE_COLORMIX && av_frame_is_writable(in)) {
+ direct = 1;
+ out = in;
+ } else {
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+ }
+
+ for (p = 0; p < edgedetect->nb_planes; p++) {
+ struct plane_info *plane = &edgedetect->planes[p];
+ uint8_t *tmpbuf = plane->tmpbuf;
+ uint16_t *gradients = plane->gradients;
+ int8_t *directions = plane->directions;
+
+ /* gaussian filter to reduce noise */
+ gaussian_blur(ctx, inlink->w, inlink->h,
+ tmpbuf, inlink->w,
+ in->data[p], in->linesize[p]);
+
+ /* compute the 16-bits gradients and directions for the next step */
+ sobel(inlink->w, inlink->h,
+ gradients, inlink->w,
+ directions,inlink->w,
+ tmpbuf, inlink->w);
+
+ /* non_maximum_suppression() will actually keep & clip what's necessary and
+ * ignore the rest, so we need a clean output buffer */
+ memset(tmpbuf, 0, inlink->w * inlink->h);
+ non_maximum_suppression(inlink->w, inlink->h,
+ tmpbuf, inlink->w,
+ directions,inlink->w,
+ gradients, inlink->w);
+
+ /* keep high values, or low values surrounded by high values */
+ double_threshold(edgedetect->low_u8, edgedetect->high_u8,
+ inlink->w, inlink->h,
+ out->data[p], out->linesize[p],
+ tmpbuf, inlink->w);
+
+ if (edgedetect->mode == MODE_COLORMIX) {
+ color_mix(inlink->w, inlink->h,
+ out->data[p], out->linesize[p],
+ in->data[p], in->linesize[p]);
+ }
+ }
+
+ if (!direct)
+ av_frame_free(&in);
+ return ff_filter_frame(outlink, out);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ int p;
+ EdgeDetectContext *edgedetect = ctx->priv;
+
+ for (p = 0; p < edgedetect->nb_planes; p++) {
+ struct plane_info *plane = &edgedetect->planes[p];
+ av_freep(&plane->tmpbuf);
+ av_freep(&plane->gradients);
+ av_freep(&plane->directions);
+ }
+}
+
+static const AVFilterPad edgedetect_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_props,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad edgedetect_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_edgedetect = {
+ .name = "edgedetect",
+ .description = NULL_IF_CONFIG_SMALL("Detect and draw edge."),
+ .priv_size = sizeof(EdgeDetectContext),
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = edgedetect_inputs,
+ .outputs = edgedetect_outputs,
+ .priv_class = &edgedetect_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+};
diff --git a/libavfilter/vf_elbg.c b/libavfilter/vf_elbg.c
new file mode 100644
index 0000000000..396af82f77
--- /dev/null
+++ b/libavfilter/vf_elbg.c
@@ -0,0 +1,264 @@
+/*
+ * Copyright (c) 2013 Stefano Sabatini
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * video quantizer filter based on ELBG
+ */
+
+#include "libavcodec/elbg.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/random_seed.h"
+
+#include "avfilter.h"
+#include "drawutils.h"
+#include "internal.h"
+#include "video.h"
+
+typedef struct ELBGContext {
+ const AVClass *class;
+ AVLFG lfg;
+ unsigned int lfg_seed;
+ int max_steps_nb;
+ int *codeword;
+ int codeword_length;
+ int *codeword_closest_codebook_idxs;
+ int *codebook;
+ int codebook_length;
+ const AVPixFmtDescriptor *pix_desc;
+ uint8_t rgba_map[4];
+ int pal8;
+} ELBGContext;
+
+#define OFFSET(x) offsetof(ELBGContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption elbg_options[] = {
+ { "codebook_length", "set codebook length", OFFSET(codebook_length), AV_OPT_TYPE_INT, { .i64 = 256 }, 1, INT_MAX, FLAGS },
+ { "l", "set codebook length", OFFSET(codebook_length), AV_OPT_TYPE_INT, { .i64 = 256 }, 1, INT_MAX, FLAGS },
+ { "nb_steps", "set max number of steps used to compute the mapping", OFFSET(max_steps_nb), AV_OPT_TYPE_INT, { .i64 = 1 }, 1, INT_MAX, FLAGS },
+ { "n", "set max number of steps used to compute the mapping", OFFSET(max_steps_nb), AV_OPT_TYPE_INT, { .i64 = 1 }, 1, INT_MAX, FLAGS },
+ { "seed", "set the random seed", OFFSET(lfg_seed), AV_OPT_TYPE_INT, {.i64 = -1}, -1, UINT32_MAX, FLAGS },
+ { "s", "set the random seed", OFFSET(lfg_seed), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, UINT32_MAX, FLAGS },
+ { "pal8", "set the pal8 output", OFFSET(pal8), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(elbg);
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ ELBGContext *elbg = ctx->priv;
+
+ if (elbg->pal8 && elbg->codebook_length > 256) {
+ av_log(ctx, AV_LOG_ERROR, "pal8 output allows max 256 codebook length.\n");
+ return AVERROR(EINVAL);
+ }
+
+ if (elbg->lfg_seed == -1)
+ elbg->lfg_seed = av_get_random_seed();
+
+ av_lfg_init(&elbg->lfg, elbg->lfg_seed);
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ ELBGContext *elbg = ctx->priv;
+ int ret;
+
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_ARGB, AV_PIX_FMT_RGBA, AV_PIX_FMT_ABGR, AV_PIX_FMT_BGRA,
+ AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
+ AV_PIX_FMT_NONE
+ };
+ if (!elbg->pal8) {
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+ } else {
+ static const enum AVPixelFormat pal8_fmt[] = {
+ AV_PIX_FMT_PAL8,
+ AV_PIX_FMT_NONE
+ };
+ if ((ret = ff_formats_ref(ff_make_format_list(pix_fmts), &ctx->inputs[0]->out_formats)) < 0 ||
+ (ret = ff_formats_ref(ff_make_format_list(pal8_fmt), &ctx->outputs[0]->in_formats)) < 0)
+ return ret;
+ }
+ return 0;
+}
+
+#define NB_COMPONENTS 3
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ ELBGContext *elbg = ctx->priv;
+
+ elbg->pix_desc = av_pix_fmt_desc_get(inlink->format);
+ elbg->codeword_length = inlink->w * inlink->h;
+ elbg->codeword = av_realloc_f(elbg->codeword, elbg->codeword_length,
+ NB_COMPONENTS * sizeof(*elbg->codeword));
+ if (!elbg->codeword)
+ return AVERROR(ENOMEM);
+
+ elbg->codeword_closest_codebook_idxs =
+ av_realloc_f(elbg->codeword_closest_codebook_idxs, elbg->codeword_length,
+ sizeof(*elbg->codeword_closest_codebook_idxs));
+ if (!elbg->codeword_closest_codebook_idxs)
+ return AVERROR(ENOMEM);
+
+ elbg->codebook = av_realloc_f(elbg->codebook, elbg->codebook_length,
+ NB_COMPONENTS * sizeof(*elbg->codebook));
+ if (!elbg->codebook)
+ return AVERROR(ENOMEM);
+
+ ff_fill_rgba_map(elbg->rgba_map, inlink->format);
+
+ return 0;
+}
+
+#define R 0
+#define G 1
+#define B 2
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
+{
+ ELBGContext *elbg = inlink->dst->priv;
+ int i, j, k;
+ uint8_t *p, *p0;
+
+ const uint8_t r_idx = elbg->rgba_map[R];
+ const uint8_t g_idx = elbg->rgba_map[G];
+ const uint8_t b_idx = elbg->rgba_map[B];
+
+ /* build the codeword */
+ p0 = frame->data[0];
+ k = 0;
+ for (i = 0; i < inlink->h; i++) {
+ p = p0;
+ for (j = 0; j < inlink->w; j++) {
+ elbg->codeword[k++] = p[r_idx];
+ elbg->codeword[k++] = p[g_idx];
+ elbg->codeword[k++] = p[b_idx];
+ p += elbg->pix_desc->nb_components;
+ }
+ p0 += frame->linesize[0];
+ }
+
+ /* compute the codebook */
+ avpriv_init_elbg(elbg->codeword, NB_COMPONENTS, elbg->codeword_length,
+ elbg->codebook, elbg->codebook_length, elbg->max_steps_nb,
+ elbg->codeword_closest_codebook_idxs, &elbg->lfg);
+ avpriv_do_elbg(elbg->codeword, NB_COMPONENTS, elbg->codeword_length,
+ elbg->codebook, elbg->codebook_length, elbg->max_steps_nb,
+ elbg->codeword_closest_codebook_idxs, &elbg->lfg);
+
+ if (elbg->pal8) {
+ AVFilterLink *outlink = inlink->dst->outputs[0];
+ AVFrame *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ uint32_t *pal;
+
+ if (!out)
+ return AVERROR(ENOMEM);
+ out->pts = frame->pts;
+ av_frame_free(&frame);
+ pal = (uint32_t *)out->data[1];
+ p0 = (uint8_t *)out->data[0];
+
+ for (i = 0; i < elbg->codebook_length; i++) {
+ pal[i] = 0xFFU << 24 |
+ (elbg->codebook[i*3 ] << 16) |
+ (elbg->codebook[i*3+1] << 8) |
+ elbg->codebook[i*3+2];
+ }
+
+ k = 0;
+ for (i = 0; i < inlink->h; i++) {
+ p = p0;
+ for (j = 0; j < inlink->w; j++, p++) {
+ p[0] = elbg->codeword_closest_codebook_idxs[k++];
+ }
+ p0 += out->linesize[0];
+ }
+
+ return ff_filter_frame(outlink, out);
+ }
+
+ /* fill the output with the codebook values */
+ p0 = frame->data[0];
+
+ k = 0;
+ for (i = 0; i < inlink->h; i++) {
+ p = p0;
+ for (j = 0; j < inlink->w; j++) {
+ int cb_idx = NB_COMPONENTS * elbg->codeword_closest_codebook_idxs[k++];
+ p[r_idx] = elbg->codebook[cb_idx];
+ p[g_idx] = elbg->codebook[cb_idx+1];
+ p[b_idx] = elbg->codebook[cb_idx+2];
+ p += elbg->pix_desc->nb_components;
+ }
+ p0 += frame->linesize[0];
+ }
+
+ return ff_filter_frame(inlink->dst->outputs[0], frame);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ ELBGContext *elbg = ctx->priv;
+
+ av_freep(&elbg->codebook);
+ av_freep(&elbg->codeword);
+ av_freep(&elbg->codeword_closest_codebook_idxs);
+}
+
+static const AVFilterPad elbg_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ .needs_writable = 1,
+ },
+ { NULL }
+};
+
+static const AVFilterPad elbg_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_elbg = {
+ .name = "elbg",
+ .description = NULL_IF_CONFIG_SMALL("Apply posterize effect, using the ELBG algorithm."),
+ .priv_size = sizeof(ELBGContext),
+ .priv_class = &elbg_class,
+ .query_formats = query_formats,
+ .init = init,
+ .uninit = uninit,
+ .inputs = elbg_inputs,
+ .outputs = elbg_outputs,
+};
diff --git a/libavfilter/vf_eq.c b/libavfilter/vf_eq.c
new file mode 100644
index 0000000000..c450d5ed02
--- /dev/null
+++ b/libavfilter/vf_eq.c
@@ -0,0 +1,389 @@
+/*
+ * Original MPlayer filters by Richard Felker, Hampa Hug, Daniel Moreno,
+ * and Michael Niedermeyer.
+ *
+ * Copyright (c) 2014 James Darnley <james.darnley@gmail.com>
+ * Copyright (c) 2015 Arwa Arif <arwaarif1994@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/**
+ * @file
+ * very simple video equalizer
+ */
+
+#include "libavfilter/internal.h"
+#include "libavutil/common.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "vf_eq.h"
+
+static void create_lut(EQParameters *param)
+{
+ int i;
+ double g = 1.0 / param->gamma;
+ double lw = 1.0 - param->gamma_weight;
+
+ for (i = 0; i < 256; i++) {
+ double v = i / 255.0;
+ v = param->contrast * (v - 0.5) + 0.5 + param->brightness;
+
+ if (v <= 0.0) {
+ param->lut[i] = 0;
+ } else {
+ v = v * lw + pow(v, g) * param->gamma_weight;
+
+ if (v >= 1.0)
+ param->lut[i] = 255;
+ else
+ param->lut[i] = 256.0 * v;
+ }
+ }
+
+ param->lut_clean = 1;
+}
+
+static void apply_lut(EQParameters *param, uint8_t *dst, int dst_stride,
+ const uint8_t *src, int src_stride, int w, int h)
+{
+ int x, y;
+
+ if (!param->lut_clean)
+ create_lut(param);
+
+ for (y = 0; y < h; y++) {
+ for (x = 0; x < w; x++) {
+ dst[y * dst_stride + x] = param->lut[src[y * src_stride + x]];
+ }
+ }
+}
+
+static void process_c(EQParameters *param, uint8_t *dst, int dst_stride,
+ const uint8_t *src, int src_stride, int w, int h)
+{
+ int x, y, pel;
+
+ int contrast = (int) (param->contrast * 256 * 16);
+ int brightness = ((int) (100.0 * param->brightness + 100.0) * 511) / 200 - 128 - contrast / 32;
+
+ for (y = 0; y < h; y++) {
+ for (x = 0; x < w; x++) {
+ pel = ((src[y * src_stride + x] * contrast) >> 12) + brightness;
+
+ if (pel & ~255)
+ pel = (-pel) >> 31;
+
+ dst[y * dst_stride + x] = pel;
+ }
+ }
+}
+
+static void check_values(EQParameters *param, EQContext *eq)
+{
+ if (param->contrast == 1.0 && param->brightness == 0.0 && param->gamma == 1.0)
+ param->adjust = NULL;
+ else if (param->gamma == 1.0 && fabs(param->contrast) < 7.9)
+ param->adjust = eq->process;
+ else
+ param->adjust = apply_lut;
+}
+
+static void set_contrast(EQContext *eq)
+{
+ eq->contrast = av_clipf(av_expr_eval(eq->contrast_pexpr, eq->var_values, eq), -1000.0, 1000.0);
+ eq->param[0].contrast = eq->contrast;
+ eq->param[0].lut_clean = 0;
+ check_values(&eq->param[0], eq);
+}
+
+static void set_brightness(EQContext *eq)
+{
+ eq->brightness = av_clipf(av_expr_eval(eq->brightness_pexpr, eq->var_values, eq), -1.0, 1.0);
+ eq->param[0].brightness = eq->brightness;
+ eq->param[0].lut_clean = 0;
+ check_values(&eq->param[0], eq);
+}
+
+static void set_gamma(EQContext *eq)
+{
+ int i;
+
+ eq->gamma = av_clipf(av_expr_eval(eq->gamma_pexpr, eq->var_values, eq), 0.1, 10.0);
+ eq->gamma_r = av_clipf(av_expr_eval(eq->gamma_r_pexpr, eq->var_values, eq), 0.1, 10.0);
+ eq->gamma_g = av_clipf(av_expr_eval(eq->gamma_g_pexpr, eq->var_values, eq), 0.1, 10.0);
+ eq->gamma_b = av_clipf(av_expr_eval(eq->gamma_b_pexpr, eq->var_values, eq), 0.1, 10.0);
+ eq->gamma_weight = av_clipf(av_expr_eval(eq->gamma_weight_pexpr, eq->var_values, eq), 0.0, 1.0);
+
+ eq->param[0].gamma = eq->gamma * eq->gamma_g;
+ eq->param[1].gamma = sqrt(eq->gamma_b / eq->gamma_g);
+ eq->param[2].gamma = sqrt(eq->gamma_r / eq->gamma_g);
+
+ for (i = 0; i < 3; i++) {
+ eq->param[i].gamma_weight = eq->gamma_weight;
+ eq->param[i].lut_clean = 0;
+ check_values(&eq->param[i], eq);
+ }
+}
+
+static void set_saturation(EQContext *eq)
+{
+ int i;
+
+ eq->saturation = av_clipf(av_expr_eval(eq->saturation_pexpr, eq->var_values, eq), 0.0, 3.0);
+
+ for (i = 1; i < 3; i++) {
+ eq->param[i].contrast = eq->saturation;
+ eq->param[i].lut_clean = 0;
+ check_values(&eq->param[i], eq);
+ }
+}
+
+static int set_expr(AVExpr **pexpr, const char *expr, const char *option, void *log_ctx)
+{
+ int ret;
+ AVExpr *old = NULL;
+
+ if (*pexpr)
+ old = *pexpr;
+ ret = av_expr_parse(pexpr, expr, var_names, NULL, NULL, NULL, NULL, 0, log_ctx);
+ if (ret < 0) {
+ av_log(log_ctx, AV_LOG_ERROR,
+ "Error when parsing the expression '%s' for %s\n",
+ expr, option);
+ *pexpr = old;
+ return ret;
+ }
+
+ av_expr_free(old);
+ return 0;
+}
+
+static int initialize(AVFilterContext *ctx)
+{
+ EQContext *eq = ctx->priv;
+ int ret;
+
+ eq->process = process_c;
+
+ if ((ret = set_expr(&eq->contrast_pexpr, eq->contrast_expr, "contrast", ctx)) < 0 ||
+ (ret = set_expr(&eq->brightness_pexpr, eq->brightness_expr, "brightness", ctx)) < 0 ||
+ (ret = set_expr(&eq->saturation_pexpr, eq->saturation_expr, "saturation", ctx)) < 0 ||
+ (ret = set_expr(&eq->gamma_pexpr, eq->gamma_expr, "gamma", ctx)) < 0 ||
+ (ret = set_expr(&eq->gamma_r_pexpr, eq->gamma_r_expr, "gamma_r", ctx)) < 0 ||
+ (ret = set_expr(&eq->gamma_g_pexpr, eq->gamma_g_expr, "gamma_g", ctx)) < 0 ||
+ (ret = set_expr(&eq->gamma_b_pexpr, eq->gamma_b_expr, "gamma_b", ctx)) < 0 ||
+ (ret = set_expr(&eq->gamma_weight_pexpr, eq->gamma_weight_expr, "gamma_weight", ctx)) < 0 )
+ return ret;
+
+ if (ARCH_X86)
+ ff_eq_init_x86(eq);
+
+ if (eq->eval_mode == EVAL_MODE_INIT) {
+ set_gamma(eq);
+ set_contrast(eq);
+ set_brightness(eq);
+ set_saturation(eq);
+ }
+
+ return 0;
+}
+
+static void uninit(AVFilterContext *ctx)
+{
+ EQContext *eq = ctx->priv;
+
+ av_expr_free(eq->contrast_pexpr); eq->contrast_pexpr = NULL;
+ av_expr_free(eq->brightness_pexpr); eq->brightness_pexpr = NULL;
+ av_expr_free(eq->saturation_pexpr); eq->saturation_pexpr = NULL;
+ av_expr_free(eq->gamma_pexpr); eq->gamma_pexpr = NULL;
+ av_expr_free(eq->gamma_weight_pexpr); eq->gamma_weight_pexpr = NULL;
+ av_expr_free(eq->gamma_r_pexpr); eq->gamma_r_pexpr = NULL;
+ av_expr_free(eq->gamma_g_pexpr); eq->gamma_g_pexpr = NULL;
+ av_expr_free(eq->gamma_b_pexpr); eq->gamma_b_pexpr = NULL;
+}
+
+static int config_props(AVFilterLink *inlink)
+{
+ EQContext *eq = inlink->dst->priv;
+
+ eq->var_values[VAR_N] = 0;
+ eq->var_values[VAR_R] = inlink->frame_rate.num == 0 || inlink->frame_rate.den == 0 ?
+ NAN : av_q2d(inlink->frame_rate);
+
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pixel_fmts_eq[] = {
+ AV_PIX_FMT_GRAY8,
+ AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_YUV422P,
+ AV_PIX_FMT_YUV444P,
+ AV_PIX_FMT_NONE
+ };
+ AVFilterFormats *fmts_list = ff_make_format_list(pixel_fmts_eq);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+#define TS2T(ts, tb) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts) * av_q2d(tb))
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = inlink->dst->outputs[0];
+ EQContext *eq = ctx->priv;
+ AVFrame *out;
+ int64_t pos = av_frame_get_pkt_pos(in);
+ const AVPixFmtDescriptor *desc;
+ int i;
+
+ out = ff_get_video_buffer(outlink, inlink->w, inlink->h);
+ if (!out)
+ return AVERROR(ENOMEM);
+
+ av_frame_copy_props(out, in);
+ desc = av_pix_fmt_desc_get(inlink->format);
+
+ eq->var_values[VAR_N] = inlink->frame_count_out;
+ eq->var_values[VAR_POS] = pos == -1 ? NAN : pos;
+ eq->var_values[VAR_T] = TS2T(in->pts, inlink->time_base);
+
+ if (eq->eval_mode == EVAL_MODE_FRAME) {
+ set_gamma(eq);
+ set_contrast(eq);
+ set_brightness(eq);
+ set_saturation(eq);
+ }
+
+ for (i = 0; i < desc->nb_components; i++) {
+ int w = inlink->w;
+ int h = inlink->h;
+
+ if (i == 1 || i == 2) {
+ w = AV_CEIL_RSHIFT(w, desc->log2_chroma_w);
+ h = AV_CEIL_RSHIFT(h, desc->log2_chroma_h);
+ }
+
+ if (eq->param[i].adjust)
+ eq->param[i].adjust(&eq->param[i], out->data[i], out->linesize[i],
+ in->data[i], in->linesize[i], w, h);
+ else
+ av_image_copy_plane(out->data[i], out->linesize[i],
+ in->data[i], in->linesize[i], w, h);
+ }
+
+ av_frame_free(&in);
+ return ff_filter_frame(outlink, out);
+}
+
+static inline int set_param(AVExpr **pexpr, const char *args, const char *cmd,
+ void (*set_fn)(EQContext *eq), AVFilterContext *ctx)
+{
+ EQContext *eq = ctx->priv;
+ int ret;
+ if ((ret = set_expr(pexpr, args, cmd, ctx)) < 0)
+ return ret;
+ if (eq->eval_mode == EVAL_MODE_INIT)
+ set_fn(eq);
+ return 0;
+}
+
+static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
+ char *res, int res_len, int flags)
+{
+ EQContext *eq = ctx->priv;
+
+#define SET_PARAM(param_name, set_fn_name) \
+ if (!strcmp(cmd, #param_name)) return set_param(&eq->param_name##_pexpr, args, cmd, set_##set_fn_name, ctx);
+
+ SET_PARAM(contrast, contrast)
+ else SET_PARAM(brightness, brightness)
+ else SET_PARAM(saturation, saturation)
+ else SET_PARAM(gamma, gamma)
+ else SET_PARAM(gamma_r, gamma)
+ else SET_PARAM(gamma_g, gamma)
+ else SET_PARAM(gamma_b, gamma)
+ else SET_PARAM(gamma_weight, gamma)
+ else return AVERROR(ENOSYS);
+}
+
+static const AVFilterPad eq_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_props,
+ },
+ { NULL }
+};
+
+static const AVFilterPad eq_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+#define OFFSET(x) offsetof(EQContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption eq_options[] = {
+ { "contrast", "set the contrast adjustment, negative values give a negative image",
+ OFFSET(contrast_expr), AV_OPT_TYPE_STRING, {.str = "1.0"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "brightness", "set the brightness adjustment",
+ OFFSET(brightness_expr), AV_OPT_TYPE_STRING, {.str = "0.0"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "saturation", "set the saturation adjustment",
+ OFFSET(saturation_expr), AV_OPT_TYPE_STRING, {.str = "1.0"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "gamma", "set the initial gamma value",
+ OFFSET(gamma_expr), AV_OPT_TYPE_STRING, {.str = "1.0"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "gamma_r", "gamma value for red",
+ OFFSET(gamma_r_expr), AV_OPT_TYPE_STRING, {.str = "1.0"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "gamma_g", "gamma value for green",
+ OFFSET(gamma_g_expr), AV_OPT_TYPE_STRING, {.str = "1.0"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "gamma_b", "gamma value for blue",
+ OFFSET(gamma_b_expr), AV_OPT_TYPE_STRING, {.str = "1.0"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "gamma_weight", "set the gamma weight which reduces the effect of gamma on bright areas",
+ OFFSET(gamma_weight_expr), AV_OPT_TYPE_STRING, {.str = "1.0"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "eval", "specify when to evaluate expressions", OFFSET(eval_mode), AV_OPT_TYPE_INT, {.i64 = EVAL_MODE_INIT}, 0, EVAL_MODE_NB-1, FLAGS, "eval" },
+ { "init", "eval expressions once during initialization", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_INIT}, .flags = FLAGS, .unit = "eval" },
+ { "frame", "eval expressions per-frame", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_FRAME}, .flags = FLAGS, .unit = "eval" },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(eq);
+
+AVFilter ff_vf_eq = {
+ .name = "eq",
+ .description = NULL_IF_CONFIG_SMALL("Adjust brightness, contrast, gamma, and saturation."),
+ .priv_size = sizeof(EQContext),
+ .priv_class = &eq_class,
+ .inputs = eq_inputs,
+ .outputs = eq_outputs,
+ .process_command = process_command,
+ .query_formats = query_formats,
+ .init = initialize,
+ .uninit = uninit,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+};
diff --git a/libavfilter/vf_eq.h b/libavfilter/vf_eq.h
new file mode 100644
index 0000000000..8525048b3f
--- /dev/null
+++ b/libavfilter/vf_eq.h
@@ -0,0 +1,105 @@
+/*
+ * Original MPlayer filters by Richard Felker, Hampa Hug, Daniel Moreno,
+ * and Michael Niedermeyer.
+ *
+ * Copyright (c) 2014 James Darnley <james.darnley@gmail.com>
+ * Copyright (c) 2015 Arwa Arif <arwaarif1994@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef AVFILTER_EQ_H
+#define AVFILTER_EQ_H
+
+#include "avfilter.h"
+#include "libavutil/eval.h"
+
+static const char *const var_names[] = {
+ "n", // frame count
+ "pos", // frame position
+ "r", // frame rate
+ "t", // timestamp expressed in seconds
+ NULL
+};
+
+enum var_name {
+ VAR_N,
+ VAR_POS,
+ VAR_R,
+ VAR_T,
+ VAR_NB
+};
+
+typedef struct EQParameters {
+ void (*adjust)(struct EQParameters *eq, uint8_t *dst, int dst_stride,
+ const uint8_t *src, int src_stride, int w, int h);
+
+ uint8_t lut[256];
+
+ double brightness, contrast, gamma, gamma_weight;
+ int lut_clean;
+
+} EQParameters;
+
+typedef struct {
+ const AVClass *class;
+
+ EQParameters param[3];
+
+ char *contrast_expr;
+ AVExpr *contrast_pexpr;
+ double contrast;
+
+ char *brightness_expr;
+ AVExpr *brightness_pexpr;
+ double brightness;
+
+ char *saturation_expr;
+ AVExpr *saturation_pexpr;
+ double saturation;
+
+ char *gamma_expr;
+ AVExpr *gamma_pexpr;
+ double gamma;
+
+ char *gamma_weight_expr;
+ AVExpr *gamma_weight_pexpr;
+ double gamma_weight;
+
+ char *gamma_r_expr;
+ AVExpr *gamma_r_pexpr;
+ double gamma_r;
+
+ char *gamma_g_expr;
+ AVExpr *gamma_g_pexpr;
+ double gamma_g;
+
+ char *gamma_b_expr;
+ AVExpr *gamma_b_pexpr;
+ double gamma_b;
+
+ double var_values[VAR_NB];
+
+ void (*process)(struct EQParameters *par, uint8_t *dst, int dst_stride,
+ const uint8_t *src, int src_stride, int w, int h);
+
+ enum EvalMode { EVAL_MODE_INIT, EVAL_MODE_FRAME, EVAL_MODE_NB } eval_mode;
+} EQContext;
+
+void ff_eq_init_x86(EQContext *eq);
+
+#endif /* AVFILTER_EQ_H */
diff --git a/libavfilter/vf_extractplanes.c b/libavfilter/vf_extractplanes.c
new file mode 100644
index 0000000000..65bba33d90
--- /dev/null
+++ b/libavfilter/vf_extractplanes.c
@@ -0,0 +1,408 @@
+/*
+ * Copyright (c) 2013 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/avstring.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+
+#define FF_INTERNAL_FIELDS 1
+#include "libavfilter/framequeue.h"
+
+#include "avfilter.h"
+#include "drawutils.h"
+#include "internal.h"
+
+#define PLANE_R 0x01
+#define PLANE_G 0x02
+#define PLANE_B 0x04
+#define PLANE_A 0x08
+#define PLANE_Y 0x10
+#define PLANE_U 0x20
+#define PLANE_V 0x40
+
+typedef struct {
+ const AVClass *class;
+ int requested_planes;
+ int map[4];
+ int linesize[4];
+ int is_packed;
+ int depth;
+ int step;
+} ExtractPlanesContext;
+
+#define OFFSET(x) offsetof(ExtractPlanesContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption extractplanes_options[] = {
+ { "planes", "set planes", OFFSET(requested_planes), AV_OPT_TYPE_FLAGS, {.i64=1}, 1, 0xff, FLAGS, "flags"},
+ { "y", "set luma plane", 0, AV_OPT_TYPE_CONST, {.i64=PLANE_Y}, 0, 0, FLAGS, "flags"},
+ { "u", "set u plane", 0, AV_OPT_TYPE_CONST, {.i64=PLANE_U}, 0, 0, FLAGS, "flags"},
+ { "v", "set v plane", 0, AV_OPT_TYPE_CONST, {.i64=PLANE_V}, 0, 0, FLAGS, "flags"},
+ { "r", "set red plane", 0, AV_OPT_TYPE_CONST, {.i64=PLANE_R}, 0, 0, FLAGS, "flags"},
+ { "g", "set green plane", 0, AV_OPT_TYPE_CONST, {.i64=PLANE_G}, 0, 0, FLAGS, "flags"},
+ { "b", "set blue plane", 0, AV_OPT_TYPE_CONST, {.i64=PLANE_B}, 0, 0, FLAGS, "flags"},
+ { "a", "set alpha plane", 0, AV_OPT_TYPE_CONST, {.i64=PLANE_A}, 0, 0, FLAGS, "flags"},
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(extractplanes);
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat in_pixfmts_le[] = {
+ AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_YUV440P,
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUVA420P,
+ AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVA422P,
+ AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P,
+ AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ444P,
+ AV_PIX_FMT_YUVJ411P,
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUVA444P,
+ AV_PIX_FMT_YUV420P16LE, AV_PIX_FMT_YUVA420P16LE,
+ AV_PIX_FMT_YUV422P16LE, AV_PIX_FMT_YUVA422P16LE,
+ AV_PIX_FMT_YUV444P16LE, AV_PIX_FMT_YUVA444P16LE,
+ AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY8A,
+ AV_PIX_FMT_YA16LE, AV_PIX_FMT_GRAY16LE,
+ AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
+ AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA,
+ AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR,
+ AV_PIX_FMT_RGB0, AV_PIX_FMT_BGR0,
+ AV_PIX_FMT_0RGB, AV_PIX_FMT_0BGR,
+ AV_PIX_FMT_RGB48LE, AV_PIX_FMT_BGR48LE,
+ AV_PIX_FMT_RGBA64LE, AV_PIX_FMT_BGRA64LE,
+ AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP,
+ AV_PIX_FMT_GBRP16LE, AV_PIX_FMT_GBRAP16LE,
+ AV_PIX_FMT_YUV420P10LE,
+ AV_PIX_FMT_YUV422P10LE,
+ AV_PIX_FMT_YUV444P10LE,
+ AV_PIX_FMT_YUV440P10LE,
+ AV_PIX_FMT_YUVA420P10LE,
+ AV_PIX_FMT_YUVA422P10LE,
+ AV_PIX_FMT_YUVA444P10LE,
+ AV_PIX_FMT_YUV420P12LE,
+ AV_PIX_FMT_YUV422P12LE,
+ AV_PIX_FMT_YUV444P12LE,
+ AV_PIX_FMT_YUV440P12LE,
+ AV_PIX_FMT_GBRP10LE, AV_PIX_FMT_GBRAP10LE,
+ AV_PIX_FMT_GBRP12LE, AV_PIX_FMT_GBRAP12LE,
+ AV_PIX_FMT_NONE,
+ };
+ static const enum AVPixelFormat in_pixfmts_be[] = {
+ AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_YUV440P,
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUVA420P,
+ AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVA422P,
+ AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P,
+ AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ444P,
+ AV_PIX_FMT_YUVJ411P,
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUVA444P,
+ AV_PIX_FMT_YUV420P16BE, AV_PIX_FMT_YUVA420P16BE,
+ AV_PIX_FMT_YUV422P16BE, AV_PIX_FMT_YUVA422P16BE,
+ AV_PIX_FMT_YUV444P16BE, AV_PIX_FMT_YUVA444P16BE,
+ AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY8A,
+ AV_PIX_FMT_YA16BE, AV_PIX_FMT_GRAY16BE,
+ AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
+ AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA,
+ AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR,
+ AV_PIX_FMT_RGB0, AV_PIX_FMT_BGR0,
+ AV_PIX_FMT_0RGB, AV_PIX_FMT_0BGR,
+ AV_PIX_FMT_RGB48BE, AV_PIX_FMT_BGR48BE,
+ AV_PIX_FMT_RGBA64BE, AV_PIX_FMT_BGRA64BE,
+ AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP,
+ AV_PIX_FMT_GBRP16BE, AV_PIX_FMT_GBRAP16BE,
+ AV_PIX_FMT_YUV420P10BE,
+ AV_PIX_FMT_YUV422P10BE,
+ AV_PIX_FMT_YUV444P10BE,
+ AV_PIX_FMT_YUV440P10BE,
+ AV_PIX_FMT_YUVA420P10BE,
+ AV_PIX_FMT_YUVA422P10BE,
+ AV_PIX_FMT_YUVA444P10BE,
+ AV_PIX_FMT_YUV420P12BE,
+ AV_PIX_FMT_YUV422P12BE,
+ AV_PIX_FMT_YUV444P12BE,
+ AV_PIX_FMT_YUV440P12BE,
+ AV_PIX_FMT_GBRP10BE, AV_PIX_FMT_GBRAP10BE,
+ AV_PIX_FMT_GBRP12BE, AV_PIX_FMT_GBRAP12BE,
+ AV_PIX_FMT_NONE,
+ };
+ static const enum AVPixelFormat out8_pixfmts[] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE };
+ static const enum AVPixelFormat out10le_pixfmts[] = { AV_PIX_FMT_GRAY10LE, AV_PIX_FMT_NONE };
+ static const enum AVPixelFormat out10be_pixfmts[] = { AV_PIX_FMT_GRAY10BE, AV_PIX_FMT_NONE };
+ static const enum AVPixelFormat out12le_pixfmts[] = { AV_PIX_FMT_GRAY12LE, AV_PIX_FMT_NONE };
+ static const enum AVPixelFormat out12be_pixfmts[] = { AV_PIX_FMT_GRAY12BE, AV_PIX_FMT_NONE };
+ static const enum AVPixelFormat out16le_pixfmts[] = { AV_PIX_FMT_GRAY16LE, AV_PIX_FMT_NONE };
+ static const enum AVPixelFormat out16be_pixfmts[] = { AV_PIX_FMT_GRAY16BE, AV_PIX_FMT_NONE };
+ const enum AVPixelFormat *out_pixfmts, *in_pixfmts;
+ const AVPixFmtDescriptor *desc;
+ AVFilterFormats *avff;
+ int i, ret, depth = 0, be = 0;
+
+ if (!ctx->inputs[0]->in_formats ||
+ !ctx->inputs[0]->in_formats->nb_formats) {
+ return AVERROR(EAGAIN);
+ }
+
+ avff = ctx->inputs[0]->in_formats;
+ desc = av_pix_fmt_desc_get(avff->formats[0]);
+ depth = desc->comp[0].depth;
+ be = desc->flags & AV_PIX_FMT_FLAG_BE;
+ if (be) {
+ in_pixfmts = in_pixfmts_be;
+ } else {
+ in_pixfmts = in_pixfmts_le;
+ }
+ if (!ctx->inputs[0]->out_formats)
+ if ((ret = ff_formats_ref(ff_make_format_list(in_pixfmts), &ctx->inputs[0]->out_formats)) < 0)
+ return ret;
+
+ for (i = 1; i < avff->nb_formats; i++) {
+ desc = av_pix_fmt_desc_get(avff->formats[i]);
+ if (depth != desc->comp[0].depth ||
+ be != (desc->flags & AV_PIX_FMT_FLAG_BE)) {
+ return AVERROR(EAGAIN);
+ }
+ }
+
+ if (depth == 8)
+ out_pixfmts = out8_pixfmts;
+ else if (!be && depth == 10)
+ out_pixfmts = out10le_pixfmts;
+ else if (be && depth == 10)
+ out_pixfmts = out10be_pixfmts;
+ else if (!be && depth == 12)
+ out_pixfmts = out12le_pixfmts;
+ else if (be && depth == 12)
+ out_pixfmts = out12be_pixfmts;
+ else if (be)
+ out_pixfmts = out16be_pixfmts;
+ else
+ out_pixfmts = out16le_pixfmts;
+
+ for (i = 0; i < ctx->nb_outputs; i++)
+ if ((ret = ff_formats_ref(ff_make_format_list(out_pixfmts), &ctx->outputs[i]->in_formats)) < 0)
+ return ret;
+ return 0;
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ ExtractPlanesContext *s = ctx->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ int plane_avail, ret, i;
+ uint8_t rgba_map[4];
+
+ plane_avail = ((desc->flags & AV_PIX_FMT_FLAG_RGB) ? PLANE_R|PLANE_G|PLANE_B :
+ PLANE_Y |
+ ((desc->nb_components > 2) ? PLANE_U|PLANE_V : 0)) |
+ ((desc->flags & AV_PIX_FMT_FLAG_ALPHA) ? PLANE_A : 0);
+ if (s->requested_planes & ~plane_avail) {
+ av_log(ctx, AV_LOG_ERROR, "Requested planes not available.\n");
+ return AVERROR(EINVAL);
+ }
+ if ((ret = av_image_fill_linesizes(s->linesize, inlink->format, inlink->w)) < 0)
+ return ret;
+
+ s->depth = desc->comp[0].depth >> 3;
+ s->step = av_get_padded_bits_per_pixel(desc) >> 3;
+ s->is_packed = !(desc->flags & AV_PIX_FMT_FLAG_PLANAR) &&
+ (desc->nb_components > 1);
+ if (desc->flags & AV_PIX_FMT_FLAG_RGB) {
+ ff_fill_rgba_map(rgba_map, inlink->format);
+ for (i = 0; i < 4; i++)
+ s->map[i] = rgba_map[s->map[i]];
+ }
+
+ return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ AVFilterLink *inlink = ctx->inputs[0];
+ ExtractPlanesContext *s = ctx->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ const int output = outlink->srcpad - ctx->output_pads;
+
+ if (s->map[output] == 1 || s->map[output] == 2) {
+ outlink->h = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
+ outlink->w = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
+ }
+
+ return 0;
+}
+
+static void extract_from_packed(uint8_t *dst, int dst_linesize,
+ const uint8_t *src, int src_linesize,
+ int width, int height,
+ int depth, int step, int comp)
+{
+ int x, y;
+
+ for (y = 0; y < height; y++) {
+ switch (depth) {
+ case 1:
+ for (x = 0; x < width; x++)
+ dst[x] = src[x * step + comp];
+ break;
+ case 2:
+ for (x = 0; x < width; x++) {
+ dst[x * 2 ] = src[x * step + comp * 2 ];
+ dst[x * 2 + 1] = src[x * step + comp * 2 + 1];
+ }
+ break;
+ }
+ dst += dst_linesize;
+ src += src_linesize;
+ }
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
+{
+ AVFilterContext *ctx = inlink->dst;
+ ExtractPlanesContext *s = ctx->priv;
+ int i, eof = 0, ret = 0;
+
+ for (i = 0; i < ctx->nb_outputs; i++) {
+ AVFilterLink *outlink = ctx->outputs[i];
+ const int idx = s->map[i];
+ AVFrame *out;
+
+ if (outlink->status_in)
+ continue;
+
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ ret = AVERROR(ENOMEM);
+ break;
+ }
+ av_frame_copy_props(out, frame);
+
+ if (s->is_packed) {
+ extract_from_packed(out->data[0], out->linesize[0],
+ frame->data[0], frame->linesize[0],
+ outlink->w, outlink->h,
+ s->depth,
+ s->step, idx);
+ } else {
+ av_image_copy_plane(out->data[0], out->linesize[0],
+ frame->data[idx], frame->linesize[idx],
+ s->linesize[idx], outlink->h);
+ }
+
+ ret = ff_filter_frame(outlink, out);
+ if (ret == AVERROR_EOF)
+ eof++;
+ else if (ret < 0)
+ break;
+ }
+ av_frame_free(&frame);
+
+ if (eof == ctx->nb_outputs)
+ ret = AVERROR_EOF;
+ else if (ret == AVERROR_EOF)
+ ret = 0;
+ return ret;
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ ExtractPlanesContext *s = ctx->priv;
+ int planes = (s->requested_planes & 0xf) | (s->requested_planes >> 4);
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ char *name;
+ AVFilterPad pad = { 0 };
+
+ if (!(planes & (1 << i)))
+ continue;
+
+ name = av_asprintf("out%d", ctx->nb_outputs);
+ if (!name)
+ return AVERROR(ENOMEM);
+ s->map[ctx->nb_outputs] = i;
+ pad.name = name;
+ pad.type = AVMEDIA_TYPE_VIDEO;
+ pad.config_props = config_output;
+
+ ff_insert_outpad(ctx, ctx->nb_outputs, &pad);
+ }
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ int i;
+
+ for (i = 0; i < ctx->nb_outputs; i++)
+ av_freep(&ctx->output_pads[i].name);
+}
+
+static const AVFilterPad extractplanes_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_extractplanes = {
+ .name = "extractplanes",
+ .description = NULL_IF_CONFIG_SMALL("Extract planes as grayscale frames."),
+ .priv_size = sizeof(ExtractPlanesContext),
+ .priv_class = &extractplanes_class,
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = extractplanes_inputs,
+ .outputs = NULL,
+ .flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
+};
+
+#if CONFIG_ALPHAEXTRACT_FILTER
+
+static av_cold int init_alphaextract(AVFilterContext *ctx)
+{
+ ExtractPlanesContext *s = ctx->priv;
+
+ s->requested_planes = PLANE_A;
+
+ return init(ctx);
+}
+
+AVFilter ff_vf_alphaextract = {
+ .name = "alphaextract",
+ .description = NULL_IF_CONFIG_SMALL("Extract an alpha channel as a "
+ "grayscale image component."),
+ .priv_size = sizeof(ExtractPlanesContext),
+ .init = init_alphaextract,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = extractplanes_inputs,
+ .outputs = NULL,
+ .flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
+};
+#endif /* CONFIG_ALPHAEXTRACT_FILTER */
diff --git a/libavfilter/vf_fade.c b/libavfilter/vf_fade.c
index eb6d82a894..c30c41db0d 100644
--- a/libavfilter/vf_fade.c
+++ b/libavfilter/vf_fade.c
@@ -2,20 +2,20 @@
* Copyright (c) 2010 Brandon Mintern
* Copyright (c) 2007 Bobby Bingham
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -25,14 +25,27 @@
* based heavily on vf_negate.c by Bobby Bingham
*/
+#include "libavutil/avassert.h"
+#include "libavutil/avstring.h"
#include "libavutil/common.h"
+#include "libavutil/eval.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
+#include "drawutils.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
+#define R 0
+#define G 1
+#define B 2
+#define A 3
+
+#define Y 0
+#define U 1
+#define V 2
+
#define FADE_IN 0
#define FADE_OUT 1
@@ -41,8 +54,15 @@ typedef struct FadeContext {
int type;
int factor, fade_per_frame;
int start_frame, nb_frames;
- unsigned int frame_index, stop_frame;
int hsub, vsub, bpp;
+ unsigned int black_level, black_level_scaled;
+ uint8_t is_packed_rgb;
+ uint8_t rgba_map[4];
+ int alpha;
+ uint64_t start_time, duration;
+ enum {VF_FADE_WAITING=0, VF_FADE_FADING, VF_FADE_DONE} fade_state;
+ uint8_t color_rgba[4]; ///< fade color
+ int black_fade; ///< if color_rgba is black
} FadeContext;
static av_cold int init(AVFilterContext *ctx)
@@ -50,36 +70,87 @@ static av_cold int init(AVFilterContext *ctx)
FadeContext *s = ctx->priv;
s->fade_per_frame = (1 << 16) / s->nb_frames;
- if (s->type == FADE_IN) {
- s->factor = 0;
- } else if (s->type == FADE_OUT) {
- s->fade_per_frame = -s->fade_per_frame;
- s->factor = (1 << 16);
+ s->fade_state = VF_FADE_WAITING;
+
+ if (s->duration != 0) {
+ // If duration (seconds) is non-zero, assume that we are not fading based on frames
+ s->nb_frames = 0; // Mostly to clean up logging
+ }
+
+ // Choose what to log. If both time-based and frame-based options, both lines will be in the log
+ if (s->start_frame || s->nb_frames) {
+ av_log(ctx, AV_LOG_VERBOSE,
+ "type:%s start_frame:%d nb_frames:%d alpha:%d\n",
+ s->type == FADE_IN ? "in" : "out", s->start_frame,
+ s->nb_frames,s->alpha);
+ }
+ if (s->start_time || s->duration) {
+ av_log(ctx, AV_LOG_VERBOSE,
+ "type:%s start_time:%f duration:%f alpha:%d\n",
+ s->type == FADE_IN ? "in" : "out", (s->start_time / (double)AV_TIME_BASE),
+ (s->duration / (double)AV_TIME_BASE),s->alpha);
}
- s->stop_frame = s->start_frame + s->nb_frames;
- av_log(ctx, AV_LOG_VERBOSE,
- "type:%s start_frame:%d nb_frames:%d\n",
- s->type == FADE_IN ? "in" : "out", s->start_frame,
- s->nb_frames);
+ s->black_fade = !memcmp(s->color_rgba, "\x00\x00\x00\xff", 4);
return 0;
}
static int query_formats(AVFilterContext *ctx)
{
+ const FadeContext *s = ctx->priv;
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P,
AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUVJ440P,
+ AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P,
AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
+ AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR,
+ AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA,
AV_PIX_FMT_NONE
};
+ static const enum AVPixelFormat pix_fmts_rgb[] = {
+ AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
+ AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR,
+ AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA,
+ AV_PIX_FMT_NONE
+ };
+ static const enum AVPixelFormat pix_fmts_alpha[] = {
+ AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P,
+ AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR,
+ AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA,
+ AV_PIX_FMT_NONE
+ };
+ static const enum AVPixelFormat pix_fmts_rgba[] = {
+ AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR,
+ AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA,
+ AV_PIX_FMT_NONE
+ };
+ AVFilterFormats *fmts_list;
- ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
- return 0;
+ if (s->alpha) {
+ if (s->black_fade)
+ fmts_list = ff_make_format_list(pix_fmts_alpha);
+ else
+ fmts_list = ff_make_format_list(pix_fmts_rgba);
+ } else {
+ if (s->black_fade)
+ fmts_list = ff_make_format_list(pix_fmts);
+ else
+ fmts_list = ff_make_format_list(pix_fmts_rgb);
+ }
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
}
+const static enum AVPixelFormat studio_level_pix_fmts[] = {
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_YUV440P,
+ AV_PIX_FMT_NONE
+};
+
static int config_props(AVFilterLink *inlink)
{
FadeContext *s = inlink->dst->priv;
@@ -88,7 +159,59 @@ static int config_props(AVFilterLink *inlink)
s->hsub = pixdesc->log2_chroma_w;
s->vsub = pixdesc->log2_chroma_h;
- s->bpp = av_get_bits_per_pixel(pixdesc) >> 3;
+ s->bpp = pixdesc->flags & AV_PIX_FMT_FLAG_PLANAR ?
+ 1 :
+ av_get_bits_per_pixel(pixdesc) >> 3;
+ s->alpha &= !!(pixdesc->flags & AV_PIX_FMT_FLAG_ALPHA);
+ s->is_packed_rgb = ff_fill_rgba_map(s->rgba_map, inlink->format) >= 0;
+
+ /* use CCIR601/709 black level for studio-level pixel non-alpha components */
+ s->black_level =
+ ff_fmt_is_in(inlink->format, studio_level_pix_fmts) && !s->alpha ? 16 : 0;
+ /* 32768 = 1 << 15, it is an integer representation
+ * of 0.5 and is for rounding. */
+ s->black_level_scaled = (s->black_level << 16) + 32768;
+ return 0;
+}
+
+static av_always_inline void filter_rgb(FadeContext *s, const AVFrame *frame,
+ int slice_start, int slice_end,
+ int do_alpha, int step)
+{
+ int i, j;
+ const uint8_t r_idx = s->rgba_map[R];
+ const uint8_t g_idx = s->rgba_map[G];
+ const uint8_t b_idx = s->rgba_map[B];
+ const uint8_t a_idx = s->rgba_map[A];
+ const uint8_t *c = s->color_rgba;
+
+ for (i = slice_start; i < slice_end; i++) {
+ uint8_t *p = frame->data[0] + i * frame->linesize[0];
+ for (j = 0; j < frame->width; j++) {
+#define INTERP(c_name, c_idx) av_clip_uint8(((c[c_idx]<<16) + ((int)p[c_name] - (int)c[c_idx]) * s->factor + (1<<15)) >> 16)
+ p[r_idx] = INTERP(r_idx, 0);
+ p[g_idx] = INTERP(g_idx, 1);
+ p[b_idx] = INTERP(b_idx, 2);
+ if (do_alpha)
+ p[a_idx] = INTERP(a_idx, 3);
+ p += step;
+ }
+ }
+}
+
+static int filter_slice_rgb(AVFilterContext *ctx, void *arg, int jobnr,
+ int nb_jobs)
+{
+ FadeContext *s = ctx->priv;
+ AVFrame *frame = arg;
+ int slice_start = (frame->height * jobnr ) / nb_jobs;
+ int slice_end = (frame->height * (jobnr+1)) / nb_jobs;
+
+ if (s->alpha) filter_rgb(s, frame, slice_start, slice_end, 1, 4);
+ else if (s->bpp == 3) filter_rgb(s, frame, slice_start, slice_end, 0, 3);
+ else if (s->bpp == 4) filter_rgb(s, frame, slice_start, slice_end, 0, 4);
+ else av_assert0(0);
+
return 0;
}
@@ -97,9 +220,8 @@ static int filter_slice_luma(AVFilterContext *ctx, void *arg, int jobnr,
{
FadeContext *s = ctx->priv;
AVFrame *frame = arg;
- int slice_h = frame->height / nb_jobs;
- int slice_start = jobnr * slice_h;
- int slice_end = (jobnr == nb_jobs - 1) ? frame->height : (jobnr + 1) * slice_h;
+ int slice_start = (frame->height * jobnr ) / nb_jobs;
+ int slice_end = (frame->height * (jobnr+1)) / nb_jobs;
int i, j;
for (i = slice_start; i < slice_end; i++) {
@@ -108,7 +230,7 @@ static int filter_slice_luma(AVFilterContext *ctx, void *arg, int jobnr,
/* s->factor is using 16 lower-order bits for decimal
* places. 32768 = 1 << 15, it is an integer representation
* of 0.5 and is for rounding. */
- *p = (*p * s->factor + 32768) >> 16;
+ *p = ((*p - s->black_level) * s->factor + s->black_level_scaled) >> 16;
p++;
}
}
@@ -121,15 +243,16 @@ static int filter_slice_chroma(AVFilterContext *ctx, void *arg, int jobnr,
{
FadeContext *s = ctx->priv;
AVFrame *frame = arg;
- int slice_h = FFALIGN(frame->height / nb_jobs, 1 << s->vsub);
- int slice_start = jobnr * slice_h;
- int slice_end = FFMIN((jobnr + 1) * slice_h, frame->height);
int i, j, plane;
+ const int width = AV_CEIL_RSHIFT(frame->width, s->hsub);
+ const int height= AV_CEIL_RSHIFT(frame->height, s->vsub);
+ int slice_start = (height * jobnr ) / nb_jobs;
+ int slice_end = FFMIN(((height * (jobnr+1)) / nb_jobs), frame->height);
for (plane = 1; plane < 3; plane++) {
for (i = slice_start; i < slice_end; i++) {
- uint8_t *p = frame->data[plane] + (i >> s->vsub) * frame->linesize[plane];
- for (j = 0; j < frame->width >> s->hsub; j++) {
+ uint8_t *p = frame->data[plane] + i * frame->linesize[plane];
+ for (j = 0; j < width; j++) {
/* 8421367 = ((128 << 1) + 1) << 15. It is an integer
* representation of 128.5. The .5 is for rounding
* purposes. */
@@ -142,60 +265,148 @@ static int filter_slice_chroma(AVFilterContext *ctx, void *arg, int jobnr,
return 0;
}
+static int filter_slice_alpha(AVFilterContext *ctx, void *arg, int jobnr,
+ int nb_jobs)
+{
+ FadeContext *s = ctx->priv;
+ AVFrame *frame = arg;
+ int plane = s->is_packed_rgb ? 0 : A;
+ int slice_start = (frame->height * jobnr ) / nb_jobs;
+ int slice_end = (frame->height * (jobnr+1)) / nb_jobs;
+ int i, j;
+
+ for (i = slice_start; i < slice_end; i++) {
+ uint8_t *p = frame->data[plane] + i * frame->linesize[plane] + s->is_packed_rgb*s->rgba_map[A];
+ int step = s->is_packed_rgb ? 4 : 1;
+ for (j = 0; j < frame->width; j++) {
+ /* s->factor is using 16 lower-order bits for decimal
+ * places. 32768 = 1 << 15, it is an integer representation
+ * of 0.5 and is for rounding. */
+ *p = ((*p - s->black_level) * s->factor + s->black_level_scaled) >> 16;
+ p += step;
+ }
+ }
+
+ return 0;
+}
+
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
AVFilterContext *ctx = inlink->dst;
FadeContext *s = ctx->priv;
+ double frame_timestamp = frame->pts == AV_NOPTS_VALUE ? -1 : frame->pts * av_q2d(inlink->time_base);
- if (s->factor < UINT16_MAX) {
- /* luma or rgb plane */
- ctx->internal->execute(ctx, filter_slice_luma, frame, NULL,
- FFMIN(frame->height, ctx->graph->nb_threads));
-
- if (frame->data[1] && frame->data[2]) {
- /* chroma planes */
- ctx->internal->execute(ctx, filter_slice_chroma, frame, NULL,
- FFMIN(frame->height, ctx->graph->nb_threads));
+ // Calculate Fade assuming this is a Fade In
+ if (s->fade_state == VF_FADE_WAITING) {
+ s->factor=0;
+ if (frame_timestamp >= s->start_time/(double)AV_TIME_BASE
+ && inlink->frame_count_out >= s->start_frame) {
+ // Time to start fading
+ s->fade_state = VF_FADE_FADING;
+
+ // Save start time in case we are starting based on frames and fading based on time
+ if (s->start_time == 0 && s->start_frame != 0) {
+ s->start_time = frame_timestamp*(double)AV_TIME_BASE;
+ }
+
+ // Save start frame in case we are starting based on time and fading based on frames
+ if (s->start_time != 0 && s->start_frame == 0) {
+ s->start_frame = inlink->frame_count_out;
+ }
}
}
+ if (s->fade_state == VF_FADE_FADING) {
+ if (s->duration == 0) {
+ // Fading based on frame count
+ s->factor = (inlink->frame_count_out - s->start_frame) * s->fade_per_frame;
+ if (inlink->frame_count_out > s->start_frame + s->nb_frames) {
+ s->fade_state = VF_FADE_DONE;
+ }
+
+ } else {
+ // Fading based on duration
+ s->factor = (frame_timestamp - s->start_time/(double)AV_TIME_BASE)
+ * (float) UINT16_MAX / (s->duration/(double)AV_TIME_BASE);
+ if (frame_timestamp > s->start_time/(double)AV_TIME_BASE
+ + s->duration/(double)AV_TIME_BASE) {
+ s->fade_state = VF_FADE_DONE;
+ }
+ }
+ }
+ if (s->fade_state == VF_FADE_DONE) {
+ s->factor=UINT16_MAX;
+ }
- if (s->frame_index >= s->start_frame &&
- s->frame_index <= s->stop_frame)
- s->factor += s->fade_per_frame;
s->factor = av_clip_uint16(s->factor);
- s->frame_index++;
+
+ // Invert fade_factor if Fading Out
+ if (s->type == FADE_OUT) {
+ s->factor=UINT16_MAX-s->factor;
+ }
+
+ if (s->factor < UINT16_MAX) {
+ if (s->alpha) {
+ ctx->internal->execute(ctx, filter_slice_alpha, frame, NULL,
+ FFMIN(frame->height, ff_filter_get_nb_threads(ctx)));
+ } else if (s->is_packed_rgb && !s->black_fade) {
+ ctx->internal->execute(ctx, filter_slice_rgb, frame, NULL,
+ FFMIN(frame->height, ff_filter_get_nb_threads(ctx)));
+ } else {
+ /* luma, or rgb plane in case of black */
+ ctx->internal->execute(ctx, filter_slice_luma, frame, NULL,
+ FFMIN(frame->height, ff_filter_get_nb_threads(ctx)));
+
+ if (frame->data[1] && frame->data[2]) {
+ /* chroma planes */
+ ctx->internal->execute(ctx, filter_slice_chroma, frame, NULL,
+ FFMIN(frame->height, ff_filter_get_nb_threads(ctx)));
+ }
+ }
+ }
return ff_filter_frame(inlink->dst->outputs[0], frame);
}
+
#define OFFSET(x) offsetof(FadeContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
-static const AVOption options[] = {
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption fade_options[] = {
{ "type", "'in' or 'out' for fade-in/fade-out", OFFSET(type), AV_OPT_TYPE_INT, { .i64 = FADE_IN }, FADE_IN, FADE_OUT, FLAGS, "type" },
+ { "t", "'in' or 'out' for fade-in/fade-out", OFFSET(type), AV_OPT_TYPE_INT, { .i64 = FADE_IN }, FADE_IN, FADE_OUT, FLAGS, "type" },
{ "in", "fade-in", 0, AV_OPT_TYPE_CONST, { .i64 = FADE_IN }, .unit = "type" },
{ "out", "fade-out", 0, AV_OPT_TYPE_CONST, { .i64 = FADE_OUT }, .unit = "type" },
{ "start_frame", "Number of the first frame to which to apply the effect.",
OFFSET(start_frame), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, FLAGS },
+ { "s", "Number of the first frame to which to apply the effect.",
+ OFFSET(start_frame), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, FLAGS },
{ "nb_frames", "Number of frames to which the effect should be applied.",
- OFFSET(nb_frames), AV_OPT_TYPE_INT, { .i64 = 1 }, 0, INT_MAX, FLAGS },
- { NULL },
+ OFFSET(nb_frames), AV_OPT_TYPE_INT, { .i64 = 25 }, 0, INT_MAX, FLAGS },
+ { "n", "Number of frames to which the effect should be applied.",
+ OFFSET(nb_frames), AV_OPT_TYPE_INT, { .i64 = 25 }, 0, INT_MAX, FLAGS },
+ { "alpha", "fade alpha if it is available on the input", OFFSET(alpha), AV_OPT_TYPE_BOOL, {.i64 = 0 }, 0, 1, FLAGS },
+ { "start_time", "Number of seconds of the beginning of the effect.",
+ OFFSET(start_time), AV_OPT_TYPE_DURATION, {.i64 = 0. }, 0, INT32_MAX, FLAGS },
+ { "st", "Number of seconds of the beginning of the effect.",
+ OFFSET(start_time), AV_OPT_TYPE_DURATION, {.i64 = 0. }, 0, INT32_MAX, FLAGS },
+ { "duration", "Duration of the effect in seconds.",
+ OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = 0. }, 0, INT32_MAX, FLAGS },
+ { "d", "Duration of the effect in seconds.",
+ OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = 0. }, 0, INT32_MAX, FLAGS },
+ { "color", "set color", OFFSET(color_rgba), AV_OPT_TYPE_COLOR, {.str = "black"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "c", "set color", OFFSET(color_rgba), AV_OPT_TYPE_COLOR, {.str = "black"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { NULL }
};
-static const AVClass fade_class = {
- .class_name = "fade",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(fade);
static const AVFilterPad avfilter_vf_fade_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .config_props = config_props,
- .get_video_buffer = ff_null_get_video_buffer,
- .filter_frame = filter_frame,
- .needs_writable = 1,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_props,
+ .filter_frame = filter_frame,
+ .needs_writable = 1,
},
{ NULL }
};
@@ -210,13 +421,12 @@ static const AVFilterPad avfilter_vf_fade_outputs[] = {
AVFilter ff_vf_fade = {
.name = "fade",
- .description = NULL_IF_CONFIG_SMALL("Fade in/out input video"),
+ .description = NULL_IF_CONFIG_SMALL("Fade in/out input video."),
.init = init,
.priv_size = sizeof(FadeContext),
.priv_class = &fade_class,
.query_formats = query_formats,
-
- .inputs = avfilter_vf_fade_inputs,
- .outputs = avfilter_vf_fade_outputs,
- .flags = AVFILTER_FLAG_SLICE_THREADS,
+ .inputs = avfilter_vf_fade_inputs,
+ .outputs = avfilter_vf_fade_outputs,
+ .flags = AVFILTER_FLAG_SLICE_THREADS,
};
diff --git a/libavfilter/vf_fftfilt.c b/libavfilter/vf_fftfilt.c
new file mode 100644
index 0000000000..307b41a733
--- /dev/null
+++ b/libavfilter/vf_fftfilt.c
@@ -0,0 +1,346 @@
+/*
+ * Copyright (c) 2015 Arwa Arif <arwaarif1994@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; either version 2.1 of the License,
+ * or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * FFT domain filtering.
+ */
+
+#include "libavfilter/internal.h"
+#include "libavutil/common.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "libavcodec/avfft.h"
+#include "libavutil/eval.h"
+
+#define MAX_PLANES 4
+
+typedef struct {
+ const AVClass *class;
+
+ RDFTContext *rdft;
+ int rdft_hbits[MAX_PLANES];
+ int rdft_vbits[MAX_PLANES];
+ size_t rdft_hlen[MAX_PLANES];
+ size_t rdft_vlen[MAX_PLANES];
+ FFTSample *rdft_hdata[MAX_PLANES];
+ FFTSample *rdft_vdata[MAX_PLANES];
+
+ int dc[MAX_PLANES];
+ char *weight_str[MAX_PLANES];
+ AVExpr *weight_expr[MAX_PLANES];
+ double *weight[MAX_PLANES];
+
+} FFTFILTContext;
+
+static const char *const var_names[] = { "X", "Y", "W", "H", NULL };
+enum { VAR_X, VAR_Y, VAR_W, VAR_H, VAR_VARS_NB };
+
+enum { Y = 0, U, V };
+
+#define OFFSET(x) offsetof(FFTFILTContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption fftfilt_options[] = {
+ { "dc_Y", "adjust gain in Y plane", OFFSET(dc[Y]), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1000, FLAGS },
+ { "dc_U", "adjust gain in U plane", OFFSET(dc[U]), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1000, FLAGS },
+ { "dc_V", "adjust gain in V plane", OFFSET(dc[V]), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1000, FLAGS },
+ { "weight_Y", "set luminance expression in Y plane", OFFSET(weight_str[Y]), AV_OPT_TYPE_STRING, {.str = "1"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "weight_U", "set chrominance expression in U plane", OFFSET(weight_str[U]), AV_OPT_TYPE_STRING, {.str = NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "weight_V", "set chrominance expression in V plane", OFFSET(weight_str[V]), AV_OPT_TYPE_STRING, {.str = NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
+ {NULL},
+};
+
+AVFILTER_DEFINE_CLASS(fftfilt);
+
+static inline double lum(void *priv, double x, double y, int plane)
+{
+ FFTFILTContext *s = priv;
+ return s->rdft_vdata[plane][(int)x * s->rdft_vlen[plane] + (int)y];
+}
+
+static double weight_Y(void *priv, double x, double y) { return lum(priv, x, y, Y); }
+static double weight_U(void *priv, double x, double y) { return lum(priv, x, y, U); }
+static double weight_V(void *priv, double x, double y) { return lum(priv, x, y, V); }
+
+static void copy_rev (FFTSample *dest, int w, int w2)
+{
+ int i;
+
+ for (i = w; i < w + (w2-w)/2; i++)
+ dest[i] = dest[2*w - i - 1];
+
+ for (; i < w2; i++)
+ dest[i] = dest[w2 - i];
+}
+
+/*Horizontal pass - RDFT*/
+static void rdft_horizontal(FFTFILTContext *s, AVFrame *in, int w, int h, int plane)
+{
+ int i, j;
+ s->rdft = av_rdft_init(s->rdft_hbits[plane], DFT_R2C);
+
+ for (i = 0; i < h; i++) {
+ for (j = 0; j < w; j++)
+ s->rdft_hdata[plane][i * s->rdft_hlen[plane] + j] = *(in->data[plane] + in->linesize[plane] * i + j);
+
+ copy_rev(s->rdft_hdata[plane] + i * s->rdft_hlen[plane], w, s->rdft_hlen[plane]);
+ }
+
+ for (i = 0; i < h; i++)
+ av_rdft_calc(s->rdft, s->rdft_hdata[plane] + i * s->rdft_hlen[plane]);
+
+ av_rdft_end(s->rdft);
+}
+
+/*Vertical pass - RDFT*/
+static void rdft_vertical(FFTFILTContext *s, int h, int plane)
+{
+ int i, j;
+ s->rdft = av_rdft_init(s->rdft_vbits[plane], DFT_R2C);
+
+ for (i = 0; i < s->rdft_hlen[plane]; i++) {
+ for (j = 0; j < h; j++)
+ s->rdft_vdata[plane][i * s->rdft_vlen[plane] + j] =
+ s->rdft_hdata[plane][j * s->rdft_hlen[plane] + i];
+ copy_rev(s->rdft_vdata[plane] + i * s->rdft_vlen[plane], h, s->rdft_vlen[plane]);
+ }
+
+ for (i = 0; i < s->rdft_hlen[plane]; i++)
+ av_rdft_calc(s->rdft, s->rdft_vdata[plane] + i * s->rdft_vlen[plane]);
+
+ av_rdft_end(s->rdft);
+}
+/*Vertical pass - IRDFT*/
+static void irdft_vertical(FFTFILTContext *s, int h, int plane)
+{
+ int i, j;
+ s->rdft = av_rdft_init(s->rdft_vbits[plane], IDFT_C2R);
+ for (i = 0; i < s->rdft_hlen[plane]; i++)
+ av_rdft_calc(s->rdft, s->rdft_vdata[plane] + i * s->rdft_vlen[plane]);
+
+ for (i = 0; i < s->rdft_hlen[plane]; i++)
+ for (j = 0; j < h; j++)
+ s->rdft_hdata[plane][j * s->rdft_hlen[plane] + i] =
+ s->rdft_vdata[plane][i * s->rdft_vlen[plane] + j];
+
+ av_rdft_end(s->rdft);
+}
+
+/*Horizontal pass - IRDFT*/
+static void irdft_horizontal(FFTFILTContext *s, AVFrame *out, int w, int h, int plane)
+{
+ int i, j;
+ s->rdft = av_rdft_init(s->rdft_hbits[plane], IDFT_C2R);
+ for (i = 0; i < h; i++)
+ av_rdft_calc(s->rdft, s->rdft_hdata[plane] + i * s->rdft_hlen[plane]);
+
+ for (i = 0; i < h; i++)
+ for (j = 0; j < w; j++)
+ *(out->data[plane] + out->linesize[plane] * i + j) = av_clip(s->rdft_hdata[plane][i
+ *s->rdft_hlen[plane] + j] * 4 /
+ (s->rdft_hlen[plane] *
+ s->rdft_vlen[plane]), 0, 255);
+
+ av_rdft_end(s->rdft);
+}
+
+static av_cold int initialize(AVFilterContext *ctx)
+{
+ FFTFILTContext *s = ctx->priv;
+ int ret = 0, plane;
+
+ if (!s->dc[U] && !s->dc[V]) {
+ s->dc[U] = s->dc[Y];
+ s->dc[V] = s->dc[Y];
+ } else {
+ if (!s->dc[U]) s->dc[U] = s->dc[V];
+ if (!s->dc[V]) s->dc[V] = s->dc[U];
+ }
+
+ if (!s->weight_str[U] && !s->weight_str[V]) {
+ s->weight_str[U] = av_strdup(s->weight_str[Y]);
+ s->weight_str[V] = av_strdup(s->weight_str[Y]);
+ } else {
+ if (!s->weight_str[U]) s->weight_str[U] = av_strdup(s->weight_str[V]);
+ if (!s->weight_str[V]) s->weight_str[V] = av_strdup(s->weight_str[U]);
+ }
+
+ for (plane = 0; plane < 3; plane++) {
+ static double (*p[])(void *, double, double) = { weight_Y, weight_U, weight_V };
+ const char *const func2_names[] = {"weight_Y", "weight_U", "weight_V", NULL };
+ double (*func2[])(void *, double, double) = { weight_Y, weight_U, weight_V, p[plane], NULL };
+
+ ret = av_expr_parse(&s->weight_expr[plane], s->weight_str[plane], var_names,
+ NULL, NULL, func2_names, func2, 0, ctx);
+ if (ret < 0)
+ break;
+ }
+ return ret;
+}
+
+static int config_props(AVFilterLink *inlink)
+{
+ FFTFILTContext *s = inlink->dst->priv;
+ const AVPixFmtDescriptor *desc;
+ int rdft_hbits, rdft_vbits, i, j, plane;
+ double values[VAR_VARS_NB];
+
+ desc = av_pix_fmt_desc_get(inlink->format);
+ for (i = 0; i < desc->nb_components; i++) {
+ int w = inlink->w;
+ int h = inlink->h;
+
+ /* RDFT - Array initialization for Horizontal pass*/
+ for (rdft_hbits = 1; 1 << rdft_hbits < w*10/9; rdft_hbits++);
+ s->rdft_hbits[i] = rdft_hbits;
+ s->rdft_hlen[i] = 1 << rdft_hbits;
+ if (!(s->rdft_hdata[i] = av_malloc_array(h, s->rdft_hlen[i] * sizeof(FFTSample))))
+ return AVERROR(ENOMEM);
+
+ /* RDFT - Array initialization for Vertical pass*/
+ for (rdft_vbits = 1; 1 << rdft_vbits < h*10/9; rdft_vbits++);
+ s->rdft_vbits[i] = rdft_vbits;
+ s->rdft_vlen[i] = 1 << rdft_vbits;
+ if (!(s->rdft_vdata[i] = av_malloc_array(s->rdft_hlen[i], s->rdft_vlen[i] * sizeof(FFTSample))))
+ return AVERROR(ENOMEM);
+ }
+
+ /*Luminance value - Array initialization*/
+ values[VAR_W] = inlink->w;
+ values[VAR_H] = inlink->h;
+ for (plane = 0; plane < 3; plane++)
+ {
+ if(!(s->weight[plane] = av_malloc_array(s->rdft_hlen[plane], s->rdft_vlen[plane] * sizeof(double))))
+ return AVERROR(ENOMEM);
+ for (i = 0; i < s->rdft_hlen[plane]; i++)
+ {
+ values[VAR_X] = i;
+ for (j = 0; j < s->rdft_vlen[plane]; j++)
+ {
+ values[VAR_Y] = j;
+ s->weight[plane][i * s->rdft_vlen[plane] + j] =
+ av_expr_eval(s->weight_expr[plane], values, s);
+ }
+ }
+ }
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = inlink->dst->outputs[0];
+ const AVPixFmtDescriptor *desc;
+ FFTFILTContext *s = ctx->priv;
+ AVFrame *out;
+ int i, j, plane;
+
+ out = ff_get_video_buffer(outlink, inlink->w, inlink->h);
+ if (!out)
+ return AVERROR(ENOMEM);
+
+ av_frame_copy_props(out, in);
+
+ desc = av_pix_fmt_desc_get(inlink->format);
+ for (plane = 0; plane < desc->nb_components; plane++) {
+ int w = inlink->w;
+ int h = inlink->h;
+
+ if (plane == 1 || plane == 2) {
+ w = AV_CEIL_RSHIFT(w, desc->log2_chroma_w);
+ h = AV_CEIL_RSHIFT(h, desc->log2_chroma_h);
+ }
+
+ rdft_horizontal(s, in, w, h, plane);
+ rdft_vertical(s, h, plane);
+
+ /*Change user defined parameters*/
+ for (i = 0; i < s->rdft_hlen[plane]; i++)
+ for (j = 0; j < s->rdft_vlen[plane]; j++)
+ s->rdft_vdata[plane][i * s->rdft_vlen[plane] + j] *=
+ s->weight[plane][i * s->rdft_vlen[plane] + j];
+
+ s->rdft_vdata[plane][0] += s->rdft_hlen[plane] * s->rdft_vlen[plane] * s->dc[plane];
+
+ irdft_vertical(s, h, plane);
+ irdft_horizontal(s, out, w, h, plane);
+ }
+
+ av_frame_free(&in);
+ return ff_filter_frame(outlink, out);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ FFTFILTContext *s = ctx->priv;
+ int i;
+ for (i = 0; i < MAX_PLANES; i++) {
+ av_free(s->rdft_hdata[i]);
+ av_free(s->rdft_vdata[i]);
+ av_expr_free(s->weight_expr[i]);
+ av_free(s->weight[i]);
+ }
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pixel_fmts_fftfilt[] = {
+ AV_PIX_FMT_GRAY8,
+ AV_PIX_FMT_YUV444P,
+ AV_PIX_FMT_NONE
+ };
+
+ AVFilterFormats *fmts_list = ff_make_format_list(pixel_fmts_fftfilt);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static const AVFilterPad fftfilt_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_props,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad fftfilt_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_fftfilt = {
+ .name = "fftfilt",
+ .description = NULL_IF_CONFIG_SMALL("Apply arbitrary expressions to pixels in frequency domain."),
+ .priv_size = sizeof(FFTFILTContext),
+ .priv_class = &fftfilt_class,
+ .inputs = fftfilt_inputs,
+ .outputs = fftfilt_outputs,
+ .query_formats = query_formats,
+ .init = initialize,
+ .uninit = uninit,
+};
diff --git a/libavfilter/vf_field.c b/libavfilter/vf_field.c
new file mode 100644
index 0000000000..72cabdb929
--- /dev/null
+++ b/libavfilter/vf_field.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2003 Rich Felker
+ * Copyright (c) 2012 Stefano Sabatini
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * field filter, based on libmpcodecs/vf_field.c by Rich Felker
+ */
+
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+#include "internal.h"
+
+enum FieldType { FIELD_TYPE_TOP = 0, FIELD_TYPE_BOTTOM };
+
+typedef struct {
+ const AVClass *class;
+ int type; ///< FieldType
+ int nb_planes; ///< number of planes of the current format
+} FieldContext;
+
+#define OFFSET(x) offsetof(FieldContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption field_options[] = {
+ {"type", "set field type (top or bottom)", OFFSET(type), AV_OPT_TYPE_INT, {.i64=FIELD_TYPE_TOP}, 0, 1, FLAGS, "field_type" },
+ {"top", "select top field", 0, AV_OPT_TYPE_CONST, {.i64=FIELD_TYPE_TOP}, INT_MIN, INT_MAX, FLAGS, "field_type"},
+ {"bottom", "select bottom field", 0, AV_OPT_TYPE_CONST, {.i64=FIELD_TYPE_BOTTOM}, INT_MIN, INT_MAX, FLAGS, "field_type"},
+ {NULL}
+};
+
+AVFILTER_DEFINE_CLASS(field);
+
+static int config_props_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ FieldContext *field = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
+
+ field->nb_planes = av_pix_fmt_count_planes(outlink->format);
+
+ outlink->w = inlink->w;
+ outlink->h = (inlink->h + (field->type == FIELD_TYPE_TOP)) / 2;
+
+ av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d type:%s -> w:%d h:%d\n",
+ inlink->w, inlink->h, field->type == FIELD_TYPE_BOTTOM ? "bottom" : "top",
+ outlink->w, outlink->h);
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
+{
+ FieldContext *field = inlink->dst->priv;
+ AVFilterLink *outlink = inlink->dst->outputs[0];
+ int i;
+
+ inpicref->height = outlink->h;
+ inpicref->interlaced_frame = 0;
+
+ for (i = 0; i < field->nb_planes; i++) {
+ if (field->type == FIELD_TYPE_BOTTOM)
+ inpicref->data[i] = inpicref->data[i] + inpicref->linesize[i];
+ inpicref->linesize[i] = 2 * inpicref->linesize[i];
+ }
+ return ff_filter_frame(outlink, inpicref);
+}
+
+static const AVFilterPad field_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad field_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_props_output,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_field = {
+ .name = "field",
+ .description = NULL_IF_CONFIG_SMALL("Extract a field from the input video."),
+ .priv_size = sizeof(FieldContext),
+ .inputs = field_inputs,
+ .outputs = field_outputs,
+ .priv_class = &field_class,
+};
diff --git a/libavfilter/vf_fieldhint.c b/libavfilter/vf_fieldhint.c
new file mode 100644
index 0000000000..3cfeb20a38
--- /dev/null
+++ b/libavfilter/vf_fieldhint.c
@@ -0,0 +1,284 @@
+/*
+ * Copyright (c) 2016 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/internal.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+#include "internal.h"
+#include "video.h"
+
+typedef struct FieldHintContext {
+ const AVClass *class;
+
+ char *hint_file_str;
+ FILE *hint;
+ int mode;
+
+ AVFrame *frame[3];
+
+ int64_t line;
+ int nb_planes;
+ int eof;
+ int planewidth[4];
+ int planeheight[4];
+} FieldHintContext;
+
+#define OFFSET(x) offsetof(FieldHintContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption fieldhint_options[] = {
+ { "hint", "set hint file", OFFSET(hint_file_str), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, FLAGS },
+ { "mode", "set hint mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS, "mode" },
+ { "absolute", 0, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "mode" },
+ { "relative", 0, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "mode" },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(fieldhint);
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ FieldHintContext *s = ctx->priv;
+ int ret;
+
+ if (!s->hint_file_str) {
+ av_log(ctx, AV_LOG_ERROR, "Hint file must be set.\n");
+ return AVERROR(EINVAL);
+ }
+ s->hint = fopen(s->hint_file_str, "r");
+ if (!s->hint) {
+ ret = AVERROR(errno);
+ av_log(ctx, AV_LOG_ERROR, "%s: %s\n", s->hint_file_str, av_err2str(ret));
+ return ret;
+ }
+
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *pix_fmts = NULL;
+ int fmt, ret;
+
+ for (fmt = 0; av_pix_fmt_desc_get(fmt); fmt++) {
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
+ if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL ||
+ desc->flags & AV_PIX_FMT_FLAG_PAL ||
+ desc->flags & AV_PIX_FMT_FLAG_BITSTREAM) &&
+ (ret = ff_add_format(&pix_fmts, fmt)) < 0)
+ return ret;
+ }
+
+ return ff_set_common_formats(ctx, pix_fmts);
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ FieldHintContext *s = inlink->dst->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ int ret;
+
+ if ((ret = av_image_fill_linesizes(s->planewidth, inlink->format, inlink->w)) < 0)
+ return ret;
+
+ s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
+ s->planeheight[0] = s->planeheight[3] = inlink->h;
+
+ s->nb_planes = av_pix_fmt_count_planes(inlink->format);
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ FieldHintContext *s = ctx->priv;
+ AVFrame *out, *top, *bottom;
+ char buf[1024] = { 0 };
+ int64_t tf, bf;
+ char hint = '=';
+ int p;
+
+ av_frame_free(&s->frame[0]);
+ s->frame[0] = s->frame[1];
+ s->frame[1] = s->frame[2];
+ s->frame[2] = in;
+ if (!s->frame[1])
+ return 0;
+ else if (!s->frame[0]) {
+ s->frame[0] = av_frame_clone(s->frame[1]);
+ if (!s->frame[0])
+ return AVERROR(ENOMEM);
+ }
+
+ while (1) {
+ if (fgets(buf, sizeof(buf)-1, s->hint)) {
+ s->line++;
+ if (buf[0] == '#' || buf[0] == ';') {
+ continue;
+ } else if (sscanf(buf, "%"PRId64",%"PRId64" %c", &tf, &bf, &hint) == 3) {
+ ;
+ } else if (sscanf(buf, "%"PRId64",%"PRId64"", &tf, &bf) == 2) {
+ ;
+ } else {
+ av_log(ctx, AV_LOG_ERROR, "Invalid entry at line %"PRId64".\n", s->line);
+ return AVERROR_INVALIDDATA;
+ }
+ switch (s->mode) {
+ case 0:
+ if (tf > outlink->frame_count_in + 1 || tf < FFMAX(0, outlink->frame_count_in - 1) ||
+ bf > outlink->frame_count_in + 1 || bf < FFMAX(0, outlink->frame_count_in - 1)) {
+ av_log(ctx, AV_LOG_ERROR, "Out of range frames %"PRId64" and/or %"PRId64" on line %"PRId64" for %"PRId64". input frame.\n", tf, bf, s->line, inlink->frame_count_out);
+ return AVERROR_INVALIDDATA;
+ }
+ break;
+ case 1:
+ if (tf > 1 || tf < -1 ||
+ bf > 1 || bf < -1) {
+ av_log(ctx, AV_LOG_ERROR, "Out of range %"PRId64" and/or %"PRId64" on line %"PRId64" for %"PRId64". input frame.\n", tf, bf, s->line, inlink->frame_count_out);
+ return AVERROR_INVALIDDATA;
+ }
+ };
+ break;
+ } else {
+ av_log(ctx, AV_LOG_ERROR, "Missing entry for %"PRId64". input frame.\n", inlink->frame_count_out);
+ return AVERROR_INVALIDDATA;
+ }
+ }
+
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out)
+ return AVERROR(ENOMEM);
+ av_frame_copy_props(out, s->frame[1]);
+
+ switch (s->mode) {
+ case 0:
+ top = s->frame[tf - outlink->frame_count_in + 1];
+ bottom = s->frame[bf - outlink->frame_count_in + 1];
+ break;
+ case 1:
+ top = s->frame[1 + tf];
+ bottom = s->frame[1 + bf];
+ break;
+ default:
+ av_assert0(0);
+ }
+
+ switch (hint) {
+ case '+':
+ out->interlaced_frame = 1;
+ break;
+ case '-':
+ out->interlaced_frame = 0;
+ break;
+ case '=':
+ break;
+ default:
+ av_log(ctx, AV_LOG_ERROR, "Invalid hint: %c.\n", hint);
+ av_frame_free(&out);
+ return AVERROR(EINVAL);
+ }
+
+ for (p = 0; p < s->nb_planes; p++) {
+ av_image_copy_plane(out->data[p],
+ out->linesize[p] * 2,
+ top->data[p],
+ top->linesize[p] * 2,
+ s->planewidth[p],
+ (s->planeheight[p] + 1) / 2);
+ av_image_copy_plane(out->data[p] + out->linesize[p],
+ out->linesize[p] * 2,
+ bottom->data[p] + bottom->linesize[p],
+ bottom->linesize[p] * 2,
+ s->planewidth[p],
+ (s->planeheight[p] + 1) / 2);
+ }
+
+ return ff_filter_frame(outlink, out);
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ FieldHintContext *s = ctx->priv;
+ int ret;
+
+ if (s->eof)
+ return AVERROR_EOF;
+
+ ret = ff_request_frame(ctx->inputs[0]);
+ if (ret == AVERROR_EOF && s->frame[2]) {
+ AVFrame *next = av_frame_clone(s->frame[2]);
+ if (!next)
+ return AVERROR(ENOMEM);
+ ret = filter_frame(ctx->inputs[0], next);
+ s->eof = 1;
+ }
+
+ return ret;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ FieldHintContext *s = ctx->priv;
+
+ if (s->hint)
+ fclose(s->hint);
+ s->hint = NULL;
+
+ av_frame_free(&s->frame[0]);
+ av_frame_free(&s->frame[1]);
+ av_frame_free(&s->frame[2]);
+}
+
+static const AVFilterPad inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_fieldhint = {
+ .name = "fieldhint",
+ .description = NULL_IF_CONFIG_SMALL("Field matching using hints."),
+ .priv_size = sizeof(FieldHintContext),
+ .priv_class = &fieldhint_class,
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = inputs,
+ .outputs = outputs,
+};
diff --git a/libavfilter/vf_fieldmatch.c b/libavfilter/vf_fieldmatch.c
new file mode 100644
index 0000000000..54a2c7aa60
--- /dev/null
+++ b/libavfilter/vf_fieldmatch.c
@@ -0,0 +1,988 @@
+/*
+ * Copyright (c) 2012 Fredrik Mellbin
+ * Copyright (c) 2013 Clément Bœsch
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Fieldmatching filter, ported from VFM filter (VapourSynth) by Clément.
+ * Fredrik Mellbin is the author of the VIVTC/VFM filter, which is itself a
+ * light clone of the TIVTC/TFM (AviSynth) filter written by Kevin Stone
+ * (tritical), the original author.
+ *
+ * @see http://bengal.missouri.edu/~kes25c/
+ * @see http://www.vapoursynth.com/about/
+ */
+
+#include <inttypes.h>
+
+#include "libavutil/avassert.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/opt.h"
+#include "libavutil/timestamp.h"
+#include "avfilter.h"
+#include "internal.h"
+
+#define INPUT_MAIN 0
+#define INPUT_CLEANSRC 1
+
+enum fieldmatch_parity {
+ FM_PARITY_AUTO = -1,
+ FM_PARITY_BOTTOM = 0,
+ FM_PARITY_TOP = 1,
+};
+
+enum matching_mode {
+ MODE_PC,
+ MODE_PC_N,
+ MODE_PC_U,
+ MODE_PC_N_UB,
+ MODE_PCN,
+ MODE_PCN_UB,
+ NB_MODE
+};
+
+enum comb_matching_mode {
+ COMBMATCH_NONE,
+ COMBMATCH_SC,
+ COMBMATCH_FULL,
+ NB_COMBMATCH
+};
+
+enum comb_dbg {
+ COMBDBG_NONE,
+ COMBDBG_PCN,
+ COMBDBG_PCNUB,
+ NB_COMBDBG
+};
+
+typedef struct {
+ const AVClass *class;
+
+ AVFrame *prv, *src, *nxt; ///< main sliding window of 3 frames
+ AVFrame *prv2, *src2, *nxt2; ///< sliding window of the optional second stream
+ int got_frame[2]; ///< frame request flag for each input stream
+ int hsub, vsub; ///< chroma subsampling values
+ uint32_t eof; ///< bitmask for end of stream
+ int64_t lastscdiff;
+ int64_t lastn;
+
+ /* options */
+ int order;
+ int ppsrc;
+ int mode; ///< matching_mode
+ int field;
+ int mchroma;
+ int y0, y1;
+ int64_t scthresh;
+ double scthresh_flt;
+ int combmatch; ///< comb_matching_mode
+ int combdbg;
+ int cthresh;
+ int chroma;
+ int blockx, blocky;
+ int combpel;
+
+ /* misc buffers */
+ uint8_t *map_data[4];
+ int map_linesize[4];
+ uint8_t *cmask_data[4];
+ int cmask_linesize[4];
+ int *c_array;
+ int tpitchy, tpitchuv;
+ uint8_t *tbuffer;
+} FieldMatchContext;
+
+#define OFFSET(x) offsetof(FieldMatchContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption fieldmatch_options[] = {
+ { "order", "specify the assumed field order", OFFSET(order), AV_OPT_TYPE_INT, {.i64=FM_PARITY_AUTO}, -1, 1, FLAGS, "order" },
+ { "auto", "auto detect parity", 0, AV_OPT_TYPE_CONST, {.i64=FM_PARITY_AUTO}, INT_MIN, INT_MAX, FLAGS, "order" },
+ { "bff", "assume bottom field first", 0, AV_OPT_TYPE_CONST, {.i64=FM_PARITY_BOTTOM}, INT_MIN, INT_MAX, FLAGS, "order" },
+ { "tff", "assume top field first", 0, AV_OPT_TYPE_CONST, {.i64=FM_PARITY_TOP}, INT_MIN, INT_MAX, FLAGS, "order" },
+ { "mode", "set the matching mode or strategy to use", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=MODE_PC_N}, MODE_PC, NB_MODE-1, FLAGS, "mode" },
+ { "pc", "2-way match (p/c)", 0, AV_OPT_TYPE_CONST, {.i64=MODE_PC}, INT_MIN, INT_MAX, FLAGS, "mode" },
+ { "pc_n", "2-way match + 3rd match on combed (p/c + u)", 0, AV_OPT_TYPE_CONST, {.i64=MODE_PC_N}, INT_MIN, INT_MAX, FLAGS, "mode" },
+ { "pc_u", "2-way match + 3rd match (same order) on combed (p/c + u)", 0, AV_OPT_TYPE_CONST, {.i64=MODE_PC_U}, INT_MIN, INT_MAX, FLAGS, "mode" },
+ { "pc_n_ub", "2-way match + 3rd match on combed + 4th/5th matches if still combed (p/c + u + u/b)", 0, AV_OPT_TYPE_CONST, {.i64=MODE_PC_N_UB}, INT_MIN, INT_MAX, FLAGS, "mode" },
+ { "pcn", "3-way match (p/c/n)", 0, AV_OPT_TYPE_CONST, {.i64=MODE_PCN}, INT_MIN, INT_MAX, FLAGS, "mode" },
+ { "pcn_ub", "3-way match + 4th/5th matches on combed (p/c/n + u/b)", 0, AV_OPT_TYPE_CONST, {.i64=MODE_PCN_UB}, INT_MIN, INT_MAX, FLAGS, "mode" },
+ { "ppsrc", "mark main input as a pre-processed input and activate clean source input stream", OFFSET(ppsrc), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS },
+ { "field", "set the field to match from", OFFSET(field), AV_OPT_TYPE_INT, {.i64=FM_PARITY_AUTO}, -1, 1, FLAGS, "field" },
+ { "auto", "automatic (same value as 'order')", 0, AV_OPT_TYPE_CONST, {.i64=FM_PARITY_AUTO}, INT_MIN, INT_MAX, FLAGS, "field" },
+ { "bottom", "bottom field", 0, AV_OPT_TYPE_CONST, {.i64=FM_PARITY_BOTTOM}, INT_MIN, INT_MAX, FLAGS, "field" },
+ { "top", "top field", 0, AV_OPT_TYPE_CONST, {.i64=FM_PARITY_TOP}, INT_MIN, INT_MAX, FLAGS, "field" },
+ { "mchroma", "set whether or not chroma is included during the match comparisons", OFFSET(mchroma), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, FLAGS },
+ { "y0", "define an exclusion band which excludes the lines between y0 and y1 from the field matching decision", OFFSET(y0), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, FLAGS },
+ { "y1", "define an exclusion band which excludes the lines between y0 and y1 from the field matching decision", OFFSET(y1), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, FLAGS },
+ { "scthresh", "set scene change detection threshold", OFFSET(scthresh_flt), AV_OPT_TYPE_DOUBLE, {.dbl=12}, 0, 100, FLAGS },
+ { "combmatch", "set combmatching mode", OFFSET(combmatch), AV_OPT_TYPE_INT, {.i64=COMBMATCH_SC}, COMBMATCH_NONE, NB_COMBMATCH-1, FLAGS, "combmatching" },
+ { "none", "disable combmatching", 0, AV_OPT_TYPE_CONST, {.i64=COMBMATCH_NONE}, INT_MIN, INT_MAX, FLAGS, "combmatching" },
+ { "sc", "enable combmatching only on scene change", 0, AV_OPT_TYPE_CONST, {.i64=COMBMATCH_SC}, INT_MIN, INT_MAX, FLAGS, "combmatching" },
+ { "full", "enable combmatching all the time", 0, AV_OPT_TYPE_CONST, {.i64=COMBMATCH_FULL}, INT_MIN, INT_MAX, FLAGS, "combmatching" },
+ { "combdbg", "enable comb debug", OFFSET(combdbg), AV_OPT_TYPE_INT, {.i64=COMBDBG_NONE}, COMBDBG_NONE, NB_COMBDBG-1, FLAGS, "dbglvl" },
+ { "none", "no forced calculation", 0, AV_OPT_TYPE_CONST, {.i64=COMBDBG_NONE}, INT_MIN, INT_MAX, FLAGS, "dbglvl" },
+ { "pcn", "calculate p/c/n", 0, AV_OPT_TYPE_CONST, {.i64=COMBDBG_PCN}, INT_MIN, INT_MAX, FLAGS, "dbglvl" },
+ { "pcnub", "calculate p/c/n/u/b", 0, AV_OPT_TYPE_CONST, {.i64=COMBDBG_PCNUB}, INT_MIN, INT_MAX, FLAGS, "dbglvl" },
+ { "cthresh", "set the area combing threshold used for combed frame detection", OFFSET(cthresh), AV_OPT_TYPE_INT, {.i64= 9}, -1, 0xff, FLAGS },
+ { "chroma", "set whether or not chroma is considered in the combed frame decision", OFFSET(chroma), AV_OPT_TYPE_BOOL,{.i64= 0}, 0, 1, FLAGS },
+ { "blockx", "set the x-axis size of the window used during combed frame detection", OFFSET(blockx), AV_OPT_TYPE_INT, {.i64=16}, 4, 1<<9, FLAGS },
+ { "blocky", "set the y-axis size of the window used during combed frame detection", OFFSET(blocky), AV_OPT_TYPE_INT, {.i64=16}, 4, 1<<9, FLAGS },
+ { "combpel", "set the number of combed pixels inside any of the blocky by blockx size blocks on the frame for the frame to be detected as combed", OFFSET(combpel), AV_OPT_TYPE_INT, {.i64=80}, 0, INT_MAX, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(fieldmatch);
+
+static int get_width(const FieldMatchContext *fm, const AVFrame *f, int plane)
+{
+ return plane ? AV_CEIL_RSHIFT(f->width, fm->hsub) : f->width;
+}
+
+static int get_height(const FieldMatchContext *fm, const AVFrame *f, int plane)
+{
+ return plane ? AV_CEIL_RSHIFT(f->height, fm->vsub) : f->height;
+}
+
+static int64_t luma_abs_diff(const AVFrame *f1, const AVFrame *f2)
+{
+ int x, y;
+ const uint8_t *srcp1 = f1->data[0];
+ const uint8_t *srcp2 = f2->data[0];
+ const int src1_linesize = f1->linesize[0];
+ const int src2_linesize = f2->linesize[0];
+ const int width = f1->width;
+ const int height = f1->height;
+ int64_t acc = 0;
+
+ for (y = 0; y < height; y++) {
+ for (x = 0; x < width; x++)
+ acc += abs(srcp1[x] - srcp2[x]);
+ srcp1 += src1_linesize;
+ srcp2 += src2_linesize;
+ }
+ return acc;
+}
+
+static void fill_buf(uint8_t *data, int w, int h, int linesize, uint8_t v)
+{
+ int y;
+
+ for (y = 0; y < h; y++) {
+ memset(data, v, w);
+ data += linesize;
+ }
+}
+
+static int calc_combed_score(const FieldMatchContext *fm, const AVFrame *src)
+{
+ int x, y, plane, max_v = 0;
+ const int cthresh = fm->cthresh;
+ const int cthresh6 = cthresh * 6;
+
+ for (plane = 0; plane < (fm->chroma ? 3 : 1); plane++) {
+ const uint8_t *srcp = src->data[plane];
+ const int src_linesize = src->linesize[plane];
+ const int width = get_width (fm, src, plane);
+ const int height = get_height(fm, src, plane);
+ uint8_t *cmkp = fm->cmask_data[plane];
+ const int cmk_linesize = fm->cmask_linesize[plane];
+
+ if (cthresh < 0) {
+ fill_buf(cmkp, width, height, cmk_linesize, 0xff);
+ continue;
+ }
+ fill_buf(cmkp, width, height, cmk_linesize, 0);
+
+ /* [1 -3 4 -3 1] vertical filter */
+#define FILTER(xm2, xm1, xp1, xp2) \
+ abs( 4 * srcp[x] \
+ -3 * (srcp[x + (xm1)*src_linesize] + srcp[x + (xp1)*src_linesize]) \
+ + (srcp[x + (xm2)*src_linesize] + srcp[x + (xp2)*src_linesize])) > cthresh6
+
+ /* first line */
+ for (x = 0; x < width; x++) {
+ const int s1 = abs(srcp[x] - srcp[x + src_linesize]);
+ if (s1 > cthresh && FILTER(2, 1, 1, 2))
+ cmkp[x] = 0xff;
+ }
+ srcp += src_linesize;
+ cmkp += cmk_linesize;
+
+ /* second line */
+ for (x = 0; x < width; x++) {
+ const int s1 = abs(srcp[x] - srcp[x - src_linesize]);
+ const int s2 = abs(srcp[x] - srcp[x + src_linesize]);
+ if (s1 > cthresh && s2 > cthresh && FILTER(2, -1, 1, 2))
+ cmkp[x] = 0xff;
+ }
+ srcp += src_linesize;
+ cmkp += cmk_linesize;
+
+ /* all lines minus first two and last two */
+ for (y = 2; y < height-2; y++) {
+ for (x = 0; x < width; x++) {
+ const int s1 = abs(srcp[x] - srcp[x - src_linesize]);
+ const int s2 = abs(srcp[x] - srcp[x + src_linesize]);
+ if (s1 > cthresh && s2 > cthresh && FILTER(-2, -1, 1, 2))
+ cmkp[x] = 0xff;
+ }
+ srcp += src_linesize;
+ cmkp += cmk_linesize;
+ }
+
+ /* before-last line */
+ for (x = 0; x < width; x++) {
+ const int s1 = abs(srcp[x] - srcp[x - src_linesize]);
+ const int s2 = abs(srcp[x] - srcp[x + src_linesize]);
+ if (s1 > cthresh && s2 > cthresh && FILTER(-2, -1, 1, -2))
+ cmkp[x] = 0xff;
+ }
+ srcp += src_linesize;
+ cmkp += cmk_linesize;
+
+ /* last line */
+ for (x = 0; x < width; x++) {
+ const int s1 = abs(srcp[x] - srcp[x - src_linesize]);
+ if (s1 > cthresh && FILTER(-2, -1, -1, -2))
+ cmkp[x] = 0xff;
+ }
+ }
+
+ if (fm->chroma) {
+ uint8_t *cmkp = fm->cmask_data[0];
+ uint8_t *cmkpU = fm->cmask_data[1];
+ uint8_t *cmkpV = fm->cmask_data[2];
+ const int width = AV_CEIL_RSHIFT(src->width, fm->hsub);
+ const int height = AV_CEIL_RSHIFT(src->height, fm->vsub);
+ const int cmk_linesize = fm->cmask_linesize[0] << 1;
+ const int cmk_linesizeUV = fm->cmask_linesize[2];
+ uint8_t *cmkpp = cmkp - (cmk_linesize>>1);
+ uint8_t *cmkpn = cmkp + (cmk_linesize>>1);
+ uint8_t *cmkpnn = cmkp + cmk_linesize;
+ for (y = 1; y < height - 1; y++) {
+ cmkpp += cmk_linesize;
+ cmkp += cmk_linesize;
+ cmkpn += cmk_linesize;
+ cmkpnn += cmk_linesize;
+ cmkpV += cmk_linesizeUV;
+ cmkpU += cmk_linesizeUV;
+ for (x = 1; x < width - 1; x++) {
+#define HAS_FF_AROUND(p, lz) (p[(x)-1 - (lz)] == 0xff || p[(x) - (lz)] == 0xff || p[(x)+1 - (lz)] == 0xff || \
+ p[(x)-1 ] == 0xff || p[(x)+1 ] == 0xff || \
+ p[(x)-1 + (lz)] == 0xff || p[(x) + (lz)] == 0xff || p[(x)+1 + (lz)] == 0xff)
+ if ((cmkpV[x] == 0xff && HAS_FF_AROUND(cmkpV, cmk_linesizeUV)) ||
+ (cmkpU[x] == 0xff && HAS_FF_AROUND(cmkpU, cmk_linesizeUV))) {
+ ((uint16_t*)cmkp)[x] = 0xffff;
+ ((uint16_t*)cmkpn)[x] = 0xffff;
+ if (y&1) ((uint16_t*)cmkpp)[x] = 0xffff;
+ else ((uint16_t*)cmkpnn)[x] = 0xffff;
+ }
+ }
+ }
+ }
+
+ {
+ const int blockx = fm->blockx;
+ const int blocky = fm->blocky;
+ const int xhalf = blockx/2;
+ const int yhalf = blocky/2;
+ const int cmk_linesize = fm->cmask_linesize[0];
+ const uint8_t *cmkp = fm->cmask_data[0] + cmk_linesize;
+ const int width = src->width;
+ const int height = src->height;
+ const int xblocks = ((width+xhalf)/blockx) + 1;
+ const int xblocks4 = xblocks<<2;
+ const int yblocks = ((height+yhalf)/blocky) + 1;
+ int *c_array = fm->c_array;
+ const int arraysize = (xblocks*yblocks)<<2;
+ int heighta = (height/(blocky/2))*(blocky/2);
+ const int widtha = (width /(blockx/2))*(blockx/2);
+ if (heighta == height)
+ heighta = height - yhalf;
+ memset(c_array, 0, arraysize * sizeof(*c_array));
+
+#define C_ARRAY_ADD(v) do { \
+ const int box1 = (x / blockx) * 4; \
+ const int box2 = ((x + xhalf) / blockx) * 4; \
+ c_array[temp1 + box1 ] += v; \
+ c_array[temp1 + box2 + 1] += v; \
+ c_array[temp2 + box1 + 2] += v; \
+ c_array[temp2 + box2 + 3] += v; \
+} while (0)
+
+#define VERTICAL_HALF(y_start, y_end) do { \
+ for (y = y_start; y < y_end; y++) { \
+ const int temp1 = (y / blocky) * xblocks4; \
+ const int temp2 = ((y + yhalf) / blocky) * xblocks4; \
+ for (x = 0; x < width; x++) \
+ if (cmkp[x - cmk_linesize] == 0xff && \
+ cmkp[x ] == 0xff && \
+ cmkp[x + cmk_linesize] == 0xff) \
+ C_ARRAY_ADD(1); \
+ cmkp += cmk_linesize; \
+ } \
+} while (0)
+
+ VERTICAL_HALF(1, yhalf);
+
+ for (y = yhalf; y < heighta; y += yhalf) {
+ const int temp1 = (y / blocky) * xblocks4;
+ const int temp2 = ((y + yhalf) / blocky) * xblocks4;
+
+ for (x = 0; x < widtha; x += xhalf) {
+ const uint8_t *cmkp_tmp = cmkp + x;
+ int u, v, sum = 0;
+ for (u = 0; u < yhalf; u++) {
+ for (v = 0; v < xhalf; v++)
+ if (cmkp_tmp[v - cmk_linesize] == 0xff &&
+ cmkp_tmp[v ] == 0xff &&
+ cmkp_tmp[v + cmk_linesize] == 0xff)
+ sum++;
+ cmkp_tmp += cmk_linesize;
+ }
+ if (sum)
+ C_ARRAY_ADD(sum);
+ }
+
+ for (x = widtha; x < width; x++) {
+ const uint8_t *cmkp_tmp = cmkp + x;
+ int u, sum = 0;
+ for (u = 0; u < yhalf; u++) {
+ if (cmkp_tmp[-cmk_linesize] == 0xff &&
+ cmkp_tmp[ 0] == 0xff &&
+ cmkp_tmp[ cmk_linesize] == 0xff)
+ sum++;
+ cmkp_tmp += cmk_linesize;
+ }
+ if (sum)
+ C_ARRAY_ADD(sum);
+ }
+
+ cmkp += cmk_linesize * yhalf;
+ }
+
+ VERTICAL_HALF(heighta, height - 1);
+
+ for (x = 0; x < arraysize; x++)
+ if (c_array[x] > max_v)
+ max_v = c_array[x];
+ }
+ return max_v;
+}
+
+// the secret is that tbuffer is an interlaced, offset subset of all the lines
+static void build_abs_diff_mask(const uint8_t *prvp, int prv_linesize,
+ const uint8_t *nxtp, int nxt_linesize,
+ uint8_t *tbuffer, int tbuf_linesize,
+ int width, int height)
+{
+ int y, x;
+
+ prvp -= prv_linesize;
+ nxtp -= nxt_linesize;
+ for (y = 0; y < height; y++) {
+ for (x = 0; x < width; x++)
+ tbuffer[x] = FFABS(prvp[x] - nxtp[x]);
+ prvp += prv_linesize;
+ nxtp += nxt_linesize;
+ tbuffer += tbuf_linesize;
+ }
+}
+
+/**
+ * Build a map over which pixels differ a lot/a little
+ */
+static void build_diff_map(FieldMatchContext *fm,
+ const uint8_t *prvp, int prv_linesize,
+ const uint8_t *nxtp, int nxt_linesize,
+ uint8_t *dstp, int dst_linesize, int height,
+ int width, int plane)
+{
+ int x, y, u, diff, count;
+ int tpitch = plane ? fm->tpitchuv : fm->tpitchy;
+ const uint8_t *dp = fm->tbuffer + tpitch;
+
+ build_abs_diff_mask(prvp, prv_linesize, nxtp, nxt_linesize,
+ fm->tbuffer, tpitch, width, height>>1);
+
+ for (y = 2; y < height - 2; y += 2) {
+ for (x = 1; x < width - 1; x++) {
+ diff = dp[x];
+ if (diff > 3) {
+ for (count = 0, u = x-1; u < x+2 && count < 2; u++) {
+ count += dp[u-tpitch] > 3;
+ count += dp[u ] > 3;
+ count += dp[u+tpitch] > 3;
+ }
+ if (count > 1) {
+ dstp[x] = 1;
+ if (diff > 19) {
+ int upper = 0, lower = 0;
+ for (count = 0, u = x-1; u < x+2 && count < 6; u++) {
+ if (dp[u-tpitch] > 19) { count++; upper = 1; }
+ if (dp[u ] > 19) count++;
+ if (dp[u+tpitch] > 19) { count++; lower = 1; }
+ }
+ if (count > 3) {
+ if (upper && lower) {
+ dstp[x] |= 1<<1;
+ } else {
+ int upper2 = 0, lower2 = 0;
+ for (u = FFMAX(x-4,0); u < FFMIN(x+5,width); u++) {
+ if (y != 2 && dp[u-2*tpitch] > 19) upper2 = 1;
+ if ( dp[u- tpitch] > 19) upper = 1;
+ if ( dp[u+ tpitch] > 19) lower = 1;
+ if (y != height-4 && dp[u+2*tpitch] > 19) lower2 = 1;
+ }
+ if ((upper && (lower || upper2)) ||
+ (lower && (upper || lower2)))
+ dstp[x] |= 1<<1;
+ else if (count > 5)
+ dstp[x] |= 1<<2;
+ }
+ }
+ }
+ }
+ }
+ }
+ dp += tpitch;
+ dstp += dst_linesize;
+ }
+}
+
+enum { mP, mC, mN, mB, mU };
+
+static int get_field_base(int match, int field)
+{
+ return match < 3 ? 2 - field : 1 + field;
+}
+
+static AVFrame *select_frame(FieldMatchContext *fm, int match)
+{
+ if (match == mP || match == mB) return fm->prv;
+ else if (match == mN || match == mU) return fm->nxt;
+ else /* match == mC */ return fm->src;
+}
+
+static int compare_fields(FieldMatchContext *fm, int match1, int match2, int field)
+{
+ int plane, ret;
+ uint64_t accumPc = 0, accumPm = 0, accumPml = 0;
+ uint64_t accumNc = 0, accumNm = 0, accumNml = 0;
+ int norm1, norm2, mtn1, mtn2;
+ float c1, c2, mr;
+ const AVFrame *src = fm->src;
+
+ for (plane = 0; plane < (fm->mchroma ? 3 : 1); plane++) {
+ int x, y, temp1, temp2, fbase;
+ const AVFrame *prev, *next;
+ uint8_t *mapp = fm->map_data[plane];
+ int map_linesize = fm->map_linesize[plane];
+ const uint8_t *srcp = src->data[plane];
+ const int src_linesize = src->linesize[plane];
+ const int srcf_linesize = src_linesize << 1;
+ int prv_linesize, nxt_linesize;
+ int prvf_linesize, nxtf_linesize;
+ const int width = get_width (fm, src, plane);
+ const int height = get_height(fm, src, plane);
+ const int y0a = fm->y0 >> (plane != 0);
+ const int y1a = fm->y1 >> (plane != 0);
+ const int startx = (plane == 0 ? 8 : 4);
+ const int stopx = width - startx;
+ const uint8_t *srcpf, *srcf, *srcnf;
+ const uint8_t *prvpf, *prvnf, *nxtpf, *nxtnf;
+
+ fill_buf(mapp, width, height, map_linesize, 0);
+
+ /* match1 */
+ fbase = get_field_base(match1, field);
+ srcf = srcp + (fbase + 1) * src_linesize;
+ srcpf = srcf - srcf_linesize;
+ srcnf = srcf + srcf_linesize;
+ mapp = mapp + fbase * map_linesize;
+ prev = select_frame(fm, match1);
+ prv_linesize = prev->linesize[plane];
+ prvf_linesize = prv_linesize << 1;
+ prvpf = prev->data[plane] + fbase * prv_linesize; // previous frame, previous field
+ prvnf = prvpf + prvf_linesize; // previous frame, next field
+
+ /* match2 */
+ fbase = get_field_base(match2, field);
+ next = select_frame(fm, match2);
+ nxt_linesize = next->linesize[plane];
+ nxtf_linesize = nxt_linesize << 1;
+ nxtpf = next->data[plane] + fbase * nxt_linesize; // next frame, previous field
+ nxtnf = nxtpf + nxtf_linesize; // next frame, next field
+
+ map_linesize <<= 1;
+ if ((match1 >= 3 && field == 1) || (match1 < 3 && field != 1))
+ build_diff_map(fm, prvpf, prvf_linesize, nxtpf, nxtf_linesize,
+ mapp, map_linesize, height, width, plane);
+ else
+ build_diff_map(fm, prvnf, prvf_linesize, nxtnf, nxtf_linesize,
+ mapp + map_linesize, map_linesize, height, width, plane);
+
+ for (y = 2; y < height - 2; y += 2) {
+ if (y0a == y1a || y < y0a || y > y1a) {
+ for (x = startx; x < stopx; x++) {
+ if (mapp[x] > 0 || mapp[x + map_linesize] > 0) {
+ temp1 = srcpf[x] + (srcf[x] << 2) + srcnf[x]; // [1 4 1]
+
+ temp2 = abs(3 * (prvpf[x] + prvnf[x]) - temp1);
+ if (temp2 > 23 && ((mapp[x]&1) || (mapp[x + map_linesize]&1)))
+ accumPc += temp2;
+ if (temp2 > 42) {
+ if ((mapp[x]&2) || (mapp[x + map_linesize]&2))
+ accumPm += temp2;
+ if ((mapp[x]&4) || (mapp[x + map_linesize]&4))
+ accumPml += temp2;
+ }
+
+ temp2 = abs(3 * (nxtpf[x] + nxtnf[x]) - temp1);
+ if (temp2 > 23 && ((mapp[x]&1) || (mapp[x + map_linesize]&1)))
+ accumNc += temp2;
+ if (temp2 > 42) {
+ if ((mapp[x]&2) || (mapp[x + map_linesize]&2))
+ accumNm += temp2;
+ if ((mapp[x]&4) || (mapp[x + map_linesize]&4))
+ accumNml += temp2;
+ }
+ }
+ }
+ }
+ prvpf += prvf_linesize;
+ prvnf += prvf_linesize;
+ srcpf += srcf_linesize;
+ srcf += srcf_linesize;
+ srcnf += srcf_linesize;
+ nxtpf += nxtf_linesize;
+ nxtnf += nxtf_linesize;
+ mapp += map_linesize;
+ }
+ }
+
+ if (accumPm < 500 && accumNm < 500 && (accumPml >= 500 || accumNml >= 500) &&
+ FFMAX(accumPml,accumNml) > 3*FFMIN(accumPml,accumNml)) {
+ accumPm = accumPml;
+ accumNm = accumNml;
+ }
+
+ norm1 = (int)((accumPc / 6.0f) + 0.5f);
+ norm2 = (int)((accumNc / 6.0f) + 0.5f);
+ mtn1 = (int)((accumPm / 6.0f) + 0.5f);
+ mtn2 = (int)((accumNm / 6.0f) + 0.5f);
+ c1 = ((float)FFMAX(norm1,norm2)) / ((float)FFMAX(FFMIN(norm1,norm2),1));
+ c2 = ((float)FFMAX(mtn1, mtn2)) / ((float)FFMAX(FFMIN(mtn1, mtn2), 1));
+ mr = ((float)FFMAX(mtn1, mtn2)) / ((float)FFMAX(FFMAX(norm1,norm2),1));
+ if (((mtn1 >= 500 || mtn2 >= 500) && (mtn1*2 < mtn2*1 || mtn2*2 < mtn1*1)) ||
+ ((mtn1 >= 1000 || mtn2 >= 1000) && (mtn1*3 < mtn2*2 || mtn2*3 < mtn1*2)) ||
+ ((mtn1 >= 2000 || mtn2 >= 2000) && (mtn1*5 < mtn2*4 || mtn2*5 < mtn1*4)) ||
+ ((mtn1 >= 4000 || mtn2 >= 4000) && c2 > c1))
+ ret = mtn1 > mtn2 ? match2 : match1;
+ else if (mr > 0.005 && FFMAX(mtn1, mtn2) > 150 && (mtn1*2 < mtn2*1 || mtn2*2 < mtn1*1))
+ ret = mtn1 > mtn2 ? match2 : match1;
+ else
+ ret = norm1 > norm2 ? match2 : match1;
+ return ret;
+}
+
+static void copy_fields(const FieldMatchContext *fm, AVFrame *dst,
+ const AVFrame *src, int field)
+{
+ int plane;
+ for (plane = 0; plane < 4 && src->data[plane] && src->linesize[plane]; plane++) {
+ const int plane_h = get_height(fm, src, plane);
+ const int nb_copy_fields = (plane_h >> 1) + (field ? 0 : (plane_h & 1));
+ av_image_copy_plane(dst->data[plane] + field*dst->linesize[plane], dst->linesize[plane] << 1,
+ src->data[plane] + field*src->linesize[plane], src->linesize[plane] << 1,
+ get_width(fm, src, plane), nb_copy_fields);
+ }
+}
+
+static AVFrame *create_weave_frame(AVFilterContext *ctx, int match, int field,
+ const AVFrame *prv, AVFrame *src, const AVFrame *nxt)
+{
+ AVFrame *dst;
+ FieldMatchContext *fm = ctx->priv;
+
+ if (match == mC) {
+ dst = av_frame_clone(src);
+ } else {
+ AVFilterLink *outlink = ctx->outputs[0];
+
+ dst = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!dst)
+ return NULL;
+ av_frame_copy_props(dst, src);
+
+ switch (match) {
+ case mP: copy_fields(fm, dst, src, 1-field); copy_fields(fm, dst, prv, field); break;
+ case mN: copy_fields(fm, dst, src, 1-field); copy_fields(fm, dst, nxt, field); break;
+ case mB: copy_fields(fm, dst, src, field); copy_fields(fm, dst, prv, 1-field); break;
+ case mU: copy_fields(fm, dst, src, field); copy_fields(fm, dst, nxt, 1-field); break;
+ default: av_assert0(0);
+ }
+ }
+ return dst;
+}
+
+static int checkmm(AVFilterContext *ctx, int *combs, int m1, int m2,
+ AVFrame **gen_frames, int field)
+{
+ const FieldMatchContext *fm = ctx->priv;
+
+#define LOAD_COMB(mid) do { \
+ if (combs[mid] < 0) { \
+ if (!gen_frames[mid]) \
+ gen_frames[mid] = create_weave_frame(ctx, mid, field, \
+ fm->prv, fm->src, fm->nxt); \
+ combs[mid] = calc_combed_score(fm, gen_frames[mid]); \
+ } \
+} while (0)
+
+ LOAD_COMB(m1);
+ LOAD_COMB(m2);
+
+ if ((combs[m2] * 3 < combs[m1] || (combs[m2] * 2 < combs[m1] && combs[m1] > fm->combpel)) &&
+ abs(combs[m2] - combs[m1]) >= 30 && combs[m2] < fm->combpel)
+ return m2;
+ else
+ return m1;
+}
+
+static const int fxo0m[] = { mP, mC, mN, mB, mU };
+static const int fxo1m[] = { mN, mC, mP, mU, mB };
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ FieldMatchContext *fm = ctx->priv;
+ int combs[] = { -1, -1, -1, -1, -1 };
+ int order, field, i, match, sc = 0;
+ const int *fxo;
+ AVFrame *gen_frames[] = { NULL, NULL, NULL, NULL, NULL };
+ AVFrame *dst;
+
+ /* update frames queue(s) */
+#define SLIDING_FRAME_WINDOW(prv, src, nxt) do { \
+ if (prv != src) /* 2nd loop exception (1st has prv==src and we don't want to loose src) */ \
+ av_frame_free(&prv); \
+ prv = src; \
+ src = nxt; \
+ if (in) \
+ nxt = in; \
+ if (!prv) \
+ prv = src; \
+ if (!prv) /* received only one frame at that point */ \
+ return 0; \
+ av_assert0(prv && src && nxt); \
+} while (0)
+ if (FF_INLINK_IDX(inlink) == INPUT_MAIN) {
+ SLIDING_FRAME_WINDOW(fm->prv, fm->src, fm->nxt);
+ fm->got_frame[INPUT_MAIN] = 1;
+ } else {
+ SLIDING_FRAME_WINDOW(fm->prv2, fm->src2, fm->nxt2);
+ fm->got_frame[INPUT_CLEANSRC] = 1;
+ }
+ if (!fm->got_frame[INPUT_MAIN] || (fm->ppsrc && !fm->got_frame[INPUT_CLEANSRC]))
+ return 0;
+ fm->got_frame[INPUT_MAIN] = fm->got_frame[INPUT_CLEANSRC] = 0;
+ in = fm->src;
+
+ /* parity */
+ order = fm->order != FM_PARITY_AUTO ? fm->order : (in->interlaced_frame ? in->top_field_first : 1);
+ field = fm->field != FM_PARITY_AUTO ? fm->field : order;
+ av_assert0(order == 0 || order == 1 || field == 0 || field == 1);
+ fxo = field ^ order ? fxo1m : fxo0m;
+
+ /* debug mode: we generate all the fields combinations and their associated
+ * combed score. XXX: inject as frame metadata? */
+ if (fm->combdbg) {
+ for (i = 0; i < FF_ARRAY_ELEMS(combs); i++) {
+ if (i > mN && fm->combdbg == COMBDBG_PCN)
+ break;
+ gen_frames[i] = create_weave_frame(ctx, i, field, fm->prv, fm->src, fm->nxt);
+ if (!gen_frames[i])
+ return AVERROR(ENOMEM);
+ combs[i] = calc_combed_score(fm, gen_frames[i]);
+ }
+ av_log(ctx, AV_LOG_INFO, "COMBS: %3d %3d %3d %3d %3d\n",
+ combs[0], combs[1], combs[2], combs[3], combs[4]);
+ } else {
+ gen_frames[mC] = av_frame_clone(fm->src);
+ if (!gen_frames[mC])
+ return AVERROR(ENOMEM);
+ }
+
+ /* p/c selection and optional 3-way p/c/n matches */
+ match = compare_fields(fm, fxo[mC], fxo[mP], field);
+ if (fm->mode == MODE_PCN || fm->mode == MODE_PCN_UB)
+ match = compare_fields(fm, match, fxo[mN], field);
+
+ /* scene change check */
+ if (fm->combmatch == COMBMATCH_SC) {
+ if (fm->lastn == outlink->frame_count_in - 1) {
+ if (fm->lastscdiff > fm->scthresh)
+ sc = 1;
+ } else if (luma_abs_diff(fm->prv, fm->src) > fm->scthresh) {
+ sc = 1;
+ }
+
+ if (!sc) {
+ fm->lastn = outlink->frame_count_in;
+ fm->lastscdiff = luma_abs_diff(fm->src, fm->nxt);
+ sc = fm->lastscdiff > fm->scthresh;
+ }
+ }
+
+ if (fm->combmatch == COMBMATCH_FULL || (fm->combmatch == COMBMATCH_SC && sc)) {
+ switch (fm->mode) {
+ /* 2-way p/c matches */
+ case MODE_PC:
+ match = checkmm(ctx, combs, match, match == fxo[mP] ? fxo[mC] : fxo[mP], gen_frames, field);
+ break;
+ case MODE_PC_N:
+ match = checkmm(ctx, combs, match, fxo[mN], gen_frames, field);
+ break;
+ case MODE_PC_U:
+ match = checkmm(ctx, combs, match, fxo[mU], gen_frames, field);
+ break;
+ case MODE_PC_N_UB:
+ match = checkmm(ctx, combs, match, fxo[mN], gen_frames, field);
+ match = checkmm(ctx, combs, match, fxo[mU], gen_frames, field);
+ match = checkmm(ctx, combs, match, fxo[mB], gen_frames, field);
+ break;
+ /* 3-way p/c/n matches */
+ case MODE_PCN:
+ match = checkmm(ctx, combs, match, match == fxo[mP] ? fxo[mC] : fxo[mP], gen_frames, field);
+ break;
+ case MODE_PCN_UB:
+ match = checkmm(ctx, combs, match, fxo[mU], gen_frames, field);
+ match = checkmm(ctx, combs, match, fxo[mB], gen_frames, field);
+ break;
+ default:
+ av_assert0(0);
+ }
+ }
+
+ /* get output frame and drop the others */
+ if (fm->ppsrc) {
+ /* field matching was based on a filtered/post-processed input, we now
+ * pick the untouched fields from the clean source */
+ dst = create_weave_frame(ctx, match, field, fm->prv2, fm->src2, fm->nxt2);
+ } else {
+ if (!gen_frames[match]) { // XXX: is that possible?
+ dst = create_weave_frame(ctx, match, field, fm->prv, fm->src, fm->nxt);
+ } else {
+ dst = gen_frames[match];
+ gen_frames[match] = NULL;
+ }
+ }
+ if (!dst)
+ return AVERROR(ENOMEM);
+ for (i = 0; i < FF_ARRAY_ELEMS(gen_frames); i++)
+ av_frame_free(&gen_frames[i]);
+
+ /* mark the frame we are unable to match properly as interlaced so a proper
+ * de-interlacer can take the relay */
+ dst->interlaced_frame = combs[match] >= fm->combpel;
+ if (dst->interlaced_frame) {
+ av_log(ctx, AV_LOG_WARNING, "Frame #%"PRId64" at %s is still interlaced\n",
+ outlink->frame_count_in, av_ts2timestr(in->pts, &inlink->time_base));
+ dst->top_field_first = field;
+ }
+
+ av_log(ctx, AV_LOG_DEBUG, "SC:%d | COMBS: %3d %3d %3d %3d %3d (combpel=%d)"
+ " match=%d combed=%s\n", sc, combs[0], combs[1], combs[2], combs[3], combs[4],
+ fm->combpel, match, dst->interlaced_frame ? "YES" : "NO");
+
+ return ff_filter_frame(outlink, dst);
+}
+
+static int request_inlink(AVFilterContext *ctx, int lid)
+{
+ int ret = 0;
+ FieldMatchContext *fm = ctx->priv;
+
+ if (!fm->got_frame[lid]) {
+ AVFilterLink *inlink = ctx->inputs[lid];
+ ret = ff_request_frame(inlink);
+ if (ret == AVERROR_EOF) { // flushing
+ fm->eof |= 1 << lid;
+ ret = filter_frame(inlink, NULL);
+ }
+ }
+ return ret;
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ int ret;
+ AVFilterContext *ctx = outlink->src;
+ FieldMatchContext *fm = ctx->priv;
+ const uint32_t eof_mask = 1<<INPUT_MAIN | fm->ppsrc<<INPUT_CLEANSRC;
+
+ if ((fm->eof & eof_mask) == eof_mask) // flush done?
+ return AVERROR_EOF;
+ if ((ret = request_inlink(ctx, INPUT_MAIN)) < 0)
+ return ret;
+ if (fm->ppsrc && (ret = request_inlink(ctx, INPUT_CLEANSRC)) < 0)
+ return ret;
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ // TODO: second input source can support >8bit depth
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_NONE
+ };
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ int ret;
+ AVFilterContext *ctx = inlink->dst;
+ FieldMatchContext *fm = ctx->priv;
+ const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
+ const int w = inlink->w;
+ const int h = inlink->h;
+
+ fm->scthresh = (int64_t)((w * h * 255.0 * fm->scthresh_flt) / 100.0);
+
+ if ((ret = av_image_alloc(fm->map_data, fm->map_linesize, w, h, inlink->format, 32)) < 0 ||
+ (ret = av_image_alloc(fm->cmask_data, fm->cmask_linesize, w, h, inlink->format, 32)) < 0)
+ return ret;
+
+ fm->hsub = pix_desc->log2_chroma_w;
+ fm->vsub = pix_desc->log2_chroma_h;
+
+ fm->tpitchy = FFALIGN(w, 16);
+ fm->tpitchuv = FFALIGN(w >> 1, 16);
+
+ fm->tbuffer = av_malloc(h/2 * fm->tpitchy);
+ fm->c_array = av_malloc((((w + fm->blockx/2)/fm->blockx)+1) *
+ (((h + fm->blocky/2)/fm->blocky)+1) *
+ 4 * sizeof(*fm->c_array));
+ if (!fm->tbuffer || !fm->c_array)
+ return AVERROR(ENOMEM);
+
+ return 0;
+}
+
+static av_cold int fieldmatch_init(AVFilterContext *ctx)
+{
+ const FieldMatchContext *fm = ctx->priv;
+ AVFilterPad pad = {
+ .name = av_strdup("main"),
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ };
+
+ if (!pad.name)
+ return AVERROR(ENOMEM);
+ ff_insert_inpad(ctx, INPUT_MAIN, &pad);
+
+ if (fm->ppsrc) {
+ pad.name = av_strdup("clean_src");
+ pad.config_props = NULL;
+ if (!pad.name)
+ return AVERROR(ENOMEM);
+ ff_insert_inpad(ctx, INPUT_CLEANSRC, &pad);
+ }
+
+ if ((fm->blockx & (fm->blockx - 1)) ||
+ (fm->blocky & (fm->blocky - 1))) {
+ av_log(ctx, AV_LOG_ERROR, "blockx and blocky settings must be power of two\n");
+ return AVERROR(EINVAL);
+ }
+
+ if (fm->combpel > fm->blockx * fm->blocky) {
+ av_log(ctx, AV_LOG_ERROR, "Combed pixel should not be larger than blockx x blocky\n");
+ return AVERROR(EINVAL);
+ }
+
+ return 0;
+}
+
+static av_cold void fieldmatch_uninit(AVFilterContext *ctx)
+{
+ int i;
+ FieldMatchContext *fm = ctx->priv;
+
+ if (fm->prv != fm->src)
+ av_frame_free(&fm->prv);
+ if (fm->nxt != fm->src)
+ av_frame_free(&fm->nxt);
+ av_frame_free(&fm->src);
+ av_freep(&fm->map_data[0]);
+ av_freep(&fm->cmask_data[0]);
+ av_freep(&fm->tbuffer);
+ av_freep(&fm->c_array);
+ for (i = 0; i < ctx->nb_inputs; i++)
+ av_freep(&ctx->input_pads[i].name);
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ const FieldMatchContext *fm = ctx->priv;
+ const AVFilterLink *inlink =
+ ctx->inputs[fm->ppsrc ? INPUT_CLEANSRC : INPUT_MAIN];
+
+ outlink->time_base = inlink->time_base;
+ outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
+ outlink->frame_rate = inlink->frame_rate;
+ outlink->w = inlink->w;
+ outlink->h = inlink->h;
+ return 0;
+}
+
+static const AVFilterPad fieldmatch_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .request_frame = request_frame,
+ .config_props = config_output,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_fieldmatch = {
+ .name = "fieldmatch",
+ .description = NULL_IF_CONFIG_SMALL("Field matching for inverse telecine."),
+ .query_formats = query_formats,
+ .priv_size = sizeof(FieldMatchContext),
+ .init = fieldmatch_init,
+ .uninit = fieldmatch_uninit,
+ .inputs = NULL,
+ .outputs = fieldmatch_outputs,
+ .priv_class = &fieldmatch_class,
+ .flags = AVFILTER_FLAG_DYNAMIC_INPUTS,
+};
diff --git a/libavfilter/vf_fieldorder.c b/libavfilter/vf_fieldorder.c
index dd4f8cc694..ca55ff1f66 100644
--- a/libavfilter/vf_fieldorder.c
+++ b/libavfilter/vf_fieldorder.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2011 Mark Himsley
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -23,9 +23,6 @@
* video field order filter, heavily influenced by vf_pad.c
*/
-#include <stdio.h>
-#include <string.h>
-
#include "libavutil/imgutils.h"
#include "libavutil/internal.h"
#include "libavutil/opt.h"
@@ -55,15 +52,15 @@ static int query_formats(AVFilterContext *ctx)
while ((desc = av_pix_fmt_desc_next(desc))) {
pix_fmt = av_pix_fmt_desc_get_id(desc);
if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL ||
+ desc->flags & AV_PIX_FMT_FLAG_PAL ||
desc->flags & AV_PIX_FMT_FLAG_BITSTREAM) &&
desc->nb_components && !desc->log2_chroma_h &&
- (ret = ff_add_format(&formats, pix_fmt)) < 0) {
- ff_formats_unref(&formats);
+ (ret = ff_add_format(&formats, pix_fmt)) < 0)
return ret;
- }
}
- ff_formats_ref(formats, &ctx->inputs[0]->out_formats);
- ff_formats_ref(formats, &ctx->outputs[0]->in_formats);
+ if ((ret = ff_formats_ref(formats, &ctx->inputs[0]->out_formats)) < 0 ||
+ (ret = ff_formats_ref(formats, &ctx->outputs[0]->in_formats)) < 0)
+ return ret;
}
return 0;
@@ -73,16 +70,8 @@ static int config_input(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->dst;
FieldOrderContext *s = ctx->priv;
- int plane;
-
- /** full an array with the number of bytes that the video
- * data occupies per line for each plane of the input video */
- for (plane = 0; plane < 4; plane++) {
- s->line_size[plane] = av_image_get_linesize(inlink->format, inlink->w,
- plane);
- }
- return 0;
+ return av_image_fill_linesizes(s->line_size, inlink->format, inlink->w);
}
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
@@ -90,8 +79,9 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
AVFilterContext *ctx = inlink->dst;
FieldOrderContext *s = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
- int h, plane, line_step, line_size, line;
- uint8_t *data;
+ int h, plane, src_line_step, dst_line_step, line_size, line;
+ uint8_t *dst, *src;
+ AVFrame *out;
if (!frame->interlaced_frame ||
frame->top_field_first == s->dst_tff) {
@@ -102,14 +92,27 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
return ff_filter_frame(outlink, frame);
}
+ if (av_frame_is_writable(frame)) {
+ out = frame;
+ } else {
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&frame);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, frame);
+ }
+
av_log(ctx, AV_LOG_TRACE,
"picture will move %s one line\n",
s->dst_tff ? "up" : "down");
h = frame->height;
- for (plane = 0; plane < 4 && frame->data[plane]; plane++) {
- line_step = frame->linesize[plane];
+ for (plane = 0; plane < 4 && frame->data[plane] && frame->linesize[plane]; plane++) {
+ dst_line_step = out->linesize[plane];
+ src_line_step = frame->linesize[plane];
line_size = s->line_size[plane];
- data = frame->data[plane];
+ dst = out->data[plane];
+ src = frame->data[plane];
if (s->dst_tff) {
/** Move every line up one line, working from
* the top to the bottom of the frame.
@@ -118,11 +121,12 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
* penultimate line from that field. */
for (line = 0; line < h; line++) {
if (1 + line < frame->height) {
- memcpy(data, data + line_step, line_size);
+ memcpy(dst, src + src_line_step, line_size);
} else {
- memcpy(data, data - line_step - line_step, line_size);
+ memcpy(dst, src - 2 * src_line_step, line_size);
}
- data += line_step;
+ dst += dst_line_step;
+ src += src_line_step;
}
} else {
/** Move every line down one line, working from
@@ -130,45 +134,44 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
* The original bottom line is lost.
* The new first line is created as a copy of the
* second line from that field. */
- data += (h - 1) * line_step;
+ dst += (h - 1) * dst_line_step;
+ src += (h - 1) * src_line_step;
for (line = h - 1; line >= 0 ; line--) {
if (line > 0) {
- memcpy(data, data - line_step, line_size);
+ memcpy(dst, src - src_line_step, line_size);
} else {
- memcpy(data, data + line_step + line_step, line_size);
+ memcpy(dst, src + 2 * src_line_step, line_size);
}
- data -= line_step;
+ dst -= dst_line_step;
+ src -= src_line_step;
}
}
}
- frame->top_field_first = s->dst_tff;
+ out->top_field_first = s->dst_tff;
- return ff_filter_frame(outlink, frame);
+ if (frame != out)
+ av_frame_free(&frame);
+ return ff_filter_frame(outlink, out);
}
#define OFFSET(x) offsetof(FieldOrderContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
-static const AVOption options[] = {
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption fieldorder_options[] = {
{ "order", "output field order", OFFSET(dst_tff), AV_OPT_TYPE_INT, { .i64 = 1 }, 0, 1, FLAGS, "order" },
- { "bff", "bottom field first", 0, AV_OPT_TYPE_CONST, { .i64 = 0 }, .unit = "order" },
- { "tff", "top field first", 0, AV_OPT_TYPE_CONST, { .i64 = 1 }, .unit = "order" },
- { NULL },
+ { "bff", "bottom field first", 0, AV_OPT_TYPE_CONST, { .i64 = 0 }, .flags=FLAGS, .unit = "order" },
+ { "tff", "top field first", 0, AV_OPT_TYPE_CONST, { .i64 = 1 }, .flags=FLAGS, .unit = "order" },
+ { NULL }
};
-static const AVClass fieldorder_class = {
- .class_name = "fieldorder",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(fieldorder);
static const AVFilterPad avfilter_vf_fieldorder_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .config_props = config_input,
- .filter_frame = filter_frame,
- .needs_writable = 1,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
},
{ NULL }
};
@@ -189,4 +192,5 @@ AVFilter ff_vf_fieldorder = {
.query_formats = query_formats,
.inputs = avfilter_vf_fieldorder_inputs,
.outputs = avfilter_vf_fieldorder_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
diff --git a/libavfilter/vf_find_rect.c b/libavfilter/vf_find_rect.c
new file mode 100644
index 0000000000..d7e6579af7
--- /dev/null
+++ b/libavfilter/vf_find_rect.c
@@ -0,0 +1,305 @@
+/*
+ * Copyright (c) 2014-2015 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/**
+ * @todo switch to dualinput
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/opt.h"
+#include "internal.h"
+
+#include "lavfutils.h"
+
+#define MAX_MIPMAPS 5
+
+typedef struct FOCContext {
+ AVClass *class;
+ float threshold;
+ int mipmaps;
+ int xmin, ymin, xmax, ymax;
+ char *obj_filename;
+ int last_x, last_y;
+ AVFrame *obj_frame;
+ AVFrame *needle_frame[MAX_MIPMAPS];
+ AVFrame *haystack_frame[MAX_MIPMAPS];
+} FOCContext;
+
+#define OFFSET(x) offsetof(FOCContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption find_rect_options[] = {
+ { "object", "object bitmap filename", OFFSET(obj_filename), AV_OPT_TYPE_STRING, {.str = NULL}, .flags = FLAGS },
+ { "threshold", "set threshold", OFFSET(threshold), AV_OPT_TYPE_FLOAT, {.dbl = 0.5}, 0, 1.0, FLAGS },
+ { "mipmaps", "set mipmaps", OFFSET(mipmaps), AV_OPT_TYPE_INT, {.i64 = 3}, 1, MAX_MIPMAPS, FLAGS },
+ { "xmin", "", OFFSET(xmin), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, FLAGS },
+ { "ymin", "", OFFSET(ymin), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, FLAGS },
+ { "xmax", "", OFFSET(xmax), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, FLAGS },
+ { "ymax", "", OFFSET(ymax), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(find_rect);
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_YUVJ420P,
+ AV_PIX_FMT_NONE
+ };
+
+ return ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+}
+
+static AVFrame *downscale(AVFrame *in)
+{
+ int x, y;
+ AVFrame *frame = av_frame_alloc();
+ uint8_t *src, *dst;
+ if (!frame)
+ return NULL;
+
+ frame->format = in->format;
+ frame->width = (in->width + 1) / 2;
+ frame->height = (in->height+ 1) / 2;
+
+ if (av_frame_get_buffer(frame, 32) < 0) {
+ av_frame_free(&frame);
+ return NULL;
+ }
+ src = in ->data[0];
+ dst = frame->data[0];
+
+ for(y = 0; y < frame->height; y++) {
+ for(x = 0; x < frame->width; x++) {
+ dst[x] = ( src[2*x+0]
+ + src[2*x+1]
+ + src[2*x+0 + in->linesize[0]]
+ + src[2*x+1 + in->linesize[0]]
+ + 2) >> 2;
+ }
+ src += 2*in->linesize[0];
+ dst += frame->linesize[0];
+ }
+ return frame;
+}
+
+static float compare(const AVFrame *haystack, const AVFrame *obj, int offx, int offy)
+{
+ int x,y;
+ int o_sum_v = 0;
+ int h_sum_v = 0;
+ int64_t oo_sum_v = 0;
+ int64_t hh_sum_v = 0;
+ int64_t oh_sum_v = 0;
+ float c;
+ int n = obj->height * obj->width;
+ const uint8_t *odat = obj ->data[0];
+ const uint8_t *hdat = haystack->data[0] + offx + offy * haystack->linesize[0];
+ int64_t o_sigma, h_sigma;
+
+ for(y = 0; y < obj->height; y++) {
+ for(x = 0; x < obj->width; x++) {
+ int o_v = odat[x];
+ int h_v = hdat[x];
+ o_sum_v += o_v;
+ h_sum_v += h_v;
+ oo_sum_v += o_v * o_v;
+ hh_sum_v += h_v * h_v;
+ oh_sum_v += o_v * h_v;
+ }
+ odat += obj->linesize[0];
+ hdat += haystack->linesize[0];
+ }
+ o_sigma = n*oo_sum_v - o_sum_v*(int64_t)o_sum_v;
+ h_sigma = n*hh_sum_v - h_sum_v*(int64_t)h_sum_v;
+
+ if (o_sigma == 0 || h_sigma == 0)
+ return 1.0;
+
+ c = (n*oh_sum_v - o_sum_v*(int64_t)h_sum_v) / (sqrt(o_sigma)*sqrt(h_sigma));
+
+ return 1 - fabs(c);
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ FOCContext *foc = ctx->priv;
+
+ if (foc->xmax <= 0)
+ foc->xmax = inlink->w - foc->obj_frame->width;
+ if (foc->ymax <= 0)
+ foc->ymax = inlink->h - foc->obj_frame->height;
+
+ return 0;
+}
+
+static float search(FOCContext *foc, int pass, int maxpass, int xmin, int xmax, int ymin, int ymax, int *best_x, int *best_y, float best_score)
+{
+ int x, y;
+
+ if (pass + 1 <= maxpass) {
+ int sub_x, sub_y;
+ search(foc, pass+1, maxpass, xmin>>1, (xmax+1)>>1, ymin>>1, (ymax+1)>>1, &sub_x, &sub_y, 1.0);
+ xmin = FFMAX(xmin, 2*sub_x - 4);
+ xmax = FFMIN(xmax, 2*sub_x + 4);
+ ymin = FFMAX(ymin, 2*sub_y - 4);
+ ymax = FFMIN(ymax, 2*sub_y + 4);
+ }
+
+ for (y = ymin; y <= ymax; y++) {
+ for (x = xmin; x <= xmax; x++) {
+ float score = compare(foc->haystack_frame[pass], foc->needle_frame[pass], x, y);
+ av_assert0(score != 0);
+ if (score < best_score) {
+ best_score = score;
+ *best_x = x;
+ *best_y = y;
+ }
+ }
+ }
+ return best_score;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ FOCContext *foc = ctx->priv;
+ float best_score;
+ int best_x, best_y;
+ int i;
+
+ foc->haystack_frame[0] = av_frame_clone(in);
+ for (i=1; i<foc->mipmaps; i++) {
+ foc->haystack_frame[i] = downscale(foc->haystack_frame[i-1]);
+ }
+
+ best_score = search(foc, 0, 0,
+ FFMAX(foc->xmin, foc->last_x - 8),
+ FFMIN(foc->xmax, foc->last_x + 8),
+ FFMAX(foc->ymin, foc->last_y - 8),
+ FFMIN(foc->ymax, foc->last_y + 8),
+ &best_x, &best_y, 1.0);
+
+ best_score = search(foc, 0, foc->mipmaps - 1, foc->xmin, foc->xmax, foc->ymin, foc->ymax,
+ &best_x, &best_y, best_score);
+
+ for (i=0; i<MAX_MIPMAPS; i++) {
+ av_frame_free(&foc->haystack_frame[i]);
+ }
+
+ if (best_score > foc->threshold) {
+ return ff_filter_frame(ctx->outputs[0], in);
+ }
+
+ av_log(ctx, AV_LOG_DEBUG, "Found at %d %d score %f\n", best_x, best_y, best_score);
+ foc->last_x = best_x;
+ foc->last_y = best_y;
+
+ av_frame_make_writable(in);
+
+ av_dict_set_int(&in->metadata, "lavfi.rect.w", foc->obj_frame->width, 0);
+ av_dict_set_int(&in->metadata, "lavfi.rect.h", foc->obj_frame->height, 0);
+ av_dict_set_int(&in->metadata, "lavfi.rect.x", best_x, 0);
+ av_dict_set_int(&in->metadata, "lavfi.rect.y", best_y, 0);
+
+ return ff_filter_frame(ctx->outputs[0], in);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ FOCContext *foc = ctx->priv;
+ int i;
+
+ for (i = 0; i < MAX_MIPMAPS; i++) {
+ av_frame_free(&foc->needle_frame[i]);
+ av_frame_free(&foc->haystack_frame[i]);
+ }
+
+ if (foc->obj_frame)
+ av_freep(&foc->obj_frame->data[0]);
+ av_frame_free(&foc->obj_frame);
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ FOCContext *foc = ctx->priv;
+ int ret, i;
+
+ if (!foc->obj_filename) {
+ av_log(ctx, AV_LOG_ERROR, "object filename not set\n");
+ return AVERROR(EINVAL);
+ }
+
+ foc->obj_frame = av_frame_alloc();
+ if (!foc->obj_frame)
+ return AVERROR(ENOMEM);
+
+ if ((ret = ff_load_image(foc->obj_frame->data, foc->obj_frame->linesize,
+ &foc->obj_frame->width, &foc->obj_frame->height,
+ &foc->obj_frame->format, foc->obj_filename, ctx)) < 0)
+ return ret;
+
+ if (foc->obj_frame->format != AV_PIX_FMT_GRAY8) {
+ av_log(ctx, AV_LOG_ERROR, "object image is not a grayscale image\n");
+ return AVERROR(EINVAL);
+ }
+
+ foc->needle_frame[0] = av_frame_clone(foc->obj_frame);
+ for (i = 1; i < foc->mipmaps; i++) {
+ foc->needle_frame[i] = downscale(foc->needle_frame[i-1]);
+ if (!foc->needle_frame[i])
+ return AVERROR(ENOMEM);
+ }
+
+ return 0;
+}
+
+static const AVFilterPad foc_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad foc_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_find_rect = {
+ .name = "find_rect",
+ .description = NULL_IF_CONFIG_SMALL("Find a user specified object."),
+ .priv_size = sizeof(FOCContext),
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = foc_inputs,
+ .outputs = foc_outputs,
+ .priv_class = &find_rect_class,
+};
diff --git a/libavfilter/vf_format.c b/libavfilter/vf_format.c
index 914089deab..a57c99d797 100644
--- a/libavfilter/vf_format.c
+++ b/libavfilter/vf_format.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2007 Bobby Bingham
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -58,6 +58,7 @@ static av_cold int init(AVFilterContext *ctx)
char *cur, *sep;
int nb_formats = 1;
int i;
+ int ret;
if (!s->pix_fmts) {
av_log(ctx, AV_LOG_ERROR, "Empty output format string.\n");
@@ -83,11 +84,8 @@ static av_cold int init(AVFilterContext *ctx)
if (sep)
*sep++ = 0;
- s->formats[i] = av_get_pix_fmt(cur);
- if (s->formats[i] == AV_PIX_FMT_NONE) {
- av_log(ctx, AV_LOG_ERROR, "Unknown pixel format: %s\n", cur);
- return AVERROR(EINVAL);
- }
+ if ((ret = ff_parse_pixel_format(&s->formats[i], cur, ctx)) < 0)
+ return ret;
cur = sep;
}
@@ -96,7 +94,7 @@ static av_cold int init(AVFilterContext *ctx)
if (!strcmp(ctx->filter->name, "noformat")) {
const AVPixFmtDescriptor *desc = NULL;
enum AVPixelFormat *formats_allowed;
- int nb_formats_lavu = 0, nb_formats_allowed = 0;;
+ int nb_formats_lavu = 0, nb_formats_allowed = 0;
/* count the formats known to lavu */
while ((desc = av_pix_fmt_desc_next(desc)))
@@ -136,24 +134,20 @@ static int query_formats(AVFilterContext *ctx)
if (!formats)
return AVERROR(ENOMEM);
- ff_set_common_formats(ctx, formats);
- return 0;
+ return ff_set_common_formats(ctx, formats);
}
#define OFFSET(x) offsetof(FormatContext, x)
static const AVOption options[] = {
- { "pix_fmts", "A '|'-separated list of pixel formats", OFFSET(pix_fmts), AV_OPT_TYPE_STRING, .flags = AV_OPT_FLAG_VIDEO_PARAM },
- { NULL },
+ { "pix_fmts", "A '|'-separated list of pixel formats", OFFSET(pix_fmts), AV_OPT_TYPE_STRING, .flags = AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM },
+ { NULL }
};
#if CONFIG_FORMAT_FILTER
-static const AVClass format_class = {
- .class_name = "format",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+
+#define format_options options
+AVFILTER_DEFINE_CLASS(format);
static const AVFilterPad avfilter_vf_format_inputs[] = {
{
@@ -173,29 +167,26 @@ static const AVFilterPad avfilter_vf_format_outputs[] = {
};
AVFilter ff_vf_format = {
- .name = "format",
- .description = NULL_IF_CONFIG_SMALL("Convert the input video to one of the specified pixel formats."),
+ .name = "format",
+ .description = NULL_IF_CONFIG_SMALL("Convert the input video to one of the specified pixel formats."),
- .init = init,
- .uninit = uninit,
+ .init = init,
+ .uninit = uninit,
.query_formats = query_formats,
- .priv_size = sizeof(FormatContext),
- .priv_class = &format_class,
+ .priv_size = sizeof(FormatContext),
+ .priv_class = &format_class,
- .inputs = avfilter_vf_format_inputs,
- .outputs = avfilter_vf_format_outputs,
+ .inputs = avfilter_vf_format_inputs,
+ .outputs = avfilter_vf_format_outputs,
};
#endif /* CONFIG_FORMAT_FILTER */
#if CONFIG_NOFORMAT_FILTER
-static const AVClass noformat_class = {
- .class_name = "noformat",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+
+#define noformat_options options
+AVFILTER_DEFINE_CLASS(noformat);
static const AVFilterPad avfilter_vf_noformat_inputs[] = {
{
@@ -215,18 +206,18 @@ static const AVFilterPad avfilter_vf_noformat_outputs[] = {
};
AVFilter ff_vf_noformat = {
- .name = "noformat",
- .description = NULL_IF_CONFIG_SMALL("Force libavfilter not to use any of the specified pixel formats for the input to the next filter."),
+ .name = "noformat",
+ .description = NULL_IF_CONFIG_SMALL("Force libavfilter not to use any of the specified pixel formats for the input to the next filter."),
- .init = init,
- .uninit = uninit,
+ .init = init,
+ .uninit = uninit,
.query_formats = query_formats,
- .priv_size = sizeof(FormatContext),
- .priv_class = &noformat_class,
+ .priv_size = sizeof(FormatContext),
+ .priv_class = &noformat_class,
- .inputs = avfilter_vf_noformat_inputs,
- .outputs = avfilter_vf_noformat_outputs,
+ .inputs = avfilter_vf_noformat_inputs,
+ .outputs = avfilter_vf_noformat_outputs,
};
#endif /* CONFIG_NOFORMAT_FILTER */
diff --git a/libavfilter/vf_fps.c b/libavfilter/vf_fps.c
index e5562c8517..20ccd797d1 100644
--- a/libavfilter/vf_fps.c
+++ b/libavfilter/vf_fps.c
@@ -1,18 +1,22 @@
/*
- * This file is part of Libav.
+ * Copyright 2007 Bobby Bingham
+ * Copyright 2012 Robert Nagy <ronag89 gmail com>
+ * Copyright 2012 Anton Khirnov <anton khirnov net>
*
- * Libav is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -41,12 +45,11 @@ typedef struct FPSContext {
/* timestamps in input timebase */
int64_t first_pts; ///< pts of the first frame that arrived on this filter
- int64_t pts; ///< pts of the first frame currently in the fifo
double start_time; ///< pts, in seconds, of the expected first frame
AVRational framerate; ///< target framerate
- char *fps; ///< a string describing target framerate
+ int rounding; ///< AVRounding method for timestamps
/* statistics */
int frames_in; ///< number of frames on input
@@ -57,33 +60,28 @@ typedef struct FPSContext {
#define OFFSET(x) offsetof(FPSContext, x)
#define V AV_OPT_FLAG_VIDEO_PARAM
-static const AVOption options[] = {
- { "fps", "A string describing desired output framerate", OFFSET(fps), AV_OPT_TYPE_STRING, { .str = "25" }, .flags = V },
+#define F AV_OPT_FLAG_FILTERING_PARAM
+static const AVOption fps_options[] = {
+ { "fps", "A string describing desired output framerate", OFFSET(framerate), AV_OPT_TYPE_VIDEO_RATE, { .str = "25" }, 0, INT_MAX, V|F },
{ "start_time", "Assume the first PTS should be this value.", OFFSET(start_time), AV_OPT_TYPE_DOUBLE, { .dbl = DBL_MAX}, -DBL_MAX, DBL_MAX, V },
- { NULL },
+ { "round", "set rounding method for timestamps", OFFSET(rounding), AV_OPT_TYPE_INT, { .i64 = AV_ROUND_NEAR_INF }, 0, 5, V|F, "round" },
+ { "zero", "round towards 0", OFFSET(rounding), AV_OPT_TYPE_CONST, { .i64 = AV_ROUND_ZERO }, 0, 5, V|F, "round" },
+ { "inf", "round away from 0", OFFSET(rounding), AV_OPT_TYPE_CONST, { .i64 = AV_ROUND_INF }, 0, 5, V|F, "round" },
+ { "down", "round towards -infty", OFFSET(rounding), AV_OPT_TYPE_CONST, { .i64 = AV_ROUND_DOWN }, 0, 5, V|F, "round" },
+ { "up", "round towards +infty", OFFSET(rounding), AV_OPT_TYPE_CONST, { .i64 = AV_ROUND_UP }, 0, 5, V|F, "round" },
+ { "near", "round to nearest", OFFSET(rounding), AV_OPT_TYPE_CONST, { .i64 = AV_ROUND_NEAR_INF }, 0, 5, V|F, "round" },
+ { NULL }
};
-static const AVClass class = {
- .class_name = "FPS filter",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(fps);
static av_cold int init(AVFilterContext *ctx)
{
FPSContext *s = ctx->priv;
- int ret;
-
- if ((ret = av_parse_video_rate(&s->framerate, s->fps)) < 0) {
- av_log(ctx, AV_LOG_ERROR, "Error parsing framerate %s.\n", s->fps);
- return ret;
- }
- if (!(s->fifo = av_fifo_alloc(2*sizeof(AVFrame*))))
+ if (!(s->fifo = av_fifo_alloc_array(2, sizeof(AVFrame*))))
return AVERROR(ENOMEM);
- s->pts = AV_NOPTS_VALUE;
s->first_pts = AV_NOPTS_VALUE;
av_log(ctx, AV_LOG_VERBOSE, "fps=%d/%d\n", s->framerate.num, s->framerate.den);
@@ -105,7 +103,7 @@ static av_cold void uninit(AVFilterContext *ctx)
if (s->fifo) {
s->drop += av_fifo_size(s->fifo) / sizeof(AVFrame*);
flush_fifo(s->fifo);
- av_fifo_free(s->fifo);
+ av_fifo_freep(&s->fifo);
}
av_log(ctx, AV_LOG_VERBOSE, "%d frames in, %d frames out; %d frames dropped, "
@@ -116,10 +114,10 @@ static int config_props(AVFilterLink* link)
{
FPSContext *s = link->src->priv;
- link->time_base = (AVRational){ s->framerate.den, s->framerate.num };
- link->frame_rate = s->framerate;
- link->w = link->src->inputs[0]->w;
- link->h = link->src->inputs[0]->h;
+ link->time_base = av_inv_q(s->framerate);
+ link->frame_rate= s->framerate;
+ link->w = link->src->inputs[0]->w;
+ link->h = link->src->inputs[0]->h;
return 0;
}
@@ -128,11 +126,9 @@ static int request_frame(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
FPSContext *s = ctx->priv;
- int frames_out = s->frames_out;
- int ret = 0;
+ int ret;
- while (ret >= 0 && s->frames_out == frames_out)
- ret = ff_request_frame(ctx->inputs[0]);
+ ret = ff_request_frame(ctx->inputs[0]);
/* flush the fifo */
if (ret == AVERROR_EOF && av_fifo_size(s->fifo)) {
@@ -179,22 +175,22 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
s->frames_in++;
/* discard frames until we get the first timestamp */
- if (s->pts == AV_NOPTS_VALUE) {
+ if (s->first_pts == AV_NOPTS_VALUE) {
if (buf->pts != AV_NOPTS_VALUE) {
ret = write_to_fifo(s->fifo, buf);
if (ret < 0)
return ret;
- if (s->start_time != DBL_MAX) {
+ if (s->start_time != DBL_MAX && s->start_time != AV_NOPTS_VALUE) {
double first_pts = s->start_time * AV_TIME_BASE;
first_pts = FFMIN(FFMAX(first_pts, INT64_MIN), INT64_MAX);
- s->first_pts = s->pts = av_rescale_q(first_pts, AV_TIME_BASE_Q,
+ s->first_pts = av_rescale_q(first_pts, AV_TIME_BASE_Q,
inlink->time_base);
av_log(ctx, AV_LOG_VERBOSE, "Set first pts to (in:%"PRId64" out:%"PRId64")\n",
s->first_pts, av_rescale_q(first_pts, AV_TIME_BASE_Q,
outlink->time_base));
} else {
- s->first_pts = s->pts = buf->pts;
+ s->first_pts = buf->pts;
}
} else {
av_log(ctx, AV_LOG_WARNING, "Discarding initial frame(s) with no "
@@ -206,27 +202,24 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
}
/* now wait for the next timestamp */
- if (buf->pts == AV_NOPTS_VALUE) {
+ if (buf->pts == AV_NOPTS_VALUE || av_fifo_size(s->fifo) <= 0) {
return write_to_fifo(s->fifo, buf);
}
/* number of output frames */
- delta = av_rescale_q(buf->pts - s->pts, inlink->time_base,
- outlink->time_base);
+ delta = av_rescale_q_rnd(buf->pts - s->first_pts, inlink->time_base,
+ outlink->time_base, s->rounding) - s->frames_out ;
if (delta < 1) {
- /* drop the frame and everything buffered except the first */
- AVFrame *tmp;
+ /* drop everything buffered except the last */
int drop = av_fifo_size(s->fifo)/sizeof(AVFrame*);
av_log(ctx, AV_LOG_DEBUG, "Dropping %d frame(s).\n", drop);
s->drop += drop;
- av_fifo_generic_read(s->fifo, &tmp, sizeof(tmp), NULL);
flush_fifo(s->fifo);
- ret = write_to_fifo(s->fifo, tmp);
+ ret = write_to_fifo(s->fifo, buf);
- av_frame_free(&buf);
return ret;
}
@@ -267,15 +260,14 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
flush_fifo(s->fifo);
ret = write_to_fifo(s->fifo, buf);
- s->pts = s->first_pts + av_rescale_q(s->frames_out, outlink->time_base, inlink->time_base);
return ret;
}
static const AVFilterPad avfilter_vf_fps_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
},
{ NULL }
@@ -293,14 +285,11 @@ static const AVFilterPad avfilter_vf_fps_outputs[] = {
AVFilter ff_vf_fps = {
.name = "fps",
- .description = NULL_IF_CONFIG_SMALL("Force constant framerate"),
-
- .init = init,
- .uninit = uninit,
-
- .priv_size = sizeof(FPSContext),
- .priv_class = &class,
-
- .inputs = avfilter_vf_fps_inputs,
- .outputs = avfilter_vf_fps_outputs,
+ .description = NULL_IF_CONFIG_SMALL("Force constant framerate."),
+ .init = init,
+ .uninit = uninit,
+ .priv_size = sizeof(FPSContext),
+ .priv_class = &fps_class,
+ .inputs = avfilter_vf_fps_inputs,
+ .outputs = avfilter_vf_fps_outputs,
};
diff --git a/libavfilter/vf_framepack.c b/libavfilter/vf_framepack.c
index fd0c1897d5..a5cd9540b9 100644
--- a/libavfilter/vf_framepack.c
+++ b/libavfilter/vf_framepack.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2013 Vittorio Giovara
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -62,8 +62,10 @@ static const enum AVPixelFormat formats_supported[] = {
static int query_formats(AVFilterContext *ctx)
{
// this will ensure that formats are the same on all pads
- ff_set_common_formats(ctx, ff_make_format_list(formats_supported));
- return 0;
+ AVFilterFormats *fmts_list = ff_make_format_list(formats_supported);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
}
static av_cold void framepack_uninit(AVFilterContext *ctx)
@@ -267,25 +269,26 @@ static av_always_inline void spatial_frame_pack(AVFilterLink *outlink,
}
}
+static int try_push_frame(AVFilterContext *ctx);
+
static int filter_frame_left(AVFilterLink *inlink, AVFrame *frame)
{
FramepackContext *s = inlink->dst->priv;
s->input_views[LEFT] = frame;
- return 0;
+ return try_push_frame(inlink->dst);
}
static int filter_frame_right(AVFilterLink *inlink, AVFrame *frame)
{
FramepackContext *s = inlink->dst->priv;
s->input_views[RIGHT] = frame;
- return 0;
+ return try_push_frame(inlink->dst);
}
static int request_frame(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
FramepackContext *s = ctx->priv;
- AVStereo3D *stereo;
int ret, i;
/* get a frame on the either input, stop as soon as a video ends */
@@ -296,7 +299,18 @@ static int request_frame(AVFilterLink *outlink)
return ret;
}
}
+ return 0;
+}
+static int try_push_frame(AVFilterContext *ctx)
+{
+ FramepackContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVStereo3D *stereo;
+ int ret, i;
+
+ if (!(s->input_views[0] && s->input_views[1]))
+ return 0;
if (s->format == AV_STEREO3D_FRAMESEQUENCE) {
if (s->double_pts == AV_NOPTS_VALUE)
s->double_pts = s->input_views[LEFT]->pts;
@@ -349,7 +363,7 @@ static int request_frame(AVFilterLink *outlink)
#define OFFSET(x) offsetof(FramepackContext, x)
#define V AV_OPT_FLAG_VIDEO_PARAM
-static const AVOption options[] = {
+static const AVOption framepack_options[] = {
{ "format", "Frame pack output format", OFFSET(format), AV_OPT_TYPE_INT,
{ .i64 = AV_STEREO3D_SIDEBYSIDE }, 0, INT_MAX, .flags = V, .unit = "format" },
{ "sbs", "Views are packed next to each other", 0, AV_OPT_TYPE_CONST,
@@ -365,12 +379,7 @@ static const AVOption options[] = {
{ NULL },
};
-static const AVClass framepack_class = {
- .class_name = "framepack",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(framepack);
static const AVFilterPad framepack_inputs[] = {
{
diff --git a/libavfilter/vf_framerate.c b/libavfilter/vf_framerate.c
new file mode 100644
index 0000000000..b4a74f7f7d
--- /dev/null
+++ b/libavfilter/vf_framerate.c
@@ -0,0 +1,731 @@
+/*
+ * Copyright (C) 2012 Mark Himsley
+ *
+ * get_scene_score() Copyright (c) 2011 Stefano Sabatini
+ * taken from libavfilter/vf_select.c
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * filter for upsampling or downsampling a progressive source
+ */
+
+#define DEBUG
+
+#include "libavutil/avassert.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/internal.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/pixelutils.h"
+
+#include "avfilter.h"
+#include "internal.h"
+#include "video.h"
+
+#define N_SRCE 3
+
+typedef struct FrameRateContext {
+ const AVClass *class;
+ // parameters
+ AVRational dest_frame_rate; ///< output frames per second
+ int flags; ///< flags affecting frame rate conversion algorithm
+ double scene_score; ///< score that denotes a scene change has happened
+ int interp_start; ///< start of range to apply linear interpolation
+ int interp_end; ///< end of range to apply linear interpolation
+
+ int line_size[4]; ///< bytes of pixel data per line for each plane
+ int vsub;
+
+ int frst, next, prev, crnt, last;
+ int pending_srce_frames; ///< how many input frames are still waiting to be processed
+ int flush; ///< are we flushing final frames
+ int pending_end_frame; ///< flag indicating we are waiting to call filter_frame()
+
+ AVRational srce_time_base; ///< timebase of source
+
+ AVRational dest_time_base; ///< timebase of destination
+ int32_t dest_frame_num;
+ int64_t last_dest_frame_pts; ///< pts of the last frame output
+ int64_t average_srce_pts_dest_delta;///< average input pts delta converted from input rate to output rate
+ int64_t average_dest_pts_delta; ///< calculated average output pts delta
+
+ av_pixelutils_sad_fn sad; ///< Sum of the absolute difference function (scene detect only)
+ double prev_mafd; ///< previous MAFD (scene detect only)
+
+ AVFrame *srce[N_SRCE]; ///< buffered source frames
+ int64_t srce_pts_dest[N_SRCE]; ///< pts for source frames scaled to output timebase
+ int64_t pts; ///< pts of frame we are working on
+
+ int (*blend_frames)(AVFilterContext *ctx, float interpolate,
+ AVFrame *copy_src1, AVFrame *copy_src2);
+ int max;
+ int bitdepth;
+ AVFrame *work;
+} FrameRateContext;
+
+#define OFFSET(x) offsetof(FrameRateContext, x)
+#define V AV_OPT_FLAG_VIDEO_PARAM
+#define F AV_OPT_FLAG_FILTERING_PARAM
+#define FRAMERATE_FLAG_SCD 01
+
+static const AVOption framerate_options[] = {
+ {"fps", "required output frames per second rate", OFFSET(dest_frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str="50"}, 0, INT_MAX, V|F },
+
+ {"interp_start", "point to start linear interpolation", OFFSET(interp_start), AV_OPT_TYPE_INT, {.i64=15}, 0, 255, V|F },
+ {"interp_end", "point to end linear interpolation", OFFSET(interp_end), AV_OPT_TYPE_INT, {.i64=240}, 0, 255, V|F },
+ {"scene", "scene change level", OFFSET(scene_score), AV_OPT_TYPE_DOUBLE, {.dbl=7.0}, 0, INT_MAX, V|F },
+
+ {"flags", "set flags", OFFSET(flags), AV_OPT_TYPE_FLAGS, {.i64=1}, 0, INT_MAX, V|F, "flags" },
+ {"scene_change_detect", "enable scene change detection", 0, AV_OPT_TYPE_CONST, {.i64=FRAMERATE_FLAG_SCD}, INT_MIN, INT_MAX, V|F, "flags" },
+ {"scd", "enable scene change detection", 0, AV_OPT_TYPE_CONST, {.i64=FRAMERATE_FLAG_SCD}, INT_MIN, INT_MAX, V|F, "flags" },
+
+ {NULL}
+};
+
+AVFILTER_DEFINE_CLASS(framerate);
+
+static void next_source(AVFilterContext *ctx)
+{
+ FrameRateContext *s = ctx->priv;
+ int i;
+
+ ff_dlog(ctx, "next_source()\n");
+
+ if (s->srce[s->last] && s->srce[s->last] != s->srce[s->last-1]) {
+ ff_dlog(ctx, "next_source() unlink %d\n", s->last);
+ av_frame_free(&s->srce[s->last]);
+ }
+ for (i = s->last; i > s->frst; i--) {
+ ff_dlog(ctx, "next_source() copy %d to %d\n", i - 1, i);
+ s->srce[i] = s->srce[i - 1];
+ }
+ ff_dlog(ctx, "next_source() make %d null\n", s->frst);
+ s->srce[s->frst] = NULL;
+}
+
+static av_always_inline int64_t sad_8x8_16(const uint16_t *src1, ptrdiff_t stride1,
+ const uint16_t *src2, ptrdiff_t stride2)
+{
+ int sum = 0;
+ int x, y;
+
+ for (y = 0; y < 8; y++) {
+ for (x = 0; x < 8; x++)
+ sum += FFABS(src1[x] - src2[x]);
+ src1 += stride1;
+ src2 += stride2;
+ }
+ return sum;
+}
+
+static double get_scene_score16(AVFilterContext *ctx, AVFrame *crnt, AVFrame *next)
+{
+ FrameRateContext *s = ctx->priv;
+ double ret = 0;
+
+ ff_dlog(ctx, "get_scene_score16()\n");
+
+ if (crnt &&
+ crnt->height == next->height &&
+ crnt->width == next->width) {
+ int x, y;
+ int64_t sad;
+ double mafd, diff;
+ const uint16_t *p1 = (const uint16_t *)crnt->data[0];
+ const uint16_t *p2 = (const uint16_t *)next->data[0];
+ const int p1_linesize = crnt->linesize[0] / 2;
+ const int p2_linesize = next->linesize[0] / 2;
+
+ ff_dlog(ctx, "get_scene_score16() process\n");
+
+ for (sad = y = 0; y < crnt->height; y += 8) {
+ for (x = 0; x < p1_linesize; x += 8) {
+ sad += sad_8x8_16(p1 + y * p1_linesize + x,
+ p1_linesize,
+ p2 + y * p2_linesize + x,
+ p2_linesize);
+ }
+ }
+ mafd = sad / (crnt->height * crnt->width * 3);
+ diff = fabs(mafd - s->prev_mafd);
+ ret = av_clipf(FFMIN(mafd, diff), 0, 100.0);
+ s->prev_mafd = mafd;
+ }
+ ff_dlog(ctx, "get_scene_score16() result is:%f\n", ret);
+ return ret;
+}
+
+static double get_scene_score(AVFilterContext *ctx, AVFrame *crnt, AVFrame *next)
+{
+ FrameRateContext *s = ctx->priv;
+ double ret = 0;
+
+ ff_dlog(ctx, "get_scene_score()\n");
+
+ if (crnt &&
+ crnt->height == next->height &&
+ crnt->width == next->width) {
+ int x, y;
+ int64_t sad;
+ double mafd, diff;
+ uint8_t *p1 = crnt->data[0];
+ uint8_t *p2 = next->data[0];
+ const int p1_linesize = crnt->linesize[0];
+ const int p2_linesize = next->linesize[0];
+
+ ff_dlog(ctx, "get_scene_score() process\n");
+
+ for (sad = y = 0; y < crnt->height; y += 8) {
+ for (x = 0; x < p1_linesize; x += 8) {
+ sad += s->sad(p1 + y * p1_linesize + x,
+ p1_linesize,
+ p2 + y * p2_linesize + x,
+ p2_linesize);
+ }
+ }
+ emms_c();
+ mafd = sad / (crnt->height * crnt->width * 3);
+ diff = fabs(mafd - s->prev_mafd);
+ ret = av_clipf(FFMIN(mafd, diff), 0, 100.0);
+ s->prev_mafd = mafd;
+ }
+ ff_dlog(ctx, "get_scene_score() result is:%f\n", ret);
+ return ret;
+}
+
+static int blend_frames16(AVFilterContext *ctx, float interpolate,
+ AVFrame *copy_src1, AVFrame *copy_src2)
+{
+ FrameRateContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ double interpolate_scene_score = 0;
+
+ if ((s->flags & FRAMERATE_FLAG_SCD) && copy_src2) {
+ interpolate_scene_score = get_scene_score16(ctx, copy_src1, copy_src2);
+ ff_dlog(ctx, "blend_frames16() interpolate scene score:%f\n", interpolate_scene_score);
+ }
+ // decide if the shot-change detection allows us to blend two frames
+ if (interpolate_scene_score < s->scene_score && copy_src2) {
+ uint16_t src2_factor = fabsf(interpolate) * (1 << (s->bitdepth - 8));
+ uint16_t src1_factor = s->max - src2_factor;
+ const int half = s->max / 2;
+ const int uv = (s->max + 1) * half;
+ const int shift = s->bitdepth;
+ int plane, line, pixel;
+
+ // get work-space for output frame
+ s->work = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!s->work)
+ return AVERROR(ENOMEM);
+
+ av_frame_copy_props(s->work, s->srce[s->crnt]);
+
+ ff_dlog(ctx, "blend_frames16() INTERPOLATE to create work frame\n");
+ for (plane = 0; plane < 4 && copy_src1->data[plane] && copy_src2->data[plane]; plane++) {
+ int cpy_line_width = s->line_size[plane];
+ const uint16_t *cpy_src1_data = (const uint16_t *)copy_src1->data[plane];
+ int cpy_src1_line_size = copy_src1->linesize[plane] / 2;
+ const uint16_t *cpy_src2_data = (const uint16_t *)copy_src2->data[plane];
+ int cpy_src2_line_size = copy_src2->linesize[plane] / 2;
+ int cpy_src_h = (plane > 0 && plane < 3) ? (copy_src1->height >> s->vsub) : (copy_src1->height);
+ uint16_t *cpy_dst_data = (uint16_t *)s->work->data[plane];
+ int cpy_dst_line_size = s->work->linesize[plane] / 2;
+
+ if (plane <1 || plane >2) {
+ // luma or alpha
+ for (line = 0; line < cpy_src_h; line++) {
+ for (pixel = 0; pixel < cpy_line_width; pixel++)
+ cpy_dst_data[pixel] = ((cpy_src1_data[pixel] * src1_factor) + (cpy_src2_data[pixel] * src2_factor) + half) >> shift;
+ cpy_src1_data += cpy_src1_line_size;
+ cpy_src2_data += cpy_src2_line_size;
+ cpy_dst_data += cpy_dst_line_size;
+ }
+ } else {
+ // chroma
+ for (line = 0; line < cpy_src_h; line++) {
+ for (pixel = 0; pixel < cpy_line_width; pixel++) {
+ cpy_dst_data[pixel] = (((cpy_src1_data[pixel] - half) * src1_factor) + ((cpy_src2_data[pixel] - half) * src2_factor) + uv) >> shift;
+ }
+ cpy_src1_data += cpy_src1_line_size;
+ cpy_src2_data += cpy_src2_line_size;
+ cpy_dst_data += cpy_dst_line_size;
+ }
+ }
+ }
+ return 1;
+ }
+ return 0;
+}
+
+static int blend_frames8(AVFilterContext *ctx, float interpolate,
+ AVFrame *copy_src1, AVFrame *copy_src2)
+{
+ FrameRateContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ double interpolate_scene_score = 0;
+
+ if ((s->flags & FRAMERATE_FLAG_SCD) && copy_src2) {
+ interpolate_scene_score = get_scene_score(ctx, copy_src1, copy_src2);
+ ff_dlog(ctx, "blend_frames8() interpolate scene score:%f\n", interpolate_scene_score);
+ }
+ // decide if the shot-change detection allows us to blend two frames
+ if (interpolate_scene_score < s->scene_score && copy_src2) {
+ uint16_t src2_factor = fabsf(interpolate);
+ uint16_t src1_factor = 256 - src2_factor;
+ int plane, line, pixel;
+
+ // get work-space for output frame
+ s->work = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!s->work)
+ return AVERROR(ENOMEM);
+
+ av_frame_copy_props(s->work, s->srce[s->crnt]);
+
+ ff_dlog(ctx, "blend_frames8() INTERPOLATE to create work frame\n");
+ for (plane = 0; plane < 4 && copy_src1->data[plane] && copy_src2->data[plane]; plane++) {
+ int cpy_line_width = s->line_size[plane];
+ uint8_t *cpy_src1_data = copy_src1->data[plane];
+ int cpy_src1_line_size = copy_src1->linesize[plane];
+ uint8_t *cpy_src2_data = copy_src2->data[plane];
+ int cpy_src2_line_size = copy_src2->linesize[plane];
+ int cpy_src_h = (plane > 0 && plane < 3) ? (copy_src1->height >> s->vsub) : (copy_src1->height);
+ uint8_t *cpy_dst_data = s->work->data[plane];
+ int cpy_dst_line_size = s->work->linesize[plane];
+ if (plane <1 || plane >2) {
+ // luma or alpha
+ for (line = 0; line < cpy_src_h; line++) {
+ for (pixel = 0; pixel < cpy_line_width; pixel++) {
+ // integer version of (src1 * src1_factor) + (src2 + src2_factor) + 0.5
+ // 0.5 is for rounding
+ // 128 is the integer representation of 0.5 << 8
+ cpy_dst_data[pixel] = ((cpy_src1_data[pixel] * src1_factor) + (cpy_src2_data[pixel] * src2_factor) + 128) >> 8;
+ }
+ cpy_src1_data += cpy_src1_line_size;
+ cpy_src2_data += cpy_src2_line_size;
+ cpy_dst_data += cpy_dst_line_size;
+ }
+ } else {
+ // chroma
+ for (line = 0; line < cpy_src_h; line++) {
+ for (pixel = 0; pixel < cpy_line_width; pixel++) {
+ // as above
+ // because U and V are based around 128 we have to subtract 128 from the components.
+ // 32896 is the integer representation of 128.5 << 8
+ cpy_dst_data[pixel] = (((cpy_src1_data[pixel] - 128) * src1_factor) + ((cpy_src2_data[pixel] - 128) * src2_factor) + 32896) >> 8;
+ }
+ cpy_src1_data += cpy_src1_line_size;
+ cpy_src2_data += cpy_src2_line_size;
+ cpy_dst_data += cpy_dst_line_size;
+ }
+ }
+ }
+ return 1;
+ }
+ return 0;
+}
+
+static int process_work_frame(AVFilterContext *ctx, int stop)
+{
+ FrameRateContext *s = ctx->priv;
+ int64_t work_next_pts;
+ AVFrame *copy_src1;
+ float interpolate;
+
+ ff_dlog(ctx, "process_work_frame()\n");
+
+ ff_dlog(ctx, "process_work_frame() pending_input_frames %d\n", s->pending_srce_frames);
+
+ if (s->srce[s->prev]) ff_dlog(ctx, "process_work_frame() srce prev pts:%"PRId64"\n", s->srce[s->prev]->pts);
+ if (s->srce[s->crnt]) ff_dlog(ctx, "process_work_frame() srce crnt pts:%"PRId64"\n", s->srce[s->crnt]->pts);
+ if (s->srce[s->next]) ff_dlog(ctx, "process_work_frame() srce next pts:%"PRId64"\n", s->srce[s->next]->pts);
+
+ if (!s->srce[s->crnt]) {
+ // the filter cannot do anything
+ ff_dlog(ctx, "process_work_frame() no current frame cached: move on to next frame, do not output a frame\n");
+ next_source(ctx);
+ return 0;
+ }
+
+ work_next_pts = s->pts + s->average_dest_pts_delta;
+
+ ff_dlog(ctx, "process_work_frame() work crnt pts:%"PRId64"\n", s->pts);
+ ff_dlog(ctx, "process_work_frame() work next pts:%"PRId64"\n", work_next_pts);
+ if (s->srce[s->prev])
+ ff_dlog(ctx, "process_work_frame() srce prev pts:%"PRId64" at dest time base:%u/%u\n",
+ s->srce_pts_dest[s->prev], s->dest_time_base.num, s->dest_time_base.den);
+ if (s->srce[s->crnt])
+ ff_dlog(ctx, "process_work_frame() srce crnt pts:%"PRId64" at dest time base:%u/%u\n",
+ s->srce_pts_dest[s->crnt], s->dest_time_base.num, s->dest_time_base.den);
+ if (s->srce[s->next])
+ ff_dlog(ctx, "process_work_frame() srce next pts:%"PRId64" at dest time base:%u/%u\n",
+ s->srce_pts_dest[s->next], s->dest_time_base.num, s->dest_time_base.den);
+
+ av_assert0(s->srce[s->next]);
+
+ // should filter be skipping input frame (output frame rate is lower than input frame rate)
+ if (!s->flush && s->pts >= s->srce_pts_dest[s->next]) {
+ ff_dlog(ctx, "process_work_frame() work crnt pts >= srce next pts: SKIP FRAME, move on to next frame, do not output a frame\n");
+ next_source(ctx);
+ s->pending_srce_frames--;
+ return 0;
+ }
+
+ // calculate interpolation
+ interpolate = ((s->pts - s->srce_pts_dest[s->crnt]) * 256.0 / s->average_srce_pts_dest_delta);
+ ff_dlog(ctx, "process_work_frame() interpolate:%f/256\n", interpolate);
+ copy_src1 = s->srce[s->crnt];
+ if (interpolate > s->interp_end) {
+ ff_dlog(ctx, "process_work_frame() source is:NEXT\n");
+ copy_src1 = s->srce[s->next];
+ }
+ if (s->srce[s->prev] && interpolate < -s->interp_end) {
+ ff_dlog(ctx, "process_work_frame() source is:PREV\n");
+ copy_src1 = s->srce[s->prev];
+ }
+
+ // decide whether to blend two frames
+ if ((interpolate >= s->interp_start && interpolate <= s->interp_end) || (interpolate <= -s->interp_start && interpolate >= -s->interp_end)) {
+ AVFrame *copy_src2;
+
+ if (interpolate > 0) {
+ ff_dlog(ctx, "process_work_frame() interpolate source is:NEXT\n");
+ copy_src2 = s->srce[s->next];
+ } else {
+ ff_dlog(ctx, "process_work_frame() interpolate source is:PREV\n");
+ copy_src2 = s->srce[s->prev];
+ }
+ if (s->blend_frames(ctx, interpolate, copy_src1, copy_src2))
+ goto copy_done;
+ else
+ ff_dlog(ctx, "process_work_frame() CUT - DON'T INTERPOLATE\n");
+ }
+
+ ff_dlog(ctx, "process_work_frame() COPY to the work frame\n");
+ // copy the frame we decided is our base source
+ s->work = av_frame_clone(copy_src1);
+ if (!s->work)
+ return AVERROR(ENOMEM);
+
+copy_done:
+ s->work->pts = s->pts;
+
+ // should filter be re-using input frame (output frame rate is higher than input frame rate)
+ if (!s->flush && (work_next_pts + s->average_dest_pts_delta) < (s->srce_pts_dest[s->crnt] + s->average_srce_pts_dest_delta)) {
+ ff_dlog(ctx, "process_work_frame() REPEAT FRAME\n");
+ } else {
+ ff_dlog(ctx, "process_work_frame() CONSUME FRAME, move to next frame\n");
+ s->pending_srce_frames--;
+ next_source(ctx);
+ }
+ ff_dlog(ctx, "process_work_frame() output a frame\n");
+ s->dest_frame_num++;
+ if (stop)
+ s->pending_end_frame = 0;
+ s->last_dest_frame_pts = s->work->pts;
+
+ return ff_filter_frame(ctx->outputs[0], s->work);
+}
+
+static void set_srce_frame_dest_pts(AVFilterContext *ctx)
+{
+ FrameRateContext *s = ctx->priv;
+
+ ff_dlog(ctx, "set_srce_frame_output_pts()\n");
+
+ // scale the input pts from the timebase difference between input and output
+ if (s->srce[s->prev])
+ s->srce_pts_dest[s->prev] = av_rescale_q(s->srce[s->prev]->pts, s->srce_time_base, s->dest_time_base);
+ if (s->srce[s->crnt])
+ s->srce_pts_dest[s->crnt] = av_rescale_q(s->srce[s->crnt]->pts, s->srce_time_base, s->dest_time_base);
+ if (s->srce[s->next])
+ s->srce_pts_dest[s->next] = av_rescale_q(s->srce[s->next]->pts, s->srce_time_base, s->dest_time_base);
+}
+
+static void set_work_frame_pts(AVFilterContext *ctx)
+{
+ FrameRateContext *s = ctx->priv;
+ int64_t pts, average_srce_pts_delta = 0;
+
+ ff_dlog(ctx, "set_work_frame_pts()\n");
+
+ av_assert0(s->srce[s->next]);
+ av_assert0(s->srce[s->crnt]);
+
+ ff_dlog(ctx, "set_work_frame_pts() srce crnt pts:%"PRId64"\n", s->srce[s->crnt]->pts);
+ ff_dlog(ctx, "set_work_frame_pts() srce next pts:%"PRId64"\n", s->srce[s->next]->pts);
+ if (s->srce[s->prev])
+ ff_dlog(ctx, "set_work_frame_pts() srce prev pts:%"PRId64"\n", s->srce[s->prev]->pts);
+
+ average_srce_pts_delta = s->average_srce_pts_dest_delta;
+ ff_dlog(ctx, "set_work_frame_pts() initial average srce pts:%"PRId64"\n", average_srce_pts_delta);
+
+ set_srce_frame_dest_pts(ctx);
+
+ // calculate the PTS delta
+ if ((pts = (s->srce_pts_dest[s->next] - s->srce_pts_dest[s->crnt]))) {
+ average_srce_pts_delta = average_srce_pts_delta?((average_srce_pts_delta+pts)>>1):pts;
+ } else if (s->srce[s->prev] && (pts = (s->srce_pts_dest[s->crnt] - s->srce_pts_dest[s->prev]))) {
+ average_srce_pts_delta = average_srce_pts_delta?((average_srce_pts_delta+pts)>>1):pts;
+ }
+
+ s->average_srce_pts_dest_delta = average_srce_pts_delta;
+ ff_dlog(ctx, "set_work_frame_pts() average srce pts:%"PRId64"\n", average_srce_pts_delta);
+ ff_dlog(ctx, "set_work_frame_pts() average srce pts:%"PRId64" at dest time base:%u/%u\n",
+ s->average_srce_pts_dest_delta, s->dest_time_base.num, s->dest_time_base.den);
+
+ if (ctx->inputs[0] && !s->average_dest_pts_delta) {
+ int64_t d = av_q2d(av_inv_q(av_mul_q(s->dest_time_base, s->dest_frame_rate)));
+ s->average_dest_pts_delta = d;
+ ff_dlog(ctx, "set_work_frame_pts() average dest pts delta:%"PRId64"\n", s->average_dest_pts_delta);
+ }
+
+ if (!s->dest_frame_num) {
+ s->pts = s->last_dest_frame_pts = s->srce_pts_dest[s->crnt];
+ } else {
+ s->pts = s->last_dest_frame_pts + s->average_dest_pts_delta;
+ }
+
+ ff_dlog(ctx, "set_work_frame_pts() calculated pts:%"PRId64" at dest time base:%u/%u\n",
+ s->pts, s->dest_time_base.num, s->dest_time_base.den);
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ FrameRateContext *s = ctx->priv;
+
+ s->dest_frame_num = 0;
+
+ s->crnt = (N_SRCE)>>1;
+ s->last = N_SRCE - 1;
+
+ s->next = s->crnt - 1;
+ s->prev = s->crnt + 1;
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ FrameRateContext *s = ctx->priv;
+ int i;
+
+ for (i = s->frst; i < s->last; i++) {
+ if (s->srce[i] && (s->srce[i] != s->srce[i + 1]))
+ av_frame_free(&s->srce[i]);
+ }
+ av_frame_free(&s->srce[s->last]);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUVJ411P,
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUVJ420P,
+ AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVJ422P,
+ AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUVJ440P,
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUVJ444P,
+ AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV420P12,
+ AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV422P12,
+ AV_PIX_FMT_YUV444P9, AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUV444P12,
+ AV_PIX_FMT_NONE
+ };
+
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ FrameRateContext *s = ctx->priv;
+ const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
+ int plane;
+
+ for (plane = 0; plane < 4; plane++) {
+ s->line_size[plane] = av_image_get_linesize(inlink->format, inlink->w,
+ plane);
+ }
+
+ s->bitdepth = pix_desc->comp[0].depth;
+ s->vsub = pix_desc->log2_chroma_h;
+
+ s->sad = av_pixelutils_get_sad_fn(3, 3, 2, s); // 8x8 both sources aligned
+ if (!s->sad)
+ return AVERROR(EINVAL);
+
+ s->srce_time_base = inlink->time_base;
+
+ if (s->bitdepth == 8)
+ s->blend_frames = blend_frames8;
+ else
+ s->blend_frames = blend_frames16;
+ s->max = 1 << (s->bitdepth);
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
+{
+ AVFilterContext *ctx = inlink->dst;
+ FrameRateContext *s = ctx->priv;
+
+ // we have one new frame
+ s->pending_srce_frames++;
+
+ if (inpicref->interlaced_frame)
+ av_log(ctx, AV_LOG_WARNING, "Interlaced frame found - the output will not be correct.\n");
+
+ // store the pointer to the new frame
+ av_frame_free(&s->srce[s->frst]);
+ s->srce[s->frst] = inpicref;
+
+ if (!s->pending_end_frame && s->srce[s->crnt]) {
+ set_work_frame_pts(ctx);
+ s->pending_end_frame = 1;
+ } else {
+ set_srce_frame_dest_pts(ctx);
+ }
+
+ return process_work_frame(ctx, 1);
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ FrameRateContext *s = ctx->priv;
+ int exact;
+
+ ff_dlog(ctx, "config_output()\n");
+
+ ff_dlog(ctx,
+ "config_output() input time base:%u/%u (%f)\n",
+ ctx->inputs[0]->time_base.num,ctx->inputs[0]->time_base.den,
+ av_q2d(ctx->inputs[0]->time_base));
+
+ // make sure timebase is small enough to hold the framerate
+
+ exact = av_reduce(&s->dest_time_base.num, &s->dest_time_base.den,
+ av_gcd((int64_t)s->srce_time_base.num * s->dest_frame_rate.num,
+ (int64_t)s->srce_time_base.den * s->dest_frame_rate.den ),
+ (int64_t)s->srce_time_base.den * s->dest_frame_rate.num, INT_MAX);
+
+ av_log(ctx, AV_LOG_INFO,
+ "time base:%u/%u -> %u/%u exact:%d\n",
+ s->srce_time_base.num, s->srce_time_base.den,
+ s->dest_time_base.num, s->dest_time_base.den, exact);
+ if (!exact) {
+ av_log(ctx, AV_LOG_WARNING, "Timebase conversion is not exact\n");
+ }
+
+ outlink->frame_rate = s->dest_frame_rate;
+ outlink->time_base = s->dest_time_base;
+
+ ff_dlog(ctx,
+ "config_output() output time base:%u/%u (%f) w:%d h:%d\n",
+ outlink->time_base.num, outlink->time_base.den,
+ av_q2d(outlink->time_base),
+ outlink->w, outlink->h);
+
+
+ av_log(ctx, AV_LOG_INFO, "fps -> fps:%u/%u scene score:%f interpolate start:%d end:%d\n",
+ s->dest_frame_rate.num, s->dest_frame_rate.den,
+ s->scene_score, s->interp_start, s->interp_end);
+
+ return 0;
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ FrameRateContext *s = ctx->priv;
+ int val, i;
+
+ ff_dlog(ctx, "request_frame()\n");
+
+ // if there is no "next" frame AND we are not in flush then get one from our input filter
+ if (!s->srce[s->frst] && !s->flush) {
+ ff_dlog(ctx, "request_frame() call source's request_frame()\n");
+ val = ff_request_frame(outlink->src->inputs[0]);
+ if (val < 0 && (val != AVERROR_EOF)) {
+ ff_dlog(ctx, "request_frame() source's request_frame() returned error:%d\n", val);
+ return val;
+ } else if (val == AVERROR_EOF) {
+ s->flush = 1;
+ }
+ ff_dlog(ctx, "request_frame() source's request_frame() returned:%d\n", val);
+ return 0;
+ }
+
+ ff_dlog(ctx, "request_frame() REPEAT or FLUSH\n");
+
+ if (s->pending_srce_frames <= 0) {
+ ff_dlog(ctx, "request_frame() nothing else to do, return:EOF\n");
+ return AVERROR_EOF;
+ }
+
+ // otherwise, make brand-new frame and pass to our output filter
+ ff_dlog(ctx, "request_frame() FLUSH\n");
+
+ // back fill at end of file when source has no more frames
+ for (i = s->last; i > s->frst; i--) {
+ if (!s->srce[i - 1] && s->srce[i]) {
+ ff_dlog(ctx, "request_frame() copy:%d to:%d\n", i, i - 1);
+ s->srce[i - 1] = s->srce[i];
+ }
+ }
+
+ set_work_frame_pts(ctx);
+ return process_work_frame(ctx, 0);
+}
+
+static const AVFilterPad framerate_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad framerate_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .request_frame = request_frame,
+ .config_props = config_output,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_framerate = {
+ .name = "framerate",
+ .description = NULL_IF_CONFIG_SMALL("Upsamples or downsamples progressive source between specified frame rates."),
+ .priv_size = sizeof(FrameRateContext),
+ .priv_class = &framerate_class,
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = framerate_inputs,
+ .outputs = framerate_outputs,
+};
diff --git a/libavfilter/vf_framestep.c b/libavfilter/vf_framestep.c
new file mode 100644
index 0000000000..8102e7c719
--- /dev/null
+++ b/libavfilter/vf_framestep.c
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2012 Stefano Sabatini
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file framestep filter, inspired on libmpcodecs/vf_framestep.c by
+ * Daniele Fornighieri <guru AT digitalfantasy it>.
+ */
+
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#include "internal.h"
+#include "video.h"
+
+typedef struct NullContext {
+ const AVClass *class;
+ int frame_step;
+} FrameStepContext;
+
+#define OFFSET(x) offsetof(FrameStepContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption framestep_options[] = {
+ { "step", "set frame step", OFFSET(frame_step), AV_OPT_TYPE_INT, {.i64=1}, 1, INT_MAX, FLAGS},
+ { NULL },
+};
+
+AVFILTER_DEFINE_CLASS(framestep);
+
+static int config_output_props(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ FrameStepContext *framestep = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
+
+ outlink->frame_rate =
+ av_div_q(inlink->frame_rate, (AVRational){framestep->frame_step, 1});
+
+ av_log(ctx, AV_LOG_VERBOSE, "step:%d frame_rate:%d/%d(%f) -> frame_rate:%d/%d(%f)\n",
+ framestep->frame_step,
+ inlink->frame_rate.num, inlink->frame_rate.den, av_q2d(inlink->frame_rate),
+ outlink->frame_rate.num, outlink->frame_rate.den, av_q2d(outlink->frame_rate));
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *ref)
+{
+ FrameStepContext *framestep = inlink->dst->priv;
+
+ if (!(inlink->frame_count_out % framestep->frame_step)) {
+ return ff_filter_frame(inlink->dst->outputs[0], ref);
+ } else {
+ av_frame_free(&ref);
+ return 0;
+ }
+}
+
+static const AVFilterPad framestep_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad framestep_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output_props,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_framestep = {
+ .name = "framestep",
+ .description = NULL_IF_CONFIG_SMALL("Select one frame every N frames."),
+ .priv_size = sizeof(FrameStepContext),
+ .priv_class = &framestep_class,
+ .inputs = framestep_inputs,
+ .outputs = framestep_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+};
diff --git a/libavfilter/vf_frei0r.c b/libavfilter/vf_frei0r.c
index f41fbcbb30..8aeac08519 100644
--- a/libavfilter/vf_frei0r.c
+++ b/libavfilter/vf_frei0r.c
@@ -1,19 +1,19 @@
/*
* Copyright (c) 2010 Stefano Sabatini
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -29,6 +29,8 @@
#include <stdlib.h>
#include "config.h"
#include "libavutil/avstring.h"
+#include "libavutil/common.h"
+#include "libavutil/eval.h"
#include "libavutil/imgutils.h"
#include "libavutil/internal.h"
#include "libavutil/mathematics.h"
@@ -67,8 +69,7 @@ typedef struct Frei0rContext {
char *dl_name;
char *params;
- char *size;
- char *framerate;
+ AVRational framerate;
/* only used by the source */
int w, h;
@@ -104,7 +105,7 @@ static int set_param(AVFilterContext *ctx, f0r_param_info_t info, int index, cha
break;
case F0R_PARAM_DOUBLE:
- val.d = strtod(param, &tail);
+ val.d = av_strtod(param, &tail);
if (*tail || val.d == HUGE_VAL)
goto fail;
break;
@@ -139,6 +140,9 @@ static int set_params(AVFilterContext *ctx, const char *params)
Frei0rContext *s = ctx->priv;
int i;
+ if (!params)
+ return 0;
+
for (i = 0; i < s->plugin_info.num_params; i++) {
f0r_param_info_t info;
char *param;
@@ -161,13 +165,15 @@ static int set_params(AVFilterContext *ctx, const char *params)
return 0;
}
-static void *load_path(AVFilterContext *ctx, const char *prefix, const char *name)
+static int load_path(AVFilterContext *ctx, void **handle_ptr, const char *prefix, const char *name)
{
- char path[1024];
-
- snprintf(path, sizeof(path), "%s%s%s", prefix, name, SLIBSUF);
+ char *path = av_asprintf("%s%s%s", prefix, name, SLIBSUF);
+ if (!path)
+ return AVERROR(ENOMEM);
av_log(ctx, AV_LOG_DEBUG, "Looking for frei0r effect in '%s'.\n", path);
- return dlopen(path, RTLD_NOW|RTLD_LOCAL);
+ *handle_ptr = dlopen(path, RTLD_NOW|RTLD_LOCAL);
+ av_free(path);
+ return 0;
}
static av_cold int frei0r_init(AVFilterContext *ctx,
@@ -178,35 +184,62 @@ static av_cold int frei0r_init(AVFilterContext *ctx,
f0r_get_plugin_info_f f0r_get_plugin_info;
f0r_plugin_info_t *pi;
char *path;
+ int ret = 0;
+ int i;
+ static const char* const frei0r_pathlist[] = {
+ "/usr/local/lib/frei0r-1/",
+ "/usr/lib/frei0r-1/",
+ "/usr/local/lib64/frei0r-1/",
+ "/usr/lib64/frei0r-1/"
+ };
if (!dl_name) {
av_log(ctx, AV_LOG_ERROR, "No filter name provided.\n");
return AVERROR(EINVAL);
}
- /* see: http://piksel.org/frei0r/1.2/spec/1.2/spec/group__pluglocations.html */
- if (path = getenv("FREI0R_PATH")) {
- while(*path) {
- char *ptr = av_get_token((const char **)&path, ":");
- if (!ptr)
- return AVERROR(ENOMEM);
- s->dl_handle = load_path(ctx, ptr, dl_name);
- av_freep(&ptr);
+ /* see: http://frei0r.dyne.org/codedoc/html/group__pluglocations.html */
+ if ((path = av_strdup(getenv("FREI0R_PATH")))) {
+#ifdef _WIN32
+ const char *separator = ";";
+#else
+ const char *separator = ":";
+#endif
+ char *p, *ptr = NULL;
+ for (p = path; p = av_strtok(p, separator, &ptr); p = NULL) {
+ /* add additional trailing slash in case it is missing */
+ char *p1 = av_asprintf("%s/", p);
+ if (!p1) {
+ ret = AVERROR(ENOMEM);
+ goto check_path_end;
+ }
+ ret = load_path(ctx, &s->dl_handle, p1, dl_name);
+ av_free(p1);
+ if (ret < 0)
+ goto check_path_end;
if (s->dl_handle)
- break; /* found */
- if (*path)
- path++; /* skip ':' */
+ break;
}
+
+ check_path_end:
+ av_free(path);
+ if (ret < 0)
+ return ret;
}
if (!s->dl_handle && (path = getenv("HOME"))) {
- char prefix[1024];
- snprintf(prefix, sizeof(prefix), "%s/.frei0r-1/lib/", path);
- s->dl_handle = load_path(ctx, prefix, dl_name);
+ char *prefix = av_asprintf("%s/.frei0r-1/lib/", path);
+ if (!prefix)
+ return AVERROR(ENOMEM);
+ ret = load_path(ctx, &s->dl_handle, prefix, dl_name);
+ av_free(prefix);
+ if (ret < 0)
+ return ret;
+ }
+ for (i = 0; !s->dl_handle && i < FF_ARRAY_ELEMS(frei0r_pathlist); i++) {
+ ret = load_path(ctx, &s->dl_handle, frei0r_pathlist[i], dl_name);
+ if (ret < 0)
+ return ret;
}
- if (!s->dl_handle)
- s->dl_handle = load_path(ctx, "/usr/local/lib/frei0r-1/", dl_name);
- if (!s->dl_handle)
- s->dl_handle = load_path(ctx, "/usr/lib/frei0r-1/", dl_name);
if (!s->dl_handle) {
av_log(ctx, AV_LOG_ERROR, "Could not find module '%s'.\n", dl_name);
return AVERROR(EINVAL);
@@ -290,11 +323,14 @@ static int query_formats(AVFilterContext *ctx)
{
Frei0rContext *s = ctx->priv;
AVFilterFormats *formats = NULL;
+ int ret;
if (s->plugin_info.color_model == F0R_COLOR_MODEL_BGRA8888) {
- ff_add_format(&formats, AV_PIX_FMT_BGRA);
+ if ((ret = ff_add_format(&formats, AV_PIX_FMT_BGRA)) < 0)
+ return ret;
} else if (s->plugin_info.color_model == F0R_COLOR_MODEL_RGBA8888) {
- ff_add_format(&formats, AV_PIX_FMT_RGBA);
+ if ((ret = ff_add_format(&formats, AV_PIX_FMT_RGBA)) < 0)
+ return ret;
} else { /* F0R_COLOR_MODEL_PACKED32 */
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_BGRA, AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR, AV_PIX_FMT_ARGB, AV_PIX_FMT_NONE
@@ -305,8 +341,7 @@ static int query_formats(AVFilterContext *ctx)
if (!formats)
return AVERROR(ENOMEM);
- ff_set_common_formats(ctx, formats);
- return 0;
+ return ff_set_common_formats(ctx, formats);
}
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
@@ -332,19 +367,14 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
}
#define OFFSET(x) offsetof(Frei0rContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
-static const AVOption filter_options[] = {
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
+static const AVOption frei0r_options[] = {
{ "filter_name", NULL, OFFSET(dl_name), AV_OPT_TYPE_STRING, .flags = FLAGS },
{ "filter_params", NULL, OFFSET(params), AV_OPT_TYPE_STRING, .flags = FLAGS },
- { NULL },
+ { NULL }
};
-static const AVClass filter_class = {
- .class_name = "frei0r",
- .item_name = av_default_item_name,
- .option = filter_options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(frei0r);
static const AVFilterPad avfilter_vf_frei0r_inputs[] = {
{
@@ -365,38 +395,23 @@ static const AVFilterPad avfilter_vf_frei0r_outputs[] = {
};
AVFilter ff_vf_frei0r = {
- .name = "frei0r",
- .description = NULL_IF_CONFIG_SMALL("Apply a frei0r effect."),
-
+ .name = "frei0r",
+ .description = NULL_IF_CONFIG_SMALL("Apply a frei0r effect."),
.query_formats = query_formats,
- .init = filter_init,
- .uninit = uninit,
-
- .priv_size = sizeof(Frei0rContext),
- .priv_class = &filter_class,
-
- .inputs = avfilter_vf_frei0r_inputs,
-
- .outputs = avfilter_vf_frei0r_outputs,
+ .init = filter_init,
+ .uninit = uninit,
+ .priv_size = sizeof(Frei0rContext),
+ .priv_class = &frei0r_class,
+ .inputs = avfilter_vf_frei0r_inputs,
+ .outputs = avfilter_vf_frei0r_outputs,
};
static av_cold int source_init(AVFilterContext *ctx)
{
Frei0rContext *s = ctx->priv;
- AVRational frame_rate_q;
- if (av_parse_video_size(&s->w, &s->h, s->size) < 0) {
- av_log(ctx, AV_LOG_ERROR, "Invalid frame size: '%s'.\n", s->size);
- return AVERROR(EINVAL);
- }
-
- if (av_parse_video_rate(&frame_rate_q, s->framerate) < 0 ||
- frame_rate_q.den <= 0 || frame_rate_q.num <= 0) {
- av_log(ctx, AV_LOG_ERROR, "Invalid frame rate: '%s'.\n", s->framerate);
- return AVERROR(EINVAL);
- }
- s->time_base.num = frame_rate_q.den;
- s->time_base.den = frame_rate_q.num;
+ s->time_base.num = s->framerate.den;
+ s->time_base.den = s->framerate.num;
return frei0r_init(ctx, s->dl_name, F0R_PLUGIN_TYPE_SOURCE);
}
@@ -412,6 +427,7 @@ static int source_config_props(AVFilterLink *outlink)
outlink->h = s->h;
outlink->time_base = s->time_base;
outlink->frame_rate = av_inv_q(s->time_base);
+ outlink->sample_aspect_ratio = (AVRational){1,1};
if (s->destruct && s->instance)
s->destruct(s->instance);
@@ -444,20 +460,15 @@ static int source_request_frame(AVFilterLink *outlink)
return ff_filter_frame(outlink, frame);
}
-static const AVOption src_options[] = {
- { "size", "Dimensions of the generated video.", OFFSET(size), AV_OPT_TYPE_STRING, { .str = "" }, .flags = FLAGS },
- { "framerate", NULL, OFFSET(framerate), AV_OPT_TYPE_STRING, { .str = "25" }, .flags = FLAGS },
+static const AVOption frei0r_src_options[] = {
+ { "size", "Dimensions of the generated video.", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, { .str = "320x240" }, .flags = FLAGS },
+ { "framerate", NULL, OFFSET(framerate), AV_OPT_TYPE_VIDEO_RATE, { .str = "25" }, 0, INT_MAX, .flags = FLAGS },
{ "filter_name", NULL, OFFSET(dl_name), AV_OPT_TYPE_STRING, .flags = FLAGS },
{ "filter_params", NULL, OFFSET(params), AV_OPT_TYPE_STRING, .flags = FLAGS },
{ NULL },
};
-static const AVClass src_class = {
- .class_name = "frei0r_src",
- .item_name = av_default_item_name,
- .option = src_options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(frei0r_src);
static const AVFilterPad avfilter_vsrc_frei0r_src_outputs[] = {
{
@@ -470,17 +481,13 @@ static const AVFilterPad avfilter_vsrc_frei0r_src_outputs[] = {
};
AVFilter ff_vsrc_frei0r_src = {
- .name = "frei0r_src",
- .description = NULL_IF_CONFIG_SMALL("Generate a frei0r source."),
-
- .priv_size = sizeof(Frei0rContext),
- .priv_class = &src_class,
- .init = source_init,
- .uninit = uninit,
-
+ .name = "frei0r_src",
+ .description = NULL_IF_CONFIG_SMALL("Generate a frei0r source."),
+ .priv_size = sizeof(Frei0rContext),
+ .priv_class = &frei0r_src_class,
+ .init = source_init,
+ .uninit = uninit,
.query_formats = query_formats,
-
- .inputs = NULL,
-
- .outputs = avfilter_vsrc_frei0r_src_outputs,
+ .inputs = NULL,
+ .outputs = avfilter_vsrc_frei0r_src_outputs,
};
diff --git a/libavfilter/vf_fspp.c b/libavfilter/vf_fspp.c
new file mode 100644
index 0000000000..c6989046c4
--- /dev/null
+++ b/libavfilter/vf_fspp.c
@@ -0,0 +1,693 @@
+/*
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (C) 2005 Nikolaj Poroshin <porosh3@psu.ru>
+ * Copyright (c) 2014 Arwa Arif <arwaarif1994@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/**
+ * @file
+ * Fast Simple Post-processing filter
+ * This implementation is based on an algorithm described in
+ * "Aria Nosratinia Embedded Post-Processing for
+ * Enhancement of Compressed Images (1999)"
+ * (http://www.utdallas.edu/~aria/papers/vlsisp99.pdf)
+ * Further, with splitting (I)DCT into horizontal/vertical passes, one of
+ * them can be performed once per block, not per pixel. This allows for much
+ * higher speed.
+ *
+ * Originally written by Michael Niedermayer and Nikolaj for the MPlayer
+ * project, and ported by Arwa Arif for FFmpeg.
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "internal.h"
+#include "vf_fspp.h"
+
+#define OFFSET(x) offsetof(FSPPContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption fspp_options[] = {
+ { "quality", "set quality", OFFSET(log2_count), AV_OPT_TYPE_INT, {.i64 = 4}, 4, MAX_LEVEL, FLAGS },
+ { "qp", "force a constant quantizer parameter", OFFSET(qp), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 64, FLAGS },
+ { "strength", "set filter strength", OFFSET(strength), AV_OPT_TYPE_INT, {.i64 = 0}, -15, 32, FLAGS },
+ { "use_bframe_qp", "use B-frames' QP", OFFSET(use_bframe_qp), AV_OPT_TYPE_BOOL,{.i64 = 0}, 0, 1, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(fspp);
+
+DECLARE_ALIGNED(32, static const uint8_t, dither)[8][8] = {
+ { 0, 48, 12, 60, 3, 51, 15, 63, },
+ { 32, 16, 44, 28, 35, 19, 47, 31, },
+ { 8, 56, 4, 52, 11, 59, 7, 55, },
+ { 40, 24, 36, 20, 43, 27, 39, 23, },
+ { 2, 50, 14, 62, 1, 49, 13, 61, },
+ { 34, 18, 46, 30, 33, 17, 45, 29, },
+ { 10, 58, 6, 54, 9, 57, 5, 53, },
+ { 42, 26, 38, 22, 41, 25, 37, 21, },
+};
+
+static const short custom_threshold[64] = {
+// values (296) can't be too high
+// -it causes too big quant dependence
+// or maybe overflow(check), which results in some flashing
+ 71, 296, 295, 237, 71, 40, 38, 19,
+ 245, 193, 185, 121, 102, 73, 53, 27,
+ 158, 129, 141, 107, 97, 73, 50, 26,
+ 102, 116, 109, 98, 82, 66, 45, 23,
+ 71, 94, 95, 81, 70, 56, 38, 20,
+ 56, 77, 74, 66, 56, 44, 30, 15,
+ 38, 53, 50, 45, 38, 30, 21, 11,
+ 20, 27, 26, 23, 20, 15, 11, 5
+};
+
+//This func reads from 1 slice, 1 and clears 0 & 1
+static void store_slice_c(uint8_t *dst, int16_t *src,
+ ptrdiff_t dst_stride, ptrdiff_t src_stride,
+ ptrdiff_t width, ptrdiff_t height, ptrdiff_t log2_scale)
+{
+ int y, x;
+#define STORE(pos) \
+ temp = (src[x + pos] + (d[pos] >> log2_scale)) >> (6 - log2_scale); \
+ src[x + pos] = src[x + pos - 8 * src_stride] = 0; \
+ if (temp & 0x100) temp = ~(temp >> 31); \
+ dst[x + pos] = temp;
+
+ for (y = 0; y < height; y++) {
+ const uint8_t *d = dither[y];
+ for (x = 0; x < width; x += 8) {
+ int temp;
+ STORE(0);
+ STORE(1);
+ STORE(2);
+ STORE(3);
+ STORE(4);
+ STORE(5);
+ STORE(6);
+ STORE(7);
+ }
+ src += src_stride;
+ dst += dst_stride;
+ }
+}
+
+//This func reads from 2 slices, 0 & 2 and clears 2-nd
+static void store_slice2_c(uint8_t *dst, int16_t *src,
+ ptrdiff_t dst_stride, ptrdiff_t src_stride,
+ ptrdiff_t width, ptrdiff_t height, ptrdiff_t log2_scale)
+{
+ int y, x;
+#define STORE2(pos) \
+ temp = (src[x + pos] + src[x + pos + 16 * src_stride] + (d[pos] >> log2_scale)) >> (6 - log2_scale); \
+ src[x + pos + 16 * src_stride] = 0; \
+ if (temp & 0x100) temp = ~(temp >> 31); \
+ dst[x + pos] = temp;
+
+ for (y = 0; y < height; y++) {
+ const uint8_t *d = dither[y];
+ for (x = 0; x < width; x += 8) {
+ int temp;
+ STORE2(0);
+ STORE2(1);
+ STORE2(2);
+ STORE2(3);
+ STORE2(4);
+ STORE2(5);
+ STORE2(6);
+ STORE2(7);
+ }
+ src += src_stride;
+ dst += dst_stride;
+ }
+}
+
+static void mul_thrmat_c(int16_t *thr_adr_noq, int16_t *thr_adr, int q)
+{
+ int a;
+ for (a = 0; a < 64; a++)
+ thr_adr[a] = q * thr_adr_noq[a];
+}
+
+static void filter(FSPPContext *p, uint8_t *dst, uint8_t *src,
+ int dst_stride, int src_stride,
+ int width, int height,
+ uint8_t *qp_store, int qp_stride, int is_luma)
+{
+ int x, x0, y, es, qy, t;
+
+ const int stride = is_luma ? p->temp_stride : (width + 16);
+ const int step = 6 - p->log2_count;
+ const int qpsh = 4 - p->hsub * !is_luma;
+ const int qpsv = 4 - p->vsub * !is_luma;
+
+ DECLARE_ALIGNED(32, int32_t, block_align)[4 * 8 * BLOCKSZ + 4 * 8 * BLOCKSZ];
+ int16_t *block = (int16_t *)block_align;
+ int16_t *block3 = (int16_t *)(block_align + 4 * 8 * BLOCKSZ);
+
+ memset(block3, 0, 4 * 8 * BLOCKSZ);
+
+ if (!src || !dst) return;
+
+ for (y = 0; y < height; y++) {
+ int index = 8 + 8 * stride + y * stride;
+ memcpy(p->src + index, src + y * src_stride, width);
+ for (x = 0; x < 8; x++) {
+ p->src[index - x - 1] = p->src[index + x ];
+ p->src[index + width + x ] = p->src[index + width - x - 1];
+ }
+ }
+
+ for (y = 0; y < 8; y++) {
+ memcpy(p->src + ( 7 - y ) * stride, p->src + ( y + 8 ) * stride, stride);
+ memcpy(p->src + (height + 8 + y) * stride, p->src + (height - y + 7) * stride, stride);
+ }
+ //FIXME (try edge emu)
+
+ for (y = 8; y < 24; y++)
+ memset(p->temp + 8 + y * stride, 0, width * sizeof(int16_t));
+
+ for (y = step; y < height + 8; y += step) { //step= 1,2
+ const int y1 = y - 8 + step; //l5-7 l4-6;
+ qy = y - 4;
+
+ if (qy > height - 1) qy = height - 1;
+ if (qy < 0) qy = 0;
+
+ qy = (qy >> qpsv) * qp_stride;
+ p->row_fdct(block, p->src + y * stride + 2 - (y&1), stride, 2);
+
+ for (x0 = 0; x0 < width + 8 - 8 * (BLOCKSZ - 1); x0 += 8 * (BLOCKSZ - 1)) {
+ p->row_fdct(block + 8 * 8, p->src + y * stride + 8 + x0 + 2 - (y&1), stride, 2 * (BLOCKSZ - 1));
+
+ if (p->qp)
+ p->column_fidct((int16_t *)(&p->threshold_mtx[0]), block + 0 * 8, block3 + 0 * 8, 8 * (BLOCKSZ - 1)); //yes, this is a HOTSPOT
+ else
+ for (x = 0; x < 8 * (BLOCKSZ - 1); x += 8) {
+ t = x + x0 - 2; //correct t=x+x0-2-(y&1), but its the same
+
+ if (t < 0) t = 0; //t always < width-2
+
+ t = qp_store[qy + (t >> qpsh)];
+ t = ff_norm_qscale(t, p->qscale_type);
+
+ if (t != p->prev_q) p->prev_q = t, p->mul_thrmat((int16_t *)(&p->threshold_mtx_noq[0]), (int16_t *)(&p->threshold_mtx[0]), t);
+ p->column_fidct((int16_t *)(&p->threshold_mtx[0]), block + x * 8, block3 + x * 8, 8); //yes, this is a HOTSPOT
+ }
+ p->row_idct(block3 + 0 * 8, p->temp + (y & 15) * stride + x0 + 2 - (y & 1), stride, 2 * (BLOCKSZ - 1));
+ memmove(block, block + (BLOCKSZ - 1) * 64, 8 * 8 * sizeof(int16_t)); //cycling
+ memmove(block3, block3 + (BLOCKSZ - 1) * 64, 6 * 8 * sizeof(int16_t));
+ }
+
+ es = width + 8 - x0; // 8, ...
+ if (es > 8)
+ p->row_fdct(block + 8 * 8, p->src + y * stride + 8 + x0 + 2 - (y & 1), stride, (es - 4) >> 2);
+
+ p->column_fidct((int16_t *)(&p->threshold_mtx[0]), block, block3, es&(~1));
+ if (es > 3)
+ p->row_idct(block3 + 0 * 8, p->temp + (y & 15) * stride + x0 + 2 - (y & 1), stride, es >> 2);
+
+ if (!(y1 & 7) && y1) {
+ if (y1 & 8)
+ p->store_slice(dst + (y1 - 8) * dst_stride, p->temp + 8 + 8 * stride,
+ dst_stride, stride, width, 8, 5 - p->log2_count);
+ else
+ p->store_slice2(dst + (y1 - 8) * dst_stride, p->temp + 8 + 0 * stride,
+ dst_stride, stride, width, 8, 5 - p->log2_count);
+ }
+ }
+
+ if (y & 7) { // height % 8 != 0
+ if (y & 8)
+ p->store_slice(dst + ((y - 8) & ~7) * dst_stride, p->temp + 8 + 8 * stride,
+ dst_stride, stride, width, y&7, 5 - p->log2_count);
+ else
+ p->store_slice2(dst + ((y - 8) & ~7) * dst_stride, p->temp + 8 + 0 * stride,
+ dst_stride, stride, width, y&7, 5 - p->log2_count);
+ }
+}
+
+static void column_fidct_c(int16_t *thr_adr, int16_t *data, int16_t *output, int cnt)
+{
+ int_simd16_t tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+ int_simd16_t tmp10, tmp11, tmp12, tmp13;
+ int_simd16_t z1,z2,z3,z4,z5, z10, z11, z12, z13;
+ int_simd16_t d0, d1, d2, d3, d4, d5, d6, d7;
+
+ int16_t *dataptr;
+ int16_t *wsptr;
+ int16_t *threshold;
+ int ctr;
+
+ dataptr = data;
+ wsptr = output;
+
+ for (; cnt > 0; cnt -= 2) { //start positions
+ threshold = (int16_t *)thr_adr;//threshold_mtx
+ for (ctr = DCTSIZE; ctr > 0; ctr--) {
+ // Process columns from input, add to output.
+ tmp0 = dataptr[DCTSIZE * 0] + dataptr[DCTSIZE * 7];
+ tmp7 = dataptr[DCTSIZE * 0] - dataptr[DCTSIZE * 7];
+
+ tmp1 = dataptr[DCTSIZE * 1] + dataptr[DCTSIZE * 6];
+ tmp6 = dataptr[DCTSIZE * 1] - dataptr[DCTSIZE * 6];
+
+ tmp2 = dataptr[DCTSIZE * 2] + dataptr[DCTSIZE * 5];
+ tmp5 = dataptr[DCTSIZE * 2] - dataptr[DCTSIZE * 5];
+
+ tmp3 = dataptr[DCTSIZE * 3] + dataptr[DCTSIZE * 4];
+ tmp4 = dataptr[DCTSIZE * 3] - dataptr[DCTSIZE * 4];
+
+ // Even part of FDCT
+
+ tmp10 = tmp0 + tmp3;
+ tmp13 = tmp0 - tmp3;
+ tmp11 = tmp1 + tmp2;
+ tmp12 = tmp1 - tmp2;
+
+ d0 = tmp10 + tmp11;
+ d4 = tmp10 - tmp11;
+
+ z1 = MULTIPLY16H((tmp12 + tmp13) << 2, FIX_0_707106781);
+ d2 = tmp13 + z1;
+ d6 = tmp13 - z1;
+
+ // Even part of IDCT
+
+ THRESHOLD(tmp0, d0, threshold[0 * 8]);
+ THRESHOLD(tmp1, d2, threshold[2 * 8]);
+ THRESHOLD(tmp2, d4, threshold[4 * 8]);
+ THRESHOLD(tmp3, d6, threshold[6 * 8]);
+ tmp0 += 2;
+ tmp10 = (tmp0 + tmp2) >> 2;
+ tmp11 = (tmp0 - tmp2) >> 2;
+
+ tmp13 = (tmp1 + tmp3) >>2; //+2 ! (psnr decides)
+ tmp12 = MULTIPLY16H((tmp1 - tmp3), FIX_1_414213562_A) - tmp13; //<<2
+
+ tmp0 = tmp10 + tmp13; //->temps
+ tmp3 = tmp10 - tmp13; //->temps
+ tmp1 = tmp11 + tmp12; //->temps
+ tmp2 = tmp11 - tmp12; //->temps
+
+ // Odd part of FDCT
+
+ tmp10 = tmp4 + tmp5;
+ tmp11 = tmp5 + tmp6;
+ tmp12 = tmp6 + tmp7;
+
+ z5 = MULTIPLY16H((tmp10 - tmp12) << 2, FIX_0_382683433);
+ z2 = MULTIPLY16H(tmp10 << 2, FIX_0_541196100) + z5;
+ z4 = MULTIPLY16H(tmp12 << 2, FIX_1_306562965) + z5;
+ z3 = MULTIPLY16H(tmp11 << 2, FIX_0_707106781);
+
+ z11 = tmp7 + z3;
+ z13 = tmp7 - z3;
+
+ d5 = z13 + z2;
+ d3 = z13 - z2;
+ d1 = z11 + z4;
+ d7 = z11 - z4;
+
+ // Odd part of IDCT
+
+ THRESHOLD(tmp4, d1, threshold[1 * 8]);
+ THRESHOLD(tmp5, d3, threshold[3 * 8]);
+ THRESHOLD(tmp6, d5, threshold[5 * 8]);
+ THRESHOLD(tmp7, d7, threshold[7 * 8]);
+
+ //Simd version uses here a shortcut for the tmp5,tmp6,tmp7 == 0
+ z13 = tmp6 + tmp5;
+ z10 = (tmp6 - tmp5) << 1;
+ z11 = tmp4 + tmp7;
+ z12 = (tmp4 - tmp7) << 1;
+
+ tmp7 = (z11 + z13) >> 2; //+2 !
+ tmp11 = MULTIPLY16H((z11 - z13) << 1, FIX_1_414213562);
+ z5 = MULTIPLY16H(z10 + z12, FIX_1_847759065);
+ tmp10 = MULTIPLY16H(z12, FIX_1_082392200) - z5;
+ tmp12 = MULTIPLY16H(z10, FIX_2_613125930) + z5; // - !!
+
+ tmp6 = tmp12 - tmp7;
+ tmp5 = tmp11 - tmp6;
+ tmp4 = tmp10 + tmp5;
+
+ wsptr[DCTSIZE * 0] += (tmp0 + tmp7);
+ wsptr[DCTSIZE * 1] += (tmp1 + tmp6);
+ wsptr[DCTSIZE * 2] += (tmp2 + tmp5);
+ wsptr[DCTSIZE * 3] += (tmp3 - tmp4);
+ wsptr[DCTSIZE * 4] += (tmp3 + tmp4);
+ wsptr[DCTSIZE * 5] += (tmp2 - tmp5);
+ wsptr[DCTSIZE * 6] = (tmp1 - tmp6);
+ wsptr[DCTSIZE * 7] = (tmp0 - tmp7);
+ //
+ dataptr++; //next column
+ wsptr++;
+ threshold++;
+ }
+ dataptr += 8; //skip each second start pos
+ wsptr += 8;
+ }
+}
+
+static void row_idct_c(int16_t *workspace, int16_t *output_adr, ptrdiff_t output_stride, int cnt)
+{
+ int_simd16_t tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+ int_simd16_t tmp10, tmp11, tmp12, tmp13;
+ int_simd16_t z5, z10, z11, z12, z13;
+ int16_t *outptr;
+ int16_t *wsptr;
+
+ cnt *= 4;
+ wsptr = workspace;
+ outptr = output_adr;
+ for (; cnt > 0; cnt--) {
+ // Even part
+ //Simd version reads 4x4 block and transposes it
+ tmp10 = wsptr[2] + wsptr[3];
+ tmp11 = wsptr[2] - wsptr[3];
+
+ tmp13 = wsptr[0] + wsptr[1];
+ tmp12 = (MULTIPLY16H(wsptr[0] - wsptr[1], FIX_1_414213562_A) << 2) - tmp13;//this shift order to avoid overflow
+
+ tmp0 = tmp10 + tmp13; //->temps
+ tmp3 = tmp10 - tmp13; //->temps
+ tmp1 = tmp11 + tmp12;
+ tmp2 = tmp11 - tmp12;
+
+ // Odd part
+ //Also transpose, with previous:
+ // ---- ---- ||||
+ // ---- ---- idct ||||
+ // ---- ---- ---> ||||
+ // ---- ---- ||||
+ z13 = wsptr[4] + wsptr[5];
+ z10 = wsptr[4] - wsptr[5];
+ z11 = wsptr[6] + wsptr[7];
+ z12 = wsptr[6] - wsptr[7];
+
+ tmp7 = z11 + z13;
+ tmp11 = MULTIPLY16H(z11 - z13, FIX_1_414213562);
+
+ z5 = MULTIPLY16H(z10 + z12, FIX_1_847759065);
+ tmp10 = MULTIPLY16H(z12, FIX_1_082392200) - z5;
+ tmp12 = MULTIPLY16H(z10, FIX_2_613125930) + z5; // - FIX_
+
+ tmp6 = (tmp12 << 3) - tmp7;
+ tmp5 = (tmp11 << 3) - tmp6;
+ tmp4 = (tmp10 << 3) + tmp5;
+
+ // Final output stage: descale and write column
+ outptr[0 * output_stride] += DESCALE(tmp0 + tmp7, 3);
+ outptr[1 * output_stride] += DESCALE(tmp1 + tmp6, 3);
+ outptr[2 * output_stride] += DESCALE(tmp2 + tmp5, 3);
+ outptr[3 * output_stride] += DESCALE(tmp3 - tmp4, 3);
+ outptr[4 * output_stride] += DESCALE(tmp3 + tmp4, 3);
+ outptr[5 * output_stride] += DESCALE(tmp2 - tmp5, 3);
+ outptr[6 * output_stride] += DESCALE(tmp1 - tmp6, 3); //no += ?
+ outptr[7 * output_stride] += DESCALE(tmp0 - tmp7, 3); //no += ?
+ outptr++;
+
+ wsptr += DCTSIZE; // advance pointer to next row
+ }
+}
+
+static void row_fdct_c(int16_t *data, const uint8_t *pixels, ptrdiff_t line_size, int cnt)
+{
+ int_simd16_t tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+ int_simd16_t tmp10, tmp11, tmp12, tmp13;
+ int_simd16_t z1, z2, z3, z4, z5, z11, z13;
+ int16_t *dataptr;
+
+ cnt *= 4;
+ // Pass 1: process rows.
+
+ dataptr = data;
+ for (; cnt > 0; cnt--) {
+ tmp0 = pixels[line_size * 0] + pixels[line_size * 7];
+ tmp7 = pixels[line_size * 0] - pixels[line_size * 7];
+ tmp1 = pixels[line_size * 1] + pixels[line_size * 6];
+ tmp6 = pixels[line_size * 1] - pixels[line_size * 6];
+ tmp2 = pixels[line_size * 2] + pixels[line_size * 5];
+ tmp5 = pixels[line_size * 2] - pixels[line_size * 5];
+ tmp3 = pixels[line_size * 3] + pixels[line_size * 4];
+ tmp4 = pixels[line_size * 3] - pixels[line_size * 4];
+
+ // Even part
+
+ tmp10 = tmp0 + tmp3;
+ tmp13 = tmp0 - tmp3;
+ tmp11 = tmp1 + tmp2;
+ tmp12 = tmp1 - tmp2;
+ //Even columns are written first, this leads to different order of columns
+ //in column_fidct(), but they are processed independently, so all ok.
+ //Later in the row_idct() columns readed at the same order.
+ dataptr[2] = tmp10 + tmp11;
+ dataptr[3] = tmp10 - tmp11;
+
+ z1 = MULTIPLY16H((tmp12 + tmp13) << 2, FIX_0_707106781);
+ dataptr[0] = tmp13 + z1;
+ dataptr[1] = tmp13 - z1;
+
+ // Odd part
+
+ tmp10 = (tmp4 + tmp5) << 2;
+ tmp11 = (tmp5 + tmp6) << 2;
+ tmp12 = (tmp6 + tmp7) << 2;
+
+ z5 = MULTIPLY16H(tmp10 - tmp12, FIX_0_382683433);
+ z2 = MULTIPLY16H(tmp10, FIX_0_541196100) + z5;
+ z4 = MULTIPLY16H(tmp12, FIX_1_306562965) + z5;
+ z3 = MULTIPLY16H(tmp11, FIX_0_707106781);
+
+ z11 = tmp7 + z3;
+ z13 = tmp7 - z3;
+
+ dataptr[4] = z13 + z2;
+ dataptr[5] = z13 - z2;
+ dataptr[6] = z11 + z4;
+ dataptr[7] = z11 - z4;
+
+ pixels++; // advance pointer to next column
+ dataptr += DCTSIZE;
+ }
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P,
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
+ AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P,
+ AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ440P,
+ AV_PIX_FMT_GBRP, AV_PIX_FMT_GRAY8,
+ AV_PIX_FMT_NONE
+ };
+
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ FSPPContext *fspp = ctx->priv;
+ const int h = FFALIGN(inlink->h + 16, 16);
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+
+ fspp->hsub = desc->log2_chroma_w;
+ fspp->vsub = desc->log2_chroma_h;
+
+ fspp->temp_stride = FFALIGN(inlink->w + 16, 16);
+ fspp->temp = av_malloc_array(fspp->temp_stride, h * sizeof(*fspp->temp));
+ fspp->src = av_malloc_array(fspp->temp_stride, h * sizeof(*fspp->src));
+
+ if (!fspp->temp || !fspp->src)
+ return AVERROR(ENOMEM);
+
+ if (!fspp->use_bframe_qp && !fspp->qp) {
+ fspp->non_b_qp_alloc_size = AV_CEIL_RSHIFT(inlink->w, 4) * AV_CEIL_RSHIFT(inlink->h, 4);
+ fspp->non_b_qp_table = av_calloc(fspp->non_b_qp_alloc_size, sizeof(*fspp->non_b_qp_table));
+ if (!fspp->non_b_qp_table)
+ return AVERROR(ENOMEM);
+ }
+
+ fspp->store_slice = store_slice_c;
+ fspp->store_slice2 = store_slice2_c;
+ fspp->mul_thrmat = mul_thrmat_c;
+ fspp->column_fidct = column_fidct_c;
+ fspp->row_idct = row_idct_c;
+ fspp->row_fdct = row_fdct_c;
+
+ if (ARCH_X86)
+ ff_fspp_init_x86(fspp);
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ FSPPContext *fspp = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFrame *out = in;
+
+ int qp_stride = 0;
+ uint8_t *qp_table = NULL;
+ int i, bias;
+ int custom_threshold_m[64];
+
+ bias = (1 << 4) + fspp->strength;
+
+ for (i = 0; i < 64; i++) //FIXME: tune custom_threshold[] and remove this !
+ custom_threshold_m[i] = (int)(custom_threshold[i] * (bias / 71.0) + 0.5);
+
+ for (i = 0; i < 8; i++) {
+ fspp->threshold_mtx_noq[2 * i] = (uint64_t)custom_threshold_m[i * 8 + 2]
+ |(((uint64_t)custom_threshold_m[i * 8 + 6]) << 16)
+ |(((uint64_t)custom_threshold_m[i * 8 + 0]) << 32)
+ |(((uint64_t)custom_threshold_m[i * 8 + 4]) << 48);
+
+ fspp->threshold_mtx_noq[2 * i + 1] = (uint64_t)custom_threshold_m[i * 8 + 5]
+ |(((uint64_t)custom_threshold_m[i * 8 + 3]) << 16)
+ |(((uint64_t)custom_threshold_m[i * 8 + 1]) << 32)
+ |(((uint64_t)custom_threshold_m[i * 8 + 7]) << 48);
+ }
+
+ if (fspp->qp)
+ fspp->prev_q = fspp->qp, fspp->mul_thrmat((int16_t *)(&fspp->threshold_mtx_noq[0]), (int16_t *)(&fspp->threshold_mtx[0]), fspp->qp);
+
+ /* if we are not in a constant user quantizer mode and we don't want to use
+ * the quantizers from the B-frames (B-frames often have a higher QP), we
+ * need to save the qp table from the last non B-frame; this is what the
+ * following code block does */
+ if (!fspp->qp) {
+ qp_table = av_frame_get_qp_table(in, &qp_stride, &fspp->qscale_type);
+
+ if (qp_table && !fspp->use_bframe_qp && in->pict_type != AV_PICTURE_TYPE_B) {
+ int w, h;
+
+ /* if the qp stride is not set, it means the QP are only defined on
+ * a line basis */
+ if (!qp_stride) {
+ w = AV_CEIL_RSHIFT(inlink->w, 4);
+ h = 1;
+ } else {
+ w = qp_stride;
+ h = AV_CEIL_RSHIFT(inlink->h, 4);
+ }
+ if (w * h > fspp->non_b_qp_alloc_size) {
+ int ret = av_reallocp_array(&fspp->non_b_qp_table, w, h);
+ if (ret < 0) {
+ fspp->non_b_qp_alloc_size = 0;
+ return ret;
+ }
+ fspp->non_b_qp_alloc_size = w * h;
+ }
+
+ av_assert0(w * h <= fspp->non_b_qp_alloc_size);
+ memcpy(fspp->non_b_qp_table, qp_table, w * h);
+ }
+ }
+
+ if (fspp->log2_count && !ctx->is_disabled) {
+ if (!fspp->use_bframe_qp && fspp->non_b_qp_table)
+ qp_table = fspp->non_b_qp_table;
+
+ if (qp_table || fspp->qp) {
+ const int cw = AV_CEIL_RSHIFT(inlink->w, fspp->hsub);
+ const int ch = AV_CEIL_RSHIFT(inlink->h, fspp->vsub);
+
+ /* get a new frame if in-place is not possible or if the dimensions
+ * are not multiple of 8 */
+ if (!av_frame_is_writable(in) || (inlink->w & 7) || (inlink->h & 7)) {
+ const int aligned_w = FFALIGN(inlink->w, 8);
+ const int aligned_h = FFALIGN(inlink->h, 8);
+
+ out = ff_get_video_buffer(outlink, aligned_w, aligned_h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+ out->width = in->width;
+ out->height = in->height;
+ }
+
+ filter(fspp, out->data[0], in->data[0], out->linesize[0], in->linesize[0],
+ inlink->w, inlink->h, qp_table, qp_stride, 1);
+ filter(fspp, out->data[1], in->data[1], out->linesize[1], in->linesize[1],
+ cw, ch, qp_table, qp_stride, 0);
+ filter(fspp, out->data[2], in->data[2], out->linesize[2], in->linesize[2],
+ cw, ch, qp_table, qp_stride, 0);
+ emms_c();
+ }
+ }
+
+ if (in != out) {
+ if (in->data[3])
+ av_image_copy_plane(out->data[3], out->linesize[3],
+ in ->data[3], in ->linesize[3],
+ inlink->w, inlink->h);
+ av_frame_free(&in);
+ }
+ return ff_filter_frame(outlink, out);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ FSPPContext *fspp = ctx->priv;
+ av_freep(&fspp->temp);
+ av_freep(&fspp->src);
+ av_freep(&fspp->non_b_qp_table);
+}
+
+static const AVFilterPad fspp_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad fspp_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_fspp = {
+ .name = "fspp",
+ .description = NULL_IF_CONFIG_SMALL("Apply Fast Simple Post-processing filter."),
+ .priv_size = sizeof(FSPPContext),
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = fspp_inputs,
+ .outputs = fspp_outputs,
+ .priv_class = &fspp_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
+};
diff --git a/libavfilter/vf_fspp.h b/libavfilter/vf_fspp.h
new file mode 100644
index 0000000000..74a34473bb
--- /dev/null
+++ b/libavfilter/vf_fspp.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (C) 2005 Nikolaj Poroshin <porosh3@psu.ru>
+ * Copyright (c) 2014 Arwa Arif <arwaarif1994@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef AVFILTER_FSPP_H
+#define AVFILTER_FSPP_H
+
+#include "avfilter.h"
+
+#define BLOCKSZ 12
+#define MAX_LEVEL 5
+
+#define DCTSIZE 8
+#define DCTSIZE_S "8"
+
+#define FIX(x,s) ((int) ((x) * (1 << s) + 0.5) & 0xffff)
+#define C64(x) ((uint64_t)((x) | (x) << 16)) <<32 | (uint64_t)(x) | (uint64_t)(x) << 16
+#define FIX64(x,s) C64(FIX(x,s))
+
+#define MULTIPLY16H(x,k) (((x) * (k)) >> 16)
+#define THRESHOLD(r,x,t) \
+ if(((unsigned)((x) + t)) > t * 2) r = (x); \
+ else r = 0;
+#define DESCALE(x,n) (((x) + (1 << ((n) - 1))) >> n)
+
+typedef int32_t int_simd16_t;
+static const int16_t FIX_0_382683433 = FIX(0.382683433, 14);
+static const int16_t FIX_0_541196100 = FIX(0.541196100, 14);
+static const int16_t FIX_0_707106781 = FIX(M_SQRT1_2 , 14);
+static const int16_t FIX_1_306562965 = FIX(1.306562965, 14);
+static const int16_t FIX_1_414213562_A = FIX(M_SQRT2 , 14);
+static const int16_t FIX_1_847759065 = FIX(1.847759065, 13);
+static const int16_t FIX_2_613125930 = FIX(-2.613125930, 13);
+static const int16_t FIX_1_414213562 = FIX(M_SQRT2 , 13);
+static const int16_t FIX_1_082392200 = FIX(1.082392200, 13);
+
+typedef struct FSPPContext {
+ AVClass *class;
+ uint64_t threshold_mtx_noq[8 * 2];
+ uint64_t threshold_mtx[8 * 2]; //used in both C & MMX (& later SSE2) versions
+
+ int log2_count;
+ int strength;
+ int hsub;
+ int vsub;
+ int temp_stride;
+ int qp;
+ int qscale_type;
+ int prev_q;
+ uint8_t *src;
+ int16_t *temp;
+ uint8_t *non_b_qp_table;
+ int non_b_qp_alloc_size;
+ int use_bframe_qp;
+
+ void (*store_slice)(uint8_t *dst, int16_t *src,
+ ptrdiff_t dst_stride, ptrdiff_t src_stride,
+ ptrdiff_t width, ptrdiff_t height, ptrdiff_t log2_scale);
+
+ void (*store_slice2)(uint8_t *dst, int16_t *src,
+ ptrdiff_t dst_stride, ptrdiff_t src_stride,
+ ptrdiff_t width, ptrdiff_t height, ptrdiff_t log2_scale);
+
+ void (*mul_thrmat)(int16_t *thr_adr_noq, int16_t *thr_adr, int q);
+
+ void (*column_fidct)(int16_t *thr_adr, int16_t *data,
+ int16_t *output, int cnt);
+
+ void (*row_idct)(int16_t *workspace, int16_t *output_adr,
+ ptrdiff_t output_stride, int cnt);
+
+ void (*row_fdct)(int16_t *data, const uint8_t *pixels,
+ ptrdiff_t line_size, int cnt);
+
+} FSPPContext;
+
+void ff_fspp_init_x86(FSPPContext *fspp);
+
+#endif /* AVFILTER_FSPP_H */
diff --git a/libavfilter/vf_gblur.c b/libavfilter/vf_gblur.c
new file mode 100644
index 0000000000..f843e3f376
--- /dev/null
+++ b/libavfilter/vf_gblur.c
@@ -0,0 +1,367 @@
+/*
+ * Copyright (c) 2011 Pascal Getreuer
+ * Copyright (c) 2016 Paul B Mahol
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "libavutil/imgutils.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+typedef struct GBlurContext {
+ const AVClass *class;
+
+ float sigma;
+ float sigmaV;
+ int steps;
+ int planes;
+
+ int depth;
+ int planewidth[4];
+ int planeheight[4];
+ float *buffer;
+ float boundaryscale;
+ float boundaryscaleV;
+ float postscale;
+ float postscaleV;
+ float nu;
+ float nuV;
+ int nb_planes;
+} GBlurContext;
+
+#define OFFSET(x) offsetof(GBlurContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption gblur_options[] = {
+ { "sigma", "set sigma", OFFSET(sigma), AV_OPT_TYPE_FLOAT, {.dbl=0.5}, 0.0, 1024, FLAGS },
+ { "steps", "set number of steps", OFFSET(steps), AV_OPT_TYPE_INT, {.i64=1}, 1, 6, FLAGS },
+ { "planes", "set planes to filter", OFFSET(planes), AV_OPT_TYPE_INT, {.i64=0xF}, 0, 0xF, FLAGS },
+ { "sigmaV", "set vertical sigma", OFFSET(sigmaV), AV_OPT_TYPE_FLOAT, {.dbl=-1}, -1, 1024, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(gblur);
+
+typedef struct ThreadData {
+ int height;
+ int width;
+} ThreadData;
+
+static int filter_horizontally(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
+{
+ GBlurContext *s = ctx->priv;
+ ThreadData *td = arg;
+ const int height = td->height;
+ const int width = td->width;
+ const int slice_start = (height * jobnr ) / nb_jobs;
+ const int slice_end = (height * (jobnr+1)) / nb_jobs;
+ const float boundaryscale = s->boundaryscale;
+ const int steps = s->steps;
+ const float nu = s->nu;
+ float *buffer = s->buffer;
+ int y, x, step;
+ float *ptr;
+
+ /* Filter horizontally along each row */
+ for (y = slice_start; y < slice_end; y++) {
+ for (step = 0; step < steps; step++) {
+ ptr = buffer + width * y;
+ ptr[0] *= boundaryscale;
+
+ /* Filter rightwards */
+ for (x = 1; x < width; x++)
+ ptr[x] += nu * ptr[x - 1];
+
+ ptr[x = width - 1] *= boundaryscale;
+
+ /* Filter leftwards */
+ for (; x > 0; x--)
+ ptr[x - 1] += nu * ptr[x];
+ }
+ }
+
+ return 0;
+}
+
+static int filter_vertically(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
+{
+ GBlurContext *s = ctx->priv;
+ ThreadData *td = arg;
+ const int height = td->height;
+ const int width = td->width;
+ const int slice_start = (width * jobnr ) / nb_jobs;
+ const int slice_end = (width * (jobnr+1)) / nb_jobs;
+ const float boundaryscale = s->boundaryscaleV;
+ const int numpixels = width * height;
+ const int steps = s->steps;
+ const float nu = s->nuV;
+ float *buffer = s->buffer;
+ int i, x, step;
+ float *ptr;
+
+ /* Filter vertically along each column */
+ for (x = slice_start; x < slice_end; x++) {
+ for (step = 0; step < steps; step++) {
+ ptr = buffer + x;
+ ptr[0] *= boundaryscale;
+
+ /* Filter downwards */
+ for (i = width; i < numpixels; i += width)
+ ptr[i] += nu * ptr[i - width];
+
+ ptr[i = numpixels - width] *= boundaryscale;
+
+ /* Filter upwards */
+ for (; i > 0; i -= width)
+ ptr[i - width] += nu * ptr[i];
+ }
+ }
+
+ return 0;
+}
+
+
+static int filter_postscale(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
+{
+ GBlurContext *s = ctx->priv;
+ ThreadData *td = arg;
+ const int height = td->height;
+ const int width = td->width;
+ const int64_t numpixels = width * (int64_t)height;
+ const unsigned slice_start = (numpixels * jobnr ) / nb_jobs;
+ const unsigned slice_end = (numpixels * (jobnr+1)) / nb_jobs;
+ const float postscale = s->postscale * s->postscaleV;
+ float *buffer = s->buffer;
+ unsigned i;
+
+ for (i = slice_start; i < slice_end; i++)
+ buffer[i] *= postscale;
+
+ return 0;
+}
+
+static void gaussianiir2d(AVFilterContext *ctx, int plane)
+{
+ GBlurContext *s = ctx->priv;
+ const int width = s->planewidth[plane];
+ const int height = s->planeheight[plane];
+ const int nb_threads = ff_filter_get_nb_threads(ctx);
+ ThreadData td;
+
+ if (s->sigma <= 0 || s->steps < 0)
+ return;
+
+ td.width = width;
+ td.height = height;
+ ctx->internal->execute(ctx, filter_horizontally, &td, NULL, FFMIN(height, nb_threads));
+ ctx->internal->execute(ctx, filter_vertically, &td, NULL, FFMIN(width, nb_threads));
+ ctx->internal->execute(ctx, filter_postscale, &td, NULL, FFMIN(width * height, nb_threads));
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P,
+ AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
+ AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
+ AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
+ AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
+ AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV440P12,
+ AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV444P14,
+ AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
+ AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9,
+ AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10,
+ AV_PIX_FMT_YUVA420P16, AV_PIX_FMT_YUVA422P16, AV_PIX_FMT_YUVA444P16,
+ AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
+ AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16,
+ AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRAP12, AV_PIX_FMT_GBRAP16,
+ AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY16,
+ AV_PIX_FMT_NONE
+ };
+
+ return ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ GBlurContext *s = inlink->dst->priv;
+
+ s->depth = desc->comp[0].depth;
+ s->planewidth[1] = s->planewidth[2] = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
+ s->planewidth[0] = s->planewidth[3] = inlink->w;
+ s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
+ s->planeheight[0] = s->planeheight[3] = inlink->h;
+
+ s->nb_planes = av_pix_fmt_count_planes(inlink->format);
+
+ s->buffer = av_malloc_array(inlink->w, inlink->h * sizeof(*s->buffer));
+ if (!s->buffer)
+ return AVERROR(ENOMEM);
+
+ if (s->sigmaV < 0) {
+ s->sigmaV = s->sigma;
+ }
+
+ return 0;
+}
+
+static void set_params(float sigma, int steps, float *postscale, float *boundaryscale, float *nu)
+{
+ double dnu, lambda;
+
+ lambda = (sigma * sigma) / (2.0 * steps);
+ dnu = (1.0 + 2.0 * lambda - sqrt(1.0 + 4.0 * lambda)) / (2.0 * lambda);
+ *postscale = pow(dnu / lambda, steps);
+ *boundaryscale = 1.0 / (1.0 - dnu);
+ *nu = (float)dnu;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ GBlurContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFrame *out;
+ int plane;
+
+ set_params(s->sigma, s->steps, &s->postscale, &s->boundaryscale, &s->nu);
+ set_params(s->sigmaV, s->steps, &s->postscaleV, &s->boundaryscaleV, &s->nuV);
+
+ if (av_frame_is_writable(in)) {
+ out = in;
+ } else {
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+ }
+
+ for (plane = 0; plane < s->nb_planes; plane++) {
+ const int height = s->planeheight[plane];
+ const int width = s->planewidth[plane];
+ float *bptr = s->buffer;
+ const uint8_t *src = in->data[plane];
+ const uint16_t *src16 = (const uint16_t *)in->data[plane];
+ uint8_t *dst = out->data[plane];
+ uint16_t *dst16 = (uint16_t *)out->data[plane];
+ int y, x;
+
+ if (!s->sigma || !(s->planes & (1 << plane))) {
+ if (out != in)
+ av_image_copy_plane(out->data[plane], out->linesize[plane],
+ in->data[plane], in->linesize[plane],
+ width * ((s->depth + 7) / 8), height);
+ continue;
+ }
+
+ if (s->depth == 8) {
+ for (y = 0; y < height; y++) {
+ for (x = 0; x < width; x++) {
+ bptr[x] = src[x];
+ }
+ bptr += width;
+ src += in->linesize[plane];
+ }
+ } else {
+ for (y = 0; y < height; y++) {
+ for (x = 0; x < width; x++) {
+ bptr[x] = src16[x];
+ }
+ bptr += width;
+ src16 += in->linesize[plane] / 2;
+ }
+ }
+
+ gaussianiir2d(ctx, plane);
+
+ bptr = s->buffer;
+ if (s->depth == 8) {
+ for (y = 0; y < height; y++) {
+ for (x = 0; x < width; x++) {
+ dst[x] = bptr[x];
+ }
+ bptr += width;
+ dst += out->linesize[plane];
+ }
+ } else {
+ for (y = 0; y < height; y++) {
+ for (x = 0; x < width; x++) {
+ dst16[x] = bptr[x];
+ }
+ bptr += width;
+ dst16 += out->linesize[plane] / 2;
+ }
+ }
+ }
+
+ if (out != in)
+ av_frame_free(&in);
+ return ff_filter_frame(outlink, out);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ GBlurContext *s = ctx->priv;
+
+ av_freep(&s->buffer);
+}
+
+static const AVFilterPad gblur_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad gblur_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_gblur = {
+ .name = "gblur",
+ .description = NULL_IF_CONFIG_SMALL("Apply Gaussian Blur filter."),
+ .priv_size = sizeof(GBlurContext),
+ .priv_class = &gblur_class,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = gblur_inputs,
+ .outputs = gblur_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
+};
diff --git a/libavfilter/vf_geq.c b/libavfilter/vf_geq.c
new file mode 100644
index 0000000000..9d26f54422
--- /dev/null
+++ b/libavfilter/vf_geq.c
@@ -0,0 +1,287 @@
+/*
+ * Copyright (C) 2006 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (C) 2012 Clément Bœsch <u pkh me>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/**
+ * @file
+ * Generic equation change filter
+ * Originally written by Michael Niedermayer for the MPlayer project, and
+ * ported by Clément Bœsch for FFmpeg.
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/avstring.h"
+#include "libavutil/eval.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "internal.h"
+
+typedef struct {
+ const AVClass *class;
+ AVExpr *e[4]; ///< expressions for each plane
+ char *expr_str[4+3]; ///< expression strings for each plane
+ AVFrame *picref; ///< current input buffer
+ int hsub, vsub; ///< chroma subsampling
+ int planes; ///< number of planes
+ int is_rgb;
+} GEQContext;
+
+enum { Y = 0, U, V, A, G, B, R };
+
+#define OFFSET(x) offsetof(GEQContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption geq_options[] = {
+ { "lum_expr", "set luminance expression", OFFSET(expr_str[Y]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "lum", "set luminance expression", OFFSET(expr_str[Y]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "cb_expr", "set chroma blue expression", OFFSET(expr_str[U]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "cb", "set chroma blue expression", OFFSET(expr_str[U]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "cr_expr", "set chroma red expression", OFFSET(expr_str[V]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "cr", "set chroma red expression", OFFSET(expr_str[V]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "alpha_expr", "set alpha expression", OFFSET(expr_str[A]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "a", "set alpha expression", OFFSET(expr_str[A]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "red_expr", "set red expression", OFFSET(expr_str[R]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "r", "set red expression", OFFSET(expr_str[R]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "green_expr", "set green expression", OFFSET(expr_str[G]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "g", "set green expression", OFFSET(expr_str[G]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "blue_expr", "set blue expression", OFFSET(expr_str[B]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "b", "set blue expression", OFFSET(expr_str[B]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
+ {NULL},
+};
+
+AVFILTER_DEFINE_CLASS(geq);
+
+static inline double getpix(void *priv, double x, double y, int plane)
+{
+ int xi, yi;
+ GEQContext *geq = priv;
+ AVFrame *picref = geq->picref;
+ const uint8_t *src = picref->data[plane];
+ const int linesize = picref->linesize[plane];
+ const int w = (plane == 1 || plane == 2) ? AV_CEIL_RSHIFT(picref->width, geq->hsub) : picref->width;
+ const int h = (plane == 1 || plane == 2) ? AV_CEIL_RSHIFT(picref->height, geq->vsub) : picref->height;
+
+ if (!src)
+ return 0;
+
+ xi = x = av_clipf(x, 0, w - 2);
+ yi = y = av_clipf(y, 0, h - 2);
+
+ x -= xi;
+ y -= yi;
+
+ return (1-y)*((1-x)*src[xi + yi * linesize] + x*src[xi + 1 + yi * linesize])
+ + y *((1-x)*src[xi + (yi+1) * linesize] + x*src[xi + 1 + (yi+1) * linesize]);
+}
+
+//TODO: cubic interpolate
+//TODO: keep the last few frames
+static double lum(void *priv, double x, double y) { return getpix(priv, x, y, 0); }
+static double cb(void *priv, double x, double y) { return getpix(priv, x, y, 1); }
+static double cr(void *priv, double x, double y) { return getpix(priv, x, y, 2); }
+static double alpha(void *priv, double x, double y) { return getpix(priv, x, y, 3); }
+
+static const char *const var_names[] = { "X", "Y", "W", "H", "N", "SW", "SH", "T", NULL };
+enum { VAR_X, VAR_Y, VAR_W, VAR_H, VAR_N, VAR_SW, VAR_SH, VAR_T, VAR_VARS_NB };
+
+static av_cold int geq_init(AVFilterContext *ctx)
+{
+ GEQContext *geq = ctx->priv;
+ int plane, ret = 0;
+
+ if (!geq->expr_str[Y] && !geq->expr_str[G] && !geq->expr_str[B] && !geq->expr_str[R]) {
+ av_log(ctx, AV_LOG_ERROR, "A luminance or RGB expression is mandatory\n");
+ ret = AVERROR(EINVAL);
+ goto end;
+ }
+ geq->is_rgb = !geq->expr_str[Y];
+
+ if ((geq->expr_str[Y] || geq->expr_str[U] || geq->expr_str[V]) && (geq->expr_str[G] || geq->expr_str[B] || geq->expr_str[R])) {
+ av_log(ctx, AV_LOG_ERROR, "Either YCbCr or RGB but not both must be specified\n");
+ ret = AVERROR(EINVAL);
+ goto end;
+ }
+
+ if (!geq->expr_str[U] && !geq->expr_str[V]) {
+ /* No chroma at all: fallback on luma */
+ geq->expr_str[U] = av_strdup(geq->expr_str[Y]);
+ geq->expr_str[V] = av_strdup(geq->expr_str[Y]);
+ } else {
+ /* One chroma unspecified, fallback on the other */
+ if (!geq->expr_str[U]) geq->expr_str[U] = av_strdup(geq->expr_str[V]);
+ if (!geq->expr_str[V]) geq->expr_str[V] = av_strdup(geq->expr_str[U]);
+ }
+
+ if (!geq->expr_str[A])
+ geq->expr_str[A] = av_strdup("255");
+ if (!geq->expr_str[G])
+ geq->expr_str[G] = av_strdup("g(X,Y)");
+ if (!geq->expr_str[B])
+ geq->expr_str[B] = av_strdup("b(X,Y)");
+ if (!geq->expr_str[R])
+ geq->expr_str[R] = av_strdup("r(X,Y)");
+
+ if (geq->is_rgb ?
+ (!geq->expr_str[G] || !geq->expr_str[B] || !geq->expr_str[R])
+ :
+ (!geq->expr_str[U] || !geq->expr_str[V] || !geq->expr_str[A])) {
+ ret = AVERROR(ENOMEM);
+ goto end;
+ }
+
+ for (plane = 0; plane < 4; plane++) {
+ static double (*p[])(void *, double, double) = { lum, cb, cr, alpha };
+ static const char *const func2_yuv_names[] = { "lum", "cb", "cr", "alpha", "p", NULL };
+ static const char *const func2_rgb_names[] = { "g", "b", "r", "alpha", "p", NULL };
+ const char *const *func2_names = geq->is_rgb ? func2_rgb_names : func2_yuv_names;
+ double (*func2[])(void *, double, double) = { lum, cb, cr, alpha, p[plane], NULL };
+
+ ret = av_expr_parse(&geq->e[plane], geq->expr_str[plane < 3 && geq->is_rgb ? plane+4 : plane], var_names,
+ NULL, NULL, func2_names, func2, 0, ctx);
+ if (ret < 0)
+ break;
+ }
+
+end:
+ return ret;
+}
+
+static int geq_query_formats(AVFilterContext *ctx)
+{
+ GEQContext *geq = ctx->priv;
+ static const enum AVPixelFormat yuv_pix_fmts[] = {
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
+ AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA420P,
+ AV_PIX_FMT_GRAY8,
+ AV_PIX_FMT_NONE
+ };
+ static const enum AVPixelFormat rgb_pix_fmts[] = {
+ AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP,
+ AV_PIX_FMT_NONE
+ };
+ AVFilterFormats *fmts_list;
+
+ if (geq->is_rgb) {
+ fmts_list = ff_make_format_list(rgb_pix_fmts);
+ } else
+ fmts_list = ff_make_format_list(yuv_pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static int geq_config_props(AVFilterLink *inlink)
+{
+ GEQContext *geq = inlink->dst->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+
+ av_assert0(desc);
+
+ geq->hsub = desc->log2_chroma_w;
+ geq->vsub = desc->log2_chroma_h;
+ geq->planes = desc->nb_components;
+ return 0;
+}
+
+static int geq_filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ int plane;
+ GEQContext *geq = inlink->dst->priv;
+ AVFilterLink *outlink = inlink->dst->outputs[0];
+ AVFrame *out;
+ double values[VAR_VARS_NB] = {
+ [VAR_N] = inlink->frame_count_out,
+ [VAR_T] = in->pts == AV_NOPTS_VALUE ? NAN : in->pts * av_q2d(inlink->time_base),
+ };
+
+ geq->picref = in;
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+
+ for (plane = 0; plane < geq->planes && out->data[plane]; plane++) {
+ int x, y;
+ uint8_t *dst = out->data[plane];
+ const int linesize = out->linesize[plane];
+ const int w = (plane == 1 || plane == 2) ? AV_CEIL_RSHIFT(inlink->w, geq->hsub) : inlink->w;
+ const int h = (plane == 1 || plane == 2) ? AV_CEIL_RSHIFT(inlink->h, geq->vsub) : inlink->h;
+
+ values[VAR_W] = w;
+ values[VAR_H] = h;
+ values[VAR_SW] = w / (double)inlink->w;
+ values[VAR_SH] = h / (double)inlink->h;
+
+ for (y = 0; y < h; y++) {
+ values[VAR_Y] = y;
+ for (x = 0; x < w; x++) {
+ values[VAR_X] = x;
+ dst[x] = av_expr_eval(geq->e[plane], values, geq);
+ }
+ dst += linesize;
+ }
+ }
+
+ av_frame_free(&geq->picref);
+ return ff_filter_frame(outlink, out);
+}
+
+static av_cold void geq_uninit(AVFilterContext *ctx)
+{
+ int i;
+ GEQContext *geq = ctx->priv;
+
+ for (i = 0; i < FF_ARRAY_ELEMS(geq->e); i++)
+ av_expr_free(geq->e[i]);
+}
+
+static const AVFilterPad geq_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = geq_config_props,
+ .filter_frame = geq_filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad geq_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_geq = {
+ .name = "geq",
+ .description = NULL_IF_CONFIG_SMALL("Apply generic equation to each pixel."),
+ .priv_size = sizeof(GEQContext),
+ .init = geq_init,
+ .uninit = geq_uninit,
+ .query_formats = geq_query_formats,
+ .inputs = geq_inputs,
+ .outputs = geq_outputs,
+ .priv_class = &geq_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+};
diff --git a/libavfilter/vf_gradfun.c b/libavfilter/vf_gradfun.c
index 973cd7bd6a..f63128d72e 100644
--- a/libavfilter/vf_gradfun.c
+++ b/libavfilter/vf_gradfun.c
@@ -2,20 +2,20 @@
* Copyright (c) 2010 Nolan Lum <nol888@gmail.com>
* Copyright (c) 2009 Loren Merritt <lorenm@u.washington.edu>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -54,7 +54,7 @@ DECLARE_ALIGNED(16, static const uint16_t, dither)[8][8] = {
{0x54,0x34,0x4C,0x2C,0x52,0x32,0x4A,0x2A},
};
-void ff_gradfun_filter_line_c(uint8_t *dst, uint8_t *src, uint16_t *dc, int width, int thresh, const uint16_t *dithers)
+void ff_gradfun_filter_line_c(uint8_t *dst, const uint8_t *src, const uint16_t *dc, int width, int thresh, const uint16_t *dithers)
{
int x;
for (x = 0; x < width; dc += x & 1, x++) {
@@ -68,7 +68,7 @@ void ff_gradfun_filter_line_c(uint8_t *dst, uint8_t *src, uint16_t *dc, int widt
}
}
-void ff_gradfun_blur_line_c(uint16_t *dc, uint16_t *buf, uint16_t *buf1, uint8_t *src, int src_linesize, int width)
+void ff_gradfun_blur_line_c(uint16_t *dc, uint16_t *buf, const uint16_t *buf1, const uint8_t *src, int src_linesize, int width)
{
int x, v, old;
for (x = 0; x < width; x++) {
@@ -79,7 +79,7 @@ void ff_gradfun_blur_line_c(uint16_t *dc, uint16_t *buf, uint16_t *buf1, uint8_t
}
}
-static void filter(GradFunContext *ctx, uint8_t *dst, uint8_t *src, int width, int height, int dst_linesize, int src_linesize, int r)
+static void filter(GradFunContext *ctx, uint8_t *dst, const uint8_t *src, int width, int height, int dst_linesize, int src_linesize, int r)
{
int bstride = FFALIGN(width, 16) / 2;
int y;
@@ -126,9 +126,9 @@ static av_cold int init(AVFilterContext *ctx)
GradFunContext *s = ctx->priv;
s->thresh = (1 << 15) / s->strength;
- s->radius &= ~1;
+ s->radius = av_clip((s->radius + 1) & ~1, 4, 32);
- s->blur_line = ff_gradfun_blur_line_c;
+ s->blur_line = ff_gradfun_blur_line_c;
s->filter_line = ff_gradfun_filter_line_c;
if (ARCH_X86)
@@ -149,15 +149,16 @@ static int query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV420P,
- AV_PIX_FMT_GRAY8, AV_PIX_FMT_NV12,
- AV_PIX_FMT_NV21, AV_PIX_FMT_YUV444P,
+ AV_PIX_FMT_GRAY8, AV_PIX_FMT_YUV444P,
AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_YUV440P,
+ AV_PIX_FMT_GBRP,
AV_PIX_FMT_NONE
};
-
- ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
-
- return 0;
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
}
static int config_input(AVFilterLink *inlink)
@@ -168,7 +169,7 @@ static int config_input(AVFilterLink *inlink)
int vsub = desc->log2_chroma_h;
av_freep(&s->buf);
- s->buf = av_mallocz((FFALIGN(inlink->w, 16) * (s->radius + 1) / 2 + 32) * sizeof(uint16_t));
+ s->buf = av_calloc((FFALIGN(inlink->w, 16) * (s->radius + 1) / 2 + 32), sizeof(*s->buf));
if (!s->buf)
return AVERROR(ENOMEM);
@@ -196,13 +197,10 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
av_frame_free(&in);
return AVERROR(ENOMEM);
}
-
av_frame_copy_props(out, in);
- out->width = outlink->w;
- out->height = outlink->h;
}
- for (p = 0; p < 4 && in->data[p]; p++) {
+ for (p = 0; p < 4 && in->data[p] && in->linesize[p]; p++) {
int w = inlink->w;
int h = inlink->h;
int r = s->radius;
@@ -225,19 +223,15 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
}
#define OFFSET(x) offsetof(GradFunContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
-static const AVOption options[] = {
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption gradfun_options[] = {
{ "strength", "The maximum amount by which the filter will change any one pixel.", OFFSET(strength), AV_OPT_TYPE_FLOAT, { .dbl = 1.2 }, 0.51, 64, FLAGS },
{ "radius", "The neighborhood to fit the gradient to.", OFFSET(radius), AV_OPT_TYPE_INT, { .i64 = 16 }, 4, 32, FLAGS },
- { NULL },
+ { NULL }
};
-static const AVClass gradfun_class = {
- .class_name = "gradfun",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(gradfun);
static const AVFilterPad avfilter_vf_gradfun_inputs[] = {
{
@@ -265,7 +259,7 @@ AVFilter ff_vf_gradfun = {
.init = init,
.uninit = uninit,
.query_formats = query_formats,
-
- .inputs = avfilter_vf_gradfun_inputs,
- .outputs = avfilter_vf_gradfun_outputs,
+ .inputs = avfilter_vf_gradfun_inputs,
+ .outputs = avfilter_vf_gradfun_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
diff --git a/libavfilter/vf_hflip.c b/libavfilter/vf_hflip.c
index 1eb8d698d1..cf20c193f7 100644
--- a/libavfilter/vf_hflip.c
+++ b/libavfilter/vf_hflip.c
@@ -2,20 +2,20 @@
* Copyright (c) 2007 Benoit Fouet
* Copyright (c) 2010 Stefano Sabatini
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -26,6 +26,7 @@
#include <string.h>
+#include "libavutil/opt.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
@@ -36,81 +37,79 @@
#include "libavutil/imgutils.h"
typedef struct FlipContext {
+ const AVClass *class;
int max_step[4]; ///< max pixel step for each plane, expressed as a number of bytes
- int hsub, vsub; ///< chroma subsampling
+ int planewidth[4]; ///< width of each plane
+ int planeheight[4]; ///< height of each plane
} FlipContext;
+static const AVOption hflip_options[] = {
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(hflip);
+
static int query_formats(AVFilterContext *ctx)
{
- static const enum AVPixelFormat pix_fmts[] = {
- AV_PIX_FMT_RGB48BE, AV_PIX_FMT_RGB48LE,
- AV_PIX_FMT_BGR48BE, AV_PIX_FMT_BGR48LE,
- AV_PIX_FMT_ARGB, AV_PIX_FMT_RGBA,
- AV_PIX_FMT_ABGR, AV_PIX_FMT_BGRA,
- AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
- AV_PIX_FMT_RGB565BE, AV_PIX_FMT_RGB565LE,
- AV_PIX_FMT_RGB555BE, AV_PIX_FMT_RGB555LE,
- AV_PIX_FMT_BGR565BE, AV_PIX_FMT_BGR565LE,
- AV_PIX_FMT_BGR555BE, AV_PIX_FMT_BGR555LE,
- AV_PIX_FMT_GRAY16BE, AV_PIX_FMT_GRAY16LE,
- AV_PIX_FMT_YUV420P16LE, AV_PIX_FMT_YUV420P16BE,
- AV_PIX_FMT_YUV422P16LE, AV_PIX_FMT_YUV422P16BE,
- AV_PIX_FMT_YUV444P16LE, AV_PIX_FMT_YUV444P16BE,
- AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P,
- AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P,
- AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
- AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P,
- AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ440P,
- AV_PIX_FMT_YUVA420P,
- AV_PIX_FMT_RGB8, AV_PIX_FMT_BGR8,
- AV_PIX_FMT_RGB4_BYTE, AV_PIX_FMT_BGR4_BYTE,
- AV_PIX_FMT_PAL8, AV_PIX_FMT_GRAY8,
- AV_PIX_FMT_NONE
- };
-
- ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
- return 0;
+ AVFilterFormats *pix_fmts = NULL;
+ int fmt, ret;
+
+ for (fmt = 0; av_pix_fmt_desc_get(fmt); fmt++) {
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
+ if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL ||
+ desc->flags & AV_PIX_FMT_FLAG_BITSTREAM ||
+ (desc->log2_chroma_w != desc->log2_chroma_h &&
+ desc->comp[0].plane == desc->comp[1].plane)) &&
+ (ret = ff_add_format(&pix_fmts, fmt)) < 0)
+ return ret;
+ }
+
+ return ff_set_common_formats(ctx, pix_fmts);
}
static int config_props(AVFilterLink *inlink)
{
FlipContext *s = inlink->dst->priv;
const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
+ const int hsub = pix_desc->log2_chroma_w;
+ const int vsub = pix_desc->log2_chroma_h;
av_image_fill_max_pixsteps(s->max_step, NULL, pix_desc);
- s->hsub = pix_desc->log2_chroma_w;
- s->vsub = pix_desc->log2_chroma_h;
+ s->planewidth[0] = s->planewidth[3] = inlink->w;
+ s->planewidth[1] = s->planewidth[2] = AV_CEIL_RSHIFT(inlink->w, hsub);
+ s->planeheight[0] = s->planeheight[3] = inlink->h;
+ s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, vsub);
return 0;
}
-static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+typedef struct ThreadData {
+ AVFrame *in, *out;
+} ThreadData;
+
+static int filter_slice(AVFilterContext *ctx, void *arg, int job, int nb_jobs)
{
- AVFilterContext *ctx = inlink->dst;
- FlipContext *s = ctx->priv;
- AVFilterLink *outlink = ctx->outputs[0];
- AVFrame *out;
+ FlipContext *s = ctx->priv;
+ ThreadData *td = arg;
+ AVFrame *in = td->in;
+ AVFrame *out = td->out;
uint8_t *inrow, *outrow;
- int i, j, plane, step, hsub, vsub;
+ int i, j, plane, step;
- out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
- if (!out) {
- av_frame_free(&in);
- return AVERROR(ENOMEM);
- }
- av_frame_copy_props(out, in);
+ for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++) {
+ const int width = s->planewidth[plane];
+ const int height = s->planeheight[plane];
+ const int start = (height * job ) / nb_jobs;
+ const int end = (height * (job+1)) / nb_jobs;
- for (plane = 0; plane < 4 && in->data[plane]; plane++) {
step = s->max_step[plane];
- hsub = (plane == 1 || plane == 2) ? s->hsub : 0;
- vsub = (plane == 1 || plane == 2) ? s->vsub : 0;
- outrow = out->data[plane];
- inrow = in ->data[plane] + ((inlink->w >> hsub) - 1) * step;
- for (i = 0; i < in->height >> vsub; i++) {
+ outrow = out->data[plane] + start * out->linesize[plane];
+ inrow = in ->data[plane] + start * in->linesize[plane] + (width - 1) * step;
+ for (i = start; i < end; i++) {
switch (step) {
case 1:
- for (j = 0; j < (inlink->w >> hsub); j++)
+ for (j = 0; j < width; j++)
outrow[j] = inrow[-j];
break;
@@ -118,7 +117,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
uint16_t *outrow16 = (uint16_t *)outrow;
uint16_t * inrow16 = (uint16_t *) inrow;
- for (j = 0; j < (inlink->w >> hsub); j++)
+ for (j = 0; j < width; j++)
outrow16[j] = inrow16[-j];
}
break;
@@ -127,7 +126,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
uint8_t *in = inrow;
uint8_t *out = outrow;
- for (j = 0; j < (inlink->w >> hsub); j++, out += 3, in -= 3) {
+ for (j = 0; j < width; j++, out += 3, in -= 3) {
int32_t v = AV_RB24(in);
AV_WB24(out, v);
}
@@ -138,13 +137,13 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
uint32_t *outrow32 = (uint32_t *)outrow;
uint32_t * inrow32 = (uint32_t *) inrow;
- for (j = 0; j < (inlink->w >> hsub); j++)
+ for (j = 0; j < width; j++)
outrow32[j] = inrow32[-j];
}
break;
default:
- for (j = 0; j < (inlink->w >> hsub); j++)
+ for (j = 0; j < width; j++)
memcpy(outrow + j*step, inrow - j*step, step);
}
@@ -153,6 +152,30 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
}
}
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ ThreadData td;
+ AVFrame *out;
+
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+
+ /* copy palette if required */
+ if (av_pix_fmt_desc_get(inlink->format)->flags & AV_PIX_FMT_FLAG_PAL)
+ memcpy(out->data[1], in->data[1], AVPALETTE_SIZE);
+
+ td.in = in, td.out = out;
+ ctx->internal->execute(ctx, filter_slice, &td, NULL, FFMIN(outlink->h, ff_filter_get_nb_threads(ctx)));
+
av_frame_free(&in);
return ff_filter_frame(outlink, out);
}
@@ -176,11 +199,12 @@ static const AVFilterPad avfilter_vf_hflip_outputs[] = {
};
AVFilter ff_vf_hflip = {
- .name = "hflip",
- .description = NULL_IF_CONFIG_SMALL("Horizontally flip the input video."),
- .priv_size = sizeof(FlipContext),
+ .name = "hflip",
+ .description = NULL_IF_CONFIG_SMALL("Horizontally flip the input video."),
+ .priv_size = sizeof(FlipContext),
+ .priv_class = &hflip_class,
.query_formats = query_formats,
-
- .inputs = avfilter_vf_hflip_inputs,
- .outputs = avfilter_vf_hflip_outputs,
+ .inputs = avfilter_vf_hflip_inputs,
+ .outputs = avfilter_vf_hflip_outputs,
+ .flags = AVFILTER_FLAG_SLICE_THREADS | AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
diff --git a/libavfilter/vf_histeq.c b/libavfilter/vf_histeq.c
new file mode 100644
index 0000000000..b3d2545b9f
--- /dev/null
+++ b/libavfilter/vf_histeq.c
@@ -0,0 +1,283 @@
+/*
+ * Copyright (c) 2012 Jeremy Tran
+ * Copyright (c) 2001 Donald A. Graft
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/**
+ * @file
+ * Histogram equalization filter, based on the VirtualDub filter by
+ * Donald A. Graft <neuron2 AT home DOT com>.
+ * Implements global automatic contrast adjustment by means of
+ * histogram equalization.
+ */
+
+#include "libavutil/common.h"
+#include "libavutil/internal.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+
+#include "avfilter.h"
+#include "drawutils.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+// #define DEBUG
+
+// Linear Congruential Generator, see "Numerical Recipes"
+#define LCG_A 4096
+#define LCG_C 150889
+#define LCG_M 714025
+#define LCG(x) (((x) * LCG_A + LCG_C) % LCG_M)
+#define LCG_SEED 739187
+
+enum HisteqAntibanding {
+ HISTEQ_ANTIBANDING_NONE = 0,
+ HISTEQ_ANTIBANDING_WEAK = 1,
+ HISTEQ_ANTIBANDING_STRONG = 2,
+ HISTEQ_ANTIBANDING_NB,
+};
+
+typedef struct {
+ const AVClass *class;
+ float strength;
+ float intensity;
+ int antibanding; ///< HisteqAntibanding
+ int in_histogram [256]; ///< input histogram
+ int out_histogram[256]; ///< output histogram
+ int LUT[256]; ///< lookup table derived from histogram[]
+ uint8_t rgba_map[4]; ///< components position
+ int bpp; ///< bytes per pixel
+} HisteqContext;
+
+#define OFFSET(x) offsetof(HisteqContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+#define CONST(name, help, val, unit) { name, help, 0, AV_OPT_TYPE_CONST, {.i64=val}, INT_MIN, INT_MAX, FLAGS, unit }
+
+static const AVOption histeq_options[] = {
+ { "strength", "set the strength", OFFSET(strength), AV_OPT_TYPE_FLOAT, {.dbl=0.2}, 0, 1, FLAGS },
+ { "intensity", "set the intensity", OFFSET(intensity), AV_OPT_TYPE_FLOAT, {.dbl=0.21}, 0, 1, FLAGS },
+ { "antibanding", "set the antibanding level", OFFSET(antibanding), AV_OPT_TYPE_INT, {.i64=HISTEQ_ANTIBANDING_NONE}, 0, HISTEQ_ANTIBANDING_NB-1, FLAGS, "antibanding" },
+ CONST("none", "apply no antibanding", HISTEQ_ANTIBANDING_NONE, "antibanding"),
+ CONST("weak", "apply weak antibanding", HISTEQ_ANTIBANDING_WEAK, "antibanding"),
+ CONST("strong", "apply strong antibanding", HISTEQ_ANTIBANDING_STRONG, "antibanding"),
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(histeq);
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ HisteqContext *histeq = ctx->priv;
+
+ av_log(ctx, AV_LOG_VERBOSE,
+ "strength:%0.3f intensity:%0.3f antibanding:%d\n",
+ histeq->strength, histeq->intensity, histeq->antibanding);
+
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_ARGB, AV_PIX_FMT_RGBA, AV_PIX_FMT_ABGR, AV_PIX_FMT_BGRA,
+ AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
+ AV_PIX_FMT_NONE
+ };
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ HisteqContext *histeq = ctx->priv;
+ const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
+
+ histeq->bpp = av_get_bits_per_pixel(pix_desc) / 8;
+ ff_fill_rgba_map(histeq->rgba_map, inlink->format);
+
+ return 0;
+}
+
+#define R 0
+#define G 1
+#define B 2
+#define A 3
+
+#define GET_RGB_VALUES(r, g, b, src, map) do { \
+ r = src[x + map[R]]; \
+ g = src[x + map[G]]; \
+ b = src[x + map[B]]; \
+} while (0)
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *inpic)
+{
+ AVFilterContext *ctx = inlink->dst;
+ HisteqContext *histeq = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ int strength = histeq->strength * 1000;
+ int intensity = histeq->intensity * 1000;
+ int x, y, i, luthi, lutlo, lut, luma, oluma, m;
+ AVFrame *outpic;
+ unsigned int r, g, b, jran;
+ uint8_t *src, *dst;
+
+ outpic = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!outpic) {
+ av_frame_free(&inpic);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(outpic, inpic);
+
+ /* Seed random generator for antibanding. */
+ jran = LCG_SEED;
+
+ /* Calculate and store the luminance and calculate the global histogram
+ based on the luminance. */
+ memset(histeq->in_histogram, 0, sizeof(histeq->in_histogram));
+ src = inpic->data[0];
+ dst = outpic->data[0];
+ for (y = 0; y < inlink->h; y++) {
+ for (x = 0; x < inlink->w * histeq->bpp; x += histeq->bpp) {
+ GET_RGB_VALUES(r, g, b, src, histeq->rgba_map);
+ luma = (55 * r + 182 * g + 19 * b) >> 8;
+ dst[x + histeq->rgba_map[A]] = luma;
+ histeq->in_histogram[luma]++;
+ }
+ src += inpic->linesize[0];
+ dst += outpic->linesize[0];
+ }
+
+#ifdef DEBUG
+ for (x = 0; x < 256; x++)
+ ff_dlog(ctx, "in[%d]: %u\n", x, histeq->in_histogram[x]);
+#endif
+
+ /* Calculate the lookup table. */
+ histeq->LUT[0] = histeq->in_histogram[0];
+ /* Accumulate */
+ for (x = 1; x < 256; x++)
+ histeq->LUT[x] = histeq->LUT[x-1] + histeq->in_histogram[x];
+
+ /* Normalize */
+ for (x = 0; x < 256; x++)
+ histeq->LUT[x] = (histeq->LUT[x] * intensity) / (inlink->h * inlink->w);
+
+ /* Adjust the LUT based on the selected strength. This is an alpha
+ mix of the calculated LUT and a linear LUT with gain 1. */
+ for (x = 0; x < 256; x++)
+ histeq->LUT[x] = (strength * histeq->LUT[x]) / 255 +
+ ((255 - strength) * x) / 255;
+
+ /* Output the equalized frame. */
+ memset(histeq->out_histogram, 0, sizeof(histeq->out_histogram));
+
+ src = inpic->data[0];
+ dst = outpic->data[0];
+ for (y = 0; y < inlink->h; y++) {
+ for (x = 0; x < inlink->w * histeq->bpp; x += histeq->bpp) {
+ luma = dst[x + histeq->rgba_map[A]];
+ if (luma == 0) {
+ for (i = 0; i < histeq->bpp; ++i)
+ dst[x + i] = 0;
+ histeq->out_histogram[0]++;
+ } else {
+ lut = histeq->LUT[luma];
+ if (histeq->antibanding != HISTEQ_ANTIBANDING_NONE) {
+ if (luma > 0) {
+ lutlo = histeq->antibanding == HISTEQ_ANTIBANDING_WEAK ?
+ (histeq->LUT[luma] + histeq->LUT[luma - 1]) / 2 :
+ histeq->LUT[luma - 1];
+ } else
+ lutlo = lut;
+
+ if (luma < 255) {
+ luthi = (histeq->antibanding == HISTEQ_ANTIBANDING_WEAK) ?
+ (histeq->LUT[luma] + histeq->LUT[luma + 1]) / 2 :
+ histeq->LUT[luma + 1];
+ } else
+ luthi = lut;
+
+ if (lutlo != luthi) {
+ jran = LCG(jran);
+ lut = lutlo + ((luthi - lutlo + 1) * jran) / LCG_M;
+ }
+ }
+
+ GET_RGB_VALUES(r, g, b, src, histeq->rgba_map);
+ if (((m = FFMAX3(r, g, b)) * lut) / luma > 255) {
+ r = (r * 255) / m;
+ g = (g * 255) / m;
+ b = (b * 255) / m;
+ } else {
+ r = (r * lut) / luma;
+ g = (g * lut) / luma;
+ b = (b * lut) / luma;
+ }
+ dst[x + histeq->rgba_map[R]] = r;
+ dst[x + histeq->rgba_map[G]] = g;
+ dst[x + histeq->rgba_map[B]] = b;
+ oluma = av_clip_uint8((55 * r + 182 * g + 19 * b) >> 8);
+ histeq->out_histogram[oluma]++;
+ }
+ }
+ src += inpic->linesize[0];
+ dst += outpic->linesize[0];
+ }
+#ifdef DEBUG
+ for (x = 0; x < 256; x++)
+ ff_dlog(ctx, "out[%d]: %u\n", x, histeq->out_histogram[x]);
+#endif
+
+ av_frame_free(&inpic);
+ return ff_filter_frame(outlink, outpic);
+}
+
+static const AVFilterPad histeq_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad histeq_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_histeq = {
+ .name = "histeq",
+ .description = NULL_IF_CONFIG_SMALL("Apply global color histogram equalization."),
+ .priv_size = sizeof(HisteqContext),
+ .init = init,
+ .query_formats = query_formats,
+ .inputs = histeq_inputs,
+ .outputs = histeq_outputs,
+ .priv_class = &histeq_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+};
diff --git a/libavfilter/vf_histogram.c b/libavfilter/vf_histogram.c
new file mode 100644
index 0000000000..f04f5dea10
--- /dev/null
+++ b/libavfilter/vf_histogram.c
@@ -0,0 +1,382 @@
+/*
+ * Copyright (c) 2012-2013 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/opt.h"
+#include "libavutil/parseutils.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/intreadwrite.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+typedef struct HistogramContext {
+ const AVClass *class; ///< AVClass context for log and options purpose
+ unsigned histogram[256*256];
+ int histogram_size;
+ int mult;
+ int ncomp;
+ int dncomp;
+ uint8_t bg_color[4];
+ uint8_t fg_color[4];
+ int level_height;
+ int scale_height;
+ int display_mode;
+ int levels_mode;
+ const AVPixFmtDescriptor *desc, *odesc;
+ int components;
+ float fgopacity;
+ float bgopacity;
+ int planewidth[4];
+ int planeheight[4];
+} HistogramContext;
+
+#define OFFSET(x) offsetof(HistogramContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption histogram_options[] = {
+ { "level_height", "set level height", OFFSET(level_height), AV_OPT_TYPE_INT, {.i64=200}, 50, 2048, FLAGS},
+ { "scale_height", "set scale height", OFFSET(scale_height), AV_OPT_TYPE_INT, {.i64=12}, 0, 40, FLAGS},
+ { "display_mode", "set display mode", OFFSET(display_mode), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS, "display_mode"},
+ { "d", "set display mode", OFFSET(display_mode), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS, "display_mode"},
+ { "parade", NULL, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "display_mode" },
+ { "overlay", NULL, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "display_mode" },
+ { "levels_mode", "set levels mode", OFFSET(levels_mode), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS, "levels_mode"},
+ { "m", "set levels mode", OFFSET(levels_mode), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS, "levels_mode"},
+ { "linear", NULL, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "levels_mode" },
+ { "logarithmic", NULL, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "levels_mode" },
+ { "components", "set color components to display", OFFSET(components), AV_OPT_TYPE_INT, {.i64=7}, 1, 15, FLAGS},
+ { "c", "set color components to display", OFFSET(components), AV_OPT_TYPE_INT, {.i64=7}, 1, 15, FLAGS},
+ { "fgopacity", "set foreground opacity", OFFSET(fgopacity), AV_OPT_TYPE_FLOAT, {.dbl=0.7}, 0, 1, FLAGS},
+ { "f", "set foreground opacity", OFFSET(fgopacity), AV_OPT_TYPE_FLOAT, {.dbl=0.7}, 0, 1, FLAGS},
+ { "bgopacity", "set background opacity", OFFSET(bgopacity), AV_OPT_TYPE_FLOAT, {.dbl=0.5}, 0, 1, FLAGS},
+ { "b", "set background opacity", OFFSET(bgopacity), AV_OPT_TYPE_FLOAT, {.dbl=0.5}, 0, 1, FLAGS},
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(histogram);
+
+static const enum AVPixelFormat levels_in_pix_fmts[] = {
+ AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUVJ420P,
+ AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVJ422P,
+ AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUVJ411P,
+ AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUVJ444P,
+ AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
+ AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9,
+ AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
+ AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10,
+ AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV440P12,
+ AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRP,
+ AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
+ AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRAP12,
+ AV_PIX_FMT_GRAY8,
+ AV_PIX_FMT_NONE
+};
+
+static const enum AVPixelFormat levels_out_yuv8_pix_fmts[] = {
+ AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUV444P,
+ AV_PIX_FMT_NONE
+};
+
+static const enum AVPixelFormat levels_out_yuv9_pix_fmts[] = {
+ AV_PIX_FMT_YUVA444P9, AV_PIX_FMT_YUV444P9,
+ AV_PIX_FMT_NONE
+};
+
+static const enum AVPixelFormat levels_out_yuv10_pix_fmts[] = {
+ AV_PIX_FMT_YUVA444P10, AV_PIX_FMT_YUV444P10,
+ AV_PIX_FMT_NONE
+};
+
+static const enum AVPixelFormat levels_out_yuv12_pix_fmts[] = {
+ AV_PIX_FMT_YUV444P12,
+ AV_PIX_FMT_NONE
+};
+
+static const enum AVPixelFormat levels_out_rgb8_pix_fmts[] = {
+ AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRP,
+ AV_PIX_FMT_NONE
+};
+
+static const enum AVPixelFormat levels_out_rgb9_pix_fmts[] = {
+ AV_PIX_FMT_GBRP9,
+ AV_PIX_FMT_NONE
+};
+
+static const enum AVPixelFormat levels_out_rgb10_pix_fmts[] = {
+ AV_PIX_FMT_GBRP10,
+ AV_PIX_FMT_NONE
+};
+
+static const enum AVPixelFormat levels_out_rgb12_pix_fmts[] = {
+ AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRAP12,
+ AV_PIX_FMT_NONE
+};
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *avff;
+ const AVPixFmtDescriptor *desc;
+ const enum AVPixelFormat *out_pix_fmts;
+ int rgb, i, bits;
+ int ret;
+
+ if (!ctx->inputs[0]->in_formats ||
+ !ctx->inputs[0]->in_formats->nb_formats) {
+ return AVERROR(EAGAIN);
+ }
+
+ if (!ctx->inputs[0]->out_formats)
+ if ((ret = ff_formats_ref(ff_make_format_list(levels_in_pix_fmts), &ctx->inputs[0]->out_formats)) < 0)
+ return ret;
+ avff = ctx->inputs[0]->in_formats;
+ desc = av_pix_fmt_desc_get(avff->formats[0]);
+ rgb = desc->flags & AV_PIX_FMT_FLAG_RGB;
+ bits = desc->comp[0].depth;
+ for (i = 1; i < avff->nb_formats; i++) {
+ desc = av_pix_fmt_desc_get(avff->formats[i]);
+ if ((rgb != (desc->flags & AV_PIX_FMT_FLAG_RGB)) ||
+ (bits != desc->comp[0].depth))
+ return AVERROR(EAGAIN);
+ }
+
+ if (rgb && bits == 8)
+ out_pix_fmts = levels_out_rgb8_pix_fmts;
+ else if (rgb && bits == 9)
+ out_pix_fmts = levels_out_rgb9_pix_fmts;
+ else if (rgb && bits == 10)
+ out_pix_fmts = levels_out_rgb10_pix_fmts;
+ else if (rgb && bits == 12)
+ out_pix_fmts = levels_out_rgb12_pix_fmts;
+ else if (bits == 8)
+ out_pix_fmts = levels_out_yuv8_pix_fmts;
+ else if (bits == 9)
+ out_pix_fmts = levels_out_yuv9_pix_fmts;
+ else if (bits == 10)
+ out_pix_fmts = levels_out_yuv10_pix_fmts;
+ else if (bits == 12)
+ out_pix_fmts = levels_out_yuv12_pix_fmts;
+ else
+ return AVERROR(EAGAIN);
+ if ((ret = ff_formats_ref(ff_make_format_list(out_pix_fmts), &ctx->outputs[0]->in_formats)) < 0)
+ return ret;
+
+ return 0;
+}
+
+static const uint8_t black_yuva_color[4] = { 0, 127, 127, 255 };
+static const uint8_t black_gbrp_color[4] = { 0, 0, 0, 255 };
+static const uint8_t white_yuva_color[4] = { 255, 127, 127, 255 };
+static const uint8_t white_gbrp_color[4] = { 255, 255, 255, 255 };
+
+static int config_input(AVFilterLink *inlink)
+{
+ HistogramContext *h = inlink->dst->priv;
+
+ h->desc = av_pix_fmt_desc_get(inlink->format);
+ h->ncomp = h->desc->nb_components;
+ h->histogram_size = 1 << h->desc->comp[0].depth;
+ h->mult = h->histogram_size / 256;
+
+ switch (inlink->format) {
+ case AV_PIX_FMT_GBRP12:
+ case AV_PIX_FMT_GBRP10:
+ case AV_PIX_FMT_GBRP9:
+ case AV_PIX_FMT_GBRAP:
+ case AV_PIX_FMT_GBRP:
+ memcpy(h->bg_color, black_gbrp_color, 4);
+ memcpy(h->fg_color, white_gbrp_color, 4);
+ break;
+ default:
+ memcpy(h->bg_color, black_yuva_color, 4);
+ memcpy(h->fg_color, white_yuva_color, 4);
+ }
+
+ h->fg_color[3] = h->fgopacity * 255;
+ h->bg_color[3] = h->bgopacity * 255;
+
+ h->planeheight[1] = h->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, h->desc->log2_chroma_h);
+ h->planeheight[0] = h->planeheight[3] = inlink->h;
+ h->planewidth[1] = h->planewidth[2] = AV_CEIL_RSHIFT(inlink->w, h->desc->log2_chroma_w);
+ h->planewidth[0] = h->planewidth[3] = inlink->w;
+
+ return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ HistogramContext *h = ctx->priv;
+ int ncomp = 0, i;
+
+ for (i = 0; i < h->ncomp; i++) {
+ if ((1 << i) & h->components)
+ ncomp++;
+ }
+ outlink->w = h->histogram_size;
+ outlink->h = (h->level_height + h->scale_height) * FFMAX(ncomp * h->display_mode, 1);
+
+ h->odesc = av_pix_fmt_desc_get(outlink->format);
+ h->dncomp = h->odesc->nb_components;
+ outlink->sample_aspect_ratio = (AVRational){1,1};
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ HistogramContext *h = inlink->dst->priv;
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFrame *out;
+ int i, j, k, l, m;
+
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+
+ out->pts = in->pts;
+
+ for (k = 0; k < 4 && out->data[k]; k++) {
+ const int is_chroma = (k == 1 || k == 2);
+ const int dst_h = AV_CEIL_RSHIFT(outlink->h, (is_chroma ? h->odesc->log2_chroma_h : 0));
+ const int dst_w = AV_CEIL_RSHIFT(outlink->w, (is_chroma ? h->odesc->log2_chroma_w : 0));
+
+ if (h->histogram_size <= 256) {
+ for (i = 0; i < dst_h ; i++)
+ memset(out->data[h->odesc->comp[k].plane] +
+ i * out->linesize[h->odesc->comp[k].plane],
+ h->bg_color[k], dst_w);
+ } else {
+ const int mult = h->mult;
+
+ for (i = 0; i < dst_h ; i++)
+ for (j = 0; j < dst_w; j++)
+ AV_WN16(out->data[h->odesc->comp[k].plane] +
+ i * out->linesize[h->odesc->comp[k].plane] + j * 2,
+ h->bg_color[k] * mult);
+ }
+ }
+
+ for (m = 0, k = 0; k < h->ncomp; k++) {
+ const int p = h->desc->comp[k].plane;
+ const int height = h->planeheight[p];
+ const int width = h->planewidth[p];
+ double max_hval_log;
+ unsigned max_hval = 0;
+ int start;
+
+ if (!((1 << k) & h->components))
+ continue;
+ start = m++ * (h->level_height + h->scale_height) * h->display_mode;
+
+ if (h->histogram_size <= 256) {
+ for (i = 0; i < height; i++) {
+ const uint8_t *src = in->data[p] + i * in->linesize[p];
+ for (j = 0; j < width; j++)
+ h->histogram[src[j]]++;
+ }
+ } else {
+ for (i = 0; i < height; i++) {
+ const uint16_t *src = (const uint16_t *)(in->data[p] + i * in->linesize[p]);
+ for (j = 0; j < width; j++)
+ h->histogram[src[j]]++;
+ }
+ }
+
+ for (i = 0; i < h->histogram_size; i++)
+ max_hval = FFMAX(max_hval, h->histogram[i]);
+ max_hval_log = log2(max_hval + 1);
+
+ for (i = 0; i < outlink->w; i++) {
+ int col_height;
+
+ if (h->levels_mode)
+ col_height = lrint(h->level_height * (1. - (log2(h->histogram[i] + 1) / max_hval_log)));
+ else
+ col_height = h->level_height - (h->histogram[i] * (int64_t)h->level_height + max_hval - 1) / max_hval;
+
+ if (h->histogram_size <= 256) {
+ for (j = h->level_height - 1; j >= col_height; j--) {
+ if (h->display_mode) {
+ for (l = 0; l < h->dncomp; l++)
+ out->data[l][(j + start) * out->linesize[l] + i] = h->fg_color[l];
+ } else {
+ out->data[p][(j + start) * out->linesize[p] + i] = 255;
+ }
+ }
+ for (j = h->level_height + h->scale_height - 1; j >= h->level_height; j--)
+ out->data[p][(j + start) * out->linesize[p] + i] = i;
+ } else {
+ const int mult = h->mult;
+
+ for (j = h->level_height - 1; j >= col_height; j--) {
+ if (h->display_mode) {
+ for (l = 0; l < h->dncomp; l++)
+ AV_WN16(out->data[l] + (j + start) * out->linesize[l] + i * 2, h->fg_color[l] * mult);
+ } else {
+ AV_WN16(out->data[p] + (j + start) * out->linesize[p] + i * 2, 255 * mult);
+ }
+ }
+ for (j = h->level_height + h->scale_height - 1; j >= h->level_height; j--)
+ AV_WN16(out->data[p] + (j + start) * out->linesize[p] + i * 2, i);
+ }
+ }
+
+ memset(h->histogram, 0, h->histogram_size * sizeof(unsigned));
+ }
+
+ av_frame_free(&in);
+ return ff_filter_frame(outlink, out);
+}
+
+static const AVFilterPad inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ { NULL }
+};
+
+static const AVFilterPad outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_histogram = {
+ .name = "histogram",
+ .description = NULL_IF_CONFIG_SMALL("Compute and draw a histogram."),
+ .priv_size = sizeof(HistogramContext),
+ .query_formats = query_formats,
+ .inputs = inputs,
+ .outputs = outputs,
+ .priv_class = &histogram_class,
+};
diff --git a/libavfilter/vf_hqdn3d.c b/libavfilter/vf_hqdn3d.c
index 4d8297df1f..d6c14bb3d8 100644
--- a/libavfilter/vf_hqdn3d.c
+++ b/libavfilter/vf_hqdn3d.c
@@ -3,20 +3,20 @@
* Copyright (c) 2010 Baptiste Coudurier
* Copyright (c) 2012 Loren Merritt
*
- * This file is part of Libav, ported from MPlayer.
+ * This file is part of FFmpeg, ported from MPlayer.
*
- * Libav is free software; you can redistribute it and/or modify
+ * FFmpeg is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
- * with Libav; if not, write to the Free Software Foundation, Inc.,
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
@@ -133,7 +133,7 @@ static int denoise_depth(HQDN3DContext *s,
uint16_t *frame_ant = *frame_ant_ptr;
if (!frame_ant) {
uint8_t *frame_src = src;
- *frame_ant_ptr = frame_ant = av_malloc(w*h*sizeof(uint16_t));
+ *frame_ant_ptr = frame_ant = av_malloc_array(w, h*sizeof(uint16_t));
if (!frame_ant)
return AVERROR(ENOMEM);
for (y = 0; y < h; y++, src += sstride, frame_ant += w)
@@ -155,7 +155,7 @@ static int denoise_depth(HQDN3DContext *s,
#define denoise(...) \
do { \
- int ret = AVERROR_INVALIDDATA; \
+ int ret = AVERROR_BUG; \
switch (s->depth) { \
case 8: ret = denoise_depth(__VA_ARGS__, 8); break; \
case 9: ret = denoise_depth(__VA_ARGS__, 9); break; \
@@ -180,9 +180,9 @@ static int16_t *precalc_coefs(double dist25, int depth)
gamma = log(0.25) / log(1.0 - FFMIN(dist25,252.0)/255.0 - 0.00001);
- for (i = -255<<LUT_BITS; i <= 255<<LUT_BITS; i++) {
+ for (i = -256<<LUT_BITS; i < 256<<LUT_BITS; i++) {
double f = ((i<<(9-LUT_BITS)) + (1<<(8-LUT_BITS)) - 1) / 512.0; // midpoint of the bin
- simil = 1.0 - FFABS(f) / 255.0;
+ simil = FFMAX(0, 1.0 - fabs(f) / 255.0);
C = pow(simil, gamma) * 256.0 * f;
ct[(256<<LUT_BITS)+i] = lrint(C);
}
@@ -242,21 +242,21 @@ static int query_formats(AVFilterContext *ctx)
AV_PIX_FMT_YUVJ422P,
AV_PIX_FMT_YUVJ444P,
AV_PIX_FMT_YUVJ440P,
- AV_NE( AV_PIX_FMT_YUV420P9BE, AV_PIX_FMT_YUV420P9LE ),
- AV_NE( AV_PIX_FMT_YUV422P9BE, AV_PIX_FMT_YUV422P9LE ),
- AV_NE( AV_PIX_FMT_YUV444P9BE, AV_PIX_FMT_YUV444P9LE ),
- AV_NE( AV_PIX_FMT_YUV420P10BE, AV_PIX_FMT_YUV420P10LE ),
- AV_NE( AV_PIX_FMT_YUV422P10BE, AV_PIX_FMT_YUV422P10LE ),
- AV_NE( AV_PIX_FMT_YUV444P10BE, AV_PIX_FMT_YUV444P10LE ),
- AV_NE( AV_PIX_FMT_YUV420P16BE, AV_PIX_FMT_YUV420P16LE ),
- AV_NE( AV_PIX_FMT_YUV422P16BE, AV_PIX_FMT_YUV422P16LE ),
- AV_NE( AV_PIX_FMT_YUV444P16BE, AV_PIX_FMT_YUV444P16LE ),
+ AV_PIX_FMT_YUV420P9,
+ AV_PIX_FMT_YUV422P9,
+ AV_PIX_FMT_YUV444P9,
+ AV_PIX_FMT_YUV420P10,
+ AV_PIX_FMT_YUV422P10,
+ AV_PIX_FMT_YUV444P10,
+ AV_PIX_FMT_YUV420P16,
+ AV_PIX_FMT_YUV422P16,
+ AV_PIX_FMT_YUV444P16,
AV_PIX_FMT_NONE
};
-
- ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
-
- return 0;
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
}
static int config_input(AVFilterLink *inlink)
@@ -271,7 +271,7 @@ static int config_input(AVFilterLink *inlink)
s->vsub = desc->log2_chroma_h;
s->depth = desc->comp[0].depth;
- s->line = av_malloc(inlink->w * sizeof(*s->line));
+ s->line = av_malloc_array(inlink->w, sizeof(*s->line));
if (!s->line)
return AVERROR(ENOMEM);
@@ -289,10 +289,12 @@ static int config_input(AVFilterLink *inlink)
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
- HQDN3DContext *s = inlink->dst->priv;
- AVFilterLink *outlink = inlink->dst->outputs[0];
+ AVFilterContext *ctx = inlink->dst;
+ HQDN3DContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+
AVFrame *out;
- int c, direct = av_frame_is_writable(in);
+ int c, direct = av_frame_is_writable(in) && !ctx->is_disabled;
if (direct) {
out = in;
@@ -304,17 +306,21 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
}
av_frame_copy_props(out, in);
- out->width = outlink->w;
- out->height = outlink->h;
}
for (c = 0; c < 3; c++) {
denoise(s, in->data[c], out->data[c],
s->line, &s->frame_prev[c],
- in->width >> (!!c * s->hsub),
- in->height >> (!!c * s->vsub),
+ AV_CEIL_RSHIFT(in->width, (!!c * s->hsub)),
+ AV_CEIL_RSHIFT(in->height, (!!c * s->vsub)),
in->linesize[c], out->linesize[c],
- s->coefs[c?2:0], s->coefs[c?3:1]);
+ s->coefs[c ? CHROMA_SPATIAL : LUMA_SPATIAL],
+ s->coefs[c ? CHROMA_TMP : LUMA_TMP]);
+ }
+
+ if (ctx->is_disabled) {
+ av_frame_free(&out);
+ return ff_filter_frame(outlink, in);
}
if (!direct)
@@ -324,21 +330,16 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
}
#define OFFSET(x) offsetof(HQDN3DContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
-static const AVOption options[] = {
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
+static const AVOption hqdn3d_options[] = {
{ "luma_spatial", "spatial luma strength", OFFSET(strength[LUMA_SPATIAL]), AV_OPT_TYPE_DOUBLE, { .dbl = 0.0 }, 0, DBL_MAX, FLAGS },
{ "chroma_spatial", "spatial chroma strength", OFFSET(strength[CHROMA_SPATIAL]), AV_OPT_TYPE_DOUBLE, { .dbl = 0.0 }, 0, DBL_MAX, FLAGS },
{ "luma_tmp", "temporal luma strength", OFFSET(strength[LUMA_TMP]), AV_OPT_TYPE_DOUBLE, { .dbl = 0.0 }, 0, DBL_MAX, FLAGS },
{ "chroma_tmp", "temporal chroma strength", OFFSET(strength[CHROMA_TMP]), AV_OPT_TYPE_DOUBLE, { .dbl = 0.0 }, 0, DBL_MAX, FLAGS },
- { NULL },
+ { NULL }
};
-static const AVClass hqdn3d_class = {
- .class_name = "hqdn3d",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(hqdn3d);
static const AVFilterPad avfilter_vf_hqdn3d_inputs[] = {
{
@@ -350,6 +351,7 @@ static const AVFilterPad avfilter_vf_hqdn3d_inputs[] = {
{ NULL }
};
+
static const AVFilterPad avfilter_vf_hqdn3d_outputs[] = {
{
.name = "default",
@@ -361,14 +363,12 @@ static const AVFilterPad avfilter_vf_hqdn3d_outputs[] = {
AVFilter ff_vf_hqdn3d = {
.name = "hqdn3d",
.description = NULL_IF_CONFIG_SMALL("Apply a High Quality 3D Denoiser."),
-
.priv_size = sizeof(HQDN3DContext),
.priv_class = &hqdn3d_class,
.init = init,
.uninit = uninit,
.query_formats = query_formats,
-
- .inputs = avfilter_vf_hqdn3d_inputs,
-
- .outputs = avfilter_vf_hqdn3d_outputs,
+ .inputs = avfilter_vf_hqdn3d_inputs,
+ .outputs = avfilter_vf_hqdn3d_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
};
diff --git a/libavfilter/vf_hqdn3d.h b/libavfilter/vf_hqdn3d.h
index a3445188b1..03a79a108c 100644
--- a/libavfilter/vf_hqdn3d.h
+++ b/libavfilter/vf_hqdn3d.h
@@ -1,23 +1,27 @@
/*
- * This file is part of Libav.
+ * Copyright (c) 2003 Daniel Moreno <comac AT comac DOT darktech DOT org>
+ * Copyright (c) 2010 Baptiste Coudurier
+ * Copyright (c) 2012 Loren Merritt
*
- * Libav is free software; you can redistribute it and/or modify
+ * This file is part of FFmpeg, ported from MPlayer.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
- * with Libav; if not, write to the Free Software Foundation, Inc.,
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
-#ifndef AVFILTER_VF_HQDN3D_H
-#define AVFILTER_VF_HQDN3D_H
+#ifndef AVFILTER_HQDN3D_H
+#define AVFILTER_HQDN3D_H
#include <stddef.h>
#include <stdint.h>
@@ -42,4 +46,4 @@ typedef struct HQDN3DContext {
void ff_hqdn3d_init_x86(HQDN3DContext *hqdn3d);
-#endif /* AVFILTER_VF_HQDN3D_H */
+#endif /* AVFILTER_HQDN3D_H */
diff --git a/libavfilter/vf_hqx.c b/libavfilter/vf_hqx.c
new file mode 100644
index 0000000000..5f63b2a3f9
--- /dev/null
+++ b/libavfilter/vf_hqx.c
@@ -0,0 +1,566 @@
+/*
+ * Copyright (c) 2014 Clément Bœsch
+ *
+ * This file is part of FFmpeg.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/**
+ * @file
+ * hqx magnification filters (hq2x, hq3x, hq4x)
+ *
+ * Originally designed by Maxim Stephin.
+ *
+ * @see http://en.wikipedia.org/wiki/Hqx
+ * @see http://web.archive.org/web/20131114143602/http://www.hiend3d.com/hq3x.html
+ * @see http://blog.pkh.me/p/19-butchering-hqx-scaling-filters.html
+ */
+
+#include "libavutil/opt.h"
+#include "libavutil/avassert.h"
+#include "libavutil/pixdesc.h"
+#include "internal.h"
+
+typedef int (*hqxfunc_t)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs);
+
+typedef struct {
+ const AVClass *class;
+ int n;
+ hqxfunc_t func;
+ uint32_t rgbtoyuv[1<<24];
+} HQXContext;
+
+typedef struct ThreadData {
+ AVFrame *in, *out;
+ const uint32_t *rgbtoyuv;
+} ThreadData;
+
+#define OFFSET(x) offsetof(HQXContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption hqx_options[] = {
+ { "n", "set scale factor", OFFSET(n), AV_OPT_TYPE_INT, {.i64 = 3}, 2, 4, .flags = FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(hqx);
+
+static av_always_inline uint32_t rgb2yuv(const uint32_t *r2y, uint32_t c)
+{
+ return r2y[c & 0xffffff];
+}
+
+static av_always_inline int yuv_diff(uint32_t yuv1, uint32_t yuv2)
+{
+#define YMASK 0xff0000
+#define UMASK 0x00ff00
+#define VMASK 0x0000ff
+#define ABSDIFF(a,b) (abs((int)(a)-(int)(b)))
+
+ return ABSDIFF(yuv1 & YMASK, yuv2 & YMASK) > (48 << 16) ||
+ ABSDIFF(yuv1 & UMASK, yuv2 & UMASK) > ( 7 << 8) ||
+ ABSDIFF(yuv1 & VMASK, yuv2 & VMASK) > ( 6 << 0);
+}
+
+/* (c1*w1 + c2*w2) >> s */
+static av_always_inline uint32_t interp_2px(uint32_t c1, int w1, uint32_t c2, int w2, int s)
+{
+ return (((((c1 & 0xff00ff00) >> 8) * w1 + ((c2 & 0xff00ff00) >> 8) * w2) << (8 - s)) & 0xff00ff00) |
+ (((((c1 & 0x00ff00ff) ) * w1 + ((c2 & 0x00ff00ff) ) * w2) >> s ) & 0x00ff00ff);
+}
+
+/* (c1*w1 + c2*w2 + c3*w3) >> s */
+static av_always_inline uint32_t interp_3px(uint32_t c1, int w1, uint32_t c2, int w2, uint32_t c3, int w3, int s)
+{
+ return (((((c1 & 0xff00ff00) >> 8) * w1 + ((c2 & 0xff00ff00) >> 8) * w2 + ((c3 & 0xff00ff00) >> 8) * w3) << (8 - s)) & 0xff00ff00) |
+ (((((c1 & 0x00ff00ff) ) * w1 + ((c2 & 0x00ff00ff) ) * w2 + ((c3 & 0x00ff00ff) ) * w3) >> s ) & 0x00ff00ff);
+}
+
+/* m is the mask of diff with the center pixel that matters in the pattern, and
+ * r is the expected result (bit set to 1 if there is difference with the
+ * center, 0 otherwise) */
+#define P(m, r) ((k_shuffled & (m)) == (r))
+
+/* adjust 012345678 to 01235678: the mask doesn't contain the (null) diff
+ * between the center/current pixel and itself */
+#define DROP4(z) ((z) > 4 ? (z)-1 : (z))
+
+/* shuffle the input mask: move bit n (4-adjusted) to position stored in p<n> */
+#define SHF(x, rot, n) (((x) >> ((rot) ? 7-DROP4(n) : DROP4(n)) & 1) << DROP4(p##n))
+
+/* used to check if there is YUV difference between 2 pixels */
+#define WDIFF(c1, c2) yuv_diff(rgb2yuv(r2y, c1), rgb2yuv(r2y, c2))
+
+/* bootstrap template for every interpolation code. It defines the shuffled
+ * masks and surrounding pixels. The rot flag is used to indicate if it's a
+ * rotation; its basic effect is to shuffle k using p8..p0 instead of p0..p8 */
+#define INTERP_BOOTSTRAP(rot) \
+ const int k_shuffled = SHF(k,rot,0) | SHF(k,rot,1) | SHF(k,rot,2) \
+ | SHF(k,rot,3) | 0 | SHF(k,rot,5) \
+ | SHF(k,rot,6) | SHF(k,rot,7) | SHF(k,rot,8); \
+ \
+ const uint32_t w0 = w[p0], w1 = w[p1], \
+ w3 = w[p3], w4 = w[p4], w5 = w[p5], \
+ w7 = w[p7]
+
+/* Assuming p0..p8 is mapped to pixels 0..8, this function interpolates the
+ * top-left pixel in the total of the 2x2 pixels to interpolates. The function
+ * is also used for the 3 other pixels */
+static av_always_inline uint32_t hq2x_interp_1x1(const uint32_t *r2y, int k,
+ const uint32_t *w,
+ int p0, int p1, int p2,
+ int p3, int p4, int p5,
+ int p6, int p7, int p8)
+{
+ INTERP_BOOTSTRAP(0);
+
+ if ((P(0xbf,0x37) || P(0xdb,0x13)) && WDIFF(w1, w5))
+ return interp_2px(w4, 3, w3, 1, 2);
+ if ((P(0xdb,0x49) || P(0xef,0x6d)) && WDIFF(w7, w3))
+ return interp_2px(w4, 3, w1, 1, 2);
+ if ((P(0x0b,0x0b) || P(0xfe,0x4a) || P(0xfe,0x1a)) && WDIFF(w3, w1))
+ return w4;
+ if ((P(0x6f,0x2a) || P(0x5b,0x0a) || P(0xbf,0x3a) || P(0xdf,0x5a) ||
+ P(0x9f,0x8a) || P(0xcf,0x8a) || P(0xef,0x4e) || P(0x3f,0x0e) ||
+ P(0xfb,0x5a) || P(0xbb,0x8a) || P(0x7f,0x5a) || P(0xaf,0x8a) ||
+ P(0xeb,0x8a)) && WDIFF(w3, w1))
+ return interp_2px(w4, 3, w0, 1, 2);
+ if (P(0x0b,0x08))
+ return interp_3px(w4, 2, w0, 1, w1, 1, 2);
+ if (P(0x0b,0x02))
+ return interp_3px(w4, 2, w0, 1, w3, 1, 2);
+ if (P(0x2f,0x2f))
+ return interp_3px(w4, 14, w3, 1, w1, 1, 4);
+ if (P(0xbf,0x37) || P(0xdb,0x13))
+ return interp_3px(w4, 5, w1, 2, w3, 1, 3);
+ if (P(0xdb,0x49) || P(0xef,0x6d))
+ return interp_3px(w4, 5, w3, 2, w1, 1, 3);
+ if (P(0x1b,0x03) || P(0x4f,0x43) || P(0x8b,0x83) || P(0x6b,0x43))
+ return interp_2px(w4, 3, w3, 1, 2);
+ if (P(0x4b,0x09) || P(0x8b,0x89) || P(0x1f,0x19) || P(0x3b,0x19))
+ return interp_2px(w4, 3, w1, 1, 2);
+ if (P(0x7e,0x2a) || P(0xef,0xab) || P(0xbf,0x8f) || P(0x7e,0x0e))
+ return interp_3px(w4, 2, w3, 3, w1, 3, 3);
+ if (P(0xfb,0x6a) || P(0x6f,0x6e) || P(0x3f,0x3e) || P(0xfb,0xfa) ||
+ P(0xdf,0xde) || P(0xdf,0x1e))
+ return interp_2px(w4, 3, w0, 1, 2);
+ if (P(0x0a,0x00) || P(0x4f,0x4b) || P(0x9f,0x1b) || P(0x2f,0x0b) ||
+ P(0xbe,0x0a) || P(0xee,0x0a) || P(0x7e,0x0a) || P(0xeb,0x4b) ||
+ P(0x3b,0x1b))
+ return interp_3px(w4, 2, w3, 1, w1, 1, 2);
+ return interp_3px(w4, 6, w3, 1, w1, 1, 3);
+}
+
+/* Assuming p0..p8 is mapped to pixels 0..8, this function interpolates the
+ * top-left and top-center pixel in the total of the 3x3 pixels to
+ * interpolates. The function is also used for the 3 other couples of pixels
+ * defining the outline. The center pixel is not defined through this function,
+ * since it's just the same as the original value. */
+static av_always_inline void hq3x_interp_2x1(uint32_t *dst, int dst_linesize,
+ const uint32_t *r2y, int k,
+ const uint32_t *w,
+ int pos00, int pos01,
+ int p0, int p1, int p2,
+ int p3, int p4, int p5,
+ int p6, int p7, int p8,
+ int rotate)
+{
+ INTERP_BOOTSTRAP(rotate);
+
+ uint32_t *dst00 = &dst[dst_linesize*(pos00>>1) + (pos00&1)];
+ uint32_t *dst01 = &dst[dst_linesize*(pos01>>1) + (pos01&1)];
+
+ if ((P(0xdb,0x49) || P(0xef,0x6d)) && WDIFF(w7, w3))
+ *dst00 = interp_2px(w4, 3, w1, 1, 2);
+ else if ((P(0xbf,0x37) || P(0xdb,0x13)) && WDIFF(w1, w5))
+ *dst00 = interp_2px(w4, 3, w3, 1, 2);
+ else if ((P(0x0b,0x0b) || P(0xfe,0x4a) || P(0xfe,0x1a)) && WDIFF(w3, w1))
+ *dst00 = w4;
+ else if ((P(0x6f,0x2a) || P(0x5b,0x0a) || P(0xbf,0x3a) || P(0xdf,0x5a) ||
+ P(0x9f,0x8a) || P(0xcf,0x8a) || P(0xef,0x4e) || P(0x3f,0x0e) ||
+ P(0xfb,0x5a) || P(0xbb,0x8a) || P(0x7f,0x5a) || P(0xaf,0x8a) ||
+ P(0xeb,0x8a)) && WDIFF(w3, w1))
+ *dst00 = interp_2px(w4, 3, w0, 1, 2);
+ else if (P(0x4b,0x09) || P(0x8b,0x89) || P(0x1f,0x19) || P(0x3b,0x19))
+ *dst00 = interp_2px(w4, 3, w1, 1, 2);
+ else if (P(0x1b,0x03) || P(0x4f,0x43) || P(0x8b,0x83) || P(0x6b,0x43))
+ *dst00 = interp_2px(w4, 3, w3, 1, 2);
+ else if (P(0x7e,0x2a) || P(0xef,0xab) || P(0xbf,0x8f) || P(0x7e,0x0e))
+ *dst00 = interp_2px(w3, 1, w1, 1, 1);
+ else if (P(0x4f,0x4b) || P(0x9f,0x1b) || P(0x2f,0x0b) || P(0xbe,0x0a) ||
+ P(0xee,0x0a) || P(0x7e,0x0a) || P(0xeb,0x4b) || P(0x3b,0x1b))
+ *dst00 = interp_3px(w4, 2, w3, 7, w1, 7, 4);
+ else if (P(0x0b,0x08) || P(0xf9,0x68) || P(0xf3,0x62) || P(0x6d,0x6c) ||
+ P(0x67,0x66) || P(0x3d,0x3c) || P(0x37,0x36) || P(0xf9,0xf8) ||
+ P(0xdd,0xdc) || P(0xf3,0xf2) || P(0xd7,0xd6) || P(0xdd,0x1c) ||
+ P(0xd7,0x16) || P(0x0b,0x02))
+ *dst00 = interp_2px(w4, 3, w0, 1, 2);
+ else
+ *dst00 = interp_3px(w4, 2, w3, 1, w1, 1, 2);
+
+ if ((P(0xfe,0xde) || P(0x9e,0x16) || P(0xda,0x12) || P(0x17,0x16) ||
+ P(0x5b,0x12) || P(0xbb,0x12)) && WDIFF(w1, w5))
+ *dst01 = w4;
+ else if ((P(0x0f,0x0b) || P(0x5e,0x0a) || P(0xfb,0x7b) || P(0x3b,0x0b) ||
+ P(0xbe,0x0a) || P(0x7a,0x0a)) && WDIFF(w3, w1))
+ *dst01 = w4;
+ else if (P(0xbf,0x8f) || P(0x7e,0x0e) || P(0xbf,0x37) || P(0xdb,0x13))
+ *dst01 = interp_2px(w1, 3, w4, 1, 2);
+ else if (P(0x02,0x00) || P(0x7c,0x28) || P(0xed,0xa9) || P(0xf5,0xb4) ||
+ P(0xd9,0x90))
+ *dst01 = interp_2px(w4, 3, w1, 1, 2);
+ else if (P(0x4f,0x4b) || P(0xfb,0x7b) || P(0xfe,0x7e) || P(0x9f,0x1b) ||
+ P(0x2f,0x0b) || P(0xbe,0x0a) || P(0x7e,0x0a) || P(0xfb,0x4b) ||
+ P(0xfb,0xdb) || P(0xfe,0xde) || P(0xfe,0x56) || P(0x57,0x56) ||
+ P(0x97,0x16) || P(0x3f,0x1e) || P(0xdb,0x12) || P(0xbb,0x12))
+ *dst01 = interp_2px(w4, 7, w1, 1, 3);
+ else
+ *dst01 = w4;
+}
+
+/* Assuming p0..p8 is mapped to pixels 0..8, this function interpolates the
+ * top-left block of 2x2 pixels in the total of the 4x4 pixels (or 4 blocks) to
+ * interpolates. The function is also used for the 3 other blocks of 2x2
+ * pixels. */
+static av_always_inline void hq4x_interp_2x2(uint32_t *dst, int dst_linesize,
+ const uint32_t *r2y, int k,
+ const uint32_t *w,
+ int pos00, int pos01,
+ int pos10, int pos11,
+ int p0, int p1, int p2,
+ int p3, int p4, int p5,
+ int p6, int p7, int p8)
+{
+ INTERP_BOOTSTRAP(0);
+
+ uint32_t *dst00 = &dst[dst_linesize*(pos00>>1) + (pos00&1)];
+ uint32_t *dst01 = &dst[dst_linesize*(pos01>>1) + (pos01&1)];
+ uint32_t *dst10 = &dst[dst_linesize*(pos10>>1) + (pos10&1)];
+ uint32_t *dst11 = &dst[dst_linesize*(pos11>>1) + (pos11&1)];
+
+ const int cond00 = (P(0xbf,0x37) || P(0xdb,0x13)) && WDIFF(w1, w5);
+ const int cond01 = (P(0xdb,0x49) || P(0xef,0x6d)) && WDIFF(w7, w3);
+ const int cond02 = (P(0x6f,0x2a) || P(0x5b,0x0a) || P(0xbf,0x3a) ||
+ P(0xdf,0x5a) || P(0x9f,0x8a) || P(0xcf,0x8a) ||
+ P(0xef,0x4e) || P(0x3f,0x0e) || P(0xfb,0x5a) ||
+ P(0xbb,0x8a) || P(0x7f,0x5a) || P(0xaf,0x8a) ||
+ P(0xeb,0x8a)) && WDIFF(w3, w1);
+ const int cond03 = P(0xdb,0x49) || P(0xef,0x6d);
+ const int cond04 = P(0xbf,0x37) || P(0xdb,0x13);
+ const int cond05 = P(0x1b,0x03) || P(0x4f,0x43) || P(0x8b,0x83) ||
+ P(0x6b,0x43);
+ const int cond06 = P(0x4b,0x09) || P(0x8b,0x89) || P(0x1f,0x19) ||
+ P(0x3b,0x19);
+ const int cond07 = P(0x0b,0x08) || P(0xf9,0x68) || P(0xf3,0x62) ||
+ P(0x6d,0x6c) || P(0x67,0x66) || P(0x3d,0x3c) ||
+ P(0x37,0x36) || P(0xf9,0xf8) || P(0xdd,0xdc) ||
+ P(0xf3,0xf2) || P(0xd7,0xd6) || P(0xdd,0x1c) ||
+ P(0xd7,0x16) || P(0x0b,0x02);
+ const int cond08 = (P(0x0f,0x0b) || P(0x2b,0x0b) || P(0xfe,0x4a) ||
+ P(0xfe,0x1a)) && WDIFF(w3, w1);
+ const int cond09 = P(0x2f,0x2f);
+ const int cond10 = P(0x0a,0x00);
+ const int cond11 = P(0x0b,0x09);
+ const int cond12 = P(0x7e,0x2a) || P(0xef,0xab);
+ const int cond13 = P(0xbf,0x8f) || P(0x7e,0x0e);
+ const int cond14 = P(0x4f,0x4b) || P(0x9f,0x1b) || P(0x2f,0x0b) ||
+ P(0xbe,0x0a) || P(0xee,0x0a) || P(0x7e,0x0a) ||
+ P(0xeb,0x4b) || P(0x3b,0x1b);
+ const int cond15 = P(0x0b,0x03);
+
+ if (cond00)
+ *dst00 = interp_2px(w4, 5, w3, 3, 3);
+ else if (cond01)
+ *dst00 = interp_2px(w4, 5, w1, 3, 3);
+ else if ((P(0x0b,0x0b) || P(0xfe,0x4a) || P(0xfe,0x1a)) && WDIFF(w3, w1))
+ *dst00 = w4;
+ else if (cond02)
+ *dst00 = interp_2px(w4, 5, w0, 3, 3);
+ else if (cond03)
+ *dst00 = interp_2px(w4, 3, w3, 1, 2);
+ else if (cond04)
+ *dst00 = interp_2px(w4, 3, w1, 1, 2);
+ else if (cond05)
+ *dst00 = interp_2px(w4, 5, w3, 3, 3);
+ else if (cond06)
+ *dst00 = interp_2px(w4, 5, w1, 3, 3);
+ else if (P(0x0f,0x0b) || P(0x5e,0x0a) || P(0x2b,0x0b) || P(0xbe,0x0a) ||
+ P(0x7a,0x0a) || P(0xee,0x0a))
+ *dst00 = interp_2px(w1, 1, w3, 1, 1);
+ else if (cond07)
+ *dst00 = interp_2px(w4, 5, w0, 3, 3);
+ else
+ *dst00 = interp_3px(w4, 2, w1, 1, w3, 1, 2);
+
+ if (cond00)
+ *dst01 = interp_2px(w4, 7, w3, 1, 3);
+ else if (cond08)
+ *dst01 = w4;
+ else if (cond02)
+ *dst01 = interp_2px(w4, 3, w0, 1, 2);
+ else if (cond09)
+ *dst01 = w4;
+ else if (cond10)
+ *dst01 = interp_3px(w4, 5, w1, 2, w3, 1, 3);
+ else if (P(0x0b,0x08))
+ *dst01 = interp_3px(w4, 5, w1, 2, w0, 1, 3);
+ else if (cond11)
+ *dst01 = interp_2px(w4, 5, w1, 3, 3);
+ else if (cond04)
+ *dst01 = interp_2px(w1, 3, w4, 1, 2);
+ else if (cond12)
+ *dst01 = interp_3px(w1, 2, w4, 1, w3, 1, 2);
+ else if (cond13)
+ *dst01 = interp_2px(w1, 5, w3, 3, 3);
+ else if (cond05)
+ *dst01 = interp_2px(w4, 7, w3, 1, 3);
+ else if (P(0xf3,0x62) || P(0x67,0x66) || P(0x37,0x36) || P(0xf3,0xf2) ||
+ P(0xd7,0xd6) || P(0xd7,0x16) || P(0x0b,0x02))
+ *dst01 = interp_2px(w4, 3, w0, 1, 2);
+ else if (cond14)
+ *dst01 = interp_2px(w1, 1, w4, 1, 1);
+ else
+ *dst01 = interp_2px(w4, 3, w1, 1, 2);
+
+ if (cond01)
+ *dst10 = interp_2px(w4, 7, w1, 1, 3);
+ else if (cond08)
+ *dst10 = w4;
+ else if (cond02)
+ *dst10 = interp_2px(w4, 3, w0, 1, 2);
+ else if (cond09)
+ *dst10 = w4;
+ else if (cond10)
+ *dst10 = interp_3px(w4, 5, w3, 2, w1, 1, 3);
+ else if (P(0x0b,0x02))
+ *dst10 = interp_3px(w4, 5, w3, 2, w0, 1, 3);
+ else if (cond15)
+ *dst10 = interp_2px(w4, 5, w3, 3, 3);
+ else if (cond03)
+ *dst10 = interp_2px(w3, 3, w4, 1, 2);
+ else if (cond13)
+ *dst10 = interp_3px(w3, 2, w4, 1, w1, 1, 2);
+ else if (cond12)
+ *dst10 = interp_2px(w3, 5, w1, 3, 3);
+ else if (cond06)
+ *dst10 = interp_2px(w4, 7, w1, 1, 3);
+ else if (P(0x0b,0x08) || P(0xf9,0x68) || P(0x6d,0x6c) || P(0x3d,0x3c) ||
+ P(0xf9,0xf8) || P(0xdd,0xdc) || P(0xdd,0x1c))
+ *dst10 = interp_2px(w4, 3, w0, 1, 2);
+ else if (cond14)
+ *dst10 = interp_2px(w3, 1, w4, 1, 1);
+ else
+ *dst10 = interp_2px(w4, 3, w3, 1, 2);
+
+ if ((P(0x7f,0x2b) || P(0xef,0xab) || P(0xbf,0x8f) || P(0x7f,0x0f)) &&
+ WDIFF(w3, w1))
+ *dst11 = w4;
+ else if (cond02)
+ *dst11 = interp_2px(w4, 7, w0, 1, 3);
+ else if (cond15)
+ *dst11 = interp_2px(w4, 7, w3, 1, 3);
+ else if (cond11)
+ *dst11 = interp_2px(w4, 7, w1, 1, 3);
+ else if (P(0x0a,0x00) || P(0x7e,0x2a) || P(0xef,0xab) || P(0xbf,0x8f) ||
+ P(0x7e,0x0e))
+ *dst11 = interp_3px(w4, 6, w3, 1, w1, 1, 3);
+ else if (cond07)
+ *dst11 = interp_2px(w4, 7, w0, 1, 3);
+ else
+ *dst11 = w4;
+}
+
+static av_always_inline void hqx_filter(const ThreadData *td, int jobnr, int nb_jobs, int n)
+{
+ int x, y;
+ AVFrame *in = td->in, *out = td->out;
+ const uint32_t *r2y = td->rgbtoyuv;
+ const int height = in->height;
+ const int width = in->width;
+ const int slice_start = (height * jobnr ) / nb_jobs;
+ const int slice_end = (height * (jobnr+1)) / nb_jobs;
+ const int dst_linesize = out->linesize[0];
+ const int src_linesize = in->linesize[0];
+ uint8_t *dst = out->data[0] + slice_start * dst_linesize * n;
+ const uint8_t *src = in->data[0] + slice_start * src_linesize;
+
+ const int dst32_linesize = dst_linesize >> 2;
+ const int src32_linesize = src_linesize >> 2;
+
+ for (y = slice_start; y < slice_end; y++) {
+ const uint32_t *src32 = (const uint32_t *)src;
+ uint32_t *dst32 = (uint32_t *)dst;
+ const int prevline = y > 0 ? -src32_linesize : 0;
+ const int nextline = y < height - 1 ? src32_linesize : 0;
+
+ for (x = 0; x < width; x++) {
+ const int prevcol = x > 0 ? -1 : 0;
+ const int nextcol = x < width -1 ? 1 : 0;
+ const uint32_t w[3*3] = {
+ src32[prevcol + prevline], src32[prevline], src32[prevline + nextcol],
+ src32[prevcol ], src32[ 0], src32[ nextcol],
+ src32[prevcol + nextline], src32[nextline], src32[nextline + nextcol]
+ };
+ const uint32_t yuv1 = rgb2yuv(r2y, w[4]);
+ const int pattern = (w[4] != w[0] ? (yuv_diff(yuv1, rgb2yuv(r2y, w[0]))) : 0)
+ | (w[4] != w[1] ? (yuv_diff(yuv1, rgb2yuv(r2y, w[1]))) : 0) << 1
+ | (w[4] != w[2] ? (yuv_diff(yuv1, rgb2yuv(r2y, w[2]))) : 0) << 2
+ | (w[4] != w[3] ? (yuv_diff(yuv1, rgb2yuv(r2y, w[3]))) : 0) << 3
+ | (w[4] != w[5] ? (yuv_diff(yuv1, rgb2yuv(r2y, w[5]))) : 0) << 4
+ | (w[4] != w[6] ? (yuv_diff(yuv1, rgb2yuv(r2y, w[6]))) : 0) << 5
+ | (w[4] != w[7] ? (yuv_diff(yuv1, rgb2yuv(r2y, w[7]))) : 0) << 6
+ | (w[4] != w[8] ? (yuv_diff(yuv1, rgb2yuv(r2y, w[8]))) : 0) << 7;
+
+ if (n == 2) {
+ dst32[dst32_linesize*0 + 0] = hq2x_interp_1x1(r2y, pattern, w, 0,1,2,3,4,5,6,7,8); // 00
+ dst32[dst32_linesize*0 + 1] = hq2x_interp_1x1(r2y, pattern, w, 2,1,0,5,4,3,8,7,6); // 01 (vert mirrored)
+ dst32[dst32_linesize*1 + 0] = hq2x_interp_1x1(r2y, pattern, w, 6,7,8,3,4,5,0,1,2); // 10 (horiz mirrored)
+ dst32[dst32_linesize*1 + 1] = hq2x_interp_1x1(r2y, pattern, w, 8,7,6,5,4,3,2,1,0); // 11 (center mirrored)
+ } else if (n == 3) {
+ hq3x_interp_2x1(dst32, dst32_linesize, r2y, pattern, w, 0,1, 0,1,2,3,4,5,6,7,8, 0); // 00 01
+ hq3x_interp_2x1(dst32 + 1, dst32_linesize, r2y, pattern, w, 1,3, 2,5,8,1,4,7,0,3,6, 1); // 02 12 (rotated to the right)
+ hq3x_interp_2x1(dst32 + 1*dst32_linesize, dst32_linesize, r2y, pattern, w, 2,0, 6,3,0,7,4,1,8,5,2, 1); // 20 10 (rotated to the left)
+ hq3x_interp_2x1(dst32 + 1*dst32_linesize + 1, dst32_linesize, r2y, pattern, w, 3,2, 8,7,6,5,4,3,2,1,0, 0); // 22 21 (center mirrored)
+ dst32[dst32_linesize + 1] = w[4]; // 11
+ } else if (n == 4) {
+ hq4x_interp_2x2(dst32, dst32_linesize, r2y, pattern, w, 0,1,2,3, 0,1,2,3,4,5,6,7,8); // 00 01 10 11
+ hq4x_interp_2x2(dst32 + 2, dst32_linesize, r2y, pattern, w, 1,0,3,2, 2,1,0,5,4,3,8,7,6); // 02 03 12 13 (vert mirrored)
+ hq4x_interp_2x2(dst32 + 2*dst32_linesize, dst32_linesize, r2y, pattern, w, 2,3,0,1, 6,7,8,3,4,5,0,1,2); // 20 21 30 31 (horiz mirrored)
+ hq4x_interp_2x2(dst32 + 2*dst32_linesize + 2, dst32_linesize, r2y, pattern, w, 3,2,1,0, 8,7,6,5,4,3,2,1,0); // 22 23 32 33 (center mirrored)
+ } else {
+ av_assert0(0);
+ }
+
+ src32 += 1;
+ dst32 += n;
+ }
+
+ src += src_linesize;
+ dst += dst_linesize * n;
+ }
+}
+
+#define HQX_FUNC(size) \
+static int hq##size##x(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs) \
+{ \
+ hqx_filter(arg, jobnr, nb_jobs, size); \
+ return 0; \
+}
+
+HQX_FUNC(2)
+HQX_FUNC(3)
+HQX_FUNC(4)
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE};
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ HQXContext *hqx = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
+
+ outlink->w = inlink->w * hqx->n;
+ outlink->h = inlink->h * hqx->n;
+ av_log(inlink->dst, AV_LOG_VERBOSE, "fmt:%s size:%dx%d -> size:%dx%d\n",
+ av_get_pix_fmt_name(inlink->format),
+ inlink->w, inlink->h, outlink->w, outlink->h);
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ HQXContext *hqx = ctx->priv;
+ ThreadData td;
+ AVFrame *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+ out->width = outlink->w;
+ out->height = outlink->h;
+
+ td.in = in;
+ td.out = out;
+ td.rgbtoyuv = hqx->rgbtoyuv;
+ ctx->internal->execute(ctx, hqx->func, &td, NULL, FFMIN(inlink->h, ff_filter_get_nb_threads(ctx)));
+
+ av_frame_free(&in);
+ return ff_filter_frame(outlink, out);
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ HQXContext *hqx = ctx->priv;
+ static const hqxfunc_t hqxfuncs[] = {hq2x, hq3x, hq4x};
+
+ uint32_t c;
+ int bg, rg, g;
+
+ for (bg=-255; bg<256; bg++) {
+ for (rg=-255; rg<256; rg++) {
+ const uint32_t u = (uint32_t)((-169*rg + 500*bg)/1000) + 128;
+ const uint32_t v = (uint32_t)(( 500*rg - 81*bg)/1000) + 128;
+ int startg = FFMAX3(-bg, -rg, 0);
+ int endg = FFMIN3(255-bg, 255-rg, 255);
+ uint32_t y = (uint32_t)(( 299*rg + 1000*startg + 114*bg)/1000);
+ c = bg + (rg<<16) + 0x010101 * startg;
+ for (g = startg; g <= endg; g++) {
+ hqx->rgbtoyuv[c] = ((y++) << 16) + (u << 8) + v;
+ c+= 0x010101;
+ }
+ }
+ }
+
+ hqx->func = hqxfuncs[hqx->n - 2];
+ return 0;
+}
+
+static const AVFilterPad hqx_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad hqx_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_hqx = {
+ .name = "hqx",
+ .description = NULL_IF_CONFIG_SMALL("Scale the input by 2, 3 or 4 using the hq*x magnification algorithm."),
+ .priv_size = sizeof(HQXContext),
+ .init = init,
+ .query_formats = query_formats,
+ .inputs = hqx_inputs,
+ .outputs = hqx_outputs,
+ .priv_class = &hqx_class,
+ .flags = AVFILTER_FLAG_SLICE_THREADS,
+};
diff --git a/libavfilter/vf_hue.c b/libavfilter/vf_hue.c
new file mode 100644
index 0000000000..0d2862fb12
--- /dev/null
+++ b/libavfilter/vf_hue.c
@@ -0,0 +1,453 @@
+/*
+ * Copyright (c) 2003 Michael Niedermayer
+ * Copyright (c) 2012 Jeremy Tran
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Apply a hue/saturation filter to the input video
+ * Ported from MPlayer libmpcodecs/vf_hue.c.
+ */
+
+#include <float.h>
+#include "libavutil/eval.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+#define SAT_MIN_VAL -10
+#define SAT_MAX_VAL 10
+
+static const char *const var_names[] = {
+ "n", // frame count
+ "pts", // presentation timestamp expressed in AV_TIME_BASE units
+ "r", // frame rate
+ "t", // timestamp expressed in seconds
+ "tb", // timebase
+ NULL
+};
+
+enum var_name {
+ VAR_N,
+ VAR_PTS,
+ VAR_R,
+ VAR_T,
+ VAR_TB,
+ VAR_NB
+};
+
+typedef struct {
+ const AVClass *class;
+ float hue_deg; /* hue expressed in degrees */
+ float hue; /* hue expressed in radians */
+ char *hue_deg_expr;
+ char *hue_expr;
+ AVExpr *hue_deg_pexpr;
+ AVExpr *hue_pexpr;
+ float saturation;
+ char *saturation_expr;
+ AVExpr *saturation_pexpr;
+ float brightness;
+ char *brightness_expr;
+ AVExpr *brightness_pexpr;
+ int hsub;
+ int vsub;
+ int is_first;
+ int32_t hue_sin;
+ int32_t hue_cos;
+ double var_values[VAR_NB];
+ uint8_t lut_l[256];
+ uint8_t lut_u[256][256];
+ uint8_t lut_v[256][256];
+} HueContext;
+
+#define OFFSET(x) offsetof(HueContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+static const AVOption hue_options[] = {
+ { "h", "set the hue angle degrees expression", OFFSET(hue_deg_expr), AV_OPT_TYPE_STRING,
+ { .str = NULL }, .flags = FLAGS },
+ { "s", "set the saturation expression", OFFSET(saturation_expr), AV_OPT_TYPE_STRING,
+ { .str = "1" }, .flags = FLAGS },
+ { "H", "set the hue angle radians expression", OFFSET(hue_expr), AV_OPT_TYPE_STRING,
+ { .str = NULL }, .flags = FLAGS },
+ { "b", "set the brightness expression", OFFSET(brightness_expr), AV_OPT_TYPE_STRING,
+ { .str = "0" }, .flags = FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(hue);
+
+static inline void compute_sin_and_cos(HueContext *hue)
+{
+ /*
+ * Scale the value to the norm of the resulting (U,V) vector, that is
+ * the saturation.
+ * This will be useful in the apply_lut function.
+ */
+ hue->hue_sin = lrint(sin(hue->hue) * (1 << 16) * hue->saturation);
+ hue->hue_cos = lrint(cos(hue->hue) * (1 << 16) * hue->saturation);
+}
+
+static inline void create_luma_lut(HueContext *h)
+{
+ const float b = h->brightness;
+ int i;
+
+ for (i = 0; i < 256; i++) {
+ h->lut_l[i] = av_clip_uint8(i + b * 25.5);
+ }
+}
+
+static inline void create_chrominance_lut(HueContext *h, const int32_t c,
+ const int32_t s)
+{
+ int32_t i, j, u, v, new_u, new_v;
+
+ /*
+ * If we consider U and V as the components of a 2D vector then its angle
+ * is the hue and the norm is the saturation
+ */
+ for (i = 0; i < 256; i++) {
+ for (j = 0; j < 256; j++) {
+ /* Normalize the components from range [16;140] to [-112;112] */
+ u = i - 128;
+ v = j - 128;
+ /*
+ * Apply the rotation of the vector : (c * u) - (s * v)
+ * (s * u) + (c * v)
+ * De-normalize the components (without forgetting to scale 128
+ * by << 16)
+ * Finally scale back the result by >> 16
+ */
+ new_u = ((c * u) - (s * v) + (1 << 15) + (128 << 16)) >> 16;
+ new_v = ((s * u) + (c * v) + (1 << 15) + (128 << 16)) >> 16;
+
+ /* Prevent a potential overflow */
+ h->lut_u[i][j] = av_clip_uint8(new_u);
+ h->lut_v[i][j] = av_clip_uint8(new_v);
+ }
+ }
+}
+
+static int set_expr(AVExpr **pexpr_ptr, char **expr_ptr,
+ const char *expr, const char *option, void *log_ctx)
+{
+ int ret;
+ AVExpr *new_pexpr;
+ char *new_expr;
+
+ new_expr = av_strdup(expr);
+ if (!new_expr)
+ return AVERROR(ENOMEM);
+ ret = av_expr_parse(&new_pexpr, expr, var_names,
+ NULL, NULL, NULL, NULL, 0, log_ctx);
+ if (ret < 0) {
+ av_log(log_ctx, AV_LOG_ERROR,
+ "Error when evaluating the expression '%s' for %s\n",
+ expr, option);
+ av_free(new_expr);
+ return ret;
+ }
+
+ if (*pexpr_ptr)
+ av_expr_free(*pexpr_ptr);
+ *pexpr_ptr = new_pexpr;
+ av_freep(expr_ptr);
+ *expr_ptr = new_expr;
+
+ return 0;
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ HueContext *hue = ctx->priv;
+ int ret;
+
+ if (hue->hue_expr && hue->hue_deg_expr) {
+ av_log(ctx, AV_LOG_ERROR,
+ "H and h options are incompatible and cannot be specified "
+ "at the same time\n");
+ return AVERROR(EINVAL);
+ }
+
+#define SET_EXPR(expr, option) \
+ if (hue->expr##_expr) do { \
+ ret = set_expr(&hue->expr##_pexpr, &hue->expr##_expr, \
+ hue->expr##_expr, option, ctx); \
+ if (ret < 0) \
+ return ret; \
+ } while (0)
+ SET_EXPR(brightness, "b");
+ SET_EXPR(saturation, "s");
+ SET_EXPR(hue_deg, "h");
+ SET_EXPR(hue, "H");
+#undef SET_EXPR
+
+ av_log(ctx, AV_LOG_VERBOSE,
+ "H_expr:%s h_deg_expr:%s s_expr:%s b_expr:%s\n",
+ hue->hue_expr, hue->hue_deg_expr, hue->saturation_expr, hue->brightness_expr);
+ compute_sin_and_cos(hue);
+ hue->is_first = 1;
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ HueContext *hue = ctx->priv;
+
+ av_expr_free(hue->brightness_pexpr);
+ av_expr_free(hue->hue_deg_pexpr);
+ av_expr_free(hue->hue_pexpr);
+ av_expr_free(hue->saturation_pexpr);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P,
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
+ AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA422P,
+ AV_PIX_FMT_YUVA420P,
+ AV_PIX_FMT_NONE
+ };
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static int config_props(AVFilterLink *inlink)
+{
+ HueContext *hue = inlink->dst->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+
+ hue->hsub = desc->log2_chroma_w;
+ hue->vsub = desc->log2_chroma_h;
+
+ hue->var_values[VAR_N] = 0;
+ hue->var_values[VAR_TB] = av_q2d(inlink->time_base);
+ hue->var_values[VAR_R] = inlink->frame_rate.num == 0 || inlink->frame_rate.den == 0 ?
+ NAN : av_q2d(inlink->frame_rate);
+
+ return 0;
+}
+
+static void apply_luma_lut(HueContext *s,
+ uint8_t *ldst, const int dst_linesize,
+ uint8_t *lsrc, const int src_linesize,
+ int w, int h)
+{
+ int i;
+
+ while (h--) {
+ for (i = 0; i < w; i++)
+ ldst[i] = s->lut_l[lsrc[i]];
+
+ lsrc += src_linesize;
+ ldst += dst_linesize;
+ }
+}
+
+static void apply_lut(HueContext *s,
+ uint8_t *udst, uint8_t *vdst, const int dst_linesize,
+ uint8_t *usrc, uint8_t *vsrc, const int src_linesize,
+ int w, int h)
+{
+ int i;
+
+ while (h--) {
+ for (i = 0; i < w; i++) {
+ const int u = usrc[i];
+ const int v = vsrc[i];
+
+ udst[i] = s->lut_u[u][v];
+ vdst[i] = s->lut_v[u][v];
+ }
+
+ usrc += src_linesize;
+ vsrc += src_linesize;
+ udst += dst_linesize;
+ vdst += dst_linesize;
+ }
+}
+
+#define TS2D(ts) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts))
+#define TS2T(ts, tb) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts) * av_q2d(tb))
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *inpic)
+{
+ HueContext *hue = inlink->dst->priv;
+ AVFilterLink *outlink = inlink->dst->outputs[0];
+ AVFrame *outpic;
+ const int32_t old_hue_sin = hue->hue_sin, old_hue_cos = hue->hue_cos;
+ const float old_brightness = hue->brightness;
+ int direct = 0;
+
+ if (av_frame_is_writable(inpic)) {
+ direct = 1;
+ outpic = inpic;
+ } else {
+ outpic = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!outpic) {
+ av_frame_free(&inpic);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(outpic, inpic);
+ }
+
+ hue->var_values[VAR_N] = inlink->frame_count_out;
+ hue->var_values[VAR_T] = TS2T(inpic->pts, inlink->time_base);
+ hue->var_values[VAR_PTS] = TS2D(inpic->pts);
+
+ if (hue->saturation_expr) {
+ hue->saturation = av_expr_eval(hue->saturation_pexpr, hue->var_values, NULL);
+
+ if (hue->saturation < SAT_MIN_VAL || hue->saturation > SAT_MAX_VAL) {
+ hue->saturation = av_clip(hue->saturation, SAT_MIN_VAL, SAT_MAX_VAL);
+ av_log(inlink->dst, AV_LOG_WARNING,
+ "Saturation value not in range [%d,%d]: clipping value to %0.1f\n",
+ SAT_MIN_VAL, SAT_MAX_VAL, hue->saturation);
+ }
+ }
+
+ if (hue->brightness_expr) {
+ hue->brightness = av_expr_eval(hue->brightness_pexpr, hue->var_values, NULL);
+
+ if (hue->brightness < -10 || hue->brightness > 10) {
+ hue->brightness = av_clipf(hue->brightness, -10, 10);
+ av_log(inlink->dst, AV_LOG_WARNING,
+ "Brightness value not in range [%d,%d]: clipping value to %0.1f\n",
+ -10, 10, hue->brightness);
+ }
+ }
+
+ if (hue->hue_deg_expr) {
+ hue->hue_deg = av_expr_eval(hue->hue_deg_pexpr, hue->var_values, NULL);
+ hue->hue = hue->hue_deg * M_PI / 180;
+ } else if (hue->hue_expr) {
+ hue->hue = av_expr_eval(hue->hue_pexpr, hue->var_values, NULL);
+ hue->hue_deg = hue->hue * 180 / M_PI;
+ }
+
+ av_log(inlink->dst, AV_LOG_DEBUG,
+ "H:%0.1f*PI h:%0.1f s:%0.1f b:%0.f t:%0.1f n:%d\n",
+ hue->hue/M_PI, hue->hue_deg, hue->saturation, hue->brightness,
+ hue->var_values[VAR_T], (int)hue->var_values[VAR_N]);
+
+ compute_sin_and_cos(hue);
+ if (hue->is_first || (old_hue_sin != hue->hue_sin || old_hue_cos != hue->hue_cos))
+ create_chrominance_lut(hue, hue->hue_cos, hue->hue_sin);
+
+ if (hue->is_first || (old_brightness != hue->brightness && hue->brightness))
+ create_luma_lut(hue);
+
+ if (!direct) {
+ if (!hue->brightness)
+ av_image_copy_plane(outpic->data[0], outpic->linesize[0],
+ inpic->data[0], inpic->linesize[0],
+ inlink->w, inlink->h);
+ if (inpic->data[3])
+ av_image_copy_plane(outpic->data[3], outpic->linesize[3],
+ inpic->data[3], inpic->linesize[3],
+ inlink->w, inlink->h);
+ }
+
+ apply_lut(hue, outpic->data[1], outpic->data[2], outpic->linesize[1],
+ inpic->data[1], inpic->data[2], inpic->linesize[1],
+ AV_CEIL_RSHIFT(inlink->w, hue->hsub),
+ AV_CEIL_RSHIFT(inlink->h, hue->vsub));
+ if (hue->brightness)
+ apply_luma_lut(hue, outpic->data[0], outpic->linesize[0],
+ inpic->data[0], inpic->linesize[0], inlink->w, inlink->h);
+
+ if (!direct)
+ av_frame_free(&inpic);
+
+ hue->is_first = 0;
+ return ff_filter_frame(outlink, outpic);
+}
+
+static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
+ char *res, int res_len, int flags)
+{
+ HueContext *hue = ctx->priv;
+ int ret;
+
+#define SET_EXPR(expr, option) \
+ do { \
+ ret = set_expr(&hue->expr##_pexpr, &hue->expr##_expr, \
+ args, option, ctx); \
+ if (ret < 0) \
+ return ret; \
+ } while (0)
+
+ if (!strcmp(cmd, "h")) {
+ SET_EXPR(hue_deg, "h");
+ av_freep(&hue->hue_expr);
+ } else if (!strcmp(cmd, "H")) {
+ SET_EXPR(hue, "H");
+ av_freep(&hue->hue_deg_expr);
+ } else if (!strcmp(cmd, "s")) {
+ SET_EXPR(saturation, "s");
+ } else if (!strcmp(cmd, "b")) {
+ SET_EXPR(brightness, "b");
+ } else
+ return AVERROR(ENOSYS);
+
+ return 0;
+}
+
+static const AVFilterPad hue_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_props,
+ },
+ { NULL }
+};
+
+static const AVFilterPad hue_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_hue = {
+ .name = "hue",
+ .description = NULL_IF_CONFIG_SMALL("Adjust the hue and saturation of the input video."),
+ .priv_size = sizeof(HueContext),
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .process_command = process_command,
+ .inputs = hue_inputs,
+ .outputs = hue_outputs,
+ .priv_class = &hue_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+};
diff --git a/libavfilter/vf_hwdownload.c b/libavfilter/vf_hwdownload.c
index 42925b83d3..f3138f366a 100644
--- a/libavfilter/vf_hwdownload.c
+++ b/libavfilter/vf_hwdownload.c
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -55,8 +55,10 @@ static int hwdownload_query_formats(AVFilterContext *avctx)
}
}
- ff_formats_ref(infmts, &avctx->inputs[0]->out_formats);
- ff_formats_ref(outfmts, &avctx->outputs[0]->in_formats);
+ if ((err = ff_formats_ref(infmts, &avctx->inputs[0]->out_formats)) < 0 ||
+ (err = ff_formats_ref(outfmts, &avctx->outputs[0]->in_formats)) < 0)
+ return err;
+
return 0;
}
diff --git a/libavfilter/vf_hwupload.c b/libavfilter/vf_hwupload.c
index c438d5aac9..f54ce9faa7 100644
--- a/libavfilter/vf_hwupload.c
+++ b/libavfilter/vf_hwupload.c
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -74,17 +74,15 @@ static int hwupload_query_formats(AVFilterContext *avctx)
if (input_pix_fmts) {
for (i = 0; input_pix_fmts[i] != AV_PIX_FMT_NONE; i++) {
err = ff_add_format(&input_formats, input_pix_fmts[i]);
- if (err < 0) {
- ff_formats_unref(&input_formats);
+ if (err < 0)
goto fail;
- }
}
}
- ff_formats_ref(input_formats, &avctx->inputs[0]->out_formats);
-
- ff_formats_ref(ff_make_format_list(output_pix_fmts),
- &avctx->outputs[0]->in_formats);
+ if ((err = ff_formats_ref(input_formats, &avctx->inputs[0]->out_formats)) < 0 ||
+ (err = ff_formats_ref(ff_make_format_list(output_pix_fmts),
+ &avctx->outputs[0]->in_formats)) < 0)
+ goto fail;
av_hwframe_constraints_free(&constraints);
return 0;
diff --git a/libavfilter/vf_hwupload_cuda.c b/libavfilter/vf_hwupload_cuda.c
index dfb35066f6..49f34b6c52 100644
--- a/libavfilter/vf_hwupload_cuda.c
+++ b/libavfilter/vf_hwupload_cuda.c
@@ -1,24 +1,23 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/buffer.h"
#include "libavutil/hwcontext.h"
-#include "libavutil/hwcontext_cuda.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
@@ -35,60 +34,14 @@ typedef struct CudaUploadContext {
AVBufferRef *hwframe;
} CudaUploadContext;
-static void cudaupload_ctx_free(AVHWDeviceContext *ctx)
-{
- AVCUDADeviceContext *hwctx = ctx->hwctx;
- cuCtxDestroy(hwctx->cuda_ctx);
-}
-
static av_cold int cudaupload_init(AVFilterContext *ctx)
{
CudaUploadContext *s = ctx->priv;
+ char buf[64] = { 0 };
- AVHWDeviceContext *device_ctx;
- AVCUDADeviceContext *device_hwctx;
- CUdevice device;
- CUcontext cuda_ctx = NULL, dummy;
- CUresult err;
- int ret;
-
- err = cuInit(0);
- if (err != CUDA_SUCCESS) {
- av_log(ctx, AV_LOG_ERROR, "Could not initialize the CUDA driver API\n");
- return AVERROR_UNKNOWN;
- }
-
- err = cuDeviceGet(&device, s->device_idx);
- if (err != CUDA_SUCCESS) {
- av_log(ctx, AV_LOG_ERROR, "Could not get the device number %d\n", s->device_idx);
- return AVERROR_UNKNOWN;
- }
-
- err = cuCtxCreate(&cuda_ctx, 0, device);
- if (err != CUDA_SUCCESS) {
- av_log(ctx, AV_LOG_ERROR, "Error creating a CUDA context\n");
- return AVERROR_UNKNOWN;
- }
-
- cuCtxPopCurrent(&dummy);
-
- s->hwdevice = av_hwdevice_ctx_alloc(AV_HWDEVICE_TYPE_CUDA);
- if (!s->hwdevice) {
- cuCtxDestroy(cuda_ctx);
- return AVERROR(ENOMEM);
- }
-
- device_ctx = (AVHWDeviceContext*)s->hwdevice->data;
- device_ctx->free = cudaupload_ctx_free;
+ snprintf(buf, sizeof(buf), "%d", s->device_idx);
- device_hwctx = device_ctx->hwctx;
- device_hwctx->cuda_ctx = cuda_ctx;
-
- ret = av_hwdevice_ctx_init(s->hwdevice);
- if (ret < 0)
- return ret;
-
- return 0;
+ return av_hwdevice_ctx_create(&s->hwdevice, AV_HWDEVICE_TYPE_CUDA, buf, NULL, 0);
}
static av_cold void cudaupload_uninit(AVFilterContext *ctx)
@@ -101,6 +54,8 @@ static av_cold void cudaupload_uninit(AVFilterContext *ctx)
static int cudaupload_query_formats(AVFilterContext *ctx)
{
+ int ret;
+
static const enum AVPixelFormat input_pix_fmts[] = {
AV_PIX_FMT_NV12, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV444P,
AV_PIX_FMT_NONE,
@@ -109,10 +64,17 @@ static int cudaupload_query_formats(AVFilterContext *ctx)
AV_PIX_FMT_CUDA, AV_PIX_FMT_NONE,
};
AVFilterFormats *in_fmts = ff_make_format_list(input_pix_fmts);
- AVFilterFormats *out_fmts = ff_make_format_list(output_pix_fmts);
+ AVFilterFormats *out_fmts;
- ff_formats_ref(in_fmts, &ctx->inputs[0]->out_formats);
- ff_formats_ref(out_fmts, &ctx->outputs[0]->in_formats);
+ ret = ff_formats_ref(in_fmts, &ctx->inputs[0]->out_formats);
+ if (ret < 0)
+ return ret;
+
+ out_fmts = ff_make_format_list(output_pix_fmts);
+
+ ret = ff_formats_ref(out_fmts, &ctx->outputs[0]->in_formats);
+ if (ret < 0)
+ return ret;
return 0;
}
@@ -134,8 +96,8 @@ static int cudaupload_config_output(AVFilterLink *outlink)
hwframe_ctx = (AVHWFramesContext*)s->hwframe->data;
hwframe_ctx->format = AV_PIX_FMT_CUDA;
hwframe_ctx->sw_format = inlink->format;
- hwframe_ctx->width = FFALIGN(inlink->w, 16);
- hwframe_ctx->height = FFALIGN(inlink->h, 16);
+ hwframe_ctx->width = inlink->w;
+ hwframe_ctx->height = inlink->h;
ret = av_hwframe_ctx_init(s->hwframe);
if (ret < 0)
@@ -189,18 +151,13 @@ fail:
}
#define OFFSET(x) offsetof(CudaUploadContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
-static const AVOption options[] = {
- { "device", "Number of the device to use", OFFSET(device_idx), AV_OPT_TYPE_INT, { .i64 = 0 }, .flags = FLAGS },
+#define FLAGS (AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM)
+static const AVOption cudaupload_options[] = {
+ { "device", "Number of the device to use", OFFSET(device_idx), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, FLAGS },
{ NULL },
};
-static const AVClass cudaupload_class = {
- .class_name = "cudaupload",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(cudaupload);
static const AVFilterPad cudaupload_inputs[] = {
{
@@ -222,7 +179,7 @@ static const AVFilterPad cudaupload_outputs[] = {
AVFilter ff_vf_hwupload_cuda = {
.name = "hwupload_cuda",
- .description = NULL_IF_CONFIG_SMALL("Upload a system memory frame to a CUDA device"),
+ .description = NULL_IF_CONFIG_SMALL("Upload a system memory frame to a CUDA device."),
.init = cudaupload_init,
.uninit = cudaupload_uninit,
diff --git a/libavfilter/vf_hysteresis.c b/libavfilter/vf_hysteresis.c
new file mode 100644
index 0000000000..8f05b716c9
--- /dev/null
+++ b/libavfilter/vf_hysteresis.c
@@ -0,0 +1,401 @@
+/*
+ * Copyright (c) 2013 Oka Motofumi (chikuzen.mo at gmail dot com)
+ * Copyright (c) 2016 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/imgutils.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+#include "framesync.h"
+
+#define OFFSET(x) offsetof(HysteresisContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+typedef struct HysteresisContext {
+ const AVClass *class;
+
+ int planes;
+ int threshold;
+
+ int width[4], height[4];
+ int nb_planes;
+ int depth;
+ FFFrameSync fs;
+
+ uint8_t *map;
+ uint32_t *xy;
+ int index;
+
+ void (*hysteresis)(struct HysteresisContext *s, const uint8_t *bsrc, const uint8_t *osrc, uint8_t *dst,
+ ptrdiff_t blinesize, ptrdiff_t olinesize,
+ ptrdiff_t destlinesize,
+ int w, int h);
+} HysteresisContext;
+
+static const AVOption hysteresis_options[] = {
+ { "planes", "set planes", OFFSET(planes), AV_OPT_TYPE_INT, {.i64=0xF}, 0, 0xF, FLAGS },
+ { "threshold", "set threshold", OFFSET(threshold), AV_OPT_TYPE_INT, {.i64=0}, 0, UINT16_MAX, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(hysteresis);
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P,
+ AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
+ AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
+ AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
+ AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
+ AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV440P12,
+ AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV444P14,
+ AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
+ AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9,
+ AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10,
+ AV_PIX_FMT_YUVA420P16, AV_PIX_FMT_YUVA422P16, AV_PIX_FMT_YUVA444P16,
+ AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
+ AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16,
+ AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRAP12, AV_PIX_FMT_GBRAP16,
+ AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY16,
+ AV_PIX_FMT_NONE
+ };
+
+ return ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+}
+
+static int process_frame(FFFrameSync *fs)
+{
+ AVFilterContext *ctx = fs->parent;
+ HysteresisContext *s = fs->opaque;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFrame *out, *base, *alt;
+ int ret;
+
+ if ((ret = ff_framesync_get_frame(&s->fs, 0, &base, 0)) < 0 ||
+ (ret = ff_framesync_get_frame(&s->fs, 1, &alt, 0)) < 0)
+ return ret;
+
+ if (ctx->is_disabled) {
+ out = av_frame_clone(base);
+ if (!out)
+ return AVERROR(ENOMEM);
+ } else {
+ int p;
+
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out)
+ return AVERROR(ENOMEM);
+ av_frame_copy_props(out, base);
+
+ for (p = 0; p < s->nb_planes; p++) {
+ if (!((1 << p) & s->planes)) {
+ av_image_copy_plane(out->data[p], out->linesize[p], base->data[p], base->linesize[p],
+ s->width[p], s->height[p]);
+ continue;
+ } else {
+ int y;
+
+ for (y = 0; y < s->height[p]; y++) {
+ memset(out->data[p] + y * out->linesize[p], 0, s->width[p]);
+ }
+ }
+
+ s->index = -1;
+ memset(s->map, 0, s->width[0] * s->height[0]);
+ memset(s->xy, 0, s->width[0] * s->height[0] * 4);
+
+ s->hysteresis(s, base->data[p], alt->data[p],
+ out->data[p],
+ base->linesize[p], alt->linesize[p],
+ out->linesize[p],
+ s->width[p], s->height[p]);
+ }
+ }
+ out->pts = av_rescale_q(s->fs.pts, s->fs.time_base, outlink->time_base);
+
+ return ff_filter_frame(outlink, out);
+}
+
+static int passed(HysteresisContext *s, int x, int y, int w)
+{
+ return s->map[x + y * w];
+}
+
+static void push(HysteresisContext *s, int x, int y, int w)
+{
+ s->map[x + y * w] = 0xff;
+ s->xy[++s->index] = (uint16_t)(x) << 16 | (uint16_t)y;
+}
+
+static void pop(HysteresisContext *s, int *x, int *y)
+{
+ uint32_t val = s->xy[s->index--];
+
+ *x = val >> 16;
+ *y = val & 0x0000FFFF;
+}
+
+static int is_empty(HysteresisContext *s)
+{
+ return s->index < 0;
+}
+
+static void hysteresis8(HysteresisContext *s, const uint8_t *bsrc, const uint8_t *asrc,
+ uint8_t *dst,
+ ptrdiff_t blinesize, ptrdiff_t alinesize,
+ ptrdiff_t dlinesize,
+ int w, int h)
+{
+ const int t = s->threshold;
+ int x, y;
+
+ for (y = 0; y < h; y++) {
+ for (x = 0; x < w; x++) {
+ if ((bsrc[x + y * blinesize] > t) && (asrc[x + y * alinesize] > t) && !passed(s, x, y, w)) {
+ int posx, posy;
+
+ dst[x + y * dlinesize] = asrc[x + y * alinesize];
+
+ push(s, x, y, w);
+
+ while (!is_empty(s)) {
+ int x_min, x_max, y_min, y_max, yy, xx;
+
+ pop(s, &posx, &posy);
+
+ x_min = posx > 0 ? posx - 1 : 0;
+ x_max = posx < w - 1 ? posx + 1 : posx;
+ y_min = posy > 0 ? posy - 1 : 0;
+ y_max = posy < h - 1 ? posy + 1 : posy;
+
+ for (yy = y_min; yy <= y_max; yy++) {
+ for (xx = x_min; xx <= x_max; xx++) {
+ if ((asrc[xx + yy * alinesize] > t) && !passed(s, xx, yy, w)) {
+ dst[xx + yy * dlinesize] = asrc[xx + yy * alinesize];
+ push(s, xx, yy, w);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+static void hysteresis16(HysteresisContext *s, const uint8_t *bbsrc, const uint8_t *aasrc,
+ uint8_t *ddst,
+ ptrdiff_t blinesize, ptrdiff_t alinesize,
+ ptrdiff_t dlinesize,
+ int w, int h)
+{
+ const uint16_t *bsrc = (const uint16_t *)bbsrc;
+ const uint16_t *asrc = (const uint16_t *)aasrc;
+ uint16_t *dst = (uint16_t *)ddst;
+ const int t = s->threshold;
+ int x, y;
+
+ blinesize /= 2;
+ alinesize /= 2;
+ dlinesize /= 2;
+
+ for (y = 0; y < h; y++) {
+ for (x = 0; x < w; x++) {
+ if ((bsrc[x + y * blinesize] > t) && (asrc[x + y * alinesize] > t) && !passed(s, x, y, w)) {
+ int posx, posy;
+
+ dst[x + y * dlinesize] = asrc[x + y * alinesize];
+
+ push(s, x, y, w);
+
+ while (!is_empty(s)) {
+ int x_min, x_max, y_min, y_max, yy, xx;
+
+ pop(s, &posx, &posy);
+
+ x_min = posx > 0 ? posx - 1 : 0;
+ x_max = posx < w - 1 ? posx + 1 : posx;
+ y_min = posy > 0 ? posy - 1 : 0;
+ y_max = posy < h - 1 ? posy + 1 : posy;
+
+ for (yy = y_min; yy <= y_max; yy++) {
+ for (xx = x_min; xx <= x_max; xx++) {
+ if ((asrc[xx + yy * alinesize] > t) && !passed(s, xx, yy, w)) {
+ dst[xx + yy * dlinesize] = asrc[xx + yy * alinesize];
+ push(s, xx, yy, w);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ HysteresisContext *s = ctx->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ int vsub, hsub;
+
+ s->nb_planes = av_pix_fmt_count_planes(inlink->format);
+
+ hsub = desc->log2_chroma_w;
+ vsub = desc->log2_chroma_h;
+ s->height[1] = s->height[2] = AV_CEIL_RSHIFT(inlink->h, vsub);
+ s->height[0] = s->height[3] = inlink->h;
+ s->width[1] = s->width[2] = AV_CEIL_RSHIFT(inlink->w, hsub);
+ s->width[0] = s->width[3] = inlink->w;
+
+ s->depth = desc->comp[0].depth;
+
+ if (desc->comp[0].depth == 8)
+ s->hysteresis = hysteresis8;
+ else
+ s->hysteresis = hysteresis16;
+
+ s->map = av_calloc(inlink->w, inlink->h * sizeof (*s->map));
+ if (!s->map)
+ return AVERROR(ENOMEM);
+
+ s->xy = av_calloc(inlink->w, inlink->h * sizeof(*s->xy));
+ if (!s->xy)
+ return AVERROR(ENOMEM);
+
+ return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ HysteresisContext *s = ctx->priv;
+ AVFilterLink *base = ctx->inputs[0];
+ AVFilterLink *alt = ctx->inputs[1];
+ FFFrameSyncIn *in;
+ int ret;
+
+ if (base->format != alt->format) {
+ av_log(ctx, AV_LOG_ERROR, "inputs must be of same pixel format\n");
+ return AVERROR(EINVAL);
+ }
+ if (base->w != alt->w ||
+ base->h != alt->h ||
+ base->sample_aspect_ratio.num != alt->sample_aspect_ratio.num ||
+ base->sample_aspect_ratio.den != alt->sample_aspect_ratio.den) {
+ av_log(ctx, AV_LOG_ERROR, "First input link %s parameters "
+ "(size %dx%d, SAR %d:%d) do not match the corresponding "
+ "second input link %s parameters (%dx%d, SAR %d:%d)\n",
+ ctx->input_pads[0].name, base->w, base->h,
+ base->sample_aspect_ratio.num,
+ base->sample_aspect_ratio.den,
+ ctx->input_pads[1].name,
+ alt->w, alt->h,
+ alt->sample_aspect_ratio.num,
+ alt->sample_aspect_ratio.den);
+ return AVERROR(EINVAL);
+ }
+
+ outlink->w = base->w;
+ outlink->h = base->h;
+ outlink->time_base = base->time_base;
+ outlink->sample_aspect_ratio = base->sample_aspect_ratio;
+ outlink->frame_rate = base->frame_rate;
+
+ if ((ret = ff_framesync_init(&s->fs, ctx, 2)) < 0)
+ return ret;
+
+ in = s->fs.in;
+ in[0].time_base = base->time_base;
+ in[1].time_base = alt->time_base;
+ in[0].sync = 1;
+ in[0].before = EXT_STOP;
+ in[0].after = EXT_INFINITY;
+ in[1].sync = 1;
+ in[1].before = EXT_STOP;
+ in[1].after = EXT_INFINITY;
+ s->fs.opaque = s;
+ s->fs.on_event = process_frame;
+
+ return ff_framesync_configure(&s->fs);
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
+{
+ HysteresisContext *s = inlink->dst->priv;
+ return ff_framesync_filter_frame(&s->fs, inlink, buf);
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ HysteresisContext *s = outlink->src->priv;
+ return ff_framesync_request_frame(&s->fs, outlink);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ HysteresisContext *s = ctx->priv;
+
+ ff_framesync_uninit(&s->fs);
+ av_freep(&s->map);
+ av_freep(&s->xy);
+}
+
+static const AVFilterPad hysteresis_inputs[] = {
+ {
+ .name = "base",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ {
+ .name = "alt",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad hysteresis_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_hysteresis = {
+ .name = "hysteresis",
+ .description = NULL_IF_CONFIG_SMALL("Grow first stream into second stream by connecting components."),
+ .priv_size = sizeof(HysteresisContext),
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = hysteresis_inputs,
+ .outputs = hysteresis_outputs,
+ .priv_class = &hysteresis_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
+};
diff --git a/libavfilter/vf_idet.c b/libavfilter/vf_idet.c
new file mode 100644
index 0000000000..87d4144e9e
--- /dev/null
+++ b/libavfilter/vf_idet.c
@@ -0,0 +1,452 @@
+/*
+ * Copyright (C) 2012 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <float.h> /* FLT_MAX */
+
+#include "libavutil/cpu.h"
+#include "libavutil/common.h"
+#include "libavutil/opt.h"
+#include "internal.h"
+#include "vf_idet.h"
+
+#define OFFSET(x) offsetof(IDETContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption idet_options[] = {
+ { "intl_thres", "set interlacing threshold", OFFSET(interlace_threshold), AV_OPT_TYPE_FLOAT, {.dbl = 1.04}, -1, FLT_MAX, FLAGS },
+ { "prog_thres", "set progressive threshold", OFFSET(progressive_threshold), AV_OPT_TYPE_FLOAT, {.dbl = 1.5}, -1, FLT_MAX, FLAGS },
+ { "rep_thres", "set repeat threshold", OFFSET(repeat_threshold), AV_OPT_TYPE_FLOAT, {.dbl = 3.0}, -1, FLT_MAX, FLAGS },
+ { "half_life", "half life of cumulative statistics", OFFSET(half_life), AV_OPT_TYPE_FLOAT, {.dbl = 0.0}, -1, INT_MAX, FLAGS },
+ { "analyze_interlaced_flag", "set number of frames to use to determine if the interlace flag is accurate", OFFSET(analyze_interlaced_flag), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, INT_MAX, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(idet);
+
+static const char *type2str(Type type)
+{
+ switch(type) {
+ case TFF : return "tff";
+ case BFF : return "bff";
+ case PROGRESSIVE : return "progressive";
+ case UNDETERMINED : return "undetermined";
+ }
+ return NULL;
+}
+
+#define PRECISION 1048576
+
+static uint64_t uintpow(uint64_t b,unsigned int e)
+{
+ uint64_t r=1;
+ while(e--) r*=b;
+ return r;
+}
+
+static int av_dict_set_fxp(AVDictionary **pm, const char *key, uint64_t value, unsigned int digits,
+ int flags)
+{
+ char valuestr[44];
+ uint64_t print_precision = uintpow(10, digits);
+
+ value = av_rescale(value, print_precision, PRECISION);
+
+ snprintf(valuestr, sizeof(valuestr), "%"PRId64".%0*"PRId64,
+ value / print_precision, digits, value % print_precision);
+
+ return av_dict_set(pm, key, valuestr, flags);
+}
+
+static const char *rep2str(RepeatedField repeated_field)
+{
+ switch(repeated_field) {
+ case REPEAT_NONE : return "neither";
+ case REPEAT_TOP : return "top";
+ case REPEAT_BOTTOM : return "bottom";
+ }
+ return NULL;
+}
+
+int ff_idet_filter_line_c(const uint8_t *a, const uint8_t *b, const uint8_t *c, int w)
+{
+ int x;
+ int ret=0;
+
+ for(x=0; x<w; x++){
+ int v = (*a++ + *c++) - 2 * *b++;
+ ret += FFABS(v);
+ }
+
+ return ret;
+}
+
+int ff_idet_filter_line_c_16bit(const uint16_t *a, const uint16_t *b, const uint16_t *c, int w)
+{
+ int x;
+ int ret=0;
+
+ for(x=0; x<w; x++){
+ int v = (*a++ + *c++) - 2 * *b++;
+ ret += FFABS(v);
+ }
+
+ return ret;
+}
+
+static void filter(AVFilterContext *ctx)
+{
+ IDETContext *idet = ctx->priv;
+ int y, i;
+ int64_t alpha[2]={0};
+ int64_t delta=0;
+ int64_t gamma[2]={0};
+ Type type, best_type;
+ RepeatedField repeat;
+ int match = 0;
+ AVDictionary **metadata = avpriv_frame_get_metadatap(idet->cur);
+
+ for (i = 0; i < idet->csp->nb_components; i++) {
+ int w = idet->cur->width;
+ int h = idet->cur->height;
+ int refs = idet->cur->linesize[i];
+
+ if (i && i<3) {
+ w = AV_CEIL_RSHIFT(w, idet->csp->log2_chroma_w);
+ h = AV_CEIL_RSHIFT(h, idet->csp->log2_chroma_h);
+ }
+
+ for (y = 2; y < h - 2; y++) {
+ uint8_t *prev = &idet->prev->data[i][y*refs];
+ uint8_t *cur = &idet->cur ->data[i][y*refs];
+ uint8_t *next = &idet->next->data[i][y*refs];
+ alpha[ y &1] += idet->filter_line(cur-refs, prev, cur+refs, w);
+ alpha[(y^1)&1] += idet->filter_line(cur-refs, next, cur+refs, w);
+ delta += idet->filter_line(cur-refs, cur, cur+refs, w);
+ gamma[(y^1)&1] += idet->filter_line(cur , prev, cur , w);
+ }
+ }
+
+ if (alpha[0] > idet->interlace_threshold * alpha[1]){
+ type = TFF;
+ }else if(alpha[1] > idet->interlace_threshold * alpha[0]){
+ type = BFF;
+ }else if(alpha[1] > idet->progressive_threshold * delta){
+ type = PROGRESSIVE;
+ }else{
+ type = UNDETERMINED;
+ }
+
+ if ( gamma[0] > idet->repeat_threshold * gamma[1] ){
+ repeat = REPEAT_TOP;
+ } else if ( gamma[1] > idet->repeat_threshold * gamma[0] ){
+ repeat = REPEAT_BOTTOM;
+ } else {
+ repeat = REPEAT_NONE;
+ }
+
+ memmove(idet->history+1, idet->history, HIST_SIZE-1);
+ idet->history[0] = type;
+ best_type = UNDETERMINED;
+ for(i=0; i<HIST_SIZE; i++){
+ if(idet->history[i] != UNDETERMINED){
+ if(best_type == UNDETERMINED)
+ best_type = idet->history[i];
+
+ if(idet->history[i] == best_type) {
+ match++;
+ }else{
+ match=0;
+ break;
+ }
+ }
+ }
+ if(idet->last_type == UNDETERMINED){
+ if(match ) idet->last_type = best_type;
+ }else{
+ if(match>2) idet->last_type = best_type;
+ }
+
+ if (idet->last_type == TFF){
+ idet->cur->top_field_first = 1;
+ idet->cur->interlaced_frame = 1;
+ }else if(idet->last_type == BFF){
+ idet->cur->top_field_first = 0;
+ idet->cur->interlaced_frame = 1;
+ }else if(idet->last_type == PROGRESSIVE){
+ idet->cur->interlaced_frame = 0;
+ }
+
+ for(i=0; i<3; i++)
+ idet->repeats[i] = av_rescale(idet->repeats [i], idet->decay_coefficient, PRECISION);
+
+ for(i=0; i<4; i++){
+ idet->prestat [i] = av_rescale(idet->prestat [i], idet->decay_coefficient, PRECISION);
+ idet->poststat[i] = av_rescale(idet->poststat[i], idet->decay_coefficient, PRECISION);
+ }
+
+ idet->total_repeats [ repeat] ++;
+ idet->repeats [ repeat] += PRECISION;
+
+ idet->total_prestat [ type] ++;
+ idet->prestat [ type] += PRECISION;
+
+ idet->total_poststat[idet->last_type] ++;
+ idet->poststat [idet->last_type] += PRECISION;
+
+ av_log(ctx, AV_LOG_DEBUG, "Repeated Field:%12s, Single frame:%12s, Multi frame:%12s\n",
+ rep2str(repeat), type2str(type), type2str(idet->last_type));
+
+ av_dict_set (metadata, "lavfi.idet.repeated.current_frame", rep2str(repeat), 0);
+ av_dict_set_fxp(metadata, "lavfi.idet.repeated.neither", idet->repeats[REPEAT_NONE], 2, 0);
+ av_dict_set_fxp(metadata, "lavfi.idet.repeated.top", idet->repeats[REPEAT_TOP], 2, 0);
+ av_dict_set_fxp(metadata, "lavfi.idet.repeated.bottom", idet->repeats[REPEAT_BOTTOM], 2, 0);
+
+ av_dict_set (metadata, "lavfi.idet.single.current_frame", type2str(type), 0);
+ av_dict_set_fxp(metadata, "lavfi.idet.single.tff", idet->prestat[TFF], 2 , 0);
+ av_dict_set_fxp(metadata, "lavfi.idet.single.bff", idet->prestat[BFF], 2, 0);
+ av_dict_set_fxp(metadata, "lavfi.idet.single.progressive", idet->prestat[PROGRESSIVE], 2, 0);
+ av_dict_set_fxp(metadata, "lavfi.idet.single.undetermined", idet->prestat[UNDETERMINED], 2, 0);
+
+ av_dict_set (metadata, "lavfi.idet.multiple.current_frame", type2str(idet->last_type), 0);
+ av_dict_set_fxp(metadata, "lavfi.idet.multiple.tff", idet->poststat[TFF], 2, 0);
+ av_dict_set_fxp(metadata, "lavfi.idet.multiple.bff", idet->poststat[BFF], 2, 0);
+ av_dict_set_fxp(metadata, "lavfi.idet.multiple.progressive", idet->poststat[PROGRESSIVE], 2, 0);
+ av_dict_set_fxp(metadata, "lavfi.idet.multiple.undetermined", idet->poststat[UNDETERMINED], 2, 0);
+}
+
+static int filter_frame(AVFilterLink *link, AVFrame *picref)
+{
+ AVFilterContext *ctx = link->dst;
+ IDETContext *idet = ctx->priv;
+
+ // initial frame(s) and not interlaced, just pass through for
+ // the analyze_interlaced_flag mode
+ if (idet->analyze_interlaced_flag &&
+ !picref->interlaced_frame &&
+ !idet->next) {
+ return ff_filter_frame(ctx->outputs[0], picref);
+ }
+ if (idet->analyze_interlaced_flag_done) {
+ if (picref->interlaced_frame && idet->interlaced_flag_accuracy < 0)
+ picref->interlaced_frame = 0;
+ return ff_filter_frame(ctx->outputs[0], picref);
+ }
+
+ av_frame_free(&idet->prev);
+
+ if( picref->width != link->w
+ || picref->height != link->h
+ || picref->format != link->format) {
+ link->dst->inputs[0]->format = picref->format;
+ link->dst->inputs[0]->w = picref->width;
+ link->dst->inputs[0]->h = picref->height;
+
+ av_frame_free(&idet->cur );
+ av_frame_free(&idet->next);
+ }
+
+ idet->prev = idet->cur;
+ idet->cur = idet->next;
+ idet->next = picref;
+
+ if (!idet->cur &&
+ !(idet->cur = av_frame_clone(idet->next)))
+ return AVERROR(ENOMEM);
+
+ if (!idet->prev)
+ return 0;
+
+ if (!idet->csp)
+ idet->csp = av_pix_fmt_desc_get(link->format);
+ if (idet->csp->comp[0].depth > 8){
+ idet->filter_line = (ff_idet_filter_func)ff_idet_filter_line_c_16bit;
+ if (ARCH_X86)
+ ff_idet_init_x86(idet, 1);
+ }
+
+ if (idet->analyze_interlaced_flag) {
+ if (idet->cur->interlaced_frame) {
+ idet->cur->interlaced_frame = 0;
+ filter(ctx);
+ if (idet->last_type == PROGRESSIVE) {
+ idet->interlaced_flag_accuracy --;
+ idet->analyze_interlaced_flag --;
+ } else if (idet->last_type != UNDETERMINED) {
+ idet->interlaced_flag_accuracy ++;
+ idet->analyze_interlaced_flag --;
+ }
+ if (idet->analyze_interlaced_flag == 1) {
+ ff_filter_frame(ctx->outputs[0], av_frame_clone(idet->cur));
+
+ if (idet->next->interlaced_frame && idet->interlaced_flag_accuracy < 0)
+ idet->next->interlaced_frame = 0;
+ idet->analyze_interlaced_flag_done = 1;
+ av_log(ctx, AV_LOG_INFO, "Final flag accuracy %d\n", idet->interlaced_flag_accuracy);
+ return ff_filter_frame(ctx->outputs[0], av_frame_clone(idet->next));
+ }
+ }
+ } else {
+ filter(ctx);
+ }
+
+ return ff_filter_frame(ctx->outputs[0], av_frame_clone(idet->cur));
+}
+
+static int request_frame(AVFilterLink *link)
+{
+ AVFilterContext *ctx = link->src;
+ IDETContext *idet = ctx->priv;
+ int ret;
+
+ if (idet->eof)
+ return AVERROR_EOF;
+
+ ret = ff_request_frame(link->src->inputs[0]);
+
+ if (ret == AVERROR_EOF && idet->cur && !idet->analyze_interlaced_flag_done) {
+ AVFrame *next = av_frame_clone(idet->next);
+
+ if (!next)
+ return AVERROR(ENOMEM);
+
+ ret = filter_frame(link->src->inputs[0], next);
+ idet->eof = 1;
+ }
+
+ return ret;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ IDETContext *idet = ctx->priv;
+ int level = strncmp(ctx->name, "auto-inserted", 13) ? AV_LOG_INFO : AV_LOG_DEBUG;
+
+ av_log(ctx, level, "Repeated Fields: Neither:%6"PRId64" Top:%6"PRId64" Bottom:%6"PRId64"\n",
+ idet->total_repeats[REPEAT_NONE],
+ idet->total_repeats[REPEAT_TOP],
+ idet->total_repeats[REPEAT_BOTTOM]
+ );
+ av_log(ctx, level, "Single frame detection: TFF:%6"PRId64" BFF:%6"PRId64" Progressive:%6"PRId64" Undetermined:%6"PRId64"\n",
+ idet->total_prestat[TFF],
+ idet->total_prestat[BFF],
+ idet->total_prestat[PROGRESSIVE],
+ idet->total_prestat[UNDETERMINED]
+ );
+ av_log(ctx, level, "Multi frame detection: TFF:%6"PRId64" BFF:%6"PRId64" Progressive:%6"PRId64" Undetermined:%6"PRId64"\n",
+ idet->total_poststat[TFF],
+ idet->total_poststat[BFF],
+ idet->total_poststat[PROGRESSIVE],
+ idet->total_poststat[UNDETERMINED]
+ );
+
+ av_frame_free(&idet->prev);
+ av_frame_free(&idet->cur );
+ av_frame_free(&idet->next);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_YUV422P,
+ AV_PIX_FMT_YUV444P,
+ AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_GRAY8,
+ AV_PIX_FMT_YUVJ420P,
+ AV_PIX_FMT_YUVJ422P,
+ AV_PIX_FMT_YUVJ444P,
+ AV_PIX_FMT_GRAY16,
+ AV_PIX_FMT_YUV440P,
+ AV_PIX_FMT_YUVJ440P,
+ AV_PIX_FMT_YUV420P9,
+ AV_PIX_FMT_YUV422P9,
+ AV_PIX_FMT_YUV444P9,
+ AV_PIX_FMT_YUV420P10,
+ AV_PIX_FMT_YUV422P10,
+ AV_PIX_FMT_YUV444P10,
+ AV_PIX_FMT_YUV420P12,
+ AV_PIX_FMT_YUV422P12,
+ AV_PIX_FMT_YUV444P12,
+ AV_PIX_FMT_YUV420P14,
+ AV_PIX_FMT_YUV422P14,
+ AV_PIX_FMT_YUV444P14,
+ AV_PIX_FMT_YUV420P16,
+ AV_PIX_FMT_YUV422P16,
+ AV_PIX_FMT_YUV444P16,
+ AV_PIX_FMT_YUVA420P,
+ AV_PIX_FMT_NONE
+ };
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ IDETContext *idet = ctx->priv;
+
+ idet->eof = 0;
+ idet->last_type = UNDETERMINED;
+ memset(idet->history, UNDETERMINED, HIST_SIZE);
+
+ if( idet->half_life > 0 )
+ idet->decay_coefficient = lrint( PRECISION * exp2(-1.0 / idet->half_life) );
+ else
+ idet->decay_coefficient = PRECISION;
+
+ idet->filter_line = ff_idet_filter_line_c;
+
+ if (ARCH_X86)
+ ff_idet_init_x86(idet, 0);
+
+ return 0;
+}
+
+static const AVFilterPad idet_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad idet_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .request_frame = request_frame
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_idet = {
+ .name = "idet",
+ .description = NULL_IF_CONFIG_SMALL("Interlace detect Filter."),
+ .priv_size = sizeof(IDETContext),
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = idet_inputs,
+ .outputs = idet_outputs,
+ .priv_class = &idet_class,
+};
diff --git a/libavfilter/vf_idet.h b/libavfilter/vf_idet.h
new file mode 100644
index 0000000000..47e3d9ce81
--- /dev/null
+++ b/libavfilter/vf_idet.h
@@ -0,0 +1,80 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_IDET_H
+#define AVFILTER_IDET_H
+
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+
+#define HIST_SIZE 4
+
+typedef int (*ff_idet_filter_func)(const uint8_t *a, const uint8_t *b, const uint8_t *c, int w);
+
+typedef enum {
+ TFF,
+ BFF,
+ PROGRESSIVE,
+ UNDETERMINED,
+} Type;
+
+typedef enum {
+ REPEAT_NONE,
+ REPEAT_TOP,
+ REPEAT_BOTTOM,
+} RepeatedField;
+
+typedef struct {
+ const AVClass *class;
+ float interlace_threshold;
+ float progressive_threshold;
+ float repeat_threshold;
+ float half_life;
+ uint64_t decay_coefficient;
+
+ Type last_type;
+
+ uint64_t repeats[3];
+ uint64_t prestat[4];
+ uint64_t poststat[4];
+ uint64_t total_repeats[3];
+ uint64_t total_prestat[4];
+ uint64_t total_poststat[4];
+
+ uint8_t history[HIST_SIZE];
+
+ AVFrame *cur;
+ AVFrame *next;
+ AVFrame *prev;
+ ff_idet_filter_func filter_line;
+
+ int interlaced_flag_accuracy;
+ int analyze_interlaced_flag;
+ int analyze_interlaced_flag_done;
+
+ const AVPixFmtDescriptor *csp;
+ int eof;
+} IDETContext;
+
+void ff_idet_init_x86(IDETContext *idet, int for_16b);
+
+/* main fall-back for left-over */
+int ff_idet_filter_line_c(const uint8_t *a, const uint8_t *b, const uint8_t *c, int w);
+int ff_idet_filter_line_c_16bit(const uint16_t *a, const uint16_t *b, const uint16_t *c, int w);
+
+#endif
diff --git a/libavfilter/vf_il.c b/libavfilter/vf_il.c
new file mode 100644
index 0000000000..e0bf8d5941
--- /dev/null
+++ b/libavfilter/vf_il.c
@@ -0,0 +1,213 @@
+/*
+ * Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (c) 2013 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * (de)interleave fields filter
+ */
+
+#include "libavutil/opt.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+#include "internal.h"
+
+enum FilterMode {
+ MODE_NONE,
+ MODE_INTERLEAVE,
+ MODE_DEINTERLEAVE
+};
+
+typedef struct {
+ const AVClass *class;
+ int luma_mode, chroma_mode, alpha_mode; ///<FilterMode
+ int luma_swap, chroma_swap, alpha_swap;
+ int nb_planes;
+ int linesize[4], chroma_height;
+ int has_alpha;
+} IlContext;
+
+#define OFFSET(x) offsetof(IlContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption il_options[] = {
+ {"luma_mode", "select luma mode", OFFSET(luma_mode), AV_OPT_TYPE_INT, {.i64=MODE_NONE}, MODE_NONE, MODE_DEINTERLEAVE, FLAGS, "luma_mode"},
+ {"l", "select luma mode", OFFSET(luma_mode), AV_OPT_TYPE_INT, {.i64=MODE_NONE}, MODE_NONE, MODE_DEINTERLEAVE, FLAGS, "luma_mode"},
+ {"none", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_NONE}, 0, 0, FLAGS, "luma_mode"},
+ {"interleave", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_INTERLEAVE}, 0, 0, FLAGS, "luma_mode"},
+ {"i", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_INTERLEAVE}, 0, 0, FLAGS, "luma_mode"},
+ {"deinterleave", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_DEINTERLEAVE}, 0, 0, FLAGS, "luma_mode"},
+ {"d", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_DEINTERLEAVE}, 0, 0, FLAGS, "luma_mode"},
+ {"chroma_mode", "select chroma mode", OFFSET(chroma_mode), AV_OPT_TYPE_INT, {.i64=MODE_NONE}, MODE_NONE, MODE_DEINTERLEAVE, FLAGS, "chroma_mode"},
+ {"c", "select chroma mode", OFFSET(chroma_mode), AV_OPT_TYPE_INT, {.i64=MODE_NONE}, MODE_NONE, MODE_DEINTERLEAVE, FLAGS, "chroma_mode"},
+ {"none", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_NONE}, 0, 0, FLAGS, "chroma_mode"},
+ {"interleave", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_INTERLEAVE}, 0, 0, FLAGS, "chroma_mode"},
+ {"i", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_INTERLEAVE}, 0, 0, FLAGS, "chroma_mode"},
+ {"deinterleave", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_DEINTERLEAVE}, 0, 0, FLAGS, "chroma_mode"},
+ {"d", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_DEINTERLEAVE}, 0, 0, FLAGS, "chroma_mode"},
+ {"alpha_mode", "select alpha mode", OFFSET(alpha_mode), AV_OPT_TYPE_INT, {.i64=MODE_NONE}, MODE_NONE, MODE_DEINTERLEAVE, FLAGS, "alpha_mode"},
+ {"a", "select alpha mode", OFFSET(alpha_mode), AV_OPT_TYPE_INT, {.i64=MODE_NONE}, MODE_NONE, MODE_DEINTERLEAVE, FLAGS, "alpha_mode"},
+ {"none", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_NONE}, 0, 0, FLAGS, "alpha_mode"},
+ {"interleave", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_INTERLEAVE}, 0, 0, FLAGS, "alpha_mode"},
+ {"i", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_INTERLEAVE}, 0, 0, FLAGS, "alpha_mode"},
+ {"deinterleave", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_DEINTERLEAVE}, 0, 0, FLAGS, "alpha_mode"},
+ {"d", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_DEINTERLEAVE}, 0, 0, FLAGS, "alpha_mode"},
+ {"luma_swap", "swap luma fields", OFFSET(luma_swap), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
+ {"ls", "swap luma fields", OFFSET(luma_swap), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
+ {"chroma_swap", "swap chroma fields", OFFSET(chroma_swap), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
+ {"cs", "swap chroma fields", OFFSET(chroma_swap), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
+ {"alpha_swap", "swap alpha fields", OFFSET(alpha_swap), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
+ {"as", "swap alpha fields", OFFSET(alpha_swap), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
+ {NULL}
+};
+
+AVFILTER_DEFINE_CLASS(il);
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *formats = NULL;
+ int fmt, ret;
+
+ for (fmt = 0; av_pix_fmt_desc_get(fmt); fmt++) {
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
+ if (!(desc->flags & AV_PIX_FMT_FLAG_PAL) &&
+ !(desc->flags & AV_PIX_FMT_FLAG_HWACCEL) &&
+ (ret = ff_add_format(&formats, fmt)) < 0)
+ return ret;
+ }
+
+ return ff_set_common_formats(ctx, formats);
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ IlContext *s = inlink->dst->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ int ret;
+
+ s->nb_planes = av_pix_fmt_count_planes(inlink->format);
+
+ s->has_alpha = !!(desc->flags & AV_PIX_FMT_FLAG_ALPHA);
+ if ((ret = av_image_fill_linesizes(s->linesize, inlink->format, inlink->w)) < 0)
+ return ret;
+
+ s->chroma_height = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
+
+ return 0;
+}
+
+static void interleave(uint8_t *dst, uint8_t *src, int w, int h,
+ int dst_linesize, int src_linesize,
+ enum FilterMode mode, int swap)
+{
+ const int a = swap;
+ const int b = 1 - a;
+ const int m = h >> 1;
+ int y;
+
+ switch (mode) {
+ case MODE_DEINTERLEAVE:
+ for (y = 0; y < m; y++) {
+ memcpy(dst + dst_linesize * y , src + src_linesize * (y * 2 + a), w);
+ memcpy(dst + dst_linesize * (y + m), src + src_linesize * (y * 2 + b), w);
+ }
+ break;
+ case MODE_NONE:
+ for (y = 0; y < m; y++) {
+ memcpy(dst + dst_linesize * y * 2 , src + src_linesize * (y * 2 + a), w);
+ memcpy(dst + dst_linesize * (y * 2 + 1), src + src_linesize * (y * 2 + b), w);
+ }
+ break;
+ case MODE_INTERLEAVE:
+ for (y = 0; y < m; y++) {
+ memcpy(dst + dst_linesize * (y * 2 + a), src + src_linesize * y , w);
+ memcpy(dst + dst_linesize * (y * 2 + b), src + src_linesize * (y + m), w);
+ }
+ break;
+ }
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
+{
+ IlContext *s = inlink->dst->priv;
+ AVFilterLink *outlink = inlink->dst->outputs[0];
+ AVFrame *out;
+ int comp;
+
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&inpicref);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, inpicref);
+
+ interleave(out->data[0], inpicref->data[0],
+ s->linesize[0], inlink->h,
+ out->linesize[0], inpicref->linesize[0],
+ s->luma_mode, s->luma_swap);
+
+ for (comp = 1; comp < (s->nb_planes - s->has_alpha); comp++) {
+ interleave(out->data[comp], inpicref->data[comp],
+ s->linesize[comp], s->chroma_height,
+ out->linesize[comp], inpicref->linesize[comp],
+ s->chroma_mode, s->chroma_swap);
+ }
+
+ if (s->has_alpha) {
+ comp = s->nb_planes - 1;
+ interleave(out->data[comp], inpicref->data[comp],
+ s->linesize[comp], inlink->h,
+ out->linesize[comp], inpicref->linesize[comp],
+ s->alpha_mode, s->alpha_swap);
+ }
+
+ av_frame_free(&inpicref);
+ return ff_filter_frame(outlink, out);
+}
+
+static const AVFilterPad inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ { NULL }
+};
+
+static const AVFilterPad outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_il = {
+ .name = "il",
+ .description = NULL_IF_CONFIG_SMALL("Deinterleave or interleave fields."),
+ .priv_size = sizeof(IlContext),
+ .query_formats = query_formats,
+ .inputs = inputs,
+ .outputs = outputs,
+ .priv_class = &il_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+};
diff --git a/libavfilter/vf_interlace.c b/libavfilter/vf_interlace.c
index ac435d768a..efa3128727 100644
--- a/libavfilter/vf_interlace.c
+++ b/libavfilter/vf_interlace.c
@@ -1,18 +1,23 @@
/*
- * This file is part of Libav.
+ * Copyright (c) 2003 Michael Zucchi <notzed@ximian.com>
+ * Copyright (c) 2010 Baptiste Coudurier
+ * Copyright (c) 2011 Stefano Sabatini
+ * Copyright (c) 2013 Vittorio Giovara <vittorio.giovara@gmail.com>
*
- * Libav is free software; you can redistribute it and/or modify
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
- * with Libav; if not, write to the Free Software Foundation, Inc.,
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
@@ -33,25 +38,20 @@
#include "video.h"
#define OFFSET(x) offsetof(InterlaceContext, x)
-#define V AV_OPT_FLAG_VIDEO_PARAM
-static const AVOption options[] = {
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption interlace_options[] = {
{ "scan", "scanning mode", OFFSET(scan),
- AV_OPT_TYPE_INT, {.i64 = MODE_TFF }, 0, 1, .flags = V, .unit = "scan" },
+ AV_OPT_TYPE_INT, {.i64 = MODE_TFF }, 0, 1, .flags = FLAGS, .unit = "scan" },
{ "tff", "top field first", 0,
- AV_OPT_TYPE_CONST, {.i64 = MODE_TFF }, INT_MIN, INT_MAX, .flags = V, .unit = "scan" },
+ AV_OPT_TYPE_CONST, {.i64 = MODE_TFF }, INT_MIN, INT_MAX, .flags = FLAGS, .unit = "scan" },
{ "bff", "bottom field first", 0,
- AV_OPT_TYPE_CONST, {.i64 = MODE_BFF }, INT_MIN, INT_MAX, .flags = V, .unit = "scan" },
- { "lowpass", "enable vertical low-pass filter", OFFSET(lowpass),
- AV_OPT_TYPE_INT, {.i64 = 1 }, 0, 1, .flags = V },
+ AV_OPT_TYPE_CONST, {.i64 = MODE_BFF }, INT_MIN, INT_MAX, .flags = FLAGS, .unit = "scan" },
+ { "lowpass", "set vertical low-pass filter", OFFSET(lowpass),
+ AV_OPT_TYPE_BOOL, {.i64 = 1 }, 0, 1, .flags = FLAGS },
{ NULL }
};
-static const AVClass class = {
- .class_name = "interlace filter",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(interlace);
static void lowpass_line_c(uint8_t *dstp, ptrdiff_t linesize,
const uint8_t *srcp,
@@ -76,8 +76,10 @@ static const enum AVPixelFormat formats_supported[] = {
static int query_formats(AVFilterContext *ctx)
{
- ff_set_common_formats(ctx, ff_make_format_list(formats_supported));
- return 0;
+ AVFilterFormats *fmts_list = ff_make_format_list(formats_supported);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
}
static av_cold void uninit(AVFilterContext *ctx)
@@ -86,8 +88,6 @@ static av_cold void uninit(AVFilterContext *ctx)
av_frame_free(&s->cur);
av_frame_free(&s->next);
-
- av_opt_free(s);
}
static int config_out_props(AVFilterLink *outlink)
@@ -138,10 +138,8 @@ static void copy_picture_field(InterlaceContext *s,
int plane, j;
for (plane = 0; plane < desc->nb_components; plane++) {
- int cols = (plane == 1 || plane == 2) ? AV_CEIL_RSHIFT(inlink->w, hsub)
- : inlink->w;
- int lines = (plane == 1 || plane == 2) ? AV_CEIL_RSHIFT(inlink->h, vsub)
- : inlink->h;
+ int cols = (plane == 1 || plane == 2) ? -(-inlink->w) >> hsub : inlink->w;
+ int lines = (plane == 1 || plane == 2) ? AV_CEIL_RSHIFT(inlink->h, vsub) : inlink->h;
uint8_t *dstp = dst_frame->data[plane];
const uint8_t *srcp = src_frame->data[plane];
@@ -198,7 +196,6 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
return AVERROR(ENOMEM);
out->pts /= 2; // adjust pts to new framerate
ret = ff_filter_frame(outlink, out);
- s->got_output = 1;
return ret;
}
@@ -221,20 +218,6 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
av_frame_free(&s->next);
ret = ff_filter_frame(outlink, out);
- s->got_output = 1;
-
- return ret;
-}
-
-static int request_frame(AVFilterLink *outlink)
-{
- AVFilterContext *ctx = outlink->src;
- InterlaceContext *s = ctx->priv;
- int ret = 0;
-
- s->got_output = 0;
- while (ret >= 0 && !s->got_output)
- ret = ff_request_frame(ctx->inputs[0]);
return ret;
}
@@ -250,10 +233,9 @@ static const AVFilterPad inputs[] = {
static const AVFilterPad outputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .config_props = config_out_props,
- .request_frame = request_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_out_props,
},
{ NULL }
};
@@ -262,12 +244,9 @@ AVFilter ff_vf_interlace = {
.name = "interlace",
.description = NULL_IF_CONFIG_SMALL("Convert progressive video into interlaced."),
.uninit = uninit,
-
- .priv_class = &class,
+ .priv_class = &interlace_class,
.priv_size = sizeof(InterlaceContext),
.query_formats = query_formats,
-
.inputs = inputs,
.outputs = outputs,
};
-
diff --git a/libavfilter/vf_kerndeint.c b/libavfilter/vf_kerndeint.c
new file mode 100644
index 0000000000..4825ed5e3e
--- /dev/null
+++ b/libavfilter/vf_kerndeint.c
@@ -0,0 +1,319 @@
+/*
+ * Copyright (c) 2012 Jeremy Tran
+ * Copyright (c) 2004 Tobias Diedrich
+ * Copyright (c) 2003 Donald A. Graft
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/**
+ * @file
+ * Kernel Deinterlacer
+ * Ported from MPlayer libmpcodecs/vf_kerndeint.c.
+ */
+
+#include "libavutil/imgutils.h"
+#include "libavutil/intreadwrite.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+
+typedef struct {
+ const AVClass *class;
+ int frame; ///< frame count, starting from 0
+ int thresh, map, order, sharp, twoway;
+ int vsub;
+ int is_packed_rgb;
+ uint8_t *tmp_data [4]; ///< temporary plane data buffer
+ int tmp_linesize[4]; ///< temporary plane byte linesize
+ int tmp_bwidth [4]; ///< temporary plane byte width
+} KerndeintContext;
+
+#define OFFSET(x) offsetof(KerndeintContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+static const AVOption kerndeint_options[] = {
+ { "thresh", "set the threshold", OFFSET(thresh), AV_OPT_TYPE_INT, {.i64=10}, 0, 255, FLAGS },
+ { "map", "set the map", OFFSET(map), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS },
+ { "order", "set the order", OFFSET(order), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS },
+ { "sharp", "set sharpening", OFFSET(sharp), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS },
+ { "twoway", "set twoway", OFFSET(twoway), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(kerndeint);
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ KerndeintContext *kerndeint = ctx->priv;
+
+ av_freep(&kerndeint->tmp_data[0]);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_YUYV422,
+ AV_PIX_FMT_ARGB, AV_PIX_FMT_0RGB,
+ AV_PIX_FMT_ABGR, AV_PIX_FMT_0BGR,
+ AV_PIX_FMT_RGBA, AV_PIX_FMT_RGB0,
+ AV_PIX_FMT_BGRA, AV_PIX_FMT_BGR0,
+ AV_PIX_FMT_NONE
+ };
+
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static int config_props(AVFilterLink *inlink)
+{
+ KerndeintContext *kerndeint = inlink->dst->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ int ret;
+
+ kerndeint->is_packed_rgb = av_pix_fmt_desc_get(inlink->format)->flags & AV_PIX_FMT_FLAG_RGB;
+ kerndeint->vsub = desc->log2_chroma_h;
+
+ ret = av_image_alloc(kerndeint->tmp_data, kerndeint->tmp_linesize,
+ inlink->w, inlink->h, inlink->format, 16);
+ if (ret < 0)
+ return ret;
+ memset(kerndeint->tmp_data[0], 0, ret);
+
+ if ((ret = av_image_fill_linesizes(kerndeint->tmp_bwidth, inlink->format, inlink->w)) < 0)
+ return ret;
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *inpic)
+{
+ KerndeintContext *kerndeint = inlink->dst->priv;
+ AVFilterLink *outlink = inlink->dst->outputs[0];
+ AVFrame *outpic;
+ const uint8_t *prvp; ///< Previous field's pixel line number n
+ const uint8_t *prvpp; ///< Previous field's pixel line number (n - 1)
+ const uint8_t *prvpn; ///< Previous field's pixel line number (n + 1)
+ const uint8_t *prvppp; ///< Previous field's pixel line number (n - 2)
+ const uint8_t *prvpnn; ///< Previous field's pixel line number (n + 2)
+ const uint8_t *prvp4p; ///< Previous field's pixel line number (n - 4)
+ const uint8_t *prvp4n; ///< Previous field's pixel line number (n + 4)
+
+ const uint8_t *srcp; ///< Current field's pixel line number n
+ const uint8_t *srcpp; ///< Current field's pixel line number (n - 1)
+ const uint8_t *srcpn; ///< Current field's pixel line number (n + 1)
+ const uint8_t *srcppp; ///< Current field's pixel line number (n - 2)
+ const uint8_t *srcpnn; ///< Current field's pixel line number (n + 2)
+ const uint8_t *srcp3p; ///< Current field's pixel line number (n - 3)
+ const uint8_t *srcp3n; ///< Current field's pixel line number (n + 3)
+ const uint8_t *srcp4p; ///< Current field's pixel line number (n - 4)
+ const uint8_t *srcp4n; ///< Current field's pixel line number (n + 4)
+
+ uint8_t *dstp, *dstp_saved;
+ const uint8_t *srcp_saved;
+
+ int src_linesize, psrc_linesize, dst_linesize, bwidth;
+ int x, y, plane, val, hi, lo, g, h, n = kerndeint->frame++;
+ double valf;
+
+ const int thresh = kerndeint->thresh;
+ const int order = kerndeint->order;
+ const int map = kerndeint->map;
+ const int sharp = kerndeint->sharp;
+ const int twoway = kerndeint->twoway;
+
+ const int is_packed_rgb = kerndeint->is_packed_rgb;
+
+ outpic = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!outpic) {
+ av_frame_free(&inpic);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(outpic, inpic);
+ outpic->interlaced_frame = 0;
+
+ for (plane = 0; plane < 4 && inpic->data[plane] && inpic->linesize[plane]; plane++) {
+ h = plane == 0 ? inlink->h : AV_CEIL_RSHIFT(inlink->h, kerndeint->vsub);
+ bwidth = kerndeint->tmp_bwidth[plane];
+
+ srcp_saved = inpic->data[plane];
+ src_linesize = inpic->linesize[plane];
+ psrc_linesize = kerndeint->tmp_linesize[plane];
+ dstp_saved = outpic->data[plane];
+ dst_linesize = outpic->linesize[plane];
+ srcp = srcp_saved + (1 - order) * src_linesize;
+ dstp = dstp_saved + (1 - order) * dst_linesize;
+
+ for (y = 0; y < h; y += 2) {
+ memcpy(dstp, srcp, bwidth);
+ srcp += 2 * src_linesize;
+ dstp += 2 * dst_linesize;
+ }
+
+ // Copy through the lines that will be missed below.
+ memcpy(dstp_saved + order * dst_linesize, srcp_saved + (1 - order) * src_linesize, bwidth);
+ memcpy(dstp_saved + (2 + order ) * dst_linesize, srcp_saved + (3 - order) * src_linesize, bwidth);
+ memcpy(dstp_saved + (h - 2 + order) * dst_linesize, srcp_saved + (h - 1 - order) * src_linesize, bwidth);
+ memcpy(dstp_saved + (h - 4 + order) * dst_linesize, srcp_saved + (h - 3 - order) * src_linesize, bwidth);
+
+ /* For the other field choose adaptively between using the previous field
+ or the interpolant from the current field. */
+ prvp = kerndeint->tmp_data[plane] + 5 * psrc_linesize - (1 - order) * psrc_linesize;
+ prvpp = prvp - psrc_linesize;
+ prvppp = prvp - 2 * psrc_linesize;
+ prvp4p = prvp - 4 * psrc_linesize;
+ prvpn = prvp + psrc_linesize;
+ prvpnn = prvp + 2 * psrc_linesize;
+ prvp4n = prvp + 4 * psrc_linesize;
+
+ srcp = srcp_saved + 5 * src_linesize - (1 - order) * src_linesize;
+ srcpp = srcp - src_linesize;
+ srcppp = srcp - 2 * src_linesize;
+ srcp3p = srcp - 3 * src_linesize;
+ srcp4p = srcp - 4 * src_linesize;
+
+ srcpn = srcp + src_linesize;
+ srcpnn = srcp + 2 * src_linesize;
+ srcp3n = srcp + 3 * src_linesize;
+ srcp4n = srcp + 4 * src_linesize;
+
+ dstp = dstp_saved + 5 * dst_linesize - (1 - order) * dst_linesize;
+
+ for (y = 5 - (1 - order); y <= h - 5 - (1 - order); y += 2) {
+ for (x = 0; x < bwidth; x++) {
+ if (thresh == 0 || n == 0 ||
+ (abs((int)prvp[x] - (int)srcp[x]) > thresh) ||
+ (abs((int)prvpp[x] - (int)srcpp[x]) > thresh) ||
+ (abs((int)prvpn[x] - (int)srcpn[x]) > thresh)) {
+ if (map) {
+ g = x & ~3;
+
+ if (is_packed_rgb) {
+ AV_WB32(dstp + g, 0xffffffff);
+ x = g + 3;
+ } else if (inlink->format == AV_PIX_FMT_YUYV422) {
+ // y <- 235, u <- 128, y <- 235, v <- 128
+ AV_WB32(dstp + g, 0xeb80eb80);
+ x = g + 3;
+ } else {
+ dstp[x] = plane == 0 ? 235 : 128;
+ }
+ } else {
+ if (is_packed_rgb) {
+ hi = 255;
+ lo = 0;
+ } else if (inlink->format == AV_PIX_FMT_YUYV422) {
+ hi = x & 1 ? 240 : 235;
+ lo = 16;
+ } else {
+ hi = plane == 0 ? 235 : 240;
+ lo = 16;
+ }
+
+ if (sharp) {
+ if (twoway) {
+ valf = + 0.526 * ((int)srcpp[x] + (int)srcpn[x])
+ + 0.170 * ((int)srcp[x] + (int)prvp[x])
+ - 0.116 * ((int)srcppp[x] + (int)srcpnn[x] + (int)prvppp[x] + (int)prvpnn[x])
+ - 0.026 * ((int)srcp3p[x] + (int)srcp3n[x])
+ + 0.031 * ((int)srcp4p[x] + (int)srcp4n[x] + (int)prvp4p[x] + (int)prvp4n[x]);
+ } else {
+ valf = + 0.526 * ((int)srcpp[x] + (int)srcpn[x])
+ + 0.170 * ((int)prvp[x])
+ - 0.116 * ((int)prvppp[x] + (int)prvpnn[x])
+ - 0.026 * ((int)srcp3p[x] + (int)srcp3n[x])
+ + 0.031 * ((int)prvp4p[x] + (int)prvp4p[x]);
+ }
+ dstp[x] = av_clip(valf, lo, hi);
+ } else {
+ if (twoway) {
+ val = (8 * ((int)srcpp[x] + (int)srcpn[x]) + 2 * ((int)srcp[x] + (int)prvp[x])
+ - (int)(srcppp[x]) - (int)(srcpnn[x])
+ - (int)(prvppp[x]) - (int)(prvpnn[x])) >> 4;
+ } else {
+ val = (8 * ((int)srcpp[x] + (int)srcpn[x]) + 2 * ((int)prvp[x])
+ - (int)(prvppp[x]) - (int)(prvpnn[x])) >> 4;
+ }
+ dstp[x] = av_clip(val, lo, hi);
+ }
+ }
+ } else {
+ dstp[x] = srcp[x];
+ }
+ }
+ prvp += 2 * psrc_linesize;
+ prvpp += 2 * psrc_linesize;
+ prvppp += 2 * psrc_linesize;
+ prvpn += 2 * psrc_linesize;
+ prvpnn += 2 * psrc_linesize;
+ prvp4p += 2 * psrc_linesize;
+ prvp4n += 2 * psrc_linesize;
+ srcp += 2 * src_linesize;
+ srcpp += 2 * src_linesize;
+ srcppp += 2 * src_linesize;
+ srcp3p += 2 * src_linesize;
+ srcp4p += 2 * src_linesize;
+ srcpn += 2 * src_linesize;
+ srcpnn += 2 * src_linesize;
+ srcp3n += 2 * src_linesize;
+ srcp4n += 2 * src_linesize;
+ dstp += 2 * dst_linesize;
+ }
+
+ srcp = inpic->data[plane];
+ dstp = kerndeint->tmp_data[plane];
+ av_image_copy_plane(dstp, psrc_linesize, srcp, src_linesize, bwidth, h);
+ }
+
+ av_frame_free(&inpic);
+ return ff_filter_frame(outlink, outpic);
+}
+
+static const AVFilterPad kerndeint_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_props,
+ },
+ { NULL }
+};
+
+static const AVFilterPad kerndeint_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+
+AVFilter ff_vf_kerndeint = {
+ .name = "kerndeint",
+ .description = NULL_IF_CONFIG_SMALL("Apply kernel deinterlacing to the input."),
+ .priv_size = sizeof(KerndeintContext),
+ .priv_class = &kerndeint_class,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = kerndeint_inputs,
+ .outputs = kerndeint_outputs,
+};
diff --git a/libavfilter/vf_lenscorrection.c b/libavfilter/vf_lenscorrection.c
new file mode 100644
index 0000000000..239fe195bd
--- /dev/null
+++ b/libavfilter/vf_lenscorrection.c
@@ -0,0 +1,231 @@
+/*
+ * Copyright (C) 2007 Richard Spindler (author of frei0r plugin from which this was derived)
+ * Copyright (C) 2014 Daniel Oberhoff
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Lenscorrection filter, algorithm from the frei0r plugin with the same name
+*/
+#include <stdlib.h>
+#include <math.h>
+
+#include "libavutil/opt.h"
+#include "libavutil/intreadwrite.h"
+#include "libavutil/pixdesc.h"
+
+#include "avfilter.h"
+#include "internal.h"
+#include "video.h"
+
+typedef struct LenscorrectionCtx {
+ const AVClass *av_class;
+ unsigned int width;
+ unsigned int height;
+ int hsub, vsub;
+ int nb_planes;
+ double cx, cy, k1, k2;
+ int32_t *correction[4];
+} LenscorrectionCtx;
+
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption lenscorrection_options[] = {
+ { "cx", "set relative center x", offsetof(LenscorrectionCtx, cx), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 1, .flags=FLAGS },
+ { "cy", "set relative center y", offsetof(LenscorrectionCtx, cy), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 1, .flags=FLAGS },
+ { "k1", "set quadratic distortion factor", offsetof(LenscorrectionCtx, k1), AV_OPT_TYPE_DOUBLE, {.dbl=0.0}, -1, 1, .flags=FLAGS },
+ { "k2", "set double quadratic distortion factor", offsetof(LenscorrectionCtx, k2), AV_OPT_TYPE_DOUBLE, {.dbl=0.0}, -1, 1, .flags=FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(lenscorrection);
+
+typedef struct ThreadData {
+ AVFrame *in, *out;
+ int w, h;
+ int plane;
+ int xcenter, ycenter;
+ int32_t *correction;
+} ThreadData;
+
+static int filter_slice(AVFilterContext *ctx, void *arg, int job, int nb_jobs)
+{
+ ThreadData *td = (ThreadData*)arg;
+ AVFrame *in = td->in;
+ AVFrame *out = td->out;
+
+ const int w = td->w, h = td->h;
+ const int xcenter = td->xcenter;
+ const int ycenter = td->ycenter;
+ const int start = (h * job ) / nb_jobs;
+ const int end = (h * (job+1)) / nb_jobs;
+ const int plane = td->plane;
+ const int inlinesize = in->linesize[plane];
+ const int outlinesize = out->linesize[plane];
+ const uint8_t *indata = in->data[plane];
+ uint8_t *outrow = out->data[plane] + start * outlinesize;
+ int i;
+ for (i = start; i < end; i++, outrow += outlinesize) {
+ const int off_y = i - ycenter;
+ uint8_t *out = outrow;
+ int j;
+ for (j = 0; j < w; j++) {
+ const int off_x = j - xcenter;
+ const int64_t radius_mult = td->correction[j + i*w];
+ const int x = xcenter + ((radius_mult * off_x + (1<<23))>>24);
+ const int y = ycenter + ((radius_mult * off_y + (1<<23))>>24);
+ const char isvalid = x > 0 && x < w - 1 && y > 0 && y < h - 1;
+ *out++ = isvalid ? indata[y * inlinesize + x] : 0;
+ }
+ }
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUVJ444P,
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUVJ420P,
+ AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA420P,
+ AV_PIX_FMT_YUV422P,
+ AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP,
+ AV_PIX_FMT_NONE
+ };
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ LenscorrectionCtx *rect = ctx->priv;
+ int i;
+
+ for (i = 0; i < FF_ARRAY_ELEMS(rect->correction); i++) {
+ av_freep(&rect->correction[i]);
+ }
+}
+
+static int config_props(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ LenscorrectionCtx *rect = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
+ const AVPixFmtDescriptor *pixdesc = av_pix_fmt_desc_get(inlink->format);
+ rect->hsub = pixdesc->log2_chroma_w;
+ rect->vsub = pixdesc->log2_chroma_h;
+ outlink->w = rect->width = inlink->w;
+ outlink->h = rect->height = inlink->h;
+ rect->nb_planes = av_pix_fmt_count_planes(inlink->format);
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ LenscorrectionCtx *rect = (LenscorrectionCtx*)ctx->priv;
+ AVFrame *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ int plane;
+
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+
+ av_frame_copy_props(out, in);
+
+ for (plane = 0; plane < rect->nb_planes; ++plane) {
+ int hsub = plane == 1 || plane == 2 ? rect->hsub : 0;
+ int vsub = plane == 1 || plane == 2 ? rect->vsub : 0;
+ int hdiv = 1 << hsub;
+ int vdiv = 1 << vsub;
+ int w = rect->width / hdiv;
+ int h = rect->height / vdiv;
+ int xcenter = rect->cx * w;
+ int ycenter = rect->cy * h;
+ int k1 = rect->k1 * (1<<24);
+ int k2 = rect->k2 * (1<<24);
+ ThreadData td = {
+ .in = in,
+ .out = out,
+ .w = w,
+ .h = h,
+ .xcenter = xcenter,
+ .ycenter = ycenter,
+ .plane = plane};
+
+ if (!rect->correction[plane]) {
+ int i,j;
+ const int64_t r2inv = (4LL<<60) / (w * w + h * h);
+
+ rect->correction[plane] = av_malloc_array(w, h * sizeof(**rect->correction));
+ if (!rect->correction[plane])
+ return AVERROR(ENOMEM);
+ for (j = 0; j < h; j++) {
+ const int off_y = j - ycenter;
+ const int off_y2 = off_y * off_y;
+ for (i = 0; i < w; i++) {
+ const int off_x = i - xcenter;
+ const int64_t r2 = ((off_x * off_x + off_y2) * r2inv + (1LL<<31)) >> 32;
+ const int64_t r4 = (r2 * r2 + (1<<27)) >> 28;
+ const int radius_mult = (r2 * k1 + r4 * k2 + (1LL<<27) + (1LL<<52))>>28;
+ rect->correction[plane][j * w + i] = radius_mult;
+ }
+ }
+ }
+
+ td.correction = rect->correction[plane];
+ ctx->internal->execute(ctx, filter_slice, &td, NULL, FFMIN(h, ff_filter_get_nb_threads(ctx)));
+ }
+
+ av_frame_free(&in);
+ return ff_filter_frame(outlink, out);
+}
+
+static const AVFilterPad lenscorrection_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad lenscorrection_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_props,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_lenscorrection = {
+ .name = "lenscorrection",
+ .description = NULL_IF_CONFIG_SMALL("Rectify the image by correcting for lens distortion."),
+ .priv_size = sizeof(LenscorrectionCtx),
+ .query_formats = query_formats,
+ .inputs = lenscorrection_inputs,
+ .outputs = lenscorrection_outputs,
+ .priv_class = &lenscorrection_class,
+ .uninit = uninit,
+ .flags = AVFILTER_FLAG_SLICE_THREADS,
+};
diff --git a/libavfilter/vf_libopencv.c b/libavfilter/vf_libopencv.c
index 50d02f84a8..8128030b8c 100644
--- a/libavfilter/vf_libopencv.c
+++ b/libavfilter/vf_libopencv.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2010 Stefano Sabatini
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -23,8 +23,14 @@
* libopencv wrapper functions
*/
+#include "config.h"
+#if HAVE_OPENCV2_CORE_CORE_C_H
+#include <opencv2/core/core_c.h>
+#include <opencv2/imgproc/imgproc_c.h>
+#else
#include <opencv/cv.h>
#include <opencv/cxcore.h>
+#endif
#include "libavutil/avstring.h"
#include "libavutil/common.h"
#include "libavutil/file.h"
@@ -63,9 +69,10 @@ static int query_formats(AVFilterContext *ctx)
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_BGR24, AV_PIX_FMT_BGRA, AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE
};
-
- ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
- return 0;
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
}
typedef struct OCVContext {
@@ -150,7 +157,8 @@ static int read_shape_from_file(int *cols, int *rows, int **values, const char *
if (buf[i] == '\n') {
if (*rows == INT_MAX) {
av_log(log_ctx, AV_LOG_ERROR, "Overflow on the number of rows in the file\n");
- return AVERROR_INVALIDDATA;
+ ret = AVERROR_INVALIDDATA;
+ goto end;
}
++(*rows);
*cols = FFMAX(*cols, w);
@@ -164,10 +172,13 @@ static int read_shape_from_file(int *cols, int *rows, int **values, const char *
if (*rows > (SIZE_MAX / sizeof(int) / *cols)) {
av_log(log_ctx, AV_LOG_ERROR, "File with size %dx%d is too big\n",
*rows, *cols);
- return AVERROR_INVALIDDATA;
+ ret = AVERROR_INVALIDDATA;
+ goto end;
+ }
+ if (!(*values = av_mallocz_array(sizeof(int) * *rows, *cols))) {
+ ret = AVERROR(ENOMEM);
+ goto end;
}
- if (!(*values = av_mallocz(sizeof(int) * *rows * *cols)))
- return AVERROR(ENOMEM);
/* fill *values */
p = buf;
@@ -181,6 +192,8 @@ static int read_shape_from_file(int *cols, int *rows, int **values, const char *
(*values)[*cols*i + j] = !!av_isgraph(*(p++));
}
}
+
+end:
av_file_unmap(buf, size);
#ifdef DEBUG
@@ -265,10 +278,9 @@ static av_cold int dilate_init(AVFilterContext *ctx, const char *args)
const char *buf = args;
int ret;
- dilate->nb_iterations = 1;
-
if (args) {
kernel_str = av_get_token(&buf, "|");
+
if (!kernel_str)
return AVERROR(ENOMEM);
}
@@ -281,7 +293,8 @@ static av_cold int dilate_init(AVFilterContext *ctx, const char *args)
if (ret < 0)
return ret;
- sscanf(buf, "|%d", &dilate->nb_iterations);
+ if (!buf || sscanf(buf, "|%d", &dilate->nb_iterations) != 1)
+ dilate->nb_iterations = 1;
av_log(ctx, AV_LOG_VERBOSE, "iterations_nb:%d\n", dilate->nb_iterations);
if (dilate->nb_iterations <= 0) {
av_log(ctx, AV_LOG_ERROR, "Invalid non-positive value '%d' for nb_iterations\n",
@@ -321,7 +334,7 @@ typedef struct OCVFilterEntry {
void (*end_frame_filter)(AVFilterContext *ctx, IplImage *inimg, IplImage *outimg);
} OCVFilterEntry;
-static OCVFilterEntry ocv_filter_entries[] = {
+static const OCVFilterEntry ocv_filter_entries[] = {
{ "dilate", sizeof(DilateContext), dilate_init, dilate_uninit, dilate_end_frame_filter },
{ "erode", sizeof(DilateContext), dilate_init, dilate_uninit, erode_end_frame_filter },
{ "smooth", sizeof(SmoothContext), smooth_init, NULL, smooth_end_frame_filter },
@@ -332,8 +345,12 @@ static av_cold int init(AVFilterContext *ctx)
OCVContext *s = ctx->priv;
int i;
+ if (!s->name) {
+ av_log(ctx, AV_LOG_ERROR, "No libopencv filter name specified\n");
+ return AVERROR(EINVAL);
+ }
for (i = 0; i < FF_ARRAY_ELEMS(ocv_filter_entries); i++) {
- OCVFilterEntry *entry = &ocv_filter_entries[i];
+ const OCVFilterEntry *entry = &ocv_filter_entries[i];
if (!strcmp(s->name, entry->name)) {
s->init = entry->init;
s->uninit = entry->uninit;
@@ -355,7 +372,7 @@ static av_cold void uninit(AVFilterContext *ctx)
if (s->uninit)
s->uninit(ctx);
- av_free(s->priv);
+ av_freep(&s->priv);
}
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
@@ -384,24 +401,19 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
}
#define OFFSET(x) offsetof(OCVContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
-static const AVOption options[] = {
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
+static const AVOption ocv_options[] = {
{ "filter_name", NULL, OFFSET(name), AV_OPT_TYPE_STRING, .flags = FLAGS },
{ "filter_params", NULL, OFFSET(params), AV_OPT_TYPE_STRING, .flags = FLAGS },
- { NULL },
+ { NULL }
};
-static const AVClass ocv_class = {
- .class_name = "ocv",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(ocv);
static const AVFilterPad avfilter_vf_ocv_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
},
{ NULL }
@@ -416,17 +428,13 @@ static const AVFilterPad avfilter_vf_ocv_outputs[] = {
};
AVFilter ff_vf_ocv = {
- .name = "ocv",
- .description = NULL_IF_CONFIG_SMALL("Apply transform using libopencv."),
-
- .priv_size = sizeof(OCVContext),
- .priv_class = &ocv_class,
-
+ .name = "ocv",
+ .description = NULL_IF_CONFIG_SMALL("Apply transform using libopencv."),
+ .priv_size = sizeof(OCVContext),
+ .priv_class = &ocv_class,
.query_formats = query_formats,
- .init = init,
- .uninit = uninit,
-
- .inputs = avfilter_vf_ocv_inputs,
-
- .outputs = avfilter_vf_ocv_outputs,
+ .init = init,
+ .uninit = uninit,
+ .inputs = avfilter_vf_ocv_inputs,
+ .outputs = avfilter_vf_ocv_outputs,
};
diff --git a/libavfilter/vf_lut.c b/libavfilter/vf_lut.c
index 9299d4019a..d005afae87 100644
--- a/libavfilter/vf_lut.c
+++ b/libavfilter/vf_lut.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2011 Stefano Sabatini
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -25,20 +25,18 @@
*/
#include "libavutil/attributes.h"
+#include "libavutil/bswap.h"
#include "libavutil/common.h"
#include "libavutil/eval.h"
-#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
+#include "drawutils.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
static const char *const var_names[] = {
- "E",
- "PHI",
- "PI",
"w", ///< width of the input video
"h", ///< height of the input video
"val", ///< input value for the pixel
@@ -50,9 +48,6 @@ static const char *const var_names[] = {
};
enum var_name {
- VAR_E,
- VAR_PHI,
- VAR_PI,
VAR_W,
VAR_H,
VAR_VAL,
@@ -65,13 +60,14 @@ enum var_name {
typedef struct LutContext {
const AVClass *class;
- uint8_t lut[4][256]; ///< lookup table for each component
+ uint16_t lut[4][256 * 256]; ///< lookup table for each component
char *comp_expr_str[4];
AVExpr *comp_expr[4];
int hsub, vsub;
double var_values[VAR_VARS_NB];
int is_rgb, is_yuv;
- int rgba_map[4];
+ int is_planar;
+ int is_16bit;
int step;
int negate_alpha; /* only used by negate */
} LutContext;
@@ -85,37 +81,23 @@ typedef struct LutContext {
#define A 3
#define OFFSET(x) offsetof(LutContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
-
-static const AVOption lut_options[] = {
- { "c0", "set component #0 expression", OFFSET(comp_expr_str[0]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
- { "c1", "set component #1 expression", OFFSET(comp_expr_str[1]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
- { "c2", "set component #2 expression", OFFSET(comp_expr_str[2]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
- { "c3", "set component #3 expression", OFFSET(comp_expr_str[3]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
- { "y", "set Y expression", OFFSET(comp_expr_str[Y]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
- { "u", "set U expression", OFFSET(comp_expr_str[U]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
- { "v", "set V expression", OFFSET(comp_expr_str[V]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
- { "r", "set R expression", OFFSET(comp_expr_str[R]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
- { "g", "set G expression", OFFSET(comp_expr_str[G]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
- { "b", "set B expression", OFFSET(comp_expr_str[B]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
- { "a", "set A expression", OFFSET(comp_expr_str[A]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
- { NULL },
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption options[] = {
+ { "c0", "set component #0 expression", OFFSET(comp_expr_str[0]), AV_OPT_TYPE_STRING, { .str = "clipval" }, .flags = FLAGS },
+ { "c1", "set component #1 expression", OFFSET(comp_expr_str[1]), AV_OPT_TYPE_STRING, { .str = "clipval" }, .flags = FLAGS },
+ { "c2", "set component #2 expression", OFFSET(comp_expr_str[2]), AV_OPT_TYPE_STRING, { .str = "clipval" }, .flags = FLAGS },
+ { "c3", "set component #3 expression", OFFSET(comp_expr_str[3]), AV_OPT_TYPE_STRING, { .str = "clipval" }, .flags = FLAGS },
+ { "y", "set Y expression", OFFSET(comp_expr_str[Y]), AV_OPT_TYPE_STRING, { .str = "clipval" }, .flags = FLAGS },
+ { "u", "set U expression", OFFSET(comp_expr_str[U]), AV_OPT_TYPE_STRING, { .str = "clipval" }, .flags = FLAGS },
+ { "v", "set V expression", OFFSET(comp_expr_str[V]), AV_OPT_TYPE_STRING, { .str = "clipval" }, .flags = FLAGS },
+ { "r", "set R expression", OFFSET(comp_expr_str[R]), AV_OPT_TYPE_STRING, { .str = "clipval" }, .flags = FLAGS },
+ { "g", "set G expression", OFFSET(comp_expr_str[G]), AV_OPT_TYPE_STRING, { .str = "clipval" }, .flags = FLAGS },
+ { "b", "set B expression", OFFSET(comp_expr_str[B]), AV_OPT_TYPE_STRING, { .str = "clipval" }, .flags = FLAGS },
+ { "a", "set A expression", OFFSET(comp_expr_str[A]), AV_OPT_TYPE_STRING, { .str = "clipval" }, .flags = FLAGS },
+ { NULL }
};
-static av_cold int init(AVFilterContext *ctx)
-{
- LutContext *s = ctx->priv;
-
- s->var_values[VAR_PHI] = M_PHI;
- s->var_values[VAR_PI] = M_PI;
- s->var_values[VAR_E ] = M_E;
-
- s->is_rgb = !strcmp(ctx->filter->name, "lutrgb");
- s->is_yuv = !strcmp(ctx->filter->name, "lutyuv");
-
- return 0;
-}
-
static av_cold void uninit(AVFilterContext *ctx)
{
LutContext *s = ctx->priv;
@@ -131,14 +113,26 @@ static av_cold void uninit(AVFilterContext *ctx)
#define YUV_FORMATS \
AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P, \
AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P, \
- AV_PIX_FMT_YUVA420P, \
+ AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P, \
AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P, \
- AV_PIX_FMT_YUVJ440P
+ AV_PIX_FMT_YUVJ440P, \
+ AV_PIX_FMT_YUV444P9LE, AV_PIX_FMT_YUV422P9LE, AV_PIX_FMT_YUV420P9LE, \
+ AV_PIX_FMT_YUV444P10LE, AV_PIX_FMT_YUV422P10LE, AV_PIX_FMT_YUV420P10LE, AV_PIX_FMT_YUV440P10LE, \
+ AV_PIX_FMT_YUV444P12LE, AV_PIX_FMT_YUV422P12LE, AV_PIX_FMT_YUV420P12LE, AV_PIX_FMT_YUV440P12LE, \
+ AV_PIX_FMT_YUV444P14LE, AV_PIX_FMT_YUV422P14LE, AV_PIX_FMT_YUV420P14LE, \
+ AV_PIX_FMT_YUV444P16LE, AV_PIX_FMT_YUV422P16LE, AV_PIX_FMT_YUV420P16LE, \
+ AV_PIX_FMT_YUVA444P16LE, AV_PIX_FMT_YUVA422P16LE, AV_PIX_FMT_YUVA420P16LE
#define RGB_FORMATS \
AV_PIX_FMT_ARGB, AV_PIX_FMT_RGBA, \
AV_PIX_FMT_ABGR, AV_PIX_FMT_BGRA, \
- AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24
+ AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24, \
+ AV_PIX_FMT_RGB48LE, AV_PIX_FMT_RGBA64LE, \
+ AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP, \
+ AV_PIX_FMT_GBRP9LE, AV_PIX_FMT_GBRP10LE, \
+ AV_PIX_FMT_GBRP12LE, AV_PIX_FMT_GBRP14LE, \
+ AV_PIX_FMT_GBRP16LE, AV_PIX_FMT_GBRAP12LE, \
+ AV_PIX_FMT_GBRAP16LE
static const enum AVPixelFormat yuv_pix_fmts[] = { YUV_FORMATS, AV_PIX_FMT_NONE };
static const enum AVPixelFormat rgb_pix_fmts[] = { RGB_FORMATS, AV_PIX_FMT_NONE };
@@ -151,9 +145,10 @@ static int query_formats(AVFilterContext *ctx)
const enum AVPixelFormat *pix_fmts = s->is_rgb ? rgb_pix_fmts :
s->is_yuv ? yuv_pix_fmts :
all_pix_fmts;
-
- ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
- return 0;
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
}
/**
@@ -182,15 +177,32 @@ static double compute_gammaval(void *opaque, double gamma)
return pow((val-minval)/(maxval-minval), gamma) * (maxval-minval)+minval;
}
+/**
+ * Compute ITU Rec.709 gamma correction of value val.
+ */
+static double compute_gammaval709(void *opaque, double gamma)
+{
+ LutContext *s = opaque;
+ double val = s->var_values[VAR_CLIPVAL];
+ double minval = s->var_values[VAR_MINVAL];
+ double maxval = s->var_values[VAR_MAXVAL];
+ double level = (val - minval) / (maxval - minval);
+ level = level < 0.018 ? 4.5 * level
+ : 1.099 * pow(level, 1.0 / gamma) - 0.099;
+ return level * (maxval - minval) + minval;
+}
+
static double (* const funcs1[])(void *, double) = {
clip,
compute_gammaval,
+ compute_gammaval709,
NULL
};
static const char * const funcs1_names[] = {
"clip",
"gammaval",
+ "gammaval709",
NULL
};
@@ -199,14 +211,16 @@ static int config_props(AVFilterLink *inlink)
AVFilterContext *ctx = inlink->dst;
LutContext *s = ctx->priv;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ uint8_t rgba_map[4]; /* component index -> RGBA color index map */
int min[4], max[4];
- int val, comp, ret;
+ int val, color, ret;
s->hsub = desc->log2_chroma_w;
s->vsub = desc->log2_chroma_h;
s->var_values[VAR_W] = inlink->w;
s->var_values[VAR_H] = inlink->h;
+ s->is_16bit = desc->comp[0].depth > 8;
switch (inlink->format) {
case AV_PIX_FMT_YUV410P:
@@ -216,66 +230,101 @@ static int config_props(AVFilterLink *inlink)
case AV_PIX_FMT_YUV440P:
case AV_PIX_FMT_YUV444P:
case AV_PIX_FMT_YUVA420P:
- min[Y] = min[U] = min[V] = 16;
- max[Y] = 235;
- max[U] = max[V] = 240;
- min[A] = 0; max[A] = 255;
+ case AV_PIX_FMT_YUVA422P:
+ case AV_PIX_FMT_YUVA444P:
+ case AV_PIX_FMT_YUV420P9LE:
+ case AV_PIX_FMT_YUV422P9LE:
+ case AV_PIX_FMT_YUV444P9LE:
+ case AV_PIX_FMT_YUVA420P9LE:
+ case AV_PIX_FMT_YUVA422P9LE:
+ case AV_PIX_FMT_YUVA444P9LE:
+ case AV_PIX_FMT_YUV420P10LE:
+ case AV_PIX_FMT_YUV422P10LE:
+ case AV_PIX_FMT_YUV440P10LE:
+ case AV_PIX_FMT_YUV444P10LE:
+ case AV_PIX_FMT_YUVA420P10LE:
+ case AV_PIX_FMT_YUVA422P10LE:
+ case AV_PIX_FMT_YUVA444P10LE:
+ case AV_PIX_FMT_YUV420P12LE:
+ case AV_PIX_FMT_YUV422P12LE:
+ case AV_PIX_FMT_YUV440P12LE:
+ case AV_PIX_FMT_YUV444P12LE:
+ case AV_PIX_FMT_YUV420P14LE:
+ case AV_PIX_FMT_YUV422P14LE:
+ case AV_PIX_FMT_YUV444P14LE:
+ case AV_PIX_FMT_YUV420P16LE:
+ case AV_PIX_FMT_YUV422P16LE:
+ case AV_PIX_FMT_YUV444P16LE:
+ case AV_PIX_FMT_YUVA420P16LE:
+ case AV_PIX_FMT_YUVA422P16LE:
+ case AV_PIX_FMT_YUVA444P16LE:
+ min[Y] = 16 * (1 << (desc->comp[0].depth - 8));
+ min[U] = 16 * (1 << (desc->comp[1].depth - 8));
+ min[V] = 16 * (1 << (desc->comp[2].depth - 8));
+ min[A] = 0;
+ max[Y] = 235 * (1 << (desc->comp[0].depth - 8));
+ max[U] = 240 * (1 << (desc->comp[1].depth - 8));
+ max[V] = 240 * (1 << (desc->comp[2].depth - 8));
+ max[A] = (1 << desc->comp[0].depth) - 1;
+ break;
+ case AV_PIX_FMT_RGB48LE:
+ case AV_PIX_FMT_RGBA64LE:
+ min[0] = min[1] = min[2] = min[3] = 0;
+ max[0] = max[1] = max[2] = max[3] = 65535;
break;
default:
min[0] = min[1] = min[2] = min[3] = 0;
- max[0] = max[1] = max[2] = max[3] = 255;
+ max[0] = max[1] = max[2] = max[3] = 255 * (1 << (desc->comp[0].depth - 8));
}
s->is_yuv = s->is_rgb = 0;
+ s->is_planar = desc->flags & AV_PIX_FMT_FLAG_PLANAR;
if (ff_fmt_is_in(inlink->format, yuv_pix_fmts)) s->is_yuv = 1;
else if (ff_fmt_is_in(inlink->format, rgb_pix_fmts)) s->is_rgb = 1;
if (s->is_rgb) {
- switch (inlink->format) {
- case AV_PIX_FMT_ARGB: s->rgba_map[A] = 0; s->rgba_map[R] = 1; s->rgba_map[G] = 2; s->rgba_map[B] = 3; break;
- case AV_PIX_FMT_ABGR: s->rgba_map[A] = 0; s->rgba_map[B] = 1; s->rgba_map[G] = 2; s->rgba_map[R] = 3; break;
- case AV_PIX_FMT_RGBA:
- case AV_PIX_FMT_RGB24: s->rgba_map[R] = 0; s->rgba_map[G] = 1; s->rgba_map[B] = 2; s->rgba_map[A] = 3; break;
- case AV_PIX_FMT_BGRA:
- case AV_PIX_FMT_BGR24: s->rgba_map[B] = 0; s->rgba_map[G] = 1; s->rgba_map[R] = 2; s->rgba_map[A] = 3; break;
- }
+ ff_fill_rgba_map(rgba_map, inlink->format);
s->step = av_get_bits_per_pixel(desc) >> 3;
+ if (s->is_16bit) {
+ s->step = s->step >> 1;
+ }
}
- for (comp = 0; comp < desc->nb_components; comp++) {
+ for (color = 0; color < desc->nb_components; color++) {
double res;
+ int comp = s->is_rgb ? rgba_map[color] : color;
/* create the parsed expression */
- av_expr_free(s->comp_expr[comp]);
- s->comp_expr[comp] = NULL;
- ret = av_expr_parse(&s->comp_expr[comp], s->comp_expr_str[comp],
+ av_expr_free(s->comp_expr[color]);
+ s->comp_expr[color] = NULL;
+ ret = av_expr_parse(&s->comp_expr[color], s->comp_expr_str[color],
var_names, funcs1_names, funcs1, NULL, NULL, 0, ctx);
if (ret < 0) {
av_log(ctx, AV_LOG_ERROR,
- "Error when parsing the expression '%s' for the component %d.\n",
- s->comp_expr_str[comp], comp);
+ "Error when parsing the expression '%s' for the component %d and color %d.\n",
+ s->comp_expr_str[comp], comp, color);
return AVERROR(EINVAL);
}
- /* compute the s */
- s->var_values[VAR_MAXVAL] = max[comp];
- s->var_values[VAR_MINVAL] = min[comp];
+ /* compute the lut */
+ s->var_values[VAR_MAXVAL] = max[color];
+ s->var_values[VAR_MINVAL] = min[color];
- for (val = 0; val < 256; val++) {
+ for (val = 0; val < FF_ARRAY_ELEMS(s->lut[comp]); val++) {
s->var_values[VAR_VAL] = val;
- s->var_values[VAR_CLIPVAL] = av_clip(val, min[comp], max[comp]);
+ s->var_values[VAR_CLIPVAL] = av_clip(val, min[color], max[color]);
s->var_values[VAR_NEGVAL] =
- av_clip(min[comp] + max[comp] - s->var_values[VAR_VAL],
- min[comp], max[comp]);
+ av_clip(min[color] + max[color] - s->var_values[VAR_VAL],
+ min[color], max[color]);
- res = av_expr_eval(s->comp_expr[comp], s->var_values, s);
+ res = av_expr_eval(s->comp_expr[color], s->var_values, s);
if (isnan(res)) {
av_log(ctx, AV_LOG_ERROR,
- "Error when evaluating the expression '%s' for the value %d for the component #%d.\n",
- s->comp_expr_str[comp], val, comp);
+ "Error when evaluating the expression '%s' for the value %d for the component %d.\n",
+ s->comp_expr_str[color], val, comp);
return AVERROR(EINVAL);
}
- s->lut[comp][val] = av_clip((int)res, min[comp], max[comp]);
+ s->lut[comp][val] = av_clip((int)res, 0, max[A]);
av_log(ctx, AV_LOG_DEBUG, "val[%d][%d] = %d\n", comp, val, s->lut[comp][val]);
}
}
@@ -289,106 +338,230 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
LutContext *s = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
AVFrame *out;
- uint8_t *inrow, *outrow, *inrow0, *outrow0;
- int i, j, k, plane;
+ int i, j, plane, direct = 0;
- out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
- if (!out) {
- av_frame_free(&in);
- return AVERROR(ENOMEM);
+ if (av_frame_is_writable(in)) {
+ direct = 1;
+ out = in;
+ } else {
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
}
- av_frame_copy_props(out, in);
- if (s->is_rgb) {
+ if (s->is_rgb && s->is_16bit && !s->is_planar) {
+ /* packed, 16-bit */
+ uint16_t *inrow, *outrow, *inrow0, *outrow0;
+ const int w = inlink->w;
+ const int h = in->height;
+ const uint16_t (*tab)[256*256] = (const uint16_t (*)[256*256])s->lut;
+ const int in_linesize = in->linesize[0] / 2;
+ const int out_linesize = out->linesize[0] / 2;
+ const int step = s->step;
+
+ inrow0 = (uint16_t*) in ->data[0];
+ outrow0 = (uint16_t*) out->data[0];
+
+ for (i = 0; i < h; i ++) {
+ inrow = inrow0;
+ outrow = outrow0;
+ for (j = 0; j < w; j++) {
+
+ switch (step) {
+#if HAVE_BIGENDIAN
+ case 4: outrow[3] = av_bswap16(tab[3][av_bswap16(inrow[3])]); // Fall-through
+ case 3: outrow[2] = av_bswap16(tab[2][av_bswap16(inrow[2])]); // Fall-through
+ case 2: outrow[1] = av_bswap16(tab[1][av_bswap16(inrow[1])]); // Fall-through
+ default: outrow[0] = av_bswap16(tab[0][av_bswap16(inrow[0])]);
+#else
+ case 4: outrow[3] = tab[3][inrow[3]]; // Fall-through
+ case 3: outrow[2] = tab[2][inrow[2]]; // Fall-through
+ case 2: outrow[1] = tab[1][inrow[1]]; // Fall-through
+ default: outrow[0] = tab[0][inrow[0]];
+#endif
+ }
+ outrow += step;
+ inrow += step;
+ }
+ inrow0 += in_linesize;
+ outrow0 += out_linesize;
+ }
+ } else if (s->is_rgb && !s->is_planar) {
/* packed */
+ uint8_t *inrow, *outrow, *inrow0, *outrow0;
+ const int w = inlink->w;
+ const int h = in->height;
+ const uint16_t (*tab)[256*256] = (const uint16_t (*)[256*256])s->lut;
+ const int in_linesize = in->linesize[0];
+ const int out_linesize = out->linesize[0];
+ const int step = s->step;
+
inrow0 = in ->data[0];
outrow0 = out->data[0];
- for (i = 0; i < in->height; i ++) {
+ for (i = 0; i < h; i ++) {
inrow = inrow0;
outrow = outrow0;
- for (j = 0; j < inlink->w; j++) {
- for (k = 0; k < s->step; k++)
- outrow[k] = s->lut[s->rgba_map[k]][inrow[k]];
- outrow += s->step;
- inrow += s->step;
+ for (j = 0; j < w; j++) {
+ switch (step) {
+ case 4: outrow[3] = tab[3][inrow[3]]; // Fall-through
+ case 3: outrow[2] = tab[2][inrow[2]]; // Fall-through
+ case 2: outrow[1] = tab[1][inrow[1]]; // Fall-through
+ default: outrow[0] = tab[0][inrow[0]];
+ }
+ outrow += step;
+ inrow += step;
+ }
+ inrow0 += in_linesize;
+ outrow0 += out_linesize;
+ }
+ } else if (s->is_16bit) {
+ // planar >8 bit depth
+ uint16_t *inrow, *outrow;
+
+ for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++) {
+ int vsub = plane == 1 || plane == 2 ? s->vsub : 0;
+ int hsub = plane == 1 || plane == 2 ? s->hsub : 0;
+ int h = AV_CEIL_RSHIFT(inlink->h, vsub);
+ int w = AV_CEIL_RSHIFT(inlink->w, hsub);
+ const uint16_t *tab = s->lut[plane];
+ const int in_linesize = in->linesize[plane] / 2;
+ const int out_linesize = out->linesize[plane] / 2;
+
+ inrow = (uint16_t *)in ->data[plane];
+ outrow = (uint16_t *)out->data[plane];
+
+ for (i = 0; i < h; i++) {
+ for (j = 0; j < w; j++) {
+#if HAVE_BIGENDIAN
+ outrow[j] = av_bswap16(tab[av_bswap16(inrow[j])]);
+#else
+ outrow[j] = tab[inrow[j]];
+#endif
+ }
+ inrow += in_linesize;
+ outrow += out_linesize;
}
- inrow0 += in ->linesize[0];
- outrow0 += out->linesize[0];
}
} else {
- /* planar */
- for (plane = 0; plane < 4 && in->data[plane]; plane++) {
+ /* planar 8bit depth */
+ uint8_t *inrow, *outrow;
+
+ for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++) {
int vsub = plane == 1 || plane == 2 ? s->vsub : 0;
int hsub = plane == 1 || plane == 2 ? s->hsub : 0;
+ int h = AV_CEIL_RSHIFT(inlink->h, vsub);
+ int w = AV_CEIL_RSHIFT(inlink->w, hsub);
+ const uint16_t *tab = s->lut[plane];
+ const int in_linesize = in->linesize[plane];
+ const int out_linesize = out->linesize[plane];
inrow = in ->data[plane];
outrow = out->data[plane];
- for (i = 0; i < in->height >> vsub; i ++) {
- for (j = 0; j < inlink->w>>hsub; j++)
- outrow[j] = s->lut[plane][inrow[j]];
- inrow += in ->linesize[plane];
- outrow += out->linesize[plane];
+ for (i = 0; i < h; i++) {
+ for (j = 0; j < w; j++)
+ outrow[j] = tab[inrow[j]];
+ inrow += in_linesize;
+ outrow += out_linesize;
}
}
}
- av_frame_free(&in);
+ if (!direct)
+ av_frame_free(&in);
+
return ff_filter_frame(outlink, out);
}
static const AVFilterPad inputs[] = {
- { .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .filter_frame = filter_frame,
- .config_props = config_props,
+ { .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_props,
},
- { .name = NULL}
+ { NULL }
};
static const AVFilterPad outputs[] = {
- { .name = "default",
- .type = AVMEDIA_TYPE_VIDEO, },
- { .name = NULL}
+ { .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
};
-#define DEFINE_LUT_FILTER(name_, description_, init_, options) \
- static const AVClass name_ ## _class = { \
- .class_name = #name_, \
- .item_name = av_default_item_name, \
- .option = options, \
- .version = LIBAVUTIL_VERSION_INT, \
- }; \
+
+#define DEFINE_LUT_FILTER(name_, description_) \
AVFilter ff_vf_##name_ = { \
.name = #name_, \
.description = NULL_IF_CONFIG_SMALL(description_), \
.priv_size = sizeof(LutContext), \
.priv_class = &name_ ## _class, \
- \
- .init = init_, \
+ .init = name_##_init, \
.uninit = uninit, \
.query_formats = query_formats, \
- \
.inputs = inputs, \
.outputs = outputs, \
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC, \
}
#if CONFIG_LUT_FILTER
-DEFINE_LUT_FILTER(lut, "Compute and apply a lookup table to the RGB/YUV input video.", init, lut_options);
+
+#define lut_options options
+AVFILTER_DEFINE_CLASS(lut);
+
+static int lut_init(AVFilterContext *ctx)
+{
+ return 0;
+}
+
+DEFINE_LUT_FILTER(lut, "Compute and apply a lookup table to the RGB/YUV input video.");
#endif
+
#if CONFIG_LUTYUV_FILTER
-DEFINE_LUT_FILTER(lutyuv, "Compute and apply a lookup table to the YUV input video.", init, lut_options);
+
+#define lutyuv_options options
+AVFILTER_DEFINE_CLASS(lutyuv);
+
+static av_cold int lutyuv_init(AVFilterContext *ctx)
+{
+ LutContext *s = ctx->priv;
+
+ s->is_yuv = 1;
+
+ return 0;
+}
+
+DEFINE_LUT_FILTER(lutyuv, "Compute and apply a lookup table to the YUV input video.");
#endif
+
#if CONFIG_LUTRGB_FILTER
-DEFINE_LUT_FILTER(lutrgb, "Compute and apply a lookup table to the RGB input video.", init, lut_options);
+
+#define lutrgb_options options
+AVFILTER_DEFINE_CLASS(lutrgb);
+
+static av_cold int lutrgb_init(AVFilterContext *ctx)
+{
+ LutContext *s = ctx->priv;
+
+ s->is_rgb = 1;
+
+ return 0;
+}
+
+DEFINE_LUT_FILTER(lutrgb, "Compute and apply a lookup table to the RGB input video.");
#endif
#if CONFIG_NEGATE_FILTER
static const AVOption negate_options[] = {
- { "negate_alpha", NULL, OFFSET(negate_alpha), AV_OPT_TYPE_INT, { .i64 = 0 }, .flags = FLAGS },
- { NULL },
+ { "negate_alpha", NULL, OFFSET(negate_alpha), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
+ { NULL }
};
+AVFILTER_DEFINE_CLASS(negate);
+
static av_cold int negate_init(AVFilterContext *ctx)
{
LutContext *s = ctx->priv;
@@ -397,7 +570,7 @@ static av_cold int negate_init(AVFilterContext *ctx)
av_log(ctx, AV_LOG_DEBUG, "negate_alpha:%d\n", s->negate_alpha);
for (i = 0; i < 4; i++) {
- s->comp_expr_str[i] = av_strdup((i == 3 && s->negate_alpha) ?
+ s->comp_expr_str[i] = av_strdup((i == 3 && !s->negate_alpha) ?
"val" : "negval");
if (!s->comp_expr_str[i]) {
uninit(ctx);
@@ -405,9 +578,9 @@ static av_cold int negate_init(AVFilterContext *ctx)
}
}
- return init(ctx);
+ return 0;
}
-DEFINE_LUT_FILTER(negate, "Negate input video.", negate_init, negate_options);
+DEFINE_LUT_FILTER(negate, "Negate input video.");
#endif
diff --git a/libavfilter/vf_lut2.c b/libavfilter/vf_lut2.c
new file mode 100644
index 0000000000..85b10531b6
--- /dev/null
+++ b/libavfilter/vf_lut2.c
@@ -0,0 +1,379 @@
+/*
+ * Copyright (c) 2016 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/attributes.h"
+#include "libavutil/common.h"
+#include "libavutil/eval.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+#include "drawutils.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+#include "framesync.h"
+
+static const char *const var_names[] = {
+ "w", ///< width of the input video
+ "h", ///< height of the input video
+ "x", ///< input value for the pixel from input #1
+ "y", ///< input value for the pixel from input #2
+ "bdx", ///< input #1 video bitdepth
+ "bdy", ///< input #2 video bitdepth
+ NULL
+};
+
+enum var_name {
+ VAR_W,
+ VAR_H,
+ VAR_X,
+ VAR_Y,
+ VAR_BITDEPTHX,
+ VAR_BITDEPTHY,
+ VAR_VARS_NB
+};
+
+typedef struct LUT2Context {
+ const AVClass *class;
+
+ char *comp_expr_str[4];
+
+ AVExpr *comp_expr[4];
+ double var_values[VAR_VARS_NB];
+ uint16_t *lut[4]; ///< lookup table for each component
+ int width[4], height[4];
+ int nb_planes;
+ int depth, depthx, depthy;
+
+ void (*lut2)(struct LUT2Context *s, AVFrame *dst, AVFrame *srcx, AVFrame *srcy);
+
+ FFFrameSync fs;
+} LUT2Context;
+
+#define OFFSET(x) offsetof(LUT2Context, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption lut2_options[] = {
+ { "c0", "set component #0 expression", OFFSET(comp_expr_str[0]), AV_OPT_TYPE_STRING, { .str = "x" }, .flags = FLAGS },
+ { "c1", "set component #1 expression", OFFSET(comp_expr_str[1]), AV_OPT_TYPE_STRING, { .str = "x" }, .flags = FLAGS },
+ { "c2", "set component #2 expression", OFFSET(comp_expr_str[2]), AV_OPT_TYPE_STRING, { .str = "x" }, .flags = FLAGS },
+ { "c3", "set component #3 expression", OFFSET(comp_expr_str[3]), AV_OPT_TYPE_STRING, { .str = "x" }, .flags = FLAGS },
+ { NULL }
+};
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ LUT2Context *s = ctx->priv;
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ av_expr_free(s->comp_expr[i]);
+ s->comp_expr[i] = NULL;
+ av_freep(&s->comp_expr_str[i]);
+ av_freep(&s->lut[i]);
+ }
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P,
+ AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
+ AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
+ AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
+ AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
+ AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV440P12,
+ AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9,
+ AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10,
+ AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
+ AV_PIX_FMT_GBRP12,
+ AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRAP12,
+ AV_PIX_FMT_GRAY8,
+ AV_PIX_FMT_NONE
+ };
+
+ return ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+}
+
+static int config_inputx(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ LUT2Context *s = ctx->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ int hsub = desc->log2_chroma_w;
+ int vsub = desc->log2_chroma_h;
+
+ s->nb_planes = av_pix_fmt_count_planes(inlink->format);
+ s->height[1] = s->height[2] = AV_CEIL_RSHIFT(inlink->h, vsub);
+ s->height[0] = s->height[3] = inlink->h;
+ s->width[1] = s->width[2] = AV_CEIL_RSHIFT(inlink->w, hsub);
+ s->width[0] = s->width[3] = inlink->w;
+
+ s->var_values[VAR_W] = inlink->w;
+ s->var_values[VAR_H] = inlink->h;
+ s->depthx = desc->comp[0].depth;
+ s->var_values[VAR_BITDEPTHX] = s->depthx;
+
+ return 0;
+}
+
+static int config_inputy(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ LUT2Context *s = ctx->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+
+ s->depthy = desc->comp[0].depth;
+ s->var_values[VAR_BITDEPTHY] = s->depthy;
+
+ return 0;
+}
+
+static void lut2_8bit(struct LUT2Context *s, AVFrame *out, AVFrame *srcx, AVFrame *srcy)
+{
+ int p, y, x;
+
+ for (p = 0; p < s->nb_planes; p++) {
+ const uint16_t *lut = s->lut[p];
+ const uint8_t *srcxx, *srcyy;
+ uint8_t *dst;
+
+ dst = out->data[p];
+ srcxx = srcx->data[p];
+ srcyy = srcy->data[p];
+
+ for (y = 0; y < s->height[p]; y++) {
+ for (x = 0; x < s->width[p]; x++) {
+ dst[x] = lut[(srcyy[x] << s->depthx) | srcxx[x]];
+ }
+
+ dst += out->linesize[p];
+ srcxx += srcx->linesize[p];
+ srcyy += srcy->linesize[p];
+ }
+ }
+}
+
+static void lut2_16bit(struct LUT2Context *s, AVFrame *out, AVFrame *srcx, AVFrame *srcy)
+{
+ int p, y, x;
+
+ for (p = 0; p < s->nb_planes; p++) {
+ const uint16_t *lut = s->lut[p];
+ const uint16_t *srcxx, *srcyy;
+ uint16_t *dst;
+
+ dst = (uint16_t *)out->data[p];
+ srcxx = (uint16_t *)srcx->data[p];
+ srcyy = (uint16_t *)srcy->data[p];
+
+ for (y = 0; y < s->height[p]; y++) {
+ for (x = 0; x < s->width[p]; x++) {
+ dst[x] = lut[(srcyy[x] << s->depthx) | srcxx[x]];
+ }
+
+ dst += out->linesize[p] / 2;
+ srcxx += srcx->linesize[p] / 2;
+ srcyy += srcy->linesize[p] / 2;
+ }
+ }
+}
+
+static int process_frame(FFFrameSync *fs)
+{
+ AVFilterContext *ctx = fs->parent;
+ LUT2Context *s = fs->opaque;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFrame *out, *srcx, *srcy;
+ int ret;
+
+ if ((ret = ff_framesync_get_frame(&s->fs, 0, &srcx, 0)) < 0 ||
+ (ret = ff_framesync_get_frame(&s->fs, 1, &srcy, 0)) < 0)
+ return ret;
+
+ if (ctx->is_disabled) {
+ out = av_frame_clone(srcx);
+ if (!out)
+ return AVERROR(ENOMEM);
+ } else {
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out)
+ return AVERROR(ENOMEM);
+ av_frame_copy_props(out, srcx);
+
+ s->lut2(s, out, srcx, srcy);
+ }
+
+ out->pts = av_rescale_q(s->fs.pts, s->fs.time_base, outlink->time_base);
+
+ return ff_filter_frame(outlink, out);
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ LUT2Context *s = ctx->priv;
+ AVFilterLink *srcx = ctx->inputs[0];
+ AVFilterLink *srcy = ctx->inputs[1];
+ FFFrameSyncIn *in;
+ int p, ret;
+
+ s->depth = s->depthx + s->depthy;
+
+ if (srcx->format != srcy->format) {
+ av_log(ctx, AV_LOG_ERROR, "inputs must be of same pixel format\n");
+ return AVERROR(EINVAL);
+ }
+ if (srcx->w != srcy->w ||
+ srcx->h != srcy->h ||
+ srcx->sample_aspect_ratio.num != srcy->sample_aspect_ratio.num ||
+ srcx->sample_aspect_ratio.den != srcy->sample_aspect_ratio.den) {
+ av_log(ctx, AV_LOG_ERROR, "First input link %s parameters "
+ "(size %dx%d, SAR %d:%d) do not match the corresponding "
+ "second input link %s parameters (%dx%d, SAR %d:%d)\n",
+ ctx->input_pads[0].name, srcx->w, srcx->h,
+ srcx->sample_aspect_ratio.num,
+ srcx->sample_aspect_ratio.den,
+ ctx->input_pads[1].name,
+ srcy->w, srcy->h,
+ srcy->sample_aspect_ratio.num,
+ srcy->sample_aspect_ratio.den);
+ return AVERROR(EINVAL);
+ }
+
+ outlink->w = srcx->w;
+ outlink->h = srcx->h;
+ outlink->time_base = srcx->time_base;
+ outlink->sample_aspect_ratio = srcx->sample_aspect_ratio;
+ outlink->frame_rate = srcx->frame_rate;
+
+ if ((ret = ff_framesync_init(&s->fs, ctx, 2)) < 0)
+ return ret;
+
+ in = s->fs.in;
+ in[0].time_base = srcx->time_base;
+ in[1].time_base = srcy->time_base;
+ in[0].sync = 1;
+ in[0].before = EXT_STOP;
+ in[0].after = EXT_INFINITY;
+ in[1].sync = 1;
+ in[1].before = EXT_STOP;
+ in[1].after = EXT_INFINITY;
+ s->fs.opaque = s;
+ s->fs.on_event = process_frame;
+
+ s->lut2 = s->depth > 16 ? lut2_16bit : lut2_8bit;
+
+ for (p = 0; p < s->nb_planes; p++) {
+ s->lut[p] = av_malloc_array(1 << s->depth, sizeof(uint16_t));
+ if (!s->lut[p])
+ return AVERROR(ENOMEM);
+ }
+
+ for (p = 0; p < s->nb_planes; p++) {
+ double res;
+ int x, y;
+
+ /* create the parsed expression */
+ av_expr_free(s->comp_expr[p]);
+ s->comp_expr[p] = NULL;
+ ret = av_expr_parse(&s->comp_expr[p], s->comp_expr_str[p],
+ var_names, NULL, NULL, NULL, NULL, 0, ctx);
+ if (ret < 0) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Error when parsing the expression '%s' for the component %d.\n",
+ s->comp_expr_str[p], p);
+ return AVERROR(EINVAL);
+ }
+
+ /* compute the lut */
+ for (y = 0; y < (1 << s->depthx); y++) {
+ s->var_values[VAR_Y] = y;
+ for (x = 0; x < (1 << s->depthx); x++) {
+ s->var_values[VAR_X] = x;
+ res = av_expr_eval(s->comp_expr[p], s->var_values, s);
+ if (isnan(res)) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Error when evaluating the expression '%s' for the values %d and %d for the component %d.\n",
+ s->comp_expr_str[p], x, y, p);
+ return AVERROR(EINVAL);
+ }
+
+ s->lut[p][(y << s->depthx) + x] = res;
+ }
+ }
+ }
+
+ return ff_framesync_configure(&s->fs);
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
+{
+ LUT2Context *s = inlink->dst->priv;
+ return ff_framesync_filter_frame(&s->fs, inlink, buf);
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ LUT2Context *s = outlink->src->priv;
+ return ff_framesync_request_frame(&s->fs, outlink);
+}
+
+static const AVFilterPad inputs[] = {
+ {
+ .name = "srcx",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_inputx,
+ },
+ {
+ .name = "srcy",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_inputy,
+ },
+ { NULL }
+};
+
+static const AVFilterPad outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(lut2);
+
+AVFilter ff_vf_lut2 = {
+ .name = "lut2",
+ .description = NULL_IF_CONFIG_SMALL("Compute and apply a lookup table from two video inputs."),
+ .priv_size = sizeof(LUT2Context),
+ .priv_class = &lut2_class,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = inputs,
+ .outputs = outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
+};
diff --git a/libavfilter/vf_lut3d.c b/libavfilter/vf_lut3d.c
new file mode 100644
index 0000000000..7a294b0761
--- /dev/null
+++ b/libavfilter/vf_lut3d.c
@@ -0,0 +1,820 @@
+/*
+ * Copyright (c) 2013 Clément Bœsch
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * 3D Lookup table filter
+ */
+
+#include "libavutil/opt.h"
+#include "libavutil/file.h"
+#include "libavutil/intreadwrite.h"
+#include "libavutil/avassert.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/avstring.h"
+#include "avfilter.h"
+#include "drawutils.h"
+#include "dualinput.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+#define R 0
+#define G 1
+#define B 2
+#define A 3
+
+enum interp_mode {
+ INTERPOLATE_NEAREST,
+ INTERPOLATE_TRILINEAR,
+ INTERPOLATE_TETRAHEDRAL,
+ NB_INTERP_MODE
+};
+
+struct rgbvec {
+ float r, g, b;
+};
+
+/* 3D LUT don't often go up to level 32, but it is common to have a Hald CLUT
+ * of 512x512 (64x64x64) */
+#define MAX_LEVEL 64
+
+typedef struct LUT3DContext {
+ const AVClass *class;
+ int interpolation; ///<interp_mode
+ char *file;
+ uint8_t rgba_map[4];
+ int step;
+ avfilter_action_func *interp;
+ struct rgbvec lut[MAX_LEVEL][MAX_LEVEL][MAX_LEVEL];
+ int lutsize;
+#if CONFIG_HALDCLUT_FILTER
+ uint8_t clut_rgba_map[4];
+ int clut_step;
+ int clut_is16bit;
+ int clut_width;
+ FFDualInputContext dinput;
+#endif
+} LUT3DContext;
+
+typedef struct ThreadData {
+ AVFrame *in, *out;
+} ThreadData;
+
+#define OFFSET(x) offsetof(LUT3DContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+#define COMMON_OPTIONS \
+ { "interp", "select interpolation mode", OFFSET(interpolation), AV_OPT_TYPE_INT, {.i64=INTERPOLATE_TETRAHEDRAL}, 0, NB_INTERP_MODE-1, FLAGS, "interp_mode" }, \
+ { "nearest", "use values from the nearest defined points", 0, AV_OPT_TYPE_CONST, {.i64=INTERPOLATE_NEAREST}, INT_MIN, INT_MAX, FLAGS, "interp_mode" }, \
+ { "trilinear", "interpolate values using the 8 points defining a cube", 0, AV_OPT_TYPE_CONST, {.i64=INTERPOLATE_TRILINEAR}, INT_MIN, INT_MAX, FLAGS, "interp_mode" }, \
+ { "tetrahedral", "interpolate values using a tetrahedron", 0, AV_OPT_TYPE_CONST, {.i64=INTERPOLATE_TETRAHEDRAL}, INT_MIN, INT_MAX, FLAGS, "interp_mode" }, \
+ { NULL }
+
+static inline float lerpf(float v0, float v1, float f)
+{
+ return v0 + (v1 - v0) * f;
+}
+
+static inline struct rgbvec lerp(const struct rgbvec *v0, const struct rgbvec *v1, float f)
+{
+ struct rgbvec v = {
+ lerpf(v0->r, v1->r, f), lerpf(v0->g, v1->g, f), lerpf(v0->b, v1->b, f)
+ };
+ return v;
+}
+
+#define NEAR(x) ((int)((x) + .5))
+#define PREV(x) ((int)(x))
+#define NEXT(x) (FFMIN((int)(x) + 1, lut3d->lutsize - 1))
+
+/**
+ * Get the nearest defined point
+ */
+static inline struct rgbvec interp_nearest(const LUT3DContext *lut3d,
+ const struct rgbvec *s)
+{
+ return lut3d->lut[NEAR(s->r)][NEAR(s->g)][NEAR(s->b)];
+}
+
+/**
+ * Interpolate using the 8 vertices of a cube
+ * @see https://en.wikipedia.org/wiki/Trilinear_interpolation
+ */
+static inline struct rgbvec interp_trilinear(const LUT3DContext *lut3d,
+ const struct rgbvec *s)
+{
+ const int prev[] = {PREV(s->r), PREV(s->g), PREV(s->b)};
+ const int next[] = {NEXT(s->r), NEXT(s->g), NEXT(s->b)};
+ const struct rgbvec d = {s->r - prev[0], s->g - prev[1], s->b - prev[2]};
+ const struct rgbvec c000 = lut3d->lut[prev[0]][prev[1]][prev[2]];
+ const struct rgbvec c001 = lut3d->lut[prev[0]][prev[1]][next[2]];
+ const struct rgbvec c010 = lut3d->lut[prev[0]][next[1]][prev[2]];
+ const struct rgbvec c011 = lut3d->lut[prev[0]][next[1]][next[2]];
+ const struct rgbvec c100 = lut3d->lut[next[0]][prev[1]][prev[2]];
+ const struct rgbvec c101 = lut3d->lut[next[0]][prev[1]][next[2]];
+ const struct rgbvec c110 = lut3d->lut[next[0]][next[1]][prev[2]];
+ const struct rgbvec c111 = lut3d->lut[next[0]][next[1]][next[2]];
+ const struct rgbvec c00 = lerp(&c000, &c100, d.r);
+ const struct rgbvec c10 = lerp(&c010, &c110, d.r);
+ const struct rgbvec c01 = lerp(&c001, &c101, d.r);
+ const struct rgbvec c11 = lerp(&c011, &c111, d.r);
+ const struct rgbvec c0 = lerp(&c00, &c10, d.g);
+ const struct rgbvec c1 = lerp(&c01, &c11, d.g);
+ const struct rgbvec c = lerp(&c0, &c1, d.b);
+ return c;
+}
+
+/**
+ * Tetrahedral interpolation. Based on code found in Truelight Software Library paper.
+ * @see http://www.filmlight.ltd.uk/pdf/whitepapers/FL-TL-TN-0057-SoftwareLib.pdf
+ */
+static inline struct rgbvec interp_tetrahedral(const LUT3DContext *lut3d,
+ const struct rgbvec *s)
+{
+ const int prev[] = {PREV(s->r), PREV(s->g), PREV(s->b)};
+ const int next[] = {NEXT(s->r), NEXT(s->g), NEXT(s->b)};
+ const struct rgbvec d = {s->r - prev[0], s->g - prev[1], s->b - prev[2]};
+ const struct rgbvec c000 = lut3d->lut[prev[0]][prev[1]][prev[2]];
+ const struct rgbvec c111 = lut3d->lut[next[0]][next[1]][next[2]];
+ struct rgbvec c;
+ if (d.r > d.g) {
+ if (d.g > d.b) {
+ const struct rgbvec c100 = lut3d->lut[next[0]][prev[1]][prev[2]];
+ const struct rgbvec c110 = lut3d->lut[next[0]][next[1]][prev[2]];
+ c.r = (1-d.r) * c000.r + (d.r-d.g) * c100.r + (d.g-d.b) * c110.r + (d.b) * c111.r;
+ c.g = (1-d.r) * c000.g + (d.r-d.g) * c100.g + (d.g-d.b) * c110.g + (d.b) * c111.g;
+ c.b = (1-d.r) * c000.b + (d.r-d.g) * c100.b + (d.g-d.b) * c110.b + (d.b) * c111.b;
+ } else if (d.r > d.b) {
+ const struct rgbvec c100 = lut3d->lut[next[0]][prev[1]][prev[2]];
+ const struct rgbvec c101 = lut3d->lut[next[0]][prev[1]][next[2]];
+ c.r = (1-d.r) * c000.r + (d.r-d.b) * c100.r + (d.b-d.g) * c101.r + (d.g) * c111.r;
+ c.g = (1-d.r) * c000.g + (d.r-d.b) * c100.g + (d.b-d.g) * c101.g + (d.g) * c111.g;
+ c.b = (1-d.r) * c000.b + (d.r-d.b) * c100.b + (d.b-d.g) * c101.b + (d.g) * c111.b;
+ } else {
+ const struct rgbvec c001 = lut3d->lut[prev[0]][prev[1]][next[2]];
+ const struct rgbvec c101 = lut3d->lut[next[0]][prev[1]][next[2]];
+ c.r = (1-d.b) * c000.r + (d.b-d.r) * c001.r + (d.r-d.g) * c101.r + (d.g) * c111.r;
+ c.g = (1-d.b) * c000.g + (d.b-d.r) * c001.g + (d.r-d.g) * c101.g + (d.g) * c111.g;
+ c.b = (1-d.b) * c000.b + (d.b-d.r) * c001.b + (d.r-d.g) * c101.b + (d.g) * c111.b;
+ }
+ } else {
+ if (d.b > d.g) {
+ const struct rgbvec c001 = lut3d->lut[prev[0]][prev[1]][next[2]];
+ const struct rgbvec c011 = lut3d->lut[prev[0]][next[1]][next[2]];
+ c.r = (1-d.b) * c000.r + (d.b-d.g) * c001.r + (d.g-d.r) * c011.r + (d.r) * c111.r;
+ c.g = (1-d.b) * c000.g + (d.b-d.g) * c001.g + (d.g-d.r) * c011.g + (d.r) * c111.g;
+ c.b = (1-d.b) * c000.b + (d.b-d.g) * c001.b + (d.g-d.r) * c011.b + (d.r) * c111.b;
+ } else if (d.b > d.r) {
+ const struct rgbvec c010 = lut3d->lut[prev[0]][next[1]][prev[2]];
+ const struct rgbvec c011 = lut3d->lut[prev[0]][next[1]][next[2]];
+ c.r = (1-d.g) * c000.r + (d.g-d.b) * c010.r + (d.b-d.r) * c011.r + (d.r) * c111.r;
+ c.g = (1-d.g) * c000.g + (d.g-d.b) * c010.g + (d.b-d.r) * c011.g + (d.r) * c111.g;
+ c.b = (1-d.g) * c000.b + (d.g-d.b) * c010.b + (d.b-d.r) * c011.b + (d.r) * c111.b;
+ } else {
+ const struct rgbvec c010 = lut3d->lut[prev[0]][next[1]][prev[2]];
+ const struct rgbvec c110 = lut3d->lut[next[0]][next[1]][prev[2]];
+ c.r = (1-d.g) * c000.r + (d.g-d.r) * c010.r + (d.r-d.b) * c110.r + (d.b) * c111.r;
+ c.g = (1-d.g) * c000.g + (d.g-d.r) * c010.g + (d.r-d.b) * c110.g + (d.b) * c111.g;
+ c.b = (1-d.g) * c000.b + (d.g-d.r) * c010.b + (d.r-d.b) * c110.b + (d.b) * c111.b;
+ }
+ }
+ return c;
+}
+
+#define DEFINE_INTERP_FUNC(name, nbits) \
+static int interp_##nbits##_##name(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs) \
+{ \
+ int x, y; \
+ const LUT3DContext *lut3d = ctx->priv; \
+ const ThreadData *td = arg; \
+ const AVFrame *in = td->in; \
+ const AVFrame *out = td->out; \
+ const int direct = out == in; \
+ const int step = lut3d->step; \
+ const uint8_t r = lut3d->rgba_map[R]; \
+ const uint8_t g = lut3d->rgba_map[G]; \
+ const uint8_t b = lut3d->rgba_map[B]; \
+ const uint8_t a = lut3d->rgba_map[A]; \
+ const int slice_start = (in->height * jobnr ) / nb_jobs; \
+ const int slice_end = (in->height * (jobnr+1)) / nb_jobs; \
+ uint8_t *dstrow = out->data[0] + slice_start * out->linesize[0]; \
+ const uint8_t *srcrow = in ->data[0] + slice_start * in ->linesize[0]; \
+ const float scale = (1. / ((1<<nbits) - 1)) * (lut3d->lutsize - 1); \
+ \
+ for (y = slice_start; y < slice_end; y++) { \
+ uint##nbits##_t *dst = (uint##nbits##_t *)dstrow; \
+ const uint##nbits##_t *src = (const uint##nbits##_t *)srcrow; \
+ for (x = 0; x < in->width * step; x += step) { \
+ const struct rgbvec scaled_rgb = {src[x + r] * scale, \
+ src[x + g] * scale, \
+ src[x + b] * scale}; \
+ struct rgbvec vec = interp_##name(lut3d, &scaled_rgb); \
+ dst[x + r] = av_clip_uint##nbits(vec.r * (float)((1<<nbits) - 1)); \
+ dst[x + g] = av_clip_uint##nbits(vec.g * (float)((1<<nbits) - 1)); \
+ dst[x + b] = av_clip_uint##nbits(vec.b * (float)((1<<nbits) - 1)); \
+ if (!direct && step == 4) \
+ dst[x + a] = src[x + a]; \
+ } \
+ dstrow += out->linesize[0]; \
+ srcrow += in ->linesize[0]; \
+ } \
+ return 0; \
+}
+
+DEFINE_INTERP_FUNC(nearest, 8)
+DEFINE_INTERP_FUNC(trilinear, 8)
+DEFINE_INTERP_FUNC(tetrahedral, 8)
+
+DEFINE_INTERP_FUNC(nearest, 16)
+DEFINE_INTERP_FUNC(trilinear, 16)
+DEFINE_INTERP_FUNC(tetrahedral, 16)
+
+#define MAX_LINE_SIZE 512
+
+static int skip_line(const char *p)
+{
+ while (*p && av_isspace(*p))
+ p++;
+ return !*p || *p == '#';
+}
+
+#define NEXT_LINE(loop_cond) do { \
+ if (!fgets(line, sizeof(line), f)) { \
+ av_log(ctx, AV_LOG_ERROR, "Unexpected EOF\n"); \
+ return AVERROR_INVALIDDATA; \
+ } \
+} while (loop_cond)
+
+/* Basically r g and b float values on each line, with a facultative 3DLUTSIZE
+ * directive; seems to be generated by Davinci */
+static int parse_dat(AVFilterContext *ctx, FILE *f)
+{
+ LUT3DContext *lut3d = ctx->priv;
+ char line[MAX_LINE_SIZE];
+ int i, j, k, size;
+
+ lut3d->lutsize = size = 33;
+
+ NEXT_LINE(skip_line(line));
+ if (!strncmp(line, "3DLUTSIZE ", 10)) {
+ size = strtol(line + 10, NULL, 0);
+ if (size < 2 || size > MAX_LEVEL) {
+ av_log(ctx, AV_LOG_ERROR, "Too large or invalid 3D LUT size\n");
+ return AVERROR(EINVAL);
+ }
+ lut3d->lutsize = size;
+ NEXT_LINE(skip_line(line));
+ }
+ for (k = 0; k < size; k++) {
+ for (j = 0; j < size; j++) {
+ for (i = 0; i < size; i++) {
+ struct rgbvec *vec = &lut3d->lut[k][j][i];
+ if (k != 0 || j != 0 || i != 0)
+ NEXT_LINE(skip_line(line));
+ if (sscanf(line, "%f %f %f", &vec->r, &vec->g, &vec->b) != 3)
+ return AVERROR_INVALIDDATA;
+ }
+ }
+ }
+ return 0;
+}
+
+/* Iridas format */
+static int parse_cube(AVFilterContext *ctx, FILE *f)
+{
+ LUT3DContext *lut3d = ctx->priv;
+ char line[MAX_LINE_SIZE];
+ float min[3] = {0.0, 0.0, 0.0};
+ float max[3] = {1.0, 1.0, 1.0};
+
+ while (fgets(line, sizeof(line), f)) {
+ if (!strncmp(line, "LUT_3D_SIZE ", 12)) {
+ int i, j, k;
+ const int size = strtol(line + 12, NULL, 0);
+
+ if (size < 2 || size > MAX_LEVEL) {
+ av_log(ctx, AV_LOG_ERROR, "Too large or invalid 3D LUT size\n");
+ return AVERROR(EINVAL);
+ }
+ lut3d->lutsize = size;
+ for (k = 0; k < size; k++) {
+ for (j = 0; j < size; j++) {
+ for (i = 0; i < size; i++) {
+ struct rgbvec *vec = &lut3d->lut[i][j][k];
+
+ do {
+try_again:
+ NEXT_LINE(0);
+ if (!strncmp(line, "DOMAIN_", 7)) {
+ float *vals = NULL;
+ if (!strncmp(line + 7, "MIN ", 4)) vals = min;
+ else if (!strncmp(line + 7, "MAX ", 4)) vals = max;
+ if (!vals)
+ return AVERROR_INVALIDDATA;
+ sscanf(line + 11, "%f %f %f", vals, vals + 1, vals + 2);
+ av_log(ctx, AV_LOG_DEBUG, "min: %f %f %f | max: %f %f %f\n",
+ min[0], min[1], min[2], max[0], max[1], max[2]);
+ goto try_again;
+ }
+ } while (skip_line(line));
+ if (sscanf(line, "%f %f %f", &vec->r, &vec->g, &vec->b) != 3)
+ return AVERROR_INVALIDDATA;
+ vec->r *= max[0] - min[0];
+ vec->g *= max[1] - min[1];
+ vec->b *= max[2] - min[2];
+ }
+ }
+ }
+ break;
+ }
+ }
+ return 0;
+}
+
+/* Assume 17x17x17 LUT with a 16-bit depth
+ * FIXME: it seems there are various 3dl formats */
+static int parse_3dl(AVFilterContext *ctx, FILE *f)
+{
+ char line[MAX_LINE_SIZE];
+ LUT3DContext *lut3d = ctx->priv;
+ int i, j, k;
+ const int size = 17;
+ const float scale = 16*16*16;
+
+ lut3d->lutsize = size;
+ NEXT_LINE(skip_line(line));
+ for (k = 0; k < size; k++) {
+ for (j = 0; j < size; j++) {
+ for (i = 0; i < size; i++) {
+ int r, g, b;
+ struct rgbvec *vec = &lut3d->lut[k][j][i];
+
+ NEXT_LINE(skip_line(line));
+ if (sscanf(line, "%d %d %d", &r, &g, &b) != 3)
+ return AVERROR_INVALIDDATA;
+ vec->r = r / scale;
+ vec->g = g / scale;
+ vec->b = b / scale;
+ }
+ }
+ }
+ return 0;
+}
+
+/* Pandora format */
+static int parse_m3d(AVFilterContext *ctx, FILE *f)
+{
+ LUT3DContext *lut3d = ctx->priv;
+ float scale;
+ int i, j, k, size, in = -1, out = -1;
+ char line[MAX_LINE_SIZE];
+ uint8_t rgb_map[3] = {0, 1, 2};
+
+ while (fgets(line, sizeof(line), f)) {
+ if (!strncmp(line, "in", 2)) in = strtol(line + 2, NULL, 0);
+ else if (!strncmp(line, "out", 3)) out = strtol(line + 3, NULL, 0);
+ else if (!strncmp(line, "values", 6)) {
+ const char *p = line + 6;
+#define SET_COLOR(id) do { \
+ while (av_isspace(*p)) \
+ p++; \
+ switch (*p) { \
+ case 'r': rgb_map[id] = 0; break; \
+ case 'g': rgb_map[id] = 1; break; \
+ case 'b': rgb_map[id] = 2; break; \
+ } \
+ while (*p && !av_isspace(*p)) \
+ p++; \
+} while (0)
+ SET_COLOR(0);
+ SET_COLOR(1);
+ SET_COLOR(2);
+ break;
+ }
+ }
+
+ if (in == -1 || out == -1) {
+ av_log(ctx, AV_LOG_ERROR, "in and out must be defined\n");
+ return AVERROR_INVALIDDATA;
+ }
+ if (in < 2 || out < 2 ||
+ in > MAX_LEVEL*MAX_LEVEL*MAX_LEVEL ||
+ out > MAX_LEVEL*MAX_LEVEL*MAX_LEVEL) {
+ av_log(ctx, AV_LOG_ERROR, "invalid in (%d) or out (%d)\n", in, out);
+ return AVERROR_INVALIDDATA;
+ }
+ for (size = 1; size*size*size < in; size++);
+ lut3d->lutsize = size;
+ scale = 1. / (out - 1);
+
+ for (k = 0; k < size; k++) {
+ for (j = 0; j < size; j++) {
+ for (i = 0; i < size; i++) {
+ struct rgbvec *vec = &lut3d->lut[k][j][i];
+ float val[3];
+
+ NEXT_LINE(0);
+ if (sscanf(line, "%f %f %f", val, val + 1, val + 2) != 3)
+ return AVERROR_INVALIDDATA;
+ vec->r = val[rgb_map[0]] * scale;
+ vec->g = val[rgb_map[1]] * scale;
+ vec->b = val[rgb_map[2]] * scale;
+ }
+ }
+ }
+ return 0;
+}
+
+static void set_identity_matrix(LUT3DContext *lut3d, int size)
+{
+ int i, j, k;
+ const float c = 1. / (size - 1);
+
+ lut3d->lutsize = size;
+ for (k = 0; k < size; k++) {
+ for (j = 0; j < size; j++) {
+ for (i = 0; i < size; i++) {
+ struct rgbvec *vec = &lut3d->lut[k][j][i];
+ vec->r = k * c;
+ vec->g = j * c;
+ vec->b = i * c;
+ }
+ }
+ }
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
+ AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA,
+ AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR,
+ AV_PIX_FMT_0RGB, AV_PIX_FMT_0BGR,
+ AV_PIX_FMT_RGB0, AV_PIX_FMT_BGR0,
+ AV_PIX_FMT_RGB48, AV_PIX_FMT_BGR48,
+ AV_PIX_FMT_RGBA64, AV_PIX_FMT_BGRA64,
+ AV_PIX_FMT_NONE
+ };
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ int is16bit = 0;
+ LUT3DContext *lut3d = inlink->dst->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+
+ switch (inlink->format) {
+ case AV_PIX_FMT_RGB48:
+ case AV_PIX_FMT_BGR48:
+ case AV_PIX_FMT_RGBA64:
+ case AV_PIX_FMT_BGRA64:
+ is16bit = 1;
+ }
+
+ ff_fill_rgba_map(lut3d->rgba_map, inlink->format);
+ lut3d->step = av_get_padded_bits_per_pixel(desc) >> (3 + is16bit);
+
+#define SET_FUNC(name) do { \
+ if (is16bit) lut3d->interp = interp_16_##name; \
+ else lut3d->interp = interp_8_##name; \
+} while (0)
+
+ switch (lut3d->interpolation) {
+ case INTERPOLATE_NEAREST: SET_FUNC(nearest); break;
+ case INTERPOLATE_TRILINEAR: SET_FUNC(trilinear); break;
+ case INTERPOLATE_TETRAHEDRAL: SET_FUNC(tetrahedral); break;
+ default:
+ av_assert0(0);
+ }
+
+ return 0;
+}
+
+static AVFrame *apply_lut(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ LUT3DContext *lut3d = ctx->priv;
+ AVFilterLink *outlink = inlink->dst->outputs[0];
+ AVFrame *out;
+ ThreadData td;
+
+ if (av_frame_is_writable(in)) {
+ out = in;
+ } else {
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return NULL;
+ }
+ av_frame_copy_props(out, in);
+ }
+
+ td.in = in;
+ td.out = out;
+ ctx->internal->execute(ctx, lut3d->interp, &td, NULL, FFMIN(outlink->h, ff_filter_get_nb_threads(ctx)));
+
+ if (out != in)
+ av_frame_free(&in);
+
+ return out;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterLink *outlink = inlink->dst->outputs[0];
+ AVFrame *out = apply_lut(inlink, in);
+ if (!out)
+ return AVERROR(ENOMEM);
+ return ff_filter_frame(outlink, out);
+}
+
+#if CONFIG_LUT3D_FILTER
+static const AVOption lut3d_options[] = {
+ { "file", "set 3D LUT file name", OFFSET(file), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
+ COMMON_OPTIONS
+};
+
+AVFILTER_DEFINE_CLASS(lut3d);
+
+static av_cold int lut3d_init(AVFilterContext *ctx)
+{
+ int ret;
+ FILE *f;
+ const char *ext;
+ LUT3DContext *lut3d = ctx->priv;
+
+ if (!lut3d->file) {
+ set_identity_matrix(lut3d, 32);
+ return 0;
+ }
+
+ f = fopen(lut3d->file, "r");
+ if (!f) {
+ ret = AVERROR(errno);
+ av_log(ctx, AV_LOG_ERROR, "%s: %s\n", lut3d->file, av_err2str(ret));
+ return ret;
+ }
+
+ ext = strrchr(lut3d->file, '.');
+ if (!ext) {
+ av_log(ctx, AV_LOG_ERROR, "Unable to guess the format from the extension\n");
+ ret = AVERROR_INVALIDDATA;
+ goto end;
+ }
+ ext++;
+
+ if (!av_strcasecmp(ext, "dat")) {
+ ret = parse_dat(ctx, f);
+ } else if (!av_strcasecmp(ext, "3dl")) {
+ ret = parse_3dl(ctx, f);
+ } else if (!av_strcasecmp(ext, "cube")) {
+ ret = parse_cube(ctx, f);
+ } else if (!av_strcasecmp(ext, "m3d")) {
+ ret = parse_m3d(ctx, f);
+ } else {
+ av_log(ctx, AV_LOG_ERROR, "Unrecognized '.%s' file type\n", ext);
+ ret = AVERROR(EINVAL);
+ }
+
+ if (!ret && !lut3d->lutsize) {
+ av_log(ctx, AV_LOG_ERROR, "3D LUT is empty\n");
+ ret = AVERROR_INVALIDDATA;
+ }
+
+end:
+ fclose(f);
+ return ret;
+}
+
+static const AVFilterPad lut3d_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ { NULL }
+};
+
+static const AVFilterPad lut3d_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_lut3d = {
+ .name = "lut3d",
+ .description = NULL_IF_CONFIG_SMALL("Adjust colors using a 3D LUT."),
+ .priv_size = sizeof(LUT3DContext),
+ .init = lut3d_init,
+ .query_formats = query_formats,
+ .inputs = lut3d_inputs,
+ .outputs = lut3d_outputs,
+ .priv_class = &lut3d_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
+};
+#endif
+
+#if CONFIG_HALDCLUT_FILTER
+
+static void update_clut(LUT3DContext *lut3d, const AVFrame *frame)
+{
+ const uint8_t *data = frame->data[0];
+ const int linesize = frame->linesize[0];
+ const int w = lut3d->clut_width;
+ const int step = lut3d->clut_step;
+ const uint8_t *rgba_map = lut3d->clut_rgba_map;
+ const int level = lut3d->lutsize;
+
+#define LOAD_CLUT(nbits) do { \
+ int i, j, k, x = 0, y = 0; \
+ \
+ for (k = 0; k < level; k++) { \
+ for (j = 0; j < level; j++) { \
+ for (i = 0; i < level; i++) { \
+ const uint##nbits##_t *src = (const uint##nbits##_t *) \
+ (data + y*linesize + x*step); \
+ struct rgbvec *vec = &lut3d->lut[i][j][k]; \
+ vec->r = src[rgba_map[0]] / (float)((1<<(nbits)) - 1); \
+ vec->g = src[rgba_map[1]] / (float)((1<<(nbits)) - 1); \
+ vec->b = src[rgba_map[2]] / (float)((1<<(nbits)) - 1); \
+ if (++x == w) { \
+ x = 0; \
+ y++; \
+ } \
+ } \
+ } \
+ } \
+} while (0)
+
+ if (!lut3d->clut_is16bit) LOAD_CLUT(8);
+ else LOAD_CLUT(16);
+}
+
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ LUT3DContext *lut3d = ctx->priv;
+ int ret;
+
+ outlink->w = ctx->inputs[0]->w;
+ outlink->h = ctx->inputs[0]->h;
+ outlink->time_base = ctx->inputs[0]->time_base;
+ if ((ret = ff_dualinput_init(ctx, &lut3d->dinput)) < 0)
+ return ret;
+ return 0;
+}
+
+static int filter_frame_hald(AVFilterLink *inlink, AVFrame *inpicref)
+{
+ LUT3DContext *s = inlink->dst->priv;
+ return ff_dualinput_filter_frame(&s->dinput, inlink, inpicref);
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ LUT3DContext *s = outlink->src->priv;
+ return ff_dualinput_request_frame(&s->dinput, outlink);
+}
+
+static int config_clut(AVFilterLink *inlink)
+{
+ int size, level, w, h;
+ AVFilterContext *ctx = inlink->dst;
+ LUT3DContext *lut3d = ctx->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+
+ av_assert0(desc);
+
+ lut3d->clut_is16bit = 0;
+ switch (inlink->format) {
+ case AV_PIX_FMT_RGB48:
+ case AV_PIX_FMT_BGR48:
+ case AV_PIX_FMT_RGBA64:
+ case AV_PIX_FMT_BGRA64:
+ lut3d->clut_is16bit = 1;
+ }
+
+ lut3d->clut_step = av_get_padded_bits_per_pixel(desc) >> 3;
+ ff_fill_rgba_map(lut3d->clut_rgba_map, inlink->format);
+
+ if (inlink->w > inlink->h)
+ av_log(ctx, AV_LOG_INFO, "Padding on the right (%dpx) of the "
+ "Hald CLUT will be ignored\n", inlink->w - inlink->h);
+ else if (inlink->w < inlink->h)
+ av_log(ctx, AV_LOG_INFO, "Padding at the bottom (%dpx) of the "
+ "Hald CLUT will be ignored\n", inlink->h - inlink->w);
+ lut3d->clut_width = w = h = FFMIN(inlink->w, inlink->h);
+
+ for (level = 1; level*level*level < w; level++);
+ size = level*level*level;
+ if (size != w) {
+ av_log(ctx, AV_LOG_WARNING, "The Hald CLUT width does not match the level\n");
+ return AVERROR_INVALIDDATA;
+ }
+ av_assert0(w == h && w == size);
+ level *= level;
+ if (level > MAX_LEVEL) {
+ const int max_clut_level = sqrt(MAX_LEVEL);
+ const int max_clut_size = max_clut_level*max_clut_level*max_clut_level;
+ av_log(ctx, AV_LOG_ERROR, "Too large Hald CLUT "
+ "(maximum level is %d, or %dx%d CLUT)\n",
+ max_clut_level, max_clut_size, max_clut_size);
+ return AVERROR(EINVAL);
+ }
+ lut3d->lutsize = level;
+
+ return 0;
+}
+
+static AVFrame *update_apply_clut(AVFilterContext *ctx, AVFrame *main,
+ const AVFrame *second)
+{
+ AVFilterLink *inlink = ctx->inputs[0];
+ update_clut(ctx->priv, second);
+ return apply_lut(inlink, main);
+}
+
+static av_cold int haldclut_init(AVFilterContext *ctx)
+{
+ LUT3DContext *lut3d = ctx->priv;
+ lut3d->dinput.process = update_apply_clut;
+ return 0;
+}
+
+static av_cold void haldclut_uninit(AVFilterContext *ctx)
+{
+ LUT3DContext *lut3d = ctx->priv;
+ ff_dualinput_uninit(&lut3d->dinput);
+}
+
+static const AVOption haldclut_options[] = {
+ { "shortest", "force termination when the shortest input terminates", OFFSET(dinput.shortest), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
+ { "repeatlast", "continue applying the last clut after eos", OFFSET(dinput.repeatlast), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, FLAGS },
+ COMMON_OPTIONS
+};
+
+AVFILTER_DEFINE_CLASS(haldclut);
+
+static const AVFilterPad haldclut_inputs[] = {
+ {
+ .name = "main",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame_hald,
+ .config_props = config_input,
+ },{
+ .name = "clut",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame_hald,
+ .config_props = config_clut,
+ },
+ { NULL }
+};
+
+static const AVFilterPad haldclut_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .request_frame = request_frame,
+ .config_props = config_output,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_haldclut = {
+ .name = "haldclut",
+ .description = NULL_IF_CONFIG_SMALL("Adjust colors using a Hald CLUT."),
+ .priv_size = sizeof(LUT3DContext),
+ .init = haldclut_init,
+ .uninit = haldclut_uninit,
+ .query_formats = query_formats,
+ .inputs = haldclut_inputs,
+ .outputs = haldclut_outputs,
+ .priv_class = &haldclut_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
+};
+#endif
diff --git a/libavfilter/vf_maskedclamp.c b/libavfilter/vf_maskedclamp.c
new file mode 100644
index 0000000000..3e720b1972
--- /dev/null
+++ b/libavfilter/vf_maskedclamp.c
@@ -0,0 +1,345 @@
+/*
+ * Copyright (c) 2016 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/imgutils.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+#include "framesync.h"
+
+#define OFFSET(x) offsetof(MaskedClampContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+typedef struct MaskedClampContext {
+ const AVClass *class;
+
+ int planes;
+ int undershoot;
+ int overshoot;
+
+ int width[4], height[4];
+ int nb_planes;
+ int depth;
+ FFFrameSync fs;
+
+ void (*maskedclamp)(const uint8_t *bsrc, const uint8_t *osrc,
+ const uint8_t *msrc, uint8_t *dst,
+ ptrdiff_t blinesize, ptrdiff_t darklinesize,
+ ptrdiff_t brightlinesize, ptrdiff_t destlinesize,
+ int w, int h, int undershoot, int overshoot);
+} MaskedClampContext;
+
+static const AVOption maskedclamp_options[] = {
+ { "undershoot", "set undershoot", OFFSET(undershoot), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, FLAGS },
+ { "overshoot", "set overshoot", OFFSET(overshoot), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, FLAGS },
+ { "planes", "set planes", OFFSET(planes), AV_OPT_TYPE_INT, {.i64=0xF}, 0, 0xF, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(maskedclamp);
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P,
+ AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
+ AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
+ AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
+ AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
+ AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV440P12,
+ AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV444P14,
+ AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
+ AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9,
+ AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10,
+ AV_PIX_FMT_YUVA420P16, AV_PIX_FMT_YUVA422P16, AV_PIX_FMT_YUVA444P16,
+ AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
+ AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16,
+ AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRAP12, AV_PIX_FMT_GBRAP16,
+ AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY10, AV_PIX_FMT_GRAY12, AV_PIX_FMT_GRAY16,
+ AV_PIX_FMT_NONE
+ };
+
+ return ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+}
+
+static int process_frame(FFFrameSync *fs)
+{
+ AVFilterContext *ctx = fs->parent;
+ MaskedClampContext *s = fs->opaque;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFrame *out, *base, *dark, *bright;
+ int ret;
+
+ if ((ret = ff_framesync_get_frame(&s->fs, 0, &base, 0)) < 0 ||
+ (ret = ff_framesync_get_frame(&s->fs, 1, &dark, 0)) < 0 ||
+ (ret = ff_framesync_get_frame(&s->fs, 2, &bright, 0)) < 0)
+ return ret;
+
+ if (ctx->is_disabled) {
+ out = av_frame_clone(base);
+ if (!out)
+ return AVERROR(ENOMEM);
+ } else {
+ int p;
+
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out)
+ return AVERROR(ENOMEM);
+ av_frame_copy_props(out, base);
+
+ for (p = 0; p < s->nb_planes; p++) {
+ if (!((1 << p) & s->planes)) {
+ av_image_copy_plane(out->data[p], out->linesize[p], base->data[p], base->linesize[p],
+ s->width[p], s->height[p]);
+ continue;
+ }
+
+ s->maskedclamp(base->data[p], dark->data[p],
+ bright->data[p], out->data[p],
+ base->linesize[p], dark->linesize[p],
+ bright->linesize[p], out->linesize[p],
+ s->width[p], s->height[p],
+ s->undershoot, s->overshoot);
+ }
+ }
+ out->pts = av_rescale_q(s->fs.pts, s->fs.time_base, outlink->time_base);
+
+ return ff_filter_frame(outlink, out);
+}
+
+static void maskedclamp8(const uint8_t *bsrc, const uint8_t *darksrc,
+ const uint8_t *brightsrc, uint8_t *dst,
+ ptrdiff_t blinesize, ptrdiff_t darklinesize,
+ ptrdiff_t brightlinesize, ptrdiff_t dlinesize,
+ int w, int h,
+ int undershoot, int overshoot)
+{
+ int x, y;
+
+ for (y = 0; y < h; y++) {
+ for (x = 0; x < w; x++) {
+ if (bsrc[x] < darksrc[x] - undershoot)
+ dst[x] = darksrc[x] - undershoot;
+ else if (bsrc[x] > brightsrc[x] + overshoot)
+ dst[x] = brightsrc[x] + overshoot;
+ else
+ dst[x] = bsrc[x];
+ }
+
+ dst += dlinesize;
+ bsrc += blinesize;
+ darksrc += darklinesize;
+ brightsrc += brightlinesize;
+ }
+}
+
+static void maskedclamp16(const uint8_t *bbsrc, const uint8_t *oosrc,
+ const uint8_t *mmsrc, uint8_t *ddst,
+ ptrdiff_t blinesize, ptrdiff_t darklinesize,
+ ptrdiff_t brightlinesize, ptrdiff_t dlinesize,
+ int w, int h,
+ int undershoot, int overshoot)
+{
+ const uint16_t *bsrc = (const uint16_t *)bbsrc;
+ const uint16_t *darksrc = (const uint16_t *)oosrc;
+ const uint16_t *brightsrc = (const uint16_t *)mmsrc;
+ uint16_t *dst = (uint16_t *)ddst;
+ int x, y;
+
+ dlinesize /= 2;
+ blinesize /= 2;
+ darklinesize /= 2;
+ brightlinesize /= 2;
+
+ for (y = 0; y < h; y++) {
+ for (x = 0; x < w; x++) {
+ if (bsrc[x] < darksrc[x] - undershoot)
+ dst[x] = darksrc[x] - undershoot;
+ else if (bsrc[x] > brightsrc[x] + overshoot)
+ dst[x] = brightsrc[x] + overshoot;
+ else
+ dst[x] = bsrc[x];
+ }
+
+ dst += dlinesize;
+ bsrc += blinesize;
+ darksrc += darklinesize;
+ brightsrc += brightlinesize;
+ }
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ MaskedClampContext *s = ctx->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ int vsub, hsub;
+
+ s->nb_planes = av_pix_fmt_count_planes(inlink->format);
+
+ hsub = desc->log2_chroma_w;
+ vsub = desc->log2_chroma_h;
+ s->height[1] = s->height[2] = AV_CEIL_RSHIFT(inlink->h, vsub);
+ s->height[0] = s->height[3] = inlink->h;
+ s->width[1] = s->width[2] = AV_CEIL_RSHIFT(inlink->w, hsub);
+ s->width[0] = s->width[3] = inlink->w;
+
+ s->depth = desc->comp[0].depth;
+
+ if (desc->comp[0].depth == 8)
+ s->maskedclamp = maskedclamp8;
+ else
+ s->maskedclamp = maskedclamp16;
+
+ return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ MaskedClampContext *s = ctx->priv;
+ AVFilterLink *base = ctx->inputs[0];
+ AVFilterLink *dark = ctx->inputs[1];
+ AVFilterLink *bright = ctx->inputs[2];
+ FFFrameSyncIn *in;
+ int ret;
+
+ if (base->format != dark->format ||
+ base->format != bright->format) {
+ av_log(ctx, AV_LOG_ERROR, "inputs must be of same pixel format\n");
+ return AVERROR(EINVAL);
+ }
+ if (base->w != dark->w ||
+ base->h != dark->h ||
+ base->sample_aspect_ratio.num != dark->sample_aspect_ratio.num ||
+ base->sample_aspect_ratio.den != dark->sample_aspect_ratio.den ||
+ base->w != bright->w ||
+ base->h != bright->h ||
+ base->sample_aspect_ratio.num != bright->sample_aspect_ratio.num ||
+ base->sample_aspect_ratio.den != bright->sample_aspect_ratio.den) {
+ av_log(ctx, AV_LOG_ERROR, "First input link %s parameters "
+ "(size %dx%d, SAR %d:%d) do not match the corresponding "
+ "second input link %s parameters (%dx%d, SAR %d:%d) "
+ "and/or third input link %s parameters (%dx%d, SAR %d:%d)\n",
+ ctx->input_pads[0].name, base->w, base->h,
+ base->sample_aspect_ratio.num,
+ base->sample_aspect_ratio.den,
+ ctx->input_pads[1].name, dark->w, dark->h,
+ dark->sample_aspect_ratio.num,
+ dark->sample_aspect_ratio.den,
+ ctx->input_pads[2].name, bright->w, bright->h,
+ bright->sample_aspect_ratio.num,
+ bright->sample_aspect_ratio.den);
+ return AVERROR(EINVAL);
+ }
+
+ outlink->w = base->w;
+ outlink->h = base->h;
+ outlink->time_base = base->time_base;
+ outlink->sample_aspect_ratio = base->sample_aspect_ratio;
+ outlink->frame_rate = base->frame_rate;
+
+ if ((ret = ff_framesync_init(&s->fs, ctx, 3)) < 0)
+ return ret;
+
+ in = s->fs.in;
+ in[0].time_base = base->time_base;
+ in[1].time_base = dark->time_base;
+ in[2].time_base = bright->time_base;
+ in[0].sync = 1;
+ in[0].before = EXT_STOP;
+ in[0].after = EXT_INFINITY;
+ in[1].sync = 1;
+ in[1].before = EXT_STOP;
+ in[1].after = EXT_INFINITY;
+ in[2].sync = 1;
+ in[2].before = EXT_STOP;
+ in[2].after = EXT_INFINITY;
+ s->fs.opaque = s;
+ s->fs.on_event = process_frame;
+
+ return ff_framesync_configure(&s->fs);
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
+{
+ MaskedClampContext *s = inlink->dst->priv;
+ return ff_framesync_filter_frame(&s->fs, inlink, buf);
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ MaskedClampContext *s = outlink->src->priv;
+ return ff_framesync_request_frame(&s->fs, outlink);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ MaskedClampContext *s = ctx->priv;
+
+ ff_framesync_uninit(&s->fs);
+}
+
+static const AVFilterPad maskedclamp_inputs[] = {
+ {
+ .name = "base",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ {
+ .name = "dark",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ {
+ .name = "bright",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad maskedclamp_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_maskedclamp = {
+ .name = "maskedclamp",
+ .description = NULL_IF_CONFIG_SMALL("Clamp first stream with second stream and third stream."),
+ .priv_size = sizeof(MaskedClampContext),
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = maskedclamp_inputs,
+ .outputs = maskedclamp_outputs,
+ .priv_class = &maskedclamp_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
+};
diff --git a/libavfilter/vf_maskedmerge.c b/libavfilter/vf_maskedmerge.c
new file mode 100644
index 0000000000..2c42d62d8a
--- /dev/null
+++ b/libavfilter/vf_maskedmerge.c
@@ -0,0 +1,313 @@
+/*
+ * Copyright (c) 2015 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/imgutils.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+#include "maskedmerge.h"
+
+#define OFFSET(x) offsetof(MaskedMergeContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption maskedmerge_options[] = {
+ { "planes", "set planes", OFFSET(planes), AV_OPT_TYPE_INT, {.i64=0xF}, 0, 0xF, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(maskedmerge);
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P,
+ AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
+ AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
+ AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
+ AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
+ AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV440P12,
+ AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV444P14,
+ AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
+ AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9,
+ AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10,
+ AV_PIX_FMT_YUVA420P16, AV_PIX_FMT_YUVA422P16, AV_PIX_FMT_YUVA444P16,
+ AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
+ AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16,
+ AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRAP12, AV_PIX_FMT_GBRAP16,
+ AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY10, AV_PIX_FMT_GRAY12, AV_PIX_FMT_GRAY16,
+ AV_PIX_FMT_NONE
+ };
+
+ return ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+}
+
+static int process_frame(FFFrameSync *fs)
+{
+ AVFilterContext *ctx = fs->parent;
+ MaskedMergeContext *s = fs->opaque;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFrame *out, *base, *overlay, *mask;
+ int ret;
+
+ if ((ret = ff_framesync_get_frame(&s->fs, 0, &base, 0)) < 0 ||
+ (ret = ff_framesync_get_frame(&s->fs, 1, &overlay, 0)) < 0 ||
+ (ret = ff_framesync_get_frame(&s->fs, 2, &mask, 0)) < 0)
+ return ret;
+
+ if (ctx->is_disabled) {
+ out = av_frame_clone(base);
+ if (!out)
+ return AVERROR(ENOMEM);
+ } else {
+ int p;
+
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out)
+ return AVERROR(ENOMEM);
+ av_frame_copy_props(out, base);
+
+ for (p = 0; p < s->nb_planes; p++) {
+ if (!((1 << p) & s->planes)) {
+ av_image_copy_plane(out->data[p], out->linesize[p], base->data[p], base->linesize[p],
+ s->width[p], s->height[p]);
+ continue;
+ }
+
+ s->maskedmerge(base->data[p], overlay->data[p],
+ mask->data[p], out->data[p],
+ base->linesize[p], overlay->linesize[p],
+ mask->linesize[p], out->linesize[p],
+ s->width[p], s->height[p],
+ s->half, s->depth);
+ }
+ }
+ out->pts = av_rescale_q(base->pts, s->fs.time_base, outlink->time_base);
+
+ return ff_filter_frame(outlink, out);
+}
+
+static void maskedmerge8(const uint8_t *bsrc, const uint8_t *osrc,
+ const uint8_t *msrc, uint8_t *dst,
+ ptrdiff_t blinesize, ptrdiff_t olinesize,
+ ptrdiff_t mlinesize, ptrdiff_t dlinesize,
+ int w, int h,
+ int half, int shift)
+{
+ int x, y;
+
+ for (y = 0; y < h; y++) {
+ for (x = 0; x < w; x++) {
+ dst[x] = bsrc[x] + ((msrc[x] * (osrc[x] - bsrc[x]) + 128) >> 8);
+ }
+
+ dst += dlinesize;
+ bsrc += blinesize;
+ osrc += olinesize;
+ msrc += mlinesize;
+ }
+}
+
+static void maskedmerge16(const uint8_t *bbsrc, const uint8_t *oosrc,
+ const uint8_t *mmsrc, uint8_t *ddst,
+ ptrdiff_t blinesize, ptrdiff_t olinesize,
+ ptrdiff_t mlinesize, ptrdiff_t dlinesize,
+ int w, int h,
+ int half, int shift)
+{
+ const uint16_t *bsrc = (const uint16_t *)bbsrc;
+ const uint16_t *osrc = (const uint16_t *)oosrc;
+ const uint16_t *msrc = (const uint16_t *)mmsrc;
+ uint16_t *dst = (uint16_t *)ddst;
+ int x, y;
+
+ for (y = 0; y < h; y++) {
+ for (x = 0; x < w; x++) {
+ dst[x] = bsrc[x] + ((msrc[x] * (osrc[x] - bsrc[x]) + half) >> shift);
+ }
+
+ dst += dlinesize / 2;
+ bsrc += blinesize / 2;
+ osrc += olinesize / 2;
+ msrc += mlinesize / 2;
+ }
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ MaskedMergeContext *s = ctx->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ int vsub, hsub;
+
+ s->nb_planes = av_pix_fmt_count_planes(inlink->format);
+
+ hsub = desc->log2_chroma_w;
+ vsub = desc->log2_chroma_h;
+ s->height[1] = s->height[2] = AV_CEIL_RSHIFT(inlink->h, vsub);
+ s->height[0] = s->height[3] = inlink->h;
+ s->width[1] = s->width[2] = AV_CEIL_RSHIFT(inlink->w, hsub);
+ s->width[0] = s->width[3] = inlink->w;
+
+ s->depth = desc->comp[0].depth;
+ s->half = (1 << s->depth) / 2;
+
+ if (desc->comp[0].depth == 8)
+ s->maskedmerge = maskedmerge8;
+ else
+ s->maskedmerge = maskedmerge16;
+
+ if (ARCH_X86)
+ ff_maskedmerge_init_x86(s);
+
+ return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ MaskedMergeContext *s = ctx->priv;
+ AVFilterLink *base = ctx->inputs[0];
+ AVFilterLink *overlay = ctx->inputs[1];
+ AVFilterLink *mask = ctx->inputs[2];
+ FFFrameSyncIn *in;
+ int ret;
+
+ if (base->format != overlay->format ||
+ base->format != mask->format) {
+ av_log(ctx, AV_LOG_ERROR, "inputs must be of same pixel format\n");
+ return AVERROR(EINVAL);
+ }
+ if (base->w != overlay->w ||
+ base->h != overlay->h ||
+ base->sample_aspect_ratio.num != overlay->sample_aspect_ratio.num ||
+ base->sample_aspect_ratio.den != overlay->sample_aspect_ratio.den ||
+ base->w != mask->w ||
+ base->h != mask->h ||
+ base->sample_aspect_ratio.num != mask->sample_aspect_ratio.num ||
+ base->sample_aspect_ratio.den != mask->sample_aspect_ratio.den) {
+ av_log(ctx, AV_LOG_ERROR, "First input link %s parameters "
+ "(size %dx%d, SAR %d:%d) do not match the corresponding "
+ "second input link %s parameters (%dx%d, SAR %d:%d) "
+ "and/or third input link %s parameters (%dx%d, SAR %d:%d)\n",
+ ctx->input_pads[0].name, base->w, base->h,
+ base->sample_aspect_ratio.num,
+ base->sample_aspect_ratio.den,
+ ctx->input_pads[1].name, overlay->w, overlay->h,
+ overlay->sample_aspect_ratio.num,
+ overlay->sample_aspect_ratio.den,
+ ctx->input_pads[2].name, mask->w, mask->h,
+ mask->sample_aspect_ratio.num,
+ mask->sample_aspect_ratio.den);
+ return AVERROR(EINVAL);
+ }
+
+ outlink->w = base->w;
+ outlink->h = base->h;
+ outlink->time_base = base->time_base;
+ outlink->sample_aspect_ratio = base->sample_aspect_ratio;
+ outlink->frame_rate = base->frame_rate;
+
+ if ((ret = ff_framesync_init(&s->fs, ctx, 3)) < 0)
+ return ret;
+
+ in = s->fs.in;
+ in[0].time_base = base->time_base;
+ in[1].time_base = overlay->time_base;
+ in[2].time_base = mask->time_base;
+ in[0].sync = 1;
+ in[0].before = EXT_STOP;
+ in[0].after = EXT_INFINITY;
+ in[1].sync = 1;
+ in[1].before = EXT_STOP;
+ in[1].after = EXT_INFINITY;
+ in[2].sync = 1;
+ in[2].before = EXT_STOP;
+ in[2].after = EXT_INFINITY;
+ s->fs.opaque = s;
+ s->fs.on_event = process_frame;
+
+ return ff_framesync_configure(&s->fs);
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
+{
+ MaskedMergeContext *s = inlink->dst->priv;
+ return ff_framesync_filter_frame(&s->fs, inlink, buf);
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ MaskedMergeContext *s = outlink->src->priv;
+ return ff_framesync_request_frame(&s->fs, outlink);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ MaskedMergeContext *s = ctx->priv;
+
+ ff_framesync_uninit(&s->fs);
+}
+
+static const AVFilterPad maskedmerge_inputs[] = {
+ {
+ .name = "base",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ {
+ .name = "overlay",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ {
+ .name = "mask",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad maskedmerge_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_maskedmerge = {
+ .name = "maskedmerge",
+ .description = NULL_IF_CONFIG_SMALL("Merge first stream with second stream using third stream as mask."),
+ .priv_size = sizeof(MaskedMergeContext),
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = maskedmerge_inputs,
+ .outputs = maskedmerge_outputs,
+ .priv_class = &maskedmerge_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
+};
diff --git a/libavfilter/vf_mcdeint.c b/libavfilter/vf_mcdeint.c
new file mode 100644
index 0000000000..050a8341d9
--- /dev/null
+++ b/libavfilter/vf_mcdeint.c
@@ -0,0 +1,316 @@
+/*
+ * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/**
+ * @file
+ * Motion Compensation Deinterlacer
+ * Ported from MPlayer libmpcodecs/vf_mcdeint.c.
+ *
+ * Known Issues:
+ *
+ * The motion estimation is somewhat at the mercy of the input, if the
+ * input frames are created purely based on spatial interpolation then
+ * for example a thin black line or another random and not
+ * interpolateable pattern will cause problems.
+ * Note: completely ignoring the "unavailable" lines during motion
+ * estimation did not look any better, so the most obvious solution
+ * would be to improve tfields or penalize problematic motion vectors.
+ *
+ * If non iterative ME is used then snow currently ignores the OBMC
+ * window and as a result sometimes creates artifacts.
+ *
+ * Only past frames are used, we should ideally use future frames too,
+ * something like filtering the whole movie in forward and then
+ * backward direction seems like an interesting idea but the current
+ * filter framework is FAR from supporting such things.
+ *
+ * Combining the motion compensated image with the input image also is
+ * not as trivial as it seems, simple blindly taking even lines from
+ * one and odd ones from the other does not work at all as ME/MC
+ * sometimes has nothing in the previous frames which matches the
+ * current. The current algorithm has been found by trial and error
+ * and almost certainly can be improved...
+ */
+
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "libavcodec/avcodec.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+
+enum MCDeintMode {
+ MODE_FAST = 0,
+ MODE_MEDIUM,
+ MODE_SLOW,
+ MODE_EXTRA_SLOW,
+ MODE_NB,
+};
+
+enum MCDeintParity {
+ PARITY_TFF = 0, ///< top field first
+ PARITY_BFF = 1, ///< bottom field first
+};
+
+typedef struct {
+ const AVClass *class;
+ int mode; ///< MCDeintMode
+ int parity; ///< MCDeintParity
+ int qp;
+ AVCodecContext *enc_ctx;
+} MCDeintContext;
+
+#define OFFSET(x) offsetof(MCDeintContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+#define CONST(name, help, val, unit) { name, help, 0, AV_OPT_TYPE_CONST, {.i64=val}, INT_MIN, INT_MAX, FLAGS, unit }
+
+static const AVOption mcdeint_options[] = {
+ { "mode", "set mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=MODE_FAST}, 0, MODE_NB-1, FLAGS, .unit="mode" },
+ CONST("fast", NULL, MODE_FAST, "mode"),
+ CONST("medium", NULL, MODE_MEDIUM, "mode"),
+ CONST("slow", NULL, MODE_SLOW, "mode"),
+ CONST("extra_slow", NULL, MODE_EXTRA_SLOW, "mode"),
+
+ { "parity", "set the assumed picture field parity", OFFSET(parity), AV_OPT_TYPE_INT, {.i64=PARITY_BFF}, -1, 1, FLAGS, "parity" },
+ CONST("tff", "assume top field first", PARITY_TFF, "parity"),
+ CONST("bff", "assume bottom field first", PARITY_BFF, "parity"),
+
+ { "qp", "set qp", OFFSET(qp), AV_OPT_TYPE_INT, {.i64=1}, INT_MIN, INT_MAX, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(mcdeint);
+
+static int config_props(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ MCDeintContext *mcdeint = ctx->priv;
+ AVCodec *enc;
+ AVCodecContext *enc_ctx;
+ AVDictionary *opts = NULL;
+ int ret;
+
+ if (!(enc = avcodec_find_encoder(AV_CODEC_ID_SNOW))) {
+ av_log(ctx, AV_LOG_ERROR, "Snow encoder is not enabled in libavcodec\n");
+ return AVERROR(EINVAL);
+ }
+
+ mcdeint->enc_ctx = avcodec_alloc_context3(enc);
+ if (!mcdeint->enc_ctx)
+ return AVERROR(ENOMEM);
+ enc_ctx = mcdeint->enc_ctx;
+ enc_ctx->width = inlink->w;
+ enc_ctx->height = inlink->h;
+ enc_ctx->time_base = (AVRational){1,25}; // meaningless
+ enc_ctx->gop_size = INT_MAX;
+ enc_ctx->max_b_frames = 0;
+ enc_ctx->pix_fmt = AV_PIX_FMT_YUV420P;
+ enc_ctx->flags = AV_CODEC_FLAG_QSCALE | CODEC_FLAG_LOW_DELAY;
+ enc_ctx->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
+ enc_ctx->global_quality = 1;
+ enc_ctx->me_cmp = enc_ctx->me_sub_cmp = FF_CMP_SAD;
+ enc_ctx->mb_cmp = FF_CMP_SSE;
+ av_dict_set(&opts, "memc_only", "1", 0);
+ av_dict_set(&opts, "no_bitstream", "1", 0);
+
+ switch (mcdeint->mode) {
+ case MODE_EXTRA_SLOW:
+ enc_ctx->refs = 3;
+ case MODE_SLOW:
+ enc_ctx->me_method = ME_ITER;
+ case MODE_MEDIUM:
+ enc_ctx->flags |= AV_CODEC_FLAG_4MV;
+ enc_ctx->dia_size = 2;
+ case MODE_FAST:
+ enc_ctx->flags |= AV_CODEC_FLAG_QPEL;
+ }
+
+ ret = avcodec_open2(enc_ctx, enc, &opts);
+ av_dict_free(&opts);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ MCDeintContext *mcdeint = ctx->priv;
+
+ if (mcdeint->enc_ctx) {
+ avcodec_close(mcdeint->enc_ctx);
+ av_freep(&mcdeint->enc_ctx);
+ }
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE
+ };
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *inpic)
+{
+ MCDeintContext *mcdeint = inlink->dst->priv;
+ AVFilterLink *outlink = inlink->dst->outputs[0];
+ AVFrame *outpic, *frame_dec;
+ AVPacket pkt = {0};
+ int x, y, i, ret, got_frame = 0;
+
+ outpic = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!outpic) {
+ av_frame_free(&inpic);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(outpic, inpic);
+ inpic->quality = mcdeint->qp * FF_QP2LAMBDA;
+
+ av_init_packet(&pkt);
+
+ ret = avcodec_encode_video2(mcdeint->enc_ctx, &pkt, inpic, &got_frame);
+ if (ret < 0)
+ goto end;
+
+ frame_dec = mcdeint->enc_ctx->coded_frame;
+
+ for (i = 0; i < 3; i++) {
+ int is_chroma = !!i;
+ int w = AV_CEIL_RSHIFT(inlink->w, is_chroma);
+ int h = AV_CEIL_RSHIFT(inlink->h, is_chroma);
+ int fils = frame_dec->linesize[i];
+ int srcs = inpic ->linesize[i];
+ int dsts = outpic ->linesize[i];
+
+ for (y = 0; y < h; y++) {
+ if ((y ^ mcdeint->parity) & 1) {
+ for (x = 0; x < w; x++) {
+ uint8_t *filp = &frame_dec->data[i][x + y*fils];
+ uint8_t *srcp = &inpic ->data[i][x + y*srcs];
+ uint8_t *dstp = &outpic ->data[i][x + y*dsts];
+
+ if (y > 0 && y < h-1){
+ int is_edge = x < 3 || x > w-4;
+ int diff0 = filp[-fils] - srcp[-srcs];
+ int diff1 = filp[+fils] - srcp[+srcs];
+ int temp = filp[0];
+
+#define DELTA(j) av_clip(j, -x, w-1-x)
+
+#define GET_SCORE_EDGE(j)\
+ FFABS(srcp[-srcs+DELTA(-1+(j))] - srcp[+srcs+DELTA(-1-(j))])+\
+ FFABS(srcp[-srcs+DELTA(j) ] - srcp[+srcs+DELTA( -(j))])+\
+ FFABS(srcp[-srcs+DELTA(1+(j)) ] - srcp[+srcs+DELTA( 1-(j))])
+
+#define GET_SCORE(j)\
+ FFABS(srcp[-srcs-1+(j)] - srcp[+srcs-1-(j)])+\
+ FFABS(srcp[-srcs +(j)] - srcp[+srcs -(j)])+\
+ FFABS(srcp[-srcs+1+(j)] - srcp[+srcs+1-(j)])
+
+#define CHECK_EDGE(j)\
+ { int score = GET_SCORE_EDGE(j);\
+ if (score < spatial_score){\
+ spatial_score = score;\
+ diff0 = filp[-fils+DELTA(j)] - srcp[-srcs+DELTA(j)];\
+ diff1 = filp[+fils+DELTA(-(j))] - srcp[+srcs+DELTA(-(j))];\
+
+#define CHECK(j)\
+ { int score = GET_SCORE(j);\
+ if (score < spatial_score){\
+ spatial_score= score;\
+ diff0 = filp[-fils+(j)] - srcp[-srcs+(j)];\
+ diff1 = filp[+fils-(j)] - srcp[+srcs-(j)];\
+
+ if (is_edge) {
+ int spatial_score = GET_SCORE_EDGE(0) - 1;
+ CHECK_EDGE(-1) CHECK_EDGE(-2) }} }}
+ CHECK_EDGE( 1) CHECK_EDGE( 2) }} }}
+ } else {
+ int spatial_score = GET_SCORE(0) - 1;
+ CHECK(-1) CHECK(-2) }} }}
+ CHECK( 1) CHECK( 2) }} }}
+ }
+
+
+ if (diff0 + diff1 > 0)
+ temp -= (diff0 + diff1 - FFABS(FFABS(diff0) - FFABS(diff1)) / 2) / 2;
+ else
+ temp -= (diff0 + diff1 + FFABS(FFABS(diff0) - FFABS(diff1)) / 2) / 2;
+ *filp = *dstp = temp > 255U ? ~(temp>>31) : temp;
+ } else {
+ *dstp = *filp;
+ }
+ }
+ }
+ }
+
+ for (y = 0; y < h; y++) {
+ if (!((y ^ mcdeint->parity) & 1)) {
+ for (x = 0; x < w; x++) {
+ frame_dec->data[i][x + y*fils] =
+ outpic ->data[i][x + y*dsts] = inpic->data[i][x + y*srcs];
+ }
+ }
+ }
+ }
+ mcdeint->parity ^= 1;
+
+end:
+ av_packet_unref(&pkt);
+ av_frame_free(&inpic);
+ if (ret < 0) {
+ av_frame_free(&outpic);
+ return ret;
+ }
+ return ff_filter_frame(outlink, outpic);
+}
+
+static const AVFilterPad mcdeint_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_props,
+ },
+ { NULL }
+};
+
+static const AVFilterPad mcdeint_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_mcdeint = {
+ .name = "mcdeint",
+ .description = NULL_IF_CONFIG_SMALL("Apply motion compensating deinterlacing."),
+ .priv_size = sizeof(MCDeintContext),
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = mcdeint_inputs,
+ .outputs = mcdeint_outputs,
+ .priv_class = &mcdeint_class,
+};
diff --git a/libavfilter/vf_mergeplanes.c b/libavfilter/vf_mergeplanes.c
new file mode 100644
index 0000000000..c21104320d
--- /dev/null
+++ b/libavfilter/vf_mergeplanes.c
@@ -0,0 +1,318 @@
+/*
+ * Copyright (c) 2013 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/avstring.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+#include "internal.h"
+#include "framesync.h"
+
+typedef struct InputParam {
+ int depth[4];
+ int nb_planes;
+ int planewidth[4];
+ int planeheight[4];
+} InputParam;
+
+typedef struct MergePlanesContext {
+ const AVClass *class;
+ int64_t mapping;
+ const enum AVPixelFormat out_fmt;
+ int nb_inputs;
+ int nb_planes;
+ int planewidth[4];
+ int planeheight[4];
+ int map[4][2];
+ const AVPixFmtDescriptor *outdesc;
+
+ FFFrameSync fs;
+} MergePlanesContext;
+
+#define OFFSET(x) offsetof(MergePlanesContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption mergeplanes_options[] = {
+ { "mapping", "set input to output plane mapping", OFFSET(mapping), AV_OPT_TYPE_INT, {.i64=0}, 0, 0x33333333, FLAGS },
+ { "format", "set output pixel format", OFFSET(out_fmt), AV_OPT_TYPE_PIXEL_FMT, {.i64=AV_PIX_FMT_YUVA444P}, 0, INT_MAX, .flags=FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(mergeplanes);
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ MergePlanesContext *s = inlink->dst->priv;
+ return ff_framesync_filter_frame(&s->fs, inlink, in);
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ MergePlanesContext *s = ctx->priv;
+ int64_t m = s->mapping;
+ int i, ret;
+
+ s->outdesc = av_pix_fmt_desc_get(s->out_fmt);
+ if (!(s->outdesc->flags & AV_PIX_FMT_FLAG_PLANAR) ||
+ s->outdesc->nb_components < 2) {
+ av_log(ctx, AV_LOG_ERROR, "Only planar formats with more than one component are supported.\n");
+ return AVERROR(EINVAL);
+ }
+ s->nb_planes = av_pix_fmt_count_planes(s->out_fmt);
+
+ for (i = s->nb_planes - 1; i >= 0; i--) {
+ s->map[i][0] = m & 0xf;
+ m >>= 4;
+ s->map[i][1] = m & 0xf;
+ m >>= 4;
+
+ if (s->map[i][0] > 3 || s->map[i][1] > 3) {
+ av_log(ctx, AV_LOG_ERROR, "Mapping with out of range input and/or plane number.\n");
+ return AVERROR(EINVAL);
+ }
+
+ s->nb_inputs = FFMAX(s->nb_inputs, s->map[i][1] + 1);
+ }
+
+ av_assert0(s->nb_inputs && s->nb_inputs <= 4);
+
+ for (i = 0; i < s->nb_inputs; i++) {
+ AVFilterPad pad = { 0 };
+
+ pad.type = AVMEDIA_TYPE_VIDEO;
+ pad.name = av_asprintf("in%d", i);
+ if (!pad.name)
+ return AVERROR(ENOMEM);
+ pad.filter_frame = filter_frame;
+
+ if ((ret = ff_insert_inpad(ctx, i, &pad)) < 0){
+ av_freep(&pad.name);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ MergePlanesContext *s = ctx->priv;
+ AVFilterFormats *formats = NULL;
+ int i, ret;
+
+ s->outdesc = av_pix_fmt_desc_get(s->out_fmt);
+ for (i = 0; av_pix_fmt_desc_get(i); i++) {
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(i);
+ if (desc->comp[0].depth == s->outdesc->comp[0].depth &&
+ (desc->comp[0].depth <= 8 || (desc->flags & AV_PIX_FMT_FLAG_BE) == (s->outdesc->flags & AV_PIX_FMT_FLAG_BE)) &&
+ av_pix_fmt_count_planes(i) == desc->nb_components &&
+ (ret = ff_add_format(&formats, i)) < 0)
+ return ret;
+ }
+
+ for (i = 0; i < s->nb_inputs; i++)
+ if ((ret = ff_formats_ref(formats, &ctx->inputs[i]->out_formats)) < 0)
+ return ret;
+
+ formats = NULL;
+ if ((ret = ff_add_format(&formats, s->out_fmt)) < 0 ||
+ (ret = ff_formats_ref(formats, &ctx->outputs[0]->in_formats)) < 0)
+ return ret;
+
+ return 0;
+}
+
+static int process_frame(FFFrameSync *fs)
+{
+ AVFilterContext *ctx = fs->parent;
+ AVFilterLink *outlink = ctx->outputs[0];
+ MergePlanesContext *s = fs->opaque;
+ AVFrame *in[4] = { NULL };
+ AVFrame *out;
+ int i, ret;
+
+ for (i = 0; i < s->nb_inputs; i++) {
+ if ((ret = ff_framesync_get_frame(&s->fs, i, &in[i], 0)) < 0)
+ return ret;
+ }
+
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out)
+ return AVERROR(ENOMEM);
+ out->pts = av_rescale_q(s->fs.pts, s->fs.time_base, outlink->time_base);
+
+ for (i = 0; i < s->nb_planes; i++) {
+ const int input = s->map[i][1];
+ const int plane = s->map[i][0];
+
+ av_image_copy_plane(out->data[i], out->linesize[i],
+ in[input]->data[plane], in[input]->linesize[plane],
+ s->planewidth[i], s->planeheight[i]);
+ }
+
+ return ff_filter_frame(outlink, out);
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ MergePlanesContext *s = ctx->priv;
+ InputParam inputsp[4];
+ FFFrameSyncIn *in;
+ int i, ret;
+
+ if ((ret = ff_framesync_init(&s->fs, ctx, s->nb_inputs)) < 0)
+ return ret;
+
+ in = s->fs.in;
+ s->fs.opaque = s;
+ s->fs.on_event = process_frame;
+
+ outlink->w = ctx->inputs[0]->w;
+ outlink->h = ctx->inputs[0]->h;
+ outlink->time_base = ctx->inputs[0]->time_base;
+ outlink->frame_rate = ctx->inputs[0]->frame_rate;
+ outlink->sample_aspect_ratio = ctx->inputs[0]->sample_aspect_ratio;
+
+ s->planewidth[1] =
+ s->planewidth[2] = AV_CEIL_RSHIFT(((s->outdesc->comp[1].depth > 8) + 1) * outlink->w, s->outdesc->log2_chroma_w);
+ s->planewidth[0] =
+ s->planewidth[3] = ((s->outdesc->comp[0].depth > 8) + 1) * outlink->w;
+ s->planeheight[1] =
+ s->planeheight[2] = AV_CEIL_RSHIFT(outlink->h, s->outdesc->log2_chroma_h);
+ s->planeheight[0] =
+ s->planeheight[3] = outlink->h;
+
+ for (i = 0; i < s->nb_inputs; i++) {
+ InputParam *inputp = &inputsp[i];
+ AVFilterLink *inlink = ctx->inputs[i];
+ const AVPixFmtDescriptor *indesc = av_pix_fmt_desc_get(inlink->format);
+ int j;
+
+ if (outlink->sample_aspect_ratio.num != inlink->sample_aspect_ratio.num ||
+ outlink->sample_aspect_ratio.den != inlink->sample_aspect_ratio.den) {
+ av_log(ctx, AV_LOG_ERROR, "input #%d link %s SAR %d:%d "
+ "does not match output link %s SAR %d:%d\n",
+ i, ctx->input_pads[i].name,
+ inlink->sample_aspect_ratio.num,
+ inlink->sample_aspect_ratio.den,
+ ctx->output_pads[0].name,
+ outlink->sample_aspect_ratio.num,
+ outlink->sample_aspect_ratio.den);
+ return AVERROR(EINVAL);
+ }
+
+ inputp->planewidth[1] =
+ inputp->planewidth[2] = AV_CEIL_RSHIFT(((indesc->comp[1].depth > 8) + 1) * inlink->w, indesc->log2_chroma_w);
+ inputp->planewidth[0] =
+ inputp->planewidth[3] = ((indesc->comp[0].depth > 8) + 1) * inlink->w;
+ inputp->planeheight[1] =
+ inputp->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, indesc->log2_chroma_h);
+ inputp->planeheight[0] =
+ inputp->planeheight[3] = inlink->h;
+ inputp->nb_planes = av_pix_fmt_count_planes(inlink->format);
+
+ for (j = 0; j < inputp->nb_planes; j++)
+ inputp->depth[j] = indesc->comp[j].depth;
+
+ in[i].time_base = inlink->time_base;
+ in[i].sync = 1;
+ in[i].before = EXT_STOP;
+ in[i].after = EXT_STOP;
+ }
+
+ for (i = 0; i < s->nb_planes; i++) {
+ const int input = s->map[i][1];
+ const int plane = s->map[i][0];
+ InputParam *inputp = &inputsp[input];
+
+ if (plane + 1 > inputp->nb_planes) {
+ av_log(ctx, AV_LOG_ERROR, "input %d does not have %d plane\n",
+ input, plane);
+ goto fail;
+ }
+ if (s->outdesc->comp[i].depth != inputp->depth[plane]) {
+ av_log(ctx, AV_LOG_ERROR, "output plane %d depth %d does not "
+ "match input %d plane %d depth %d\n",
+ i, s->outdesc->comp[i].depth,
+ input, plane, inputp->depth[plane]);
+ goto fail;
+ }
+ if (s->planewidth[i] != inputp->planewidth[plane]) {
+ av_log(ctx, AV_LOG_ERROR, "output plane %d width %d does not "
+ "match input %d plane %d width %d\n",
+ i, s->planewidth[i],
+ input, plane, inputp->planewidth[plane]);
+ goto fail;
+ }
+ if (s->planeheight[i] != inputp->planeheight[plane]) {
+ av_log(ctx, AV_LOG_ERROR, "output plane %d height %d does not "
+ "match input %d plane %d height %d\n",
+ i, s->planeheight[i],
+ input, plane, inputp->planeheight[plane]);
+ goto fail;
+ }
+ }
+
+ return ff_framesync_configure(&s->fs);
+fail:
+ return AVERROR(EINVAL);
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ MergePlanesContext *s = outlink->src->priv;
+ return ff_framesync_request_frame(&s->fs, outlink);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ MergePlanesContext *s = ctx->priv;
+ int i;
+
+ ff_framesync_uninit(&s->fs);
+
+ for (i = 0; i < ctx->nb_inputs; i++)
+ av_freep(&ctx->input_pads[i].name);
+}
+
+static const AVFilterPad mergeplanes_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_mergeplanes = {
+ .name = "mergeplanes",
+ .description = NULL_IF_CONFIG_SMALL("Merge planes."),
+ .priv_size = sizeof(MergePlanesContext),
+ .priv_class = &mergeplanes_class,
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = NULL,
+ .outputs = mergeplanes_outputs,
+ .flags = AVFILTER_FLAG_DYNAMIC_INPUTS,
+};
diff --git a/libavfilter/vf_mestimate.c b/libavfilter/vf_mestimate.c
new file mode 100644
index 0000000000..7ecfe7da60
--- /dev/null
+++ b/libavfilter/vf_mestimate.c
@@ -0,0 +1,377 @@
+/**
+ * Copyright (c) 2016 Davinder Singh (DSM_) <ds.mudhar<@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "motion_estimation.h"
+#include "libavcodec/mathops.h"
+#include "libavutil/avassert.h"
+#include "libavutil/common.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/motion_vector.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+typedef struct MEContext {
+ const AVClass *class;
+ AVMotionEstContext me_ctx;
+ int method; ///< motion estimation method
+
+ int mb_size; ///< macroblock size
+ int search_param; ///< search parameter
+ int b_width, b_height, b_count;
+ int log2_mb_size;
+
+ AVFrame *prev, *cur, *next;
+
+ int (*mv_table[3])[2][2]; ///< motion vectors of current & prev 2 frames
+} MEContext;
+
+#define OFFSET(x) offsetof(MEContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+#define CONST(name, help, val, unit) { name, help, 0, AV_OPT_TYPE_CONST, {.i64=val}, 0, 0, FLAGS, unit }
+
+static const AVOption mestimate_options[] = {
+ { "method", "motion estimation method", OFFSET(method), AV_OPT_TYPE_INT, {.i64 = AV_ME_METHOD_ESA}, AV_ME_METHOD_ESA, AV_ME_METHOD_UMH, FLAGS, "method" },
+ CONST("esa", "exhaustive search", AV_ME_METHOD_ESA, "method"),
+ CONST("tss", "three step search", AV_ME_METHOD_TSS, "method"),
+ CONST("tdls", "two dimensional logarithmic search", AV_ME_METHOD_TDLS, "method"),
+ CONST("ntss", "new three step search", AV_ME_METHOD_NTSS, "method"),
+ CONST("fss", "four step search", AV_ME_METHOD_FSS, "method"),
+ CONST("ds", "diamond search", AV_ME_METHOD_DS, "method"),
+ CONST("hexbs", "hexagon-based search", AV_ME_METHOD_HEXBS, "method"),
+ CONST("epzs", "enhanced predictive zonal search", AV_ME_METHOD_EPZS, "method"),
+ CONST("umh", "uneven multi-hexagon search", AV_ME_METHOD_UMH, "method"),
+ { "mb_size", "macroblock size", OFFSET(mb_size), AV_OPT_TYPE_INT, {.i64 = 16}, 8, INT_MAX, FLAGS },
+ { "search_param", "search parameter", OFFSET(search_param), AV_OPT_TYPE_INT, {.i64 = 7}, 4, INT_MAX, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(mestimate);
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
+ AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P,
+ AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
+ AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
+ AV_PIX_FMT_YUVJ411P,
+ AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P,
+ AV_PIX_FMT_GRAY8,
+ AV_PIX_FMT_NONE
+ };
+
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ MEContext *s = inlink->dst->priv;
+ int i;
+
+ s->log2_mb_size = av_ceil_log2_c(s->mb_size);
+ s->mb_size = 1 << s->log2_mb_size;
+
+ s->b_width = inlink->w >> s->log2_mb_size;
+ s->b_height = inlink->h >> s->log2_mb_size;
+ s->b_count = s->b_width * s->b_height;
+
+ for (i = 0; i < 3; i++) {
+ s->mv_table[i] = av_mallocz_array(s->b_count, sizeof(*s->mv_table[0]));
+ if (!s->mv_table[i])
+ return AVERROR(ENOMEM);
+ }
+
+ ff_me_init_context(&s->me_ctx, s->mb_size, s->search_param, inlink->w, inlink->h, 0, (s->b_width - 1) << s->log2_mb_size, 0, (s->b_height - 1) << s->log2_mb_size);
+
+ return 0;
+}
+
+static void add_mv_data(AVMotionVector *mv, int mb_size,
+ int x, int y, int x_mv, int y_mv, int dir)
+{
+ mv->w = mb_size;
+ mv->h = mb_size;
+ mv->dst_x = x + (mb_size >> 1);
+ mv->dst_y = y + (mb_size >> 1);
+ mv->src_x = x_mv + (mb_size >> 1);
+ mv->src_y = y_mv + (mb_size >> 1);
+ mv->source = dir ? 1 : -1;
+ mv->flags = 0;
+}
+
+#define SEARCH_MV(method)\
+ do {\
+ for (mb_y = 0; mb_y < s->b_height; mb_y++)\
+ for (mb_x = 0; mb_x < s->b_width; mb_x++) {\
+ const int x_mb = mb_x << s->log2_mb_size;\
+ const int y_mb = mb_y << s->log2_mb_size;\
+ int mv[2] = {x_mb, y_mb};\
+ ff_me_search_##method(me_ctx, x_mb, y_mb, mv);\
+ add_mv_data(((AVMotionVector *) sd->data) + mv_count++, me_ctx->mb_size, x_mb, y_mb, mv[0], mv[1], dir);\
+ }\
+ } while (0)
+
+#define ADD_PRED(preds, px, py)\
+ do {\
+ preds.mvs[preds.nb][0] = px;\
+ preds.mvs[preds.nb][1] = py;\
+ preds.nb++;\
+ } while(0)
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
+{
+ AVFilterContext *ctx = inlink->dst;
+ MEContext *s = ctx->priv;
+ AVMotionEstContext *me_ctx = &s->me_ctx;
+ AVFrameSideData *sd;
+ AVFrame *out;
+ int mb_x, mb_y, dir;
+ int32_t mv_count = 0;
+ int ret;
+
+ if (frame->pts == AV_NOPTS_VALUE) {
+ ret = ff_filter_frame(ctx->outputs[0], frame);
+ return ret;
+ }
+
+ av_frame_free(&s->prev);
+ s->prev = s->cur;
+ s->cur = s->next;
+ s->next = frame;
+
+ s->mv_table[2] = memcpy(s->mv_table[2], s->mv_table[1], sizeof(*s->mv_table[1]) * s->b_count);
+ s->mv_table[1] = memcpy(s->mv_table[1], s->mv_table[0], sizeof(*s->mv_table[0]) * s->b_count);
+
+ if (!s->cur) {
+ s->cur = av_frame_clone(frame);
+ if (!s->cur)
+ return AVERROR(ENOMEM);
+ }
+
+ if (!s->prev)
+ return 0;
+
+ out = av_frame_clone(s->cur);
+ if (!out)
+ return AVERROR(ENOMEM);
+
+ sd = av_frame_new_side_data(out, AV_FRAME_DATA_MOTION_VECTORS, 2 * s->b_count * sizeof(AVMotionVector));
+ if (!sd) {
+ av_frame_free(&out);
+ return AVERROR(ENOMEM);
+ }
+
+ me_ctx->data_cur = s->cur->data[0];
+ me_ctx->linesize = s->cur->linesize[0];
+
+ for (dir = 0; dir < 2; dir++) {
+ me_ctx->data_ref = (dir ? s->next : s->prev)->data[0];
+
+ if (s->method == AV_ME_METHOD_DS)
+ SEARCH_MV(ds);
+ else if (s->method == AV_ME_METHOD_ESA)
+ SEARCH_MV(esa);
+ else if (s->method == AV_ME_METHOD_FSS)
+ SEARCH_MV(fss);
+ else if (s->method == AV_ME_METHOD_NTSS)
+ SEARCH_MV(ntss);
+ else if (s->method == AV_ME_METHOD_TDLS)
+ SEARCH_MV(tdls);
+ else if (s->method == AV_ME_METHOD_TSS)
+ SEARCH_MV(tss);
+ else if (s->method == AV_ME_METHOD_HEXBS)
+ SEARCH_MV(hexbs);
+ else if (s->method == AV_ME_METHOD_UMH) {
+ for (mb_y = 0; mb_y < s->b_height; mb_y++)
+ for (mb_x = 0; mb_x < s->b_width; mb_x++) {
+ const int mb_i = mb_x + mb_y * s->b_width;
+ const int x_mb = mb_x << s->log2_mb_size;
+ const int y_mb = mb_y << s->log2_mb_size;
+ int mv[2] = {x_mb, y_mb};
+
+ AVMotionEstPredictor *preds = me_ctx->preds;
+ preds[0].nb = 0;
+
+ ADD_PRED(preds[0], 0, 0);
+
+ //left mb in current frame
+ if (mb_x > 0)
+ ADD_PRED(preds[0], s->mv_table[0][mb_i - 1][dir][0], s->mv_table[0][mb_i - 1][dir][1]);
+
+ if (mb_y > 0) {
+ //top mb in current frame
+ ADD_PRED(preds[0], s->mv_table[0][mb_i - s->b_width][dir][0], s->mv_table[0][mb_i - s->b_width][dir][1]);
+
+ //top-right mb in current frame
+ if (mb_x + 1 < s->b_width)
+ ADD_PRED(preds[0], s->mv_table[0][mb_i - s->b_width + 1][dir][0], s->mv_table[0][mb_i - s->b_width + 1][dir][1]);
+ //top-left mb in current frame
+ else if (mb_x > 0)
+ ADD_PRED(preds[0], s->mv_table[0][mb_i - s->b_width - 1][dir][0], s->mv_table[0][mb_i - s->b_width - 1][dir][1]);
+ }
+
+ //median predictor
+ if (preds[0].nb == 4) {
+ me_ctx->pred_x = mid_pred(preds[0].mvs[1][0], preds[0].mvs[2][0], preds[0].mvs[3][0]);
+ me_ctx->pred_y = mid_pred(preds[0].mvs[1][1], preds[0].mvs[2][1], preds[0].mvs[3][1]);
+ } else if (preds[0].nb == 3) {
+ me_ctx->pred_x = mid_pred(0, preds[0].mvs[1][0], preds[0].mvs[2][0]);
+ me_ctx->pred_y = mid_pred(0, preds[0].mvs[1][1], preds[0].mvs[2][1]);
+ } else if (preds[0].nb == 2) {
+ me_ctx->pred_x = preds[0].mvs[1][0];
+ me_ctx->pred_y = preds[0].mvs[1][1];
+ } else {
+ me_ctx->pred_x = 0;
+ me_ctx->pred_y = 0;
+ }
+
+ ff_me_search_umh(me_ctx, x_mb, y_mb, mv);
+
+ s->mv_table[0][mb_i][dir][0] = mv[0] - x_mb;
+ s->mv_table[0][mb_i][dir][1] = mv[1] - y_mb;
+ add_mv_data(((AVMotionVector *) sd->data) + mv_count++, me_ctx->mb_size, x_mb, y_mb, mv[0], mv[1], dir);
+ }
+
+ } else if (s->method == AV_ME_METHOD_EPZS) {
+
+ for (mb_y = 0; mb_y < s->b_height; mb_y++)
+ for (mb_x = 0; mb_x < s->b_width; mb_x++) {
+ const int mb_i = mb_x + mb_y * s->b_width;
+ const int x_mb = mb_x << s->log2_mb_size;
+ const int y_mb = mb_y << s->log2_mb_size;
+ int mv[2] = {x_mb, y_mb};
+
+ AVMotionEstPredictor *preds = me_ctx->preds;
+ preds[0].nb = 0;
+ preds[1].nb = 0;
+
+ ADD_PRED(preds[0], 0, 0);
+
+ //left mb in current frame
+ if (mb_x > 0)
+ ADD_PRED(preds[0], s->mv_table[0][mb_i - 1][dir][0], s->mv_table[0][mb_i - 1][dir][1]);
+
+ //top mb in current frame
+ if (mb_y > 0)
+ ADD_PRED(preds[0], s->mv_table[0][mb_i - s->b_width][dir][0], s->mv_table[0][mb_i - s->b_width][dir][1]);
+
+ //top-right mb in current frame
+ if (mb_y > 0 && mb_x + 1 < s->b_width)
+ ADD_PRED(preds[0], s->mv_table[0][mb_i - s->b_width + 1][dir][0], s->mv_table[0][mb_i - s->b_width + 1][dir][1]);
+
+ //median predictor
+ if (preds[0].nb == 4) {
+ me_ctx->pred_x = mid_pred(preds[0].mvs[1][0], preds[0].mvs[2][0], preds[0].mvs[3][0]);
+ me_ctx->pred_y = mid_pred(preds[0].mvs[1][1], preds[0].mvs[2][1], preds[0].mvs[3][1]);
+ } else if (preds[0].nb == 3) {
+ me_ctx->pred_x = mid_pred(0, preds[0].mvs[1][0], preds[0].mvs[2][0]);
+ me_ctx->pred_y = mid_pred(0, preds[0].mvs[1][1], preds[0].mvs[2][1]);
+ } else if (preds[0].nb == 2) {
+ me_ctx->pred_x = preds[0].mvs[1][0];
+ me_ctx->pred_y = preds[0].mvs[1][1];
+ } else {
+ me_ctx->pred_x = 0;
+ me_ctx->pred_y = 0;
+ }
+
+ //collocated mb in prev frame
+ ADD_PRED(preds[0], s->mv_table[1][mb_i][dir][0], s->mv_table[1][mb_i][dir][1]);
+
+ //accelerator motion vector of collocated block in prev frame
+ ADD_PRED(preds[1], s->mv_table[1][mb_i][dir][0] + (s->mv_table[1][mb_i][dir][0] - s->mv_table[2][mb_i][dir][0]),
+ s->mv_table[1][mb_i][dir][1] + (s->mv_table[1][mb_i][dir][1] - s->mv_table[2][mb_i][dir][1]));
+
+ //left mb in prev frame
+ if (mb_x > 0)
+ ADD_PRED(preds[1], s->mv_table[1][mb_i - 1][dir][0], s->mv_table[1][mb_i - 1][dir][1]);
+
+ //top mb in prev frame
+ if (mb_y > 0)
+ ADD_PRED(preds[1], s->mv_table[1][mb_i - s->b_width][dir][0], s->mv_table[1][mb_i - s->b_width][dir][1]);
+
+ //right mb in prev frame
+ if (mb_x + 1 < s->b_width)
+ ADD_PRED(preds[1], s->mv_table[1][mb_i + 1][dir][0], s->mv_table[1][mb_i + 1][dir][1]);
+
+ //bottom mb in prev frame
+ if (mb_y + 1 < s->b_height)
+ ADD_PRED(preds[1], s->mv_table[1][mb_i + s->b_width][dir][0], s->mv_table[1][mb_i + s->b_width][dir][1]);
+
+ ff_me_search_epzs(me_ctx, x_mb, y_mb, mv);
+
+ s->mv_table[0][mb_i][dir][0] = mv[0] - x_mb;
+ s->mv_table[0][mb_i][dir][1] = mv[1] - y_mb;
+ add_mv_data(((AVMotionVector *) sd->data) + mv_count++, s->mb_size, x_mb, y_mb, mv[0], mv[1], dir);
+ }
+ }
+ }
+
+ return ff_filter_frame(ctx->outputs[0], out);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ MEContext *s = ctx->priv;
+ int i;
+
+ av_frame_free(&s->prev);
+ av_frame_free(&s->cur);
+ av_frame_free(&s->next);
+
+ for (i = 0; i < 3; i++)
+ av_freep(&s->mv_table[i]);
+}
+
+static const AVFilterPad mestimate_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ { NULL }
+};
+
+static const AVFilterPad mestimate_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_mestimate = {
+ .name = "mestimate",
+ .description = NULL_IF_CONFIG_SMALL("Generate motion vectors."),
+ .priv_size = sizeof(MEContext),
+ .priv_class = &mestimate_class,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = mestimate_inputs,
+ .outputs = mestimate_outputs,
+};
diff --git a/libavfilter/vf_midequalizer.c b/libavfilter/vf_midequalizer.c
new file mode 100644
index 0000000000..b95a86dd70
--- /dev/null
+++ b/libavfilter/vf_midequalizer.c
@@ -0,0 +1,390 @@
+/*
+ * Copyright (c) 2017 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/imgutils.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+#include "framesync.h"
+
+typedef struct MidEqualizerContext {
+ const AVClass *class;
+ int width[2][4], height[2][4];
+ int nb_planes;
+ int planes;
+ int histogram_size;
+ float *histogram[2];
+ unsigned *cchange;
+ FFFrameSync fs;
+
+ void (*midequalizer)(const uint8_t *in0, const uint8_t *in1,
+ uint8_t *dst,
+ ptrdiff_t linesize1, ptrdiff_t linesize2,
+ ptrdiff_t dlinesize,
+ int w0, int h0,
+ int w1, int h1,
+ float *histogram1, float *histogram2,
+ unsigned *cchange, size_t hsize);
+} MidEqualizerContext;
+
+#define OFFSET(x) offsetof(MidEqualizerContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption midequalizer_options[] = {
+ { "planes", "set planes", OFFSET(planes), AV_OPT_TYPE_INT, {.i64=0xF}, 0, 0xF, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(midequalizer);
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P,
+ AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
+ AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
+ AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP,
+ AV_PIX_FMT_GRAY8,
+ AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
+ AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
+ AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12,
+ AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV444P14,
+ AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10, AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14,
+ AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9,
+ AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10,
+ AV_PIX_FMT_GBRAP10, AV_PIX_FMT_GBRAP12,
+ AV_PIX_FMT_NONE
+ };
+
+ return ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+}
+
+static int process_frame(FFFrameSync *fs)
+{
+ AVFilterContext *ctx = fs->parent;
+ MidEqualizerContext *s = fs->opaque;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFrame *out, *in0, *in1;
+ int ret;
+
+ if ((ret = ff_framesync_get_frame(&s->fs, 0, &in0, 0)) < 0 ||
+ (ret = ff_framesync_get_frame(&s->fs, 1, &in1, 0)) < 0)
+ return ret;
+
+ if (ctx->is_disabled) {
+ out = av_frame_clone(in0);
+ if (!out)
+ return AVERROR(ENOMEM);
+ } else {
+ int p;
+
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out)
+ return AVERROR(ENOMEM);
+ av_frame_copy_props(out, in0);
+
+ for (p = 0; p < s->nb_planes; p++) {
+ if (!((1 << p) & s->planes)) {
+ av_image_copy_plane(out->data[p], out->linesize[p], in0->data[p], in0->linesize[p],
+ s->width[0][p] * (1 + (s->histogram_size > 256)), s->height[0][p]);
+ continue;
+ }
+
+ s->midequalizer(in0->data[p], in1->data[p],
+ out->data[p],
+ in0->linesize[p], in1->linesize[p],
+ out->linesize[p],
+ s->width[0][p], s->height[0][p],
+ s->width[1][p], s->height[1][p],
+ s->histogram[0], s->histogram[1],
+ s->cchange, s->histogram_size);
+ }
+ }
+ out->pts = av_rescale_q(in0->pts, s->fs.time_base, outlink->time_base);
+
+ return ff_filter_frame(outlink, out);
+}
+
+static void compute_histogram8(const uint8_t *src, ptrdiff_t linesize,
+ int w, int h, float *histogram, size_t hsize)
+{
+ int y, x;
+
+ memset(histogram, 0, hsize * sizeof(*histogram));
+
+ for (y = 0; y < h; y++) {
+ for (x = 0; x < w; x++) {
+ histogram[src[x]] += 1;
+ }
+ src += linesize;
+ }
+
+ for (x = 0; x < hsize - 1; x++) {
+ histogram[x + 1] += histogram[x];
+ histogram[x] /= hsize;
+ }
+ histogram[x] /= hsize;
+}
+
+static void compute_histogram16(const uint16_t *src, ptrdiff_t linesize,
+ int w, int h, float *histogram, size_t hsize)
+{
+ int y, x;
+
+ memset(histogram, 0, hsize * sizeof(*histogram));
+
+ for (y = 0; y < h; y++) {
+ for (x = 0; x < w; x++) {
+ histogram[src[x]] += 1;
+ }
+ src += linesize;
+ }
+
+ for (x = 0; x < hsize - 1; x++) {
+ histogram[x + 1] += histogram[x];
+ histogram[x] /= hsize;
+ }
+ histogram[x] /= hsize;
+}
+
+static void compute_contrast_change(float *histogram1, float *histogram2,
+ unsigned *cchange, size_t hsize)
+{
+ int i;
+
+ for (i = 0; i < hsize; i++) {
+ int j;
+
+ for (j = 0; j < hsize && histogram2[j] < histogram1[i]; j++);
+
+ cchange[i] = (i + j) / 2;
+ }
+}
+
+static void midequalizer8(const uint8_t *in0, const uint8_t *in1,
+ uint8_t *dst,
+ ptrdiff_t linesize1, ptrdiff_t linesize2,
+ ptrdiff_t dlinesize,
+ int w0, int h0,
+ int w1, int h1,
+ float *histogram1, float *histogram2,
+ unsigned *cchange,
+ size_t hsize)
+{
+ int x, y;
+
+ compute_histogram8(in0, linesize1, w0, h0, histogram1, hsize);
+ compute_histogram8(in1, linesize2, w1, h1, histogram2, hsize);
+
+ compute_contrast_change(histogram1, histogram2, cchange, hsize);
+
+ for (y = 0; y < h0; y++) {
+ for (x = 0; x < w0; x++) {
+ dst[x] = av_clip_uint8(cchange[in0[x]]);
+ }
+ dst += dlinesize;
+ in0 += linesize1;
+ }
+}
+
+static void midequalizer16(const uint8_t *in0, const uint8_t *in1,
+ uint8_t *dst,
+ ptrdiff_t linesize1, ptrdiff_t linesize2,
+ ptrdiff_t dlinesize,
+ int w0, int h0,
+ int w1, int h1,
+ float *histogram1, float *histogram2,
+ unsigned *cchange,
+ size_t hsize)
+{
+ const uint16_t *i = (const uint16_t *)in0;
+ uint16_t *d = (uint16_t *)dst;
+ int x, y;
+
+ compute_histogram16(i, linesize1 / 2, w0, h0, histogram1, hsize);
+ compute_histogram16((const uint16_t *)in1, linesize2 / 2, w1, h1, histogram2, hsize);
+
+ compute_contrast_change(histogram1, histogram2, cchange, hsize);
+
+ for (y = 0; y < h0; y++) {
+ for (x = 0; x < w0; x++) {
+ d[x] = cchange[i[x]];
+ }
+ d += dlinesize / 2;
+ i += linesize1 / 2;
+ }
+}
+
+static int config_input0(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ MidEqualizerContext *s = ctx->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ int vsub, hsub;
+
+ s->nb_planes = av_pix_fmt_count_planes(inlink->format);
+
+ hsub = desc->log2_chroma_w;
+ vsub = desc->log2_chroma_h;
+
+ s->height[0][0] = s->height[0][3] = inlink->h;
+ s->width[0][0] = s->width[0][3] = inlink->w;
+ s->height[0][1] = s->height[0][2] = AV_CEIL_RSHIFT(inlink->h, vsub);
+ s->width[0][1] = s->width[0][2] = AV_CEIL_RSHIFT(inlink->w, hsub);
+
+ s->histogram_size = 1 << desc->comp[0].depth;
+
+ s->histogram[0] = av_calloc(s->histogram_size, sizeof(float));
+ s->histogram[1] = av_calloc(s->histogram_size, sizeof(float));
+ s->cchange = av_calloc(s->histogram_size, sizeof(unsigned));
+ if (!s->histogram[0] || !s->histogram[1] || !s->cchange)
+ return AVERROR(ENOMEM);
+
+ if (s->histogram_size == 256) {
+ s->midequalizer = midequalizer8;
+ } else {
+ s->midequalizer = midequalizer16;
+ }
+
+ return 0;
+}
+
+static int config_input1(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ MidEqualizerContext *s = ctx->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ int vsub, hsub;
+
+ s->nb_planes = av_pix_fmt_count_planes(inlink->format);
+
+ hsub = desc->log2_chroma_w;
+ vsub = desc->log2_chroma_h;
+
+ s->height[1][0] = s->height[1][3] = inlink->h;
+ s->width[1][0] = s->width[1][3] = inlink->w;
+ s->height[1][1] = s->height[1][2] = AV_CEIL_RSHIFT(inlink->h, vsub);
+ s->width[1][1] = s->width[1][2] = AV_CEIL_RSHIFT(inlink->w, hsub);
+
+ return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ MidEqualizerContext *s = ctx->priv;
+ AVFilterLink *in0 = ctx->inputs[0];
+ AVFilterLink *in1 = ctx->inputs[1];
+ FFFrameSyncIn *in;
+ int ret;
+
+ if (in0->format != in1->format) {
+ av_log(ctx, AV_LOG_ERROR, "inputs must be of same pixel format\n");
+ return AVERROR(EINVAL);
+ }
+
+ outlink->w = in0->w;
+ outlink->h = in0->h;
+ outlink->time_base = in0->time_base;
+ outlink->sample_aspect_ratio = in0->sample_aspect_ratio;
+ outlink->frame_rate = in0->frame_rate;
+
+ if ((ret = ff_framesync_init(&s->fs, ctx, 2)) < 0)
+ return ret;
+
+ in = s->fs.in;
+ in[0].time_base = in0->time_base;
+ in[1].time_base = in1->time_base;
+ in[0].sync = 1;
+ in[0].before = EXT_STOP;
+ in[0].after = EXT_INFINITY;
+ in[1].sync = 1;
+ in[1].before = EXT_STOP;
+ in[1].after = EXT_INFINITY;
+ s->fs.opaque = s;
+ s->fs.on_event = process_frame;
+
+ return ff_framesync_configure(&s->fs);
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
+{
+ MidEqualizerContext *s = inlink->dst->priv;
+ return ff_framesync_filter_frame(&s->fs, inlink, buf);
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ MidEqualizerContext *s = outlink->src->priv;
+ return ff_framesync_request_frame(&s->fs, outlink);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ MidEqualizerContext *s = ctx->priv;
+
+ ff_framesync_uninit(&s->fs);
+ av_freep(&s->histogram[0]);
+ av_freep(&s->histogram[1]);
+ av_freep(&s->cchange);
+}
+
+static const AVFilterPad midequalizer_inputs[] = {
+ {
+ .name = "in0",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input0,
+ },
+ {
+ .name = "in1",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input1,
+ },
+ { NULL }
+};
+
+static const AVFilterPad midequalizer_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_midequalizer = {
+ .name = "midequalizer",
+ .description = NULL_IF_CONFIG_SMALL("Apply Midway Equalization."),
+ .priv_size = sizeof(MidEqualizerContext),
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = midequalizer_inputs,
+ .outputs = midequalizer_outputs,
+ .priv_class = &midequalizer_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
+};
diff --git a/libavfilter/vf_minterpolate.c b/libavfilter/vf_minterpolate.c
new file mode 100644
index 0000000000..3da696b15e
--- /dev/null
+++ b/libavfilter/vf_minterpolate.c
@@ -0,0 +1,1242 @@
+/**
+ * Copyright (c) 2014-2015 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (c) 2016 Davinder Singh (DSM_) <ds.mudhar<@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "motion_estimation.h"
+#include "libavcodec/mathops.h"
+#include "libavutil/avassert.h"
+#include "libavutil/common.h"
+#include "libavutil/motion_vector.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/pixelutils.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+#define ME_MODE_BIDIR 0
+#define ME_MODE_BILAT 1
+
+#define MC_MODE_OBMC 0
+#define MC_MODE_AOBMC 1
+
+#define SCD_METHOD_NONE 0
+#define SCD_METHOD_FDIFF 1
+
+#define NB_FRAMES 4
+#define NB_PIXEL_MVS 32
+#define NB_CLUSTERS 128
+
+#define ALPHA_MAX 1024
+#define CLUSTER_THRESHOLD 4
+#define PX_WEIGHT_MAX 255
+#define COST_PRED_SCALE 64
+
+static const uint8_t obmc_linear32[1024] = {
+ 0, 0, 0, 0, 4, 4, 4, 4, 4, 4, 4, 4, 8, 8, 8, 8, 8, 8, 8, 8, 4, 4, 4, 4, 4, 4, 4, 4, 0, 0, 0, 0,
+ 0, 4, 4, 4, 8, 8, 8, 12, 12, 16, 16, 16, 20, 20, 20, 24, 24, 20, 20, 20, 16, 16, 16, 12, 12, 8, 8, 8, 4, 4, 4, 0,
+ 0, 4, 8, 8, 12, 12, 16, 20, 20, 24, 28, 28, 32, 32, 36, 40, 40, 36, 32, 32, 28, 28, 24, 20, 20, 16, 12, 12, 8, 8, 4, 0,
+ 0, 4, 8, 12, 16, 20, 24, 28, 28, 32, 36, 40, 44, 48, 52, 56, 56, 52, 48, 44, 40, 36, 32, 28, 28, 24, 20, 16, 12, 8, 4, 0,
+ 4, 8, 12, 16, 20, 24, 28, 32, 40, 44, 48, 52, 56, 60, 64, 68, 68, 64, 60, 56, 52, 48, 44, 40, 32, 28, 24, 20, 16, 12, 8, 4,
+ 4, 8, 12, 20, 24, 32, 36, 40, 48, 52, 56, 64, 68, 76, 80, 84, 84, 80, 76, 68, 64, 56, 52, 48, 40, 36, 32, 24, 20, 12, 8, 4,
+ 4, 8, 16, 24, 28, 36, 44, 48, 56, 60, 68, 76, 80, 88, 96,100,100, 96, 88, 80, 76, 68, 60, 56, 48, 44, 36, 28, 24, 16, 8, 4,
+ 4, 12, 20, 28, 32, 40, 48, 56, 64, 72, 80, 88, 92,100,108,116,116,108,100, 92, 88, 80, 72, 64, 56, 48, 40, 32, 28, 20, 12, 4,
+ 4, 12, 20, 28, 40, 48, 56, 64, 72, 80, 88, 96,108,116,124,132,132,124,116,108, 96, 88, 80, 72, 64, 56, 48, 40, 28, 20, 12, 4,
+ 4, 16, 24, 32, 44, 52, 60, 72, 80, 92,100,108,120,128,136,148,148,136,128,120,108,100, 92, 80, 72, 60, 52, 44, 32, 24, 16, 4,
+ 4, 16, 28, 36, 48, 56, 68, 80, 88,100,112,120,132,140,152,164,164,152,140,132,120,112,100, 88, 80, 68, 56, 48, 36, 28, 16, 4,
+ 4, 16, 28, 40, 52, 64, 76, 88, 96,108,120,132,144,156,168,180,180,168,156,144,132,120,108, 96, 88, 76, 64, 52, 40, 28, 16, 4,
+ 8, 20, 32, 44, 56, 68, 80, 92,108,120,132,144,156,168,180,192,192,180,168,156,144,132,120,108, 92, 80, 68, 56, 44, 32, 20, 8,
+ 8, 20, 32, 48, 60, 76, 88,100,116,128,140,156,168,184,196,208,208,196,184,168,156,140,128,116,100, 88, 76, 60, 48, 32, 20, 8,
+ 8, 20, 36, 52, 64, 80, 96,108,124,136,152,168,180,196,212,224,224,212,196,180,168,152,136,124,108, 96, 80, 64, 52, 36, 20, 8,
+ 8, 24, 40, 56, 68, 84,100,116,132,148,164,180,192,208,224,240,240,224,208,192,180,164,148,132,116,100, 84, 68, 56, 40, 24, 8,
+ 8, 24, 40, 56, 68, 84,100,116,132,148,164,180,192,208,224,240,240,224,208,192,180,164,148,132,116,100, 84, 68, 56, 40, 24, 8,
+ 8, 20, 36, 52, 64, 80, 96,108,124,136,152,168,180,196,212,224,224,212,196,180,168,152,136,124,108, 96, 80, 64, 52, 36, 20, 8,
+ 8, 20, 32, 48, 60, 76, 88,100,116,128,140,156,168,184,196,208,208,196,184,168,156,140,128,116,100, 88, 76, 60, 48, 32, 20, 8,
+ 8, 20, 32, 44, 56, 68, 80, 92,108,120,132,144,156,168,180,192,192,180,168,156,144,132,120,108, 92, 80, 68, 56, 44, 32, 20, 8,
+ 4, 16, 28, 40, 52, 64, 76, 88, 96,108,120,132,144,156,168,180,180,168,156,144,132,120,108, 96, 88, 76, 64, 52, 40, 28, 16, 4,
+ 4, 16, 28, 36, 48, 56, 68, 80, 88,100,112,120,132,140,152,164,164,152,140,132,120,112,100, 88, 80, 68, 56, 48, 36, 28, 16, 4,
+ 4, 16, 24, 32, 44, 52, 60, 72, 80, 92,100,108,120,128,136,148,148,136,128,120,108,100, 92, 80, 72, 60, 52, 44, 32, 24, 16, 4,
+ 4, 12, 20, 28, 40, 48, 56, 64, 72, 80, 88, 96,108,116,124,132,132,124,116,108, 96, 88, 80, 72, 64, 56, 48, 40, 28, 20, 12, 4,
+ 4, 12, 20, 28, 32, 40, 48, 56, 64, 72, 80, 88, 92,100,108,116,116,108,100, 92, 88, 80, 72, 64, 56, 48, 40, 32, 28, 20, 12, 4,
+ 4, 8, 16, 24, 28, 36, 44, 48, 56, 60, 68, 76, 80, 88, 96,100,100, 96, 88, 80, 76, 68, 60, 56, 48, 44, 36, 28, 24, 16, 8, 4,
+ 4, 8, 12, 20, 24, 32, 36, 40, 48, 52, 56, 64, 68, 76, 80, 84, 84, 80, 76, 68, 64, 56, 52, 48, 40, 36, 32, 24, 20, 12, 8, 4,
+ 4, 8, 12, 16, 20, 24, 28, 32, 40, 44, 48, 52, 56, 60, 64, 68, 68, 64, 60, 56, 52, 48, 44, 40, 32, 28, 24, 20, 16, 12, 8, 4,
+ 0, 4, 8, 12, 16, 20, 24, 28, 28, 32, 36, 40, 44, 48, 52, 56, 56, 52, 48, 44, 40, 36, 32, 28, 28, 24, 20, 16, 12, 8, 4, 0,
+ 0, 4, 8, 8, 12, 12, 16, 20, 20, 24, 28, 28, 32, 32, 36, 40, 40, 36, 32, 32, 28, 28, 24, 20, 20, 16, 12, 12, 8, 8, 4, 0,
+ 0, 4, 4, 4, 8, 8, 8, 12, 12, 16, 16, 16, 20, 20, 20, 24, 24, 20, 20, 20, 16, 16, 16, 12, 12, 8, 8, 8, 4, 4, 4, 0,
+ 0, 0, 0, 0, 4, 4, 4, 4, 4, 4, 4, 4, 8, 8, 8, 8, 8, 8, 8, 8, 4, 4, 4, 4, 4, 4, 4, 4, 0, 0, 0, 0,
+};
+
+static const uint8_t obmc_linear16[256] = {
+ 0, 4, 4, 8, 8, 12, 12, 16, 16, 12, 12, 8, 8, 4, 4, 0,
+ 4, 8, 16, 20, 28, 32, 40, 44, 44, 40, 32, 28, 20, 16, 8, 4,
+ 4, 16, 24, 36, 44, 56, 64, 76, 76, 64, 56, 44, 36, 24, 16, 4,
+ 8, 20, 36, 48, 64, 76, 92,104,104, 92, 76, 64, 48, 36, 20, 8,
+ 8, 28, 44, 64, 80,100,116,136,136,116,100, 80, 64, 44, 28, 8,
+ 12, 32, 56, 76,100,120,144,164,164,144,120,100, 76, 56, 32, 12,
+ 12, 40, 64, 92,116,144,168,196,196,168,144,116, 92, 64, 40, 12,
+ 16, 44, 76,104,136,164,196,224,224,196,164,136,104, 76, 44, 16,
+ 16, 44, 76,104,136,164,196,224,224,196,164,136,104, 76, 44, 16,
+ 12, 40, 64, 92,116,144,168,196,196,168,144,116, 92, 64, 40, 12,
+ 12, 32, 56, 76,100,120,144,164,164,144,120,100, 76, 56, 32, 12,
+ 8, 28, 44, 64, 80,100,116,136,136,116,100, 80, 64, 44, 28, 8,
+ 8, 20, 36, 48, 64, 76, 92,104,104, 92, 76, 64, 48, 36, 20, 8,
+ 4, 16, 24, 36, 44, 56, 64, 76, 76, 64, 56, 44, 36, 24, 16, 4,
+ 4, 8, 16, 20, 28, 32, 40, 44, 44, 40, 32, 28, 20, 16, 8, 4,
+ 0, 4, 4, 8, 8, 12, 12, 16, 16, 12, 12, 8, 8, 4, 4, 0,
+};
+
+static const uint8_t obmc_linear8[64] = {
+ 4, 12, 20, 28, 28, 20, 12, 4,
+ 12, 36, 60, 84, 84, 60, 36, 12,
+ 20, 60,100,140,140,100, 60, 20,
+ 28, 84,140,196,196,140, 84, 28,
+ 28, 84,140,196,196,140, 84, 28,
+ 20, 60,100,140,140,100, 60, 20,
+ 12, 36, 60, 84, 84, 60, 36, 12,
+ 4, 12, 20, 28, 28, 20, 12, 4,
+};
+
+static const uint8_t obmc_linear4[16] = {
+ 16, 48, 48, 16,
+ 48,144,144, 48,
+ 48,144,144, 48,
+ 16, 48, 48, 16,
+};
+
+static const uint8_t * const obmc_tab_linear[4]= {
+ obmc_linear32, obmc_linear16, obmc_linear8, obmc_linear4
+};
+
+enum MIMode {
+ MI_MODE_DUP = 0,
+ MI_MODE_BLEND = 1,
+ MI_MODE_MCI = 2,
+};
+
+typedef struct Cluster {
+ int64_t sum[2];
+ int nb;
+} Cluster;
+
+typedef struct Block {
+ int16_t mvs[2][2];
+ int cid;
+ uint64_t sbad;
+ int sb;
+ struct Block *subs;
+} Block;
+
+typedef struct Pixel {
+ int16_t mvs[NB_PIXEL_MVS][2];
+ uint32_t weights[NB_PIXEL_MVS];
+ int8_t refs[NB_PIXEL_MVS];
+ int nb;
+} Pixel;
+
+typedef struct Frame {
+ AVFrame *avf;
+ Block *blocks;
+} Frame;
+
+typedef struct MIContext {
+ const AVClass *class;
+ AVMotionEstContext me_ctx;
+ AVRational frame_rate;
+ enum MIMode mi_mode;
+ int mc_mode;
+ int me_mode;
+ int me_method;
+ int mb_size;
+ int search_param;
+ int vsbmc;
+
+ Frame frames[NB_FRAMES];
+ Cluster clusters[NB_CLUSTERS];
+ Block *int_blocks;
+ Pixel *pixels;
+ int (*mv_table[3])[2][2];
+ int64_t out_pts;
+ int b_width, b_height, b_count;
+ int log2_mb_size;
+
+ int scd_method;
+ int scene_changed;
+ av_pixelutils_sad_fn sad;
+ double prev_mafd;
+ double scd_threshold;
+
+ int log2_chroma_w;
+ int log2_chroma_h;
+ int nb_planes;
+} MIContext;
+
+#define OFFSET(x) offsetof(MIContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+#define CONST(name, help, val, unit) { name, help, 0, AV_OPT_TYPE_CONST, {.i64=val}, 0, 0, FLAGS, unit }
+
+static const AVOption minterpolate_options[] = {
+ { "fps", "output's frame rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "60"}, 0, INT_MAX, FLAGS },
+ { "mi_mode", "motion interpolation mode", OFFSET(mi_mode), AV_OPT_TYPE_INT, {.i64 = MI_MODE_MCI}, MI_MODE_DUP, MI_MODE_MCI, FLAGS, "mi_mode" },
+ CONST("dup", "duplicate frames", MI_MODE_DUP, "mi_mode"),
+ CONST("blend", "blend frames", MI_MODE_BLEND, "mi_mode"),
+ CONST("mci", "motion compensated interpolation", MI_MODE_MCI, "mi_mode"),
+ { "mc_mode", "motion compensation mode", OFFSET(mc_mode), AV_OPT_TYPE_INT, {.i64 = MC_MODE_OBMC}, MC_MODE_OBMC, MC_MODE_AOBMC, FLAGS, "mc_mode" },
+ CONST("obmc", "overlapped block motion compensation", MC_MODE_OBMC, "mc_mode"),
+ CONST("aobmc", "adaptive overlapped block motion compensation", MC_MODE_AOBMC, "mc_mode"),
+ { "me_mode", "motion estimation mode", OFFSET(me_mode), AV_OPT_TYPE_INT, {.i64 = ME_MODE_BILAT}, ME_MODE_BIDIR, ME_MODE_BILAT, FLAGS, "me_mode" },
+ CONST("bidir", "bidirectional motion estimation", ME_MODE_BIDIR, "me_mode"),
+ CONST("bilat", "bilateral motion estimation", ME_MODE_BILAT, "me_mode"),
+ { "me", "motion estimation method", OFFSET(me_method), AV_OPT_TYPE_INT, {.i64 = AV_ME_METHOD_EPZS}, AV_ME_METHOD_ESA, AV_ME_METHOD_UMH, FLAGS, "me" },
+ CONST("esa", "exhaustive search", AV_ME_METHOD_ESA, "me"),
+ CONST("tss", "three step search", AV_ME_METHOD_TSS, "me"),
+ CONST("tdls", "two dimensional logarithmic search", AV_ME_METHOD_TDLS, "me"),
+ CONST("ntss", "new three step search", AV_ME_METHOD_NTSS, "me"),
+ CONST("fss", "four step search", AV_ME_METHOD_FSS, "me"),
+ CONST("ds", "diamond search", AV_ME_METHOD_DS, "me"),
+ CONST("hexbs", "hexagon-based search", AV_ME_METHOD_HEXBS, "me"),
+ CONST("epzs", "enhanced predictive zonal search", AV_ME_METHOD_EPZS, "me"),
+ CONST("umh", "uneven multi-hexagon search", AV_ME_METHOD_UMH, "me"),
+ { "mb_size", "macroblock size", OFFSET(mb_size), AV_OPT_TYPE_INT, {.i64 = 16}, 4, 16, FLAGS },
+ { "search_param", "search parameter", OFFSET(search_param), AV_OPT_TYPE_INT, {.i64 = 32}, 4, INT_MAX, FLAGS },
+ { "vsbmc", "variable-size block motion compensation", OFFSET(vsbmc), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, FLAGS },
+ { "scd", "scene change detection method", OFFSET(scd_method), AV_OPT_TYPE_INT, {.i64 = SCD_METHOD_FDIFF}, SCD_METHOD_NONE, SCD_METHOD_FDIFF, FLAGS, "scene" },
+ CONST("none", "disable detection", SCD_METHOD_NONE, "scene"),
+ CONST("fdiff", "frame difference", SCD_METHOD_FDIFF, "scene"),
+ { "scd_threshold", "scene change threshold", OFFSET(scd_threshold), AV_OPT_TYPE_DOUBLE, {.dbl = 5.0}, 0, 100.0, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(minterpolate);
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
+ AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P,
+ AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
+ AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
+ AV_PIX_FMT_YUVJ411P,
+ AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P,
+ AV_PIX_FMT_GRAY8,
+ AV_PIX_FMT_NONE
+ };
+
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static uint64_t get_sbad(AVMotionEstContext *me_ctx, int x, int y, int x_mv, int y_mv)
+{
+ uint8_t *data_cur = me_ctx->data_cur;
+ uint8_t *data_next = me_ctx->data_ref;
+ int linesize = me_ctx->linesize;
+ int mv_x1 = x_mv - x;
+ int mv_y1 = y_mv - y;
+ int mv_x, mv_y, i, j;
+ uint64_t sbad = 0;
+
+ x = av_clip(x, me_ctx->x_min, me_ctx->x_max);
+ y = av_clip(y, me_ctx->y_min, me_ctx->y_max);
+ mv_x = av_clip(x_mv - x, -FFMIN(x - me_ctx->x_min, me_ctx->x_max - x), FFMIN(x - me_ctx->x_min, me_ctx->x_max - x));
+ mv_y = av_clip(y_mv - y, -FFMIN(y - me_ctx->y_min, me_ctx->y_max - y), FFMIN(y - me_ctx->y_min, me_ctx->y_max - y));
+
+ data_cur += (y + mv_y) * linesize;
+ data_next += (y - mv_y) * linesize;
+
+ for (j = 0; j < me_ctx->mb_size; j++)
+ for (i = 0; i < me_ctx->mb_size; i++)
+ sbad += FFABS(data_cur[x + mv_x + i + j * linesize] - data_next[x - mv_x + i + j * linesize]);
+
+ return sbad + (FFABS(mv_x1 - me_ctx->pred_x) + FFABS(mv_y1 - me_ctx->pred_y)) * COST_PRED_SCALE;
+}
+
+static uint64_t get_sbad_ob(AVMotionEstContext *me_ctx, int x, int y, int x_mv, int y_mv)
+{
+ uint8_t *data_cur = me_ctx->data_cur;
+ uint8_t *data_next = me_ctx->data_ref;
+ int linesize = me_ctx->linesize;
+ int x_min = me_ctx->x_min + me_ctx->mb_size / 2;
+ int x_max = me_ctx->x_max - me_ctx->mb_size / 2;
+ int y_min = me_ctx->y_min + me_ctx->mb_size / 2;
+ int y_max = me_ctx->y_max - me_ctx->mb_size / 2;
+ int mv_x1 = x_mv - x;
+ int mv_y1 = y_mv - y;
+ int mv_x, mv_y, i, j;
+ uint64_t sbad = 0;
+
+ x = av_clip(x, x_min, x_max);
+ y = av_clip(y, y_min, y_max);
+ mv_x = av_clip(x_mv - x, -FFMIN(x - x_min, x_max - x), FFMIN(x - x_min, x_max - x));
+ mv_y = av_clip(y_mv - y, -FFMIN(y - y_min, y_max - y), FFMIN(y - y_min, y_max - y));
+
+ for (j = -me_ctx->mb_size / 2; j < me_ctx->mb_size * 3 / 2; j++)
+ for (i = -me_ctx->mb_size / 2; i < me_ctx->mb_size * 3 / 2; i++)
+ sbad += FFABS(data_cur[x + mv_x + i + (y + mv_y + j) * linesize] - data_next[x - mv_x + i + (y - mv_y + j) * linesize]);
+
+ return sbad + (FFABS(mv_x1 - me_ctx->pred_x) + FFABS(mv_y1 - me_ctx->pred_y)) * COST_PRED_SCALE;
+}
+
+static uint64_t get_sad_ob(AVMotionEstContext *me_ctx, int x, int y, int x_mv, int y_mv)
+{
+ uint8_t *data_ref = me_ctx->data_ref;
+ uint8_t *data_cur = me_ctx->data_cur;
+ int linesize = me_ctx->linesize;
+ int x_min = me_ctx->x_min + me_ctx->mb_size / 2;
+ int x_max = me_ctx->x_max - me_ctx->mb_size / 2;
+ int y_min = me_ctx->y_min + me_ctx->mb_size / 2;
+ int y_max = me_ctx->y_max - me_ctx->mb_size / 2;
+ int mv_x = x_mv - x;
+ int mv_y = y_mv - y;
+ int i, j;
+ uint64_t sad = 0;
+
+ x = av_clip(x, x_min, x_max);
+ y = av_clip(y, y_min, y_max);
+ x_mv = av_clip(x_mv, x_min, x_max);
+ y_mv = av_clip(y_mv, y_min, y_max);
+
+ for (j = -me_ctx->mb_size / 2; j < me_ctx->mb_size * 3 / 2; j++)
+ for (i = -me_ctx->mb_size / 2; i < me_ctx->mb_size * 3 / 2; i++)
+ sad += FFABS(data_ref[x_mv + i + (y_mv + j) * linesize] - data_cur[x + i + (y + j) * linesize]);
+
+ return sad + (FFABS(mv_x - me_ctx->pred_x) + FFABS(mv_y - me_ctx->pred_y)) * COST_PRED_SCALE;
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ MIContext *mi_ctx = inlink->dst->priv;
+ AVMotionEstContext *me_ctx = &mi_ctx->me_ctx;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ const int height = inlink->h;
+ const int width = inlink->w;
+ int i;
+
+ mi_ctx->log2_chroma_h = desc->log2_chroma_h;
+ mi_ctx->log2_chroma_w = desc->log2_chroma_w;
+
+ mi_ctx->nb_planes = av_pix_fmt_count_planes(inlink->format);
+
+ mi_ctx->log2_mb_size = av_ceil_log2_c(mi_ctx->mb_size);
+ mi_ctx->mb_size = 1 << mi_ctx->log2_mb_size;
+
+ mi_ctx->b_width = width >> mi_ctx->log2_mb_size;
+ mi_ctx->b_height = height >> mi_ctx->log2_mb_size;
+ mi_ctx->b_count = mi_ctx->b_width * mi_ctx->b_height;
+
+ for (i = 0; i < NB_FRAMES; i++) {
+ Frame *frame = &mi_ctx->frames[i];
+ frame->blocks = av_mallocz_array(mi_ctx->b_count, sizeof(Block));
+ if (!frame->blocks)
+ return AVERROR(ENOMEM);
+ }
+
+ if (mi_ctx->mi_mode == MI_MODE_MCI) {
+ if (!(mi_ctx->pixels = av_mallocz_array(width * height, sizeof(Pixel))))
+ return AVERROR(ENOMEM);
+
+ if (mi_ctx->me_mode == ME_MODE_BILAT)
+ if (!(mi_ctx->int_blocks = av_mallocz_array(mi_ctx->b_count, sizeof(Block))))
+ return AVERROR(ENOMEM);
+
+ if (mi_ctx->me_method == AV_ME_METHOD_EPZS) {
+ for (i = 0; i < 3; i++) {
+ mi_ctx->mv_table[i] = av_mallocz_array(mi_ctx->b_count, sizeof(*mi_ctx->mv_table[0]));
+ if (!mi_ctx->mv_table[i])
+ return AVERROR(ENOMEM);
+ }
+ }
+ }
+
+ if (mi_ctx->scd_method == SCD_METHOD_FDIFF) {
+ mi_ctx->sad = av_pixelutils_get_sad_fn(3, 3, 2, mi_ctx);
+ if (!mi_ctx->sad)
+ return AVERROR(EINVAL);
+ }
+
+ ff_me_init_context(me_ctx, mi_ctx->mb_size, mi_ctx->search_param, width, height, 0, (mi_ctx->b_width - 1) << mi_ctx->log2_mb_size, 0, (mi_ctx->b_height - 1) << mi_ctx->log2_mb_size);
+
+ if (mi_ctx->me_mode == ME_MODE_BIDIR)
+ me_ctx->get_cost = &get_sad_ob;
+ else if (mi_ctx->me_mode == ME_MODE_BILAT)
+ me_ctx->get_cost = &get_sbad_ob;
+
+ return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ MIContext *mi_ctx = outlink->src->priv;
+
+ outlink->frame_rate = mi_ctx->frame_rate;
+ outlink->time_base = av_inv_q(mi_ctx->frame_rate);
+
+ return 0;
+}
+
+#define ADD_PRED(preds, px, py)\
+ do {\
+ preds.mvs[preds.nb][0] = px;\
+ preds.mvs[preds.nb][1] = py;\
+ preds.nb++;\
+ } while(0)
+
+static void search_mv(MIContext *mi_ctx, Block *blocks, int mb_x, int mb_y, int dir)
+{
+ AVMotionEstContext *me_ctx = &mi_ctx->me_ctx;
+ AVMotionEstPredictor *preds = me_ctx->preds;
+ Block *block = &blocks[mb_x + mb_y * mi_ctx->b_width];
+
+ const int x_mb = mb_x << mi_ctx->log2_mb_size;
+ const int y_mb = mb_y << mi_ctx->log2_mb_size;
+ const int mb_i = mb_x + mb_y * mi_ctx->b_width;
+ int mv[2] = {x_mb, y_mb};
+
+ switch (mi_ctx->me_method) {
+ case AV_ME_METHOD_ESA:
+ ff_me_search_esa(me_ctx, x_mb, y_mb, mv);
+ break;
+ case AV_ME_METHOD_TSS:
+ ff_me_search_tss(me_ctx, x_mb, y_mb, mv);
+ break;
+ case AV_ME_METHOD_TDLS:
+ ff_me_search_tdls(me_ctx, x_mb, y_mb, mv);
+ break;
+ case AV_ME_METHOD_NTSS:
+ ff_me_search_ntss(me_ctx, x_mb, y_mb, mv);
+ break;
+ case AV_ME_METHOD_FSS:
+ ff_me_search_fss(me_ctx, x_mb, y_mb, mv);
+ break;
+ case AV_ME_METHOD_DS:
+ ff_me_search_ds(me_ctx, x_mb, y_mb, mv);
+ break;
+ case AV_ME_METHOD_HEXBS:
+ ff_me_search_hexbs(me_ctx, x_mb, y_mb, mv);
+ break;
+ case AV_ME_METHOD_EPZS:
+
+ preds[0].nb = 0;
+ preds[1].nb = 0;
+
+ ADD_PRED(preds[0], 0, 0);
+
+ //left mb in current frame
+ if (mb_x > 0)
+ ADD_PRED(preds[0], mi_ctx->mv_table[0][mb_i - 1][dir][0], mi_ctx->mv_table[0][mb_i - 1][dir][1]);
+
+ //top mb in current frame
+ if (mb_y > 0)
+ ADD_PRED(preds[0], mi_ctx->mv_table[0][mb_i - mi_ctx->b_width][dir][0], mi_ctx->mv_table[0][mb_i - mi_ctx->b_width][dir][1]);
+
+ //top-right mb in current frame
+ if (mb_y > 0 && mb_x + 1 < mi_ctx->b_width)
+ ADD_PRED(preds[0], mi_ctx->mv_table[0][mb_i - mi_ctx->b_width + 1][dir][0], mi_ctx->mv_table[0][mb_i - mi_ctx->b_width + 1][dir][1]);
+
+ //median predictor
+ if (preds[0].nb == 4) {
+ me_ctx->pred_x = mid_pred(preds[0].mvs[1][0], preds[0].mvs[2][0], preds[0].mvs[3][0]);
+ me_ctx->pred_y = mid_pred(preds[0].mvs[1][1], preds[0].mvs[2][1], preds[0].mvs[3][1]);
+ } else if (preds[0].nb == 3) {
+ me_ctx->pred_x = mid_pred(0, preds[0].mvs[1][0], preds[0].mvs[2][0]);
+ me_ctx->pred_y = mid_pred(0, preds[0].mvs[1][1], preds[0].mvs[2][1]);
+ } else if (preds[0].nb == 2) {
+ me_ctx->pred_x = preds[0].mvs[1][0];
+ me_ctx->pred_y = preds[0].mvs[1][1];
+ } else {
+ me_ctx->pred_x = 0;
+ me_ctx->pred_y = 0;
+ }
+
+ //collocated mb in prev frame
+ ADD_PRED(preds[0], mi_ctx->mv_table[1][mb_i][dir][0], mi_ctx->mv_table[1][mb_i][dir][1]);
+
+ //accelerator motion vector of collocated block in prev frame
+ ADD_PRED(preds[1], mi_ctx->mv_table[1][mb_i][dir][0] + (mi_ctx->mv_table[1][mb_i][dir][0] - mi_ctx->mv_table[2][mb_i][dir][0]),
+ mi_ctx->mv_table[1][mb_i][dir][1] + (mi_ctx->mv_table[1][mb_i][dir][1] - mi_ctx->mv_table[2][mb_i][dir][1]));
+
+ //left mb in prev frame
+ if (mb_x > 0)
+ ADD_PRED(preds[1], mi_ctx->mv_table[1][mb_i - 1][dir][0], mi_ctx->mv_table[1][mb_i - 1][dir][1]);
+
+ //top mb in prev frame
+ if (mb_y > 0)
+ ADD_PRED(preds[1], mi_ctx->mv_table[1][mb_i - mi_ctx->b_width][dir][0], mi_ctx->mv_table[1][mb_i - mi_ctx->b_width][dir][1]);
+
+ //right mb in prev frame
+ if (mb_x + 1 < mi_ctx->b_width)
+ ADD_PRED(preds[1], mi_ctx->mv_table[1][mb_i + 1][dir][0], mi_ctx->mv_table[1][mb_i + 1][dir][1]);
+
+ //bottom mb in prev frame
+ if (mb_y + 1 < mi_ctx->b_height)
+ ADD_PRED(preds[1], mi_ctx->mv_table[1][mb_i + mi_ctx->b_width][dir][0], mi_ctx->mv_table[1][mb_i + mi_ctx->b_width][dir][1]);
+
+ ff_me_search_epzs(me_ctx, x_mb, y_mb, mv);
+
+ mi_ctx->mv_table[0][mb_i][dir][0] = mv[0] - x_mb;
+ mi_ctx->mv_table[0][mb_i][dir][1] = mv[1] - y_mb;
+
+ break;
+ case AV_ME_METHOD_UMH:
+
+ preds[0].nb = 0;
+
+ ADD_PRED(preds[0], 0, 0);
+
+ //left mb in current frame
+ if (mb_x > 0)
+ ADD_PRED(preds[0], blocks[mb_i - 1].mvs[dir][0], blocks[mb_i - 1].mvs[dir][1]);
+
+ if (mb_y > 0) {
+ //top mb in current frame
+ ADD_PRED(preds[0], blocks[mb_i - mi_ctx->b_width].mvs[dir][0], blocks[mb_i - mi_ctx->b_width].mvs[dir][1]);
+
+ //top-right mb in current frame
+ if (mb_x + 1 < mi_ctx->b_width)
+ ADD_PRED(preds[0], blocks[mb_i - mi_ctx->b_width + 1].mvs[dir][0], blocks[mb_i - mi_ctx->b_width + 1].mvs[dir][1]);
+ //top-left mb in current frame
+ else if (mb_x > 0)
+ ADD_PRED(preds[0], blocks[mb_i - mi_ctx->b_width - 1].mvs[dir][0], blocks[mb_i - mi_ctx->b_width - 1].mvs[dir][1]);
+ }
+
+ //median predictor
+ if (preds[0].nb == 4) {
+ me_ctx->pred_x = mid_pred(preds[0].mvs[1][0], preds[0].mvs[2][0], preds[0].mvs[3][0]);
+ me_ctx->pred_y = mid_pred(preds[0].mvs[1][1], preds[0].mvs[2][1], preds[0].mvs[3][1]);
+ } else if (preds[0].nb == 3) {
+ me_ctx->pred_x = mid_pred(0, preds[0].mvs[1][0], preds[0].mvs[2][0]);
+ me_ctx->pred_y = mid_pred(0, preds[0].mvs[1][1], preds[0].mvs[2][1]);
+ } else if (preds[0].nb == 2) {
+ me_ctx->pred_x = preds[0].mvs[1][0];
+ me_ctx->pred_y = preds[0].mvs[1][1];
+ } else {
+ me_ctx->pred_x = 0;
+ me_ctx->pred_y = 0;
+ }
+
+ ff_me_search_umh(me_ctx, x_mb, y_mb, mv);
+
+ break;
+ }
+
+ block->mvs[dir][0] = mv[0] - x_mb;
+ block->mvs[dir][1] = mv[1] - y_mb;
+}
+
+static void bilateral_me(MIContext *mi_ctx)
+{
+ Block *block;
+ int mb_x, mb_y;
+
+ for (mb_y = 0; mb_y < mi_ctx->b_height; mb_y++)
+ for (mb_x = 0; mb_x < mi_ctx->b_width; mb_x++) {
+ block = &mi_ctx->int_blocks[mb_x + mb_y * mi_ctx->b_width];
+
+ block->cid = 0;
+ block->sb = 0;
+
+ block->mvs[0][0] = 0;
+ block->mvs[0][1] = 0;
+ }
+
+ for (mb_y = 0; mb_y < mi_ctx->b_height; mb_y++)
+ for (mb_x = 0; mb_x < mi_ctx->b_width; mb_x++)
+ search_mv(mi_ctx, mi_ctx->int_blocks, mb_x, mb_y, 0);
+}
+
+static int var_size_bme(MIContext *mi_ctx, Block *block, int x_mb, int y_mb, int n)
+{
+ AVMotionEstContext *me_ctx = &mi_ctx->me_ctx;
+ uint64_t cost_sb, cost_old;
+ int mb_size = me_ctx->mb_size;
+ int search_param = me_ctx->search_param;
+ int mv_x, mv_y;
+ int x, y;
+ int ret;
+
+ me_ctx->mb_size = 1 << n;
+ cost_old = me_ctx->get_cost(me_ctx, x_mb, y_mb, x_mb + block->mvs[0][0], y_mb + block->mvs[0][1]);
+ me_ctx->mb_size = mb_size;
+
+ if (!cost_old) {
+ block->sb = 0;
+ return 0;
+ }
+
+ if (!block->subs) {
+ block->subs = av_mallocz_array(4, sizeof(Block));
+ if (!block->subs)
+ return AVERROR(ENOMEM);
+ }
+
+ block->sb = 1;
+
+ for (y = 0; y < 2; y++)
+ for (x = 0; x < 2; x++) {
+ Block *sb = &block->subs[x + y * 2];
+ int mv[2] = {x_mb + block->mvs[0][0], y_mb + block->mvs[0][1]};
+
+ me_ctx->mb_size = 1 << (n - 1);
+ me_ctx->search_param = 2;
+ me_ctx->pred_x = block->mvs[0][0];
+ me_ctx->pred_y = block->mvs[0][1];
+
+ cost_sb = ff_me_search_ds(&mi_ctx->me_ctx, x_mb + block->mvs[0][0], y_mb + block->mvs[0][1], mv);
+ mv_x = mv[0] - x_mb;
+ mv_y = mv[1] - y_mb;
+
+ me_ctx->mb_size = mb_size;
+ me_ctx->search_param = search_param;
+
+ if (cost_sb < cost_old / 4) {
+ sb->mvs[0][0] = mv_x;
+ sb->mvs[0][1] = mv_y;
+
+ if (n > 1) {
+ if (ret = var_size_bme(mi_ctx, sb, x_mb + (x << (n - 1)), y_mb + (y << (n - 1)), n - 1))
+ return ret;
+ } else
+ sb->sb = 0;
+ } else {
+ block->sb = 0;
+ return 0;
+ }
+ }
+
+ return 0;
+}
+
+static int cluster_mvs(MIContext *mi_ctx)
+{
+ int changed, c, c_max = 0;
+ int mb_x, mb_y, x, y;
+ int mv_x, mv_y, avg_x, avg_y, dx, dy;
+ int d, ret;
+ Block *block;
+ Cluster *cluster, *cluster_new;
+
+ do {
+ changed = 0;
+ for (mb_y = 0; mb_y < mi_ctx->b_height; mb_y++)
+ for (mb_x = 0; mb_x < mi_ctx->b_width; mb_x++) {
+ block = &mi_ctx->int_blocks[mb_x + mb_y * mi_ctx->b_width];
+ c = block->cid;
+ cluster = &mi_ctx->clusters[c];
+ mv_x = block->mvs[0][0];
+ mv_y = block->mvs[0][1];
+
+ if (cluster->nb < 2)
+ continue;
+
+ avg_x = cluster->sum[0] / cluster->nb;
+ avg_y = cluster->sum[1] / cluster->nb;
+ dx = avg_x - mv_x;
+ dy = avg_y - mv_y;
+
+ if (FFABS(avg_x - mv_x) > CLUSTER_THRESHOLD || FFABS(avg_y - mv_y) > CLUSTER_THRESHOLD) {
+
+ for (d = 1; d < 5; d++)
+ for (y = FFMAX(mb_y - d, 0); y < FFMIN(mb_y + d + 1, mi_ctx->b_height); y++)
+ for (x = FFMAX(mb_x - d, 0); x < FFMIN(mb_x + d + 1, mi_ctx->b_width); x++) {
+ Block *nb = &mi_ctx->int_blocks[x + y * mi_ctx->b_width];
+ if (nb->cid > block->cid) {
+ if (nb->cid < c || c == block->cid)
+ c = nb->cid;
+ }
+ }
+
+ if (c == block->cid)
+ c = c_max + 1;
+
+ if (c >= NB_CLUSTERS) {
+ continue;
+ }
+
+ cluster_new = &mi_ctx->clusters[c];
+ cluster_new->sum[0] += mv_x;
+ cluster_new->sum[1] += mv_y;
+ cluster->sum[0] -= mv_x;
+ cluster->sum[1] -= mv_y;
+ cluster_new->nb++;
+ cluster->nb--;
+
+ c_max = FFMAX(c_max, c);
+ block->cid = c;
+
+ changed = 1;
+ }
+ }
+ } while (changed);
+
+ /* find boundaries */
+ for (mb_y = 0; mb_y < mi_ctx->b_height; mb_y++)
+ for (mb_x = 0; mb_x < mi_ctx->b_width; mb_x++) {
+ block = &mi_ctx->int_blocks[mb_x + mb_y * mi_ctx->b_width];
+ for (y = FFMAX(mb_y - 1, 0); y < FFMIN(mb_y + 2, mi_ctx->b_height); y++)
+ for (x = FFMAX(mb_x - 1, 0); x < FFMIN(mb_x + 2, mi_ctx->b_width); x++) {
+ dx = x - mb_x;
+ dy = y - mb_y;
+
+ if ((x - mb_x) && (y - mb_y) || !dx && !dy)
+ continue;
+
+ if (!mb_x || !mb_y || mb_x == mi_ctx->b_width - 1 || mb_y == mi_ctx->b_height - 1)
+ continue;
+
+ if (block->cid != mi_ctx->int_blocks[x + y * mi_ctx->b_width].cid) {
+ if (!dx && block->cid == mi_ctx->int_blocks[x + (mb_y - dy) * mi_ctx->b_width].cid ||
+ !dy && block->cid == mi_ctx->int_blocks[(mb_x - dx) + y * mi_ctx->b_width].cid) {
+ if (ret = var_size_bme(mi_ctx, block, mb_x << mi_ctx->log2_mb_size, mb_y << mi_ctx->log2_mb_size, mi_ctx->log2_mb_size))
+ return ret;
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int inject_frame(AVFilterLink *inlink, AVFrame *avf_in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ MIContext *mi_ctx = ctx->priv;
+ Frame frame_tmp;
+ int mb_x, mb_y, dir;
+
+ av_frame_free(&mi_ctx->frames[0].avf);
+ frame_tmp = mi_ctx->frames[0];
+ memmove(&mi_ctx->frames[0], &mi_ctx->frames[1], sizeof(mi_ctx->frames[0]) * (NB_FRAMES - 1));
+ mi_ctx->frames[NB_FRAMES - 1] = frame_tmp;
+ mi_ctx->frames[NB_FRAMES - 1].avf = avf_in;
+
+ if (mi_ctx->mi_mode == MI_MODE_MCI) {
+
+ if (mi_ctx->me_method == AV_ME_METHOD_EPZS) {
+ mi_ctx->mv_table[2] = memcpy(mi_ctx->mv_table[2], mi_ctx->mv_table[1], sizeof(*mi_ctx->mv_table[1]) * mi_ctx->b_count);
+ mi_ctx->mv_table[1] = memcpy(mi_ctx->mv_table[1], mi_ctx->mv_table[0], sizeof(*mi_ctx->mv_table[0]) * mi_ctx->b_count);
+ }
+
+ if (mi_ctx->me_mode == ME_MODE_BIDIR) {
+
+ if (mi_ctx->frames[1].avf) {
+ for (dir = 0; dir < 2; dir++) {
+ mi_ctx->me_ctx.linesize = mi_ctx->frames[2].avf->linesize[0];
+ mi_ctx->me_ctx.data_cur = mi_ctx->frames[2].avf->data[0];
+ mi_ctx->me_ctx.data_ref = mi_ctx->frames[dir ? 3 : 1].avf->data[0];
+
+ for (mb_y = 0; mb_y < mi_ctx->b_height; mb_y++)
+ for (mb_x = 0; mb_x < mi_ctx->b_width; mb_x++)
+ search_mv(mi_ctx, mi_ctx->frames[2].blocks, mb_x, mb_y, dir);
+ }
+ }
+
+ } else if (mi_ctx->me_mode == ME_MODE_BILAT) {
+ Block *block;
+ int i, ret;
+
+ if (!mi_ctx->frames[0].avf)
+ return 0;
+
+ mi_ctx->me_ctx.linesize = mi_ctx->frames[0].avf->linesize[0];
+ mi_ctx->me_ctx.data_cur = mi_ctx->frames[1].avf->data[0];
+ mi_ctx->me_ctx.data_ref = mi_ctx->frames[2].avf->data[0];
+
+ bilateral_me(mi_ctx);
+
+ if (mi_ctx->mc_mode == MC_MODE_AOBMC) {
+
+ for (mb_y = 0; mb_y < mi_ctx->b_height; mb_y++)
+ for (mb_x = 0; mb_x < mi_ctx->b_width; mb_x++) {
+ int x_mb = mb_x << mi_ctx->log2_mb_size;
+ int y_mb = mb_y << mi_ctx->log2_mb_size;
+ block = &mi_ctx->int_blocks[mb_x + mb_y * mi_ctx->b_width];
+
+ block->sbad = get_sbad(&mi_ctx->me_ctx, x_mb, y_mb, x_mb + block->mvs[0][0], y_mb + block->mvs[0][1]);
+ }
+ }
+
+ if (mi_ctx->vsbmc) {
+
+ for (i = 0; i < NB_CLUSTERS; i++) {
+ mi_ctx->clusters[i].sum[0] = 0;
+ mi_ctx->clusters[i].sum[1] = 0;
+ mi_ctx->clusters[i].nb = 0;
+ }
+
+ for (mb_y = 0; mb_y < mi_ctx->b_height; mb_y++)
+ for (mb_x = 0; mb_x < mi_ctx->b_width; mb_x++) {
+ block = &mi_ctx->int_blocks[mb_x + mb_y * mi_ctx->b_width];
+
+ mi_ctx->clusters[0].sum[0] += block->mvs[0][0];
+ mi_ctx->clusters[0].sum[1] += block->mvs[0][1];
+ }
+
+ mi_ctx->clusters[0].nb = mi_ctx->b_count;
+
+ if (ret = cluster_mvs(mi_ctx))
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int detect_scene_change(MIContext *mi_ctx)
+{
+ AVMotionEstContext *me_ctx = &mi_ctx->me_ctx;
+ int x, y;
+ int linesize = me_ctx->linesize;
+ uint8_t *p1 = mi_ctx->frames[1].avf->data[0];
+ uint8_t *p2 = mi_ctx->frames[2].avf->data[0];
+
+ if (mi_ctx->scd_method == SCD_METHOD_FDIFF) {
+ double ret = 0, mafd, diff;
+ int64_t sad;
+
+ for (sad = y = 0; y < me_ctx->height; y += 8)
+ for (x = 0; x < linesize; x += 8)
+ sad += mi_ctx->sad(p1 + x + y * linesize, linesize, p2 + x + y * linesize, linesize);
+
+ emms_c();
+ mafd = (double) sad / (me_ctx->height * me_ctx->width * 3);
+ diff = fabs(mafd - mi_ctx->prev_mafd);
+ ret = av_clipf(FFMIN(mafd, diff), 0, 100.0);
+ mi_ctx->prev_mafd = mafd;
+
+ return ret >= mi_ctx->scd_threshold;
+ }
+
+ return 0;
+}
+
+#define ADD_PIXELS(b_weight, mv_x, mv_y)\
+ do {\
+ if (!b_weight || pixel->nb + 1 >= NB_PIXEL_MVS)\
+ continue;\
+ pixel->refs[pixel->nb] = 1;\
+ pixel->weights[pixel->nb] = b_weight * (ALPHA_MAX - alpha);\
+ pixel->mvs[pixel->nb][0] = av_clip((mv_x * alpha) / ALPHA_MAX, x_min, x_max);\
+ pixel->mvs[pixel->nb][1] = av_clip((mv_y * alpha) / ALPHA_MAX, y_min, y_max);\
+ pixel->nb++;\
+ pixel->refs[pixel->nb] = 2;\
+ pixel->weights[pixel->nb] = b_weight * alpha;\
+ pixel->mvs[pixel->nb][0] = av_clip(-mv_x * (ALPHA_MAX - alpha) / ALPHA_MAX, x_min, x_max);\
+ pixel->mvs[pixel->nb][1] = av_clip(-mv_y * (ALPHA_MAX - alpha) / ALPHA_MAX, y_min, y_max);\
+ pixel->nb++;\
+ } while(0)
+
+static void bidirectional_obmc(MIContext *mi_ctx, int alpha)
+{
+ int x, y;
+ int width = mi_ctx->frames[0].avf->width;
+ int height = mi_ctx->frames[0].avf->height;
+ int mb_y, mb_x, dir;
+
+ for (y = 0; y < height; y++)
+ for (x = 0; x < width; x++)
+ mi_ctx->pixels[x + y * width].nb = 0;
+
+ for (dir = 0; dir < 2; dir++)
+ for (mb_y = 0; mb_y < mi_ctx->b_height; mb_y++)
+ for (mb_x = 0; mb_x < mi_ctx->b_width; mb_x++) {
+ int a = dir ? alpha : (ALPHA_MAX - alpha);
+ int mv_x = mi_ctx->frames[2 - dir].blocks[mb_x + mb_y * mi_ctx->b_width].mvs[dir][0];
+ int mv_y = mi_ctx->frames[2 - dir].blocks[mb_x + mb_y * mi_ctx->b_width].mvs[dir][1];
+ int start_x, start_y;
+ int startc_x, startc_y, endc_x, endc_y;
+
+ start_x = (mb_x << mi_ctx->log2_mb_size) - mi_ctx->mb_size / 2 + mv_x * a / ALPHA_MAX;
+ start_y = (mb_y << mi_ctx->log2_mb_size) - mi_ctx->mb_size / 2 + mv_y * a / ALPHA_MAX;
+
+ startc_x = av_clip(start_x, 0, width - 1);
+ startc_y = av_clip(start_y, 0, height - 1);
+ endc_x = av_clip(start_x + (2 << mi_ctx->log2_mb_size), 0, width - 1);
+ endc_y = av_clip(start_y + (2 << mi_ctx->log2_mb_size), 0, height - 1);
+
+ if (dir) {
+ mv_x = -mv_x;
+ mv_y = -mv_y;
+ }
+
+ for (y = startc_y; y < endc_y; y++) {
+ int y_min = -y;
+ int y_max = height - y - 1;
+ for (x = startc_x; x < endc_x; x++) {
+ int x_min = -x;
+ int x_max = width - x - 1;
+ int obmc_weight = obmc_tab_linear[4 - mi_ctx->log2_mb_size][(x - start_x) + ((y - start_y) << (mi_ctx->log2_mb_size + 1))];
+ Pixel *pixel = &mi_ctx->pixels[x + y * width];
+
+ ADD_PIXELS(obmc_weight, mv_x, mv_y);
+ }
+ }
+ }
+}
+
+static void set_frame_data(MIContext *mi_ctx, int alpha, AVFrame *avf_out)
+{
+ int x, y, plane;
+
+ for (plane = 0; plane < mi_ctx->nb_planes; plane++) {
+ int width = avf_out->width;
+ int height = avf_out->height;
+ int chroma = plane == 1 || plane == 2;
+
+ for (y = 0; y < height; y++)
+ for (x = 0; x < width; x++) {
+ int x_mv, y_mv;
+ int weight_sum = 0;
+ int i, val = 0;
+ Pixel *pixel = &mi_ctx->pixels[x + y * avf_out->width];
+
+ for (i = 0; i < pixel->nb; i++)
+ weight_sum += pixel->weights[i];
+
+ if (!weight_sum || !pixel->nb) {
+ pixel->weights[0] = ALPHA_MAX - alpha;
+ pixel->refs[0] = 1;
+ pixel->mvs[0][0] = 0;
+ pixel->mvs[0][1] = 0;
+ pixel->weights[1] = alpha;
+ pixel->refs[1] = 2;
+ pixel->mvs[1][0] = 0;
+ pixel->mvs[1][1] = 0;
+ pixel->nb = 2;
+
+ weight_sum = ALPHA_MAX;
+ }
+
+ for (i = 0; i < pixel->nb; i++) {
+ Frame *frame = &mi_ctx->frames[pixel->refs[i]];
+ if (chroma) {
+ x_mv = (x >> mi_ctx->log2_chroma_w) + pixel->mvs[i][0] / (1 << mi_ctx->log2_chroma_w);
+ y_mv = (y >> mi_ctx->log2_chroma_h) + pixel->mvs[i][1] / (1 << mi_ctx->log2_chroma_h);
+ } else {
+ x_mv = x + pixel->mvs[i][0];
+ y_mv = y + pixel->mvs[i][1];
+ }
+
+ val += pixel->weights[i] * frame->avf->data[plane][x_mv + y_mv * frame->avf->linesize[plane]];
+ }
+
+ val = ROUNDED_DIV(val, weight_sum);
+
+ if (chroma)
+ avf_out->data[plane][(x >> mi_ctx->log2_chroma_w) + (y >> mi_ctx->log2_chroma_h) * avf_out->linesize[plane]] = val;
+ else
+ avf_out->data[plane][x + y * avf_out->linesize[plane]] = val;
+ }
+ }
+}
+
+static void var_size_bmc(MIContext *mi_ctx, Block *block, int x_mb, int y_mb, int n, int alpha)
+{
+ int sb_x, sb_y;
+ int width = mi_ctx->frames[0].avf->width;
+ int height = mi_ctx->frames[0].avf->height;
+
+ for (sb_y = 0; sb_y < 2; sb_y++)
+ for (sb_x = 0; sb_x < 2; sb_x++) {
+ Block *sb = &block->subs[sb_x + sb_y * 2];
+
+ if (sb->sb)
+ var_size_bmc(mi_ctx, sb, x_mb + (sb_x << (n - 1)), y_mb + (sb_y << (n - 1)), n - 1, alpha);
+ else {
+ int x, y;
+ int mv_x = sb->mvs[0][0] * 2;
+ int mv_y = sb->mvs[0][1] * 2;
+
+ int start_x = x_mb + (sb_x << (n - 1));
+ int start_y = y_mb + (sb_y << (n - 1));
+ int end_x = start_x + (1 << (n - 1));
+ int end_y = start_y + (1 << (n - 1));
+
+ for (y = start_y; y < end_y; y++) {
+ int y_min = -y;
+ int y_max = height - y - 1;
+ for (x = start_x; x < end_x; x++) {
+ int x_min = -x;
+ int x_max = width - x - 1;
+ Pixel *pixel = &mi_ctx->pixels[x + y * width];
+
+ ADD_PIXELS(PX_WEIGHT_MAX, mv_x, mv_y);
+ }
+ }
+ }
+ }
+}
+
+static void bilateral_obmc(MIContext *mi_ctx, Block *block, int mb_x, int mb_y, int alpha)
+{
+ int x, y;
+ int width = mi_ctx->frames[0].avf->width;
+ int height = mi_ctx->frames[0].avf->height;
+
+ Block *nb;
+ int nb_x, nb_y;
+ uint64_t sbads[9];
+
+ int mv_x = block->mvs[0][0] * 2;
+ int mv_y = block->mvs[0][1] * 2;
+ int start_x, start_y;
+ int startc_x, startc_y, endc_x, endc_y;
+
+ if (mi_ctx->mc_mode == MC_MODE_AOBMC)
+ for (nb_y = FFMAX(0, mb_y - 1); nb_y < FFMIN(mb_y + 2, mi_ctx->b_height); nb_y++)
+ for (nb_x = FFMAX(0, mb_x - 1); nb_x < FFMIN(mb_x + 2, mi_ctx->b_width); nb_x++) {
+ int x_nb = nb_x << mi_ctx->log2_mb_size;
+ int y_nb = nb_y << mi_ctx->log2_mb_size;
+
+ if (nb_x - mb_x || nb_y - mb_y)
+ sbads[nb_x - mb_x + 1 + (nb_y - mb_y + 1) * 3] = get_sbad(&mi_ctx->me_ctx, x_nb, y_nb, x_nb + block->mvs[0][0], y_nb + block->mvs[0][1]);
+ }
+
+ start_x = (mb_x << mi_ctx->log2_mb_size) - mi_ctx->mb_size / 2;
+ start_y = (mb_y << mi_ctx->log2_mb_size) - mi_ctx->mb_size / 2;
+
+ startc_x = av_clip(start_x, 0, width - 1);
+ startc_y = av_clip(start_y, 0, height - 1);
+ endc_x = av_clip(start_x + (2 << mi_ctx->log2_mb_size), 0, width - 1);
+ endc_y = av_clip(start_y + (2 << mi_ctx->log2_mb_size), 0, height - 1);
+
+ for (y = startc_y; y < endc_y; y++) {
+ int y_min = -y;
+ int y_max = height - y - 1;
+ for (x = startc_x; x < endc_x; x++) {
+ int x_min = -x;
+ int x_max = width - x - 1;
+ int obmc_weight = obmc_tab_linear[4 - mi_ctx->log2_mb_size][(x - start_x) + ((y - start_y) << (mi_ctx->log2_mb_size + 1))];
+ Pixel *pixel = &mi_ctx->pixels[x + y * width];
+
+ if (mi_ctx->mc_mode == MC_MODE_AOBMC) {
+ nb_x = (((x - start_x) >> (mi_ctx->log2_mb_size - 1)) * 2 - 3) / 2;
+ nb_y = (((y - start_y) >> (mi_ctx->log2_mb_size - 1)) * 2 - 3) / 2;
+
+ if (nb_x || nb_y) {
+ uint64_t sbad = sbads[nb_x + 1 + (nb_y + 1) * 3];
+ nb = &mi_ctx->int_blocks[mb_x + nb_x + (mb_y + nb_y) * mi_ctx->b_width];
+
+ if (sbad && sbad != UINT64_MAX && nb->sbad != UINT64_MAX) {
+ int phi = av_clip(ALPHA_MAX * nb->sbad / sbad, 0, ALPHA_MAX);
+ obmc_weight = obmc_weight * phi / ALPHA_MAX;
+ }
+ }
+ }
+
+ ADD_PIXELS(obmc_weight, mv_x, mv_y);
+ }
+ }
+}
+
+static void interpolate(AVFilterLink *inlink, AVFrame *avf_out)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ MIContext *mi_ctx = ctx->priv;
+ int x, y;
+ int plane, alpha;
+ int64_t pts;
+
+ pts = av_rescale(avf_out->pts, (int64_t) ALPHA_MAX * outlink->time_base.num * inlink->time_base.den,
+ (int64_t) outlink->time_base.den * inlink->time_base.num);
+
+ alpha = (pts - mi_ctx->frames[1].avf->pts * ALPHA_MAX) / (mi_ctx->frames[2].avf->pts - mi_ctx->frames[1].avf->pts);
+ alpha = av_clip(alpha, 0, ALPHA_MAX);
+
+ if (alpha == 0 || alpha == ALPHA_MAX) {
+ av_frame_copy(avf_out, alpha ? mi_ctx->frames[2].avf : mi_ctx->frames[1].avf);
+ return;
+ }
+
+ if (mi_ctx->scene_changed) {
+ /* duplicate frame */
+ av_frame_copy(avf_out, alpha > ALPHA_MAX / 2 ? mi_ctx->frames[2].avf : mi_ctx->frames[1].avf);
+ return;
+ }
+
+ switch(mi_ctx->mi_mode) {
+ case MI_MODE_DUP:
+ av_frame_copy(avf_out, alpha > ALPHA_MAX / 2 ? mi_ctx->frames[2].avf : mi_ctx->frames[1].avf);
+
+ break;
+ case MI_MODE_BLEND:
+ for (plane = 0; plane < mi_ctx->nb_planes; plane++) {
+ int width = avf_out->width;
+ int height = avf_out->height;
+
+ if (plane == 1 || plane == 2) {
+ width = AV_CEIL_RSHIFT(width, mi_ctx->log2_chroma_w);
+ height = AV_CEIL_RSHIFT(height, mi_ctx->log2_chroma_h);
+ }
+
+ for (y = 0; y < height; y++) {
+ for (x = 0; x < width; x++) {
+ avf_out->data[plane][x + y * avf_out->linesize[plane]] =
+ alpha * mi_ctx->frames[2].avf->data[plane][x + y * mi_ctx->frames[2].avf->linesize[plane]] +
+ ((ALPHA_MAX - alpha) * mi_ctx->frames[1].avf->data[plane][x + y * mi_ctx->frames[1].avf->linesize[plane]] + 512) >> 10;
+ }
+ }
+ }
+
+ break;
+ case MI_MODE_MCI:
+ if (mi_ctx->me_mode == ME_MODE_BIDIR) {
+ bidirectional_obmc(mi_ctx, alpha);
+ set_frame_data(mi_ctx, alpha, avf_out);
+
+ } else if (mi_ctx->me_mode == ME_MODE_BILAT) {
+ int mb_x, mb_y;
+ Block *block;
+
+ for (y = 0; y < mi_ctx->frames[0].avf->height; y++)
+ for (x = 0; x < mi_ctx->frames[0].avf->width; x++)
+ mi_ctx->pixels[x + y * mi_ctx->frames[0].avf->width].nb = 0;
+
+ for (mb_y = 0; mb_y < mi_ctx->b_height; mb_y++)
+ for (mb_x = 0; mb_x < mi_ctx->b_width; mb_x++) {
+ block = &mi_ctx->int_blocks[mb_x + mb_y * mi_ctx->b_width];
+
+ if (block->sb)
+ var_size_bmc(mi_ctx, block, mb_x << mi_ctx->log2_mb_size, mb_y << mi_ctx->log2_mb_size, mi_ctx->log2_mb_size, alpha);
+
+ bilateral_obmc(mi_ctx, block, mb_x, mb_y, alpha);
+
+ }
+
+ set_frame_data(mi_ctx, alpha, avf_out);
+ }
+
+ break;
+ }
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *avf_in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ MIContext *mi_ctx = ctx->priv;
+ int ret;
+
+ if (avf_in->pts == AV_NOPTS_VALUE) {
+ ret = ff_filter_frame(ctx->outputs[0], avf_in);
+ return ret;
+ }
+
+ if (!mi_ctx->frames[NB_FRAMES - 1].avf || avf_in->pts < mi_ctx->frames[NB_FRAMES - 1].avf->pts) {
+ av_log(ctx, AV_LOG_VERBOSE, "Initializing out pts from input pts %"PRId64"\n", avf_in->pts);
+ mi_ctx->out_pts = av_rescale_q(avf_in->pts, inlink->time_base, outlink->time_base);
+ }
+
+ if (!mi_ctx->frames[NB_FRAMES - 1].avf)
+ if (ret = inject_frame(inlink, av_frame_clone(avf_in)))
+ return ret;
+
+ if (ret = inject_frame(inlink, avf_in))
+ return ret;
+
+ if (!mi_ctx->frames[0].avf)
+ return 0;
+
+ mi_ctx->scene_changed = detect_scene_change(mi_ctx);
+
+ for (;;) {
+ AVFrame *avf_out;
+
+ if (av_compare_ts(mi_ctx->out_pts, outlink->time_base, mi_ctx->frames[2].avf->pts, inlink->time_base) > 0)
+ break;
+
+ if (!(avf_out = ff_get_video_buffer(ctx->outputs[0], inlink->w, inlink->h)))
+ return AVERROR(ENOMEM);
+
+ av_frame_copy_props(avf_out, mi_ctx->frames[NB_FRAMES - 1].avf);
+ avf_out->pts = mi_ctx->out_pts++;
+
+ interpolate(inlink, avf_out);
+
+ if ((ret = ff_filter_frame(ctx->outputs[0], avf_out)) < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static av_cold void free_blocks(Block *block, int sb)
+{
+ if (block->subs)
+ free_blocks(block->subs, 1);
+ if (sb)
+ av_freep(&block);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ MIContext *mi_ctx = ctx->priv;
+ int i, m;
+
+ av_freep(&mi_ctx->pixels);
+ if (mi_ctx->int_blocks)
+ for (m = 0; m < mi_ctx->b_count; m++)
+ free_blocks(&mi_ctx->int_blocks[m], 0);
+ av_freep(&mi_ctx->int_blocks);
+
+ for (i = 0; i < NB_FRAMES; i++) {
+ Frame *frame = &mi_ctx->frames[i];
+ av_freep(&frame->blocks);
+ av_frame_free(&frame->avf);
+ }
+
+ for (i = 0; i < 3; i++)
+ av_freep(&mi_ctx->mv_table[i]);
+}
+
+static const AVFilterPad minterpolate_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ { NULL }
+};
+
+static const AVFilterPad minterpolate_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_minterpolate = {
+ .name = "minterpolate",
+ .description = NULL_IF_CONFIG_SMALL("Frame rate conversion using Motion Interpolation."),
+ .priv_size = sizeof(MIContext),
+ .priv_class = &minterpolate_class,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = minterpolate_inputs,
+ .outputs = minterpolate_outputs,
+};
diff --git a/libavfilter/vf_mpdecimate.c b/libavfilter/vf_mpdecimate.c
new file mode 100644
index 0000000000..dc345114cd
--- /dev/null
+++ b/libavfilter/vf_mpdecimate.c
@@ -0,0 +1,255 @@
+/*
+ * Copyright (c) 2003 Rich Felker
+ * Copyright (c) 2012 Stefano Sabatini
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/**
+ * @file mpdecimate filter, ported from libmpcodecs/vf_decimate.c by
+ * Rich Felker.
+ */
+
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/pixelutils.h"
+#include "libavutil/timestamp.h"
+#include "avfilter.h"
+#include "internal.h"
+#include "formats.h"
+#include "video.h"
+
+typedef struct {
+ const AVClass *class;
+ int lo, hi; ///< lower and higher threshold number of differences
+ ///< values for 8x8 blocks
+
+ float frac; ///< threshold of changed pixels over the total fraction
+
+ int max_drop_count; ///< if positive: maximum number of sequential frames to drop
+ ///< if negative: minimum number of frames between two drops
+
+ int drop_count; ///< if positive: number of frames sequentially dropped
+ ///< if negative: number of sequential frames which were not dropped
+
+ int hsub, vsub; ///< chroma subsampling values
+ AVFrame *ref; ///< reference picture
+ av_pixelutils_sad_fn sad; ///< sum of absolute difference function
+} DecimateContext;
+
+#define OFFSET(x) offsetof(DecimateContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption mpdecimate_options[] = {
+ { "max", "set the maximum number of consecutive dropped frames (positive), or the minimum interval between dropped frames (negative)",
+ OFFSET(max_drop_count), AV_OPT_TYPE_INT, {.i64=0}, INT_MIN, INT_MAX, FLAGS },
+ { "hi", "set high dropping threshold", OFFSET(hi), AV_OPT_TYPE_INT, {.i64=64*12}, INT_MIN, INT_MAX, FLAGS },
+ { "lo", "set low dropping threshold", OFFSET(lo), AV_OPT_TYPE_INT, {.i64=64*5}, INT_MIN, INT_MAX, FLAGS },
+ { "frac", "set fraction dropping threshold", OFFSET(frac), AV_OPT_TYPE_FLOAT, {.dbl=0.33}, 0, 1, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(mpdecimate);
+
+/**
+ * Return 1 if the two planes are different, 0 otherwise.
+ */
+static int diff_planes(AVFilterContext *ctx,
+ uint8_t *cur, int cur_linesize,
+ uint8_t *ref, int ref_linesize,
+ int w, int h)
+{
+ DecimateContext *decimate = ctx->priv;
+
+ int x, y;
+ int d, c = 0;
+ int t = (w/16)*(h/16)*decimate->frac;
+
+ /* compute difference for blocks of 8x8 bytes */
+ for (y = 0; y < h-7; y += 4) {
+ for (x = 8; x < w-7; x += 4) {
+ d = decimate->sad(cur + y*cur_linesize + x, cur_linesize,
+ ref + y*ref_linesize + x, ref_linesize);
+ if (d > decimate->hi) {
+ av_log(ctx, AV_LOG_DEBUG, "%d>=hi ", d);
+ return 1;
+ }
+ if (d > decimate->lo) {
+ c++;
+ if (c > t) {
+ av_log(ctx, AV_LOG_DEBUG, "lo:%d>=%d ", c, t);
+ return 1;
+ }
+ }
+ }
+ }
+
+ av_log(ctx, AV_LOG_DEBUG, "lo:%d<%d ", c, t);
+ return 0;
+}
+
+/**
+ * Tell if the frame should be decimated, for example if it is no much
+ * different with respect to the reference frame ref.
+ */
+static int decimate_frame(AVFilterContext *ctx,
+ AVFrame *cur, AVFrame *ref)
+{
+ DecimateContext *decimate = ctx->priv;
+ int plane;
+
+ if (decimate->max_drop_count > 0 &&
+ decimate->drop_count >= decimate->max_drop_count)
+ return 0;
+ if (decimate->max_drop_count < 0 &&
+ (decimate->drop_count-1) > decimate->max_drop_count)
+ return 0;
+
+ for (plane = 0; ref->data[plane] && ref->linesize[plane]; plane++) {
+ /* use 8x8 SAD even on subsampled planes. The blocks won't match up with
+ * luma blocks, but hopefully nobody is depending on this to catch
+ * localized chroma changes that wouldn't exceed the thresholds when
+ * diluted by using what's effectively a larger block size.
+ */
+ int vsub = plane == 1 || plane == 2 ? decimate->vsub : 0;
+ int hsub = plane == 1 || plane == 2 ? decimate->hsub : 0;
+ if (diff_planes(ctx,
+ cur->data[plane], cur->linesize[plane],
+ ref->data[plane], ref->linesize[plane],
+ AV_CEIL_RSHIFT(ref->width, hsub),
+ AV_CEIL_RSHIFT(ref->height, vsub))) {
+ emms_c();
+ return 0;
+ }
+ }
+
+ emms_c();
+ return 1;
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ DecimateContext *decimate = ctx->priv;
+
+ decimate->sad = av_pixelutils_get_sad_fn(3, 3, 0, ctx); // 8x8, not aligned on blocksize
+ if (!decimate->sad)
+ return AVERROR(EINVAL);
+
+ av_log(ctx, AV_LOG_VERBOSE, "max_drop_count:%d hi:%d lo:%d frac:%f\n",
+ decimate->max_drop_count, decimate->hi, decimate->lo, decimate->frac);
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ DecimateContext *decimate = ctx->priv;
+ av_frame_free(&decimate->ref);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P,
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
+ AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P,
+ AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ440P,
+ AV_PIX_FMT_YUVA420P,
+
+ AV_PIX_FMT_GBRP,
+
+ AV_PIX_FMT_YUVA444P,
+ AV_PIX_FMT_YUVA422P,
+
+ AV_PIX_FMT_NONE
+ };
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ DecimateContext *decimate = ctx->priv;
+ const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
+ decimate->hsub = pix_desc->log2_chroma_w;
+ decimate->vsub = pix_desc->log2_chroma_h;
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *cur)
+{
+ DecimateContext *decimate = inlink->dst->priv;
+ AVFilterLink *outlink = inlink->dst->outputs[0];
+ int ret;
+
+ if (decimate->ref && decimate_frame(inlink->dst, cur, decimate->ref)) {
+ decimate->drop_count = FFMAX(1, decimate->drop_count+1);
+ } else {
+ av_frame_free(&decimate->ref);
+ decimate->ref = cur;
+ decimate->drop_count = FFMIN(-1, decimate->drop_count-1);
+
+ if ((ret = ff_filter_frame(outlink, av_frame_clone(cur))) < 0)
+ return ret;
+ }
+
+ av_log(inlink->dst, AV_LOG_DEBUG,
+ "%s pts:%s pts_time:%s drop_count:%d\n",
+ decimate->drop_count > 0 ? "drop" : "keep",
+ av_ts2str(cur->pts), av_ts2timestr(cur->pts, &inlink->time_base),
+ decimate->drop_count);
+
+ if (decimate->drop_count > 0)
+ av_frame_free(&cur);
+
+ return 0;
+}
+
+static const AVFilterPad mpdecimate_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad mpdecimate_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_mpdecimate = {
+ .name = "mpdecimate",
+ .description = NULL_IF_CONFIG_SMALL("Remove near-duplicate frames."),
+ .init = init,
+ .uninit = uninit,
+ .priv_size = sizeof(DecimateContext),
+ .priv_class = &mpdecimate_class,
+ .query_formats = query_formats,
+ .inputs = mpdecimate_inputs,
+ .outputs = mpdecimate_outputs,
+};
diff --git a/libavfilter/vf_neighbor.c b/libavfilter/vf_neighbor.c
new file mode 100644
index 0000000000..de4a12f048
--- /dev/null
+++ b/libavfilter/vf_neighbor.c
@@ -0,0 +1,323 @@
+/*
+ * Copyright (c) 2012-2013 Oka Motofumi (chikuzen.mo at gmail dot com)
+ * Copyright (c) 2015 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/imgutils.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+typedef struct NContext {
+ const AVClass *class;
+ int planeheight[4];
+ int planewidth[4];
+ int nb_planes;
+ int threshold[4];
+ int coordinates;
+ uint8_t *buffer;
+
+ void (*filter)(uint8_t *dst, const uint8_t *p1, int width,
+ int threshold, const uint8_t *coordinates[], int coord);
+} NContext;
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA420P,
+ AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ422P,AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ411P,
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP, AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE
+ };
+
+ return ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ NContext *s = ctx->priv;
+
+ av_freep(&s->buffer);
+}
+
+static inline void line_copy8(uint8_t *line, const uint8_t *srcp, int width, int mergin)
+{
+ int i;
+
+ memcpy(line, srcp, width);
+
+ for (i = mergin; i > 0; i--) {
+ line[-i] = line[i];
+ line[width - 1 + i] = line[width - 1 - i];
+ }
+}
+
+static void erosion(uint8_t *dst, const uint8_t *p1, int width,
+ int threshold, const uint8_t *coordinates[], int coord)
+{
+ int x, i;
+
+ for (x = 0; x < width; x++) {
+ int min = p1[x];
+ int limit = FFMAX(min - threshold, 0);
+
+ for (i = 0; i < 8; i++) {
+ if (coord & (1 << i)) {
+ min = FFMIN(min, *(coordinates[i] + x));
+ }
+ min = FFMAX(min, limit);
+ }
+
+ dst[x] = min;
+ }
+}
+
+static void dilation(uint8_t *dst, const uint8_t *p1, int width,
+ int threshold, const uint8_t *coordinates[], int coord)
+{
+ int x, i;
+
+ for (x = 0; x < width; x++) {
+ int max = p1[x];
+ int limit = FFMIN(max + threshold, 255);
+
+ for (i = 0; i < 8; i++) {
+ if (coord & (1 << i)) {
+ max = FFMAX(max, *(coordinates[i] + x));
+ }
+ max = FFMIN(max, limit);
+ }
+
+ dst[x] = max;
+ }
+}
+
+static void deflate(uint8_t *dst, const uint8_t *p1, int width,
+ int threshold, const uint8_t *coordinates[], int coord)
+{
+ int x, i;
+
+ for (x = 0; x < width; x++) {
+ int sum = 0;
+ int limit = FFMAX(p1[x] - threshold, 0);
+
+ for (i = 0; i < 8; sum += *(coordinates[i++] + x));
+
+ dst[x] = FFMAX(FFMIN(sum / 8, p1[x]), limit);
+ }
+}
+
+static void inflate(uint8_t *dst, const uint8_t *p1, int width,
+ int threshold, const uint8_t *coordinates[], int coord)
+{
+ int x, i;
+
+ for (x = 0; x < width; x++) {
+ int sum = 0;
+ int limit = FFMIN(p1[x] + threshold, 255);
+
+ for (i = 0; i < 8; sum += *(coordinates[i++] + x));
+
+ dst[x] = FFMIN(FFMAX(sum / 8, p1[x]), limit);
+ }
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ NContext *s = ctx->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ int ret;
+
+ if ((ret = av_image_fill_linesizes(s->planewidth, inlink->format, inlink->w)) < 0)
+ return ret;
+
+ s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
+ s->planeheight[0] = s->planeheight[3] = inlink->h;
+
+ s->nb_planes = av_pix_fmt_count_planes(inlink->format);
+ s->buffer = av_malloc(3 * (s->planewidth[0] + 32));
+ if (!s->buffer)
+ return AVERROR(ENOMEM);
+
+ if (!strcmp(ctx->filter->name, "erosion"))
+ s->filter = erosion;
+ else if (!strcmp(ctx->filter->name, "dilation"))
+ s->filter = dilation;
+ else if (!strcmp(ctx->filter->name, "deflate"))
+ s->filter = deflate;
+ else if (!strcmp(ctx->filter->name, "inflate"))
+ s->filter = inflate;
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ NContext *s = ctx->priv;
+ AVFrame *out;
+ int plane, y;
+
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+
+ for (plane = 0; plane < s->nb_planes; plane++) {
+ const int threshold = s->threshold[plane];
+
+ if (threshold) {
+ const uint8_t *src = in->data[plane];
+ uint8_t *dst = out->data[plane];
+ int stride = in->linesize[plane];
+ int height = s->planeheight[plane];
+ int width = s->planewidth[plane];
+ uint8_t *p0 = s->buffer + 16;
+ uint8_t *p1 = p0 + s->planewidth[0];
+ uint8_t *p2 = p1 + s->planewidth[0];
+ uint8_t *orig = p0, *end = p2;
+
+ line_copy8(p0, src + stride, width, 1);
+ line_copy8(p1, src, width, 1);
+
+ for (y = 0; y < height; y++) {
+ const uint8_t *coordinates[] = { p0 - 1, p0, p0 + 1,
+ p1 - 1, p1 + 1,
+ p2 - 1, p2, p2 + 1};
+ src += stride * (y < height - 1 ? 1 : -1);
+ line_copy8(p2, src, width, 1);
+
+ s->filter(dst, p1, width, threshold, coordinates, s->coordinates);
+
+ p0 = p1;
+ p1 = p2;
+ p2 = (p2 == end) ? orig: p2 + s->planewidth[0];
+ dst += out->linesize[plane];
+ }
+ } else {
+ av_image_copy_plane(out->data[plane], out->linesize[plane],
+ in->data[plane], in->linesize[plane],
+ s->planewidth[plane], s->planeheight[plane]);
+ }
+ }
+
+ av_frame_free(&in);
+ return ff_filter_frame(outlink, out);
+}
+
+static const AVFilterPad neighbor_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ { NULL }
+};
+
+static const AVFilterPad neighbor_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+#define OFFSET(x) offsetof(NContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+#define DEFINE_NEIGHBOR_FILTER(name_, description_) \
+AVFILTER_DEFINE_CLASS(name_); \
+ \
+AVFilter ff_vf_##name_ = { \
+ .name = #name_, \
+ .description = NULL_IF_CONFIG_SMALL(description_), \
+ .priv_size = sizeof(NContext), \
+ .priv_class = &name_##_class, \
+ .uninit = uninit, \
+ .query_formats = query_formats, \
+ .inputs = neighbor_inputs, \
+ .outputs = neighbor_outputs, \
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC, \
+}
+
+#if CONFIG_EROSION_FILTER
+
+static const AVOption erosion_options[] = {
+ { "threshold0", "set threshold for 1st plane", OFFSET(threshold[0]), AV_OPT_TYPE_INT, {.i64=65535}, 0, 65535, FLAGS },
+ { "threshold1", "set threshold for 2nd plane", OFFSET(threshold[1]), AV_OPT_TYPE_INT, {.i64=65535}, 0, 65535, FLAGS },
+ { "threshold2", "set threshold for 3rd plane", OFFSET(threshold[2]), AV_OPT_TYPE_INT, {.i64=65535}, 0, 65535, FLAGS },
+ { "threshold3", "set threshold for 4th plane", OFFSET(threshold[3]), AV_OPT_TYPE_INT, {.i64=65535}, 0, 65535, FLAGS },
+ { "coordinates", "set coordinates", OFFSET(coordinates), AV_OPT_TYPE_INT, {.i64=255}, 0, 255, FLAGS },
+ { NULL }
+};
+
+DEFINE_NEIGHBOR_FILTER(erosion, "Apply erosion effect.");
+
+#endif /* CONFIG_EROSION_FILTER */
+
+#if CONFIG_DILATION_FILTER
+
+static const AVOption dilation_options[] = {
+ { "threshold0", "set threshold for 1st plane", OFFSET(threshold[0]), AV_OPT_TYPE_INT, {.i64=65535}, 0, 65535, FLAGS },
+ { "threshold1", "set threshold for 2nd plane", OFFSET(threshold[1]), AV_OPT_TYPE_INT, {.i64=65535}, 0, 65535, FLAGS },
+ { "threshold2", "set threshold for 3rd plane", OFFSET(threshold[2]), AV_OPT_TYPE_INT, {.i64=65535}, 0, 65535, FLAGS },
+ { "threshold3", "set threshold for 4th plane", OFFSET(threshold[3]), AV_OPT_TYPE_INT, {.i64=65535}, 0, 65535, FLAGS },
+ { "coordinates", "set coordinates", OFFSET(coordinates), AV_OPT_TYPE_INT, {.i64=255}, 0, 255, FLAGS },
+ { NULL }
+};
+
+DEFINE_NEIGHBOR_FILTER(dilation, "Apply dilation effect.");
+
+#endif /* CONFIG_DILATION_FILTER */
+
+#if CONFIG_DEFLATE_FILTER
+
+static const AVOption deflate_options[] = {
+ { "threshold0", "set threshold for 1st plane", OFFSET(threshold[0]), AV_OPT_TYPE_INT, {.i64=65535}, 0, 65535, FLAGS },
+ { "threshold1", "set threshold for 2nd plane", OFFSET(threshold[1]), AV_OPT_TYPE_INT, {.i64=65535}, 0, 65535, FLAGS },
+ { "threshold2", "set threshold for 3rd plane", OFFSET(threshold[2]), AV_OPT_TYPE_INT, {.i64=65535}, 0, 65535, FLAGS },
+ { "threshold3", "set threshold for 4th plane", OFFSET(threshold[3]), AV_OPT_TYPE_INT, {.i64=65535}, 0, 65535, FLAGS },
+ { NULL }
+};
+
+DEFINE_NEIGHBOR_FILTER(deflate, "Apply deflate effect.");
+
+#endif /* CONFIG_DEFLATE_FILTER */
+
+#if CONFIG_INFLATE_FILTER
+
+static const AVOption inflate_options[] = {
+ { "threshold0", "set threshold for 1st plane", OFFSET(threshold[0]), AV_OPT_TYPE_INT, {.i64=65535}, 0, 65535, FLAGS },
+ { "threshold1", "set threshold for 2nd plane", OFFSET(threshold[1]), AV_OPT_TYPE_INT, {.i64=65535}, 0, 65535, FLAGS },
+ { "threshold2", "set threshold for 3rd plane", OFFSET(threshold[2]), AV_OPT_TYPE_INT, {.i64=65535}, 0, 65535, FLAGS },
+ { "threshold3", "set threshold for 4th plane", OFFSET(threshold[3]), AV_OPT_TYPE_INT, {.i64=65535}, 0, 65535, FLAGS },
+ { NULL }
+};
+
+DEFINE_NEIGHBOR_FILTER(inflate, "Apply inflate effect.");
+
+#endif /* CONFIG_INFLATE_FILTER */
diff --git a/libavfilter/vf_nlmeans.c b/libavfilter/vf_nlmeans.c
new file mode 100644
index 0000000000..2487813504
--- /dev/null
+++ b/libavfilter/vf_nlmeans.c
@@ -0,0 +1,551 @@
+/*
+ * Copyright (c) 2016 Clément Bœsch <u pkh me>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @todo
+ * - SIMD for compute_safe_ssd_integral_image
+ * - SIMD for final weighted averaging
+ * - better automatic defaults? see "Parameters" @ http://www.ipol.im/pub/art/2011/bcm_nlm/
+ * - temporal support (probably doesn't need any displacement according to
+ * "Denoising image sequences does not require motion estimation")
+ * - Bayer pixel format support for at least raw photos? (DNG support would be
+ * handy here)
+ * - FATE test (probably needs visual threshold test mechanism due to the use
+ * of floats)
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+struct weighted_avg {
+ double total_weight;
+ double sum;
+};
+
+#define WEIGHT_LUT_NBITS 9
+#define WEIGHT_LUT_SIZE (1<<WEIGHT_LUT_NBITS)
+
+typedef struct {
+ const AVClass *class;
+ int nb_planes;
+ int chroma_w, chroma_h;
+ double pdiff_scale; // invert of the filtering parameter (sigma*10) squared
+ double sigma; // denoising strength
+ int patch_size, patch_hsize; // patch size and half size
+ int patch_size_uv, patch_hsize_uv; // patch size and half size for chroma planes
+ int research_size, research_hsize; // research size and half size
+ int research_size_uv, research_hsize_uv; // research size and half size for chroma planes
+ uint32_t *ii_orig; // integral image
+ uint32_t *ii; // integral image starting after the 0-line and 0-column
+ int ii_w, ii_h; // width and height of the integral image
+ int ii_lz_32; // linesize in 32-bit units of the integral image
+ struct weighted_avg *wa; // weighted average of every pixel
+ int wa_linesize; // linesize for wa in struct size unit
+ double weight_lut[WEIGHT_LUT_SIZE]; // lookup table mapping (scaled) patch differences to their associated weights
+ double pdiff_lut_scale; // scale factor for patch differences before looking into the LUT
+ int max_meaningful_diff; // maximum difference considered (if the patch difference is too high we ignore the pixel)
+} NLMeansContext;
+
+#define OFFSET(x) offsetof(NLMeansContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption nlmeans_options[] = {
+ { "s", "denoising strength", OFFSET(sigma), AV_OPT_TYPE_DOUBLE, { .dbl = 1.0 }, 1.0, 30.0, FLAGS },
+ { "p", "patch size", OFFSET(patch_size), AV_OPT_TYPE_INT, { .i64 = 3*2+1 }, 0, 99, FLAGS },
+ { "pc", "patch size for chroma planes", OFFSET(patch_size_uv), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 99, FLAGS },
+ { "r", "research window", OFFSET(research_size), AV_OPT_TYPE_INT, { .i64 = 7*2+1 }, 0, 99, FLAGS },
+ { "rc", "research window for chroma planes", OFFSET(research_size_uv), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 99, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(nlmeans);
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
+ AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P,
+ AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
+ AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
+ AV_PIX_FMT_YUVJ411P,
+ AV_PIX_FMT_GRAY8, AV_PIX_FMT_GBRP,
+ AV_PIX_FMT_NONE
+ };
+
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+/*
+ * M is a discrete map where every entry contains the sum of all the entries
+ * in the rectangle from the top-left origin of M to its coordinate. In the
+ * following schema, "i" contains the sum of the whole map:
+ *
+ * M = +----------+-----------------+----+
+ * | | | |
+ * | | | |
+ * | a| b| c|
+ * +----------+-----------------+----+
+ * | | | |
+ * | | | |
+ * | | X | |
+ * | | | |
+ * | d| e| f|
+ * +----------+-----------------+----+
+ * | | | |
+ * | g| h| i|
+ * +----------+-----------------+----+
+ *
+ * The sum of the X box can be calculated with:
+ * X = e-d-b+a
+ *
+ * See https://en.wikipedia.org/wiki/Summed_area_table
+ *
+ * The compute*_ssd functions compute the integral image M where every entry
+ * contains the sum of the squared difference of every corresponding pixels of
+ * two input planes of the same size as M.
+ */
+static inline int get_integral_patch_value(const uint32_t *ii, int ii_lz_32, int x, int y, int p)
+{
+ const int e = ii[(y + p ) * ii_lz_32 + (x + p )];
+ const int d = ii[(y + p ) * ii_lz_32 + (x - p - 1)];
+ const int b = ii[(y - p - 1) * ii_lz_32 + (x + p )];
+ const int a = ii[(y - p - 1) * ii_lz_32 + (x - p - 1)];
+ return e - d - b + a;
+}
+
+/**
+ * Compute squared difference of the safe area (the zone where s1 and s2
+ * overlap). It is likely the largest integral zone, so it is interesting to do
+ * as little checks as possible; contrary to the unsafe version of this
+ * function, we do not need any clipping here.
+ *
+ * The line above dst and the column to its left are always readable.
+ *
+ * This C version computes the SSD integral image using a scalar accumulator,
+ * while for SIMD implementation it is likely more interesting to use the
+ * two-loops algorithm variant.
+ */
+static void compute_safe_ssd_integral_image_c(uint32_t *dst, int dst_linesize_32,
+ const uint8_t *s1, int linesize1,
+ const uint8_t *s2, int linesize2,
+ int w, int h)
+{
+ int x, y;
+
+ for (y = 0; y < h; y++) {
+ uint32_t acc = dst[-1] - dst[-dst_linesize_32 - 1];
+
+ for (x = 0; x < w; x++) {
+ const int d = s1[x] - s2[x];
+ acc += d * d;
+ dst[x] = dst[-dst_linesize_32 + x] + acc;
+ }
+ s1 += linesize1;
+ s2 += linesize2;
+ dst += dst_linesize_32;
+ }
+}
+
+/**
+ * Compute squared difference of an unsafe area (the zone nor s1 nor s2 could
+ * be readable).
+ *
+ * On the other hand, the line above dst and the column to its left are always
+ * readable.
+ *
+ * There is little point in having this function SIMDified as it is likely too
+ * complex and only handle small portions of the image.
+ *
+ * @param dst integral image
+ * @param dst_linesize_32 integral image linesize (in 32-bit integers unit)
+ * @param startx integral starting x position
+ * @param starty integral starting y position
+ * @param src source plane buffer
+ * @param linesize source plane linesize
+ * @param offx source offsetting in x
+ * @param offy source offsetting in y
+ * @paran r absolute maximum source offsetting
+ * @param sw source width
+ * @param sh source height
+ * @param w width to compute
+ * @param h height to compute
+ */
+static inline void compute_unsafe_ssd_integral_image(uint32_t *dst, int dst_linesize_32,
+ int startx, int starty,
+ const uint8_t *src, int linesize,
+ int offx, int offy, int r, int sw, int sh,
+ int w, int h)
+{
+ int x, y;
+
+ for (y = starty; y < starty + h; y++) {
+ uint32_t acc = dst[y*dst_linesize_32 + startx - 1] - dst[(y-1)*dst_linesize_32 + startx - 1];
+ const int s1y = av_clip(y - r, 0, sh - 1);
+ const int s2y = av_clip(y - (r + offy), 0, sh - 1);
+
+ for (x = startx; x < startx + w; x++) {
+ const int s1x = av_clip(x - r, 0, sw - 1);
+ const int s2x = av_clip(x - (r + offx), 0, sw - 1);
+ const uint8_t v1 = src[s1y*linesize + s1x];
+ const uint8_t v2 = src[s2y*linesize + s2x];
+ const int d = v1 - v2;
+ acc += d * d;
+ dst[y*dst_linesize_32 + x] = dst[(y-1)*dst_linesize_32 + x] + acc;
+ }
+ }
+}
+
+/*
+ * Compute the sum of squared difference integral image
+ * http://www.ipol.im/pub/art/2014/57/
+ * Integral Images for Block Matching - Gabriele Facciolo, Nicolas Limare, Enric Meinhardt-Llopis
+ *
+ * @param ii integral image of dimension (w+e*2) x (h+e*2) with
+ * an additional zeroed top line and column already
+ * "applied" to the pointer value
+ * @param ii_linesize_32 integral image linesize (in 32-bit integers unit)
+ * @param src source plane buffer
+ * @param linesize source plane linesize
+ * @param offx x-offsetting ranging in [-e;e]
+ * @param offy y-offsetting ranging in [-e;e]
+ * @param w source width
+ * @param h source height
+ * @param e research padding edge
+ */
+static void compute_ssd_integral_image(uint32_t *ii, int ii_linesize_32,
+ const uint8_t *src, int linesize, int offx, int offy,
+ int e, int w, int h)
+{
+ // ii has a surrounding padding of thickness "e"
+ const int ii_w = w + e*2;
+ const int ii_h = h + e*2;
+
+ // we center the first source
+ const int s1x = e;
+ const int s1y = e;
+
+ // 2nd source is the frame with offsetting
+ const int s2x = e + offx;
+ const int s2y = e + offy;
+
+ // get the dimension of the overlapping rectangle where it is always safe
+ // to compare the 2 sources pixels
+ const int startx_safe = FFMAX(s1x, s2x);
+ const int starty_safe = FFMAX(s1y, s2y);
+ const int endx_safe = FFMIN(s1x + w, s2x + w);
+ const int endy_safe = FFMIN(s1y + h, s2y + h);
+
+ // top part where only one of s1 and s2 is still readable, or none at all
+ compute_unsafe_ssd_integral_image(ii, ii_linesize_32,
+ 0, 0,
+ src, linesize,
+ offx, offy, e, w, h,
+ ii_w, starty_safe);
+
+ // fill the left column integral required to compute the central
+ // overlapping one
+ compute_unsafe_ssd_integral_image(ii, ii_linesize_32,
+ 0, starty_safe,
+ src, linesize,
+ offx, offy, e, w, h,
+ startx_safe, endy_safe - starty_safe);
+
+ // main and safe part of the integral
+ av_assert1(startx_safe - s1x >= 0); av_assert1(startx_safe - s1x < w);
+ av_assert1(starty_safe - s1y >= 0); av_assert1(starty_safe - s1y < h);
+ av_assert1(startx_safe - s2x >= 0); av_assert1(startx_safe - s2x < w);
+ av_assert1(starty_safe - s2y >= 0); av_assert1(starty_safe - s2y < h);
+ compute_safe_ssd_integral_image_c(ii + starty_safe*ii_linesize_32 + startx_safe, ii_linesize_32,
+ src + (starty_safe - s1y) * linesize + (startx_safe - s1x), linesize,
+ src + (starty_safe - s2y) * linesize + (startx_safe - s2x), linesize,
+ endx_safe - startx_safe, endy_safe - starty_safe);
+
+ // right part of the integral
+ compute_unsafe_ssd_integral_image(ii, ii_linesize_32,
+ endx_safe, starty_safe,
+ src, linesize,
+ offx, offy, e, w, h,
+ ii_w - endx_safe, endy_safe - starty_safe);
+
+ // bottom part where only one of s1 and s2 is still readable, or none at all
+ compute_unsafe_ssd_integral_image(ii, ii_linesize_32,
+ 0, endy_safe,
+ src, linesize,
+ offx, offy, e, w, h,
+ ii_w, ii_h - endy_safe);
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ NLMeansContext *s = ctx->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ const int e = FFMAX(s->research_hsize, s->research_hsize_uv)
+ + FFMAX(s->patch_hsize, s->patch_hsize_uv);
+
+ s->chroma_w = FF_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
+ s->chroma_h = FF_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
+ s->nb_planes = av_pix_fmt_count_planes(inlink->format);
+
+ /* Allocate the integral image with extra edges of thickness "e"
+ *
+ * +_+-------------------------------+
+ * |0|0000000000000000000000000000000|
+ * +-x-------------------------------+
+ * |0|\ ^ |
+ * |0| ii | e |
+ * |0| v |
+ * |0| +-----------------------+ |
+ * |0| | | |
+ * |0|<->| | |
+ * |0| e | | |
+ * |0| | | |
+ * |0| +-----------------------+ |
+ * |0| |
+ * |0| |
+ * |0| |
+ * +-+-------------------------------+
+ */
+ s->ii_w = inlink->w + e*2;
+ s->ii_h = inlink->h + e*2;
+
+ // align to 4 the linesize, "+1" is for the space of the left 0-column
+ s->ii_lz_32 = FFALIGN(s->ii_w + 1, 4);
+
+ // "+1" is for the space of the top 0-line
+ s->ii_orig = av_mallocz_array(s->ii_h + 1, s->ii_lz_32 * sizeof(*s->ii_orig));
+ if (!s->ii_orig)
+ return AVERROR(ENOMEM);
+
+ // skip top 0-line and left 0-column
+ s->ii = s->ii_orig + s->ii_lz_32 + 1;
+
+ // allocate weighted average for every pixel
+ s->wa_linesize = inlink->w;
+ s->wa = av_malloc_array(s->wa_linesize, inlink->h * sizeof(*s->wa));
+ if (!s->wa)
+ return AVERROR(ENOMEM);
+
+ return 0;
+}
+
+struct thread_data {
+ const uint8_t *src;
+ int src_linesize;
+ int startx, starty;
+ int endx, endy;
+ const uint32_t *ii_start;
+ int p;
+};
+
+static int nlmeans_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
+{
+ int x, y;
+ NLMeansContext *s = ctx->priv;
+ const struct thread_data *td = arg;
+ const uint8_t *src = td->src;
+ const int src_linesize = td->src_linesize;
+ const int process_h = td->endy - td->starty;
+ const int slice_start = (process_h * jobnr ) / nb_jobs;
+ const int slice_end = (process_h * (jobnr+1)) / nb_jobs;
+ const int starty = td->starty + slice_start;
+ const int endy = td->starty + slice_end;
+
+ for (y = starty; y < endy; y++) {
+ for (x = td->startx; x < td->endx; x++) {
+ const int patch_diff_sq = get_integral_patch_value(td->ii_start, s->ii_lz_32, x, y, td->p);
+ if (patch_diff_sq < s->max_meaningful_diff) {
+ struct weighted_avg *wa = &s->wa[y*s->wa_linesize + x];
+ const int weight_lut_idx = patch_diff_sq * s->pdiff_lut_scale;
+ const double weight = s->weight_lut[weight_lut_idx]; // exp(-patch_diff_sq * s->pdiff_scale)
+ wa->total_weight += weight;
+ wa->sum += weight * src[y*src_linesize + x];
+ }
+ }
+ }
+ return 0;
+}
+
+static int nlmeans_plane(AVFilterContext *ctx, int w, int h, int p, int r,
+ uint8_t *dst, int dst_linesize,
+ const uint8_t *src, int src_linesize)
+{
+ int x, y;
+ int offx, offy;
+ NLMeansContext *s = ctx->priv;
+ /* patches center points cover the whole research window so the patches
+ * themselves overflow the research window */
+ const int e = r + p;
+ /* focus an integral pointer on the centered image (s1) */
+ const uint32_t *centered_ii = s->ii + e*s->ii_lz_32 + e;
+
+ memset(s->wa, 0, s->wa_linesize * h * sizeof(*s->wa));
+
+ for (offy = -r; offy <= r; offy++) {
+ for (offx = -r; offx <= r; offx++) {
+ if (offx || offy) {
+ struct thread_data td = {
+ .src = src + offy*src_linesize + offx,
+ .src_linesize = src_linesize,
+ .startx = FFMAX(0, -offx),
+ .starty = FFMAX(0, -offy),
+ .endx = FFMIN(w, w - offx),
+ .endy = FFMIN(h, h - offy),
+ .ii_start = centered_ii + offy*s->ii_lz_32 + offx,
+ .p = p,
+ };
+
+ compute_ssd_integral_image(s->ii, s->ii_lz_32,
+ src, src_linesize,
+ offx, offy, e, w, h);
+ ctx->internal->execute(ctx, nlmeans_slice, &td, NULL,
+ FFMIN(td.endy - td.starty, ff_filter_get_nb_threads(ctx)));
+ }
+ }
+ }
+ for (y = 0; y < h; y++) {
+ for (x = 0; x < w; x++) {
+ struct weighted_avg *wa = &s->wa[y*s->wa_linesize + x];
+
+ // Also weight the centered pixel
+ wa->total_weight += 1.0;
+ wa->sum += 1.0 * src[y*src_linesize + x];
+
+ dst[y*dst_linesize + x] = av_clip_uint8(wa->sum / wa->total_weight);
+ }
+ }
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ int i;
+ AVFilterContext *ctx = inlink->dst;
+ NLMeansContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+
+ AVFrame *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+
+ for (i = 0; i < s->nb_planes; i++) {
+ const int w = i ? s->chroma_w : inlink->w;
+ const int h = i ? s->chroma_h : inlink->h;
+ const int p = i ? s->patch_hsize_uv : s->patch_hsize;
+ const int r = i ? s->research_hsize_uv : s->research_hsize;
+ nlmeans_plane(ctx, w, h, p, r,
+ out->data[i], out->linesize[i],
+ in->data[i], in->linesize[i]);
+ }
+
+ av_frame_free(&in);
+ return ff_filter_frame(outlink, out);
+}
+
+#define CHECK_ODD_FIELD(field, name) do { \
+ if (!(s->field & 1)) { \
+ s->field |= 1; \
+ av_log(ctx, AV_LOG_WARNING, name " size must be odd, " \
+ "setting it to %d\n", s->field); \
+ } \
+} while (0)
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ int i;
+ NLMeansContext *s = ctx->priv;
+ const double h = s->sigma * 10.;
+
+ s->pdiff_scale = 1. / (h * h);
+ s->max_meaningful_diff = -log(1/255.) / s->pdiff_scale;
+ s->pdiff_lut_scale = 1./s->max_meaningful_diff * WEIGHT_LUT_SIZE;
+ av_assert0((s->max_meaningful_diff - 1) * s->pdiff_lut_scale < FF_ARRAY_ELEMS(s->weight_lut));
+ for (i = 0; i < WEIGHT_LUT_SIZE; i++)
+ s->weight_lut[i] = exp(-i / s->pdiff_lut_scale * s->pdiff_scale);
+
+ CHECK_ODD_FIELD(research_size, "Luma research window");
+ CHECK_ODD_FIELD(patch_size, "Luma patch");
+
+ if (!s->research_size_uv) s->research_size_uv = s->research_size;
+ if (!s->patch_size_uv) s->patch_size_uv = s->patch_size;
+
+ CHECK_ODD_FIELD(research_size_uv, "Chroma research window");
+ CHECK_ODD_FIELD(patch_size_uv, "Chroma patch");
+
+ s->research_hsize = s->research_size / 2;
+ s->research_hsize_uv = s->research_size_uv / 2;
+ s->patch_hsize = s->patch_size / 2;
+ s->patch_hsize_uv = s->patch_size_uv / 2;
+
+ av_log(ctx, AV_LOG_INFO, "Research window: %dx%d / %dx%d, patch size: %dx%d / %dx%d\n",
+ s->research_size, s->research_size, s->research_size_uv, s->research_size_uv,
+ s->patch_size, s->patch_size, s->patch_size_uv, s->patch_size_uv);
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ NLMeansContext *s = ctx->priv;
+ av_freep(&s->ii_orig);
+ av_freep(&s->wa);
+}
+
+static const AVFilterPad nlmeans_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad nlmeans_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_nlmeans = {
+ .name = "nlmeans",
+ .description = NULL_IF_CONFIG_SMALL("Non-local means denoiser."),
+ .priv_size = sizeof(NLMeansContext),
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = nlmeans_inputs,
+ .outputs = nlmeans_outputs,
+ .priv_class = &nlmeans_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
+};
diff --git a/libavfilter/vf_nnedi.c b/libavfilter/vf_nnedi.c
new file mode 100644
index 0000000000..b14aa64c04
--- /dev/null
+++ b/libavfilter/vf_nnedi.c
@@ -0,0 +1,1211 @@
+/*
+ * Copyright (C) 2010-2011 Kevin Stone
+ * Copyright (C) 2016 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <float.h>
+
+#include "libavutil/common.h"
+#include "libavutil/float_dsp.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+typedef struct FrameData {
+ uint8_t *paddedp[3];
+ int padded_stride[3];
+ int padded_width[3];
+ int padded_height[3];
+
+ uint8_t *dstp[3];
+ int dst_stride[3];
+
+ int field[3];
+
+ int32_t *lcount[3];
+ float *input;
+ float *temp;
+} FrameData;
+
+typedef struct NNEDIContext {
+ const AVClass *class;
+
+ char *weights_file;
+
+ AVFrame *src;
+ AVFrame *second;
+ AVFrame *dst;
+ int eof;
+ int64_t cur_pts;
+
+ AVFloatDSPContext *fdsp;
+ int nb_planes;
+ int linesize[4];
+ int planeheight[4];
+
+ float *weights0;
+ float *weights1[2];
+ int asize;
+ int nns;
+ int xdia;
+ int ydia;
+
+ // Parameters
+ int deint;
+ int field;
+ int process_plane;
+ int nsize;
+ int nnsparam;
+ int qual;
+ int etype;
+ int pscrn;
+ int fapprox;
+
+ int max_value;
+
+ void (*copy_pad)(const AVFrame *, FrameData *, struct NNEDIContext *, int);
+ void (*evalfunc_0)(struct NNEDIContext *, FrameData *);
+ void (*evalfunc_1)(struct NNEDIContext *, FrameData *);
+
+ // Functions used in evalfunc_0
+ void (*readpixels)(const uint8_t *, const int, float *);
+ void (*compute_network0)(struct NNEDIContext *s, const float *, const float *, uint8_t *);
+ int32_t (*process_line0)(const uint8_t *, int, uint8_t *, const uint8_t *, const int, const int, const int);
+
+ // Functions used in evalfunc_1
+ void (*extract)(const uint8_t *, const int, const int, const int, float *, float *);
+ void (*dot_prod)(struct NNEDIContext *, const float *, const float *, float *, const int, const int, const float *);
+ void (*expfunc)(float *, const int);
+ void (*wae5)(const float *, const int, float *);
+
+ FrameData frame_data;
+} NNEDIContext;
+
+#define OFFSET(x) offsetof(NNEDIContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption nnedi_options[] = {
+ {"weights", "set weights file", OFFSET(weights_file), AV_OPT_TYPE_STRING, {.str="nnedi3_weights.bin"}, 0, 0, FLAGS },
+ {"deint", "set which frames to deinterlace", OFFSET(deint), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS, "deint" },
+ {"all", "deinterlace all frames", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "deint" },
+ {"interlaced", "only deinterlace frames marked as interlaced", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "deint" },
+ {"field", "set mode of operation", OFFSET(field), AV_OPT_TYPE_INT, {.i64=-1}, -2, 3, FLAGS, "field" },
+ {"af", "use frame flags, both fields", 0, AV_OPT_TYPE_CONST, {.i64=-2}, 0, 0, FLAGS, "field" },
+ {"a", "use frame flags, single field", 0, AV_OPT_TYPE_CONST, {.i64=-1}, 0, 0, FLAGS, "field" },
+ {"t", "use top field only", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "field" },
+ {"b", "use bottom field only", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "field" },
+ {"tf", "use both fields, top first", 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, FLAGS, "field" },
+ {"bf", "use both fields, bottom first", 0, AV_OPT_TYPE_CONST, {.i64=3}, 0, 0, FLAGS, "field" },
+ {"planes", "set which planes to process", OFFSET(process_plane), AV_OPT_TYPE_INT, {.i64=7}, 0, 7, FLAGS },
+ {"nsize", "set size of local neighborhood around each pixel, used by the predictor neural network", OFFSET(nsize), AV_OPT_TYPE_INT, {.i64=6}, 0, 6, FLAGS, "nsize" },
+ {"s8x6", NULL, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "nsize" },
+ {"s16x6", NULL, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "nsize" },
+ {"s32x6", NULL, 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, FLAGS, "nsize" },
+ {"s48x6", NULL, 0, AV_OPT_TYPE_CONST, {.i64=3}, 0, 0, FLAGS, "nsize" },
+ {"s8x4", NULL, 0, AV_OPT_TYPE_CONST, {.i64=4}, 0, 0, FLAGS, "nsize" },
+ {"s16x4", NULL, 0, AV_OPT_TYPE_CONST, {.i64=5}, 0, 0, FLAGS, "nsize" },
+ {"s32x4", NULL, 0, AV_OPT_TYPE_CONST, {.i64=6}, 0, 0, FLAGS, "nsize" },
+ {"nns", "set number of neurons in predictor neural network", OFFSET(nnsparam), AV_OPT_TYPE_INT, {.i64=1}, 0, 4, FLAGS, "nns" },
+ {"n16", NULL, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "nns" },
+ {"n32", NULL, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "nns" },
+ {"n64", NULL, 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, FLAGS, "nns" },
+ {"n128", NULL, 0, AV_OPT_TYPE_CONST, {.i64=3}, 0, 0, FLAGS, "nns" },
+ {"n256", NULL, 0, AV_OPT_TYPE_CONST, {.i64=4}, 0, 0, FLAGS, "nns" },
+ {"qual", "set quality", OFFSET(qual), AV_OPT_TYPE_INT, {.i64=1}, 1, 2, FLAGS, "qual" },
+ {"fast", NULL, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "qual" },
+ {"slow", NULL, 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, FLAGS, "qual" },
+ {"etype", "set which set of weights to use in the predictor", OFFSET(etype), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS, "etype" },
+ {"a", "weights trained to minimize absolute error", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "etype" },
+ {"s", "weights trained to minimize squared error", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "etype" },
+ {"pscrn", "set prescreening", OFFSET(pscrn), AV_OPT_TYPE_INT, {.i64=2}, 0, 2, FLAGS, "pscrn" },
+ {"none", NULL, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "pscrn" },
+ {"original", NULL, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "pscrn" },
+ {"new", NULL, 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, FLAGS, "pscrn" },
+ {"fapprox", NULL, OFFSET(fapprox), AV_OPT_TYPE_INT, {.i64=0}, 0, 3, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(nnedi);
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ NNEDIContext *s = ctx->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ int ret;
+
+ s->nb_planes = av_pix_fmt_count_planes(inlink->format);
+ if ((ret = av_image_fill_linesizes(s->linesize, inlink->format, inlink->w)) < 0)
+ return ret;
+
+ s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
+ s->planeheight[0] = s->planeheight[3] = inlink->h;
+
+ return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ NNEDIContext *s = ctx->priv;
+
+ outlink->time_base.num = ctx->inputs[0]->time_base.num;
+ outlink->time_base.den = ctx->inputs[0]->time_base.den * 2;
+ outlink->w = ctx->inputs[0]->w;
+ outlink->h = ctx->inputs[0]->h;
+
+ if (s->field > 1 || s->field == -2)
+ outlink->frame_rate = av_mul_q(ctx->inputs[0]->frame_rate,
+ (AVRational){2, 1});
+
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
+ AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P,
+ AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
+ AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
+ AV_PIX_FMT_YUVJ411P,
+ AV_PIX_FMT_GBRP,
+ AV_PIX_FMT_GRAY8,
+ AV_PIX_FMT_NONE
+ };
+
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static void copy_pad(const AVFrame *src, FrameData *frame_data, NNEDIContext *s, int fn)
+{
+ const int off = 1 - fn;
+ int plane, y, x;
+
+ for (plane = 0; plane < s->nb_planes; plane++) {
+ const uint8_t *srcp = (const uint8_t *)src->data[plane];
+ uint8_t *dstp = (uint8_t *)frame_data->paddedp[plane];
+
+ const int src_stride = src->linesize[plane];
+ const int dst_stride = frame_data->padded_stride[plane];
+
+ const int src_height = s->planeheight[plane];
+ const int dst_height = frame_data->padded_height[plane];
+
+ const int src_width = s->linesize[plane];
+ const int dst_width = frame_data->padded_width[plane];
+
+ int c = 4;
+
+ if (!(s->process_plane & (1 << plane)))
+ continue;
+
+ // Copy.
+ for (y = off; y < src_height; y += 2)
+ memcpy(dstp + 32 + (6 + y) * dst_stride,
+ srcp + y * src_stride,
+ src_width * sizeof(uint8_t));
+
+ // And pad.
+ dstp += (6 + off) * dst_stride;
+ for (y = 6 + off; y < dst_height - 6; y += 2) {
+ int c = 2;
+
+ for (x = 0; x < 32; x++)
+ dstp[x] = dstp[64 - x];
+
+ for (x = dst_width - 32; x < dst_width; x++, c += 2)
+ dstp[x] = dstp[x - c];
+
+ dstp += dst_stride * 2;
+ }
+
+ dstp = (uint8_t *)frame_data->paddedp[plane];
+ for (y = off; y < 6; y += 2)
+ memcpy(dstp + y * dst_stride,
+ dstp + (12 + 2 * off - y) * dst_stride,
+ dst_width * sizeof(uint8_t));
+
+ for (y = dst_height - 6 + off; y < dst_height; y += 2, c += 4)
+ memcpy(dstp + y * dst_stride,
+ dstp + (y - c) * dst_stride,
+ dst_width * sizeof(uint8_t));
+ }
+}
+
+static void elliott(float *data, const int n)
+{
+ int i;
+
+ for (i = 0; i < n; i++)
+ data[i] = data[i] / (1.0f + FFABS(data[i]));
+}
+
+static void dot_prod(NNEDIContext *s, const float *data, const float *weights, float *vals, const int n, const int len, const float *scale)
+{
+ int i;
+
+ for (i = 0; i < n; i++) {
+ float sum;
+
+ sum = s->fdsp->scalarproduct_float(data, &weights[i * len], len);
+
+ vals[i] = sum * scale[0] + weights[n * len + i];
+ }
+}
+
+static void dot_prods(NNEDIContext *s, const float *dataf, const float *weightsf, float *vals, const int n, const int len, const float *scale)
+{
+ const int16_t *data = (int16_t *)dataf;
+ const int16_t *weights = (int16_t *)weightsf;
+ const float *wf = (float *)&weights[n * len];
+ int i, j;
+
+ for (i = 0; i < n; i++) {
+ int sum = 0, off = ((i >> 2) << 3) + (i & 3);
+ for (j = 0; j < len; j++)
+ sum += data[j] * weights[i * len + j];
+
+ vals[i] = sum * wf[off] * scale[0] + wf[off + 4];
+ }
+}
+
+static void compute_network0(NNEDIContext *s, const float *input, const float *weights, uint8_t *d)
+{
+ float t, temp[12], scale = 1.0f;
+
+ dot_prod(s, input, weights, temp, 4, 48, &scale);
+ t = temp[0];
+ elliott(temp, 4);
+ temp[0] = t;
+ dot_prod(s, temp, weights + 4 * 49, temp + 4, 4, 4, &scale);
+ elliott(temp + 4, 4);
+ dot_prod(s, temp, weights + 4 * 49 + 4 * 5, temp + 8, 4, 8, &scale);
+ if (FFMAX(temp[10], temp[11]) <= FFMAX(temp[8], temp[9]))
+ d[0] = 1;
+ else
+ d[0] = 0;
+}
+
+static void compute_network0_i16(NNEDIContext *s, const float *inputf, const float *weightsf, uint8_t *d)
+{
+ const float *wf = weightsf + 2 * 48;
+ float t, temp[12], scale = 1.0f;
+
+ dot_prods(s, inputf, weightsf, temp, 4, 48, &scale);
+ t = temp[0];
+ elliott(temp, 4);
+ temp[0] = t;
+ dot_prod(s, temp, wf + 8, temp + 4, 4, 4, &scale);
+ elliott(temp + 4, 4);
+ dot_prod(s, temp, wf + 8 + 4 * 5, temp + 8, 4, 8, &scale);
+ if (FFMAX(temp[10], temp[11]) <= FFMAX(temp[8], temp[9]))
+ d[0] = 1;
+ else
+ d[0] = 0;
+}
+
+static void pixel2float48(const uint8_t *t8, const int pitch, float *p)
+{
+ const uint8_t *t = (const uint8_t *)t8;
+ int y, x;
+
+ for (y = 0; y < 4; y++)
+ for (x = 0; x < 12; x++)
+ p[y * 12 + x] = t[y * pitch * 2 + x];
+}
+
+static void byte2word48(const uint8_t *t, const int pitch, float *pf)
+{
+ int16_t *p = (int16_t *)pf;
+ int y, x;
+
+ for (y = 0; y < 4; y++)
+ for (x = 0; x < 12; x++)
+ p[y * 12 + x] = t[y * pitch * 2 + x];
+}
+
+static int32_t process_line0(const uint8_t *tempu, int width, uint8_t *dstp8, const uint8_t *src3p8, const int src_pitch, const int max_value, const int chroma)
+{
+ uint8_t *dstp = (uint8_t *)dstp8;
+ const uint8_t *src3p = (const uint8_t *)src3p8;
+ int minimum = 0;
+ int maximum = max_value - 1; // Technically the -1 is only needed for 8 and 16 bit input.
+ int count = 0, x;
+ for (x = 0; x < width; x++) {
+ if (tempu[x]) {
+ int tmp = 19 * (src3p[x + src_pitch * 2] + src3p[x + src_pitch * 4]) - 3 * (src3p[x] + src3p[x + src_pitch * 6]);
+ tmp /= 32;
+ dstp[x] = FFMAX(FFMIN(tmp, maximum), minimum);
+ } else {
+ dstp[x] = 255;
+ count++;
+ }
+ }
+ return count;
+}
+
+// new prescreener functions
+static void byte2word64(const uint8_t *t, const int pitch, float *p)
+{
+ int16_t *ps = (int16_t *)p;
+ int y, x;
+
+ for (y = 0; y < 4; y++)
+ for (x = 0; x < 16; x++)
+ ps[y * 16 + x] = t[y * pitch * 2 + x];
+}
+
+static void compute_network0new(NNEDIContext *s, const float *datai, const float *weights, uint8_t *d)
+{
+ int16_t *data = (int16_t *)datai;
+ int16_t *ws = (int16_t *)weights;
+ float *wf = (float *)&ws[4 * 64];
+ float vals[8];
+ int mask, i, j;
+
+ for (i = 0; i < 4; i++) {
+ int sum = 0;
+ float t;
+
+ for (j = 0; j < 64; j++)
+ sum += data[j] * ws[(i << 3) + ((j >> 3) << 5) + (j & 7)];
+ t = sum * wf[i] + wf[4 + i];
+ vals[i] = t / (1.0f + FFABS(t));
+ }
+
+ for (i = 0; i < 4; i++) {
+ float sum = 0.0f;
+
+ for (j = 0; j < 4; j++)
+ sum += vals[j] * wf[8 + i + (j << 2)];
+ vals[4 + i] = sum + wf[8 + 16 + i];
+ }
+
+ mask = 0;
+ for (i = 0; i < 4; i++) {
+ if (vals[4 + i] > 0.0f)
+ mask |= (0x1 << (i << 3));
+ }
+
+ ((int *)d)[0] = mask;
+}
+
+static void evalfunc_0(NNEDIContext *s, FrameData *frame_data)
+{
+ float *input = frame_data->input;
+ const float *weights0 = s->weights0;
+ float *temp = frame_data->temp;
+ uint8_t *tempu = (uint8_t *)temp;
+ int plane, x, y;
+
+ // And now the actual work.
+ for (plane = 0; plane < s->nb_planes; plane++) {
+ const uint8_t *srcp = (const uint8_t *)frame_data->paddedp[plane];
+ const int src_stride = frame_data->padded_stride[plane] / sizeof(uint8_t);
+
+ const int width = frame_data->padded_width[plane];
+ const int height = frame_data->padded_height[plane];
+
+ uint8_t *dstp = (uint8_t *)frame_data->dstp[plane];
+ const int dst_stride = frame_data->dst_stride[plane] / sizeof(uint8_t);
+ const uint8_t *src3p;
+ int ystart, ystop;
+ int32_t *lcount;
+
+ if (!(s->process_plane & (1 << plane)))
+ continue;
+
+ for (y = 1 - frame_data->field[plane]; y < height - 12; y += 2) {
+ memcpy(dstp + y * dst_stride,
+ srcp + 32 + (6 + y) * src_stride,
+ (width - 64) * sizeof(uint8_t));
+
+ }
+
+ ystart = 6 + frame_data->field[plane];
+ ystop = height - 6;
+ srcp += ystart * src_stride;
+ dstp += (ystart - 6) * dst_stride - 32;
+ src3p = srcp - src_stride * 3;
+ lcount = frame_data->lcount[plane] - 6;
+
+ if (s->pscrn == 1) { // original
+ for (y = ystart; y < ystop; y += 2) {
+ for (x = 32; x < width - 32; x++) {
+ s->readpixels((const uint8_t *)(src3p + x - 5), src_stride, input);
+ s->compute_network0(s, input, weights0, tempu+x);
+ }
+ lcount[y] += s->process_line0(tempu + 32, width - 64, (uint8_t *)(dstp + 32), (const uint8_t *)(src3p + 32), src_stride, s->max_value, plane);
+ src3p += src_stride * 2;
+ dstp += dst_stride * 2;
+ }
+ } else if (s->pscrn > 1) { // new
+ for (y = ystart; y < ystop; y += 2) {
+ for (x = 32; x < width - 32; x += 4) {
+ s->readpixels((const uint8_t *)(src3p + x - 6), src_stride, input);
+ s->compute_network0(s, input, weights0, tempu + x);
+ }
+ lcount[y] += s->process_line0(tempu + 32, width - 64, (uint8_t *)(dstp + 32), (const uint8_t *)(src3p + 32), src_stride, s->max_value, plane);
+ src3p += src_stride * 2;
+ dstp += dst_stride * 2;
+ }
+ } else { // no prescreening
+ for (y = ystart; y < ystop; y += 2) {
+ memset(dstp + 32, 255, (width - 64) * sizeof(uint8_t));
+ lcount[y] += width - 64;
+ dstp += dst_stride * 2;
+ }
+ }
+ }
+}
+
+static void extract_m8(const uint8_t *srcp8, const int stride, const int xdia, const int ydia, float *mstd, float *input)
+{
+ // uint8_t or uint16_t or float
+ const uint8_t *srcp = (const uint8_t *)srcp8;
+ float scale;
+ double tmp;
+
+ // int32_t or int64_t or double
+ int64_t sum = 0, sumsq = 0;
+ int y, x;
+
+ for (y = 0; y < ydia; y++) {
+ const uint8_t *srcpT = srcp + y * stride * 2;
+
+ for (x = 0; x < xdia; x++) {
+ sum += srcpT[x];
+ sumsq += (uint32_t)srcpT[x] * (uint32_t)srcpT[x];
+ input[x] = srcpT[x];
+ }
+ input += xdia;
+ }
+ scale = 1.0f / (xdia * ydia);
+ mstd[0] = sum * scale;
+ tmp = (double)sumsq * scale - (double)mstd[0] * mstd[0];
+ mstd[3] = 0.0f;
+ if (tmp <= FLT_EPSILON)
+ mstd[1] = mstd[2] = 0.0f;
+ else {
+ mstd[1] = sqrt(tmp);
+ mstd[2] = 1.0f / mstd[1];
+ }
+}
+
+static void extract_m8_i16(const uint8_t *srcp, const int stride, const int xdia, const int ydia, float *mstd, float *inputf)
+{
+ int16_t *input = (int16_t *)inputf;
+ float scale;
+ int sum = 0, sumsq = 0;
+ int y, x;
+
+ for (y = 0; y < ydia; y++) {
+ const uint8_t *srcpT = srcp + y * stride * 2;
+ for (x = 0; x < xdia; x++) {
+ sum += srcpT[x];
+ sumsq += srcpT[x] * srcpT[x];
+ input[x] = srcpT[x];
+ }
+ input += xdia;
+ }
+ scale = 1.0f / (float)(xdia * ydia);
+ mstd[0] = sum * scale;
+ mstd[1] = sumsq * scale - mstd[0] * mstd[0];
+ mstd[3] = 0.0f;
+ if (mstd[1] <= FLT_EPSILON)
+ mstd[1] = mstd[2] = 0.0f;
+ else {
+ mstd[1] = sqrt(mstd[1]);
+ mstd[2] = 1.0f / mstd[1];
+ }
+}
+
+
+static const float exp_lo = -80.0f;
+static const float exp_hi = +80.0f;
+
+static void e2_m16(float *s, const int n)
+{
+ int i;
+
+ for (i = 0; i < n; i++)
+ s[i] = exp(av_clipf(s[i], exp_lo, exp_hi));
+}
+
+const float min_weight_sum = 1e-10f;
+
+static void weighted_avg_elliott_mul5_m16(const float *w, const int n, float *mstd)
+{
+ float vsum = 0.0f, wsum = 0.0f;
+ int i;
+
+ for (i = 0; i < n; i++) {
+ vsum += w[i] * (w[n + i] / (1.0f + FFABS(w[n + i])));
+ wsum += w[i];
+ }
+ if (wsum > min_weight_sum)
+ mstd[3] += ((5.0f * vsum) / wsum) * mstd[1] + mstd[0];
+ else
+ mstd[3] += mstd[0];
+}
+
+
+static void evalfunc_1(NNEDIContext *s, FrameData *frame_data)
+{
+ float *input = frame_data->input;
+ float *temp = frame_data->temp;
+ float **weights1 = s->weights1;
+ const int qual = s->qual;
+ const int asize = s->asize;
+ const int nns = s->nns;
+ const int xdia = s->xdia;
+ const int xdiad2m1 = (xdia / 2) - 1;
+ const int ydia = s->ydia;
+ const float scale = 1.0f / (float)qual;
+ int plane, y, x, i;
+
+ for (plane = 0; plane < s->nb_planes; plane++) {
+ const uint8_t *srcp = (const uint8_t *)frame_data->paddedp[plane];
+ const int src_stride = frame_data->padded_stride[plane] / sizeof(uint8_t);
+
+ const int width = frame_data->padded_width[plane];
+ const int height = frame_data->padded_height[plane];
+
+ uint8_t *dstp = (uint8_t *)frame_data->dstp[plane];
+ const int dst_stride = frame_data->dst_stride[plane] / sizeof(uint8_t);
+
+ const int ystart = frame_data->field[plane];
+ const int ystop = height - 12;
+ const uint8_t *srcpp;
+
+ if (!(s->process_plane & (1 << plane)))
+ continue;
+
+ srcp += (ystart + 6) * src_stride;
+ dstp += ystart * dst_stride - 32;
+ srcpp = srcp - (ydia - 1) * src_stride - xdiad2m1;
+
+ for (y = ystart; y < ystop; y += 2) {
+ for (x = 32; x < width - 32; x++) {
+ float mstd[4];
+
+ if (dstp[x] != 255)
+ continue;
+
+ s->extract((const uint8_t *)(srcpp + x), src_stride, xdia, ydia, mstd, input);
+ for (i = 0; i < qual; i++) {
+ s->dot_prod(s, input, weights1[i], temp, nns * 2, asize, mstd + 2);
+ s->expfunc(temp, nns);
+ s->wae5(temp, nns, mstd);
+ }
+
+ dstp[x] = FFMIN(FFMAX((int)(mstd[3] * scale + 0.5f), 0), s->max_value);
+ }
+ srcpp += src_stride * 2;
+ dstp += dst_stride * 2;
+ }
+ }
+}
+
+#define NUM_NSIZE 7
+#define NUM_NNS 5
+
+static int roundds(const double f)
+{
+ if (f - floor(f) >= 0.5)
+ return FFMIN((int)ceil(f), 32767);
+ return FFMAX((int)floor(f), -32768);
+}
+
+static void select_functions(NNEDIContext *s)
+{
+ s->copy_pad = copy_pad;
+ s->evalfunc_0 = evalfunc_0;
+ s->evalfunc_1 = evalfunc_1;
+
+ // evalfunc_0
+ s->process_line0 = process_line0;
+
+ if (s->pscrn < 2) { // original prescreener
+ if (s->fapprox & 1) { // int16 dot products
+ s->readpixels = byte2word48;
+ s->compute_network0 = compute_network0_i16;
+ } else {
+ s->readpixels = pixel2float48;
+ s->compute_network0 = compute_network0;
+ }
+ } else { // new prescreener
+ // only int16 dot products
+ s->readpixels = byte2word64;
+ s->compute_network0 = compute_network0new;
+ }
+
+ // evalfunc_1
+ s->wae5 = weighted_avg_elliott_mul5_m16;
+
+ if (s->fapprox & 2) { // use int16 dot products
+ s->extract = extract_m8_i16;
+ s->dot_prod = dot_prods;
+ } else { // use float dot products
+ s->extract = extract_m8;
+ s->dot_prod = dot_prod;
+ }
+
+ s->expfunc = e2_m16;
+}
+
+static int modnpf(const int m, const int n)
+{
+ if ((m % n) == 0)
+ return m;
+ return m + n - (m % n);
+}
+
+static int get_frame(AVFilterContext *ctx, int is_second)
+{
+ NNEDIContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFrame *src = s->src;
+ FrameData *frame_data;
+ int effective_field = s->field;
+ size_t temp_size;
+ int field_n;
+ int plane;
+
+ if (effective_field > 1)
+ effective_field -= 2;
+ else if (effective_field < 0)
+ effective_field += 2;
+
+ if (s->field < 0 && src->interlaced_frame && src->top_field_first == 0)
+ effective_field = 0;
+ else if (s->field < 0 && src->interlaced_frame && src->top_field_first == 1)
+ effective_field = 1;
+ else
+ effective_field = !effective_field;
+
+ if (s->field > 1 || s->field == -2) {
+ if (is_second) {
+ field_n = (effective_field == 0);
+ } else {
+ field_n = (effective_field == 1);
+ }
+ } else {
+ field_n = effective_field;
+ }
+
+ s->dst = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!s->dst)
+ return AVERROR(ENOMEM);
+ av_frame_copy_props(s->dst, src);
+ s->dst->interlaced_frame = 0;
+
+ frame_data = &s->frame_data;
+
+ for (plane = 0; plane < s->nb_planes; plane++) {
+ int dst_height = s->planeheight[plane];
+ int dst_width = s->linesize[plane];
+
+ const int min_alignment = 16;
+ const int min_pad = 10;
+
+ if (!(s->process_plane & (1 << plane))) {
+ av_image_copy_plane(s->dst->data[plane], s->dst->linesize[plane],
+ src->data[plane], src->linesize[plane],
+ s->linesize[plane],
+ s->planeheight[plane]);
+ continue;
+ }
+
+ frame_data->padded_width[plane] = dst_width + 64;
+ frame_data->padded_height[plane] = dst_height + 12;
+ frame_data->padded_stride[plane] = modnpf(frame_data->padded_width[plane] + min_pad, min_alignment); // TODO: maybe min_pad is in pixels too?
+ if (!frame_data->paddedp[plane]) {
+ frame_data->paddedp[plane] = av_malloc_array(frame_data->padded_stride[plane], frame_data->padded_height[plane]);
+ if (!frame_data->paddedp[plane])
+ return AVERROR(ENOMEM);
+ }
+
+ frame_data->dstp[plane] = s->dst->data[plane];
+ frame_data->dst_stride[plane] = s->dst->linesize[plane];
+
+ if (!frame_data->lcount[plane]) {
+ frame_data->lcount[plane] = av_calloc(dst_height, sizeof(int32_t) * 16);
+ if (!frame_data->lcount[plane])
+ return AVERROR(ENOMEM);
+ } else {
+ memset(frame_data->lcount[plane], 0, dst_height * sizeof(int32_t) * 16);
+ }
+
+ frame_data->field[plane] = field_n;
+ }
+
+ if (!frame_data->input) {
+ frame_data->input = av_malloc(512 * sizeof(float));
+ if (!frame_data->input)
+ return AVERROR(ENOMEM);
+ }
+ // evalfunc_0 requires at least padded_width[0] bytes.
+ // evalfunc_1 requires at least 512 floats.
+ if (!frame_data->temp) {
+ temp_size = FFMAX(frame_data->padded_width[0], 512 * sizeof(float));
+ frame_data->temp = av_malloc(temp_size);
+ if (!frame_data->temp)
+ return AVERROR(ENOMEM);
+ }
+
+ // Copy src to a padded "frame" in frame_data and mirror the edges.
+ s->copy_pad(src, frame_data, s, field_n);
+
+ // Handles prescreening and the cubic interpolation.
+ s->evalfunc_0(s, frame_data);
+
+ // The rest.
+ s->evalfunc_1(s, frame_data);
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *src)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ NNEDIContext *s = ctx->priv;
+ int ret;
+
+ if ((s->field > 1 ||
+ s->field == -2) && !s->second) {
+ goto second;
+ } else if (s->field > 1 ||
+ s->field == -2) {
+ AVFrame *dst;
+
+ s->src = s->second;
+ ret = get_frame(ctx, 1);
+ if (ret < 0) {
+ av_frame_free(&s->dst);
+ av_frame_free(&s->src);
+ av_frame_free(&s->second);
+ return ret;
+ }
+ dst = s->dst;
+
+ if (src->pts != AV_NOPTS_VALUE &&
+ dst->pts != AV_NOPTS_VALUE)
+ dst->pts += src->pts;
+ else
+ dst->pts = AV_NOPTS_VALUE;
+
+ ret = ff_filter_frame(outlink, dst);
+ if (ret < 0)
+ return ret;
+ if (s->eof)
+ return 0;
+ s->cur_pts = s->second->pts;
+ av_frame_free(&s->second);
+second:
+ if ((s->deint && src->interlaced_frame &&
+ !ctx->is_disabled) ||
+ (!s->deint && !ctx->is_disabled)) {
+ s->second = src;
+ }
+ }
+
+ if ((s->deint && !src->interlaced_frame) || ctx->is_disabled) {
+ AVFrame *dst = av_frame_clone(src);
+ if (!dst) {
+ av_frame_free(&src);
+ av_frame_free(&s->second);
+ return AVERROR(ENOMEM);
+ }
+
+ if (s->field > 1 || s->field == -2) {
+ av_frame_free(&s->second);
+ if ((s->deint && src->interlaced_frame) ||
+ (!s->deint))
+ s->second = src;
+ } else {
+ av_frame_free(&src);
+ }
+ if (dst->pts != AV_NOPTS_VALUE)
+ dst->pts *= 2;
+ return ff_filter_frame(outlink, dst);
+ }
+
+ s->src = src;
+ ret = get_frame(ctx, 0);
+ if (ret < 0) {
+ av_frame_free(&s->dst);
+ av_frame_free(&s->src);
+ av_frame_free(&s->second);
+ return ret;
+ }
+
+ if (src->pts != AV_NOPTS_VALUE)
+ s->dst->pts = src->pts * 2;
+ if (s->field <= 1 && s->field > -2) {
+ av_frame_free(&src);
+ s->src = NULL;
+ }
+
+ return ff_filter_frame(outlink, s->dst);
+}
+
+static int request_frame(AVFilterLink *link)
+{
+ AVFilterContext *ctx = link->src;
+ NNEDIContext *s = ctx->priv;
+ int ret;
+
+ if (s->eof)
+ return AVERROR_EOF;
+
+ ret = ff_request_frame(ctx->inputs[0]);
+
+ if (ret == AVERROR_EOF && s->second) {
+ AVFrame *next = av_frame_clone(s->second);
+
+ if (!next)
+ return AVERROR(ENOMEM);
+
+ next->pts = s->second->pts * 2 - s->cur_pts;
+ s->eof = 1;
+
+ filter_frame(ctx->inputs[0], next);
+ } else if (ret < 0) {
+ return ret;
+ }
+
+ return 0;
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ NNEDIContext *s = ctx->priv;
+ FILE *weights_file = NULL;
+ int64_t expected_size = 13574928;
+ int64_t weights_size;
+ float *bdata;
+ size_t bytes_read;
+ const int xdia_table[NUM_NSIZE] = { 8, 16, 32, 48, 8, 16, 32 };
+ const int ydia_table[NUM_NSIZE] = { 6, 6, 6, 6, 4, 4, 4 };
+ const int nns_table[NUM_NNS] = { 16, 32, 64, 128, 256 };
+ const int dims0 = 49 * 4 + 5 * 4 + 9 * 4;
+ const int dims0new = 4 * 65 + 4 * 5;
+ const int dims1 = nns_table[s->nnsparam] * 2 * (xdia_table[s->nsize] * ydia_table[s->nsize] + 1);
+ int dims1tsize = 0;
+ int dims1offset = 0;
+ int ret = 0, i, j, k;
+
+ weights_file = fopen(s->weights_file, "rb");
+ if (!weights_file) {
+ av_log(ctx, AV_LOG_ERROR, "No weights file provided, aborting!\n");
+ return AVERROR(EINVAL);
+ }
+
+ if (fseek(weights_file, 0, SEEK_END)) {
+ av_log(ctx, AV_LOG_ERROR, "Couldn't seek to the end of weights file.\n");
+ fclose(weights_file);
+ return AVERROR(EINVAL);
+ }
+
+ weights_size = ftell(weights_file);
+
+ if (weights_size == -1) {
+ fclose(weights_file);
+ av_log(ctx, AV_LOG_ERROR, "Couldn't get size of weights file.\n");
+ return AVERROR(EINVAL);
+ } else if (weights_size != expected_size) {
+ fclose(weights_file);
+ av_log(ctx, AV_LOG_ERROR, "Unexpected weights file size.\n");
+ return AVERROR(EINVAL);
+ }
+
+ if (fseek(weights_file, 0, SEEK_SET)) {
+ fclose(weights_file);
+ av_log(ctx, AV_LOG_ERROR, "Couldn't seek to the start of weights file.\n");
+ return AVERROR(EINVAL);
+ }
+
+ bdata = (float *)av_malloc(expected_size);
+ if (!bdata) {
+ fclose(weights_file);
+ return AVERROR(ENOMEM);
+ }
+
+ bytes_read = fread(bdata, 1, expected_size, weights_file);
+
+ if (bytes_read != (size_t)expected_size) {
+ fclose(weights_file);
+ ret = AVERROR_INVALIDDATA;
+ av_log(ctx, AV_LOG_ERROR, "Couldn't read weights file.\n");
+ goto fail;
+ }
+
+ fclose(weights_file);
+
+ for (j = 0; j < NUM_NNS; j++) {
+ for (i = 0; i < NUM_NSIZE; i++) {
+ if (i == s->nsize && j == s->nnsparam)
+ dims1offset = dims1tsize;
+ dims1tsize += nns_table[j] * 2 * (xdia_table[i] * ydia_table[i] + 1) * 2;
+ }
+ }
+
+ s->weights0 = av_malloc_array(FFMAX(dims0, dims0new), sizeof(float));
+ if (!s->weights0) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ for (i = 0; i < 2; i++) {
+ s->weights1[i] = av_malloc_array(dims1, sizeof(float));
+ if (!s->weights1[i]) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+ }
+
+ // Adjust prescreener weights
+ if (s->pscrn >= 2) {// using new prescreener
+ const float *bdw;
+ int16_t *ws;
+ float *wf;
+ double mean[4] = { 0.0, 0.0, 0.0, 0.0 };
+ int *offt = av_calloc(4 * 64, sizeof(int));
+
+ if (!offt) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ for (j = 0; j < 4; j++)
+ for (k = 0; k < 64; k++)
+ offt[j * 64 + k] = ((k >> 3) << 5) + ((j & 3) << 3) + (k & 7);
+
+ bdw = bdata + dims0 + dims0new * (s->pscrn - 2);
+ ws = (int16_t *)s->weights0;
+ wf = (float *)&ws[4 * 64];
+ // Calculate mean weight of each first layer neuron
+ for (j = 0; j < 4; j++) {
+ double cmean = 0.0;
+ for (k = 0; k < 64; k++)
+ cmean += bdw[offt[j * 64 + k]];
+ mean[j] = cmean / 64.0;
+ }
+ // Factor mean removal and 1.0/127.5 scaling
+ // into first layer weights. scale to int16 range
+ for (j = 0; j < 4; j++) {
+ double scale, mval = 0.0;
+
+ for (k = 0; k < 64; k++)
+ mval = FFMAX(mval, FFABS((bdw[offt[j * 64 + k]] - mean[j]) / 127.5));
+ scale = 32767.0 / mval;
+ for (k = 0; k < 64; k++)
+ ws[offt[j * 64 + k]] = roundds(((bdw[offt[j * 64 + k]] - mean[j]) / 127.5) * scale);
+ wf[j] = (float)(mval / 32767.0);
+ }
+ memcpy(wf + 4, bdw + 4 * 64, (dims0new - 4 * 64) * sizeof(float));
+ av_free(offt);
+ } else { // using old prescreener
+ double mean[4] = { 0.0, 0.0, 0.0, 0.0 };
+ // Calculate mean weight of each first layer neuron
+ for (j = 0; j < 4; j++) {
+ double cmean = 0.0;
+ for (k = 0; k < 48; k++)
+ cmean += bdata[j * 48 + k];
+ mean[j] = cmean / 48.0;
+ }
+ if (s->fapprox & 1) {// use int16 dot products in first layer
+ int16_t *ws = (int16_t *)s->weights0;
+ float *wf = (float *)&ws[4 * 48];
+ // Factor mean removal and 1.0/127.5 scaling
+ // into first layer weights. scale to int16 range
+ for (j = 0; j < 4; j++) {
+ double scale, mval = 0.0;
+ for (k = 0; k < 48; k++)
+ mval = FFMAX(mval, FFABS((bdata[j * 48 + k] - mean[j]) / 127.5));
+ scale = 32767.0 / mval;
+ for (k = 0; k < 48; k++)
+ ws[j * 48 + k] = roundds(((bdata[j * 48 + k] - mean[j]) / 127.5) * scale);
+ wf[j] = (float)(mval / 32767.0);
+ }
+ memcpy(wf + 4, bdata + 4 * 48, (dims0 - 4 * 48) * sizeof(float));
+ } else {// use float dot products in first layer
+ double half = (1 << 8) - 1;
+
+ half /= 2;
+
+ // Factor mean removal and 1.0/half scaling
+ // into first layer weights.
+ for (j = 0; j < 4; j++)
+ for (k = 0; k < 48; k++)
+ s->weights0[j * 48 + k] = (float)((bdata[j * 48 + k] - mean[j]) / half);
+ memcpy(s->weights0 + 4 * 48, bdata + 4 * 48, (dims0 - 4 * 48) * sizeof(float));
+ }
+ }
+
+ // Adjust prediction weights
+ for (i = 0; i < 2; i++) {
+ const float *bdataT = bdata + dims0 + dims0new * 3 + dims1tsize * s->etype + dims1offset + i * dims1;
+ const int nnst = nns_table[s->nnsparam];
+ const int asize = xdia_table[s->nsize] * ydia_table[s->nsize];
+ const int boff = nnst * 2 * asize;
+ double *mean = (double *)av_calloc(asize + 1 + nnst * 2, sizeof(double));
+
+ if (!mean) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ // Calculate mean weight of each neuron (ignore bias)
+ for (j = 0; j < nnst * 2; j++) {
+ double cmean = 0.0;
+ for (k = 0; k < asize; k++)
+ cmean += bdataT[j * asize + k];
+ mean[asize + 1 + j] = cmean / (double)asize;
+ }
+ // Calculate mean softmax neuron
+ for (j = 0; j < nnst; j++) {
+ for (k = 0; k < asize; k++)
+ mean[k] += bdataT[j * asize + k] - mean[asize + 1 + j];
+ mean[asize] += bdataT[boff + j];
+ }
+ for (j = 0; j < asize + 1; j++)
+ mean[j] /= (double)(nnst);
+
+ if (s->fapprox & 2) { // use int16 dot products
+ int16_t *ws = (int16_t *)s->weights1[i];
+ float *wf = (float *)&ws[nnst * 2 * asize];
+ // Factor mean removal into weights, remove global offset from
+ // softmax neurons, and scale weights to int16 range.
+ for (j = 0; j < nnst; j++) { // softmax neurons
+ double scale, mval = 0.0;
+ for (k = 0; k < asize; k++)
+ mval = FFMAX(mval, FFABS(bdataT[j * asize + k] - mean[asize + 1 + j] - mean[k]));
+ scale = 32767.0 / mval;
+ for (k = 0; k < asize; k++)
+ ws[j * asize + k] = roundds((bdataT[j * asize + k] - mean[asize + 1 + j] - mean[k]) * scale);
+ wf[(j >> 2) * 8 + (j & 3)] = (float)(mval / 32767.0);
+ wf[(j >> 2) * 8 + (j & 3) + 4] = (float)(bdataT[boff + j] - mean[asize]);
+ }
+ for (j = nnst; j < nnst * 2; j++) { // elliott neurons
+ double scale, mval = 0.0;
+ for (k = 0; k < asize; k++)
+ mval = FFMAX(mval, FFABS(bdataT[j * asize + k] - mean[asize + 1 + j]));
+ scale = 32767.0 / mval;
+ for (k = 0; k < asize; k++)
+ ws[j * asize + k] = roundds((bdataT[j * asize + k] - mean[asize + 1 + j]) * scale);
+ wf[(j >> 2) * 8 + (j & 3)] = (float)(mval / 32767.0);
+ wf[(j >> 2) * 8 + (j & 3) + 4] = bdataT[boff + j];
+ }
+ } else { // use float dot products
+ // Factor mean removal into weights, and remove global
+ // offset from softmax neurons.
+ for (j = 0; j < nnst * 2; j++) {
+ for (k = 0; k < asize; k++) {
+ const double q = j < nnst ? mean[k] : 0.0;
+ s->weights1[i][j * asize + k] = (float)(bdataT[j * asize + k] - mean[asize + 1 + j] - q);
+ }
+ s->weights1[i][boff + j] = (float)(bdataT[boff + j] - (j < nnst ? mean[asize] : 0.0));
+ }
+ }
+ av_free(mean);
+ }
+
+ s->nns = nns_table[s->nnsparam];
+ s->xdia = xdia_table[s->nsize];
+ s->ydia = ydia_table[s->nsize];
+ s->asize = xdia_table[s->nsize] * ydia_table[s->nsize];
+
+ s->max_value = 65535 >> 8;
+
+ select_functions(s);
+
+ s->fdsp = avpriv_float_dsp_alloc(0);
+ if (!s->fdsp)
+ ret = AVERROR(ENOMEM);
+
+fail:
+ av_free(bdata);
+ return ret;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ NNEDIContext *s = ctx->priv;
+ int i;
+
+ av_freep(&s->weights0);
+
+ for (i = 0; i < 2; i++)
+ av_freep(&s->weights1[i]);
+
+ for (i = 0; i < s->nb_planes; i++) {
+ av_freep(&s->frame_data.paddedp[i]);
+ av_freep(&s->frame_data.lcount[i]);
+ }
+
+ av_freep(&s->frame_data.input);
+ av_freep(&s->frame_data.temp);
+ av_freep(&s->fdsp);
+ av_frame_free(&s->second);
+}
+
+static const AVFilterPad inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ { NULL }
+};
+
+static const AVFilterPad outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_nnedi = {
+ .name = "nnedi",
+ .description = NULL_IF_CONFIG_SMALL("Apply neural network edge directed interpolation intra-only deinterlacer."),
+ .priv_size = sizeof(NNEDIContext),
+ .priv_class = &nnedi_class,
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = inputs,
+ .outputs = outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
+};
diff --git a/libavfilter/vf_noise.c b/libavfilter/vf_noise.c
new file mode 100644
index 0000000000..abdf04708b
--- /dev/null
+++ b/libavfilter/vf_noise.c
@@ -0,0 +1,351 @@
+/*
+ * Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (c) 2013 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * noise generator
+ */
+
+#include "libavutil/opt.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/lfg.h"
+#include "libavutil/parseutils.h"
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "vf_noise.h"
+#include "video.h"
+
+typedef struct ThreadData {
+ AVFrame *in, *out;
+} ThreadData;
+
+#define OFFSET(x) offsetof(NoiseContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+#define NOISE_PARAMS(name, x, param) \
+ {#name"_seed", "set component #"#x" noise seed", OFFSET(param.seed), AV_OPT_TYPE_INT, {.i64=-1}, -1, INT_MAX, FLAGS}, \
+ {#name"_strength", "set component #"#x" strength", OFFSET(param.strength), AV_OPT_TYPE_INT, {.i64=0}, 0, 100, FLAGS}, \
+ {#name"s", "set component #"#x" strength", OFFSET(param.strength), AV_OPT_TYPE_INT, {.i64=0}, 0, 100, FLAGS}, \
+ {#name"_flags", "set component #"#x" flags", OFFSET(param.flags), AV_OPT_TYPE_FLAGS, {.i64=0}, 0, 31, FLAGS, #name"_flags"}, \
+ {#name"f", "set component #"#x" flags", OFFSET(param.flags), AV_OPT_TYPE_FLAGS, {.i64=0}, 0, 31, FLAGS, #name"_flags"}, \
+ {"a", "averaged noise", 0, AV_OPT_TYPE_CONST, {.i64=NOISE_AVERAGED}, 0, 0, FLAGS, #name"_flags"}, \
+ {"p", "(semi)regular pattern", 0, AV_OPT_TYPE_CONST, {.i64=NOISE_PATTERN}, 0, 0, FLAGS, #name"_flags"}, \
+ {"t", "temporal noise", 0, AV_OPT_TYPE_CONST, {.i64=NOISE_TEMPORAL}, 0, 0, FLAGS, #name"_flags"}, \
+ {"u", "uniform noise", 0, AV_OPT_TYPE_CONST, {.i64=NOISE_UNIFORM}, 0, 0, FLAGS, #name"_flags"},
+
+static const AVOption noise_options[] = {
+ NOISE_PARAMS(all, 0, all)
+ NOISE_PARAMS(c0, 0, param[0])
+ NOISE_PARAMS(c1, 1, param[1])
+ NOISE_PARAMS(c2, 2, param[2])
+ NOISE_PARAMS(c3, 3, param[3])
+ {NULL}
+};
+
+AVFILTER_DEFINE_CLASS(noise);
+
+static const int8_t patt[4] = { -1, 0, 1, 0 };
+
+#define RAND_N(range) ((int) ((double) range * av_lfg_get(lfg) / (UINT_MAX + 1.0)))
+static av_cold int init_noise(NoiseContext *n, int comp)
+{
+ int8_t *noise = av_malloc(MAX_NOISE * sizeof(int8_t));
+ FilterParams *fp = &n->param[comp];
+ AVLFG *lfg = &n->param[comp].lfg;
+ int strength = fp->strength;
+ int flags = fp->flags;
+ int i, j;
+
+ if (!noise)
+ return AVERROR(ENOMEM);
+
+ av_lfg_init(&fp->lfg, fp->seed + comp*31415U);
+
+ for (i = 0, j = 0; i < MAX_NOISE; i++, j++) {
+ if (flags & NOISE_UNIFORM) {
+ if (flags & NOISE_AVERAGED) {
+ if (flags & NOISE_PATTERN) {
+ noise[i] = (RAND_N(strength) - strength / 2) / 6
+ + patt[j % 4] * strength * 0.25 / 3;
+ } else {
+ noise[i] = (RAND_N(strength) - strength / 2) / 3;
+ }
+ } else {
+ if (flags & NOISE_PATTERN) {
+ noise[i] = (RAND_N(strength) - strength / 2) / 2
+ + patt[j % 4] * strength * 0.25;
+ } else {
+ noise[i] = RAND_N(strength) - strength / 2;
+ }
+ }
+ } else {
+ double x1, x2, w, y1;
+ do {
+ x1 = 2.0 * av_lfg_get(lfg) / (float)UINT_MAX - 1.0;
+ x2 = 2.0 * av_lfg_get(lfg) / (float)UINT_MAX - 1.0;
+ w = x1 * x1 + x2 * x2;
+ } while (w >= 1.0);
+
+ w = sqrt((-2.0 * log(w)) / w);
+ y1 = x1 * w;
+ y1 *= strength / sqrt(3.0);
+ if (flags & NOISE_PATTERN) {
+ y1 /= 2;
+ y1 += patt[j % 4] * strength * 0.35;
+ }
+ y1 = av_clipf(y1, -128, 127);
+ if (flags & NOISE_AVERAGED)
+ y1 /= 3.0;
+ noise[i] = (int)y1;
+ }
+ if (RAND_N(6) == 0)
+ j--;
+ }
+
+ for (i = 0; i < MAX_RES; i++)
+ for (j = 0; j < 3; j++)
+ fp->prev_shift[i][j] = noise + (av_lfg_get(lfg) & (MAX_SHIFT - 1));
+
+ fp->noise = noise;
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *formats = NULL;
+ int fmt, ret;
+
+ for (fmt = 0; av_pix_fmt_desc_get(fmt); fmt++) {
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
+ if (desc->flags & AV_PIX_FMT_FLAG_PLANAR && !(desc->comp[0].depth & 7)
+ && (ret = ff_add_format(&formats, fmt)) < 0)
+ return ret;
+ }
+
+ return ff_set_common_formats(ctx, formats);
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ NoiseContext *n = inlink->dst->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ int ret;
+
+ n->nb_planes = av_pix_fmt_count_planes(inlink->format);
+
+ if ((ret = av_image_fill_linesizes(n->bytewidth, inlink->format, inlink->w)) < 0)
+ return ret;
+
+ n->height[1] = n->height[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
+ n->height[0] = n->height[3] = inlink->h;
+
+ return 0;
+}
+
+void ff_line_noise_c(uint8_t *dst, const uint8_t *src, const int8_t *noise,
+ int len, int shift)
+{
+ int i;
+
+ noise += shift;
+ for (i = 0; i < len; i++) {
+ int v = src[i] + noise[i];
+
+ dst[i] = av_clip_uint8(v);
+ }
+}
+
+void ff_line_noise_avg_c(uint8_t *dst, const uint8_t *src,
+ int len, const int8_t * const *shift)
+{
+ int i;
+ const int8_t *src2 = (const int8_t*)src;
+
+ for (i = 0; i < len; i++) {
+ const int n = shift[0][i] + shift[1][i] + shift[2][i];
+ dst[i] = src2[i] + ((n * src2[i]) >> 7);
+ }
+}
+
+static void noise(uint8_t *dst, const uint8_t *src,
+ int dst_linesize, int src_linesize,
+ int width, int start, int end, NoiseContext *n, int comp)
+{
+ FilterParams *p = &n->param[comp];
+ int8_t *noise = p->noise;
+ const int flags = p->flags;
+ int y;
+
+ if (!noise) {
+ if (dst != src)
+ av_image_copy_plane(dst, dst_linesize, src, src_linesize, width, end - start);
+ return;
+ }
+
+ for (y = start; y < end; y++) {
+ const int ix = y & (MAX_RES - 1);
+ int x;
+ for (x=0; x < width; x+= MAX_RES) {
+ int w = FFMIN(width - x, MAX_RES);
+ int shift = p->rand_shift[ix];
+
+ if (flags & NOISE_AVERAGED) {
+ n->line_noise_avg(dst + x, src + x, w, (const int8_t**)p->prev_shift[ix]);
+ p->prev_shift[ix][shift & 3] = noise + shift;
+ } else {
+ n->line_noise(dst + x, src + x, noise, w, shift);
+ }
+ }
+ dst += dst_linesize;
+ src += src_linesize;
+ }
+}
+
+static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
+{
+ NoiseContext *s = ctx->priv;
+ ThreadData *td = arg;
+ int plane;
+
+ for (plane = 0; plane < s->nb_planes; plane++) {
+ const int height = s->height[plane];
+ const int start = (height * jobnr ) / nb_jobs;
+ const int end = (height * (jobnr+1)) / nb_jobs;
+ noise(td->out->data[plane] + start * td->out->linesize[plane],
+ td->in->data[plane] + start * td->in->linesize[plane],
+ td->out->linesize[plane], td->in->linesize[plane],
+ s->bytewidth[plane], start, end, s, plane);
+ }
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ NoiseContext *n = ctx->priv;
+ ThreadData td;
+ AVFrame *out;
+ int comp, i;
+
+ if (av_frame_is_writable(inpicref)) {
+ out = inpicref;
+ } else {
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&inpicref);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, inpicref);
+ }
+
+ for (comp = 0; comp < 4; comp++) {
+ FilterParams *fp = &n->param[comp];
+
+ if ((!fp->rand_shift_init || (fp->flags & NOISE_TEMPORAL)) && fp->strength) {
+
+ for (i = 0; i < MAX_RES; i++) {
+ fp->rand_shift[i] = av_lfg_get(&fp->lfg) & (MAX_SHIFT - 1);
+ }
+ fp->rand_shift_init = 1;
+ }
+ }
+
+ td.in = inpicref; td.out = out;
+ ctx->internal->execute(ctx, filter_slice, &td, NULL, FFMIN(n->height[0], ff_filter_get_nb_threads(ctx)));
+ emms_c();
+
+ if (inpicref != out)
+ av_frame_free(&inpicref);
+ return ff_filter_frame(outlink, out);
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ NoiseContext *n = ctx->priv;
+ int ret, i;
+
+ for (i = 0; i < 4; i++) {
+ if (n->all.seed >= 0)
+ n->param[i].seed = n->all.seed;
+ else
+ n->param[i].seed = 123457;
+ if (n->all.strength)
+ n->param[i].strength = n->all.strength;
+ if (n->all.flags)
+ n->param[i].flags = n->all.flags;
+ }
+
+ for (i = 0; i < 4; i++) {
+ if (n->param[i].strength && ((ret = init_noise(n, i)) < 0))
+ return ret;
+ }
+
+ n->line_noise = ff_line_noise_c;
+ n->line_noise_avg = ff_line_noise_avg_c;
+
+ if (ARCH_X86)
+ ff_noise_init_x86(n);
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ NoiseContext *n = ctx->priv;
+ int i;
+
+ for (i = 0; i < 4; i++)
+ av_freep(&n->param[i].noise);
+}
+
+static const AVFilterPad noise_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ { NULL }
+};
+
+static const AVFilterPad noise_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_noise = {
+ .name = "noise",
+ .description = NULL_IF_CONFIG_SMALL("Add noise."),
+ .priv_size = sizeof(NoiseContext),
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = noise_inputs,
+ .outputs = noise_outputs,
+ .priv_class = &noise_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
+};
diff --git a/libavfilter/vf_noise.h b/libavfilter/vf_noise.h
new file mode 100644
index 0000000000..2207ed961f
--- /dev/null
+++ b/libavfilter/vf_noise.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (c) 2013 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_NOISE_H
+#define AVFILTER_NOISE_H
+
+#include "libavutil/lfg.h"
+#include "avfilter.h"
+
+#define MAX_NOISE 5120
+#define MAX_SHIFT 1024
+#define MAX_RES (MAX_NOISE-MAX_SHIFT)
+
+#define NOISE_UNIFORM 1
+#define NOISE_TEMPORAL 2
+#define NOISE_AVERAGED 8
+#define NOISE_PATTERN 16
+
+typedef struct {
+ int strength;
+ unsigned flags;
+ AVLFG lfg;
+ int seed;
+ int8_t *noise;
+ int8_t *prev_shift[MAX_RES][3];
+ int rand_shift[MAX_RES];
+ int rand_shift_init;
+} FilterParams;
+
+typedef struct {
+ const AVClass *class;
+ int nb_planes;
+ int bytewidth[4];
+ int height[4];
+ FilterParams all;
+ FilterParams param[4];
+ void (*line_noise)(uint8_t *dst, const uint8_t *src, const int8_t *noise, int len, int shift);
+ void (*line_noise_avg)(uint8_t *dst, const uint8_t *src, int len, const int8_t * const *shift);
+} NoiseContext;
+
+void ff_line_noise_c(uint8_t *dst, const uint8_t *src, const int8_t *noise, int len, int shift);
+void ff_line_noise_avg_c(uint8_t *dst, const uint8_t *src, int len, const int8_t * const *shift);
+
+void ff_noise_init_x86(NoiseContext *n);
+
+#endif /* AVFILTER_NOISE_H */
diff --git a/libavfilter/vf_null.c b/libavfilter/vf_null.c
index f87258707d..2355615229 100644
--- a/libavfilter/vf_null.c
+++ b/libavfilter/vf_null.c
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -28,9 +28,8 @@
static const AVFilterPad avfilter_vf_null_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .get_video_buffer = ff_null_get_video_buffer,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
@@ -44,12 +43,8 @@ static const AVFilterPad avfilter_vf_null_outputs[] = {
};
AVFilter ff_vf_null = {
- .name = "null",
+ .name = "null",
.description = NULL_IF_CONFIG_SMALL("Pass the source unchanged to the output."),
-
- .priv_size = 0,
-
- .inputs = avfilter_vf_null_inputs,
-
- .outputs = avfilter_vf_null_outputs,
+ .inputs = avfilter_vf_null_inputs,
+ .outputs = avfilter_vf_null_outputs,
};
diff --git a/libavfilter/vf_ocr.c b/libavfilter/vf_ocr.c
new file mode 100644
index 0000000000..870dd68841
--- /dev/null
+++ b/libavfilter/vf_ocr.c
@@ -0,0 +1,151 @@
+/*
+ * Copyright (c) 2015 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <tesseract/capi.h>
+
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+typedef struct OCRContext {
+ const AVClass *class;
+
+ char *datapath;
+ char *language;
+ char *whitelist;
+ char *blacklist;
+
+ TessBaseAPI *tess;
+} OCRContext;
+
+#define OFFSET(x) offsetof(OCRContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption ocr_options[] = {
+ { "datapath", "set datapath", OFFSET(datapath), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, FLAGS },
+ { "language", "set language", OFFSET(language), AV_OPT_TYPE_STRING, {.str="eng"}, 0, 0, FLAGS },
+ { "whitelist", "set character whitelist", OFFSET(whitelist), AV_OPT_TYPE_STRING, {.str="0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ.:;,-+_!?\"'[]{}()<>|/\\=*&%$#@!~"}, 0, 0, FLAGS },
+ { "blacklist", "set character blacklist", OFFSET(blacklist), AV_OPT_TYPE_STRING, {.str=""}, 0, 0, FLAGS },
+ { NULL }
+};
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ OCRContext *s = ctx->priv;
+
+ s->tess = TessBaseAPICreate();
+ if (TessBaseAPIInit3(s->tess, s->datapath, s->language) == -1) {
+ av_log(ctx, AV_LOG_ERROR, "failed to init tesseract\n");
+ return AVERROR(EINVAL);
+ }
+
+ if (!TessBaseAPISetVariable(s->tess, "tessedit_char_whitelist", s->whitelist)) {
+ av_log(ctx, AV_LOG_ERROR, "failed to set whitelist\n");
+ return AVERROR(EINVAL);
+ }
+
+ if (!TessBaseAPISetVariable(s->tess, "tessedit_char_blacklist", s->blacklist)) {
+ av_log(ctx, AV_LOG_ERROR, "failed to set blacklist\n");
+ return AVERROR(EINVAL);
+ }
+
+ av_log(ctx, AV_LOG_DEBUG, "Tesseract version: %s\n", TessVersion());
+
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_GRAY8,
+ AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
+ AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P,
+ AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P,
+ AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ444P,
+ AV_PIX_FMT_YUVJ411P,
+ AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA420P,
+ AV_PIX_FMT_NONE
+ };
+
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ ff_set_common_formats(ctx, fmts_list);
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVDictionary **metadata = avpriv_frame_get_metadatap(in);
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ OCRContext *s = ctx->priv;
+ char *result;
+
+ result = TessBaseAPIRect(s->tess, in->data[0], 1,
+ in->linesize[0], 0, 0, in->width, in->height);
+ av_dict_set(metadata, "lavfi.ocr.text", result, 0);
+ TessDeleteText(result);
+
+ return ff_filter_frame(outlink, in);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ OCRContext *s = ctx->priv;
+
+ TessBaseAPIEnd(s->tess);
+ TessBaseAPIDelete(s->tess);
+}
+
+AVFILTER_DEFINE_CLASS(ocr);
+
+static const AVFilterPad ocr_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad ocr_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_ocr = {
+ .name = "ocr",
+ .description = NULL_IF_CONFIG_SMALL("Optical Character Recognition."),
+ .priv_size = sizeof(OCRContext),
+ .priv_class = &ocr_class,
+ .query_formats = query_formats,
+ .init = init,
+ .uninit = uninit,
+ .inputs = ocr_inputs,
+ .outputs = ocr_outputs,
+};
diff --git a/libavfilter/vf_overlay.c b/libavfilter/vf_overlay.c
index 2fa791d001..bbcd6b55cd 100644
--- a/libavfilter/vf_overlay.c
+++ b/libavfilter/vf_overlay.c
@@ -3,20 +3,20 @@
* Copyright (c) 2010 Baptiste Coudurier
* Copyright (c) 2007 Bobby Bingham
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -30,33 +30,43 @@
#include "libavutil/common.h"
#include "libavutil/eval.h"
#include "libavutil/avstring.h"
-#include "libavutil/avassert.h"
#include "libavutil/pixdesc.h"
#include "libavutil/imgutils.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
+#include "libavutil/timestamp.h"
#include "internal.h"
+#include "dualinput.h"
+#include "drawutils.h"
#include "video.h"
static const char *const var_names[] = {
- "E",
- "PHI",
- "PI",
"main_w", "W", ///< width of the main video
"main_h", "H", ///< height of the main video
"overlay_w", "w", ///< width of the overlay video
"overlay_h", "h", ///< height of the overlay video
+ "hsub",
+ "vsub",
+ "x",
+ "y",
+ "n", ///< number of frame
+ "pos", ///< position in the file
+ "t", ///< timestamp expressed in seconds
NULL
};
enum var_name {
- VAR_E,
- VAR_PHI,
- VAR_PI,
VAR_MAIN_W, VAR_MW,
VAR_MAIN_H, VAR_MH,
VAR_OVERLAY_W, VAR_OW,
VAR_OVERLAY_H, VAR_OH,
+ VAR_HSUB,
+ VAR_VSUB,
+ VAR_X,
+ VAR_Y,
+ VAR_N,
+ VAR_POS,
+ VAR_T,
VAR_VARS_NB
};
@@ -66,127 +76,310 @@ enum EOFAction {
EOF_ACTION_PASS
};
-static const char *eof_action_str[] = {
+static const char * const eof_action_str[] = {
"repeat", "endall", "pass"
};
#define MAIN 0
#define OVERLAY 1
+#define R 0
+#define G 1
+#define B 2
+#define A 3
+
+#define Y 0
+#define U 1
+#define V 2
+
+enum EvalMode {
+ EVAL_MODE_INIT,
+ EVAL_MODE_FRAME,
+ EVAL_MODE_NB
+};
+
+enum OverlayFormat {
+ OVERLAY_FORMAT_YUV420,
+ OVERLAY_FORMAT_YUV422,
+ OVERLAY_FORMAT_YUV444,
+ OVERLAY_FORMAT_RGB,
+ OVERLAY_FORMAT_GBRP,
+ OVERLAY_FORMAT_NB
+};
+
typedef struct OverlayContext {
const AVClass *class;
int x, y; ///< position of overlaid picture
- int max_plane_step[4]; ///< steps per pixel for each plane
+ int allow_packed_rgb;
+ uint8_t main_is_packed_rgb;
+ uint8_t main_rgba_map[4];
+ uint8_t main_has_alpha;
+ uint8_t overlay_is_packed_rgb;
+ uint8_t overlay_rgba_map[4];
+ uint8_t overlay_has_alpha;
+ int format; ///< OverlayFormat
+ int eval_mode; ///< EvalMode
+
+ FFDualInputContext dinput;
+
+ int main_pix_step[4]; ///< steps per pixel for each plane of the main output
+ int overlay_pix_step[4]; ///< steps per pixel for each plane of the overlay
int hsub, vsub; ///< chroma subsampling values
+ const AVPixFmtDescriptor *main_desc; ///< format descriptor for main input
+ double var_values[VAR_VARS_NB];
char *x_expr, *y_expr;
- enum EOFAction eof_action; ///< action to take on EOF from source
+ int eof_action; ///< action to take on EOF from source
+
+ AVExpr *x_pexpr, *y_pexpr;
- AVFrame *main;
- AVFrame *over_prev, *over_next;
+ void (*blend_image)(AVFilterContext *ctx, AVFrame *dst, const AVFrame *src, int x, int y);
} OverlayContext;
static av_cold void uninit(AVFilterContext *ctx)
{
OverlayContext *s = ctx->priv;
- av_frame_free(&s->main);
- av_frame_free(&s->over_prev);
- av_frame_free(&s->over_next);
+ ff_dualinput_uninit(&s->dinput);
+ av_expr_free(s->x_pexpr); s->x_pexpr = NULL;
+ av_expr_free(s->y_pexpr); s->y_pexpr = NULL;
}
-static int query_formats(AVFilterContext *ctx)
+static inline int normalize_xy(double d, int chroma_sub)
+{
+ if (isnan(d))
+ return INT_MAX;
+ return (int)d & ~((1 << chroma_sub) - 1);
+}
+
+static void eval_expr(AVFilterContext *ctx)
{
- static const enum AVPixelFormat inout_pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
- static const enum AVPixelFormat blend_pix_fmts[] = { AV_PIX_FMT_YUVA420P, AV_PIX_FMT_NONE };
- AVFilterFormats *inout_formats = ff_make_format_list(inout_pix_fmts);
- AVFilterFormats *blend_formats = ff_make_format_list(blend_pix_fmts);
+ OverlayContext *s = ctx->priv;
- ff_formats_ref(inout_formats, &ctx->inputs [MAIN ]->out_formats);
- ff_formats_ref(blend_formats, &ctx->inputs [OVERLAY]->out_formats);
- ff_formats_ref(inout_formats, &ctx->outputs[MAIN ]->in_formats );
+ s->var_values[VAR_X] = av_expr_eval(s->x_pexpr, s->var_values, NULL);
+ s->var_values[VAR_Y] = av_expr_eval(s->y_pexpr, s->var_values, NULL);
+ s->var_values[VAR_X] = av_expr_eval(s->x_pexpr, s->var_values, NULL);
+ s->x = normalize_xy(s->var_values[VAR_X], s->hsub);
+ s->y = normalize_xy(s->var_values[VAR_Y], s->vsub);
+}
+static int set_expr(AVExpr **pexpr, const char *expr, const char *option, void *log_ctx)
+{
+ int ret;
+ AVExpr *old = NULL;
+
+ if (*pexpr)
+ old = *pexpr;
+ ret = av_expr_parse(pexpr, expr, var_names,
+ NULL, NULL, NULL, NULL, 0, log_ctx);
+ if (ret < 0) {
+ av_log(log_ctx, AV_LOG_ERROR,
+ "Error when evaluating the expression '%s' for %s\n",
+ expr, option);
+ *pexpr = old;
+ return ret;
+ }
+
+ av_expr_free(old);
return 0;
}
-static int config_input_main(AVFilterLink *inlink)
+static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
+ char *res, int res_len, int flags)
{
- OverlayContext *s = inlink->dst->priv;
- const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
+ OverlayContext *s = ctx->priv;
+ int ret;
- av_image_fill_max_pixsteps(s->max_plane_step, NULL, pix_desc);
- s->hsub = pix_desc->log2_chroma_w;
- s->vsub = pix_desc->log2_chroma_h;
+ if (!strcmp(cmd, "x"))
+ ret = set_expr(&s->x_pexpr, args, cmd, ctx);
+ else if (!strcmp(cmd, "y"))
+ ret = set_expr(&s->y_pexpr, args, cmd, ctx);
+ else
+ ret = AVERROR(ENOSYS);
+
+ if (ret < 0)
+ return ret;
+
+ if (s->eval_mode == EVAL_MODE_INIT) {
+ eval_expr(ctx);
+ av_log(ctx, AV_LOG_VERBOSE, "x:%f xi:%d y:%f yi:%d\n",
+ s->var_values[VAR_X], s->x,
+ s->var_values[VAR_Y], s->y);
+ }
+ return ret;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ OverlayContext *s = ctx->priv;
+
+ /* overlay formats contains alpha, for avoiding conversion with alpha information loss */
+ static const enum AVPixelFormat main_pix_fmts_yuv420[] = {
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVA420P,
+ AV_PIX_FMT_NV12, AV_PIX_FMT_NV21,
+ AV_PIX_FMT_NONE
+ };
+ static const enum AVPixelFormat overlay_pix_fmts_yuv420[] = {
+ AV_PIX_FMT_YUVA420P, AV_PIX_FMT_NONE
+ };
+
+ static const enum AVPixelFormat main_pix_fmts_yuv422[] = {
+ AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_NONE
+ };
+ static const enum AVPixelFormat overlay_pix_fmts_yuv422[] = {
+ AV_PIX_FMT_YUVA422P, AV_PIX_FMT_NONE
+ };
+
+ static const enum AVPixelFormat main_pix_fmts_yuv444[] = {
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVA444P, AV_PIX_FMT_NONE
+ };
+ static const enum AVPixelFormat overlay_pix_fmts_yuv444[] = {
+ AV_PIX_FMT_YUVA444P, AV_PIX_FMT_NONE
+ };
+
+ static const enum AVPixelFormat main_pix_fmts_gbrp[] = {
+ AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP, AV_PIX_FMT_NONE
+ };
+ static const enum AVPixelFormat overlay_pix_fmts_gbrp[] = {
+ AV_PIX_FMT_GBRAP, AV_PIX_FMT_NONE
+ };
+
+ static const enum AVPixelFormat main_pix_fmts_rgb[] = {
+ AV_PIX_FMT_ARGB, AV_PIX_FMT_RGBA,
+ AV_PIX_FMT_ABGR, AV_PIX_FMT_BGRA,
+ AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
+ AV_PIX_FMT_NONE
+ };
+ static const enum AVPixelFormat overlay_pix_fmts_rgb[] = {
+ AV_PIX_FMT_ARGB, AV_PIX_FMT_RGBA,
+ AV_PIX_FMT_ABGR, AV_PIX_FMT_BGRA,
+ AV_PIX_FMT_NONE
+ };
+
+ AVFilterFormats *main_formats = NULL;
+ AVFilterFormats *overlay_formats = NULL;
+ int ret;
+
+ switch (s->format) {
+ case OVERLAY_FORMAT_YUV420:
+ if (!(main_formats = ff_make_format_list(main_pix_fmts_yuv420)) ||
+ !(overlay_formats = ff_make_format_list(overlay_pix_fmts_yuv420))) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+ break;
+ case OVERLAY_FORMAT_YUV422:
+ if (!(main_formats = ff_make_format_list(main_pix_fmts_yuv422)) ||
+ !(overlay_formats = ff_make_format_list(overlay_pix_fmts_yuv422))) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+ break;
+ case OVERLAY_FORMAT_YUV444:
+ if (!(main_formats = ff_make_format_list(main_pix_fmts_yuv444)) ||
+ !(overlay_formats = ff_make_format_list(overlay_pix_fmts_yuv444))) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+ break;
+ case OVERLAY_FORMAT_RGB:
+ if (!(main_formats = ff_make_format_list(main_pix_fmts_rgb)) ||
+ !(overlay_formats = ff_make_format_list(overlay_pix_fmts_rgb))) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+ break;
+ case OVERLAY_FORMAT_GBRP:
+ if (!(main_formats = ff_make_format_list(main_pix_fmts_gbrp)) ||
+ !(overlay_formats = ff_make_format_list(overlay_pix_fmts_gbrp))) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+ break;
+ default:
+ av_assert0(0);
+ }
+
+ if ((ret = ff_formats_ref(main_formats , &ctx->inputs[MAIN]->out_formats )) < 0 ||
+ (ret = ff_formats_ref(overlay_formats, &ctx->inputs[OVERLAY]->out_formats)) < 0 ||
+ (ret = ff_formats_ref(main_formats , &ctx->outputs[MAIN]->in_formats )) < 0)
+ goto fail;
return 0;
+fail:
+ if (main_formats)
+ av_freep(&main_formats->formats);
+ av_freep(&main_formats);
+ if (overlay_formats)
+ av_freep(&overlay_formats->formats);
+ av_freep(&overlay_formats);
+ return ret;
}
+static const enum AVPixelFormat alpha_pix_fmts[] = {
+ AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P,
+ AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR, AV_PIX_FMT_RGBA,
+ AV_PIX_FMT_BGRA, AV_PIX_FMT_GBRAP, AV_PIX_FMT_NONE
+};
+
static int config_input_overlay(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->dst;
OverlayContext *s = inlink->dst->priv;
- char *expr;
- double var_values[VAR_VARS_NB], res;
int ret;
+ const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
+
+ av_image_fill_max_pixsteps(s->overlay_pix_step, NULL, pix_desc);
/* Finish the configuration by evaluating the expressions
now when both inputs are configured. */
- var_values[VAR_E ] = M_E;
- var_values[VAR_PHI] = M_PHI;
- var_values[VAR_PI ] = M_PI;
-
- var_values[VAR_MAIN_W ] = var_values[VAR_MW] = ctx->inputs[MAIN ]->w;
- var_values[VAR_MAIN_H ] = var_values[VAR_MH] = ctx->inputs[MAIN ]->h;
- var_values[VAR_OVERLAY_W] = var_values[VAR_OW] = ctx->inputs[OVERLAY]->w;
- var_values[VAR_OVERLAY_H] = var_values[VAR_OH] = ctx->inputs[OVERLAY]->h;
-
- if ((ret = av_expr_parse_and_eval(&res, (expr = s->x_expr), var_names, var_values,
- NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
- goto fail;
- s->x = res;
- if ((ret = av_expr_parse_and_eval(&res, (expr = s->y_expr), var_names, var_values,
- NULL, NULL, NULL, NULL, NULL, 0, ctx)))
- goto fail;
- s->y = res;
- /* x may depend on y */
- if ((ret = av_expr_parse_and_eval(&res, (expr = s->x_expr), var_names, var_values,
- NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
- goto fail;
- s->x = res;
+ s->var_values[VAR_MAIN_W ] = s->var_values[VAR_MW] = ctx->inputs[MAIN ]->w;
+ s->var_values[VAR_MAIN_H ] = s->var_values[VAR_MH] = ctx->inputs[MAIN ]->h;
+ s->var_values[VAR_OVERLAY_W] = s->var_values[VAR_OW] = ctx->inputs[OVERLAY]->w;
+ s->var_values[VAR_OVERLAY_H] = s->var_values[VAR_OH] = ctx->inputs[OVERLAY]->h;
+ s->var_values[VAR_HSUB] = 1<<pix_desc->log2_chroma_w;
+ s->var_values[VAR_VSUB] = 1<<pix_desc->log2_chroma_h;
+ s->var_values[VAR_X] = NAN;
+ s->var_values[VAR_Y] = NAN;
+ s->var_values[VAR_N] = 0;
+ s->var_values[VAR_T] = NAN;
+ s->var_values[VAR_POS] = NAN;
+
+ if ((ret = set_expr(&s->x_pexpr, s->x_expr, "x", ctx)) < 0 ||
+ (ret = set_expr(&s->y_pexpr, s->y_expr, "y", ctx)) < 0)
+ return ret;
+
+ s->overlay_is_packed_rgb =
+ ff_fill_rgba_map(s->overlay_rgba_map, inlink->format) >= 0;
+ s->overlay_has_alpha = ff_fmt_is_in(inlink->format, alpha_pix_fmts);
+
+ if (s->eval_mode == EVAL_MODE_INIT) {
+ eval_expr(ctx);
+ av_log(ctx, AV_LOG_VERBOSE, "x:%f xi:%d y:%f yi:%d\n",
+ s->var_values[VAR_X], s->x,
+ s->var_values[VAR_Y], s->y);
+ }
av_log(ctx, AV_LOG_VERBOSE,
- "main w:%d h:%d fmt:%s overlay x:%d y:%d w:%d h:%d fmt:%s eof_action:%s\n",
+ "main w:%d h:%d fmt:%s overlay w:%d h:%d fmt:%s eof_action:%s\n",
ctx->inputs[MAIN]->w, ctx->inputs[MAIN]->h,
av_get_pix_fmt_name(ctx->inputs[MAIN]->format),
- s->x, s->y,
ctx->inputs[OVERLAY]->w, ctx->inputs[OVERLAY]->h,
av_get_pix_fmt_name(ctx->inputs[OVERLAY]->format),
eof_action_str[s->eof_action]);
-
- if (s->x < 0 || s->y < 0 ||
- s->x + var_values[VAR_OVERLAY_W] > var_values[VAR_MAIN_W] ||
- s->y + var_values[VAR_OVERLAY_H] > var_values[VAR_MAIN_H]) {
- av_log(ctx, AV_LOG_ERROR,
- "Overlay area (%d,%d)<->(%d,%d) not within the main area (0,0)<->(%d,%d) or zero-sized\n",
- s->x, s->y,
- (int)(s->x + var_values[VAR_OVERLAY_W]),
- (int)(s->y + var_values[VAR_OVERLAY_H]),
- (int)var_values[VAR_MAIN_W], (int)var_values[VAR_MAIN_H]);
- return AVERROR(EINVAL);
- }
return 0;
-
-fail:
- av_log(NULL, AV_LOG_ERROR,
- "Error when evaluating the expression '%s'\n", expr);
- return ret;
}
static int config_output(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
+ OverlayContext *s = ctx->priv;
+ int ret;
+
+ if ((ret = ff_dualinput_init(ctx, &s->dinput)) < 0)
+ return ret;
outlink->w = ctx->inputs[MAIN]->w;
outlink->h = ctx->inputs[MAIN]->h;
@@ -195,212 +388,435 @@ static int config_output(AVFilterLink *outlink)
return 0;
}
-static void blend_frame(AVFilterContext *ctx,
- AVFrame *dst, AVFrame *src,
- int x, int y)
+// divide by 255 and round to nearest
+// apply a fast variant: (X+127)/255 = ((X+127)*257+257)>>16 = ((X+128)*257)>>16
+#define FAST_DIV255(x) ((((x) + 128) * 257) >> 16)
+
+// calculate the unpremultiplied alpha, applying the general equation:
+// alpha = alpha_overlay / ( (alpha_main + alpha_overlay) - (alpha_main * alpha_overlay) )
+// (((x) << 16) - ((x) << 9) + (x)) is a faster version of: 255 * 255 * x
+// ((((x) + (y)) << 8) - ((x) + (y)) - (y) * (x)) is a faster version of: 255 * (x + y)
+#define UNPREMULTIPLY_ALPHA(x, y) ((((x) << 16) - ((x) << 9) + (x)) / ((((x) + (y)) << 8) - ((x) + (y)) - (y) * (x)))
+
+/**
+ * Blend image in src to destination buffer dst at position (x, y).
+ */
+
+static void blend_image_packed_rgb(AVFilterContext *ctx,
+ AVFrame *dst, const AVFrame *src,
+ int x, int y)
{
OverlayContext *s = ctx->priv;
- int i, j, k;
- int width, height;
- int overlay_end_y = y + src->height;
- int end_y, start_y;
-
- width = FFMIN(dst->width - x, src->width);
- end_y = FFMIN(dst->height, overlay_end_y);
- start_y = FFMAX(y, 0);
- height = end_y - start_y;
-
- if (dst->format == AV_PIX_FMT_BGR24 || dst->format == AV_PIX_FMT_RGB24) {
- uint8_t *dp = dst->data[0] + x * 3 + start_y * dst->linesize[0];
- uint8_t *sp = src->data[0];
- int b = dst->format == AV_PIX_FMT_BGR24 ? 2 : 0;
- int r = dst->format == AV_PIX_FMT_BGR24 ? 0 : 2;
- if (y < 0)
- sp += -y * src->linesize[0];
- for (i = 0; i < height; i++) {
- uint8_t *d = dp, *s = sp;
- for (j = 0; j < width; j++) {
- d[r] = (d[r] * (0xff - s[3]) + s[0] * s[3] + 128) >> 8;
- d[1] = (d[1] * (0xff - s[3]) + s[1] * s[3] + 128) >> 8;
- d[b] = (d[b] * (0xff - s[3]) + s[2] * s[3] + 128) >> 8;
- d += 3;
- s += 4;
+ int i, imax, j, jmax;
+ const int src_w = src->width;
+ const int src_h = src->height;
+ const int dst_w = dst->width;
+ const int dst_h = dst->height;
+ uint8_t alpha; ///< the amount of overlay to blend on to main
+ const int dr = s->main_rgba_map[R];
+ const int dg = s->main_rgba_map[G];
+ const int db = s->main_rgba_map[B];
+ const int da = s->main_rgba_map[A];
+ const int dstep = s->main_pix_step[0];
+ const int sr = s->overlay_rgba_map[R];
+ const int sg = s->overlay_rgba_map[G];
+ const int sb = s->overlay_rgba_map[B];
+ const int sa = s->overlay_rgba_map[A];
+ const int sstep = s->overlay_pix_step[0];
+ const int main_has_alpha = s->main_has_alpha;
+ uint8_t *S, *sp, *d, *dp;
+
+ i = FFMAX(-y, 0);
+ sp = src->data[0] + i * src->linesize[0];
+ dp = dst->data[0] + (y+i) * dst->linesize[0];
+
+ for (imax = FFMIN(-y + dst_h, src_h); i < imax; i++) {
+ j = FFMAX(-x, 0);
+ S = sp + j * sstep;
+ d = dp + (x+j) * dstep;
+
+ for (jmax = FFMIN(-x + dst_w, src_w); j < jmax; j++) {
+ alpha = S[sa];
+
+ // if the main channel has an alpha channel, alpha has to be calculated
+ // to create an un-premultiplied (straight) alpha value
+ if (main_has_alpha && alpha != 0 && alpha != 255) {
+ uint8_t alpha_d = d[da];
+ alpha = UNPREMULTIPLY_ALPHA(alpha, alpha_d);
}
- dp += dst->linesize[0];
- sp += src->linesize[0];
- }
- } else {
- for (i = 0; i < 3; i++) {
- int hsub = i ? s->hsub : 0;
- int vsub = i ? s->vsub : 0;
- uint8_t *dp = dst->data[i] + (x >> hsub) +
- (start_y >> vsub) * dst->linesize[i];
- uint8_t *sp = src->data[i];
- uint8_t *ap = src->data[3];
- int wp = FFALIGN(width, 1<<hsub) >> hsub;
- int hp = FFALIGN(height, 1<<vsub) >> vsub;
- if (y < 0) {
- sp += ((-y) >> vsub) * src->linesize[i];
- ap += -y * src->linesize[3];
+
+ switch (alpha) {
+ case 0:
+ break;
+ case 255:
+ d[dr] = S[sr];
+ d[dg] = S[sg];
+ d[db] = S[sb];
+ break;
+ default:
+ // main_value = main_value * (1 - alpha) + overlay_value * alpha
+ // since alpha is in the range 0-255, the result must divided by 255
+ d[dr] = FAST_DIV255(d[dr] * (255 - alpha) + S[sr] * alpha);
+ d[dg] = FAST_DIV255(d[dg] * (255 - alpha) + S[sg] * alpha);
+ d[db] = FAST_DIV255(d[db] * (255 - alpha) + S[sb] * alpha);
}
- for (j = 0; j < hp; j++) {
- uint8_t *d = dp, *s = sp, *a = ap;
- for (k = 0; k < wp; k++) {
- // average alpha for color components, improve quality
- int alpha_v, alpha_h, alpha;
- if (hsub && vsub && j+1 < hp && k+1 < wp) {
- alpha = (a[0] + a[src->linesize[3]] +
- a[1] + a[src->linesize[3]+1]) >> 2;
- } else if (hsub || vsub) {
- alpha_h = hsub && k+1 < wp ?
- (a[0] + a[1]) >> 1 : a[0];
- alpha_v = vsub && j+1 < hp ?
- (a[0] + a[src->linesize[3]]) >> 1 : a[0];
- alpha = (alpha_v + alpha_h) >> 1;
- } else
- alpha = a[0];
- *d = (*d * (0xff - alpha) + *s++ * alpha + 128) >> 8;
- d++;
- a += 1 << hsub;
+ if (main_has_alpha) {
+ switch (alpha) {
+ case 0:
+ break;
+ case 255:
+ d[da] = S[sa];
+ break;
+ default:
+ // apply alpha compositing: main_alpha += (1-main_alpha) * overlay_alpha
+ d[da] += FAST_DIV255((255 - d[da]) * S[sa]);
}
- dp += dst->linesize[i];
- sp += src->linesize[i];
- ap += (1 << vsub) * src->linesize[3];
}
+ d += dstep;
+ S += sstep;
}
+ dp += dst->linesize[0];
+ sp += src->linesize[0];
}
}
-static int filter_frame_main(AVFilterLink *inlink, AVFrame *frame)
+static av_always_inline void blend_plane(AVFilterContext *ctx,
+ AVFrame *dst, const AVFrame *src,
+ int src_w, int src_h,
+ int dst_w, int dst_h,
+ int i, int hsub, int vsub,
+ int x, int y,
+ int main_has_alpha,
+ int dst_plane,
+ int dst_offset,
+ int dst_step)
{
- OverlayContext *s = inlink->dst->priv;
+ int src_wp = AV_CEIL_RSHIFT(src_w, hsub);
+ int src_hp = AV_CEIL_RSHIFT(src_h, vsub);
+ int dst_wp = AV_CEIL_RSHIFT(dst_w, hsub);
+ int dst_hp = AV_CEIL_RSHIFT(dst_h, vsub);
+ int yp = y>>vsub;
+ int xp = x>>hsub;
+ uint8_t *s, *sp, *d, *dp, *a, *ap;
+ int jmax, j, k, kmax;
+
+ j = FFMAX(-yp, 0);
+ sp = src->data[i] + j * src->linesize[i];
+ dp = dst->data[dst_plane]
+ + (yp+j) * dst->linesize[dst_plane]
+ + dst_offset;
+ ap = src->data[3] + (j<<vsub) * src->linesize[3];
+
+ for (jmax = FFMIN(-yp + dst_hp, src_hp); j < jmax; j++) {
+ k = FFMAX(-xp, 0);
+ d = dp + (xp+k) * dst_step;
+ s = sp + k;
+ a = ap + (k<<hsub);
+
+ for (kmax = FFMIN(-xp + dst_wp, src_wp); k < kmax; k++) {
+ int alpha_v, alpha_h, alpha;
+
+ // average alpha for color components, improve quality
+ if (hsub && vsub && j+1 < src_hp && k+1 < src_wp) {
+ alpha = (a[0] + a[src->linesize[3]] +
+ a[1] + a[src->linesize[3]+1]) >> 2;
+ } else if (hsub || vsub) {
+ alpha_h = hsub && k+1 < src_wp ?
+ (a[0] + a[1]) >> 1 : a[0];
+ alpha_v = vsub && j+1 < src_hp ?
+ (a[0] + a[src->linesize[3]]) >> 1 : a[0];
+ alpha = (alpha_v + alpha_h) >> 1;
+ } else
+ alpha = a[0];
+ // if the main channel has an alpha channel, alpha has to be calculated
+ // to create an un-premultiplied (straight) alpha value
+ if (main_has_alpha && alpha != 0 && alpha != 255) {
+ // average alpha for color components, improve quality
+ uint8_t alpha_d;
+ if (hsub && vsub && j+1 < src_hp && k+1 < src_wp) {
+ alpha_d = (d[0] + d[src->linesize[3]] +
+ d[1] + d[src->linesize[3]+1]) >> 2;
+ } else if (hsub || vsub) {
+ alpha_h = hsub && k+1 < src_wp ?
+ (d[0] + d[1]) >> 1 : d[0];
+ alpha_v = vsub && j+1 < src_hp ?
+ (d[0] + d[src->linesize[3]]) >> 1 : d[0];
+ alpha_d = (alpha_v + alpha_h) >> 1;
+ } else
+ alpha_d = d[0];
+ alpha = UNPREMULTIPLY_ALPHA(alpha, alpha_d);
+ }
+ *d = FAST_DIV255(*d * (255 - alpha) + *s * alpha);
+ s++;
+ d += dst_step;
+ a += 1 << hsub;
+ }
+ dp += dst->linesize[dst_plane];
+ sp += src->linesize[i];
+ ap += (1 << vsub) * src->linesize[3];
+ }
+}
- av_assert0(!s->main);
- s->main = frame;
+static inline void alpha_composite(const AVFrame *src, const AVFrame *dst,
+ int src_w, int src_h,
+ int dst_w, int dst_h,
+ int x, int y)
+{
+ uint8_t alpha; ///< the amount of overlay to blend on to main
+ uint8_t *s, *sa, *d, *da;
+ int i, imax, j, jmax;
+
+ i = FFMAX(-y, 0);
+ sa = src->data[3] + i * src->linesize[3];
+ da = dst->data[3] + (y+i) * dst->linesize[3];
+
+ for (imax = FFMIN(-y + dst_h, src_h); i < imax; i++) {
+ j = FFMAX(-x, 0);
+ s = sa + j;
+ d = da + x+j;
+
+ for (jmax = FFMIN(-x + dst_w, src_w); j < jmax; j++) {
+ alpha = *s;
+ if (alpha != 0 && alpha != 255) {
+ uint8_t alpha_d = *d;
+ alpha = UNPREMULTIPLY_ALPHA(alpha, alpha_d);
+ }
+ switch (alpha) {
+ case 0:
+ break;
+ case 255:
+ *d = *s;
+ break;
+ default:
+ // apply alpha compositing: main_alpha += (1-main_alpha) * overlay_alpha
+ *d += FAST_DIV255((255 - *d) * *s);
+ }
+ d += 1;
+ s += 1;
+ }
+ da += dst->linesize[3];
+ sa += src->linesize[3];
+ }
+}
- return 0;
+static av_always_inline void blend_image_yuv(AVFilterContext *ctx,
+ AVFrame *dst, const AVFrame *src,
+ int hsub, int vsub,
+ int main_has_alpha,
+ int x, int y)
+{
+ OverlayContext *s = ctx->priv;
+ const int src_w = src->width;
+ const int src_h = src->height;
+ const int dst_w = dst->width;
+ const int dst_h = dst->height;
+
+ if (main_has_alpha)
+ alpha_composite(src, dst, src_w, src_h, dst_w, dst_h, x, y);
+
+ blend_plane(ctx, dst, src, src_w, src_h, dst_w, dst_h, 0, 0, 0, x, y, main_has_alpha,
+ s->main_desc->comp[0].plane, s->main_desc->comp[0].offset, s->main_desc->comp[0].step);
+ blend_plane(ctx, dst, src, src_w, src_h, dst_w, dst_h, 1, hsub, vsub, x, y, main_has_alpha,
+ s->main_desc->comp[1].plane, s->main_desc->comp[1].offset, s->main_desc->comp[1].step);
+ blend_plane(ctx, dst, src, src_w, src_h, dst_w, dst_h, 2, hsub, vsub, x, y, main_has_alpha,
+ s->main_desc->comp[2].plane, s->main_desc->comp[2].offset, s->main_desc->comp[2].step);
}
-static int filter_frame_overlay(AVFilterLink *inlink, AVFrame *frame)
+static av_always_inline void blend_image_rgb(AVFilterContext *ctx,
+ AVFrame *dst, const AVFrame *src,
+ int hsub, int vsub,
+ int main_has_alpha,
+ int x, int y)
{
- OverlayContext *s = inlink->dst->priv;
+ OverlayContext *s = ctx->priv;
+ const int src_w = src->width;
+ const int src_h = src->height;
+ const int dst_w = dst->width;
+ const int dst_h = dst->height;
+
+ if (main_has_alpha)
+ alpha_composite(src, dst, src_w, src_h, dst_w, dst_h, x, y);
+
+ blend_plane(ctx, dst, src, src_w, src_h, dst_w, dst_h, 0, 0, 0, x, y, main_has_alpha,
+ s->main_desc->comp[1].plane, s->main_desc->comp[1].offset, s->main_desc->comp[1].step);
+ blend_plane(ctx, dst, src, src_w, src_h, dst_w, dst_h, 1, hsub, vsub, x, y, main_has_alpha,
+ s->main_desc->comp[2].plane, s->main_desc->comp[2].offset, s->main_desc->comp[2].step);
+ blend_plane(ctx, dst, src, src_w, src_h, dst_w, dst_h, 2, hsub, vsub, x, y, main_has_alpha,
+ s->main_desc->comp[0].plane, s->main_desc->comp[0].offset, s->main_desc->comp[0].step);
+}
- av_assert0(!s->over_next);
- s->over_next = frame;
+static void blend_image_yuv420(AVFilterContext *ctx, AVFrame *dst, const AVFrame *src, int x, int y)
+{
+ OverlayContext *s = ctx->priv;
- return 0;
+ blend_image_yuv(ctx, dst, src, 1, 1, s->main_has_alpha, x, y);
}
-static int output_frame(AVFilterContext *ctx)
+static void blend_image_yuv422(AVFilterContext *ctx, AVFrame *dst, const AVFrame *src, int x, int y)
{
OverlayContext *s = ctx->priv;
- AVFilterLink *outlink = ctx->outputs[0];
- int ret = ff_filter_frame(outlink, s->main);
- s->main = NULL;
- return ret;
+ blend_image_yuv(ctx, dst, src, 1, 0, s->main_has_alpha, x, y);
}
-static int handle_overlay_eof(AVFilterContext *ctx)
+static void blend_image_yuv444(AVFilterContext *ctx, AVFrame *dst, const AVFrame *src, int x, int y)
{
OverlayContext *s = ctx->priv;
- /* Repeat previous frame on secondary input */
- if (s->over_prev && s->eof_action == EOF_ACTION_REPEAT)
- blend_frame(ctx, s->main, s->over_prev, s->x, s->y);
- /* End both streams */
- else if (s->eof_action == EOF_ACTION_ENDALL)
- return AVERROR_EOF;
- return output_frame(ctx);
+
+ blend_image_yuv(ctx, dst, src, 0, 0, s->main_has_alpha, x, y);
}
-static int request_frame(AVFilterLink *outlink)
+static void blend_image_gbrp(AVFilterContext *ctx, AVFrame *dst, const AVFrame *src, int x, int y)
{
- AVFilterContext *ctx = outlink->src;
- OverlayContext *s = ctx->priv;
- AVRational tb_main = ctx->inputs[MAIN]->time_base;
- AVRational tb_over = ctx->inputs[OVERLAY]->time_base;
- int ret = 0;
-
- /* get a frame on the main input */
- if (!s->main) {
- ret = ff_request_frame(ctx->inputs[MAIN]);
- if (ret < 0)
- return ret;
- }
+ OverlayContext *s = ctx->priv;
+
+ blend_image_rgb(ctx, dst, src, 0, 0, s->main_has_alpha, x, y);
+}
+
+static int config_input_main(AVFilterLink *inlink)
+{
+ OverlayContext *s = inlink->dst->priv;
+ const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
+
+ av_image_fill_max_pixsteps(s->main_pix_step, NULL, pix_desc);
+
+ s->hsub = pix_desc->log2_chroma_w;
+ s->vsub = pix_desc->log2_chroma_h;
- /* get a new frame on the overlay input, on EOF check setting 'eof_action' */
- if (!s->over_next) {
- ret = ff_request_frame(ctx->inputs[OVERLAY]);
- if (ret == AVERROR_EOF)
- return handle_overlay_eof(ctx);
- else if (ret < 0)
- return ret;
+ s->main_desc = pix_desc;
+
+ s->main_is_packed_rgb =
+ ff_fill_rgba_map(s->main_rgba_map, inlink->format) >= 0;
+ s->main_has_alpha = ff_fmt_is_in(inlink->format, alpha_pix_fmts);
+ switch (s->format) {
+ case OVERLAY_FORMAT_YUV420:
+ s->blend_image = blend_image_yuv420;
+ break;
+ case OVERLAY_FORMAT_YUV422:
+ s->blend_image = blend_image_yuv422;
+ break;
+ case OVERLAY_FORMAT_YUV444:
+ s->blend_image = blend_image_yuv444;
+ break;
+ case OVERLAY_FORMAT_RGB:
+ s->blend_image = blend_image_packed_rgb;
+ break;
+ case OVERLAY_FORMAT_GBRP:
+ s->blend_image = blend_image_gbrp;
+ break;
}
+ return 0;
+}
- while (s->main->pts != AV_NOPTS_VALUE &&
- s->over_next->pts != AV_NOPTS_VALUE &&
- av_compare_ts(s->over_next->pts, tb_over, s->main->pts, tb_main) < 0) {
- av_frame_free(&s->over_prev);
- FFSWAP(AVFrame*, s->over_prev, s->over_next);
-
- ret = ff_request_frame(ctx->inputs[OVERLAY]);
- if (ret == AVERROR_EOF)
- return handle_overlay_eof(ctx);
- else if (ret < 0)
- return ret;
+static AVFrame *do_blend(AVFilterContext *ctx, AVFrame *mainpic,
+ const AVFrame *second)
+{
+ OverlayContext *s = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
+
+ if (s->eval_mode == EVAL_MODE_FRAME) {
+ int64_t pos = av_frame_get_pkt_pos(mainpic);
+
+ s->var_values[VAR_N] = inlink->frame_count_out;
+ s->var_values[VAR_T] = mainpic->pts == AV_NOPTS_VALUE ?
+ NAN : mainpic->pts * av_q2d(inlink->time_base);
+ s->var_values[VAR_POS] = pos == -1 ? NAN : pos;
+
+ s->var_values[VAR_OVERLAY_W] = s->var_values[VAR_OW] = second->width;
+ s->var_values[VAR_OVERLAY_H] = s->var_values[VAR_OH] = second->height;
+ s->var_values[VAR_MAIN_W ] = s->var_values[VAR_MW] = mainpic->width;
+ s->var_values[VAR_MAIN_H ] = s->var_values[VAR_MH] = mainpic->height;
+
+ eval_expr(ctx);
+ av_log(ctx, AV_LOG_DEBUG, "n:%f t:%f pos:%f x:%f xi:%d y:%f yi:%d\n",
+ s->var_values[VAR_N], s->var_values[VAR_T], s->var_values[VAR_POS],
+ s->var_values[VAR_X], s->x,
+ s->var_values[VAR_Y], s->y);
}
- if (s->main->pts == AV_NOPTS_VALUE ||
- s->over_next->pts == AV_NOPTS_VALUE ||
- !av_compare_ts(s->over_next->pts, tb_over, s->main->pts, tb_main)) {
- blend_frame(ctx, s->main, s->over_next, s->x, s->y);
- av_frame_free(&s->over_prev);
- FFSWAP(AVFrame*, s->over_prev, s->over_next);
- } else if (s->over_prev) {
- blend_frame(ctx, s->main, s->over_prev, s->x, s->y);
+ if (s->x < mainpic->width && s->x + second->width >= 0 ||
+ s->y < mainpic->height && s->y + second->height >= 0)
+ s->blend_image(ctx, mainpic, second, s->x, s->y);
+ return mainpic;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
+{
+ OverlayContext *s = inlink->dst->priv;
+ av_log(inlink->dst, AV_LOG_DEBUG, "Incoming frame (time:%s) from link #%d\n", av_ts2timestr(inpicref->pts, &inlink->time_base), FF_INLINK_IDX(inlink));
+ return ff_dualinput_filter_frame(&s->dinput, inlink, inpicref);
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ OverlayContext *s = outlink->src->priv;
+ return ff_dualinput_request_frame(&s->dinput, outlink);
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ OverlayContext *s = ctx->priv;
+
+ if (s->allow_packed_rgb) {
+ av_log(ctx, AV_LOG_WARNING,
+ "The rgb option is deprecated and is overriding the format option, use format instead\n");
+ s->format = OVERLAY_FORMAT_RGB;
+ }
+ if (!s->dinput.repeatlast || s->eof_action == EOF_ACTION_PASS) {
+ s->dinput.repeatlast = 0;
+ s->eof_action = EOF_ACTION_PASS;
+ }
+ if (s->dinput.shortest || s->eof_action == EOF_ACTION_ENDALL) {
+ s->dinput.shortest = 1;
+ s->eof_action = EOF_ACTION_ENDALL;
}
- return output_frame(ctx);
+ s->dinput.process = do_blend;
+ return 0;
}
#define OFFSET(x) offsetof(OverlayContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
-static const AVOption options[] = {
- { "x", "Horizontal position of the left edge of the overlaid video on the "
- "main video.", OFFSET(x_expr), AV_OPT_TYPE_STRING, { .str = "0" }, .flags = FLAGS },
- { "y", "Vertical position of the top edge of the overlaid video on the "
- "main video.", OFFSET(y_expr), AV_OPT_TYPE_STRING, { .str = "0" }, .flags = FLAGS },
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption overlay_options[] = {
+ { "x", "set the x expression", OFFSET(x_expr), AV_OPT_TYPE_STRING, {.str = "0"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "y", "set the y expression", OFFSET(y_expr), AV_OPT_TYPE_STRING, {.str = "0"}, CHAR_MIN, CHAR_MAX, FLAGS },
{ "eof_action", "Action to take when encountering EOF from secondary input ",
OFFSET(eof_action), AV_OPT_TYPE_INT, { .i64 = EOF_ACTION_REPEAT },
EOF_ACTION_REPEAT, EOF_ACTION_PASS, .flags = FLAGS, "eof_action" },
{ "repeat", "Repeat the previous frame.", 0, AV_OPT_TYPE_CONST, { .i64 = EOF_ACTION_REPEAT }, .flags = FLAGS, "eof_action" },
{ "endall", "End both streams.", 0, AV_OPT_TYPE_CONST, { .i64 = EOF_ACTION_ENDALL }, .flags = FLAGS, "eof_action" },
{ "pass", "Pass through the main input.", 0, AV_OPT_TYPE_CONST, { .i64 = EOF_ACTION_PASS }, .flags = FLAGS, "eof_action" },
- { NULL },
+ { "eval", "specify when to evaluate expressions", OFFSET(eval_mode), AV_OPT_TYPE_INT, {.i64 = EVAL_MODE_FRAME}, 0, EVAL_MODE_NB-1, FLAGS, "eval" },
+ { "init", "eval expressions once during initialization", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_INIT}, .flags = FLAGS, .unit = "eval" },
+ { "frame", "eval expressions per-frame", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_FRAME}, .flags = FLAGS, .unit = "eval" },
+ { "rgb", "force packed RGB in input and output (deprecated)", OFFSET(allow_packed_rgb), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS },
+ { "shortest", "force termination when the shortest input terminates", OFFSET(dinput.shortest), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
+ { "format", "set output format", OFFSET(format), AV_OPT_TYPE_INT, {.i64=OVERLAY_FORMAT_YUV420}, 0, OVERLAY_FORMAT_NB-1, FLAGS, "format" },
+ { "yuv420", "", 0, AV_OPT_TYPE_CONST, {.i64=OVERLAY_FORMAT_YUV420}, .flags = FLAGS, .unit = "format" },
+ { "yuv422", "", 0, AV_OPT_TYPE_CONST, {.i64=OVERLAY_FORMAT_YUV422}, .flags = FLAGS, .unit = "format" },
+ { "yuv444", "", 0, AV_OPT_TYPE_CONST, {.i64=OVERLAY_FORMAT_YUV444}, .flags = FLAGS, .unit = "format" },
+ { "rgb", "", 0, AV_OPT_TYPE_CONST, {.i64=OVERLAY_FORMAT_RGB}, .flags = FLAGS, .unit = "format" },
+ { "gbrp", "", 0, AV_OPT_TYPE_CONST, {.i64=OVERLAY_FORMAT_GBRP}, .flags = FLAGS, .unit = "format" },
+ { "repeatlast", "repeat overlay of the last overlay frame", OFFSET(dinput.repeatlast), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, FLAGS },
+ { NULL }
};
-static const AVClass overlay_class = {
- .class_name = "overlay",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(overlay);
static const AVFilterPad avfilter_vf_overlay_inputs[] = {
{
.name = "main",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_input_main,
- .filter_frame = filter_frame_main,
+ .filter_frame = filter_frame,
.needs_writable = 1,
- .needs_fifo = 1,
},
{
.name = "overlay",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_input_overlay,
- .filter_frame = filter_frame_overlay,
- .needs_fifo = 1,
+ .filter_frame = filter_frame,
},
{ NULL }
};
@@ -416,16 +832,15 @@ static const AVFilterPad avfilter_vf_overlay_outputs[] = {
};
AVFilter ff_vf_overlay = {
- .name = "overlay",
- .description = NULL_IF_CONFIG_SMALL("Overlay a video source on top of the input."),
-
- .uninit = uninit,
-
- .priv_size = sizeof(OverlayContext),
- .priv_class = &overlay_class,
-
+ .name = "overlay",
+ .description = NULL_IF_CONFIG_SMALL("Overlay a video source on top of the input."),
+ .init = init,
+ .uninit = uninit,
+ .priv_size = sizeof(OverlayContext),
+ .priv_class = &overlay_class,
.query_formats = query_formats,
-
- .inputs = avfilter_vf_overlay_inputs,
- .outputs = avfilter_vf_overlay_outputs,
+ .process_command = process_command,
+ .inputs = avfilter_vf_overlay_inputs,
+ .outputs = avfilter_vf_overlay_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
};
diff --git a/libavfilter/vf_owdenoise.c b/libavfilter/vf_owdenoise.c
new file mode 100644
index 0000000000..e0a953fba2
--- /dev/null
+++ b/libavfilter/vf_owdenoise.c
@@ -0,0 +1,377 @@
+/*
+ * Copyright (c) 2007 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (c) 2013 Clément Bœsch <u pkh me>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/**
+ * @todo try to change to int
+ * @todo try lifting based implementation
+ * @todo optimize optimize optimize
+ * @todo hard thresholding
+ * @todo use QP to decide filter strength
+ * @todo wavelet normalization / least squares optimal signal vs. noise thresholds
+ */
+
+#include "libavutil/imgutils.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+#include "internal.h"
+
+typedef struct {
+ const AVClass *class;
+ double luma_strength;
+ double chroma_strength;
+ int depth;
+ float *plane[16+1][4];
+ int linesize;
+ int hsub, vsub;
+ int pixel_depth;
+} OWDenoiseContext;
+
+#define OFFSET(x) offsetof(OWDenoiseContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption owdenoise_options[] = {
+ { "depth", "set depth", OFFSET(depth), AV_OPT_TYPE_INT, {.i64 = 8}, 8, 16, FLAGS },
+ { "luma_strength", "set luma strength", OFFSET(luma_strength), AV_OPT_TYPE_DOUBLE, {.dbl = 1.0}, 0, 1000, FLAGS },
+ { "ls", "set luma strength", OFFSET(luma_strength), AV_OPT_TYPE_DOUBLE, {.dbl = 1.0}, 0, 1000, FLAGS },
+ { "chroma_strength", "set chroma strength", OFFSET(chroma_strength), AV_OPT_TYPE_DOUBLE, {.dbl = 1.0}, 0, 1000, FLAGS },
+ { "cs", "set chroma strength", OFFSET(chroma_strength), AV_OPT_TYPE_DOUBLE, {.dbl = 1.0}, 0, 1000, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(owdenoise);
+
+DECLARE_ALIGNED(8, static const uint8_t, dither)[8][8] = {
+ { 0, 48, 12, 60, 3, 51, 15, 63 },
+ { 32, 16, 44, 28, 35, 19, 47, 31 },
+ { 8, 56, 4, 52, 11, 59, 7, 55 },
+ { 40, 24, 36, 20, 43, 27, 39, 23 },
+ { 2, 50, 14, 62, 1, 49, 13, 61 },
+ { 34, 18, 46, 30, 33, 17, 45, 29 },
+ { 10, 58, 6, 54, 9, 57, 5, 53 },
+ { 42, 26, 38, 22, 41, 25, 37, 21 },
+};
+
+static const double coeff[2][5] = {
+ {
+ 0.6029490182363579 * M_SQRT2,
+ 0.2668641184428723 * M_SQRT2,
+ -0.07822326652898785 * M_SQRT2,
+ -0.01686411844287495 * M_SQRT2,
+ 0.02674875741080976 * M_SQRT2,
+ },{
+ 1.115087052456994 / M_SQRT2,
+ -0.5912717631142470 / M_SQRT2,
+ -0.05754352622849957 / M_SQRT2,
+ 0.09127176311424948 / M_SQRT2,
+ }
+};
+
+static const double icoeff[2][5] = {
+ {
+ 1.115087052456994 / M_SQRT2,
+ 0.5912717631142470 / M_SQRT2,
+ -0.05754352622849957 / M_SQRT2,
+ -0.09127176311424948 / M_SQRT2,
+ },{
+ 0.6029490182363579 * M_SQRT2,
+ -0.2668641184428723 * M_SQRT2,
+ -0.07822326652898785 * M_SQRT2,
+ 0.01686411844287495 * M_SQRT2,
+ 0.02674875741080976 * M_SQRT2,
+ }
+};
+
+
+static inline void decompose(float *dst_l, float *dst_h, const float *src,
+ int linesize, int w)
+{
+ int x, i;
+ for (x = 0; x < w; x++) {
+ double sum_l = src[x * linesize] * coeff[0][0];
+ double sum_h = src[x * linesize] * coeff[1][0];
+ for (i = 1; i <= 4; i++) {
+ const double s = src[avpriv_mirror(x - i, w - 1) * linesize]
+ + src[avpriv_mirror(x + i, w - 1) * linesize];
+
+ sum_l += coeff[0][i] * s;
+ sum_h += coeff[1][i] * s;
+ }
+ dst_l[x * linesize] = sum_l;
+ dst_h[x * linesize] = sum_h;
+ }
+}
+
+static inline void compose(float *dst, const float *src_l, const float *src_h,
+ int linesize, int w)
+{
+ int x, i;
+ for (x = 0; x < w; x++) {
+ double sum_l = src_l[x * linesize] * icoeff[0][0];
+ double sum_h = src_h[x * linesize] * icoeff[1][0];
+ for (i = 1; i <= 4; i++) {
+ const int x0 = avpriv_mirror(x - i, w - 1) * linesize;
+ const int x1 = avpriv_mirror(x + i, w - 1) * linesize;
+
+ sum_l += icoeff[0][i] * (src_l[x0] + src_l[x1]);
+ sum_h += icoeff[1][i] * (src_h[x0] + src_h[x1]);
+ }
+ dst[x * linesize] = (sum_l + sum_h) * 0.5;
+ }
+}
+
+static inline void decompose2D(float *dst_l, float *dst_h, const float *src,
+ int xlinesize, int ylinesize,
+ int step, int w, int h)
+{
+ int y, x;
+ for (y = 0; y < h; y++)
+ for (x = 0; x < step; x++)
+ decompose(dst_l + ylinesize*y + xlinesize*x,
+ dst_h + ylinesize*y + xlinesize*x,
+ src + ylinesize*y + xlinesize*x,
+ step * xlinesize, (w - x + step - 1) / step);
+}
+
+static inline void compose2D(float *dst, const float *src_l, const float *src_h,
+ int xlinesize, int ylinesize,
+ int step, int w, int h)
+{
+ int y, x;
+ for (y = 0; y < h; y++)
+ for (x = 0; x < step; x++)
+ compose(dst + ylinesize*y + xlinesize*x,
+ src_l + ylinesize*y + xlinesize*x,
+ src_h + ylinesize*y + xlinesize*x,
+ step * xlinesize, (w - x + step - 1) / step);
+}
+
+static void decompose2D2(float *dst[4], float *src, float *temp[2],
+ int linesize, int step, int w, int h)
+{
+ decompose2D(temp[0], temp[1], src, 1, linesize, step, w, h);
+ decompose2D( dst[0], dst[1], temp[0], linesize, 1, step, h, w);
+ decompose2D( dst[2], dst[3], temp[1], linesize, 1, step, h, w);
+}
+
+static void compose2D2(float *dst, float *src[4], float *temp[2],
+ int linesize, int step, int w, int h)
+{
+ compose2D(temp[0], src[0], src[1], linesize, 1, step, h, w);
+ compose2D(temp[1], src[2], src[3], linesize, 1, step, h, w);
+ compose2D(dst, temp[0], temp[1], 1, linesize, step, w, h);
+}
+
+static void filter(OWDenoiseContext *s,
+ uint8_t *dst, int dst_linesize,
+ const uint8_t *src, int src_linesize,
+ int width, int height, double strength)
+{
+ int x, y, i, j, depth = s->depth;
+
+ while (1<<depth > width || 1<<depth > height)
+ depth--;
+
+ if (s->pixel_depth <= 8) {
+ for (y = 0; y < height; y++)
+ for(x = 0; x < width; x++)
+ s->plane[0][0][y*s->linesize + x] = src[y*src_linesize + x];
+ } else {
+ const uint16_t *src16 = (const uint16_t *)src;
+
+ src_linesize /= 2;
+ for (y = 0; y < height; y++)
+ for(x = 0; x < width; x++)
+ s->plane[0][0][y*s->linesize + x] = src16[y*src_linesize + x];
+ }
+
+ for (i = 0; i < depth; i++)
+ decompose2D2(s->plane[i + 1], s->plane[i][0], s->plane[0] + 1, s->linesize, 1<<i, width, height);
+
+ for (i = 0; i < depth; i++) {
+ for (j = 1; j < 4; j++) {
+ for (y = 0; y < height; y++) {
+ for (x = 0; x < width; x++) {
+ double v = s->plane[i + 1][j][y*s->linesize + x];
+ if (v > strength) v -= strength;
+ else if (v < -strength) v += strength;
+ else v = 0;
+ s->plane[i + 1][j][x + y*s->linesize] = v;
+ }
+ }
+ }
+ }
+ for (i = depth-1; i >= 0; i--)
+ compose2D2(s->plane[i][0], s->plane[i + 1], s->plane[0] + 1, s->linesize, 1<<i, width, height);
+
+ if (s->pixel_depth <= 8) {
+ for (y = 0; y < height; y++) {
+ for (x = 0; x < width; x++) {
+ i = s->plane[0][0][y*s->linesize + x] + dither[x&7][y&7]*(1.0/64) + 1.0/128; // yes the rounding is insane but optimal :)
+ if ((unsigned)i > 255U) i = ~(i >> 31);
+ dst[y*dst_linesize + x] = i;
+ }
+ }
+ } else {
+ uint16_t *dst16 = (uint16_t *)dst;
+
+ dst_linesize /= 2;
+ for (y = 0; y < height; y++) {
+ for (x = 0; x < width; x++) {
+ i = s->plane[0][0][y*s->linesize + x];
+ dst16[y*dst_linesize + x] = i;
+ }
+ }
+ }
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ OWDenoiseContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFrame *out;
+ const int cw = AV_CEIL_RSHIFT(inlink->w, s->hsub);
+ const int ch = AV_CEIL_RSHIFT(inlink->h, s->vsub);
+
+ if (av_frame_is_writable(in)) {
+ out = in;
+
+ if (s->luma_strength > 0)
+ filter(s, out->data[0], out->linesize[0], in->data[0], in->linesize[0], inlink->w, inlink->h, s->luma_strength);
+ if (s->chroma_strength > 0) {
+ filter(s, out->data[1], out->linesize[1], in->data[1], in->linesize[1], cw, ch, s->chroma_strength);
+ filter(s, out->data[2], out->linesize[2], in->data[2], in->linesize[2], cw, ch, s->chroma_strength);
+ }
+ } else {
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+
+ if (s->luma_strength > 0) {
+ filter(s, out->data[0], out->linesize[0], in->data[0], in->linesize[0], inlink->w, inlink->h, s->luma_strength);
+ } else {
+ av_image_copy_plane(out->data[0], out->linesize[0], in ->data[0], in ->linesize[0], inlink->w, inlink->h);
+ }
+ if (s->chroma_strength > 0) {
+ filter(s, out->data[1], out->linesize[1], in->data[1], in->linesize[1], cw, ch, s->chroma_strength);
+ filter(s, out->data[2], out->linesize[2], in->data[2], in->linesize[2], cw, ch, s->chroma_strength);
+ } else {
+ av_image_copy_plane(out->data[1], out->linesize[1], in ->data[1], in ->linesize[1], inlink->w, inlink->h);
+ av_image_copy_plane(out->data[2], out->linesize[2], in ->data[2], in ->linesize[2], inlink->w, inlink->h);
+ }
+
+ if (in->data[3])
+ av_image_copy_plane(out->data[3], out->linesize[3],
+ in ->data[3], in ->linesize[3],
+ inlink->w, inlink->h);
+ av_frame_free(&in);
+ }
+
+ return ff_filter_frame(outlink, out);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P,
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
+ AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA422P,
+ AV_PIX_FMT_YUVA420P,
+ AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
+ AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
+ AV_PIX_FMT_YUV440P10,
+ AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV420P12,
+ AV_PIX_FMT_YUV440P12,
+ AV_PIX_FMT_YUV444P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV420P14,
+ AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
+ AV_PIX_FMT_NONE
+ };
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ int i, j;
+ OWDenoiseContext *s = inlink->dst->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ const int h = FFALIGN(inlink->h, 16);
+
+ s->hsub = desc->log2_chroma_w;
+ s->vsub = desc->log2_chroma_h;
+ s->pixel_depth = desc->comp[0].depth;
+
+ s->linesize = FFALIGN(inlink->w, 16);
+ for (j = 0; j < 4; j++) {
+ for (i = 0; i <= s->depth; i++) {
+ s->plane[i][j] = av_malloc_array(s->linesize, h * sizeof(s->plane[0][0][0]));
+ if (!s->plane[i][j])
+ return AVERROR(ENOMEM);
+ }
+ }
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ int i, j;
+ OWDenoiseContext *s = ctx->priv;
+
+ for (j = 0; j < 4; j++)
+ for (i = 0; i <= s->depth; i++)
+ av_freep(&s->plane[i][j]);
+}
+
+static const AVFilterPad owdenoise_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ { NULL }
+};
+
+static const AVFilterPad owdenoise_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_owdenoise = {
+ .name = "owdenoise",
+ .description = NULL_IF_CONFIG_SMALL("Denoise using wavelets."),
+ .priv_size = sizeof(OWDenoiseContext),
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = owdenoise_inputs,
+ .outputs = owdenoise_outputs,
+ .priv_class = &owdenoise_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+};
diff --git a/libavfilter/vf_pad.c b/libavfilter/vf_pad.c
index d9015470fa..61927b654a 100644
--- a/libavfilter/vf_pad.c
+++ b/libavfilter/vf_pad.c
@@ -2,20 +2,20 @@
* Copyright (c) 2008 vmrsss
* Copyright (c) 2009 Stefano Sabatini
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -41,9 +41,6 @@
#include "drawutils.h"
static const char *const var_names[] = {
- "PI",
- "PHI",
- "E",
"in_w", "iw",
"in_h", "ih",
"out_w", "ow",
@@ -51,15 +48,14 @@ static const char *const var_names[] = {
"x",
"y",
"a",
+ "sar",
+ "dar",
"hsub",
"vsub",
NULL
};
enum var_name {
- VAR_PI,
- VAR_PHI,
- VAR_E,
VAR_IN_W, VAR_IW,
VAR_IN_H, VAR_IH,
VAR_OUT_W, VAR_OW,
@@ -67,6 +63,8 @@ enum var_name {
VAR_X,
VAR_Y,
VAR_A,
+ VAR_SAR,
+ VAR_DAR,
VAR_HSUB,
VAR_VSUB,
VARS_NB
@@ -74,87 +72,54 @@ enum var_name {
static int query_formats(AVFilterContext *ctx)
{
- static const enum AVPixelFormat pix_fmts[] = {
- AV_PIX_FMT_ARGB, AV_PIX_FMT_RGBA,
- AV_PIX_FMT_ABGR, AV_PIX_FMT_BGRA,
- AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
-
- AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P,
- AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P,
- AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
- AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P,
- AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ440P,
- AV_PIX_FMT_YUVA420P,
-
- AV_PIX_FMT_NONE
- };
-
- ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
- return 0;
+ return ff_set_common_formats(ctx, ff_draw_supported_pixel_formats(0));
}
+enum EvalMode {
+ EVAL_MODE_INIT,
+ EVAL_MODE_FRAME,
+ EVAL_MODE_NB
+};
+
typedef struct PadContext {
const AVClass *class;
int w, h; ///< output dimensions, a value of 0 will result in the input size
int x, y; ///< offsets of the input area with respect to the padded area
int in_w, in_h; ///< width and height for the padded input video, which has to be aligned to the chroma values in order to avoid chroma issues
+ int inlink_w, inlink_h;
char *w_expr; ///< width expression string
char *h_expr; ///< height expression string
char *x_expr; ///< width expression string
char *y_expr; ///< height expression string
- char *color_str;
+ uint8_t rgba_color[4]; ///< color for the padding area
+ FFDrawContext draw;
+ FFDrawColor color;
- uint8_t color[4]; ///< color expressed either in YUVA or RGBA colorspace for the padding area
- uint8_t *line[4];
- int line_step[4];
- int hsub, vsub; ///< chroma subsampling values
+ int eval_mode; ///< expression evaluation mode
} PadContext;
-static av_cold int init(AVFilterContext *ctx)
-{
- PadContext *s = ctx->priv;
-
- if (av_parse_color(s->color, s->color_str, -1, ctx) < 0)
- return AVERROR(EINVAL);
-
- return 0;
-}
-
-static av_cold void uninit(AVFilterContext *ctx)
-{
- PadContext *s = ctx->priv;
- int i;
-
- for (i = 0; i < 4; i++) {
- av_freep(&s->line[i]);
- s->line_step[i] = 0;
- }
-}
-
static int config_input(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->dst;
PadContext *s = ctx->priv;
- const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
- uint8_t rgba_color[4];
- int ret, is_packed_rgba;
+ int ret;
double var_values[VARS_NB], res;
char *expr;
- s->hsub = pix_desc->log2_chroma_w;
- s->vsub = pix_desc->log2_chroma_h;
+ ff_draw_init(&s->draw, inlink->format, 0);
+ ff_draw_color(&s->draw, &s->color, s->rgba_color);
- var_values[VAR_PI] = M_PI;
- var_values[VAR_PHI] = M_PHI;
- var_values[VAR_E] = M_E;
var_values[VAR_IN_W] = var_values[VAR_IW] = inlink->w;
var_values[VAR_IN_H] = var_values[VAR_IH] = inlink->h;
var_values[VAR_OUT_W] = var_values[VAR_OW] = NAN;
var_values[VAR_OUT_H] = var_values[VAR_OH] = NAN;
var_values[VAR_A] = (double) inlink->w / inlink->h;
- var_values[VAR_HSUB] = 1<<s->hsub;
- var_values[VAR_VSUB] = 1<<s->vsub;
+ var_values[VAR_SAR] = inlink->sample_aspect_ratio.num ?
+ (double) inlink->sample_aspect_ratio.num / inlink->sample_aspect_ratio.den : 1;
+ var_values[VAR_DAR] = var_values[VAR_A] * var_values[VAR_SAR];
+ var_values[VAR_HSUB] = 1 << s->draw.hsub_max;
+ var_values[VAR_VSUB] = 1 << s->draw.vsub_max;
/* evaluate width and height */
av_expr_parse_and_eval(&res, (expr = s->w_expr),
@@ -201,22 +166,18 @@ static int config_input(AVFilterLink *inlink)
return AVERROR(EINVAL);
}
- s->w &= ~((1 << s->hsub) - 1);
- s->h &= ~((1 << s->vsub) - 1);
- s->x &= ~((1 << s->hsub) - 1);
- s->y &= ~((1 << s->vsub) - 1);
-
- s->in_w = inlink->w & ~((1 << s->hsub) - 1);
- s->in_h = inlink->h & ~((1 << s->vsub) - 1);
-
- memcpy(rgba_color, s->color, sizeof(rgba_color));
- ff_fill_line_with_color(s->line, s->line_step, s->w, s->color,
- inlink->format, rgba_color, &is_packed_rgba, NULL);
+ s->w = ff_draw_round_to_sub(&s->draw, 0, -1, s->w);
+ s->h = ff_draw_round_to_sub(&s->draw, 1, -1, s->h);
+ s->x = ff_draw_round_to_sub(&s->draw, 0, -1, s->x);
+ s->y = ff_draw_round_to_sub(&s->draw, 1, -1, s->y);
+ s->in_w = ff_draw_round_to_sub(&s->draw, 0, -1, inlink->w);
+ s->in_h = ff_draw_round_to_sub(&s->draw, 1, -1, inlink->h);
+ s->inlink_w = inlink->w;
+ s->inlink_h = inlink->h;
- av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d -> w:%d h:%d x:%d y:%d color:0x%02X%02X%02X%02X[%s]\n",
+ av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d -> w:%d h:%d x:%d y:%d color:0x%02X%02X%02X%02X\n",
inlink->w, inlink->h, s->w, s->h, s->x, s->y,
- s->color[0], s->color[1], s->color[2], s->color[3],
- is_packed_rgba ? "rgba" : "yuva");
+ s->rgba_color[0], s->rgba_color[1], s->rgba_color[2], s->rgba_color[3]);
if (s->x < 0 || s->y < 0 ||
s->w <= 0 || s->h <= 0 ||
@@ -249,24 +210,27 @@ static int config_output(AVFilterLink *outlink)
static AVFrame *get_video_buffer(AVFilterLink *inlink, int w, int h)
{
PadContext *s = inlink->dst->priv;
-
- AVFrame *frame = ff_get_video_buffer(inlink->dst->outputs[0],
- w + (s->w - s->in_w),
- h + (s->h - s->in_h));
+ AVFrame *frame;
int plane;
+ if (s->inlink_w <= 0)
+ return NULL;
+
+ frame = ff_get_video_buffer(inlink->dst->outputs[0],
+ w + (s->w - s->in_w),
+ h + (s->h - s->in_h) + (s->x > 0));
+
if (!frame)
return NULL;
frame->width = w;
frame->height = h;
- for (plane = 0; plane < 4 && frame->data[plane]; plane++) {
- int hsub = (plane == 1 || plane == 2) ? s->hsub : 0;
- int vsub = (plane == 1 || plane == 2) ? s->vsub : 0;
-
- frame->data[plane] += (s->x >> hsub) * s->line_step[plane] +
- (s->y >> vsub) * frame->linesize[plane];
+ for (plane = 0; plane < 4 && frame->data[plane] && frame->linesize[plane]; plane++) {
+ int hsub = s->draw.hsub[plane];
+ int vsub = s->draw.vsub[plane];
+ frame->data[plane] += (s->x >> hsub) * s->draw.pixelstep[plane] +
+ (s->y >> vsub) * frame->linesize[plane];
}
return frame;
@@ -287,38 +251,37 @@ static int buffer_needs_copy(PadContext *s, AVFrame *frame, AVBufferRef *buf)
/* for each plane in this buffer, check that it can be padded without
* going over buffer bounds or other planes */
for (i = 0; i < FF_ARRAY_ELEMS(planes) && planes[i] >= 0; i++) {
- int hsub = (planes[i] == 1 || planes[i] == 2) ? s->hsub : 0;
- int vsub = (planes[i] == 1 || planes[i] == 2) ? s->vsub : 0;
+ int hsub = s->draw.hsub[planes[i]];
+ int vsub = s->draw.vsub[planes[i]];
uint8_t *start = frame->data[planes[i]];
- uint8_t *end = start + (frame->height >> hsub) *
+ uint8_t *end = start + (frame->height >> vsub) *
frame->linesize[planes[i]];
/* amount of free space needed before the start and after the end
* of the plane */
- ptrdiff_t req_start = (s->x >> hsub) * s->line_step[planes[i]] +
+ ptrdiff_t req_start = (s->x >> hsub) * s->draw.pixelstep[planes[i]] +
(s->y >> vsub) * frame->linesize[planes[i]];
ptrdiff_t req_end = ((s->w - s->x - frame->width) >> hsub) *
- s->line_step[planes[i]] +
- (s->y >> vsub) * frame->linesize[planes[i]];
+ s->draw.pixelstep[planes[i]] +
+ ((s->h - s->y - frame->height) >> vsub) * frame->linesize[planes[i]];
- if (frame->linesize[planes[i]] < (s->w >> hsub) * s->line_step[planes[i]])
+ if (frame->linesize[planes[i]] < (s->w >> hsub) * s->draw.pixelstep[planes[i]])
return 1;
if (start - buf->data < req_start ||
(buf->data + buf->size) - end < req_end)
return 1;
-#define SIGN(x) ((x) > 0 ? 1 : -1)
for (j = 0; j < FF_ARRAY_ELEMS(planes) && planes[j] >= 0; j++) {
- int hsub1 = (planes[j] == 1 || planes[j] == 2) ? s->hsub : 0;
+ int vsub1 = s->draw.vsub[planes[j]];
uint8_t *start1 = frame->data[planes[j]];
- uint8_t *end1 = start1 + (frame->height >> hsub1) *
+ uint8_t *end1 = start1 + (frame->height >> vsub1) *
frame->linesize[planes[j]];
if (i == j)
continue;
- if (SIGN(start - end1) != SIGN(start - end1 - req_start) ||
- SIGN(end - start1) != SIGN(end - start1 + req_end))
+ if (FFSIGN(start - end1) != FFSIGN(start - end1 - req_start) ||
+ FFSIGN(end - start1) != FFSIGN(end - start1 + req_end))
return 1;
}
}
@@ -333,7 +296,7 @@ static int frame_needs_copy(PadContext *s, AVFrame *frame)
if (!av_frame_is_writable(frame))
return 1;
- for (i = 0; i < FF_ARRAY_ELEMS(frame->buf) && frame->buf[i]; i++)
+ for (i = 0; i < 4 && frame->buf[i]; i++)
if (buffer_needs_copy(s, frame, frame->buf[i]))
return 1;
return 0;
@@ -342,8 +305,35 @@ static int frame_needs_copy(PadContext *s, AVFrame *frame)
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
PadContext *s = inlink->dst->priv;
+ AVFilterLink *outlink = inlink->dst->outputs[0];
AVFrame *out;
- int needs_copy = frame_needs_copy(s, in);
+ int needs_copy;
+ if(s->eval_mode == EVAL_MODE_FRAME && (
+ in->width != s->inlink_w
+ || in->height != s->inlink_h
+ || in->format != outlink->format
+ || in->sample_aspect_ratio.den != outlink->sample_aspect_ratio.den || in->sample_aspect_ratio.num != outlink->sample_aspect_ratio.num)) {
+ int ret;
+
+ inlink->dst->inputs[0]->format = in->format;
+ inlink->dst->inputs[0]->w = in->width;
+ inlink->dst->inputs[0]->h = in->height;
+
+ inlink->dst->inputs[0]->sample_aspect_ratio.den = in->sample_aspect_ratio.den;
+ inlink->dst->inputs[0]->sample_aspect_ratio.num = in->sample_aspect_ratio.num;
+
+
+ if ((ret = config_input(inlink)) < 0) {
+ s->inlink_w = -1;
+ return ret;
+ }
+ if ((ret = config_output(outlink)) < 0) {
+ s->inlink_w = -1;
+ return ret;
+ }
+ }
+
+ needs_copy = frame_needs_copy(s, in);
if (needs_copy) {
av_log(inlink->dst, AV_LOG_DEBUG, "Direct padding impossible allocating new frame\n");
@@ -360,41 +350,40 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
int i;
out = in;
- for (i = 0; i < FF_ARRAY_ELEMS(out->data) && out->data[i]; i++) {
- int hsub = (i == 1 || i == 2) ? s->hsub : 0;
- int vsub = (i == 1 || i == 2) ? s->vsub : 0;
- out->data[i] -= (s->x >> hsub) * s->line_step[i] +
+ for (i = 0; i < 4 && out->data[i] && out->linesize[i]; i++) {
+ int hsub = s->draw.hsub[i];
+ int vsub = s->draw.vsub[i];
+ out->data[i] -= (s->x >> hsub) * s->draw.pixelstep[i] +
(s->y >> vsub) * out->linesize[i];
}
}
/* top bar */
if (s->y) {
- ff_draw_rectangle(out->data, out->linesize,
- s->line, s->line_step, s->hsub, s->vsub,
+ ff_fill_rectangle(&s->draw, &s->color,
+ out->data, out->linesize,
0, 0, s->w, s->y);
}
/* bottom bar */
if (s->h > s->y + s->in_h) {
- ff_draw_rectangle(out->data, out->linesize,
- s->line, s->line_step, s->hsub, s->vsub,
+ ff_fill_rectangle(&s->draw, &s->color,
+ out->data, out->linesize,
0, s->y + s->in_h, s->w, s->h - s->y - s->in_h);
}
/* left border */
- ff_draw_rectangle(out->data, out->linesize, s->line, s->line_step,
- s->hsub, s->vsub, 0, s->y, s->x, in->height);
+ ff_fill_rectangle(&s->draw, &s->color, out->data, out->linesize,
+ 0, s->y, s->x, in->height);
if (needs_copy) {
- ff_copy_rectangle(out->data, out->linesize, in->data, in->linesize,
- s->line_step, s->hsub, s->vsub,
- s->x, s->y, 0, in->width, in->height);
+ ff_copy_rectangle2(&s->draw,
+ out->data, out->linesize, in->data, in->linesize,
+ s->x, s->y, 0, 0, in->width, in->height);
}
/* right border */
- ff_draw_rectangle(out->data, out->linesize,
- s->line, s->line_step, s->hsub, s->vsub,
+ ff_fill_rectangle(&s->draw, &s->color, out->data, out->linesize,
s->x + s->in_w, s->y, s->w - s->x - s->in_w,
in->height);
@@ -407,24 +396,23 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
}
#define OFFSET(x) offsetof(PadContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
-static const AVOption options[] = {
- { "width", "Output video width", OFFSET(w_expr), AV_OPT_TYPE_STRING, { .str = "iw" }, .flags = FLAGS },
- { "height", "Output video height", OFFSET(h_expr), AV_OPT_TYPE_STRING, { .str = "ih" }, .flags = FLAGS },
- { "x", "Horizontal position of the left edge of the input video in the "
- "output video", OFFSET(x_expr), AV_OPT_TYPE_STRING, { .str = "0" }, .flags = FLAGS },
- { "y", "Vertical position of the top edge of the input video in the "
- "output video", OFFSET(y_expr), AV_OPT_TYPE_STRING, { .str = "0" }, .flags = FLAGS },
- { "color", "Color of the padded area", OFFSET(color_str), AV_OPT_TYPE_STRING, { .str = "black" }, .flags = FLAGS },
- { NULL },
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption pad_options[] = {
+ { "width", "set the pad area width expression", OFFSET(w_expr), AV_OPT_TYPE_STRING, {.str = "iw"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "w", "set the pad area width expression", OFFSET(w_expr), AV_OPT_TYPE_STRING, {.str = "iw"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "height", "set the pad area height expression", OFFSET(h_expr), AV_OPT_TYPE_STRING, {.str = "ih"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "h", "set the pad area height expression", OFFSET(h_expr), AV_OPT_TYPE_STRING, {.str = "ih"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "x", "set the x offset expression for the input image position", OFFSET(x_expr), AV_OPT_TYPE_STRING, {.str = "0"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "y", "set the y offset expression for the input image position", OFFSET(y_expr), AV_OPT_TYPE_STRING, {.str = "0"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "color", "set the color of the padded area border", OFFSET(rgba_color), AV_OPT_TYPE_COLOR, {.str = "black"}, .flags = FLAGS },
+ { "eval", "specify when to evaluate expressions", OFFSET(eval_mode), AV_OPT_TYPE_INT, {.i64 = EVAL_MODE_INIT}, 0, EVAL_MODE_NB-1, FLAGS, "eval" },
+ { "init", "eval expressions once during initialization", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_INIT}, .flags = FLAGS, .unit = "eval" },
+ { "frame", "eval expressions during initialization and per-frame", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_FRAME}, .flags = FLAGS, .unit = "eval" },
+ { NULL }
};
-static const AVClass pad_class = {
- .class_name = "pad",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(pad);
static const AVFilterPad avfilter_vf_pad_inputs[] = {
{
@@ -448,15 +436,10 @@ static const AVFilterPad avfilter_vf_pad_outputs[] = {
AVFilter ff_vf_pad = {
.name = "pad",
- .description = NULL_IF_CONFIG_SMALL("Pad input image to width:height[:x:y[:color]] (default x and y: 0, default color: black)."),
-
+ .description = NULL_IF_CONFIG_SMALL("Pad the input video."),
.priv_size = sizeof(PadContext),
.priv_class = &pad_class,
- .init = init,
- .uninit = uninit,
.query_formats = query_formats,
-
- .inputs = avfilter_vf_pad_inputs,
-
- .outputs = avfilter_vf_pad_outputs,
+ .inputs = avfilter_vf_pad_inputs,
+ .outputs = avfilter_vf_pad_outputs,
};
diff --git a/libavfilter/vf_palettegen.c b/libavfilter/vf_palettegen.c
new file mode 100644
index 0000000000..5e69873b4c
--- /dev/null
+++ b/libavfilter/vf_palettegen.c
@@ -0,0 +1,579 @@
+/*
+ * Copyright (c) 2015 Stupeflix
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Generate one palette for a whole video stream.
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/internal.h"
+#include "libavutil/opt.h"
+#include "libavutil/qsort.h"
+#include "avfilter.h"
+#include "internal.h"
+
+/* Reference a color and how much it's used */
+struct color_ref {
+ uint32_t color;
+ uint64_t count;
+};
+
+/* Store a range of colors */
+struct range_box {
+ uint32_t color; // average color
+ int64_t variance; // overall variance of the box (how much the colors are spread)
+ int start; // index in PaletteGenContext->refs
+ int len; // number of referenced colors
+ int sorted_by; // whether range of colors is sorted by red (0), green (1) or blue (2)
+};
+
+struct hist_node {
+ struct color_ref *entries;
+ int nb_entries;
+};
+
+enum {
+ STATS_MODE_ALL_FRAMES,
+ STATS_MODE_DIFF_FRAMES,
+ STATS_MODE_SINGLE_FRAMES,
+ NB_STATS_MODE
+};
+
+#define NBITS 5
+#define HIST_SIZE (1<<(3*NBITS))
+
+typedef struct {
+ const AVClass *class;
+
+ int max_colors;
+ int reserve_transparent;
+ int stats_mode;
+
+ AVFrame *prev_frame; // previous frame used for the diff stats_mode
+ struct hist_node histogram[HIST_SIZE]; // histogram/hashtable of the colors
+ struct color_ref **refs; // references of all the colors used in the stream
+ int nb_refs; // number of color references (or number of different colors)
+ struct range_box boxes[256]; // define the segmentation of the colorspace (the final palette)
+ int nb_boxes; // number of boxes (increase will segmenting them)
+ int palette_pushed; // if the palette frame is pushed into the outlink or not
+} PaletteGenContext;
+
+#define OFFSET(x) offsetof(PaletteGenContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption palettegen_options[] = {
+ { "max_colors", "set the maximum number of colors to use in the palette", OFFSET(max_colors), AV_OPT_TYPE_INT, {.i64=256}, 4, 256, FLAGS },
+ { "reserve_transparent", "reserve a palette entry for transparency", OFFSET(reserve_transparent), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, FLAGS },
+ { "stats_mode", "set statistics mode", OFFSET(stats_mode), AV_OPT_TYPE_INT, {.i64=STATS_MODE_ALL_FRAMES}, 0, NB_STATS_MODE-1, FLAGS, "mode" },
+ { "full", "compute full frame histograms", 0, AV_OPT_TYPE_CONST, {.i64=STATS_MODE_ALL_FRAMES}, INT_MIN, INT_MAX, FLAGS, "mode" },
+ { "diff", "compute histograms only for the part that differs from previous frame", 0, AV_OPT_TYPE_CONST, {.i64=STATS_MODE_DIFF_FRAMES}, INT_MIN, INT_MAX, FLAGS, "mode" },
+ { "single", "compute new histogram for each frame", 0, AV_OPT_TYPE_CONST, {.i64=STATS_MODE_SINGLE_FRAMES}, INT_MIN, INT_MAX, FLAGS, "mode" },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(palettegen);
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat in_fmts[] = {AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE};
+ static const enum AVPixelFormat out_fmts[] = {AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE};
+ int ret;
+
+ if ((ret = ff_formats_ref(ff_make_format_list(in_fmts) , &ctx->inputs[0]->out_formats)) < 0)
+ return ret;
+ if ((ret = ff_formats_ref(ff_make_format_list(out_fmts), &ctx->outputs[0]->in_formats)) < 0)
+ return ret;
+ return 0;
+}
+
+typedef int (*cmp_func)(const void *, const void *);
+
+#define DECLARE_CMP_FUNC(name, pos) \
+static int cmp_##name(const void *pa, const void *pb) \
+{ \
+ const struct color_ref * const *a = pa; \
+ const struct color_ref * const *b = pb; \
+ return ((*a)->color >> (8 * (2 - (pos))) & 0xff) \
+ - ((*b)->color >> (8 * (2 - (pos))) & 0xff); \
+}
+
+DECLARE_CMP_FUNC(r, 0)
+DECLARE_CMP_FUNC(g, 1)
+DECLARE_CMP_FUNC(b, 2)
+
+static const cmp_func cmp_funcs[] = {cmp_r, cmp_g, cmp_b};
+
+/**
+ * Simple color comparison for sorting the final palette
+ */
+static int cmp_color(const void *a, const void *b)
+{
+ const struct range_box *box1 = a;
+ const struct range_box *box2 = b;
+ return FFDIFFSIGN(box1->color , box2->color);
+}
+
+static av_always_inline int diff(const uint32_t a, const uint32_t b)
+{
+ const uint8_t c1[] = {a >> 16 & 0xff, a >> 8 & 0xff, a & 0xff};
+ const uint8_t c2[] = {b >> 16 & 0xff, b >> 8 & 0xff, b & 0xff};
+ const int dr = c1[0] - c2[0];
+ const int dg = c1[1] - c2[1];
+ const int db = c1[2] - c2[2];
+ return dr*dr + dg*dg + db*db;
+}
+
+/**
+ * Find the next box to split: pick the one with the highest variance
+ */
+static int get_next_box_id_to_split(PaletteGenContext *s)
+{
+ int box_id, i, best_box_id = -1;
+ int64_t max_variance = -1;
+
+ if (s->nb_boxes == s->max_colors - s->reserve_transparent)
+ return -1;
+
+ for (box_id = 0; box_id < s->nb_boxes; box_id++) {
+ struct range_box *box = &s->boxes[box_id];
+
+ if (s->boxes[box_id].len >= 2) {
+
+ if (box->variance == -1) {
+ int64_t variance = 0;
+
+ for (i = 0; i < box->len; i++) {
+ const struct color_ref *ref = s->refs[box->start + i];
+ variance += diff(ref->color, box->color) * ref->count;
+ }
+ box->variance = variance;
+ }
+ if (box->variance > max_variance) {
+ best_box_id = box_id;
+ max_variance = box->variance;
+ }
+ } else {
+ box->variance = -1;
+ }
+ }
+ return best_box_id;
+}
+
+/**
+ * Get the 32-bit average color for the range of RGB colors enclosed in the
+ * specified box. Takes into account the weight of each color.
+ */
+static uint32_t get_avg_color(struct color_ref * const *refs,
+ const struct range_box *box)
+{
+ int i;
+ const int n = box->len;
+ uint64_t r = 0, g = 0, b = 0, div = 0;
+
+ for (i = 0; i < n; i++) {
+ const struct color_ref *ref = refs[box->start + i];
+ r += (ref->color >> 16 & 0xff) * ref->count;
+ g += (ref->color >> 8 & 0xff) * ref->count;
+ b += (ref->color & 0xff) * ref->count;
+ div += ref->count;
+ }
+
+ r = r / div;
+ g = g / div;
+ b = b / div;
+
+ return 0xffU<<24 | r<<16 | g<<8 | b;
+}
+
+/**
+ * Split given box in two at position n. The original box becomes the left part
+ * of the split, and the new index box is the right part.
+ */
+static void split_box(PaletteGenContext *s, struct range_box *box, int n)
+{
+ struct range_box *new_box = &s->boxes[s->nb_boxes++];
+ new_box->start = n + 1;
+ new_box->len = box->start + box->len - new_box->start;
+ new_box->sorted_by = box->sorted_by;
+ box->len -= new_box->len;
+
+ av_assert0(box->len >= 1);
+ av_assert0(new_box->len >= 1);
+
+ box->color = get_avg_color(s->refs, box);
+ new_box->color = get_avg_color(s->refs, new_box);
+ box->variance = -1;
+ new_box->variance = -1;
+}
+
+/**
+ * Write the palette into the output frame.
+ */
+static void write_palette(AVFilterContext *ctx, AVFrame *out)
+{
+ const PaletteGenContext *s = ctx->priv;
+ int x, y, box_id = 0;
+ uint32_t *pal = (uint32_t *)out->data[0];
+ const int pal_linesize = out->linesize[0] >> 2;
+ uint32_t last_color = 0;
+
+ for (y = 0; y < out->height; y++) {
+ for (x = 0; x < out->width; x++) {
+ if (box_id < s->nb_boxes) {
+ pal[x] = s->boxes[box_id++].color;
+ if ((x || y) && pal[x] == last_color)
+ av_log(ctx, AV_LOG_WARNING, "Dupped color: %08X\n", pal[x]);
+ last_color = pal[x];
+ } else {
+ pal[x] = 0xff000000; // pad with black
+ }
+ }
+ pal += pal_linesize;
+ }
+
+ if (s->reserve_transparent) {
+ av_assert0(s->nb_boxes < 256);
+ pal[out->width - pal_linesize - 1] = 0x0000ff00; // add a green transparent color
+ }
+}
+
+/**
+ * Crawl the histogram to get all the defined colors, and create a linear list
+ * of them (each color reference entry is a pointer to the value in the
+ * histogram/hash table).
+ */
+static struct color_ref **load_color_refs(const struct hist_node *hist, int nb_refs)
+{
+ int i, j, k = 0;
+ struct color_ref **refs = av_malloc_array(nb_refs, sizeof(*refs));
+
+ if (!refs)
+ return NULL;
+
+ for (j = 0; j < HIST_SIZE; j++) {
+ const struct hist_node *node = &hist[j];
+
+ for (i = 0; i < node->nb_entries; i++)
+ refs[k++] = &node->entries[i];
+ }
+
+ return refs;
+}
+
+static double set_colorquant_ratio_meta(AVFrame *out, int nb_out, int nb_in)
+{
+ char buf[32];
+ const double ratio = (double)nb_out / nb_in;
+ snprintf(buf, sizeof(buf), "%f", ratio);
+ av_dict_set(&out->metadata, "lavfi.color_quant_ratio", buf, 0);
+ return ratio;
+}
+
+/**
+ * Main function implementing the Median Cut Algorithm defined by Paul Heckbert
+ * in Color Image Quantization for Frame Buffer Display (1982)
+ */
+static AVFrame *get_palette_frame(AVFilterContext *ctx)
+{
+ AVFrame *out;
+ PaletteGenContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ double ratio;
+ int box_id = 0;
+ struct range_box *box;
+
+ /* reference only the used colors from histogram */
+ s->refs = load_color_refs(s->histogram, s->nb_refs);
+ if (!s->refs) {
+ av_log(ctx, AV_LOG_ERROR, "Unable to allocate references for %d different colors\n", s->nb_refs);
+ return NULL;
+ }
+
+ /* create the palette frame */
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out)
+ return NULL;
+ out->pts = 0;
+
+ /* set first box for 0..nb_refs */
+ box = &s->boxes[box_id];
+ box->len = s->nb_refs;
+ box->sorted_by = -1;
+ box->color = get_avg_color(s->refs, box);
+ box->variance = -1;
+ s->nb_boxes = 1;
+
+ while (box && box->len > 1) {
+ int i, rr, gr, br, longest;
+ uint64_t median, box_weight = 0;
+
+ /* compute the box weight (sum all the weights of the colors in the
+ * range) and its boundings */
+ uint8_t min[3] = {0xff, 0xff, 0xff};
+ uint8_t max[3] = {0x00, 0x00, 0x00};
+ for (i = box->start; i < box->start + box->len; i++) {
+ const struct color_ref *ref = s->refs[i];
+ const uint32_t rgb = ref->color;
+ const uint8_t r = rgb >> 16 & 0xff, g = rgb >> 8 & 0xff, b = rgb & 0xff;
+ min[0] = FFMIN(r, min[0]), max[0] = FFMAX(r, max[0]);
+ min[1] = FFMIN(g, min[1]), max[1] = FFMAX(g, max[1]);
+ min[2] = FFMIN(b, min[2]), max[2] = FFMAX(b, max[2]);
+ box_weight += ref->count;
+ }
+
+ /* define the axis to sort by according to the widest range of colors */
+ rr = max[0] - min[0];
+ gr = max[1] - min[1];
+ br = max[2] - min[2];
+ longest = 1; // pick green by default (the color the eye is the most sensitive to)
+ if (br >= rr && br >= gr) longest = 2;
+ if (rr >= gr && rr >= br) longest = 0;
+ if (gr >= rr && gr >= br) longest = 1; // prefer green again
+
+ ff_dlog(ctx, "box #%02X [%6d..%-6d] (%6d) w:%-6"PRIu64" ranges:[%2x %2x %2x] sort by %c (already sorted:%c) ",
+ box_id, box->start, box->start + box->len - 1, box->len, box_weight,
+ rr, gr, br, "rgb"[longest], box->sorted_by == longest ? 'y':'n');
+
+ /* sort the range by its longest axis if it's not already sorted */
+ if (box->sorted_by != longest) {
+ cmp_func cmpf = cmp_funcs[longest];
+ AV_QSORT(&s->refs[box->start], box->len, const struct color_ref *, cmpf);
+ box->sorted_by = longest;
+ }
+
+ /* locate the median where to split */
+ median = (box_weight + 1) >> 1;
+ box_weight = 0;
+ /* if you have 2 boxes, the maximum is actually #0: you must have at
+ * least 1 color on each side of the split, hence the -2 */
+ for (i = box->start; i < box->start + box->len - 2; i++) {
+ box_weight += s->refs[i]->count;
+ if (box_weight > median)
+ break;
+ }
+ ff_dlog(ctx, "split @ i=%-6d with w=%-6"PRIu64" (target=%6"PRIu64")\n", i, box_weight, median);
+ split_box(s, box, i);
+
+ box_id = get_next_box_id_to_split(s);
+ box = box_id >= 0 ? &s->boxes[box_id] : NULL;
+ }
+
+ ratio = set_colorquant_ratio_meta(out, s->nb_boxes, s->nb_refs);
+ av_log(ctx, AV_LOG_INFO, "%d%s colors generated out of %d colors; ratio=%f\n",
+ s->nb_boxes, s->reserve_transparent ? "(+1)" : "", s->nb_refs, ratio);
+
+ qsort(s->boxes, s->nb_boxes, sizeof(*s->boxes), cmp_color);
+
+ write_palette(ctx, out);
+
+ return out;
+}
+
+/**
+ * Hashing function for the color.
+ * It keeps the NBITS least significant bit of each component to make it
+ * "random" even if the scene doesn't have much different colors.
+ */
+static inline unsigned color_hash(uint32_t color)
+{
+ const uint8_t r = color >> 16 & ((1<<NBITS)-1);
+ const uint8_t g = color >> 8 & ((1<<NBITS)-1);
+ const uint8_t b = color & ((1<<NBITS)-1);
+ return r<<(NBITS*2) | g<<NBITS | b;
+}
+
+/**
+ * Locate the color in the hash table and increment its counter.
+ */
+static int color_inc(struct hist_node *hist, uint32_t color)
+{
+ int i;
+ const unsigned hash = color_hash(color);
+ struct hist_node *node = &hist[hash];
+ struct color_ref *e;
+
+ for (i = 0; i < node->nb_entries; i++) {
+ e = &node->entries[i];
+ if (e->color == color) {
+ e->count++;
+ return 0;
+ }
+ }
+
+ e = av_dynarray2_add((void**)&node->entries, &node->nb_entries,
+ sizeof(*node->entries), NULL);
+ if (!e)
+ return AVERROR(ENOMEM);
+ e->color = color;
+ e->count = 1;
+ return 1;
+}
+
+/**
+ * Update histogram when pixels differ from previous frame.
+ */
+static int update_histogram_diff(struct hist_node *hist,
+ const AVFrame *f1, const AVFrame *f2)
+{
+ int x, y, ret, nb_diff_colors = 0;
+
+ for (y = 0; y < f1->height; y++) {
+ const uint32_t *p = (const uint32_t *)(f1->data[0] + y*f1->linesize[0]);
+ const uint32_t *q = (const uint32_t *)(f2->data[0] + y*f2->linesize[0]);
+
+ for (x = 0; x < f1->width; x++) {
+ if (p[x] == q[x])
+ continue;
+ ret = color_inc(hist, p[x]);
+ if (ret < 0)
+ return ret;
+ nb_diff_colors += ret;
+ }
+ }
+ return nb_diff_colors;
+}
+
+/**
+ * Simple histogram of the frame.
+ */
+static int update_histogram_frame(struct hist_node *hist, const AVFrame *f)
+{
+ int x, y, ret, nb_diff_colors = 0;
+
+ for (y = 0; y < f->height; y++) {
+ const uint32_t *p = (const uint32_t *)(f->data[0] + y*f->linesize[0]);
+
+ for (x = 0; x < f->width; x++) {
+ ret = color_inc(hist, p[x]);
+ if (ret < 0)
+ return ret;
+ nb_diff_colors += ret;
+ }
+ }
+ return nb_diff_colors;
+}
+
+/**
+ * Update the histogram for each passing frame. No frame will be pushed here.
+ */
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ PaletteGenContext *s = ctx->priv;
+ int ret = s->prev_frame ? update_histogram_diff(s->histogram, s->prev_frame, in)
+ : update_histogram_frame(s->histogram, in);
+
+ if (ret > 0)
+ s->nb_refs += ret;
+
+ if (s->stats_mode == STATS_MODE_DIFF_FRAMES) {
+ av_frame_free(&s->prev_frame);
+ s->prev_frame = in;
+ } else if (s->stats_mode == STATS_MODE_SINGLE_FRAMES) {
+ AVFrame *out;
+ int i;
+
+ out = get_palette_frame(ctx);
+ out->pts = in->pts;
+ av_frame_free(&in);
+ ret = ff_filter_frame(ctx->outputs[0], out);
+ for (i = 0; i < HIST_SIZE; i++)
+ av_freep(&s->histogram[i].entries);
+ av_freep(&s->refs);
+ s->nb_refs = 0;
+ s->nb_boxes = 0;
+ memset(s->boxes, 0, sizeof(s->boxes));
+ memset(s->histogram, 0, sizeof(s->histogram));
+ } else {
+ av_frame_free(&in);
+ }
+
+ return ret;
+}
+
+/**
+ * Returns only one frame at the end containing the full palette.
+ */
+static int request_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ AVFilterLink *inlink = ctx->inputs[0];
+ PaletteGenContext *s = ctx->priv;
+ int r;
+
+ r = ff_request_frame(inlink);
+ if (r == AVERROR_EOF && !s->palette_pushed && s->nb_refs && s->stats_mode != STATS_MODE_SINGLE_FRAMES) {
+ r = ff_filter_frame(outlink, get_palette_frame(ctx));
+ s->palette_pushed = 1;
+ return r;
+ }
+ return r;
+}
+
+/**
+ * The output is one simple 16x16 squared-pixels palette.
+ */
+static int config_output(AVFilterLink *outlink)
+{
+ outlink->w = outlink->h = 16;
+ outlink->sample_aspect_ratio = av_make_q(1, 1);
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ int i;
+ PaletteGenContext *s = ctx->priv;
+
+ for (i = 0; i < HIST_SIZE; i++)
+ av_freep(&s->histogram[i].entries);
+ av_freep(&s->refs);
+ av_frame_free(&s->prev_frame);
+}
+
+static const AVFilterPad palettegen_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad palettegen_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_palettegen = {
+ .name = "palettegen",
+ .description = NULL_IF_CONFIG_SMALL("Find the optimal palette for a given stream."),
+ .priv_size = sizeof(PaletteGenContext),
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = palettegen_inputs,
+ .outputs = palettegen_outputs,
+ .priv_class = &palettegen_class,
+};
diff --git a/libavfilter/vf_paletteuse.c b/libavfilter/vf_paletteuse.c
new file mode 100644
index 0000000000..69d3be92da
--- /dev/null
+++ b/libavfilter/vf_paletteuse.c
@@ -0,0 +1,1086 @@
+/*
+ * Copyright (c) 2015 Stupeflix
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Use a palette to downsample an input video stream.
+ */
+
+#include "libavutil/bprint.h"
+#include "libavutil/internal.h"
+#include "libavutil/opt.h"
+#include "libavutil/qsort.h"
+#include "dualinput.h"
+#include "avfilter.h"
+
+enum dithering_mode {
+ DITHERING_NONE,
+ DITHERING_BAYER,
+ DITHERING_HECKBERT,
+ DITHERING_FLOYD_STEINBERG,
+ DITHERING_SIERRA2,
+ DITHERING_SIERRA2_4A,
+ NB_DITHERING
+};
+
+enum color_search_method {
+ COLOR_SEARCH_NNS_ITERATIVE,
+ COLOR_SEARCH_NNS_RECURSIVE,
+ COLOR_SEARCH_BRUTEFORCE,
+ NB_COLOR_SEARCHES
+};
+
+enum diff_mode {
+ DIFF_MODE_NONE,
+ DIFF_MODE_RECTANGLE,
+ NB_DIFF_MODE
+};
+
+struct color_node {
+ uint8_t val[3];
+ uint8_t palette_id;
+ int split;
+ int left_id, right_id;
+};
+
+#define NBITS 5
+#define CACHE_SIZE (1<<(3*NBITS))
+
+struct cached_color {
+ uint32_t color;
+ uint8_t pal_entry;
+};
+
+struct cache_node {
+ struct cached_color *entries;
+ int nb_entries;
+};
+
+struct PaletteUseContext;
+
+typedef int (*set_frame_func)(struct PaletteUseContext *s, AVFrame *out, AVFrame *in,
+ int x_start, int y_start, int width, int height);
+
+typedef struct PaletteUseContext {
+ const AVClass *class;
+ FFDualInputContext dinput;
+ struct cache_node cache[CACHE_SIZE]; /* lookup cache */
+ struct color_node map[AVPALETTE_COUNT]; /* 3D-Tree (KD-Tree with K=3) for reverse colormap */
+ uint32_t palette[AVPALETTE_COUNT];
+ int palette_loaded;
+ int dither;
+ int new;
+ set_frame_func set_frame;
+ int bayer_scale;
+ int ordered_dither[8*8];
+ int diff_mode;
+ AVFrame *last_in;
+ AVFrame *last_out;
+
+ /* debug options */
+ char *dot_filename;
+ int color_search_method;
+ int calc_mean_err;
+ uint64_t total_mean_err;
+ int debug_accuracy;
+} PaletteUseContext;
+
+#define OFFSET(x) offsetof(PaletteUseContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption paletteuse_options[] = {
+ { "dither", "select dithering mode", OFFSET(dither), AV_OPT_TYPE_INT, {.i64=DITHERING_SIERRA2_4A}, 0, NB_DITHERING-1, FLAGS, "dithering_mode" },
+ { "bayer", "ordered 8x8 bayer dithering (deterministic)", 0, AV_OPT_TYPE_CONST, {.i64=DITHERING_BAYER}, INT_MIN, INT_MAX, FLAGS, "dithering_mode" },
+ { "heckbert", "dithering as defined by Paul Heckbert in 1982 (simple error diffusion)", 0, AV_OPT_TYPE_CONST, {.i64=DITHERING_HECKBERT}, INT_MIN, INT_MAX, FLAGS, "dithering_mode" },
+ { "floyd_steinberg", "Floyd and Steingberg dithering (error diffusion)", 0, AV_OPT_TYPE_CONST, {.i64=DITHERING_FLOYD_STEINBERG}, INT_MIN, INT_MAX, FLAGS, "dithering_mode" },
+ { "sierra2", "Frankie Sierra dithering v2 (error diffusion)", 0, AV_OPT_TYPE_CONST, {.i64=DITHERING_SIERRA2}, INT_MIN, INT_MAX, FLAGS, "dithering_mode" },
+ { "sierra2_4a", "Frankie Sierra dithering v2 \"Lite\" (error diffusion)", 0, AV_OPT_TYPE_CONST, {.i64=DITHERING_SIERRA2_4A}, INT_MIN, INT_MAX, FLAGS, "dithering_mode" },
+ { "bayer_scale", "set scale for bayer dithering", OFFSET(bayer_scale), AV_OPT_TYPE_INT, {.i64=2}, 0, 5, FLAGS },
+ { "diff_mode", "set frame difference mode", OFFSET(diff_mode), AV_OPT_TYPE_INT, {.i64=DIFF_MODE_NONE}, 0, NB_DIFF_MODE-1, FLAGS, "diff_mode" },
+ { "rectangle", "process smallest different rectangle", 0, AV_OPT_TYPE_CONST, {.i64=DIFF_MODE_RECTANGLE}, INT_MIN, INT_MAX, FLAGS, "diff_mode" },
+
+ /* following are the debug options, not part of the official API */
+ { "debug_kdtree", "save Graphviz graph of the kdtree in specified file", OFFSET(dot_filename), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "color_search", "set reverse colormap color search method", OFFSET(color_search_method), AV_OPT_TYPE_INT, {.i64=COLOR_SEARCH_NNS_ITERATIVE}, 0, NB_COLOR_SEARCHES-1, FLAGS, "search" },
+ { "nns_iterative", "iterative search", 0, AV_OPT_TYPE_CONST, {.i64=COLOR_SEARCH_NNS_ITERATIVE}, INT_MIN, INT_MAX, FLAGS, "search" },
+ { "nns_recursive", "recursive search", 0, AV_OPT_TYPE_CONST, {.i64=COLOR_SEARCH_NNS_RECURSIVE}, INT_MIN, INT_MAX, FLAGS, "search" },
+ { "bruteforce", "brute-force into the palette", 0, AV_OPT_TYPE_CONST, {.i64=COLOR_SEARCH_BRUTEFORCE}, INT_MIN, INT_MAX, FLAGS, "search" },
+ { "mean_err", "compute and print mean error", OFFSET(calc_mean_err), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS },
+ { "debug_accuracy", "test color search accuracy", OFFSET(debug_accuracy), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS },
+ { "new", "take new palette for each output frame", OFFSET(new), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(paletteuse);
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat in_fmts[] = {AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE};
+ static const enum AVPixelFormat inpal_fmts[] = {AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE};
+ static const enum AVPixelFormat out_fmts[] = {AV_PIX_FMT_PAL8, AV_PIX_FMT_NONE};
+ int ret;
+ AVFilterFormats *in = ff_make_format_list(in_fmts);
+ AVFilterFormats *inpal = ff_make_format_list(inpal_fmts);
+ AVFilterFormats *out = ff_make_format_list(out_fmts);
+ if (!in || !inpal || !out) {
+ av_freep(&in);
+ av_freep(&inpal);
+ av_freep(&out);
+ return AVERROR(ENOMEM);
+ }
+ if ((ret = ff_formats_ref(in , &ctx->inputs[0]->out_formats)) < 0 ||
+ (ret = ff_formats_ref(inpal, &ctx->inputs[1]->out_formats)) < 0 ||
+ (ret = ff_formats_ref(out , &ctx->outputs[0]->in_formats)) < 0)
+ return ret;
+ return 0;
+}
+
+static av_always_inline int dither_color(uint32_t px, int er, int eg, int eb, int scale, int shift)
+{
+ return av_clip_uint8((px >> 16 & 0xff) + ((er * scale) / (1<<shift))) << 16
+ | av_clip_uint8((px >> 8 & 0xff) + ((eg * scale) / (1<<shift))) << 8
+ | av_clip_uint8((px & 0xff) + ((eb * scale) / (1<<shift)));
+}
+
+static av_always_inline int diff(const uint8_t *c1, const uint8_t *c2)
+{
+ // XXX: try L*a*b with CIE76 (dL*dL + da*da + db*db)
+ const int dr = c1[0] - c2[0];
+ const int dg = c1[1] - c2[1];
+ const int db = c1[2] - c2[2];
+ return dr*dr + dg*dg + db*db;
+}
+
+static av_always_inline uint8_t colormap_nearest_bruteforce(const uint32_t *palette, const uint8_t *rgb)
+{
+ int i, pal_id = -1, min_dist = INT_MAX;
+
+ for (i = 0; i < AVPALETTE_COUNT; i++) {
+ const uint32_t c = palette[i];
+
+ if ((c & 0xff000000) == 0xff000000) { // ignore transparent entry
+ const uint8_t palrgb[] = {
+ palette[i]>>16 & 0xff,
+ palette[i]>> 8 & 0xff,
+ palette[i] & 0xff,
+ };
+ const int d = diff(palrgb, rgb);
+ if (d < min_dist) {
+ pal_id = i;
+ min_dist = d;
+ }
+ }
+ }
+ return pal_id;
+}
+
+/* Recursive form, simpler but a bit slower. Kept for reference. */
+struct nearest_color {
+ int node_pos;
+ int dist_sqd;
+};
+
+static void colormap_nearest_node(const struct color_node *map,
+ const int node_pos,
+ const uint8_t *target,
+ struct nearest_color *nearest)
+{
+ const struct color_node *kd = map + node_pos;
+ const int s = kd->split;
+ int dx, nearer_kd_id, further_kd_id;
+ const uint8_t *current = kd->val;
+ const int current_to_target = diff(target, current);
+
+ if (current_to_target < nearest->dist_sqd) {
+ nearest->node_pos = node_pos;
+ nearest->dist_sqd = current_to_target;
+ }
+
+ if (kd->left_id != -1 || kd->right_id != -1) {
+ dx = target[s] - current[s];
+
+ if (dx <= 0) nearer_kd_id = kd->left_id, further_kd_id = kd->right_id;
+ else nearer_kd_id = kd->right_id, further_kd_id = kd->left_id;
+
+ if (nearer_kd_id != -1)
+ colormap_nearest_node(map, nearer_kd_id, target, nearest);
+
+ if (further_kd_id != -1 && dx*dx < nearest->dist_sqd)
+ colormap_nearest_node(map, further_kd_id, target, nearest);
+ }
+}
+
+static av_always_inline uint8_t colormap_nearest_recursive(const struct color_node *node, const uint8_t *rgb)
+{
+ struct nearest_color res = {.dist_sqd = INT_MAX, .node_pos = -1};
+ colormap_nearest_node(node, 0, rgb, &res);
+ return node[res.node_pos].palette_id;
+}
+
+struct stack_node {
+ int color_id;
+ int dx2;
+};
+
+static av_always_inline uint8_t colormap_nearest_iterative(const struct color_node *root, const uint8_t *target)
+{
+ int pos = 0, best_node_id = -1, best_dist = INT_MAX, cur_color_id = 0;
+ struct stack_node nodes[16];
+ struct stack_node *node = &nodes[0];
+
+ for (;;) {
+
+ const struct color_node *kd = &root[cur_color_id];
+ const uint8_t *current = kd->val;
+ const int current_to_target = diff(target, current);
+
+ /* Compare current color node to the target and update our best node if
+ * it's actually better. */
+ if (current_to_target < best_dist) {
+ best_node_id = cur_color_id;
+ if (!current_to_target)
+ goto end; // exact match, we can return immediately
+ best_dist = current_to_target;
+ }
+
+ /* Check if it's not a leaf */
+ if (kd->left_id != -1 || kd->right_id != -1) {
+ const int split = kd->split;
+ const int dx = target[split] - current[split];
+ int nearer_kd_id, further_kd_id;
+
+ /* Define which side is the most interesting. */
+ if (dx <= 0) nearer_kd_id = kd->left_id, further_kd_id = kd->right_id;
+ else nearer_kd_id = kd->right_id, further_kd_id = kd->left_id;
+
+ if (nearer_kd_id != -1) {
+ if (further_kd_id != -1) {
+ /* Here, both paths are defined, so we push a state for
+ * when we are going back. */
+ node->color_id = further_kd_id;
+ node->dx2 = dx*dx;
+ pos++;
+ node++;
+ }
+ /* We can now update current color with the most probable path
+ * (no need to create a state since there is nothing to save
+ * anymore). */
+ cur_color_id = nearer_kd_id;
+ continue;
+ } else if (dx*dx < best_dist) {
+ /* The nearest path isn't available, so there is only one path
+ * possible and it's the least probable. We enter it only if the
+ * distance from the current point to the hyper rectangle is
+ * less than our best distance. */
+ cur_color_id = further_kd_id;
+ continue;
+ }
+ }
+
+ /* Unstack as much as we can, typically as long as the least probable
+ * branch aren't actually probable. */
+ do {
+ if (--pos < 0)
+ goto end;
+ node--;
+ } while (node->dx2 >= best_dist);
+
+ /* We got a node where the least probable branch might actually contain
+ * a relevant color. */
+ cur_color_id = node->color_id;
+ }
+
+end:
+ return root[best_node_id].palette_id;
+}
+
+#define COLORMAP_NEAREST(search, palette, root, target) \
+ search == COLOR_SEARCH_NNS_ITERATIVE ? colormap_nearest_iterative(root, target) : \
+ search == COLOR_SEARCH_NNS_RECURSIVE ? colormap_nearest_recursive(root, target) : \
+ colormap_nearest_bruteforce(palette, target)
+
+/**
+ * Check if the requested color is in the cache already. If not, find it in the
+ * color tree and cache it.
+ * Note: r, g, and b are the component of c but are passed as well to avoid
+ * recomputing them (they are generally computed by the caller for other uses).
+ */
+static av_always_inline int color_get(struct cache_node *cache, uint32_t color,
+ uint8_t r, uint8_t g, uint8_t b,
+ const struct color_node *map,
+ const uint32_t *palette,
+ const enum color_search_method search_method)
+{
+ int i;
+ const uint8_t rgb[] = {r, g, b};
+ const uint8_t rhash = r & ((1<<NBITS)-1);
+ const uint8_t ghash = g & ((1<<NBITS)-1);
+ const uint8_t bhash = b & ((1<<NBITS)-1);
+ const unsigned hash = rhash<<(NBITS*2) | ghash<<NBITS | bhash;
+ struct cache_node *node = &cache[hash];
+ struct cached_color *e;
+
+ for (i = 0; i < node->nb_entries; i++) {
+ e = &node->entries[i];
+ if (e->color == color)
+ return e->pal_entry;
+ }
+
+ e = av_dynarray2_add((void**)&node->entries, &node->nb_entries,
+ sizeof(*node->entries), NULL);
+ if (!e)
+ return AVERROR(ENOMEM);
+ e->color = color;
+ e->pal_entry = COLORMAP_NEAREST(search_method, palette, map, rgb);
+ return e->pal_entry;
+}
+
+static av_always_inline int get_dst_color_err(struct cache_node *cache,
+ uint32_t c, const struct color_node *map,
+ const uint32_t *palette,
+ int *er, int *eg, int *eb,
+ const enum color_search_method search_method)
+{
+ const uint8_t r = c >> 16 & 0xff;
+ const uint8_t g = c >> 8 & 0xff;
+ const uint8_t b = c & 0xff;
+ const int dstx = color_get(cache, c, r, g, b, map, palette, search_method);
+ const uint32_t dstc = palette[dstx];
+ *er = r - (dstc >> 16 & 0xff);
+ *eg = g - (dstc >> 8 & 0xff);
+ *eb = b - (dstc & 0xff);
+ return dstx;
+}
+
+static av_always_inline int set_frame(PaletteUseContext *s, AVFrame *out, AVFrame *in,
+ int x_start, int y_start, int w, int h,
+ enum dithering_mode dither,
+ const enum color_search_method search_method)
+{
+ int x, y;
+ const struct color_node *map = s->map;
+ struct cache_node *cache = s->cache;
+ const uint32_t *palette = s->palette;
+ const int src_linesize = in ->linesize[0] >> 2;
+ const int dst_linesize = out->linesize[0];
+ uint32_t *src = ((uint32_t *)in ->data[0]) + y_start*src_linesize;
+ uint8_t *dst = out->data[0] + y_start*dst_linesize;
+
+ w += x_start;
+ h += y_start;
+
+ for (y = y_start; y < h; y++) {
+ for (x = x_start; x < w; x++) {
+ int er, eg, eb;
+
+ if (dither == DITHERING_BAYER) {
+ const int d = s->ordered_dither[(y & 7)<<3 | (x & 7)];
+ const uint8_t r8 = src[x] >> 16 & 0xff;
+ const uint8_t g8 = src[x] >> 8 & 0xff;
+ const uint8_t b8 = src[x] & 0xff;
+ const uint8_t r = av_clip_uint8(r8 + d);
+ const uint8_t g = av_clip_uint8(g8 + d);
+ const uint8_t b = av_clip_uint8(b8 + d);
+ const uint32_t c = r<<16 | g<<8 | b;
+ const int color = color_get(cache, c, r, g, b, map, palette, search_method);
+
+ if (color < 0)
+ return color;
+ dst[x] = color;
+
+ } else if (dither == DITHERING_HECKBERT) {
+ const int right = x < w - 1, down = y < h - 1;
+ const int color = get_dst_color_err(cache, src[x], map, palette, &er, &eg, &eb, search_method);
+
+ if (color < 0)
+ return color;
+ dst[x] = color;
+
+ if (right) src[ x + 1] = dither_color(src[ x + 1], er, eg, eb, 3, 3);
+ if ( down) src[src_linesize + x ] = dither_color(src[src_linesize + x ], er, eg, eb, 3, 3);
+ if (right && down) src[src_linesize + x + 1] = dither_color(src[src_linesize + x + 1], er, eg, eb, 2, 3);
+
+ } else if (dither == DITHERING_FLOYD_STEINBERG) {
+ const int right = x < w - 1, down = y < h - 1, left = x > x_start;
+ const int color = get_dst_color_err(cache, src[x], map, palette, &er, &eg, &eb, search_method);
+
+ if (color < 0)
+ return color;
+ dst[x] = color;
+
+ if (right) src[ x + 1] = dither_color(src[ x + 1], er, eg, eb, 7, 4);
+ if (left && down) src[src_linesize + x - 1] = dither_color(src[src_linesize + x - 1], er, eg, eb, 3, 4);
+ if ( down) src[src_linesize + x ] = dither_color(src[src_linesize + x ], er, eg, eb, 5, 4);
+ if (right && down) src[src_linesize + x + 1] = dither_color(src[src_linesize + x + 1], er, eg, eb, 1, 4);
+
+ } else if (dither == DITHERING_SIERRA2) {
+ const int right = x < w - 1, down = y < h - 1, left = x > x_start;
+ const int right2 = x < w - 2, left2 = x > x_start + 1;
+ const int color = get_dst_color_err(cache, src[x], map, palette, &er, &eg, &eb, search_method);
+
+ if (color < 0)
+ return color;
+ dst[x] = color;
+
+ if (right) src[ x + 1] = dither_color(src[ x + 1], er, eg, eb, 4, 4);
+ if (right2) src[ x + 2] = dither_color(src[ x + 2], er, eg, eb, 3, 4);
+
+ if (down) {
+ if (left2) src[ src_linesize + x - 2] = dither_color(src[ src_linesize + x - 2], er, eg, eb, 1, 4);
+ if (left) src[ src_linesize + x - 1] = dither_color(src[ src_linesize + x - 1], er, eg, eb, 2, 4);
+ src[ src_linesize + x ] = dither_color(src[ src_linesize + x ], er, eg, eb, 3, 4);
+ if (right) src[ src_linesize + x + 1] = dither_color(src[ src_linesize + x + 1], er, eg, eb, 2, 4);
+ if (right2) src[ src_linesize + x + 2] = dither_color(src[ src_linesize + x + 2], er, eg, eb, 1, 4);
+ }
+
+ } else if (dither == DITHERING_SIERRA2_4A) {
+ const int right = x < w - 1, down = y < h - 1, left = x > x_start;
+ const int color = get_dst_color_err(cache, src[x], map, palette, &er, &eg, &eb, search_method);
+
+ if (color < 0)
+ return color;
+ dst[x] = color;
+
+ if (right) src[ x + 1] = dither_color(src[ x + 1], er, eg, eb, 2, 2);
+ if (left && down) src[src_linesize + x - 1] = dither_color(src[src_linesize + x - 1], er, eg, eb, 1, 2);
+ if ( down) src[src_linesize + x ] = dither_color(src[src_linesize + x ], er, eg, eb, 1, 2);
+
+ } else {
+ const uint8_t r = src[x] >> 16 & 0xff;
+ const uint8_t g = src[x] >> 8 & 0xff;
+ const uint8_t b = src[x] & 0xff;
+ const int color = color_get(cache, src[x] & 0xffffff, r, g, b, map, palette, search_method);
+
+ if (color < 0)
+ return color;
+ dst[x] = color;
+ }
+ }
+ src += src_linesize;
+ dst += dst_linesize;
+ }
+ return 0;
+}
+
+#define INDENT 4
+static void disp_node(AVBPrint *buf,
+ const struct color_node *map,
+ int parent_id, int node_id,
+ int depth)
+{
+ const struct color_node *node = &map[node_id];
+ const uint32_t fontcolor = node->val[0] > 0x50 &&
+ node->val[1] > 0x50 &&
+ node->val[2] > 0x50 ? 0 : 0xffffff;
+ av_bprintf(buf, "%*cnode%d ["
+ "label=\"%c%02X%c%02X%c%02X%c\" "
+ "fillcolor=\"#%02x%02x%02x\" "
+ "fontcolor=\"#%06X\"]\n",
+ depth*INDENT, ' ', node->palette_id,
+ "[ "[node->split], node->val[0],
+ "][ "[node->split], node->val[1],
+ " ]["[node->split], node->val[2],
+ " ]"[node->split],
+ node->val[0], node->val[1], node->val[2],
+ fontcolor);
+ if (parent_id != -1)
+ av_bprintf(buf, "%*cnode%d -> node%d\n", depth*INDENT, ' ',
+ map[parent_id].palette_id, node->palette_id);
+ if (node->left_id != -1) disp_node(buf, map, node_id, node->left_id, depth + 1);
+ if (node->right_id != -1) disp_node(buf, map, node_id, node->right_id, depth + 1);
+}
+
+// debug_kdtree=kdtree.dot -> dot -Tpng kdtree.dot > kdtree.png
+static int disp_tree(const struct color_node *node, const char *fname)
+{
+ AVBPrint buf;
+ FILE *f = av_fopen_utf8(fname, "w");
+
+ if (!f) {
+ int ret = AVERROR(errno);
+ av_log(NULL, AV_LOG_ERROR, "Cannot open file '%s' for writing: %s\n",
+ fname, av_err2str(ret));
+ return ret;
+ }
+
+ av_bprint_init(&buf, 0, AV_BPRINT_SIZE_UNLIMITED);
+
+ av_bprintf(&buf, "digraph {\n");
+ av_bprintf(&buf, " node [style=filled fontsize=10 shape=box]\n");
+ disp_node(&buf, node, -1, 0, 0);
+ av_bprintf(&buf, "}\n");
+
+ fwrite(buf.str, 1, buf.len, f);
+ fclose(f);
+ av_bprint_finalize(&buf, NULL);
+ return 0;
+}
+
+static int debug_accuracy(const struct color_node *node, const uint32_t *palette,
+ const enum color_search_method search_method)
+{
+ int r, g, b, ret = 0;
+
+ for (r = 0; r < 256; r++) {
+ for (g = 0; g < 256; g++) {
+ for (b = 0; b < 256; b++) {
+ const uint8_t rgb[] = {r, g, b};
+ const int r1 = COLORMAP_NEAREST(search_method, palette, node, rgb);
+ const int r2 = colormap_nearest_bruteforce(palette, rgb);
+ if (r1 != r2) {
+ const uint32_t c1 = palette[r1];
+ const uint32_t c2 = palette[r2];
+ const uint8_t palrgb1[] = { c1>>16 & 0xff, c1>> 8 & 0xff, c1 & 0xff };
+ const uint8_t palrgb2[] = { c2>>16 & 0xff, c2>> 8 & 0xff, c2 & 0xff };
+ const int d1 = diff(palrgb1, rgb);
+ const int d2 = diff(palrgb2, rgb);
+ if (d1 != d2) {
+ av_log(NULL, AV_LOG_ERROR,
+ "/!\\ %02X%02X%02X: %d ! %d (%06X ! %06X) / dist: %d ! %d\n",
+ r, g, b, r1, r2, c1 & 0xffffff, c2 & 0xffffff, d1, d2);
+ ret = 1;
+ }
+ }
+ }
+ }
+ }
+ return ret;
+}
+
+struct color {
+ uint32_t value;
+ uint8_t pal_id;
+};
+
+struct color_rect {
+ uint8_t min[3];
+ uint8_t max[3];
+};
+
+typedef int (*cmp_func)(const void *, const void *);
+
+#define DECLARE_CMP_FUNC(name, pos) \
+static int cmp_##name(const void *pa, const void *pb) \
+{ \
+ const struct color *a = pa; \
+ const struct color *b = pb; \
+ return (a->value >> (8 * (2 - (pos))) & 0xff) \
+ - (b->value >> (8 * (2 - (pos))) & 0xff); \
+}
+
+DECLARE_CMP_FUNC(r, 0)
+DECLARE_CMP_FUNC(g, 1)
+DECLARE_CMP_FUNC(b, 2)
+
+static const cmp_func cmp_funcs[] = {cmp_r, cmp_g, cmp_b};
+
+static int get_next_color(const uint8_t *color_used, const uint32_t *palette,
+ int *component, const struct color_rect *box)
+{
+ int wr, wg, wb;
+ int i, longest = 0;
+ unsigned nb_color = 0;
+ struct color_rect ranges;
+ struct color tmp_pal[256];
+ cmp_func cmpf;
+
+ ranges.min[0] = ranges.min[1] = ranges.min[2] = 0xff;
+ ranges.max[0] = ranges.max[1] = ranges.max[2] = 0x00;
+
+ for (i = 0; i < AVPALETTE_COUNT; i++) {
+ const uint32_t c = palette[i];
+ const uint8_t r = c >> 16 & 0xff;
+ const uint8_t g = c >> 8 & 0xff;
+ const uint8_t b = c & 0xff;
+
+ if (color_used[i] ||
+ r < box->min[0] || g < box->min[1] || b < box->min[2] ||
+ r > box->max[0] || g > box->max[1] || b > box->max[2])
+ continue;
+
+ if (r < ranges.min[0]) ranges.min[0] = r;
+ if (g < ranges.min[1]) ranges.min[1] = g;
+ if (b < ranges.min[2]) ranges.min[2] = b;
+
+ if (r > ranges.max[0]) ranges.max[0] = r;
+ if (g > ranges.max[1]) ranges.max[1] = g;
+ if (b > ranges.max[2]) ranges.max[2] = b;
+
+ tmp_pal[nb_color].value = c;
+ tmp_pal[nb_color].pal_id = i;
+
+ nb_color++;
+ }
+
+ if (!nb_color)
+ return -1;
+
+ /* define longest axis that will be the split component */
+ wr = ranges.max[0] - ranges.min[0];
+ wg = ranges.max[1] - ranges.min[1];
+ wb = ranges.max[2] - ranges.min[2];
+ if (wr >= wg && wr >= wb) longest = 0;
+ if (wg >= wr && wg >= wb) longest = 1;
+ if (wb >= wr && wb >= wg) longest = 2;
+ cmpf = cmp_funcs[longest];
+ *component = longest;
+
+ /* sort along this axis to get median */
+ AV_QSORT(tmp_pal, nb_color, struct color, cmpf);
+
+ return tmp_pal[nb_color >> 1].pal_id;
+}
+
+static int colormap_insert(struct color_node *map,
+ uint8_t *color_used,
+ int *nb_used,
+ const uint32_t *palette,
+ const struct color_rect *box)
+{
+ uint32_t c;
+ int component, cur_id;
+ int node_left_id = -1, node_right_id = -1;
+ struct color_node *node;
+ struct color_rect box1, box2;
+ const int pal_id = get_next_color(color_used, palette, &component, box);
+
+ if (pal_id < 0)
+ return -1;
+
+ /* create new node with that color */
+ cur_id = (*nb_used)++;
+ c = palette[pal_id];
+ node = &map[cur_id];
+ node->split = component;
+ node->palette_id = pal_id;
+ node->val[0] = c>>16 & 0xff;
+ node->val[1] = c>> 8 & 0xff;
+ node->val[2] = c & 0xff;
+
+ color_used[pal_id] = 1;
+
+ /* get the two boxes this node creates */
+ box1 = box2 = *box;
+ box1.max[component] = node->val[component];
+ box2.min[component] = node->val[component] + 1;
+
+ node_left_id = colormap_insert(map, color_used, nb_used, palette, &box1);
+
+ if (box2.min[component] <= box2.max[component])
+ node_right_id = colormap_insert(map, color_used, nb_used, palette, &box2);
+
+ node->left_id = node_left_id;
+ node->right_id = node_right_id;
+
+ return cur_id;
+}
+
+static int cmp_pal_entry(const void *a, const void *b)
+{
+ const int c1 = *(const uint32_t *)a & 0xffffff;
+ const int c2 = *(const uint32_t *)b & 0xffffff;
+ return c1 - c2;
+}
+
+static void load_colormap(PaletteUseContext *s)
+{
+ int i, nb_used = 0;
+ uint8_t color_used[AVPALETTE_COUNT] = {0};
+ uint32_t last_color = 0;
+ struct color_rect box;
+
+ /* disable transparent colors and dups */
+ qsort(s->palette, AVPALETTE_COUNT, sizeof(*s->palette), cmp_pal_entry);
+ for (i = 0; i < AVPALETTE_COUNT; i++) {
+ const uint32_t c = s->palette[i];
+ if (i != 0 && c == last_color) {
+ color_used[i] = 1;
+ continue;
+ }
+ last_color = c;
+ if ((c & 0xff000000) != 0xff000000) {
+ color_used[i] = 1; // ignore transparent color(s)
+ continue;
+ }
+ }
+
+ box.min[0] = box.min[1] = box.min[2] = 0x00;
+ box.max[0] = box.max[1] = box.max[2] = 0xff;
+
+ colormap_insert(s->map, color_used, &nb_used, s->palette, &box);
+
+ if (s->dot_filename)
+ disp_tree(s->map, s->dot_filename);
+
+ if (s->debug_accuracy) {
+ if (!debug_accuracy(s->map, s->palette, s->color_search_method))
+ av_log(NULL, AV_LOG_INFO, "Accuracy check passed\n");
+ }
+}
+
+static void debug_mean_error(PaletteUseContext *s, const AVFrame *in1,
+ const AVFrame *in2, int frame_count)
+{
+ int x, y;
+ const uint32_t *palette = s->palette;
+ uint32_t *src1 = (uint32_t *)in1->data[0];
+ uint8_t *src2 = in2->data[0];
+ const int src1_linesize = in1->linesize[0] >> 2;
+ const int src2_linesize = in2->linesize[0];
+ const float div = in1->width * in1->height * 3;
+ unsigned mean_err = 0;
+
+ for (y = 0; y < in1->height; y++) {
+ for (x = 0; x < in1->width; x++) {
+ const uint32_t c1 = src1[x];
+ const uint32_t c2 = palette[src2[x]];
+ const uint8_t rgb1[] = {c1 >> 16 & 0xff, c1 >> 8 & 0xff, c1 & 0xff};
+ const uint8_t rgb2[] = {c2 >> 16 & 0xff, c2 >> 8 & 0xff, c2 & 0xff};
+ mean_err += diff(rgb1, rgb2);
+ }
+ src1 += src1_linesize;
+ src2 += src2_linesize;
+ }
+
+ s->total_mean_err += mean_err;
+
+ av_log(NULL, AV_LOG_INFO, "MEP:%.3f TotalMEP:%.3f\n",
+ mean_err / div, s->total_mean_err / (div * frame_count));
+}
+
+static void set_processing_window(enum diff_mode diff_mode,
+ const AVFrame *prv_src, const AVFrame *cur_src,
+ const AVFrame *prv_dst, AVFrame *cur_dst,
+ int *xp, int *yp, int *wp, int *hp)
+{
+ int x_start = 0, y_start = 0;
+ int width = cur_src->width;
+ int height = cur_src->height;
+
+ if (prv_src && diff_mode == DIFF_MODE_RECTANGLE) {
+ int y;
+ int x_end = cur_src->width - 1,
+ y_end = cur_src->height - 1;
+ const uint32_t *prv_srcp = (const uint32_t *)prv_src->data[0];
+ const uint32_t *cur_srcp = (const uint32_t *)cur_src->data[0];
+ const uint8_t *prv_dstp = prv_dst->data[0];
+ uint8_t *cur_dstp = cur_dst->data[0];
+
+ const int prv_src_linesize = prv_src->linesize[0] >> 2;
+ const int cur_src_linesize = cur_src->linesize[0] >> 2;
+ const int prv_dst_linesize = prv_dst->linesize[0];
+ const int cur_dst_linesize = cur_dst->linesize[0];
+
+ /* skip common lines */
+ while (y_start < y_end && !memcmp(prv_srcp + y_start*prv_src_linesize,
+ cur_srcp + y_start*cur_src_linesize,
+ cur_src->width * 4)) {
+ memcpy(cur_dstp + y_start*cur_dst_linesize,
+ prv_dstp + y_start*prv_dst_linesize,
+ cur_dst->width);
+ y_start++;
+ }
+ while (y_end > y_start && !memcmp(prv_srcp + y_end*prv_src_linesize,
+ cur_srcp + y_end*cur_src_linesize,
+ cur_src->width * 4)) {
+ memcpy(cur_dstp + y_end*cur_dst_linesize,
+ prv_dstp + y_end*prv_dst_linesize,
+ cur_dst->width);
+ y_end--;
+ }
+
+ height = y_end + 1 - y_start;
+
+ /* skip common columns */
+ while (x_start < x_end) {
+ int same_column = 1;
+ for (y = y_start; y <= y_end; y++) {
+ if (prv_srcp[y*prv_src_linesize + x_start] != cur_srcp[y*cur_src_linesize + x_start]) {
+ same_column = 0;
+ break;
+ }
+ }
+ if (!same_column)
+ break;
+ x_start++;
+ }
+ while (x_end > x_start) {
+ int same_column = 1;
+ for (y = y_start; y <= y_end; y++) {
+ if (prv_srcp[y*prv_src_linesize + x_end] != cur_srcp[y*cur_src_linesize + x_end]) {
+ same_column = 0;
+ break;
+ }
+ }
+ if (!same_column)
+ break;
+ x_end--;
+ }
+ width = x_end + 1 - x_start;
+
+ if (x_start) {
+ for (y = y_start; y <= y_end; y++)
+ memcpy(cur_dstp + y*cur_dst_linesize,
+ prv_dstp + y*prv_dst_linesize, x_start);
+ }
+ if (x_end != cur_src->width - 1) {
+ const int copy_len = cur_src->width - 1 - x_end;
+ for (y = y_start; y <= y_end; y++)
+ memcpy(cur_dstp + y*cur_dst_linesize + x_end + 1,
+ prv_dstp + y*prv_dst_linesize + x_end + 1,
+ copy_len);
+ }
+ }
+ *xp = x_start;
+ *yp = y_start;
+ *wp = width;
+ *hp = height;
+}
+
+static AVFrame *apply_palette(AVFilterLink *inlink, AVFrame *in)
+{
+ int x, y, w, h;
+ AVFilterContext *ctx = inlink->dst;
+ PaletteUseContext *s = ctx->priv;
+ AVFilterLink *outlink = inlink->dst->outputs[0];
+
+ AVFrame *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return NULL;
+ }
+ av_frame_copy_props(out, in);
+
+ set_processing_window(s->diff_mode, s->last_in, in,
+ s->last_out, out, &x, &y, &w, &h);
+ av_frame_free(&s->last_in);
+ av_frame_free(&s->last_out);
+ s->last_in = av_frame_clone(in);
+ s->last_out = av_frame_clone(out);
+ if (!s->last_in || !s->last_out ||
+ av_frame_make_writable(s->last_in) < 0) {
+ av_frame_free(&in);
+ av_frame_free(&out);
+ return NULL;
+ }
+
+ ff_dlog(ctx, "%dx%d rect: (%d;%d) -> (%d,%d) [area:%dx%d]\n",
+ w, h, x, y, x+w, y+h, in->width, in->height);
+
+ if (s->set_frame(s, out, in, x, y, w, h) < 0) {
+ av_frame_free(&out);
+ return NULL;
+ }
+ memcpy(out->data[1], s->palette, AVPALETTE_SIZE);
+ if (s->calc_mean_err)
+ debug_mean_error(s, in, out, inlink->frame_count_out);
+ av_frame_free(&in);
+ return out;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ int ret;
+ AVFilterContext *ctx = outlink->src;
+ PaletteUseContext *s = ctx->priv;
+
+ outlink->w = ctx->inputs[0]->w;
+ outlink->h = ctx->inputs[0]->h;
+
+ outlink->time_base = ctx->inputs[0]->time_base;
+ if ((ret = ff_dualinput_init(ctx, &s->dinput)) < 0)
+ return ret;
+ return 0;
+}
+
+static int config_input_palette(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+
+ if (inlink->w * inlink->h != AVPALETTE_COUNT) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Palette input must contain exactly %d pixels. "
+ "Specified input has %dx%d=%d pixels\n",
+ AVPALETTE_COUNT, inlink->w, inlink->h,
+ inlink->w * inlink->h);
+ return AVERROR(EINVAL);
+ }
+ return 0;
+}
+
+static void load_palette(PaletteUseContext *s, const AVFrame *palette_frame)
+{
+ int i, x, y;
+ const uint32_t *p = (const uint32_t *)palette_frame->data[0];
+ const int p_linesize = palette_frame->linesize[0] >> 2;
+
+ if (s->new) {
+ memset(s->palette, 0, sizeof(s->palette));
+ memset(s->map, 0, sizeof(s->map));
+ for (i = 0; i < CACHE_SIZE; i++)
+ av_freep(&s->cache[i].entries);
+ memset(s->cache, 0, sizeof(s->cache));
+ }
+
+ i = 0;
+ for (y = 0; y < palette_frame->height; y++) {
+ for (x = 0; x < palette_frame->width; x++)
+ s->palette[i++] = p[x];
+ p += p_linesize;
+ }
+
+ load_colormap(s);
+
+ if (!s->new)
+ s->palette_loaded = 1;
+}
+
+static AVFrame *load_apply_palette(AVFilterContext *ctx, AVFrame *main,
+ const AVFrame *second)
+{
+ AVFilterLink *inlink = ctx->inputs[0];
+ PaletteUseContext *s = ctx->priv;
+ if (!s->palette_loaded) {
+ load_palette(s, second);
+ }
+ return apply_palette(inlink, main);
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ PaletteUseContext *s = inlink->dst->priv;
+ return ff_dualinput_filter_frame(&s->dinput, inlink, in);
+}
+
+#define DEFINE_SET_FRAME(color_search, name, value) \
+static int set_frame_##name(PaletteUseContext *s, AVFrame *out, AVFrame *in, \
+ int x_start, int y_start, int w, int h) \
+{ \
+ return set_frame(s, out, in, x_start, y_start, w, h, value, color_search); \
+}
+
+#define DEFINE_SET_FRAME_COLOR_SEARCH(color_search, color_search_macro) \
+ DEFINE_SET_FRAME(color_search_macro, color_search##_##none, DITHERING_NONE) \
+ DEFINE_SET_FRAME(color_search_macro, color_search##_##bayer, DITHERING_BAYER) \
+ DEFINE_SET_FRAME(color_search_macro, color_search##_##heckbert, DITHERING_HECKBERT) \
+ DEFINE_SET_FRAME(color_search_macro, color_search##_##floyd_steinberg, DITHERING_FLOYD_STEINBERG) \
+ DEFINE_SET_FRAME(color_search_macro, color_search##_##sierra2, DITHERING_SIERRA2) \
+ DEFINE_SET_FRAME(color_search_macro, color_search##_##sierra2_4a, DITHERING_SIERRA2_4A) \
+
+DEFINE_SET_FRAME_COLOR_SEARCH(nns_iterative, COLOR_SEARCH_NNS_ITERATIVE)
+DEFINE_SET_FRAME_COLOR_SEARCH(nns_recursive, COLOR_SEARCH_NNS_RECURSIVE)
+DEFINE_SET_FRAME_COLOR_SEARCH(bruteforce, COLOR_SEARCH_BRUTEFORCE)
+
+#define DITHERING_ENTRIES(color_search) { \
+ set_frame_##color_search##_none, \
+ set_frame_##color_search##_bayer, \
+ set_frame_##color_search##_heckbert, \
+ set_frame_##color_search##_floyd_steinberg, \
+ set_frame_##color_search##_sierra2, \
+ set_frame_##color_search##_sierra2_4a, \
+}
+
+static const set_frame_func set_frame_lut[NB_COLOR_SEARCHES][NB_DITHERING] = {
+ DITHERING_ENTRIES(nns_iterative),
+ DITHERING_ENTRIES(nns_recursive),
+ DITHERING_ENTRIES(bruteforce),
+};
+
+static int dither_value(int p)
+{
+ const int q = p ^ (p >> 3);
+ return (p & 4) >> 2 | (q & 4) >> 1 \
+ | (p & 2) << 1 | (q & 2) << 2 \
+ | (p & 1) << 4 | (q & 1) << 5;
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ PaletteUseContext *s = ctx->priv;
+ s->dinput.repeatlast = 1; // only 1 frame in the palette
+ s->dinput.skip_initial_unpaired = 1;
+ s->dinput.process = load_apply_palette;
+
+ s->set_frame = set_frame_lut[s->color_search_method][s->dither];
+
+ if (s->dither == DITHERING_BAYER) {
+ int i;
+ const int delta = 1 << (5 - s->bayer_scale); // to avoid too much luma
+
+ for (i = 0; i < FF_ARRAY_ELEMS(s->ordered_dither); i++)
+ s->ordered_dither[i] = (dither_value(i) >> s->bayer_scale) - delta;
+ }
+
+ return 0;
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ PaletteUseContext *s = outlink->src->priv;
+ return ff_dualinput_request_frame(&s->dinput, outlink);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ int i;
+ PaletteUseContext *s = ctx->priv;
+
+ ff_dualinput_uninit(&s->dinput);
+ for (i = 0; i < CACHE_SIZE; i++)
+ av_freep(&s->cache[i].entries);
+ av_frame_free(&s->last_in);
+ av_frame_free(&s->last_out);
+}
+
+static const AVFilterPad paletteuse_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .needs_writable = 1, // for error diffusal dithering
+ },{
+ .name = "palette",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_input_palette,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad paletteuse_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_paletteuse = {
+ .name = "paletteuse",
+ .description = NULL_IF_CONFIG_SMALL("Use a palette to downsample an input video stream."),
+ .priv_size = sizeof(PaletteUseContext),
+ .query_formats = query_formats,
+ .init = init,
+ .uninit = uninit,
+ .inputs = paletteuse_inputs,
+ .outputs = paletteuse_outputs,
+ .priv_class = &paletteuse_class,
+};
diff --git a/libavfilter/vf_perspective.c b/libavfilter/vf_perspective.c
new file mode 100644
index 0000000000..92495097cc
--- /dev/null
+++ b/libavfilter/vf_perspective.c
@@ -0,0 +1,525 @@
+/*
+ * Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (c) 2013 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/eval.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+#define SUB_PIXEL_BITS 8
+#define SUB_PIXELS (1 << SUB_PIXEL_BITS)
+#define COEFF_BITS 11
+
+#define LINEAR 0
+#define CUBIC 1
+
+typedef struct PerspectiveContext {
+ const AVClass *class;
+ char *expr_str[4][2];
+ double ref[4][2];
+ int32_t (*pv)[2];
+ int32_t coeff[SUB_PIXELS][4];
+ int interpolation;
+ int linesize[4];
+ int height[4];
+ int hsub, vsub;
+ int nb_planes;
+ int sense;
+ int eval_mode;
+
+ int (*perspective)(AVFilterContext *ctx,
+ void *arg, int job, int nb_jobs);
+} PerspectiveContext;
+
+#define OFFSET(x) offsetof(PerspectiveContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+enum PERSPECTIVESense {
+ PERSPECTIVE_SENSE_SOURCE = 0, ///< coordinates give locations in source of corners of destination.
+ PERSPECTIVE_SENSE_DESTINATION = 1, ///< coordinates give locations in destination of corners of source.
+};
+
+enum EvalMode {
+ EVAL_MODE_INIT,
+ EVAL_MODE_FRAME,
+ EVAL_MODE_NB
+};
+
+static const AVOption perspective_options[] = {
+ { "x0", "set top left x coordinate", OFFSET(expr_str[0][0]), AV_OPT_TYPE_STRING, {.str="0"}, 0, 0, FLAGS },
+ { "y0", "set top left y coordinate", OFFSET(expr_str[0][1]), AV_OPT_TYPE_STRING, {.str="0"}, 0, 0, FLAGS },
+ { "x1", "set top right x coordinate", OFFSET(expr_str[1][0]), AV_OPT_TYPE_STRING, {.str="W"}, 0, 0, FLAGS },
+ { "y1", "set top right y coordinate", OFFSET(expr_str[1][1]), AV_OPT_TYPE_STRING, {.str="0"}, 0, 0, FLAGS },
+ { "x2", "set bottom left x coordinate", OFFSET(expr_str[2][0]), AV_OPT_TYPE_STRING, {.str="0"}, 0, 0, FLAGS },
+ { "y2", "set bottom left y coordinate", OFFSET(expr_str[2][1]), AV_OPT_TYPE_STRING, {.str="H"}, 0, 0, FLAGS },
+ { "x3", "set bottom right x coordinate", OFFSET(expr_str[3][0]), AV_OPT_TYPE_STRING, {.str="W"}, 0, 0, FLAGS },
+ { "y3", "set bottom right y coordinate", OFFSET(expr_str[3][1]), AV_OPT_TYPE_STRING, {.str="H"}, 0, 0, FLAGS },
+ { "interpolation", "set interpolation", OFFSET(interpolation), AV_OPT_TYPE_INT, {.i64=LINEAR}, 0, 1, FLAGS, "interpolation" },
+ { "linear", "", 0, AV_OPT_TYPE_CONST, {.i64=LINEAR}, 0, 0, FLAGS, "interpolation" },
+ { "cubic", "", 0, AV_OPT_TYPE_CONST, {.i64=CUBIC}, 0, 0, FLAGS, "interpolation" },
+ { "sense", "specify the sense of the coordinates", OFFSET(sense), AV_OPT_TYPE_INT, {.i64=PERSPECTIVE_SENSE_SOURCE}, 0, 1, FLAGS, "sense"},
+ { "source", "specify locations in source to send to corners in destination",
+ 0, AV_OPT_TYPE_CONST, {.i64=PERSPECTIVE_SENSE_SOURCE}, 0, 0, FLAGS, "sense"},
+ { "destination", "specify locations in destination to send corners of source",
+ 0, AV_OPT_TYPE_CONST, {.i64=PERSPECTIVE_SENSE_DESTINATION}, 0, 0, FLAGS, "sense"},
+ { "eval", "specify when to evaluate expressions", OFFSET(eval_mode), AV_OPT_TYPE_INT, {.i64 = EVAL_MODE_INIT}, 0, EVAL_MODE_NB-1, FLAGS, "eval" },
+ { "init", "eval expressions once during initialization", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_INIT}, .flags = FLAGS, .unit = "eval" },
+ { "frame", "eval expressions per-frame", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_FRAME}, .flags = FLAGS, .unit = "eval" },
+
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(perspective);
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA420P,
+ AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ422P,AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ411P,
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP, AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE
+ };
+
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static inline double get_coeff(double d)
+{
+ double coeff, A = -0.60;
+
+ d = fabs(d);
+
+ if (d < 1.0)
+ coeff = (1.0 - (A + 3.0) * d * d + (A + 2.0) * d * d * d);
+ else if (d < 2.0)
+ coeff = (-4.0 * A + 8.0 * A * d - 5.0 * A * d * d + A * d * d * d);
+ else
+ coeff = 0.0;
+
+ return coeff;
+}
+
+static const char *const var_names[] = { "W", "H", "in", "on", NULL };
+enum { VAR_W, VAR_H, VAR_IN, VAR_ON, VAR_VARS_NB };
+
+static int calc_persp_luts(AVFilterContext *ctx, AVFilterLink *inlink)
+{
+ PerspectiveContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ double (*ref)[2] = s->ref;
+
+ double values[VAR_VARS_NB] = { [VAR_W] = inlink->w, [VAR_H] = inlink->h,
+ [VAR_IN] = inlink->frame_count_out + 1,
+ [VAR_ON] = outlink->frame_count_in + 1 };
+ const int h = values[VAR_H];
+ const int w = values[VAR_W];
+ double x0, x1, x2, x3, x4, x5, x6, x7, x8, q;
+ double t0, t1, t2, t3;
+ int x, y, i, j, ret;
+
+ for (i = 0; i < 4; i++) {
+ for (j = 0; j < 2; j++) {
+ if (!s->expr_str[i][j])
+ return AVERROR(EINVAL);
+ ret = av_expr_parse_and_eval(&s->ref[i][j], s->expr_str[i][j],
+ var_names, &values[0],
+ NULL, NULL, NULL, NULL,
+ 0, 0, ctx);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ switch (s->sense) {
+ case PERSPECTIVE_SENSE_SOURCE:
+ x6 = ((ref[0][0] - ref[1][0] - ref[2][0] + ref[3][0]) *
+ (ref[2][1] - ref[3][1]) -
+ ( ref[0][1] - ref[1][1] - ref[2][1] + ref[3][1]) *
+ (ref[2][0] - ref[3][0])) * h;
+ x7 = ((ref[0][1] - ref[1][1] - ref[2][1] + ref[3][1]) *
+ (ref[1][0] - ref[3][0]) -
+ ( ref[0][0] - ref[1][0] - ref[2][0] + ref[3][0]) *
+ (ref[1][1] - ref[3][1])) * w;
+ q = ( ref[1][0] - ref[3][0]) * (ref[2][1] - ref[3][1]) -
+ ( ref[2][0] - ref[3][0]) * (ref[1][1] - ref[3][1]);
+
+ x0 = q * (ref[1][0] - ref[0][0]) * h + x6 * ref[1][0];
+ x1 = q * (ref[2][0] - ref[0][0]) * w + x7 * ref[2][0];
+ x2 = q * ref[0][0] * w * h;
+ x3 = q * (ref[1][1] - ref[0][1]) * h + x6 * ref[1][1];
+ x4 = q * (ref[2][1] - ref[0][1]) * w + x7 * ref[2][1];
+ x5 = q * ref[0][1] * w * h;
+ x8 = q * w * h;
+ break;
+ case PERSPECTIVE_SENSE_DESTINATION:
+ t0 = ref[0][0] * (ref[3][1] - ref[1][1]) +
+ ref[1][0] * (ref[0][1] - ref[3][1]) +
+ ref[3][0] * (ref[1][1] - ref[0][1]);
+ t1 = ref[1][0] * (ref[2][1] - ref[3][1]) +
+ ref[2][0] * (ref[3][1] - ref[1][1]) +
+ ref[3][0] * (ref[1][1] - ref[2][1]);
+ t2 = ref[0][0] * (ref[3][1] - ref[2][1]) +
+ ref[2][0] * (ref[0][1] - ref[3][1]) +
+ ref[3][0] * (ref[2][1] - ref[0][1]);
+ t3 = ref[0][0] * (ref[1][1] - ref[2][1]) +
+ ref[1][0] * (ref[2][1] - ref[0][1]) +
+ ref[2][0] * (ref[0][1] - ref[1][1]);
+
+ x0 = t0 * t1 * w * (ref[2][1] - ref[0][1]);
+ x1 = t0 * t1 * w * (ref[0][0] - ref[2][0]);
+ x2 = t0 * t1 * w * (ref[0][1] * ref[2][0] - ref[0][0] * ref[2][1]);
+ x3 = t1 * t2 * h * (ref[1][1] - ref[0][1]);
+ x4 = t1 * t2 * h * (ref[0][0] - ref[1][0]);
+ x5 = t1 * t2 * h * (ref[0][1] * ref[1][0] - ref[0][0] * ref[1][1]);
+ x6 = t1 * t2 * (ref[1][1] - ref[0][1]) +
+ t0 * t3 * (ref[2][1] - ref[3][1]);
+ x7 = t1 * t2 * (ref[0][0] - ref[1][0]) +
+ t0 * t3 * (ref[3][0] - ref[2][0]);
+ x8 = t1 * t2 * (ref[0][1] * ref[1][0] - ref[0][0] * ref[1][1]) +
+ t0 * t3 * (ref[2][0] * ref[3][1] - ref[2][1] * ref[3][0]);
+ break;
+ default:
+ av_assert0(0);
+ }
+
+ for (y = 0; y < h; y++){
+ for (x = 0; x < w; x++){
+ int u, v;
+
+ u = lrint(SUB_PIXELS * (x0 * x + x1 * y + x2) /
+ (x6 * x + x7 * y + x8));
+ v = lrint(SUB_PIXELS * (x3 * x + x4 * y + x5) /
+ (x6 * x + x7 * y + x8));
+
+ s->pv[x + y * w][0] = u;
+ s->pv[x + y * w][1] = v;
+ }
+ }
+
+ return 0;
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ PerspectiveContext *s = ctx->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ int h = inlink->h;
+ int w = inlink->w;
+ int i, j, ret;
+ s->hsub = desc->log2_chroma_w;
+ s->vsub = desc->log2_chroma_h;
+ s->nb_planes = av_pix_fmt_count_planes(inlink->format);
+ if ((ret = av_image_fill_linesizes(s->linesize, inlink->format, inlink->w)) < 0)
+ return ret;
+
+ s->height[1] = s->height[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
+ s->height[0] = s->height[3] = inlink->h;
+
+ s->pv = av_realloc_f(s->pv, w * h, 2 * sizeof(*s->pv));
+ if (!s->pv)
+ return AVERROR(ENOMEM);
+
+ if (s->eval_mode == EVAL_MODE_INIT) {
+ if ((ret = calc_persp_luts(ctx, inlink)) < 0) {
+ return ret;
+ }
+ }
+
+ for (i = 0; i < SUB_PIXELS; i++){
+ double d = i / (double)SUB_PIXELS;
+ double temp[4];
+ double sum = 0;
+
+ for (j = 0; j < 4; j++)
+ temp[j] = get_coeff(j - d - 1);
+
+ for (j = 0; j < 4; j++)
+ sum += temp[j];
+
+ for (j = 0; j < 4; j++)
+ s->coeff[i][j] = lrint((1 << COEFF_BITS) * temp[j] / sum);
+ }
+
+ return 0;
+}
+
+typedef struct ThreadData {
+ uint8_t *dst;
+ int dst_linesize;
+ uint8_t *src;
+ int src_linesize;
+ int w, h;
+ int hsub, vsub;
+} ThreadData;
+
+static int resample_cubic(AVFilterContext *ctx, void *arg,
+ int job, int nb_jobs)
+{
+ PerspectiveContext *s = ctx->priv;
+ ThreadData *td = arg;
+ uint8_t *dst = td->dst;
+ int dst_linesize = td->dst_linesize;
+ uint8_t *src = td->src;
+ int src_linesize = td->src_linesize;
+ int w = td->w;
+ int h = td->h;
+ int hsub = td->hsub;
+ int vsub = td->vsub;
+ int start = (h * job) / nb_jobs;
+ int end = (h * (job+1)) / nb_jobs;
+ const int linesize = s->linesize[0];
+ int x, y;
+
+ for (y = start; y < end; y++) {
+ int sy = y << vsub;
+ for (x = 0; x < w; x++) {
+ int u, v, subU, subV, sum, sx;
+
+ sx = x << hsub;
+ u = s->pv[sx + sy * linesize][0] >> hsub;
+ v = s->pv[sx + sy * linesize][1] >> vsub;
+ subU = u & (SUB_PIXELS - 1);
+ subV = v & (SUB_PIXELS - 1);
+ u >>= SUB_PIXEL_BITS;
+ v >>= SUB_PIXEL_BITS;
+
+ if (u > 0 && v > 0 && u < w - 2 && v < h - 2){
+ const int index = u + v*src_linesize;
+ const int a = s->coeff[subU][0];
+ const int b = s->coeff[subU][1];
+ const int c = s->coeff[subU][2];
+ const int d = s->coeff[subU][3];
+
+ sum = s->coeff[subV][0] * (a * src[index - 1 - src_linesize] + b * src[index - 0 - src_linesize] +
+ c * src[index + 1 - src_linesize] + d * src[index + 2 - src_linesize]) +
+ s->coeff[subV][1] * (a * src[index - 1 ] + b * src[index - 0 ] +
+ c * src[index + 1 ] + d * src[index + 2 ]) +
+ s->coeff[subV][2] * (a * src[index - 1 + src_linesize] + b * src[index - 0 + src_linesize] +
+ c * src[index + 1 + src_linesize] + d * src[index + 2 + src_linesize]) +
+ s->coeff[subV][3] * (a * src[index - 1 + 2 * src_linesize] + b * src[index - 0 + 2 * src_linesize] +
+ c * src[index + 1 + 2 * src_linesize] + d * src[index + 2 + 2 * src_linesize]);
+ } else {
+ int dx, dy;
+
+ sum = 0;
+
+ for (dy = 0; dy < 4; dy++) {
+ int iy = v + dy - 1;
+
+ if (iy < 0)
+ iy = 0;
+ else if (iy >= h)
+ iy = h-1;
+ for (dx = 0; dx < 4; dx++) {
+ int ix = u + dx - 1;
+
+ if (ix < 0)
+ ix = 0;
+ else if (ix >= w)
+ ix = w - 1;
+
+ sum += s->coeff[subU][dx] * s->coeff[subV][dy] * src[ ix + iy * src_linesize];
+ }
+ }
+ }
+
+ sum = (sum + (1<<(COEFF_BITS * 2 - 1))) >> (COEFF_BITS * 2);
+ sum = av_clip_uint8(sum);
+ dst[x + y * dst_linesize] = sum;
+ }
+ }
+ return 0;
+}
+
+static int resample_linear(AVFilterContext *ctx, void *arg,
+ int job, int nb_jobs)
+{
+ PerspectiveContext *s = ctx->priv;
+ ThreadData *td = arg;
+ uint8_t *dst = td->dst;
+ int dst_linesize = td->dst_linesize;
+ uint8_t *src = td->src;
+ int src_linesize = td->src_linesize;
+ int w = td->w;
+ int h = td->h;
+ int hsub = td->hsub;
+ int vsub = td->vsub;
+ int start = (h * job) / nb_jobs;
+ int end = (h * (job+1)) / nb_jobs;
+ const int linesize = s->linesize[0];
+ int x, y;
+
+ for (y = start; y < end; y++){
+ int sy = y << vsub;
+ for (x = 0; x < w; x++){
+ int u, v, subU, subV, sum, sx, index, subUI, subVI;
+
+ sx = x << hsub;
+ u = s->pv[sx + sy * linesize][0] >> hsub;
+ v = s->pv[sx + sy * linesize][1] >> vsub;
+ subU = u & (SUB_PIXELS - 1);
+ subV = v & (SUB_PIXELS - 1);
+ u >>= SUB_PIXEL_BITS;
+ v >>= SUB_PIXEL_BITS;
+
+ index = u + v * src_linesize;
+ subUI = SUB_PIXELS - subU;
+ subVI = SUB_PIXELS - subV;
+
+ if ((unsigned)u < (unsigned)(w - 1)){
+ if((unsigned)v < (unsigned)(h - 1)){
+ sum = subVI * (subUI * src[index] + subU * src[index + 1]) +
+ subV * (subUI * src[index + src_linesize] + subU * src[index + src_linesize + 1]);
+ sum = (sum + (1 << (SUB_PIXEL_BITS * 2 - 1)))>> (SUB_PIXEL_BITS * 2);
+ } else {
+ if (v < 0)
+ v = 0;
+ else
+ v = h - 1;
+ index = u + v * src_linesize;
+ sum = subUI * src[index] + subU * src[index + 1];
+ sum = (sum + (1 << (SUB_PIXEL_BITS - 1))) >> SUB_PIXEL_BITS;
+ }
+ } else {
+ if (u < 0)
+ u = 0;
+ else
+ u = w - 1;
+ if ((unsigned)v < (unsigned)(h - 1)){
+ index = u + v * src_linesize;
+ sum = subVI * src[index] + subV * src[index + src_linesize];
+ sum = (sum + (1 << (SUB_PIXEL_BITS - 1))) >> SUB_PIXEL_BITS;
+ } else {
+ if (v < 0)
+ v = 0;
+ else
+ v = h - 1;
+ index = u + v * src_linesize;
+ sum = src[index];
+ }
+ }
+
+ sum = av_clip_uint8(sum);
+ dst[x + y * dst_linesize] = sum;
+ }
+ }
+ return 0;
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ PerspectiveContext *s = ctx->priv;
+
+ switch (s->interpolation) {
+ case LINEAR: s->perspective = resample_linear; break;
+ case CUBIC: s->perspective = resample_cubic; break;
+ }
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ PerspectiveContext *s = ctx->priv;
+ AVFrame *out;
+ int plane;
+ int ret;
+
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&frame);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, frame);
+
+ if (s->eval_mode == EVAL_MODE_FRAME) {
+ if ((ret = calc_persp_luts(ctx, inlink)) < 0) {
+ av_frame_free(&out);
+ return ret;
+ }
+ }
+
+ for (plane = 0; plane < s->nb_planes; plane++) {
+ int hsub = plane == 1 || plane == 2 ? s->hsub : 0;
+ int vsub = plane == 1 || plane == 2 ? s->vsub : 0;
+ ThreadData td = {.dst = out->data[plane],
+ .dst_linesize = out->linesize[plane],
+ .src = frame->data[plane],
+ .src_linesize = frame->linesize[plane],
+ .w = s->linesize[plane],
+ .h = s->height[plane],
+ .hsub = hsub,
+ .vsub = vsub };
+ ctx->internal->execute(ctx, s->perspective, &td, NULL, FFMIN(td.h, ff_filter_get_nb_threads(ctx)));
+ }
+
+ av_frame_free(&frame);
+ return ff_filter_frame(outlink, out);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ PerspectiveContext *s = ctx->priv;
+
+ av_freep(&s->pv);
+}
+
+static const AVFilterPad perspective_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ { NULL }
+};
+
+static const AVFilterPad perspective_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_perspective = {
+ .name = "perspective",
+ .description = NULL_IF_CONFIG_SMALL("Correct the perspective of video."),
+ .priv_size = sizeof(PerspectiveContext),
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = perspective_inputs,
+ .outputs = perspective_outputs,
+ .priv_class = &perspective_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
+};
diff --git a/libavfilter/vf_phase.c b/libavfilter/vf_phase.c
new file mode 100644
index 0000000000..fadeb6266d
--- /dev/null
+++ b/libavfilter/vf_phase.c
@@ -0,0 +1,333 @@
+/*
+ * Copyright (c) 2004 Ville Saari
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+enum PhaseMode {
+ PROGRESSIVE,
+ TOP_FIRST,
+ BOTTOM_FIRST,
+ TOP_FIRST_ANALYZE,
+ BOTTOM_FIRST_ANALYZE,
+ ANALYZE,
+ FULL_ANALYZE,
+ AUTO,
+ AUTO_ANALYZE
+};
+
+typedef struct PhaseContext {
+ const AVClass *class;
+ int mode; ///<PhaseMode
+ AVFrame *frame; /* previous frame */
+ int nb_planes;
+ int planeheight[4];
+ int linesize[4];
+} PhaseContext;
+
+#define OFFSET(x) offsetof(PhaseContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+#define CONST(name, help, val, unit) { name, help, 0, AV_OPT_TYPE_CONST, {.i64=val}, 0, 0, FLAGS, unit }
+
+static const AVOption phase_options[] = {
+ { "mode", "set phase mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=AUTO_ANALYZE}, PROGRESSIVE, AUTO_ANALYZE, FLAGS, "mode" },
+ CONST("p", "progressive", PROGRESSIVE, "mode"),
+ CONST("t", "top first", TOP_FIRST, "mode"),
+ CONST("b", "bottom first", BOTTOM_FIRST, "mode"),
+ CONST("T", "top first analyze", TOP_FIRST_ANALYZE, "mode"),
+ CONST("B", "bottom first analyze", BOTTOM_FIRST_ANALYZE, "mode"),
+ CONST("u", "analyze", ANALYZE, "mode"),
+ CONST("U", "full analyze", FULL_ANALYZE, "mode"),
+ CONST("a", "auto", AUTO, "mode"),
+ CONST("A", "auto analyze", AUTO_ANALYZE, "mode"),
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(phase);
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA420P,
+ AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ422P,AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ411P,
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP, AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE
+ };
+
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ PhaseContext *s = inlink->dst->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ int ret;
+
+ if ((ret = av_image_fill_linesizes(s->linesize, inlink->format, inlink->w)) < 0)
+ return ret;
+
+ s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
+ s->planeheight[0] = s->planeheight[3] = inlink->h;
+
+ s->nb_planes = av_pix_fmt_count_planes(inlink->format);
+
+ return 0;
+}
+
+/*
+ * This macro interpolates the value of both fields at a point halfway
+ * between lines and takes the squared difference. In field resolution
+ * the point is a quarter pixel below a line in one field and a quarter
+ * pixel above a line in other.
+ *
+ * (The result is actually multiplied by 25)
+ */
+#define DIFF(a, as, b, bs) ((t) = ((*(a) - (b)[bs]) << 2) + (a)[(as) << 1] - (b)[-(bs)], (t) * (t))
+
+/*
+ * Find which field combination has the smallest average squared difference
+ * between the fields.
+ */
+static enum PhaseMode analyze_plane(void *ctx, enum PhaseMode mode, AVFrame *old, AVFrame *new)
+{
+ double bdiff, tdiff, pdiff;
+
+ if (mode == AUTO) {
+ mode = new->interlaced_frame ? new->top_field_first ?
+ TOP_FIRST : BOTTOM_FIRST : PROGRESSIVE;
+ } else if (mode == AUTO_ANALYZE) {
+ mode = new->interlaced_frame ? new->top_field_first ?
+ TOP_FIRST_ANALYZE : BOTTOM_FIRST_ANALYZE : FULL_ANALYZE;
+ }
+
+ if (mode <= BOTTOM_FIRST) {
+ bdiff = pdiff = tdiff = 65536.0;
+ } else {
+ const int ns = new->linesize[0];
+ const int os = old->linesize[0];
+ const uint8_t *nptr = new->data[0];
+ const uint8_t *optr = old->data[0];
+ const int h = new->height;
+ const int w = new->width;
+ int bdif, tdif, pdif;
+ double scale;
+
+ int top = 0, t;
+ const uint8_t *rend, *end = nptr + (h - 2) * ns;
+
+ bdiff = pdiff = tdiff = 0.0;
+
+ nptr += ns;
+ optr += os;
+ while (nptr < end) {
+ pdif = tdif = bdif = 0;
+
+ switch (mode) {
+ case TOP_FIRST_ANALYZE:
+ if (top) {
+ for (rend = nptr + w; nptr < rend; nptr++, optr++) {
+ pdif += DIFF(nptr, ns, nptr, ns);
+ tdif += DIFF(nptr, ns, optr, os);
+ }
+ } else {
+ for (rend = nptr + w; nptr < rend; nptr++, optr++) {
+ pdif += DIFF(nptr, ns, nptr, ns);
+ tdif += DIFF(optr, os, nptr, ns);
+ }
+ }
+ break;
+ case BOTTOM_FIRST_ANALYZE:
+ if (top) {
+ for (rend = nptr + w; nptr < rend; nptr++, optr++) {
+ pdif += DIFF(nptr, ns, nptr, ns);
+ bdif += DIFF(optr, os, nptr, ns);
+ }
+ } else {
+ for (rend = nptr + w; nptr < rend; nptr++, optr++) {
+ pdif += DIFF(nptr, ns, nptr, ns);
+ bdif += DIFF(nptr, ns, optr, os);
+ }
+ }
+ break;
+ case ANALYZE:
+ if (top) {
+ for (rend = nptr + w; nptr < rend; nptr++, optr++) {
+ tdif += DIFF(nptr, ns, optr, os);
+ bdif += DIFF(optr, os, nptr, ns);
+ }
+ } else {
+ for (rend = nptr + w; nptr < rend; nptr++, optr++) {
+ bdif += DIFF(nptr, ns, optr, os);
+ tdif += DIFF(optr, os, nptr, ns);
+ }
+ }
+ break;
+ case FULL_ANALYZE:
+ if (top) {
+ for (rend = nptr + w; nptr < rend; nptr++, optr++) {
+ pdif += DIFF(nptr, ns, nptr, ns);
+ tdif += DIFF(nptr, ns, optr, os);
+ bdif += DIFF(optr, os, nptr, ns);
+ }
+ } else {
+ for (rend = nptr + w; nptr < rend; nptr++, optr++) {
+ pdif += DIFF(nptr, ns, nptr, ns);
+ bdif += DIFF(nptr, ns, optr, os);
+ tdif += DIFF(optr, os, nptr, ns);
+ }
+ }
+ break;
+ default:
+ av_assert0(0);
+ }
+
+ pdiff += (double)pdif;
+ tdiff += (double)tdif;
+ bdiff += (double)bdif;
+ nptr += ns - w;
+ optr += os - w;
+ top ^= 1;
+ }
+
+ scale = 1.0 / (w * (h - 3)) / 25.0;
+ pdiff *= scale;
+ tdiff *= scale;
+ bdiff *= scale;
+
+ if (mode == TOP_FIRST_ANALYZE) {
+ bdiff = 65536.0;
+ } else if (mode == BOTTOM_FIRST_ANALYZE) {
+ tdiff = 65536.0;
+ } else if (mode == ANALYZE) {
+ pdiff = 65536.0;
+ }
+
+ if (bdiff < pdiff && bdiff < tdiff) {
+ mode = BOTTOM_FIRST;
+ } else if (tdiff < pdiff && tdiff < bdiff) {
+ mode = TOP_FIRST;
+ } else {
+ mode = PROGRESSIVE;
+ }
+ }
+
+ av_log(ctx, AV_LOG_DEBUG, "mode=%c tdiff=%f bdiff=%f pdiff=%f\n",
+ mode == BOTTOM_FIRST ? 'b' : mode == TOP_FIRST ? 't' : 'p',
+ tdiff, bdiff, pdiff);
+ return mode;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ PhaseContext *s = ctx->priv;
+ enum PhaseMode mode;
+ int plane, top, y;
+ AVFrame *out;
+
+ if (ctx->is_disabled) {
+ av_frame_free(&s->frame);
+ /* we keep a reference to the previous frame so the filter can start
+ * being useful as soon as it's not disabled, avoiding the 1-frame
+ * delay. */
+ s->frame = av_frame_clone(in);
+ return ff_filter_frame(outlink, in);
+ }
+
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+
+ if (!s->frame) {
+ s->frame = in;
+ mode = PROGRESSIVE;
+ } else {
+ mode = analyze_plane(ctx, s->mode, s->frame, in);
+ }
+
+ for (plane = 0; plane < s->nb_planes; plane++) {
+ const uint8_t *buf = s->frame->data[plane];
+ const uint8_t *from = in->data[plane];
+ uint8_t *to = out->data[plane];
+
+ for (y = 0, top = 1; y < s->planeheight[plane]; y++, top ^= 1) {
+ memcpy(to, mode == (top ? BOTTOM_FIRST : TOP_FIRST) ? buf : from, s->linesize[plane]);
+
+ buf += s->frame->linesize[plane];
+ from += in->linesize[plane];
+ to += out->linesize[plane];
+ }
+ }
+
+ if (in != s->frame)
+ av_frame_free(&s->frame);
+ s->frame = in;
+ return ff_filter_frame(outlink, out);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ PhaseContext *s = ctx->priv;
+
+ av_frame_free(&s->frame);
+}
+
+static const AVFilterPad phase_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ { NULL }
+};
+
+static const AVFilterPad phase_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_phase = {
+ .name = "phase",
+ .description = NULL_IF_CONFIG_SMALL("Phase shift fields."),
+ .priv_size = sizeof(PhaseContext),
+ .priv_class = &phase_class,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = phase_inputs,
+ .outputs = phase_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
+};
diff --git a/libavfilter/vf_pixdesctest.c b/libavfilter/vf_pixdesctest.c
index 0c5b7a16f4..d6423acb91 100644
--- a/libavfilter/vf_pixdesctest.c
+++ b/libavfilter/vf_pixdesctest.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2009 Stefano Sabatini
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -47,7 +47,7 @@ static int config_props(AVFilterLink *inlink)
priv->pix_desc = av_pix_fmt_desc_get(inlink->format);
av_freep(&priv->line);
- if (!(priv->line = av_malloc(sizeof(*priv->line) * inlink->w)))
+ if (!(priv->line = av_malloc_array(sizeof(*priv->line), inlink->w)))
return AVERROR(ENOMEM);
return 0;
@@ -59,6 +59,8 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
AVFilterLink *outlink = inlink->dst->outputs[0];
AVFrame *out;
int i, c, w = inlink->w, h = inlink->h;
+ const int cw = AV_CEIL_RSHIFT(w, priv->pix_desc->log2_chroma_w);
+ const int ch = AV_CEIL_RSHIFT(h, priv->pix_desc->log2_chroma_h);
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
@@ -69,27 +71,26 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
av_frame_copy_props(out, in);
for (i = 0; i < 4; i++) {
- int h = outlink->h;
- h = i == 1 || i == 2 ? h>>priv->pix_desc->log2_chroma_h : h;
+ const int h1 = i == 1 || i == 2 ? ch : h;
if (out->data[i]) {
uint8_t *data = out->data[i] +
- (out->linesize[i] > 0 ? 0 : out->linesize[i] * (h-1));
- memset(data, 0, FFABS(out->linesize[i]) * h);
+ (out->linesize[i] > 0 ? 0 : out->linesize[i] * (h1-1));
+ memset(data, 0, FFABS(out->linesize[i]) * h1);
}
}
/* copy palette */
if (priv->pix_desc->flags & AV_PIX_FMT_FLAG_PAL ||
priv->pix_desc->flags & AV_PIX_FMT_FLAG_PSEUDOPAL)
- memcpy(out->data[1], in->data[1], 256*4);
+ memcpy(out->data[1], in->data[1], AVPALETTE_SIZE);
for (c = 0; c < priv->pix_desc->nb_components; c++) {
- int w1 = c == 1 || c == 2 ? w>>priv->pix_desc->log2_chroma_w : w;
- int h1 = c == 1 || c == 2 ? h>>priv->pix_desc->log2_chroma_h : h;
+ const int w1 = c == 1 || c == 2 ? cw : w;
+ const int h1 = c == 1 || c == 2 ? ch : h;
for (i = 0; i < h1; i++) {
av_read_image_line(priv->line,
- in->data,
+ (void*)in->data,
in->linesize,
priv->pix_desc,
0, i, c, w1, 0);
@@ -127,11 +128,8 @@ static const AVFilterPad avfilter_vf_pixdesctest_outputs[] = {
AVFilter ff_vf_pixdesctest = {
.name = "pixdesctest",
.description = NULL_IF_CONFIG_SMALL("Test pixel format definitions."),
-
- .priv_size = sizeof(PixdescTestContext),
- .uninit = uninit,
-
- .inputs = avfilter_vf_pixdesctest_inputs,
-
- .outputs = avfilter_vf_pixdesctest_outputs,
+ .priv_size = sizeof(PixdescTestContext),
+ .uninit = uninit,
+ .inputs = avfilter_vf_pixdesctest_inputs,
+ .outputs = avfilter_vf_pixdesctest_outputs,
};
diff --git a/libavfilter/vf_pp.c b/libavfilter/vf_pp.c
new file mode 100644
index 0000000000..bac1d53356
--- /dev/null
+++ b/libavfilter/vf_pp.c
@@ -0,0 +1,196 @@
+/*
+ * Copyright (c) 2002 A'rpi
+ * Copyright (C) 2012 Clément Bœsch
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/**
+ * @file
+ * libpostproc filter, ported from MPlayer.
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/opt.h"
+#include "internal.h"
+
+#include "libpostproc/postprocess.h"
+
+typedef struct {
+ const AVClass *class;
+ char *subfilters;
+ int mode_id;
+ pp_mode *modes[PP_QUALITY_MAX + 1];
+ void *pp_ctx;
+} PPFilterContext;
+
+#define OFFSET(x) offsetof(PPFilterContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption pp_options[] = {
+ { "subfilters", "set postprocess subfilters", OFFSET(subfilters), AV_OPT_TYPE_STRING, {.str="de"}, .flags = FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(pp);
+
+static av_cold int pp_init(AVFilterContext *ctx)
+{
+ int i;
+ PPFilterContext *pp = ctx->priv;
+
+ for (i = 0; i <= PP_QUALITY_MAX; i++) {
+ pp->modes[i] = pp_get_mode_by_name_and_quality(pp->subfilters, i);
+ if (!pp->modes[i])
+ return AVERROR_EXTERNAL;
+ }
+ pp->mode_id = PP_QUALITY_MAX;
+ return 0;
+}
+
+static int pp_process_command(AVFilterContext *ctx, const char *cmd, const char *args,
+ char *res, int res_len, int flags)
+{
+ PPFilterContext *pp = ctx->priv;
+
+ if (!strcmp(cmd, "quality")) {
+ pp->mode_id = av_clip(strtol(args, NULL, 10), 0, PP_QUALITY_MAX);
+ return 0;
+ }
+ return AVERROR(ENOSYS);
+}
+
+static int pp_query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUVJ420P,
+ AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVJ422P,
+ AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_GBRP,
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUVJ444P,
+ AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUVJ440P,
+ AV_PIX_FMT_GRAY8,
+ AV_PIX_FMT_NONE
+ };
+
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static int pp_config_props(AVFilterLink *inlink)
+{
+ int flags = PP_CPU_CAPS_AUTO;
+ PPFilterContext *pp = inlink->dst->priv;
+
+ switch (inlink->format) {
+ case AV_PIX_FMT_GRAY8:
+ case AV_PIX_FMT_YUVJ420P:
+ case AV_PIX_FMT_YUV420P: flags |= PP_FORMAT_420; break;
+ case AV_PIX_FMT_YUVJ422P:
+ case AV_PIX_FMT_YUV422P: flags |= PP_FORMAT_422; break;
+ case AV_PIX_FMT_YUV411P: flags |= PP_FORMAT_411; break;
+ case AV_PIX_FMT_GBRP:
+ case AV_PIX_FMT_YUVJ444P:
+ case AV_PIX_FMT_YUV444P: flags |= PP_FORMAT_444; break;
+ case AV_PIX_FMT_YUVJ440P:
+ case AV_PIX_FMT_YUV440P: flags |= PP_FORMAT_440; break;
+ default: av_assert0(0);
+ }
+
+ pp->pp_ctx = pp_get_context(inlink->w, inlink->h, flags);
+ if (!pp->pp_ctx)
+ return AVERROR(ENOMEM);
+ return 0;
+}
+
+static int pp_filter_frame(AVFilterLink *inlink, AVFrame *inbuf)
+{
+ AVFilterContext *ctx = inlink->dst;
+ PPFilterContext *pp = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ const int aligned_w = FFALIGN(outlink->w, 8);
+ const int aligned_h = FFALIGN(outlink->h, 8);
+ AVFrame *outbuf;
+ int qstride, qp_type;
+ int8_t *qp_table ;
+
+ outbuf = ff_get_video_buffer(outlink, aligned_w, aligned_h);
+ if (!outbuf) {
+ av_frame_free(&inbuf);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(outbuf, inbuf);
+ outbuf->width = inbuf->width;
+ outbuf->height = inbuf->height;
+ qp_table = av_frame_get_qp_table(inbuf, &qstride, &qp_type);
+
+ pp_postprocess((const uint8_t **)inbuf->data, inbuf->linesize,
+ outbuf->data, outbuf->linesize,
+ aligned_w, outlink->h,
+ qp_table,
+ qstride,
+ pp->modes[pp->mode_id],
+ pp->pp_ctx,
+ outbuf->pict_type | (qp_type ? PP_PICT_TYPE_QP2 : 0));
+
+ av_frame_free(&inbuf);
+ return ff_filter_frame(outlink, outbuf);
+}
+
+static av_cold void pp_uninit(AVFilterContext *ctx)
+{
+ int i;
+ PPFilterContext *pp = ctx->priv;
+
+ for (i = 0; i <= PP_QUALITY_MAX; i++)
+ pp_free_mode(pp->modes[i]);
+ if (pp->pp_ctx)
+ pp_free_context(pp->pp_ctx);
+}
+
+static const AVFilterPad pp_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = pp_config_props,
+ .filter_frame = pp_filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad pp_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_pp = {
+ .name = "pp",
+ .description = NULL_IF_CONFIG_SMALL("Filter video using libpostproc."),
+ .priv_size = sizeof(PPFilterContext),
+ .init = pp_init,
+ .uninit = pp_uninit,
+ .query_formats = pp_query_formats,
+ .inputs = pp_inputs,
+ .outputs = pp_outputs,
+ .process_command = pp_process_command,
+ .priv_class = &pp_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+};
diff --git a/libavfilter/vf_pp7.c b/libavfilter/vf_pp7.c
new file mode 100644
index 0000000000..570a1c90b9
--- /dev/null
+++ b/libavfilter/vf_pp7.c
@@ -0,0 +1,406 @@
+/*
+ * Copyright (c) 2005 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (c) 2014 Arwa Arif <arwaarif1994@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/**
+ * @file
+ * Postprocessing filter - 7
+ *
+ * Originally written by Michael Niedermayer for the MPlayer
+ * project, and ported by Arwa Arif for FFmpeg.
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "internal.h"
+#include "vf_pp7.h"
+
+enum mode {
+ MODE_HARD,
+ MODE_SOFT,
+ MODE_MEDIUM
+};
+
+#define OFFSET(x) offsetof(PP7Context, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption pp7_options[] = {
+ { "qp", "force a constant quantizer parameter", OFFSET(qp), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 64, FLAGS },
+ { "mode", "set thresholding mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64 = MODE_MEDIUM}, 0, 2, FLAGS, "mode" },
+ { "hard", "hard thresholding", 0, AV_OPT_TYPE_CONST, {.i64 = MODE_HARD}, INT_MIN, INT_MAX, FLAGS, "mode" },
+ { "soft", "soft thresholding", 0, AV_OPT_TYPE_CONST, {.i64 = MODE_SOFT}, INT_MIN, INT_MAX, FLAGS, "mode" },
+ { "medium", "medium thresholding", 0, AV_OPT_TYPE_CONST, {.i64 = MODE_MEDIUM}, INT_MIN, INT_MAX, FLAGS, "mode" },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(pp7);
+
+DECLARE_ALIGNED(8, static const uint8_t, dither)[8][8] = {
+ { 0, 48, 12, 60, 3, 51, 15, 63, },
+ { 32, 16, 44, 28, 35, 19, 47, 31, },
+ { 8, 56, 4, 52, 11, 59, 7, 55, },
+ { 40, 24, 36, 20, 43, 27, 39, 23, },
+ { 2, 50, 14, 62, 1, 49, 13, 61, },
+ { 34, 18, 46, 30, 33, 17, 45, 29, },
+ { 10, 58, 6, 54, 9, 57, 5, 53, },
+ { 42, 26, 38, 22, 41, 25, 37, 21, },
+};
+
+#define N0 4
+#define N1 5
+#define N2 10
+#define SN0 2
+#define SN1 2.2360679775
+#define SN2 3.16227766017
+#define N (1 << 16)
+
+static const int factor[16] = {
+ N / (N0 * N0), N / (N0 * N1), N / (N0 * N0), N / (N0 * N2),
+ N / (N1 * N0), N / (N1 * N1), N / (N1 * N0), N / (N1 * N2),
+ N / (N0 * N0), N / (N0 * N1), N / (N0 * N0), N / (N0 * N2),
+ N / (N2 * N0), N / (N2 * N1), N / (N2 * N0), N / (N2 * N2),
+};
+
+static void init_thres2(PP7Context *p)
+{
+ int qp, i;
+ int bias = 0; //FIXME
+
+ for (qp = 0; qp < 99; qp++) {
+ for (i = 0; i < 16; i++) {
+ p->thres2[qp][i] = ((i&1) ? SN2 : SN0) * ((i&4) ? SN2 : SN0) * FFMAX(1, qp) * (1<<2) - 1 - bias;
+ }
+ }
+}
+
+static inline void dctA_c(int16_t *dst, uint8_t *src, int stride)
+{
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ int s0 = src[0 * stride] + src[6 * stride];
+ int s1 = src[1 * stride] + src[5 * stride];
+ int s2 = src[2 * stride] + src[4 * stride];
+ int s3 = src[3 * stride];
+ int s = s3 + s3;
+ s3 = s - s0;
+ s0 = s + s0;
+ s = s2 + s1;
+ s2 = s2 - s1;
+ dst[0] = s0 + s;
+ dst[2] = s0 - s;
+ dst[1] = 2 * s3 + s2;
+ dst[3] = s3 - 2 * s2;
+ src++;
+ dst += 4;
+ }
+}
+
+static void dctB_c(int16_t *dst, int16_t *src)
+{
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ int s0 = src[0 * 4] + src[6 * 4];
+ int s1 = src[1 * 4] + src[5 * 4];
+ int s2 = src[2 * 4] + src[4 * 4];
+ int s3 = src[3 * 4];
+ int s = s3 + s3;
+ s3 = s - s0;
+ s0 = s + s0;
+ s = s2 + s1;
+ s2 = s2 - s1;
+ dst[0 * 4] = s0 + s;
+ dst[2 * 4] = s0 - s;
+ dst[1 * 4] = 2 * s3 + s2;
+ dst[3 * 4] = s3 - 2 * s2;
+ src++;
+ dst++;
+ }
+}
+
+static int hardthresh_c(PP7Context *p, int16_t *src, int qp)
+{
+ int i;
+ int a;
+
+ a = src[0] * factor[0];
+ for (i = 1; i < 16; i++) {
+ unsigned int threshold1 = p->thres2[qp][i];
+ unsigned int threshold2 = threshold1 << 1;
+ int level = src[i];
+ if (((unsigned)(level + threshold1)) > threshold2)
+ a += level * factor[i];
+ }
+ return (a + (1 << 11)) >> 12;
+}
+
+static int mediumthresh_c(PP7Context *p, int16_t *src, int qp)
+{
+ int i;
+ int a;
+
+ a = src[0] * factor[0];
+ for (i = 1; i < 16; i++) {
+ unsigned int threshold1 = p->thres2[qp][i];
+ unsigned int threshold2 = threshold1 << 1;
+ int level = src[i];
+ if (((unsigned)(level + threshold1)) > threshold2) {
+ if (((unsigned)(level + 2 * threshold1)) > 2 * threshold2)
+ a += level * factor[i];
+ else {
+ if (level > 0)
+ a += 2 * (level - (int)threshold1) * factor[i];
+ else
+ a += 2 * (level + (int)threshold1) * factor[i];
+ }
+ }
+ }
+ return (a + (1 << 11)) >> 12;
+}
+
+static int softthresh_c(PP7Context *p, int16_t *src, int qp)
+{
+ int i;
+ int a;
+
+ a = src[0] * factor[0];
+ for (i = 1; i < 16; i++) {
+ unsigned int threshold1 = p->thres2[qp][i];
+ unsigned int threshold2 = threshold1 << 1;
+ int level = src[i];
+ if (((unsigned)(level + threshold1)) > threshold2) {
+ if (level > 0)
+ a += (level - (int)threshold1) * factor[i];
+ else
+ a += (level + (int)threshold1) * factor[i];
+ }
+ }
+ return (a + (1 << 11)) >> 12;
+}
+
+static void filter(PP7Context *p, uint8_t *dst, uint8_t *src,
+ int dst_stride, int src_stride,
+ int width, int height,
+ uint8_t *qp_store, int qp_stride, int is_luma)
+{
+ int x, y;
+ const int stride = is_luma ? p->temp_stride : ((width + 16 + 15) & (~15));
+ uint8_t *p_src = p->src + 8 * stride;
+ int16_t *block = (int16_t *)p->src;
+ int16_t *temp = (int16_t *)(p->src + 32);
+
+ if (!src || !dst) return;
+ for (y = 0; y < height; y++) {
+ int index = 8 + 8 * stride + y * stride;
+ memcpy(p_src + index, src + y * src_stride, width);
+ for (x = 0; x < 8; x++) {
+ p_src[index - x - 1]= p_src[index + x ];
+ p_src[index + width + x ]= p_src[index + width - x - 1];
+ }
+ }
+ for (y = 0; y < 8; y++) {
+ memcpy(p_src + ( 7 - y ) * stride, p_src + ( y + 8 ) * stride, stride);
+ memcpy(p_src + (height + 8 + y) * stride, p_src + (height - y + 7) * stride, stride);
+ }
+ //FIXME (try edge emu)
+
+ for (y = 0; y < height; y++) {
+ for (x = -8; x < 0; x += 4) {
+ const int index = x + y * stride + (8 - 3) * (1 + stride) + 8; //FIXME silly offset
+ uint8_t *src = p_src + index;
+ int16_t *tp = temp + 4 * x;
+
+ dctA_c(tp + 4 * 8, src, stride);
+ }
+ for (x = 0; x < width; ) {
+ const int qps = 3 + is_luma;
+ int qp;
+ int end = FFMIN(x + 8, width);
+
+ if (p->qp)
+ qp = p->qp;
+ else {
+ qp = qp_store[ (FFMIN(x, width - 1) >> qps) + (FFMIN(y, height - 1) >> qps) * qp_stride];
+ qp = ff_norm_qscale(qp, p->qscale_type);
+ }
+ for (; x < end; x++) {
+ const int index = x + y * stride + (8 - 3) * (1 + stride) + 8; //FIXME silly offset
+ uint8_t *src = p_src + index;
+ int16_t *tp = temp + 4 * x;
+ int v;
+
+ if ((x & 3) == 0)
+ dctA_c(tp + 4 * 8, src, stride);
+
+ p->dctB(block, tp);
+
+ v = p->requantize(p, block, qp);
+ v = (v + dither[y & 7][x & 7]) >> 6;
+ if ((unsigned)v > 255)
+ v = (-v) >> 31;
+ dst[x + y * dst_stride] = v;
+ }
+ }
+ }
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P,
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
+ AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P,
+ AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ440P,
+ AV_PIX_FMT_GBRP,
+ AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE
+ };
+
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ PP7Context *pp7 = ctx->priv;
+ const int h = FFALIGN(inlink->h + 16, 16);
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+
+ pp7->hsub = desc->log2_chroma_w;
+ pp7->vsub = desc->log2_chroma_h;
+
+ pp7->temp_stride = FFALIGN(inlink->w + 16, 16);
+ pp7->src = av_malloc_array(pp7->temp_stride, (h + 8) * sizeof(uint8_t));
+
+ if (!pp7->src)
+ return AVERROR(ENOMEM);
+
+ init_thres2(pp7);
+
+ switch (pp7->mode) {
+ case 0: pp7->requantize = hardthresh_c; break;
+ case 1: pp7->requantize = softthresh_c; break;
+ default:
+ case 2: pp7->requantize = mediumthresh_c; break;
+ }
+
+ pp7->dctB = dctB_c;
+
+ if (ARCH_X86)
+ ff_pp7_init_x86(pp7);
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ PP7Context *pp7 = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFrame *out = in;
+
+ int qp_stride = 0;
+ uint8_t *qp_table = NULL;
+
+ if (!pp7->qp)
+ qp_table = av_frame_get_qp_table(in, &qp_stride, &pp7->qscale_type);
+
+ if (!ctx->is_disabled) {
+ const int cw = AV_CEIL_RSHIFT(inlink->w, pp7->hsub);
+ const int ch = AV_CEIL_RSHIFT(inlink->h, pp7->vsub);
+
+ /* get a new frame if in-place is not possible or if the dimensions
+ * are not multiple of 8 */
+ if (!av_frame_is_writable(in) || (inlink->w & 7) || (inlink->h & 7)) {
+ const int aligned_w = FFALIGN(inlink->w, 8);
+ const int aligned_h = FFALIGN(inlink->h, 8);
+
+ out = ff_get_video_buffer(outlink, aligned_w, aligned_h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+ out->width = in->width;
+ out->height = in->height;
+ }
+
+ if (qp_table || pp7->qp) {
+
+ filter(pp7, out->data[0], in->data[0], out->linesize[0], in->linesize[0],
+ inlink->w, inlink->h, qp_table, qp_stride, 1);
+ filter(pp7, out->data[1], in->data[1], out->linesize[1], in->linesize[1],
+ cw, ch, qp_table, qp_stride, 0);
+ filter(pp7, out->data[2], in->data[2], out->linesize[2], in->linesize[2],
+ cw, ch, qp_table, qp_stride, 0);
+ emms_c();
+ }
+ }
+
+ if (in != out) {
+ if (in->data[3])
+ av_image_copy_plane(out->data[3], out->linesize[3],
+ in ->data[3], in ->linesize[3],
+ inlink->w, inlink->h);
+ av_frame_free(&in);
+ }
+ return ff_filter_frame(outlink, out);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ PP7Context *pp7 = ctx->priv;
+ av_freep(&pp7->src);
+}
+
+static const AVFilterPad pp7_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad pp7_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_pp7 = {
+ .name = "pp7",
+ .description = NULL_IF_CONFIG_SMALL("Apply Postprocessing 7 filter."),
+ .priv_size = sizeof(PP7Context),
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = pp7_inputs,
+ .outputs = pp7_outputs,
+ .priv_class = &pp7_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
+};
diff --git a/libavfilter/vf_pp7.h b/libavfilter/vf_pp7.h
new file mode 100644
index 0000000000..9aa8d732c1
--- /dev/null
+++ b/libavfilter/vf_pp7.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2005 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (c) 2014 Arwa Arif <arwaarif1994@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef AVFILTER_PP7_H
+#define AVFILTER_PP7_H
+
+#include "avfilter.h"
+
+typedef struct PP7Context {
+ AVClass *class;
+ int thres2[99][16];
+
+ int qp;
+ int mode;
+ int qscale_type;
+ int hsub;
+ int vsub;
+ int temp_stride;
+ uint8_t *src;
+
+ int (*requantize)(struct PP7Context *p, int16_t *src, int qp);
+ void (*dctB)(int16_t *dst, int16_t *src);
+
+} PP7Context;
+
+void ff_pp7_init_x86(PP7Context *pp7);
+
+#endif /* AVFILTER_PP7_H */
diff --git a/libavfilter/vf_premultiply.c b/libavfilter/vf_premultiply.c
new file mode 100644
index 0000000000..e1b79ab779
--- /dev/null
+++ b/libavfilter/vf_premultiply.c
@@ -0,0 +1,409 @@
+/*
+ * Copyright (c) 2016 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/imgutils.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "framesync.h"
+#include "internal.h"
+#include "video.h"
+
+typedef struct PreMultiplyContext {
+ const AVClass *class;
+ int width[4], height[4];
+ int nb_planes;
+ int planes;
+ int half, depth, offset;
+ FFFrameSync fs;
+
+ void (*premultiply[4])(const uint8_t *msrc, const uint8_t *asrc,
+ uint8_t *dst,
+ ptrdiff_t mlinesize, ptrdiff_t alinesize,
+ ptrdiff_t dlinesize,
+ int w, int h,
+ int half, int shift, int offset);
+} PreMultiplyContext;
+
+#define OFFSET(x) offsetof(PreMultiplyContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption premultiply_options[] = {
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(premultiply);
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUVJ444P,
+ AV_PIX_FMT_YUV444P9, AV_PIX_FMT_YUV444P10,
+ AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV444P14,
+ AV_PIX_FMT_YUV444P16,
+ AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
+ AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16,
+ AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY10, AV_PIX_FMT_GRAY12, AV_PIX_FMT_GRAY16,
+ AV_PIX_FMT_NONE
+ };
+
+ return ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+}
+
+static void premultiply8(const uint8_t *msrc, const uint8_t *asrc,
+ uint8_t *dst,
+ ptrdiff_t mlinesize, ptrdiff_t alinesize,
+ ptrdiff_t dlinesize,
+ int w, int h,
+ int half, int shift, int offset)
+{
+ int x, y;
+
+ for (y = 0; y < h; y++) {
+ for (x = 0; x < w; x++) {
+ dst[x] = ((msrc[x] * (((asrc[x] >> 1) & 1) + asrc[x])) + 128) >> 8;
+ }
+
+ dst += dlinesize;
+ msrc += mlinesize;
+ asrc += alinesize;
+ }
+}
+
+static void premultiply8yuv(const uint8_t *msrc, const uint8_t *asrc,
+ uint8_t *dst,
+ ptrdiff_t mlinesize, ptrdiff_t alinesize,
+ ptrdiff_t dlinesize,
+ int w, int h,
+ int half, int shift, int offset)
+{
+ int x, y;
+
+ for (y = 0; y < h; y++) {
+ for (x = 0; x < w; x++) {
+ dst[x] = ((((msrc[x] - 128) * (((asrc[x] >> 1) & 1) + asrc[x]))) >> 8) + 128;
+ }
+
+ dst += dlinesize;
+ msrc += mlinesize;
+ asrc += alinesize;
+ }
+}
+
+static void premultiply8offset(const uint8_t *msrc, const uint8_t *asrc,
+ uint8_t *dst,
+ ptrdiff_t mlinesize, ptrdiff_t alinesize,
+ ptrdiff_t dlinesize,
+ int w, int h,
+ int half, int shift, int offset)
+{
+ int x, y;
+
+ for (y = 0; y < h; y++) {
+ for (x = 0; x < w; x++) {
+ dst[x] = ((((msrc[x] - offset) * (((asrc[x] >> 1) & 1) + asrc[x])) + 128) >> 8) + offset;
+ }
+
+ dst += dlinesize;
+ msrc += mlinesize;
+ asrc += alinesize;
+ }
+}
+
+static void premultiply16(const uint8_t *mmsrc, const uint8_t *aasrc,
+ uint8_t *ddst,
+ ptrdiff_t mlinesize, ptrdiff_t alinesize,
+ ptrdiff_t dlinesize,
+ int w, int h,
+ int half, int shift, int offset)
+{
+ const uint16_t *msrc = (const uint16_t *)mmsrc;
+ const uint16_t *asrc = (const uint16_t *)aasrc;
+ uint16_t *dst = (uint16_t *)ddst;
+ int x, y;
+
+ for (y = 0; y < h; y++) {
+ for (x = 0; x < w; x++) {
+ dst[x] = ((msrc[x] * (((asrc[x] >> 1) & 1) + asrc[x])) + half) >> shift;
+ }
+
+ dst += dlinesize / 2;
+ msrc += mlinesize / 2;
+ asrc += alinesize / 2;
+ }
+}
+
+static void premultiply16yuv(const uint8_t *mmsrc, const uint8_t *aasrc,
+ uint8_t *ddst,
+ ptrdiff_t mlinesize, ptrdiff_t alinesize,
+ ptrdiff_t dlinesize,
+ int w, int h,
+ int half, int shift, int offset)
+{
+ const uint16_t *msrc = (const uint16_t *)mmsrc;
+ const uint16_t *asrc = (const uint16_t *)aasrc;
+ uint16_t *dst = (uint16_t *)ddst;
+ int x, y;
+
+ for (y = 0; y < h; y++) {
+ for (x = 0; x < w; x++) {
+ dst[x] = ((((msrc[x] - half) * (((asrc[x] >> 1) & 1) + asrc[x]))) >> shift) + half;
+ }
+
+ dst += dlinesize / 2;
+ msrc += mlinesize / 2;
+ asrc += alinesize / 2;
+ }
+}
+
+static void premultiply16offset(const uint8_t *mmsrc, const uint8_t *aasrc,
+ uint8_t *ddst,
+ ptrdiff_t mlinesize, ptrdiff_t alinesize,
+ ptrdiff_t dlinesize,
+ int w, int h,
+ int half, int shift, int offset)
+{
+ const uint16_t *msrc = (const uint16_t *)mmsrc;
+ const uint16_t *asrc = (const uint16_t *)aasrc;
+ uint16_t *dst = (uint16_t *)ddst;
+ int x, y;
+
+ for (y = 0; y < h; y++) {
+ for (x = 0; x < w; x++) {
+ dst[x] = ((((msrc[x] - offset) * (((asrc[x] >> 1) & 1) + asrc[x])) + half) >> shift) + offset;
+ }
+
+ dst += dlinesize / 2;
+ msrc += mlinesize / 2;
+ asrc += alinesize / 2;
+ }
+}
+
+static int process_frame(FFFrameSync *fs)
+{
+ AVFilterContext *ctx = fs->parent;
+ PreMultiplyContext *s = fs->opaque;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFrame *out, *base, *alpha;
+ int ret;
+
+ if ((ret = ff_framesync_get_frame(&s->fs, 0, &base, 0)) < 0 ||
+ (ret = ff_framesync_get_frame(&s->fs, 1, &alpha, 0)) < 0)
+ return ret;
+
+ if (ctx->is_disabled) {
+ out = av_frame_clone(base);
+ if (!out)
+ return AVERROR(ENOMEM);
+ } else {
+ int p, full, limited;
+
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out)
+ return AVERROR(ENOMEM);
+ av_frame_copy_props(out, base);
+
+ full = base->color_range == AVCOL_RANGE_JPEG;
+ limited = base->color_range == AVCOL_RANGE_MPEG;
+
+ switch (outlink->format) {
+ case AV_PIX_FMT_YUV444P:
+ s->premultiply[0] = full ? premultiply8 : premultiply8offset;
+ s->premultiply[1] = premultiply8yuv;
+ s->premultiply[2] = premultiply8yuv;
+ break;
+ case AV_PIX_FMT_YUVJ444P:
+ s->premultiply[0] = premultiply8;
+ s->premultiply[1] = premultiply8yuv;
+ s->premultiply[2] = premultiply8yuv;
+ break;
+ case AV_PIX_FMT_GBRP:
+ s->premultiply[0] = limited ? premultiply8offset : premultiply8;
+ s->premultiply[1] = limited ? premultiply8offset : premultiply8;
+ s->premultiply[2] = limited ? premultiply8offset : premultiply8;
+ break;
+ case AV_PIX_FMT_YUV444P9:
+ case AV_PIX_FMT_YUV444P10:
+ case AV_PIX_FMT_YUV444P12:
+ case AV_PIX_FMT_YUV444P14:
+ case AV_PIX_FMT_YUV444P16:
+ s->premultiply[0] = full ? premultiply16 : premultiply16offset;
+ s->premultiply[1] = premultiply16yuv;
+ s->premultiply[2] = premultiply16yuv;
+ break;
+ case AV_PIX_FMT_GBRP9:
+ case AV_PIX_FMT_GBRP10:
+ case AV_PIX_FMT_GBRP12:
+ case AV_PIX_FMT_GBRP14:
+ case AV_PIX_FMT_GBRP16:
+ s->premultiply[0] = limited ? premultiply16offset : premultiply16;
+ s->premultiply[1] = limited ? premultiply16offset : premultiply16;
+ s->premultiply[2] = limited ? premultiply16offset : premultiply16;
+ break;
+ case AV_PIX_FMT_GRAY8:
+ s->premultiply[0] = limited ? premultiply8offset : premultiply8;
+ break;
+ case AV_PIX_FMT_GRAY10:
+ case AV_PIX_FMT_GRAY12:
+ case AV_PIX_FMT_GRAY16:
+ s->premultiply[0] = limited ? premultiply16offset : premultiply16;
+ break;
+ }
+
+ for (p = 0; p < s->nb_planes; p++) {
+ s->premultiply[p](base->data[p], alpha->data[0],
+ out->data[p],
+ base->linesize[p], alpha->linesize[0],
+ out->linesize[p],
+ s->width[p], s->height[p],
+ s->half, s->depth, s->offset);
+ }
+ }
+ out->pts = av_rescale_q(base->pts, s->fs.time_base, outlink->time_base);
+
+ return ff_filter_frame(outlink, out);
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ PreMultiplyContext *s = ctx->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ int vsub, hsub;
+
+ s->nb_planes = av_pix_fmt_count_planes(inlink->format);
+
+ hsub = desc->log2_chroma_w;
+ vsub = desc->log2_chroma_h;
+ s->height[1] = s->height[2] = AV_CEIL_RSHIFT(inlink->h, vsub);
+ s->height[0] = s->height[3] = inlink->h;
+ s->width[1] = s->width[2] = AV_CEIL_RSHIFT(inlink->w, hsub);
+ s->width[0] = s->width[3] = inlink->w;
+
+ s->depth = desc->comp[0].depth;
+ s->half = (1 << s->depth) / 2;
+ s->offset = 16 << (s->depth - 8);
+
+ return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ PreMultiplyContext *s = ctx->priv;
+ AVFilterLink *base = ctx->inputs[0];
+ AVFilterLink *alpha = ctx->inputs[1];
+ FFFrameSyncIn *in;
+ int ret;
+
+ if (base->format != alpha->format) {
+ av_log(ctx, AV_LOG_ERROR, "inputs must be of same pixel format\n");
+ return AVERROR(EINVAL);
+ }
+ if (base->w != alpha->w ||
+ base->h != alpha->h) {
+ av_log(ctx, AV_LOG_ERROR, "First input link %s parameters "
+ "(size %dx%d) do not match the corresponding "
+ "second input link %s parameters (%dx%d) ",
+ ctx->input_pads[0].name, base->w, base->h,
+ ctx->input_pads[1].name, alpha->w, alpha->h);
+ return AVERROR(EINVAL);
+ }
+
+ outlink->w = base->w;
+ outlink->h = base->h;
+ outlink->time_base = base->time_base;
+ outlink->sample_aspect_ratio = base->sample_aspect_ratio;
+ outlink->frame_rate = base->frame_rate;
+
+ if ((ret = ff_framesync_init(&s->fs, ctx, 2)) < 0)
+ return ret;
+
+ in = s->fs.in;
+ in[0].time_base = base->time_base;
+ in[1].time_base = alpha->time_base;
+ in[0].sync = 1;
+ in[0].before = EXT_STOP;
+ in[0].after = EXT_INFINITY;
+ in[1].sync = 1;
+ in[1].before = EXT_STOP;
+ in[1].after = EXT_INFINITY;
+ s->fs.opaque = s;
+ s->fs.on_event = process_frame;
+
+ return ff_framesync_configure(&s->fs);
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
+{
+ PreMultiplyContext *s = inlink->dst->priv;
+ return ff_framesync_filter_frame(&s->fs, inlink, buf);
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ PreMultiplyContext *s = outlink->src->priv;
+ return ff_framesync_request_frame(&s->fs, outlink);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ PreMultiplyContext *s = ctx->priv;
+
+ ff_framesync_uninit(&s->fs);
+}
+
+static const AVFilterPad premultiply_inputs[] = {
+ {
+ .name = "main",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ {
+ .name = "alpha",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad premultiply_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_premultiply = {
+ .name = "premultiply",
+ .description = NULL_IF_CONFIG_SMALL("PreMultiply first stream with first plane of second stream."),
+ .priv_size = sizeof(PreMultiplyContext),
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = premultiply_inputs,
+ .outputs = premultiply_outputs,
+ .priv_class = &premultiply_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
+};
diff --git a/libavfilter/vf_psnr.c b/libavfilter/vf_psnr.c
new file mode 100644
index 0000000000..af9397123b
--- /dev/null
+++ b/libavfilter/vf_psnr.c
@@ -0,0 +1,418 @@
+/*
+ * Copyright (c) 2011 Roger Pau Monné <roger.pau@entel.upc.edu>
+ * Copyright (c) 2011 Stefano Sabatini
+ * Copyright (c) 2013 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Caculate the PSNR between two input videos.
+ */
+
+#include "libavutil/avstring.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+#include "dualinput.h"
+#include "drawutils.h"
+#include "formats.h"
+#include "internal.h"
+#include "psnr.h"
+#include "video.h"
+
+typedef struct PSNRContext {
+ const AVClass *class;
+ FFDualInputContext dinput;
+ double mse, min_mse, max_mse, mse_comp[4];
+ uint64_t nb_frames;
+ FILE *stats_file;
+ char *stats_file_str;
+ int stats_version;
+ int stats_header_written;
+ int stats_add_max;
+ int max[4], average_max;
+ int is_rgb;
+ uint8_t rgba_map[4];
+ char comps[4];
+ int nb_components;
+ int planewidth[4];
+ int planeheight[4];
+ double planeweight[4];
+ PSNRDSPContext dsp;
+} PSNRContext;
+
+#define OFFSET(x) offsetof(PSNRContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption psnr_options[] = {
+ {"stats_file", "Set file where to store per-frame difference information", OFFSET(stats_file_str), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, FLAGS },
+ {"f", "Set file where to store per-frame difference information", OFFSET(stats_file_str), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, FLAGS },
+ {"stats_version", "Set the format version for the stats file.", OFFSET(stats_version), AV_OPT_TYPE_INT, {.i64=1}, 1, 2, FLAGS },
+ {"output_max", "Add raw stats (max values) to the output log.", OFFSET(stats_add_max), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(psnr);
+
+static inline unsigned pow2(unsigned base)
+{
+ return base*base;
+}
+
+static inline double get_psnr(double mse, uint64_t nb_frames, int max)
+{
+ return 10.0 * log10(pow2(max) / (mse / nb_frames));
+}
+
+static uint64_t sse_line_8bit(const uint8_t *main_line, const uint8_t *ref_line, int outw)
+{
+ int j;
+ unsigned m2 = 0;
+
+ for (j = 0; j < outw; j++)
+ m2 += pow2(main_line[j] - ref_line[j]);
+
+ return m2;
+}
+
+static uint64_t sse_line_16bit(const uint8_t *_main_line, const uint8_t *_ref_line, int outw)
+{
+ int j;
+ uint64_t m2 = 0;
+ const uint16_t *main_line = (const uint16_t *) _main_line;
+ const uint16_t *ref_line = (const uint16_t *) _ref_line;
+
+ for (j = 0; j < outw; j++)
+ m2 += pow2(main_line[j] - ref_line[j]);
+
+ return m2;
+}
+
+static inline
+void compute_images_mse(PSNRContext *s,
+ const uint8_t *main_data[4], const int main_linesizes[4],
+ const uint8_t *ref_data[4], const int ref_linesizes[4],
+ int w, int h, double mse[4])
+{
+ int i, c;
+
+ for (c = 0; c < s->nb_components; c++) {
+ const int outw = s->planewidth[c];
+ const int outh = s->planeheight[c];
+ const uint8_t *main_line = main_data[c];
+ const uint8_t *ref_line = ref_data[c];
+ const int ref_linesize = ref_linesizes[c];
+ const int main_linesize = main_linesizes[c];
+ uint64_t m = 0;
+ for (i = 0; i < outh; i++) {
+ m += s->dsp.sse_line(main_line, ref_line, outw);
+ ref_line += ref_linesize;
+ main_line += main_linesize;
+ }
+ mse[c] = m / (double)(outw * outh);
+ }
+}
+
+static void set_meta(AVDictionary **metadata, const char *key, char comp, float d)
+{
+ char value[128];
+ snprintf(value, sizeof(value), "%0.2f", d);
+ if (comp) {
+ char key2[128];
+ snprintf(key2, sizeof(key2), "%s%c", key, comp);
+ av_dict_set(metadata, key2, value, 0);
+ } else {
+ av_dict_set(metadata, key, value, 0);
+ }
+}
+
+static AVFrame *do_psnr(AVFilterContext *ctx, AVFrame *main,
+ const AVFrame *ref)
+{
+ PSNRContext *s = ctx->priv;
+ double comp_mse[4], mse = 0;
+ int j, c;
+ AVDictionary **metadata = avpriv_frame_get_metadatap(main);
+
+ compute_images_mse(s, (const uint8_t **)main->data, main->linesize,
+ (const uint8_t **)ref->data, ref->linesize,
+ main->width, main->height, comp_mse);
+
+ for (j = 0; j < s->nb_components; j++)
+ mse += comp_mse[j] * s->planeweight[j];
+
+ s->min_mse = FFMIN(s->min_mse, mse);
+ s->max_mse = FFMAX(s->max_mse, mse);
+
+ s->mse += mse;
+ for (j = 0; j < s->nb_components; j++)
+ s->mse_comp[j] += comp_mse[j];
+ s->nb_frames++;
+
+ for (j = 0; j < s->nb_components; j++) {
+ c = s->is_rgb ? s->rgba_map[j] : j;
+ set_meta(metadata, "lavfi.psnr.mse.", s->comps[j], comp_mse[c]);
+ set_meta(metadata, "lavfi.psnr.psnr.", s->comps[j], get_psnr(comp_mse[c], 1, s->max[c]));
+ }
+ set_meta(metadata, "lavfi.psnr.mse_avg", 0, mse);
+ set_meta(metadata, "lavfi.psnr.psnr_avg", 0, get_psnr(mse, 1, s->average_max));
+
+ if (s->stats_file) {
+ if (s->stats_version == 2 && !s->stats_header_written) {
+ fprintf(s->stats_file, "psnr_log_version:2 fields:n");
+ fprintf(s->stats_file, ",mse_avg");
+ for (j = 0; j < s->nb_components; j++) {
+ fprintf(s->stats_file, ",mse_%c", s->comps[j]);
+ }
+ fprintf(s->stats_file, ",psnr_avg");
+ for (j = 0; j < s->nb_components; j++) {
+ fprintf(s->stats_file, ",psnr_%c", s->comps[j]);
+ }
+ if (s->stats_add_max) {
+ fprintf(s->stats_file, ",max_avg");
+ for (j = 0; j < s->nb_components; j++) {
+ fprintf(s->stats_file, ",max_%c", s->comps[j]);
+ }
+ }
+ fprintf(s->stats_file, "\n");
+ s->stats_header_written = 1;
+ }
+ fprintf(s->stats_file, "n:%"PRId64" mse_avg:%0.2f ", s->nb_frames, mse);
+ for (j = 0; j < s->nb_components; j++) {
+ c = s->is_rgb ? s->rgba_map[j] : j;
+ fprintf(s->stats_file, "mse_%c:%0.2f ", s->comps[j], comp_mse[c]);
+ }
+ fprintf(s->stats_file, "psnr_avg:%0.2f ", get_psnr(mse, 1, s->average_max));
+ for (j = 0; j < s->nb_components; j++) {
+ c = s->is_rgb ? s->rgba_map[j] : j;
+ fprintf(s->stats_file, "psnr_%c:%0.2f ", s->comps[j],
+ get_psnr(comp_mse[c], 1, s->max[c]));
+ }
+ if (s->stats_version == 2 && s->stats_add_max) {
+ fprintf(s->stats_file, "max_avg:%d ", s->average_max);
+ for (j = 0; j < s->nb_components; j++) {
+ c = s->is_rgb ? s->rgba_map[j] : j;
+ fprintf(s->stats_file, "max_%c:%d ", s->comps[j], s->max[c]);
+ }
+ }
+ fprintf(s->stats_file, "\n");
+ }
+
+ return main;
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ PSNRContext *s = ctx->priv;
+
+ s->min_mse = +INFINITY;
+ s->max_mse = -INFINITY;
+
+ if (s->stats_file_str) {
+ if (s->stats_version < 2 && s->stats_add_max) {
+ av_log(ctx, AV_LOG_ERROR,
+ "stats_add_max was specified but stats_version < 2.\n" );
+ return AVERROR(EINVAL);
+ }
+ if (!strcmp(s->stats_file_str, "-")) {
+ s->stats_file = stdout;
+ } else {
+ s->stats_file = fopen(s->stats_file_str, "w");
+ if (!s->stats_file) {
+ int err = AVERROR(errno);
+ char buf[128];
+ av_strerror(err, buf, sizeof(buf));
+ av_log(ctx, AV_LOG_ERROR, "Could not open stats file %s: %s\n",
+ s->stats_file_str, buf);
+ return err;
+ }
+ }
+ }
+
+ s->dinput.process = do_psnr;
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY10, AV_PIX_FMT_GRAY12, AV_PIX_FMT_GRAY16,
+#define PF_NOALPHA(suf) AV_PIX_FMT_YUV420##suf, AV_PIX_FMT_YUV422##suf, AV_PIX_FMT_YUV444##suf
+#define PF_ALPHA(suf) AV_PIX_FMT_YUVA420##suf, AV_PIX_FMT_YUVA422##suf, AV_PIX_FMT_YUVA444##suf
+#define PF(suf) PF_NOALPHA(suf), PF_ALPHA(suf)
+ PF(P), PF(P9), PF(P10), PF_NOALPHA(P12), PF_NOALPHA(P14), PF(P16),
+ AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P,
+ AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ444P,
+ AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
+ AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16,
+ AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRAP16,
+ AV_PIX_FMT_NONE
+ };
+
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static int config_input_ref(AVFilterLink *inlink)
+{
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ AVFilterContext *ctx = inlink->dst;
+ PSNRContext *s = ctx->priv;
+ double average_max;
+ unsigned sum;
+ int j;
+
+ s->nb_components = desc->nb_components;
+ if (ctx->inputs[0]->w != ctx->inputs[1]->w ||
+ ctx->inputs[0]->h != ctx->inputs[1]->h) {
+ av_log(ctx, AV_LOG_ERROR, "Width and height of input videos must be same.\n");
+ return AVERROR(EINVAL);
+ }
+ if (ctx->inputs[0]->format != ctx->inputs[1]->format) {
+ av_log(ctx, AV_LOG_ERROR, "Inputs must be of same pixel format.\n");
+ return AVERROR(EINVAL);
+ }
+
+ s->max[0] = (1 << desc->comp[0].depth) - 1;
+ s->max[1] = (1 << desc->comp[1].depth) - 1;
+ s->max[2] = (1 << desc->comp[2].depth) - 1;
+ s->max[3] = (1 << desc->comp[3].depth) - 1;
+
+ s->is_rgb = ff_fill_rgba_map(s->rgba_map, inlink->format) >= 0;
+ s->comps[0] = s->is_rgb ? 'r' : 'y' ;
+ s->comps[1] = s->is_rgb ? 'g' : 'u' ;
+ s->comps[2] = s->is_rgb ? 'b' : 'v' ;
+ s->comps[3] = 'a';
+
+ s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
+ s->planeheight[0] = s->planeheight[3] = inlink->h;
+ s->planewidth[1] = s->planewidth[2] = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
+ s->planewidth[0] = s->planewidth[3] = inlink->w;
+ sum = 0;
+ for (j = 0; j < s->nb_components; j++)
+ sum += s->planeheight[j] * s->planewidth[j];
+ average_max = 0;
+ for (j = 0; j < s->nb_components; j++) {
+ s->planeweight[j] = (double) s->planeheight[j] * s->planewidth[j] / sum;
+ average_max += s->max[j] * s->planeweight[j];
+ }
+ s->average_max = lrint(average_max);
+
+ s->dsp.sse_line = desc->comp[0].depth > 8 ? sse_line_16bit : sse_line_8bit;
+ if (ARCH_X86)
+ ff_psnr_init_x86(&s->dsp, desc->comp[0].depth);
+
+ return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ PSNRContext *s = ctx->priv;
+ AVFilterLink *mainlink = ctx->inputs[0];
+ int ret;
+
+ outlink->w = mainlink->w;
+ outlink->h = mainlink->h;
+ outlink->time_base = mainlink->time_base;
+ outlink->sample_aspect_ratio = mainlink->sample_aspect_ratio;
+ outlink->frame_rate = mainlink->frame_rate;
+ if ((ret = ff_dualinput_init(ctx, &s->dinput)) < 0)
+ return ret;
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
+{
+ PSNRContext *s = inlink->dst->priv;
+ return ff_dualinput_filter_frame(&s->dinput, inlink, inpicref);
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ PSNRContext *s = outlink->src->priv;
+ return ff_dualinput_request_frame(&s->dinput, outlink);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ PSNRContext *s = ctx->priv;
+
+ if (s->nb_frames > 0) {
+ int j;
+ char buf[256];
+
+ buf[0] = 0;
+ for (j = 0; j < s->nb_components; j++) {
+ int c = s->is_rgb ? s->rgba_map[j] : j;
+ av_strlcatf(buf, sizeof(buf), " %c:%f", s->comps[j],
+ get_psnr(s->mse_comp[c], s->nb_frames, s->max[c]));
+ }
+ av_log(ctx, AV_LOG_INFO, "PSNR%s average:%f min:%f max:%f\n",
+ buf,
+ get_psnr(s->mse, s->nb_frames, s->average_max),
+ get_psnr(s->max_mse, 1, s->average_max),
+ get_psnr(s->min_mse, 1, s->average_max));
+ }
+
+ ff_dualinput_uninit(&s->dinput);
+
+ if (s->stats_file && s->stats_file != stdout)
+ fclose(s->stats_file);
+}
+
+static const AVFilterPad psnr_inputs[] = {
+ {
+ .name = "main",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },{
+ .name = "reference",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input_ref,
+ },
+ { NULL }
+};
+
+static const AVFilterPad psnr_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_psnr = {
+ .name = "psnr",
+ .description = NULL_IF_CONFIG_SMALL("Calculate the PSNR between two video streams."),
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .priv_size = sizeof(PSNRContext),
+ .priv_class = &psnr_class,
+ .inputs = psnr_inputs,
+ .outputs = psnr_outputs,
+};
diff --git a/libavfilter/vf_pullup.c b/libavfilter/vf_pullup.c
new file mode 100644
index 0000000000..fa76caad03
--- /dev/null
+++ b/libavfilter/vf_pullup.c
@@ -0,0 +1,776 @@
+/*
+ * Copyright (c) 2003 Rich Felker
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+#include "vf_pullup.h"
+
+#define F_HAVE_BREAKS 1
+#define F_HAVE_AFFINITY 2
+
+#define BREAK_LEFT 1
+#define BREAK_RIGHT 2
+
+#define OFFSET(x) offsetof(PullupContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption pullup_options[] = {
+ { "jl", "set left junk size", OFFSET(junk_left), AV_OPT_TYPE_INT, {.i64=1}, 0, INT_MAX, FLAGS },
+ { "jr", "set right junk size", OFFSET(junk_right), AV_OPT_TYPE_INT, {.i64=1}, 0, INT_MAX, FLAGS },
+ { "jt", "set top junk size", OFFSET(junk_top), AV_OPT_TYPE_INT, {.i64=4}, 1, INT_MAX, FLAGS },
+ { "jb", "set bottom junk size", OFFSET(junk_bottom), AV_OPT_TYPE_INT, {.i64=4}, 1, INT_MAX, FLAGS },
+ { "sb", "set strict breaks", OFFSET(strict_breaks), AV_OPT_TYPE_BOOL,{.i64=0},-1, 1, FLAGS },
+ { "mp", "set metric plane", OFFSET(metric_plane), AV_OPT_TYPE_INT, {.i64=0}, 0, 2, FLAGS, "mp" },
+ { "y", "luma", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "mp" },
+ { "u", "chroma blue", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "mp" },
+ { "v", "chroma red", 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, FLAGS, "mp" },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(pullup);
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
+ AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P,
+ AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_GRAY8,
+ AV_PIX_FMT_NONE
+ };
+
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+#define ABS(a) (((a) ^ ((a) >> 31)) - ((a) >> 31))
+
+static int diff_c(const uint8_t *a, const uint8_t *b, ptrdiff_t s)
+{
+ int i, j, diff = 0;
+
+ for (i = 0; i < 4; i++) {
+ for (j = 0; j < 8; j++)
+ diff += ABS(a[j] - b[j]);
+ a += s;
+ b += s;
+ }
+
+ return diff;
+}
+
+static int comb_c(const uint8_t *a, const uint8_t *b, ptrdiff_t s)
+{
+ int i, j, comb = 0;
+
+ for (i = 0; i < 4; i++) {
+ for (j = 0; j < 8; j++)
+ comb += ABS((a[j] << 1) - b[j - s] - b[j ]) +
+ ABS((b[j] << 1) - a[j ] - a[j + s]);
+ a += s;
+ b += s;
+ }
+
+ return comb;
+}
+
+static int var_c(const uint8_t *a, const uint8_t *b, ptrdiff_t s)
+{
+ int i, j, var = 0;
+
+ for (i = 0; i < 3; i++) {
+ for (j = 0; j < 8; j++)
+ var += ABS(a[j] - a[j + s]);
+ a += s;
+ }
+
+ return 4 * var; /* match comb scaling */
+}
+
+static int alloc_metrics(PullupContext *s, PullupField *f)
+{
+ f->diffs = av_calloc(FFALIGN(s->metric_length, 16), sizeof(*f->diffs));
+ f->combs = av_calloc(FFALIGN(s->metric_length, 16), sizeof(*f->combs));
+ f->vars = av_calloc(FFALIGN(s->metric_length, 16), sizeof(*f->vars));
+
+ if (!f->diffs || !f->combs || !f->vars) {
+ av_freep(&f->diffs);
+ av_freep(&f->combs);
+ av_freep(&f->vars);
+ return AVERROR(ENOMEM);
+ }
+ return 0;
+}
+
+static void free_field_queue(PullupField *head)
+{
+ PullupField *f = head;
+ do {
+ PullupField *next;
+ if (!f)
+ break;
+ av_free(f->diffs);
+ av_free(f->combs);
+ av_free(f->vars);
+ next = f->next;
+ memset(f, 0, sizeof(*f)); // clear all pointers to avoid stale ones
+ av_free(f);
+ f = next;
+ } while (f != head);
+}
+
+static PullupField *make_field_queue(PullupContext *s, int len)
+{
+ PullupField *head, *f;
+
+ f = head = av_mallocz(sizeof(*head));
+ if (!f)
+ return NULL;
+
+ if (alloc_metrics(s, f) < 0) {
+ av_free(f);
+ return NULL;
+ }
+
+ for (; len > 0; len--) {
+ f->next = av_mallocz(sizeof(*f->next));
+ if (!f->next) {
+ free_field_queue(head);
+ return NULL;
+ }
+
+ f->next->prev = f;
+ f = f->next;
+ if (alloc_metrics(s, f) < 0) {
+ free_field_queue(head);
+ return NULL;
+ }
+ }
+
+ f->next = head;
+ head->prev = f;
+
+ return head;
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ PullupContext *s = ctx->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ int mp = s->metric_plane;
+
+ s->nb_planes = av_pix_fmt_count_planes(inlink->format);
+
+ if (mp + 1 > s->nb_planes) {
+ av_log(ctx, AV_LOG_ERROR, "input format does not have such plane\n");
+ return AVERROR(EINVAL);
+ }
+
+ s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
+ s->planeheight[0] = s->planeheight[3] = inlink->h;
+ s->planewidth[1] = s->planewidth[2] = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
+ s->planewidth[0] = s->planewidth[3] = inlink->w;
+
+ s->metric_w = (s->planewidth[mp] - ((s->junk_left + s->junk_right) << 3)) >> 3;
+ s->metric_h = (s->planeheight[mp] - ((s->junk_top + s->junk_bottom) << 1)) >> 3;
+ s->metric_offset = (s->junk_left << 3) + (s->junk_top << 1) * s->planewidth[mp];
+ s->metric_length = s->metric_w * s->metric_h;
+
+ av_log(ctx, AV_LOG_DEBUG, "w: %d h: %d\n", s->metric_w, s->metric_h);
+ av_log(ctx, AV_LOG_DEBUG, "offset: %d length: %d\n", s->metric_offset, s->metric_length);
+
+ s->head = make_field_queue(s, 8);
+ if (!s->head)
+ return AVERROR(ENOMEM);
+
+ s->diff = diff_c;
+ s->comb = comb_c;
+ s->var = var_c;
+
+ if (ARCH_X86)
+ ff_pullup_init_x86(s);
+ return 0;
+}
+
+static PullupBuffer *pullup_lock_buffer(PullupBuffer *b, int parity)
+{
+ if (!b)
+ return NULL;
+
+ if ((parity + 1) & 1)
+ b->lock[0]++;
+ if ((parity + 1) & 2)
+ b->lock[1]++;
+
+ return b;
+}
+
+static void pullup_release_buffer(PullupBuffer *b, int parity)
+{
+ if (!b)
+ return;
+
+ if ((parity + 1) & 1)
+ b->lock[0]--;
+ if ((parity + 1) & 2)
+ b->lock[1]--;
+}
+
+static int alloc_buffer(PullupContext *s, PullupBuffer *b)
+{
+ int i;
+
+ if (b->planes[0])
+ return 0;
+ for (i = 0; i < s->nb_planes; i++) {
+ b->planes[i] = av_malloc(s->planeheight[i] * s->planewidth[i]);
+ }
+ if (s->nb_planes == 1)
+ b->planes[1] = av_malloc(4*256);
+
+ return 0;
+}
+
+static PullupBuffer *pullup_get_buffer(PullupContext *s, int parity)
+{
+ int i;
+
+ /* Try first to get the sister buffer for the previous field */
+ if (parity < 2 && s->last && parity != s->last->parity
+ && !s->last->buffer->lock[parity]) {
+ alloc_buffer(s, s->last->buffer);
+ return pullup_lock_buffer(s->last->buffer, parity);
+ }
+
+ /* Prefer a buffer with both fields open */
+ for (i = 0; i < FF_ARRAY_ELEMS(s->buffers); i++) {
+ if (s->buffers[i].lock[0])
+ continue;
+ if (s->buffers[i].lock[1])
+ continue;
+ alloc_buffer(s, &s->buffers[i]);
+ return pullup_lock_buffer(&s->buffers[i], parity);
+ }
+
+ if (parity == 2)
+ return 0;
+
+ /* Search for any half-free buffer */
+ for (i = 0; i < FF_ARRAY_ELEMS(s->buffers); i++) {
+ if (((parity + 1) & 1) && s->buffers[i].lock[0])
+ continue;
+ if (((parity + 1) & 2) && s->buffers[i].lock[1])
+ continue;
+ alloc_buffer(s, &s->buffers[i]);
+ return pullup_lock_buffer(&s->buffers[i], parity);
+ }
+
+ return NULL;
+}
+
+static int queue_length(PullupField *begin, PullupField *end)
+{
+ PullupField *f;
+ int count = 1;
+
+ if (!begin || !end)
+ return 0;
+
+ for (f = begin; f != end; f = f->next)
+ count++;
+
+ return count;
+}
+
+static int find_first_break(PullupField *f, int max)
+{
+ int i;
+
+ for (i = 0; i < max; i++) {
+ if (f->breaks & BREAK_RIGHT || f->next->breaks & BREAK_LEFT)
+ return i + 1;
+ f = f->next;
+ }
+
+ return 0;
+}
+
+static void compute_breaks(PullupContext *s, PullupField *f0)
+{
+ PullupField *f1 = f0->next;
+ PullupField *f2 = f1->next;
+ PullupField *f3 = f2->next;
+ int i, l, max_l = 0, max_r = 0;
+
+ if (f0->flags & F_HAVE_BREAKS)
+ return;
+
+ f0->flags |= F_HAVE_BREAKS;
+
+ /* Special case when fields are 100% identical */
+ if (f0->buffer == f2->buffer && f1->buffer != f3->buffer) {
+ f2->breaks |= BREAK_RIGHT;
+ return;
+ }
+
+ if (f0->buffer != f2->buffer && f1->buffer == f3->buffer) {
+ f1->breaks |= BREAK_LEFT;
+ return;
+ }
+
+ for (i = 0; i < s->metric_length; i++) {
+ l = f2->diffs[i] - f3->diffs[i];
+
+ if ( l > max_l)
+ max_l = l;
+ if (-l > max_r)
+ max_r = -l;
+ }
+
+ /* Don't get tripped up when differences are mostly quant error */
+ if (max_l + max_r < 128)
+ return;
+ if (max_l > 4 * max_r)
+ f1->breaks |= BREAK_LEFT;
+ if (max_r > 4 * max_l)
+ f2->breaks |= BREAK_RIGHT;
+}
+
+static void compute_affinity(PullupContext *s, PullupField *f)
+{
+ int i, max_l = 0, max_r = 0, l;
+
+ if (f->flags & F_HAVE_AFFINITY)
+ return;
+
+ f->flags |= F_HAVE_AFFINITY;
+
+ if (f->buffer == f->next->next->buffer) {
+ f->affinity = 1;
+ f->next->affinity = 0;
+ f->next->next->affinity = -1;
+ f->next->flags |= F_HAVE_AFFINITY;
+ f->next->next->flags |= F_HAVE_AFFINITY;
+ return;
+ }
+
+ for (i = 0; i < s->metric_length; i++) {
+ int v = f->vars[i];
+ int lv = f->prev->vars[i];
+ int rv = f->next->vars[i];
+ int lc = f-> combs[i] - 2*(v < lv ? v : lv);
+ int rc = f->next->combs[i] - 2*(v < rv ? v : rv);
+
+ lc = FFMAX(lc, 0);
+ rc = FFMAX(rc, 0);
+ l = lc - rc;
+
+ if ( l > max_l)
+ max_l = l;
+ if (-l > max_r)
+ max_r = -l;
+ }
+
+ if (max_l + max_r < 64)
+ return;
+
+ if (max_r > 6 * max_l)
+ f->affinity = -1;
+ else if (max_l > 6 * max_r)
+ f->affinity = 1;
+}
+
+static int decide_frame_length(PullupContext *s)
+{
+ PullupField *f0 = s->first;
+ PullupField *f1 = f0->next;
+ PullupField *f2 = f1->next;
+ PullupField *f;
+ int i, l, n;
+
+ if (queue_length(s->first, s->last) < 4)
+ return 0;
+
+ f = s->first;
+ n = queue_length(f, s->last);
+ for (i = 0; i < n - 1; i++) {
+ if (i < n - 3)
+ compute_breaks(s, f);
+
+ compute_affinity(s, f);
+
+ f = f->next;
+ }
+
+ if (f0->affinity == -1)
+ return 1;
+
+ l = find_first_break(f0, 3);
+
+ if (l == 1 && s->strict_breaks < 0)
+ l = 0;
+
+ switch (l) {
+ case 1:
+ return 1 + (s->strict_breaks < 1 && f0->affinity == 1 && f1->affinity == -1);
+ case 2:
+ /* FIXME: strictly speaking, f0->prev is no longer valid... :) */
+ if (s->strict_pairs
+ && (f0->prev->breaks & BREAK_RIGHT) && (f2->breaks & BREAK_LEFT)
+ && (f0->affinity != 1 || f1->affinity != -1) )
+ return 1;
+ return 1 + (f1->affinity != 1);
+ case 3:
+ return 2 + (f2->affinity != 1);
+ default:
+ /* 9 possibilities covered before switch */
+ if (f1->affinity == 1)
+ return 1; /* covers 6 */
+ else if (f1->affinity == -1)
+ return 2; /* covers 6 */
+ else if (f2->affinity == -1) { /* covers 2 */
+ return (f0->affinity == 1) ? 3 : 1;
+ } else {
+ return 2; /* the remaining 6 */
+ }
+ }
+}
+
+static PullupFrame *pullup_get_frame(PullupContext *s)
+{
+ PullupFrame *fr = &s->frame;
+ int i, n = decide_frame_length(s);
+ int aff = s->first->next->affinity;
+
+ av_assert1(n < FF_ARRAY_ELEMS(fr->ifields));
+ if (!n || fr->lock)
+ return NULL;
+
+ fr->lock++;
+ fr->length = n;
+ fr->parity = s->first->parity;
+ fr->buffer = 0;
+
+ for (i = 0; i < n; i++) {
+ /* We cheat and steal the buffer without release+relock */
+ fr->ifields[i] = s->first->buffer;
+ s->first->buffer = 0;
+ s->first = s->first->next;
+ }
+
+ if (n == 1) {
+ fr->ofields[fr->parity ] = fr->ifields[0];
+ fr->ofields[fr->parity ^ 1] = 0;
+ } else if (n == 2) {
+ fr->ofields[fr->parity ] = fr->ifields[0];
+ fr->ofields[fr->parity ^ 1] = fr->ifields[1];
+ } else if (n == 3) {
+ if (!aff)
+ aff = (fr->ifields[0] == fr->ifields[1]) ? -1 : 1;
+ fr->ofields[fr->parity ] = fr->ifields[1 + aff];
+ fr->ofields[fr->parity ^ 1] = fr->ifields[1 ];
+ }
+
+ pullup_lock_buffer(fr->ofields[0], 0);
+ pullup_lock_buffer(fr->ofields[1], 1);
+
+ if (fr->ofields[0] == fr->ofields[1]) {
+ fr->buffer = fr->ofields[0];
+ pullup_lock_buffer(fr->buffer, 2);
+ return fr;
+ }
+
+ return fr;
+}
+
+static void pullup_release_frame(PullupFrame *f)
+{
+ int i;
+
+ for (i = 0; i < f->length; i++)
+ pullup_release_buffer(f->ifields[i], f->parity ^ (i & 1));
+
+ pullup_release_buffer(f->ofields[0], 0);
+ pullup_release_buffer(f->ofields[1], 1);
+
+ if (f->buffer)
+ pullup_release_buffer(f->buffer, 2);
+ f->lock--;
+}
+
+static void compute_metric(PullupContext *s, int *dest,
+ PullupField *fa, int pa, PullupField *fb, int pb,
+ int (*func)(const uint8_t *, const uint8_t *, ptrdiff_t))
+{
+ int mp = s->metric_plane;
+ int xstep = 8;
+ int ystep = s->planewidth[mp] << 3;
+ int stride = s->planewidth[mp] << 1; /* field stride */
+ int w = s->metric_w * xstep;
+ uint8_t *a, *b;
+ int x, y;
+
+ if (!fa->buffer || !fb->buffer)
+ return;
+
+ /* Shortcut for duplicate fields (e.g. from RFF flag) */
+ if (fa->buffer == fb->buffer && pa == pb) {
+ memset(dest, 0, s->metric_length * sizeof(*dest));
+ return;
+ }
+
+ a = fa->buffer->planes[mp] + pa * s->planewidth[mp] + s->metric_offset;
+ b = fb->buffer->planes[mp] + pb * s->planewidth[mp] + s->metric_offset;
+
+ for (y = 0; y < s->metric_h; y++) {
+ for (x = 0; x < w; x += xstep)
+ *dest++ = func(a + x, b + x, stride);
+ a += ystep; b += ystep;
+ }
+}
+
+static int check_field_queue(PullupContext *s)
+{
+ int ret;
+
+ if (s->head->next == s->first) {
+ PullupField *f = av_mallocz(sizeof(*f));
+
+ if (!f)
+ return AVERROR(ENOMEM);
+
+ if ((ret = alloc_metrics(s, f)) < 0) {
+ av_free(f);
+ return ret;
+ }
+
+ f->prev = s->head;
+ f->next = s->first;
+ s->head->next = f;
+ s->first->prev = f;
+ }
+
+ return 0;
+}
+
+static void pullup_submit_field(PullupContext *s, PullupBuffer *b, int parity)
+{
+ PullupField *f;
+
+ /* Grow the circular list if needed */
+ if (check_field_queue(s) < 0)
+ return;
+
+ /* Cannot have two fields of same parity in a row; drop the new one */
+ if (s->last && s->last->parity == parity)
+ return;
+
+ f = s->head;
+ f->parity = parity;
+ f->buffer = pullup_lock_buffer(b, parity);
+ f->flags = 0;
+ f->breaks = 0;
+ f->affinity = 0;
+
+ compute_metric(s, f->diffs, f, parity, f->prev->prev, parity, s->diff);
+ compute_metric(s, f->combs, parity ? f->prev : f, 0, parity ? f : f->prev, 1, s->comb);
+ compute_metric(s, f->vars, f, parity, f, -1, s->var);
+ emms_c();
+
+ /* Advance the circular list */
+ if (!s->first)
+ s->first = s->head;
+
+ s->last = s->head;
+ s->head = s->head->next;
+}
+
+static void copy_field(PullupContext *s,
+ PullupBuffer *dst, PullupBuffer *src, int parity)
+{
+ uint8_t *dd, *ss;
+ int i;
+
+ for (i = 0; i < s->nb_planes; i++) {
+ ss = src->planes[i] + parity * s->planewidth[i];
+ dd = dst->planes[i] + parity * s->planewidth[i];
+
+ av_image_copy_plane(dd, s->planewidth[i] << 1,
+ ss, s->planewidth[i] << 1,
+ s->planewidth[i], s->planeheight[i] >> 1);
+ }
+}
+
+static void pullup_pack_frame(PullupContext *s, PullupFrame *fr)
+{
+ int i;
+
+ if (fr->buffer)
+ return;
+
+ if (fr->length < 2)
+ return; /* FIXME: deal with this */
+
+ for (i = 0; i < 2; i++) {
+ if (fr->ofields[i]->lock[i^1])
+ continue;
+
+ fr->buffer = fr->ofields[i];
+ pullup_lock_buffer(fr->buffer, 2);
+ copy_field(s, fr->buffer, fr->ofields[i^1], i^1);
+ return;
+ }
+
+ fr->buffer = pullup_get_buffer(s, 2);
+
+ copy_field(s, fr->buffer, fr->ofields[0], 0);
+ copy_field(s, fr->buffer, fr->ofields[1], 1);
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ PullupContext *s = ctx->priv;
+ PullupBuffer *b;
+ PullupFrame *f;
+ AVFrame *out;
+ int p, ret = 0;
+
+ b = pullup_get_buffer(s, 2);
+ if (!b) {
+ av_log(ctx, AV_LOG_WARNING, "Could not get buffer!\n");
+ f = pullup_get_frame(s);
+ pullup_release_frame(f);
+ goto end;
+ }
+
+ av_image_copy(b->planes, s->planewidth,
+ (const uint8_t**)in->data, in->linesize,
+ inlink->format, inlink->w, inlink->h);
+
+ p = in->interlaced_frame ? !in->top_field_first : 0;
+ pullup_submit_field(s, b, p );
+ pullup_submit_field(s, b, p^1);
+
+ if (in->repeat_pict)
+ pullup_submit_field(s, b, p);
+
+ pullup_release_buffer(b, 2);
+
+ f = pullup_get_frame(s);
+ if (!f)
+ goto end;
+
+ if (f->length < 2) {
+ pullup_release_frame(f);
+ f = pullup_get_frame(s);
+ if (!f)
+ goto end;
+ if (f->length < 2) {
+ pullup_release_frame(f);
+ if (!in->repeat_pict)
+ goto end;
+ f = pullup_get_frame(s);
+ if (!f)
+ goto end;
+ if (f->length < 2) {
+ pullup_release_frame(f);
+ goto end;
+ }
+ }
+ }
+
+ /* If the frame isn't already exportable... */
+ if (!f->buffer)
+ pullup_pack_frame(s, f);
+
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ ret = AVERROR(ENOMEM);
+ goto end;
+ }
+ av_frame_copy_props(out, in);
+
+ av_image_copy(out->data, out->linesize,
+ (const uint8_t**)f->buffer->planes, s->planewidth,
+ inlink->format, inlink->w, inlink->h);
+
+ ret = ff_filter_frame(outlink, out);
+ pullup_release_frame(f);
+end:
+ av_frame_free(&in);
+ return ret;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ PullupContext *s = ctx->priv;
+ int i;
+
+ free_field_queue(s->head);
+ s->last = NULL;
+
+ for (i = 0; i < FF_ARRAY_ELEMS(s->buffers); i++) {
+ av_freep(&s->buffers[i].planes[0]);
+ av_freep(&s->buffers[i].planes[1]);
+ av_freep(&s->buffers[i].planes[2]);
+ }
+}
+
+static const AVFilterPad pullup_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ { NULL }
+};
+
+static const AVFilterPad pullup_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_pullup = {
+ .name = "pullup",
+ .description = NULL_IF_CONFIG_SMALL("Pullup from field sequence to frames."),
+ .priv_size = sizeof(PullupContext),
+ .priv_class = &pullup_class,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = pullup_inputs,
+ .outputs = pullup_outputs,
+};
diff --git a/libavfilter/vf_pullup.h b/libavfilter/vf_pullup.h
new file mode 100644
index 0000000000..8f59335180
--- /dev/null
+++ b/libavfilter/vf_pullup.h
@@ -0,0 +1,71 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_PULLUP_H
+#define AVFILTER_PULLUP_H
+
+#include "avfilter.h"
+
+typedef struct PullupBuffer {
+ int lock[2];
+ uint8_t *planes[4];
+} PullupBuffer;
+
+typedef struct PullupField {
+ int parity;
+ PullupBuffer *buffer;
+ unsigned flags;
+ int breaks;
+ int affinity;
+ int *diffs;
+ int *combs;
+ int *vars;
+ struct PullupField *prev, *next;
+} PullupField;
+
+typedef struct PullupFrame {
+ int lock;
+ int length;
+ int parity;
+ PullupBuffer *ifields[4], *ofields[2];
+ PullupBuffer *buffer;
+} PullupFrame;
+
+typedef struct PullupContext {
+ const AVClass *class;
+ int junk_left, junk_right, junk_top, junk_bottom;
+ int metric_plane;
+ int strict_breaks;
+ int strict_pairs;
+ int metric_w, metric_h, metric_length;
+ int metric_offset;
+ int nb_planes;
+ int planewidth[4];
+ int planeheight[4];
+ PullupField *first, *last, *head;
+ PullupBuffer buffers[10];
+ PullupFrame frame;
+
+ int (*diff)(const uint8_t *a, const uint8_t *b, ptrdiff_t s);
+ int (*comb)(const uint8_t *a, const uint8_t *b, ptrdiff_t s);
+ int (*var )(const uint8_t *a, const uint8_t *b, ptrdiff_t s);
+} PullupContext;
+
+void ff_pullup_init_x86(PullupContext *s);
+
+#endif /* AVFILTER_PULLUP_H */
diff --git a/libavfilter/vf_qp.c b/libavfilter/vf_qp.c
new file mode 100644
index 0000000000..33d39493bc
--- /dev/null
+++ b/libavfilter/vf_qp.c
@@ -0,0 +1,183 @@
+/*
+ * Copyright (C) 2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <math.h>
+#include "libavutil/eval.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+typedef struct QPContext {
+ const AVClass *class;
+ char *qp_expr_str;
+ int8_t lut[257];
+ int h, qstride;
+ int evaluate_per_mb;
+} QPContext;
+
+#define OFFSET(x) offsetof(QPContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption qp_options[] = {
+ { "qp", "set qp expression", OFFSET(qp_expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(qp);
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ QPContext *s = ctx->priv;
+ int i;
+ int ret;
+ AVExpr *e = NULL;
+ static const char *var_names[] = { "known", "qp", "x", "y", "w", "h", NULL };
+
+ if (!s->qp_expr_str)
+ return 0;
+
+ ret = av_expr_parse(&e, s->qp_expr_str, var_names, NULL, NULL, NULL, NULL, 0, ctx);
+ if (ret < 0)
+ return ret;
+
+ s->h = (inlink->h + 15) >> 4;
+ s->qstride = (inlink->w + 15) >> 4;
+ for (i = -129; i < 128; i++) {
+ double var_values[] = { i != -129, i, NAN, NAN, s->qstride, s->h, 0};
+ double temp_val = av_expr_eval(e, var_values, NULL);
+
+ if (isnan(temp_val)) {
+ if(strchr(s->qp_expr_str, 'x') || strchr(s->qp_expr_str, 'y'))
+ s->evaluate_per_mb = 1;
+ else {
+ av_expr_free(e);
+ return AVERROR(EINVAL);
+ }
+ }
+
+ s->lut[i + 129] = lrintf(temp_val);
+ }
+ av_expr_free(e);
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ QPContext *s = ctx->priv;
+ AVBufferRef *out_qp_table_buf;
+ AVFrame *out = NULL;
+ const int8_t *in_qp_table;
+ int type, stride, ret;
+
+ if (!s->qp_expr_str || ctx->is_disabled)
+ return ff_filter_frame(outlink, in);
+
+ out_qp_table_buf = av_buffer_alloc(s->h * s->qstride);
+ if (!out_qp_table_buf) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ out = av_frame_clone(in);
+ if (!out) {
+ av_buffer_unref(&out_qp_table_buf);
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ in_qp_table = av_frame_get_qp_table(in, &stride, &type);
+ av_frame_set_qp_table(out, out_qp_table_buf, s->qstride, type);
+
+
+ if (s->evaluate_per_mb) {
+ int y, x;
+
+ for (y = 0; y < s->h; y++)
+ for (x = 0; x < s->qstride; x++) {
+ int qp = in_qp_table ? in_qp_table[x + stride * y] : NAN;
+ double var_values[] = { !!in_qp_table, qp, x, y, s->qstride, s->h, 0};
+ static const char *var_names[] = { "known", "qp", "x", "y", "w", "h", NULL };
+ double temp_val;
+
+ ret = av_expr_parse_and_eval(&temp_val, s->qp_expr_str,
+ var_names, var_values,
+ NULL, NULL, NULL, NULL, 0, 0, ctx);
+ if (ret < 0)
+ goto fail;
+ out_qp_table_buf->data[x + s->qstride * y] = lrintf(temp_val);
+ }
+ } else if (in_qp_table) {
+ int y, x;
+
+ for (y = 0; y < s->h; y++)
+ for (x = 0; x < s->qstride; x++)
+ out_qp_table_buf->data[x + s->qstride * y] = s->lut[129 +
+ ((int8_t)in_qp_table[x + stride * y])];
+ } else {
+ int y, x, qp = s->lut[0];
+
+ for (y = 0; y < s->h; y++)
+ for (x = 0; x < s->qstride; x++)
+ out_qp_table_buf->data[x + s->qstride * y] = qp;
+ }
+
+ ret = ff_filter_frame(outlink, out);
+ out = NULL;
+fail:
+ av_frame_free(&in);
+ av_frame_free(&out);
+ return ret;
+}
+
+static const AVFilterPad qp_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ { NULL }
+};
+
+static const AVFilterPad qp_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_qp = {
+ .name = "qp",
+ .description = NULL_IF_CONFIG_SMALL("Change video quantization parameters."),
+ .priv_size = sizeof(QPContext),
+ .inputs = qp_inputs,
+ .outputs = qp_outputs,
+ .priv_class = &qp_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
+};
diff --git a/libavfilter/vf_random.c b/libavfilter/vf_random.c
new file mode 100644
index 0000000000..373a7db053
--- /dev/null
+++ b/libavfilter/vf_random.c
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2015 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/lfg.h"
+#include "libavutil/opt.h"
+#include "libavutil/random_seed.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+#define MAX_FRAMES 512
+
+typedef struct RandomContext {
+ const AVClass *class;
+
+ AVLFG lfg;
+ int nb_frames;
+ int64_t random_seed;
+ int nb_frames_filled;
+ AVFrame *frames[MAX_FRAMES];
+ int64_t pts[MAX_FRAMES];
+ int flush_idx;
+} RandomContext;
+
+#define OFFSET(x) offsetof(RandomContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption random_options[] = {
+ { "frames", "set number of frames in cache", OFFSET(nb_frames), AV_OPT_TYPE_INT, {.i64=30}, 2, MAX_FRAMES, FLAGS },
+ { "seed", "set the seed", OFFSET(random_seed), AV_OPT_TYPE_INT64, {.i64=-1}, -1, UINT32_MAX, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(random);
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ RandomContext *s = ctx->priv;
+ uint32_t seed;
+
+ if (s->random_seed < 0)
+ s->random_seed = av_get_random_seed();
+ seed = s->random_seed;
+ av_lfg_init(&s->lfg, seed);
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ RandomContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFrame *out;
+ int idx;
+
+ if (s->nb_frames_filled < s->nb_frames) {
+ s->frames[s->nb_frames_filled] = in;
+ s->pts[s->nb_frames_filled++] = in->pts;
+ return 0;
+ }
+
+ idx = av_lfg_get(&s->lfg) % s->nb_frames;
+
+ out = s->frames[idx];
+ out->pts = s->pts[0];
+ memmove(&s->pts[0], &s->pts[1], (s->nb_frames - 1) * sizeof(s->pts[0]));
+ s->frames[idx] = in;
+ s->pts[s->nb_frames - 1] = in->pts;
+
+ return ff_filter_frame(outlink, out);
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ RandomContext *s = ctx->priv;
+ int ret;
+
+ ret = ff_request_frame(ctx->inputs[0]);
+
+ if (ret == AVERROR_EOF && !ctx->is_disabled && s->nb_frames > 0) {
+ AVFrame *out = s->frames[s->nb_frames - 1];
+ out->pts = s->pts[s->flush_idx++];
+ ret = ff_filter_frame(outlink, out);
+ s->frames[s->nb_frames - 1] = NULL;
+ s->nb_frames--;
+ }
+
+ return ret;
+}
+
+static const AVFilterPad random_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad random_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_random = {
+ .name = "random",
+ .description = NULL_IF_CONFIG_SMALL("Return random frames."),
+ .priv_size = sizeof(RandomContext),
+ .priv_class = &random_class,
+ .init = init,
+ .inputs = random_inputs,
+ .outputs = random_outputs,
+};
diff --git a/libavfilter/vf_readeia608.c b/libavfilter/vf_readeia608.c
new file mode 100644
index 0000000000..4bfe9cfe75
--- /dev/null
+++ b/libavfilter/vf_readeia608.c
@@ -0,0 +1,268 @@
+/*
+ * Copyright (c) 2017 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Filter for reading closed captioning data (EIA-608).
+ * See also https://en.wikipedia.org/wiki/EIA-608
+ */
+
+#include <string.h>
+
+#include "libavutil/internal.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/timestamp.h"
+
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+#define FALL 0
+#define RISE 1
+
+typedef struct ReadEIA608Context {
+ const AVClass *class;
+ int start, end;
+ int min_range;
+ int max_peak_diff;
+ int max_period_diff;
+ int max_start_diff;
+ int nb_found;
+ int white;
+ int black;
+ float mpd, mhd, msd, mac, spw, bhd, wth, bth;
+ int chp;
+} ReadEIA608Context;
+
+#define OFFSET(x) offsetof(ReadEIA608Context, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption readeia608_options[] = {
+ { "scan_min", "set from which line to scan for codes", OFFSET(start), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, FLAGS },
+ { "scan_max", "set to which line to scan for codes", OFFSET(end), AV_OPT_TYPE_INT, {.i64=29}, 0, INT_MAX, FLAGS },
+ { "mac", "set minimal acceptable amplitude change for sync codes detection", OFFSET(mac), AV_OPT_TYPE_FLOAT, {.dbl=.2}, 0.001, 1, FLAGS },
+ { "spw", "set ratio of width reserved for sync code detection", OFFSET(spw), AV_OPT_TYPE_FLOAT, {.dbl=.27}, 0.1, 0.7, FLAGS },
+ { "mhd", "set max peaks height difference for sync code detection", OFFSET(mhd), AV_OPT_TYPE_FLOAT, {.dbl=.1}, 0, 0.5, FLAGS },
+ { "mpd", "set max peaks period difference for sync code detection", OFFSET(mpd), AV_OPT_TYPE_FLOAT, {.dbl=.1}, 0, 0.5, FLAGS },
+ { "msd", "set first two max start code bits differences", OFFSET(msd), AV_OPT_TYPE_FLOAT, {.dbl=.02}, 0, 0.5, FLAGS },
+ { "bhd", "set min ratio of bits height compared to 3rd start code bit", OFFSET(bhd), AV_OPT_TYPE_FLOAT, {.dbl=.75}, 0.01, 1, FLAGS },
+ { "th_w", "set white color threshold", OFFSET(wth), AV_OPT_TYPE_FLOAT, {.dbl=.35}, 0.1, 1, FLAGS },
+ { "th_b", "set black color threshold", OFFSET(bth), AV_OPT_TYPE_FLOAT, {.dbl=.15}, 0, 0.5, FLAGS },
+ { "chp", "check and apply parity bit", OFFSET(chp), AV_OPT_TYPE_BOOL, {.i64= 0}, 0, 1, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(readeia608);
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pixel_fmts[] = {
+ AV_PIX_FMT_GRAY8,
+ AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
+ AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P,
+ AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P,
+ AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ444P,
+ AV_PIX_FMT_YUVJ411P,
+ AV_PIX_FMT_NONE
+ };
+ AVFilterFormats *formats = ff_make_format_list(pixel_fmts);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, formats);
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ AVFilterContext *ctx = inlink->dst;
+ ReadEIA608Context *s = ctx->priv;
+ int depth = desc->comp[0].depth;
+
+ if (s->end >= inlink->h) {
+ av_log(ctx, AV_LOG_WARNING, "Last line to scan too large, clipping.\n");
+ s->end = inlink->h - 1;
+ }
+
+ if (s->start > s->end) {
+ av_log(ctx, AV_LOG_ERROR, "Invalid range.\n");
+ return AVERROR(EINVAL);
+ }
+
+ s->min_range = s->mac * ((1 << depth) - 1);
+ s->max_peak_diff = s->mhd * ((1 << depth) - 1);
+ s->max_period_diff = s->mpd * ((1 << depth) - 1);
+ s->max_start_diff = s->msd * ((1 << depth) - 1);
+ s->white = s->wth * ((1 << depth) - 1);
+ s->black = s->bth * ((1 << depth) - 1);
+
+ return 0;
+}
+
+static void extract_line(AVFilterContext *ctx, AVFilterLink *inlink, AVFrame *in, int line)
+{
+ ReadEIA608Context *s = ctx->priv;
+ int max = 0, min = INT_MAX;
+ int i, ch, range = 0;
+ const uint8_t *src;
+ uint16_t clock[8][2] = { { 0 } };
+ const int sync_width = s->spw * in->width;
+ int last = 0, peaks = 0, max_peak_diff = 0, dir = RISE;
+ const int width_per_bit = (in->width - sync_width) / 19;
+ uint8_t byte[2] = { 0 };
+ int s1, s2, s3, parity;
+
+ src = &in->data[0][line * in->linesize[0]];
+ for (i = 0; i < sync_width; i++) {
+ max = FFMAX(max, src[i]);
+ min = FFMIN(min, src[i]);
+ }
+
+ range = max - min;
+ if (range < s->min_range)
+ return;
+
+ for (i = 0; i < sync_width; i++) {
+ int Y = src[i];
+
+ if (dir == RISE) {
+ if (Y < last) {
+ dir = FALL;
+ if (last >= s->white) {
+ clock[peaks][0] = last;
+ clock[peaks][1] = i;
+ peaks++;
+ if (peaks > 7)
+ break;
+ }
+ }
+ } else if (dir == FALL) {
+ if (Y > last && last <= s->black) {
+ dir = RISE;
+ }
+ }
+ last = Y;
+ }
+
+ if (peaks != 7)
+ return;
+
+ for (i = 1; i < 7; i++)
+ max_peak_diff = FFMAX(max_peak_diff, FFABS(clock[i][0] - clock[i-1][0]));
+
+ if (max_peak_diff > s->max_peak_diff)
+ return;
+
+ max = 0; min = INT_MAX;
+ for (i = 1; i < 7; i++) {
+ max = FFMAX(max, FFABS(clock[i][1] - clock[i-1][1]));
+ min = FFMIN(min, FFABS(clock[i][1] - clock[i-1][1]));
+ }
+
+ range = max - min;
+ if (range > s->max_period_diff)
+ return;
+
+ s1 = src[sync_width + width_per_bit * 0 + width_per_bit / 2];
+ s2 = src[sync_width + width_per_bit * 1 + width_per_bit / 2];
+ s3 = src[sync_width + width_per_bit * 2 + width_per_bit / 2];
+
+ if (FFABS(s1 - s2) > s->max_start_diff || s1 > s->black || s2 > s->black || s3 < s->white)
+ return;
+
+ for (ch = 0; ch < 2; ch++) {
+ for (parity = 0, i = 0; i < 8; i++) {
+ int b = src[sync_width + width_per_bit * (i + 3 + 8 * ch) + width_per_bit / 2];
+
+ if (b - s1 > (s3 - s1) * s->bhd) {
+ b = 1;
+ parity++;
+ } else {
+ b = 0;
+ }
+ byte[ch] |= b << i;
+ }
+
+ if (s->chp) {
+ if (!(parity & 1)) {
+ byte[ch] = 0;
+ }
+ }
+ }
+
+ {
+ uint8_t key[128], value[128];
+
+ snprintf(key, sizeof(key), "lavfi.readeia608.%d.cc", s->nb_found);
+ snprintf(value, sizeof(value), "0x%02X%02X", byte[0], byte[1]);
+ av_dict_set(avpriv_frame_get_metadatap(in), key, value, 0);
+
+ snprintf(key, sizeof(key), "lavfi.readeia608.%d.line", s->nb_found);
+ snprintf(value, sizeof(value), "%d", line);
+ av_dict_set(avpriv_frame_get_metadatap(in), key, value, 0);
+ }
+
+ s->nb_found++;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ ReadEIA608Context *s = ctx->priv;
+ int i;
+
+ s->nb_found = 0;
+ for (i = s->start; i <= s->end; i++)
+ extract_line(ctx, inlink, in, i);
+
+ return ff_filter_frame(outlink, in);
+}
+
+static const AVFilterPad readeia608_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ { NULL }
+};
+
+static const AVFilterPad readeia608_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_readeia608 = {
+ .name = "readeia608",
+ .description = NULL_IF_CONFIG_SMALL("Read EIA-608 Closed Caption codes from input video and write them to frame metadata."),
+ .priv_size = sizeof(ReadEIA608Context),
+ .priv_class = &readeia608_class,
+ .query_formats = query_formats,
+ .inputs = readeia608_inputs,
+ .outputs = readeia608_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+};
diff --git a/libavfilter/vf_readvitc.c b/libavfilter/vf_readvitc.c
new file mode 100644
index 0000000000..d70af6a9ee
--- /dev/null
+++ b/libavfilter/vf_readvitc.c
@@ -0,0 +1,258 @@
+/*
+ * Copyright (c) 2016 Tobias Rapp
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Filter for reading the vertical interval timecode (VITC).
+ * See also https://en.wikipedia.org/wiki/Vertical_interval_timecode
+ */
+
+#include "libavutil/common.h"
+#include "libavutil/internal.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/timecode.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+
+#define LINE_DATA_SIZE 9
+
+typedef struct ReadVitcContext {
+ const AVClass *class;
+
+ int scan_max;
+ double thr_b;
+ double thr_w;
+
+ int threshold_black;
+ int threshold_white;
+ int threshold_gray;
+ int grp_width;
+ uint8_t line_data[LINE_DATA_SIZE];
+ char tcbuf[AV_TIMECODE_STR_SIZE];
+} ReadVitcContext;
+
+#define OFFSET(x) offsetof(ReadVitcContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption readvitc_options[] = {
+ { "scan_max", "maximum line numbers to scan for VITC data", OFFSET(scan_max), AV_OPT_TYPE_INT, {.i64 = 45 }, -1, INT_MAX, FLAGS },
+ { "thr_b", "black color threshold", OFFSET(thr_b), AV_OPT_TYPE_DOUBLE, {.dbl = 0.2 }, 0, 1.0, FLAGS },
+ { "thr_w", "white color threshold", OFFSET(thr_w), AV_OPT_TYPE_DOUBLE, {.dbl = 0.6 }, 0, 1.0, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(readvitc);
+
+static uint8_t get_vitc_crc( uint8_t *line ) {
+ uint8_t crc;
+
+ crc = 0x01 | (line[0] << 2);
+ crc ^= (line[0] >> 6) | 0x04 | (line[1] << 4);
+ crc ^= (line[1] >> 4) | 0x10 | (line[2] << 6);
+ crc ^= (line[2] >> 2) | 0x40;
+ crc ^= line[3];
+ crc ^= 0x01 | (line[4] << 2);
+ crc ^= (line[4] >> 6) | 0x04 | (line[5] << 4);
+ crc ^= (line[5] >> 4) | 0x10 | (line[6] << 6);
+ crc ^= (line[6] >> 2) | 0x40;
+ crc ^= line[7];
+ crc ^= 0x01;
+ crc = (crc >> 2) | (crc << 6); // rotate byte right by two bits
+ return crc;
+}
+
+static inline uint8_t get_pit_avg3( uint8_t *line, int i ) {
+ return ((line[i-1] + line[i] + line[i+1]) / 3);
+}
+
+static int read_vitc_line( ReadVitcContext *ctx, uint8_t *src, int line_size, int width, int height )
+{
+ uint8_t *scan_line;
+ int grp_index, pit_index;
+ int grp_start_pos;
+ uint8_t pit_value;
+ int x, y, res = 0;
+
+ if (ctx->scan_max >= 0)
+ height = FFMIN(height, ctx->scan_max);
+
+ // scan lines for VITC data, starting from the top
+ for (y = 0; y < height; y++) {
+ scan_line = src;
+ memset(ctx->line_data, 0, LINE_DATA_SIZE);
+ grp_index = 0;
+ x = 0;
+ while ((x < width) && (grp_index < 9)) {
+ // search next sync pattern
+ while ((x < width) && (scan_line[x] < ctx->threshold_white))
+ x++;
+ while ((x < width) && (scan_line[x] > ctx->threshold_black))
+ x++;
+ x = FFMAX(x - ((ctx->grp_width+10) / 20), 1); // step back a half pit
+ grp_start_pos = x;
+ if ((grp_start_pos + ctx->grp_width) > width)
+ break; // not enough pixels for reading a whole pit group
+ pit_value = get_pit_avg3(scan_line, x);
+ if (pit_value < ctx->threshold_white)
+ break; // first sync bit mismatch
+ x = grp_start_pos + ((ctx->grp_width) / 10);
+ pit_value = get_pit_avg3(scan_line, x);
+ if (pit_value > ctx->threshold_black )
+ break; // second sync bit mismatch
+ for (pit_index = 0; pit_index <= 7; pit_index++) {
+ x = grp_start_pos + (((pit_index+2)*ctx->grp_width) / 10);
+ pit_value = get_pit_avg3(scan_line, x);
+ if (pit_value > ctx->threshold_gray)
+ ctx->line_data[grp_index] |= (1 << pit_index);
+ }
+ grp_index++;
+ }
+ if ((grp_index == 9) && (get_vitc_crc(ctx->line_data) == ctx->line_data[8])) {
+ res = 1;
+ break;
+ }
+ src += line_size;
+ }
+
+ return res;
+}
+
+static unsigned bcd2uint(uint8_t high, uint8_t low)
+{
+ if (high > 9 || low > 9)
+ return 0;
+ return 10*high + low;
+}
+
+static char *make_vitc_tc_string(char *buf, uint8_t *line)
+{
+ unsigned hh = bcd2uint(line[7] & 0x03, line[6] & 0x0f); // 6-bit hours
+ unsigned mm = bcd2uint(line[5] & 0x07, line[4] & 0x0f); // 7-bit minutes
+ unsigned ss = bcd2uint(line[3] & 0x07, line[2] & 0x0f); // 7-bit seconds
+ unsigned ff = bcd2uint(line[1] & 0x03, line[0] & 0x0f); // 6-bit frames
+ unsigned drop = (line[1] & 0x04); // 1-bit drop flag
+ snprintf(buf, AV_TIMECODE_STR_SIZE, "%02u:%02u:%02u%c%02u",
+ hh, mm, ss, drop ? ';' : ':', ff);
+ return buf;
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ ReadVitcContext *s = ctx->priv;
+
+ s->threshold_black = s->thr_b * UINT8_MAX;
+ s->threshold_white = s->thr_w * UINT8_MAX;
+ if (s->threshold_black > s->threshold_white) {
+ av_log(ctx, AV_LOG_WARNING, "Black color threshold is higher than white color threshold (%g > %g)\n",
+ s->thr_b, s->thr_w);
+ return AVERROR(EINVAL);
+ }
+ s->threshold_gray = s->threshold_white - ((s->threshold_white - s->threshold_black) / 2);
+ av_log(ctx, AV_LOG_DEBUG, "threshold_black:%d threshold_white:%d threshold_gray:%d\n",
+ s->threshold_black, s->threshold_white, s->threshold_gray);
+
+ return 0;
+}
+
+static int config_props(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ ReadVitcContext *s = ctx->priv;
+
+ s->grp_width = inlink->w * 5 / 48;
+ av_log(ctx, AV_LOG_DEBUG, "w:%d h:%d grp_width:%d scan_max:%d\n",
+ inlink->w, inlink->h, s->grp_width, s->scan_max);
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pixel_fmts[] = {
+ AV_PIX_FMT_GRAY8,
+ AV_PIX_FMT_NV12,
+ AV_PIX_FMT_NV16,
+ AV_PIX_FMT_NV21,
+ AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_YUV422P,
+ AV_PIX_FMT_YUV440P,
+ AV_PIX_FMT_YUV444P,
+ AV_PIX_FMT_YUVA420P,
+ AV_PIX_FMT_YUVA422P,
+ AV_PIX_FMT_YUVA444P,
+ AV_PIX_FMT_YUVJ411P,
+ AV_PIX_FMT_YUVJ420P,
+ AV_PIX_FMT_YUVJ422P,
+ AV_PIX_FMT_YUVJ440P,
+ AV_PIX_FMT_YUVJ444P,
+ AV_PIX_FMT_NONE
+ };
+ AVFilterFormats *fmts_list = ff_make_format_list(pixel_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ ReadVitcContext *s = ctx->priv;
+ int found;
+
+ found = read_vitc_line(s, frame->data[0], frame->linesize[0], inlink->w, inlink->h);
+ av_dict_set(avpriv_frame_get_metadatap(frame), "lavfi.readvitc.found", (found ? "1" : "0"), 0);
+ if (found)
+ av_dict_set(avpriv_frame_get_metadatap(frame), "lavfi.readvitc.tc_str", make_vitc_tc_string(s->tcbuf, s->line_data), 0);
+
+ return ff_filter_frame(outlink, frame);
+}
+
+static const AVFilterPad inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_props,
+ },
+ { NULL }
+};
+
+static const AVFilterPad outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_readvitc = {
+ .name = "readvitc",
+ .description = NULL_IF_CONFIG_SMALL("Read vertical interval timecode and write it to frame metadata."),
+ .priv_size = sizeof(ReadVitcContext),
+ .priv_class = &readvitc_class,
+ .inputs = inputs,
+ .outputs = outputs,
+ .init = init,
+ .query_formats = query_formats,
+};
diff --git a/libavfilter/vf_remap.c b/libavfilter/vf_remap.c
new file mode 100644
index 0000000000..b7182e9556
--- /dev/null
+++ b/libavfilter/vf_remap.c
@@ -0,0 +1,415 @@
+/*
+ * Copyright (c) 2016 Floris Sluiter
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Pixel remap filter
+ * This filter copies pixel by pixel a source frame to a target frame.
+ * It remaps the pixels to a new x,y destination based on two files ymap/xmap.
+ * Map files are passed as a parameter and are in PGM format (P2 or P5),
+ * where the values are y(rows)/x(cols) coordinates of the source_frame.
+ * The *target* frame dimension is based on mapfile dimensions: specified in the
+ * header of the mapfile and reflected in the number of datavalues.
+ * Dimensions of ymap and xmap must be equal. Datavalues must be positive or zero.
+ * Any datavalue in the ymap or xmap which value is higher
+ * then the *source* frame height or width is silently ignored, leaving a
+ * blank/chromakey pixel. This can safely be used as a feature to create overlays.
+ *
+ * Algorithm digest:
+ * Target_frame[y][x] = Source_frame[ ymap[y][x] ][ [xmap[y][x] ];
+ */
+
+#include "libavutil/imgutils.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "framesync.h"
+#include "internal.h"
+#include "video.h"
+
+typedef struct RemapContext {
+ const AVClass *class;
+ int nb_planes;
+ int nb_components;
+ int step;
+ FFFrameSync fs;
+
+ void (*remap)(struct RemapContext *s, const AVFrame *in,
+ const AVFrame *xin, const AVFrame *yin,
+ AVFrame *out);
+} RemapContext;
+
+#define OFFSET(x) offsetof(RemapContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption remap_options[] = {
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(remap);
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUVA444P,
+ AV_PIX_FMT_YUV444P,
+ AV_PIX_FMT_YUVJ444P,
+ AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
+ AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR, AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA,
+ AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP,
+ AV_PIX_FMT_YUV444P9, AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUV444P12,
+ AV_PIX_FMT_YUV444P14, AV_PIX_FMT_YUV444P16,
+ AV_PIX_FMT_YUVA444P9, AV_PIX_FMT_YUVA444P10, AV_PIX_FMT_YUVA444P16,
+ AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10, AV_PIX_FMT_GBRP12,
+ AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16,
+ AV_PIX_FMT_GBRAP10, AV_PIX_FMT_GBRAP12, AV_PIX_FMT_GBRAP16,
+ AV_PIX_FMT_RGB48, AV_PIX_FMT_BGR48,
+ AV_PIX_FMT_RGBA64, AV_PIX_FMT_BGRA64,
+ AV_PIX_FMT_NONE
+ };
+ static const enum AVPixelFormat map_fmts[] = {
+ AV_PIX_FMT_GRAY16,
+ AV_PIX_FMT_NONE
+ };
+ AVFilterFormats *pix_formats = NULL, *map_formats = NULL;
+ int ret;
+
+ if (!(pix_formats = ff_make_format_list(pix_fmts)) ||
+ !(map_formats = ff_make_format_list(map_fmts))) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+ if ((ret = ff_formats_ref(pix_formats, &ctx->inputs[0]->out_formats)) < 0 ||
+ (ret = ff_formats_ref(map_formats, &ctx->inputs[1]->out_formats)) < 0 ||
+ (ret = ff_formats_ref(map_formats, &ctx->inputs[2]->out_formats)) < 0 ||
+ (ret = ff_formats_ref(pix_formats, &ctx->outputs[0]->in_formats)) < 0)
+ goto fail;
+ return 0;
+fail:
+ if (pix_formats)
+ av_freep(&pix_formats->formats);
+ av_freep(&pix_formats);
+ if (map_formats)
+ av_freep(&map_formats->formats);
+ av_freep(&map_formats);
+ return ret;
+}
+
+/**
+ * remap_planar algorithm expects planes of same size
+ * pixels are copied from source to target using :
+ * Target_frame[y][x] = Source_frame[ ymap[y][x] ][ [xmap[y][x] ];
+ */
+static void remap_planar(RemapContext *s, const AVFrame *in,
+ const AVFrame *xin, const AVFrame *yin,
+ AVFrame *out)
+{
+ const int xlinesize = xin->linesize[0] / 2;
+ const int ylinesize = yin->linesize[0] / 2;
+ int x , y, plane;
+
+ for (plane = 0; plane < s->nb_planes ; plane++) {
+ uint8_t *dst = out->data[plane];
+ const int dlinesize = out->linesize[plane];
+ const uint8_t *src = in->data[plane];
+ const int slinesize = in->linesize[plane];
+ const uint16_t *xmap = (const uint16_t *)xin->data[0];
+ const uint16_t *ymap = (const uint16_t *)yin->data[0];
+
+ for (y = 0; y < out->height; y++) {
+ for (x = 0; x < out->width; x++) {
+ if (ymap[x] < in->height && xmap[x] < in->width) {
+ dst[x] = src[ymap[x] * slinesize + xmap[x]];
+ } else {
+ dst[x] = 0;
+ }
+ }
+ dst += dlinesize;
+ xmap += xlinesize;
+ ymap += ylinesize;
+ }
+ }
+}
+
+static void remap_planar16(RemapContext *s, const AVFrame *in,
+ const AVFrame *xin, const AVFrame *yin,
+ AVFrame *out)
+{
+ const int xlinesize = xin->linesize[0] / 2;
+ const int ylinesize = yin->linesize[0] / 2;
+ int x , y, plane;
+
+ for (plane = 0; plane < s->nb_planes ; plane++) {
+ uint16_t *dst = (uint16_t *)out->data[plane];
+ const int dlinesize = out->linesize[plane] / 2;
+ const uint16_t *src = (const uint16_t *)in->data[plane];
+ const int slinesize = in->linesize[plane] / 2;
+ const uint16_t *xmap = (const uint16_t *)xin->data[0];
+ const uint16_t *ymap = (const uint16_t *)yin->data[0];
+
+ for (y = 0; y < out->height; y++) {
+ for (x = 0; x < out->width; x++) {
+ if (ymap[x] < in->height && xmap[x] < in->width) {
+ dst[x] = src[ymap[x] * slinesize + xmap[x]];
+ } else {
+ dst[x] = 0;
+ }
+ }
+ dst += dlinesize;
+ xmap += xlinesize;
+ ymap += ylinesize;
+ }
+ }
+}
+
+/**
+ * remap_packed algorithm expects pixels with both padded bits (step) and
+ * number of components correctly set.
+ * pixels are copied from source to target using :
+ * Target_frame[y][x] = Source_frame[ ymap[y][x] ][ [xmap[y][x] ];
+ */
+static void remap_packed(RemapContext *s, const AVFrame *in,
+ const AVFrame *xin, const AVFrame *yin,
+ AVFrame *out)
+{
+ uint8_t *dst = out->data[0];
+ const uint8_t *src = in->data[0];
+ const int dlinesize = out->linesize[0];
+ const int slinesize = in->linesize[0];
+ const int xlinesize = xin->linesize[0] / 2;
+ const int ylinesize = yin->linesize[0] / 2;
+ const uint16_t *xmap = (const uint16_t *)xin->data[0];
+ const uint16_t *ymap = (const uint16_t *)yin->data[0];
+ const int step = s->step;
+ int c, x, y;
+
+ for (y = 0; y < out->height; y++) {
+ for (x = 0; x < out->width; x++) {
+ for (c = 0; c < s->nb_components; c++) {
+ if (ymap[x] < in->height && xmap[x] < in->width) {
+ dst[x * step + c] = src[ymap[x] * slinesize + xmap[x] * step + c];
+ } else {
+ dst[x * step + c] = 0;
+ }
+ }
+ }
+ dst += dlinesize;
+ xmap += xlinesize;
+ ymap += ylinesize;
+ }
+}
+
+static void remap_packed16(RemapContext *s, const AVFrame *in,
+ const AVFrame *xin, const AVFrame *yin,
+ AVFrame *out)
+{
+ uint16_t *dst = (uint16_t *)out->data[0];
+ const uint16_t *src = (const uint16_t *)in->data[0];
+ const int dlinesize = out->linesize[0] / 2;
+ const int slinesize = in->linesize[0] / 2;
+ const int xlinesize = xin->linesize[0] / 2;
+ const int ylinesize = yin->linesize[0] / 2;
+ const uint16_t *xmap = (const uint16_t *)xin->data[0];
+ const uint16_t *ymap = (const uint16_t *)yin->data[0];
+ const int step = s->step / 2;
+ int c, x, y;
+
+ for (y = 0; y < out->height; y++) {
+ for (x = 0; x < out->width; x++) {
+ for (c = 0; c < s->nb_components; c++) {
+ if (ymap[x] < in->height && xmap[x] < in->width) {
+ dst[x * step + c] = src[ymap[x] * slinesize + xmap[x] * step + c];
+ } else {
+ dst[x * step + c] = 0;
+ }
+ }
+ }
+ dst += dlinesize;
+ xmap += xlinesize;
+ ymap += ylinesize;
+ }
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ RemapContext *s = ctx->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+
+ s->nb_planes = av_pix_fmt_count_planes(inlink->format);
+ s->nb_components = desc->nb_components;
+
+ if (desc->comp[0].depth == 8) {
+ if (s->nb_planes > 1 || s->nb_components == 1) {
+ s->remap = remap_planar;
+ } else {
+ s->remap = remap_packed;
+ }
+ } else {
+ if (s->nb_planes > 1 || s->nb_components == 1) {
+ s->remap = remap_planar16;
+ } else {
+ s->remap = remap_packed16;
+ }
+ }
+
+ s->step = av_get_padded_bits_per_pixel(desc) >> 3;
+ return 0;
+}
+
+static int process_frame(FFFrameSync *fs)
+{
+ AVFilterContext *ctx = fs->parent;
+ RemapContext *s = fs->opaque;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFrame *out, *in, *xpic, *ypic;
+ int ret;
+
+ if ((ret = ff_framesync_get_frame(&s->fs, 0, &in, 0)) < 0 ||
+ (ret = ff_framesync_get_frame(&s->fs, 1, &xpic, 0)) < 0 ||
+ (ret = ff_framesync_get_frame(&s->fs, 2, &ypic, 0)) < 0)
+ return ret;
+
+ if (ctx->is_disabled) {
+ out = av_frame_clone(in);
+ if (!out)
+ return AVERROR(ENOMEM);
+ } else {
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out)
+ return AVERROR(ENOMEM);
+ av_frame_copy_props(out, in);
+
+ s->remap(s, in, xpic, ypic, out);
+ }
+ out->pts = av_rescale_q(in->pts, s->fs.time_base, outlink->time_base);
+
+ return ff_filter_frame(outlink, out);
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ RemapContext *s = ctx->priv;
+ AVFilterLink *srclink = ctx->inputs[0];
+ AVFilterLink *xlink = ctx->inputs[1];
+ AVFilterLink *ylink = ctx->inputs[2];
+ FFFrameSyncIn *in;
+ int ret;
+
+ if (xlink->w != ylink->w || xlink->h != ylink->h) {
+ av_log(ctx, AV_LOG_ERROR, "Second input link %s parameters "
+ "(size %dx%d) do not match the corresponding "
+ "third input link %s parameters (%dx%d)\n",
+ ctx->input_pads[1].name, xlink->w, xlink->h,
+ ctx->input_pads[2].name, ylink->w, ylink->h);
+ return AVERROR(EINVAL);
+ }
+
+ outlink->w = xlink->w;
+ outlink->h = xlink->h;
+ outlink->time_base = srclink->time_base;
+ outlink->sample_aspect_ratio = srclink->sample_aspect_ratio;
+ outlink->frame_rate = srclink->frame_rate;
+
+ ret = ff_framesync_init(&s->fs, ctx, 3);
+ if (ret < 0)
+ return ret;
+
+ in = s->fs.in;
+ in[0].time_base = srclink->time_base;
+ in[1].time_base = xlink->time_base;
+ in[2].time_base = ylink->time_base;
+ in[0].sync = 2;
+ in[0].before = EXT_STOP;
+ in[0].after = EXT_STOP;
+ in[1].sync = 1;
+ in[1].before = EXT_NULL;
+ in[1].after = EXT_INFINITY;
+ in[2].sync = 1;
+ in[2].before = EXT_NULL;
+ in[2].after = EXT_INFINITY;
+ s->fs.opaque = s;
+ s->fs.on_event = process_frame;
+
+ return ff_framesync_configure(&s->fs);
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
+{
+ RemapContext *s = inlink->dst->priv;
+ return ff_framesync_filter_frame(&s->fs, inlink, buf);
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ RemapContext *s = outlink->src->priv;
+ return ff_framesync_request_frame(&s->fs, outlink);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ RemapContext *s = ctx->priv;
+
+ ff_framesync_uninit(&s->fs);
+}
+
+static const AVFilterPad remap_inputs[] = {
+ {
+ .name = "source",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ {
+ .name = "xmap",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ {
+ .name = "ymap",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad remap_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_remap = {
+ .name = "remap",
+ .description = NULL_IF_CONFIG_SMALL("Remap pixels."),
+ .priv_size = sizeof(RemapContext),
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = remap_inputs,
+ .outputs = remap_outputs,
+ .priv_class = &remap_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+};
diff --git a/libavfilter/vf_removegrain.c b/libavfilter/vf_removegrain.c
new file mode 100644
index 0000000000..bc45076baa
--- /dev/null
+++ b/libavfilter/vf_removegrain.c
@@ -0,0 +1,660 @@
+/*
+ * Copyright (c) 2012 Laurent de Soras
+ * Copyright (c) 2013 Fredrik Mellbin
+ * Copyright (c) 2015 Paul B Mahol
+ * Copyright (c) 2015 James Darnley
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/imgutils.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/qsort.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "removegrain.h"
+#include "video.h"
+
+#define OFFSET(x) offsetof(RemoveGrainContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption removegrain_options[] = {
+ { "m0", "set mode for 1st plane", OFFSET(mode[0]), AV_OPT_TYPE_INT, {.i64=0}, 0, 24, FLAGS },
+ { "m1", "set mode for 2nd plane", OFFSET(mode[1]), AV_OPT_TYPE_INT, {.i64=0}, 0, 24, FLAGS },
+ { "m2", "set mode for 3rd plane", OFFSET(mode[2]), AV_OPT_TYPE_INT, {.i64=0}, 0, 24, FLAGS },
+ { "m3", "set mode for 4th plane", OFFSET(mode[3]), AV_OPT_TYPE_INT, {.i64=0}, 0, 24, FLAGS },
+ {NULL}
+};
+
+AVFILTER_DEFINE_CLASS(removegrain);
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_GRAY8,
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P,
+ AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P,
+ AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P,
+ AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ444P,
+ AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP,
+ AV_PIX_FMT_NONE
+ };
+
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+#define REMOVE_GRAIN_SORT_AXIS \
+ const int ma1 = FFMAX(a1, a8); \
+ const int mi1 = FFMIN(a1, a8); \
+ const int ma2 = FFMAX(a2, a7); \
+ const int mi2 = FFMIN(a2, a7); \
+ const int ma3 = FFMAX(a3, a6); \
+ const int mi3 = FFMIN(a3, a6); \
+ const int ma4 = FFMAX(a4, a5); \
+ const int mi4 = FFMIN(a4, a5);
+
+static int mode01(int c, int a1, int a2, int a3, int a4, int a5, int a6, int a7, int a8)
+{
+ const int mi = FFMIN(FFMIN(FFMIN(a1, a2), FFMIN(a3, a4)), FFMIN(FFMIN(a5, a6), FFMIN(a7, a8)));
+ const int ma = FFMAX(FFMAX(FFMAX(a1, a2), FFMAX(a3, a4)), FFMAX(FFMAX(a5, a6), FFMAX(a7, a8)));
+
+ return av_clip(c, mi, ma);
+}
+
+static int cmp_int(const void *p1, const void *p2)
+{
+ int left = *(const int *)p1;
+ int right = *(const int *)p2;
+ return FFDIFFSIGN(left, right);
+}
+
+static int mode02(int c, int a1, int a2, int a3, int a4, int a5, int a6, int a7, int a8)
+{
+ int a[8] = { a1, a2, a3, a4, a5, a6, a7, a8 };
+
+ AV_QSORT(a, 8, int, cmp_int);
+
+ return av_clip(c, a[2 - 1 ], a[7 - 1]);
+}
+
+static int mode03(int c, int a1, int a2, int a3, int a4, int a5, int a6, int a7, int a8)
+{
+ int a[8] = { a1, a2, a3, a4, a5, a6, a7, a8 };
+
+ AV_QSORT(a, 8, int, cmp_int);
+
+ return av_clip(c, a[3 - 1 ], a[6 - 1]);
+}
+
+static int mode04(int c, int a1, int a2, int a3, int a4, int a5, int a6, int a7, int a8)
+{
+ int a[8] = { a1, a2, a3, a4, a5, a6, a7, a8 };
+
+ AV_QSORT(a, 8, int, cmp_int);
+
+ return av_clip(c, a[4 - 1 ], a[5 - 1]);
+}
+
+static int mode05(int c, int a1, int a2, int a3, int a4, int a5, int a6, int a7, int a8)
+{
+ REMOVE_GRAIN_SORT_AXIS
+
+ const int c1 = FFABS(c - av_clip(c, mi1, ma1));
+ const int c2 = FFABS(c - av_clip(c, mi2, ma2));
+ const int c3 = FFABS(c - av_clip(c, mi3, ma3));
+ const int c4 = FFABS(c - av_clip(c, mi4, ma4));
+
+ const int mindiff = FFMIN(FFMIN(c1, c2), FFMIN(c3, c4));
+
+ /* When adding SIMD notice the return order here: 4, 2, 3, 1. */
+ if (mindiff == c4) {
+ return av_clip(c, mi4, ma4);
+ } else if (mindiff == c2) {
+ return av_clip(c, mi2, ma2);
+ } else if (mindiff == c3) {
+ return av_clip(c, mi3, ma3);
+ }
+
+ return av_clip(c, mi1, ma1);
+}
+
+static int mode06(int c, int a1, int a2, int a3, int a4, int a5, int a6, int a7, int a8)
+{
+ REMOVE_GRAIN_SORT_AXIS
+
+ const int d1 = ma1 - mi1;
+ const int d2 = ma2 - mi2;
+ const int d3 = ma3 - mi3;
+ const int d4 = ma4 - mi4;
+
+ const int cli1 = av_clip(c, mi1, ma1);
+ const int cli2 = av_clip(c, mi2, ma2);
+ const int cli3 = av_clip(c, mi3, ma3);
+ const int cli4 = av_clip(c, mi4, ma4);
+
+ const int c1 = av_clip_uint16((FFABS(c - cli1) << 1) + d1);
+ const int c2 = av_clip_uint16((FFABS(c - cli2) << 1) + d2);
+ const int c3 = av_clip_uint16((FFABS(c - cli3) << 1) + d3);
+ const int c4 = av_clip_uint16((FFABS(c - cli4) << 1) + d4);
+
+ const int mindiff = FFMIN(FFMIN(c1, c2), FFMIN(c3, c4));
+
+ if (mindiff == c4) {
+ return cli4;
+ } else if (mindiff == c2) {
+ return cli2;
+ } else if (mindiff == c3) {
+ return cli3;
+ }
+
+ return cli1;
+}
+
+static int mode07(int c, int a1, int a2, int a3, int a4, int a5, int a6, int a7, int a8)
+{
+ REMOVE_GRAIN_SORT_AXIS
+
+ const int d1 = ma1 - mi1;
+ const int d2 = ma2 - mi2;
+ const int d3 = ma3 - mi3;
+ const int d4 = ma4 - mi4;
+
+ const int cli1 = av_clip(c, mi1, ma1);
+ const int cli2 = av_clip(c, mi2, ma2);
+ const int cli3 = av_clip(c, mi3, ma3);
+ const int cli4 = av_clip(c, mi4, ma4);
+
+ const int c1 = FFABS(c - cli1) + d1;
+ const int c2 = FFABS(c - cli2) + d2;
+ const int c3 = FFABS(c - cli3) + d3;
+ const int c4 = FFABS(c - cli4) + d4;
+
+ const int mindiff = FFMIN(FFMIN(c1, c2), FFMIN(c3, c4));
+
+ if (mindiff == c4) {
+ return cli4;
+ } else if (mindiff == c2) {
+ return cli2;
+ } else if (mindiff == c3) {
+ return cli3;
+ }
+
+ return cli1;
+}
+
+static int mode08(int c, int a1, int a2, int a3, int a4, int a5, int a6, int a7, int a8)
+{
+ REMOVE_GRAIN_SORT_AXIS
+
+ const int d1 = ma1 - mi1;
+ const int d2 = ma2 - mi2;
+ const int d3 = ma3 - mi3;
+ const int d4 = ma4 - mi4;
+
+ const int cli1 = av_clip(c, mi1, ma1);
+ const int cli2 = av_clip(c, mi2, ma2);
+ const int cli3 = av_clip(c, mi3, ma3);
+ const int cli4 = av_clip(c, mi4, ma4);
+
+ const int c1 = av_clip_uint16(FFABS(c - cli1) + (d1 << 1));
+ const int c2 = av_clip_uint16(FFABS(c - cli2) + (d2 << 1));
+ const int c3 = av_clip_uint16(FFABS(c - cli3) + (d3 << 1));
+ const int c4 = av_clip_uint16(FFABS(c - cli4) + (d4 << 1));
+
+ const int mindiff = FFMIN(FFMIN(c1, c2), FFMIN(c3, c4));
+
+ if (mindiff == c4) {
+ return cli4;
+ } else if (mindiff == c2) {
+ return cli2;
+ } else if (mindiff == c3) {
+ return cli3;
+ }
+
+ return cli1;
+}
+
+static int mode09(int c, int a1, int a2, int a3, int a4, int a5, int a6, int a7, int a8)
+{
+ REMOVE_GRAIN_SORT_AXIS
+
+ const int d1 = ma1 - mi1;
+ const int d2 = ma2 - mi2;
+ const int d3 = ma3 - mi3;
+ const int d4 = ma4 - mi4;
+
+ const int mindiff = FFMIN(FFMIN(d1, d2), FFMIN(d3, d4));
+
+ if (mindiff == d4) {
+ return av_clip(c, mi4, ma4);
+ } else if (mindiff == d2) {
+ return av_clip(c, mi2, ma2);
+ } else if (mindiff == d3) {
+ return av_clip(c, mi3, ma3);
+ }
+
+ return av_clip(c, mi1, ma1);
+}
+
+static int mode10(int c, int a1, int a2, int a3, int a4, int a5, int a6, int a7, int a8)
+{
+ const int d1 = FFABS(c - a1);
+ const int d2 = FFABS(c - a2);
+ const int d3 = FFABS(c - a3);
+ const int d4 = FFABS(c - a4);
+ const int d5 = FFABS(c - a5);
+ const int d6 = FFABS(c - a6);
+ const int d7 = FFABS(c - a7);
+ const int d8 = FFABS(c - a8);
+
+ const int mindiff = FFMIN(FFMIN(FFMIN(d1, d2), FFMIN(d3, d4)),
+ FFMIN(FFMIN(d5, d6), FFMIN(d7, d8)));
+
+ if (mindiff == d7) return a7;
+ if (mindiff == d8) return a8;
+ if (mindiff == d6) return a6;
+ if (mindiff == d2) return a2;
+ if (mindiff == d3) return a3;
+ if (mindiff == d1) return a1;
+ if (mindiff == d5) return a5;
+
+ return a4;
+}
+
+static int mode1112(int c, int a1, int a2, int a3, int a4, int a5, int a6, int a7, int a8)
+{
+ const int sum = 4 * c + 2 * (a2 + a4 + a5 + a7) + a1 + a3 + a6 + a8;
+ const int val = (sum + 8) >> 4;
+
+ return val;
+}
+
+static int mode1314(int c, int a1, int a2, int a3, int a4, int a5, int a6, int a7, int a8)
+{
+ const int d1 = FFABS(a1 - a8);
+ const int d2 = FFABS(a2 - a7);
+ const int d3 = FFABS(a3 - a6);
+
+ const int mindiff = FFMIN(FFMIN(d1, d2), d3);
+
+ if (mindiff == d2) {
+ return (a2 + a7 + 1) >> 1;
+ }
+ if (mindiff == d3) {
+ return (a3 + a6 + 1) >> 1;
+ }
+
+ return (a1 + a8 + 1) >> 1;
+}
+
+static int mode1516(int c, int a1, int a2, int a3, int a4, int a5, int a6, int a7, int a8)
+{
+ const int d1 = FFABS(a1 - a8);
+ const int d2 = FFABS(a2 - a7);
+ const int d3 = FFABS(a3 - a6);
+
+ const int mindiff = FFMIN(FFMIN(d1, d2), d3);
+ const int average = (2 * (a2 + a7) + a1 + a3 + a6 + a8 + 4) >> 3;
+
+ if (mindiff == d2) {
+ return av_clip(average, FFMIN(a2, a7), FFMAX(a2, a7));
+ }
+ if (mindiff == d3) {
+ return av_clip(average, FFMIN(a3, a6), FFMAX(a3, a6));
+ }
+
+ return av_clip(average, FFMIN(a1, a8), FFMAX(a1, a8));
+}
+
+static int mode17(int c, int a1, int a2, int a3, int a4, int a5, int a6, int a7, int a8)
+{
+ REMOVE_GRAIN_SORT_AXIS
+
+ const int l = FFMAX(FFMAX(mi1, mi2), FFMAX(mi3, mi4));
+ const int u = FFMIN(FFMIN(ma1, ma2), FFMIN(ma3, ma4));
+
+ return av_clip(c, FFMIN(l, u), FFMAX(l, u));
+}
+
+static int mode18(int c, int a1, int a2, int a3, int a4, int a5, int a6, int a7, int a8)
+{
+ const int d1 = FFMAX(FFABS(c - a1), FFABS(c - a8));
+ const int d2 = FFMAX(FFABS(c - a2), FFABS(c - a7));
+ const int d3 = FFMAX(FFABS(c - a3), FFABS(c - a6));
+ const int d4 = FFMAX(FFABS(c - a4), FFABS(c - a5));
+
+ const int mindiff = FFMIN(FFMIN(d1, d2), FFMIN(d3, d4));
+
+ if (mindiff == d4) {
+ return av_clip(c, FFMIN(a4, a5), FFMAX(a4, a5));
+ }
+ if (mindiff == d2) {
+ return av_clip(c, FFMIN(a2, a7), FFMAX(a2, a7));
+ }
+ if (mindiff == d3) {
+ return av_clip(c, FFMIN(a3, a6), FFMAX(a3, a6));
+ }
+
+ return av_clip(c, FFMIN(a1, a8), FFMAX(a1, a8));
+}
+
+static int mode19(int c, int a1, int a2, int a3, int a4, int a5, int a6, int a7, int a8)
+{
+ const int sum = a1 + a2 + a3 + a4 + a5 + a6 + a7 + a8;
+ const int val = (sum + 4) >> 3;
+
+ return val;
+}
+
+static int mode20(int c, int a1, int a2, int a3, int a4, int a5, int a6, int a7, int a8)
+{
+ const int sum = a1 + a2 + a3 + a4 + c + a5 + a6 + a7 + a8;
+ const int val = (sum + 4) / 9;
+
+ return val;
+}
+
+static int mode21(int c, int a1, int a2, int a3, int a4, int a5, int a6, int a7, int a8)
+{
+ const int l1l = (a1 + a8) >> 1;
+ const int l2l = (a2 + a7) >> 1;
+ const int l3l = (a3 + a6) >> 1;
+ const int l4l = (a4 + a5) >> 1;
+
+ const int l1h = (a1 + a8 + 1) >> 1;
+ const int l2h = (a2 + a7 + 1) >> 1;
+ const int l3h = (a3 + a6 + 1) >> 1;
+ const int l4h = (a4 + a5 + 1) >> 1;
+
+ const int mi = FFMIN(FFMIN(l1l, l2l), FFMIN(l3l, l4l));
+ const int ma = FFMAX(FFMAX(l1h, l2h), FFMAX(l3h, l4h));
+
+ return av_clip(c, mi, ma);
+}
+
+static int mode22(int c, int a1, int a2, int a3, int a4, int a5, int a6, int a7, int a8)
+{
+ const int l1 = (a1 + a8 + 1) >> 1;
+ const int l2 = (a2 + a7 + 1) >> 1;
+ const int l3 = (a3 + a6 + 1) >> 1;
+ const int l4 = (a4 + a5 + 1) >> 1;
+
+ const int mi = FFMIN(FFMIN(l1, l2), FFMIN(l3, l4));
+ const int ma = FFMAX(FFMAX(l1, l2), FFMAX(l3, l4));
+
+ return av_clip(c, mi, ma);
+}
+
+static int mode23(int c, int a1, int a2, int a3, int a4, int a5, int a6, int a7, int a8)
+{
+ REMOVE_GRAIN_SORT_AXIS
+
+ const int linediff1 = ma1 - mi1;
+ const int linediff2 = ma2 - mi2;
+ const int linediff3 = ma3 - mi3;
+ const int linediff4 = ma4 - mi4;
+
+ const int u1 = FFMIN(c - ma1, linediff1);
+ const int u2 = FFMIN(c - ma2, linediff2);
+ const int u3 = FFMIN(c - ma3, linediff3);
+ const int u4 = FFMIN(c - ma4, linediff4);
+ const int u = FFMAX(FFMAX(FFMAX(u1, u2), FFMAX(u3, u4)), 0);
+
+ const int d1 = FFMIN(mi1 - c, linediff1);
+ const int d2 = FFMIN(mi2 - c, linediff2);
+ const int d3 = FFMIN(mi3 - c, linediff3);
+ const int d4 = FFMIN(mi4 - c, linediff4);
+ const int d = FFMAX(FFMAX(FFMAX(d1, d2), FFMAX(d3, d4)), 0);
+
+ return c - u + d; // This probably will never overflow.
+}
+
+static int mode24(int c, int a1, int a2, int a3, int a4, int a5, int a6, int a7, int a8)
+{
+ REMOVE_GRAIN_SORT_AXIS
+
+ const int linediff1 = ma1 - mi1;
+ const int linediff2 = ma2 - mi2;
+ const int linediff3 = ma3 - mi3;
+ const int linediff4 = ma4 - mi4;
+
+ const int tu1 = c - ma1;
+ const int tu2 = c - ma2;
+ const int tu3 = c - ma3;
+ const int tu4 = c - ma4;
+
+ const int u1 = FFMIN(tu1, linediff1 - tu1);
+ const int u2 = FFMIN(tu2, linediff2 - tu2);
+ const int u3 = FFMIN(tu3, linediff3 - tu3);
+ const int u4 = FFMIN(tu4, linediff4 - tu4);
+ const int u = FFMAX(FFMAX(FFMAX(u1, u2), FFMAX(u3, u4)), 0);
+
+ const int td1 = mi1 - c;
+ const int td2 = mi2 - c;
+ const int td3 = mi3 - c;
+ const int td4 = mi4 - c;
+
+ const int d1 = FFMIN(td1, linediff1 - td1);
+ const int d2 = FFMIN(td2, linediff2 - td2);
+ const int d3 = FFMIN(td3, linediff3 - td3);
+ const int d4 = FFMIN(td4, linediff4 - td4);
+ const int d = FFMAX(FFMAX(FFMAX(d1, d2), FFMAX(d3, d4)), 0);
+
+ return c - u + d; // This probably will never overflow.
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ RemoveGrainContext *s = inlink->dst->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ int i;
+
+ s->nb_planes = av_pix_fmt_count_planes(inlink->format);
+
+ s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
+ s->planeheight[0] = s->planeheight[3] = inlink->h;
+ s->planewidth[1] = s->planewidth[2] = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
+ s->planewidth[0] = s->planewidth[3] = inlink->w;
+
+ for (i = 0; i < s->nb_planes; i++) {
+ switch (s->mode[i]) {
+ case 1: s->rg[i] = mode01; break;
+ case 2: s->rg[i] = mode02; break;
+ case 3: s->rg[i] = mode03; break;
+ case 4: s->rg[i] = mode04; break;
+ case 5: s->rg[i] = mode05; break;
+ case 6: s->rg[i] = mode06; break;
+ case 7: s->rg[i] = mode07; break;
+ case 8: s->rg[i] = mode08; break;
+ case 9: s->rg[i] = mode09; break;
+ case 10: s->rg[i] = mode10; break;
+ case 11: s->rg[i] = mode1112; break;
+ case 12: s->rg[i] = mode1112; break;
+ case 13: s->skip_odd = 1;
+ s->rg[i] = mode1314; break;
+ case 14: s->skip_even = 1;
+ s->rg[i] = mode1314; break;
+ case 15: s->skip_odd = 1;
+ s->rg[i] = mode1516; break;
+ case 16: s->skip_even = 1;
+ s->rg[i] = mode1516; break;
+ case 17: s->rg[i] = mode17; break;
+ case 18: s->rg[i] = mode18; break;
+ case 19: s->rg[i] = mode19; break;
+ case 20: s->rg[i] = mode20; break;
+ case 21: s->rg[i] = mode21; break;
+ case 22: s->rg[i] = mode22; break;
+ case 23: s->rg[i] = mode23; break;
+ case 24: s->rg[i] = mode24; break;
+ }
+ }
+
+ if (ARCH_X86)
+ ff_removegrain_init_x86(s);
+
+ return 0;
+}
+
+typedef struct ThreadData {
+ AVFrame *in, *out;
+ int plane;
+} ThreadData;
+
+static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
+{
+ RemoveGrainContext *s = ctx->priv;
+ ThreadData *td = arg;
+ AVFrame *in = td->in;
+ AVFrame *out = td->out;
+ const int i = td->plane;
+ const int height = s->planeheight[i];
+ const int om = in->linesize[i] - 1;
+ const int o0 = in->linesize[i] ;
+ const int op = in->linesize[i] + 1;
+ int start = (height * jobnr ) / nb_jobs;
+ int end = (height * (jobnr+1)) / nb_jobs;
+ int x, y;
+
+ start = FFMAX(1, start);
+ end = FFMIN(height-1, end);
+ for (y = start; y < end; y++) {
+ uint8_t *dst = out->data[i];
+ uint8_t *src = in->data[i];
+
+ src = in->data[i] + y * in->linesize[i];
+ dst = out->data[i] + y * out->linesize[i];
+
+ if (s->skip_even && !(y & 1)) {
+ memcpy(dst, src, s->planewidth[i]);
+ continue;
+ }
+ if (s->skip_odd && y & 1) {
+ memcpy(dst, src, s->planewidth[i]);
+ continue;
+ }
+
+ *dst++ = *src++;
+
+ if (s->fl[i]) {
+ int w_asm = (s->planewidth[i] - 2) & ~15;
+
+ s->fl[i](dst, src, in->linesize[i], w_asm);
+
+ x = 1 + w_asm;
+ dst += w_asm;
+ src += w_asm;
+ } else
+ x = 1;
+
+ for (; x < s->planewidth[i] - 1; x++) {
+ const int a1 = src[-op];
+ const int a2 = src[-o0];
+ const int a3 = src[-om];
+ const int a4 = src[-1 ];
+ const int c = src[ 0 ];
+ const int a5 = src[ 1 ];
+ const int a6 = src[ om];
+ const int a7 = src[ o0];
+ const int a8 = src[ op];
+
+ const int res = s->rg[i](c, a1, a2, a3, a4, a5, a6, a7, a8);
+
+ *dst = res;
+ dst++, src++;
+ }
+ dst[0] = src[0];
+ }
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ RemoveGrainContext *s = ctx->priv;
+ ThreadData td;
+ AVFrame *out;
+ int i;
+
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+
+ for (i = 0; i < s->nb_planes; i++) {
+ uint8_t *dst = out->data[i];
+ uint8_t *src = in->data[i];
+
+ if (s->mode[i] == 0) {
+ av_image_copy_plane(dst, out->linesize[i],
+ src, in->linesize[i],
+ s->planewidth[i], s->planeheight[i]);
+ continue;
+ }
+
+ memcpy(dst, src, s->planewidth[i]);
+
+ td.in = in; td.out = out; td.plane = i;
+ ctx->internal->execute(ctx, filter_slice, &td, NULL,
+ FFMIN(s->planeheight[i], ff_filter_get_nb_threads(ctx)));
+
+ src = in->data[i] + (s->planeheight[i] - 1) * in->linesize[i];
+ dst = out->data[i] + (s->planeheight[i] - 1) * out->linesize[i];
+ memcpy(dst, src, s->planewidth[i]);
+ }
+
+ av_frame_free(&in);
+ return ff_filter_frame(outlink, out);
+}
+
+static const AVFilterPad removegrain_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ { NULL }
+};
+
+static const AVFilterPad removegrain_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_removegrain = {
+ .name = "removegrain",
+ .description = NULL_IF_CONFIG_SMALL("Remove grain."),
+ .priv_size = sizeof(RemoveGrainContext),
+ .query_formats = query_formats,
+ .inputs = removegrain_inputs,
+ .outputs = removegrain_outputs,
+ .priv_class = &removegrain_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
+};
diff --git a/libavfilter/vf_removelogo.c b/libavfilter/vf_removelogo.c
new file mode 100644
index 0000000000..94b92a5853
--- /dev/null
+++ b/libavfilter/vf_removelogo.c
@@ -0,0 +1,587 @@
+/*
+ * Copyright (c) 2005 Robert Edele <yartrebo@earthlink.net>
+ * Copyright (c) 2012 Stefano Sabatini
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Advanced blur-based logo removing filter
+ *
+ * This filter loads an image mask file showing where a logo is and
+ * uses a blur transform to remove the logo.
+ *
+ * Based on the libmpcodecs remove-logo filter by Robert Edele.
+ */
+
+/**
+ * This code implements a filter to remove annoying TV logos and other annoying
+ * images placed onto a video stream. It works by filling in the pixels that
+ * comprise the logo with neighboring pixels. The transform is very loosely
+ * based on a gaussian blur, but it is different enough to merit its own
+ * paragraph later on. It is a major improvement on the old delogo filter as it
+ * both uses a better blurring algorithm and uses a bitmap to use an arbitrary
+ * and generally much tighter fitting shape than a rectangle.
+ *
+ * The logo removal algorithm has two key points. The first is that it
+ * distinguishes between pixels in the logo and those not in the logo by using
+ * the passed-in bitmap. Pixels not in the logo are copied over directly without
+ * being modified and they also serve as source pixels for the logo
+ * fill-in. Pixels inside the logo have the mask applied.
+ *
+ * At init-time the bitmap is reprocessed internally, and the distance to the
+ * nearest edge of the logo (Manhattan distance), along with a little extra to
+ * remove rough edges, is stored in each pixel. This is done using an in-place
+ * erosion algorithm, and incrementing each pixel that survives any given
+ * erosion. Once every pixel is eroded, the maximum value is recorded, and a
+ * set of masks from size 0 to this size are generaged. The masks are circular
+ * binary masks, where each pixel within a radius N (where N is the size of the
+ * mask) is a 1, and all other pixels are a 0. Although a gaussian mask would be
+ * more mathematically accurate, a binary mask works better in practice because
+ * we generally do not use the central pixels in the mask (because they are in
+ * the logo region), and thus a gaussian mask will cause too little blur and
+ * thus a very unstable image.
+ *
+ * The mask is applied in a special way. Namely, only pixels in the mask that
+ * line up to pixels outside the logo are used. The dynamic mask size means that
+ * the mask is just big enough so that the edges touch pixels outside the logo,
+ * so the blurring is kept to a minimum and at least the first boundary
+ * condition is met (that the image function itself is continuous), even if the
+ * second boundary condition (that the derivative of the image function is
+ * continuous) is not met. A masking algorithm that does preserve the second
+ * boundary coundition (perhaps something based on a highly-modified bi-cubic
+ * algorithm) should offer even better results on paper, but the noise in a
+ * typical TV signal should make anything based on derivatives hopelessly noisy.
+ */
+
+#include "libavutil/imgutils.h"
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+#include "bbox.h"
+#include "lavfutils.h"
+#include "lswsutils.h"
+
+typedef struct {
+ const AVClass *class;
+ char *filename;
+ /* Stores our collection of masks. The first is for an array of
+ the second for the y axis, and the third for the x axis. */
+ int ***mask;
+ int max_mask_size;
+ int mask_w, mask_h;
+
+ uint8_t *full_mask_data;
+ FFBoundingBox full_mask_bbox;
+ uint8_t *half_mask_data;
+ FFBoundingBox half_mask_bbox;
+} RemovelogoContext;
+
+#define OFFSET(x) offsetof(RemovelogoContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption removelogo_options[] = {
+ { "filename", "set bitmap filename", OFFSET(filename), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
+ { "f", "set bitmap filename", OFFSET(filename), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(removelogo);
+
+/**
+ * Choose a slightly larger mask size to improve performance.
+ *
+ * This function maps the absolute minimum mask size needed to the
+ * mask size we'll actually use. f(x) = x (the smallest that will
+ * work) will produce the sharpest results, but will be quite
+ * jittery. f(x) = 1.25x (what I'm using) is a good tradeoff in my
+ * opinion. This will calculate only at init-time, so you can put a
+ * long expression here without effecting performance.
+ */
+#define apply_mask_fudge_factor(x) (((x) >> 2) + (x))
+
+/**
+ * Pre-process an image to give distance information.
+ *
+ * This function takes a bitmap image and converts it in place into a
+ * distance image. A distance image is zero for pixels outside of the
+ * logo and is the Manhattan distance (|dx| + |dy|) from the logo edge
+ * for pixels inside of the logo. This will overestimate the distance,
+ * but that is safe, and is far easier to implement than a proper
+ * pythagorean distance since I'm using a modified erosion algorithm
+ * to compute the distances.
+ *
+ * @param mask image which will be converted from a greyscale image
+ * into a distance image.
+ */
+static void convert_mask_to_strength_mask(uint8_t *data, int linesize,
+ int w, int h, int min_val,
+ int *max_mask_size)
+{
+ int x, y;
+
+ /* How many times we've gone through the loop. Used in the
+ in-place erosion algorithm and to get us max_mask_size later on. */
+ int current_pass = 0;
+
+ /* set all non-zero values to 1 */
+ for (y = 0; y < h; y++)
+ for (x = 0; x < w; x++)
+ data[y*linesize + x] = data[y*linesize + x] > min_val;
+
+ /* For each pass, if a pixel is itself the same value as the
+ current pass, and its four neighbors are too, then it is
+ incremented. If no pixels are incremented by the end of the
+ pass, then we go again. Edge pixels are counted as always
+ excluded (this should be true anyway for any sane mask, but if
+ it isn't this will ensure that we eventually exit). */
+ while (1) {
+ /* If this doesn't get set by the end of this pass, then we're done. */
+ int has_anything_changed = 0;
+ uint8_t *current_pixel0 = data + 1 + linesize, *current_pixel;
+ current_pass++;
+
+ for (y = 1; y < h-1; y++) {
+ current_pixel = current_pixel0;
+ for (x = 1; x < w-1; x++) {
+ /* Apply the in-place erosion transform. It is based
+ on the following two premises:
+ 1 - Any pixel that fails 1 erosion will fail all
+ future erosions.
+
+ 2 - Only pixels having survived all erosions up to
+ the present will be >= to current_pass.
+ It doesn't matter if it survived the current pass,
+ failed it, or hasn't been tested yet. By using >=
+ instead of ==, we allow the algorithm to work in
+ place. */
+ if ( *current_pixel >= current_pass &&
+ *(current_pixel + 1) >= current_pass &&
+ *(current_pixel - 1) >= current_pass &&
+ *(current_pixel + linesize) >= current_pass &&
+ *(current_pixel - linesize) >= current_pass) {
+ /* Increment the value since it still has not been
+ * eroded, as evidenced by the if statement that
+ * just evaluated to true. */
+ (*current_pixel)++;
+ has_anything_changed = 1;
+ }
+ current_pixel++;
+ }
+ current_pixel0 += linesize;
+ }
+ if (!has_anything_changed)
+ break;
+ }
+
+ /* Apply the fudge factor, which will increase the size of the
+ * mask a little to reduce jitter at the cost of more blur. */
+ for (y = 1; y < h - 1; y++)
+ for (x = 1; x < w - 1; x++)
+ data[(y * linesize) + x] = apply_mask_fudge_factor(data[(y * linesize) + x]);
+
+ /* As a side-effect, we now know the maximum mask size, which
+ * we'll use to generate our masks. */
+ /* Apply the fudge factor to this number too, since we must ensure
+ * that enough masks are generated. */
+ *max_mask_size = apply_mask_fudge_factor(current_pass + 1);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static int load_mask(uint8_t **mask, int *w, int *h,
+ const char *filename, void *log_ctx)
+{
+ int ret;
+ enum AVPixelFormat pix_fmt;
+ uint8_t *src_data[4], *gray_data[4];
+ int src_linesize[4], gray_linesize[4];
+
+ /* load image from file */
+ if ((ret = ff_load_image(src_data, src_linesize, w, h, &pix_fmt, filename, log_ctx)) < 0)
+ return ret;
+
+ /* convert the image to GRAY8 */
+ if ((ret = ff_scale_image(gray_data, gray_linesize, *w, *h, AV_PIX_FMT_GRAY8,
+ src_data, src_linesize, *w, *h, pix_fmt,
+ log_ctx)) < 0)
+ goto end;
+
+ /* copy mask to a newly allocated array */
+ *mask = av_malloc(*w * *h);
+ if (!*mask)
+ ret = AVERROR(ENOMEM);
+ av_image_copy_plane(*mask, *w, gray_data[0], gray_linesize[0], *w, *h);
+
+end:
+ av_freep(&src_data[0]);
+ av_freep(&gray_data[0]);
+ return ret;
+}
+
+/**
+ * Generate a scaled down image with half width, height, and intensity.
+ *
+ * This function not only scales down an image, but halves the value
+ * in each pixel too. The purpose of this is to produce a chroma
+ * filter image out of a luma filter image. The pixel values store the
+ * distance to the edge of the logo and halving the dimensions halves
+ * the distance. This function rounds up, because a downwards rounding
+ * error could cause the filter to fail, but an upwards rounding error
+ * will only cause a minor amount of excess blur in the chroma planes.
+ */
+static void generate_half_size_image(const uint8_t *src_data, int src_linesize,
+ uint8_t *dst_data, int dst_linesize,
+ int src_w, int src_h,
+ int *max_mask_size)
+{
+ int x, y;
+
+ /* Copy over the image data, using the average of 4 pixels for to
+ * calculate each downsampled pixel. */
+ for (y = 0; y < src_h/2; y++) {
+ for (x = 0; x < src_w/2; x++) {
+ /* Set the pixel if there exists a non-zero value in the
+ * source pixels, else clear it. */
+ dst_data[(y * dst_linesize) + x] =
+ src_data[((y << 1) * src_linesize) + (x << 1)] ||
+ src_data[((y << 1) * src_linesize) + (x << 1) + 1] ||
+ src_data[(((y << 1) + 1) * src_linesize) + (x << 1)] ||
+ src_data[(((y << 1) + 1) * src_linesize) + (x << 1) + 1];
+ dst_data[(y * dst_linesize) + x] = FFMIN(1, dst_data[(y * dst_linesize) + x]);
+ }
+ }
+
+ convert_mask_to_strength_mask(dst_data, dst_linesize,
+ src_w/2, src_h/2, 0, max_mask_size);
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ RemovelogoContext *s = ctx->priv;
+ int ***mask;
+ int ret = 0;
+ int a, b, c, w, h;
+ int full_max_mask_size, half_max_mask_size;
+
+ if (!s->filename) {
+ av_log(ctx, AV_LOG_ERROR, "The bitmap file name is mandatory\n");
+ return AVERROR(EINVAL);
+ }
+
+ /* Load our mask image. */
+ if ((ret = load_mask(&s->full_mask_data, &w, &h, s->filename, ctx)) < 0)
+ return ret;
+ s->mask_w = w;
+ s->mask_h = h;
+
+ convert_mask_to_strength_mask(s->full_mask_data, w, w, h,
+ 16, &full_max_mask_size);
+
+ /* Create the scaled down mask image for the chroma planes. */
+ if (!(s->half_mask_data = av_mallocz(w/2 * h/2)))
+ return AVERROR(ENOMEM);
+ generate_half_size_image(s->full_mask_data, w,
+ s->half_mask_data, w/2,
+ w, h, &half_max_mask_size);
+
+ s->max_mask_size = FFMAX(full_max_mask_size, half_max_mask_size);
+
+ /* Create a circular mask for each size up to max_mask_size. When
+ the filter is applied, the mask size is determined on a pixel
+ by pixel basis, with pixels nearer the edge of the logo getting
+ smaller mask sizes. */
+ mask = (int ***)av_malloc_array(s->max_mask_size + 1, sizeof(int **));
+ if (!mask)
+ return AVERROR(ENOMEM);
+
+ for (a = 0; a <= s->max_mask_size; a++) {
+ mask[a] = (int **)av_malloc_array((a * 2) + 1, sizeof(int *));
+ if (!mask[a]) {
+ av_free(mask);
+ return AVERROR(ENOMEM);
+ }
+ for (b = -a; b <= a; b++) {
+ mask[a][b + a] = (int *)av_malloc_array((a * 2) + 1, sizeof(int));
+ if (!mask[a][b + a]) {
+ av_free(mask);
+ return AVERROR(ENOMEM);
+ }
+ for (c = -a; c <= a; c++) {
+ if ((b * b) + (c * c) <= (a * a)) /* Circular 0/1 mask. */
+ mask[a][b + a][c + a] = 1;
+ else
+ mask[a][b + a][c + a] = 0;
+ }
+ }
+ }
+ s->mask = mask;
+
+ /* Calculate our bounding rectangles, which determine in what
+ * region the logo resides for faster processing. */
+ ff_calculate_bounding_box(&s->full_mask_bbox, s->full_mask_data, w, w, h, 0);
+ ff_calculate_bounding_box(&s->half_mask_bbox, s->half_mask_data, w/2, w/2, h/2, 0);
+
+#define SHOW_LOGO_INFO(mask_type) \
+ av_log(ctx, AV_LOG_VERBOSE, #mask_type " x1:%d x2:%d y1:%d y2:%d max_mask_size:%d\n", \
+ s->mask_type##_mask_bbox.x1, s->mask_type##_mask_bbox.x2, \
+ s->mask_type##_mask_bbox.y1, s->mask_type##_mask_bbox.y2, \
+ mask_type##_max_mask_size);
+ SHOW_LOGO_INFO(full);
+ SHOW_LOGO_INFO(half);
+
+ return 0;
+}
+
+static int config_props_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ RemovelogoContext *s = ctx->priv;
+
+ if (inlink->w != s->mask_w || inlink->h != s->mask_h) {
+ av_log(ctx, AV_LOG_INFO,
+ "Mask image size %dx%d does not match with the input video size %dx%d\n",
+ s->mask_w, s->mask_h, inlink->w, inlink->h);
+ return AVERROR(EINVAL);
+ }
+
+ return 0;
+}
+
+/**
+ * Blur image.
+ *
+ * It takes a pixel that is inside the mask and blurs it. It does so
+ * by finding the average of all the pixels within the mask and
+ * outside of the mask.
+ *
+ * @param mask_data the mask plane to use for averaging
+ * @param image_data the image plane to blur
+ * @param w width of the image
+ * @param h height of the image
+ * @param x x-coordinate of the pixel to blur
+ * @param y y-coordinate of the pixel to blur
+ */
+static unsigned int blur_pixel(int ***mask,
+ const uint8_t *mask_data, int mask_linesize,
+ uint8_t *image_data, int image_linesize,
+ int w, int h, int x, int y)
+{
+ /* Mask size tells how large a circle to use. The radius is about
+ * (slightly larger than) mask size. */
+ int mask_size;
+ int start_posx, start_posy, end_posx, end_posy;
+ int i, j;
+ unsigned int accumulator = 0, divisor = 0;
+ /* What pixel we are reading out of the circular blur mask. */
+ const uint8_t *image_read_position;
+ /* What pixel we are reading out of the filter image. */
+ const uint8_t *mask_read_position;
+
+ /* Prepare our bounding rectangle and clip it if need be. */
+ mask_size = mask_data[y * mask_linesize + x];
+ start_posx = FFMAX(0, x - mask_size);
+ start_posy = FFMAX(0, y - mask_size);
+ end_posx = FFMIN(w - 1, x + mask_size);
+ end_posy = FFMIN(h - 1, y + mask_size);
+
+ image_read_position = image_data + image_linesize * start_posy + start_posx;
+ mask_read_position = mask_data + mask_linesize * start_posy + start_posx;
+
+ for (j = start_posy; j <= end_posy; j++) {
+ for (i = start_posx; i <= end_posx; i++) {
+ /* Check if this pixel is in the mask or not. Only use the
+ * pixel if it is not. */
+ if (!(*mask_read_position) && mask[mask_size][i - start_posx][j - start_posy]) {
+ accumulator += *image_read_position;
+ divisor++;
+ }
+
+ image_read_position++;
+ mask_read_position++;
+ }
+
+ image_read_position += (image_linesize - ((end_posx + 1) - start_posx));
+ mask_read_position += (mask_linesize - ((end_posx + 1) - start_posx));
+ }
+
+ /* If divisor is 0, it means that not a single pixel is outside of
+ the logo, so we have no data. Else we need to normalise the
+ data using the divisor. */
+ return divisor == 0 ? 255:
+ (accumulator + (divisor / 2)) / divisor; /* divide, taking into account average rounding error */
+}
+
+/**
+ * Blur image plane using a mask.
+ *
+ * @param source The image to have it's logo removed.
+ * @param destination Where the output image will be stored.
+ * @param source_stride How far apart (in memory) two consecutive lines are.
+ * @param destination Same as source_stride, but for the destination image.
+ * @param width Width of the image. This is the same for source and destination.
+ * @param height Height of the image. This is the same for source and destination.
+ * @param is_image_direct If the image is direct, then source and destination are
+ * the same and we can save a lot of time by not copying pixels that
+ * haven't changed.
+ * @param filter The image that stores the distance to the edge of the logo for
+ * each pixel.
+ * @param logo_start_x smallest x-coordinate that contains at least 1 logo pixel.
+ * @param logo_start_y smallest y-coordinate that contains at least 1 logo pixel.
+ * @param logo_end_x largest x-coordinate that contains at least 1 logo pixel.
+ * @param logo_end_y largest y-coordinate that contains at least 1 logo pixel.
+ *
+ * This function processes an entire plane. Pixels outside of the logo are copied
+ * to the output without change, and pixels inside the logo have the de-blurring
+ * function applied.
+ */
+static void blur_image(int ***mask,
+ const uint8_t *src_data, int src_linesize,
+ uint8_t *dst_data, int dst_linesize,
+ const uint8_t *mask_data, int mask_linesize,
+ int w, int h, int direct,
+ FFBoundingBox *bbox)
+{
+ int x, y;
+ uint8_t *dst_line;
+ const uint8_t *src_line;
+
+ if (!direct)
+ av_image_copy_plane(dst_data, dst_linesize, src_data, src_linesize, w, h);
+
+ for (y = bbox->y1; y <= bbox->y2; y++) {
+ src_line = src_data + src_linesize * y;
+ dst_line = dst_data + dst_linesize * y;
+
+ for (x = bbox->x1; x <= bbox->x2; x++) {
+ if (mask_data[y * mask_linesize + x]) {
+ /* Only process if we are in the mask. */
+ dst_line[x] = blur_pixel(mask,
+ mask_data, mask_linesize,
+ dst_data, dst_linesize,
+ w, h, x, y);
+ } else {
+ /* Else just copy the data. */
+ if (!direct)
+ dst_line[x] = src_line[x];
+ }
+ }
+ }
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
+{
+ RemovelogoContext *s = inlink->dst->priv;
+ AVFilterLink *outlink = inlink->dst->outputs[0];
+ AVFrame *outpicref;
+ int direct = 0;
+
+ if (av_frame_is_writable(inpicref)) {
+ direct = 1;
+ outpicref = inpicref;
+ } else {
+ outpicref = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!outpicref) {
+ av_frame_free(&inpicref);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(outpicref, inpicref);
+ }
+
+ blur_image(s->mask,
+ inpicref ->data[0], inpicref ->linesize[0],
+ outpicref->data[0], outpicref->linesize[0],
+ s->full_mask_data, inlink->w,
+ inlink->w, inlink->h, direct, &s->full_mask_bbox);
+ blur_image(s->mask,
+ inpicref ->data[1], inpicref ->linesize[1],
+ outpicref->data[1], outpicref->linesize[1],
+ s->half_mask_data, inlink->w/2,
+ inlink->w/2, inlink->h/2, direct, &s->half_mask_bbox);
+ blur_image(s->mask,
+ inpicref ->data[2], inpicref ->linesize[2],
+ outpicref->data[2], outpicref->linesize[2],
+ s->half_mask_data, inlink->w/2,
+ inlink->w/2, inlink->h/2, direct, &s->half_mask_bbox);
+
+ if (!direct)
+ av_frame_free(&inpicref);
+
+ return ff_filter_frame(outlink, outpicref);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ RemovelogoContext *s = ctx->priv;
+ int a, b;
+
+ av_freep(&s->full_mask_data);
+ av_freep(&s->half_mask_data);
+
+ if (s->mask) {
+ /* Loop through each mask. */
+ for (a = 0; a <= s->max_mask_size; a++) {
+ /* Loop through each scanline in a mask. */
+ for (b = -a; b <= a; b++) {
+ av_freep(&s->mask[a][b + a]); /* Free a scanline. */
+ }
+ av_freep(&s->mask[a]);
+ }
+ /* Free the array of pointers pointing to the masks. */
+ av_freep(&s->mask);
+ }
+}
+
+static const AVFilterPad removelogo_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_props_input,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad removelogo_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_removelogo = {
+ .name = "removelogo",
+ .description = NULL_IF_CONFIG_SMALL("Remove a TV logo based on a mask image."),
+ .priv_size = sizeof(RemovelogoContext),
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = removelogo_inputs,
+ .outputs = removelogo_outputs,
+ .priv_class = &removelogo_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+};
diff --git a/libavfilter/vf_repeatfields.c b/libavfilter/vf_repeatfields.c
new file mode 100644
index 0000000000..3ac432b5bc
--- /dev/null
+++ b/libavfilter/vf_repeatfields.c
@@ -0,0 +1,192 @@
+/*
+ * Copyright (c) 2003 Tobias Diedrich
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include "libavutil/imgutils.h"
+#include "avfilter.h"
+#include "internal.h"
+
+typedef struct RepeatFieldsContext {
+ const AVClass *class;
+ int state;
+ int nb_planes;
+ int linesize[4];
+ int planeheight[4];
+ AVFrame *frame;
+} RepeatFieldsContext;
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ RepeatFieldsContext *s = ctx->priv;
+
+ av_frame_free(&s->frame);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pixel_fmts_eq[] = {
+ AV_PIX_FMT_GRAY8,
+ AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_YUV422P,
+ AV_PIX_FMT_YUV444P,
+ AV_PIX_FMT_NONE
+ };
+
+ AVFilterFormats *fmts_list = ff_make_format_list(pixel_fmts_eq);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ RepeatFieldsContext *s = inlink->dst->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ int ret;
+
+ if ((ret = av_image_fill_linesizes(s->linesize, inlink->format, inlink->w)) < 0)
+ return ret;
+
+ s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
+ s->planeheight[0] = s->planeheight[3] = inlink->h;
+
+ s->nb_planes = av_pix_fmt_count_planes(inlink->format);
+
+ return 0;
+}
+
+static void update_pts(AVFilterLink *link, AVFrame *f, int64_t pts, int fields)
+{
+ if (av_cmp_q(link->frame_rate, (AVRational){30000, 1001}) == 0 &&
+ av_cmp_q(link->time_base, (AVRational){1001, 60000}) <= 0
+ ) {
+ f->pts = pts + av_rescale_q(fields, (AVRational){1001, 60000}, link->time_base);
+ } else
+ f->pts = AV_NOPTS_VALUE;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in) {
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = inlink->dst->outputs[0];
+ RepeatFieldsContext *s = ctx->priv;
+ AVFrame *out;
+ int ret, i;
+ int state = s->state;
+
+ if (!s->frame) {
+ s->frame = av_frame_clone(in);
+ if (!s->frame)
+ return AVERROR(ENOMEM);
+ s->frame->pts = AV_NOPTS_VALUE;
+ }
+
+ out = s->frame;
+
+ if ((state == 0 && !in->top_field_first) ||
+ (state == 1 && in->top_field_first)) {
+ av_log(ctx, AV_LOG_WARNING, "Unexpected field flags: "
+ "state=%d top_field_first=%d repeat_first_field=%d\n",
+ state, in->top_field_first, in->repeat_pict);
+ state ^= 1;
+ }
+
+ if (state == 0) {
+ AVFrame *new;
+
+ new = av_frame_clone(in);
+ if (!new)
+ return AVERROR(ENOMEM);
+
+ ret = ff_filter_frame(outlink, new);
+
+ if (in->repeat_pict) {
+ av_frame_make_writable(out);
+ update_pts(outlink, out, in->pts, 2);
+ for (i = 0; i < s->nb_planes; i++) {
+ av_image_copy_plane(out->data[i], out->linesize[i] * 2,
+ in->data[i], in->linesize[i] * 2,
+ s->linesize[i], s->planeheight[i] / 2);
+ }
+ state = 1;
+ }
+ } else {
+ for (i = 0; i < s->nb_planes; i++) {
+ av_frame_make_writable(out);
+ av_image_copy_plane(out->data[i] + out->linesize[i], out->linesize[i] * 2,
+ in->data[i] + in->linesize[i], in->linesize[i] * 2,
+ s->linesize[i], s->planeheight[i] / 2);
+ }
+
+ ret = ff_filter_frame(outlink, av_frame_clone(out));
+
+ if (in->repeat_pict) {
+ AVFrame *new;
+
+ new = av_frame_clone(in);
+ if (!new)
+ return AVERROR(ENOMEM);
+
+ ret = ff_filter_frame(outlink, new);
+ state = 0;
+ } else {
+ av_frame_make_writable(out);
+ update_pts(outlink, out, in->pts, 1);
+ for (i = 0; i < s->nb_planes; i++) {
+ av_image_copy_plane(out->data[i], out->linesize[i] * 2,
+ in->data[i], in->linesize[i] * 2,
+ s->linesize[i], s->planeheight[i] / 2);
+ }
+ }
+ }
+
+ s->state = state;
+
+ av_frame_free(&in);
+ return ret;
+}
+
+static const AVFilterPad repeatfields_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ { NULL }
+};
+
+static const AVFilterPad repeatfields_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_repeatfields = {
+ .name = "repeatfields",
+ .description = NULL_IF_CONFIG_SMALL("Hard repeat fields based on MPEG repeat field flag."),
+ .priv_size = sizeof(RepeatFieldsContext),
+ .uninit = uninit,
+ .inputs = repeatfields_inputs,
+ .outputs = repeatfields_outputs,
+ .query_formats = query_formats,
+};
diff --git a/libavfilter/vf_rotate.c b/libavfilter/vf_rotate.c
new file mode 100644
index 0000000000..371ff7f722
--- /dev/null
+++ b/libavfilter/vf_rotate.c
@@ -0,0 +1,616 @@
+/*
+ * Copyright (c) 2013 Stefano Sabatini
+ * Copyright (c) 2008 Vitor Sessak
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * rotation filter, partially based on the tests/rotozoom.c program
+*/
+
+#include "libavutil/avstring.h"
+#include "libavutil/eval.h"
+#include "libavutil/opt.h"
+#include "libavutil/intreadwrite.h"
+#include "libavutil/parseutils.h"
+#include "libavutil/pixdesc.h"
+
+#include "avfilter.h"
+#include "drawutils.h"
+#include "internal.h"
+#include "video.h"
+
+#include <float.h>
+
+static const char * const var_names[] = {
+ "in_w" , "iw", ///< width of the input video
+ "in_h" , "ih", ///< height of the input video
+ "out_w", "ow", ///< width of the input video
+ "out_h", "oh", ///< height of the input video
+ "hsub", "vsub",
+ "n", ///< number of frame
+ "t", ///< timestamp expressed in seconds
+ NULL
+};
+
+enum var_name {
+ VAR_IN_W , VAR_IW,
+ VAR_IN_H , VAR_IH,
+ VAR_OUT_W, VAR_OW,
+ VAR_OUT_H, VAR_OH,
+ VAR_HSUB, VAR_VSUB,
+ VAR_N,
+ VAR_T,
+ VAR_VARS_NB
+};
+
+typedef struct RotContext {
+ const AVClass *class;
+ double angle;
+ char *angle_expr_str; ///< expression for the angle
+ AVExpr *angle_expr; ///< parsed expression for the angle
+ char *outw_expr_str, *outh_expr_str;
+ int outh, outw;
+ uint8_t fillcolor[4]; ///< color expressed either in YUVA or RGBA colorspace for the padding area
+ char *fillcolor_str;
+ int fillcolor_enable;
+ int hsub, vsub;
+ int nb_planes;
+ int use_bilinear;
+ float sinx, cosx;
+ double var_values[VAR_VARS_NB];
+ FFDrawContext draw;
+ FFDrawColor color;
+ uint8_t *(*interpolate_bilinear)(uint8_t *dst_color,
+ const uint8_t *src, int src_linesize, int src_linestep,
+ int x, int y, int max_x, int max_y);
+} RotContext;
+
+typedef struct ThreadData {
+ AVFrame *in, *out;
+ int inw, inh;
+ int outw, outh;
+ int plane;
+ int xi, yi;
+ int xprime, yprime;
+ int c, s;
+} ThreadData;
+
+#define OFFSET(x) offsetof(RotContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption rotate_options[] = {
+ { "angle", "set angle (in radians)", OFFSET(angle_expr_str), AV_OPT_TYPE_STRING, {.str="0"}, CHAR_MIN, CHAR_MAX, .flags=FLAGS },
+ { "a", "set angle (in radians)", OFFSET(angle_expr_str), AV_OPT_TYPE_STRING, {.str="0"}, CHAR_MIN, CHAR_MAX, .flags=FLAGS },
+ { "out_w", "set output width expression", OFFSET(outw_expr_str), AV_OPT_TYPE_STRING, {.str="iw"}, CHAR_MIN, CHAR_MAX, .flags=FLAGS },
+ { "ow", "set output width expression", OFFSET(outw_expr_str), AV_OPT_TYPE_STRING, {.str="iw"}, CHAR_MIN, CHAR_MAX, .flags=FLAGS },
+ { "out_h", "set output height expression", OFFSET(outh_expr_str), AV_OPT_TYPE_STRING, {.str="ih"}, CHAR_MIN, CHAR_MAX, .flags=FLAGS },
+ { "oh", "set output height expression", OFFSET(outh_expr_str), AV_OPT_TYPE_STRING, {.str="ih"}, CHAR_MIN, CHAR_MAX, .flags=FLAGS },
+ { "fillcolor", "set background fill color", OFFSET(fillcolor_str), AV_OPT_TYPE_STRING, {.str="black"}, CHAR_MIN, CHAR_MAX, .flags=FLAGS },
+ { "c", "set background fill color", OFFSET(fillcolor_str), AV_OPT_TYPE_STRING, {.str="black"}, CHAR_MIN, CHAR_MAX, .flags=FLAGS },
+ { "bilinear", "use bilinear interpolation", OFFSET(use_bilinear), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, .flags=FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(rotate);
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ RotContext *rot = ctx->priv;
+
+ if (!strcmp(rot->fillcolor_str, "none"))
+ rot->fillcolor_enable = 0;
+ else if (av_parse_color(rot->fillcolor, rot->fillcolor_str, -1, ctx) >= 0)
+ rot->fillcolor_enable = 1;
+ else
+ return AVERROR(EINVAL);
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ RotContext *rot = ctx->priv;
+
+ av_expr_free(rot->angle_expr);
+ rot->angle_expr = NULL;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP,
+ AV_PIX_FMT_ARGB, AV_PIX_FMT_RGBA,
+ AV_PIX_FMT_ABGR, AV_PIX_FMT_BGRA,
+ AV_PIX_FMT_0RGB, AV_PIX_FMT_RGB0,
+ AV_PIX_FMT_0BGR, AV_PIX_FMT_BGR0,
+ AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
+ AV_PIX_FMT_GRAY8,
+ AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUVJ444P,
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUVJ420P,
+ AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA420P,
+ AV_PIX_FMT_YUV420P10LE, AV_PIX_FMT_YUVA420P10LE,
+ AV_PIX_FMT_YUV444P10LE, AV_PIX_FMT_YUVA444P10LE,
+ AV_PIX_FMT_YUV420P12LE,
+ AV_PIX_FMT_YUV444P12LE,
+ AV_PIX_FMT_YUV444P16LE, AV_PIX_FMT_YUVA444P16LE,
+ AV_PIX_FMT_YUV420P16LE, AV_PIX_FMT_YUVA420P16LE,
+ AV_PIX_FMT_YUV444P9LE, AV_PIX_FMT_YUVA444P9LE,
+ AV_PIX_FMT_YUV420P9LE, AV_PIX_FMT_YUVA420P9LE,
+ AV_PIX_FMT_NONE
+ };
+
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static double get_rotated_w(void *opaque, double angle)
+{
+ RotContext *rot = opaque;
+ double inw = rot->var_values[VAR_IN_W];
+ double inh = rot->var_values[VAR_IN_H];
+ float sinx = sin(angle);
+ float cosx = cos(angle);
+
+ return FFMAX(0, inh * sinx) + FFMAX(0, -inw * cosx) +
+ FFMAX(0, inw * cosx) + FFMAX(0, -inh * sinx);
+}
+
+static double get_rotated_h(void *opaque, double angle)
+{
+ RotContext *rot = opaque;
+ double inw = rot->var_values[VAR_IN_W];
+ double inh = rot->var_values[VAR_IN_H];
+ float sinx = sin(angle);
+ float cosx = cos(angle);
+
+ return FFMAX(0, -inh * cosx) + FFMAX(0, -inw * sinx) +
+ FFMAX(0, inh * cosx) + FFMAX(0, inw * sinx);
+}
+
+static double (* const func1[])(void *, double) = {
+ get_rotated_w,
+ get_rotated_h,
+ NULL
+};
+
+static const char * const func1_names[] = {
+ "rotw",
+ "roth",
+ NULL
+};
+
+#define FIXP (1<<16)
+#define FIXP2 (1<<20)
+#define INT_PI 3294199 //(M_PI * FIXP2)
+
+/**
+ * Compute the sin of a using integer values.
+ * Input is scaled by FIXP2 and output values are scaled by FIXP.
+ */
+static int64_t int_sin(int64_t a)
+{
+ int64_t a2, res = 0;
+ int i;
+ if (a < 0) a = INT_PI-a; // 0..inf
+ a %= 2 * INT_PI; // 0..2PI
+
+ if (a >= INT_PI*3/2) a -= 2*INT_PI; // -PI/2 .. 3PI/2
+ if (a >= INT_PI/2 ) a = INT_PI - a; // -PI/2 .. PI/2
+
+ /* compute sin using Taylor series approximated to the fifth term */
+ a2 = (a*a)/(FIXP2);
+ for (i = 2; i < 11; i += 2) {
+ res += a;
+ a = -a*a2 / (FIXP2*i*(i+1));
+ }
+ return (res + 8)>>4;
+}
+
+/**
+ * Interpolate the color in src at position x and y using bilinear
+ * interpolation.
+ */
+static uint8_t *interpolate_bilinear8(uint8_t *dst_color,
+ const uint8_t *src, int src_linesize, int src_linestep,
+ int x, int y, int max_x, int max_y)
+{
+ int int_x = av_clip(x>>16, 0, max_x);
+ int int_y = av_clip(y>>16, 0, max_y);
+ int frac_x = x&0xFFFF;
+ int frac_y = y&0xFFFF;
+ int i;
+ int int_x1 = FFMIN(int_x+1, max_x);
+ int int_y1 = FFMIN(int_y+1, max_y);
+
+ for (i = 0; i < src_linestep; i++) {
+ int s00 = src[src_linestep * int_x + i + src_linesize * int_y ];
+ int s01 = src[src_linestep * int_x1 + i + src_linesize * int_y ];
+ int s10 = src[src_linestep * int_x + i + src_linesize * int_y1];
+ int s11 = src[src_linestep * int_x1 + i + src_linesize * int_y1];
+ int s0 = (((1<<16) - frac_x)*s00 + frac_x*s01);
+ int s1 = (((1<<16) - frac_x)*s10 + frac_x*s11);
+
+ dst_color[i] = ((int64_t)((1<<16) - frac_y)*s0 + (int64_t)frac_y*s1) >> 32;
+ }
+
+ return dst_color;
+}
+
+/**
+ * Interpolate the color in src at position x and y using bilinear
+ * interpolation.
+ */
+static uint8_t *interpolate_bilinear16(uint8_t *dst_color,
+ const uint8_t *src, int src_linesize, int src_linestep,
+ int x, int y, int max_x, int max_y)
+{
+ int int_x = av_clip(x>>16, 0, max_x);
+ int int_y = av_clip(y>>16, 0, max_y);
+ int frac_x = x&0xFFFF;
+ int frac_y = y&0xFFFF;
+ int i;
+ int int_x1 = FFMIN(int_x+1, max_x);
+ int int_y1 = FFMIN(int_y+1, max_y);
+
+ for (i = 0; i < src_linestep; i+=2) {
+ int s00 = AV_RL16(&src[src_linestep * int_x + i + src_linesize * int_y ]);
+ int s01 = AV_RL16(&src[src_linestep * int_x1 + i + src_linesize * int_y ]);
+ int s10 = AV_RL16(&src[src_linestep * int_x + i + src_linesize * int_y1]);
+ int s11 = AV_RL16(&src[src_linestep * int_x1 + i + src_linesize * int_y1]);
+ int s0 = (((1<<16) - frac_x)*s00 + frac_x*s01);
+ int s1 = (((1<<16) - frac_x)*s10 + frac_x*s11);
+
+ AV_WL16(&dst_color[i], ((int64_t)((1<<16) - frac_y)*s0 + (int64_t)frac_y*s1) >> 32);
+ }
+
+ return dst_color;
+}
+
+static int config_props(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ RotContext *rot = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
+ const AVPixFmtDescriptor *pixdesc = av_pix_fmt_desc_get(inlink->format);
+ int ret;
+ double res;
+ char *expr;
+
+ ff_draw_init(&rot->draw, inlink->format, 0);
+ ff_draw_color(&rot->draw, &rot->color, rot->fillcolor);
+
+ rot->hsub = pixdesc->log2_chroma_w;
+ rot->vsub = pixdesc->log2_chroma_h;
+
+ if (pixdesc->comp[0].depth == 8)
+ rot->interpolate_bilinear = interpolate_bilinear8;
+ else
+ rot->interpolate_bilinear = interpolate_bilinear16;
+
+ rot->var_values[VAR_IN_W] = rot->var_values[VAR_IW] = inlink->w;
+ rot->var_values[VAR_IN_H] = rot->var_values[VAR_IH] = inlink->h;
+ rot->var_values[VAR_HSUB] = 1<<rot->hsub;
+ rot->var_values[VAR_VSUB] = 1<<rot->vsub;
+ rot->var_values[VAR_N] = NAN;
+ rot->var_values[VAR_T] = NAN;
+ rot->var_values[VAR_OUT_W] = rot->var_values[VAR_OW] = NAN;
+ rot->var_values[VAR_OUT_H] = rot->var_values[VAR_OH] = NAN;
+
+ av_expr_free(rot->angle_expr);
+ rot->angle_expr = NULL;
+ if ((ret = av_expr_parse(&rot->angle_expr, expr = rot->angle_expr_str, var_names,
+ func1_names, func1, NULL, NULL, 0, ctx)) < 0) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Error occurred parsing angle expression '%s'\n", rot->angle_expr_str);
+ return ret;
+ }
+
+#define SET_SIZE_EXPR(name, opt_name) do { \
+ ret = av_expr_parse_and_eval(&res, expr = rot->name##_expr_str, \
+ var_names, rot->var_values, \
+ func1_names, func1, NULL, NULL, rot, 0, ctx); \
+ if (ret < 0 || isnan(res) || isinf(res) || res <= 0) { \
+ av_log(ctx, AV_LOG_ERROR, \
+ "Error parsing or evaluating expression for option %s: " \
+ "invalid expression '%s' or non-positive or indefinite value %f\n", \
+ opt_name, expr, res); \
+ return ret; \
+ } \
+} while (0)
+
+ /* evaluate width and height */
+ av_expr_parse_and_eval(&res, expr = rot->outw_expr_str, var_names, rot->var_values,
+ func1_names, func1, NULL, NULL, rot, 0, ctx);
+ rot->var_values[VAR_OUT_W] = rot->var_values[VAR_OW] = res;
+ rot->outw = res + 0.5;
+ SET_SIZE_EXPR(outh, "out_h");
+ rot->var_values[VAR_OUT_H] = rot->var_values[VAR_OH] = res;
+ rot->outh = res + 0.5;
+
+ /* evaluate the width again, as it may depend on the evaluated output height */
+ SET_SIZE_EXPR(outw, "out_w");
+ rot->var_values[VAR_OUT_W] = rot->var_values[VAR_OW] = res;
+ rot->outw = res + 0.5;
+
+ /* compute number of planes */
+ rot->nb_planes = av_pix_fmt_count_planes(inlink->format);
+ outlink->w = rot->outw;
+ outlink->h = rot->outh;
+ return 0;
+}
+
+static av_always_inline void copy_elem(uint8_t *pout, const uint8_t *pin, int elem_size)
+{
+ int v;
+ switch (elem_size) {
+ case 1:
+ *pout = *pin;
+ break;
+ case 2:
+ *((uint16_t *)pout) = *((uint16_t *)pin);
+ break;
+ case 3:
+ v = AV_RB24(pin);
+ AV_WB24(pout, v);
+ break;
+ case 4:
+ *((uint32_t *)pout) = *((uint32_t *)pin);
+ break;
+ default:
+ memcpy(pout, pin, elem_size);
+ break;
+ }
+}
+
+static av_always_inline void simple_rotate_internal(uint8_t *dst, const uint8_t *src, int src_linesize, int angle, int elem_size, int len)
+{
+ int i;
+ switch(angle) {
+ case 0:
+ memcpy(dst, src, elem_size * len);
+ break;
+ case 1:
+ for (i = 0; i<len; i++)
+ copy_elem(dst + i*elem_size, src + (len-i-1)*src_linesize, elem_size);
+ break;
+ case 2:
+ for (i = 0; i<len; i++)
+ copy_elem(dst + i*elem_size, src + (len-i-1)*elem_size, elem_size);
+ break;
+ case 3:
+ for (i = 0; i<len; i++)
+ copy_elem(dst + i*elem_size, src + i*src_linesize, elem_size);
+ break;
+ }
+}
+
+static av_always_inline void simple_rotate(uint8_t *dst, const uint8_t *src, int src_linesize, int angle, int elem_size, int len)
+{
+ switch(elem_size) {
+ case 1 : simple_rotate_internal(dst, src, src_linesize, angle, 1, len); break;
+ case 2 : simple_rotate_internal(dst, src, src_linesize, angle, 2, len); break;
+ case 3 : simple_rotate_internal(dst, src, src_linesize, angle, 3, len); break;
+ case 4 : simple_rotate_internal(dst, src, src_linesize, angle, 4, len); break;
+ default: simple_rotate_internal(dst, src, src_linesize, angle, elem_size, len); break;
+ }
+}
+
+#define TS2T(ts, tb) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts)*av_q2d(tb))
+
+static int filter_slice(AVFilterContext *ctx, void *arg, int job, int nb_jobs)
+{
+ ThreadData *td = arg;
+ AVFrame *in = td->in;
+ AVFrame *out = td->out;
+ RotContext *rot = ctx->priv;
+ const int outw = td->outw, outh = td->outh;
+ const int inw = td->inw, inh = td->inh;
+ const int plane = td->plane;
+ const int xi = td->xi, yi = td->yi;
+ const int c = td->c, s = td->s;
+ const int start = (outh * job ) / nb_jobs;
+ const int end = (outh * (job+1)) / nb_jobs;
+ int xprime = td->xprime + start * s;
+ int yprime = td->yprime + start * c;
+ int i, j, x, y;
+
+ for (j = start; j < end; j++) {
+ x = xprime + xi + FIXP*(inw-1)/2;
+ y = yprime + yi + FIXP*(inh-1)/2;
+
+ if (fabs(rot->angle - 0) < FLT_EPSILON && outw == inw && outh == inh) {
+ simple_rotate(out->data[plane] + j * out->linesize[plane],
+ in->data[plane] + j * in->linesize[plane],
+ in->linesize[plane], 0, rot->draw.pixelstep[plane], outw);
+ } else if (fabs(rot->angle - M_PI/2) < FLT_EPSILON && outw == inh && outh == inw) {
+ simple_rotate(out->data[plane] + j * out->linesize[plane],
+ in->data[plane] + j * rot->draw.pixelstep[plane],
+ in->linesize[plane], 1, rot->draw.pixelstep[plane], outw);
+ } else if (fabs(rot->angle - M_PI) < FLT_EPSILON && outw == inw && outh == inh) {
+ simple_rotate(out->data[plane] + j * out->linesize[plane],
+ in->data[plane] + (outh-j-1) * in->linesize[plane],
+ in->linesize[plane], 2, rot->draw.pixelstep[plane], outw);
+ } else if (fabs(rot->angle - 3*M_PI/2) < FLT_EPSILON && outw == inh && outh == inw) {
+ simple_rotate(out->data[plane] + j * out->linesize[plane],
+ in->data[plane] + (outh-j-1) * rot->draw.pixelstep[plane],
+ in->linesize[plane], 3, rot->draw.pixelstep[plane], outw);
+ } else {
+
+ for (i = 0; i < outw; i++) {
+ int32_t v;
+ int x1, y1;
+ uint8_t *pin, *pout;
+ x1 = x>>16;
+ y1 = y>>16;
+
+ /* the out-of-range values avoid border artifacts */
+ if (x1 >= -1 && x1 <= inw && y1 >= -1 && y1 <= inh) {
+ uint8_t inp_inv[4]; /* interpolated input value */
+ pout = out->data[plane] + j * out->linesize[plane] + i * rot->draw.pixelstep[plane];
+ if (rot->use_bilinear) {
+ pin = rot->interpolate_bilinear(inp_inv,
+ in->data[plane], in->linesize[plane], rot->draw.pixelstep[plane],
+ x, y, inw-1, inh-1);
+ } else {
+ int x2 = av_clip(x1, 0, inw-1);
+ int y2 = av_clip(y1, 0, inh-1);
+ pin = in->data[plane] + y2 * in->linesize[plane] + x2 * rot->draw.pixelstep[plane];
+ }
+ switch (rot->draw.pixelstep[plane]) {
+ case 1:
+ *pout = *pin;
+ break;
+ case 2:
+ v = AV_RL16(pin);
+ AV_WL16(pout, v);
+ break;
+ case 3:
+ v = AV_RB24(pin);
+ AV_WB24(pout, v);
+ break;
+ case 4:
+ *((uint32_t *)pout) = *((uint32_t *)pin);
+ break;
+ default:
+ memcpy(pout, pin, rot->draw.pixelstep[plane]);
+ break;
+ }
+ }
+ x += c;
+ y -= s;
+ }
+ }
+ xprime += s;
+ yprime += c;
+ }
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFrame *out;
+ RotContext *rot = ctx->priv;
+ int angle_int, s, c, plane;
+ double res;
+
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+
+ rot->var_values[VAR_N] = inlink->frame_count_out;
+ rot->var_values[VAR_T] = TS2T(in->pts, inlink->time_base);
+ rot->angle = res = av_expr_eval(rot->angle_expr, rot->var_values, rot);
+
+ av_log(ctx, AV_LOG_DEBUG, "n:%f time:%f angle:%f/PI\n",
+ rot->var_values[VAR_N], rot->var_values[VAR_T], rot->angle/M_PI);
+
+ angle_int = res * FIXP * 16;
+ s = int_sin(angle_int);
+ c = int_sin(angle_int + INT_PI/2);
+
+ /* fill background */
+ if (rot->fillcolor_enable)
+ ff_fill_rectangle(&rot->draw, &rot->color, out->data, out->linesize,
+ 0, 0, outlink->w, outlink->h);
+
+ for (plane = 0; plane < rot->nb_planes; plane++) {
+ int hsub = plane == 1 || plane == 2 ? rot->hsub : 0;
+ int vsub = plane == 1 || plane == 2 ? rot->vsub : 0;
+ const int outw = AV_CEIL_RSHIFT(outlink->w, hsub);
+ const int outh = AV_CEIL_RSHIFT(outlink->h, vsub);
+ ThreadData td = { .in = in, .out = out,
+ .inw = AV_CEIL_RSHIFT(inlink->w, hsub),
+ .inh = AV_CEIL_RSHIFT(inlink->h, vsub),
+ .outh = outh, .outw = outw,
+ .xi = -(outw-1) * c / 2, .yi = (outw-1) * s / 2,
+ .xprime = -(outh-1) * s / 2,
+ .yprime = -(outh-1) * c / 2,
+ .plane = plane, .c = c, .s = s };
+
+
+ ctx->internal->execute(ctx, filter_slice, &td, NULL, FFMIN(outh, ff_filter_get_nb_threads(ctx)));
+ }
+
+ av_frame_free(&in);
+ return ff_filter_frame(outlink, out);
+}
+
+static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
+ char *res, int res_len, int flags)
+{
+ RotContext *rot = ctx->priv;
+ int ret;
+
+ if (!strcmp(cmd, "angle") || !strcmp(cmd, "a")) {
+ AVExpr *old = rot->angle_expr;
+ ret = av_expr_parse(&rot->angle_expr, args, var_names,
+ NULL, NULL, NULL, NULL, 0, ctx);
+ if (ret < 0) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Error when parsing the expression '%s' for angle command\n", args);
+ rot->angle_expr = old;
+ return ret;
+ }
+ av_expr_free(old);
+ } else
+ ret = AVERROR(ENOSYS);
+
+ return ret;
+}
+
+static const AVFilterPad rotate_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad rotate_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_props,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_rotate = {
+ .name = "rotate",
+ .description = NULL_IF_CONFIG_SMALL("Rotate the input image."),
+ .priv_size = sizeof(RotContext),
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .process_command = process_command,
+ .inputs = rotate_inputs,
+ .outputs = rotate_outputs,
+ .priv_class = &rotate_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
+};
diff --git a/libavfilter/vf_sab.c b/libavfilter/vf_sab.c
new file mode 100644
index 0000000000..3f0951f32a
--- /dev/null
+++ b/libavfilter/vf_sab.c
@@ -0,0 +1,337 @@
+/*
+ * Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/**
+ * @file
+ * Shape Adaptive Blur filter, ported from MPlayer libmpcodecs/vf_sab.c
+ */
+
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "libswscale/swscale.h"
+
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+
+typedef struct {
+ float radius;
+ float pre_filter_radius;
+ float strength;
+ float quality;
+ struct SwsContext *pre_filter_context;
+ uint8_t *pre_filter_buf;
+ int pre_filter_linesize;
+ int dist_width;
+ int dist_linesize;
+ int *dist_coeff;
+#define COLOR_DIFF_COEFF_SIZE 512
+ int color_diff_coeff[COLOR_DIFF_COEFF_SIZE];
+} FilterParam;
+
+typedef struct {
+ const AVClass *class;
+ FilterParam luma;
+ FilterParam chroma;
+ int hsub;
+ int vsub;
+ unsigned int sws_flags;
+} SabContext;
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_YUV444P,
+ AV_PIX_FMT_YUV422P,
+ AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_NONE
+ };
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+#define RADIUS_MIN 0.1
+#define RADIUS_MAX 4.0
+
+#define PRE_FILTER_RADIUS_MIN 0.1
+#define PRE_FILTER_RADIUS_MAX 2.0
+
+#define STRENGTH_MIN 0.1
+#define STRENGTH_MAX 100.0
+
+#define OFFSET(x) offsetof(SabContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption sab_options[] = {
+ { "luma_radius", "set luma radius", OFFSET(luma.radius), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, RADIUS_MIN, RADIUS_MAX, .flags=FLAGS },
+ { "lr" , "set luma radius", OFFSET(luma.radius), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, RADIUS_MIN, RADIUS_MAX, .flags=FLAGS },
+ { "luma_pre_filter_radius", "set luma pre-filter radius", OFFSET(luma.pre_filter_radius), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, PRE_FILTER_RADIUS_MIN, PRE_FILTER_RADIUS_MAX, .flags=FLAGS },
+ { "lpfr", "set luma pre-filter radius", OFFSET(luma.pre_filter_radius), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, PRE_FILTER_RADIUS_MIN, PRE_FILTER_RADIUS_MAX, .flags=FLAGS },
+ { "luma_strength", "set luma strength", OFFSET(luma.strength), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, STRENGTH_MIN, STRENGTH_MAX, .flags=FLAGS },
+ { "ls", "set luma strength", OFFSET(luma.strength), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, STRENGTH_MIN, STRENGTH_MAX, .flags=FLAGS },
+
+ { "chroma_radius", "set chroma radius", OFFSET(chroma.radius), AV_OPT_TYPE_FLOAT, {.dbl=RADIUS_MIN-1}, RADIUS_MIN-1, RADIUS_MAX, .flags=FLAGS },
+ { "cr", "set chroma radius", OFFSET(chroma.radius), AV_OPT_TYPE_FLOAT, {.dbl=RADIUS_MIN-1}, RADIUS_MIN-1, RADIUS_MAX, .flags=FLAGS },
+ { "chroma_pre_filter_radius", "set chroma pre-filter radius", OFFSET(chroma.pre_filter_radius), AV_OPT_TYPE_FLOAT, {.dbl=PRE_FILTER_RADIUS_MIN-1},
+ PRE_FILTER_RADIUS_MIN-1, PRE_FILTER_RADIUS_MAX, .flags=FLAGS },
+ { "cpfr", "set chroma pre-filter radius", OFFSET(chroma.pre_filter_radius), AV_OPT_TYPE_FLOAT, {.dbl=PRE_FILTER_RADIUS_MIN-1},
+ PRE_FILTER_RADIUS_MIN-1, PRE_FILTER_RADIUS_MAX, .flags=FLAGS },
+ { "chroma_strength", "set chroma strength", OFFSET(chroma.strength), AV_OPT_TYPE_FLOAT, {.dbl=STRENGTH_MIN-1}, STRENGTH_MIN-1, STRENGTH_MAX, .flags=FLAGS },
+ { "cs", "set chroma strength", OFFSET(chroma.strength), AV_OPT_TYPE_FLOAT, {.dbl=STRENGTH_MIN-1}, STRENGTH_MIN-1, STRENGTH_MAX, .flags=FLAGS },
+
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(sab);
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ SabContext *s = ctx->priv;
+
+ /* make chroma default to luma values, if not explicitly set */
+ if (s->chroma.radius < RADIUS_MIN)
+ s->chroma.radius = s->luma.radius;
+ if (s->chroma.pre_filter_radius < PRE_FILTER_RADIUS_MIN)
+ s->chroma.pre_filter_radius = s->luma.pre_filter_radius;
+ if (s->chroma.strength < STRENGTH_MIN)
+ s->chroma.strength = s->luma.strength;
+
+ s->luma.quality = s->chroma.quality = 3.0;
+ s->sws_flags = SWS_POINT;
+
+ av_log(ctx, AV_LOG_VERBOSE,
+ "luma_radius:%f luma_pre_filter_radius::%f luma_strength:%f "
+ "chroma_radius:%f chroma_pre_filter_radius:%f chroma_strength:%f\n",
+ s->luma .radius, s->luma .pre_filter_radius, s->luma .strength,
+ s->chroma.radius, s->chroma.pre_filter_radius, s->chroma.strength);
+ return 0;
+}
+
+static void close_filter_param(FilterParam *f)
+{
+ if (f->pre_filter_context) {
+ sws_freeContext(f->pre_filter_context);
+ f->pre_filter_context = NULL;
+ }
+ av_freep(&f->pre_filter_buf);
+ av_freep(&f->dist_coeff);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ SabContext *s = ctx->priv;
+
+ close_filter_param(&s->luma);
+ close_filter_param(&s->chroma);
+}
+
+static int open_filter_param(FilterParam *f, int width, int height, unsigned int sws_flags)
+{
+ SwsVector *vec;
+ SwsFilter sws_f;
+ int i, x, y;
+ int linesize = FFALIGN(width, 8);
+
+ f->pre_filter_buf = av_malloc(linesize * height);
+ if (!f->pre_filter_buf)
+ return AVERROR(ENOMEM);
+
+ f->pre_filter_linesize = linesize;
+ vec = sws_getGaussianVec(f->pre_filter_radius, f->quality);
+ sws_f.lumH = sws_f.lumV = vec;
+ sws_f.chrH = sws_f.chrV = NULL;
+ f->pre_filter_context = sws_getContext(width, height, AV_PIX_FMT_GRAY8,
+ width, height, AV_PIX_FMT_GRAY8,
+ sws_flags, &sws_f, NULL, NULL);
+ sws_freeVec(vec);
+
+ vec = sws_getGaussianVec(f->strength, 5.0);
+ for (i = 0; i < COLOR_DIFF_COEFF_SIZE; i++) {
+ double d;
+ int index = i-COLOR_DIFF_COEFF_SIZE/2 + vec->length/2;
+
+ if (index < 0 || index >= vec->length) d = 0.0;
+ else d = vec->coeff[index];
+
+ f->color_diff_coeff[i] = (int)(d/vec->coeff[vec->length/2]*(1<<12) + 0.5);
+ }
+ sws_freeVec(vec);
+
+ vec = sws_getGaussianVec(f->radius, f->quality);
+ f->dist_width = vec->length;
+ f->dist_linesize = FFALIGN(vec->length, 8);
+ f->dist_coeff = av_malloc_array(f->dist_width, f->dist_linesize * sizeof(*f->dist_coeff));
+ if (!f->dist_coeff) {
+ sws_freeVec(vec);
+ return AVERROR(ENOMEM);
+ }
+
+ for (y = 0; y < vec->length; y++) {
+ for (x = 0; x < vec->length; x++) {
+ double d = vec->coeff[x] * vec->coeff[y];
+ f->dist_coeff[x + y*f->dist_linesize] = (int)(d*(1<<10) + 0.5);
+ }
+ }
+ sws_freeVec(vec);
+
+ return 0;
+}
+
+static int config_props(AVFilterLink *inlink)
+{
+ SabContext *s = inlink->dst->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ int ret;
+
+ s->hsub = desc->log2_chroma_w;
+ s->vsub = desc->log2_chroma_h;
+
+ close_filter_param(&s->luma);
+ ret = open_filter_param(&s->luma, inlink->w, inlink->h, s->sws_flags);
+ if (ret < 0)
+ return ret;
+
+ close_filter_param(&s->chroma);
+ ret = open_filter_param(&s->chroma,
+ AV_CEIL_RSHIFT(inlink->w, s->hsub),
+ AV_CEIL_RSHIFT(inlink->h, s->vsub), s->sws_flags);
+ return ret;
+}
+
+#define NB_PLANES 4
+
+static void blur(uint8_t *dst, const int dst_linesize,
+ const uint8_t *src, const int src_linesize,
+ const int w, const int h, FilterParam *fp)
+{
+ int x, y;
+ FilterParam f = *fp;
+ const int radius = f.dist_width/2;
+
+ const uint8_t * const src2[NB_PLANES] = { src };
+ int src2_linesize[NB_PLANES] = { src_linesize };
+ uint8_t *dst2[NB_PLANES] = { f.pre_filter_buf };
+ int dst2_linesize[NB_PLANES] = { f.pre_filter_linesize };
+
+ sws_scale(f.pre_filter_context, src2, src2_linesize, 0, h, dst2, dst2_linesize);
+
+#define UPDATE_FACTOR do { \
+ int factor; \
+ factor = f.color_diff_coeff[COLOR_DIFF_COEFF_SIZE/2 + pre_val - \
+ f.pre_filter_buf[ix + iy*f.pre_filter_linesize]] * f.dist_coeff[dx + dy*f.dist_linesize]; \
+ sum += src[ix + iy*src_linesize] * factor; \
+ div += factor; \
+ } while (0)
+
+ for (y = 0; y < h; y++) {
+ for (x = 0; x < w; x++) {
+ int sum = 0;
+ int div = 0;
+ int dy;
+ const int pre_val = f.pre_filter_buf[x + y*f.pre_filter_linesize];
+ if (x >= radius && x < w - radius) {
+ for (dy = 0; dy < radius*2 + 1; dy++) {
+ int dx;
+ int iy = y+dy - radius;
+ iy = avpriv_mirror(iy, h-1);
+
+ for (dx = 0; dx < radius*2 + 1; dx++) {
+ const int ix = x+dx - radius;
+ UPDATE_FACTOR;
+ }
+ }
+ } else {
+ for (dy = 0; dy < radius*2+1; dy++) {
+ int dx;
+ int iy = y+dy - radius;
+ iy = avpriv_mirror(iy, h-1);
+
+ for (dx = 0; dx < radius*2 + 1; dx++) {
+ int ix = x+dx - radius;
+ ix = avpriv_mirror(ix, w-1);
+ UPDATE_FACTOR;
+ }
+ }
+ }
+ dst[x + y*dst_linesize] = (sum + div/2) / div;
+ }
+ }
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *inpic)
+{
+ SabContext *s = inlink->dst->priv;
+ AVFilterLink *outlink = inlink->dst->outputs[0];
+ AVFrame *outpic;
+
+ outpic = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!outpic) {
+ av_frame_free(&inpic);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(outpic, inpic);
+
+ blur(outpic->data[0], outpic->linesize[0], inpic->data[0], inpic->linesize[0],
+ inlink->w, inlink->h, &s->luma);
+ if (inpic->data[2]) {
+ int cw = AV_CEIL_RSHIFT(inlink->w, s->hsub);
+ int ch = AV_CEIL_RSHIFT(inlink->h, s->vsub);
+ blur(outpic->data[1], outpic->linesize[1], inpic->data[1], inpic->linesize[1], cw, ch, &s->chroma);
+ blur(outpic->data[2], outpic->linesize[2], inpic->data[2], inpic->linesize[2], cw, ch, &s->chroma);
+ }
+
+ av_frame_free(&inpic);
+ return ff_filter_frame(outlink, outpic);
+}
+
+static const AVFilterPad sab_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_props,
+ },
+ { NULL }
+};
+
+static const AVFilterPad sab_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_sab = {
+ .name = "sab",
+ .description = NULL_IF_CONFIG_SMALL("Apply shape adaptive blur."),
+ .priv_size = sizeof(SabContext),
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = sab_inputs,
+ .outputs = sab_outputs,
+ .priv_class = &sab_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+};
diff --git a/libavfilter/vf_scale.c b/libavfilter/vf_scale.c
index 65cf6323c1..2fe9a1fb52 100644
--- a/libavfilter/vf_scale.c
+++ b/libavfilter/vf_scale.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2007 Bobby Bingham
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -29,80 +29,118 @@
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
+#include "scale.h"
#include "video.h"
#include "libavutil/avstring.h"
-#include "libavutil/eval.h"
#include "libavutil/internal.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
+#include "libavutil/parseutils.h"
#include "libavutil/pixdesc.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/avassert.h"
#include "libswscale/swscale.h"
-static const char *const var_names[] = {
- "PI",
- "PHI",
- "E",
- "in_w", "iw",
- "in_h", "ih",
- "out_w", "ow",
- "out_h", "oh",
- "a", "dar",
- "sar",
- "hsub",
- "vsub",
- NULL
-};
-
-enum var_name {
- VAR_PI,
- VAR_PHI,
- VAR_E,
- VAR_IN_W, VAR_IW,
- VAR_IN_H, VAR_IH,
- VAR_OUT_W, VAR_OW,
- VAR_OUT_H, VAR_OH,
- VAR_A, VAR_DAR,
- VAR_SAR,
- VAR_HSUB,
- VAR_VSUB,
- VARS_NB
+enum EvalMode {
+ EVAL_MODE_INIT,
+ EVAL_MODE_FRAME,
+ EVAL_MODE_NB
};
typedef struct ScaleContext {
const AVClass *class;
struct SwsContext *sws; ///< software scaler context
+ struct SwsContext *isws[2]; ///< software scaler context for interlaced material
+ AVDictionary *opts;
/**
* New dimensions. Special values are:
* 0 = original width/height
* -1 = keep original aspect
+ * -N = try to keep aspect but make sure it is divisible by N
*/
int w, h;
+ char *size_str;
unsigned int flags; ///sws flags
double param[2]; // sws params
int hsub, vsub; ///< chroma subsampling
int slice_y; ///< top of current output slice
int input_is_pal; ///< set to 1 if the input format is paletted
+ int output_is_pal; ///< set to 1 if the output format is paletted
+ int interlaced;
char *w_expr; ///< width expression string
char *h_expr; ///< height expression string
char *flags_str;
+
+ char *in_color_matrix;
+ char *out_color_matrix;
+
+ int in_range;
+ int out_range;
+
+ int out_h_chr_pos;
+ int out_v_chr_pos;
+ int in_h_chr_pos;
+ int in_v_chr_pos;
+
+ int force_original_aspect_ratio;
+
+ int nb_slices;
+
+ int eval_mode; ///< expression evaluation mode
+
} ScaleContext;
-static av_cold int init(AVFilterContext *ctx)
+AVFilter ff_vf_scale2ref;
+
+static av_cold int init_dict(AVFilterContext *ctx, AVDictionary **opts)
{
ScaleContext *scale = ctx->priv;
+ int ret;
+
+ if (scale->size_str && (scale->w_expr || scale->h_expr)) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Size and width/height expressions cannot be set at the same time.\n");
+ return AVERROR(EINVAL);
+ }
+
+ if (scale->w_expr && !scale->h_expr)
+ FFSWAP(char *, scale->w_expr, scale->size_str);
+
+ if (scale->size_str) {
+ char buf[32];
+ if ((ret = av_parse_video_size(&scale->w, &scale->h, scale->size_str)) < 0) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Invalid size '%s'\n", scale->size_str);
+ return ret;
+ }
+ snprintf(buf, sizeof(buf)-1, "%d", scale->w);
+ av_opt_set(scale, "w", buf, 0);
+ snprintf(buf, sizeof(buf)-1, "%d", scale->h);
+ av_opt_set(scale, "h", buf, 0);
+ }
+ if (!scale->w_expr)
+ av_opt_set(scale, "w", "iw", 0);
+ if (!scale->h_expr)
+ av_opt_set(scale, "h", "ih", 0);
+
+ av_log(ctx, AV_LOG_VERBOSE, "w:%s h:%s flags:'%s' interl:%d\n",
+ scale->w_expr, scale->h_expr, (char *)av_x_if_null(scale->flags_str, ""), scale->interlaced);
+
+ scale->flags = 0;
if (scale->flags_str) {
const AVClass *class = sws_get_class();
const AVOption *o = av_opt_find(&class, "sws_flags", NULL, 0,
AV_OPT_SEARCH_FAKE_OBJ);
int ret = av_opt_eval_flags(&class, o, scale->flags_str, &scale->flags);
-
if (ret < 0)
return ret;
}
+ scale->opts = *opts;
+ *opts = NULL;
return 0;
}
@@ -111,7 +149,10 @@ static av_cold void uninit(AVFilterContext *ctx)
{
ScaleContext *scale = ctx->priv;
sws_freeContext(scale->sws);
+ sws_freeContext(scale->isws[0]);
+ sws_freeContext(scale->isws[1]);
scale->sws = NULL;
+ av_dict_free(&scale->opts);
}
static int query_formats(AVFilterContext *ctx)
@@ -128,91 +169,87 @@ static int query_formats(AVFilterContext *ctx)
if ((sws_isSupportedInput(pix_fmt) ||
sws_isSupportedEndiannessConversion(pix_fmt))
&& (ret = ff_add_format(&formats, pix_fmt)) < 0) {
- ff_formats_unref(&formats);
return ret;
}
}
- ff_formats_ref(formats, &ctx->inputs[0]->out_formats);
+ if ((ret = ff_formats_ref(formats, &ctx->inputs[0]->out_formats)) < 0)
+ return ret;
}
if (ctx->outputs[0]) {
const AVPixFmtDescriptor *desc = NULL;
formats = NULL;
while ((desc = av_pix_fmt_desc_next(desc))) {
pix_fmt = av_pix_fmt_desc_get_id(desc);
- if ((sws_isSupportedOutput(pix_fmt) ||
+ if ((sws_isSupportedOutput(pix_fmt) || pix_fmt == AV_PIX_FMT_PAL8 ||
sws_isSupportedEndiannessConversion(pix_fmt))
&& (ret = ff_add_format(&formats, pix_fmt)) < 0) {
- ff_formats_unref(&formats);
return ret;
}
}
- ff_formats_ref(formats, &ctx->outputs[0]->in_formats);
+ if ((ret = ff_formats_ref(formats, &ctx->outputs[0]->in_formats)) < 0)
+ return ret;
}
return 0;
}
+static const int *parse_yuv_type(const char *s, enum AVColorSpace colorspace)
+{
+ if (!s)
+ s = "bt601";
+
+ if (s && strstr(s, "bt709")) {
+ colorspace = AVCOL_SPC_BT709;
+ } else if (s && strstr(s, "fcc")) {
+ colorspace = AVCOL_SPC_FCC;
+ } else if (s && strstr(s, "smpte240m")) {
+ colorspace = AVCOL_SPC_SMPTE240M;
+ } else if (s && (strstr(s, "bt601") || strstr(s, "bt470") || strstr(s, "smpte170m"))) {
+ colorspace = AVCOL_SPC_BT470BG;
+ } else if (s && strstr(s, "bt2020")) {
+ colorspace = AVCOL_SPC_BT2020_NCL;
+ }
+
+ if (colorspace < 1 || colorspace > 10 || colorspace == 8) {
+ colorspace = AVCOL_SPC_BT470BG;
+ }
+
+ return sws_getCoefficients(colorspace);
+}
+
static int config_props(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
- AVFilterLink *inlink = outlink->src->inputs[0];
- ScaleContext *scale = ctx->priv;
+ AVFilterLink *inlink0 = outlink->src->inputs[0];
+ AVFilterLink *inlink = ctx->filter == &ff_vf_scale2ref ?
+ outlink->src->inputs[1] :
+ outlink->src->inputs[0];
+ enum AVPixelFormat outfmt = outlink->format;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
- int64_t w, h;
- double var_values[VARS_NB], res;
- char *expr;
+ ScaleContext *scale = ctx->priv;
+ int w, h;
int ret;
- var_values[VAR_PI] = M_PI;
- var_values[VAR_PHI] = M_PHI;
- var_values[VAR_E] = M_E;
- var_values[VAR_IN_W] = var_values[VAR_IW] = inlink->w;
- var_values[VAR_IN_H] = var_values[VAR_IH] = inlink->h;
- var_values[VAR_OUT_W] = var_values[VAR_OW] = NAN;
- var_values[VAR_OUT_H] = var_values[VAR_OH] = NAN;
- var_values[VAR_A] = (double) inlink->w / inlink->h;
- var_values[VAR_SAR] = inlink->sample_aspect_ratio.num ?
- (double) inlink->sample_aspect_ratio.num / inlink->sample_aspect_ratio.den : 1;
- var_values[VAR_DAR] = var_values[VAR_A] * var_values[VAR_SAR];
- var_values[VAR_HSUB] = 1 << desc->log2_chroma_w;
- var_values[VAR_VSUB] = 1 << desc->log2_chroma_h;
-
- /* evaluate width and height */
- av_expr_parse_and_eval(&res, (expr = scale->w_expr),
- var_names, var_values,
- NULL, NULL, NULL, NULL, NULL, 0, ctx);
- scale->w = var_values[VAR_OUT_W] = var_values[VAR_OW] = res;
- if ((ret = av_expr_parse_and_eval(&res, (expr = scale->h_expr),
- var_names, var_values,
- NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
- goto fail;
- scale->h = var_values[VAR_OUT_H] = var_values[VAR_OH] = res;
- /* evaluate again the width, as it may depend on the output height */
- if ((ret = av_expr_parse_and_eval(&res, (expr = scale->w_expr),
- var_names, var_values,
- NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
+ if ((ret = ff_scale_eval_dimensions(ctx,
+ scale->w_expr, scale->h_expr,
+ inlink, outlink,
+ &w, &h)) < 0)
goto fail;
- scale->w = res;
- w = scale->w;
- h = scale->h;
-
- /* sanity check params */
- if (w < -1 || h < -1) {
- av_log(ctx, AV_LOG_ERROR, "Size values less than -1 are not acceptable.\n");
- return AVERROR(EINVAL);
+ /* Note that force_original_aspect_ratio may overwrite the previous set
+ * dimensions so that it is not divisible by the set factors anymore. */
+ if (scale->force_original_aspect_ratio) {
+ int tmp_w = av_rescale(h, inlink->w, inlink->h);
+ int tmp_h = av_rescale(w, inlink->h, inlink->w);
+
+ if (scale->force_original_aspect_ratio == 1) {
+ w = FFMIN(tmp_w, w);
+ h = FFMIN(tmp_h, h);
+ } else {
+ w = FFMAX(tmp_w, w);
+ h = FFMAX(tmp_h, h);
+ }
}
- if (w == -1 && h == -1)
- scale->w = scale->h = 0;
-
- if (!(w = scale->w))
- w = inlink->w;
- if (!(h = scale->h))
- h = inlink->h;
- if (w == -1)
- w = av_rescale(h, inlink->w, inlink->h);
- if (h == -1)
- h = av_rescale(w, inlink->h, inlink->w);
if (w > INT_MAX || h > INT_MAX ||
(h * inlink->w) > INT_MAX ||
@@ -223,49 +260,182 @@ static int config_props(AVFilterLink *outlink)
outlink->h = h;
/* TODO: make algorithm configurable */
- av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d fmt:%s -> w:%d h:%d fmt:%s flags:0x%0x\n",
- inlink ->w, inlink ->h, av_get_pix_fmt_name(inlink->format),
- outlink->w, outlink->h, av_get_pix_fmt_name(outlink->format),
- scale->flags);
scale->input_is_pal = desc->flags & AV_PIX_FMT_FLAG_PAL ||
desc->flags & AV_PIX_FMT_FLAG_PSEUDOPAL;
+ if (outfmt == AV_PIX_FMT_PAL8) outfmt = AV_PIX_FMT_BGR8;
+ scale->output_is_pal = av_pix_fmt_desc_get(outfmt)->flags & AV_PIX_FMT_FLAG_PAL ||
+ av_pix_fmt_desc_get(outfmt)->flags & AV_PIX_FMT_FLAG_PSEUDOPAL;
if (scale->sws)
sws_freeContext(scale->sws);
- if (inlink->w == outlink->w && inlink->h == outlink->h &&
- inlink->format == outlink->format)
- scale->sws = NULL;
+ if (scale->isws[0])
+ sws_freeContext(scale->isws[0]);
+ if (scale->isws[1])
+ sws_freeContext(scale->isws[1]);
+ scale->isws[0] = scale->isws[1] = scale->sws = NULL;
+ if (inlink0->w == outlink->w &&
+ inlink0->h == outlink->h &&
+ !scale->out_color_matrix &&
+ scale->in_range == scale->out_range &&
+ inlink0->format == outlink->format)
+ ;
else {
- scale->sws = sws_getContext(inlink ->w, inlink ->h, inlink ->format,
- outlink->w, outlink->h, outlink->format,
- scale->flags, NULL, NULL, scale->param);
- if (!scale->sws)
- return AVERROR(EINVAL);
- }
+ struct SwsContext **swscs[3] = {&scale->sws, &scale->isws[0], &scale->isws[1]};
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ int in_v_chr_pos = scale->in_v_chr_pos, out_v_chr_pos = scale->out_v_chr_pos;
+ struct SwsContext **s = swscs[i];
+ *s = sws_alloc_context();
+ if (!*s)
+ return AVERROR(ENOMEM);
+
+ av_opt_set_int(*s, "srcw", inlink0 ->w, 0);
+ av_opt_set_int(*s, "srch", inlink0 ->h >> !!i, 0);
+ av_opt_set_int(*s, "src_format", inlink0->format, 0);
+ av_opt_set_int(*s, "dstw", outlink->w, 0);
+ av_opt_set_int(*s, "dsth", outlink->h >> !!i, 0);
+ av_opt_set_int(*s, "dst_format", outfmt, 0);
+ av_opt_set_int(*s, "sws_flags", scale->flags, 0);
+ av_opt_set_int(*s, "param0", scale->param[0], 0);
+ av_opt_set_int(*s, "param1", scale->param[1], 0);
+ if (scale->in_range != AVCOL_RANGE_UNSPECIFIED)
+ av_opt_set_int(*s, "src_range",
+ scale->in_range == AVCOL_RANGE_JPEG, 0);
+ if (scale->out_range != AVCOL_RANGE_UNSPECIFIED)
+ av_opt_set_int(*s, "dst_range",
+ scale->out_range == AVCOL_RANGE_JPEG, 0);
+
+ if (scale->opts) {
+ AVDictionaryEntry *e = NULL;
+ while ((e = av_dict_get(scale->opts, "", e, AV_DICT_IGNORE_SUFFIX))) {
+ if ((ret = av_opt_set(*s, e->key, e->value, 0)) < 0)
+ return ret;
+ }
+ }
+ /* Override YUV420P default settings to have the correct (MPEG-2) chroma positions
+ * MPEG-2 chroma positions are used by convention
+ * XXX: support other 4:2:0 pixel formats */
+ if (inlink0->format == AV_PIX_FMT_YUV420P && scale->in_v_chr_pos == -513) {
+ in_v_chr_pos = (i == 0) ? 128 : (i == 1) ? 64 : 192;
+ }
+ if (outlink->format == AV_PIX_FMT_YUV420P && scale->out_v_chr_pos == -513) {
+ out_v_chr_pos = (i == 0) ? 128 : (i == 1) ? 64 : 192;
+ }
+
+ av_opt_set_int(*s, "src_h_chr_pos", scale->in_h_chr_pos, 0);
+ av_opt_set_int(*s, "src_v_chr_pos", in_v_chr_pos, 0);
+ av_opt_set_int(*s, "dst_h_chr_pos", scale->out_h_chr_pos, 0);
+ av_opt_set_int(*s, "dst_v_chr_pos", out_v_chr_pos, 0);
- if (inlink->sample_aspect_ratio.num)
- outlink->sample_aspect_ratio = av_mul_q((AVRational){outlink->h*inlink->w,
- outlink->w*inlink->h},
- inlink->sample_aspect_ratio);
- else
+ if ((ret = sws_init_context(*s, NULL, NULL)) < 0)
+ return ret;
+ if (!scale->interlaced)
+ break;
+ }
+ }
+
+ if (inlink->sample_aspect_ratio.num){
+ outlink->sample_aspect_ratio = av_mul_q((AVRational){outlink->h * inlink->w, outlink->w * inlink->h}, inlink->sample_aspect_ratio);
+ } else
outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
+ av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d fmt:%s sar:%d/%d -> w:%d h:%d fmt:%s sar:%d/%d flags:0x%0x\n",
+ inlink ->w, inlink ->h, av_get_pix_fmt_name( inlink->format),
+ inlink->sample_aspect_ratio.num, inlink->sample_aspect_ratio.den,
+ outlink->w, outlink->h, av_get_pix_fmt_name(outlink->format),
+ outlink->sample_aspect_ratio.num, outlink->sample_aspect_ratio.den,
+ scale->flags);
return 0;
fail:
- av_log(NULL, AV_LOG_ERROR,
- "Error when evaluating the expression '%s'\n", expr);
return ret;
}
+static int config_props_ref(AVFilterLink *outlink)
+{
+ AVFilterLink *inlink = outlink->src->inputs[1];
+
+ outlink->w = inlink->w;
+ outlink->h = inlink->h;
+ outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
+ outlink->time_base = inlink->time_base;
+
+ return 0;
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ return ff_request_frame(outlink->src->inputs[0]);
+}
+
+static int request_frame_ref(AVFilterLink *outlink)
+{
+ return ff_request_frame(outlink->src->inputs[1]);
+}
+
+static int scale_slice(AVFilterLink *link, AVFrame *out_buf, AVFrame *cur_pic, struct SwsContext *sws, int y, int h, int mul, int field)
+{
+ ScaleContext *scale = link->dst->priv;
+ const uint8_t *in[4];
+ uint8_t *out[4];
+ int in_stride[4],out_stride[4];
+ int i;
+
+ for(i=0; i<4; i++){
+ int vsub= ((i+1)&2) ? scale->vsub : 0;
+ in_stride[i] = cur_pic->linesize[i] * mul;
+ out_stride[i] = out_buf->linesize[i] * mul;
+ in[i] = cur_pic->data[i] + ((y>>vsub)+field) * cur_pic->linesize[i];
+ out[i] = out_buf->data[i] + field * out_buf->linesize[i];
+ }
+ if(scale->input_is_pal)
+ in[1] = cur_pic->data[1];
+ if(scale->output_is_pal)
+ out[1] = out_buf->data[1];
+
+ return sws_scale(sws, in, in_stride, y/mul, h,
+ out,out_stride);
+}
+
static int filter_frame(AVFilterLink *link, AVFrame *in)
{
ScaleContext *scale = link->dst->priv;
AVFilterLink *outlink = link->dst->outputs[0];
AVFrame *out;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format);
+ char buf[32];
+ int in_range;
+
+ if (av_frame_get_colorspace(in) == AVCOL_SPC_YCGCO)
+ av_log(link->dst, AV_LOG_WARNING, "Detected unsupported YCgCo colorspace.\n");
+
+ if( in->width != link->w
+ || in->height != link->h
+ || in->format != link->format
+ || in->sample_aspect_ratio.den != link->sample_aspect_ratio.den || in->sample_aspect_ratio.num != link->sample_aspect_ratio.num) {
+ int ret;
+
+ if (scale->eval_mode == EVAL_MODE_INIT) {
+ snprintf(buf, sizeof(buf)-1, "%d", outlink->w);
+ av_opt_set(scale, "w", buf, 0);
+ snprintf(buf, sizeof(buf)-1, "%d", outlink->h);
+ av_opt_set(scale, "h", buf, 0);
+ }
+
+ link->dst->inputs[0]->format = in->format;
+ link->dst->inputs[0]->w = in->width;
+ link->dst->inputs[0]->h = in->height;
+
+ link->dst->inputs[0]->sample_aspect_ratio.den = in->sample_aspect_ratio.den;
+ link->dst->inputs[0]->sample_aspect_ratio.num = in->sample_aspect_ratio.num;
+
+
+ if ((ret = config_props(outlink)) < 0)
+ return ret;
+ }
if (!scale->sws)
return ff_filter_frame(outlink, in);
@@ -283,40 +453,165 @@ static int filter_frame(AVFilterLink *link, AVFrame *in)
out->width = outlink->w;
out->height = outlink->h;
+ if(scale->output_is_pal)
+ avpriv_set_systematic_pal2((uint32_t*)out->data[1], outlink->format == AV_PIX_FMT_PAL8 ? AV_PIX_FMT_BGR8 : outlink->format);
+
+ in_range = av_frame_get_color_range(in);
+
+ if ( scale->in_color_matrix
+ || scale->out_color_matrix
+ || scale-> in_range != AVCOL_RANGE_UNSPECIFIED
+ || in_range != AVCOL_RANGE_UNSPECIFIED
+ || scale->out_range != AVCOL_RANGE_UNSPECIFIED) {
+ int in_full, out_full, brightness, contrast, saturation;
+ const int *inv_table, *table;
+
+ sws_getColorspaceDetails(scale->sws, (int **)&inv_table, &in_full,
+ (int **)&table, &out_full,
+ &brightness, &contrast, &saturation);
+
+ if (scale->in_color_matrix)
+ inv_table = parse_yuv_type(scale->in_color_matrix, av_frame_get_colorspace(in));
+ if (scale->out_color_matrix)
+ table = parse_yuv_type(scale->out_color_matrix, AVCOL_SPC_UNSPECIFIED);
+ else if (scale->in_color_matrix)
+ table = inv_table;
+
+ if (scale-> in_range != AVCOL_RANGE_UNSPECIFIED)
+ in_full = (scale-> in_range == AVCOL_RANGE_JPEG);
+ else if (in_range != AVCOL_RANGE_UNSPECIFIED)
+ in_full = (in_range == AVCOL_RANGE_JPEG);
+ if (scale->out_range != AVCOL_RANGE_UNSPECIFIED)
+ out_full = (scale->out_range == AVCOL_RANGE_JPEG);
+
+ sws_setColorspaceDetails(scale->sws, inv_table, in_full,
+ table, out_full,
+ brightness, contrast, saturation);
+ if (scale->isws[0])
+ sws_setColorspaceDetails(scale->isws[0], inv_table, in_full,
+ table, out_full,
+ brightness, contrast, saturation);
+ if (scale->isws[1])
+ sws_setColorspaceDetails(scale->isws[1], inv_table, in_full,
+ table, out_full,
+ brightness, contrast, saturation);
+
+ av_frame_set_color_range(out, out_full ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG);
+ }
+
av_reduce(&out->sample_aspect_ratio.num, &out->sample_aspect_ratio.den,
(int64_t)in->sample_aspect_ratio.num * outlink->h * link->w,
(int64_t)in->sample_aspect_ratio.den * outlink->w * link->h,
INT_MAX);
- sws_scale(scale->sws, in->data, in->linesize, 0, in->height,
- out->data, out->linesize);
+ if(scale->interlaced>0 || (scale->interlaced<0 && in->interlaced_frame)){
+ scale_slice(link, out, in, scale->isws[0], 0, (link->h+1)/2, 2, 0);
+ scale_slice(link, out, in, scale->isws[1], 0, link->h /2, 2, 1);
+ }else if (scale->nb_slices) {
+ int i, slice_h, slice_start, slice_end = 0;
+ const int nb_slices = FFMIN(scale->nb_slices, link->h);
+ for (i = 0; i < nb_slices; i++) {
+ slice_start = slice_end;
+ slice_end = (link->h * (i+1)) / nb_slices;
+ slice_h = slice_end - slice_start;
+ scale_slice(link, out, in, scale->sws, slice_start, slice_h, 1, 0);
+ }
+ }else{
+ scale_slice(link, out, in, scale->sws, 0, link->h, 1, 0);
+ }
av_frame_free(&in);
return ff_filter_frame(outlink, out);
}
+static int filter_frame_ref(AVFilterLink *link, AVFrame *in)
+{
+ AVFilterLink *outlink = link->dst->outputs[1];
+
+ return ff_filter_frame(outlink, in);
+}
+
+static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
+ char *res, int res_len, int flags)
+{
+ ScaleContext *scale = ctx->priv;
+ int ret;
+
+ if ( !strcmp(cmd, "width") || !strcmp(cmd, "w")
+ || !strcmp(cmd, "height") || !strcmp(cmd, "h")) {
+
+ int old_w = scale->w;
+ int old_h = scale->h;
+ AVFilterLink *outlink = ctx->outputs[0];
+
+ av_opt_set(scale, cmd, args, 0);
+ if ((ret = config_props(outlink)) < 0) {
+ scale->w = old_w;
+ scale->h = old_h;
+ }
+ } else
+ ret = AVERROR(ENOSYS);
+
+ return ret;
+}
+
+static const AVClass *child_class_next(const AVClass *prev)
+{
+ return prev ? NULL : sws_get_class();
+}
+
#define OFFSET(x) offsetof(ScaleContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
-static const AVOption options[] = {
- { "w", "Output video width", OFFSET(w_expr), AV_OPT_TYPE_STRING, { .str = "iw" }, .flags = FLAGS },
- { "h", "Output video height", OFFSET(h_expr), AV_OPT_TYPE_STRING, { .str = "ih" }, .flags = FLAGS },
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption scale_options[] = {
+ { "w", "Output video width", OFFSET(w_expr), AV_OPT_TYPE_STRING, .flags = FLAGS },
+ { "width", "Output video width", OFFSET(w_expr), AV_OPT_TYPE_STRING, .flags = FLAGS },
+ { "h", "Output video height", OFFSET(h_expr), AV_OPT_TYPE_STRING, .flags = FLAGS },
+ { "height","Output video height", OFFSET(h_expr), AV_OPT_TYPE_STRING, .flags = FLAGS },
{ "flags", "Flags to pass to libswscale", OFFSET(flags_str), AV_OPT_TYPE_STRING, { .str = "bilinear" }, .flags = FLAGS },
+ { "interl", "set interlacing", OFFSET(interlaced), AV_OPT_TYPE_BOOL, {.i64 = 0 }, -1, 1, FLAGS },
+ { "size", "set video size", OFFSET(size_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, FLAGS },
+ { "s", "set video size", OFFSET(size_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, FLAGS },
+ { "in_color_matrix", "set input YCbCr type", OFFSET(in_color_matrix), AV_OPT_TYPE_STRING, { .str = "auto" }, .flags = FLAGS },
+ { "out_color_matrix", "set output YCbCr type", OFFSET(out_color_matrix), AV_OPT_TYPE_STRING, { .str = NULL }, .flags = FLAGS },
+ { "in_range", "set input color range", OFFSET( in_range), AV_OPT_TYPE_INT, {.i64 = AVCOL_RANGE_UNSPECIFIED }, 0, 2, FLAGS, "range" },
+ { "out_range", "set output color range", OFFSET(out_range), AV_OPT_TYPE_INT, {.i64 = AVCOL_RANGE_UNSPECIFIED }, 0, 2, FLAGS, "range" },
+ { "auto", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_UNSPECIFIED }, 0, 0, FLAGS, "range" },
+ { "full", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_JPEG}, 0, 0, FLAGS, "range" },
+ { "jpeg", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_JPEG}, 0, 0, FLAGS, "range" },
+ { "mpeg", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_MPEG}, 0, 0, FLAGS, "range" },
+ { "tv", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_MPEG}, 0, 0, FLAGS, "range" },
+ { "pc", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_JPEG}, 0, 0, FLAGS, "range" },
+ { "in_v_chr_pos", "input vertical chroma position in luma grid/256" , OFFSET(in_v_chr_pos), AV_OPT_TYPE_INT, { .i64 = -513}, -513, 512, FLAGS },
+ { "in_h_chr_pos", "input horizontal chroma position in luma grid/256", OFFSET(in_h_chr_pos), AV_OPT_TYPE_INT, { .i64 = -513}, -513, 512, FLAGS },
+ { "out_v_chr_pos", "output vertical chroma position in luma grid/256" , OFFSET(out_v_chr_pos), AV_OPT_TYPE_INT, { .i64 = -513}, -513, 512, FLAGS },
+ { "out_h_chr_pos", "output horizontal chroma position in luma grid/256", OFFSET(out_h_chr_pos), AV_OPT_TYPE_INT, { .i64 = -513}, -513, 512, FLAGS },
+ { "force_original_aspect_ratio", "decrease or increase w/h if necessary to keep the original AR", OFFSET(force_original_aspect_ratio), AV_OPT_TYPE_INT, { .i64 = 0}, 0, 2, FLAGS, "force_oar" },
+ { "disable", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 0 }, 0, 0, FLAGS, "force_oar" },
+ { "decrease", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 1 }, 0, 0, FLAGS, "force_oar" },
+ { "increase", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 2 }, 0, 0, FLAGS, "force_oar" },
{ "param0", "Scaler param 0", OFFSET(param[0]), AV_OPT_TYPE_DOUBLE, { .dbl = SWS_PARAM_DEFAULT }, INT_MIN, INT_MAX, FLAGS },
{ "param1", "Scaler param 1", OFFSET(param[1]), AV_OPT_TYPE_DOUBLE, { .dbl = SWS_PARAM_DEFAULT }, INT_MIN, INT_MAX, FLAGS },
- { NULL },
+ { "nb_slices", "set the number of slices (debug purpose only)", OFFSET(nb_slices), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, FLAGS },
+ { "eval", "specify when to evaluate expressions", OFFSET(eval_mode), AV_OPT_TYPE_INT, {.i64 = EVAL_MODE_INIT}, 0, EVAL_MODE_NB-1, FLAGS, "eval" },
+ { "init", "eval expressions once during initialization", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_INIT}, .flags = FLAGS, .unit = "eval" },
+ { "frame", "eval expressions during initialization and per-frame", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_FRAME}, .flags = FLAGS, .unit = "eval" },
+ { NULL }
};
static const AVClass scale_class = {
- .class_name = "scale",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
+ .class_name = "scale",
+ .item_name = av_default_item_name,
+ .option = scale_options,
+ .version = LIBAVUTIL_VERSION_INT,
+ .category = AV_CLASS_CATEGORY_FILTER,
+ .child_class_next = child_class_next,
};
static const AVFilterPad avfilter_vf_scale_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
},
{ NULL }
@@ -332,17 +627,66 @@ static const AVFilterPad avfilter_vf_scale_outputs[] = {
};
AVFilter ff_vf_scale = {
- .name = "scale",
- .description = NULL_IF_CONFIG_SMALL("Scale the input video to width:height size and/or convert the image format."),
+ .name = "scale",
+ .description = NULL_IF_CONFIG_SMALL("Scale the input video size and/or convert the image format."),
+ .init_dict = init_dict,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .priv_size = sizeof(ScaleContext),
+ .priv_class = &scale_class,
+ .inputs = avfilter_vf_scale_inputs,
+ .outputs = avfilter_vf_scale_outputs,
+ .process_command = process_command,
+};
- .init = init,
- .uninit = uninit,
+static const AVClass scale2ref_class = {
+ .class_name = "scale2ref",
+ .item_name = av_default_item_name,
+ .option = scale_options,
+ .version = LIBAVUTIL_VERSION_INT,
+ .category = AV_CLASS_CATEGORY_FILTER,
+ .child_class_next = child_class_next,
+};
- .query_formats = query_formats,
+static const AVFilterPad avfilter_vf_scale2ref_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ {
+ .name = "ref",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame_ref,
+ },
+ { NULL }
+};
- .priv_size = sizeof(ScaleContext),
- .priv_class = &scale_class,
+static const AVFilterPad avfilter_vf_scale2ref_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_props,
+ .request_frame= request_frame,
+ },
+ {
+ .name = "ref",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_props_ref,
+ .request_frame= request_frame_ref,
+ },
+ { NULL }
+};
- .inputs = avfilter_vf_scale_inputs,
- .outputs = avfilter_vf_scale_outputs,
+AVFilter ff_vf_scale2ref = {
+ .name = "scale2ref",
+ .description = NULL_IF_CONFIG_SMALL("Scale the input video size and/or convert the image format to the given reference."),
+ .init_dict = init_dict,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .priv_size = sizeof(ScaleContext),
+ .priv_class = &scale2ref_class,
+ .inputs = avfilter_vf_scale2ref_inputs,
+ .outputs = avfilter_vf_scale2ref_outputs,
+ .process_command = process_command,
};
diff --git a/libavfilter/vf_scale_npp.c b/libavfilter/vf_scale_npp.c
index 2fb4990953..4b45174742 100644
--- a/libavfilter/vf_scale_npp.c
+++ b/libavfilter/vf_scale_npp.c
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -27,17 +27,16 @@
#include "libavutil/avstring.h"
#include "libavutil/common.h"
-#include "libavutil/eval.h"
#include "libavutil/hwcontext.h"
-#include "libavutil/hwcontext_cuda.h"
+#include "libavutil/hwcontext_cuda_internal.h"
#include "libavutil/internal.h"
-#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
+#include "scale.h"
#include "video.h"
static const enum AVPixelFormat supported_formats[] = {
@@ -50,32 +49,6 @@ static const enum AVPixelFormat deinterleaved_formats[][2] = {
{ AV_PIX_FMT_NV12, AV_PIX_FMT_YUV420P },
};
-static const char *const var_names[] = {
- "PI",
- "PHI",
- "E",
- "in_w", "iw",
- "in_h", "ih",
- "out_w", "ow",
- "out_h", "oh",
- "a", "dar",
- "sar",
- NULL
-};
-
-enum var_name {
- VAR_PI,
- VAR_PHI,
- VAR_E,
- VAR_IN_W, VAR_IW,
- VAR_IN_H, VAR_IH,
- VAR_OUT_W, VAR_OW,
- VAR_OUT_H, VAR_OH,
- VAR_A, VAR_DAR,
- VAR_SAR,
- VARS_NB
-};
-
enum ScaleStage {
STAGE_DEINTERLEAVE,
STAGE_RESIZE,
@@ -169,11 +142,9 @@ static int nppscale_query_formats(AVFilterContext *ctx)
static const enum AVPixelFormat pixel_formats[] = {
AV_PIX_FMT_CUDA, AV_PIX_FMT_NONE,
};
- AVFilterFormats *pix_fmts = ff_make_format_list(pixel_formats);
+ AVFilterFormats *pix_fmts = ff_make_format_list(pixel_formats);
- ff_set_common_formats(ctx, pix_fmts);
-
- return 0;
+ return ff_set_common_formats(ctx, pix_fmts);
}
static int init_stage(NPPScaleStageContext *stage, AVBufferRef *device_ctx)
@@ -294,9 +265,21 @@ static int init_processing_chain(AVFilterContext *ctx, int in_width, int in_heig
/* figure out which stages need to be done */
if (in_width != out_width || in_height != out_height ||
- in_deinterleaved_format != out_deinterleaved_format)
+ in_deinterleaved_format != out_deinterleaved_format) {
s->stages[STAGE_RESIZE].stage_needed = 1;
+ if (s->interp_algo == NPPI_INTER_SUPER &&
+ (out_width > in_width && out_height > in_height)) {
+ s->interp_algo = NPPI_INTER_LANCZOS;
+ av_log(ctx, AV_LOG_WARNING, "super-sampling not supported for output dimensions, using lanczos instead.\n");
+ }
+ if (s->interp_algo == NPPI_INTER_SUPER &&
+ !(out_width < in_width && out_height < in_height)) {
+ s->interp_algo = NPPI_INTER_CUBIC;
+ av_log(ctx, AV_LOG_WARNING, "super-sampling not supported for output dimensions, using cubic instead.\n");
+ }
+ }
+
if (!s->stages[STAGE_RESIZE].stage_needed && in_format == out_format)
s->passthrough = 1;
@@ -349,64 +332,18 @@ static int nppscale_config_props(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
AVFilterLink *inlink = outlink->src->inputs[0];
- NPPScaleContext *s = ctx->priv;
- int64_t w, h;
- double var_values[VARS_NB], res;
- char *expr;
+ NPPScaleContext *s = ctx->priv;
+ int w, h;
int ret;
- var_values[VAR_PI] = M_PI;
- var_values[VAR_PHI] = M_PHI;
- var_values[VAR_E] = M_E;
- var_values[VAR_IN_W] = var_values[VAR_IW] = inlink->w;
- var_values[VAR_IN_H] = var_values[VAR_IH] = inlink->h;
- var_values[VAR_OUT_W] = var_values[VAR_OW] = NAN;
- var_values[VAR_OUT_H] = var_values[VAR_OH] = NAN;
- var_values[VAR_A] = (double) inlink->w / inlink->h;
- var_values[VAR_SAR] = inlink->sample_aspect_ratio.num ?
- (double) inlink->sample_aspect_ratio.num / inlink->sample_aspect_ratio.den : 1;
- var_values[VAR_DAR] = var_values[VAR_A] * var_values[VAR_SAR];
-
- /* evaluate width and height */
- av_expr_parse_and_eval(&res, (expr = s->w_expr),
- var_names, var_values,
- NULL, NULL, NULL, NULL, NULL, 0, ctx);
- s->w = var_values[VAR_OUT_W] = var_values[VAR_OW] = res;
- if ((ret = av_expr_parse_and_eval(&res, (expr = s->h_expr),
- var_names, var_values,
- NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
- goto fail;
- s->h = var_values[VAR_OUT_H] = var_values[VAR_OH] = res;
- /* evaluate again the width, as it may depend on the output height */
- if ((ret = av_expr_parse_and_eval(&res, (expr = s->w_expr),
- var_names, var_values,
- NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
+ if ((ret = ff_scale_eval_dimensions(s,
+ s->w_expr, s->h_expr,
+ inlink, outlink,
+ &w, &h)) < 0)
goto fail;
- s->w = res;
- w = s->w;
- h = s->h;
-
- /* sanity check params */
- if (w < -1 || h < -1) {
- av_log(ctx, AV_LOG_ERROR, "Size values less than -1 are not acceptable.\n");
- return AVERROR(EINVAL);
- }
- if (w == -1 && h == -1)
- s->w = s->h = 0;
-
- if (!(w = s->w))
- w = inlink->w;
- if (!(h = s->h))
- h = inlink->h;
- if (w == -1)
- w = av_rescale(h, inlink->w, inlink->h);
- if (h == -1)
- h = av_rescale(w, inlink->h, inlink->w);
-
- if (w > INT_MAX || h > INT_MAX ||
- (h * inlink->w) > INT_MAX ||
- (w * inlink->h) > INT_MAX)
+ if (((int64_t)h * inlink->w) > INT_MAX ||
+ ((int64_t)w * inlink->h) > INT_MAX)
av_log(ctx, AV_LOG_ERROR, "Rescaled value for width or height is too big.\n");
outlink->w = w;
@@ -429,8 +366,6 @@ static int nppscale_config_props(AVFilterLink *outlink)
return 0;
fail:
- av_log(NULL, AV_LOG_ERROR,
- "Error when evaluating the expression '%s'\n", expr);
return ret;
}
@@ -574,12 +509,7 @@ static int nppscale_filter_frame(AVFilterLink *link, AVFrame *in)
goto fail;
}
- av_reduce(&out->sample_aspect_ratio.num, &out->sample_aspect_ratio.den,
- (int64_t)in->sample_aspect_ratio.num * outlink->h * link->w,
- (int64_t)in->sample_aspect_ratio.den * outlink->w * link->h,
- INT_MAX);
-
- err = cuCtxPushCurrent(device_hwctx->cuda_ctx);
+ err = device_hwctx->internal->cuda_dl->cuCtxPushCurrent(device_hwctx->cuda_ctx);
if (err != CUDA_SUCCESS) {
ret = AVERROR_UNKNOWN;
goto fail;
@@ -587,10 +517,15 @@ static int nppscale_filter_frame(AVFilterLink *link, AVFrame *in)
ret = nppscale_scale(ctx, out, in);
- cuCtxPopCurrent(&dummy);
+ device_hwctx->internal->cuda_dl->cuCtxPopCurrent(&dummy);
if (ret < 0)
goto fail;
+ av_reduce(&out->sample_aspect_ratio.num, &out->sample_aspect_ratio.den,
+ (int64_t)in->sample_aspect_ratio.num * outlink->h * link->w,
+ (int64_t)in->sample_aspect_ratio.den * outlink->w * link->h,
+ INT_MAX);
+
av_frame_free(&in);
return ff_filter_frame(outlink, out);
fail:
@@ -600,7 +535,7 @@ fail:
}
#define OFFSET(x) offsetof(NPPScaleContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
+#define FLAGS (AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM)
static const AVOption options[] = {
{ "w", "Output video width", OFFSET(w_expr), AV_OPT_TYPE_STRING, { .str = "iw" }, .flags = FLAGS },
{ "h", "Output video height", OFFSET(h_expr), AV_OPT_TYPE_STRING, { .str = "ih" }, .flags = FLAGS },
diff --git a/libavfilter/vf_scale_qsv.c b/libavfilter/vf_scale_qsv.c
index e5c3da5a51..88fca8b461 100644
--- a/libavfilter/vf_scale_qsv.c
+++ b/libavfilter/vf_scale_qsv.c
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -153,8 +153,10 @@ static int qsvscale_query_formats(AVFilterContext *ctx)
AV_PIX_FMT_QSV, AV_PIX_FMT_NONE,
};
AVFilterFormats *pix_fmts = ff_make_format_list(pixel_formats);
+ int ret;
- ff_set_common_formats(ctx, pix_fmts);
+ if ((ret = ff_set_common_formats(ctx, pix_fmts)) < 0)
+ return ret;
return 0;
}
diff --git a/libavfilter/vf_scale_vaapi.c b/libavfilter/vf_scale_vaapi.c
index 704456dd39..8221849ee0 100644
--- a/libavfilter/vf_scale_vaapi.c
+++ b/libavfilter/vf_scale_vaapi.c
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -31,6 +31,7 @@
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
+#include "scale.h"
typedef struct ScaleVAAPIContext {
const AVClass *class;
@@ -50,9 +51,12 @@ typedef struct ScaleVAAPIContext {
char *output_format_string;
enum AVPixelFormat output_format;
- int output_width;
- int output_height;
+ char *w_expr; // width expression string
+ char *h_expr; // height expression string
+
+ int output_width; // computed width
+ int output_height; // computed height
} ScaleVAAPIContext;
@@ -61,11 +65,14 @@ static int scale_vaapi_query_formats(AVFilterContext *avctx)
enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_VAAPI, AV_PIX_FMT_NONE,
};
+ int err;
- ff_formats_ref(ff_make_format_list(pix_fmts),
- &avctx->inputs[0]->out_formats);
- ff_formats_ref(ff_make_format_list(pix_fmts),
- &avctx->outputs[0]->in_formats);
+ if ((err = ff_formats_ref(ff_make_format_list(pix_fmts),
+ &avctx->inputs[0]->out_formats)) < 0)
+ return err;
+ if ((err = ff_formats_ref(ff_make_format_list(pix_fmts),
+ &avctx->outputs[0]->in_formats)) < 0)
+ return err;
return 0;
}
@@ -110,6 +117,7 @@ static int scale_vaapi_config_input(AVFilterLink *inlink)
static int scale_vaapi_config_output(AVFilterLink *outlink)
{
+ AVFilterLink *inlink = outlink->src->inputs[0];
AVFilterContext *avctx = outlink->src;
ScaleVAAPIContext *ctx = avctx->priv;
AVVAAPIHWConfig *hwconfig = NULL;
@@ -162,6 +170,12 @@ static int scale_vaapi_config_output(AVFilterLink *outlink)
}
}
+ if ((err = ff_scale_eval_dimensions(ctx,
+ ctx->w_expr, ctx->h_expr,
+ inlink, outlink,
+ &ctx->output_width, &ctx->output_height)) < 0)
+ goto fail;
+
if (ctx->output_width < constraints->min_width ||
ctx->output_height < constraints->min_height ||
ctx->output_width > constraints->max_width ||
@@ -421,12 +435,12 @@ static av_cold void scale_vaapi_uninit(AVFilterContext *avctx)
#define OFFSET(x) offsetof(ScaleVAAPIContext, x)
-#define FLAGS (AV_OPT_FLAG_VIDEO_PARAM)
+#define FLAGS (AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM)
static const AVOption scale_vaapi_options[] = {
{ "w", "Output video width",
- OFFSET(output_width), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, .flags = FLAGS },
+ OFFSET(w_expr), AV_OPT_TYPE_STRING, {.str = "iw"}, .flags = FLAGS },
{ "h", "Output video height",
- OFFSET(output_height), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, .flags = FLAGS },
+ OFFSET(h_expr), AV_OPT_TYPE_STRING, {.str = "ih"}, .flags = FLAGS },
{ "format", "Output video format (software format of hardware frames)",
OFFSET(output_format_string), AV_OPT_TYPE_STRING, .flags = FLAGS },
{ NULL },
diff --git a/libavfilter/vf_select.c b/libavfilter/vf_select.c
deleted file mode 100644
index 4139e78333..0000000000
--- a/libavfilter/vf_select.c
+++ /dev/null
@@ -1,350 +0,0 @@
-/*
- * Copyright (c) 2011 Stefano Sabatini
- *
- * This file is part of Libav.
- *
- * Libav is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * Libav is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-/**
- * @file
- * filter for selecting which frame passes in the filterchain
- */
-
-#include "libavutil/eval.h"
-#include "libavutil/fifo.h"
-#include "libavutil/internal.h"
-#include "libavutil/mathematics.h"
-#include "libavutil/opt.h"
-#include "avfilter.h"
-#include "internal.h"
-#include "video.h"
-
-static const char *const var_names[] = {
- "E", ///< Euler number
- "PHI", ///< golden ratio
- "PI", ///< Greek pi
-
- "TB", ///< timebase
-
- "pts", ///< original pts in the file of the frame
- "start_pts", ///< first PTS in the stream, expressed in TB units
- "prev_pts", ///< previous frame PTS
- "prev_selected_pts", ///< previous selected frame PTS
-
- "t", ///< first PTS in seconds
- "start_t", ///< first PTS in the stream, expressed in seconds
- "prev_t", ///< previous frame time
- "prev_selected_t", ///< previously selected time
-
- "pict_type", ///< the type of picture in the movie
- "I",
- "P",
- "B",
- "S",
- "SI",
- "SP",
- "BI",
-
- "interlace_type", ///< the frame interlace type
- "PROGRESSIVE",
- "TOPFIRST",
- "BOTTOMFIRST",
-
- "n", ///< frame number (starting from zero)
- "selected_n", ///< selected frame number (starting from zero)
- "prev_selected_n", ///< number of the last selected frame
-
- "key", ///< tell if the frame is a key frame
- "pos", ///< original position in the file of the frame
-
- NULL
-};
-
-enum var_name {
- VAR_E,
- VAR_PHI,
- VAR_PI,
-
- VAR_TB,
-
- VAR_PTS,
- VAR_START_PTS,
- VAR_PREV_PTS,
- VAR_PREV_SELECTED_PTS,
-
- VAR_T,
- VAR_START_T,
- VAR_PREV_T,
- VAR_PREV_SELECTED_T,
-
- VAR_PICT_TYPE,
- VAR_PICT_TYPE_I,
- VAR_PICT_TYPE_P,
- VAR_PICT_TYPE_B,
- VAR_PICT_TYPE_S,
- VAR_PICT_TYPE_SI,
- VAR_PICT_TYPE_SP,
- VAR_PICT_TYPE_BI,
-
- VAR_INTERLACE_TYPE,
- VAR_INTERLACE_TYPE_P,
- VAR_INTERLACE_TYPE_T,
- VAR_INTERLACE_TYPE_B,
-
- VAR_N,
- VAR_SELECTED_N,
- VAR_PREV_SELECTED_N,
-
- VAR_KEY,
-
- VAR_VARS_NB
-};
-
-#define FIFO_SIZE 8
-
-typedef struct SelectContext {
- const AVClass *class;
- char *expr_str;
- AVExpr *expr;
- double var_values[VAR_VARS_NB];
- double select;
- int cache_frames;
- AVFifoBuffer *pending_frames; ///< FIFO buffer of video frames
-} SelectContext;
-
-static av_cold int init(AVFilterContext *ctx)
-{
- SelectContext *select = ctx->priv;
- int ret;
-
- if ((ret = av_expr_parse(&select->expr, select->expr_str,
- var_names, NULL, NULL, NULL, NULL, 0, ctx)) < 0) {
- av_log(ctx, AV_LOG_ERROR, "Error while parsing expression '%s'\n",
- select->expr_str);
- return ret;
- }
-
- select->pending_frames = av_fifo_alloc(FIFO_SIZE*sizeof(AVFrame*));
- if (!select->pending_frames) {
- av_log(ctx, AV_LOG_ERROR, "Failed to allocate pending frames buffer.\n");
- return AVERROR(ENOMEM);
- }
- return 0;
-}
-
-#define INTERLACE_TYPE_P 0
-#define INTERLACE_TYPE_T 1
-#define INTERLACE_TYPE_B 2
-
-static int config_input(AVFilterLink *inlink)
-{
- SelectContext *select = inlink->dst->priv;
-
- select->var_values[VAR_E] = M_E;
- select->var_values[VAR_PHI] = M_PHI;
- select->var_values[VAR_PI] = M_PI;
-
- select->var_values[VAR_N] = 0.0;
- select->var_values[VAR_SELECTED_N] = 0.0;
-
- select->var_values[VAR_TB] = av_q2d(inlink->time_base);
-
- select->var_values[VAR_PREV_PTS] = NAN;
- select->var_values[VAR_PREV_SELECTED_PTS] = NAN;
- select->var_values[VAR_PREV_SELECTED_T] = NAN;
- select->var_values[VAR_START_PTS] = NAN;
- select->var_values[VAR_START_T] = NAN;
-
- select->var_values[VAR_PICT_TYPE_I] = AV_PICTURE_TYPE_I;
- select->var_values[VAR_PICT_TYPE_P] = AV_PICTURE_TYPE_P;
- select->var_values[VAR_PICT_TYPE_B] = AV_PICTURE_TYPE_B;
- select->var_values[VAR_PICT_TYPE_SI] = AV_PICTURE_TYPE_SI;
- select->var_values[VAR_PICT_TYPE_SP] = AV_PICTURE_TYPE_SP;
-
- select->var_values[VAR_INTERLACE_TYPE_P] = INTERLACE_TYPE_P;
- select->var_values[VAR_INTERLACE_TYPE_T] = INTERLACE_TYPE_T;
- select->var_values[VAR_INTERLACE_TYPE_B] = INTERLACE_TYPE_B;;
-
- return 0;
-}
-
-#define D2TS(d) (isnan(d) ? AV_NOPTS_VALUE : (int64_t)(d))
-#define TS2D(ts) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts))
-
-static int select_frame(AVFilterContext *ctx, AVFrame *frame)
-{
- SelectContext *select = ctx->priv;
- AVFilterLink *inlink = ctx->inputs[0];
- double res;
-
- if (isnan(select->var_values[VAR_START_PTS]))
- select->var_values[VAR_START_PTS] = TS2D(frame->pts);
- if (isnan(select->var_values[VAR_START_T]))
- select->var_values[VAR_START_T] = TS2D(frame->pts) * av_q2d(inlink->time_base);
-
- select->var_values[VAR_PTS] = TS2D(frame->pts);
- select->var_values[VAR_T ] = TS2D(frame->pts) * av_q2d(inlink->time_base);
- select->var_values[VAR_PREV_PTS] = TS2D(frame->pts);
-
- select->var_values[VAR_INTERLACE_TYPE] =
- !frame->interlaced_frame ? INTERLACE_TYPE_P :
- frame->top_field_first ? INTERLACE_TYPE_T : INTERLACE_TYPE_B;
- select->var_values[VAR_PICT_TYPE] = frame->pict_type;
-
- res = av_expr_eval(select->expr, select->var_values, NULL);
-
- select->var_values[VAR_N] += 1.0;
-
- if (res) {
- select->var_values[VAR_PREV_SELECTED_N] = select->var_values[VAR_N];
- select->var_values[VAR_PREV_SELECTED_PTS] = select->var_values[VAR_PTS];
- select->var_values[VAR_PREV_SELECTED_T] = select->var_values[VAR_T];
- select->var_values[VAR_SELECTED_N] += 1.0;
- }
- return res;
-}
-
-static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
-{
- SelectContext *select = inlink->dst->priv;
-
- select->select = select_frame(inlink->dst, frame);
- if (select->select) {
- /* frame was requested through poll_frame */
- if (select->cache_frames) {
- if (!av_fifo_space(select->pending_frames)) {
- av_log(inlink->dst, AV_LOG_ERROR,
- "Buffering limit reached, cannot cache more frames\n");
- av_frame_free(&frame);
- } else
- av_fifo_generic_write(select->pending_frames, &frame,
- sizeof(frame), NULL);
- return 0;
- }
- return ff_filter_frame(inlink->dst->outputs[0], frame);
- }
-
- av_frame_free(&frame);
- return 0;
-}
-
-static int request_frame(AVFilterLink *outlink)
-{
- AVFilterContext *ctx = outlink->src;
- SelectContext *select = ctx->priv;
- AVFilterLink *inlink = outlink->src->inputs[0];
- select->select = 0;
-
- if (av_fifo_size(select->pending_frames)) {
- AVFrame *frame;
-
- av_fifo_generic_read(select->pending_frames, &frame, sizeof(frame), NULL);
- return ff_filter_frame(outlink, frame);
- }
-
- while (!select->select) {
- int ret = ff_request_frame(inlink);
- if (ret < 0)
- return ret;
- }
-
- return 0;
-}
-
-static int poll_frame(AVFilterLink *outlink)
-{
- SelectContext *select = outlink->src->priv;
- AVFilterLink *inlink = outlink->src->inputs[0];
- int count, ret;
-
- if (!av_fifo_size(select->pending_frames)) {
- if ((count = ff_poll_frame(inlink)) <= 0)
- return count;
- /* request frame from input, and apply select condition to it */
- select->cache_frames = 1;
- while (count-- && av_fifo_space(select->pending_frames)) {
- ret = ff_request_frame(inlink);
- if (ret < 0)
- break;
- }
- select->cache_frames = 0;
- }
-
- return av_fifo_size(select->pending_frames)/sizeof(AVFrame*);
-}
-
-static av_cold void uninit(AVFilterContext *ctx)
-{
- SelectContext *select = ctx->priv;
- AVFrame *frame;
-
- av_expr_free(select->expr);
- select->expr = NULL;
-
- while (select->pending_frames &&
- av_fifo_generic_read(select->pending_frames, &frame, sizeof(frame), NULL) == sizeof(frame))
- av_frame_free(&frame);
- av_fifo_free(select->pending_frames);
- select->pending_frames = NULL;
-}
-
-#define OFFSET(x) offsetof(SelectContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
-static const AVOption options[] = {
- { "expr", "An expression to use for selecting frames", OFFSET(expr_str), AV_OPT_TYPE_STRING, { .str = "1" }, .flags = FLAGS },
- { NULL },
-};
-
-static const AVClass select_class = {
- .class_name = "select",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
-
-static const AVFilterPad avfilter_vf_select_inputs[] = {
- {
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .get_video_buffer = ff_null_get_video_buffer,
- .config_props = config_input,
- .filter_frame = filter_frame,
- },
- { NULL }
-};
-
-static const AVFilterPad avfilter_vf_select_outputs[] = {
- {
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .poll_frame = poll_frame,
- .request_frame = request_frame,
- },
- { NULL }
-};
-
-AVFilter ff_vf_select = {
- .name = "select",
- .description = NULL_IF_CONFIG_SMALL("Select frames to pass in output."),
- .init = init,
- .uninit = uninit,
-
- .priv_size = sizeof(SelectContext),
- .priv_class = &select_class,
-
- .inputs = avfilter_vf_select_inputs,
- .outputs = avfilter_vf_select_outputs,
-};
diff --git a/libavfilter/vf_selectivecolor.c b/libavfilter/vf_selectivecolor.c
new file mode 100644
index 0000000000..748b67b07a
--- /dev/null
+++ b/libavfilter/vf_selectivecolor.c
@@ -0,0 +1,482 @@
+/*
+ * Copyright (c) 2015-2016 Clément Bœsch <u pkh me>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @see http://blog.pkh.me/p/22-understanding-selective-coloring-in-adobe-photoshop.html
+ * @todo
+ * - use integers so it can be made bitexact and a FATE test can be added
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/file.h"
+#include "libavutil/intreadwrite.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "libavcodec/mathops.h" // for mid_pred(), which is a macro so no link dependency
+#include "avfilter.h"
+#include "drawutils.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+#define R 0
+#define G 1
+#define B 2
+#define A 3
+
+enum color_range {
+ // WARNING: do NOT reorder (see parse_psfile())
+ RANGE_REDS,
+ RANGE_YELLOWS,
+ RANGE_GREENS,
+ RANGE_CYANS,
+ RANGE_BLUES,
+ RANGE_MAGENTAS,
+ RANGE_WHITES,
+ RANGE_NEUTRALS,
+ RANGE_BLACKS,
+ NB_RANGES
+};
+
+enum correction_method {
+ CORRECTION_METHOD_ABSOLUTE,
+ CORRECTION_METHOD_RELATIVE,
+ NB_CORRECTION_METHODS,
+};
+
+static const char *color_names[NB_RANGES] = {
+ "red", "yellow", "green", "cyan", "blue", "magenta", "white", "neutral", "black"
+};
+
+typedef int (*get_range_scale_func)(int r, int g, int b, int min_val, int max_val);
+
+struct process_range {
+ int range_id;
+ uint32_t mask;
+ get_range_scale_func get_scale;
+};
+
+typedef struct ThreadData {
+ AVFrame *in, *out;
+} ThreadData;
+
+typedef struct {
+ const AVClass *class;
+ int correction_method;
+ char *opt_cmyk_adjust[NB_RANGES];
+ float cmyk_adjust[NB_RANGES][4];
+ struct process_range process_ranges[NB_RANGES]; // color ranges to process
+ int nb_process_ranges;
+ char *psfile;
+ uint8_t rgba_map[4];
+ int is_16bit;
+ int step;
+} SelectiveColorContext;
+
+#define OFFSET(x) offsetof(SelectiveColorContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+#define RANGE_OPTION(color_name, range) \
+ { color_name"s", "adjust "color_name" regions", OFFSET(opt_cmyk_adjust[range]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS }
+
+static const AVOption selectivecolor_options[] = {
+ { "correction_method", "select correction method", OFFSET(correction_method), AV_OPT_TYPE_INT, {.i64 = CORRECTION_METHOD_ABSOLUTE}, 0, NB_CORRECTION_METHODS-1, FLAGS, "correction_method" },
+ { "absolute", NULL, 0, AV_OPT_TYPE_CONST, {.i64=CORRECTION_METHOD_ABSOLUTE}, INT_MIN, INT_MAX, FLAGS, "correction_method" },
+ { "relative", NULL, 0, AV_OPT_TYPE_CONST, {.i64=CORRECTION_METHOD_RELATIVE}, INT_MIN, INT_MAX, FLAGS, "correction_method" },
+ RANGE_OPTION("red", RANGE_REDS),
+ RANGE_OPTION("yellow", RANGE_YELLOWS),
+ RANGE_OPTION("green", RANGE_GREENS),
+ RANGE_OPTION("cyan", RANGE_CYANS),
+ RANGE_OPTION("blue", RANGE_BLUES),
+ RANGE_OPTION("magenta", RANGE_MAGENTAS),
+ RANGE_OPTION("white", RANGE_WHITES),
+ RANGE_OPTION("neutral", RANGE_NEUTRALS),
+ RANGE_OPTION("black", RANGE_BLACKS),
+ { "psfile", "set Photoshop selectivecolor file name", OFFSET(psfile), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(selectivecolor);
+
+static int get_rgb_scale(int r, int g, int b, int min_val, int max_val)
+{
+ return max_val - mid_pred(r, g, b);
+}
+
+static int get_cmy_scale(int r, int g, int b, int min_val, int max_val)
+{
+ return mid_pred(r, g, b) - min_val;
+}
+
+#define DECLARE_RANGE_SCALE_FUNCS(nbits) \
+static int get_neutrals_scale##nbits(int r, int g, int b, int min_val, int max_val) \
+{ \
+ /* 1 - (|max-0.5| + |min-0.5|) */ \
+ return (((1<<nbits)-1)*2 - ( abs((max_val<<1) - ((1<<nbits)-1)) \
+ + abs((min_val<<1) - ((1<<nbits)-1))) + 1) >> 1; \
+} \
+ \
+static int get_whites_scale##nbits(int r, int g, int b, int min_val, int max_val) \
+{ \
+ /* (min - 0.5) * 2 */ \
+ return (min_val<<1) - ((1<<nbits)-1); \
+} \
+ \
+static int get_blacks_scale##nbits(int r, int g, int b, int min_val, int max_val) \
+{ \
+ /* (0.5 - max) * 2 */ \
+ return ((1<<nbits)-1) - (max_val<<1); \
+} \
+
+DECLARE_RANGE_SCALE_FUNCS(8)
+DECLARE_RANGE_SCALE_FUNCS(16)
+
+static int register_range(SelectiveColorContext *s, int range_id)
+{
+ const float *cmyk = s->cmyk_adjust[range_id];
+
+ /* If the color range has user settings, register the color range
+ * as "to be processed" */
+ if (cmyk[0] || cmyk[1] || cmyk[2] || cmyk[3]) {
+ struct process_range *pr = &s->process_ranges[s->nb_process_ranges++];
+
+ if (cmyk[0] < -1.0 || cmyk[0] > 1.0 ||
+ cmyk[1] < -1.0 || cmyk[1] > 1.0 ||
+ cmyk[2] < -1.0 || cmyk[2] > 1.0 ||
+ cmyk[3] < -1.0 || cmyk[3] > 1.0) {
+ av_log(s, AV_LOG_ERROR, "Invalid %s adjustments (%g %g %g %g). "
+ "Settings must be set in [-1;1] range\n",
+ color_names[range_id], cmyk[0], cmyk[1], cmyk[2], cmyk[3]);
+ return AVERROR(EINVAL);
+ }
+
+ pr->range_id = range_id;
+ pr->mask = 1 << range_id;
+ if (pr->mask & (1<<RANGE_REDS | 1<<RANGE_GREENS | 1<<RANGE_BLUES)) pr->get_scale = get_rgb_scale;
+ else if (pr->mask & (1<<RANGE_CYANS | 1<<RANGE_MAGENTAS | 1<<RANGE_YELLOWS)) pr->get_scale = get_cmy_scale;
+ else if (!s->is_16bit && (pr->mask & 1<<RANGE_WHITES)) pr->get_scale = get_whites_scale8;
+ else if (!s->is_16bit && (pr->mask & 1<<RANGE_NEUTRALS)) pr->get_scale = get_neutrals_scale8;
+ else if (!s->is_16bit && (pr->mask & 1<<RANGE_BLACKS)) pr->get_scale = get_blacks_scale8;
+ else if ( s->is_16bit && (pr->mask & 1<<RANGE_WHITES)) pr->get_scale = get_whites_scale16;
+ else if ( s->is_16bit && (pr->mask & 1<<RANGE_NEUTRALS)) pr->get_scale = get_neutrals_scale16;
+ else if ( s->is_16bit && (pr->mask & 1<<RANGE_BLACKS)) pr->get_scale = get_blacks_scale16;
+ else
+ av_assert0(0);
+ }
+ return 0;
+}
+
+static int parse_psfile(AVFilterContext *ctx, const char *fname)
+{
+ int16_t val;
+ int ret, i, version;
+ uint8_t *buf;
+ size_t size;
+ SelectiveColorContext *s = ctx->priv;
+
+ ret = av_file_map(fname, &buf, &size, 0, NULL);
+ if (ret < 0)
+ return ret;
+
+#define READ16(dst) do { \
+ if (size < 2) { \
+ ret = AVERROR_INVALIDDATA; \
+ goto end; \
+ } \
+ dst = AV_RB16(buf); \
+ buf += 2; \
+ size -= 2; \
+} while (0)
+
+ READ16(version);
+ if (version != 1)
+ av_log(s, AV_LOG_WARNING, "Unsupported selective color file version %d, "
+ "the settings might not be loaded properly\n", version);
+
+ READ16(s->correction_method);
+
+ // 1st CMYK entry is reserved/unused
+ for (i = 0; i < FF_ARRAY_ELEMS(s->cmyk_adjust[0]); i++) {
+ READ16(val);
+ if (val)
+ av_log(s, AV_LOG_WARNING, "%c value of first CMYK entry is not 0 "
+ "but %d\n", "CMYK"[i], val);
+ }
+
+ for (i = 0; i < FF_ARRAY_ELEMS(s->cmyk_adjust); i++) {
+ int k;
+ for (k = 0; k < FF_ARRAY_ELEMS(s->cmyk_adjust[0]); k++) {
+ READ16(val);
+ s->cmyk_adjust[i][k] = val / 100.;
+ }
+ ret = register_range(s, i);
+ if (ret < 0)
+ goto end;
+ }
+
+end:
+ av_file_unmap(buf, size);
+ return ret;
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ int i, ret;
+ AVFilterContext *ctx = inlink->dst;
+ SelectiveColorContext *s = ctx->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+
+ s->is_16bit = desc->comp[0].depth > 8;
+ s->step = av_get_padded_bits_per_pixel(desc) >> (3 + s->is_16bit);
+
+ ret = ff_fill_rgba_map(s->rgba_map, inlink->format);
+ if (ret < 0)
+ return ret;
+
+ /* If the following conditions are not met, it will cause trouble while
+ * parsing the PS file */
+ av_assert0(FF_ARRAY_ELEMS(s->cmyk_adjust) == 10 - 1);
+ av_assert0(FF_ARRAY_ELEMS(s->cmyk_adjust[0]) == 4);
+
+ if (s->psfile) {
+ ret = parse_psfile(ctx, s->psfile);
+ if (ret < 0)
+ return ret;
+ } else {
+ for (i = 0; i < FF_ARRAY_ELEMS(s->opt_cmyk_adjust); i++) {
+ const char *opt_cmyk_adjust = s->opt_cmyk_adjust[i];
+
+ if (opt_cmyk_adjust) {
+ float *cmyk = s->cmyk_adjust[i];
+
+ sscanf(s->opt_cmyk_adjust[i], "%f %f %f %f", cmyk, cmyk+1, cmyk+2, cmyk+3);
+ ret = register_range(s, i);
+ if (ret < 0)
+ return ret;
+ }
+ }
+ }
+
+ av_log(s, AV_LOG_VERBOSE, "Adjustments:%s\n", s->nb_process_ranges ? "" : " none");
+ for (i = 0; i < s->nb_process_ranges; i++) {
+ const struct process_range *pr = &s->process_ranges[i];
+ const float *cmyk = s->cmyk_adjust[pr->range_id];
+
+ av_log(s, AV_LOG_VERBOSE, "%8ss: C=%6g M=%6g Y=%6g K=%6g\n",
+ color_names[pr->range_id], cmyk[0], cmyk[1], cmyk[2], cmyk[3]);
+ }
+
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
+ AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA,
+ AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR,
+ AV_PIX_FMT_0RGB, AV_PIX_FMT_0BGR,
+ AV_PIX_FMT_RGB0, AV_PIX_FMT_BGR0,
+ AV_PIX_FMT_RGB48, AV_PIX_FMT_BGR48,
+ AV_PIX_FMT_RGBA64, AV_PIX_FMT_BGRA64,
+ AV_PIX_FMT_NONE
+ };
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static inline int comp_adjust(int scale, float value, float adjust, float k, int correction_method)
+{
+ const float min = -value;
+ const float max = 1. - value;
+ float res = (-1. - adjust) * k - adjust;
+ if (correction_method == CORRECTION_METHOD_RELATIVE)
+ res *= max;
+ return lrint(av_clipf(res, min, max) * scale);
+}
+
+#define DECLARE_SELECTIVE_COLOR_FUNC(nbits) \
+static inline int selective_color_##nbits(AVFilterContext *ctx, ThreadData *td, \
+ int jobnr, int nb_jobs, int direct, int correction_method) \
+{ \
+ int i, x, y; \
+ const AVFrame *in = td->in; \
+ AVFrame *out = td->out; \
+ const SelectiveColorContext *s = ctx->priv; \
+ const int height = in->height; \
+ const int width = in->width; \
+ const int slice_start = (height * jobnr ) / nb_jobs; \
+ const int slice_end = (height * (jobnr+1)) / nb_jobs; \
+ const int dst_linesize = out->linesize[0]; \
+ const int src_linesize = in->linesize[0]; \
+ const uint8_t roffset = s->rgba_map[R]; \
+ const uint8_t goffset = s->rgba_map[G]; \
+ const uint8_t boffset = s->rgba_map[B]; \
+ const uint8_t aoffset = s->rgba_map[A]; \
+ \
+ for (y = slice_start; y < slice_end; y++) { \
+ uint##nbits##_t *dst = ( uint##nbits##_t *)(out->data[0] + y * dst_linesize); \
+ const uint##nbits##_t *src = (const uint##nbits##_t *)( in->data[0] + y * src_linesize); \
+ \
+ for (x = 0; x < width * s->step; x += s->step) { \
+ const int r = src[x + roffset]; \
+ const int g = src[x + goffset]; \
+ const int b = src[x + boffset]; \
+ const int min_color = FFMIN3(r, g, b); \
+ const int max_color = FFMAX3(r, g, b); \
+ const int is_white = (r > 1<<(nbits-1) && g > 1<<(nbits-1) && b > 1<<(nbits-1)); \
+ const int is_neutral = (r || g || b) && \
+ r != (1<<nbits)-1 && g != (1<<nbits)-1 && b != (1<<nbits)-1; \
+ const int is_black = (r < 1<<(nbits-1) && g < 1<<(nbits-1) && b < 1<<(nbits-1)); \
+ const uint32_t range_flag = (r == max_color) << RANGE_REDS \
+ | (r == min_color) << RANGE_CYANS \
+ | (g == max_color) << RANGE_GREENS \
+ | (g == min_color) << RANGE_MAGENTAS \
+ | (b == max_color) << RANGE_BLUES \
+ | (b == min_color) << RANGE_YELLOWS \
+ | is_white << RANGE_WHITES \
+ | is_neutral << RANGE_NEUTRALS \
+ | is_black << RANGE_BLACKS; \
+ \
+ const float rnorm = r * (1.f / ((1<<nbits)-1)); \
+ const float gnorm = g * (1.f / ((1<<nbits)-1)); \
+ const float bnorm = b * (1.f / ((1<<nbits)-1)); \
+ int adjust_r = 0, adjust_g = 0, adjust_b = 0; \
+ \
+ for (i = 0; i < s->nb_process_ranges; i++) { \
+ const struct process_range *pr = &s->process_ranges[i]; \
+ \
+ if (range_flag & pr->mask) { \
+ const int scale = pr->get_scale(r, g, b, min_color, max_color); \
+ \
+ if (scale > 0) { \
+ const float *cmyk_adjust = s->cmyk_adjust[pr->range_id]; \
+ const float adj_c = cmyk_adjust[0]; \
+ const float adj_m = cmyk_adjust[1]; \
+ const float adj_y = cmyk_adjust[2]; \
+ const float k = cmyk_adjust[3]; \
+ \
+ adjust_r += comp_adjust(scale, rnorm, adj_c, k, correction_method); \
+ adjust_g += comp_adjust(scale, gnorm, adj_m, k, correction_method); \
+ adjust_b += comp_adjust(scale, bnorm, adj_y, k, correction_method); \
+ } \
+ } \
+ } \
+ \
+ if (!direct || adjust_r || adjust_g || adjust_b) { \
+ dst[x + roffset] = av_clip_uint##nbits(r + adjust_r); \
+ dst[x + goffset] = av_clip_uint##nbits(g + adjust_g); \
+ dst[x + boffset] = av_clip_uint##nbits(b + adjust_b); \
+ if (!direct && s->step == 4) \
+ dst[x + aoffset] = src[x + aoffset]; \
+ } \
+ } \
+ } \
+ return 0; \
+}
+
+#define DEF_SELECTIVE_COLOR_FUNC(name, direct, correction_method, nbits) \
+static int selective_color_##name##_##nbits(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs) \
+{ \
+ return selective_color_##nbits(ctx, arg, jobnr, nb_jobs, direct, correction_method); \
+}
+
+#define DEF_SELECTIVE_COLOR_FUNCS(nbits) \
+DECLARE_SELECTIVE_COLOR_FUNC(nbits) \
+DEF_SELECTIVE_COLOR_FUNC(indirect_absolute, 0, CORRECTION_METHOD_ABSOLUTE, nbits) \
+DEF_SELECTIVE_COLOR_FUNC(indirect_relative, 0, CORRECTION_METHOD_RELATIVE, nbits) \
+DEF_SELECTIVE_COLOR_FUNC( direct_absolute, 1, CORRECTION_METHOD_ABSOLUTE, nbits) \
+DEF_SELECTIVE_COLOR_FUNC( direct_relative, 1, CORRECTION_METHOD_RELATIVE, nbits)
+
+DEF_SELECTIVE_COLOR_FUNCS(8)
+DEF_SELECTIVE_COLOR_FUNCS(16)
+
+typedef int (*selective_color_func_type)(AVFilterContext *ctx, void *td, int jobnr, int nb_jobs);
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ int direct;
+ AVFrame *out;
+ ThreadData td;
+ const SelectiveColorContext *s = ctx->priv;
+ static const selective_color_func_type funcs[2][2][2] = {
+ {
+ {selective_color_indirect_absolute_8, selective_color_indirect_relative_8},
+ {selective_color_direct_absolute_8, selective_color_direct_relative_8},
+ },{
+ {selective_color_indirect_absolute_16, selective_color_indirect_relative_16},
+ {selective_color_direct_absolute_16, selective_color_direct_relative_16},
+ }
+ };
+
+ if (av_frame_is_writable(in)) {
+ direct = 1;
+ out = in;
+ } else {
+ direct = 0;
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+ }
+
+ td.in = in;
+ td.out = out;
+ ctx->internal->execute(ctx, funcs[s->is_16bit][direct][s->correction_method],
+ &td, NULL, FFMIN(inlink->h, ff_filter_get_nb_threads(ctx)));
+
+ if (!direct)
+ av_frame_free(&in);
+ return ff_filter_frame(outlink, out);
+}
+
+static const AVFilterPad selectivecolor_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ { NULL }
+};
+
+static const AVFilterPad selectivecolor_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_selectivecolor = {
+ .name = "selectivecolor",
+ .description = NULL_IF_CONFIG_SMALL("Apply CMYK adjustments to specific color ranges."),
+ .priv_size = sizeof(SelectiveColorContext),
+ .query_formats = query_formats,
+ .inputs = selectivecolor_inputs,
+ .outputs = selectivecolor_outputs,
+ .priv_class = &selectivecolor_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
+};
diff --git a/libavfilter/vf_separatefields.c b/libavfilter/vf_separatefields.c
new file mode 100644
index 0000000000..3ea5eb67d7
--- /dev/null
+++ b/libavfilter/vf_separatefields.c
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2013 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+#include "internal.h"
+
+typedef struct {
+ int nb_planes;
+ AVFrame *second;
+} SeparateFieldsContext;
+
+static int config_props_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ SeparateFieldsContext *s = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
+
+ s->nb_planes = av_pix_fmt_count_planes(inlink->format);
+
+ if (inlink->h & 1) {
+ av_log(ctx, AV_LOG_ERROR, "height must be even\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ outlink->time_base.num = inlink->time_base.num;
+ outlink->time_base.den = inlink->time_base.den * 2;
+ outlink->frame_rate.num = inlink->frame_rate.num * 2;
+ outlink->frame_rate.den = inlink->frame_rate.den;
+ outlink->w = inlink->w;
+ outlink->h = inlink->h / 2;
+
+ return 0;
+}
+
+static void extract_field(AVFrame *frame, int nb_planes, int type)
+{
+ int i;
+
+ for (i = 0; i < nb_planes; i++) {
+ if (type)
+ frame->data[i] = frame->data[i] + frame->linesize[i];
+ frame->linesize[i] *= 2;
+ }
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
+{
+ AVFilterContext *ctx = inlink->dst;
+ SeparateFieldsContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ int ret;
+
+ inpicref->height = outlink->h;
+ inpicref->interlaced_frame = 0;
+
+ if (!s->second) {
+ goto clone;
+ } else {
+ AVFrame *second = s->second;
+
+ extract_field(second, s->nb_planes, second->top_field_first);
+
+ if (second->pts != AV_NOPTS_VALUE &&
+ inpicref->pts != AV_NOPTS_VALUE)
+ second->pts += inpicref->pts;
+ else
+ second->pts = AV_NOPTS_VALUE;
+
+ ret = ff_filter_frame(outlink, second);
+ if (ret < 0)
+ return ret;
+clone:
+ s->second = av_frame_clone(inpicref);
+ if (!s->second)
+ return AVERROR(ENOMEM);
+ }
+
+ extract_field(inpicref, s->nb_planes, !inpicref->top_field_first);
+
+ if (inpicref->pts != AV_NOPTS_VALUE)
+ inpicref->pts *= 2;
+
+ return ff_filter_frame(outlink, inpicref);
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ SeparateFieldsContext *s = ctx->priv;
+ int ret;
+
+ ret = ff_request_frame(ctx->inputs[0]);
+ if (ret == AVERROR_EOF && s->second) {
+ s->second->pts *= 2;
+ extract_field(s->second, s->nb_planes, s->second->top_field_first);
+ ret = ff_filter_frame(outlink, s->second);
+ s->second = 0;
+ }
+
+ return ret;
+}
+
+static const AVFilterPad separatefields_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad separatefields_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_props_output,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_separatefields = {
+ .name = "separatefields",
+ .description = NULL_IF_CONFIG_SMALL("Split input video frames into fields."),
+ .priv_size = sizeof(SeparateFieldsContext),
+ .inputs = separatefields_inputs,
+ .outputs = separatefields_outputs,
+};
diff --git a/libavfilter/vf_setfield.c b/libavfilter/vf_setfield.c
new file mode 100644
index 0000000000..96e9d18bd3
--- /dev/null
+++ b/libavfilter/vf_setfield.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2012 Stefano Sabatini
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * set field order
+ */
+
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#include "internal.h"
+#include "video.h"
+
+enum SetFieldMode {
+ MODE_AUTO = -1,
+ MODE_BFF,
+ MODE_TFF,
+ MODE_PROG,
+};
+
+typedef struct {
+ const AVClass *class;
+ int mode; ///< SetFieldMode
+} SetFieldContext;
+
+#define OFFSET(x) offsetof(SetFieldContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption setfield_options[] = {
+ {"mode", "select interlace mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=MODE_AUTO}, -1, MODE_PROG, FLAGS, "mode"},
+ {"auto", "keep the same input field", 0, AV_OPT_TYPE_CONST, {.i64=MODE_AUTO}, INT_MIN, INT_MAX, FLAGS, "mode"},
+ {"bff", "mark as bottom-field-first", 0, AV_OPT_TYPE_CONST, {.i64=MODE_BFF}, INT_MIN, INT_MAX, FLAGS, "mode"},
+ {"tff", "mark as top-field-first", 0, AV_OPT_TYPE_CONST, {.i64=MODE_TFF}, INT_MIN, INT_MAX, FLAGS, "mode"},
+ {"prog", "mark as progressive", 0, AV_OPT_TYPE_CONST, {.i64=MODE_PROG}, INT_MIN, INT_MAX, FLAGS, "mode"},
+ {NULL}
+};
+
+AVFILTER_DEFINE_CLASS(setfield);
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *picref)
+{
+ SetFieldContext *setfield = inlink->dst->priv;
+
+ if (setfield->mode == MODE_PROG) {
+ picref->interlaced_frame = 0;
+ } else if (setfield->mode != MODE_AUTO) {
+ picref->interlaced_frame = 1;
+ picref->top_field_first = setfield->mode;
+ }
+ return ff_filter_frame(inlink->dst->outputs[0], picref);
+}
+
+static const AVFilterPad setfield_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad setfield_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_setfield = {
+ .name = "setfield",
+ .description = NULL_IF_CONFIG_SMALL("Force field for the output video frame."),
+ .priv_size = sizeof(SetFieldContext),
+ .priv_class = &setfield_class,
+ .inputs = setfield_inputs,
+ .outputs = setfield_outputs,
+};
diff --git a/libavfilter/vf_showinfo.c b/libavfilter/vf_showinfo.c
index 204ff7a857..83d941c629 100644
--- a/libavfilter/vf_showinfo.c
+++ b/libavfilter/vf_showinfo.c
@@ -1,19 +1,19 @@
/*
* Copyright (c) 2011 Stefano Sabatini
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -30,15 +30,12 @@
#include "libavutil/internal.h"
#include "libavutil/pixdesc.h"
#include "libavutil/stereo3d.h"
+#include "libavutil/timestamp.h"
#include "avfilter.h"
#include "internal.h"
#include "video.h"
-typedef struct ShowInfoContext {
- unsigned int frame;
-} ShowInfoContext;
-
static void dump_stereo3d(AVFilterContext *ctx, AVFrameSideData *sd)
{
AVStereo3D *stereo;
@@ -69,34 +66,49 @@ static void dump_stereo3d(AVFilterContext *ctx, AVFrameSideData *sd)
av_log(ctx, AV_LOG_INFO, " (inverted)");
}
+static void update_sample_stats(const uint8_t *src, int len, int64_t *sum, int64_t *sum2)
+{
+ int i;
+
+ for (i = 0; i < len; i++) {
+ *sum += src[i];
+ *sum2 += src[i] * src[i];
+ }
+}
+
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
AVFilterContext *ctx = inlink->dst;
- ShowInfoContext *showinfo = ctx->priv;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
uint32_t plane_checksum[4] = {0}, checksum = 0;
+ int64_t sum[4] = {0}, sum2[4] = {0};
+ int32_t pixelcount[4] = {0};
int i, plane, vsub = desc->log2_chroma_h;
- for (plane = 0; frame->data[plane] && plane < 4; plane++) {
+ for (plane = 0; plane < 4 && frame->data[plane] && frame->linesize[plane]; plane++) {
uint8_t *data = frame->data[plane];
- int h = plane == 1 || plane == 2 ? inlink->h >> vsub : inlink->h;
+ int h = plane == 1 || plane == 2 ? AV_CEIL_RSHIFT(inlink->h, vsub) : inlink->h;
int linesize = av_image_get_linesize(frame->format, frame->width, plane);
+
if (linesize < 0)
return linesize;
for (i = 0; i < h; i++) {
plane_checksum[plane] = av_adler32_update(plane_checksum[plane], data, linesize);
checksum = av_adler32_update(checksum, data, linesize);
+
+ update_sample_stats(data, linesize, sum+plane, sum2+plane);
+ pixelcount[plane] += linesize;
data += frame->linesize[plane];
}
}
av_log(ctx, AV_LOG_INFO,
- "n:%d pts:%"PRId64" pts_time:%f "
+ "n:%4"PRId64" pts:%7s pts_time:%-7s pos:%9"PRId64" "
"fmt:%s sar:%d/%d s:%dx%d i:%c iskey:%d type:%c "
- "checksum:%"PRIu32" plane_checksum:[%"PRIu32" %"PRIu32" %"PRIu32" %"PRIu32"]\n",
- showinfo->frame,
- frame->pts, frame->pts * av_q2d(inlink->time_base),
+ "checksum:%08"PRIX32" plane_checksum:[%08"PRIX32,
+ inlink->frame_count_out,
+ av_ts2str(frame->pts), av_ts2timestr(frame->pts, &inlink->time_base), av_frame_get_pkt_pos(frame),
desc->name,
frame->sample_aspect_ratio.num, frame->sample_aspect_ratio.den,
frame->width, frame->height,
@@ -104,7 +116,18 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
frame->top_field_first ? 'T' : 'B', /* Top / Bottom */
frame->key_frame,
av_get_picture_type_char(frame->pict_type),
- checksum, plane_checksum[0], plane_checksum[1], plane_checksum[2], plane_checksum[3]);
+ checksum, plane_checksum[0]);
+
+ for (plane = 1; plane < 4 && frame->data[plane] && frame->linesize[plane]; plane++)
+ av_log(ctx, AV_LOG_INFO, " %08"PRIX32, plane_checksum[plane]);
+ av_log(ctx, AV_LOG_INFO, "] mean:[");
+ for (plane = 0; plane < 4 && frame->data[plane] && frame->linesize[plane]; plane++)
+ av_log(ctx, AV_LOG_INFO, "%"PRId64" ", (sum[plane] + pixelcount[plane]/2) / pixelcount[plane]);
+ av_log(ctx, AV_LOG_INFO, "\b] stdev:[");
+ for (plane = 0; plane < 4 && frame->data[plane] && frame->linesize[plane]; plane++)
+ av_log(ctx, AV_LOG_INFO, "%3.1f ",
+ sqrt((sum2[plane] - sum[plane]*(double)sum[plane]/pixelcount[plane])/pixelcount[plane]));
+ av_log(ctx, AV_LOG_INFO, "\b]\n");
for (i = 0; i < frame->nb_side_data; i++) {
AVFrameSideData *sd = frame->side_data[i];
@@ -136,7 +159,6 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
av_log(ctx, AV_LOG_INFO, "\n");
}
- showinfo->frame++;
return ff_filter_frame(inlink->dst->outputs[0], frame);
}
@@ -167,7 +189,6 @@ static const AVFilterPad avfilter_vf_showinfo_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
- .get_video_buffer = ff_null_get_video_buffer,
.filter_frame = filter_frame,
.config_props = config_props_in,
},
@@ -186,10 +207,6 @@ static const AVFilterPad avfilter_vf_showinfo_outputs[] = {
AVFilter ff_vf_showinfo = {
.name = "showinfo",
.description = NULL_IF_CONFIG_SMALL("Show textual information for each video frame."),
-
- .priv_size = sizeof(ShowInfoContext),
-
- .inputs = avfilter_vf_showinfo_inputs,
-
- .outputs = avfilter_vf_showinfo_outputs,
+ .inputs = avfilter_vf_showinfo_inputs,
+ .outputs = avfilter_vf_showinfo_outputs,
};
diff --git a/libavfilter/vf_showpalette.c b/libavfilter/vf_showpalette.c
new file mode 100644
index 0000000000..f1627ba58e
--- /dev/null
+++ b/libavfilter/vf_showpalette.c
@@ -0,0 +1,140 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Display frame palette (AV_PIX_FMT_PAL8)
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+typedef struct {
+ const AVClass *class;
+ int size;
+} ShowPaletteContext;
+
+#define OFFSET(x) offsetof(ShowPaletteContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption showpalette_options[] = {
+ { "s", "set pixel box size", OFFSET(size), AV_OPT_TYPE_INT, {.i64=30}, 1, 100, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(showpalette);
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat in_fmts[] = {AV_PIX_FMT_PAL8, AV_PIX_FMT_NONE};
+ static const enum AVPixelFormat out_fmts[] = {AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE};
+ int ret;
+ AVFilterFormats *in = ff_make_format_list(in_fmts);
+ AVFilterFormats *out = ff_make_format_list(out_fmts);
+ if (!in || !out) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ if ((ret = ff_formats_ref(in , &ctx->inputs[0]->out_formats)) < 0 ||
+ (ret = ff_formats_ref(out, &ctx->outputs[0]->in_formats)) < 0)
+ goto fail;
+ return 0;
+fail:
+ if (in)
+ av_freep(&in->formats);
+ av_freep(&in);
+ if (out)
+ av_freep(&out->formats);
+ av_freep(&out);
+ return ret;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ const ShowPaletteContext *s = ctx->priv;
+ outlink->w = outlink->h = 16 * s->size;
+ return 0;
+}
+
+static int disp_palette(AVFrame *out, const AVFrame *in, int size)
+{
+ int x, y, i, j;
+ uint32_t *dst = (uint32_t *)out->data[0];
+ const int dst_linesize = out->linesize[0] >> 2;
+ const uint32_t *pal = (uint32_t *)in->data[1];
+
+ for (y = 0; y < 16; y++)
+ for (x = 0; x < 16; x++)
+ for (j = 0; j < size; j++)
+ for (i = 0; i < size; i++)
+ dst[(y*dst_linesize + x) * size + j*dst_linesize + i] = pal[y*16 + x];
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ int ret;
+ AVFrame *out;
+ AVFilterContext *ctx = inlink->dst;
+ const ShowPaletteContext *s= ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+ ret = disp_palette(out, in, s->size);
+ av_frame_free(&in);
+ return ret < 0 ? ret : ff_filter_frame(outlink, out);
+}
+
+static const AVFilterPad showpalette_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad showpalette_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_showpalette = {
+ .name = "showpalette",
+ .description = NULL_IF_CONFIG_SMALL("Display frame palette."),
+ .priv_size = sizeof(ShowPaletteContext),
+ .query_formats = query_formats,
+ .inputs = showpalette_inputs,
+ .outputs = showpalette_outputs,
+ .priv_class = &showpalette_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+};
diff --git a/libavfilter/vf_shuffleframes.c b/libavfilter/vf_shuffleframes.c
new file mode 100644
index 0000000000..8e595111b8
--- /dev/null
+++ b/libavfilter/vf_shuffleframes.c
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2015 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/avstring.h"
+#include "libavutil/common.h"
+#include "libavutil/internal.h"
+#include "libavutil/opt.h"
+
+#include "avfilter.h"
+#include "internal.h"
+#include "video.h"
+
+typedef struct ShuffleFramesContext {
+ const AVClass *class;
+ char *mapping;
+ AVFrame **frames;
+ int *map;
+ int64_t *pts;
+ int in_frames;
+ int nb_frames;
+} ShuffleFramesContext;
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ ShuffleFramesContext *s = ctx->priv;
+ char *mapping, *saveptr = NULL, *p;
+ int n, nb_items;
+
+ nb_items = 1;
+ for (p = s->mapping; *p; p++) {
+ if (*p == '|' || *p == ' ')
+ nb_items++;
+ }
+
+ s->frames = av_calloc(nb_items, sizeof(*s->frames));
+ s->map = av_calloc(nb_items, sizeof(*s->map));
+ s->pts = av_calloc(nb_items, sizeof(*s->pts));
+ if (!s->map || !s->frames || !s->pts) {
+ return AVERROR(ENOMEM);
+ }
+
+ mapping = av_strdup(s->mapping);
+ if (!mapping)
+ return AVERROR(ENOMEM);
+
+ for (n = 0; n < nb_items; n++) {
+ char *map = av_strtok(n == 0 ? mapping : NULL, " |", &saveptr);
+ if (!map || sscanf(map, "%d", &s->map[n]) != 1) {
+ av_free(mapping);
+ return AVERROR(EINVAL);
+ }
+
+ if (s->map[n] < -1 || s->map[n] >= nb_items) {
+ av_log(ctx, AV_LOG_ERROR, "Index out of range.\n");
+ av_free(mapping);
+ return AVERROR(EINVAL);
+ }
+ }
+
+ s->nb_frames = nb_items;
+ av_free(mapping);
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
+{
+ AVFilterContext *ctx = inlink->dst;
+ ShuffleFramesContext *s = ctx->priv;
+ int ret = 0;
+
+ if (s->in_frames < s->nb_frames) {
+ s->frames[s->in_frames] = frame;
+ s->pts[s->in_frames] = frame->pts;
+ s->in_frames++;
+ }
+
+ if (s->in_frames == s->nb_frames) {
+ int n, x;
+
+ for (n = 0; n < s->nb_frames; n++) {
+ AVFrame *out;
+
+ x = s->map[n];
+ if (x >= 0) {
+ out = av_frame_clone(s->frames[x]);
+ if (!out)
+ return AVERROR(ENOMEM);
+ out->pts = s->pts[n];
+ ret = ff_filter_frame(ctx->outputs[0], out);
+ }
+ s->in_frames--;
+ }
+
+ for (n = 0; n < s->nb_frames; n++)
+ av_frame_free(&s->frames[n]);
+ }
+
+ return ret;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ ShuffleFramesContext *s = ctx->priv;
+
+ while (s->in_frames > 0) {
+ s->in_frames--;
+ av_frame_free(&s->frames[s->in_frames]);
+ }
+
+ av_freep(&s->frames);
+ av_freep(&s->map);
+ av_freep(&s->pts);
+}
+
+#define OFFSET(x) offsetof(ShuffleFramesContext, x)
+#define FLAGS (AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM)
+static const AVOption shuffleframes_options[] = {
+ { "mapping", "set destination indexes of input frames", OFFSET(mapping), AV_OPT_TYPE_STRING, {.str="0"}, 0, 0, FLAGS },
+ { NULL },
+};
+
+AVFILTER_DEFINE_CLASS(shuffleframes);
+
+static const AVFilterPad shuffleframes_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL },
+};
+
+static const AVFilterPad shuffleframes_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL },
+};
+
+AVFilter ff_vf_shuffleframes = {
+ .name = "shuffleframes",
+ .description = NULL_IF_CONFIG_SMALL("Shuffle video frames."),
+ .priv_size = sizeof(ShuffleFramesContext),
+ .priv_class = &shuffleframes_class,
+ .init = init,
+ .uninit = uninit,
+ .inputs = shuffleframes_inputs,
+ .outputs = shuffleframes_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+};
diff --git a/libavfilter/vf_shuffleplanes.c b/libavfilter/vf_shuffleplanes.c
index 1bc77b0a31..4bc7b79f87 100644
--- a/libavfilter/vf_shuffleplanes.c
+++ b/libavfilter/vf_shuffleplanes.c
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -125,7 +125,7 @@ fail:
}
#define OFFSET(x) offsetof(ShufflePlanesContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
+#define FLAGS (AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM)
static const AVOption shuffleplanes_options[] = {
{ "map0", "Index of the input plane to be used as the first output plane ", OFFSET(map[0]), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 4, FLAGS },
{ "map1", "Index of the input plane to be used as the second output plane ", OFFSET(map[1]), AV_OPT_TYPE_INT, { .i64 = 1 }, 0, 4, FLAGS },
@@ -134,12 +134,7 @@ static const AVOption shuffleplanes_options[] = {
{ NULL },
};
-static const AVClass shuffleplanes_class = {
- .class_name = "shuffleplanes",
- .item_name = av_default_item_name,
- .option = shuffleplanes_options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(shuffleplanes);
static const AVFilterPad shuffleplanes_inputs[] = {
{
@@ -162,7 +157,7 @@ static const AVFilterPad shuffleplanes_outputs[] = {
AVFilter ff_vf_shuffleplanes = {
.name = "shuffleplanes",
- .description = NULL_IF_CONFIG_SMALL("Shuffle video planes"),
+ .description = NULL_IF_CONFIG_SMALL("Shuffle video planes."),
.priv_size = sizeof(ShufflePlanesContext),
.priv_class = &shuffleplanes_class,
diff --git a/libavfilter/vf_signalstats.c b/libavfilter/vf_signalstats.c
new file mode 100644
index 0000000000..22a1db196f
--- /dev/null
+++ b/libavfilter/vf_signalstats.c
@@ -0,0 +1,1024 @@
+/*
+ * Copyright (c) 2010 Mark Heath mjpeg0 @ silicontrip dot org
+ * Copyright (c) 2014 Clément Bœsch
+ * Copyright (c) 2014 Dave Rice @dericed
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/intreadwrite.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "internal.h"
+
+enum FilterMode {
+ FILTER_NONE = -1,
+ FILTER_TOUT,
+ FILTER_VREP,
+ FILTER_BRNG,
+ FILT_NUMB
+};
+
+typedef struct {
+ const AVClass *class;
+ int chromah; // height of chroma plane
+ int chromaw; // width of chroma plane
+ int hsub; // horizontal subsampling
+ int vsub; // vertical subsampling
+ int depth; // pixel depth
+ int fs; // pixel count per frame
+ int cfs; // pixel count per frame of chroma planes
+ int outfilter; // FilterMode
+ int filters;
+ AVFrame *frame_prev;
+ uint8_t rgba_color[4];
+ int yuv_color[3];
+ int nb_jobs;
+ int *jobs_rets;
+
+ int *histy, *histu, *histv, *histsat;
+
+ AVFrame *frame_sat;
+ AVFrame *frame_hue;
+} SignalstatsContext;
+
+typedef struct ThreadData {
+ const AVFrame *in;
+ AVFrame *out;
+} ThreadData;
+
+typedef struct ThreadDataHueSatMetrics {
+ const AVFrame *src;
+ AVFrame *dst_sat, *dst_hue;
+} ThreadDataHueSatMetrics;
+
+#define OFFSET(x) offsetof(SignalstatsContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption signalstats_options[] = {
+ {"stat", "set statistics filters", OFFSET(filters), AV_OPT_TYPE_FLAGS, {.i64=0}, 0, INT_MAX, FLAGS, "filters"},
+ {"tout", "analyze pixels for temporal outliers", 0, AV_OPT_TYPE_CONST, {.i64=1<<FILTER_TOUT}, 0, 0, FLAGS, "filters"},
+ {"vrep", "analyze video lines for vertical line repetition", 0, AV_OPT_TYPE_CONST, {.i64=1<<FILTER_VREP}, 0, 0, FLAGS, "filters"},
+ {"brng", "analyze for pixels outside of broadcast range", 0, AV_OPT_TYPE_CONST, {.i64=1<<FILTER_BRNG}, 0, 0, FLAGS, "filters"},
+ {"out", "set video filter", OFFSET(outfilter), AV_OPT_TYPE_INT, {.i64=FILTER_NONE}, -1, FILT_NUMB-1, FLAGS, "out"},
+ {"tout", "highlight pixels that depict temporal outliers", 0, AV_OPT_TYPE_CONST, {.i64=FILTER_TOUT}, 0, 0, FLAGS, "out"},
+ {"vrep", "highlight video lines that depict vertical line repetition", 0, AV_OPT_TYPE_CONST, {.i64=FILTER_VREP}, 0, 0, FLAGS, "out"},
+ {"brng", "highlight pixels that are outside of broadcast range", 0, AV_OPT_TYPE_CONST, {.i64=FILTER_BRNG}, 0, 0, FLAGS, "out"},
+ {"c", "set highlight color", OFFSET(rgba_color), AV_OPT_TYPE_COLOR, {.str="yellow"}, .flags=FLAGS},
+ {"color", "set highlight color", OFFSET(rgba_color), AV_OPT_TYPE_COLOR, {.str="yellow"}, .flags=FLAGS},
+ {NULL}
+};
+
+AVFILTER_DEFINE_CLASS(signalstats);
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ uint8_t r, g, b;
+ SignalstatsContext *s = ctx->priv;
+
+ if (s->outfilter != FILTER_NONE)
+ s->filters |= 1 << s->outfilter;
+
+ r = s->rgba_color[0];
+ g = s->rgba_color[1];
+ b = s->rgba_color[2];
+ s->yuv_color[0] = (( 66*r + 129*g + 25*b + (1<<7)) >> 8) + 16;
+ s->yuv_color[1] = ((-38*r + -74*g + 112*b + (1<<7)) >> 8) + 128;
+ s->yuv_color[2] = ((112*r + -94*g + -18*b + (1<<7)) >> 8) + 128;
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ SignalstatsContext *s = ctx->priv;
+ av_frame_free(&s->frame_prev);
+ av_frame_free(&s->frame_sat);
+ av_frame_free(&s->frame_hue);
+ av_freep(&s->jobs_rets);
+ av_freep(&s->histy);
+ av_freep(&s->histu);
+ av_freep(&s->histv);
+ av_freep(&s->histsat);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ // TODO: add more
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_YUV440P,
+ AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ411P,
+ AV_PIX_FMT_YUVJ440P,
+ AV_PIX_FMT_YUV444P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV420P9,
+ AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV420P10,
+ AV_PIX_FMT_YUV440P10,
+ AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV420P12,
+ AV_PIX_FMT_YUV440P12,
+ AV_PIX_FMT_YUV444P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV420P14,
+ AV_PIX_FMT_YUV444P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV420P16,
+ AV_PIX_FMT_NONE
+ };
+
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static AVFrame *alloc_frame(enum AVPixelFormat pixfmt, int w, int h)
+{
+ AVFrame *frame = av_frame_alloc();
+ if (!frame)
+ return NULL;
+
+ frame->format = pixfmt;
+ frame->width = w;
+ frame->height = h;
+
+ if (av_frame_get_buffer(frame, 32) < 0) {
+ av_frame_free(&frame);
+ return NULL;
+ }
+
+ return frame;
+}
+
+static int config_props(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ SignalstatsContext *s = ctx->priv;
+ AVFilterLink *inlink = outlink->src->inputs[0];
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(outlink->format);
+ s->hsub = desc->log2_chroma_w;
+ s->vsub = desc->log2_chroma_h;
+ s->depth = desc->comp[0].depth;
+ if (s->depth > 8) {
+ s->histy = av_malloc_array(1 << s->depth, sizeof(*s->histy));
+ s->histu = av_malloc_array(1 << s->depth, sizeof(*s->histu));
+ s->histv = av_malloc_array(1 << s->depth, sizeof(*s->histv));
+ s->histsat = av_malloc_array(1 << s->depth, sizeof(*s->histsat));
+
+ if (!s->histy || !s->histu || !s->histv || !s->histsat)
+ return AVERROR(ENOMEM);
+ }
+
+ outlink->w = inlink->w;
+ outlink->h = inlink->h;
+
+ s->chromaw = AV_CEIL_RSHIFT(inlink->w, s->hsub);
+ s->chromah = AV_CEIL_RSHIFT(inlink->h, s->vsub);
+
+ s->fs = inlink->w * inlink->h;
+ s->cfs = s->chromaw * s->chromah;
+
+ s->nb_jobs = FFMAX(1, FFMIN(inlink->h, ff_filter_get_nb_threads(ctx)));
+ s->jobs_rets = av_malloc_array(s->nb_jobs, sizeof(*s->jobs_rets));
+ if (!s->jobs_rets)
+ return AVERROR(ENOMEM);
+
+ s->frame_sat = alloc_frame(s->depth > 8 ? AV_PIX_FMT_GRAY16 : AV_PIX_FMT_GRAY8, inlink->w, inlink->h);
+ s->frame_hue = alloc_frame(AV_PIX_FMT_GRAY16, inlink->w, inlink->h);
+ if (!s->frame_sat || !s->frame_hue)
+ return AVERROR(ENOMEM);
+
+ return 0;
+}
+
+static void burn_frame8(const SignalstatsContext *s, AVFrame *f, int x, int y)
+{
+ const int chromax = x >> s->hsub;
+ const int chromay = y >> s->vsub;
+ f->data[0][y * f->linesize[0] + x] = s->yuv_color[0];
+ f->data[1][chromay * f->linesize[1] + chromax] = s->yuv_color[1];
+ f->data[2][chromay * f->linesize[2] + chromax] = s->yuv_color[2];
+}
+
+static void burn_frame16(const SignalstatsContext *s, AVFrame *f, int x, int y)
+{
+ const int chromax = x >> s->hsub;
+ const int chromay = y >> s->vsub;
+ const int mult = 1 << (s->depth - 8);
+ AV_WN16(f->data[0] + y * f->linesize[0] + x * 2, s->yuv_color[0] * mult);
+ AV_WN16(f->data[1] + chromay * f->linesize[1] + chromax * 2, s->yuv_color[1] * mult);
+ AV_WN16(f->data[2] + chromay * f->linesize[2] + chromax * 2, s->yuv_color[2] * mult);
+}
+
+static int filter8_brng(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
+{
+ ThreadData *td = arg;
+ const SignalstatsContext *s = ctx->priv;
+ const AVFrame *in = td->in;
+ AVFrame *out = td->out;
+ const int w = in->width;
+ const int h = in->height;
+ const int slice_start = (h * jobnr ) / nb_jobs;
+ const int slice_end = (h * (jobnr+1)) / nb_jobs;
+ int x, y, score = 0;
+
+ for (y = slice_start; y < slice_end; y++) {
+ const int yc = y >> s->vsub;
+ const uint8_t *pluma = &in->data[0][y * in->linesize[0]];
+ const uint8_t *pchromau = &in->data[1][yc * in->linesize[1]];
+ const uint8_t *pchromav = &in->data[2][yc * in->linesize[2]];
+
+ for (x = 0; x < w; x++) {
+ const int xc = x >> s->hsub;
+ const int luma = pluma[x];
+ const int chromau = pchromau[xc];
+ const int chromav = pchromav[xc];
+ const int filt = luma < 16 || luma > 235 ||
+ chromau < 16 || chromau > 240 ||
+ chromav < 16 || chromav > 240;
+ score += filt;
+ if (out && filt)
+ burn_frame8(s, out, x, y);
+ }
+ }
+ return score;
+}
+
+static int filter16_brng(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
+{
+ ThreadData *td = arg;
+ const SignalstatsContext *s = ctx->priv;
+ const AVFrame *in = td->in;
+ AVFrame *out = td->out;
+ const int mult = 1 << (s->depth - 8);
+ const int w = in->width;
+ const int h = in->height;
+ const int slice_start = (h * jobnr ) / nb_jobs;
+ const int slice_end = (h * (jobnr+1)) / nb_jobs;
+ int x, y, score = 0;
+
+ for (y = slice_start; y < slice_end; y++) {
+ const int yc = y >> s->vsub;
+ const uint16_t *pluma = (uint16_t *)&in->data[0][y * in->linesize[0]];
+ const uint16_t *pchromau = (uint16_t *)&in->data[1][yc * in->linesize[1]];
+ const uint16_t *pchromav = (uint16_t *)&in->data[2][yc * in->linesize[2]];
+
+ for (x = 0; x < w; x++) {
+ const int xc = x >> s->hsub;
+ const int luma = pluma[x];
+ const int chromau = pchromau[xc];
+ const int chromav = pchromav[xc];
+ const int filt = luma < 16 * mult || luma > 235 * mult ||
+ chromau < 16 * mult || chromau > 240 * mult ||
+ chromav < 16 * mult || chromav > 240 * mult;
+ score += filt;
+ if (out && filt)
+ burn_frame16(s, out, x, y);
+ }
+ }
+ return score;
+}
+
+static int filter_tout_outlier(uint8_t x, uint8_t y, uint8_t z)
+{
+ return ((abs(x - y) + abs (z - y)) / 2) - abs(z - x) > 4; // make 4 configurable?
+}
+
+static int filter8_tout(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
+{
+ ThreadData *td = arg;
+ const SignalstatsContext *s = ctx->priv;
+ const AVFrame *in = td->in;
+ AVFrame *out = td->out;
+ const int w = in->width;
+ const int h = in->height;
+ const int slice_start = (h * jobnr ) / nb_jobs;
+ const int slice_end = (h * (jobnr+1)) / nb_jobs;
+ const uint8_t *p = in->data[0];
+ int lw = in->linesize[0];
+ int x, y, score = 0, filt;
+
+ for (y = slice_start; y < slice_end; y++) {
+
+ if (y - 1 < 0 || y + 1 >= h)
+ continue;
+
+ // detect two pixels above and below (to eliminate interlace artefacts)
+ // should check that video format is infact interlaced.
+
+#define FILTER(i, j) \
+ filter_tout_outlier(p[(y-j) * lw + x + i], \
+ p[ y * lw + x + i], \
+ p[(y+j) * lw + x + i])
+
+#define FILTER3(j) (FILTER(-1, j) && FILTER(0, j) && FILTER(1, j))
+
+ if (y - 2 >= 0 && y + 2 < h) {
+ for (x = 1; x < w - 1; x++) {
+ filt = FILTER3(2) && FILTER3(1);
+ score += filt;
+ if (filt && out)
+ burn_frame8(s, out, x, y);
+ }
+ } else {
+ for (x = 1; x < w - 1; x++) {
+ filt = FILTER3(1);
+ score += filt;
+ if (filt && out)
+ burn_frame8(s, out, x, y);
+ }
+ }
+ }
+ return score;
+}
+
+static int filter16_tout(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
+{
+ ThreadData *td = arg;
+ const SignalstatsContext *s = ctx->priv;
+ const AVFrame *in = td->in;
+ AVFrame *out = td->out;
+ const int w = in->width;
+ const int h = in->height;
+ const int slice_start = (h * jobnr ) / nb_jobs;
+ const int slice_end = (h * (jobnr+1)) / nb_jobs;
+ const uint16_t *p = (uint16_t *)in->data[0];
+ int lw = in->linesize[0] / 2;
+ int x, y, score = 0, filt;
+
+ for (y = slice_start; y < slice_end; y++) {
+
+ if (y - 1 < 0 || y + 1 >= h)
+ continue;
+
+ // detect two pixels above and below (to eliminate interlace artefacts)
+ // should check that video format is infact interlaced.
+
+ if (y - 2 >= 0 && y + 2 < h) {
+ for (x = 1; x < w - 1; x++) {
+ filt = FILTER3(2) && FILTER3(1);
+ score += filt;
+ if (filt && out)
+ burn_frame16(s, out, x, y);
+ }
+ } else {
+ for (x = 1; x < w - 1; x++) {
+ filt = FILTER3(1);
+ score += filt;
+ if (filt && out)
+ burn_frame16(s, out, x, y);
+ }
+ }
+ }
+ return score;
+}
+
+#define VREP_START 4
+
+static int filter8_vrep(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
+{
+ ThreadData *td = arg;
+ const SignalstatsContext *s = ctx->priv;
+ const AVFrame *in = td->in;
+ AVFrame *out = td->out;
+ const int w = in->width;
+ const int h = in->height;
+ const int slice_start = (h * jobnr ) / nb_jobs;
+ const int slice_end = (h * (jobnr+1)) / nb_jobs;
+ const uint8_t *p = in->data[0];
+ const int lw = in->linesize[0];
+ int x, y, score = 0;
+
+ for (y = slice_start; y < slice_end; y++) {
+ const int y2lw = (y - VREP_START) * lw;
+ const int ylw = y * lw;
+ int filt, totdiff = 0;
+
+ if (y < VREP_START)
+ continue;
+
+ for (x = 0; x < w; x++)
+ totdiff += abs(p[y2lw + x] - p[ylw + x]);
+ filt = totdiff < w;
+
+ score += filt;
+ if (filt && out)
+ for (x = 0; x < w; x++)
+ burn_frame8(s, out, x, y);
+ }
+ return score * w;
+}
+
+static int filter16_vrep(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
+{
+ ThreadData *td = arg;
+ const SignalstatsContext *s = ctx->priv;
+ const AVFrame *in = td->in;
+ AVFrame *out = td->out;
+ const int w = in->width;
+ const int h = in->height;
+ const int slice_start = (h * jobnr ) / nb_jobs;
+ const int slice_end = (h * (jobnr+1)) / nb_jobs;
+ const uint16_t *p = (uint16_t *)in->data[0];
+ const int lw = in->linesize[0] / 2;
+ int x, y, score = 0;
+
+ for (y = slice_start; y < slice_end; y++) {
+ const int y2lw = (y - VREP_START) * lw;
+ const int ylw = y * lw;
+ int64_t totdiff = 0;
+ int filt;
+
+ if (y < VREP_START)
+ continue;
+
+ for (x = 0; x < w; x++)
+ totdiff += abs(p[y2lw + x] - p[ylw + x]);
+ filt = totdiff < w;
+
+ score += filt;
+ if (filt && out)
+ for (x = 0; x < w; x++)
+ burn_frame16(s, out, x, y);
+ }
+ return score * w;
+}
+
+static const struct {
+ const char *name;
+ int (*process8)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs);
+ int (*process16)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs);
+} filters_def[] = {
+ {"TOUT", filter8_tout, filter16_tout},
+ {"VREP", filter8_vrep, filter16_vrep},
+ {"BRNG", filter8_brng, filter16_brng},
+ {NULL}
+};
+
+#define DEPTH 256
+
+static int compute_sat_hue_metrics8(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
+{
+ int i, j;
+ ThreadDataHueSatMetrics *td = arg;
+ const SignalstatsContext *s = ctx->priv;
+ const AVFrame *src = td->src;
+ AVFrame *dst_sat = td->dst_sat;
+ AVFrame *dst_hue = td->dst_hue;
+
+ const int slice_start = (s->chromah * jobnr ) / nb_jobs;
+ const int slice_end = (s->chromah * (jobnr+1)) / nb_jobs;
+
+ const int lsz_u = src->linesize[1];
+ const int lsz_v = src->linesize[2];
+ const uint8_t *p_u = src->data[1] + slice_start * lsz_u;
+ const uint8_t *p_v = src->data[2] + slice_start * lsz_v;
+
+ const int lsz_sat = dst_sat->linesize[0];
+ const int lsz_hue = dst_hue->linesize[0];
+ uint8_t *p_sat = dst_sat->data[0] + slice_start * lsz_sat;
+ uint8_t *p_hue = dst_hue->data[0] + slice_start * lsz_hue;
+
+ for (j = slice_start; j < slice_end; j++) {
+ for (i = 0; i < s->chromaw; i++) {
+ const int yuvu = p_u[i];
+ const int yuvv = p_v[i];
+ p_sat[i] = hypot(yuvu - 128, yuvv - 128); // int or round?
+ ((int16_t*)p_hue)[i] = floor((180 / M_PI) * atan2f(yuvu-128, yuvv-128) + 180);
+ }
+ p_u += lsz_u;
+ p_v += lsz_v;
+ p_sat += lsz_sat;
+ p_hue += lsz_hue;
+ }
+
+ return 0;
+}
+
+static int compute_sat_hue_metrics16(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
+{
+ int i, j;
+ ThreadDataHueSatMetrics *td = arg;
+ const SignalstatsContext *s = ctx->priv;
+ const AVFrame *src = td->src;
+ AVFrame *dst_sat = td->dst_sat;
+ AVFrame *dst_hue = td->dst_hue;
+ const int mid = 1 << (s->depth - 1);
+
+ const int slice_start = (s->chromah * jobnr ) / nb_jobs;
+ const int slice_end = (s->chromah * (jobnr+1)) / nb_jobs;
+
+ const int lsz_u = src->linesize[1] / 2;
+ const int lsz_v = src->linesize[2] / 2;
+ const uint16_t *p_u = (uint16_t*)src->data[1] + slice_start * lsz_u;
+ const uint16_t *p_v = (uint16_t*)src->data[2] + slice_start * lsz_v;
+
+ const int lsz_sat = dst_sat->linesize[0] / 2;
+ const int lsz_hue = dst_hue->linesize[0] / 2;
+ uint16_t *p_sat = (uint16_t*)dst_sat->data[0] + slice_start * lsz_sat;
+ uint16_t *p_hue = (uint16_t*)dst_hue->data[0] + slice_start * lsz_hue;
+
+ for (j = slice_start; j < slice_end; j++) {
+ for (i = 0; i < s->chromaw; i++) {
+ const int yuvu = p_u[i];
+ const int yuvv = p_v[i];
+ p_sat[i] = hypot(yuvu - mid, yuvv - mid); // int or round?
+ ((int16_t*)p_hue)[i] = floor((180 / M_PI) * atan2f(yuvu-mid, yuvv-mid) + 180);
+ }
+ p_u += lsz_u;
+ p_v += lsz_v;
+ p_sat += lsz_sat;
+ p_hue += lsz_hue;
+ }
+
+ return 0;
+}
+
+static unsigned compute_bit_depth(uint16_t mask)
+{
+ return av_popcount(mask);
+}
+
+static int filter_frame8(AVFilterLink *link, AVFrame *in)
+{
+ AVFilterContext *ctx = link->dst;
+ SignalstatsContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFrame *out = in;
+ int i, j;
+ int w = 0, cw = 0, // in
+ pw = 0, cpw = 0; // prev
+ int fil;
+ char metabuf[128];
+ unsigned int histy[DEPTH] = {0},
+ histu[DEPTH] = {0},
+ histv[DEPTH] = {0},
+ histhue[360] = {0},
+ histsat[DEPTH] = {0}; // limited to 8 bit data.
+ int miny = -1, minu = -1, minv = -1;
+ int maxy = -1, maxu = -1, maxv = -1;
+ int lowy = -1, lowu = -1, lowv = -1;
+ int highy = -1, highu = -1, highv = -1;
+ int minsat = -1, maxsat = -1, lowsat = -1, highsat = -1;
+ int lowp, highp, clowp, chighp;
+ int accy, accu, accv;
+ int accsat, acchue = 0;
+ int medhue, maxhue;
+ int toty = 0, totu = 0, totv = 0, totsat=0;
+ int tothue = 0;
+ int dify = 0, difu = 0, difv = 0;
+ uint16_t masky = 0, masku = 0, maskv = 0;
+
+ int filtot[FILT_NUMB] = {0};
+ AVFrame *prev;
+
+ AVFrame *sat = s->frame_sat;
+ AVFrame *hue = s->frame_hue;
+ const uint8_t *p_sat = sat->data[0];
+ const uint8_t *p_hue = hue->data[0];
+ const int lsz_sat = sat->linesize[0];
+ const int lsz_hue = hue->linesize[0];
+ ThreadDataHueSatMetrics td_huesat = {
+ .src = in,
+ .dst_sat = sat,
+ .dst_hue = hue,
+ };
+
+ if (!s->frame_prev)
+ s->frame_prev = av_frame_clone(in);
+
+ prev = s->frame_prev;
+
+ if (s->outfilter != FILTER_NONE) {
+ out = av_frame_clone(in);
+ av_frame_make_writable(out);
+ }
+
+ ctx->internal->execute(ctx, compute_sat_hue_metrics8, &td_huesat,
+ NULL, FFMIN(s->chromah, ff_filter_get_nb_threads(ctx)));
+
+ // Calculate luma histogram and difference with previous frame or field.
+ for (j = 0; j < link->h; j++) {
+ for (i = 0; i < link->w; i++) {
+ const int yuv = in->data[0][w + i];
+
+ masky |= yuv;
+ histy[yuv]++;
+ dify += abs(yuv - prev->data[0][pw + i]);
+ }
+ w += in->linesize[0];
+ pw += prev->linesize[0];
+ }
+
+ // Calculate chroma histogram and difference with previous frame or field.
+ for (j = 0; j < s->chromah; j++) {
+ for (i = 0; i < s->chromaw; i++) {
+ const int yuvu = in->data[1][cw+i];
+ const int yuvv = in->data[2][cw+i];
+
+ masku |= yuvu;
+ maskv |= yuvv;
+ histu[yuvu]++;
+ difu += abs(yuvu - prev->data[1][cpw+i]);
+ histv[yuvv]++;
+ difv += abs(yuvv - prev->data[2][cpw+i]);
+
+ histsat[p_sat[i]]++;
+ histhue[((int16_t*)p_hue)[i]]++;
+ }
+ cw += in->linesize[1];
+ cpw += prev->linesize[1];
+ p_sat += lsz_sat;
+ p_hue += lsz_hue;
+ }
+
+ for (fil = 0; fil < FILT_NUMB; fil ++) {
+ if (s->filters & 1<<fil) {
+ ThreadData td = {
+ .in = in,
+ .out = out != in && s->outfilter == fil ? out : NULL,
+ };
+ memset(s->jobs_rets, 0, s->nb_jobs * sizeof(*s->jobs_rets));
+ ctx->internal->execute(ctx, filters_def[fil].process8,
+ &td, s->jobs_rets, s->nb_jobs);
+ for (i = 0; i < s->nb_jobs; i++)
+ filtot[fil] += s->jobs_rets[i];
+ }
+ }
+
+ // find low / high based on histogram percentile
+ // these only need to be calculated once.
+
+ lowp = lrint(s->fs * 10 / 100.);
+ highp = lrint(s->fs * 90 / 100.);
+ clowp = lrint(s->cfs * 10 / 100.);
+ chighp = lrint(s->cfs * 90 / 100.);
+
+ accy = accu = accv = accsat = 0;
+ for (fil = 0; fil < DEPTH; fil++) {
+ if (miny < 0 && histy[fil]) miny = fil;
+ if (minu < 0 && histu[fil]) minu = fil;
+ if (minv < 0 && histv[fil]) minv = fil;
+ if (minsat < 0 && histsat[fil]) minsat = fil;
+
+ if (histy[fil]) maxy = fil;
+ if (histu[fil]) maxu = fil;
+ if (histv[fil]) maxv = fil;
+ if (histsat[fil]) maxsat = fil;
+
+ toty += histy[fil] * fil;
+ totu += histu[fil] * fil;
+ totv += histv[fil] * fil;
+ totsat += histsat[fil] * fil;
+
+ accy += histy[fil];
+ accu += histu[fil];
+ accv += histv[fil];
+ accsat += histsat[fil];
+
+ if (lowy == -1 && accy >= lowp) lowy = fil;
+ if (lowu == -1 && accu >= clowp) lowu = fil;
+ if (lowv == -1 && accv >= clowp) lowv = fil;
+ if (lowsat == -1 && accsat >= clowp) lowsat = fil;
+
+ if (highy == -1 && accy >= highp) highy = fil;
+ if (highu == -1 && accu >= chighp) highu = fil;
+ if (highv == -1 && accv >= chighp) highv = fil;
+ if (highsat == -1 && accsat >= chighp) highsat = fil;
+ }
+
+ maxhue = histhue[0];
+ medhue = -1;
+ for (fil = 0; fil < 360; fil++) {
+ tothue += histhue[fil] * fil;
+ acchue += histhue[fil];
+
+ if (medhue == -1 && acchue > s->cfs / 2)
+ medhue = fil;
+ if (histhue[fil] > maxhue) {
+ maxhue = histhue[fil];
+ }
+ }
+
+ av_frame_free(&s->frame_prev);
+ s->frame_prev = av_frame_clone(in);
+
+#define SET_META(key, fmt, val) do { \
+ snprintf(metabuf, sizeof(metabuf), fmt, val); \
+ av_dict_set(&out->metadata, "lavfi.signalstats." key, metabuf, 0); \
+} while (0)
+
+ SET_META("YMIN", "%d", miny);
+ SET_META("YLOW", "%d", lowy);
+ SET_META("YAVG", "%g", 1.0 * toty / s->fs);
+ SET_META("YHIGH", "%d", highy);
+ SET_META("YMAX", "%d", maxy);
+
+ SET_META("UMIN", "%d", minu);
+ SET_META("ULOW", "%d", lowu);
+ SET_META("UAVG", "%g", 1.0 * totu / s->cfs);
+ SET_META("UHIGH", "%d", highu);
+ SET_META("UMAX", "%d", maxu);
+
+ SET_META("VMIN", "%d", minv);
+ SET_META("VLOW", "%d", lowv);
+ SET_META("VAVG", "%g", 1.0 * totv / s->cfs);
+ SET_META("VHIGH", "%d", highv);
+ SET_META("VMAX", "%d", maxv);
+
+ SET_META("SATMIN", "%d", minsat);
+ SET_META("SATLOW", "%d", lowsat);
+ SET_META("SATAVG", "%g", 1.0 * totsat / s->cfs);
+ SET_META("SATHIGH", "%d", highsat);
+ SET_META("SATMAX", "%d", maxsat);
+
+ SET_META("HUEMED", "%d", medhue);
+ SET_META("HUEAVG", "%g", 1.0 * tothue / s->cfs);
+
+ SET_META("YDIF", "%g", 1.0 * dify / s->fs);
+ SET_META("UDIF", "%g", 1.0 * difu / s->cfs);
+ SET_META("VDIF", "%g", 1.0 * difv / s->cfs);
+
+ SET_META("YBITDEPTH", "%d", compute_bit_depth(masky));
+ SET_META("UBITDEPTH", "%d", compute_bit_depth(masku));
+ SET_META("VBITDEPTH", "%d", compute_bit_depth(maskv));
+
+ for (fil = 0; fil < FILT_NUMB; fil ++) {
+ if (s->filters & 1<<fil) {
+ char metaname[128];
+ snprintf(metabuf, sizeof(metabuf), "%g", 1.0 * filtot[fil] / s->fs);
+ snprintf(metaname, sizeof(metaname), "lavfi.signalstats.%s", filters_def[fil].name);
+ av_dict_set(&out->metadata, metaname, metabuf, 0);
+ }
+ }
+
+ if (in != out)
+ av_frame_free(&in);
+ return ff_filter_frame(outlink, out);
+}
+
+static int filter_frame16(AVFilterLink *link, AVFrame *in)
+{
+ AVFilterContext *ctx = link->dst;
+ SignalstatsContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFrame *out = in;
+ int i, j;
+ int w = 0, cw = 0, // in
+ pw = 0, cpw = 0; // prev
+ int fil;
+ char metabuf[128];
+ unsigned int *histy = s->histy,
+ *histu = s->histu,
+ *histv = s->histv,
+ histhue[360] = {0},
+ *histsat = s->histsat;
+ int miny = -1, minu = -1, minv = -1;
+ int maxy = -1, maxu = -1, maxv = -1;
+ int lowy = -1, lowu = -1, lowv = -1;
+ int highy = -1, highu = -1, highv = -1;
+ int minsat = -1, maxsat = -1, lowsat = -1, highsat = -1;
+ int lowp, highp, clowp, chighp;
+ int accy, accu, accv;
+ int accsat, acchue = 0;
+ int medhue, maxhue;
+ int64_t toty = 0, totu = 0, totv = 0, totsat=0;
+ int64_t tothue = 0;
+ int64_t dify = 0, difu = 0, difv = 0;
+ uint16_t masky = 0, masku = 0, maskv = 0;
+
+ int filtot[FILT_NUMB] = {0};
+ AVFrame *prev;
+
+ AVFrame *sat = s->frame_sat;
+ AVFrame *hue = s->frame_hue;
+ const uint16_t *p_sat = (uint16_t *)sat->data[0];
+ const uint16_t *p_hue = (uint16_t *)hue->data[0];
+ const int lsz_sat = sat->linesize[0] / 2;
+ const int lsz_hue = hue->linesize[0] / 2;
+ ThreadDataHueSatMetrics td_huesat = {
+ .src = in,
+ .dst_sat = sat,
+ .dst_hue = hue,
+ };
+
+ if (!s->frame_prev)
+ s->frame_prev = av_frame_clone(in);
+
+ prev = s->frame_prev;
+
+ if (s->outfilter != FILTER_NONE) {
+ out = av_frame_clone(in);
+ av_frame_make_writable(out);
+ }
+
+ ctx->internal->execute(ctx, compute_sat_hue_metrics16, &td_huesat,
+ NULL, FFMIN(s->chromah, ff_filter_get_nb_threads(ctx)));
+
+ // Calculate luma histogram and difference with previous frame or field.
+ memset(s->histy, 0, (1 << s->depth) * sizeof(*s->histy));
+ for (j = 0; j < link->h; j++) {
+ for (i = 0; i < link->w; i++) {
+ const int yuv = AV_RN16(in->data[0] + w + i * 2);
+
+ masky |= yuv;
+ histy[yuv]++;
+ dify += abs(yuv - AV_RN16(prev->data[0] + pw + i * 2));
+ }
+ w += in->linesize[0];
+ pw += prev->linesize[0];
+ }
+
+ // Calculate chroma histogram and difference with previous frame or field.
+ memset(s->histu, 0, (1 << s->depth) * sizeof(*s->histu));
+ memset(s->histv, 0, (1 << s->depth) * sizeof(*s->histv));
+ memset(s->histsat, 0, (1 << s->depth) * sizeof(*s->histsat));
+ for (j = 0; j < s->chromah; j++) {
+ for (i = 0; i < s->chromaw; i++) {
+ const int yuvu = AV_RN16(in->data[1] + cw + i * 2);
+ const int yuvv = AV_RN16(in->data[2] + cw + i * 2);
+
+ masku |= yuvu;
+ maskv |= yuvv;
+ histu[yuvu]++;
+ difu += abs(yuvu - AV_RN16(prev->data[1] + cpw + i * 2));
+ histv[yuvv]++;
+ difv += abs(yuvv - AV_RN16(prev->data[2] + cpw + i * 2));
+
+ histsat[p_sat[i]]++;
+ histhue[((int16_t*)p_hue)[i]]++;
+ }
+ cw += in->linesize[1];
+ cpw += prev->linesize[1];
+ p_sat += lsz_sat;
+ p_hue += lsz_hue;
+ }
+
+ for (fil = 0; fil < FILT_NUMB; fil ++) {
+ if (s->filters & 1<<fil) {
+ ThreadData td = {
+ .in = in,
+ .out = out != in && s->outfilter == fil ? out : NULL,
+ };
+ memset(s->jobs_rets, 0, s->nb_jobs * sizeof(*s->jobs_rets));
+ ctx->internal->execute(ctx, filters_def[fil].process16,
+ &td, s->jobs_rets, s->nb_jobs);
+ for (i = 0; i < s->nb_jobs; i++)
+ filtot[fil] += s->jobs_rets[i];
+ }
+ }
+
+ // find low / high based on histogram percentile
+ // these only need to be calculated once.
+
+ lowp = lrint(s->fs * 10 / 100.);
+ highp = lrint(s->fs * 90 / 100.);
+ clowp = lrint(s->cfs * 10 / 100.);
+ chighp = lrint(s->cfs * 90 / 100.);
+
+ accy = accu = accv = accsat = 0;
+ for (fil = 0; fil < 1 << s->depth; fil++) {
+ if (miny < 0 && histy[fil]) miny = fil;
+ if (minu < 0 && histu[fil]) minu = fil;
+ if (minv < 0 && histv[fil]) minv = fil;
+ if (minsat < 0 && histsat[fil]) minsat = fil;
+
+ if (histy[fil]) maxy = fil;
+ if (histu[fil]) maxu = fil;
+ if (histv[fil]) maxv = fil;
+ if (histsat[fil]) maxsat = fil;
+
+ toty += histy[fil] * fil;
+ totu += histu[fil] * fil;
+ totv += histv[fil] * fil;
+ totsat += histsat[fil] * fil;
+
+ accy += histy[fil];
+ accu += histu[fil];
+ accv += histv[fil];
+ accsat += histsat[fil];
+
+ if (lowy == -1 && accy >= lowp) lowy = fil;
+ if (lowu == -1 && accu >= clowp) lowu = fil;
+ if (lowv == -1 && accv >= clowp) lowv = fil;
+ if (lowsat == -1 && accsat >= clowp) lowsat = fil;
+
+ if (highy == -1 && accy >= highp) highy = fil;
+ if (highu == -1 && accu >= chighp) highu = fil;
+ if (highv == -1 && accv >= chighp) highv = fil;
+ if (highsat == -1 && accsat >= chighp) highsat = fil;
+ }
+
+ maxhue = histhue[0];
+ medhue = -1;
+ for (fil = 0; fil < 360; fil++) {
+ tothue += histhue[fil] * fil;
+ acchue += histhue[fil];
+
+ if (medhue == -1 && acchue > s->cfs / 2)
+ medhue = fil;
+ if (histhue[fil] > maxhue) {
+ maxhue = histhue[fil];
+ }
+ }
+
+ av_frame_free(&s->frame_prev);
+ s->frame_prev = av_frame_clone(in);
+
+ SET_META("YMIN", "%d", miny);
+ SET_META("YLOW", "%d", lowy);
+ SET_META("YAVG", "%g", 1.0 * toty / s->fs);
+ SET_META("YHIGH", "%d", highy);
+ SET_META("YMAX", "%d", maxy);
+
+ SET_META("UMIN", "%d", minu);
+ SET_META("ULOW", "%d", lowu);
+ SET_META("UAVG", "%g", 1.0 * totu / s->cfs);
+ SET_META("UHIGH", "%d", highu);
+ SET_META("UMAX", "%d", maxu);
+
+ SET_META("VMIN", "%d", minv);
+ SET_META("VLOW", "%d", lowv);
+ SET_META("VAVG", "%g", 1.0 * totv / s->cfs);
+ SET_META("VHIGH", "%d", highv);
+ SET_META("VMAX", "%d", maxv);
+
+ SET_META("SATMIN", "%d", minsat);
+ SET_META("SATLOW", "%d", lowsat);
+ SET_META("SATAVG", "%g", 1.0 * totsat / s->cfs);
+ SET_META("SATHIGH", "%d", highsat);
+ SET_META("SATMAX", "%d", maxsat);
+
+ SET_META("HUEMED", "%d", medhue);
+ SET_META("HUEAVG", "%g", 1.0 * tothue / s->cfs);
+
+ SET_META("YDIF", "%g", 1.0 * dify / s->fs);
+ SET_META("UDIF", "%g", 1.0 * difu / s->cfs);
+ SET_META("VDIF", "%g", 1.0 * difv / s->cfs);
+
+ SET_META("YBITDEPTH", "%d", compute_bit_depth(masky));
+ SET_META("UBITDEPTH", "%d", compute_bit_depth(masku));
+ SET_META("VBITDEPTH", "%d", compute_bit_depth(maskv));
+
+ for (fil = 0; fil < FILT_NUMB; fil ++) {
+ if (s->filters & 1<<fil) {
+ char metaname[128];
+ snprintf(metabuf, sizeof(metabuf), "%g", 1.0 * filtot[fil] / s->fs);
+ snprintf(metaname, sizeof(metaname), "lavfi.signalstats.%s", filters_def[fil].name);
+ av_dict_set(&out->metadata, metaname, metabuf, 0);
+ }
+ }
+
+ if (in != out)
+ av_frame_free(&in);
+ return ff_filter_frame(outlink, out);
+}
+
+static int filter_frame(AVFilterLink *link, AVFrame *in)
+{
+ AVFilterContext *ctx = link->dst;
+ SignalstatsContext *s = ctx->priv;
+
+ if (s->depth > 8)
+ return filter_frame16(link, in);
+ else
+ return filter_frame8(link, in);
+}
+
+static const AVFilterPad signalstats_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad signalstats_outputs[] = {
+ {
+ .name = "default",
+ .config_props = config_props,
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_signalstats = {
+ .name = "signalstats",
+ .description = "Generate statistics from video analysis.",
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .priv_size = sizeof(SignalstatsContext),
+ .inputs = signalstats_inputs,
+ .outputs = signalstats_outputs,
+ .priv_class = &signalstats_class,
+ .flags = AVFILTER_FLAG_SLICE_THREADS,
+};
diff --git a/libavfilter/vf_signature.c b/libavfilter/vf_signature.c
new file mode 100644
index 0000000000..57cb96b6c4
--- /dev/null
+++ b/libavfilter/vf_signature.c
@@ -0,0 +1,767 @@
+/*
+ * Copyright (c) 2017 Gerion Entrup
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/**
+ * @file
+ * MPEG-7 video signature calculation and lookup filter
+ * @see http://epubs.surrey.ac.uk/531590/1/MPEG-7%20Video%20Signature%20Author%27s%20Copy.pdf
+ */
+
+#include <float.h>
+#include "libavcodec/put_bits.h"
+#include "libavformat/avformat.h"
+#include "libavutil/opt.h"
+#include "libavutil/avstring.h"
+#include "libavutil/intreadwrite.h"
+#include "libavutil/timestamp.h"
+#include "avfilter.h"
+#include "internal.h"
+#include "signature.h"
+#include "signature_lookup.c"
+
+#define OFFSET(x) offsetof(SignatureContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM
+#define BLOCK_LCM (int64_t) 476985600
+
+static const AVOption signature_options[] = {
+ { "detectmode", "set the detectmode",
+ OFFSET(mode), AV_OPT_TYPE_INT, {.i64 = MODE_OFF}, 0, NB_LOOKUP_MODE-1, FLAGS, "mode" },
+ { "off", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = MODE_OFF}, 0, 0, .flags = FLAGS, "mode" },
+ { "full", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = MODE_FULL}, 0, 0, .flags = FLAGS, "mode" },
+ { "fast", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = MODE_FAST}, 0, 0, .flags = FLAGS, "mode" },
+ { "nb_inputs", "number of inputs",
+ OFFSET(nb_inputs), AV_OPT_TYPE_INT, {.i64 = 1}, 1, INT_MAX, FLAGS },
+ { "filename", "filename for output files",
+ OFFSET(filename), AV_OPT_TYPE_STRING, {.str = ""}, 0, NB_FORMATS-1, FLAGS },
+ { "format", "set output format",
+ OFFSET(format), AV_OPT_TYPE_INT, {.i64 = FORMAT_BINARY}, 0, 1, FLAGS , "format" },
+ { "binary", 0, 0, AV_OPT_TYPE_CONST, {.i64=FORMAT_BINARY}, 0, 0, FLAGS, "format" },
+ { "xml", 0, 0, AV_OPT_TYPE_CONST, {.i64=FORMAT_XML}, 0, 0, FLAGS, "format" },
+ { "th_d", "threshold to detect one word as similar",
+ OFFSET(thworddist), AV_OPT_TYPE_INT, {.i64 = 9000}, 1, INT_MAX, FLAGS },
+ { "th_dc", "threshold to detect all words as similar",
+ OFFSET(thcomposdist), AV_OPT_TYPE_INT, {.i64 = 60000}, 1, INT_MAX, FLAGS },
+ { "th_xh", "threshold to detect frames as similar",
+ OFFSET(thl1), AV_OPT_TYPE_INT, {.i64 = 116}, 1, INT_MAX, FLAGS },
+ { "th_di", "minimum length of matching sequence in frames",
+ OFFSET(thdi), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, FLAGS },
+ { "th_it", "threshold for relation of good to all frames",
+ OFFSET(thit), AV_OPT_TYPE_DOUBLE, {.dbl = 0.5}, 0.0, 1.0, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(signature);
+
+static int query_formats(AVFilterContext *ctx)
+{
+ /* all formats with a seperate gray value */
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_GRAY8,
+ AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
+ AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P,
+ AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUVJ420P,
+ AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P,
+ AV_PIX_FMT_YUVJ440P,
+ AV_PIX_FMT_NV12, AV_PIX_FMT_NV21,
+ AV_PIX_FMT_NONE
+ };
+
+ return ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ SignatureContext *sic = ctx->priv;
+ StreamContext *sc = &(sic->streamcontexts[FF_INLINK_IDX(inlink)]);
+
+ sc->time_base = inlink->time_base;
+ /* test for overflow */
+ sc->divide = (((uint64_t) inlink->w/32) * (inlink->w/32 + 1) * (inlink->h/32 * inlink->h/32 + 1) > INT64_MAX / (BLOCK_LCM * 255));
+ if (sc->divide) {
+ av_log(ctx, AV_LOG_WARNING, "Input dimension too high for precise calculation, numbers will be rounded.\n");
+ }
+ sc->w = inlink->w;
+ sc->h = inlink->h;
+ return 0;
+}
+
+static int get_block_size(const Block *b)
+{
+ return (b->to.y - b->up.y + 1) * (b->to.x - b->up.x + 1);
+}
+
+static uint64_t get_block_sum(StreamContext *sc, uint64_t intpic[32][32], const Block *b)
+{
+ uint64_t sum = 0;
+
+ int x0, y0, x1, y1;
+
+ x0 = b->up.x;
+ y0 = b->up.y;
+ x1 = b->to.x;
+ y1 = b->to.y;
+
+ if (x0-1 >= 0 && y0-1 >= 0) {
+ sum = intpic[y1][x1] + intpic[y0-1][x0-1] - intpic[y1][x0-1] - intpic[y0-1][x1];
+ } else if (x0-1 >= 0) {
+ sum = intpic[y1][x1] - intpic[y1][x0-1];
+ } else if (y0-1 >= 0) {
+ sum = intpic[y1][x1] - intpic[y0-1][x1];
+ } else {
+ sum = intpic[y1][x1];
+ }
+ return sum;
+}
+
+static int cmp(const uint64_t *a, const uint64_t *b)
+{
+ return *a < *b ? -1 : ( *a > *b ? 1 : 0 );
+}
+
+/**
+ * sets the bit at position pos to 1 in data
+ */
+static void set_bit(uint8_t* data, size_t pos)
+{
+ uint8_t mask = 1 << 7-(pos%8);
+ data[pos/8] |= mask;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *picref)
+{
+ AVFilterContext *ctx = inlink->dst;
+ SignatureContext *sic = ctx->priv;
+ StreamContext *sc = &(sic->streamcontexts[FF_INLINK_IDX(inlink)]);
+ FineSignature* fs;
+
+ static const uint8_t pot3[5] = { 3*3*3*3, 3*3*3, 3*3, 3, 1 };
+ /* indexes of words : 210,217,219,274,334 44,175,233,270,273 57,70,103,237,269 100,285,295,337,354 101,102,111,275,296
+ s2usw = sorted to unsorted wordvec: 44 is at index 5, 57 at index 10...
+ */
+ static const unsigned int wordvec[25] = {44,57,70,100,101,102,103,111,175,210,217,219,233,237,269,270,273,274,275,285,295,296,334,337,354};
+ static const uint8_t s2usw[25] = { 5,10,11, 15, 20, 21, 12, 22, 6, 0, 1, 2, 7, 13, 14, 8, 9, 3, 23, 16, 17, 24, 4, 18, 19};
+
+ uint8_t wordt2b[5] = { 0, 0, 0, 0, 0 }; /* word ternary to binary */
+ uint64_t intpic[32][32];
+ uint64_t rowcount;
+ uint8_t *p = picref->data[0];
+ int inti, intj;
+ int *intjlut;
+
+ uint64_t conflist[DIFFELEM_SIZE];
+ int f = 0, g = 0, w = 0;
+ int32_t dh1 = 1, dh2 = 1, dw1 = 1, dw2 = 1, a, b;
+ int64_t denom;
+ int i, j, k, ternary;
+ uint64_t blocksum;
+ int blocksize;
+ int64_t th; /* threshold */
+ int64_t sum;
+
+ int64_t precfactor = (sc->divide) ? 65536 : BLOCK_LCM;
+
+ /* initialize fs */
+ if (sc->curfinesig) {
+ fs = av_mallocz(sizeof(FineSignature));
+ if (!fs)
+ return AVERROR(ENOMEM);
+ sc->curfinesig->next = fs;
+ fs->prev = sc->curfinesig;
+ sc->curfinesig = fs;
+ } else {
+ fs = sc->curfinesig = sc->finesiglist;
+ sc->curcoarsesig1->first = fs;
+ }
+
+ fs->pts = picref->pts;
+ fs->index = sc->lastindex++;
+
+ memset(intpic, 0, sizeof(uint64_t)*32*32);
+ intjlut = av_malloc_array(inlink->w, sizeof(int));
+ if (!intjlut)
+ return AVERROR(ENOMEM);
+ for (i = 0; i < inlink->w; i++) {
+ intjlut[i] = (i*32)/inlink->w;
+ }
+
+ for (i = 0; i < inlink->h; i++) {
+ inti = (i*32)/inlink->h;
+ for (j = 0; j < inlink->w; j++) {
+ intj = intjlut[j];
+ intpic[inti][intj] += p[j];
+ }
+ p += picref->linesize[0];
+ }
+ av_freep(&intjlut);
+
+ /* The following calculates a summed area table (intpic) and brings the numbers
+ * in intpic to the same denominator.
+ * So you only have to handle the numinator in the following sections.
+ */
+ dh1 = inlink->h / 32;
+ if (inlink->h % 32)
+ dh2 = dh1 + 1;
+ dw1 = inlink->w / 32;
+ if (inlink->w % 32)
+ dw2 = dw1 + 1;
+ denom = (sc->divide) ? dh1 * dh2 * dw1 * dw2 : 1;
+
+ for (i = 0; i < 32; i++) {
+ rowcount = 0;
+ a = 1;
+ if (dh2 > 1) {
+ a = ((inlink->h*(i+1))%32 == 0) ? (inlink->h*(i+1))/32 - 1 : (inlink->h*(i+1))/32;
+ a -= ((inlink->h*i)%32 == 0) ? (inlink->h*i)/32 - 1 : (inlink->h*i)/32;
+ a = (a == dh1)? dh2 : dh1;
+ }
+ for (j = 0; j < 32; j++) {
+ b = 1;
+ if (dw2 > 1) {
+ b = ((inlink->w*(j+1))%32 == 0) ? (inlink->w*(j+1))/32 - 1 : (inlink->w*(j+1))/32;
+ b -= ((inlink->w*j)%32 == 0) ? (inlink->w*j)/32 - 1 : (inlink->w*j)/32;
+ b = (b == dw1)? dw2 : dw1;
+ }
+ rowcount += intpic[i][j] * a * b * precfactor / denom;
+ if (i > 0) {
+ intpic[i][j] = intpic[i-1][j] + rowcount;
+ } else {
+ intpic[i][j] = rowcount;
+ }
+ }
+ }
+
+ denom = (sc->divide) ? 1 : dh1 * dh2 * dw1 * dw2;
+
+ for (i = 0; i < ELEMENT_COUNT; i++) {
+ const ElemCat* elemcat = elements[i];
+ int64_t* elemsignature;
+ uint64_t* sortsignature;
+
+ elemsignature = av_malloc_array(elemcat->elem_count, sizeof(int64_t));
+ if (!elemsignature)
+ return AVERROR(ENOMEM);
+ sortsignature = av_malloc_array(elemcat->elem_count, sizeof(int64_t));
+ if (!sortsignature)
+ return AVERROR(ENOMEM);
+
+ for (j = 0; j < elemcat->elem_count; j++) {
+ blocksum = 0;
+ blocksize = 0;
+ for (k = 0; k < elemcat->left_count; k++) {
+ blocksum += get_block_sum(sc, intpic, &elemcat->blocks[j*elemcat->block_count+k]);
+ blocksize += get_block_size(&elemcat->blocks[j*elemcat->block_count+k]);
+ }
+ sum = blocksum / blocksize;
+ if (elemcat->av_elem) {
+ sum -= 128 * precfactor * denom;
+ } else {
+ blocksum = 0;
+ blocksize = 0;
+ for (; k < elemcat->block_count; k++) {
+ blocksum += get_block_sum(sc, intpic, &elemcat->blocks[j*elemcat->block_count+k]);
+ blocksize += get_block_size(&elemcat->blocks[j*elemcat->block_count+k]);
+ }
+ sum -= blocksum / blocksize;
+ conflist[g++] = FFABS(sum * 8 / (precfactor * denom));
+ }
+
+ elemsignature[j] = sum;
+ sortsignature[j] = FFABS(sum);
+ }
+
+ /* get threshold */
+ qsort(sortsignature, elemcat->elem_count, sizeof(uint64_t), (void*) cmp);
+ th = sortsignature[(int) (elemcat->elem_count*0.333)];
+
+ /* ternarize */
+ for (j = 0; j < elemcat->elem_count; j++) {
+ if (elemsignature[j] < -th) {
+ ternary = 0;
+ } else if (elemsignature[j] <= th) {
+ ternary = 1;
+ } else {
+ ternary = 2;
+ }
+ fs->framesig[f/5] += ternary * pot3[f%5];
+
+ if (f == wordvec[w]) {
+ fs->words[s2usw[w]/5] += ternary * pot3[wordt2b[s2usw[w]/5]++];
+ if (w < 24)
+ w++;
+ }
+ f++;
+ }
+ av_freep(&elemsignature);
+ av_freep(&sortsignature);
+ }
+
+ /* confidence */
+ qsort(conflist, DIFFELEM_SIZE, sizeof(uint64_t), (void*) cmp);
+ fs->confidence = FFMIN(conflist[DIFFELEM_SIZE/2], 255);
+
+ /* coarsesignature */
+ if (sc->coarsecount == 0) {
+ if (sc->curcoarsesig2) {
+ sc->curcoarsesig1 = av_mallocz(sizeof(CoarseSignature));
+ if (!sc->curcoarsesig1)
+ return AVERROR(ENOMEM);
+ sc->curcoarsesig1->first = fs;
+ sc->curcoarsesig2->next = sc->curcoarsesig1;
+ sc->coarseend = sc->curcoarsesig1;
+ }
+ }
+ if (sc->coarsecount == 45) {
+ sc->midcoarse = 1;
+ sc->curcoarsesig2 = av_mallocz(sizeof(CoarseSignature));
+ if (!sc->curcoarsesig2)
+ return AVERROR(ENOMEM);
+ sc->curcoarsesig2->first = fs;
+ sc->curcoarsesig1->next = sc->curcoarsesig2;
+ sc->coarseend = sc->curcoarsesig2;
+ }
+ for (i = 0; i < 5; i++) {
+ set_bit(sc->curcoarsesig1->data[i], fs->words[i]);
+ }
+ /* assuming the actual frame is the last */
+ sc->curcoarsesig1->last = fs;
+ if (sc->midcoarse) {
+ for (i = 0; i < 5; i++) {
+ set_bit(sc->curcoarsesig2->data[i], fs->words[i]);
+ }
+ sc->curcoarsesig2->last = fs;
+ }
+
+ sc->coarsecount = (sc->coarsecount+1)%90;
+
+ /* debug printing finesignature */
+ if (av_log_get_level() == AV_LOG_DEBUG) {
+ av_log(ctx, AV_LOG_DEBUG, "input %d, confidence: %d\n", FF_INLINK_IDX(inlink), fs->confidence);
+
+ av_log(ctx, AV_LOG_DEBUG, "words:");
+ for (i = 0; i < 5; i++) {
+ av_log(ctx, AV_LOG_DEBUG, " %d:", fs->words[i] );
+ av_log(ctx, AV_LOG_DEBUG, " %d", fs->words[i] / pot3[0] );
+ for (j = 1; j < 5; j++)
+ av_log(ctx, AV_LOG_DEBUG, ",%d", fs->words[i] % pot3[j-1] / pot3[j] );
+ av_log(ctx, AV_LOG_DEBUG, ";");
+ }
+ av_log(ctx, AV_LOG_DEBUG, "\n");
+
+ av_log(ctx, AV_LOG_DEBUG, "framesignature:");
+ for (i = 0; i < SIGELEM_SIZE/5; i++) {
+ av_log(ctx, AV_LOG_DEBUG, " %d", fs->framesig[i] / pot3[0] );
+ for (j = 1; j < 5; j++)
+ av_log(ctx, AV_LOG_DEBUG, ",%d", fs->framesig[i] % pot3[j-1] / pot3[j] );
+ }
+ av_log(ctx, AV_LOG_DEBUG, "\n");
+ }
+
+ if (FF_INLINK_IDX(inlink) == 0)
+ return ff_filter_frame(inlink->dst->outputs[0], picref);
+ return 1;
+}
+
+static int xml_export(AVFilterContext *ctx, StreamContext *sc, const char* filename)
+{
+ FineSignature* fs;
+ CoarseSignature* cs;
+ int i, j;
+ FILE* f;
+ unsigned int pot3[5] = { 3*3*3*3, 3*3*3, 3*3, 3, 1 };
+
+ f = fopen(filename, "w");
+ if (!f) {
+ int err = AVERROR(EINVAL);
+ char buf[128];
+ av_strerror(err, buf, sizeof(buf));
+ av_log(ctx, AV_LOG_ERROR, "cannot open xml file %s: %s\n", filename, buf);
+ return err;
+ }
+
+ /* header */
+ fprintf(f, "<?xml version='1.0' encoding='ASCII' ?>\n");
+ fprintf(f, "<Mpeg7 xmlns=\"urn:mpeg:mpeg7:schema:2001\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:schemaLocation=\"urn:mpeg:mpeg7:schema:2001 schema/Mpeg7-2001.xsd\">\n");
+ fprintf(f, " <DescriptionUnit xsi:type=\"DescriptorCollectionType\">\n");
+ fprintf(f, " <Descriptor xsi:type=\"VideoSignatureType\">\n");
+ fprintf(f, " <VideoSignatureRegion>\n");
+ fprintf(f, " <VideoSignatureSpatialRegion>\n");
+ fprintf(f, " <Pixel>0 0 </Pixel>\n");
+ fprintf(f, " <Pixel>%d %d </Pixel>\n", sc->w - 1, sc->h - 1);
+ fprintf(f, " </VideoSignatureSpatialRegion>\n");
+ fprintf(f, " <StartFrameOfSpatialRegion>0</StartFrameOfSpatialRegion>\n");
+ /* hoping num is 1, other values are vague */
+ fprintf(f, " <MediaTimeUnit>%d</MediaTimeUnit>\n", sc->time_base.den / sc->time_base.num);
+ fprintf(f, " <MediaTimeOfSpatialRegion>\n");
+ fprintf(f, " <StartMediaTimeOfSpatialRegion>0</StartMediaTimeOfSpatialRegion>\n");
+ fprintf(f, " <EndMediaTimeOfSpatialRegion>%" PRIu64 "</EndMediaTimeOfSpatialRegion>\n", sc->coarseend->last->pts);
+ fprintf(f, " </MediaTimeOfSpatialRegion>\n");
+
+ /* coarsesignatures */
+ for (cs = sc->coarsesiglist; cs; cs = cs->next) {
+ fprintf(f, " <VSVideoSegment>\n");
+ fprintf(f, " <StartFrameOfSegment>%" PRIu32 "</StartFrameOfSegment>\n", cs->first->index);
+ fprintf(f, " <EndFrameOfSegment>%" PRIu32 "</EndFrameOfSegment>\n", cs->last->index);
+ fprintf(f, " <MediaTimeOfSegment>\n");
+ fprintf(f, " <StartMediaTimeOfSegment>%" PRIu64 "</StartMediaTimeOfSegment>\n", cs->first->pts);
+ fprintf(f, " <EndMediaTimeOfSegment>%" PRIu64 "</EndMediaTimeOfSegment>\n", cs->last->pts);
+ fprintf(f, " </MediaTimeOfSegment>\n");
+ for (i = 0; i < 5; i++) {
+ fprintf(f, " <BagOfWords>");
+ for (j = 0; j < 31; j++) {
+ uint8_t n = cs->data[i][j];
+ if (j < 30) {
+ fprintf(f, "%d %d %d %d %d %d %d %d ", (n & 0x80) >> 7,
+ (n & 0x40) >> 6,
+ (n & 0x20) >> 5,
+ (n & 0x10) >> 4,
+ (n & 0x08) >> 3,
+ (n & 0x04) >> 2,
+ (n & 0x02) >> 1,
+ (n & 0x01));
+ } else {
+ /* print only 3 bit in last byte */
+ fprintf(f, "%d %d %d ", (n & 0x80) >> 7,
+ (n & 0x40) >> 6,
+ (n & 0x20) >> 5);
+ }
+ }
+ fprintf(f, "</BagOfWords>\n");
+ }
+ fprintf(f, " </VSVideoSegment>\n");
+ }
+
+ /* finesignatures */
+ for (fs = sc->finesiglist; fs; fs = fs->next) {
+ fprintf(f, " <VideoFrame>\n");
+ fprintf(f, " <MediaTimeOfFrame>%" PRIu64 "</MediaTimeOfFrame>\n", fs->pts);
+ /* confidence */
+ fprintf(f, " <FrameConfidence>%d</FrameConfidence>\n", fs->confidence);
+ /* words */
+ fprintf(f, " <Word>");
+ for (i = 0; i < 5; i++) {
+ fprintf(f, "%d ", fs->words[i]);
+ if (i < 4) {
+ fprintf(f, " ");
+ }
+ }
+ fprintf(f, "</Word>\n");
+ /* framesignature */
+ fprintf(f, " <FrameSignature>");
+ for (i = 0; i< SIGELEM_SIZE/5; i++) {
+ if (i > 0) {
+ fprintf(f, " ");
+ }
+ fprintf(f, "%d ", fs->framesig[i] / pot3[0]);
+ for (j = 1; j < 5; j++)
+ fprintf(f, " %d ", fs->framesig[i] % pot3[j-1] / pot3[j] );
+ }
+ fprintf(f, "</FrameSignature>\n");
+ fprintf(f, " </VideoFrame>\n");
+ }
+ fprintf(f, " </VideoSignatureRegion>\n");
+ fprintf(f, " </Descriptor>\n");
+ fprintf(f, " </DescriptionUnit>\n");
+ fprintf(f, "</Mpeg7>\n");
+
+ fclose(f);
+ return 0;
+}
+
+static int binary_export(AVFilterContext *ctx, StreamContext *sc, const char* filename)
+{
+ FILE* f;
+ FineSignature* fs;
+ CoarseSignature* cs;
+ uint32_t numofsegments = (sc->lastindex + 44)/45;
+ int i, j;
+ PutBitContext buf;
+ /* buffer + header + coarsesignatures + finesignature */
+ int len = (512 + 6 * 32 + 3*16 + 2 +
+ numofsegments * (4*32 + 1 + 5*243) +
+ sc->lastindex * (2 + 32 + 6*8 + 608)) / 8;
+ uint8_t* buffer = av_malloc_array(len, sizeof(uint8_t));
+ if (!buffer)
+ return AVERROR(ENOMEM);
+
+ f = fopen(filename, "wb");
+ if (!f) {
+ int err = AVERROR(EINVAL);
+ char buf[128];
+ av_strerror(err, buf, sizeof(buf));
+ av_log(ctx, AV_LOG_ERROR, "cannot open file %s: %s\n", filename, buf);
+ return err;
+ }
+ init_put_bits(&buf, buffer, len);
+
+ put_bits32(&buf, 1); /* NumOfSpatial Regions, only 1 supported */
+ put_bits(&buf, 1, 1); /* SpatialLocationFlag, always the whole image */
+ put_bits32(&buf, 0); /* PixelX,1 PixelY,1, 0,0 */
+ put_bits(&buf, 16, sc->w-1 & 0xFFFF); /* PixelX,2 */
+ put_bits(&buf, 16, sc->h-1 & 0xFFFF); /* PixelY,2 */
+ put_bits32(&buf, 0); /* StartFrameOfSpatialRegion */
+ put_bits32(&buf, sc->lastindex); /* NumOfFrames */
+ /* hoping num is 1, other values are vague */
+ /* den/num might be greater than 16 bit, so cutting it */
+ put_bits(&buf, 16, 0xFFFF & (sc->time_base.den / sc->time_base.num)); /* MediaTimeUnit */
+ put_bits(&buf, 1, 1); /* MediaTimeFlagOfSpatialRegion */
+ put_bits32(&buf, 0); /* StartMediaTimeOfSpatialRegion */
+ put_bits32(&buf, 0xFFFFFFFF & sc->coarseend->last->pts); /* EndMediaTimeOfSpatialRegion */
+ put_bits32(&buf, numofsegments); /* NumOfSegments */
+ /* coarsesignatures */
+ for (cs = sc->coarsesiglist; cs; cs = cs->next) {
+ put_bits32(&buf, cs->first->index); /* StartFrameOfSegment */
+ put_bits32(&buf, cs->last->index); /* EndFrameOfSegment */
+ put_bits(&buf, 1, 1); /* MediaTimeFlagOfSegment */
+ put_bits32(&buf, 0xFFFFFFFF & cs->first->pts); /* StartMediaTimeOfSegment */
+ put_bits32(&buf, 0xFFFFFFFF & cs->last->pts); /* EndMediaTimeOfSegment */
+ for (i = 0; i < 5; i++) {
+ /* put 243 bits ( = 7 * 32 + 19 = 8 * 28 + 19) into buffer */
+ for (j = 0; j < 30; j++) {
+ put_bits(&buf, 8, cs->data[i][j]);
+ }
+ put_bits(&buf, 3, cs->data[i][30] >> 5);
+ }
+ }
+ /* finesignatures */
+ put_bits(&buf, 1, 0); /* CompressionFlag, only 0 supported */
+ for (fs = sc->finesiglist; fs; fs = fs->next) {
+ put_bits(&buf, 1, 1); /* MediaTimeFlagOfFrame */
+ put_bits32(&buf, 0xFFFFFFFF & fs->pts); /* MediaTimeOfFrame */
+ put_bits(&buf, 8, fs->confidence); /* FrameConfidence */
+ for (i = 0; i < 5; i++) {
+ put_bits(&buf, 8, fs->words[i]); /* Words */
+ }
+ /* framesignature */
+ for (i = 0; i < SIGELEM_SIZE/5; i++) {
+ put_bits(&buf, 8, fs->framesig[i]);
+ }
+ }
+
+ avpriv_align_put_bits(&buf);
+ flush_put_bits(&buf);
+ fwrite(buffer, 1, put_bits_count(&buf)/8, f);
+ fclose(f);
+ av_freep(&buffer);
+ return 0;
+}
+
+static int export(AVFilterContext *ctx, StreamContext *sc, int input)
+{
+ SignatureContext* sic = ctx->priv;
+ char filename[1024];
+
+ if (sic->nb_inputs > 1) {
+ /* error already handled */
+ av_assert0(av_get_frame_filename(filename, sizeof(filename), sic->filename, input) == 0);
+ } else {
+ strcpy(filename, sic->filename);
+ }
+ if (sic->format == FORMAT_XML) {
+ return xml_export(ctx, sc, filename);
+ } else {
+ return binary_export(ctx, sc, filename);
+ }
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ SignatureContext *sic = ctx->priv;
+ StreamContext *sc, *sc2;
+ MatchingInfo match;
+ int i, j, ret;
+ int lookup = 1; /* indicates wheather EOF of all files is reached */
+
+ /* process all inputs */
+ for (i = 0; i < sic->nb_inputs; i++){
+ sc = &(sic->streamcontexts[i]);
+
+ ret = ff_request_frame(ctx->inputs[i]);
+
+ /* return if unexpected error occurs in input stream */
+ if (ret < 0 && ret != AVERROR_EOF)
+ return ret;
+
+ /* export signature at EOF */
+ if (ret == AVERROR_EOF && !sc->exported) {
+ /* export if wanted */
+ if (strlen(sic->filename) > 0) {
+ if (export(ctx, sc, i) < 0)
+ return ret;
+ }
+ sc->exported = 1;
+ }
+ lookup &= sc->exported;
+ }
+
+ /* signature lookup */
+ if (lookup && sic->mode != MODE_OFF) {
+ /* iterate over every pair */
+ for (i = 0; i < sic->nb_inputs; i++) {
+ sc = &(sic->streamcontexts[i]);
+ for (j = i+1; j < sic->nb_inputs; j++) {
+ sc2 = &(sic->streamcontexts[j]);
+ match = lookup_signatures(ctx, sic, sc, sc2, sic->mode);
+ if (match.score != 0) {
+ av_log(ctx, AV_LOG_INFO, "matching of video %d at %f and %d at %f, %d frames matching\n",
+ i, ((double) match.first->pts * sc->time_base.num) / sc->time_base.den,
+ j, ((double) match.second->pts * sc2->time_base.num) / sc2->time_base.den,
+ match.matchframes);
+ if (match.whole)
+ av_log(ctx, AV_LOG_INFO, "whole video matching\n");
+ } else {
+ av_log(ctx, AV_LOG_INFO, "no matching of video %d and %d\n", i, j);
+ }
+ }
+ }
+ }
+
+ return ret;
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+
+ SignatureContext *sic = ctx->priv;
+ StreamContext *sc;
+ int i, ret;
+ char tmp[1024];
+
+ sic->streamcontexts = av_mallocz(sic->nb_inputs * sizeof(StreamContext));
+ if (!sic->streamcontexts)
+ return AVERROR(ENOMEM);
+
+ for (i = 0; i < sic->nb_inputs; i++) {
+ AVFilterPad pad = {
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = av_asprintf("in%d", i),
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ };
+
+ if (!pad.name)
+ return AVERROR(ENOMEM);
+
+ sc = &(sic->streamcontexts[i]);
+
+ sc->lastindex = 0;
+ sc->finesiglist = av_mallocz(sizeof(FineSignature));
+ if (!sc->finesiglist)
+ return AVERROR(ENOMEM);
+ sc->curfinesig = NULL;
+
+ sc->coarsesiglist = av_mallocz(sizeof(CoarseSignature));
+ if (!sc->coarsesiglist)
+ return AVERROR(ENOMEM);
+ sc->curcoarsesig1 = sc->coarsesiglist;
+ sc->coarseend = sc->coarsesiglist;
+ sc->coarsecount = 0;
+ sc->midcoarse = 0;
+
+ if ((ret = ff_insert_inpad(ctx, i, &pad)) < 0) {
+ av_freep(&pad.name);
+ return ret;
+ }
+ }
+
+ /* check filename */
+ if (sic->nb_inputs > 1 && strlen(sic->filename) > 0 && av_get_frame_filename(tmp, sizeof(tmp), sic->filename, 0) == -1) {
+ av_log(ctx, AV_LOG_ERROR, "The filename must contain %%d or %%0nd, if you have more than one input.\n");
+ return AVERROR(EINVAL);
+ }
+
+ return 0;
+}
+
+
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ SignatureContext *sic = ctx->priv;
+ StreamContext *sc;
+ void* tmp;
+ FineSignature* finsig;
+ CoarseSignature* cousig;
+ int i;
+
+
+ /* free the lists */
+ if (sic->streamcontexts != NULL) {
+ for (i = 0; i < sic->nb_inputs; i++) {
+ sc = &(sic->streamcontexts[i]);
+ finsig = sc->finesiglist;
+ cousig = sc->coarsesiglist;
+
+ while (finsig) {
+ tmp = finsig;
+ finsig = finsig->next;
+ av_freep(&tmp);
+ }
+ sc->finesiglist = NULL;
+
+ while (cousig) {
+ tmp = cousig;
+ cousig = cousig->next;
+ av_freep(&tmp);
+ }
+ sc->coarsesiglist = NULL;
+ }
+ av_freep(&sic->streamcontexts);
+ }
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ AVFilterLink *inlink = ctx->inputs[0];
+
+ outlink->time_base = inlink->time_base;
+ outlink->frame_rate = inlink->frame_rate;
+ outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
+ outlink->w = inlink->w;
+ outlink->h = inlink->h;
+
+ return 0;
+}
+
+static const AVFilterPad signature_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .request_frame = request_frame,
+ .config_props = config_output,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_signature = {
+ .name = "signature",
+ .description = NULL_IF_CONFIG_SMALL("Calculate the MPEG-7 video signature"),
+ .priv_size = sizeof(SignatureContext),
+ .priv_class = &signature_class,
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .outputs = signature_outputs,
+ .inputs = NULL,
+ .flags = AVFILTER_FLAG_DYNAMIC_INPUTS,
+};
diff --git a/libavfilter/vf_smartblur.c b/libavfilter/vf_smartblur.c
new file mode 100644
index 0000000000..117e0ec601
--- /dev/null
+++ b/libavfilter/vf_smartblur.c
@@ -0,0 +1,305 @@
+/*
+ * Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (c) 2012 Jeremy Tran
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/**
+ * @file
+ * Apply a smartblur filter to the input video
+ * Ported from MPlayer libmpcodecs/vf_smartblur.c by Michael Niedermayer.
+ */
+
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "libswscale/swscale.h"
+
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+
+#define RADIUS_MIN 0.1
+#define RADIUS_MAX 5.0
+
+#define STRENGTH_MIN -1.0
+#define STRENGTH_MAX 1.0
+
+#define THRESHOLD_MIN -30
+#define THRESHOLD_MAX 30
+
+typedef struct {
+ float radius;
+ float strength;
+ int threshold;
+ float quality;
+ struct SwsContext *filter_context;
+} FilterParam;
+
+typedef struct {
+ const AVClass *class;
+ FilterParam luma;
+ FilterParam chroma;
+ int hsub;
+ int vsub;
+ unsigned int sws_flags;
+} SmartblurContext;
+
+#define OFFSET(x) offsetof(SmartblurContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption smartblur_options[] = {
+ { "luma_radius", "set luma radius", OFFSET(luma.radius), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, RADIUS_MIN, RADIUS_MAX, .flags=FLAGS },
+ { "lr" , "set luma radius", OFFSET(luma.radius), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, RADIUS_MIN, RADIUS_MAX, .flags=FLAGS },
+ { "luma_strength", "set luma strength", OFFSET(luma.strength), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, STRENGTH_MIN, STRENGTH_MAX, .flags=FLAGS },
+ { "ls", "set luma strength", OFFSET(luma.strength), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, STRENGTH_MIN, STRENGTH_MAX, .flags=FLAGS },
+ { "luma_threshold", "set luma threshold", OFFSET(luma.threshold), AV_OPT_TYPE_INT, {.i64=0}, THRESHOLD_MIN, THRESHOLD_MAX, .flags=FLAGS },
+ { "lt", "set luma threshold", OFFSET(luma.threshold), AV_OPT_TYPE_INT, {.i64=0}, THRESHOLD_MIN, THRESHOLD_MAX, .flags=FLAGS },
+
+ { "chroma_radius", "set chroma radius", OFFSET(chroma.radius), AV_OPT_TYPE_FLOAT, {.dbl=RADIUS_MIN-1}, RADIUS_MIN-1, RADIUS_MAX, .flags=FLAGS },
+ { "cr", "set chroma radius", OFFSET(chroma.radius), AV_OPT_TYPE_FLOAT, {.dbl=RADIUS_MIN-1}, RADIUS_MIN-1, RADIUS_MAX, .flags=FLAGS },
+ { "chroma_strength", "set chroma strength", OFFSET(chroma.strength), AV_OPT_TYPE_FLOAT, {.dbl=STRENGTH_MIN-1}, STRENGTH_MIN-1, STRENGTH_MAX, .flags=FLAGS },
+ { "cs", "set chroma strength", OFFSET(chroma.strength), AV_OPT_TYPE_FLOAT, {.dbl=STRENGTH_MIN-1}, STRENGTH_MIN-1, STRENGTH_MAX, .flags=FLAGS },
+ { "chroma_threshold", "set chroma threshold", OFFSET(chroma.threshold), AV_OPT_TYPE_INT, {.i64=THRESHOLD_MIN-1}, THRESHOLD_MIN-1, THRESHOLD_MAX, .flags=FLAGS },
+ { "ct", "set chroma threshold", OFFSET(chroma.threshold), AV_OPT_TYPE_INT, {.i64=THRESHOLD_MIN-1}, THRESHOLD_MIN-1, THRESHOLD_MAX, .flags=FLAGS },
+
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(smartblur);
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ SmartblurContext *s = ctx->priv;
+
+ /* make chroma default to luma values, if not explicitly set */
+ if (s->chroma.radius < RADIUS_MIN)
+ s->chroma.radius = s->luma.radius;
+ if (s->chroma.strength < STRENGTH_MIN)
+ s->chroma.strength = s->luma.strength;
+ if (s->chroma.threshold < THRESHOLD_MIN)
+ s->chroma.threshold = s->luma.threshold;
+
+ s->luma.quality = s->chroma.quality = 3.0;
+ s->sws_flags = SWS_BICUBIC;
+
+ av_log(ctx, AV_LOG_VERBOSE,
+ "luma_radius:%f luma_strength:%f luma_threshold:%d "
+ "chroma_radius:%f chroma_strength:%f chroma_threshold:%d\n",
+ s->luma.radius, s->luma.strength, s->luma.threshold,
+ s->chroma.radius, s->chroma.strength, s->chroma.threshold);
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ SmartblurContext *s = ctx->priv;
+
+ sws_freeContext(s->luma.filter_context);
+ sws_freeContext(s->chroma.filter_context);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P,
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
+ AV_PIX_FMT_GRAY8,
+ AV_PIX_FMT_NONE
+ };
+
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static int alloc_sws_context(FilterParam *f, int width, int height, unsigned int flags)
+{
+ SwsVector *vec;
+ SwsFilter sws_filter;
+
+ vec = sws_getGaussianVec(f->radius, f->quality);
+
+ if (!vec)
+ return AVERROR(EINVAL);
+
+ sws_scaleVec(vec, f->strength);
+ vec->coeff[vec->length / 2] += 1.0 - f->strength;
+ sws_filter.lumH = sws_filter.lumV = vec;
+ sws_filter.chrH = sws_filter.chrV = NULL;
+ f->filter_context = sws_getCachedContext(NULL,
+ width, height, AV_PIX_FMT_GRAY8,
+ width, height, AV_PIX_FMT_GRAY8,
+ flags, &sws_filter, NULL, NULL);
+
+ sws_freeVec(vec);
+
+ if (!f->filter_context)
+ return AVERROR(EINVAL);
+
+ return 0;
+}
+
+static int config_props(AVFilterLink *inlink)
+{
+ SmartblurContext *s = inlink->dst->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+
+ s->hsub = desc->log2_chroma_w;
+ s->vsub = desc->log2_chroma_h;
+
+ alloc_sws_context(&s->luma, inlink->w, inlink->h, s->sws_flags);
+ alloc_sws_context(&s->chroma,
+ AV_CEIL_RSHIFT(inlink->w, s->hsub),
+ AV_CEIL_RSHIFT(inlink->h, s->vsub),
+ s->sws_flags);
+
+ return 0;
+}
+
+static void blur(uint8_t *dst, const int dst_linesize,
+ const uint8_t *src, const int src_linesize,
+ const int w, const int h, const int threshold,
+ struct SwsContext *filter_context)
+{
+ int x, y;
+ int orig, filtered;
+ int diff;
+ /* Declare arrays of 4 to get aligned data */
+ const uint8_t* const src_array[4] = {src};
+ uint8_t *dst_array[4] = {dst};
+ int src_linesize_array[4] = {src_linesize};
+ int dst_linesize_array[4] = {dst_linesize};
+
+ sws_scale(filter_context, src_array, src_linesize_array,
+ 0, h, dst_array, dst_linesize_array);
+
+ if (threshold > 0) {
+ for (y = 0; y < h; ++y) {
+ for (x = 0; x < w; ++x) {
+ orig = src[x + y * src_linesize];
+ filtered = dst[x + y * dst_linesize];
+ diff = orig - filtered;
+
+ if (diff > 0) {
+ if (diff > 2 * threshold)
+ dst[x + y * dst_linesize] = orig;
+ else if (diff > threshold)
+ /* add 'diff' and subtract 'threshold' from 'filtered' */
+ dst[x + y * dst_linesize] = orig - threshold;
+ } else {
+ if (-diff > 2 * threshold)
+ dst[x + y * dst_linesize] = orig;
+ else if (-diff > threshold)
+ /* add 'diff' and 'threshold' to 'filtered' */
+ dst[x + y * dst_linesize] = orig + threshold;
+ }
+ }
+ }
+ } else if (threshold < 0) {
+ for (y = 0; y < h; ++y) {
+ for (x = 0; x < w; ++x) {
+ orig = src[x + y * src_linesize];
+ filtered = dst[x + y * dst_linesize];
+ diff = orig - filtered;
+
+ if (diff > 0) {
+ if (diff <= -threshold)
+ dst[x + y * dst_linesize] = orig;
+ else if (diff <= -2 * threshold)
+ /* subtract 'diff' and 'threshold' from 'orig' */
+ dst[x + y * dst_linesize] = filtered - threshold;
+ } else {
+ if (diff >= threshold)
+ dst[x + y * dst_linesize] = orig;
+ else if (diff >= 2 * threshold)
+ /* add 'threshold' and subtract 'diff' from 'orig' */
+ dst[x + y * dst_linesize] = filtered + threshold;
+ }
+ }
+ }
+ }
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *inpic)
+{
+ SmartblurContext *s = inlink->dst->priv;
+ AVFilterLink *outlink = inlink->dst->outputs[0];
+ AVFrame *outpic;
+ int cw = AV_CEIL_RSHIFT(inlink->w, s->hsub);
+ int ch = AV_CEIL_RSHIFT(inlink->h, s->vsub);
+
+ outpic = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!outpic) {
+ av_frame_free(&inpic);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(outpic, inpic);
+
+ blur(outpic->data[0], outpic->linesize[0],
+ inpic->data[0], inpic->linesize[0],
+ inlink->w, inlink->h, s->luma.threshold,
+ s->luma.filter_context);
+
+ if (inpic->data[2]) {
+ blur(outpic->data[1], outpic->linesize[1],
+ inpic->data[1], inpic->linesize[1],
+ cw, ch, s->chroma.threshold,
+ s->chroma.filter_context);
+ blur(outpic->data[2], outpic->linesize[2],
+ inpic->data[2], inpic->linesize[2],
+ cw, ch, s->chroma.threshold,
+ s->chroma.filter_context);
+ }
+
+ av_frame_free(&inpic);
+ return ff_filter_frame(outlink, outpic);
+}
+
+static const AVFilterPad smartblur_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_props,
+ },
+ { NULL }
+};
+
+static const AVFilterPad smartblur_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_smartblur = {
+ .name = "smartblur",
+ .description = NULL_IF_CONFIG_SMALL("Blur the input video without impacting the outlines."),
+ .priv_size = sizeof(SmartblurContext),
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = smartblur_inputs,
+ .outputs = smartblur_outputs,
+ .priv_class = &smartblur_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+};
diff --git a/libavfilter/vf_spp.c b/libavfilter/vf_spp.c
new file mode 100644
index 0000000000..fe579cedb1
--- /dev/null
+++ b/libavfilter/vf_spp.c
@@ -0,0 +1,529 @@
+/*
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (c) 2013 Clément Bœsch <u pkh me>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/**
+ * @file
+ * Simple post processing filter
+ *
+ * This implementation is based on an algorithm described in
+ * "Aria Nosratinia Embedded Post-Processing for
+ * Enhancement of Compressed Images (1999)"
+ *
+ * Originally written by Michael Niedermayer for the MPlayer project, and
+ * ported by Clément Bœsch for FFmpeg.
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "internal.h"
+#include "vf_spp.h"
+
+enum mode {
+ MODE_HARD,
+ MODE_SOFT,
+ NB_MODES
+};
+
+static const AVClass *child_class_next(const AVClass *prev)
+{
+ return prev ? NULL : avcodec_dct_get_class();
+}
+
+static void *child_next(void *obj, void *prev)
+{
+ SPPContext *s = obj;
+ return prev ? NULL : s->dct;
+}
+
+#define OFFSET(x) offsetof(SPPContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption spp_options[] = {
+ { "quality", "set quality", OFFSET(log2_count), AV_OPT_TYPE_INT, {.i64 = 3}, 0, MAX_LEVEL, FLAGS },
+ { "qp", "force a constant quantizer parameter", OFFSET(qp), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 63, FLAGS },
+ { "mode", "set thresholding mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64 = MODE_HARD}, 0, NB_MODES - 1, FLAGS, "mode" },
+ { "hard", "hard thresholding", 0, AV_OPT_TYPE_CONST, {.i64 = MODE_HARD}, INT_MIN, INT_MAX, FLAGS, "mode" },
+ { "soft", "soft thresholding", 0, AV_OPT_TYPE_CONST, {.i64 = MODE_SOFT}, INT_MIN, INT_MAX, FLAGS, "mode" },
+ { "use_bframe_qp", "use B-frames' QP", OFFSET(use_bframe_qp), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS },
+ { NULL }
+};
+
+static const AVClass spp_class = {
+ .class_name = "spp",
+ .item_name = av_default_item_name,
+ .option = spp_options,
+ .version = LIBAVUTIL_VERSION_INT,
+ .category = AV_CLASS_CATEGORY_FILTER,
+ .child_class_next = child_class_next,
+ .child_next = child_next,
+};
+
+// XXX: share between filters?
+DECLARE_ALIGNED(8, static const uint8_t, ldither)[8][8] = {
+ { 0, 48, 12, 60, 3, 51, 15, 63 },
+ { 32, 16, 44, 28, 35, 19, 47, 31 },
+ { 8, 56, 4, 52, 11, 59, 7, 55 },
+ { 40, 24, 36, 20, 43, 27, 39, 23 },
+ { 2, 50, 14, 62, 1, 49, 13, 61 },
+ { 34, 18, 46, 30, 33, 17, 45, 29 },
+ { 10, 58, 6, 54, 9, 57, 5, 53 },
+ { 42, 26, 38, 22, 41, 25, 37, 21 },
+};
+
+static const uint8_t offset[127][2] = {
+ {0,0},
+ {0,0}, {4,4}, // quality = 1
+ {0,0}, {2,2}, {6,4}, {4,6}, // quality = 2
+ {0,0}, {5,1}, {2,2}, {7,3}, {4,4}, {1,5}, {6,6}, {3,7}, // quality = 3
+
+ {0,0}, {4,0}, {1,1}, {5,1}, {3,2}, {7,2}, {2,3}, {6,3}, // quality = 4
+ {0,4}, {4,4}, {1,5}, {5,5}, {3,6}, {7,6}, {2,7}, {6,7},
+
+ {0,0}, {0,2}, {0,4}, {0,6}, {1,1}, {1,3}, {1,5}, {1,7}, // quality = 5
+ {2,0}, {2,2}, {2,4}, {2,6}, {3,1}, {3,3}, {3,5}, {3,7},
+ {4,0}, {4,2}, {4,4}, {4,6}, {5,1}, {5,3}, {5,5}, {5,7},
+ {6,0}, {6,2}, {6,4}, {6,6}, {7,1}, {7,3}, {7,5}, {7,7},
+
+ {0,0}, {4,4}, {0,4}, {4,0}, {2,2}, {6,6}, {2,6}, {6,2}, // quality = 6
+ {0,2}, {4,6}, {0,6}, {4,2}, {2,0}, {6,4}, {2,4}, {6,0},
+ {1,1}, {5,5}, {1,5}, {5,1}, {3,3}, {7,7}, {3,7}, {7,3},
+ {1,3}, {5,7}, {1,7}, {5,3}, {3,1}, {7,5}, {3,5}, {7,1},
+ {0,1}, {4,5}, {0,5}, {4,1}, {2,3}, {6,7}, {2,7}, {6,3},
+ {0,3}, {4,7}, {0,7}, {4,3}, {2,1}, {6,5}, {2,5}, {6,1},
+ {1,0}, {5,4}, {1,4}, {5,0}, {3,2}, {7,6}, {3,6}, {7,2},
+ {1,2}, {5,6}, {1,6}, {5,2}, {3,0}, {7,4}, {3,4}, {7,0},
+};
+
+static void hardthresh_c(int16_t dst[64], const int16_t src[64],
+ int qp, const uint8_t *permutation)
+{
+ int i;
+ int bias = 0; // FIXME
+
+ unsigned threshold1 = qp * ((1<<4) - bias) - 1;
+ unsigned threshold2 = threshold1 << 1;
+
+ memset(dst, 0, 64 * sizeof(dst[0]));
+ dst[0] = (src[0] + 4) >> 3;
+
+ for (i = 1; i < 64; i++) {
+ int level = src[i];
+ if (((unsigned)(level + threshold1)) > threshold2) {
+ const int j = permutation[i];
+ dst[j] = (level + 4) >> 3;
+ }
+ }
+}
+
+static void softthresh_c(int16_t dst[64], const int16_t src[64],
+ int qp, const uint8_t *permutation)
+{
+ int i;
+ int bias = 0; //FIXME
+
+ unsigned threshold1 = qp * ((1<<4) - bias) - 1;
+ unsigned threshold2 = threshold1 << 1;
+
+ memset(dst, 0, 64 * sizeof(dst[0]));
+ dst[0] = (src[0] + 4) >> 3;
+
+ for (i = 1; i < 64; i++) {
+ int level = src[i];
+ if (((unsigned)(level + threshold1)) > threshold2) {
+ const int j = permutation[i];
+ if (level > 0) dst[j] = (level - threshold1 + 4) >> 3;
+ else dst[j] = (level + threshold1 + 4) >> 3;
+ }
+ }
+}
+
+static void store_slice_c(uint8_t *dst, const int16_t *src,
+ int dst_linesize, int src_linesize,
+ int width, int height, int log2_scale,
+ const uint8_t dither[8][8])
+{
+ int y, x;
+
+#define STORE(pos) do { \
+ temp = ((src[x + y*src_linesize + pos] << log2_scale) + d[pos]) >> 6; \
+ if (temp & 0x100) \
+ temp = ~(temp >> 31); \
+ dst[x + y*dst_linesize + pos] = temp; \
+} while (0)
+
+ for (y = 0; y < height; y++) {
+ const uint8_t *d = dither[y];
+ for (x = 0; x < width; x += 8) {
+ int temp;
+ STORE(0);
+ STORE(1);
+ STORE(2);
+ STORE(3);
+ STORE(4);
+ STORE(5);
+ STORE(6);
+ STORE(7);
+ }
+ }
+}
+
+static void store_slice16_c(uint16_t *dst, const int16_t *src,
+ int dst_linesize, int src_linesize,
+ int width, int height, int log2_scale,
+ const uint8_t dither[8][8], int depth)
+{
+ int y, x;
+ unsigned int mask = -1<<depth;
+
+#define STORE16(pos) do { \
+ temp = ((src[x + y*src_linesize + pos] << log2_scale) + (d[pos]>>1)) >> 5; \
+ if (temp & mask ) \
+ temp = ~(temp >> 31); \
+ dst[x + y*dst_linesize + pos] = temp; \
+} while (0)
+
+ for (y = 0; y < height; y++) {
+ const uint8_t *d = dither[y];
+ for (x = 0; x < width; x += 8) {
+ int temp;
+ STORE16(0);
+ STORE16(1);
+ STORE16(2);
+ STORE16(3);
+ STORE16(4);
+ STORE16(5);
+ STORE16(6);
+ STORE16(7);
+ }
+ }
+}
+
+static inline void add_block(uint16_t *dst, int linesize, const int16_t block[64])
+{
+ int y;
+
+ for (y = 0; y < 8; y++) {
+ *(uint32_t *)&dst[0 + y*linesize] += *(uint32_t *)&block[0 + y*8];
+ *(uint32_t *)&dst[2 + y*linesize] += *(uint32_t *)&block[2 + y*8];
+ *(uint32_t *)&dst[4 + y*linesize] += *(uint32_t *)&block[4 + y*8];
+ *(uint32_t *)&dst[6 + y*linesize] += *(uint32_t *)&block[6 + y*8];
+ }
+}
+
+static void filter(SPPContext *p, uint8_t *dst, uint8_t *src,
+ int dst_linesize, int src_linesize, int width, int height,
+ const uint8_t *qp_table, int qp_stride, int is_luma, int depth)
+{
+ int x, y, i;
+ const int count = 1 << p->log2_count;
+ const int linesize = is_luma ? p->temp_linesize : FFALIGN(width+16, 16);
+ DECLARE_ALIGNED(16, uint64_t, block_align)[32];
+ int16_t *block = (int16_t *)block_align;
+ int16_t *block2 = (int16_t *)(block_align + 16);
+ uint16_t *psrc16 = (uint16_t*)p->src;
+ const int sample_bytes = (depth+7) / 8;
+
+ for (y = 0; y < height; y++) {
+ int index = 8 + 8*linesize + y*linesize;
+ memcpy(p->src + index*sample_bytes, src + y*src_linesize, width*sample_bytes);
+ if (sample_bytes == 1) {
+ for (x = 0; x < 8; x++) {
+ p->src[index - x - 1] = p->src[index + x ];
+ p->src[index + width + x ] = p->src[index + width - x - 1];
+ }
+ } else {
+ for (x = 0; x < 8; x++) {
+ psrc16[index - x - 1] = psrc16[index + x ];
+ psrc16[index + width + x ] = psrc16[index + width - x - 1];
+ }
+ }
+ }
+ for (y = 0; y < 8; y++) {
+ memcpy(p->src + ( 7-y)*linesize * sample_bytes, p->src + ( y+8)*linesize * sample_bytes, linesize * sample_bytes);
+ memcpy(p->src + (height+8+y)*linesize * sample_bytes, p->src + (height-y+7)*linesize * sample_bytes, linesize * sample_bytes);
+ }
+
+ for (y = 0; y < height + 8; y += 8) {
+ memset(p->temp + (8 + y) * linesize, 0, 8 * linesize * sizeof(*p->temp));
+ for (x = 0; x < width + 8; x += 8) {
+ int qp;
+
+ if (p->qp) {
+ qp = p->qp;
+ } else{
+ const int qps = 3 + is_luma;
+ qp = qp_table[(FFMIN(x, width - 1) >> qps) + (FFMIN(y, height - 1) >> qps) * qp_stride];
+ qp = FFMAX(1, ff_norm_qscale(qp, p->qscale_type));
+ }
+ for (i = 0; i < count; i++) {
+ const int x1 = x + offset[i + count - 1][0];
+ const int y1 = y + offset[i + count - 1][1];
+ const int index = x1 + y1*linesize;
+ p->dct->get_pixels(block, p->src + sample_bytes*index, sample_bytes*linesize);
+ p->dct->fdct(block);
+ p->requantize(block2, block, qp, p->dct->idct_permutation);
+ p->dct->idct(block2);
+ add_block(p->temp + index, linesize, block2);
+ }
+ }
+ if (y) {
+ if (sample_bytes == 1) {
+ p->store_slice(dst + (y - 8) * dst_linesize, p->temp + 8 + y*linesize,
+ dst_linesize, linesize, width,
+ FFMIN(8, height + 8 - y), MAX_LEVEL - p->log2_count,
+ ldither);
+ } else {
+ store_slice16_c((uint16_t*)(dst + (y - 8) * dst_linesize), p->temp + 8 + y*linesize,
+ dst_linesize/2, linesize, width,
+ FFMIN(8, height + 8 - y), MAX_LEVEL - p->log2_count,
+ ldither, depth);
+ }
+ }
+ }
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P,
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
+ AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P,
+ AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ440P,
+ AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUV422P10,
+ AV_PIX_FMT_YUV420P10,
+ AV_PIX_FMT_YUV444P9, AV_PIX_FMT_YUV422P9,
+ AV_PIX_FMT_YUV420P9,
+ AV_PIX_FMT_GRAY8,
+ AV_PIX_FMT_GBRP,
+ AV_PIX_FMT_GBRP9,
+ AV_PIX_FMT_GBRP10,
+ AV_PIX_FMT_NONE
+ };
+
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ SPPContext *s = inlink->dst->priv;
+ const int h = FFALIGN(inlink->h + 16, 16);
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ const int bps = desc->comp[0].depth;
+
+ av_opt_set_int(s->dct, "bits_per_sample", bps, 0);
+ avcodec_dct_init(s->dct);
+
+ if (ARCH_X86)
+ ff_spp_init_x86(s);
+
+ s->hsub = desc->log2_chroma_w;
+ s->vsub = desc->log2_chroma_h;
+ s->temp_linesize = FFALIGN(inlink->w + 16, 16);
+ s->temp = av_malloc_array(s->temp_linesize, h * sizeof(*s->temp));
+ s->src = av_malloc_array(s->temp_linesize, h * sizeof(*s->src) * 2);
+
+ if (!s->temp || !s->src)
+ return AVERROR(ENOMEM);
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ SPPContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFrame *out = in;
+ int qp_stride = 0;
+ const int8_t *qp_table = NULL;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ const int depth = desc->comp[0].depth;
+
+ /* if we are not in a constant user quantizer mode and we don't want to use
+ * the quantizers from the B-frames (B-frames often have a higher QP), we
+ * need to save the qp table from the last non B-frame; this is what the
+ * following code block does */
+ if (!s->qp) {
+ qp_table = av_frame_get_qp_table(in, &qp_stride, &s->qscale_type);
+
+ if (qp_table && !s->use_bframe_qp && in->pict_type != AV_PICTURE_TYPE_B) {
+ int w, h;
+
+ /* if the qp stride is not set, it means the QP are only defined on
+ * a line basis */
+ if (!qp_stride) {
+ w = AV_CEIL_RSHIFT(inlink->w, 4);
+ h = 1;
+ } else {
+ w = qp_stride;
+ h = AV_CEIL_RSHIFT(inlink->h, 4);
+ }
+
+ if (w * h > s->non_b_qp_alloc_size) {
+ int ret = av_reallocp_array(&s->non_b_qp_table, w, h);
+ if (ret < 0) {
+ s->non_b_qp_alloc_size = 0;
+ return ret;
+ }
+ s->non_b_qp_alloc_size = w * h;
+ }
+
+ av_assert0(w * h <= s->non_b_qp_alloc_size);
+ memcpy(s->non_b_qp_table, qp_table, w * h);
+ }
+ }
+
+ if (s->log2_count && !ctx->is_disabled) {
+ if (!s->use_bframe_qp && s->non_b_qp_table)
+ qp_table = s->non_b_qp_table;
+
+ if (qp_table || s->qp) {
+ const int cw = AV_CEIL_RSHIFT(inlink->w, s->hsub);
+ const int ch = AV_CEIL_RSHIFT(inlink->h, s->vsub);
+
+ /* get a new frame if in-place is not possible or if the dimensions
+ * are not multiple of 8 */
+ if (!av_frame_is_writable(in) || (inlink->w & 7) || (inlink->h & 7)) {
+ const int aligned_w = FFALIGN(inlink->w, 8);
+ const int aligned_h = FFALIGN(inlink->h, 8);
+
+ out = ff_get_video_buffer(outlink, aligned_w, aligned_h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+ out->width = in->width;
+ out->height = in->height;
+ }
+
+ filter(s, out->data[0], in->data[0], out->linesize[0], in->linesize[0], inlink->w, inlink->h, qp_table, qp_stride, 1, depth);
+
+ if (out->data[2]) {
+ filter(s, out->data[1], in->data[1], out->linesize[1], in->linesize[1], cw, ch, qp_table, qp_stride, 0, depth);
+ filter(s, out->data[2], in->data[2], out->linesize[2], in->linesize[2], cw, ch, qp_table, qp_stride, 0, depth);
+ }
+ emms_c();
+ }
+ }
+
+ if (in != out) {
+ if (in->data[3])
+ av_image_copy_plane(out->data[3], out->linesize[3],
+ in ->data[3], in ->linesize[3],
+ inlink->w, inlink->h);
+ av_frame_free(&in);
+ }
+ return ff_filter_frame(outlink, out);
+}
+
+static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
+ char *res, int res_len, int flags)
+{
+ SPPContext *s = ctx->priv;
+
+ if (!strcmp(cmd, "level")) {
+ if (!strcmp(args, "max"))
+ s->log2_count = MAX_LEVEL;
+ else
+ s->log2_count = av_clip(strtol(args, NULL, 10), 0, MAX_LEVEL);
+ return 0;
+ }
+ return AVERROR(ENOSYS);
+}
+
+static av_cold int init_dict(AVFilterContext *ctx, AVDictionary **opts)
+{
+ SPPContext *s = ctx->priv;
+ int ret;
+
+ s->avctx = avcodec_alloc_context3(NULL);
+ s->dct = avcodec_dct_alloc();
+ if (!s->avctx || !s->dct)
+ return AVERROR(ENOMEM);
+
+ if (opts) {
+ AVDictionaryEntry *e = NULL;
+
+ while ((e = av_dict_get(*opts, "", e, AV_DICT_IGNORE_SUFFIX))) {
+ if ((ret = av_opt_set(s->dct, e->key, e->value, 0)) < 0)
+ return ret;
+ }
+ av_dict_free(opts);
+ }
+
+ s->store_slice = store_slice_c;
+ switch (s->mode) {
+ case MODE_HARD: s->requantize = hardthresh_c; break;
+ case MODE_SOFT: s->requantize = softthresh_c; break;
+ }
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ SPPContext *s = ctx->priv;
+
+ av_freep(&s->temp);
+ av_freep(&s->src);
+ if (s->avctx) {
+ avcodec_close(s->avctx);
+ av_freep(&s->avctx);
+ }
+ av_freep(&s->dct);
+ av_freep(&s->non_b_qp_table);
+}
+
+static const AVFilterPad spp_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad spp_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_spp = {
+ .name = "spp",
+ .description = NULL_IF_CONFIG_SMALL("Apply a simple post processing filter."),
+ .priv_size = sizeof(SPPContext),
+ .init_dict = init_dict,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = spp_inputs,
+ .outputs = spp_outputs,
+ .process_command = process_command,
+ .priv_class = &spp_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
+};
diff --git a/libavfilter/vf_spp.h b/libavfilter/vf_spp.h
new file mode 100644
index 0000000000..6b70a91770
--- /dev/null
+++ b/libavfilter/vf_spp.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (c) 2013 Clément Bœsch
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef AVFILTER_SPP_H
+#define AVFILTER_SPP_H
+
+#include "libavcodec/avcodec.h"
+#include "libavcodec/avdct.h"
+#include "avfilter.h"
+
+#define MAX_LEVEL 6 /* quality levels */
+
+typedef struct {
+ const AVClass *av_class;
+
+ int log2_count;
+ int qp;
+ int mode;
+ int qscale_type;
+ int temp_linesize;
+ uint8_t *src;
+ uint16_t *temp;
+ AVCodecContext *avctx;
+ AVDCT *dct;
+ int8_t *non_b_qp_table;
+ int non_b_qp_alloc_size;
+ int use_bframe_qp;
+ int hsub, vsub;
+
+ void (*store_slice)(uint8_t *dst, const int16_t *src,
+ int dst_stride, int src_stride,
+ int width, int height, int log2_scale,
+ const uint8_t dither[8][8]);
+
+ void (*requantize)(int16_t dst[64], const int16_t src[64],
+ int qp, const uint8_t *permutation);
+} SPPContext;
+
+void ff_spp_init_x86(SPPContext *s);
+
+#endif /* AVFILTER_SPP_H */
diff --git a/libavfilter/vf_ssim.c b/libavfilter/vf_ssim.c
new file mode 100644
index 0000000000..cf925bd211
--- /dev/null
+++ b/libavfilter/vf_ssim.c
@@ -0,0 +1,509 @@
+/*
+ * Copyright (c) 2003-2013 Loren Merritt
+ * Copyright (c) 2015 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/* Computes the Structural Similarity Metric between two video streams.
+ * original algorithm:
+ * Z. Wang, A. C. Bovik, H. R. Sheikh and E. P. Simoncelli,
+ * "Image quality assessment: From error visibility to structural similarity,"
+ * IEEE Transactions on Image Processing, vol. 13, no. 4, pp. 600-612, Apr. 2004.
+ *
+ * To improve speed, this implementation uses the standard approximation of
+ * overlapped 8x8 block sums, rather than the original gaussian weights.
+ */
+
+/*
+ * @file
+ * Caculate the SSIM between two input videos.
+ */
+
+#include "libavutil/avstring.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+#include "dualinput.h"
+#include "drawutils.h"
+#include "formats.h"
+#include "internal.h"
+#include "ssim.h"
+#include "video.h"
+
+typedef struct SSIMContext {
+ const AVClass *class;
+ FFDualInputContext dinput;
+ FILE *stats_file;
+ char *stats_file_str;
+ int nb_components;
+ int max;
+ uint64_t nb_frames;
+ double ssim[4], ssim_total;
+ char comps[4];
+ float coefs[4];
+ uint8_t rgba_map[4];
+ int planewidth[4];
+ int planeheight[4];
+ int *temp;
+ int is_rgb;
+ float (*ssim_plane)(SSIMDSPContext *dsp,
+ uint8_t *main, int main_stride,
+ uint8_t *ref, int ref_stride,
+ int width, int height, void *temp,
+ int max);
+ SSIMDSPContext dsp;
+} SSIMContext;
+
+#define OFFSET(x) offsetof(SSIMContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption ssim_options[] = {
+ {"stats_file", "Set file where to store per-frame difference information", OFFSET(stats_file_str), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, FLAGS },
+ {"f", "Set file where to store per-frame difference information", OFFSET(stats_file_str), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(ssim);
+
+static void set_meta(AVDictionary **metadata, const char *key, char comp, float d)
+{
+ char value[128];
+ snprintf(value, sizeof(value), "%0.2f", d);
+ if (comp) {
+ char key2[128];
+ snprintf(key2, sizeof(key2), "%s%c", key, comp);
+ av_dict_set(metadata, key2, value, 0);
+ } else {
+ av_dict_set(metadata, key, value, 0);
+ }
+}
+
+static void ssim_4x4xn_16bit(const uint8_t *main8, ptrdiff_t main_stride,
+ const uint8_t *ref8, ptrdiff_t ref_stride,
+ int64_t (*sums)[4], int width)
+{
+ const uint16_t *main16 = (const uint16_t *)main8;
+ const uint16_t *ref16 = (const uint16_t *)ref8;
+ int x, y, z;
+
+ main_stride >>= 1;
+ ref_stride >>= 1;
+
+ for (z = 0; z < width; z++) {
+ uint64_t s1 = 0, s2 = 0, ss = 0, s12 = 0;
+
+ for (y = 0; y < 4; y++) {
+ for (x = 0; x < 4; x++) {
+ int a = main16[x + y * main_stride];
+ int b = ref16[x + y * ref_stride];
+
+ s1 += a;
+ s2 += b;
+ ss += a*a;
+ ss += b*b;
+ s12 += a*b;
+ }
+ }
+
+ sums[z][0] = s1;
+ sums[z][1] = s2;
+ sums[z][2] = ss;
+ sums[z][3] = s12;
+ main16 += 4;
+ ref16 += 4;
+ }
+}
+
+static void ssim_4x4xn_8bit(const uint8_t *main, ptrdiff_t main_stride,
+ const uint8_t *ref, ptrdiff_t ref_stride,
+ int (*sums)[4], int width)
+{
+ int x, y, z;
+
+ for (z = 0; z < width; z++) {
+ uint32_t s1 = 0, s2 = 0, ss = 0, s12 = 0;
+
+ for (y = 0; y < 4; y++) {
+ for (x = 0; x < 4; x++) {
+ int a = main[x + y * main_stride];
+ int b = ref[x + y * ref_stride];
+
+ s1 += a;
+ s2 += b;
+ ss += a*a;
+ ss += b*b;
+ s12 += a*b;
+ }
+ }
+
+ sums[z][0] = s1;
+ sums[z][1] = s2;
+ sums[z][2] = ss;
+ sums[z][3] = s12;
+ main += 4;
+ ref += 4;
+ }
+}
+
+static float ssim_end1x(int64_t s1, int64_t s2, int64_t ss, int64_t s12, int max)
+{
+ int64_t ssim_c1 = (int64_t)(.01*.01*max*max*64 + .5);
+ int64_t ssim_c2 = (int64_t)(.03*.03*max*max*64*63 + .5);
+
+ int64_t fs1 = s1;
+ int64_t fs2 = s2;
+ int64_t fss = ss;
+ int64_t fs12 = s12;
+ int64_t vars = fss * 64 - fs1 * fs1 - fs2 * fs2;
+ int64_t covar = fs12 * 64 - fs1 * fs2;
+
+ return (float)(2 * fs1 * fs2 + ssim_c1) * (float)(2 * covar + ssim_c2)
+ / ((float)(fs1 * fs1 + fs2 * fs2 + ssim_c1) * (float)(vars + ssim_c2));
+}
+
+static float ssim_end1(int s1, int s2, int ss, int s12)
+{
+ static const int ssim_c1 = (int)(.01*.01*255*255*64 + .5);
+ static const int ssim_c2 = (int)(.03*.03*255*255*64*63 + .5);
+
+ int fs1 = s1;
+ int fs2 = s2;
+ int fss = ss;
+ int fs12 = s12;
+ int vars = fss * 64 - fs1 * fs1 - fs2 * fs2;
+ int covar = fs12 * 64 - fs1 * fs2;
+
+ return (float)(2 * fs1 * fs2 + ssim_c1) * (float)(2 * covar + ssim_c2)
+ / ((float)(fs1 * fs1 + fs2 * fs2 + ssim_c1) * (float)(vars + ssim_c2));
+}
+
+static float ssim_endn_16bit(const int64_t (*sum0)[4], const int64_t (*sum1)[4], int width, int max)
+{
+ float ssim = 0.0;
+ int i;
+
+ for (i = 0; i < width; i++)
+ ssim += ssim_end1x(sum0[i][0] + sum0[i + 1][0] + sum1[i][0] + sum1[i + 1][0],
+ sum0[i][1] + sum0[i + 1][1] + sum1[i][1] + sum1[i + 1][1],
+ sum0[i][2] + sum0[i + 1][2] + sum1[i][2] + sum1[i + 1][2],
+ sum0[i][3] + sum0[i + 1][3] + sum1[i][3] + sum1[i + 1][3],
+ max);
+ return ssim;
+}
+
+static float ssim_endn_8bit(const int (*sum0)[4], const int (*sum1)[4], int width)
+{
+ float ssim = 0.0;
+ int i;
+
+ for (i = 0; i < width; i++)
+ ssim += ssim_end1(sum0[i][0] + sum0[i + 1][0] + sum1[i][0] + sum1[i + 1][0],
+ sum0[i][1] + sum0[i + 1][1] + sum1[i][1] + sum1[i + 1][1],
+ sum0[i][2] + sum0[i + 1][2] + sum1[i][2] + sum1[i + 1][2],
+ sum0[i][3] + sum0[i + 1][3] + sum1[i][3] + sum1[i + 1][3]);
+ return ssim;
+}
+
+static float ssim_plane_16bit(SSIMDSPContext *dsp,
+ uint8_t *main, int main_stride,
+ uint8_t *ref, int ref_stride,
+ int width, int height, void *temp,
+ int max)
+{
+ int z = 0, y;
+ float ssim = 0.0;
+ int64_t (*sum0)[4] = temp;
+ int64_t (*sum1)[4] = sum0 + (width >> 2) + 3;
+
+ width >>= 2;
+ height >>= 2;
+
+ for (y = 1; y < height; y++) {
+ for (; z <= y; z++) {
+ FFSWAP(void*, sum0, sum1);
+ ssim_4x4xn_16bit(&main[4 * z * main_stride], main_stride,
+ &ref[4 * z * ref_stride], ref_stride,
+ sum0, width);
+ }
+
+ ssim += ssim_endn_16bit((const int64_t (*)[4])sum0, (const int64_t (*)[4])sum1, width - 1, max);
+ }
+
+ return ssim / ((height - 1) * (width - 1));
+}
+
+static float ssim_plane(SSIMDSPContext *dsp,
+ uint8_t *main, int main_stride,
+ uint8_t *ref, int ref_stride,
+ int width, int height, void *temp,
+ int max)
+{
+ int z = 0, y;
+ float ssim = 0.0;
+ int (*sum0)[4] = temp;
+ int (*sum1)[4] = sum0 + (width >> 2) + 3;
+
+ width >>= 2;
+ height >>= 2;
+
+ for (y = 1; y < height; y++) {
+ for (; z <= y; z++) {
+ FFSWAP(void*, sum0, sum1);
+ dsp->ssim_4x4_line(&main[4 * z * main_stride], main_stride,
+ &ref[4 * z * ref_stride], ref_stride,
+ sum0, width);
+ }
+
+ ssim += dsp->ssim_end_line((const int (*)[4])sum0, (const int (*)[4])sum1, width - 1);
+ }
+
+ return ssim / ((height - 1) * (width - 1));
+}
+
+static double ssim_db(double ssim, double weight)
+{
+ return 10 * log10(weight / (weight - ssim));
+}
+
+static AVFrame *do_ssim(AVFilterContext *ctx, AVFrame *main,
+ const AVFrame *ref)
+{
+ AVDictionary **metadata = avpriv_frame_get_metadatap(main);
+ SSIMContext *s = ctx->priv;
+ float c[4], ssimv = 0.0;
+ int i;
+
+ s->nb_frames++;
+
+ for (i = 0; i < s->nb_components; i++) {
+ c[i] = s->ssim_plane(&s->dsp, main->data[i], main->linesize[i],
+ ref->data[i], ref->linesize[i],
+ s->planewidth[i], s->planeheight[i], s->temp,
+ s->max);
+ ssimv += s->coefs[i] * c[i];
+ s->ssim[i] += c[i];
+ }
+ for (i = 0; i < s->nb_components; i++) {
+ int cidx = s->is_rgb ? s->rgba_map[i] : i;
+ set_meta(metadata, "lavfi.ssim.", s->comps[i], c[cidx]);
+ }
+ s->ssim_total += ssimv;
+
+ set_meta(metadata, "lavfi.ssim.All", 0, ssimv);
+ set_meta(metadata, "lavfi.ssim.dB", 0, ssim_db(ssimv, 1.0));
+
+ if (s->stats_file) {
+ fprintf(s->stats_file, "n:%"PRId64" ", s->nb_frames);
+
+ for (i = 0; i < s->nb_components; i++) {
+ int cidx = s->is_rgb ? s->rgba_map[i] : i;
+ fprintf(s->stats_file, "%c:%f ", s->comps[i], c[cidx]);
+ }
+
+ fprintf(s->stats_file, "All:%f (%f)\n", ssimv, ssim_db(ssimv, 1.0));
+ }
+
+ return main;
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ SSIMContext *s = ctx->priv;
+
+ if (s->stats_file_str) {
+ if (!strcmp(s->stats_file_str, "-")) {
+ s->stats_file = stdout;
+ } else {
+ s->stats_file = fopen(s->stats_file_str, "w");
+ if (!s->stats_file) {
+ int err = AVERROR(errno);
+ char buf[128];
+ av_strerror(err, buf, sizeof(buf));
+ av_log(ctx, AV_LOG_ERROR, "Could not open stats file %s: %s\n",
+ s->stats_file_str, buf);
+ return err;
+ }
+ }
+ }
+
+ s->dinput.process = do_ssim;
+ s->dinput.shortest = 1;
+ s->dinput.repeatlast = 0;
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY10,
+ AV_PIX_FMT_GRAY12, AV_PIX_FMT_GRAY16,
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P,
+ AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P,
+ AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ444P,
+ AV_PIX_FMT_GBRP,
+#define PF(suf) AV_PIX_FMT_YUV420##suf, AV_PIX_FMT_YUV422##suf, AV_PIX_FMT_YUV444##suf, AV_PIX_FMT_GBR##suf
+ PF(P9), PF(P10), PF(P12), PF(P14), PF(P16),
+ AV_PIX_FMT_NONE
+ };
+
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static int config_input_ref(AVFilterLink *inlink)
+{
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ AVFilterContext *ctx = inlink->dst;
+ SSIMContext *s = ctx->priv;
+ int sum = 0, i;
+
+ s->nb_components = desc->nb_components;
+
+ if (ctx->inputs[0]->w != ctx->inputs[1]->w ||
+ ctx->inputs[0]->h != ctx->inputs[1]->h) {
+ av_log(ctx, AV_LOG_ERROR, "Width and height of input videos must be same.\n");
+ return AVERROR(EINVAL);
+ }
+ if (ctx->inputs[0]->format != ctx->inputs[1]->format) {
+ av_log(ctx, AV_LOG_ERROR, "Inputs must be of same pixel format.\n");
+ return AVERROR(EINVAL);
+ }
+
+ s->is_rgb = ff_fill_rgba_map(s->rgba_map, inlink->format) >= 0;
+ s->comps[0] = s->is_rgb ? 'R' : 'Y';
+ s->comps[1] = s->is_rgb ? 'G' : 'U';
+ s->comps[2] = s->is_rgb ? 'B' : 'V';
+ s->comps[3] = 'A';
+
+ s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
+ s->planeheight[0] = s->planeheight[3] = inlink->h;
+ s->planewidth[1] = s->planewidth[2] = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
+ s->planewidth[0] = s->planewidth[3] = inlink->w;
+ for (i = 0; i < s->nb_components; i++)
+ sum += s->planeheight[i] * s->planewidth[i];
+ for (i = 0; i < s->nb_components; i++)
+ s->coefs[i] = (double) s->planeheight[i] * s->planewidth[i] / sum;
+
+ s->temp = av_malloc_array((2 * inlink->w + 12), sizeof(*s->temp) * (1 + (desc->comp[0].depth > 8)));
+ if (!s->temp)
+ return AVERROR(ENOMEM);
+ s->max = (1 << desc->comp[0].depth) - 1;
+
+ s->ssim_plane = desc->comp[0].depth > 8 ? ssim_plane_16bit : ssim_plane;
+ s->dsp.ssim_4x4_line = ssim_4x4xn_8bit;
+ s->dsp.ssim_end_line = ssim_endn_8bit;
+ if (ARCH_X86)
+ ff_ssim_init_x86(&s->dsp);
+
+ return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ SSIMContext *s = ctx->priv;
+ AVFilterLink *mainlink = ctx->inputs[0];
+ int ret;
+
+ outlink->w = mainlink->w;
+ outlink->h = mainlink->h;
+ outlink->time_base = mainlink->time_base;
+ outlink->sample_aspect_ratio = mainlink->sample_aspect_ratio;
+ outlink->frame_rate = mainlink->frame_rate;
+
+ if ((ret = ff_dualinput_init(ctx, &s->dinput)) < 0)
+ return ret;
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
+{
+ SSIMContext *s = inlink->dst->priv;
+ return ff_dualinput_filter_frame(&s->dinput, inlink, buf);
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ SSIMContext *s = outlink->src->priv;
+ return ff_dualinput_request_frame(&s->dinput, outlink);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ SSIMContext *s = ctx->priv;
+
+ if (s->nb_frames > 0) {
+ char buf[256];
+ int i;
+ buf[0] = 0;
+ for (i = 0; i < s->nb_components; i++) {
+ int c = s->is_rgb ? s->rgba_map[i] : i;
+ av_strlcatf(buf, sizeof(buf), " %c:%f (%f)", s->comps[i], s->ssim[c] / s->nb_frames,
+ ssim_db(s->ssim[c], s->nb_frames));
+ }
+ av_log(ctx, AV_LOG_INFO, "SSIM%s All:%f (%f)\n", buf,
+ s->ssim_total / s->nb_frames, ssim_db(s->ssim_total, s->nb_frames));
+ }
+
+ ff_dualinput_uninit(&s->dinput);
+
+ if (s->stats_file && s->stats_file != stdout)
+ fclose(s->stats_file);
+
+ av_freep(&s->temp);
+}
+
+static const AVFilterPad ssim_inputs[] = {
+ {
+ .name = "main",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },{
+ .name = "reference",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input_ref,
+ },
+ { NULL }
+};
+
+static const AVFilterPad ssim_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_ssim = {
+ .name = "ssim",
+ .description = NULL_IF_CONFIG_SMALL("Calculate the SSIM between two video streams."),
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .priv_size = sizeof(SSIMContext),
+ .priv_class = &ssim_class,
+ .inputs = ssim_inputs,
+ .outputs = ssim_outputs,
+};
diff --git a/libavfilter/vf_stack.c b/libavfilter/vf_stack.c
new file mode 100644
index 0000000000..03643b6f96
--- /dev/null
+++ b/libavfilter/vf_stack.c
@@ -0,0 +1,281 @@
+/*
+ * Copyright (c) 2015 Paul B. Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/avstring.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "framesync.h"
+#include "video.h"
+
+typedef struct StackContext {
+ const AVClass *class;
+ const AVPixFmtDescriptor *desc;
+ int nb_inputs;
+ int shortest;
+ int is_vertical;
+ int nb_planes;
+
+ AVFrame **frames;
+ FFFrameSync fs;
+} StackContext;
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *pix_fmts = NULL;
+ int fmt, ret;
+
+ for (fmt = 0; av_pix_fmt_desc_get(fmt); fmt++) {
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
+ if (!(desc->flags & AV_PIX_FMT_FLAG_PAL ||
+ desc->flags & AV_PIX_FMT_FLAG_HWACCEL ||
+ desc->flags & AV_PIX_FMT_FLAG_BITSTREAM) &&
+ (ret = ff_add_format(&pix_fmts, fmt)) < 0)
+ return ret;
+ }
+
+ return ff_set_common_formats(ctx, pix_fmts);
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ StackContext *s = inlink->dst->priv;
+ return ff_framesync_filter_frame(&s->fs, inlink, in);
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ StackContext *s = ctx->priv;
+ int i, ret;
+
+ if (!strcmp(ctx->filter->name, "vstack"))
+ s->is_vertical = 1;
+
+ s->frames = av_calloc(s->nb_inputs, sizeof(*s->frames));
+ if (!s->frames)
+ return AVERROR(ENOMEM);
+
+ for (i = 0; i < s->nb_inputs; i++) {
+ AVFilterPad pad = { 0 };
+
+ pad.type = AVMEDIA_TYPE_VIDEO;
+ pad.name = av_asprintf("input%d", i);
+ if (!pad.name)
+ return AVERROR(ENOMEM);
+ pad.filter_frame = filter_frame;
+
+ if ((ret = ff_insert_inpad(ctx, i, &pad)) < 0) {
+ av_freep(&pad.name);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int process_frame(FFFrameSync *fs)
+{
+ AVFilterContext *ctx = fs->parent;
+ AVFilterLink *outlink = ctx->outputs[0];
+ StackContext *s = fs->opaque;
+ AVFrame **in = s->frames;
+ AVFrame *out;
+ int i, p, ret, offset[4] = { 0 };
+
+ for (i = 0; i < s->nb_inputs; i++) {
+ if ((ret = ff_framesync_get_frame(&s->fs, i, &in[i], 0)) < 0)
+ return ret;
+ }
+
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out)
+ return AVERROR(ENOMEM);
+ out->pts = av_rescale_q(s->fs.pts, s->fs.time_base, outlink->time_base);
+
+ for (i = 0; i < s->nb_inputs; i++) {
+ AVFilterLink *inlink = ctx->inputs[i];
+ int linesize[4];
+ int height[4];
+
+ if ((ret = av_image_fill_linesizes(linesize, inlink->format, inlink->w)) < 0) {
+ av_frame_free(&out);
+ return ret;
+ }
+
+ height[1] = height[2] = AV_CEIL_RSHIFT(inlink->h, s->desc->log2_chroma_h);
+ height[0] = height[3] = inlink->h;
+
+ for (p = 0; p < s->nb_planes; p++) {
+ if (s->is_vertical) {
+ av_image_copy_plane(out->data[p] + offset[p] * out->linesize[p],
+ out->linesize[p],
+ in[i]->data[p],
+ in[i]->linesize[p],
+ linesize[p], height[p]);
+ offset[p] += height[p];
+ } else {
+ av_image_copy_plane(out->data[p] + offset[p],
+ out->linesize[p],
+ in[i]->data[p],
+ in[i]->linesize[p],
+ linesize[p], height[p]);
+ offset[p] += linesize[p];
+ }
+ }
+ }
+
+ return ff_filter_frame(outlink, out);
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ StackContext *s = ctx->priv;
+ AVRational time_base = ctx->inputs[0]->time_base;
+ AVRational frame_rate = ctx->inputs[0]->frame_rate;
+ int height = ctx->inputs[0]->h;
+ int width = ctx->inputs[0]->w;
+ FFFrameSyncIn *in;
+ int i, ret;
+
+ if (s->is_vertical) {
+ for (i = 1; i < s->nb_inputs; i++) {
+ if (ctx->inputs[i]->w != width) {
+ av_log(ctx, AV_LOG_ERROR, "Input %d width %d does not match input %d width %d.\n", i, ctx->inputs[i]->w, 0, width);
+ return AVERROR(EINVAL);
+ }
+ height += ctx->inputs[i]->h;
+ }
+ } else {
+ for (i = 1; i < s->nb_inputs; i++) {
+ if (ctx->inputs[i]->h != height) {
+ av_log(ctx, AV_LOG_ERROR, "Input %d height %d does not match input %d height %d.\n", i, ctx->inputs[i]->h, 0, height);
+ return AVERROR(EINVAL);
+ }
+ width += ctx->inputs[i]->w;
+ }
+ }
+
+ s->desc = av_pix_fmt_desc_get(outlink->format);
+ if (!s->desc)
+ return AVERROR_BUG;
+ s->nb_planes = av_pix_fmt_count_planes(outlink->format);
+
+ outlink->w = width;
+ outlink->h = height;
+ outlink->time_base = time_base;
+ outlink->frame_rate = frame_rate;
+
+ if ((ret = ff_framesync_init(&s->fs, ctx, s->nb_inputs)) < 0)
+ return ret;
+
+ in = s->fs.in;
+ s->fs.opaque = s;
+ s->fs.on_event = process_frame;
+
+ for (i = 0; i < s->nb_inputs; i++) {
+ AVFilterLink *inlink = ctx->inputs[i];
+
+ in[i].time_base = inlink->time_base;
+ in[i].sync = 1;
+ in[i].before = EXT_STOP;
+ in[i].after = s->shortest ? EXT_STOP : EXT_INFINITY;
+ }
+
+ return ff_framesync_configure(&s->fs);
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ StackContext *s = outlink->src->priv;
+ return ff_framesync_request_frame(&s->fs, outlink);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ StackContext *s = ctx->priv;
+ int i;
+
+ ff_framesync_uninit(&s->fs);
+ av_freep(&s->frames);
+
+ for (i = 0; i < ctx->nb_inputs; i++)
+ av_freep(&ctx->input_pads[i].name);
+}
+
+#define OFFSET(x) offsetof(StackContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
+static const AVOption stack_options[] = {
+ { "inputs", "set number of inputs", OFFSET(nb_inputs), AV_OPT_TYPE_INT, {.i64=2}, 2, INT_MAX, .flags = FLAGS },
+ { "shortest", "force termination when the shortest input terminates", OFFSET(shortest), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, .flags = FLAGS },
+ { NULL },
+};
+
+static const AVFilterPad outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+#if CONFIG_HSTACK_FILTER
+
+#define hstack_options stack_options
+AVFILTER_DEFINE_CLASS(hstack);
+
+AVFilter ff_vf_hstack = {
+ .name = "hstack",
+ .description = NULL_IF_CONFIG_SMALL("Stack video inputs horizontally."),
+ .priv_size = sizeof(StackContext),
+ .priv_class = &hstack_class,
+ .query_formats = query_formats,
+ .outputs = outputs,
+ .init = init,
+ .uninit = uninit,
+ .flags = AVFILTER_FLAG_DYNAMIC_INPUTS,
+};
+
+#endif /* CONFIG_HSTACK_FILTER */
+
+#if CONFIG_VSTACK_FILTER
+
+#define vstack_options stack_options
+AVFILTER_DEFINE_CLASS(vstack);
+
+AVFilter ff_vf_vstack = {
+ .name = "vstack",
+ .description = NULL_IF_CONFIG_SMALL("Stack video inputs vertically."),
+ .priv_size = sizeof(StackContext),
+ .priv_class = &vstack_class,
+ .query_formats = query_formats,
+ .outputs = outputs,
+ .init = init,
+ .uninit = uninit,
+ .flags = AVFILTER_FLAG_DYNAMIC_INPUTS,
+};
+
+#endif /* CONFIG_VSTACK_FILTER */
diff --git a/libavfilter/vf_stereo3d.c b/libavfilter/vf_stereo3d.c
new file mode 100644
index 0000000000..3e23890208
--- /dev/null
+++ b/libavfilter/vf_stereo3d.c
@@ -0,0 +1,1116 @@
+/*
+ * Copyright (c) 2010 Gordon Schmidt <gordon.schmidt <at> s2000.tu-chemnitz.de>
+ * Copyright (c) 2013-2015 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/intreadwrite.h"
+#include "libavutil/opt.h"
+#include "libavutil/parseutils.h"
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+#include "drawutils.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+#include "stereo3d.h"
+
+enum StereoCode {
+ ANAGLYPH_RC_GRAY, // anaglyph red/cyan gray
+ ANAGLYPH_RC_HALF, // anaglyph red/cyan half colored
+ ANAGLYPH_RC_COLOR, // anaglyph red/cyan colored
+ ANAGLYPH_RC_DUBOIS, // anaglyph red/cyan dubois
+ ANAGLYPH_GM_GRAY, // anaglyph green/magenta gray
+ ANAGLYPH_GM_HALF, // anaglyph green/magenta half colored
+ ANAGLYPH_GM_COLOR, // anaglyph green/magenta colored
+ ANAGLYPH_GM_DUBOIS, // anaglyph green/magenta dubois
+ ANAGLYPH_YB_GRAY, // anaglyph yellow/blue gray
+ ANAGLYPH_YB_HALF, // anaglyph yellow/blue half colored
+ ANAGLYPH_YB_COLOR, // anaglyph yellow/blue colored
+ ANAGLYPH_YB_DUBOIS, // anaglyph yellow/blue dubois
+ ANAGLYPH_RB_GRAY, // anaglyph red/blue gray
+ ANAGLYPH_RG_GRAY, // anaglyph red/green gray
+ MONO_L, // mono output for debugging (left eye only)
+ MONO_R, // mono output for debugging (right eye only)
+ INTERLEAVE_ROWS_LR, // row-interleave (left eye has top row)
+ INTERLEAVE_ROWS_RL, // row-interleave (right eye has top row)
+ SIDE_BY_SIDE_LR, // side by side parallel (left eye left, right eye right)
+ SIDE_BY_SIDE_RL, // side by side crosseye (right eye left, left eye right)
+ SIDE_BY_SIDE_2_LR, // side by side parallel with half width resolution
+ SIDE_BY_SIDE_2_RL, // side by side crosseye with half width resolution
+ ABOVE_BELOW_LR, // above-below (left eye above, right eye below)
+ ABOVE_BELOW_RL, // above-below (right eye above, left eye below)
+ ABOVE_BELOW_2_LR, // above-below with half height resolution
+ ABOVE_BELOW_2_RL, // above-below with half height resolution
+ ALTERNATING_LR, // alternating frames (left eye first, right eye second)
+ ALTERNATING_RL, // alternating frames (right eye first, left eye second)
+ CHECKERBOARD_LR, // checkerboard pattern (left eye first, right eye second)
+ CHECKERBOARD_RL, // checkerboard pattern (right eye first, left eye second)
+ INTERLEAVE_COLS_LR, // column-interleave (left eye first, right eye second)
+ INTERLEAVE_COLS_RL, // column-interleave (right eye first, left eye second)
+ HDMI, // HDMI frame pack (left eye first, right eye second)
+ STEREO_CODE_COUNT // TODO: needs autodetection
+};
+
+typedef struct StereoComponent {
+ int format; ///< StereoCode
+ int width, height;
+ int off_left, off_right;
+ int off_lstep, off_rstep;
+ int row_left, row_right;
+ int row_step;
+} StereoComponent;
+
+static const int ana_coeff[][3][6] = {
+ [ANAGLYPH_RB_GRAY] =
+ {{19595, 38470, 7471, 0, 0, 0},
+ { 0, 0, 0, 0, 0, 0},
+ { 0, 0, 0, 19595, 38470, 7471}},
+ [ANAGLYPH_RG_GRAY] =
+ {{19595, 38470, 7471, 0, 0, 0},
+ { 0, 0, 0, 19595, 38470, 7471},
+ { 0, 0, 0, 0, 0, 0}},
+ [ANAGLYPH_RC_GRAY] =
+ {{19595, 38470, 7471, 0, 0, 0},
+ { 0, 0, 0, 19595, 38470, 7471},
+ { 0, 0, 0, 19595, 38470, 7471}},
+ [ANAGLYPH_RC_HALF] =
+ {{19595, 38470, 7471, 0, 0, 0},
+ { 0, 0, 0, 0, 65536, 0},
+ { 0, 0, 0, 0, 0, 65536}},
+ [ANAGLYPH_RC_COLOR] =
+ {{65536, 0, 0, 0, 0, 0},
+ { 0, 0, 0, 0, 65536, 0},
+ { 0, 0, 0, 0, 0, 65536}},
+ [ANAGLYPH_RC_DUBOIS] =
+ {{29891, 32800, 11559, -2849, -5763, -102},
+ {-2627, -2479, -1033, 24804, 48080, -1209},
+ { -997, -1350, -358, -4729, -7403, 80373}},
+ [ANAGLYPH_GM_GRAY] =
+ {{ 0, 0, 0, 19595, 38470, 7471},
+ {19595, 38470, 7471, 0, 0, 0},
+ { 0, 0, 0, 19595, 38470, 7471}},
+ [ANAGLYPH_GM_HALF] =
+ {{ 0, 0, 0, 65536, 0, 0},
+ {19595, 38470, 7471, 0, 0, 0},
+ { 0, 0, 0, 0, 0, 65536}},
+ [ANAGLYPH_GM_COLOR] =
+ {{ 0, 0, 0, 65536, 0, 0},
+ { 0, 65536, 0, 0, 0, 0},
+ { 0, 0, 0, 0, 0, 65536}},
+ [ANAGLYPH_GM_DUBOIS] =
+ {{-4063,-10354, -2556, 34669, 46203, 1573},
+ {18612, 43778, 9372, -1049, -983, -4260},
+ { -983, -1769, 1376, 590, 4915, 61407}},
+ [ANAGLYPH_YB_GRAY] =
+ {{ 0, 0, 0, 19595, 38470, 7471},
+ { 0, 0, 0, 19595, 38470, 7471},
+ {19595, 38470, 7471, 0, 0, 0}},
+ [ANAGLYPH_YB_HALF] =
+ {{ 0, 0, 0, 65536, 0, 0},
+ { 0, 0, 0, 0, 65536, 0},
+ {19595, 38470, 7471, 0, 0, 0}},
+ [ANAGLYPH_YB_COLOR] =
+ {{ 0, 0, 0, 65536, 0, 0},
+ { 0, 0, 0, 0, 65536, 0},
+ { 0, 0, 65536, 0, 0, 0}},
+ [ANAGLYPH_YB_DUBOIS] =
+ {{65535,-12650,18451, -987, -7590, -1049},
+ {-1604, 56032, 4196, 370, 3826, -1049},
+ {-2345,-10676, 1358, 5801, 11416, 56217}},
+};
+
+typedef struct Stereo3DContext {
+ const AVClass *class;
+ StereoComponent in, out;
+ int width, height;
+ const int *ana_matrix[3];
+ int nb_planes;
+ int linesize[4];
+ int pheight[4];
+ int hsub, vsub;
+ int pixstep[4];
+ AVFrame *prev;
+ int blanks;
+ int in_off_left[4], in_off_right[4];
+ Stereo3DDSPContext dsp;
+} Stereo3DContext;
+
+#define OFFSET(x) offsetof(Stereo3DContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption stereo3d_options[] = {
+ { "in", "set input format", OFFSET(in.format), AV_OPT_TYPE_INT, {.i64=SIDE_BY_SIDE_LR}, INTERLEAVE_ROWS_LR, STEREO_CODE_COUNT-1, FLAGS, "in"},
+ { "ab2l", "above below half height left first", 0, AV_OPT_TYPE_CONST, {.i64=ABOVE_BELOW_2_LR}, 0, 0, FLAGS, "in" },
+ { "ab2r", "above below half height right first", 0, AV_OPT_TYPE_CONST, {.i64=ABOVE_BELOW_2_RL}, 0, 0, FLAGS, "in" },
+ { "abl", "above below left first", 0, AV_OPT_TYPE_CONST, {.i64=ABOVE_BELOW_LR}, 0, 0, FLAGS, "in" },
+ { "abr", "above below right first", 0, AV_OPT_TYPE_CONST, {.i64=ABOVE_BELOW_RL}, 0, 0, FLAGS, "in" },
+ { "al", "alternating frames left first", 0, AV_OPT_TYPE_CONST, {.i64=ALTERNATING_LR}, 0, 0, FLAGS, "in" },
+ { "ar", "alternating frames right first", 0, AV_OPT_TYPE_CONST, {.i64=ALTERNATING_RL}, 0, 0, FLAGS, "in" },
+ { "sbs2l", "side by side half width left first", 0, AV_OPT_TYPE_CONST, {.i64=SIDE_BY_SIDE_2_LR}, 0, 0, FLAGS, "in" },
+ { "sbs2r", "side by side half width right first", 0, AV_OPT_TYPE_CONST, {.i64=SIDE_BY_SIDE_2_RL}, 0, 0, FLAGS, "in" },
+ { "sbsl", "side by side left first", 0, AV_OPT_TYPE_CONST, {.i64=SIDE_BY_SIDE_LR}, 0, 0, FLAGS, "in" },
+ { "sbsr", "side by side right first", 0, AV_OPT_TYPE_CONST, {.i64=SIDE_BY_SIDE_RL}, 0, 0, FLAGS, "in" },
+ { "irl", "interleave rows left first", 0, AV_OPT_TYPE_CONST, {.i64=INTERLEAVE_ROWS_LR}, 0, 0, FLAGS, "in" },
+ { "irr", "interleave rows right first", 0, AV_OPT_TYPE_CONST, {.i64=INTERLEAVE_ROWS_RL}, 0, 0, FLAGS, "in" },
+ { "icl", "interleave columns left first", 0, AV_OPT_TYPE_CONST, {.i64=INTERLEAVE_COLS_LR}, 0, 0, FLAGS, "in" },
+ { "icr", "interleave columns right first", 0, AV_OPT_TYPE_CONST, {.i64=INTERLEAVE_COLS_RL}, 0, 0, FLAGS, "in" },
+ { "out", "set output format", OFFSET(out.format), AV_OPT_TYPE_INT, {.i64=ANAGLYPH_RC_DUBOIS}, 0, STEREO_CODE_COUNT-1, FLAGS, "out"},
+ { "ab2l", "above below half height left first", 0, AV_OPT_TYPE_CONST, {.i64=ABOVE_BELOW_2_LR}, 0, 0, FLAGS, "out" },
+ { "ab2r", "above below half height right first", 0, AV_OPT_TYPE_CONST, {.i64=ABOVE_BELOW_2_RL}, 0, 0, FLAGS, "out" },
+ { "abl", "above below left first", 0, AV_OPT_TYPE_CONST, {.i64=ABOVE_BELOW_LR}, 0, 0, FLAGS, "out" },
+ { "abr", "above below right first", 0, AV_OPT_TYPE_CONST, {.i64=ABOVE_BELOW_RL}, 0, 0, FLAGS, "out" },
+ { "agmc", "anaglyph green magenta color", 0, AV_OPT_TYPE_CONST, {.i64=ANAGLYPH_GM_COLOR}, 0, 0, FLAGS, "out" },
+ { "agmd", "anaglyph green magenta dubois", 0, AV_OPT_TYPE_CONST, {.i64=ANAGLYPH_GM_DUBOIS}, 0, 0, FLAGS, "out" },
+ { "agmg", "anaglyph green magenta gray", 0, AV_OPT_TYPE_CONST, {.i64=ANAGLYPH_GM_GRAY}, 0, 0, FLAGS, "out" },
+ { "agmh", "anaglyph green magenta half color", 0, AV_OPT_TYPE_CONST, {.i64=ANAGLYPH_GM_HALF}, 0, 0, FLAGS, "out" },
+ { "al", "alternating frames left first", 0, AV_OPT_TYPE_CONST, {.i64=ALTERNATING_LR}, 0, 0, FLAGS, "out" },
+ { "ar", "alternating frames right first", 0, AV_OPT_TYPE_CONST, {.i64=ALTERNATING_RL}, 0, 0, FLAGS, "out" },
+ { "arbg", "anaglyph red blue gray", 0, AV_OPT_TYPE_CONST, {.i64=ANAGLYPH_RB_GRAY}, 0, 0, FLAGS, "out" },
+ { "arcc", "anaglyph red cyan color", 0, AV_OPT_TYPE_CONST, {.i64=ANAGLYPH_RC_COLOR}, 0, 0, FLAGS, "out" },
+ { "arcd", "anaglyph red cyan dubois", 0, AV_OPT_TYPE_CONST, {.i64=ANAGLYPH_RC_DUBOIS}, 0, 0, FLAGS, "out" },
+ { "arcg", "anaglyph red cyan gray", 0, AV_OPT_TYPE_CONST, {.i64=ANAGLYPH_RC_GRAY}, 0, 0, FLAGS, "out" },
+ { "arch", "anaglyph red cyan half color", 0, AV_OPT_TYPE_CONST, {.i64=ANAGLYPH_RC_HALF}, 0, 0, FLAGS, "out" },
+ { "argg", "anaglyph red green gray", 0, AV_OPT_TYPE_CONST, {.i64=ANAGLYPH_RG_GRAY}, 0, 0, FLAGS, "out" },
+ { "aybc", "anaglyph yellow blue color", 0, AV_OPT_TYPE_CONST, {.i64=ANAGLYPH_YB_COLOR}, 0, 0, FLAGS, "out" },
+ { "aybd", "anaglyph yellow blue dubois", 0, AV_OPT_TYPE_CONST, {.i64=ANAGLYPH_YB_DUBOIS}, 0, 0, FLAGS, "out" },
+ { "aybg", "anaglyph yellow blue gray", 0, AV_OPT_TYPE_CONST, {.i64=ANAGLYPH_YB_GRAY}, 0, 0, FLAGS, "out" },
+ { "aybh", "anaglyph yellow blue half color", 0, AV_OPT_TYPE_CONST, {.i64=ANAGLYPH_YB_HALF}, 0, 0, FLAGS, "out" },
+ { "irl", "interleave rows left first", 0, AV_OPT_TYPE_CONST, {.i64=INTERLEAVE_ROWS_LR}, 0, 0, FLAGS, "out" },
+ { "irr", "interleave rows right first", 0, AV_OPT_TYPE_CONST, {.i64=INTERLEAVE_ROWS_RL}, 0, 0, FLAGS, "out" },
+ { "ml", "mono left", 0, AV_OPT_TYPE_CONST, {.i64=MONO_L}, 0, 0, FLAGS, "out" },
+ { "mr", "mono right", 0, AV_OPT_TYPE_CONST, {.i64=MONO_R}, 0, 0, FLAGS, "out" },
+ { "sbs2l", "side by side half width left first", 0, AV_OPT_TYPE_CONST, {.i64=SIDE_BY_SIDE_2_LR}, 0, 0, FLAGS, "out" },
+ { "sbs2r", "side by side half width right first", 0, AV_OPT_TYPE_CONST, {.i64=SIDE_BY_SIDE_2_RL}, 0, 0, FLAGS, "out" },
+ { "sbsl", "side by side left first", 0, AV_OPT_TYPE_CONST, {.i64=SIDE_BY_SIDE_LR}, 0, 0, FLAGS, "out" },
+ { "sbsr", "side by side right first", 0, AV_OPT_TYPE_CONST, {.i64=SIDE_BY_SIDE_RL}, 0, 0, FLAGS, "out" },
+ { "chl", "checkerboard left first", 0, AV_OPT_TYPE_CONST, {.i64=CHECKERBOARD_LR}, 0, 0, FLAGS, "out" },
+ { "chr", "checkerboard right first", 0, AV_OPT_TYPE_CONST, {.i64=CHECKERBOARD_RL}, 0, 0, FLAGS, "out" },
+ { "icl", "interleave columns left first", 0, AV_OPT_TYPE_CONST, {.i64=INTERLEAVE_COLS_LR}, 0, 0, FLAGS, "out" },
+ { "icr", "interleave columns right first", 0, AV_OPT_TYPE_CONST, {.i64=INTERLEAVE_COLS_RL}, 0, 0, FLAGS, "out" },
+ { "hdmi", "HDMI frame pack", 0, AV_OPT_TYPE_CONST, {.i64=HDMI}, 0, 0, FLAGS, "out" },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(stereo3d);
+
+static const enum AVPixelFormat anaglyph_pix_fmts[] = {
+ AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
+ AV_PIX_FMT_NONE
+};
+
+static const enum AVPixelFormat other_pix_fmts[] = {
+ AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
+ AV_PIX_FMT_RGB48BE, AV_PIX_FMT_BGR48BE,
+ AV_PIX_FMT_RGB48LE, AV_PIX_FMT_BGR48LE,
+ AV_PIX_FMT_RGBA64BE, AV_PIX_FMT_BGRA64BE,
+ AV_PIX_FMT_RGBA64LE, AV_PIX_FMT_BGRA64LE,
+ AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA,
+ AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR,
+ AV_PIX_FMT_RGB0, AV_PIX_FMT_BGR0,
+ AV_PIX_FMT_0RGB, AV_PIX_FMT_0BGR,
+ AV_PIX_FMT_GBRP,
+ AV_PIX_FMT_GBRP9BE, AV_PIX_FMT_GBRP9LE,
+ AV_PIX_FMT_GBRP10BE, AV_PIX_FMT_GBRP10LE,
+ AV_PIX_FMT_GBRP12BE, AV_PIX_FMT_GBRP12LE,
+ AV_PIX_FMT_GBRP14BE, AV_PIX_FMT_GBRP14LE,
+ AV_PIX_FMT_GBRP16BE, AV_PIX_FMT_GBRP16LE,
+ AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUVA420P,
+ AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVA422P,
+ AV_PIX_FMT_YUV440P,
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUVA444P,
+ AV_PIX_FMT_YUVJ411P,
+ AV_PIX_FMT_YUVJ420P,
+ AV_PIX_FMT_YUVJ422P,
+ AV_PIX_FMT_YUVJ440P,
+ AV_PIX_FMT_YUVJ444P,
+ AV_PIX_FMT_YUV420P9LE, AV_PIX_FMT_YUVA420P9LE,
+ AV_PIX_FMT_YUV420P9BE, AV_PIX_FMT_YUVA420P9BE,
+ AV_PIX_FMT_YUV422P9LE, AV_PIX_FMT_YUVA422P9LE,
+ AV_PIX_FMT_YUV422P9BE, AV_PIX_FMT_YUVA422P9BE,
+ AV_PIX_FMT_YUV444P9LE, AV_PIX_FMT_YUVA444P9LE,
+ AV_PIX_FMT_YUV444P9BE, AV_PIX_FMT_YUVA444P9BE,
+ AV_PIX_FMT_YUV420P10LE, AV_PIX_FMT_YUVA420P10LE,
+ AV_PIX_FMT_YUV420P10BE, AV_PIX_FMT_YUVA420P10BE,
+ AV_PIX_FMT_YUV422P10LE, AV_PIX_FMT_YUVA422P10LE,
+ AV_PIX_FMT_YUV422P10BE, AV_PIX_FMT_YUVA422P10BE,
+ AV_PIX_FMT_YUV444P10LE, AV_PIX_FMT_YUVA444P10LE,
+ AV_PIX_FMT_YUV444P10BE, AV_PIX_FMT_YUVA444P10BE,
+ AV_PIX_FMT_YUV420P12BE, AV_PIX_FMT_YUV420P12LE,
+ AV_PIX_FMT_YUV422P12BE, AV_PIX_FMT_YUV422P12LE,
+ AV_PIX_FMT_YUV444P12BE, AV_PIX_FMT_YUV444P12LE,
+ AV_PIX_FMT_YUV420P14BE, AV_PIX_FMT_YUV420P14LE,
+ AV_PIX_FMT_YUV422P14BE, AV_PIX_FMT_YUV422P14LE,
+ AV_PIX_FMT_YUV444P14BE, AV_PIX_FMT_YUV444P14LE,
+ AV_PIX_FMT_YUV420P16LE, AV_PIX_FMT_YUVA420P16LE,
+ AV_PIX_FMT_YUV420P16BE, AV_PIX_FMT_YUVA420P16BE,
+ AV_PIX_FMT_YUV422P16LE, AV_PIX_FMT_YUVA422P16LE,
+ AV_PIX_FMT_YUV422P16BE, AV_PIX_FMT_YUVA422P16BE,
+ AV_PIX_FMT_YUV444P16LE, AV_PIX_FMT_YUVA444P16LE,
+ AV_PIX_FMT_YUV444P16BE, AV_PIX_FMT_YUVA444P16BE,
+ AV_PIX_FMT_NONE
+};
+
+static int query_formats(AVFilterContext *ctx)
+{
+ Stereo3DContext *s = ctx->priv;
+ const enum AVPixelFormat *pix_fmts;
+ AVFilterFormats *fmts_list;
+
+ switch (s->out.format) {
+ case ANAGLYPH_GM_COLOR:
+ case ANAGLYPH_GM_DUBOIS:
+ case ANAGLYPH_GM_GRAY:
+ case ANAGLYPH_GM_HALF:
+ case ANAGLYPH_RB_GRAY:
+ case ANAGLYPH_RC_COLOR:
+ case ANAGLYPH_RC_DUBOIS:
+ case ANAGLYPH_RC_GRAY:
+ case ANAGLYPH_RC_HALF:
+ case ANAGLYPH_RG_GRAY:
+ case ANAGLYPH_YB_COLOR:
+ case ANAGLYPH_YB_DUBOIS:
+ case ANAGLYPH_YB_GRAY:
+ case ANAGLYPH_YB_HALF:
+ pix_fmts = anaglyph_pix_fmts;
+ break;
+ default:
+ pix_fmts = other_pix_fmts;
+ }
+
+ fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static inline uint8_t ana_convert(const int *coeff, const uint8_t *left, const uint8_t *right)
+{
+ int sum;
+
+ sum = coeff[0] * left[0] + coeff[3] * right[0]; //red in
+ sum += coeff[1] * left[1] + coeff[4] * right[1]; //green in
+ sum += coeff[2] * left[2] + coeff[5] * right[2]; //blue in
+
+ return av_clip_uint8(sum >> 16);
+}
+
+static void anaglyph_ic(uint8_t *dst, uint8_t *lsrc, uint8_t *rsrc,
+ ptrdiff_t dst_linesize, ptrdiff_t l_linesize, ptrdiff_t r_linesize,
+ int width, int height,
+ const int *ana_matrix_r, const int *ana_matrix_g, const int *ana_matrix_b)
+{
+ int x, y, o;
+
+ for (y = 0; y < height; y++) {
+ for (o = 0, x = 0; x < width; x++, o+= 3) {
+ dst[o ] = ana_convert(ana_matrix_r, lsrc + o * 2, rsrc + o * 2);
+ dst[o + 1] = ana_convert(ana_matrix_g, lsrc + o * 2, rsrc + o * 2);
+ dst[o + 2] = ana_convert(ana_matrix_b, lsrc + o * 2, rsrc + o * 2);
+ }
+
+ dst += dst_linesize;
+ lsrc += l_linesize;
+ rsrc += r_linesize;
+ }
+}
+
+static void anaglyph(uint8_t *dst, uint8_t *lsrc, uint8_t *rsrc,
+ ptrdiff_t dst_linesize, ptrdiff_t l_linesize, ptrdiff_t r_linesize,
+ int width, int height,
+ const int *ana_matrix_r, const int *ana_matrix_g, const int *ana_matrix_b)
+{
+ int x, y, o;
+
+ for (y = 0; y < height; y++) {
+ for (o = 0, x = 0; x < width; x++, o+= 3) {
+ dst[o ] = ana_convert(ana_matrix_r, lsrc + o, rsrc + o);
+ dst[o + 1] = ana_convert(ana_matrix_g, lsrc + o, rsrc + o);
+ dst[o + 2] = ana_convert(ana_matrix_b, lsrc + o, rsrc + o);
+ }
+
+ dst += dst_linesize;
+ lsrc += l_linesize;
+ rsrc += r_linesize;
+ }
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ AVFilterLink *inlink = ctx->inputs[0];
+ Stereo3DContext *s = ctx->priv;
+ AVRational aspect = inlink->sample_aspect_ratio;
+ AVRational fps = inlink->frame_rate;
+ AVRational tb = inlink->time_base;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(outlink->format);
+ int ret;
+
+ switch (s->in.format) {
+ case INTERLEAVE_COLS_LR:
+ case INTERLEAVE_COLS_RL:
+ case SIDE_BY_SIDE_2_LR:
+ case SIDE_BY_SIDE_LR:
+ case SIDE_BY_SIDE_2_RL:
+ case SIDE_BY_SIDE_RL:
+ if (inlink->w & 1) {
+ av_log(ctx, AV_LOG_ERROR, "width must be even\n");
+ return AVERROR_INVALIDDATA;
+ }
+ break;
+ case INTERLEAVE_ROWS_LR:
+ case INTERLEAVE_ROWS_RL:
+ case ABOVE_BELOW_2_LR:
+ case ABOVE_BELOW_LR:
+ case ABOVE_BELOW_2_RL:
+ case ABOVE_BELOW_RL:
+ if (inlink->h & 1) {
+ av_log(ctx, AV_LOG_ERROR, "height must be even\n");
+ return AVERROR_INVALIDDATA;
+ }
+ break;
+ }
+
+ s->in.width =
+ s->width = inlink->w;
+ s->in.height =
+ s->height = inlink->h;
+ s->in.off_lstep =
+ s->in.off_rstep =
+ s->in.off_left =
+ s->in.off_right =
+ s->in.row_left =
+ s->in.row_right = 0;
+ s->in.row_step = 1;
+
+ switch (s->in.format) {
+ case SIDE_BY_SIDE_2_LR:
+ aspect.num *= 2;
+ case SIDE_BY_SIDE_LR:
+ s->width = inlink->w / 2;
+ s->in.off_right = s->width;
+ break;
+ case SIDE_BY_SIDE_2_RL:
+ aspect.num *= 2;
+ case SIDE_BY_SIDE_RL:
+ s->width = inlink->w / 2;
+ s->in.off_left = s->width;
+ break;
+ case ABOVE_BELOW_2_LR:
+ aspect.den *= 2;
+ case ABOVE_BELOW_LR:
+ s->in.row_right =
+ s->height = inlink->h / 2;
+ break;
+ case ABOVE_BELOW_2_RL:
+ aspect.den *= 2;
+ case ABOVE_BELOW_RL:
+ s->in.row_left =
+ s->height = inlink->h / 2;
+ break;
+ case ALTERNATING_RL:
+ case ALTERNATING_LR:
+ fps.den *= 2;
+ tb.num *= 2;
+ break;
+ case INTERLEAVE_COLS_RL:
+ case INTERLEAVE_COLS_LR:
+ s->width = inlink->w / 2;
+ break;
+ case INTERLEAVE_ROWS_LR:
+ case INTERLEAVE_ROWS_RL:
+ s->in.row_step = 2;
+ if (s->in.format == INTERLEAVE_ROWS_RL)
+ s->in.off_lstep = 1;
+ else
+ s->in.off_rstep = 1;
+ if (s->out.format != CHECKERBOARD_LR &&
+ s->out.format != CHECKERBOARD_RL)
+ s->height = inlink->h / 2;
+ break;
+ default:
+ av_log(ctx, AV_LOG_ERROR, "input format %d is not supported\n", s->in.format);
+ return AVERROR(EINVAL);
+ }
+
+ s->out.width = s->width;
+ s->out.height = s->height;
+ s->out.off_lstep =
+ s->out.off_rstep =
+ s->out.off_left =
+ s->out.off_right =
+ s->out.row_left =
+ s->out.row_right = 0;
+ s->out.row_step = 1;
+
+ switch (s->out.format) {
+ case ANAGLYPH_RB_GRAY:
+ case ANAGLYPH_RG_GRAY:
+ case ANAGLYPH_RC_GRAY:
+ case ANAGLYPH_RC_HALF:
+ case ANAGLYPH_RC_COLOR:
+ case ANAGLYPH_RC_DUBOIS:
+ case ANAGLYPH_GM_GRAY:
+ case ANAGLYPH_GM_HALF:
+ case ANAGLYPH_GM_COLOR:
+ case ANAGLYPH_GM_DUBOIS:
+ case ANAGLYPH_YB_GRAY:
+ case ANAGLYPH_YB_HALF:
+ case ANAGLYPH_YB_COLOR:
+ case ANAGLYPH_YB_DUBOIS: {
+ uint8_t rgba_map[4];
+
+ ff_fill_rgba_map(rgba_map, outlink->format);
+ s->ana_matrix[rgba_map[0]] = &ana_coeff[s->out.format][0][0];
+ s->ana_matrix[rgba_map[1]] = &ana_coeff[s->out.format][1][0];
+ s->ana_matrix[rgba_map[2]] = &ana_coeff[s->out.format][2][0];
+ break;
+ }
+ case SIDE_BY_SIDE_2_LR:
+ aspect.den *= 2;
+ case SIDE_BY_SIDE_LR:
+ s->out.width = s->width * 2;
+ s->out.off_right = s->width;
+ break;
+ case SIDE_BY_SIDE_2_RL:
+ aspect.den *= 2;
+ case SIDE_BY_SIDE_RL:
+ s->out.width = s->width * 2;
+ s->out.off_left = s->width;
+ break;
+ case ABOVE_BELOW_2_LR:
+ aspect.num *= 2;
+ case ABOVE_BELOW_LR:
+ s->out.height = s->height * 2;
+ s->out.row_right = s->height;
+ break;
+ case HDMI:
+ if (s->height != 720 && s->height != 1080) {
+ av_log(ctx, AV_LOG_ERROR, "Only 720 and 1080 height supported\n");
+ return AVERROR(EINVAL);
+ }
+
+ s->blanks = s->height / 24;
+ s->out.height = s->height * 2 + s->blanks;
+ s->out.row_right = s->height + s->blanks;
+ break;
+ case ABOVE_BELOW_2_RL:
+ aspect.num *= 2;
+ case ABOVE_BELOW_RL:
+ s->out.height = s->height * 2;
+ s->out.row_left = s->height;
+ break;
+ case INTERLEAVE_ROWS_LR:
+ s->in.row_step = 1 + (s->in.format == INTERLEAVE_ROWS_RL);
+ s->out.row_step = 2;
+ s->out.height = s->height * 2;
+ s->out.off_rstep = 1;
+ break;
+ case INTERLEAVE_ROWS_RL:
+ s->in.row_step = 1 + (s->in.format == INTERLEAVE_ROWS_LR);
+ s->out.row_step = 2;
+ s->out.height = s->height * 2;
+ s->out.off_lstep = 1;
+ break;
+ case MONO_R:
+ if (s->in.format != INTERLEAVE_COLS_LR) {
+ s->in.off_left = s->in.off_right;
+ s->in.row_left = s->in.row_right;
+ }
+ if (s->in.format == INTERLEAVE_ROWS_LR)
+ FFSWAP(int, s->in.off_lstep, s->in.off_rstep);
+ break;
+ case MONO_L:
+ if (s->in.format == INTERLEAVE_ROWS_RL)
+ FFSWAP(int, s->in.off_lstep, s->in.off_rstep);
+ break;
+ case ALTERNATING_RL:
+ case ALTERNATING_LR:
+ fps.num *= 2;
+ tb.den *= 2;
+ break;
+ case CHECKERBOARD_LR:
+ case CHECKERBOARD_RL:
+ s->out.width = s->width * 2;
+ break;
+ case INTERLEAVE_COLS_LR:
+ case INTERLEAVE_COLS_RL:
+ s->out.width = s->width * 2;
+ break;
+ default:
+ av_log(ctx, AV_LOG_ERROR, "output format %d is not supported\n", s->out.format);
+ return AVERROR(EINVAL);
+ }
+
+ if (s->in.format == INTERLEAVE_COLS_LR || s->in.format == INTERLEAVE_COLS_RL) {
+ if ((s->in.format & 1) != (s->out.format & 1)) {
+ FFSWAP(int, s->in.row_left, s->in.row_right);
+ FFSWAP(int, s->in.off_lstep, s->in.off_rstep);
+ FFSWAP(int, s->in.off_left, s->in.off_right);
+ FFSWAP(int, s->out.row_left, s->out.row_right);
+ FFSWAP(int, s->out.off_lstep, s->out.off_rstep);
+ FFSWAP(int, s->out.off_left, s->out.off_right);
+ }
+ }
+
+ outlink->w = s->out.width;
+ outlink->h = s->out.height;
+ outlink->frame_rate = fps;
+ outlink->time_base = tb;
+ outlink->sample_aspect_ratio = aspect;
+
+ if ((ret = av_image_fill_linesizes(s->linesize, outlink->format, s->width)) < 0)
+ return ret;
+ s->nb_planes = av_pix_fmt_count_planes(outlink->format);
+ av_image_fill_max_pixsteps(s->pixstep, NULL, desc);
+ s->pheight[1] = s->pheight[2] = AV_CEIL_RSHIFT(s->height, desc->log2_chroma_h);
+ s->pheight[0] = s->pheight[3] = s->height;
+ s->hsub = desc->log2_chroma_w;
+ s->vsub = desc->log2_chroma_h;
+
+ s->dsp.anaglyph = anaglyph;
+ if (ARCH_X86)
+ ff_stereo3d_init_x86(&s->dsp);
+
+ return 0;
+}
+
+typedef struct ThreadData {
+ AVFrame *ileft, *iright;
+ AVFrame *out;
+} ThreadData;
+
+static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
+{
+ Stereo3DContext *s = ctx->priv;
+ ThreadData *td = arg;
+ AVFrame *ileft = td->ileft;
+ AVFrame *iright = td->iright;
+ AVFrame *out = td->out;
+ int height = s->out.height;
+ int start = (height * jobnr ) / nb_jobs;
+ int end = (height * (jobnr+1)) / nb_jobs;
+ const int **ana_matrix = s->ana_matrix;
+
+ s->dsp.anaglyph(out->data[0] + out->linesize[0] * start,
+ ileft ->data[0] + s->in_off_left [0] + ileft->linesize[0] * start * s->in.row_step,
+ iright->data[0] + s->in_off_right[0] + iright->linesize[0] * start * s->in.row_step,
+ out->linesize[0],
+ ileft->linesize[0] * s->in.row_step,
+ iright->linesize[0] * s->in.row_step,
+ s->out.width, end - start,
+ ana_matrix[0], ana_matrix[1], ana_matrix[2]);
+
+ return 0;
+}
+
+static void interleave_cols_to_any(Stereo3DContext *s, int *out_off, int p, AVFrame *in, AVFrame *out, int d)
+{
+ int y, x;
+
+ for (y = 0; y < s->pheight[p]; y++) {
+ const uint8_t *src = (const uint8_t*)in->data[p] + y * in->linesize[p] + d * s->pixstep[p];
+ uint8_t *dst = out->data[p] + out_off[p] + y * out->linesize[p] * s->out.row_step;
+
+ switch (s->pixstep[p]) {
+ case 1:
+ for (x = 0; x < s->linesize[p]; x++)
+ dst[x] = src[x * 2];
+ break;
+ case 2:
+ for (x = 0; x < s->linesize[p]; x+=2)
+ AV_WN16(&dst[x], AV_RN16(&src[x * 2]));
+ break;
+ case 3:
+ for (x = 0; x < s->linesize[p]; x+=3)
+ AV_WB24(&dst[x], AV_RB24(&src[x * 2]));
+ break;
+ case 4:
+ for (x = 0; x < s->linesize[p]; x+=4)
+ AV_WN32(&dst[x], AV_RN32(&src[x * 2]));
+ break;
+ case 6:
+ for (x = 0; x < s->linesize[p]; x+=6)
+ AV_WB48(&dst[x], AV_RB48(&src[x * 2]));
+ break;
+ case 8:
+ for (x = 0; x < s->linesize[p]; x+=8)
+ AV_WN64(&dst[x], AV_RN64(&src[x * 2]));
+ break;
+ }
+ }
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
+{
+ AVFilterContext *ctx = inlink->dst;
+ Stereo3DContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFrame *out, *oleft, *oright, *ileft, *iright;
+ int out_off_left[4], out_off_right[4];
+ int i, ret;
+
+ if (s->in.format == s->out.format)
+ return ff_filter_frame(outlink, inpicref);
+
+ switch (s->out.format) {
+ case ALTERNATING_LR:
+ case ALTERNATING_RL:
+ if (!s->prev) {
+ s->prev = inpicref;
+ return 0;
+ }
+ break;
+ };
+
+ switch (s->in.format) {
+ case ALTERNATING_LR:
+ case ALTERNATING_RL:
+ if (!s->prev) {
+ s->prev = inpicref;
+ return 0;
+ }
+ ileft = s->prev;
+ iright = inpicref;
+ if (s->in.format == ALTERNATING_RL)
+ FFSWAP(AVFrame *, ileft, iright);
+ break;
+ default:
+ ileft = iright = inpicref;
+ };
+
+ if ((s->out.format == ALTERNATING_LR ||
+ s->out.format == ALTERNATING_RL) &&
+ (s->in.format == SIDE_BY_SIDE_LR ||
+ s->in.format == SIDE_BY_SIDE_RL ||
+ s->in.format == SIDE_BY_SIDE_2_LR ||
+ s->in.format == SIDE_BY_SIDE_2_RL ||
+ s->in.format == ABOVE_BELOW_LR ||
+ s->in.format == ABOVE_BELOW_RL ||
+ s->in.format == ABOVE_BELOW_2_LR ||
+ s->in.format == ABOVE_BELOW_2_RL ||
+ s->in.format == INTERLEAVE_ROWS_LR ||
+ s->in.format == INTERLEAVE_ROWS_RL)) {
+ oright = av_frame_clone(s->prev);
+ oleft = av_frame_clone(s->prev);
+ if (!oright || !oleft) {
+ av_frame_free(&oright);
+ av_frame_free(&oleft);
+ av_frame_free(&s->prev);
+ av_frame_free(&inpicref);
+ return AVERROR(ENOMEM);
+ }
+ } else if ((s->out.format == MONO_L ||
+ s->out.format == MONO_R) &&
+ (s->in.format == SIDE_BY_SIDE_LR ||
+ s->in.format == SIDE_BY_SIDE_RL ||
+ s->in.format == SIDE_BY_SIDE_2_LR ||
+ s->in.format == SIDE_BY_SIDE_2_RL ||
+ s->in.format == ABOVE_BELOW_LR ||
+ s->in.format == ABOVE_BELOW_RL ||
+ s->in.format == ABOVE_BELOW_2_LR ||
+ s->in.format == ABOVE_BELOW_2_RL ||
+ s->in.format == INTERLEAVE_ROWS_LR ||
+ s->in.format == INTERLEAVE_ROWS_RL)) {
+ out = oleft = oright = av_frame_clone(inpicref);
+ if (!out) {
+ av_frame_free(&s->prev);
+ av_frame_free(&inpicref);
+ return AVERROR(ENOMEM);
+ }
+ } else if ((s->out.format == MONO_L && s->in.format == ALTERNATING_LR) ||
+ (s->out.format == MONO_R && s->in.format == ALTERNATING_RL)) {
+ s->prev->pts /= 2;
+ ret = ff_filter_frame(outlink, s->prev);
+ av_frame_free(&inpicref);
+ s->prev = NULL;
+ return ret;
+ } else if ((s->out.format == MONO_L && s->in.format == ALTERNATING_RL) ||
+ (s->out.format == MONO_R && s->in.format == ALTERNATING_LR)) {
+ av_frame_free(&s->prev);
+ inpicref->pts /= 2;
+ return ff_filter_frame(outlink, inpicref);
+ } else if ((s->out.format == ALTERNATING_LR && s->in.format == ALTERNATING_RL) ||
+ (s->out.format == ALTERNATING_RL && s->in.format == ALTERNATING_LR)) {
+ FFSWAP(int64_t, s->prev->pts, inpicref->pts);
+ ff_filter_frame(outlink, inpicref);
+ ret = ff_filter_frame(outlink, s->prev);
+ s->prev = NULL;
+ return ret;
+ } else {
+ out = oleft = oright = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&s->prev);
+ av_frame_free(&inpicref);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, inpicref);
+
+ if (s->out.format == ALTERNATING_LR ||
+ s->out.format == ALTERNATING_RL) {
+ oright = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!oright) {
+ av_frame_free(&oleft);
+ av_frame_free(&s->prev);
+ av_frame_free(&inpicref);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(oright, s->prev);
+ }
+ }
+
+ for (i = 0; i < 4; i++) {
+ int hsub = i == 1 || i == 2 ? s->hsub : 0;
+ int vsub = i == 1 || i == 2 ? s->vsub : 0;
+ s->in_off_left[i] = (AV_CEIL_RSHIFT(s->in.row_left, vsub) + s->in.off_lstep) * ileft->linesize[i] + AV_CEIL_RSHIFT(s->in.off_left * s->pixstep[i], hsub);
+ s->in_off_right[i] = (AV_CEIL_RSHIFT(s->in.row_right, vsub) + s->in.off_rstep) * iright->linesize[i] + AV_CEIL_RSHIFT(s->in.off_right * s->pixstep[i], hsub);
+ out_off_left[i] = (AV_CEIL_RSHIFT(s->out.row_left, vsub) + s->out.off_lstep) * oleft->linesize[i] + AV_CEIL_RSHIFT(s->out.off_left * s->pixstep[i], hsub);
+ out_off_right[i] = (AV_CEIL_RSHIFT(s->out.row_right, vsub) + s->out.off_rstep) * oright->linesize[i] + AV_CEIL_RSHIFT(s->out.off_right * s->pixstep[i], hsub);
+ }
+
+ switch (s->out.format) {
+ case ALTERNATING_LR:
+ case ALTERNATING_RL:
+ switch (s->in.format) {
+ case INTERLEAVE_ROWS_LR:
+ case INTERLEAVE_ROWS_RL:
+ for (i = 0; i < s->nb_planes; i++) {
+ oleft->linesize[i] *= 2;
+ oright->linesize[i] *= 2;
+ }
+ case ABOVE_BELOW_LR:
+ case ABOVE_BELOW_RL:
+ case ABOVE_BELOW_2_LR:
+ case ABOVE_BELOW_2_RL:
+ case SIDE_BY_SIDE_LR:
+ case SIDE_BY_SIDE_RL:
+ case SIDE_BY_SIDE_2_LR:
+ case SIDE_BY_SIDE_2_RL:
+ oleft->width = outlink->w;
+ oright->width = outlink->w;
+ oleft->height = outlink->h;
+ oright->height = outlink->h;
+
+ for (i = 0; i < s->nb_planes; i++) {
+ oleft->data[i] += s->in_off_left[i];
+ oright->data[i] += s->in_off_right[i];
+ }
+ break;
+ default:
+ goto copy;
+ break;
+ }
+ break;
+ case HDMI:
+ for (i = 0; i < s->nb_planes; i++) {
+ int j, h = s->height >> ((i == 1 || i == 2) ? s->vsub : 0);
+ int b = (s->blanks) >> ((i == 1 || i == 2) ? s->vsub : 0);
+
+ for (j = h; j < h + b; j++)
+ memset(oleft->data[i] + j * s->linesize[i], 0, s->linesize[i]);
+ }
+ case SIDE_BY_SIDE_LR:
+ case SIDE_BY_SIDE_RL:
+ case SIDE_BY_SIDE_2_LR:
+ case SIDE_BY_SIDE_2_RL:
+ case ABOVE_BELOW_LR:
+ case ABOVE_BELOW_RL:
+ case ABOVE_BELOW_2_LR:
+ case ABOVE_BELOW_2_RL:
+ case INTERLEAVE_ROWS_LR:
+ case INTERLEAVE_ROWS_RL:
+copy:
+ if (s->in.format == INTERLEAVE_COLS_LR ||
+ s->in.format == INTERLEAVE_COLS_RL) {
+ for (i = 0; i < s->nb_planes; i++) {
+ int d = (s->in.format & 1) != (s->out.format & 1);
+
+ interleave_cols_to_any(s, out_off_left, i, ileft, oleft, d);
+ interleave_cols_to_any(s, out_off_right, i, iright, oright, !d);
+ }
+ } else {
+ for (i = 0; i < s->nb_planes; i++) {
+ av_image_copy_plane(oleft->data[i] + out_off_left[i],
+ oleft->linesize[i] * s->out.row_step,
+ ileft->data[i] + s->in_off_left[i],
+ ileft->linesize[i] * s->in.row_step,
+ s->linesize[i], s->pheight[i]);
+ av_image_copy_plane(oright->data[i] + out_off_right[i],
+ oright->linesize[i] * s->out.row_step,
+ iright->data[i] + s->in_off_right[i],
+ iright->linesize[i] * s->in.row_step,
+ s->linesize[i], s->pheight[i]);
+ }
+ }
+ break;
+ case MONO_L:
+ iright = ileft;
+ case MONO_R:
+ switch (s->in.format) {
+ case INTERLEAVE_ROWS_LR:
+ case INTERLEAVE_ROWS_RL:
+ for (i = 0; i < s->nb_planes; i++) {
+ out->linesize[i] *= 2;
+ }
+ case ABOVE_BELOW_LR:
+ case ABOVE_BELOW_RL:
+ case ABOVE_BELOW_2_LR:
+ case ABOVE_BELOW_2_RL:
+ case SIDE_BY_SIDE_LR:
+ case SIDE_BY_SIDE_RL:
+ case SIDE_BY_SIDE_2_LR:
+ case SIDE_BY_SIDE_2_RL:
+ out->width = outlink->w;
+ out->height = outlink->h;
+
+ for (i = 0; i < s->nb_planes; i++) {
+ out->data[i] += s->in_off_left[i];
+ }
+ break;
+ case INTERLEAVE_COLS_LR:
+ case INTERLEAVE_COLS_RL:
+ for (i = 0; i < s->nb_planes; i++) {
+ const int d = (s->in.format & 1) != (s->out.format & 1);
+
+ interleave_cols_to_any(s, out_off_right, i, iright, out, d);
+ }
+ break;
+ default:
+ for (i = 0; i < s->nb_planes; i++) {
+ av_image_copy_plane(out->data[i], out->linesize[i],
+ iright->data[i] + s->in_off_left[i],
+ iright->linesize[i] * s->in.row_step,
+ s->linesize[i], s->pheight[i]);
+ }
+ break;
+ }
+ break;
+ case ANAGLYPH_RB_GRAY:
+ case ANAGLYPH_RG_GRAY:
+ case ANAGLYPH_RC_GRAY:
+ case ANAGLYPH_RC_HALF:
+ case ANAGLYPH_RC_COLOR:
+ case ANAGLYPH_RC_DUBOIS:
+ case ANAGLYPH_GM_GRAY:
+ case ANAGLYPH_GM_HALF:
+ case ANAGLYPH_GM_COLOR:
+ case ANAGLYPH_GM_DUBOIS:
+ case ANAGLYPH_YB_GRAY:
+ case ANAGLYPH_YB_HALF:
+ case ANAGLYPH_YB_COLOR:
+ case ANAGLYPH_YB_DUBOIS: {
+ if (s->in.format == INTERLEAVE_COLS_LR ||
+ s->in.format == INTERLEAVE_COLS_RL) {
+ const int d = (s->in.format & 1);
+
+ anaglyph_ic(out->data[0],
+ ileft ->data[0] + s->in_off_left [0] + d * 3,
+ iright->data[0] + s->in_off_right[0] + (!d) * 3,
+ out->linesize[0],
+ ileft->linesize[0] * s->in.row_step,
+ iright->linesize[0] * s->in.row_step,
+ s->out.width, s->out.height,
+ s->ana_matrix[0], s->ana_matrix[1], s->ana_matrix[2]);
+ } else {
+ ThreadData td;
+
+ td.ileft = ileft; td.iright = iright; td.out = out;
+ ctx->internal->execute(ctx, filter_slice, &td, NULL,
+ FFMIN(s->out.height, ff_filter_get_nb_threads(ctx)));
+ }
+ break;
+ }
+ case CHECKERBOARD_RL:
+ case CHECKERBOARD_LR:
+ for (i = 0; i < s->nb_planes; i++) {
+ int x, y;
+
+ for (y = 0; y < s->pheight[i]; y++) {
+ uint8_t *dst = out->data[i] + out->linesize[i] * y;
+ const int d1 = (s->in.format == INTERLEAVE_COLS_LR || s->in.format == INTERLEAVE_COLS_RL) && (s->in.format & 1) != (s->out.format & 1);
+ const int d2 = (s->in.format == INTERLEAVE_COLS_LR || s->in.format == INTERLEAVE_COLS_RL) ? !d1 : 0;
+ const int m = 1 + (s->in.format == INTERLEAVE_COLS_LR || s->in.format == INTERLEAVE_COLS_RL);
+ uint8_t *left = ileft->data[i] + ileft->linesize[i] * y + s->in_off_left[i] + d1 * s->pixstep[i];
+ uint8_t *right = iright->data[i] + iright->linesize[i] * y + s->in_off_right[i] + d2 * s->pixstep[i];
+ int p, b;
+
+ if (s->out.format == CHECKERBOARD_RL && s->in.format != INTERLEAVE_COLS_LR && s->in.format != INTERLEAVE_COLS_RL)
+ FFSWAP(uint8_t*, left, right);
+ switch (s->pixstep[i]) {
+ case 1:
+ for (x = 0, b = 0, p = 0; x < s->linesize[i] * 2; x+=2, p++, b+=2) {
+ dst[x ] = (b&1) == (y&1) ? left[p*m] : right[p*m];
+ dst[x+1] = (b&1) != (y&1) ? left[p*m] : right[p*m];
+ }
+ break;
+ case 2:
+ for (x = 0, b = 0, p = 0; x < s->linesize[i] * 2; x+=4, p+=2, b+=2) {
+ AV_WN16(&dst[x ], (b&1) == (y&1) ? AV_RN16(&left[p*m]) : AV_RN16(&right[p*m]));
+ AV_WN16(&dst[x+2], (b&1) != (y&1) ? AV_RN16(&left[p*m]) : AV_RN16(&right[p*m]));
+ }
+ break;
+ case 3:
+ for (x = 0, b = 0, p = 0; x < s->linesize[i] * 2; x+=6, p+=3, b+=2) {
+ AV_WB24(&dst[x ], (b&1) == (y&1) ? AV_RB24(&left[p*m]) : AV_RB24(&right[p*m]));
+ AV_WB24(&dst[x+3], (b&1) != (y&1) ? AV_RB24(&left[p*m]) : AV_RB24(&right[p*m]));
+ }
+ break;
+ case 4:
+ for (x = 0, b = 0, p = 0; x < s->linesize[i] * 2; x+=8, p+=4, b+=2) {
+ AV_WN32(&dst[x ], (b&1) == (y&1) ? AV_RN32(&left[p*m]) : AV_RN32(&right[p*m]));
+ AV_WN32(&dst[x+4], (b&1) != (y&1) ? AV_RN32(&left[p*m]) : AV_RN32(&right[p*m]));
+ }
+ break;
+ case 6:
+ for (x = 0, b = 0, p = 0; x < s->linesize[i] * 2; x+=12, p+=6, b+=2) {
+ AV_WB48(&dst[x ], (b&1) == (y&1) ? AV_RB48(&left[p*m]) : AV_RB48(&right[p*m]));
+ AV_WB48(&dst[x+6], (b&1) != (y&1) ? AV_RB48(&left[p*m]) : AV_RB48(&right[p*m]));
+ }
+ break;
+ case 8:
+ for (x = 0, b = 0, p = 0; x < s->linesize[i] * 2; x+=16, p+=8, b+=2) {
+ AV_WN64(&dst[x ], (b&1) == (y&1) ? AV_RN64(&left[p*m]) : AV_RN64(&right[p*m]));
+ AV_WN64(&dst[x+8], (b&1) != (y&1) ? AV_RN64(&left[p*m]) : AV_RN64(&right[p*m]));
+ }
+ break;
+ }
+ }
+ }
+ break;
+ case INTERLEAVE_COLS_LR:
+ case INTERLEAVE_COLS_RL:
+ for (i = 0; i < s->nb_planes; i++) {
+ const int d = (s->in.format == INTERLEAVE_COLS_LR || s->in.format == INTERLEAVE_COLS_RL);
+ const int m = 1 + d;
+ int x, y;
+
+ for (y = 0; y < s->pheight[i]; y++) {
+ uint8_t *dst = out->data[i] + out->linesize[i] * y;
+ uint8_t *left = ileft->data[i] + ileft->linesize[i] * y * s->in.row_step + s->in_off_left[i] + d * s->pixstep[i];
+ uint8_t *right = iright->data[i] + iright->linesize[i] * y * s->in.row_step + s->in_off_right[i];
+ int p, b;
+
+ if (s->out.format == INTERLEAVE_COLS_LR)
+ FFSWAP(uint8_t*, left, right);
+
+ switch (s->pixstep[i]) {
+ case 1:
+ for (x = 0, b = 0, p = 0; x < s->linesize[i] * 2; x+=2, p++, b+=2) {
+ dst[x ] = b&1 ? left[p*m] : right[p*m];
+ dst[x+1] = !(b&1) ? left[p*m] : right[p*m];
+ }
+ break;
+ case 2:
+ for (x = 0, b = 0, p = 0; x < s->linesize[i] * 2; x+=4, p+=2, b+=2) {
+ AV_WN16(&dst[x ], b&1 ? AV_RN16(&left[p*m]) : AV_RN16(&right[p*m]));
+ AV_WN16(&dst[x+2], !(b&1) ? AV_RN16(&left[p*m]) : AV_RN16(&right[p*m]));
+ }
+ break;
+ case 3:
+ for (x = 0, b = 0, p = 0; x < s->linesize[i] * 2; x+=6, p+=3, b+=2) {
+ AV_WB24(&dst[x ], b&1 ? AV_RB24(&left[p*m]) : AV_RB24(&right[p*m]));
+ AV_WB24(&dst[x+3], !(b&1) ? AV_RB24(&left[p*m]) : AV_RB24(&right[p*m]));
+ }
+ break;
+ case 4:
+ for (x = 0, b = 0, p = 0; x < s->linesize[i] * 2; x+=8, p+=4, b+=2) {
+ AV_WN32(&dst[x ], b&1 ? AV_RN32(&left[p*m]) : AV_RN32(&right[p*m]));
+ AV_WN32(&dst[x+4], !(b&1) ? AV_RN32(&left[p*m]) : AV_RN32(&right[p*m]));
+ }
+ break;
+ case 6:
+ for (x = 0, b = 0, p = 0; x < s->linesize[i] * 2; x+=12, p+=6, b+=2) {
+ AV_WB48(&dst[x ], b&1 ? AV_RB48(&left[p*m]) : AV_RB48(&right[p*m]));
+ AV_WB48(&dst[x+6], !(b&1) ? AV_RB48(&left[p*m]) : AV_RB48(&right[p*m]));
+ }
+ break;
+ case 8:
+ for (x = 0, b = 0, p = 0; x < s->linesize[i] * 2; x+=16, p+=8, b+=2) {
+ AV_WN64(&dst[x ], b&1 ? AV_RN64(&left[p*m]) : AV_RN64(&right[p*m]));
+ AV_WN64(&dst[x+8], !(b&1) ? AV_RN64(&left[p*m]) : AV_RN64(&right[p*m]));
+ }
+ break;
+ }
+ }
+ }
+ break;
+ default:
+ av_assert0(0);
+ }
+
+ if (oright != oleft) {
+ if (s->out.format == ALTERNATING_LR)
+ FFSWAP(AVFrame *, oleft, oright);
+ oright->pts = s->prev->pts * 2;
+ ff_filter_frame(outlink, oright);
+ out = oleft;
+ oleft->pts = s->prev->pts + inpicref->pts;
+ av_frame_free(&s->prev);
+ s->prev = inpicref;
+ } else if (s->in.format == ALTERNATING_LR ||
+ s->in.format == ALTERNATING_RL) {
+ out->pts = s->prev->pts / 2;
+ av_frame_free(&s->prev);
+ av_frame_free(&inpicref);
+ } else {
+ av_frame_free(&s->prev);
+ av_frame_free(&inpicref);
+ }
+ return ff_filter_frame(outlink, out);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ Stereo3DContext *s = ctx->priv;
+
+ av_frame_free(&s->prev);
+}
+
+static const AVFilterPad stereo3d_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad stereo3d_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_stereo3d = {
+ .name = "stereo3d",
+ .description = NULL_IF_CONFIG_SMALL("Convert video stereoscopic 3D view."),
+ .priv_size = sizeof(Stereo3DContext),
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = stereo3d_inputs,
+ .outputs = stereo3d_outputs,
+ .priv_class = &stereo3d_class,
+ .flags = AVFILTER_FLAG_SLICE_THREADS,
+};
diff --git a/libavfilter/vf_subtitles.c b/libavfilter/vf_subtitles.c
new file mode 100644
index 0000000000..0f22644cc6
--- /dev/null
+++ b/libavfilter/vf_subtitles.c
@@ -0,0 +1,496 @@
+/*
+ * Copyright (c) 2011 Baptiste Coudurier
+ * Copyright (c) 2011 Stefano Sabatini
+ * Copyright (c) 2012 Clément Bœsch
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Libass subtitles burning filter.
+ *
+ * @see{http://www.matroska.org/technical/specs/subtitles/ssa.html}
+ */
+
+#include <ass/ass.h>
+
+#include "config.h"
+#if CONFIG_SUBTITLES_FILTER
+# include "libavcodec/avcodec.h"
+# include "libavformat/avformat.h"
+#endif
+#include "libavutil/avstring.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/opt.h"
+#include "libavutil/parseutils.h"
+#include "drawutils.h"
+#include "avfilter.h"
+#include "internal.h"
+#include "formats.h"
+#include "video.h"
+
+typedef struct {
+ const AVClass *class;
+ ASS_Library *library;
+ ASS_Renderer *renderer;
+ ASS_Track *track;
+ char *filename;
+ char *fontsdir;
+ char *charenc;
+ char *force_style;
+ int stream_index;
+ uint8_t rgba_map[4];
+ int pix_step[4]; ///< steps per pixel for each plane of the main output
+ int original_w, original_h;
+ int shaping;
+ FFDrawContext draw;
+} AssContext;
+
+#define OFFSET(x) offsetof(AssContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+#define COMMON_OPTIONS \
+ {"filename", "set the filename of file to read", OFFSET(filename), AV_OPT_TYPE_STRING, {.str = NULL}, CHAR_MIN, CHAR_MAX, FLAGS }, \
+ {"f", "set the filename of file to read", OFFSET(filename), AV_OPT_TYPE_STRING, {.str = NULL}, CHAR_MIN, CHAR_MAX, FLAGS }, \
+ {"original_size", "set the size of the original video (used to scale fonts)", OFFSET(original_w), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL}, CHAR_MIN, CHAR_MAX, FLAGS }, \
+ {"fontsdir", "set the directory containing the fonts to read", OFFSET(fontsdir), AV_OPT_TYPE_STRING, {.str = NULL}, CHAR_MIN, CHAR_MAX, FLAGS }, \
+
+/* libass supports a log level ranging from 0 to 7 */
+static const int ass_libavfilter_log_level_map[] = {
+ [0] = AV_LOG_FATAL, /* MSGL_FATAL */
+ [1] = AV_LOG_ERROR, /* MSGL_ERR */
+ [2] = AV_LOG_WARNING, /* MSGL_WARN */
+ [3] = AV_LOG_WARNING, /* <undefined> */
+ [4] = AV_LOG_INFO, /* MSGL_INFO */
+ [5] = AV_LOG_INFO, /* <undefined> */
+ [6] = AV_LOG_VERBOSE, /* MSGL_V */
+ [7] = AV_LOG_DEBUG, /* MSGL_DBG2 */
+};
+
+static void ass_log(int ass_level, const char *fmt, va_list args, void *ctx)
+{
+ const int ass_level_clip = av_clip(ass_level, 0,
+ FF_ARRAY_ELEMS(ass_libavfilter_log_level_map) - 1);
+ const int level = ass_libavfilter_log_level_map[ass_level_clip];
+
+ av_vlog(ctx, level, fmt, args);
+ av_log(ctx, level, "\n");
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ AssContext *ass = ctx->priv;
+
+ if (!ass->filename) {
+ av_log(ctx, AV_LOG_ERROR, "No filename provided!\n");
+ return AVERROR(EINVAL);
+ }
+
+ ass->library = ass_library_init();
+ if (!ass->library) {
+ av_log(ctx, AV_LOG_ERROR, "Could not initialize libass.\n");
+ return AVERROR(EINVAL);
+ }
+ ass_set_message_cb(ass->library, ass_log, ctx);
+
+ ass_set_fonts_dir(ass->library, ass->fontsdir);
+
+ ass->renderer = ass_renderer_init(ass->library);
+ if (!ass->renderer) {
+ av_log(ctx, AV_LOG_ERROR, "Could not initialize libass renderer.\n");
+ return AVERROR(EINVAL);
+ }
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ AssContext *ass = ctx->priv;
+
+ if (ass->track)
+ ass_free_track(ass->track);
+ if (ass->renderer)
+ ass_renderer_done(ass->renderer);
+ if (ass->library)
+ ass_library_done(ass->library);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ return ff_set_common_formats(ctx, ff_draw_supported_pixel_formats(0));
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AssContext *ass = inlink->dst->priv;
+
+ ff_draw_init(&ass->draw, inlink->format, 0);
+
+ ass_set_frame_size (ass->renderer, inlink->w, inlink->h);
+ if (ass->original_w && ass->original_h)
+ ass_set_aspect_ratio(ass->renderer, (double)inlink->w / inlink->h,
+ (double)ass->original_w / ass->original_h);
+ if (ass->shaping != -1)
+ ass_set_shaper(ass->renderer, ass->shaping);
+
+ return 0;
+}
+
+/* libass stores an RGBA color in the format RRGGBBTT, where TT is the transparency level */
+#define AR(c) ( (c)>>24)
+#define AG(c) (((c)>>16)&0xFF)
+#define AB(c) (((c)>>8) &0xFF)
+#define AA(c) ((0xFF-(c)) &0xFF)
+
+static void overlay_ass_image(AssContext *ass, AVFrame *picref,
+ const ASS_Image *image)
+{
+ for (; image; image = image->next) {
+ uint8_t rgba_color[] = {AR(image->color), AG(image->color), AB(image->color), AA(image->color)};
+ FFDrawColor color;
+ ff_draw_color(&ass->draw, &color, rgba_color);
+ ff_blend_mask(&ass->draw, &color,
+ picref->data, picref->linesize,
+ picref->width, picref->height,
+ image->bitmap, image->stride, image->w, image->h,
+ 3, 0, image->dst_x, image->dst_y);
+ }
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *picref)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AssContext *ass = ctx->priv;
+ int detect_change = 0;
+ double time_ms = picref->pts * av_q2d(inlink->time_base) * 1000;
+ ASS_Image *image = ass_render_frame(ass->renderer, ass->track,
+ time_ms, &detect_change);
+
+ if (detect_change)
+ av_log(ctx, AV_LOG_DEBUG, "Change happened at time ms:%f\n", time_ms);
+
+ overlay_ass_image(ass, picref, image);
+
+ return ff_filter_frame(outlink, picref);
+}
+
+static const AVFilterPad ass_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ .needs_writable = 1,
+ },
+ { NULL }
+};
+
+static const AVFilterPad ass_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+#if CONFIG_ASS_FILTER
+
+static const AVOption ass_options[] = {
+ COMMON_OPTIONS
+ {"shaping", "set shaping engine", OFFSET(shaping), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 1, FLAGS, "shaping_mode"},
+ {"auto", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = -1}, INT_MIN, INT_MAX, FLAGS, "shaping_mode"},
+ {"simple", "simple shaping", 0, AV_OPT_TYPE_CONST, {.i64 = ASS_SHAPING_SIMPLE}, INT_MIN, INT_MAX, FLAGS, "shaping_mode"},
+ {"complex", "complex shaping", 0, AV_OPT_TYPE_CONST, {.i64 = ASS_SHAPING_COMPLEX}, INT_MIN, INT_MAX, FLAGS, "shaping_mode"},
+ {NULL},
+};
+
+AVFILTER_DEFINE_CLASS(ass);
+
+static av_cold int init_ass(AVFilterContext *ctx)
+{
+ AssContext *ass = ctx->priv;
+ int ret = init(ctx);
+
+ if (ret < 0)
+ return ret;
+
+ /* Initialize fonts */
+ ass_set_fonts(ass->renderer, NULL, NULL, 1, NULL, 1);
+
+ ass->track = ass_read_file(ass->library, ass->filename, NULL);
+ if (!ass->track) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Could not create a libass track when reading file '%s'\n",
+ ass->filename);
+ return AVERROR(EINVAL);
+ }
+ return 0;
+}
+
+AVFilter ff_vf_ass = {
+ .name = "ass",
+ .description = NULL_IF_CONFIG_SMALL("Render ASS subtitles onto input video using the libass library."),
+ .priv_size = sizeof(AssContext),
+ .init = init_ass,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = ass_inputs,
+ .outputs = ass_outputs,
+ .priv_class = &ass_class,
+};
+#endif
+
+#if CONFIG_SUBTITLES_FILTER
+
+static const AVOption subtitles_options[] = {
+ COMMON_OPTIONS
+ {"charenc", "set input character encoding", OFFSET(charenc), AV_OPT_TYPE_STRING, {.str = NULL}, CHAR_MIN, CHAR_MAX, FLAGS},
+ {"stream_index", "set stream index", OFFSET(stream_index), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS},
+ {"si", "set stream index", OFFSET(stream_index), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS},
+ {"force_style", "force subtitle style", OFFSET(force_style), AV_OPT_TYPE_STRING, {.str = NULL}, CHAR_MIN, CHAR_MAX, FLAGS},
+ {NULL},
+};
+
+static const char * const font_mimetypes[] = {
+ "application/x-truetype-font",
+ "application/vnd.ms-opentype",
+ "application/x-font-ttf",
+ NULL
+};
+
+static int attachment_is_font(AVStream * st)
+{
+ const AVDictionaryEntry *tag = NULL;
+ int n;
+
+ tag = av_dict_get(st->metadata, "mimetype", NULL, AV_DICT_MATCH_CASE);
+
+ if (tag) {
+ for (n = 0; font_mimetypes[n]; n++) {
+ if (av_strcasecmp(font_mimetypes[n], tag->value) == 0)
+ return 1;
+ }
+ }
+ return 0;
+}
+
+AVFILTER_DEFINE_CLASS(subtitles);
+
+static av_cold int init_subtitles(AVFilterContext *ctx)
+{
+ int j, ret, sid;
+ int k = 0;
+ AVDictionary *codec_opts = NULL;
+ AVFormatContext *fmt = NULL;
+ AVCodecContext *dec_ctx = NULL;
+ AVCodec *dec = NULL;
+ const AVCodecDescriptor *dec_desc;
+ AVStream *st;
+ AVPacket pkt;
+ AssContext *ass = ctx->priv;
+
+ /* Init libass */
+ ret = init(ctx);
+ if (ret < 0)
+ return ret;
+ ass->track = ass_new_track(ass->library);
+ if (!ass->track) {
+ av_log(ctx, AV_LOG_ERROR, "Could not create a libass track\n");
+ return AVERROR(EINVAL);
+ }
+
+ /* Open subtitles file */
+ ret = avformat_open_input(&fmt, ass->filename, NULL, NULL);
+ if (ret < 0) {
+ av_log(ctx, AV_LOG_ERROR, "Unable to open %s\n", ass->filename);
+ goto end;
+ }
+ ret = avformat_find_stream_info(fmt, NULL);
+ if (ret < 0)
+ goto end;
+
+ /* Locate subtitles stream */
+ if (ass->stream_index < 0)
+ ret = av_find_best_stream(fmt, AVMEDIA_TYPE_SUBTITLE, -1, -1, NULL, 0);
+ else {
+ ret = -1;
+ if (ass->stream_index < fmt->nb_streams) {
+ for (j = 0; j < fmt->nb_streams; j++) {
+ if (fmt->streams[j]->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE) {
+ if (ass->stream_index == k) {
+ ret = j;
+ break;
+ }
+ k++;
+ }
+ }
+ }
+ }
+
+ if (ret < 0) {
+ av_log(ctx, AV_LOG_ERROR, "Unable to locate subtitle stream in %s\n",
+ ass->filename);
+ goto end;
+ }
+ sid = ret;
+ st = fmt->streams[sid];
+
+ /* Load attached fonts */
+ for (j = 0; j < fmt->nb_streams; j++) {
+ AVStream *st = fmt->streams[j];
+ if (st->codecpar->codec_type == AVMEDIA_TYPE_ATTACHMENT &&
+ attachment_is_font(st)) {
+ const AVDictionaryEntry *tag = NULL;
+ tag = av_dict_get(st->metadata, "filename", NULL,
+ AV_DICT_MATCH_CASE);
+
+ if (tag) {
+ av_log(ctx, AV_LOG_DEBUG, "Loading attached font: %s\n",
+ tag->value);
+ ass_add_font(ass->library, tag->value,
+ st->codecpar->extradata,
+ st->codecpar->extradata_size);
+ } else {
+ av_log(ctx, AV_LOG_WARNING,
+ "Font attachment has no filename, ignored.\n");
+ }
+ }
+ }
+
+ /* Initialize fonts */
+ ass_set_fonts(ass->renderer, NULL, NULL, 1, NULL, 1);
+
+ /* Open decoder */
+ dec = avcodec_find_decoder(st->codecpar->codec_id);
+ if (!dec) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to find subtitle codec %s\n",
+ avcodec_get_name(st->codecpar->codec_id));
+ return AVERROR(EINVAL);
+ }
+ dec_desc = avcodec_descriptor_get(st->codecpar->codec_id);
+ if (dec_desc && !(dec_desc->props & AV_CODEC_PROP_TEXT_SUB)) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Only text based subtitles are currently supported\n");
+ return AVERROR_PATCHWELCOME;
+ }
+ if (ass->charenc)
+ av_dict_set(&codec_opts, "sub_charenc", ass->charenc, 0);
+ if (LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(57,26,100))
+ av_dict_set(&codec_opts, "sub_text_format", "ass", 0);
+
+ dec_ctx = avcodec_alloc_context3(dec);
+ if (!dec_ctx)
+ return AVERROR(ENOMEM);
+
+ ret = avcodec_parameters_to_context(dec_ctx, st->codecpar);
+ if (ret < 0)
+ goto end;
+
+ /*
+ * This is required by the decoding process in order to rescale the
+ * timestamps: in the current API the decoded subtitles have their pts
+ * expressed in AV_TIME_BASE, and thus the lavc internals need to know the
+ * stream time base in order to achieve the rescaling.
+ *
+ * That API is old and needs to be reworked to match behaviour with A/V.
+ */
+ av_codec_set_pkt_timebase(dec_ctx, st->time_base);
+
+ ret = avcodec_open2(dec_ctx, NULL, &codec_opts);
+ if (ret < 0)
+ goto end;
+
+ if (ass->force_style) {
+ char **list = NULL;
+ char *temp = NULL;
+ char *ptr = av_strtok(ass->force_style, ",", &temp);
+ int i = 0;
+ while (ptr) {
+ av_dynarray_add(&list, &i, ptr);
+ if (!list) {
+ ret = AVERROR(ENOMEM);
+ goto end;
+ }
+ ptr = av_strtok(NULL, ",", &temp);
+ }
+ av_dynarray_add(&list, &i, NULL);
+ if (!list) {
+ ret = AVERROR(ENOMEM);
+ goto end;
+ }
+ ass_set_style_overrides(ass->library, list);
+ av_free(list);
+ }
+ /* Decode subtitles and push them into the renderer (libass) */
+ if (dec_ctx->subtitle_header)
+ ass_process_codec_private(ass->track,
+ dec_ctx->subtitle_header,
+ dec_ctx->subtitle_header_size);
+ av_init_packet(&pkt);
+ pkt.data = NULL;
+ pkt.size = 0;
+ while (av_read_frame(fmt, &pkt) >= 0) {
+ int i, got_subtitle;
+ AVSubtitle sub = {0};
+
+ if (pkt.stream_index == sid) {
+ ret = avcodec_decode_subtitle2(dec_ctx, &sub, &got_subtitle, &pkt);
+ if (ret < 0) {
+ av_log(ctx, AV_LOG_WARNING, "Error decoding: %s (ignored)\n",
+ av_err2str(ret));
+ } else if (got_subtitle) {
+ const int64_t start_time = av_rescale_q(sub.pts, AV_TIME_BASE_Q, av_make_q(1, 1000));
+ const int64_t duration = sub.end_display_time;
+ for (i = 0; i < sub.num_rects; i++) {
+ char *ass_line = sub.rects[i]->ass;
+ if (!ass_line)
+ break;
+ if (LIBAVCODEC_VERSION_INT < AV_VERSION_INT(57,25,100))
+ ass_process_data(ass->track, ass_line, strlen(ass_line));
+ else
+ ass_process_chunk(ass->track, ass_line, strlen(ass_line),
+ start_time, duration);
+ }
+ }
+ }
+ av_packet_unref(&pkt);
+ avsubtitle_free(&sub);
+ }
+
+end:
+ av_dict_free(&codec_opts);
+ avcodec_close(dec_ctx);
+ avcodec_free_context(&dec_ctx);
+ avformat_close_input(&fmt);
+ return ret;
+}
+
+AVFilter ff_vf_subtitles = {
+ .name = "subtitles",
+ .description = NULL_IF_CONFIG_SMALL("Render text subtitles onto input video using the libass library."),
+ .priv_size = sizeof(AssContext),
+ .init = init_subtitles,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = ass_inputs,
+ .outputs = ass_outputs,
+ .priv_class = &subtitles_class,
+};
+#endif
diff --git a/libavfilter/vf_super2xsai.c b/libavfilter/vf_super2xsai.c
new file mode 100644
index 0000000000..cbb3f62414
--- /dev/null
+++ b/libavfilter/vf_super2xsai.c
@@ -0,0 +1,354 @@
+/*
+ * Copyright (c) 2010 Niel van der Westhuizen <nielkie@gmail.com>
+ * Copyright (c) 2002 A'rpi
+ * Copyright (c) 1997-2001 ZSNES Team ( zsknight@zsnes.com / _demo_@zsnes.com )
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/**
+ * @file
+ * Super 2xSaI video filter
+ * Ported from MPlayer libmpcodecs/vf_2xsai.c.
+ */
+
+#include "libavutil/pixdesc.h"
+#include "libavutil/intreadwrite.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+typedef struct {
+ /* masks used for two pixels interpolation */
+ uint32_t hi_pixel_mask;
+ uint32_t lo_pixel_mask;
+
+ /* masks used for four pixels interpolation */
+ uint32_t q_hi_pixel_mask;
+ uint32_t q_lo_pixel_mask;
+
+ int bpp; ///< bytes per pixel, pixel stride for each (packed) pixel
+ int is_be;
+} Super2xSaIContext;
+
+#define GET_RESULT(A, B, C, D) ((A != C || A != D) - (B != C || B != D))
+
+#define INTERPOLATE(A, B) (((A & hi_pixel_mask) >> 1) + ((B & hi_pixel_mask) >> 1) + (A & B & lo_pixel_mask))
+
+#define Q_INTERPOLATE(A, B, C, D) ((A & q_hi_pixel_mask) >> 2) + ((B & q_hi_pixel_mask) >> 2) + ((C & q_hi_pixel_mask) >> 2) + ((D & q_hi_pixel_mask) >> 2) \
+ + ((((A & q_lo_pixel_mask) + (B & q_lo_pixel_mask) + (C & q_lo_pixel_mask) + (D & q_lo_pixel_mask)) >> 2) & q_lo_pixel_mask)
+
+static void super2xsai(AVFilterContext *ctx,
+ uint8_t *src, int src_linesize,
+ uint8_t *dst, int dst_linesize,
+ int width, int height)
+{
+ Super2xSaIContext *s = ctx->priv;
+ unsigned int x, y;
+ uint32_t color[4][4];
+ unsigned char *src_line[4];
+ const int bpp = s->bpp;
+ const uint32_t hi_pixel_mask = s->hi_pixel_mask;
+ const uint32_t lo_pixel_mask = s->lo_pixel_mask;
+ const uint32_t q_hi_pixel_mask = s->q_hi_pixel_mask;
+ const uint32_t q_lo_pixel_mask = s->q_lo_pixel_mask;
+
+ /* Point to the first 4 lines, first line is duplicated */
+ src_line[0] = src;
+ src_line[1] = src;
+ src_line[2] = src + src_linesize*FFMIN(1, height-1);
+ src_line[3] = src + src_linesize*FFMIN(2, height-1);
+
+#define READ_COLOR4(dst, src_line, off) dst = *((const uint32_t *)src_line + off)
+#define READ_COLOR3(dst, src_line, off) dst = AV_RL24 (src_line + 3*off)
+#define READ_COLOR2(dst, src_line, off) dst = s->is_be ? AV_RB16(src_line + 2 * off) : AV_RL16(src_line + 2 * off)
+
+ for (y = 0; y < height; y++) {
+ uint8_t *dst_line[2];
+
+ dst_line[0] = dst + dst_linesize*2*y;
+ dst_line[1] = dst + dst_linesize*(2*y+1);
+
+ switch (bpp) {
+ case 4:
+ READ_COLOR4(color[0][0], src_line[0], 0); color[0][1] = color[0][0]; READ_COLOR4(color[0][2], src_line[0], 1); READ_COLOR4(color[0][3], src_line[0], 2);
+ READ_COLOR4(color[1][0], src_line[1], 0); color[1][1] = color[1][0]; READ_COLOR4(color[1][2], src_line[1], 1); READ_COLOR4(color[1][3], src_line[1], 2);
+ READ_COLOR4(color[2][0], src_line[2], 0); color[2][1] = color[2][0]; READ_COLOR4(color[2][2], src_line[2], 1); READ_COLOR4(color[2][3], src_line[2], 2);
+ READ_COLOR4(color[3][0], src_line[3], 0); color[3][1] = color[3][0]; READ_COLOR4(color[3][2], src_line[3], 1); READ_COLOR4(color[3][3], src_line[3], 2);
+ break;
+ case 3:
+ READ_COLOR3(color[0][0], src_line[0], 0); color[0][1] = color[0][0]; READ_COLOR3(color[0][2], src_line[0], 1); READ_COLOR3(color[0][3], src_line[0], 2);
+ READ_COLOR3(color[1][0], src_line[1], 0); color[1][1] = color[1][0]; READ_COLOR3(color[1][2], src_line[1], 1); READ_COLOR3(color[1][3], src_line[1], 2);
+ READ_COLOR3(color[2][0], src_line[2], 0); color[2][1] = color[2][0]; READ_COLOR3(color[2][2], src_line[2], 1); READ_COLOR3(color[2][3], src_line[2], 2);
+ READ_COLOR3(color[3][0], src_line[3], 0); color[3][1] = color[3][0]; READ_COLOR3(color[3][2], src_line[3], 1); READ_COLOR3(color[3][3], src_line[3], 2);
+ break;
+ default:
+ READ_COLOR2(color[0][0], src_line[0], 0); color[0][1] = color[0][0]; READ_COLOR2(color[0][2], src_line[0], 1); READ_COLOR2(color[0][3], src_line[0], 2);
+ READ_COLOR2(color[1][0], src_line[1], 0); color[1][1] = color[1][0]; READ_COLOR2(color[1][2], src_line[1], 1); READ_COLOR2(color[1][3], src_line[1], 2);
+ READ_COLOR2(color[2][0], src_line[2], 0); color[2][1] = color[2][0]; READ_COLOR2(color[2][2], src_line[2], 1); READ_COLOR2(color[2][3], src_line[2], 2);
+ READ_COLOR2(color[3][0], src_line[3], 0); color[3][1] = color[3][0]; READ_COLOR2(color[3][2], src_line[3], 1); READ_COLOR2(color[3][3], src_line[3], 2);
+ }
+
+ for (x = 0; x < width; x++) {
+ uint32_t product1a, product1b, product2a, product2b;
+
+//--------------------------------------- B0 B1 B2 B3 0 1 2 3
+// 4 5* 6 S2 -> 4 5* 6 7
+// 1 2 3 S1 8 9 10 11
+// A0 A1 A2 A3 12 13 14 15
+//--------------------------------------
+ if (color[2][1] == color[1][2] && color[1][1] != color[2][2]) {
+ product2b = color[2][1];
+ product1b = product2b;
+ } else if (color[1][1] == color[2][2] && color[2][1] != color[1][2]) {
+ product2b = color[1][1];
+ product1b = product2b;
+ } else if (color[1][1] == color[2][2] && color[2][1] == color[1][2]) {
+ int r = 0;
+
+ r += GET_RESULT(color[1][2], color[1][1], color[1][0], color[3][1]);
+ r += GET_RESULT(color[1][2], color[1][1], color[2][0], color[0][1]);
+ r += GET_RESULT(color[1][2], color[1][1], color[3][2], color[2][3]);
+ r += GET_RESULT(color[1][2], color[1][1], color[0][2], color[1][3]);
+
+ if (r > 0)
+ product1b = color[1][2];
+ else if (r < 0)
+ product1b = color[1][1];
+ else
+ product1b = INTERPOLATE(color[1][1], color[1][2]);
+
+ product2b = product1b;
+ } else {
+ if (color[1][2] == color[2][2] && color[2][2] == color[3][1] && color[2][1] != color[3][2] && color[2][2] != color[3][0])
+ product2b = Q_INTERPOLATE(color[2][2], color[2][2], color[2][2], color[2][1]);
+ else if (color[1][1] == color[2][1] && color[2][1] == color[3][2] && color[3][1] != color[2][2] && color[2][1] != color[3][3])
+ product2b = Q_INTERPOLATE(color[2][1], color[2][1], color[2][1], color[2][2]);
+ else
+ product2b = INTERPOLATE(color[2][1], color[2][2]);
+
+ if (color[1][2] == color[2][2] && color[1][2] == color[0][1] && color[1][1] != color[0][2] && color[1][2] != color[0][0])
+ product1b = Q_INTERPOLATE(color[1][2], color[1][2], color[1][2], color[1][1]);
+ else if (color[1][1] == color[2][1] && color[1][1] == color[0][2] && color[0][1] != color[1][2] && color[1][1] != color[0][3])
+ product1b = Q_INTERPOLATE(color[1][2], color[1][1], color[1][1], color[1][1]);
+ else
+ product1b = INTERPOLATE(color[1][1], color[1][2]);
+ }
+
+ if (color[1][1] == color[2][2] && color[2][1] != color[1][2] && color[1][0] == color[1][1] && color[1][1] != color[3][2])
+ product2a = INTERPOLATE(color[2][1], color[1][1]);
+ else if (color[1][1] == color[2][0] && color[1][2] == color[1][1] && color[1][0] != color[2][1] && color[1][1] != color[3][0])
+ product2a = INTERPOLATE(color[2][1], color[1][1]);
+ else
+ product2a = color[2][1];
+
+ if (color[2][1] == color[1][2] && color[1][1] != color[2][2] && color[2][0] == color[2][1] && color[2][1] != color[0][2])
+ product1a = INTERPOLATE(color[2][1], color[1][1]);
+ else if (color[1][0] == color[2][1] && color[2][2] == color[2][1] && color[2][0] != color[1][1] && color[2][1] != color[0][0])
+ product1a = INTERPOLATE(color[2][1], color[1][1]);
+ else
+ product1a = color[1][1];
+
+ /* Set the calculated pixels */
+ switch (bpp) {
+ case 4:
+ AV_WN32A(dst_line[0] + x * 8, product1a);
+ AV_WN32A(dst_line[0] + x * 8 + 4, product1b);
+ AV_WN32A(dst_line[1] + x * 8, product2a);
+ AV_WN32A(dst_line[1] + x * 8 + 4, product2b);
+ break;
+ case 3:
+ AV_WL24(dst_line[0] + x * 6, product1a);
+ AV_WL24(dst_line[0] + x * 6 + 3, product1b);
+ AV_WL24(dst_line[1] + x * 6, product2a);
+ AV_WL24(dst_line[1] + x * 6 + 3, product2b);
+ break;
+ default: // bpp = 2
+ if (s->is_be) {
+ AV_WB32(dst_line[0] + x * 4, product1a | (product1b << 16));
+ AV_WB32(dst_line[1] + x * 4, product2a | (product2b << 16));
+ } else {
+ AV_WL32(dst_line[0] + x * 4, product1a | (product1b << 16));
+ AV_WL32(dst_line[1] + x * 4, product2a | (product2b << 16));
+ }
+ }
+
+ /* Move color matrix forward */
+ color[0][0] = color[0][1]; color[0][1] = color[0][2]; color[0][2] = color[0][3];
+ color[1][0] = color[1][1]; color[1][1] = color[1][2]; color[1][2] = color[1][3];
+ color[2][0] = color[2][1]; color[2][1] = color[2][2]; color[2][2] = color[2][3];
+ color[3][0] = color[3][1]; color[3][1] = color[3][2]; color[3][2] = color[3][3];
+
+ if (x < width - 3) {
+ x += 3;
+ switch (bpp) {
+ case 4:
+ READ_COLOR4(color[0][3], src_line[0], x);
+ READ_COLOR4(color[1][3], src_line[1], x);
+ READ_COLOR4(color[2][3], src_line[2], x);
+ READ_COLOR4(color[3][3], src_line[3], x);
+ break;
+ case 3:
+ READ_COLOR3(color[0][3], src_line[0], x);
+ READ_COLOR3(color[1][3], src_line[1], x);
+ READ_COLOR3(color[2][3], src_line[2], x);
+ READ_COLOR3(color[3][3], src_line[3], x);
+ break;
+ default: /* case 2 */
+ READ_COLOR2(color[0][3], src_line[0], x);
+ READ_COLOR2(color[1][3], src_line[1], x);
+ READ_COLOR2(color[2][3], src_line[2], x);
+ READ_COLOR2(color[3][3], src_line[3], x);
+ }
+ x -= 3;
+ }
+ }
+
+ /* We're done with one line, so we shift the source lines up */
+ src_line[0] = src_line[1];
+ src_line[1] = src_line[2];
+ src_line[2] = src_line[3];
+
+ /* Read next line */
+ src_line[3] = src_line[2];
+ if (y < height - 3)
+ src_line[3] += src_linesize;
+ } // y loop
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA, AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR,
+ AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
+ AV_PIX_FMT_RGB565BE, AV_PIX_FMT_BGR565BE, AV_PIX_FMT_RGB555BE, AV_PIX_FMT_BGR555BE,
+ AV_PIX_FMT_RGB565LE, AV_PIX_FMT_BGR565LE, AV_PIX_FMT_RGB555LE, AV_PIX_FMT_BGR555LE,
+ AV_PIX_FMT_NONE
+ };
+
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ Super2xSaIContext *s = inlink->dst->priv;
+
+ s->hi_pixel_mask = 0xFEFEFEFE;
+ s->lo_pixel_mask = 0x01010101;
+ s->q_hi_pixel_mask = 0xFCFCFCFC;
+ s->q_lo_pixel_mask = 0x03030303;
+ s->bpp = 4;
+
+ switch (inlink->format) {
+ case AV_PIX_FMT_RGB24:
+ case AV_PIX_FMT_BGR24:
+ s->bpp = 3;
+ break;
+
+ case AV_PIX_FMT_RGB565BE:
+ case AV_PIX_FMT_BGR565BE:
+ s->is_be = 1;
+ case AV_PIX_FMT_RGB565LE:
+ case AV_PIX_FMT_BGR565LE:
+ s->hi_pixel_mask = 0xF7DEF7DE;
+ s->lo_pixel_mask = 0x08210821;
+ s->q_hi_pixel_mask = 0xE79CE79C;
+ s->q_lo_pixel_mask = 0x18631863;
+ s->bpp = 2;
+ break;
+
+ case AV_PIX_FMT_BGR555BE:
+ case AV_PIX_FMT_RGB555BE:
+ s->is_be = 1;
+ case AV_PIX_FMT_BGR555LE:
+ case AV_PIX_FMT_RGB555LE:
+ s->hi_pixel_mask = 0x7BDE7BDE;
+ s->lo_pixel_mask = 0x04210421;
+ s->q_hi_pixel_mask = 0x739C739C;
+ s->q_lo_pixel_mask = 0x0C630C63;
+ s->bpp = 2;
+ break;
+ }
+
+ return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterLink *inlink = outlink->src->inputs[0];
+
+ outlink->w = inlink->w*2;
+ outlink->h = inlink->h*2;
+
+ av_log(inlink->dst, AV_LOG_VERBOSE, "fmt:%s size:%dx%d -> size:%dx%d\n",
+ av_get_pix_fmt_name(inlink->format),
+ inlink->w, inlink->h, outlink->w, outlink->h);
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
+{
+ AVFilterLink *outlink = inlink->dst->outputs[0];
+ AVFrame *outpicref = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!outpicref) {
+ av_frame_free(&inpicref);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(outpicref, inpicref);
+ outpicref->width = outlink->w;
+ outpicref->height = outlink->h;
+
+ super2xsai(inlink->dst, inpicref->data[0], inpicref->linesize[0],
+ outpicref->data[0], outpicref->linesize[0],
+ inlink->w, inlink->h);
+
+ av_frame_free(&inpicref);
+ return ff_filter_frame(outlink, outpicref);
+}
+
+static const AVFilterPad super2xsai_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad super2xsai_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_super2xsai = {
+ .name = "super2xsai",
+ .description = NULL_IF_CONFIG_SMALL("Scale the input by 2x using the Super2xSaI pixel art algorithm."),
+ .priv_size = sizeof(Super2xSaIContext),
+ .query_formats = query_formats,
+ .inputs = super2xsai_inputs,
+ .outputs = super2xsai_outputs,
+};
diff --git a/libavfilter/vf_swaprect.c b/libavfilter/vf_swaprect.c
new file mode 100644
index 0000000000..a0aa59d236
--- /dev/null
+++ b/libavfilter/vf_swaprect.c
@@ -0,0 +1,256 @@
+/*
+ * Copyright (c) 2015 Paul B. Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/avstring.h"
+#include "libavutil/eval.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+typedef struct SwapRectContext {
+ const AVClass *class;
+ char *w, *h;
+ char *x1, *y1;
+ char *x2, *y2;
+
+ int nb_planes;
+ int pixsteps[4];
+
+ const AVPixFmtDescriptor *desc;
+ uint8_t *temp;
+} SwapRectContext;
+
+#define OFFSET(x) offsetof(SwapRectContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
+static const AVOption swaprect_options[] = {
+ { "w", "set rect width", OFFSET(w), AV_OPT_TYPE_STRING, {.str="w/2"}, 0, 0, .flags = FLAGS },
+ { "h", "set rect height", OFFSET(h), AV_OPT_TYPE_STRING, {.str="h/2"}, 0, 0, .flags = FLAGS },
+ { "x1", "set 1st rect x top left coordinate", OFFSET(x1), AV_OPT_TYPE_STRING, {.str="w/2"}, 0, 0, .flags = FLAGS },
+ { "y1", "set 1st rect y top left coordinate", OFFSET(y1), AV_OPT_TYPE_STRING, {.str="h/2"}, 0, 0, .flags = FLAGS },
+ { "x2", "set 2nd rect x top left coordinate", OFFSET(x2), AV_OPT_TYPE_STRING, {.str="0"}, 0, 0, .flags = FLAGS },
+ { "y2", "set 2nd rect y top left coordinate", OFFSET(y2), AV_OPT_TYPE_STRING, {.str="0"}, 0, 0, .flags = FLAGS },
+ { NULL },
+};
+
+AVFILTER_DEFINE_CLASS(swaprect);
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *pix_fmts = NULL;
+ int fmt, ret;
+
+ for (fmt = 0; av_pix_fmt_desc_get(fmt); fmt++) {
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
+ if (!(desc->flags & AV_PIX_FMT_FLAG_PAL ||
+ desc->flags & AV_PIX_FMT_FLAG_HWACCEL ||
+ desc->flags & AV_PIX_FMT_FLAG_BITSTREAM) &&
+ (ret = ff_add_format(&pix_fmts, fmt)) < 0)
+ return ret;
+ }
+
+ return ff_set_common_formats(ctx, pix_fmts);
+}
+
+static const char *const var_names[] = { "w", "h", "a", "n", "t", "pos", "sar", "dar", NULL };
+enum { VAR_W, VAR_H, VAR_A, VAR_N, VAR_T, VAR_POS, VAR_SAR, VAR_DAR, VAR_VARS_NB };
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ SwapRectContext *s = ctx->priv;
+ double var_values[VAR_VARS_NB];
+ int x1[4], y1[4];
+ int x2[4], y2[4];
+ int aw[4], ah[4];
+ int lw[4], lh[4];
+ int pw[4], ph[4];
+ double dw, dh;
+ double dx1, dy1;
+ double dx2, dy2;
+ int y, p, w, h, ret;
+
+ var_values[VAR_W] = inlink->w;
+ var_values[VAR_H] = inlink->h;
+ var_values[VAR_A] = (float) inlink->w / inlink->h;
+ var_values[VAR_SAR] = inlink->sample_aspect_ratio.num ? av_q2d(inlink->sample_aspect_ratio) : 1;
+ var_values[VAR_DAR] = var_values[VAR_A] * var_values[VAR_SAR];
+ var_values[VAR_N] = inlink->frame_count_out;
+ var_values[VAR_T] = in->pts == AV_NOPTS_VALUE ? NAN : in->pts * av_q2d(inlink->time_base);
+ var_values[VAR_POS] = av_frame_get_pkt_pos(in) == -1 ? NAN : av_frame_get_pkt_pos(in);
+
+ ret = av_expr_parse_and_eval(&dw, s->w,
+ var_names, &var_values[0],
+ NULL, NULL, NULL, NULL,
+ 0, 0, ctx);
+ if (ret < 0)
+ return ret;
+
+ ret = av_expr_parse_and_eval(&dh, s->h,
+ var_names, &var_values[0],
+ NULL, NULL, NULL, NULL,
+ 0, 0, ctx);
+ if (ret < 0)
+ return ret;
+
+ ret = av_expr_parse_and_eval(&dx1, s->x1,
+ var_names, &var_values[0],
+ NULL, NULL, NULL, NULL,
+ 0, 0, ctx);
+ if (ret < 0)
+ return ret;
+
+ ret = av_expr_parse_and_eval(&dy1, s->y1,
+ var_names, &var_values[0],
+ NULL, NULL, NULL, NULL,
+ 0, 0, ctx);
+ if (ret < 0)
+ return ret;
+
+ ret = av_expr_parse_and_eval(&dx2, s->x2,
+ var_names, &var_values[0],
+ NULL, NULL, NULL, NULL,
+ 0, 0, ctx);
+ if (ret < 0)
+ return ret;
+
+ ret = av_expr_parse_and_eval(&dy2, s->y2,
+ var_names, &var_values[0],
+ NULL, NULL, NULL, NULL,
+ 0, 0, ctx);
+ if (ret < 0)
+ return ret;
+
+ w = dw; h = dh; x1[0] = dx1; y1[0] = dy1; x2[0] = dx2; y2[0] = dy2;
+
+ x1[0] = av_clip(x1[0], 0, inlink->w - 1);
+ y1[0] = av_clip(y1[0], 0, inlink->w - 1);
+
+ x2[0] = av_clip(x2[0], 0, inlink->w - 1);
+ y2[0] = av_clip(y2[0], 0, inlink->w - 1);
+
+ ah[1] = ah[2] = FF_CEIL_RSHIFT(h, s->desc->log2_chroma_h);
+ ah[0] = ah[3] = h;
+ aw[1] = aw[2] = FF_CEIL_RSHIFT(w, s->desc->log2_chroma_w);
+ aw[0] = aw[3] = w;
+
+ w = FFMIN3(w, inlink->w - x1[0], inlink->w - x2[0]);
+ h = FFMIN3(h, inlink->h - y1[0], inlink->h - y2[0]);
+
+ ph[1] = ph[2] = FF_CEIL_RSHIFT(h, s->desc->log2_chroma_h);
+ ph[0] = ph[3] = h;
+ pw[1] = pw[2] = FF_CEIL_RSHIFT(w, s->desc->log2_chroma_w);
+ pw[0] = pw[3] = w;
+
+ lh[1] = lh[2] = FF_CEIL_RSHIFT(inlink->h, s->desc->log2_chroma_h);
+ lh[0] = lh[3] = inlink->h;
+ lw[1] = lw[2] = FF_CEIL_RSHIFT(inlink->w, s->desc->log2_chroma_w);
+ lw[0] = lw[3] = inlink->w;
+
+ x1[1] = x1[2] = FF_CEIL_RSHIFT(x1[0], s->desc->log2_chroma_w);
+ x1[0] = x1[3] = x1[0];
+ y1[1] = y1[2] = FF_CEIL_RSHIFT(y1[0], s->desc->log2_chroma_h);
+ y1[0] = y1[3] = y1[0];
+
+ x2[1] = x2[2] = FF_CEIL_RSHIFT(x2[0], s->desc->log2_chroma_w);
+ x2[0] = x2[3] = x2[0];
+ y2[1] = y2[2] = FF_CEIL_RSHIFT(y2[0], s->desc->log2_chroma_h);
+ y2[0] = y2[3] = y2[0];
+
+ for (p = 0; p < s->nb_planes; p++) {
+ if (ph[p] == ah[p] && pw[p] == aw[p]) {
+ uint8_t *src = in->data[p] + y1[p] * in->linesize[p] + x1[p] * s->pixsteps[p];
+ uint8_t *dst = in->data[p] + y2[p] * in->linesize[p] + x2[p] * s->pixsteps[p];
+
+ for (y = 0; y < ph[p]; y++) {
+ memcpy(s->temp, src, pw[p] * s->pixsteps[p]);
+ memmove(src, dst, pw[p] * s->pixsteps[p]);
+ memcpy(dst, s->temp, pw[p] * s->pixsteps[p]);
+ src += in->linesize[p];
+ dst += in->linesize[p];
+ }
+ }
+ }
+
+ return ff_filter_frame(outlink, in);
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ SwapRectContext *s = ctx->priv;
+
+ if (!s->w || !s->h ||
+ !s->x1 || !s->y1 ||
+ !s->x2 || !s->y2)
+ return AVERROR(EINVAL);
+
+ s->desc = av_pix_fmt_desc_get(inlink->format);
+ av_image_fill_max_pixsteps(s->pixsteps, NULL, s->desc);
+ s->nb_planes = av_pix_fmt_count_planes(inlink->format);
+
+ s->temp = av_malloc_array(inlink->w, s->pixsteps[0]);
+ if (!s->temp)
+ return AVERROR(ENOMEM);
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ SwapRectContext *s = ctx->priv;
+ av_freep(&s->temp);
+}
+
+static const AVFilterPad inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ .needs_writable = 1,
+ },
+ { NULL }
+};
+
+static const AVFilterPad outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_swaprect = {
+ .name = "swaprect",
+ .description = NULL_IF_CONFIG_SMALL("Swap 2 rectangular objects in video."),
+ .priv_size = sizeof(SwapRectContext),
+ .priv_class = &swaprect_class,
+ .query_formats = query_formats,
+ .uninit = uninit,
+ .inputs = inputs,
+ .outputs = outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+};
diff --git a/libavfilter/vf_swapuv.c b/libavfilter/vf_swapuv.c
new file mode 100644
index 0000000000..8d62c48c4f
--- /dev/null
+++ b/libavfilter/vf_swapuv.c
@@ -0,0 +1,129 @@
+/*
+ * Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * swap UV filter
+ */
+
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/version.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+typedef struct SwapUVContext {
+ const AVClass *class;
+} SwapUVContext;
+
+static const AVOption swapuv_options[] = {
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(swapuv);
+
+static void do_swap(AVFrame *frame)
+{
+ FFSWAP(uint8_t*, frame->data[1], frame->data[2]);
+ FFSWAP(int, frame->linesize[1], frame->linesize[2]);
+ FFSWAP(AVBufferRef*, frame->buf[1], frame->buf[2]);
+
+#if FF_API_ERROR_FRAME
+FF_DISABLE_DEPRECATION_WARNINGS
+ FFSWAP(uint64_t, frame->error[1], frame->error[2]);
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+}
+
+static AVFrame *get_video_buffer(AVFilterLink *link, int w, int h)
+{
+ AVFrame *picref = ff_default_get_video_buffer(link, w, h);
+ do_swap(picref);
+ return picref;
+}
+
+static int filter_frame(AVFilterLink *link, AVFrame *inpicref)
+{
+ do_swap(inpicref);
+ return ff_filter_frame(link->dst->outputs[0], inpicref);
+}
+
+static int is_planar_yuv(const AVPixFmtDescriptor *desc)
+{
+ int i;
+
+ if (desc->flags & ~(AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_ALPHA) ||
+ desc->nb_components < 3 ||
+ (desc->comp[1].depth != desc->comp[2].depth))
+ return 0;
+ for (i = 0; i < desc->nb_components; i++) {
+ if (desc->comp[i].offset != 0 ||
+ desc->comp[i].shift != 0 ||
+ desc->comp[i].plane != i)
+ return 0;
+ }
+
+ return 1;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *formats = NULL;
+ int fmt, ret;
+
+ for (fmt = 0; av_pix_fmt_desc_get(fmt); fmt++) {
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
+ if (is_planar_yuv(desc) && (ret = ff_add_format(&formats, fmt)) < 0)
+ return ret;
+ }
+
+ return ff_set_common_formats(ctx, formats);
+}
+
+static const AVFilterPad swapuv_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .get_video_buffer = get_video_buffer,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad swapuv_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_swapuv = {
+ .name = "swapuv",
+ .description = NULL_IF_CONFIG_SMALL("Swap U and V components."),
+ .query_formats = query_formats,
+ .priv_size = sizeof(SwapUVContext),
+ .priv_class = &swapuv_class,
+ .inputs = swapuv_inputs,
+ .outputs = swapuv_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+};
diff --git a/libavfilter/vf_telecine.c b/libavfilter/vf_telecine.c
new file mode 100644
index 0000000000..35f382ef7f
--- /dev/null
+++ b/libavfilter/vf_telecine.c
@@ -0,0 +1,295 @@
+/*
+ * Copyright (c) 2012 Rudolf Polzer
+ * Copyright (c) 2013 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file telecine filter, heavily based from mpv-player:TOOLS/vf_dlopen/telecine.c by
+ * Rudolf Polzer.
+ */
+
+#include "libavutil/avstring.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+typedef struct {
+ const AVClass *class;
+ int first_field;
+ char *pattern;
+ unsigned int pattern_pos;
+ int64_t start_time;
+
+ AVRational pts;
+ AVRational ts_unit;
+ int out_cnt;
+ int occupied;
+
+ int nb_planes;
+ int planeheight[4];
+ int stride[4];
+
+ AVFrame *frame[5];
+ AVFrame *temp;
+} TelecineContext;
+
+#define OFFSET(x) offsetof(TelecineContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption telecine_options[] = {
+ {"first_field", "select first field", OFFSET(first_field), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS, "field"},
+ {"top", "select top field first", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "field"},
+ {"t", "select top field first", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "field"},
+ {"bottom", "select bottom field first", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "field"},
+ {"b", "select bottom field first", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "field"},
+ {"pattern", "pattern that describe for how many fields a frame is to be displayed", OFFSET(pattern), AV_OPT_TYPE_STRING, {.str="23"}, 0, 0, FLAGS},
+ {NULL}
+};
+
+AVFILTER_DEFINE_CLASS(telecine);
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ TelecineContext *s = ctx->priv;
+ const char *p;
+ int max = 0;
+
+ if (!strlen(s->pattern)) {
+ av_log(ctx, AV_LOG_ERROR, "No pattern provided.\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ for (p = s->pattern; *p; p++) {
+ if (!av_isdigit(*p)) {
+ av_log(ctx, AV_LOG_ERROR, "Provided pattern includes non-numeric characters.\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ max = FFMAX(*p - '0', max);
+ s->pts.num += 2;
+ s->pts.den += *p - '0';
+ }
+
+ s->start_time = AV_NOPTS_VALUE;
+
+ s->out_cnt = (max + 1) / 2;
+ av_log(ctx, AV_LOG_INFO, "Telecine pattern %s yields up to %d frames per frame, pts advance factor: %d/%d\n",
+ s->pattern, s->out_cnt, s->pts.num, s->pts.den);
+
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *pix_fmts = NULL;
+ int fmt, ret;
+
+ for (fmt = 0; av_pix_fmt_desc_get(fmt); fmt++) {
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
+ if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL ||
+ desc->flags & AV_PIX_FMT_FLAG_PAL ||
+ desc->flags & AV_PIX_FMT_FLAG_BITSTREAM) &&
+ (ret = ff_add_format(&pix_fmts, fmt)) < 0)
+ return ret;
+ }
+
+ return ff_set_common_formats(ctx, pix_fmts);
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ TelecineContext *s = inlink->dst->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ int i, ret;
+
+ s->temp = ff_get_video_buffer(inlink, inlink->w, inlink->h);
+ if (!s->temp)
+ return AVERROR(ENOMEM);
+ for (i = 0; i < s->out_cnt; i++) {
+ s->frame[i] = ff_get_video_buffer(inlink, inlink->w, inlink->h);
+ if (!s->frame[i])
+ return AVERROR(ENOMEM);
+ }
+
+ if ((ret = av_image_fill_linesizes(s->stride, inlink->format, inlink->w)) < 0)
+ return ret;
+
+ s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
+ s->planeheight[0] = s->planeheight[3] = inlink->h;
+
+ s->nb_planes = av_pix_fmt_count_planes(inlink->format);
+
+ return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ TelecineContext *s = ctx->priv;
+ const AVFilterLink *inlink = ctx->inputs[0];
+ AVRational fps = inlink->frame_rate;
+
+ if (!fps.num || !fps.den) {
+ av_log(ctx, AV_LOG_ERROR, "The input needs a constant frame rate; "
+ "current rate of %d/%d is invalid\n", fps.num, fps.den);
+ return AVERROR(EINVAL);
+ }
+ fps = av_mul_q(fps, av_inv_q(s->pts));
+ av_log(ctx, AV_LOG_VERBOSE, "FPS: %d/%d -> %d/%d\n",
+ inlink->frame_rate.num, inlink->frame_rate.den, fps.num, fps.den);
+
+ outlink->frame_rate = fps;
+ outlink->time_base = av_mul_q(inlink->time_base, s->pts);
+ av_log(ctx, AV_LOG_VERBOSE, "TB: %d/%d -> %d/%d\n",
+ inlink->time_base.num, inlink->time_base.den, outlink->time_base.num, outlink->time_base.den);
+
+ s->ts_unit = av_inv_q(av_mul_q(fps, outlink->time_base));
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ TelecineContext *s = ctx->priv;
+ int i, len, ret = 0, nout = 0;
+
+ if (s->start_time == AV_NOPTS_VALUE)
+ s->start_time = inpicref->pts;
+
+ len = s->pattern[s->pattern_pos] - '0';
+
+ s->pattern_pos++;
+ if (!s->pattern[s->pattern_pos])
+ s->pattern_pos = 0;
+
+ if (!len) { // do not output any field from this frame
+ av_frame_free(&inpicref);
+ return 0;
+ }
+
+ if (s->occupied) {
+ av_frame_make_writable(s->frame[nout]);
+ for (i = 0; i < s->nb_planes; i++) {
+ // fill in the EARLIER field from the buffered pic
+ av_image_copy_plane(s->frame[nout]->data[i] + s->frame[nout]->linesize[i] * s->first_field,
+ s->frame[nout]->linesize[i] * 2,
+ s->temp->data[i] + s->temp->linesize[i] * s->first_field,
+ s->temp->linesize[i] * 2,
+ s->stride[i],
+ (s->planeheight[i] - s->first_field + 1) / 2);
+ // fill in the LATER field from the new pic
+ av_image_copy_plane(s->frame[nout]->data[i] + s->frame[nout]->linesize[i] * !s->first_field,
+ s->frame[nout]->linesize[i] * 2,
+ inpicref->data[i] + inpicref->linesize[i] * !s->first_field,
+ inpicref->linesize[i] * 2,
+ s->stride[i],
+ (s->planeheight[i] - !s->first_field + 1) / 2);
+ }
+ nout++;
+ len--;
+ s->occupied = 0;
+ }
+
+ while (len >= 2) {
+ // output THIS image as-is
+ av_frame_make_writable(s->frame[nout]);
+ for (i = 0; i < s->nb_planes; i++)
+ av_image_copy_plane(s->frame[nout]->data[i], s->frame[nout]->linesize[i],
+ inpicref->data[i], inpicref->linesize[i],
+ s->stride[i],
+ s->planeheight[i]);
+ nout++;
+ len -= 2;
+ }
+
+ if (len >= 1) {
+ // copy THIS image to the buffer, we need it later
+ for (i = 0; i < s->nb_planes; i++)
+ av_image_copy_plane(s->temp->data[i], s->temp->linesize[i],
+ inpicref->data[i], inpicref->linesize[i],
+ s->stride[i],
+ s->planeheight[i]);
+ s->occupied = 1;
+ }
+
+ for (i = 0; i < nout; i++) {
+ AVFrame *frame = av_frame_clone(s->frame[i]);
+
+ if (!frame) {
+ av_frame_free(&inpicref);
+ return AVERROR(ENOMEM);
+ }
+
+ av_frame_copy_props(frame, inpicref);
+ frame->pts = ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time) +
+ av_rescale(outlink->frame_count_in, s->ts_unit.num,
+ s->ts_unit.den);
+ ret = ff_filter_frame(outlink, frame);
+ }
+ av_frame_free(&inpicref);
+
+ return ret;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ TelecineContext *s = ctx->priv;
+ int i;
+
+ av_frame_free(&s->temp);
+ for (i = 0; i < s->out_cnt; i++)
+ av_frame_free(&s->frame[i]);
+}
+
+static const AVFilterPad telecine_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ { NULL }
+};
+
+static const AVFilterPad telecine_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_telecine = {
+ .name = "telecine",
+ .description = NULL_IF_CONFIG_SMALL("Apply a telecine pattern."),
+ .priv_size = sizeof(TelecineContext),
+ .priv_class = &telecine_class,
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = telecine_inputs,
+ .outputs = telecine_outputs,
+};
diff --git a/libavfilter/vf_threshold.c b/libavfilter/vf_threshold.c
new file mode 100644
index 0000000000..1cb4c9aab8
--- /dev/null
+++ b/libavfilter/vf_threshold.c
@@ -0,0 +1,349 @@
+/*
+ * Copyright (c) 2016 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * threshold video filter
+ */
+
+#include "libavutil/imgutils.h"
+#include "libavutil/internal.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+#include "framesync.h"
+#include "internal.h"
+#include "video.h"
+
+typedef struct ThresholdContext {
+ const AVClass *class;
+
+ int planes;
+ int bpc;
+
+ int nb_planes;
+ int width[4], height[4];
+
+ void (*threshold)(const uint8_t *in, const uint8_t *threshold,
+ const uint8_t *min, const uint8_t *max,
+ uint8_t *out,
+ ptrdiff_t ilinesize, ptrdiff_t tlinesize,
+ ptrdiff_t flinesize, ptrdiff_t slinesize,
+ ptrdiff_t olinesize,
+ int w, int h);
+
+ AVFrame *frames[4];
+ FFFrameSync fs;
+} ThresholdContext;
+
+#define OFFSET(x) offsetof(ThresholdContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption threshold_options[] = {
+ { "planes", "set planes to filter", OFFSET(planes), AV_OPT_TYPE_INT, {.i64=15}, 0, 15, FLAGS},
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(threshold);
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P,
+ AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
+ AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
+ AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
+ AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
+ AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
+ AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9,
+ AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10,
+ AV_PIX_FMT_YUVA420P16, AV_PIX_FMT_YUVA422P16, AV_PIX_FMT_YUVA444P16,
+ AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
+ AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16,
+ AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRAP16,
+ AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY10,
+ AV_PIX_FMT_GRAY12, AV_PIX_FMT_GRAY16,
+ AV_PIX_FMT_NONE
+ };
+
+ return ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+}
+
+static int process_frame(FFFrameSync *fs)
+{
+ AVFilterContext *ctx = fs->parent;
+ ThresholdContext *s = fs->opaque;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFrame *out, *in, *threshold, *min, *max;
+ int ret;
+
+ if ((ret = ff_framesync_get_frame(&s->fs, 0, &in, 0)) < 0 ||
+ (ret = ff_framesync_get_frame(&s->fs, 1, &threshold, 0)) < 0 ||
+ (ret = ff_framesync_get_frame(&s->fs, 2, &min, 0)) < 0 ||
+ (ret = ff_framesync_get_frame(&s->fs, 3, &max, 0)) < 0)
+ return ret;
+
+ if (ctx->is_disabled) {
+ out = av_frame_clone(in);
+ if (!out)
+ return AVERROR(ENOMEM);
+ } else {
+ int p;
+
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out)
+ return AVERROR(ENOMEM);
+ av_frame_copy_props(out, in);
+
+ for (p = 0; p < s->nb_planes; p++) {
+ if (!(s->planes & (1 << p))) {
+ av_image_copy_plane(out->data[p], out->linesize[p],
+ in->data[p], in->linesize[p],
+ s->width[p] * s->bpc,
+ s->height[p]);
+ continue;
+ }
+ s->threshold(in->data[p], threshold->data[p],
+ min->data[p], max->data[p],
+ out->data[p],
+ in->linesize[p], threshold->linesize[p],
+ min->linesize[p], max->linesize[p],
+ out->linesize[p],
+ s->width[p], s->height[p]);
+ }
+ }
+
+ out->pts = av_rescale_q(s->fs.pts, s->fs.time_base, outlink->time_base);
+
+ return ff_filter_frame(outlink, out);
+}
+
+static void threshold8(const uint8_t *in, const uint8_t *threshold,
+ const uint8_t *min, const uint8_t *max,
+ uint8_t *out,
+ ptrdiff_t ilinesize, ptrdiff_t tlinesize,
+ ptrdiff_t flinesize, ptrdiff_t slinesize,
+ ptrdiff_t olinesize,
+ int w, int h)
+{
+ int x, y;
+
+ for (y = 0; y < h; y++) {
+ for (x = 0; x < w; x++) {
+ out[x] = in[x] < threshold[x] ? min[x] : max[x];
+ }
+
+ in += ilinesize;
+ threshold += tlinesize;
+ min += flinesize;
+ max += flinesize;
+ out += olinesize;
+ }
+}
+
+static void threshold16(const uint8_t *iin, const uint8_t *tthreshold,
+ const uint8_t *ffirst, const uint8_t *ssecond,
+ uint8_t *oout,
+ ptrdiff_t ilinesize, ptrdiff_t tlinesize,
+ ptrdiff_t flinesize, ptrdiff_t slinesize,
+ ptrdiff_t olinesize,
+ int w, int h)
+{
+ const uint16_t *in = (const uint16_t *)iin;
+ const uint16_t *threshold = (const uint16_t *)tthreshold;
+ const uint16_t *min = (const uint16_t *)ffirst;
+ const uint16_t *max = (const uint16_t *)ssecond;
+ uint16_t *out = (uint16_t *)oout;
+ int x, y;
+
+ for (y = 0; y < h; y++) {
+ for (x = 0; x < w; x++) {
+ out[x] = in[x] < threshold[x] ? min[x] : max[x];
+ }
+
+ in += ilinesize / 2;
+ threshold += tlinesize / 2;
+ min += flinesize / 2;
+ max += flinesize / 2;
+ out += olinesize / 2;
+ }
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ ThresholdContext *s = ctx->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ int vsub, hsub;
+
+ s->nb_planes = av_pix_fmt_count_planes(inlink->format);
+
+ hsub = desc->log2_chroma_w;
+ vsub = desc->log2_chroma_h;
+ s->height[1] = s->height[2] = AV_CEIL_RSHIFT(inlink->h, vsub);
+ s->height[0] = s->height[3] = inlink->h;
+ s->width[1] = s->width[2] = AV_CEIL_RSHIFT(inlink->w, hsub);
+ s->width[0] = s->width[3] = inlink->w;
+
+ if (desc->comp[0].depth == 8) {
+ s->threshold = threshold8;
+ s->bpc = 1;
+ } else {
+ s->threshold = threshold16;
+ s->bpc = 2;
+ }
+
+ return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ ThresholdContext *s = ctx->priv;
+ AVFilterLink *base = ctx->inputs[0];
+ AVFilterLink *threshold = ctx->inputs[1];
+ AVFilterLink *min = ctx->inputs[2];
+ AVFilterLink *max = ctx->inputs[3];
+ FFFrameSyncIn *in;
+ int ret;
+
+ if (base->format != threshold->format ||
+ base->format != min->format ||
+ base->format != max->format) {
+ av_log(ctx, AV_LOG_ERROR, "inputs must be of same pixel format\n");
+ return AVERROR(EINVAL);
+ }
+ if (base->w != threshold->w ||
+ base->h != threshold->h ||
+ base->w != min->w ||
+ base->h != min->h ||
+ base->w != max->w ||
+ base->h != max->h) {
+ av_log(ctx, AV_LOG_ERROR, "First input link %s parameters "
+ "(size %dx%d) do not match the corresponding "
+ "second input link %s parameters (%dx%d) "
+ "and/or third input link %s parameters (%dx%d) "
+ "and/or fourth input link %s parameters (%dx%d)\n",
+ ctx->input_pads[0].name, base->w, base->h,
+ ctx->input_pads[1].name, threshold->w, threshold->h,
+ ctx->input_pads[2].name, min->w, min->h,
+ ctx->input_pads[3].name, max->w, max->h);
+ return AVERROR(EINVAL);
+ }
+
+ outlink->w = base->w;
+ outlink->h = base->h;
+ outlink->time_base = base->time_base;
+ outlink->sample_aspect_ratio = base->sample_aspect_ratio;
+ outlink->frame_rate = base->frame_rate;
+
+ if ((ret = ff_framesync_init(&s->fs, ctx, 4)) < 0)
+ return ret;
+
+ in = s->fs.in;
+ in[0].time_base = base->time_base;
+ in[1].time_base = threshold->time_base;
+ in[2].time_base = min->time_base;
+ in[3].time_base = max->time_base;
+ in[0].sync = 1;
+ in[0].before = EXT_STOP;
+ in[0].after = EXT_STOP;
+ in[1].sync = 1;
+ in[1].before = EXT_STOP;
+ in[1].after = EXT_STOP;
+ in[2].sync = 1;
+ in[2].before = EXT_STOP;
+ in[2].after = EXT_STOP;
+ in[3].sync = 1;
+ in[3].before = EXT_STOP;
+ in[3].after = EXT_STOP;
+ s->fs.opaque = s;
+ s->fs.on_event = process_frame;
+
+ return ff_framesync_configure(&s->fs);
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
+{
+ ThresholdContext *s = inlink->dst->priv;
+ return ff_framesync_filter_frame(&s->fs, inlink, buf);
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ ThresholdContext *s = outlink->src->priv;
+ return ff_framesync_request_frame(&s->fs, outlink);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ ThresholdContext *s = ctx->priv;
+
+ ff_framesync_uninit(&s->fs);
+}
+
+static const AVFilterPad inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ {
+ .name = "threshold",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ {
+ .name = "min",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ {
+ .name = "max",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_threshold = {
+ .name = "threshold",
+ .description = NULL_IF_CONFIG_SMALL("Threshold first video stream using other video streams."),
+ .priv_size = sizeof(ThresholdContext),
+ .priv_class = &threshold_class,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = inputs,
+ .outputs = outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
+};
diff --git a/libavfilter/vf_thumbnail.c b/libavfilter/vf_thumbnail.c
new file mode 100644
index 0000000000..417ccd56d0
--- /dev/null
+++ b/libavfilter/vf_thumbnail.c
@@ -0,0 +1,237 @@
+/*
+ * Copyright (c) 2011 Smartjog S.A.S, Clément Bœsch <clement.boesch@smartjog.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Potential thumbnail lookup filter to reduce the risk of an inappropriate
+ * selection (such as a black frame) we could get with an absolute seek.
+ *
+ * Simplified version of algorithm by Vadim Zaliva <lord@crocodile.org>.
+ * @see http://notbrainsurgery.livejournal.com/29773.html
+ */
+
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#include "internal.h"
+
+#define HIST_SIZE (3*256)
+
+struct thumb_frame {
+ AVFrame *buf; ///< cached frame
+ int histogram[HIST_SIZE]; ///< RGB color distribution histogram of the frame
+};
+
+typedef struct {
+ const AVClass *class;
+ int n; ///< current frame
+ int n_frames; ///< number of frames for analysis
+ struct thumb_frame *frames; ///< the n_frames frames
+ AVRational tb; ///< copy of the input timebase to ease access
+} ThumbContext;
+
+#define OFFSET(x) offsetof(ThumbContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption thumbnail_options[] = {
+ { "n", "set the frames batch size", OFFSET(n_frames), AV_OPT_TYPE_INT, {.i64=100}, 2, INT_MAX, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(thumbnail);
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ ThumbContext *s = ctx->priv;
+
+ s->frames = av_calloc(s->n_frames, sizeof(*s->frames));
+ if (!s->frames) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Allocation failure, try to lower the number of frames\n");
+ return AVERROR(ENOMEM);
+ }
+ av_log(ctx, AV_LOG_VERBOSE, "batch size: %d frames\n", s->n_frames);
+ return 0;
+}
+
+/**
+ * @brief Compute Sum-square deviation to estimate "closeness".
+ * @param hist color distribution histogram
+ * @param median average color distribution histogram
+ * @return sum of squared errors
+ */
+static double frame_sum_square_err(const int *hist, const double *median)
+{
+ int i;
+ double err, sum_sq_err = 0;
+
+ for (i = 0; i < HIST_SIZE; i++) {
+ err = median[i] - (double)hist[i];
+ sum_sq_err += err*err;
+ }
+ return sum_sq_err;
+}
+
+static AVFrame *get_best_frame(AVFilterContext *ctx)
+{
+ AVFrame *picref;
+ ThumbContext *s = ctx->priv;
+ int i, j, best_frame_idx = 0;
+ int nb_frames = s->n;
+ double avg_hist[HIST_SIZE] = {0}, sq_err, min_sq_err = -1;
+
+ // average histogram of the N frames
+ for (j = 0; j < FF_ARRAY_ELEMS(avg_hist); j++) {
+ for (i = 0; i < nb_frames; i++)
+ avg_hist[j] += (double)s->frames[i].histogram[j];
+ avg_hist[j] /= nb_frames;
+ }
+
+ // find the frame closer to the average using the sum of squared errors
+ for (i = 0; i < nb_frames; i++) {
+ sq_err = frame_sum_square_err(s->frames[i].histogram, avg_hist);
+ if (i == 0 || sq_err < min_sq_err)
+ best_frame_idx = i, min_sq_err = sq_err;
+ }
+
+ // free and reset everything (except the best frame buffer)
+ for (i = 0; i < nb_frames; i++) {
+ memset(s->frames[i].histogram, 0, sizeof(s->frames[i].histogram));
+ if (i != best_frame_idx)
+ av_frame_free(&s->frames[i].buf);
+ }
+ s->n = 0;
+
+ // raise the chosen one
+ picref = s->frames[best_frame_idx].buf;
+ av_log(ctx, AV_LOG_INFO, "frame id #%d (pts_time=%f) selected "
+ "from a set of %d images\n", best_frame_idx,
+ picref->pts * av_q2d(s->tb), nb_frames);
+ s->frames[best_frame_idx].buf = NULL;
+
+ return picref;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
+{
+ int i, j;
+ AVFilterContext *ctx = inlink->dst;
+ ThumbContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ int *hist = s->frames[s->n].histogram;
+ const uint8_t *p = frame->data[0];
+
+ // keep a reference of each frame
+ s->frames[s->n].buf = frame;
+
+ // update current frame RGB histogram
+ for (j = 0; j < inlink->h; j++) {
+ for (i = 0; i < inlink->w; i++) {
+ hist[0*256 + p[i*3 ]]++;
+ hist[1*256 + p[i*3 + 1]]++;
+ hist[2*256 + p[i*3 + 2]]++;
+ }
+ p += frame->linesize[0];
+ }
+
+ // no selection until the buffer of N frames is filled up
+ s->n++;
+ if (s->n < s->n_frames)
+ return 0;
+
+ return ff_filter_frame(outlink, get_best_frame(ctx));
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ int i;
+ ThumbContext *s = ctx->priv;
+ for (i = 0; i < s->n_frames && s->frames[i].buf; i++)
+ av_frame_free(&s->frames[i].buf);
+ av_freep(&s->frames);
+}
+
+static int request_frame(AVFilterLink *link)
+{
+ AVFilterContext *ctx = link->src;
+ ThumbContext *s = ctx->priv;
+ int ret = ff_request_frame(ctx->inputs[0]);
+
+ if (ret == AVERROR_EOF && s->n) {
+ ret = ff_filter_frame(link, get_best_frame(ctx));
+ if (ret < 0)
+ return ret;
+ ret = AVERROR_EOF;
+ }
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+static int config_props(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ ThumbContext *s = ctx->priv;
+
+ s->tb = inlink->time_base;
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
+ AV_PIX_FMT_NONE
+ };
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static const AVFilterPad thumbnail_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_props,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad thumbnail_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_thumbnail = {
+ .name = "thumbnail",
+ .description = NULL_IF_CONFIG_SMALL("Select the most representative frame in a given sequence of consecutive frames."),
+ .priv_size = sizeof(ThumbContext),
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = thumbnail_inputs,
+ .outputs = thumbnail_outputs,
+ .priv_class = &thumbnail_class,
+};
diff --git a/libavfilter/vf_tile.c b/libavfilter/vf_tile.c
new file mode 100644
index 0000000000..9af00bd8f5
--- /dev/null
+++ b/libavfilter/vf_tile.c
@@ -0,0 +1,242 @@
+/*
+ * Copyright (c) 2012 Nicolas George
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * tile video filter
+ */
+
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+#include "drawutils.h"
+#include "formats.h"
+#include "video.h"
+#include "internal.h"
+
+typedef struct {
+ const AVClass *class;
+ unsigned w, h;
+ unsigned margin;
+ unsigned padding;
+ unsigned current;
+ unsigned nb_frames;
+ FFDrawContext draw;
+ FFDrawColor blank;
+ AVFrame *out_ref;
+ uint8_t rgba_color[4];
+} TileContext;
+
+#define REASONABLE_SIZE 1024
+
+#define OFFSET(x) offsetof(TileContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption tile_options[] = {
+ { "layout", "set grid size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE,
+ {.str = "6x5"}, 0, 0, FLAGS },
+ { "nb_frames", "set maximum number of frame to render", OFFSET(nb_frames),
+ AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, FLAGS },
+ { "margin", "set outer border margin in pixels", OFFSET(margin),
+ AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1024, FLAGS },
+ { "padding", "set inner border thickness in pixels", OFFSET(padding),
+ AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1024, FLAGS },
+ { "color", "set the color of the unused area", OFFSET(rgba_color), AV_OPT_TYPE_COLOR, {.str = "black"}, .flags = FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(tile);
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ TileContext *tile = ctx->priv;
+
+ if (tile->w > REASONABLE_SIZE || tile->h > REASONABLE_SIZE) {
+ av_log(ctx, AV_LOG_ERROR, "Tile size %ux%u is insane.\n",
+ tile->w, tile->h);
+ return AVERROR(EINVAL);
+ }
+
+ if (tile->nb_frames == 0) {
+ tile->nb_frames = tile->w * tile->h;
+ } else if (tile->nb_frames > tile->w * tile->h) {
+ av_log(ctx, AV_LOG_ERROR, "nb_frames must be less than or equal to %dx%d=%d\n",
+ tile->w, tile->h, tile->w * tile->h);
+ return AVERROR(EINVAL);
+ }
+
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ return ff_set_common_formats(ctx, ff_draw_supported_pixel_formats(0));
+}
+
+static int config_props(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ TileContext *tile = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
+ const unsigned total_margin_w = (tile->w - 1) * tile->padding + 2*tile->margin;
+ const unsigned total_margin_h = (tile->h - 1) * tile->padding + 2*tile->margin;
+
+ if (inlink->w > (INT_MAX - total_margin_w) / tile->w) {
+ av_log(ctx, AV_LOG_ERROR, "Total width %ux%u is too much.\n",
+ tile->w, inlink->w);
+ return AVERROR(EINVAL);
+ }
+ if (inlink->h > (INT_MAX - total_margin_h) / tile->h) {
+ av_log(ctx, AV_LOG_ERROR, "Total height %ux%u is too much.\n",
+ tile->h, inlink->h);
+ return AVERROR(EINVAL);
+ }
+ outlink->w = tile->w * inlink->w + total_margin_w;
+ outlink->h = tile->h * inlink->h + total_margin_h;
+ outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
+ outlink->frame_rate = av_mul_q(inlink->frame_rate,
+ av_make_q(1, tile->nb_frames));
+ ff_draw_init(&tile->draw, inlink->format, 0);
+ ff_draw_color(&tile->draw, &tile->blank, tile->rgba_color);
+
+ return 0;
+}
+
+static void get_current_tile_pos(AVFilterContext *ctx, unsigned *x, unsigned *y)
+{
+ TileContext *tile = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
+ const unsigned tx = tile->current % tile->w;
+ const unsigned ty = tile->current / tile->w;
+
+ *x = tile->margin + (inlink->w + tile->padding) * tx;
+ *y = tile->margin + (inlink->h + tile->padding) * ty;
+}
+
+static void draw_blank_frame(AVFilterContext *ctx, AVFrame *out_buf)
+{
+ TileContext *tile = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
+ unsigned x0, y0;
+
+ get_current_tile_pos(ctx, &x0, &y0);
+ ff_fill_rectangle(&tile->draw, &tile->blank,
+ out_buf->data, out_buf->linesize,
+ x0, y0, inlink->w, inlink->h);
+ tile->current++;
+}
+static int end_last_frame(AVFilterContext *ctx)
+{
+ TileContext *tile = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFrame *out_buf = tile->out_ref;
+ int ret;
+
+ while (tile->current < tile->nb_frames)
+ draw_blank_frame(ctx, out_buf);
+ ret = ff_filter_frame(outlink, out_buf);
+ tile->current = 0;
+ return ret;
+}
+
+/* Note: direct rendering is not possible since there is no guarantee that
+ * buffers are fed to filter_frame in the order they were obtained from
+ * get_buffer (think B-frames). */
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *picref)
+{
+ AVFilterContext *ctx = inlink->dst;
+ TileContext *tile = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ unsigned x0, y0;
+
+ if (!tile->current) {
+ tile->out_ref = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!tile->out_ref) {
+ av_frame_free(&picref);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(tile->out_ref, picref);
+ tile->out_ref->width = outlink->w;
+ tile->out_ref->height = outlink->h;
+
+ /* fill surface once for margin/padding */
+ if (tile->margin || tile->padding)
+ ff_fill_rectangle(&tile->draw, &tile->blank,
+ tile->out_ref->data,
+ tile->out_ref->linesize,
+ 0, 0, outlink->w, outlink->h);
+ }
+
+ get_current_tile_pos(ctx, &x0, &y0);
+ ff_copy_rectangle2(&tile->draw,
+ tile->out_ref->data, tile->out_ref->linesize,
+ picref->data, picref->linesize,
+ x0, y0, 0, 0, inlink->w, inlink->h);
+
+ av_frame_free(&picref);
+ if (++tile->current == tile->nb_frames)
+ return end_last_frame(ctx);
+
+ return 0;
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ TileContext *tile = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
+ int r;
+
+ r = ff_request_frame(inlink);
+ if (r == AVERROR_EOF && tile->current)
+ r = end_last_frame(ctx);
+ return r;
+}
+
+static const AVFilterPad tile_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad tile_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_props,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_tile = {
+ .name = "tile",
+ .description = NULL_IF_CONFIG_SMALL("Tile several successive frames together."),
+ .init = init,
+ .query_formats = query_formats,
+ .priv_size = sizeof(TileContext),
+ .inputs = tile_inputs,
+ .outputs = tile_outputs,
+ .priv_class = &tile_class,
+};
diff --git a/libavfilter/vf_tinterlace.c b/libavfilter/vf_tinterlace.c
new file mode 100644
index 0000000000..80146a9480
--- /dev/null
+++ b/libavfilter/vf_tinterlace.c
@@ -0,0 +1,426 @@
+/*
+ * Copyright (c) 2011 Stefano Sabatini
+ * Copyright (c) 2010 Baptiste Coudurier
+ * Copyright (c) 2003 Michael Zucchi <notzed@ximian.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/**
+ * @file
+ * temporal field interlace filter, ported from MPlayer/libmpcodecs
+ */
+
+#include "libavutil/opt.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/avassert.h"
+#include "avfilter.h"
+#include "internal.h"
+#include "tinterlace.h"
+
+#define OFFSET(x) offsetof(TInterlaceContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+#define TINTERLACE_FLAG_VLPF 01
+#define TINTERLACE_FLAG_EXACT_TB 2
+
+static const AVOption tinterlace_options[] = {
+ {"mode", "select interlace mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=MODE_MERGE}, 0, MODE_NB-1, FLAGS, "mode"},
+ {"merge", "merge fields", 0, AV_OPT_TYPE_CONST, {.i64=MODE_MERGE}, INT_MIN, INT_MAX, FLAGS, "mode"},
+ {"drop_even", "drop even fields", 0, AV_OPT_TYPE_CONST, {.i64=MODE_DROP_EVEN}, INT_MIN, INT_MAX, FLAGS, "mode"},
+ {"drop_odd", "drop odd fields", 0, AV_OPT_TYPE_CONST, {.i64=MODE_DROP_ODD}, INT_MIN, INT_MAX, FLAGS, "mode"},
+ {"pad", "pad alternate lines with black", 0, AV_OPT_TYPE_CONST, {.i64=MODE_PAD}, INT_MIN, INT_MAX, FLAGS, "mode"},
+ {"interleave_top", "interleave top and bottom fields", 0, AV_OPT_TYPE_CONST, {.i64=MODE_INTERLEAVE_TOP}, INT_MIN, INT_MAX, FLAGS, "mode"},
+ {"interleave_bottom", "interleave bottom and top fields", 0, AV_OPT_TYPE_CONST, {.i64=MODE_INTERLEAVE_BOTTOM}, INT_MIN, INT_MAX, FLAGS, "mode"},
+ {"interlacex2", "interlace fields from two consecutive frames", 0, AV_OPT_TYPE_CONST, {.i64=MODE_INTERLACEX2}, INT_MIN, INT_MAX, FLAGS, "mode"},
+ {"mergex2", "merge fields keeping same frame rate", 0, AV_OPT_TYPE_CONST, {.i64=MODE_MERGEX2}, INT_MIN, INT_MAX, FLAGS, "mode"},
+
+ {"flags", "set flags", OFFSET(flags), AV_OPT_TYPE_FLAGS, {.i64 = 0}, 0, INT_MAX, 0, "flags" },
+ {"low_pass_filter", "enable vertical low-pass filter", 0, AV_OPT_TYPE_CONST, {.i64 = TINTERLACE_FLAG_VLPF}, INT_MIN, INT_MAX, FLAGS, "flags" },
+ {"vlpf", "enable vertical low-pass filter", 0, AV_OPT_TYPE_CONST, {.i64 = TINTERLACE_FLAG_VLPF}, INT_MIN, INT_MAX, FLAGS, "flags" },
+ {"exact_tb", "force a timebase which can represent timestamps exactly", 0, AV_OPT_TYPE_CONST, {.i64 = TINTERLACE_FLAG_EXACT_TB}, INT_MIN, INT_MAX, FLAGS, "flags" },
+
+ {NULL}
+};
+
+AVFILTER_DEFINE_CLASS(tinterlace);
+
+#define FULL_SCALE_YUVJ_FORMATS \
+ AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P
+
+static const enum AVPixelFormat full_scale_yuvj_pix_fmts[] = {
+ FULL_SCALE_YUVJ_FORMATS, AV_PIX_FMT_NONE
+};
+
+static const AVRational standard_tbs[] = {
+ {1, 25},
+ {1, 30},
+ {1001, 30000},
+};
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
+ AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P,
+ AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P,
+ AV_PIX_FMT_GRAY8, FULL_SCALE_YUVJ_FORMATS,
+ AV_PIX_FMT_NONE
+ };
+
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static void lowpass_line_c(uint8_t *dstp, ptrdiff_t width, const uint8_t *srcp,
+ const uint8_t *srcp_above, const uint8_t *srcp_below)
+{
+ int i;
+ for (i = 0; i < width; i++) {
+ // this calculation is an integer representation of
+ // '0.5 * current + 0.25 * above + 0.25 * below'
+ // '1 +' is for rounding.
+ dstp[i] = (1 + srcp[i] + srcp[i] + srcp_above[i] + srcp_below[i]) >> 2;
+ }
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ TInterlaceContext *tinterlace = ctx->priv;
+
+ av_frame_free(&tinterlace->cur );
+ av_frame_free(&tinterlace->next);
+ av_freep(&tinterlace->black_data[0]);
+}
+
+static int config_out_props(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ AVFilterLink *inlink = outlink->src->inputs[0];
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(outlink->format);
+ TInterlaceContext *tinterlace = ctx->priv;
+ int i;
+
+ tinterlace->vsub = desc->log2_chroma_h;
+ outlink->w = inlink->w;
+ outlink->h = tinterlace->mode == MODE_MERGE || tinterlace->mode == MODE_PAD || tinterlace->mode == MODE_MERGEX2?
+ inlink->h*2 : inlink->h;
+ if (tinterlace->mode == MODE_MERGE || tinterlace->mode == MODE_PAD || tinterlace->mode == MODE_MERGEX2)
+ outlink->sample_aspect_ratio = av_mul_q(inlink->sample_aspect_ratio,
+ av_make_q(2, 1));
+
+ if (tinterlace->mode == MODE_PAD) {
+ uint8_t black[4] = { 16, 128, 128, 16 };
+ int i, ret;
+ if (ff_fmt_is_in(outlink->format, full_scale_yuvj_pix_fmts))
+ black[0] = black[3] = 0;
+ ret = av_image_alloc(tinterlace->black_data, tinterlace->black_linesize,
+ outlink->w, outlink->h, outlink->format, 16);
+ if (ret < 0)
+ return ret;
+
+ /* fill black picture with black */
+ for (i = 0; i < 4 && tinterlace->black_data[i]; i++) {
+ int h = i == 1 || i == 2 ? AV_CEIL_RSHIFT(outlink->h, desc->log2_chroma_h) : outlink->h;
+ memset(tinterlace->black_data[i], black[i],
+ tinterlace->black_linesize[i] * h);
+ }
+ }
+ if ((tinterlace->flags & TINTERLACE_FLAG_VLPF)
+ && !(tinterlace->mode == MODE_INTERLEAVE_TOP
+ || tinterlace->mode == MODE_INTERLEAVE_BOTTOM)) {
+ av_log(ctx, AV_LOG_WARNING, "low_pass_filter flag ignored with mode %d\n",
+ tinterlace->mode);
+ tinterlace->flags &= ~TINTERLACE_FLAG_VLPF;
+ }
+ tinterlace->preout_time_base = inlink->time_base;
+ if (tinterlace->mode == MODE_INTERLACEX2) {
+ tinterlace->preout_time_base.den *= 2;
+ outlink->frame_rate = av_mul_q(inlink->frame_rate, (AVRational){2,1});
+ outlink->time_base = av_mul_q(inlink->time_base , (AVRational){1,2});
+ } else if (tinterlace->mode == MODE_MERGEX2) {
+ outlink->frame_rate = inlink->frame_rate;
+ outlink->time_base = inlink->time_base;
+ } else if (tinterlace->mode != MODE_PAD) {
+ outlink->frame_rate = av_mul_q(inlink->frame_rate, (AVRational){1,2});
+ outlink->time_base = av_mul_q(inlink->time_base , (AVRational){2,1});
+ }
+
+ for (i = 0; i<FF_ARRAY_ELEMS(standard_tbs); i++){
+ if (!av_cmp_q(standard_tbs[i], outlink->time_base))
+ break;
+ }
+ if (i == FF_ARRAY_ELEMS(standard_tbs) ||
+ (tinterlace->flags & TINTERLACE_FLAG_EXACT_TB))
+ outlink->time_base = tinterlace->preout_time_base;
+
+ if (tinterlace->flags & TINTERLACE_FLAG_VLPF) {
+ tinterlace->lowpass_line = lowpass_line_c;
+ if (ARCH_X86)
+ ff_tinterlace_init_x86(tinterlace);
+ }
+
+ av_log(ctx, AV_LOG_VERBOSE, "mode:%d filter:%s h:%d -> h:%d\n",
+ tinterlace->mode, (tinterlace->flags & TINTERLACE_FLAG_VLPF) ? "on" : "off",
+ inlink->h, outlink->h);
+
+ return 0;
+}
+
+#define FIELD_UPPER 0
+#define FIELD_LOWER 1
+#define FIELD_UPPER_AND_LOWER 2
+
+/**
+ * Copy picture field from src to dst.
+ *
+ * @param src_field copy from upper, lower field or both
+ * @param interleave leave a padding line between each copied line
+ * @param dst_field copy to upper or lower field,
+ * only meaningful when interleave is selected
+ * @param flags context flags
+ */
+static inline
+void copy_picture_field(TInterlaceContext *tinterlace,
+ uint8_t *dst[4], int dst_linesize[4],
+ const uint8_t *src[4], int src_linesize[4],
+ enum AVPixelFormat format, int w, int src_h,
+ int src_field, int interleave, int dst_field,
+ int flags)
+{
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(format);
+ int hsub = desc->log2_chroma_w;
+ int plane, vsub = desc->log2_chroma_h;
+ int k = src_field == FIELD_UPPER_AND_LOWER ? 1 : 2;
+ int h;
+
+ for (plane = 0; plane < desc->nb_components; plane++) {
+ int lines = plane == 1 || plane == 2 ? AV_CEIL_RSHIFT(src_h, vsub) : src_h;
+ int cols = plane == 1 || plane == 2 ? AV_CEIL_RSHIFT( w, hsub) : w;
+ uint8_t *dstp = dst[plane];
+ const uint8_t *srcp = src[plane];
+
+ lines = (lines + (src_field == FIELD_UPPER)) / k;
+ if (src_field == FIELD_LOWER)
+ srcp += src_linesize[plane];
+ if (interleave && dst_field == FIELD_LOWER)
+ dstp += dst_linesize[plane];
+ if (flags & TINTERLACE_FLAG_VLPF) {
+ // Low-pass filtering is required when creating an interlaced destination from
+ // a progressive source which contains high-frequency vertical detail.
+ // Filtering will reduce interlace 'twitter' and Moire patterning.
+ int srcp_linesize = src_linesize[plane] * k;
+ int dstp_linesize = dst_linesize[plane] * (interleave ? 2 : 1);
+ for (h = lines; h > 0; h--) {
+ const uint8_t *srcp_above = srcp - src_linesize[plane];
+ const uint8_t *srcp_below = srcp + src_linesize[plane];
+ if (h == lines) srcp_above = srcp; // there is no line above
+ if (h == 1) srcp_below = srcp; // there is no line below
+
+ tinterlace->lowpass_line(dstp, cols, srcp, srcp_above, srcp_below);
+ dstp += dstp_linesize;
+ srcp += srcp_linesize;
+ }
+ } else {
+ av_image_copy_plane(dstp, dst_linesize[plane] * (interleave ? 2 : 1),
+ srcp, src_linesize[plane]*k, cols, lines);
+ }
+ }
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *picref)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ TInterlaceContext *tinterlace = ctx->priv;
+ AVFrame *cur, *next, *out;
+ int field, tff, ret;
+
+ av_frame_free(&tinterlace->cur);
+ tinterlace->cur = tinterlace->next;
+ tinterlace->next = picref;
+
+ cur = tinterlace->cur;
+ next = tinterlace->next;
+ /* we need at least two frames */
+ if (!tinterlace->cur)
+ return 0;
+
+ switch (tinterlace->mode) {
+ case MODE_MERGEX2: /* move the odd frame into the upper field of the new image, even into
+ * the lower field, generating a double-height video at same framerate */
+ case MODE_MERGE: /* move the odd frame into the upper field of the new image, even into
+ * the lower field, generating a double-height video at half framerate */
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out)
+ return AVERROR(ENOMEM);
+ av_frame_copy_props(out, cur);
+ out->height = outlink->h;
+ out->interlaced_frame = 1;
+ out->top_field_first = 1;
+ out->sample_aspect_ratio = av_mul_q(cur->sample_aspect_ratio, av_make_q(2, 1));
+
+ /* write odd frame lines into the upper field of the new frame */
+ copy_picture_field(tinterlace, out->data, out->linesize,
+ (const uint8_t **)cur->data, cur->linesize,
+ inlink->format, inlink->w, inlink->h,
+ FIELD_UPPER_AND_LOWER, 1, tinterlace->mode == MODE_MERGEX2 ? inlink->frame_count_out & 1 ? FIELD_LOWER : FIELD_UPPER : FIELD_UPPER, tinterlace->flags);
+ /* write even frame lines into the lower field of the new frame */
+ copy_picture_field(tinterlace, out->data, out->linesize,
+ (const uint8_t **)next->data, next->linesize,
+ inlink->format, inlink->w, inlink->h,
+ FIELD_UPPER_AND_LOWER, 1, tinterlace->mode == MODE_MERGEX2 ? inlink->frame_count_out & 1 ? FIELD_UPPER : FIELD_LOWER : FIELD_LOWER, tinterlace->flags);
+ if (tinterlace->mode != MODE_MERGEX2)
+ av_frame_free(&tinterlace->next);
+ break;
+
+ case MODE_DROP_ODD: /* only output even frames, odd frames are dropped; height unchanged, half framerate */
+ case MODE_DROP_EVEN: /* only output odd frames, even frames are dropped; height unchanged, half framerate */
+ out = av_frame_clone(tinterlace->mode == MODE_DROP_EVEN ? cur : next);
+ if (!out)
+ return AVERROR(ENOMEM);
+ av_frame_free(&tinterlace->next);
+ break;
+
+ case MODE_PAD: /* expand each frame to double height, but pad alternate
+ * lines with black; framerate unchanged */
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out)
+ return AVERROR(ENOMEM);
+ av_frame_copy_props(out, cur);
+ out->height = outlink->h;
+ out->sample_aspect_ratio = av_mul_q(cur->sample_aspect_ratio, av_make_q(2, 1));
+
+ field = (1 + tinterlace->frame) & 1 ? FIELD_UPPER : FIELD_LOWER;
+ /* copy upper and lower fields */
+ copy_picture_field(tinterlace, out->data, out->linesize,
+ (const uint8_t **)cur->data, cur->linesize,
+ inlink->format, inlink->w, inlink->h,
+ FIELD_UPPER_AND_LOWER, 1, field, tinterlace->flags);
+ /* pad with black the other field */
+ copy_picture_field(tinterlace, out->data, out->linesize,
+ (const uint8_t **)tinterlace->black_data, tinterlace->black_linesize,
+ inlink->format, inlink->w, inlink->h,
+ FIELD_UPPER_AND_LOWER, 1, !field, tinterlace->flags);
+ break;
+
+ /* interleave upper/lower lines from odd frames with lower/upper lines from even frames,
+ * halving the frame rate and preserving image height */
+ case MODE_INTERLEAVE_TOP: /* top field first */
+ case MODE_INTERLEAVE_BOTTOM: /* bottom field first */
+ tff = tinterlace->mode == MODE_INTERLEAVE_TOP;
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out)
+ return AVERROR(ENOMEM);
+ av_frame_copy_props(out, cur);
+ out->interlaced_frame = 1;
+ out->top_field_first = tff;
+
+ /* copy upper/lower field from cur */
+ copy_picture_field(tinterlace, out->data, out->linesize,
+ (const uint8_t **)cur->data, cur->linesize,
+ inlink->format, inlink->w, inlink->h,
+ tff ? FIELD_UPPER : FIELD_LOWER, 1, tff ? FIELD_UPPER : FIELD_LOWER,
+ tinterlace->flags);
+ /* copy lower/upper field from next */
+ copy_picture_field(tinterlace, out->data, out->linesize,
+ (const uint8_t **)next->data, next->linesize,
+ inlink->format, inlink->w, inlink->h,
+ tff ? FIELD_LOWER : FIELD_UPPER, 1, tff ? FIELD_LOWER : FIELD_UPPER,
+ tinterlace->flags);
+ av_frame_free(&tinterlace->next);
+ break;
+ case MODE_INTERLACEX2: /* re-interlace preserving image height, double frame rate */
+ /* output current frame first */
+ out = av_frame_clone(cur);
+ if (!out)
+ return AVERROR(ENOMEM);
+ out->interlaced_frame = 1;
+ if (cur->pts != AV_NOPTS_VALUE)
+ out->pts = cur->pts*2;
+
+ out->pts = av_rescale_q(out->pts, tinterlace->preout_time_base, outlink->time_base);
+ if ((ret = ff_filter_frame(outlink, out)) < 0)
+ return ret;
+
+ /* output mix of current and next frame */
+ tff = next->top_field_first;
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out)
+ return AVERROR(ENOMEM);
+ av_frame_copy_props(out, next);
+ out->interlaced_frame = 1;
+ out->top_field_first = !tff;
+
+ if (next->pts != AV_NOPTS_VALUE && cur->pts != AV_NOPTS_VALUE)
+ out->pts = cur->pts + next->pts;
+ else
+ out->pts = AV_NOPTS_VALUE;
+ /* write current frame second field lines into the second field of the new frame */
+ copy_picture_field(tinterlace, out->data, out->linesize,
+ (const uint8_t **)cur->data, cur->linesize,
+ inlink->format, inlink->w, inlink->h,
+ tff ? FIELD_LOWER : FIELD_UPPER, 1, tff ? FIELD_LOWER : FIELD_UPPER,
+ tinterlace->flags);
+ /* write next frame first field lines into the first field of the new frame */
+ copy_picture_field(tinterlace, out->data, out->linesize,
+ (const uint8_t **)next->data, next->linesize,
+ inlink->format, inlink->w, inlink->h,
+ tff ? FIELD_UPPER : FIELD_LOWER, 1, tff ? FIELD_UPPER : FIELD_LOWER,
+ tinterlace->flags);
+ break;
+ default:
+ av_assert0(0);
+ }
+
+ out->pts = av_rescale_q(out->pts, tinterlace->preout_time_base, outlink->time_base);
+ ret = ff_filter_frame(outlink, out);
+ tinterlace->frame++;
+
+ return ret;
+}
+
+static const AVFilterPad tinterlace_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad tinterlace_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_out_props,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_tinterlace = {
+ .name = "tinterlace",
+ .description = NULL_IF_CONFIG_SMALL("Perform temporal field interlacing."),
+ .priv_size = sizeof(TInterlaceContext),
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = tinterlace_inputs,
+ .outputs = tinterlace_outputs,
+ .priv_class = &tinterlace_class,
+};
diff --git a/libavfilter/vf_transpose.c b/libavfilter/vf_transpose.c
index 07602b9086..75b4dda41f 100644
--- a/libavfilter/vf_transpose.c
+++ b/libavfilter/vf_transpose.c
@@ -2,20 +2,20 @@
* Copyright (c) 2010 Stefano Sabatini
* Copyright (c) 2008 Vitor Sessak
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -38,6 +38,12 @@
#include "internal.h"
#include "video.h"
+typedef enum {
+ TRANSPOSE_PT_TYPE_NONE,
+ TRANSPOSE_PT_TYPE_LANDSCAPE,
+ TRANSPOSE_PT_TYPE_PORTRAIT,
+} PassthroughType;
+
enum TransposeDir {
TRANSPOSE_CCLOCK_FLIP,
TRANSPOSE_CLOCK,
@@ -50,51 +56,58 @@ typedef struct TransContext {
int hsub, vsub;
int pixsteps[4];
- enum TransposeDir dir;
+ int passthrough; ///< PassthroughType, landscape passthrough mode enabled
+ int dir; ///< TransposeDir
} TransContext;
static int query_formats(AVFilterContext *ctx)
{
- enum AVPixelFormat pix_fmts[] = {
- AV_PIX_FMT_ARGB, AV_PIX_FMT_RGBA,
- AV_PIX_FMT_ABGR, AV_PIX_FMT_BGRA,
- AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
- AV_PIX_FMT_RGB565BE, AV_PIX_FMT_RGB565LE,
- AV_PIX_FMT_RGB555BE, AV_PIX_FMT_RGB555LE,
- AV_PIX_FMT_BGR565BE, AV_PIX_FMT_BGR565LE,
- AV_PIX_FMT_BGR555BE, AV_PIX_FMT_BGR555LE,
- AV_PIX_FMT_GRAY16BE, AV_PIX_FMT_GRAY16LE,
- AV_PIX_FMT_YUV420P16LE, AV_PIX_FMT_YUV420P16BE,
- AV_PIX_FMT_YUV422P16LE, AV_PIX_FMT_YUV422P16BE,
- AV_PIX_FMT_YUV444P16LE, AV_PIX_FMT_YUV444P16BE,
- AV_PIX_FMT_NV12, AV_PIX_FMT_NV21,
- AV_PIX_FMT_RGB8, AV_PIX_FMT_BGR8,
- AV_PIX_FMT_RGB4_BYTE, AV_PIX_FMT_BGR4_BYTE,
- AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P,
- AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUVJ420P,
- AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
- AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P,
- AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUVJ440P,
- AV_PIX_FMT_YUVA420P, AV_PIX_FMT_GRAY8,
- AV_PIX_FMT_NONE
- };
-
- ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
- return 0;
+ AVFilterFormats *pix_fmts = NULL;
+ int fmt, ret;
+
+ for (fmt = 0; av_pix_fmt_desc_get(fmt); fmt++) {
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
+ if (!(desc->flags & AV_PIX_FMT_FLAG_PAL ||
+ desc->flags & AV_PIX_FMT_FLAG_HWACCEL ||
+ desc->flags & AV_PIX_FMT_FLAG_BITSTREAM ||
+ desc->log2_chroma_w != desc->log2_chroma_h) &&
+ (ret = ff_add_format(&pix_fmts, fmt)) < 0)
+ return ret;
+ }
+
+
+ return ff_set_common_formats(ctx, pix_fmts);
}
static int config_props_output(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
- TransContext *trans = ctx->priv;
+ TransContext *s = ctx->priv;
AVFilterLink *inlink = ctx->inputs[0];
const AVPixFmtDescriptor *desc_out = av_pix_fmt_desc_get(outlink->format);
const AVPixFmtDescriptor *desc_in = av_pix_fmt_desc_get(inlink->format);
- trans->hsub = desc_in->log2_chroma_w;
- trans->vsub = desc_in->log2_chroma_h;
+ if (s->dir&4) {
+ av_log(ctx, AV_LOG_WARNING,
+ "dir values greater than 3 are deprecated, use the passthrough option instead\n");
+ s->dir &= 3;
+ s->passthrough = TRANSPOSE_PT_TYPE_LANDSCAPE;
+ }
+
+ if ((inlink->w >= inlink->h && s->passthrough == TRANSPOSE_PT_TYPE_LANDSCAPE) ||
+ (inlink->w <= inlink->h && s->passthrough == TRANSPOSE_PT_TYPE_PORTRAIT)) {
+ av_log(ctx, AV_LOG_VERBOSE,
+ "w:%d h:%d -> w:%d h:%d (passthrough mode)\n",
+ inlink->w, inlink->h, inlink->w, inlink->h);
+ return 0;
+ } else {
+ s->passthrough = TRANSPOSE_PT_TYPE_NONE;
+ }
- av_image_fill_max_pixsteps(trans->pixsteps, NULL, desc_out);
+ s->hsub = desc_in->log2_chroma_w;
+ s->vsub = desc_in->log2_chroma_h;
+
+ av_image_fill_max_pixsteps(s->pixsteps, NULL, desc_out);
outlink->w = inlink->h;
outlink->h = inlink->w;
@@ -107,114 +120,167 @@ static int config_props_output(AVFilterLink *outlink)
av_log(ctx, AV_LOG_VERBOSE,
"w:%d h:%d dir:%d -> w:%d h:%d rotation:%s vflip:%d\n",
- inlink->w, inlink->h, trans->dir, outlink->w, outlink->h,
- trans->dir == 1 || trans->dir == 3 ? "clockwise" : "counterclockwise",
- trans->dir == 0 || trans->dir == 3);
+ inlink->w, inlink->h, s->dir, outlink->w, outlink->h,
+ s->dir == 1 || s->dir == 3 ? "clockwise" : "counterclockwise",
+ s->dir == 0 || s->dir == 3);
return 0;
}
-static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+static AVFrame *get_video_buffer(AVFilterLink *inlink, int w, int h)
{
- AVFilterLink *outlink = inlink->dst->outputs[0];
- TransContext *trans = inlink->dst->priv;
- AVFrame *out;
- int plane;
+ TransContext *s = inlink->dst->priv;
- out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
- if (!out) {
- av_frame_free(&in);
- return AVERROR(ENOMEM);
- }
+ return s->passthrough ?
+ ff_null_get_video_buffer (inlink, w, h) :
+ ff_default_get_video_buffer(inlink, w, h);
+}
- out->pts = in->pts;
+typedef struct ThreadData {
+ AVFrame *in, *out;
+} ThreadData;
- if (in->sample_aspect_ratio.num == 0) {
- out->sample_aspect_ratio = in->sample_aspect_ratio;
- } else {
- out->sample_aspect_ratio.num = in->sample_aspect_ratio.den;
- out->sample_aspect_ratio.den = in->sample_aspect_ratio.num;
- }
+static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr,
+ int nb_jobs)
+{
+ TransContext *s = ctx->priv;
+ ThreadData *td = arg;
+ AVFrame *out = td->out;
+ AVFrame *in = td->in;
+ int plane;
for (plane = 0; out->data[plane]; plane++) {
- int hsub = plane == 1 || plane == 2 ? trans->hsub : 0;
- int vsub = plane == 1 || plane == 2 ? trans->vsub : 0;
- int pixstep = trans->pixsteps[plane];
- int inh = in->height >> vsub;
- int outw = out->width >> hsub;
- int outh = out->height >> vsub;
+ int hsub = plane == 1 || plane == 2 ? s->hsub : 0;
+ int vsub = plane == 1 || plane == 2 ? s->vsub : 0;
+ int pixstep = s->pixsteps[plane];
+ int inh = AV_CEIL_RSHIFT(in->height, vsub);
+ int outw = AV_CEIL_RSHIFT(out->width, hsub);
+ int outh = AV_CEIL_RSHIFT(out->height, vsub);
+ int start = (outh * jobnr ) / nb_jobs;
+ int end = (outh * (jobnr+1)) / nb_jobs;
uint8_t *dst, *src;
int dstlinesize, srclinesize;
int x, y;
- dst = out->data[plane];
dstlinesize = out->linesize[plane];
+ dst = out->data[plane] + start * dstlinesize;
src = in->data[plane];
srclinesize = in->linesize[plane];
- if (trans->dir & 1) {
+ if (s->dir & 1) {
src += in->linesize[plane] * (inh - 1);
srclinesize *= -1;
}
- if (trans->dir & 2) {
- dst += out->linesize[plane] * (outh - 1);
+ if (s->dir & 2) {
+ dst = out->data[plane] + dstlinesize * (outh - start - 1);
dstlinesize *= -1;
}
- for (y = 0; y < outh; y++) {
- switch (pixstep) {
- case 1:
+ switch (pixstep) {
+ case 1:
+ for (y = start; y < end; y++, dst += dstlinesize)
for (x = 0; x < outw; x++)
dst[x] = src[x * srclinesize + y];
- break;
- case 2:
+ break;
+ case 2:
+ for (y = start; y < end; y++, dst += dstlinesize) {
for (x = 0; x < outw; x++)
*((uint16_t *)(dst + 2 * x)) =
*((uint16_t *)(src + x * srclinesize + y * 2));
- break;
- case 3:
+ }
+ break;
+ case 3:
+ for (y = start; y < end; y++, dst += dstlinesize) {
for (x = 0; x < outw; x++) {
int32_t v = AV_RB24(src + x * srclinesize + y * 3);
AV_WB24(dst + 3 * x, v);
}
- break;
- case 4:
+ }
+ break;
+ case 4:
+ for (y = start; y < end; y++, dst += dstlinesize) {
for (x = 0; x < outw; x++)
*((uint32_t *)(dst + 4 * x)) =
*((uint32_t *)(src + x * srclinesize + y * 4));
- break;
}
- dst += dstlinesize;
+ break;
+ case 6:
+ for (y = start; y < end; y++, dst += dstlinesize) {
+ for (x = 0; x < outw; x++) {
+ int64_t v = AV_RB48(src + x * srclinesize + y*6);
+ AV_WB48(dst + 6*x, v);
+ }
+ }
+ break;
+ case 8:
+ for (y = start; y < end; y++, dst += dstlinesize) {
+ for (x = 0; x < outw; x++)
+ *((uint64_t *)(dst + 8*x)) = *((uint64_t *)(src + x * srclinesize + y*8));
+ }
+ break;
}
}
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ TransContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ ThreadData td;
+ AVFrame *out;
+
+ if (s->passthrough)
+ return ff_filter_frame(outlink, in);
+
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+
+ if (in->sample_aspect_ratio.num == 0) {
+ out->sample_aspect_ratio = in->sample_aspect_ratio;
+ } else {
+ out->sample_aspect_ratio.num = in->sample_aspect_ratio.den;
+ out->sample_aspect_ratio.den = in->sample_aspect_ratio.num;
+ }
+
+ td.in = in, td.out = out;
+ ctx->internal->execute(ctx, filter_slice, &td, NULL, FFMIN(outlink->h, ff_filter_get_nb_threads(ctx)));
av_frame_free(&in);
return ff_filter_frame(outlink, out);
}
#define OFFSET(x) offsetof(TransContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
-static const AVOption options[] = {
- { "dir", "Transpose direction", OFFSET(dir), AV_OPT_TYPE_INT, { .i64 = TRANSPOSE_CCLOCK_FLIP },
- TRANSPOSE_CCLOCK_FLIP, TRANSPOSE_CLOCK_FLIP, FLAGS, "dir" },
- { "cclock_flip", "counter-clockwise with vertical flip", 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CCLOCK_FLIP }, .unit = "dir" },
- { "clock", "clockwise", 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CLOCK }, .unit = "dir" },
- { "cclock", "counter-clockwise", 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CCLOCK }, .unit = "dir" },
- { "clock_flip", "clockwise with vertical flip", 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CLOCK_FLIP }, .unit = "dir" },
- { NULL },
-};
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption transpose_options[] = {
+ { "dir", "set transpose direction", OFFSET(dir), AV_OPT_TYPE_INT, { .i64 = TRANSPOSE_CCLOCK_FLIP }, 0, 7, FLAGS, "dir" },
+ { "cclock_flip", "rotate counter-clockwise with vertical flip", 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CCLOCK_FLIP }, .flags=FLAGS, .unit = "dir" },
+ { "clock", "rotate clockwise", 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CLOCK }, .flags=FLAGS, .unit = "dir" },
+ { "cclock", "rotate counter-clockwise", 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CCLOCK }, .flags=FLAGS, .unit = "dir" },
+ { "clock_flip", "rotate clockwise with vertical flip", 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CLOCK_FLIP }, .flags=FLAGS, .unit = "dir" },
-static const AVClass transpose_class = {
- .class_name = "transpose",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
+ { "passthrough", "do not apply transposition if the input matches the specified geometry",
+ OFFSET(passthrough), AV_OPT_TYPE_INT, {.i64=TRANSPOSE_PT_TYPE_NONE}, 0, INT_MAX, FLAGS, "passthrough" },
+ { "none", "always apply transposition", 0, AV_OPT_TYPE_CONST, {.i64=TRANSPOSE_PT_TYPE_NONE}, INT_MIN, INT_MAX, FLAGS, "passthrough" },
+ { "portrait", "preserve portrait geometry", 0, AV_OPT_TYPE_CONST, {.i64=TRANSPOSE_PT_TYPE_PORTRAIT}, INT_MIN, INT_MAX, FLAGS, "passthrough" },
+ { "landscape", "preserve landscape geometry", 0, AV_OPT_TYPE_CONST, {.i64=TRANSPOSE_PT_TYPE_LANDSCAPE}, INT_MIN, INT_MAX, FLAGS, "passthrough" },
+
+ { NULL }
};
+AVFILTER_DEFINE_CLASS(transpose);
+
static const AVFilterPad avfilter_vf_transpose_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
+ .get_video_buffer = get_video_buffer,
.filter_frame = filter_frame,
},
{ NULL }
@@ -237,4 +303,5 @@ AVFilter ff_vf_transpose = {
.query_formats = query_formats,
.inputs = avfilter_vf_transpose_inputs,
.outputs = avfilter_vf_transpose_outputs,
+ .flags = AVFILTER_FLAG_SLICE_THREADS,
};
diff --git a/libavfilter/vf_unsharp.c b/libavfilter/vf_unsharp.c
index dbe3874455..438ff6d8fb 100644
--- a/libavfilter/vf_unsharp.c
+++ b/libavfilter/vf_unsharp.c
@@ -3,26 +3,26 @@
* Port copyright (c) 2010 Daniel G. Taylor <dan@programmer-art.org>
* Relicensed to the LGPL with permission from Remi Guyomarch.
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
- * blur / sharpen filter, ported to Libav from MPlayer
+ * blur / sharpen filter, ported to FFmpeg from MPlayer
* libmpcodecs/unsharp.c.
*
* This code is based on:
@@ -41,76 +41,57 @@
#include "internal.h"
#include "video.h"
#include "libavutil/common.h"
+#include "libavutil/imgutils.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
-
-#define MIN_SIZE 3
-#define MAX_SIZE 13
-
-typedef struct FilterParam {
- int msize_x; ///< matrix width
- int msize_y; ///< matrix height
- int amount; ///< effect amount
- int steps_x; ///< horizontal step count
- int steps_y; ///< vertical step count
- int scalebits; ///< bits to shift pixel
- int32_t halfscale; ///< amount to add to pixel
- uint32_t *sc[(MAX_SIZE * MAX_SIZE) - 1]; ///< finite state machine storage
-} FilterParam;
-
-typedef struct UnsharpContext {
- const AVClass *class;
- int lmsize_x, lmsize_y, cmsize_x, cmsize_y;
- float lamount, camount;
- FilterParam luma; ///< luma parameters (width, height, amount)
- FilterParam chroma; ///< chroma parameters (width, height, amount)
- int hsub, vsub;
-} UnsharpContext;
+#include "unsharp.h"
+#include "unsharp_opencl.h"
static void apply_unsharp( uint8_t *dst, int dst_stride,
const uint8_t *src, int src_stride,
- int width, int height, FilterParam *fp)
+ int width, int height, UnsharpFilterParam *fp)
{
uint32_t **sc = fp->sc;
- uint32_t sr[(MAX_SIZE * MAX_SIZE) - 1], tmp1, tmp2;
+ uint32_t sr[MAX_MATRIX_SIZE - 1], tmp1, tmp2;
int32_t res;
int x, y, z;
- const uint8_t *src2;
-
- if (!fp->amount) {
- if (dst_stride == src_stride)
- memcpy(dst, src, src_stride * height);
- else
- for (y = 0; y < height; y++, dst += dst_stride, src += src_stride)
- memcpy(dst, src, width);
+ const uint8_t *src2 = NULL; //silence a warning
+ const int amount = fp->amount;
+ const int steps_x = fp->steps_x;
+ const int steps_y = fp->steps_y;
+ const int scalebits = fp->scalebits;
+ const int32_t halfscale = fp->halfscale;
+
+ if (!amount) {
+ av_image_copy_plane(dst, dst_stride, src, src_stride, width, height);
return;
}
- for (y = 0; y < 2 * fp->steps_y; y++)
- memset(sc[y], 0, sizeof(sc[y][0]) * (width + 2 * fp->steps_x));
+ for (y = 0; y < 2 * steps_y; y++)
+ memset(sc[y], 0, sizeof(sc[y][0]) * (width + 2 * steps_x));
- for (y = -fp->steps_y; y < height + fp->steps_y; y++) {
+ for (y = -steps_y; y < height + steps_y; y++) {
if (y < height)
src2 = src;
- memset(sr, 0, sizeof(sr[0]) * (2 * fp->steps_x - 1));
- for (x = -fp->steps_x; x < width + fp->steps_x; x++) {
+ memset(sr, 0, sizeof(sr[0]) * (2 * steps_x - 1));
+ for (x = -steps_x; x < width + steps_x; x++) {
tmp1 = x <= 0 ? src2[0] : x >= width ? src2[width-1] : src2[x];
- for (z = 0; z < fp->steps_x * 2; z += 2) {
+ for (z = 0; z < steps_x * 2; z += 2) {
tmp2 = sr[z + 0] + tmp1; sr[z + 0] = tmp1;
tmp1 = sr[z + 1] + tmp2; sr[z + 1] = tmp2;
}
- for (z = 0; z < fp->steps_y * 2; z += 2) {
- tmp2 = sc[z + 0][x + fp->steps_x] + tmp1; sc[z + 0][x + fp->steps_x] = tmp1;
- tmp1 = sc[z + 1][x + fp->steps_x] + tmp2; sc[z + 1][x + fp->steps_x] = tmp2;
+ for (z = 0; z < steps_y * 2; z += 2) {
+ tmp2 = sc[z + 0][x + steps_x] + tmp1; sc[z + 0][x + steps_x] = tmp1;
+ tmp1 = sc[z + 1][x + steps_x] + tmp2; sc[z + 1][x + steps_x] = tmp2;
}
- if (x >= fp->steps_x && y >= fp->steps_y) {
- const uint8_t *srx = src - fp->steps_y * src_stride + x - fp->steps_x;
- uint8_t *dsx = dst - fp->steps_y * dst_stride + x - fp->steps_x;
+ if (x >= steps_x && y >= steps_y) {
+ const uint8_t *srx = src - steps_y * src_stride + x - steps_x;
+ uint8_t *dsx = dst - steps_y * dst_stride + x - steps_x;
- res = (int32_t)*srx + ((((int32_t) * srx - (int32_t)((tmp1 + fp->halfscale) >> fp->scalebits)) * fp->amount) >> 16);
+ res = (int32_t)*srx + ((((int32_t) * srx - (int32_t)((tmp1 + halfscale) >> scalebits)) * amount) >> 16);
*dsx = av_clip_uint8(res);
}
}
@@ -121,7 +102,25 @@ static void apply_unsharp( uint8_t *dst, int dst_stride,
}
}
-static void set_filter_param(FilterParam *fp, int msize_x, int msize_y, float amount)
+static int apply_unsharp_c(AVFilterContext *ctx, AVFrame *in, AVFrame *out)
+{
+ AVFilterLink *inlink = ctx->inputs[0];
+ UnsharpContext *s = ctx->priv;
+ int i, plane_w[3], plane_h[3];
+ UnsharpFilterParam *fp[3];
+ plane_w[0] = inlink->w;
+ plane_w[1] = plane_w[2] = AV_CEIL_RSHIFT(inlink->w, s->hsub);
+ plane_h[0] = inlink->h;
+ plane_h[1] = plane_h[2] = AV_CEIL_RSHIFT(inlink->h, s->vsub);
+ fp[0] = &s->luma;
+ fp[1] = fp[2] = &s->chroma;
+ for (i = 0; i < 3; i++) {
+ apply_unsharp(out->data[i], out->linesize[i], in->data[i], in->linesize[i], plane_w[i], plane_h[i], fp[i]);
+ }
+ return 0;
+}
+
+static void set_filter_param(UnsharpFilterParam *fp, int msize_x, int msize_y, float amount)
{
fp->msize_x = msize_x;
fp->msize_y = msize_y;
@@ -135,78 +134,113 @@ static void set_filter_param(FilterParam *fp, int msize_x, int msize_y, float am
static av_cold int init(AVFilterContext *ctx)
{
- UnsharpContext *unsharp = ctx->priv;
+ int ret = 0;
+ UnsharpContext *s = ctx->priv;
+
- set_filter_param(&unsharp->luma, unsharp->lmsize_x, unsharp->lmsize_y, unsharp->lamount);
- set_filter_param(&unsharp->chroma, unsharp->cmsize_x, unsharp->cmsize_y, unsharp->camount);
+ set_filter_param(&s->luma, s->lmsize_x, s->lmsize_y, s->lamount);
+ set_filter_param(&s->chroma, s->cmsize_x, s->cmsize_y, s->camount);
+ if (s->luma.scalebits >= 26 || s->chroma.scalebits >= 26) {
+ av_log(ctx, AV_LOG_ERROR, "luma or chroma matrix size too big\n");
+ return AVERROR(EINVAL);
+ }
+ s->apply_unsharp = apply_unsharp_c;
+ if (!CONFIG_OPENCL && s->opencl) {
+ av_log(ctx, AV_LOG_ERROR, "OpenCL support was not enabled in this build, cannot be selected\n");
+ return AVERROR(EINVAL);
+ }
+ if (CONFIG_OPENCL && s->opencl) {
+ s->apply_unsharp = ff_opencl_apply_unsharp;
+ ret = ff_opencl_unsharp_init(ctx);
+ if (ret < 0)
+ return ret;
+ }
return 0;
}
static int query_formats(AVFilterContext *ctx)
{
- enum AVPixelFormat pix_fmts[] = {
+ static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV410P,
AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P,
AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_NONE
};
- ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
-
- return 0;
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
}
-static void init_filter_param(AVFilterContext *ctx, FilterParam *fp, const char *effect_type, int width)
+static int init_filter_param(AVFilterContext *ctx, UnsharpFilterParam *fp, const char *effect_type, int width)
{
int z;
- const char *effect;
+ const char *effect = fp->amount == 0 ? "none" : fp->amount < 0 ? "blur" : "sharpen";
- effect = fp->amount == 0 ? "none" : fp->amount < 0 ? "blur" : "sharpen";
+ if (!(fp->msize_x & fp->msize_y & 1)) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Invalid even size for %s matrix size %dx%d\n",
+ effect_type, fp->msize_x, fp->msize_y);
+ return AVERROR(EINVAL);
+ }
av_log(ctx, AV_LOG_VERBOSE, "effect:%s type:%s msize_x:%d msize_y:%d amount:%0.2f\n",
effect, effect_type, fp->msize_x, fp->msize_y, fp->amount / 65535.0);
for (z = 0; z < 2 * fp->steps_y; z++)
- fp->sc[z] = av_malloc(sizeof(*(fp->sc[z])) * (width + 2 * fp->steps_x));
+ if (!(fp->sc[z] = av_malloc_array(width + 2 * fp->steps_x,
+ sizeof(*(fp->sc[z])))))
+ return AVERROR(ENOMEM);
+
+ return 0;
}
static int config_props(AVFilterLink *link)
{
- UnsharpContext *unsharp = link->dst->priv;
+ UnsharpContext *s = link->dst->priv;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format);
+ int ret;
- unsharp->hsub = desc->log2_chroma_w;
- unsharp->vsub = desc->log2_chroma_h;
+ s->hsub = desc->log2_chroma_w;
+ s->vsub = desc->log2_chroma_h;
- init_filter_param(link->dst, &unsharp->luma, "luma", link->w);
- init_filter_param(link->dst, &unsharp->chroma, "chroma", AV_CEIL_RSHIFT(link->w, unsharp->hsub));
+ ret = init_filter_param(link->dst, &s->luma, "luma", link->w);
+ if (ret < 0)
+ return ret;
+ ret = init_filter_param(link->dst, &s->chroma, "chroma", AV_CEIL_RSHIFT(link->w, s->hsub));
+ if (ret < 0)
+ return ret;
return 0;
}
-static void free_filter_param(FilterParam *fp)
+static void free_filter_param(UnsharpFilterParam *fp)
{
int z;
for (z = 0; z < 2 * fp->steps_y; z++)
- av_free(fp->sc[z]);
+ av_freep(&fp->sc[z]);
}
static av_cold void uninit(AVFilterContext *ctx)
{
- UnsharpContext *unsharp = ctx->priv;
+ UnsharpContext *s = ctx->priv;
+
+ if (CONFIG_OPENCL && s->opencl) {
+ ff_opencl_unsharp_uninit(ctx);
+ }
- free_filter_param(&unsharp->luma);
- free_filter_param(&unsharp->chroma);
+ free_filter_param(&s->luma);
+ free_filter_param(&s->chroma);
}
static int filter_frame(AVFilterLink *link, AVFrame *in)
{
- UnsharpContext *unsharp = link->dst->priv;
+ UnsharpContext *s = link->dst->priv;
AVFilterLink *outlink = link->dst->outputs[0];
AVFrame *out;
- int cw = AV_CEIL_RSHIFT(link->w, unsharp->hsub);
- int ch = AV_CEIL_RSHIFT(link->h, unsharp->vsub);
+ int ret = 0;
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
@@ -214,33 +248,45 @@ static int filter_frame(AVFilterLink *link, AVFrame *in)
return AVERROR(ENOMEM);
}
av_frame_copy_props(out, in);
+ if (CONFIG_OPENCL && s->opencl) {
+ ret = ff_opencl_unsharp_process_inout_buf(link->dst, in, out);
+ if (ret < 0)
+ goto end;
+ }
- apply_unsharp(out->data[0], out->linesize[0], in->data[0], in->linesize[0], link->w, link->h, &unsharp->luma);
- apply_unsharp(out->data[1], out->linesize[1], in->data[1], in->linesize[1], cw, ch, &unsharp->chroma);
- apply_unsharp(out->data[2], out->linesize[2], in->data[2], in->linesize[2], cw, ch, &unsharp->chroma);
-
+ ret = s->apply_unsharp(link->dst, in, out);
+end:
av_frame_free(&in);
+
+ if (ret < 0) {
+ av_frame_free(&out);
+ return ret;
+ }
return ff_filter_frame(outlink, out);
}
#define OFFSET(x) offsetof(UnsharpContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
-static const AVOption options[] = {
- { "luma_msize_x", "luma matrix horizontal size", OFFSET(lmsize_x), AV_OPT_TYPE_INT, { .i64 = 5 }, MIN_SIZE, MAX_SIZE, FLAGS },
- { "luma_msize_y", "luma matrix vertical size", OFFSET(lmsize_y), AV_OPT_TYPE_INT, { .i64 = 5 }, MIN_SIZE, MAX_SIZE, FLAGS },
- { "luma_amount", "luma effect strength", OFFSET(lamount), AV_OPT_TYPE_FLOAT, { .dbl = 1 }, -2, 5, FLAGS },
- { "chroma_msize_x", "chroma matrix horizontal size", OFFSET(cmsize_x), AV_OPT_TYPE_INT, { .i64 = 5 }, MIN_SIZE, MAX_SIZE, FLAGS },
- { "chroma_msize_y", "chroma matrix vertical size", OFFSET(cmsize_y), AV_OPT_TYPE_INT, { .i64 = 5 }, MIN_SIZE, MAX_SIZE, FLAGS },
- { "chroma_amount", "chroma effect strength", OFFSET(camount), AV_OPT_TYPE_FLOAT, { .dbl = 0 }, -2, 5, FLAGS },
- { NULL },
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+#define MIN_SIZE 3
+#define MAX_SIZE 23
+static const AVOption unsharp_options[] = {
+ { "luma_msize_x", "set luma matrix horizontal size", OFFSET(lmsize_x), AV_OPT_TYPE_INT, { .i64 = 5 }, MIN_SIZE, MAX_SIZE, FLAGS },
+ { "lx", "set luma matrix horizontal size", OFFSET(lmsize_x), AV_OPT_TYPE_INT, { .i64 = 5 }, MIN_SIZE, MAX_SIZE, FLAGS },
+ { "luma_msize_y", "set luma matrix vertical size", OFFSET(lmsize_y), AV_OPT_TYPE_INT, { .i64 = 5 }, MIN_SIZE, MAX_SIZE, FLAGS },
+ { "ly", "set luma matrix vertical size", OFFSET(lmsize_y), AV_OPT_TYPE_INT, { .i64 = 5 }, MIN_SIZE, MAX_SIZE, FLAGS },
+ { "luma_amount", "set luma effect strength", OFFSET(lamount), AV_OPT_TYPE_FLOAT, { .dbl = 1 }, -2, 5, FLAGS },
+ { "la", "set luma effect strength", OFFSET(lamount), AV_OPT_TYPE_FLOAT, { .dbl = 1 }, -2, 5, FLAGS },
+ { "chroma_msize_x", "set chroma matrix horizontal size", OFFSET(cmsize_x), AV_OPT_TYPE_INT, { .i64 = 5 }, MIN_SIZE, MAX_SIZE, FLAGS },
+ { "cx", "set chroma matrix horizontal size", OFFSET(cmsize_x), AV_OPT_TYPE_INT, { .i64 = 5 }, MIN_SIZE, MAX_SIZE, FLAGS },
+ { "chroma_msize_y", "set chroma matrix vertical size", OFFSET(cmsize_y), AV_OPT_TYPE_INT, { .i64 = 5 }, MIN_SIZE, MAX_SIZE, FLAGS },
+ { "cy", "set chroma matrix vertical size", OFFSET(cmsize_y), AV_OPT_TYPE_INT, { .i64 = 5 }, MIN_SIZE, MAX_SIZE, FLAGS },
+ { "chroma_amount", "set chroma effect strength", OFFSET(camount), AV_OPT_TYPE_FLOAT, { .dbl = 0 }, -2, 5, FLAGS },
+ { "ca", "set chroma effect strength", OFFSET(camount), AV_OPT_TYPE_FLOAT, { .dbl = 0 }, -2, 5, FLAGS },
+ { "opencl", "use OpenCL filtering capabilities", OFFSET(opencl), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
+ { NULL }
};
-static const AVClass unsharp_class = {
- .class_name = "unsharp",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(unsharp);
static const AVFilterPad avfilter_vf_unsharp_inputs[] = {
{
@@ -261,17 +307,14 @@ static const AVFilterPad avfilter_vf_unsharp_outputs[] = {
};
AVFilter ff_vf_unsharp = {
- .name = "unsharp",
- .description = NULL_IF_CONFIG_SMALL("Sharpen or blur the input video."),
-
- .priv_size = sizeof(UnsharpContext),
- .priv_class = &unsharp_class,
-
- .init = init,
- .uninit = uninit,
+ .name = "unsharp",
+ .description = NULL_IF_CONFIG_SMALL("Sharpen or blur the input video."),
+ .priv_size = sizeof(UnsharpContext),
+ .priv_class = &unsharp_class,
+ .init = init,
+ .uninit = uninit,
.query_formats = query_formats,
-
- .inputs = avfilter_vf_unsharp_inputs,
-
- .outputs = avfilter_vf_unsharp_outputs,
+ .inputs = avfilter_vf_unsharp_inputs,
+ .outputs = avfilter_vf_unsharp_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
diff --git a/libavfilter/vf_uspp.c b/libavfilter/vf_uspp.c
new file mode 100644
index 0000000000..8a6d0fbb93
--- /dev/null
+++ b/libavfilter/vf_uspp.c
@@ -0,0 +1,509 @@
+/*
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (c) 2014 Arwa Arif <arwaarif1994@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/**
+ * @file
+ * Ultra Slow/Simple Post-processing filter.
+ *
+ * Originally written by Michael Niedermayer for the MPlayer project, and
+ * ported by Arwa Arif for FFmpeg.
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "internal.h"
+#include "avfilter.h"
+
+#define MAX_LEVEL 8 /* quality levels */
+#define BLOCK 16
+
+typedef struct {
+ const AVClass *av_class;
+ int log2_count;
+ int hsub, vsub;
+ int qp;
+ int qscale_type;
+ int temp_stride[3];
+ uint8_t *src[3];
+ uint16_t *temp[3];
+ int outbuf_size;
+ uint8_t *outbuf;
+ AVCodecContext *avctx_enc[BLOCK*BLOCK];
+ AVFrame *frame;
+ AVFrame *frame_dec;
+ uint8_t *non_b_qp_table;
+ int non_b_qp_alloc_size;
+ int use_bframe_qp;
+} USPPContext;
+
+#define OFFSET(x) offsetof(USPPContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption uspp_options[] = {
+ { "quality", "set quality", OFFSET(log2_count), AV_OPT_TYPE_INT, {.i64 = 3}, 0, MAX_LEVEL, FLAGS },
+ { "qp", "force a constant quantizer parameter", OFFSET(qp), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 63, FLAGS },
+ { "use_bframe_qp", "use B-frames' QP", OFFSET(use_bframe_qp), AV_OPT_TYPE_BOOL,{.i64 = 0}, 0, 1, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(uspp);
+
+DECLARE_ALIGNED(8, static const uint8_t, dither)[8][8] = {
+ { 0*4, 48*4, 12*4, 60*4, 3*4, 51*4, 15*4, 63*4, },
+ { 32*4, 16*4, 44*4, 28*4, 35*4, 19*4, 47*4, 31*4, },
+ { 8*4, 56*4, 4*4, 52*4, 11*4, 59*4, 7*4, 55*4, },
+ { 40*4, 24*4, 36*4, 20*4, 43*4, 27*4, 39*4, 23*4, },
+ { 2*4, 50*4, 14*4, 62*4, 1*4, 49*4, 13*4, 61*4, },
+ { 34*4, 18*4, 46*4, 30*4, 33*4, 17*4, 45*4, 29*4, },
+ { 10*4, 58*4, 6*4, 54*4, 9*4, 57*4, 5*4, 53*4, },
+ { 42*4, 26*4, 38*4, 22*4, 41*4, 25*4, 37*4, 21*4, },
+};
+
+static const uint8_t offset[511][2] = {
+ { 0, 0},
+ { 0, 0}, { 8, 8}, // quality 1
+ { 0, 0}, { 4, 4}, {12, 8}, { 8,12}, // quality 2
+ { 0, 0}, {10, 2}, { 4, 4}, {14, 6}, { 8, 8}, { 2,10}, {12,12}, { 6,14}, // quality 3
+
+ { 0, 0}, {10, 2}, { 4, 4}, {14, 6}, { 8, 8}, { 2,10}, {12,12}, { 6,14},
+ { 5, 1}, {15, 3}, { 9, 5}, { 3, 7}, {13, 9}, { 7,11}, { 1,13}, {11,15}, // quality 4
+
+ { 0, 0}, { 8, 0}, { 0, 8}, { 8, 8}, { 5, 1}, {13, 1}, { 5, 9}, {13, 9},
+ { 2, 2}, {10, 2}, { 2,10}, {10,10}, { 7, 3}, {15, 3}, { 7,11}, {15,11},
+ { 4, 4}, {12, 4}, { 4,12}, {12,12}, { 1, 5}, { 9, 5}, { 1,13}, { 9,13},
+ { 6, 6}, {14, 6}, { 6,14}, {14,14}, { 3, 7}, {11, 7}, { 3,15}, {11,15}, // quality 5
+
+ { 0, 0}, { 8, 0}, { 0, 8}, { 8, 8}, { 4, 0}, {12, 0}, { 4, 8}, {12, 8},
+ { 1, 1}, { 9, 1}, { 1, 9}, { 9, 9}, { 5, 1}, {13, 1}, { 5, 9}, {13, 9},
+ { 3, 2}, {11, 2}, { 3,10}, {11,10}, { 7, 2}, {15, 2}, { 7,10}, {15,10},
+ { 2, 3}, {10, 3}, { 2,11}, {10,11}, { 6, 3}, {14, 3}, { 6,11}, {14,11},
+ { 0, 4}, { 8, 4}, { 0,12}, { 8,12}, { 4, 4}, {12, 4}, { 4,12}, {12,12},
+ { 1, 5}, { 9, 5}, { 1,13}, { 9,13}, { 5, 5}, {13, 5}, { 5,13}, {13,13},
+ { 3, 6}, {11, 6}, { 3,14}, {11,14}, { 7, 6}, {15, 6}, { 7,14}, {15,14},
+ { 2, 7}, {10, 7}, { 2,15}, {10,15}, { 6, 7}, {14, 7}, { 6,15}, {14,15}, // quality 6
+
+ { 0, 0}, { 8, 0}, { 0, 8}, { 8, 8}, { 0, 2}, { 8, 2}, { 0,10}, { 8,10},
+ { 0, 4}, { 8, 4}, { 0,12}, { 8,12}, { 0, 6}, { 8, 6}, { 0,14}, { 8,14},
+ { 1, 1}, { 9, 1}, { 1, 9}, { 9, 9}, { 1, 3}, { 9, 3}, { 1,11}, { 9,11},
+ { 1, 5}, { 9, 5}, { 1,13}, { 9,13}, { 1, 7}, { 9, 7}, { 1,15}, { 9,15},
+ { 2, 0}, {10, 0}, { 2, 8}, {10, 8}, { 2, 2}, {10, 2}, { 2,10}, {10,10},
+ { 2, 4}, {10, 4}, { 2,12}, {10,12}, { 2, 6}, {10, 6}, { 2,14}, {10,14},
+ { 3, 1}, {11, 1}, { 3, 9}, {11, 9}, { 3, 3}, {11, 3}, { 3,11}, {11,11},
+ { 3, 5}, {11, 5}, { 3,13}, {11,13}, { 3, 7}, {11, 7}, { 3,15}, {11,15},
+ { 4, 0}, {12, 0}, { 4, 8}, {12, 8}, { 4, 2}, {12, 2}, { 4,10}, {12,10},
+ { 4, 4}, {12, 4}, { 4,12}, {12,12}, { 4, 6}, {12, 6}, { 4,14}, {12,14},
+ { 5, 1}, {13, 1}, { 5, 9}, {13, 9}, { 5, 3}, {13, 3}, { 5,11}, {13,11},
+ { 5, 5}, {13, 5}, { 5,13}, {13,13}, { 5, 7}, {13, 7}, { 5,15}, {13,15},
+ { 6, 0}, {14, 0}, { 6, 8}, {14, 8}, { 6, 2}, {14, 2}, { 6,10}, {14,10},
+ { 6, 4}, {14, 4}, { 6,12}, {14,12}, { 6, 6}, {14, 6}, { 6,14}, {14,14},
+ { 7, 1}, {15, 1}, { 7, 9}, {15, 9}, { 7, 3}, {15, 3}, { 7,11}, {15,11},
+ { 7, 5}, {15, 5}, { 7,13}, {15,13}, { 7, 7}, {15, 7}, { 7,15}, {15,15}, // quality 7
+
+ { 0, 0}, { 8, 0}, { 0, 8}, { 8, 8}, { 4, 4}, {12, 4}, { 4,12}, {12,12},
+ { 0, 4}, { 8, 4}, { 0,12}, { 8,12}, { 4, 0}, {12, 0}, { 4, 8}, {12, 8},
+ { 2, 2}, {10, 2}, { 2,10}, {10,10}, { 6, 6}, {14, 6}, { 6,14}, {14,14},
+ { 2, 6}, {10, 6}, { 2,14}, {10,14}, { 6, 2}, {14, 2}, { 6,10}, {14,10},
+ { 0, 2}, { 8, 2}, { 0,10}, { 8,10}, { 4, 6}, {12, 6}, { 4,14}, {12,14},
+ { 0, 6}, { 8, 6}, { 0,14}, { 8,14}, { 4, 2}, {12, 2}, { 4,10}, {12,10},
+ { 2, 0}, {10, 0}, { 2, 8}, {10, 8}, { 6, 4}, {14, 4}, { 6,12}, {14,12},
+ { 2, 4}, {10, 4}, { 2,12}, {10,12}, { 6, 0}, {14, 0}, { 6, 8}, {14, 8},
+ { 1, 1}, { 9, 1}, { 1, 9}, { 9, 9}, { 5, 5}, {13, 5}, { 5,13}, {13,13},
+ { 1, 5}, { 9, 5}, { 1,13}, { 9,13}, { 5, 1}, {13, 1}, { 5, 9}, {13, 9},
+ { 3, 3}, {11, 3}, { 3,11}, {11,11}, { 7, 7}, {15, 7}, { 7,15}, {15,15},
+ { 3, 7}, {11, 7}, { 3,15}, {11,15}, { 7, 3}, {15, 3}, { 7,11}, {15,11},
+ { 1, 3}, { 9, 3}, { 1,11}, { 9,11}, { 5, 7}, {13, 7}, { 5,15}, {13,15},
+ { 1, 7}, { 9, 7}, { 1,15}, { 9,15}, { 5, 3}, {13, 3}, { 5,11}, {13,11}, // quality 8
+ { 3, 1}, {11, 1}, { 3, 9}, {11, 9}, { 7, 5}, {15, 5}, { 7,13}, {15,13},
+ { 3, 5}, {11, 5}, { 3,13}, {11,13}, { 7, 1}, {15, 1}, { 7, 9}, {15, 9},
+ { 0, 1}, { 8, 1}, { 0, 9}, { 8, 9}, { 4, 5}, {12, 5}, { 4,13}, {12,13},
+ { 0, 5}, { 8, 5}, { 0,13}, { 8,13}, { 4, 1}, {12, 1}, { 4, 9}, {12, 9},
+ { 2, 3}, {10, 3}, { 2,11}, {10,11}, { 6, 7}, {14, 7}, { 6,15}, {14,15},
+ { 2, 7}, {10, 7}, { 2,15}, {10,15}, { 6, 3}, {14, 3}, { 6,11}, {14,11},
+ { 0, 3}, { 8, 3}, { 0,11}, { 8,11}, { 4, 7}, {12, 7}, { 4,15}, {12,15},
+ { 0, 7}, { 8, 7}, { 0,15}, { 8,15}, { 4, 3}, {12, 3}, { 4,11}, {12,11},
+ { 2, 1}, {10, 1}, { 2, 9}, {10, 9}, { 6, 5}, {14, 5}, { 6,13}, {14,13},
+ { 2, 5}, {10, 5}, { 2,13}, {10,13}, { 6, 1}, {14, 1}, { 6, 9}, {14, 9},
+ { 1, 0}, { 9, 0}, { 1, 8}, { 9, 8}, { 5, 4}, {13, 4}, { 5,12}, {13,12},
+ { 1, 4}, { 9, 4}, { 1,12}, { 9,12}, { 5, 0}, {13, 0}, { 5, 8}, {13, 8},
+ { 3, 2}, {11, 2}, { 3,10}, {11,10}, { 7, 6}, {15, 6}, { 7,14}, {15,14},
+ { 3, 6}, {11, 6}, { 3,14}, {11,14}, { 7, 2}, {15, 2}, { 7,10}, {15,10},
+ { 1, 2}, { 9, 2}, { 1,10}, { 9,10}, { 5, 6}, {13, 6}, { 5,14}, {13,14},
+ { 1, 6}, { 9, 6}, { 1,14}, { 9,14}, { 5, 2}, {13, 2}, { 5,10}, {13,10},
+ { 3, 0}, {11, 0}, { 3, 8}, {11, 8}, { 7, 4}, {15, 4}, { 7,12}, {15,12},
+ { 3, 4}, {11, 4}, { 3,12}, {11,12}, { 7, 0}, {15, 0}, { 7, 8}, {15, 8},
+};
+
+static void store_slice_c(uint8_t *dst, const uint16_t *src,
+ int dst_stride, int src_stride,
+ int width, int height, int log2_scale)
+{
+ int y, x;
+
+#define STORE(pos) do { \
+ temp = ((src[x + y * src_stride + pos] << log2_scale) + d[pos]) >> 8; \
+ if (temp & 0x100) temp = ~(temp >> 31); \
+ dst[x + y * dst_stride + pos] = temp; \
+} while (0)
+
+ for (y = 0; y < height; y++) {
+ const uint8_t *d = dither[y&7];
+ for (x = 0; x < width; x += 8) {
+ int temp;
+ STORE(0);
+ STORE(1);
+ STORE(2);
+ STORE(3);
+ STORE(4);
+ STORE(5);
+ STORE(6);
+ STORE(7);
+ }
+ }
+}
+
+static void filter(USPPContext *p, uint8_t *dst[3], uint8_t *src[3],
+ int dst_stride[3], int src_stride[3], int width,
+ int height, uint8_t *qp_store, int qp_stride)
+{
+ int x, y, i, j;
+ const int count = 1<<p->log2_count;
+ int ret;
+
+ for (i = 0; i < 3; i++) {
+ int is_chroma = !!i;
+ int w = AV_CEIL_RSHIFT(width, is_chroma ? p->hsub : 0);
+ int h = AV_CEIL_RSHIFT(height, is_chroma ? p->vsub : 0);
+ int stride = p->temp_stride[i];
+ int block = BLOCK >> (is_chroma ? p->hsub : 0);
+
+ if (!src[i] || !dst[i])
+ continue;
+ for (y = 0; y < h; y++) {
+ int index = block + block * stride + y * stride;
+
+ memcpy(p->src[i] + index, src[i] + y * src_stride[i], w );
+ for (x = 0; x < block; x++) {
+ p->src[i][index - x - 1] = p->src[i][index + x ];
+ p->src[i][index + w + x ] = p->src[i][index + w - x - 1];
+ }
+ }
+ for (y = 0; y < block; y++) {
+ memcpy(p->src[i] + ( block-1-y) * stride, p->src[i] + ( y+block ) * stride, stride);
+ memcpy(p->src[i] + (h+block +y) * stride, p->src[i] + (h-y+block-1) * stride, stride);
+ }
+
+ p->frame->linesize[i] = stride;
+ memset(p->temp[i], 0, (h + 2 * block) * stride * sizeof(int16_t));
+ }
+
+ if (p->qp)
+ p->frame->quality = p->qp * FF_QP2LAMBDA;
+ else {
+ int qpsum=0;
+ int qpcount = (height>>4) * (height>>4);
+
+ for (y = 0; y < (height>>4); y++) {
+ for (x = 0; x < (width>>4); x++)
+ qpsum += qp_store[x + y * qp_stride];
+ }
+ p->frame->quality = ff_norm_qscale((qpsum + qpcount/2) / qpcount, p->qscale_type) * FF_QP2LAMBDA;
+ }
+// init per MB qscale stuff FIXME
+ p->frame->height = height;
+ p->frame->width = width;
+
+ for (i = 0; i < count; i++) {
+ const int x1 = offset[i+count-1][0];
+ const int y1 = offset[i+count-1][1];
+ const int x1c = x1 >> p->hsub;
+ const int y1c = y1 >> p->vsub;
+ const int BLOCKc = BLOCK >> p->hsub;
+ int offset;
+ AVPacket pkt = {0};
+ int got_pkt_ptr;
+
+ av_init_packet(&pkt);
+ pkt.data = p->outbuf;
+ pkt.size = p->outbuf_size;
+
+ p->frame->data[0] = p->src[0] + x1 + y1 * p->frame->linesize[0];
+ p->frame->data[1] = p->src[1] + x1c + y1c * p->frame->linesize[1];
+ p->frame->data[2] = p->src[2] + x1c + y1c * p->frame->linesize[2];
+ p->frame->format = p->avctx_enc[i]->pix_fmt;
+
+ ret = avcodec_encode_video2(p->avctx_enc[i], &pkt, p->frame, &got_pkt_ptr);
+ if (ret < 0) {
+ av_log(p->avctx_enc[i], AV_LOG_ERROR, "Encoding failed\n");
+ continue;
+ }
+
+ p->frame_dec = p->avctx_enc[i]->coded_frame;
+
+ offset = (BLOCK-x1) + (BLOCK-y1) * p->frame_dec->linesize[0];
+
+ for (y = 0; y < height; y++)
+ for (x = 0; x < width; x++)
+ p->temp[0][x + y * p->temp_stride[0]] += p->frame_dec->data[0][x + y * p->frame_dec->linesize[0] + offset];
+
+ if (!src[2] || !dst[2])
+ continue;
+
+ offset = (BLOCKc-x1c) + (BLOCKc-y1c) * p->frame_dec->linesize[1];
+
+ for (y = 0; y < AV_CEIL_RSHIFT(height, p->vsub); y++) {
+ for (x = 0; x < AV_CEIL_RSHIFT(width, p->hsub); x++) {
+ p->temp[1][x + y * p->temp_stride[1]] += p->frame_dec->data[1][x + y * p->frame_dec->linesize[1] + offset];
+ p->temp[2][x + y * p->temp_stride[2]] += p->frame_dec->data[2][x + y * p->frame_dec->linesize[2] + offset];
+ }
+ }
+ }
+
+ for (j = 0; j < 3; j++) {
+ int is_chroma = !!j;
+ if (!dst[j])
+ continue;
+ store_slice_c(dst[j], p->temp[j], dst_stride[j], p->temp_stride[j],
+ AV_CEIL_RSHIFT(width, is_chroma ? p->hsub : 0),
+ AV_CEIL_RSHIFT(height, is_chroma ? p->vsub : 0),
+ 8-p->log2_count);
+ }
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV444P,
+ AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_YUVJ444P,
+ AV_PIX_FMT_YUVJ420P,
+ AV_PIX_FMT_GRAY8,
+ AV_PIX_FMT_NONE
+ };
+
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+
+ AVFilterContext *ctx = inlink->dst;
+ USPPContext *uspp = ctx->priv;
+ const int height = inlink->h;
+ const int width = inlink->w;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ int i;
+
+ AVCodec *enc = avcodec_find_encoder(AV_CODEC_ID_SNOW);
+ if (!enc) {
+ av_log(ctx, AV_LOG_ERROR, "SNOW encoder not found.\n");
+ return AVERROR(EINVAL);
+ }
+
+ uspp->hsub = desc->log2_chroma_w;
+ uspp->vsub = desc->log2_chroma_h;
+
+ for (i = 0; i < 3; i++) {
+ int is_chroma = !!i;
+ int w = (width + 4 * BLOCK-1) & (~(2 * BLOCK-1));
+ int h = (height + 4 * BLOCK-1) & (~(2 * BLOCK-1));
+
+ if (is_chroma) {
+ w = AV_CEIL_RSHIFT(w, uspp->hsub);
+ h = AV_CEIL_RSHIFT(h, uspp->vsub);
+ }
+
+ uspp->temp_stride[i] = w;
+ if (!(uspp->temp[i] = av_malloc_array(uspp->temp_stride[i], h * sizeof(int16_t))))
+ return AVERROR(ENOMEM);
+ if (!(uspp->src [i] = av_malloc_array(uspp->temp_stride[i], h * sizeof(uint8_t))))
+ return AVERROR(ENOMEM);
+ }
+
+ for (i = 0; i < (1<<uspp->log2_count); i++) {
+ AVCodecContext *avctx_enc;
+ AVDictionary *opts = NULL;
+ int ret;
+
+ if (!(uspp->avctx_enc[i] = avcodec_alloc_context3(NULL)))
+ return AVERROR(ENOMEM);
+
+ avctx_enc = uspp->avctx_enc[i];
+ avctx_enc->width = width + BLOCK;
+ avctx_enc->height = height + BLOCK;
+ avctx_enc->time_base = (AVRational){1,25}; // meaningless
+ avctx_enc->gop_size = INT_MAX;
+ avctx_enc->max_b_frames = 0;
+ avctx_enc->pix_fmt = inlink->format;
+ avctx_enc->flags = AV_CODEC_FLAG_QSCALE | CODEC_FLAG_LOW_DELAY;
+ avctx_enc->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
+ avctx_enc->global_quality = 123;
+ av_dict_set(&opts, "no_bitstream", "1", 0);
+ ret = avcodec_open2(avctx_enc, enc, &opts);
+ if (ret < 0)
+ return ret;
+ av_dict_free(&opts);
+ av_assert0(avctx_enc->codec);
+ }
+
+ uspp->outbuf_size = (width + BLOCK) * (height + BLOCK) * 10;
+ if (!(uspp->frame = av_frame_alloc()))
+ return AVERROR(ENOMEM);
+ if (!(uspp->outbuf = av_malloc(uspp->outbuf_size)))
+ return AVERROR(ENOMEM);
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ USPPContext *uspp = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFrame *out = in;
+
+ int qp_stride = 0;
+ uint8_t *qp_table = NULL;
+
+ /* if we are not in a constant user quantizer mode and we don't want to use
+ * the quantizers from the B-frames (B-frames often have a higher QP), we
+ * need to save the qp table from the last non B-frame; this is what the
+ * following code block does */
+ if (!uspp->qp) {
+ qp_table = av_frame_get_qp_table(in, &qp_stride, &uspp->qscale_type);
+
+ if (qp_table && !uspp->use_bframe_qp && in->pict_type != AV_PICTURE_TYPE_B) {
+ int w, h;
+
+ /* if the qp stride is not set, it means the QP are only defined on
+ * a line basis */
+ if (!qp_stride) {
+ w = AV_CEIL_RSHIFT(inlink->w, 4);
+ h = 1;
+ } else {
+ w = qp_stride;
+ h = AV_CEIL_RSHIFT(inlink->h, 4);
+ }
+
+ if (w * h > uspp->non_b_qp_alloc_size) {
+ int ret = av_reallocp_array(&uspp->non_b_qp_table, w, h);
+ if (ret < 0) {
+ uspp->non_b_qp_alloc_size = 0;
+ return ret;
+ }
+ uspp->non_b_qp_alloc_size = w * h;
+ }
+
+ av_assert0(w * h <= uspp->non_b_qp_alloc_size);
+ memcpy(uspp->non_b_qp_table, qp_table, w * h);
+ }
+ }
+
+ if (uspp->log2_count && !ctx->is_disabled) {
+ if (!uspp->use_bframe_qp && uspp->non_b_qp_table)
+ qp_table = uspp->non_b_qp_table;
+
+ if (qp_table || uspp->qp) {
+
+ /* get a new frame if in-place is not possible or if the dimensions
+ * are not multiple of 8 */
+ if (!av_frame_is_writable(in) || (inlink->w & 7) || (inlink->h & 7)) {
+ const int aligned_w = FFALIGN(inlink->w, 8);
+ const int aligned_h = FFALIGN(inlink->h, 8);
+
+ out = ff_get_video_buffer(outlink, aligned_w, aligned_h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+ out->width = in->width;
+ out->height = in->height;
+ }
+
+ filter(uspp, out->data, in->data, out->linesize, in->linesize,
+ inlink->w, inlink->h, qp_table, qp_stride);
+ }
+ }
+
+ if (in != out) {
+ if (in->data[3])
+ av_image_copy_plane(out->data[3], out->linesize[3],
+ in ->data[3], in ->linesize[3],
+ inlink->w, inlink->h);
+ av_frame_free(&in);
+ }
+ return ff_filter_frame(outlink, out);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ USPPContext *uspp = ctx->priv;
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ av_freep(&uspp->temp[i]);
+ av_freep(&uspp->src[i]);
+ }
+
+ for (i = 0; i < (1 << uspp->log2_count); i++) {
+ avcodec_close(uspp->avctx_enc[i]);
+ av_freep(&uspp->avctx_enc[i]);
+ }
+
+ av_freep(&uspp->non_b_qp_table);
+ av_freep(&uspp->outbuf);
+ av_frame_free(&uspp->frame);
+}
+
+static const AVFilterPad uspp_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad uspp_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_uspp = {
+ .name = "uspp",
+ .description = NULL_IF_CONFIG_SMALL("Apply Ultra Simple / Slow Post-processing filter."),
+ .priv_size = sizeof(USPPContext),
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = uspp_inputs,
+ .outputs = uspp_outputs,
+ .priv_class = &uspp_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
+};
diff --git a/libavfilter/vf_vaguedenoiser.c b/libavfilter/vf_vaguedenoiser.c
new file mode 100644
index 0000000000..2b93e70e57
--- /dev/null
+++ b/libavfilter/vf_vaguedenoiser.c
@@ -0,0 +1,583 @@
+/*
+ * Copyright (c) 2003 LeFunGus, lefungus@altern.org
+ *
+ * This file is part of FFmpeg
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <float.h>
+
+#include "libavutil/imgutils.h"
+#include "libavutil/attributes.h"
+#include "libavutil/common.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/intreadwrite.h"
+#include "libavutil/opt.h"
+
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+typedef struct VagueDenoiserContext {
+ const AVClass *class;
+
+ float threshold;
+ float percent;
+ int method;
+ int nsteps;
+ int planes;
+
+ int depth;
+ int peak;
+ int nb_planes;
+ int planeheight[4];
+ int planewidth[4];
+
+ float *block;
+ float *in;
+ float *out;
+ float *tmp;
+
+ int hlowsize[4][32];
+ int hhighsize[4][32];
+ int vlowsize[4][32];
+ int vhighsize[4][32];
+
+ void (*thresholding)(float *block, const int width, const int height,
+ const int stride, const float threshold,
+ const float percent, const int nsteps);
+} VagueDenoiserContext;
+
+#define OFFSET(x) offsetof(VagueDenoiserContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
+static const AVOption vaguedenoiser_options[] = {
+ { "threshold", "set filtering strength", OFFSET(threshold), AV_OPT_TYPE_FLOAT, {.dbl=2.}, 0,DBL_MAX, FLAGS },
+ { "method", "set filtering method", OFFSET(method), AV_OPT_TYPE_INT, {.i64=2 }, 0, 2, FLAGS, "method" },
+ { "hard", "hard thresholding", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "method" },
+ { "soft", "soft thresholding", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "method" },
+ { "garrote", "garotte thresholding", 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, FLAGS, "method" },
+ { "nsteps", "set number of steps", OFFSET(nsteps), AV_OPT_TYPE_INT, {.i64=6 }, 1, 32, FLAGS },
+ { "percent", "set percent of full denoising", OFFSET(percent),AV_OPT_TYPE_FLOAT, {.dbl=85}, 0,100, FLAGS },
+ { "planes", "set planes to filter", OFFSET(planes), AV_OPT_TYPE_INT, {.i64=15 }, 0, 15, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(vaguedenoiser);
+
+#define NPAD 10
+
+static const float analysis_low[9] = {
+ 0.037828455506995f, -0.023849465019380f, -0.110624404418423f, 0.377402855612654f,
+ 0.852698679009403f, 0.377402855612654f, -0.110624404418423f, -0.023849465019380f, 0.037828455506995f
+};
+
+static const float analysis_high[7] = {
+ -0.064538882628938f, 0.040689417609558f, 0.418092273222212f, -0.788485616405664f,
+ 0.418092273222212f, 0.040689417609558f, -0.064538882628938f
+};
+
+static const float synthesis_low[7] = {
+ -0.064538882628938f, -0.040689417609558f, 0.418092273222212f, 0.788485616405664f,
+ 0.418092273222212f, -0.040689417609558f, -0.064538882628938f
+};
+
+static const float synthesis_high[9] = {
+ -0.037828455506995f, -0.023849465019380f, 0.110624404418423f, 0.377402855612654f,
+ -0.852698679009403f, 0.377402855612654f, 0.110624404418423f, -0.023849465019380f, -0.037828455506995f
+};
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_GRAY8,
+ AV_PIX_FMT_GRAY16,
+ AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
+ AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P,
+ AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P,
+ AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ444P,
+ AV_PIX_FMT_YUVJ411P,
+ AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
+ AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
+ AV_PIX_FMT_YUV440P10,
+ AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV420P12,
+ AV_PIX_FMT_YUV440P12,
+ AV_PIX_FMT_YUV444P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV420P14,
+ AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
+ AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
+ AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16,
+ AV_PIX_FMT_NONE
+ };
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ VagueDenoiserContext *s = inlink->dst->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ int p, i, nsteps_width, nsteps_height, nsteps_max;
+
+ s->depth = desc->comp[0].depth;
+ s->nb_planes = desc->nb_components;
+
+ s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
+ s->planeheight[0] = s->planeheight[3] = inlink->h;
+ s->planewidth[1] = s->planewidth[2] = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
+ s->planewidth[0] = s->planewidth[3] = inlink->w;
+
+ s->block = av_malloc_array(inlink->w * inlink->h, sizeof(*s->block));
+ s->in = av_malloc_array(32 + FFMAX(inlink->w, inlink->h), sizeof(*s->in));
+ s->out = av_malloc_array(32 + FFMAX(inlink->w, inlink->h), sizeof(*s->out));
+ s->tmp = av_malloc_array(32 + FFMAX(inlink->w, inlink->h), sizeof(*s->tmp));
+
+ if (!s->block || !s->in || !s->out || !s->tmp)
+ return AVERROR(ENOMEM);
+
+ s->threshold *= 1 << (s->depth - 8);
+ s->peak = (1 << s->depth) - 1;
+
+ nsteps_width = ((s->planes & 2 || s->planes & 4) && s->nb_planes > 1) ? s->planewidth[1] : s->planewidth[0];
+ nsteps_height = ((s->planes & 2 || s->planes & 4) && s->nb_planes > 1) ? s->planeheight[1] : s->planeheight[0];
+
+ for (nsteps_max = 1; nsteps_max < 15; nsteps_max++) {
+ if (pow(2, nsteps_max) >= nsteps_width || pow(2, nsteps_max) >= nsteps_height)
+ break;
+ }
+
+ s->nsteps = FFMIN(s->nsteps, nsteps_max - 2);
+
+ for (p = 0; p < 4; p++) {
+ s->hlowsize[p][0] = (s->planewidth[p] + 1) >> 1;
+ s->hhighsize[p][0] = s->planewidth[p] >> 1;
+ s->vlowsize[p][0] = (s->planeheight[p] + 1) >> 1;
+ s->vhighsize[p][0] = s->planeheight[p] >> 1;
+
+ for (i = 1; i < s->nsteps; i++) {
+ s->hlowsize[p][i] = (s->hlowsize[p][i - 1] + 1) >> 1;
+ s->hhighsize[p][i] = s->hlowsize[p][i - 1] >> 1;
+ s->vlowsize[p][i] = (s->vlowsize[p][i - 1] + 1) >> 1;
+ s->vhighsize[p][i] = s->vlowsize[p][i - 1] >> 1;
+ }
+ }
+
+ return 0;
+}
+
+static inline void copy(const float *p1, float *p2, const int length)
+{
+ memcpy(p2, p1, length * sizeof(float));
+}
+
+static inline void copyv(const float *p1, const int stride1, float *p2, const int length)
+{
+ int i;
+
+ for (i = 0; i < length; i++) {
+ p2[i] = *p1;
+ p1 += stride1;
+ }
+}
+
+static inline void copyh(const float *p1, float *p2, const int stride2, const int length)
+{
+ int i;
+
+ for (i = 0; i < length; i++) {
+ *p2 = p1[i];
+ p2 += stride2;
+ }
+}
+
+// Do symmetric extension of data using prescribed symmetries
+// Original values are in output[npad] through output[npad+size-1]
+// New values will be placed in output[0] through output[npad] and in output[npad+size] through output[2*npad+size-1] (note: end values may not be filled in)
+// extension at left bdry is ... 3 2 1 0 | 0 1 2 3 ...
+// same for right boundary
+// if right_ext=1 then ... 3 2 1 0 | 1 2 3
+static void symmetric_extension(float *output, const int size, const int left_ext, const int right_ext)
+{
+ int first = NPAD;
+ int last = NPAD - 1 + size;
+ const int originalLast = last;
+ int i, nextend, idx;
+
+ if (left_ext == 2)
+ output[--first] = output[NPAD];
+ if (right_ext == 2)
+ output[++last] = output[originalLast];
+
+ // extend left end
+ nextend = first;
+ for (i = 0; i < nextend; i++)
+ output[--first] = output[NPAD + 1 + i];
+
+ idx = NPAD + NPAD - 1 + size;
+
+ // extend right end
+ nextend = idx - last;
+ for (i = 0; i < nextend; i++)
+ output[++last] = output[originalLast - 1 - i];
+}
+
+static void transform_step(float *input, float *output, const int size, const int low_size, VagueDenoiserContext *s)
+{
+ int i;
+
+ symmetric_extension(input, size, 1, 1);
+
+ for (i = NPAD; i < NPAD + low_size; i++) {
+ const float a = input[2 * i - 14] * analysis_low[0];
+ const float b = input[2 * i - 13] * analysis_low[1];
+ const float c = input[2 * i - 12] * analysis_low[2];
+ const float d = input[2 * i - 11] * analysis_low[3];
+ const float e = input[2 * i - 10] * analysis_low[4];
+ const float f = input[2 * i - 9] * analysis_low[3];
+ const float g = input[2 * i - 8] * analysis_low[2];
+ const float h = input[2 * i - 7] * analysis_low[1];
+ const float k = input[2 * i - 6] * analysis_low[0];
+
+ output[i] = a + b + c + d + e + f + g + h + k;
+ }
+
+ for (i = NPAD; i < NPAD + low_size; i++) {
+ const float a = input[2 * i - 12] * analysis_high[0];
+ const float b = input[2 * i - 11] * analysis_high[1];
+ const float c = input[2 * i - 10] * analysis_high[2];
+ const float d = input[2 * i - 9] * analysis_high[3];
+ const float e = input[2 * i - 8] * analysis_high[2];
+ const float f = input[2 * i - 7] * analysis_high[1];
+ const float g = input[2 * i - 6] * analysis_high[0];
+
+ output[i + low_size] = a + b + c + d + e + f + g;
+ }
+}
+
+static void invert_step(const float *input, float *output, float *temp, const int size, VagueDenoiserContext *s)
+{
+ const int low_size = (size + 1) >> 1;
+ const int high_size = size >> 1;
+ int left_ext = 1, right_ext, i;
+ int findex;
+
+ memcpy(temp + NPAD, input + NPAD, low_size * sizeof(float));
+
+ right_ext = (size % 2 == 0) ? 2 : 1;
+ symmetric_extension(temp, low_size, left_ext, right_ext);
+
+ memset(output, 0, (NPAD + NPAD + size) * sizeof(float));
+ findex = (size + 2) >> 1;
+
+ for (i = 9; i < findex + 11; i++) {
+ const float a = temp[i] * synthesis_low[0];
+ const float b = temp[i] * synthesis_low[1];
+ const float c = temp[i] * synthesis_low[2];
+ const float d = temp[i] * synthesis_low[3];
+
+ output[2 * i - 13] += a;
+ output[2 * i - 12] += b;
+ output[2 * i - 11] += c;
+ output[2 * i - 10] += d;
+ output[2 * i - 9] += c;
+ output[2 * i - 8] += b;
+ output[2 * i - 7] += a;
+ }
+
+ memcpy(temp + NPAD, input + NPAD + low_size, high_size * sizeof(float));
+
+ left_ext = 2;
+ right_ext = (size % 2 == 0) ? 1 : 2;
+ symmetric_extension(temp, high_size, left_ext, right_ext);
+
+ for (i = 8; i < findex + 11; i++) {
+ const float a = temp[i] * synthesis_high[0];
+ const float b = temp[i] * synthesis_high[1];
+ const float c = temp[i] * synthesis_high[2];
+ const float d = temp[i] * synthesis_high[3];
+ const float e = temp[i] * synthesis_high[4];
+
+ output[2 * i - 13] += a;
+ output[2 * i - 12] += b;
+ output[2 * i - 11] += c;
+ output[2 * i - 10] += d;
+ output[2 * i - 9] += e;
+ output[2 * i - 8] += d;
+ output[2 * i - 7] += c;
+ output[2 * i - 6] += b;
+ output[2 * i - 5] += a;
+ }
+}
+
+static void hard_thresholding(float *block, const int width, const int height,
+ const int stride, const float threshold,
+ const float percent, const int unused)
+{
+ const float frac = 1.f - percent * 0.01f;
+ int y, x;
+
+ for (y = 0; y < height; y++) {
+ for (x = 0; x < width; x++) {
+ if (FFABS(block[x]) <= threshold)
+ block[x] *= frac;
+ }
+ block += stride;
+ }
+}
+
+static void soft_thresholding(float *block, const int width, const int height, const int stride,
+ const float threshold, const float percent, const int nsteps)
+{
+ const float frac = 1.f - percent * 0.01f;
+ const float shift = threshold * 0.01f * percent;
+ int w = width;
+ int h = height;
+ int y, x, l;
+
+ for (l = 0; l < nsteps; l++) {
+ w = (w + 1) >> 1;
+ h = (h + 1) >> 1;
+ }
+
+ for (y = 0; y < height; y++) {
+ const int x0 = (y < h) ? w : 0;
+ for (x = x0; x < width; x++) {
+ const float temp = FFABS(block[x]);
+ if (temp <= threshold)
+ block[x] *= frac;
+ else
+ block[x] = (block[x] < 0.f ? -1.f : (block[x] > 0.f ? 1.f : 0.f)) * (temp - shift);
+ }
+ block += stride;
+ }
+}
+
+static void qian_thresholding(float *block, const int width, const int height,
+ const int stride, const float threshold,
+ const float percent, const int unused)
+{
+ const float percent01 = percent * 0.01f;
+ const float tr2 = threshold * threshold * percent01;
+ const float frac = 1.f - percent01;
+ int y, x;
+
+ for (y = 0; y < height; y++) {
+ for (x = 0; x < width; x++) {
+ const float temp = FFABS(block[x]);
+ if (temp <= threshold) {
+ block[x] *= frac;
+ } else {
+ const float tp2 = temp * temp;
+ block[x] *= (tp2 - tr2) / tp2;
+ }
+ }
+ block += stride;
+ }
+}
+
+static void filter(VagueDenoiserContext *s, AVFrame *in, AVFrame *out)
+{
+ int p, y, x, i, j;
+
+ for (p = 0; p < s->nb_planes; p++) {
+ const int height = s->planeheight[p];
+ const int width = s->planewidth[p];
+ const uint8_t *srcp8 = in->data[p];
+ const uint16_t *srcp16 = (const uint16_t *)in->data[p];
+ uint8_t *dstp8 = out->data[p];
+ uint16_t *dstp16 = (uint16_t *)out->data[p];
+ float *output = s->block;
+ int h_low_size0 = width;
+ int v_low_size0 = height;
+ int nsteps_transform = s->nsteps;
+ int nsteps_invert = s->nsteps;
+ const float *input = s->block;
+
+ if (!((1 << p) & s->planes)) {
+ av_image_copy_plane(out->data[p], out->linesize[p], in->data[p], in->linesize[p],
+ s->planewidth[p], s->planeheight[p]);
+ continue;
+ }
+
+ if (s->depth <= 8) {
+ for (y = 0; y < height; y++) {
+ for (x = 0; x < width; x++)
+ output[x] = srcp8[x];
+ srcp8 += in->linesize[p];
+ output += width;
+ }
+ } else {
+ for (y = 0; y < height; y++) {
+ for (x = 0; x < width; x++)
+ output[x] = srcp16[x];
+ srcp16 += in->linesize[p] / 2;
+ output += width;
+ }
+ }
+
+ while (nsteps_transform--) {
+ int low_size = (h_low_size0 + 1) >> 1;
+ float *input = s->block;
+ for (j = 0; j < v_low_size0; j++) {
+ copy(input, s->in + NPAD, h_low_size0);
+ transform_step(s->in, s->out, h_low_size0, low_size, s);
+ copy(s->out + NPAD, input, h_low_size0);
+ input += width;
+ }
+
+ low_size = (v_low_size0 + 1) >> 1;
+ input = s->block;
+ for (j = 0; j < h_low_size0; j++) {
+ copyv(input, width, s->in + NPAD, v_low_size0);
+ transform_step(s->in, s->out, v_low_size0, low_size, s);
+ copyh(s->out + NPAD, input, width, v_low_size0);
+ input++;
+ }
+
+ h_low_size0 = (h_low_size0 + 1) >> 1;
+ v_low_size0 = (v_low_size0 + 1) >> 1;
+ }
+
+ s->thresholding(s->block, width, height, width, s->threshold, s->percent, s->nsteps);
+
+ while (nsteps_invert--) {
+ const int idx = s->vlowsize[p][nsteps_invert] + s->vhighsize[p][nsteps_invert];
+ const int idx2 = s->hlowsize[p][nsteps_invert] + s->hhighsize[p][nsteps_invert];
+ float * idx3 = s->block;
+ for (i = 0; i < idx2; i++) {
+ copyv(idx3, width, s->in + NPAD, idx);
+ invert_step(s->in, s->out, s->tmp, idx, s);
+ copyh(s->out + NPAD, idx3, width, idx);
+ idx3++;
+ }
+
+ idx3 = s->block;
+ for (i = 0; i < idx; i++) {
+ copy(idx3, s->in + NPAD, idx2);
+ invert_step(s->in, s->out, s->tmp, idx2, s);
+ copy(s->out + NPAD, idx3, idx2);
+ idx3 += width;
+ }
+ }
+
+ if (s->depth <= 8) {
+ for (y = 0; y < height; y++) {
+ for (x = 0; x < width; x++)
+ dstp8[x] = av_clip_uint8(input[x] + 0.5f);
+ input += width;
+ dstp8 += out->linesize[p];
+ }
+ } else {
+ for (y = 0; y < height; y++) {
+ for (x = 0; x < width; x++)
+ dstp16[x] = av_clip(input[x] + 0.5f, 0, s->peak);
+ input += width;
+ dstp16 += out->linesize[p] / 2;
+ }
+ }
+ }
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ VagueDenoiserContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFrame *out;
+ int direct = av_frame_is_writable(in);
+
+ if (direct) {
+ out = in;
+ } else {
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+
+ av_frame_copy_props(out, in);
+ }
+
+ filter(s, in, out);
+
+ if (!direct)
+ av_frame_free(&in);
+
+ return ff_filter_frame(outlink, out);
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ VagueDenoiserContext *s = ctx->priv;
+
+ switch (s->method) {
+ case 0:
+ s->thresholding = hard_thresholding;
+ break;
+ case 1:
+ s->thresholding = soft_thresholding;
+ break;
+ case 2:
+ s->thresholding = qian_thresholding;
+ break;
+ }
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ VagueDenoiserContext *s = ctx->priv;
+
+ av_freep(&s->block);
+ av_freep(&s->in);
+ av_freep(&s->out);
+ av_freep(&s->tmp);
+}
+
+static const AVFilterPad vaguedenoiser_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+
+static const AVFilterPad vaguedenoiser_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_vaguedenoiser = {
+ .name = "vaguedenoiser",
+ .description = NULL_IF_CONFIG_SMALL("Apply a Wavelet based Denoiser."),
+ .priv_size = sizeof(VagueDenoiserContext),
+ .priv_class = &vaguedenoiser_class,
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = vaguedenoiser_inputs,
+ .outputs = vaguedenoiser_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+};
diff --git a/libavfilter/vf_vectorscope.c b/libavfilter/vf_vectorscope.c
new file mode 100644
index 0000000000..987bc66bd4
--- /dev/null
+++ b/libavfilter/vf_vectorscope.c
@@ -0,0 +1,1359 @@
+/*
+ * Copyright (c) 2015 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/intreadwrite.h"
+#include "libavutil/opt.h"
+#include "libavutil/parseutils.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/xga_font_data.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+enum VectorscopeMode {
+ GRAY,
+ COLOR,
+ COLOR2,
+ COLOR3,
+ COLOR4,
+ COLOR5,
+ MODE_NB
+};
+
+typedef struct VectorscopeContext {
+ const AVClass *class;
+ int mode;
+ int intensity;
+ float fintensity;
+ uint16_t bg_color[4];
+ int planewidth[4];
+ int planeheight[4];
+ int hsub, vsub;
+ int x, y, pd;
+ int is_yuv;
+ int size;
+ int depth;
+ int mult;
+ int envelope;
+ int graticule;
+ float opacity;
+ float bgopacity;
+ float lthreshold;
+ float hthreshold;
+ int tmin;
+ int tmax;
+ int flags;
+ int colorspace;
+ int cs;
+ uint8_t *peak_memory;
+ uint8_t **peak;
+
+ void (*vectorscope)(struct VectorscopeContext *s,
+ AVFrame *in, AVFrame *out, int pd);
+ void (*graticulef)(struct VectorscopeContext *s, AVFrame *out,
+ int X, int Y, int D, int P);
+} VectorscopeContext;
+
+#define OFFSET(x) offsetof(VectorscopeContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption vectorscope_options[] = {
+ { "mode", "set vectorscope mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=0}, 0, MODE_NB-1, FLAGS, "mode"},
+ { "m", "set vectorscope mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=0}, 0, MODE_NB-1, FLAGS, "mode"},
+ { "gray", 0, 0, AV_OPT_TYPE_CONST, {.i64=GRAY}, 0, 0, FLAGS, "mode" },
+ { "color", 0, 0, AV_OPT_TYPE_CONST, {.i64=COLOR}, 0, 0, FLAGS, "mode" },
+ { "color2", 0, 0, AV_OPT_TYPE_CONST, {.i64=COLOR2}, 0, 0, FLAGS, "mode" },
+ { "color3", 0, 0, AV_OPT_TYPE_CONST, {.i64=COLOR3}, 0, 0, FLAGS, "mode" },
+ { "color4", 0, 0, AV_OPT_TYPE_CONST, {.i64=COLOR4}, 0, 0, FLAGS, "mode" },
+ { "color5", 0, 0, AV_OPT_TYPE_CONST, {.i64=COLOR5}, 0, 0, FLAGS, "mode" },
+ { "x", "set color component on X axis", OFFSET(x), AV_OPT_TYPE_INT, {.i64=1}, 0, 2, FLAGS},
+ { "y", "set color component on Y axis", OFFSET(y), AV_OPT_TYPE_INT, {.i64=2}, 0, 2, FLAGS},
+ { "intensity", "set intensity", OFFSET(fintensity), AV_OPT_TYPE_FLOAT, {.dbl=0.004}, 0, 1, FLAGS},
+ { "i", "set intensity", OFFSET(fintensity), AV_OPT_TYPE_FLOAT, {.dbl=0.004}, 0, 1, FLAGS},
+ { "envelope", "set envelope", OFFSET(envelope), AV_OPT_TYPE_INT, {.i64=0}, 0, 3, FLAGS, "envelope"},
+ { "e", "set envelope", OFFSET(envelope), AV_OPT_TYPE_INT, {.i64=0}, 0, 3, FLAGS, "envelope"},
+ { "none", 0, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "envelope" },
+ { "instant", 0, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "envelope" },
+ { "peak", 0, 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, FLAGS, "envelope" },
+ { "peak+instant", 0, 0, AV_OPT_TYPE_CONST, {.i64=3}, 0, 0, FLAGS, "envelope" },
+ { "graticule", "set graticule", OFFSET(graticule), AV_OPT_TYPE_INT, {.i64=0}, 0, 2, FLAGS, "graticule"},
+ { "g", "set graticule", OFFSET(graticule), AV_OPT_TYPE_INT, {.i64=0}, 0, 2, FLAGS, "graticule"},
+ { "none", 0, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "graticule" },
+ { "green", 0, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "graticule" },
+ { "color", 0, 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, FLAGS, "graticule" },
+ { "opacity", "set graticule opacity", OFFSET(opacity), AV_OPT_TYPE_FLOAT, {.dbl=0.75}, 0, 1, FLAGS},
+ { "o", "set graticule opacity", OFFSET(opacity), AV_OPT_TYPE_FLOAT, {.dbl=0.75}, 0, 1, FLAGS},
+ { "flags", "set graticule flags", OFFSET(flags), AV_OPT_TYPE_FLAGS, {.i64=4}, 0, 7, FLAGS, "flags"},
+ { "f", "set graticule flags", OFFSET(flags), AV_OPT_TYPE_FLAGS, {.i64=4}, 0, 7, FLAGS, "flags"},
+ { "white", "draw white point", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "flags" },
+ { "black", "draw black point", 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, FLAGS, "flags" },
+ { "name", "draw point name", 0, AV_OPT_TYPE_CONST, {.i64=4}, 0, 0, FLAGS, "flags" },
+ { "bgopacity", "set background opacity", OFFSET(bgopacity), AV_OPT_TYPE_FLOAT, {.dbl=0.3}, 0, 1, FLAGS},
+ { "b", "set background opacity", OFFSET(bgopacity), AV_OPT_TYPE_FLOAT, {.dbl=0.3}, 0, 1, FLAGS},
+ { "lthreshold", "set low threshold", OFFSET(lthreshold), AV_OPT_TYPE_FLOAT, {.dbl=0}, 0, 1, FLAGS},
+ { "l", "set low threshold", OFFSET(lthreshold), AV_OPT_TYPE_FLOAT, {.dbl=0}, 0, 1, FLAGS},
+ { "hthreshold", "set high threshold", OFFSET(hthreshold), AV_OPT_TYPE_FLOAT, {.dbl=1}, 0, 1, FLAGS},
+ { "h", "set high threshold", OFFSET(hthreshold), AV_OPT_TYPE_FLOAT, {.dbl=1}, 0, 1, FLAGS},
+ { "colorspace", "set colorspace", OFFSET(colorspace), AV_OPT_TYPE_INT, {.i64=0}, 0, 2, FLAGS, "colorspace"},
+ { "c", "set colorspace", OFFSET(colorspace), AV_OPT_TYPE_INT, {.i64=0}, 0, 2, FLAGS, "colorspace"},
+ { "auto", 0, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "colorspace" },
+ { "601", 0, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "colorspace" },
+ { "709", 0, 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, FLAGS, "colorspace" },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(vectorscope);
+
+static const enum AVPixelFormat out_yuv8_pix_fmts[] = {
+ AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUV444P,
+ AV_PIX_FMT_NONE
+};
+
+static const enum AVPixelFormat out_yuv9_pix_fmts[] = {
+ AV_PIX_FMT_YUVA444P9, AV_PIX_FMT_YUV444P9,
+ AV_PIX_FMT_NONE
+};
+
+static const enum AVPixelFormat out_yuv10_pix_fmts[] = {
+ AV_PIX_FMT_YUVA444P10, AV_PIX_FMT_YUV444P10,
+ AV_PIX_FMT_NONE
+};
+
+static const enum AVPixelFormat out_yuv12_pix_fmts[] = {
+ AV_PIX_FMT_YUV444P12,
+ AV_PIX_FMT_NONE
+};
+
+static const enum AVPixelFormat out_rgb8_pix_fmts[] = {
+ AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRP,
+ AV_PIX_FMT_NONE
+};
+
+static const enum AVPixelFormat out_rgb9_pix_fmts[] = {
+ AV_PIX_FMT_GBRP9,
+ AV_PIX_FMT_NONE
+};
+
+static const enum AVPixelFormat out_rgb10_pix_fmts[] = {
+ AV_PIX_FMT_GBRP10,
+ AV_PIX_FMT_NONE
+};
+
+static const enum AVPixelFormat out_rgb12_pix_fmts[] = {
+ AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRAP12,
+ AV_PIX_FMT_NONE
+};
+
+static const enum AVPixelFormat in1_pix_fmts[] = {
+ AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUVJ444P,
+ AV_PIX_FMT_YUV444P9, AV_PIX_FMT_YUV444P10,
+ AV_PIX_FMT_YUVA444P9, AV_PIX_FMT_YUVA444P10,
+ AV_PIX_FMT_YUV444P12,
+ AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRP,
+ AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
+ AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRAP12,
+ AV_PIX_FMT_NONE
+};
+
+static const enum AVPixelFormat in2_pix_fmts[] = {
+ AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUVJ420P,
+ AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVJ422P,
+ AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUVJ444P,
+ AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUVJ411P,
+ AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRP,
+ AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
+ AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRAP12,
+ AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
+ AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
+ AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9,
+ AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10,
+ AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV440P12,
+ AV_PIX_FMT_NONE
+};
+
+static int query_formats(AVFilterContext *ctx)
+{
+ VectorscopeContext *s = ctx->priv;
+ const enum AVPixelFormat *out_pix_fmts;
+ const AVPixFmtDescriptor *desc;
+ AVFilterFormats *avff;
+ int depth, rgb, i, ret;
+
+ if (!ctx->inputs[0]->in_formats ||
+ !ctx->inputs[0]->in_formats->nb_formats) {
+ return AVERROR(EAGAIN);
+ }
+
+ if (!ctx->inputs[0]->out_formats) {
+ const enum AVPixelFormat *in_pix_fmts;
+
+ if ((s->x == 1 && s->y == 2) || (s->x == 2 && s->y == 1))
+ in_pix_fmts = in2_pix_fmts;
+ else
+ in_pix_fmts = in1_pix_fmts;
+ if ((ret = ff_formats_ref(ff_make_format_list(in_pix_fmts), &ctx->inputs[0]->out_formats)) < 0)
+ return ret;
+ }
+
+ avff = ctx->inputs[0]->in_formats;
+ desc = av_pix_fmt_desc_get(avff->formats[0]);
+ rgb = desc->flags & AV_PIX_FMT_FLAG_RGB;
+ depth = desc->comp[0].depth;
+ for (i = 1; i < avff->nb_formats; i++) {
+ desc = av_pix_fmt_desc_get(avff->formats[i]);
+ if (rgb != (desc->flags & AV_PIX_FMT_FLAG_RGB) ||
+ depth != desc->comp[0].depth)
+ return AVERROR(EAGAIN);
+ }
+
+ if (rgb && depth == 8)
+ out_pix_fmts = out_rgb8_pix_fmts;
+ else if (rgb && depth == 9)
+ out_pix_fmts = out_rgb9_pix_fmts;
+ else if (rgb && depth == 10)
+ out_pix_fmts = out_rgb10_pix_fmts;
+ else if (rgb && depth == 12)
+ out_pix_fmts = out_rgb12_pix_fmts;
+ else if (depth == 8)
+ out_pix_fmts = out_yuv8_pix_fmts;
+ else if (depth == 9)
+ out_pix_fmts = out_yuv9_pix_fmts;
+ else if (depth == 10)
+ out_pix_fmts = out_yuv10_pix_fmts;
+ else if (depth == 12)
+ out_pix_fmts = out_yuv12_pix_fmts;
+ else
+ return AVERROR(EAGAIN);
+ if ((ret = ff_formats_ref(ff_make_format_list(out_pix_fmts), &ctx->outputs[0]->in_formats)) < 0)
+ return ret;
+
+ return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ VectorscopeContext *s = outlink->src->priv;
+ int i;
+
+ s->intensity = s->fintensity * (s->size - 1);
+ outlink->h = outlink->w = s->size;
+ outlink->sample_aspect_ratio = (AVRational){1,1};
+
+ s->peak_memory = av_calloc(s->size, s->size);
+ if (!s->peak_memory)
+ return AVERROR(ENOMEM);
+
+ s->peak = av_calloc(s->size, sizeof(*s->peak));
+ if (!s->peak)
+ return AVERROR(ENOMEM);
+
+ for (i = 0; i < s->size; i++)
+ s->peak[i] = s->peak_memory + s->size * i;
+
+ return 0;
+}
+
+static void envelope_instant16(VectorscopeContext *s, AVFrame *out)
+{
+ const int dlinesize = out->linesize[0] / 2;
+ uint16_t *dpd = s->mode == COLOR || !s->is_yuv ? (uint16_t *)out->data[s->pd] : (uint16_t *)out->data[0];
+ const int max = s->size - 1;
+ int i, j;
+
+ for (i = 0; i < out->height; i++) {
+ for (j = 0; j < out->width; j++) {
+ const int pos = i * dlinesize + j;
+ const int poa = (i - 1) * dlinesize + j;
+ const int pob = (i + 1) * dlinesize + j;
+
+ if (dpd[pos] && (((!j || !dpd[pos - 1]) || ((j == (out->width - 1)) || !dpd[pos + 1]))
+ || ((!i || !dpd[poa]) || ((i == (out->height - 1)) || !dpd[pob])))) {
+ dpd[pos] = max;
+ }
+ }
+ }
+}
+
+static void envelope_peak16(VectorscopeContext *s, AVFrame *out)
+{
+ const int dlinesize = out->linesize[0] / 2;
+ uint16_t *dpd = s->mode == COLOR || !s->is_yuv ? (uint16_t *)out->data[s->pd] : (uint16_t *)out->data[0];
+ const int max = s->size - 1;
+ int i, j;
+
+ for (i = 0; i < out->height; i++) {
+ for (j = 0; j < out->width; j++) {
+ const int pos = i * dlinesize + j;
+
+ if (dpd[pos])
+ s->peak[i][j] = 1;
+ }
+ }
+
+ if (s->envelope == 3)
+ envelope_instant16(s, out);
+
+ for (i = 0; i < out->height; i++) {
+ for (j = 0; j < out->width; j++) {
+ const int pos = i * dlinesize + j;
+
+ if (s->peak[i][j] && (((!j || !s->peak[i][j-1]) || ((j == (out->width - 1)) || !s->peak[i][j + 1]))
+ || ((!i || !s->peak[i-1][j]) || ((i == (out->height - 1)) || !s->peak[i + 1][j])))) {
+ dpd[pos] = max;
+ }
+ }
+ }
+}
+
+static void envelope_instant(VectorscopeContext *s, AVFrame *out)
+{
+ const int dlinesize = out->linesize[0];
+ uint8_t *dpd = s->mode == COLOR || !s->is_yuv ? out->data[s->pd] : out->data[0];
+ int i, j;
+
+ for (i = 0; i < out->height; i++) {
+ for (j = 0; j < out->width; j++) {
+ const int pos = i * dlinesize + j;
+ const int poa = (i - 1) * dlinesize + j;
+ const int pob = (i + 1) * dlinesize + j;
+
+ if (dpd[pos] && (((!j || !dpd[pos - 1]) || ((j == (out->width - 1)) || !dpd[pos + 1]))
+ || ((!i || !dpd[poa]) || ((i == (out->height - 1)) || !dpd[pob])))) {
+ dpd[pos] = 255;
+ }
+ }
+ }
+}
+
+static void envelope_peak(VectorscopeContext *s, AVFrame *out)
+{
+ const int dlinesize = out->linesize[0];
+ uint8_t *dpd = s->mode == COLOR || !s->is_yuv ? out->data[s->pd] : out->data[0];
+ int i, j;
+
+ for (i = 0; i < out->height; i++) {
+ for (j = 0; j < out->width; j++) {
+ const int pos = i * dlinesize + j;
+
+ if (dpd[pos])
+ s->peak[i][j] = 1;
+ }
+ }
+
+ if (s->envelope == 3)
+ envelope_instant(s, out);
+
+ for (i = 0; i < out->height; i++) {
+ for (j = 0; j < out->width; j++) {
+ const int pos = i * dlinesize + j;
+
+ if (s->peak[i][j] && (((!j || !s->peak[i][j-1]) || ((j == (out->width - 1)) || !s->peak[i][j + 1]))
+ || ((!i || !s->peak[i-1][j]) || ((i == (out->height - 1)) || !s->peak[i + 1][j])))) {
+ dpd[pos] = 255;
+ }
+ }
+ }
+}
+
+static void envelope16(VectorscopeContext *s, AVFrame *out)
+{
+ if (!s->envelope) {
+ return;
+ } else if (s->envelope == 1) {
+ envelope_instant16(s, out);
+ } else {
+ envelope_peak16(s, out);
+ }
+}
+
+static void envelope(VectorscopeContext *s, AVFrame *out)
+{
+ if (!s->envelope) {
+ return;
+ } else if (s->envelope == 1) {
+ envelope_instant(s, out);
+ } else {
+ envelope_peak(s, out);
+ }
+}
+
+static void vectorscope16(VectorscopeContext *s, AVFrame *in, AVFrame *out, int pd)
+{
+ const uint16_t * const *src = (const uint16_t * const *)in->data;
+ const int slinesizex = in->linesize[s->x] / 2;
+ const int slinesizey = in->linesize[s->y] / 2;
+ const int slinesized = in->linesize[pd] / 2;
+ const int dlinesize = out->linesize[0] / 2;
+ const int intensity = s->intensity;
+ const int px = s->x, py = s->y;
+ const int h = s->planeheight[py];
+ const int w = s->planewidth[px];
+ const uint16_t *spx = src[px];
+ const uint16_t *spy = src[py];
+ const uint16_t *spd = src[pd];
+ const int hsub = s->hsub;
+ const int vsub = s->vsub;
+ uint16_t **dst = (uint16_t **)out->data;
+ uint16_t *dpx = dst[px];
+ uint16_t *dpy = dst[py];
+ uint16_t *dpd = dst[pd];
+ const int max = s->size - 1;
+ const int mid = s->size / 2;
+ const int tmin = s->tmin;
+ const int tmax = s->tmax;
+ int i, j, k;
+
+ for (k = 0; k < 4 && dst[k]; k++) {
+ for (i = 0; i < out->height ; i++)
+ for (j = 0; j < out->width; j++)
+ AV_WN16(out->data[k] + i * out->linesize[k] + j * 2,
+ (s->mode == COLOR || s->mode == COLOR5) && k == s->pd ? 0 : s->bg_color[k]);
+ }
+
+ switch (s->mode) {
+ case COLOR:
+ case COLOR5:
+ case GRAY:
+ if (s->is_yuv) {
+ for (i = 0; i < h; i++) {
+ const int iwx = i * slinesizex;
+ const int iwy = i * slinesizey;
+ const int iwd = i * slinesized;
+ for (j = 0; j < w; j++) {
+ const int x = FFMIN(spx[iwx + j], max);
+ const int y = FFMIN(spy[iwy + j], max);
+ const int z = spd[iwd + j];
+ const int pos = y * dlinesize + x;
+
+ if (z < tmin || z > tmax)
+ continue;
+
+ dpd[pos] = FFMIN(dpd[pos] + intensity, max);
+ }
+ }
+ } else {
+ for (i = 0; i < h; i++) {
+ const int iwx = i * slinesizex;
+ const int iwy = i * slinesizey;
+ const int iwd = i * slinesized;
+ for (j = 0; j < w; j++) {
+ const int x = FFMIN(spx[iwx + j], max);
+ const int y = FFMIN(spy[iwy + j], max);
+ const int z = spd[iwd + j];
+ const int pos = y * dlinesize + x;
+
+ if (z < tmin || z > tmax)
+ continue;
+
+ dst[0][pos] = FFMIN(dst[0][pos] + intensity, max);
+ dst[1][pos] = FFMIN(dst[1][pos] + intensity, max);
+ dst[2][pos] = FFMIN(dst[2][pos] + intensity, max);
+ }
+ }
+ }
+ break;
+ case COLOR2:
+ if (s->is_yuv) {
+ for (i = 0; i < h; i++) {
+ const int iw1 = i * slinesizex;
+ const int iw2 = i * slinesizey;
+ const int iwd = i * slinesized;
+ for (j = 0; j < w; j++) {
+ const int x = FFMIN(spx[iw1 + j], max);
+ const int y = FFMIN(spy[iw2 + j], max);
+ const int z = spd[iwd + j];
+ const int pos = y * dlinesize + x;
+
+ if (z < tmin || z > tmax)
+ continue;
+
+ if (!dpd[pos])
+ dpd[pos] = FFABS(mid - x) + FFABS(mid - y);
+ dpx[pos] = x;
+ dpy[pos] = y;
+ }
+ }
+ } else {
+ for (i = 0; i < h; i++) {
+ const int iw1 = i * slinesizex;
+ const int iw2 = i * slinesizey;
+ const int iwd = i * slinesized;
+ for (j = 0; j < w; j++) {
+ const int x = FFMIN(spx[iw1 + j], max);
+ const int y = FFMIN(spy[iw2 + j], max);
+ const int z = spd[iwd + j];
+ const int pos = y * dlinesize + x;
+
+ if (z < tmin || z > tmax)
+ continue;
+
+ if (!dpd[pos])
+ dpd[pos] = FFMIN(x + y, max);
+ dpx[pos] = x;
+ dpy[pos] = y;
+ }
+ }
+ }
+ break;
+ case COLOR3:
+ for (i = 0; i < h; i++) {
+ const int iw1 = i * slinesizex;
+ const int iw2 = i * slinesizey;
+ const int iwd = i * slinesized;
+ for (j = 0; j < w; j++) {
+ const int x = FFMIN(spx[iw1 + j], max);
+ const int y = FFMIN(spy[iw2 + j], max);
+ const int z = spd[iwd + j];
+ const int pos = y * dlinesize + x;
+
+ if (z < tmin || z > tmax)
+ continue;
+
+ dpd[pos] = FFMIN(max, dpd[pos] + intensity);
+ dpx[pos] = x;
+ dpy[pos] = y;
+ }
+ }
+ break;
+ case COLOR4:
+ for (i = 0; i < in->height; i++) {
+ const int iwx = (i >> vsub) * slinesizex;
+ const int iwy = (i >> vsub) * slinesizey;
+ const int iwd = i * slinesized;
+ for (j = 0; j < in->width; j++) {
+ const int x = FFMIN(spx[iwx + (j >> hsub)], max);
+ const int y = FFMIN(spy[iwy + (j >> hsub)], max);
+ const int z = spd[iwd + j];
+ const int pos = y * dlinesize + x;
+
+ if (z < tmin || z > tmax)
+ continue;
+
+ dpd[pos] = FFMAX(z, dpd[pos]);
+ dpx[pos] = x;
+ dpy[pos] = y;
+ }
+ }
+ break;
+ default:
+ av_assert0(0);
+ }
+
+ envelope16(s, out);
+
+ if (dst[3]) {
+ for (i = 0; i < out->height; i++) {
+ for (j = 0; j < out->width; j++) {
+ int pos = i * dlinesize + j;
+
+ if (dpd[pos])
+ dst[3][pos] = max;
+ }
+ }
+ }
+
+ if (s->mode == COLOR) {
+ for (i = 0; i < out->height; i++) {
+ for (j = 0; j < out->width; j++) {
+ if (!dpd[i * dlinesize + j]) {
+ dpx[i * dlinesize + j] = j;
+ dpy[i * dlinesize + j] = i;
+ dpd[i * dlinesize + j] = mid;
+ }
+ }
+ }
+ } else if (s->mode == COLOR5) {
+ for (i = 0; i < out->height; i++) {
+ for (j = 0; j < out->width; j++) {
+ if (!dpd[i * dlinesize + j]) {
+ dpx[i * dlinesize + j] = j;
+ dpy[i * dlinesize + j] = i;
+ dpd[i * dlinesize + j] = mid * M_SQRT2 - hypot(i - mid, j - mid);
+ }
+ }
+ }
+ }
+}
+
+static void vectorscope8(VectorscopeContext *s, AVFrame *in, AVFrame *out, int pd)
+{
+ const uint8_t * const *src = (const uint8_t * const *)in->data;
+ const int slinesizex = in->linesize[s->x];
+ const int slinesizey = in->linesize[s->y];
+ const int slinesized = in->linesize[pd];
+ const int dlinesize = out->linesize[0];
+ const int intensity = s->intensity;
+ const int px = s->x, py = s->y;
+ const int h = s->planeheight[py];
+ const int w = s->planewidth[px];
+ const uint8_t *spx = src[px];
+ const uint8_t *spy = src[py];
+ const uint8_t *spd = src[pd];
+ const int hsub = s->hsub;
+ const int vsub = s->vsub;
+ uint8_t **dst = out->data;
+ uint8_t *dpx = dst[px];
+ uint8_t *dpy = dst[py];
+ uint8_t *dpd = dst[pd];
+ const int tmin = s->tmin;
+ const int tmax = s->tmax;
+ int i, j, k;
+
+ for (k = 0; k < 4 && dst[k]; k++)
+ for (i = 0; i < out->height ; i++)
+ memset(dst[k] + i * out->linesize[k],
+ (s->mode == COLOR || s->mode == COLOR5) && k == s->pd ? 0 : s->bg_color[k], out->width);
+
+ switch (s->mode) {
+ case COLOR5:
+ case COLOR:
+ case GRAY:
+ if (s->is_yuv) {
+ for (i = 0; i < h; i++) {
+ const int iwx = i * slinesizex;
+ const int iwy = i * slinesizey;
+ const int iwd = i * slinesized;
+ for (j = 0; j < w; j++) {
+ const int x = spx[iwx + j];
+ const int y = spy[iwy + j];
+ const int z = spd[iwd + j];
+ const int pos = y * dlinesize + x;
+
+ if (z < tmin || z > tmax)
+ continue;
+
+ dpd[pos] = FFMIN(dpd[pos] + intensity, 255);
+ }
+ }
+ } else {
+ for (i = 0; i < h; i++) {
+ const int iwx = i * slinesizex;
+ const int iwy = i * slinesizey;
+ const int iwd = i * slinesized;
+ for (j = 0; j < w; j++) {
+ const int x = spx[iwx + j];
+ const int y = spy[iwy + j];
+ const int z = spd[iwd + j];
+ const int pos = y * dlinesize + x;
+
+ if (z < tmin || z > tmax)
+ continue;
+
+ dst[0][pos] = FFMIN(dst[0][pos] + intensity, 255);
+ dst[1][pos] = FFMIN(dst[1][pos] + intensity, 255);
+ dst[2][pos] = FFMIN(dst[2][pos] + intensity, 255);
+ }
+ }
+ }
+ break;
+ case COLOR2:
+ if (s->is_yuv) {
+ for (i = 0; i < h; i++) {
+ const int iw1 = i * slinesizex;
+ const int iw2 = i * slinesizey;
+ const int iwd = i * slinesized;
+ for (j = 0; j < w; j++) {
+ const int x = spx[iw1 + j];
+ const int y = spy[iw2 + j];
+ const int z = spd[iwd + j];
+ const int pos = y * dlinesize + x;
+
+ if (z < tmin || z > tmax)
+ continue;
+
+ if (!dpd[pos])
+ dpd[pos] = FFABS(128 - x) + FFABS(128 - y);
+ dpx[pos] = x;
+ dpy[pos] = y;
+ }
+ }
+ } else {
+ for (i = 0; i < h; i++) {
+ const int iw1 = i * slinesizex;
+ const int iw2 = i * slinesizey;
+ const int iwd = i * slinesized;
+ for (j = 0; j < w; j++) {
+ const int x = spx[iw1 + j];
+ const int y = spy[iw2 + j];
+ const int z = spd[iwd + j];
+ const int pos = y * dlinesize + x;
+
+ if (z < tmin || z > tmax)
+ continue;
+
+ if (!dpd[pos])
+ dpd[pos] = FFMIN(x + y, 255);
+ dpx[pos] = x;
+ dpy[pos] = y;
+ }
+ }
+ }
+ break;
+ case COLOR3:
+ for (i = 0; i < h; i++) {
+ const int iw1 = i * slinesizex;
+ const int iw2 = i * slinesizey;
+ const int iwd = i * slinesized;
+ for (j = 0; j < w; j++) {
+ const int x = spx[iw1 + j];
+ const int y = spy[iw2 + j];
+ const int z = spd[iwd + j];
+ const int pos = y * dlinesize + x;
+
+ if (z < tmin || z > tmax)
+ continue;
+
+ dpd[pos] = FFMIN(255, dpd[pos] + intensity);
+ dpx[pos] = x;
+ dpy[pos] = y;
+ }
+ }
+ break;
+ case COLOR4:
+ for (i = 0; i < in->height; i++) {
+ const int iwx = (i >> vsub) * slinesizex;
+ const int iwy = (i >> vsub) * slinesizey;
+ const int iwd = i * slinesized;
+ for (j = 0; j < in->width; j++) {
+ const int x = spx[iwx + (j >> hsub)];
+ const int y = spy[iwy + (j >> hsub)];
+ const int z = spd[iwd + j];
+ const int pos = y * dlinesize + x;
+
+ if (z < tmin || z > tmax)
+ continue;
+
+ dpd[pos] = FFMAX(z, dpd[pos]);
+ dpx[pos] = x;
+ dpy[pos] = y;
+ }
+ }
+ break;
+ default:
+ av_assert0(0);
+ }
+
+ envelope(s, out);
+
+ if (dst[3]) {
+ for (i = 0; i < out->height; i++) {
+ for (j = 0; j < out->width; j++) {
+ int pos = i * dlinesize + j;
+
+ if (dpd[pos])
+ dst[3][pos] = 255;
+ }
+ }
+ }
+
+ if (s->mode == COLOR) {
+ for (i = 0; i < out->height; i++) {
+ for (j = 0; j < out->width; j++) {
+ if (!dpd[i * out->linesize[pd] + j]) {
+ dpx[i * out->linesize[px] + j] = j;
+ dpy[i * out->linesize[py] + j] = i;
+ dpd[i * out->linesize[pd] + j] = 128;
+ }
+ }
+ }
+ } else if (s->mode == COLOR5) {
+ for (i = 0; i < out->height; i++) {
+ for (j = 0; j < out->width; j++) {
+ if (!dpd[i * out->linesize[pd] + j]) {
+ dpx[i * out->linesize[px] + j] = j;
+ dpy[i * out->linesize[py] + j] = i;
+ dpd[i * out->linesize[pd] + j] = 128 * M_SQRT2 - hypot(i - 128, j - 128);
+ }
+ }
+ }
+ }
+}
+
+const static char *positions_name[] = {
+ "R", "B", "Cy", "Yl", "G", "Mg",
+};
+
+const static uint16_t positions[][14][3] = {
+ {
+ { 81, 90, 240 }, { 41, 240, 110 }, { 170, 166, 16 },
+ { 210, 16, 146 }, { 145, 54, 34 }, { 106, 202, 222 },
+ { 162, 44, 142 }, { 131, 156, 44 }, { 112, 72, 58 },
+ { 84, 184, 198 }, { 65, 100, 212 }, { 35, 212, 114 },
+ { 235, 128, 128 }, { 16, 128, 128 } },
+ { { 63, 102, 240 }, { 32, 240, 118 }, { 188, 154, 16 },
+ { 219, 16, 138 }, { 173, 42, 26 }, { 78, 214, 230 },
+ { 28, 212, 120 }, { 51, 109, 212 }, { 63, 193, 204 },
+ { 133, 63, 52 }, { 145, 147, 44 }, { 168, 44, 136 },
+ { 235, 128, 128 }, { 16, 128, 128 } },
+ { { 81*2, 90*2, 240*2 }, { 41*2, 240*2, 110*2 }, { 170*2, 166*2, 16*2 },
+ { 210*2, 16*2, 146*2 }, { 145*2, 54*2, 34*2 }, { 106*2, 202*2, 222*2 },
+ { 162*2, 44*2, 142*2 }, { 131*2, 156*2, 44*2 }, { 112*2, 72*2, 58*2 },
+ { 84*2, 184*2, 198*2 }, { 65*2, 100*2, 212*2 }, { 35*2, 212*2, 114*2 },
+ { 470, 256, 256 }, { 32, 256, 256 } },
+ { { 63*2, 102*2, 240*2 }, { 32*2, 240*2, 118*2 }, { 188*2, 154*2, 16*2 },
+ { 219*2, 16*2, 138*2 }, { 173*2, 42*2, 26*2 }, { 78*2, 214*2, 230*2 },
+ { 28*2, 212*2, 120*2 }, { 51*2, 109*2, 212*2 }, { 63*2, 193*2, 204*2 },
+ { 133*2, 63*2, 52*2 }, { 145*2, 147*2, 44*2 }, { 168*2, 44*2, 136*2 },
+ { 470, 256, 256 }, { 32, 256, 256 } },
+ { { 81*4, 90*4, 240*4 }, { 41*4, 240*4, 110*4 }, { 170*4, 166*4, 16*4 },
+ { 210*4, 16*4, 146*4 }, { 145*4, 54*4, 34*4 }, { 106*4, 202*4, 222*4 },
+ { 162*4, 44*4, 142*4 }, { 131*4, 156*4, 44*4 }, { 112*4, 72*4, 58*4 },
+ { 84*4, 184*4, 198*4 }, { 65*4, 100*4, 212*4 }, { 35*4, 212*4, 114*4 },
+ { 940, 512, 512 }, { 64, 512, 512 } },
+ { { 63*4, 102*4, 240*4 }, { 32*4, 240*4, 118*4 }, { 188*4, 154*4, 16*4 },
+ { 219*4, 16*4, 138*4 }, { 173*4, 42*4, 26*4 }, { 78*4, 214*4, 230*4 },
+ { 28*4, 212*4, 120*4 }, { 51*4, 109*4, 212*4 }, { 63*4, 193*4, 204*4 },
+ { 133*4, 63*4, 52*4 }, { 145*4, 147*4, 44*4 }, { 168*4, 44*4, 136*4 },
+ { 940, 512, 512 }, { 64, 512, 512 } },
+ { { 81*8, 90*4, 240*8 }, { 41*8, 240*8, 110*8 }, { 170*8, 166*8, 16*8 },
+ { 210*8, 16*4, 146*8 }, { 145*8, 54*8, 34*8 }, { 106*8, 202*8, 222*8 },
+ { 162*8, 44*4, 142*8 }, { 131*8, 156*8, 44*8 }, { 112*8, 72*8, 58*8 },
+ { 84*8, 184*4, 198*8 }, { 65*8, 100*8, 212*8 }, { 35*8, 212*8, 114*8 },
+ { 1880, 1024, 1024 }, { 128, 1024, 1024 } },
+ { { 63*8, 102*8, 240*8 }, { 32*8, 240*8, 118*8 }, { 188*8, 154*8, 16*8 },
+ { 219*8, 16*8, 138*8 }, { 173*8, 42*8, 26*8 }, { 78*8, 214*8, 230*8 },
+ { 28*8, 212*8, 120*8 }, { 51*8, 109*8, 212*8 }, { 63*8, 193*8, 204*8 },
+ { 133*8, 63*8, 52*8 }, { 145*8, 147*8, 44*8 }, { 168*8, 44*8, 136*8 },
+ { 1880, 1024, 1024 }, { 128, 1024, 1024 } },
+ { { 81*16, 90*16, 240*16 }, { 41*16, 240*16, 110*16 }, { 170*16, 166*16, 16*16 },
+ { 210*16, 16*16, 146*16 }, { 145*16, 54*16, 34*16 }, { 106*16, 202*16, 222*16 },
+ { 162*16, 44*16, 142*16 }, { 131*16, 156*16, 44*16 }, { 112*16, 72*16, 58*16 },
+ { 84*16, 184*16, 198*16 }, { 65*16, 100*16, 212*16 }, { 35*16, 212*16, 114*16 },
+ { 3760, 2048, 2048 }, { 256, 2048, 2048 } },
+ { { 63*16, 102*16, 240*16 }, { 32*16, 240*16, 118*16 }, { 188*16, 154*16, 16*16 },
+ { 219*16, 16*16, 138*16 }, { 173*16, 42*16, 26*16 }, { 78*16, 214*16, 230*16 },
+ { 28*16, 212*16, 120*16 }, { 51*16, 109*16, 212*16 }, { 63*16, 193*16, 204*16 },
+ { 133*16, 63*16, 52*16 }, { 145*16, 147*16, 44*16 }, { 168*16, 44*16, 136*16 },
+ { 3760, 2048, 2048 }, { 256, 2048, 2048 } },
+};
+
+static void draw_dots(uint8_t *dst, int L, int v, float o)
+{
+ const float f = 1. - o;
+ const float V = o * v;
+ int l = L * 2;
+
+ dst[ l - 3] = dst[ l - 3] * f + V;
+ dst[ l + 3] = dst[ l + 3] * f + V;
+ dst[-l - 3] = dst[-l - 3] * f + V;
+ dst[-l + 3] = dst[-l + 3] * f + V;
+
+ l += L;
+
+ dst[ l - 3] = dst[ l - 3] * f + V;
+ dst[ l + 3] = dst[ l + 3] * f + V;
+ dst[ l - 2] = dst[ l - 2] * f + V;
+ dst[ l + 2] = dst[ l + 2] * f + V;
+ dst[-l - 3] = dst[-l - 3] * f + V;
+ dst[-l + 3] = dst[-l + 3] * f + V;
+ dst[-l - 2] = dst[-l - 2] * f + V;
+ dst[-l + 2] = dst[-l + 2] * f + V;
+}
+
+static void draw_dots16(uint16_t *dst, int L, int v, float o)
+{
+ const float f = 1. - o;
+ const float V = o * v;
+ int l = L * 2;
+
+ dst[ l - 3] = dst[ l - 3] * f + V;
+ dst[ l + 3] = dst[ l + 3] * f + V;
+ dst[-l - 3] = dst[-l - 3] * f + V;
+ dst[-l + 3] = dst[-l + 3] * f + V;
+
+ l += L;
+
+ dst[ l - 3] = dst[ l - 3] * f + V;
+ dst[ l + 3] = dst[ l + 3] * f + V;
+ dst[ l - 2] = dst[ l - 2] * f + V;
+ dst[ l + 2] = dst[ l + 2] * f + V;
+ dst[-l - 3] = dst[-l - 3] * f + V;
+ dst[-l + 3] = dst[-l + 3] * f + V;
+ dst[-l - 2] = dst[-l - 2] * f + V;
+ dst[-l + 2] = dst[-l + 2] * f + V;
+}
+
+static void none_graticule(VectorscopeContext *s, AVFrame *out, int X, int Y, int D, int P)
+{
+}
+
+static void draw_htext(AVFrame *out, int x, int y, float o1, float o2, const char *txt, const uint8_t color[4])
+{
+ const uint8_t *font;
+ int font_height;
+ int i, plane;
+
+ font = avpriv_cga_font, font_height = 8;
+
+ for (plane = 0; plane < 4 && out->data[plane]; plane++) {
+ for (i = 0; txt[i]; i++) {
+ int char_y, mask;
+ int v = color[plane];
+
+ uint8_t *p = out->data[plane] + y * out->linesize[plane] + (x + i * 8);
+ for (char_y = font_height - 1; char_y >= 0; char_y--) {
+ for (mask = 0x80; mask; mask >>= 1) {
+ if (font[txt[i] * font_height + char_y] & mask)
+ p[0] = p[0] * o2 + v * o1;
+ p++;
+ }
+ p += out->linesize[plane] - 8;
+ }
+ }
+ }
+}
+
+static void draw_htext16(AVFrame *out, int x, int y, float o1, float o2, const char *txt, const uint16_t color[4])
+{
+ const uint8_t *font;
+ int font_height;
+ int i, plane;
+
+ font = avpriv_cga_font, font_height = 8;
+
+ for (plane = 0; plane < 4 && out->data[plane]; plane++) {
+ for (i = 0; txt[i]; i++) {
+ int char_y, mask;
+ int v = color[plane];
+
+ uint16_t *p = (uint16_t *)(out->data[plane] + y * out->linesize[plane]) + (x + i * 8);
+ for (char_y = font_height - 1; char_y >= 0; char_y--) {
+ for (mask = 0x80; mask; mask >>= 1) {
+ if (font[txt[i] * font_height + char_y] & mask)
+ p[0] = p[0] * o2 + v * o1;
+ p++;
+ }
+ p += out->linesize[plane] / 2 - 8;
+ }
+ }
+ }
+}
+
+static void color_graticule16(VectorscopeContext *s, AVFrame *out, int X, int Y, int D, int P)
+{
+ const int max = s->size - 1;
+ const float o = s->opacity;
+ int i;
+
+ for (i = 0; i < 12; i++) {
+ int x = positions[P][i][X];
+ int y = positions[P][i][Y];
+ int d = positions[P][i][D];
+
+ draw_dots16((uint16_t *)(out->data[D] + y * out->linesize[D] + x * 2), out->linesize[D] / 2, d, o);
+ draw_dots16((uint16_t *)(out->data[X] + y * out->linesize[X] + x * 2), out->linesize[X] / 2, x, o);
+ draw_dots16((uint16_t *)(out->data[Y] + y * out->linesize[Y] + x * 2), out->linesize[Y] / 2, y, o);
+ if (out->data[3])
+ draw_dots16((uint16_t *)(out->data[3] + y * out->linesize[3] + x * 2), out->linesize[3] / 2, max, o);
+ }
+
+ if (s->flags & 1) {
+ int x = positions[P][12][X];
+ int y = positions[P][12][Y];
+ int d = positions[P][12][D];
+
+ draw_dots16((uint16_t *)(out->data[D] + y * out->linesize[D] + x * 2), out->linesize[D] / 2, d, o);
+ draw_dots16((uint16_t *)(out->data[X] + y * out->linesize[X] + x * 2), out->linesize[X] / 2, x, o);
+ draw_dots16((uint16_t *)(out->data[Y] + y * out->linesize[Y] + x * 2), out->linesize[Y] / 2, y, o);
+ if (out->data[3])
+ draw_dots16((uint16_t *)(out->data[3] + y * out->linesize[3] + x * 2), out->linesize[3] / 2, max, o);
+ }
+
+ if (s->flags & 2) {
+ int x = positions[P][13][X];
+ int y = positions[P][13][Y];
+ int d = positions[P][13][D];
+
+ draw_dots16((uint16_t *)(out->data[D] + y * out->linesize[D] + x * 2), out->linesize[D] / 2, d, o);
+ draw_dots16((uint16_t *)(out->data[X] + y * out->linesize[X] + x * 2), out->linesize[X] / 2, x, o);
+ draw_dots16((uint16_t *)(out->data[Y] + y * out->linesize[Y] + x * 2), out->linesize[Y] / 2, y, o);
+ if (out->data[3])
+ draw_dots16((uint16_t *)(out->data[3] + y * out->linesize[3] + x * 2), out->linesize[3] / 2, max, o);
+ }
+
+ for (i = 0; i < 6 && s->flags & 4; i++) {
+ uint16_t color[4] = { 0, 0, 0, 0 };
+ int x = positions[P][i][X];
+ int y = positions[P][i][Y];
+ int d = positions[P][i][D];
+
+ color[D] = d;
+ color[X] = x;
+ color[Y] = y;
+ color[3] = max;
+
+ if (x > max / 2)
+ x += 8;
+ else
+ x -= 14;
+ if (y > max / 2)
+ y += 8;
+ else
+ y -= 14;
+
+ x = av_clip(x, 0, out->width - 9);
+ y = av_clip(y, 0, out->height - 9);
+ draw_htext16(out, x, y, o, 1. - o, positions_name[i], color);
+ }
+}
+
+static void color_graticule(VectorscopeContext *s, AVFrame *out, int X, int Y, int D, int P)
+{
+ const float o = s->opacity;
+ int i;
+
+ for (i = 0; i < 12; i++) {
+ int x = positions[P][i][X];
+ int y = positions[P][i][Y];
+ int d = positions[P][i][D];
+
+ draw_dots(out->data[D] + y * out->linesize[D] + x, out->linesize[D], d, o);
+ draw_dots(out->data[X] + y * out->linesize[X] + x, out->linesize[X], x, o);
+ draw_dots(out->data[Y] + y * out->linesize[Y] + x, out->linesize[Y], y, o);
+ if (out->data[3])
+ draw_dots(out->data[3] + y * out->linesize[3] + x, out->linesize[3], 255, o);
+ }
+
+ if (s->flags & 1) {
+ int x = positions[P][12][X];
+ int y = positions[P][12][Y];
+ int d = positions[P][12][D];
+
+ draw_dots(out->data[D] + y * out->linesize[D] + x, out->linesize[D], d, o);
+ draw_dots(out->data[X] + y * out->linesize[X] + x, out->linesize[X], x, o);
+ draw_dots(out->data[Y] + y * out->linesize[Y] + x, out->linesize[Y], y, o);
+ if (out->data[3])
+ draw_dots(out->data[3] + y * out->linesize[3] + x, out->linesize[3], 255, o);
+ }
+
+ if (s->flags & 2) {
+ int x = positions[P][13][X];
+ int y = positions[P][13][Y];
+ int d = positions[P][12][D];
+
+ draw_dots(out->data[D] + y * out->linesize[D] + x, out->linesize[D], d, o);
+ draw_dots(out->data[X] + y * out->linesize[X] + x, out->linesize[X], x, o);
+ draw_dots(out->data[Y] + y * out->linesize[Y] + x, out->linesize[Y], y, o);
+ if (out->data[3])
+ draw_dots(out->data[3] + y * out->linesize[3] + x, out->linesize[3], 255, o);
+ }
+
+ for (i = 0; i < 6 && s->flags & 4; i++) {
+ uint8_t color[4] = { 0, 0, 0, 255 };
+ int x = positions[P][i][X];
+ int y = positions[P][i][Y];
+ int d = positions[P][i][D];
+
+ color[D] = d;
+ color[X] = x;
+ color[Y] = y;
+
+ if (x > 128)
+ x += 8;
+ else
+ x -= 14;
+ if (y > 128)
+ y += 8;
+ else
+ y -= 14;
+
+ x = av_clip(x, 0, out->width - 9);
+ y = av_clip(y, 0, out->height - 9);
+ draw_htext(out, x, y, o, 1. - o, positions_name[i], color);
+ }
+}
+
+static void green_graticule16(VectorscopeContext *s, AVFrame *out, int X, int Y, int D, int P)
+{
+ const int max = s->size - 1;
+ const float o = s->opacity;
+ const int m = s->mult;
+ int i;
+
+ for (i = 0; i < 12; i++) {
+ int x = positions[P][i][X];
+ int y = positions[P][i][Y];
+
+ draw_dots16((uint16_t *)(out->data[0] + y * out->linesize[0] + x * 2), out->linesize[0] / 2, 128 * m, o);
+ draw_dots16((uint16_t *)(out->data[1] + y * out->linesize[1] + x * 2), out->linesize[1] / 2, 0, o);
+ draw_dots16((uint16_t *)(out->data[2] + y * out->linesize[2] + x * 2), out->linesize[2] / 2, 0, o);
+ if (out->data[3])
+ draw_dots16((uint16_t *)(out->data[3] + y * out->linesize[3] + x * 2), out->linesize[3] / 2, max, o);
+ }
+
+ if (s->flags & 1) {
+ int x = positions[P][12][X];
+ int y = positions[P][12][Y];
+
+ draw_dots16((uint16_t *)(out->data[0] + y * out->linesize[0] + x * 2), out->linesize[0] / 2, 128 * m, o);
+ draw_dots16((uint16_t *)(out->data[1] + y * out->linesize[1] + x * 2), out->linesize[1] / 2, 0, o);
+ draw_dots16((uint16_t *)(out->data[2] + y * out->linesize[2] + x * 2), out->linesize[2] / 2, 0, o);
+ if (out->data[3])
+ draw_dots16((uint16_t *)(out->data[3] + y * out->linesize[3] + x * 2), out->linesize[3] / 2, max, o);
+ }
+
+ if (s->flags & 2) {
+ int x = positions[P][13][X];
+ int y = positions[P][13][Y];
+
+ draw_dots16((uint16_t *)(out->data[0] + y * out->linesize[0] + x * 2), out->linesize[0] / 2, 128 * m, o);
+ draw_dots16((uint16_t *)(out->data[1] + y * out->linesize[1] + x * 2), out->linesize[1] / 2, 0, o);
+ draw_dots16((uint16_t *)(out->data[2] + y * out->linesize[2] + x * 2), out->linesize[2] / 2, 0, o);
+ if (out->data[3])
+ draw_dots16((uint16_t *)(out->data[3] + y * out->linesize[3] + x * 2), out->linesize[3] / 2, max, o);
+ }
+
+ for (i = 0; i < 6 && s->flags & 4; i++) {
+ const uint16_t color[4] = { 128 * m, 0, 0, max };
+ int x = positions[P][i][X];
+ int y = positions[P][i][Y];
+
+ if (x > max / 2)
+ x += 8;
+ else
+ x -= 14;
+ if (y > max / 2)
+ y += 8;
+ else
+ y -= 14;
+
+ x = av_clip(x, 0, out->width - 9);
+ y = av_clip(y, 0, out->height - 9);
+ draw_htext16(out, x, y, o, 1. - o, positions_name[i], color);
+ }
+}
+
+static void green_graticule(VectorscopeContext *s, AVFrame *out, int X, int Y, int D, int P)
+{
+ const float o = s->opacity;
+ int i;
+
+ for (i = 0; i < 12; i++) {
+ int x = positions[P][i][X];
+ int y = positions[P][i][Y];
+
+ draw_dots(out->data[0] + y * out->linesize[0] + x, out->linesize[0], 128, o);
+ draw_dots(out->data[1] + y * out->linesize[1] + x, out->linesize[1], 0, o);
+ draw_dots(out->data[2] + y * out->linesize[2] + x, out->linesize[2], 0, o);
+ if (out->data[3])
+ draw_dots(out->data[3] + y * out->linesize[3] + x, out->linesize[3], 255, o);
+ }
+
+ if (s->flags & 1) {
+ int x = positions[P][12][X];
+ int y = positions[P][12][Y];
+
+ draw_dots(out->data[0] + y * out->linesize[0] + x, out->linesize[0], 128, o);
+ draw_dots(out->data[1] + y * out->linesize[1] + x, out->linesize[1], 0, o);
+ draw_dots(out->data[2] + y * out->linesize[2] + x, out->linesize[2], 0, o);
+ if (out->data[3])
+ draw_dots(out->data[3] + y * out->linesize[3] + x, out->linesize[3], 255, o);
+ }
+
+ if (s->flags & 2) {
+ int x = positions[P][13][X];
+ int y = positions[P][13][Y];
+
+ draw_dots(out->data[0] + y * out->linesize[0] + x, out->linesize[0], 128, o);
+ draw_dots(out->data[1] + y * out->linesize[1] + x, out->linesize[1], 0, o);
+ draw_dots(out->data[2] + y * out->linesize[2] + x, out->linesize[2], 0, o);
+ if (out->data[3])
+ draw_dots(out->data[3] + y * out->linesize[3] + x, out->linesize[3], 255, o);
+ }
+
+ for (i = 0; i < 6 && s->flags & 4; i++) {
+ const uint8_t color[4] = { 128, 0, 0, 255 };
+ int x = positions[P][i][X];
+ int y = positions[P][i][Y];
+
+ if (x > 128)
+ x += 8;
+ else
+ x -= 14;
+ if (y > 128)
+ y += 8;
+ else
+ y -= 14;
+
+ x = av_clip(x, 0, out->width - 9);
+ y = av_clip(y, 0, out->height - 9);
+ draw_htext(out, x, y, o, 1. - o, positions_name[i], color);
+ }
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ VectorscopeContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFrame *out;
+ int plane;
+
+ if (s->colorspace) {
+ s->cs = (s->depth - 8) * 2 + s->colorspace - 1;
+ } else {
+ switch (av_frame_get_colorspace(in)) {
+ case AVCOL_SPC_SMPTE170M:
+ case AVCOL_SPC_BT470BG:
+ s->cs = (s->depth - 8) * 2 + 0;
+ break;
+ case AVCOL_SPC_BT709:
+ default:
+ s->cs = (s->depth - 8) * 2 + 1;
+ }
+ }
+
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+
+ s->vectorscope(s, in, out, s->pd);
+ s->graticulef(s, out, s->x, s->y, s->pd, s->cs);
+
+ for (plane = 0; plane < 4; plane++) {
+ if (out->data[plane]) {
+ out->data[plane] += (s->size - 1) * out->linesize[plane];
+ out->linesize[plane] = -out->linesize[plane];
+ }
+ }
+
+ av_frame_free(&in);
+ return ff_filter_frame(outlink, out);
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ AVFilterContext *ctx = inlink->dst;
+ VectorscopeContext *s = ctx->priv;
+
+ s->is_yuv = !(desc->flags & AV_PIX_FMT_FLAG_RGB);
+ s->size = 1 << desc->comp[0].depth;
+ s->mult = s->size / 256;
+ s->depth = desc->comp[0].depth;
+ s->tmin = s->lthreshold * (s->size - 1);
+ s->tmax = s->hthreshold * (s->size - 1);
+
+ if (s->tmin > s->tmax) {
+ av_log(ctx, AV_LOG_ERROR, "low threshold should be less than high threshold\n");
+ return AVERROR(EINVAL);
+ }
+
+ if (s->mode == GRAY && s->is_yuv)
+ s->pd = 0;
+ else {
+ if ((s->x == 1 && s->y == 2) || (s->x == 2 && s->y == 1))
+ s->pd = 0;
+ else if ((s->x == 0 && s->y == 2) || (s->x == 2 && s->y == 0))
+ s->pd = 1;
+ else if ((s->x == 0 && s->y == 1) || (s->x == 1 && s->y == 0))
+ s->pd = 2;
+ }
+
+ if (s->size == 256)
+ s->vectorscope = vectorscope8;
+ else
+ s->vectorscope = vectorscope16;
+
+ s->graticulef = none_graticule;
+
+ if (s->is_yuv && s->size == 256) {
+ if (s->graticule == 1)
+ s->graticulef = green_graticule;
+ else if (s->graticule == 2)
+ s->graticulef = color_graticule;
+ } else if (s->is_yuv) {
+ if (s->graticule == 1)
+ s->graticulef = green_graticule16;
+ else if (s->graticule == 2)
+ s->graticulef = color_graticule16;
+ }
+
+ s->bg_color[3] = s->bgopacity * (s->size - 1);
+
+ switch (inlink->format) {
+ case AV_PIX_FMT_GBRP12:
+ case AV_PIX_FMT_GBRP10:
+ case AV_PIX_FMT_GBRP9:
+ case AV_PIX_FMT_GBRAP:
+ case AV_PIX_FMT_GBRP:
+ s->bg_color[0] = 0;
+ s->bg_color[1] = 0;
+ s->bg_color[2] = 0;
+ break;
+ default:
+ s->bg_color[0] = 0;
+ s->bg_color[1] = s->size / 2 - 1;
+ s->bg_color[2] = s->size / 2 - 1;
+ }
+
+ s->hsub = desc->log2_chroma_w;
+ s->vsub = desc->log2_chroma_h;
+ s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
+ s->planeheight[0] = s->planeheight[3] = inlink->h;
+ s->planewidth[1] = s->planewidth[2] = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
+ s->planewidth[0] = s->planewidth[3] = inlink->w;
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ VectorscopeContext *s = ctx->priv;
+
+ av_freep(&s->peak);
+ av_freep(&s->peak_memory);
+}
+
+static const AVFilterPad inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ { NULL }
+};
+
+static const AVFilterPad outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_vectorscope = {
+ .name = "vectorscope",
+ .description = NULL_IF_CONFIG_SMALL("Video vectorscope."),
+ .priv_size = sizeof(VectorscopeContext),
+ .priv_class = &vectorscope_class,
+ .query_formats = query_formats,
+ .uninit = uninit,
+ .inputs = inputs,
+ .outputs = outputs,
+};
diff --git a/libavfilter/vf_vflip.c b/libavfilter/vf_vflip.c
index fa54985722..c7c39d3341 100644
--- a/libavfilter/vf_vflip.c
+++ b/libavfilter/vf_vflip.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2007 Bobby Bingham
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -24,15 +24,23 @@
*/
#include "libavutil/internal.h"
+#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "internal.h"
#include "video.h"
typedef struct FlipContext {
+ const AVClass *class;
int vsub; ///< vertical chroma subsampling
} FlipContext;
+static const AVOption vflip_options[] = {
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(vflip);
+
static int config_input(AVFilterLink *link)
{
FlipContext *flip = link->dst->priv;
@@ -55,9 +63,10 @@ static AVFrame *get_video_buffer(AVFilterLink *link, int w, int h)
for (i = 0; i < 4; i ++) {
int vsub = i == 1 || i == 2 ? flip->vsub : 0;
+ int height = AV_CEIL_RSHIFT(h, vsub);
if (frame->data[i]) {
- frame->data[i] += ((h >> vsub) - 1) * frame->linesize[i];
+ frame->data[i] += (height - 1) * frame->linesize[i];
frame->linesize[i] = -frame->linesize[i];
}
}
@@ -72,9 +81,10 @@ static int filter_frame(AVFilterLink *link, AVFrame *frame)
for (i = 0; i < 4; i ++) {
int vsub = i == 1 || i == 2 ? flip->vsub : 0;
+ int height = AV_CEIL_RSHIFT(link->h, vsub);
if (frame->data[i]) {
- frame->data[i] += ((link->h >> vsub)-1) * frame->linesize[i];
+ frame->data[i] += (height - 1) * frame->linesize[i];
frame->linesize[i] = -frame->linesize[i];
}
}
@@ -101,11 +111,11 @@ static const AVFilterPad avfilter_vf_vflip_outputs[] = {
};
AVFilter ff_vf_vflip = {
- .name = "vflip",
+ .name = "vflip",
.description = NULL_IF_CONFIG_SMALL("Flip the input video vertically."),
-
- .priv_size = sizeof(FlipContext),
-
- .inputs = avfilter_vf_vflip_inputs,
- .outputs = avfilter_vf_vflip_outputs,
+ .priv_size = sizeof(FlipContext),
+ .priv_class = &vflip_class,
+ .inputs = avfilter_vf_vflip_inputs,
+ .outputs = avfilter_vf_vflip_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
diff --git a/libavfilter/vf_vidstabdetect.c b/libavfilter/vf_vidstabdetect.c
new file mode 100644
index 0000000000..47429494f3
--- /dev/null
+++ b/libavfilter/vf_vidstabdetect.c
@@ -0,0 +1,221 @@
+/*
+ * Copyright (c) 2013 Georg Martius <georg dot martius at web dot de>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#define DEFAULT_RESULT_NAME "transforms.trf"
+
+#include <vid.stab/libvidstab.h>
+
+#include "libavutil/common.h"
+#include "libavutil/opt.h"
+#include "libavutil/imgutils.h"
+#include "avfilter.h"
+#include "internal.h"
+
+#include "vidstabutils.h"
+
+typedef struct {
+ const AVClass *class;
+
+ VSMotionDetect md;
+ VSMotionDetectConfig conf;
+
+ char *result;
+ FILE *f;
+} StabData;
+
+
+#define OFFSET(x) offsetof(StabData, x)
+#define OFFSETC(x) (offsetof(StabData, conf)+offsetof(VSMotionDetectConfig, x))
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption vidstabdetect_options[] = {
+ {"result", "path to the file used to write the transforms", OFFSET(result), AV_OPT_TYPE_STRING, {.str = DEFAULT_RESULT_NAME}, .flags = FLAGS},
+ {"shakiness", "how shaky is the video and how quick is the camera?"
+ " 1: little (fast) 10: very strong/quick (slow)", OFFSETC(shakiness), AV_OPT_TYPE_INT, {.i64 = 5}, 1, 10, FLAGS},
+ {"accuracy", "(>=shakiness) 1: low 15: high (slow)", OFFSETC(accuracy), AV_OPT_TYPE_INT, {.i64 = 15}, 1, 15, FLAGS},
+ {"stepsize", "region around minimum is scanned with 1 pixel resolution", OFFSETC(stepSize), AV_OPT_TYPE_INT, {.i64 = 6}, 1, 32, FLAGS},
+ {"mincontrast", "below this contrast a field is discarded (0-1)", OFFSETC(contrastThreshold), AV_OPT_TYPE_DOUBLE, {.dbl = 0.25}, 0.0, 1.0, FLAGS},
+ {"show", "0: draw nothing; 1,2: show fields and transforms", OFFSETC(show), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 2, FLAGS},
+ {"tripod", "virtual tripod mode (if >0): motion is compared to a reference"
+ " reference frame (frame # is the value)", OFFSETC(virtualTripod), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, FLAGS},
+ {NULL}
+};
+
+AVFILTER_DEFINE_CLASS(vidstabdetect);
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ StabData *s = ctx->priv;
+ ff_vs_init();
+ s->class = &vidstabdetect_class;
+ av_log(ctx, AV_LOG_VERBOSE, "vidstabdetect filter: init %s\n", LIBVIDSTAB_VERSION);
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ StabData *s = ctx->priv;
+ VSMotionDetect *md = &(s->md);
+
+ if (s->f) {
+ fclose(s->f);
+ s->f = NULL;
+ }
+
+ vsMotionDetectionCleanup(md);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ // If you add something here also add it in vidstabutils.c
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUVA420P,
+ AV_PIX_FMT_YUV440P, AV_PIX_FMT_GRAY8,
+ AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24, AV_PIX_FMT_RGBA,
+ AV_PIX_FMT_NONE
+ };
+
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ StabData *s = ctx->priv;
+
+ VSMotionDetect* md = &(s->md);
+ VSFrameInfo fi;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+
+ vsFrameInfoInit(&fi, inlink->w, inlink->h,
+ ff_av2vs_pixfmt(ctx, inlink->format));
+ if (fi.bytesPerPixel != av_get_bits_per_pixel(desc)/8) {
+ av_log(ctx, AV_LOG_ERROR, "pixel-format error: wrong bits/per/pixel, please report a BUG");
+ return AVERROR(EINVAL);
+ }
+ if (fi.log2ChromaW != desc->log2_chroma_w) {
+ av_log(ctx, AV_LOG_ERROR, "pixel-format error: log2_chroma_w, please report a BUG");
+ return AVERROR(EINVAL);
+ }
+
+ if (fi.log2ChromaH != desc->log2_chroma_h) {
+ av_log(ctx, AV_LOG_ERROR, "pixel-format error: log2_chroma_h, please report a BUG");
+ return AVERROR(EINVAL);
+ }
+
+ // set values that are not initialized by the options
+ s->conf.algo = 1;
+ s->conf.modName = "vidstabdetect";
+ if (vsMotionDetectInit(md, &s->conf, &fi) != VS_OK) {
+ av_log(ctx, AV_LOG_ERROR, "initialization of Motion Detection failed, please report a BUG");
+ return AVERROR(EINVAL);
+ }
+
+ vsMotionDetectGetConfig(&s->conf, md);
+ av_log(ctx, AV_LOG_INFO, "Video stabilization settings (pass 1/2):\n");
+ av_log(ctx, AV_LOG_INFO, " shakiness = %d\n", s->conf.shakiness);
+ av_log(ctx, AV_LOG_INFO, " accuracy = %d\n", s->conf.accuracy);
+ av_log(ctx, AV_LOG_INFO, " stepsize = %d\n", s->conf.stepSize);
+ av_log(ctx, AV_LOG_INFO, " mincontrast = %f\n", s->conf.contrastThreshold);
+ av_log(ctx, AV_LOG_INFO, " tripod = %d\n", s->conf.virtualTripod);
+ av_log(ctx, AV_LOG_INFO, " show = %d\n", s->conf.show);
+ av_log(ctx, AV_LOG_INFO, " result = %s\n", s->result);
+
+ s->f = fopen(s->result, "w");
+ if (s->f == NULL) {
+ av_log(ctx, AV_LOG_ERROR, "cannot open transform file %s\n", s->result);
+ return AVERROR(EINVAL);
+ } else {
+ if (vsPrepareFile(md, s->f) != VS_OK) {
+ av_log(ctx, AV_LOG_ERROR, "cannot write to transform file %s\n", s->result);
+ return AVERROR(EINVAL);
+ }
+ }
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ StabData *s = ctx->priv;
+ VSMotionDetect *md = &(s->md);
+ LocalMotions localmotions;
+
+ AVFilterLink *outlink = inlink->dst->outputs[0];
+ VSFrame frame;
+ int plane;
+
+ if (s->conf.show > 0 && !av_frame_is_writable(in))
+ av_frame_make_writable(in);
+
+ for (plane = 0; plane < md->fi.planes; plane++) {
+ frame.data[plane] = in->data[plane];
+ frame.linesize[plane] = in->linesize[plane];
+ }
+ if (vsMotionDetection(md, &localmotions, &frame) != VS_OK) {
+ av_log(ctx, AV_LOG_ERROR, "motion detection failed");
+ return AVERROR(AVERROR_EXTERNAL);
+ } else {
+ if (vsWriteToFile(md, s->f, &localmotions) != VS_OK) {
+ int ret = AVERROR(errno);
+ av_log(ctx, AV_LOG_ERROR, "cannot write to transform file");
+ return ret;
+ }
+ vs_vector_del(&localmotions);
+ }
+
+ return ff_filter_frame(outlink, in);
+}
+
+static const AVFilterPad avfilter_vf_vidstabdetect_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ { NULL }
+};
+
+static const AVFilterPad avfilter_vf_vidstabdetect_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_vidstabdetect = {
+ .name = "vidstabdetect",
+ .description = NULL_IF_CONFIG_SMALL("Extract relative transformations, "
+ "pass 1 of 2 for stabilization "
+ "(see vidstabtransform for pass 2)."),
+ .priv_size = sizeof(StabData),
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = avfilter_vf_vidstabdetect_inputs,
+ .outputs = avfilter_vf_vidstabdetect_outputs,
+ .priv_class = &vidstabdetect_class,
+};
diff --git a/libavfilter/vf_vidstabtransform.c b/libavfilter/vf_vidstabtransform.c
new file mode 100644
index 0000000000..dac0a2d19e
--- /dev/null
+++ b/libavfilter/vf_vidstabtransform.c
@@ -0,0 +1,322 @@
+/*
+ * Copyright (c) 2013 Georg Martius <georg dot martius at web dot de>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#define DEFAULT_INPUT_NAME "transforms.trf"
+
+#include <vid.stab/libvidstab.h>
+
+#include "libavutil/common.h"
+#include "libavutil/opt.h"
+#include "libavutil/imgutils.h"
+#include "avfilter.h"
+#include "internal.h"
+
+#include "vidstabutils.h"
+
+typedef struct {
+ const AVClass *class;
+
+ VSTransformData td;
+ VSTransformConfig conf;
+
+ VSTransformations trans; // transformations
+ char *input; // name of transform file
+ int tripod;
+ int debug;
+} TransformContext;
+
+#define OFFSET(x) offsetof(TransformContext, x)
+#define OFFSETC(x) (offsetof(TransformContext, conf)+offsetof(VSTransformConfig, x))
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption vidstabtransform_options[] = {
+ {"input", "set path to the file storing the transforms", OFFSET(input),
+ AV_OPT_TYPE_STRING, {.str = DEFAULT_INPUT_NAME}, .flags = FLAGS },
+ {"smoothing", "set number of frames*2 + 1 used for lowpass filtering", OFFSETC(smoothing),
+ AV_OPT_TYPE_INT, {.i64 = 15}, 0, 1000, FLAGS},
+
+ {"optalgo", "set camera path optimization algo", OFFSETC(camPathAlgo),
+ AV_OPT_TYPE_INT, {.i64 = VSOptimalL1}, VSOptimalL1, VSAvg, FLAGS, "optalgo"},
+ { "opt", "global optimization", 0, // from version 1.0 on
+ AV_OPT_TYPE_CONST, {.i64 = VSOptimalL1 }, 0, 0, FLAGS, "optalgo"},
+ { "gauss", "gaussian kernel", 0,
+ AV_OPT_TYPE_CONST, {.i64 = VSGaussian }, 0, 0, FLAGS, "optalgo"},
+ { "avg", "simple averaging on motion", 0,
+ AV_OPT_TYPE_CONST, {.i64 = VSAvg }, 0, 0, FLAGS, "optalgo"},
+
+ {"maxshift", "set maximal number of pixels to translate image", OFFSETC(maxShift),
+ AV_OPT_TYPE_INT, {.i64 = -1}, -1, 500, FLAGS},
+ {"maxangle", "set maximal angle in rad to rotate image", OFFSETC(maxAngle),
+ AV_OPT_TYPE_DOUBLE, {.dbl = -1.0}, -1.0, 3.14, FLAGS},
+
+ {"crop", "set cropping mode", OFFSETC(crop),
+ AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, FLAGS, "crop"},
+ { "keep", "keep border", 0,
+ AV_OPT_TYPE_CONST, {.i64 = VSKeepBorder }, 0, 0, FLAGS, "crop"},
+ { "black", "black border", 0,
+ AV_OPT_TYPE_CONST, {.i64 = VSCropBorder }, 0, 0, FLAGS, "crop"},
+
+ {"invert", "invert transforms", OFFSETC(invert),
+ AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, FLAGS},
+ {"relative", "consider transforms as relative", OFFSETC(relative),
+ AV_OPT_TYPE_INT, {.i64 = 1}, 0, 1, FLAGS},
+ {"zoom", "set percentage to zoom (>0: zoom in, <0: zoom out", OFFSETC(zoom),
+ AV_OPT_TYPE_DOUBLE, {.dbl = 0}, -100, 100, FLAGS},
+ {"optzoom", "set optimal zoom (0: nothing, 1: optimal static zoom, 2: optimal dynamic zoom)", OFFSETC(optZoom),
+ AV_OPT_TYPE_INT, {.i64 = 1}, 0, 2, FLAGS},
+ {"zoomspeed", "for adative zoom: percent to zoom maximally each frame", OFFSETC(zoomSpeed),
+ AV_OPT_TYPE_DOUBLE, {.dbl = 0.25}, 0, 5, FLAGS},
+
+ {"interpol", "set type of interpolation", OFFSETC(interpolType),
+ AV_OPT_TYPE_INT, {.i64 = 2}, 0, 3, FLAGS, "interpol"},
+ { "no", "no interpolation", 0,
+ AV_OPT_TYPE_CONST, {.i64 = VS_Zero }, 0, 0, FLAGS, "interpol"},
+ { "linear", "linear (horizontal)", 0,
+ AV_OPT_TYPE_CONST, {.i64 = VS_Linear }, 0, 0, FLAGS, "interpol"},
+ { "bilinear","bi-linear", 0,
+ AV_OPT_TYPE_CONST, {.i64 = VS_BiLinear},0, 0, FLAGS, "interpol"},
+ { "bicubic", "bi-cubic", 0,
+ AV_OPT_TYPE_CONST, {.i64 = VS_BiCubic },0, 0, FLAGS, "interpol"},
+
+ {"tripod", "enable virtual tripod mode (same as relative=0:smoothing=0)", OFFSET(tripod),
+ AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS},
+ {"debug", "enable debug mode and writer global motions information to file", OFFSET(debug),
+ AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS},
+ {NULL}
+};
+
+AVFILTER_DEFINE_CLASS(vidstabtransform);
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ TransformContext *tc = ctx->priv;
+ ff_vs_init();
+ tc->class = &vidstabtransform_class;
+ av_log(ctx, AV_LOG_VERBOSE, "vidstabtransform filter: init %s\n", LIBVIDSTAB_VERSION);
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ TransformContext *tc = ctx->priv;
+
+ vsTransformDataCleanup(&tc->td);
+ vsTransformationsCleanup(&tc->trans);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ // If you add something here also add it in vidstabutils.c
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUVA420P,
+ AV_PIX_FMT_YUV440P, AV_PIX_FMT_GRAY8,
+ AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24, AV_PIX_FMT_RGBA,
+ AV_PIX_FMT_NONE
+ };
+
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ TransformContext *tc = ctx->priv;
+ FILE *f;
+
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+
+ VSTransformData *td = &(tc->td);
+
+ VSFrameInfo fi_src;
+ VSFrameInfo fi_dest;
+
+ if (!vsFrameInfoInit(&fi_src, inlink->w, inlink->h,
+ ff_av2vs_pixfmt(ctx, inlink->format)) ||
+ !vsFrameInfoInit(&fi_dest, inlink->w, inlink->h,
+ ff_av2vs_pixfmt(ctx, inlink->format))) {
+ av_log(ctx, AV_LOG_ERROR, "unknown pixel format: %i (%s)",
+ inlink->format, desc->name);
+ return AVERROR(EINVAL);
+ }
+
+ if (fi_src.bytesPerPixel != av_get_bits_per_pixel(desc)/8 ||
+ fi_src.log2ChromaW != desc->log2_chroma_w ||
+ fi_src.log2ChromaH != desc->log2_chroma_h) {
+ av_log(ctx, AV_LOG_ERROR, "pixel-format error: bpp %i<>%i ",
+ fi_src.bytesPerPixel, av_get_bits_per_pixel(desc)/8);
+ av_log(ctx, AV_LOG_ERROR, "chroma_subsampl: w: %i<>%i h: %i<>%i\n",
+ fi_src.log2ChromaW, desc->log2_chroma_w,
+ fi_src.log2ChromaH, desc->log2_chroma_h);
+ return AVERROR(EINVAL);
+ }
+
+ // set values that are not initializes by the options
+ tc->conf.modName = "vidstabtransform";
+ tc->conf.verbose = 1 + tc->debug;
+ if (tc->tripod) {
+ av_log(ctx, AV_LOG_INFO, "Virtual tripod mode: relative=0, smoothing=0\n");
+ tc->conf.relative = 0;
+ tc->conf.smoothing = 0;
+ }
+ tc->conf.simpleMotionCalculation = 0;
+ tc->conf.storeTransforms = tc->debug;
+ tc->conf.smoothZoom = 0;
+
+ if (vsTransformDataInit(td, &tc->conf, &fi_src, &fi_dest) != VS_OK) {
+ av_log(ctx, AV_LOG_ERROR, "initialization of vid.stab transform failed, please report a BUG\n");
+ return AVERROR(EINVAL);
+ }
+
+ vsTransformGetConfig(&tc->conf, td);
+ av_log(ctx, AV_LOG_INFO, "Video transformation/stabilization settings (pass 2/2):\n");
+ av_log(ctx, AV_LOG_INFO, " input = %s\n", tc->input);
+ av_log(ctx, AV_LOG_INFO, " smoothing = %d\n", tc->conf.smoothing);
+ av_log(ctx, AV_LOG_INFO, " optalgo = %s\n",
+ tc->conf.camPathAlgo == VSOptimalL1 ? "opt" :
+ (tc->conf.camPathAlgo == VSGaussian ? "gauss" : "avg"));
+ av_log(ctx, AV_LOG_INFO, " maxshift = %d\n", tc->conf.maxShift);
+ av_log(ctx, AV_LOG_INFO, " maxangle = %f\n", tc->conf.maxAngle);
+ av_log(ctx, AV_LOG_INFO, " crop = %s\n", tc->conf.crop ? "Black" : "Keep");
+ av_log(ctx, AV_LOG_INFO, " relative = %s\n", tc->conf.relative ? "True": "False");
+ av_log(ctx, AV_LOG_INFO, " invert = %s\n", tc->conf.invert ? "True" : "False");
+ av_log(ctx, AV_LOG_INFO, " zoom = %f\n", tc->conf.zoom);
+ av_log(ctx, AV_LOG_INFO, " optzoom = %s\n",
+ tc->conf.optZoom == 1 ? "Static (1)" : (tc->conf.optZoom == 2 ? "Dynamic (2)" : "Off (0)"));
+ if (tc->conf.optZoom == 2)
+ av_log(ctx, AV_LOG_INFO, " zoomspeed = %g\n", tc->conf.zoomSpeed);
+ av_log(ctx, AV_LOG_INFO, " interpol = %s\n", getInterpolationTypeName(tc->conf.interpolType));
+
+ f = fopen(tc->input, "r");
+ if (!f) {
+ int ret = AVERROR(errno);
+ av_log(ctx, AV_LOG_ERROR, "cannot open input file %s\n", tc->input);
+ return ret;
+ } else {
+ VSManyLocalMotions mlms;
+ if (vsReadLocalMotionsFile(f, &mlms) == VS_OK) {
+ // calculate the actual transforms from the local motions
+ if (vsLocalmotions2Transforms(td, &mlms, &tc->trans) != VS_OK) {
+ av_log(ctx, AV_LOG_ERROR, "calculating transformations failed\n");
+ return AVERROR(EINVAL);
+ }
+ } else { // try to read old format
+ if (!vsReadOldTransforms(td, f, &tc->trans)) { /* read input file */
+ av_log(ctx, AV_LOG_ERROR, "error parsing input file %s\n", tc->input);
+ return AVERROR(EINVAL);
+ }
+ }
+ }
+ fclose(f);
+
+ if (vsPreprocessTransforms(td, &tc->trans) != VS_OK) {
+ av_log(ctx, AV_LOG_ERROR, "error while preprocessing transforms\n");
+ return AVERROR(EINVAL);
+ }
+
+ // TODO: add sharpening, so far the user needs to call the unsharp filter manually
+ return 0;
+}
+
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ TransformContext *tc = ctx->priv;
+ VSTransformData* td = &(tc->td);
+
+ AVFilterLink *outlink = inlink->dst->outputs[0];
+ int direct = 0;
+ AVFrame *out;
+ VSFrame inframe;
+ int plane;
+
+ if (av_frame_is_writable(in)) {
+ direct = 1;
+ out = in;
+ } else {
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+ }
+
+ for (plane = 0; plane < vsTransformGetSrcFrameInfo(td)->planes; plane++) {
+ inframe.data[plane] = in->data[plane];
+ inframe.linesize[plane] = in->linesize[plane];
+ }
+ if (direct) {
+ vsTransformPrepare(td, &inframe, &inframe);
+ } else { // separate frames
+ VSFrame outframe;
+ for (plane = 0; plane < vsTransformGetDestFrameInfo(td)->planes; plane++) {
+ outframe.data[plane] = out->data[plane];
+ outframe.linesize[plane] = out->linesize[plane];
+ }
+ vsTransformPrepare(td, &inframe, &outframe);
+ }
+
+ vsDoTransform(td, vsGetNextTransform(td, &tc->trans));
+
+ vsTransformFinish(td);
+
+ if (!direct)
+ av_frame_free(&in);
+
+ return ff_filter_frame(outlink, out);
+}
+
+static const AVFilterPad avfilter_vf_vidstabtransform_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ { NULL }
+};
+
+static const AVFilterPad avfilter_vf_vidstabtransform_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_vidstabtransform = {
+ .name = "vidstabtransform",
+ .description = NULL_IF_CONFIG_SMALL("Transform the frames, "
+ "pass 2 of 2 for stabilization "
+ "(see vidstabdetect for pass 1)."),
+ .priv_size = sizeof(TransformContext),
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = avfilter_vf_vidstabtransform_inputs,
+ .outputs = avfilter_vf_vidstabtransform_outputs,
+ .priv_class = &vidstabtransform_class,
+};
diff --git a/libavfilter/vf_vignette.c b/libavfilter/vf_vignette.c
new file mode 100644
index 0000000000..94b6c6ffff
--- /dev/null
+++ b/libavfilter/vf_vignette.c
@@ -0,0 +1,359 @@
+/*
+ * Copyright (c) 2013 Clément Bœsch
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <float.h> /* DBL_MAX */
+
+#include "libavutil/opt.h"
+#include "libavutil/eval.h"
+#include "libavutil/avassert.h"
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+static const char *const var_names[] = {
+ "w", // stream width
+ "h", // stream height
+ "n", // frame count
+ "pts", // presentation timestamp expressed in AV_TIME_BASE units
+ "r", // frame rate
+ "t", // timestamp expressed in seconds
+ "tb", // timebase
+ NULL
+};
+
+enum var_name {
+ VAR_W,
+ VAR_H,
+ VAR_N,
+ VAR_PTS,
+ VAR_R,
+ VAR_T,
+ VAR_TB,
+ VAR_NB
+};
+
+enum EvalMode {
+ EVAL_MODE_INIT,
+ EVAL_MODE_FRAME,
+ EVAL_MODE_NB
+};
+
+typedef struct {
+ const AVClass *class;
+ const AVPixFmtDescriptor *desc;
+ int backward;
+ int eval_mode; ///< EvalMode
+#define DEF_EXPR_FIELDS(name) AVExpr *name##_pexpr; char *name##_expr; double name
+ DEF_EXPR_FIELDS(angle);
+ DEF_EXPR_FIELDS(x0);
+ DEF_EXPR_FIELDS(y0);
+ double var_values[VAR_NB];
+ float *fmap;
+ int fmap_linesize;
+ double dmax;
+ float xscale, yscale;
+ uint32_t dither;
+ int do_dither;
+ AVRational aspect;
+ AVRational scale;
+} VignetteContext;
+
+#define OFFSET(x) offsetof(VignetteContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption vignette_options[] = {
+ { "angle", "set lens angle", OFFSET(angle_expr), AV_OPT_TYPE_STRING, {.str="PI/5"}, .flags = FLAGS },
+ { "a", "set lens angle", OFFSET(angle_expr), AV_OPT_TYPE_STRING, {.str="PI/5"}, .flags = FLAGS },
+ { "x0", "set circle center position on x-axis", OFFSET(x0_expr), AV_OPT_TYPE_STRING, {.str="w/2"}, .flags = FLAGS },
+ { "y0", "set circle center position on y-axis", OFFSET(y0_expr), AV_OPT_TYPE_STRING, {.str="h/2"}, .flags = FLAGS },
+ { "mode", "set forward/backward mode", OFFSET(backward), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, FLAGS, "mode" },
+ { "forward", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 0}, INT_MIN, INT_MAX, FLAGS, "mode"},
+ { "backward", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 1}, INT_MIN, INT_MAX, FLAGS, "mode"},
+ { "eval", "specify when to evaluate expressions", OFFSET(eval_mode), AV_OPT_TYPE_INT, {.i64 = EVAL_MODE_INIT}, 0, EVAL_MODE_NB-1, FLAGS, "eval" },
+ { "init", "eval expressions once during initialization", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_INIT}, .flags = FLAGS, .unit = "eval" },
+ { "frame", "eval expressions for each frame", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_FRAME}, .flags = FLAGS, .unit = "eval" },
+ { "dither", "set dithering", OFFSET(do_dither), AV_OPT_TYPE_BOOL, {.i64 = 1}, 0, 1, FLAGS },
+ { "aspect", "set aspect ratio", OFFSET(aspect), AV_OPT_TYPE_RATIONAL, {.dbl = 1}, 0, DBL_MAX, .flags = FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(vignette);
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ VignetteContext *s = ctx->priv;
+
+#define PARSE_EXPR(name) do { \
+ int ret = av_expr_parse(&s->name##_pexpr, s->name##_expr, var_names, \
+ NULL, NULL, NULL, NULL, 0, ctx); \
+ if (ret < 0) { \
+ av_log(ctx, AV_LOG_ERROR, "Unable to parse expression for '" \
+ AV_STRINGIFY(name) "'\n"); \
+ return ret; \
+ } \
+} while (0)
+
+ PARSE_EXPR(angle);
+ PARSE_EXPR(x0);
+ PARSE_EXPR(y0);
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ VignetteContext *s = ctx->priv;
+ av_freep(&s->fmap);
+ av_expr_free(s->angle_pexpr);
+ av_expr_free(s->x0_pexpr);
+ av_expr_free(s->y0_pexpr);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P,
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
+ AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
+ AV_PIX_FMT_GRAY8,
+ AV_PIX_FMT_NONE
+ };
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static double get_natural_factor(const VignetteContext *s, int x, int y)
+{
+ const int xx = (x - s->x0) * s->xscale;
+ const int yy = (y - s->y0) * s->yscale;
+ const double dnorm = hypot(xx, yy) / s->dmax;
+ if (dnorm > 1) {
+ return 0;
+ } else {
+ const double c = cos(s->angle * dnorm);
+ return (c*c)*(c*c); // do not remove braces, it helps compilers
+ }
+}
+
+#define TS2D(ts) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts))
+#define TS2T(ts, tb) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts) * av_q2d(tb))
+
+static void update_context(VignetteContext *s, AVFilterLink *inlink, AVFrame *frame)
+{
+ int x, y;
+ float *dst = s->fmap;
+ int dst_linesize = s->fmap_linesize;
+
+ if (frame) {
+ s->var_values[VAR_N] = inlink->frame_count_out;
+ s->var_values[VAR_T] = TS2T(frame->pts, inlink->time_base);
+ s->var_values[VAR_PTS] = TS2D(frame->pts);
+ } else {
+ s->var_values[VAR_N] = NAN;
+ s->var_values[VAR_T] = NAN;
+ s->var_values[VAR_PTS] = NAN;
+ }
+
+ s->angle = av_expr_eval(s->angle_pexpr, s->var_values, NULL);
+ s->x0 = av_expr_eval(s->x0_pexpr, s->var_values, NULL);
+ s->y0 = av_expr_eval(s->y0_pexpr, s->var_values, NULL);
+
+ if (isnan(s->x0) || isnan(s->y0) || isnan(s->angle))
+ s->eval_mode = EVAL_MODE_FRAME;
+
+ s->angle = av_clipf(s->angle, 0, M_PI_2);
+
+ if (s->backward) {
+ for (y = 0; y < inlink->h; y++) {
+ for (x = 0; x < inlink->w; x++)
+ dst[x] = 1. / get_natural_factor(s, x, y);
+ dst += dst_linesize;
+ }
+ } else {
+ for (y = 0; y < inlink->h; y++) {
+ for (x = 0; x < inlink->w; x++)
+ dst[x] = get_natural_factor(s, x, y);
+ dst += dst_linesize;
+ }
+ }
+}
+
+static inline double get_dither_value(VignetteContext *s)
+{
+ double dv = 0;
+ if (s->do_dither) {
+ dv = s->dither / (double)(1LL<<32);
+ s->dither = s->dither * 1664525 + 1013904223;
+ }
+ return dv;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ unsigned x, y, direct = 0;
+ AVFilterContext *ctx = inlink->dst;
+ VignetteContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFrame *out;
+
+ if (av_frame_is_writable(in)) {
+ direct = 1;
+ out = in;
+ } else {
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+ }
+
+ if (s->eval_mode == EVAL_MODE_FRAME)
+ update_context(s, inlink, in);
+
+ if (s->desc->flags & AV_PIX_FMT_FLAG_RGB) {
+ uint8_t *dst = out->data[0];
+ const uint8_t *src = in ->data[0];
+ const float *fmap = s->fmap;
+ const int dst_linesize = out->linesize[0];
+ const int src_linesize = in ->linesize[0];
+ const int fmap_linesize = s->fmap_linesize;
+
+ for (y = 0; y < inlink->h; y++) {
+ uint8_t *dstp = dst;
+ const uint8_t *srcp = src;
+
+ for (x = 0; x < inlink->w; x++, dstp += 3, srcp += 3) {
+ const float f = fmap[x];
+
+ dstp[0] = av_clip_uint8(srcp[0] * f + get_dither_value(s));
+ dstp[1] = av_clip_uint8(srcp[1] * f + get_dither_value(s));
+ dstp[2] = av_clip_uint8(srcp[2] * f + get_dither_value(s));
+ }
+ dst += dst_linesize;
+ src += src_linesize;
+ fmap += fmap_linesize;
+ }
+ } else {
+ int plane;
+
+ for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++) {
+ uint8_t *dst = out->data[plane];
+ const uint8_t *src = in ->data[plane];
+ const float *fmap = s->fmap;
+ const int dst_linesize = out->linesize[plane];
+ const int src_linesize = in ->linesize[plane];
+ const int fmap_linesize = s->fmap_linesize;
+ const int chroma = plane == 1 || plane == 2;
+ const int hsub = chroma ? s->desc->log2_chroma_w : 0;
+ const int vsub = chroma ? s->desc->log2_chroma_h : 0;
+ const int w = AV_CEIL_RSHIFT(inlink->w, hsub);
+ const int h = AV_CEIL_RSHIFT(inlink->h, vsub);
+
+ for (y = 0; y < h; y++) {
+ uint8_t *dstp = dst;
+ const uint8_t *srcp = src;
+
+ for (x = 0; x < w; x++) {
+ const double dv = get_dither_value(s);
+ if (chroma) *dstp++ = av_clip_uint8(fmap[x << hsub] * (*srcp++ - 127) + 127 + dv);
+ else *dstp++ = av_clip_uint8(fmap[x ] * *srcp++ + dv);
+ }
+ dst += dst_linesize;
+ src += src_linesize;
+ fmap += fmap_linesize << vsub;
+ }
+ }
+ }
+
+ if (!direct)
+ av_frame_free(&in);
+ return ff_filter_frame(outlink, out);
+}
+
+static int config_props(AVFilterLink *inlink)
+{
+ VignetteContext *s = inlink->dst->priv;
+ AVRational sar = inlink->sample_aspect_ratio;
+
+ s->desc = av_pix_fmt_desc_get(inlink->format);
+ s->var_values[VAR_W] = inlink->w;
+ s->var_values[VAR_H] = inlink->h;
+ s->var_values[VAR_TB] = av_q2d(inlink->time_base);
+ s->var_values[VAR_R] = inlink->frame_rate.num == 0 || inlink->frame_rate.den == 0 ?
+ NAN : av_q2d(inlink->frame_rate);
+
+ if (!sar.num || !sar.den)
+ sar.num = sar.den = 1;
+ if (sar.num > sar.den) {
+ s->xscale = av_q2d(av_div_q(sar, s->aspect));
+ s->yscale = 1;
+ } else {
+ s->yscale = av_q2d(av_div_q(s->aspect, sar));
+ s->xscale = 1;
+ }
+ s->dmax = hypot(inlink->w / 2., inlink->h / 2.);
+ av_log(s, AV_LOG_DEBUG, "xscale=%f yscale=%f dmax=%f\n",
+ s->xscale, s->yscale, s->dmax);
+
+ s->fmap_linesize = FFALIGN(inlink->w, 32);
+ s->fmap = av_malloc_array(s->fmap_linesize, inlink->h * sizeof(*s->fmap));
+ if (!s->fmap)
+ return AVERROR(ENOMEM);
+
+ if (s->eval_mode == EVAL_MODE_INIT)
+ update_context(s, inlink, NULL);
+
+ return 0;
+}
+
+static const AVFilterPad vignette_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_props,
+ },
+ { NULL }
+};
+
+static const AVFilterPad vignette_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_vignette = {
+ .name = "vignette",
+ .description = NULL_IF_CONFIG_SMALL("Make or reverse a vignette effect."),
+ .priv_size = sizeof(VignetteContext),
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = vignette_inputs,
+ .outputs = vignette_outputs,
+ .priv_class = &vignette_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+};
diff --git a/libavfilter/vf_w3fdif.c b/libavfilter/vf_w3fdif.c
new file mode 100644
index 0000000000..b7872db341
--- /dev/null
+++ b/libavfilter/vf_w3fdif.c
@@ -0,0 +1,596 @@
+/*
+ * Copyright (C) 2012 British Broadcasting Corporation, All Rights Reserved
+ * Author of de-interlace algorithm: Jim Easterbrook for BBC R&D
+ * Based on the process described by Martin Weston for BBC R&D
+ * Author of FFmpeg filter: Mark Himsley for BBC Broadcast Systems Development
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/common.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+#include "w3fdif.h"
+
+typedef struct W3FDIFContext {
+ const AVClass *class;
+ int filter; ///< 0 is simple, 1 is more complex
+ int deint; ///< which frames to deinterlace
+ int linesize[4]; ///< bytes of pixel data per line for each plane
+ int planeheight[4]; ///< height of each plane
+ int field; ///< which field are we on, 0 or 1
+ int eof;
+ int nb_planes;
+ AVFrame *prev, *cur, *next; ///< previous, current, next frames
+ int32_t **work_line; ///< lines we are calculating
+ int nb_threads;
+ int max;
+
+ W3FDIFDSPContext dsp;
+} W3FDIFContext;
+
+#define OFFSET(x) offsetof(W3FDIFContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+#define CONST(name, help, val, unit) { name, help, 0, AV_OPT_TYPE_CONST, {.i64=val}, 0, 0, FLAGS, unit }
+
+static const AVOption w3fdif_options[] = {
+ { "filter", "specify the filter", OFFSET(filter), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS, "filter" },
+ CONST("simple", NULL, 0, "filter"),
+ CONST("complex", NULL, 1, "filter"),
+ { "deint", "specify which frames to deinterlace", OFFSET(deint), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS, "deint" },
+ CONST("all", "deinterlace all frames", 0, "deint"),
+ CONST("interlaced", "only deinterlace frames marked as interlaced", 1, "deint"),
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(w3fdif);
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
+ AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P,
+ AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
+ AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
+ AV_PIX_FMT_YUVJ411P,
+ AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P,
+ AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP,
+ AV_PIX_FMT_GRAY8,
+ AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
+ AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
+ AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12,
+ AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV444P14,
+ AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10, AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14,
+ AV_PIX_FMT_NONE
+ };
+
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static void filter_simple_low(int32_t *work_line,
+ uint8_t *in_lines_cur[2],
+ const int16_t *coef, int linesize)
+{
+ int i;
+
+ for (i = 0; i < linesize; i++) {
+ *work_line = *in_lines_cur[0]++ * coef[0];
+ *work_line++ += *in_lines_cur[1]++ * coef[1];
+ }
+}
+
+static void filter_complex_low(int32_t *work_line,
+ uint8_t *in_lines_cur[4],
+ const int16_t *coef, int linesize)
+{
+ int i;
+
+ for (i = 0; i < linesize; i++) {
+ *work_line = *in_lines_cur[0]++ * coef[0];
+ *work_line += *in_lines_cur[1]++ * coef[1];
+ *work_line += *in_lines_cur[2]++ * coef[2];
+ *work_line++ += *in_lines_cur[3]++ * coef[3];
+ }
+}
+
+static void filter_simple_high(int32_t *work_line,
+ uint8_t *in_lines_cur[3],
+ uint8_t *in_lines_adj[3],
+ const int16_t *coef, int linesize)
+{
+ int i;
+
+ for (i = 0; i < linesize; i++) {
+ *work_line += *in_lines_cur[0]++ * coef[0];
+ *work_line += *in_lines_adj[0]++ * coef[0];
+ *work_line += *in_lines_cur[1]++ * coef[1];
+ *work_line += *in_lines_adj[1]++ * coef[1];
+ *work_line += *in_lines_cur[2]++ * coef[2];
+ *work_line++ += *in_lines_adj[2]++ * coef[2];
+ }
+}
+
+static void filter_complex_high(int32_t *work_line,
+ uint8_t *in_lines_cur[5],
+ uint8_t *in_lines_adj[5],
+ const int16_t *coef, int linesize)
+{
+ int i;
+
+ for (i = 0; i < linesize; i++) {
+ *work_line += *in_lines_cur[0]++ * coef[0];
+ *work_line += *in_lines_adj[0]++ * coef[0];
+ *work_line += *in_lines_cur[1]++ * coef[1];
+ *work_line += *in_lines_adj[1]++ * coef[1];
+ *work_line += *in_lines_cur[2]++ * coef[2];
+ *work_line += *in_lines_adj[2]++ * coef[2];
+ *work_line += *in_lines_cur[3]++ * coef[3];
+ *work_line += *in_lines_adj[3]++ * coef[3];
+ *work_line += *in_lines_cur[4]++ * coef[4];
+ *work_line++ += *in_lines_adj[4]++ * coef[4];
+ }
+}
+
+static void filter_scale(uint8_t *out_pixel, const int32_t *work_pixel, int linesize, int max)
+{
+ int j;
+
+ for (j = 0; j < linesize; j++, out_pixel++, work_pixel++)
+ *out_pixel = av_clip(*work_pixel, 0, 255 * 256 * 128) >> 15;
+}
+
+static void filter16_simple_low(int32_t *work_line,
+ uint8_t *in_lines_cur8[2],
+ const int16_t *coef, int linesize)
+{
+ uint16_t *in_lines_cur[2] = { (uint16_t *)in_lines_cur8[0], (uint16_t *)in_lines_cur8[1] };
+ int i;
+
+ linesize /= 2;
+ for (i = 0; i < linesize; i++) {
+ *work_line = *in_lines_cur[0]++ * coef[0];
+ *work_line++ += *in_lines_cur[1]++ * coef[1];
+ }
+}
+
+static void filter16_complex_low(int32_t *work_line,
+ uint8_t *in_lines_cur8[4],
+ const int16_t *coef, int linesize)
+{
+ uint16_t *in_lines_cur[4] = { (uint16_t *)in_lines_cur8[0],
+ (uint16_t *)in_lines_cur8[1],
+ (uint16_t *)in_lines_cur8[2],
+ (uint16_t *)in_lines_cur8[3] };
+ int i;
+
+ linesize /= 2;
+ for (i = 0; i < linesize; i++) {
+ *work_line = *in_lines_cur[0]++ * coef[0];
+ *work_line += *in_lines_cur[1]++ * coef[1];
+ *work_line += *in_lines_cur[2]++ * coef[2];
+ *work_line++ += *in_lines_cur[3]++ * coef[3];
+ }
+}
+
+static void filter16_simple_high(int32_t *work_line,
+ uint8_t *in_lines_cur8[3],
+ uint8_t *in_lines_adj8[3],
+ const int16_t *coef, int linesize)
+{
+ uint16_t *in_lines_cur[3] = { (uint16_t *)in_lines_cur8[0],
+ (uint16_t *)in_lines_cur8[1],
+ (uint16_t *)in_lines_cur8[2] };
+ uint16_t *in_lines_adj[3] = { (uint16_t *)in_lines_adj8[0],
+ (uint16_t *)in_lines_adj8[1],
+ (uint16_t *)in_lines_adj8[2] };
+ int i;
+
+ linesize /= 2;
+ for (i = 0; i < linesize; i++) {
+ *work_line += *in_lines_cur[0]++ * coef[0];
+ *work_line += *in_lines_adj[0]++ * coef[0];
+ *work_line += *in_lines_cur[1]++ * coef[1];
+ *work_line += *in_lines_adj[1]++ * coef[1];
+ *work_line += *in_lines_cur[2]++ * coef[2];
+ *work_line++ += *in_lines_adj[2]++ * coef[2];
+ }
+}
+
+static void filter16_complex_high(int32_t *work_line,
+ uint8_t *in_lines_cur8[5],
+ uint8_t *in_lines_adj8[5],
+ const int16_t *coef, int linesize)
+{
+ uint16_t *in_lines_cur[5] = { (uint16_t *)in_lines_cur8[0],
+ (uint16_t *)in_lines_cur8[1],
+ (uint16_t *)in_lines_cur8[2],
+ (uint16_t *)in_lines_cur8[3],
+ (uint16_t *)in_lines_cur8[4] };
+ uint16_t *in_lines_adj[5] = { (uint16_t *)in_lines_adj8[0],
+ (uint16_t *)in_lines_adj8[1],
+ (uint16_t *)in_lines_adj8[2],
+ (uint16_t *)in_lines_adj8[3],
+ (uint16_t *)in_lines_adj8[4] };
+ int i;
+
+ linesize /= 2;
+ for (i = 0; i < linesize; i++) {
+ *work_line += *in_lines_cur[0]++ * coef[0];
+ *work_line += *in_lines_adj[0]++ * coef[0];
+ *work_line += *in_lines_cur[1]++ * coef[1];
+ *work_line += *in_lines_adj[1]++ * coef[1];
+ *work_line += *in_lines_cur[2]++ * coef[2];
+ *work_line += *in_lines_adj[2]++ * coef[2];
+ *work_line += *in_lines_cur[3]++ * coef[3];
+ *work_line += *in_lines_adj[3]++ * coef[3];
+ *work_line += *in_lines_cur[4]++ * coef[4];
+ *work_line++ += *in_lines_adj[4]++ * coef[4];
+ }
+}
+
+static void filter16_scale(uint8_t *out_pixel8, const int32_t *work_pixel, int linesize, int max)
+{
+ uint16_t *out_pixel = (uint16_t *)out_pixel8;
+ int j;
+
+ linesize /= 2;
+ for (j = 0; j < linesize; j++, out_pixel++, work_pixel++)
+ *out_pixel = av_clip(*work_pixel, 0, max) >> 15;
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ W3FDIFContext *s = ctx->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ int ret, i, depth;
+
+ if ((ret = av_image_fill_linesizes(s->linesize, inlink->format, inlink->w)) < 0)
+ return ret;
+
+ s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
+ s->planeheight[0] = s->planeheight[3] = inlink->h;
+
+ s->nb_planes = av_pix_fmt_count_planes(inlink->format);
+ s->nb_threads = ff_filter_get_nb_threads(ctx);
+ s->work_line = av_calloc(s->nb_threads, sizeof(*s->work_line));
+ if (!s->work_line)
+ return AVERROR(ENOMEM);
+
+ for (i = 0; i < s->nb_threads; i++) {
+ s->work_line[i] = av_calloc(FFALIGN(s->linesize[0], 32), sizeof(*s->work_line[0]));
+ if (!s->work_line[i])
+ return AVERROR(ENOMEM);
+ }
+
+ depth = desc->comp[0].depth;
+ s->max = ((1 << depth) - 1) * 256 * 128;
+ if (depth <= 8) {
+ s->dsp.filter_simple_low = filter_simple_low;
+ s->dsp.filter_complex_low = filter_complex_low;
+ s->dsp.filter_simple_high = filter_simple_high;
+ s->dsp.filter_complex_high = filter_complex_high;
+ s->dsp.filter_scale = filter_scale;
+ } else {
+ s->dsp.filter_simple_low = filter16_simple_low;
+ s->dsp.filter_complex_low = filter16_complex_low;
+ s->dsp.filter_simple_high = filter16_simple_high;
+ s->dsp.filter_complex_high = filter16_complex_high;
+ s->dsp.filter_scale = filter16_scale;
+ }
+
+ if (ARCH_X86)
+ ff_w3fdif_init_x86(&s->dsp, depth);
+
+ return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterLink *inlink = outlink->src->inputs[0];
+
+ outlink->time_base.num = inlink->time_base.num;
+ outlink->time_base.den = inlink->time_base.den * 2;
+ outlink->frame_rate.num = inlink->frame_rate.num * 2;
+ outlink->frame_rate.den = inlink->frame_rate.den;
+
+ return 0;
+}
+
+/*
+ * Filter coefficients from PH-2071, scaled by 256 * 128.
+ * Each set of coefficients has a set for low-frequencies and high-frequencies.
+ * n_coef_lf[] and n_coef_hf[] are the number of coefs for simple and more-complex.
+ * It is important for later that n_coef_lf[] is even and n_coef_hf[] is odd.
+ * coef_lf[][] and coef_hf[][] are the coefficients for low-frequencies
+ * and high-frequencies for simple and more-complex mode.
+ */
+static const int8_t n_coef_lf[2] = { 2, 4 };
+static const int16_t coef_lf[2][4] = {{ 16384, 16384, 0, 0},
+ { -852, 17236, 17236, -852}};
+static const int8_t n_coef_hf[2] = { 3, 5 };
+static const int16_t coef_hf[2][5] = {{ -2048, 4096, -2048, 0, 0},
+ { 1016, -3801, 5570, -3801, 1016}};
+
+typedef struct ThreadData {
+ AVFrame *out, *cur, *adj;
+ int plane;
+} ThreadData;
+
+static int deinterlace_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
+{
+ W3FDIFContext *s = ctx->priv;
+ ThreadData *td = arg;
+ AVFrame *out = td->out;
+ AVFrame *cur = td->cur;
+ AVFrame *adj = td->adj;
+ const int plane = td->plane;
+ const int filter = s->filter;
+ uint8_t *in_line, *in_lines_cur[5], *in_lines_adj[5];
+ uint8_t *out_line, *out_pixel;
+ int32_t *work_line, *work_pixel;
+ uint8_t *cur_data = cur->data[plane];
+ uint8_t *adj_data = adj->data[plane];
+ uint8_t *dst_data = out->data[plane];
+ const int linesize = s->linesize[plane];
+ const int height = s->planeheight[plane];
+ const int cur_line_stride = cur->linesize[plane];
+ const int adj_line_stride = adj->linesize[plane];
+ const int dst_line_stride = out->linesize[plane];
+ const int start = (height * jobnr) / nb_jobs;
+ const int end = (height * (jobnr+1)) / nb_jobs;
+ const int max = s->max;
+ int j, y_in, y_out;
+
+ /* copy unchanged the lines of the field */
+ y_out = start + (s->field == cur->top_field_first) - (start & 1);
+
+ in_line = cur_data + (y_out * cur_line_stride);
+ out_line = dst_data + (y_out * dst_line_stride);
+
+ while (y_out < end) {
+ memcpy(out_line, in_line, linesize);
+ y_out += 2;
+ in_line += cur_line_stride * 2;
+ out_line += dst_line_stride * 2;
+ }
+
+ /* interpolate other lines of the field */
+ y_out = start + (s->field != cur->top_field_first) - (start & 1);
+
+ out_line = dst_data + (y_out * dst_line_stride);
+
+ while (y_out < end) {
+ /* get low vertical frequencies from current field */
+ for (j = 0; j < n_coef_lf[filter]; j++) {
+ y_in = (y_out + 1) + (j * 2) - n_coef_lf[filter];
+
+ while (y_in < 0)
+ y_in += 2;
+ while (y_in >= height)
+ y_in -= 2;
+
+ in_lines_cur[j] = cur_data + (y_in * cur_line_stride);
+ }
+
+ work_line = s->work_line[jobnr];
+ switch (n_coef_lf[filter]) {
+ case 2:
+ s->dsp.filter_simple_low(work_line, in_lines_cur,
+ coef_lf[filter], linesize);
+ break;
+ case 4:
+ s->dsp.filter_complex_low(work_line, in_lines_cur,
+ coef_lf[filter], linesize);
+ }
+
+ /* get high vertical frequencies from adjacent fields */
+ for (j = 0; j < n_coef_hf[filter]; j++) {
+ y_in = (y_out + 1) + (j * 2) - n_coef_hf[filter];
+
+ while (y_in < 0)
+ y_in += 2;
+ while (y_in >= height)
+ y_in -= 2;
+
+ in_lines_cur[j] = cur_data + (y_in * cur_line_stride);
+ in_lines_adj[j] = adj_data + (y_in * adj_line_stride);
+ }
+
+ work_line = s->work_line[jobnr];
+ switch (n_coef_hf[filter]) {
+ case 3:
+ s->dsp.filter_simple_high(work_line, in_lines_cur, in_lines_adj,
+ coef_hf[filter], linesize);
+ break;
+ case 5:
+ s->dsp.filter_complex_high(work_line, in_lines_cur, in_lines_adj,
+ coef_hf[filter], linesize);
+ }
+
+ /* save scaled result to the output frame, scaling down by 256 * 128 */
+ work_pixel = s->work_line[jobnr];
+ out_pixel = out_line;
+
+ s->dsp.filter_scale(out_pixel, work_pixel, linesize, max);
+
+ /* move on to next line */
+ y_out += 2;
+ out_line += dst_line_stride * 2;
+ }
+
+ return 0;
+}
+
+static int filter(AVFilterContext *ctx, int is_second)
+{
+ W3FDIFContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFrame *out, *adj;
+ ThreadData td;
+ int plane;
+
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out)
+ return AVERROR(ENOMEM);
+ av_frame_copy_props(out, s->cur);
+ out->interlaced_frame = 0;
+
+ if (!is_second) {
+ if (out->pts != AV_NOPTS_VALUE)
+ out->pts *= 2;
+ } else {
+ int64_t cur_pts = s->cur->pts;
+ int64_t next_pts = s->next->pts;
+
+ if (next_pts != AV_NOPTS_VALUE && cur_pts != AV_NOPTS_VALUE) {
+ out->pts = cur_pts + next_pts;
+ } else {
+ out->pts = AV_NOPTS_VALUE;
+ }
+ }
+
+ adj = s->field ? s->next : s->prev;
+ td.out = out; td.cur = s->cur; td.adj = adj;
+ for (plane = 0; plane < s->nb_planes; plane++) {
+ td.plane = plane;
+ ctx->internal->execute(ctx, deinterlace_slice, &td, NULL, FFMIN(s->planeheight[plane], s->nb_threads));
+ }
+
+ s->field = !s->field;
+
+ return ff_filter_frame(outlink, out);
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
+{
+ AVFilterContext *ctx = inlink->dst;
+ W3FDIFContext *s = ctx->priv;
+ int ret;
+
+ av_frame_free(&s->prev);
+ s->prev = s->cur;
+ s->cur = s->next;
+ s->next = frame;
+
+ if (!s->cur) {
+ s->cur = av_frame_clone(s->next);
+ if (!s->cur)
+ return AVERROR(ENOMEM);
+ }
+
+ if ((s->deint && !s->cur->interlaced_frame) || ctx->is_disabled) {
+ AVFrame *out = av_frame_clone(s->cur);
+ if (!out)
+ return AVERROR(ENOMEM);
+
+ av_frame_free(&s->prev);
+ if (out->pts != AV_NOPTS_VALUE)
+ out->pts *= 2;
+ return ff_filter_frame(ctx->outputs[0], out);
+ }
+
+ if (!s->prev)
+ return 0;
+
+ ret = filter(ctx, 0);
+ if (ret < 0)
+ return ret;
+
+ return filter(ctx, 1);
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ W3FDIFContext *s = ctx->priv;
+ int ret;
+
+ if (s->eof)
+ return AVERROR_EOF;
+
+ ret = ff_request_frame(ctx->inputs[0]);
+
+ if (ret == AVERROR_EOF && s->cur) {
+ AVFrame *next = av_frame_clone(s->next);
+ if (!next)
+ return AVERROR(ENOMEM);
+ next->pts = s->next->pts * 2 - s->cur->pts;
+ filter_frame(ctx->inputs[0], next);
+ s->eof = 1;
+ } else if (ret < 0) {
+ return ret;
+ }
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ W3FDIFContext *s = ctx->priv;
+ int i;
+
+ av_frame_free(&s->prev);
+ av_frame_free(&s->cur );
+ av_frame_free(&s->next);
+
+ for (i = 0; i < s->nb_threads; i++)
+ av_freep(&s->work_line[i]);
+
+ av_freep(&s->work_line);
+}
+
+static const AVFilterPad w3fdif_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ { NULL }
+};
+
+static const AVFilterPad w3fdif_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_w3fdif = {
+ .name = "w3fdif",
+ .description = NULL_IF_CONFIG_SMALL("Apply Martin Weston three field deinterlace."),
+ .priv_size = sizeof(W3FDIFContext),
+ .priv_class = &w3fdif_class,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = w3fdif_inputs,
+ .outputs = w3fdif_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
+};
diff --git a/libavfilter/vf_waveform.c b/libavfilter/vf_waveform.c
new file mode 100644
index 0000000000..70995eebee
--- /dev/null
+++ b/libavfilter/vf_waveform.c
@@ -0,0 +1,2833 @@
+/*
+ * Copyright (c) 2012-2016 Paul B Mahol
+ * Copyright (c) 2013 Marton Balint
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/opt.h"
+#include "libavutil/parseutils.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/xga_font_data.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+enum FilterType {
+ LOWPASS,
+ FLAT,
+ AFLAT,
+ CHROMA,
+ COLOR,
+ ACOLOR,
+ NB_FILTERS
+};
+
+enum DisplayType {
+ OVERLAY,
+ STACK,
+ PARADE,
+ NB_DISPLAYS
+};
+
+enum ScaleType {
+ DIGITAL,
+ MILLIVOLTS,
+ IRE,
+ NB_SCALES
+};
+
+typedef struct GraticuleLine {
+ const char *name;
+ uint16_t pos;
+} GraticuleLine;
+
+typedef struct GraticuleLines {
+ struct GraticuleLine line[4];
+} GraticuleLines;
+
+typedef struct WaveformContext {
+ const AVClass *class;
+ int mode;
+ int acomp;
+ int dcomp;
+ int ncomp;
+ int pcomp;
+ uint8_t bg_color[4];
+ float fintensity;
+ int intensity;
+ int mirror;
+ int display;
+ int envelope;
+ int graticule;
+ float opacity;
+ float bgopacity;
+ int estart[4];
+ int eend[4];
+ int *emax[4][4];
+ int *emin[4][4];
+ int *peak;
+ int filter;
+ int flags;
+ int bits;
+ int max;
+ int size;
+ int scale;
+ int shift_w[4], shift_h[4];
+ GraticuleLines *glines;
+ int nb_glines;
+ void (*waveform)(struct WaveformContext *s,
+ AVFrame *in, AVFrame *out,
+ int component, int intensity,
+ int offset_y, int offset_x,
+ int column, int mirror);
+ void (*graticulef)(struct WaveformContext *s, AVFrame *out);
+ const AVPixFmtDescriptor *desc;
+ const AVPixFmtDescriptor *odesc;
+} WaveformContext;
+
+#define OFFSET(x) offsetof(WaveformContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption waveform_options[] = {
+ { "mode", "set mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS, "mode" },
+ { "m", "set mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS, "mode" },
+ { "row", NULL, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "mode" },
+ { "column", NULL, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "mode" },
+ { "intensity", "set intensity", OFFSET(fintensity), AV_OPT_TYPE_FLOAT, {.dbl=0.04}, 0, 1, FLAGS },
+ { "i", "set intensity", OFFSET(fintensity), AV_OPT_TYPE_FLOAT, {.dbl=0.04}, 0, 1, FLAGS },
+ { "mirror", "set mirroring", OFFSET(mirror), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, FLAGS },
+ { "r", "set mirroring", OFFSET(mirror), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, FLAGS },
+ { "display", "set display mode", OFFSET(display), AV_OPT_TYPE_INT, {.i64=STACK}, 0, NB_DISPLAYS-1, FLAGS, "display" },
+ { "d", "set display mode", OFFSET(display), AV_OPT_TYPE_INT, {.i64=STACK}, 0, NB_DISPLAYS-1, FLAGS, "display" },
+ { "overlay", NULL, 0, AV_OPT_TYPE_CONST, {.i64=OVERLAY}, 0, 0, FLAGS, "display" },
+ { "stack", NULL, 0, AV_OPT_TYPE_CONST, {.i64=STACK}, 0, 0, FLAGS, "display" },
+ { "parade", NULL, 0, AV_OPT_TYPE_CONST, {.i64=PARADE}, 0, 0, FLAGS, "display" },
+ { "components", "set components to display", OFFSET(pcomp), AV_OPT_TYPE_INT, {.i64=1}, 1, 15, FLAGS },
+ { "c", "set components to display", OFFSET(pcomp), AV_OPT_TYPE_INT, {.i64=1}, 1, 15, FLAGS },
+ { "envelope", "set envelope to display", OFFSET(envelope), AV_OPT_TYPE_INT, {.i64=0}, 0, 3, FLAGS, "envelope" },
+ { "e", "set envelope to display", OFFSET(envelope), AV_OPT_TYPE_INT, {.i64=0}, 0, 3, FLAGS, "envelope" },
+ { "none", NULL, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "envelope" },
+ { "instant", NULL, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "envelope" },
+ { "peak", NULL, 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, FLAGS, "envelope" },
+ { "peak+instant", NULL, 0, AV_OPT_TYPE_CONST, {.i64=3}, 0, 0, FLAGS, "envelope" },
+ { "filter", "set filter", OFFSET(filter), AV_OPT_TYPE_INT, {.i64=0}, 0, NB_FILTERS-1, FLAGS, "filter" },
+ { "f", "set filter", OFFSET(filter), AV_OPT_TYPE_INT, {.i64=0}, 0, NB_FILTERS-1, FLAGS, "filter" },
+ { "lowpass", NULL, 0, AV_OPT_TYPE_CONST, {.i64=LOWPASS}, 0, 0, FLAGS, "filter" },
+ { "flat" , NULL, 0, AV_OPT_TYPE_CONST, {.i64=FLAT}, 0, 0, FLAGS, "filter" },
+ { "aflat" , NULL, 0, AV_OPT_TYPE_CONST, {.i64=AFLAT}, 0, 0, FLAGS, "filter" },
+ { "chroma", NULL, 0, AV_OPT_TYPE_CONST, {.i64=CHROMA}, 0, 0, FLAGS, "filter" },
+ { "color", NULL, 0, AV_OPT_TYPE_CONST, {.i64=COLOR}, 0, 0, FLAGS, "filter" },
+ { "acolor", NULL, 0, AV_OPT_TYPE_CONST, {.i64=ACOLOR}, 0, 0, FLAGS, "filter" },
+ { "graticule", "set graticule", OFFSET(graticule), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS, "graticule" },
+ { "g", "set graticule", OFFSET(graticule), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS, "graticule" },
+ { "none", NULL, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "graticule" },
+ { "green", NULL, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "graticule" },
+ { "opacity", "set graticule opacity", OFFSET(opacity), AV_OPT_TYPE_FLOAT, {.dbl=0.75}, 0, 1, FLAGS },
+ { "o", "set graticule opacity", OFFSET(opacity), AV_OPT_TYPE_FLOAT, {.dbl=0.75}, 0, 1, FLAGS },
+ { "flags", "set graticule flags", OFFSET(flags), AV_OPT_TYPE_FLAGS, {.i64=1}, 0, 3, FLAGS, "flags" },
+ { "fl", "set graticule flags", OFFSET(flags), AV_OPT_TYPE_FLAGS, {.i64=1}, 0, 3, FLAGS, "flags" },
+ { "numbers", "draw numbers", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "flags" },
+ { "dots", "draw dots instead of lines", 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, FLAGS, "flags" },
+ { "scale", "set scale", OFFSET(scale), AV_OPT_TYPE_INT, {.i64=0}, 0, NB_SCALES-1, FLAGS, "scale" },
+ { "s", "set scale", OFFSET(scale), AV_OPT_TYPE_INT, {.i64=0}, 0, NB_SCALES-1, FLAGS, "scale" },
+ { "digital", NULL, 0, AV_OPT_TYPE_CONST, {.i64=DIGITAL}, 0, 0, FLAGS, "scale" },
+ { "millivolts", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MILLIVOLTS}, 0, 0, FLAGS, "scale" },
+ { "ire", NULL, 0, AV_OPT_TYPE_CONST, {.i64=IRE}, 0, 0, FLAGS, "scale" },
+ { "bgopacity", "set background opacity", OFFSET(bgopacity), AV_OPT_TYPE_FLOAT, {.dbl=0.75}, 0, 1, FLAGS },
+ { "b", "set background opacity", OFFSET(bgopacity), AV_OPT_TYPE_FLOAT, {.dbl=0.75}, 0, 1, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(waveform);
+
+static const enum AVPixelFormat in_lowpass_pix_fmts[] = {
+ AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP,
+ AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10, AV_PIX_FMT_GBRP12,
+ AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P,
+ AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUVJ420P,
+ AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P,
+ AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA420P,
+ AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY10, AV_PIX_FMT_GRAY12,
+ AV_PIX_FMT_YUV444P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV420P9,
+ AV_PIX_FMT_YUVA444P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA420P9,
+ AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV420P10,
+ AV_PIX_FMT_YUVA444P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA420P10,
+ AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV440P12,
+ AV_PIX_FMT_NONE
+};
+
+static const enum AVPixelFormat in_color_pix_fmts[] = {
+ AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP,
+ AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10, AV_PIX_FMT_GBRP12,
+ AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P,
+ AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUVJ420P,
+ AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P,
+ AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA420P,
+ AV_PIX_FMT_YUV444P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV420P9,
+ AV_PIX_FMT_YUVA444P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA420P9,
+ AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV420P10,
+ AV_PIX_FMT_YUVA444P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA420P10,
+ AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV440P12,
+ AV_PIX_FMT_NONE
+};
+
+static const enum AVPixelFormat in_flat_pix_fmts[] = {
+ AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P,
+ AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUVJ420P,
+ AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P,
+ AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA420P,
+ AV_PIX_FMT_YUV444P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV420P9,
+ AV_PIX_FMT_YUVA444P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA420P9,
+ AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV420P10,
+ AV_PIX_FMT_YUVA444P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA420P10,
+ AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV440P12,
+ AV_PIX_FMT_NONE
+};
+
+static const enum AVPixelFormat out_rgb8_lowpass_pix_fmts[] = {
+ AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP,
+ AV_PIX_FMT_NONE
+};
+
+static const enum AVPixelFormat out_rgb9_lowpass_pix_fmts[] = {
+ AV_PIX_FMT_GBRP9,
+ AV_PIX_FMT_NONE
+};
+
+static const enum AVPixelFormat out_rgb10_lowpass_pix_fmts[] = {
+ AV_PIX_FMT_GBRP10,
+ AV_PIX_FMT_NONE
+};
+
+static const enum AVPixelFormat out_rgb12_lowpass_pix_fmts[] = {
+ AV_PIX_FMT_GBRP12,
+ AV_PIX_FMT_NONE
+};
+
+static const enum AVPixelFormat out_yuv8_lowpass_pix_fmts[] = {
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVA444P,
+ AV_PIX_FMT_NONE
+};
+
+static const enum AVPixelFormat out_yuv9_lowpass_pix_fmts[] = {
+ AV_PIX_FMT_YUV444P9, AV_PIX_FMT_YUVA444P9,
+ AV_PIX_FMT_NONE
+};
+
+static const enum AVPixelFormat out_yuv10_lowpass_pix_fmts[] = {
+ AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUVA444P10,
+ AV_PIX_FMT_NONE
+};
+
+static const enum AVPixelFormat out_yuv12_lowpass_pix_fmts[] = {
+ AV_PIX_FMT_YUV444P12,
+ AV_PIX_FMT_NONE
+};
+
+static const enum AVPixelFormat out_gray8_lowpass_pix_fmts[] = {
+ AV_PIX_FMT_GRAY8,
+ AV_PIX_FMT_NONE
+};
+
+static const enum AVPixelFormat out_gray10_lowpass_pix_fmts[] = {
+ AV_PIX_FMT_GRAY10,
+ AV_PIX_FMT_NONE
+};
+
+static const enum AVPixelFormat out_gray12_lowpass_pix_fmts[] = {
+ AV_PIX_FMT_GRAY12,
+ AV_PIX_FMT_NONE
+};
+
+static const enum AVPixelFormat flat_pix_fmts[] = {
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUVJ444P,
+ AV_PIX_FMT_YUV444P9, AV_PIX_FMT_YUV444P10,
+ AV_PIX_FMT_YUV444P12,
+ AV_PIX_FMT_NONE
+};
+
+static int query_formats(AVFilterContext *ctx)
+{
+ WaveformContext *s = ctx->priv;
+ const enum AVPixelFormat *out_pix_fmts;
+ const enum AVPixelFormat *in_pix_fmts;
+ const AVPixFmtDescriptor *desc;
+ AVFilterFormats *avff;
+ int depth, rgb, i, ret, ncomp;
+
+ if (!ctx->inputs[0]->in_formats ||
+ !ctx->inputs[0]->in_formats->nb_formats) {
+ return AVERROR(EAGAIN);
+ }
+
+ switch (s->filter) {
+ case LOWPASS: in_pix_fmts = in_lowpass_pix_fmts; break;
+ case CHROMA:
+ case AFLAT:
+ case FLAT: in_pix_fmts = in_flat_pix_fmts; break;
+ case ACOLOR:
+ case COLOR: in_pix_fmts = in_color_pix_fmts; break;
+ }
+
+ if (!ctx->inputs[0]->out_formats) {
+ if ((ret = ff_formats_ref(ff_make_format_list(in_pix_fmts), &ctx->inputs[0]->out_formats)) < 0)
+ return ret;
+ }
+
+ avff = ctx->inputs[0]->in_formats;
+ desc = av_pix_fmt_desc_get(avff->formats[0]);
+ ncomp = desc->nb_components;
+ rgb = desc->flags & AV_PIX_FMT_FLAG_RGB;
+ depth = desc->comp[0].depth;
+ for (i = 1; i < avff->nb_formats; i++) {
+ desc = av_pix_fmt_desc_get(avff->formats[i]);
+ if (rgb != (desc->flags & AV_PIX_FMT_FLAG_RGB) ||
+ depth != desc->comp[0].depth)
+ return AVERROR(EAGAIN);
+ }
+
+ if (s->filter == LOWPASS && ncomp == 1 && depth == 8)
+ out_pix_fmts = out_gray8_lowpass_pix_fmts;
+ else if (s->filter == LOWPASS && ncomp == 1 && depth == 10)
+ out_pix_fmts = out_gray10_lowpass_pix_fmts;
+ else if (s->filter == LOWPASS && ncomp == 1 && depth == 12)
+ out_pix_fmts = out_gray12_lowpass_pix_fmts;
+ else if (rgb && depth == 8 && ncomp > 2)
+ out_pix_fmts = out_rgb8_lowpass_pix_fmts;
+ else if (rgb && depth == 9 && ncomp > 2)
+ out_pix_fmts = out_rgb9_lowpass_pix_fmts;
+ else if (rgb && depth == 10 && ncomp > 2)
+ out_pix_fmts = out_rgb10_lowpass_pix_fmts;
+ else if (rgb && depth == 12 && ncomp > 2)
+ out_pix_fmts = out_rgb12_lowpass_pix_fmts;
+ else if (depth == 8 && ncomp > 2)
+ out_pix_fmts = out_yuv8_lowpass_pix_fmts;
+ else if (depth == 9 && ncomp > 2)
+ out_pix_fmts = out_yuv9_lowpass_pix_fmts;
+ else if (depth == 10 && ncomp > 2)
+ out_pix_fmts = out_yuv10_lowpass_pix_fmts;
+ else if (depth == 12 && ncomp > 2)
+ out_pix_fmts = out_yuv12_lowpass_pix_fmts;
+ else
+ return AVERROR(EAGAIN);
+ if ((ret = ff_formats_ref(ff_make_format_list(out_pix_fmts), &ctx->outputs[0]->in_formats)) < 0)
+ return ret;
+
+ return 0;
+}
+
+static void envelope_instant16(WaveformContext *s, AVFrame *out, int plane, int component, int offset)
+{
+ const int dst_linesize = out->linesize[component] / 2;
+ const int bg = s->bg_color[component] * (s->max / 256);
+ const int limit = s->max - 1;
+ const int dst_h = s->display == PARADE ? out->height / s->acomp : out->height;
+ const int dst_w = s->display == PARADE ? out->width / s->acomp : out->width;
+ const int start = s->estart[plane];
+ const int end = s->eend[plane];
+ uint16_t *dst;
+ int x, y;
+
+ if (s->mode) {
+ for (x = offset; x < offset + dst_w; x++) {
+ for (y = start; y < end; y++) {
+ dst = (uint16_t *)out->data[component] + y * dst_linesize + x;
+ if (dst[0] != bg) {
+ dst[0] = limit;
+ break;
+ }
+ }
+ for (y = end - 1; y >= start; y--) {
+ dst = (uint16_t *)out->data[component] + y * dst_linesize + x;
+ if (dst[0] != bg) {
+ dst[0] = limit;
+ break;
+ }
+ }
+ }
+ } else {
+ for (y = offset; y < offset + dst_h; y++) {
+ dst = (uint16_t *)out->data[component] + y * dst_linesize;
+ for (x = start; x < end; x++) {
+ if (dst[x] != bg) {
+ dst[x] = limit;
+ break;
+ }
+ }
+ for (x = end - 1; x >= start; x--) {
+ if (dst[x] != bg) {
+ dst[x] = limit;
+ break;
+ }
+ }
+ }
+ }
+}
+
+static void envelope_instant(WaveformContext *s, AVFrame *out, int plane, int component, int offset)
+{
+ const int dst_linesize = out->linesize[component];
+ const uint8_t bg = s->bg_color[component];
+ const int dst_h = s->display == PARADE ? out->height / s->acomp : out->height;
+ const int dst_w = s->display == PARADE ? out->width / s->acomp : out->width;
+ const int start = s->estart[plane];
+ const int end = s->eend[plane];
+ uint8_t *dst;
+ int x, y;
+
+ if (s->mode) {
+ for (x = offset; x < offset + dst_w; x++) {
+ for (y = start; y < end; y++) {
+ dst = out->data[component] + y * dst_linesize + x;
+ if (dst[0] != bg) {
+ dst[0] = 255;
+ break;
+ }
+ }
+ for (y = end - 1; y >= start; y--) {
+ dst = out->data[component] + y * dst_linesize + x;
+ if (dst[0] != bg) {
+ dst[0] = 255;
+ break;
+ }
+ }
+ }
+ } else {
+ for (y = offset; y < offset + dst_h; y++) {
+ dst = out->data[component] + y * dst_linesize;
+ for (x = start; x < end; x++) {
+ if (dst[x] != bg) {
+ dst[x] = 255;
+ break;
+ }
+ }
+ for (x = end - 1; x >= start; x--) {
+ if (dst[x] != bg) {
+ dst[x] = 255;
+ break;
+ }
+ }
+ }
+ }
+}
+
+static void envelope_peak16(WaveformContext *s, AVFrame *out, int plane, int component, int offset)
+{
+ const int dst_linesize = out->linesize[component] / 2;
+ const int bg = s->bg_color[component] * (s->max / 256);
+ const int limit = s->max - 1;
+ const int dst_h = s->display == PARADE ? out->height / s->acomp : out->height;
+ const int dst_w = s->display == PARADE ? out->width / s->acomp : out->width;
+ const int start = s->estart[plane];
+ const int end = s->eend[plane];
+ int *emax = s->emax[plane][component];
+ int *emin = s->emin[plane][component];
+ uint16_t *dst;
+ int x, y;
+
+ if (s->mode) {
+ for (x = offset; x < offset + dst_w; x++) {
+ for (y = start; y < end && y < emin[x - offset]; y++) {
+ dst = (uint16_t *)out->data[component] + y * dst_linesize + x;
+ if (dst[0] != bg) {
+ emin[x - offset] = y;
+ break;
+ }
+ }
+ for (y = end - 1; y >= start && y >= emax[x - offset]; y--) {
+ dst = (uint16_t *)out->data[component] + y * dst_linesize + x;
+ if (dst[0] != bg) {
+ emax[x - offset] = y;
+ break;
+ }
+ }
+ }
+
+ if (s->envelope == 3)
+ envelope_instant16(s, out, plane, component, offset);
+
+ for (x = offset; x < offset + dst_w; x++) {
+ dst = (uint16_t *)out->data[component] + emin[x - offset] * dst_linesize + x;
+ dst[0] = limit;
+ dst = (uint16_t *)out->data[component] + emax[x - offset] * dst_linesize + x;
+ dst[0] = limit;
+ }
+ } else {
+ for (y = offset; y < offset + dst_h; y++) {
+ dst = (uint16_t *)out->data[component] + y * dst_linesize;
+ for (x = start; x < end && x < emin[y - offset]; x++) {
+ if (dst[x] != bg) {
+ emin[y - offset] = x;
+ break;
+ }
+ }
+ for (x = end - 1; x >= start && x >= emax[y - offset]; x--) {
+ if (dst[x] != bg) {
+ emax[y - offset] = x;
+ break;
+ }
+ }
+ }
+
+ if (s->envelope == 3)
+ envelope_instant16(s, out, plane, component, offset);
+
+ for (y = offset; y < offset + dst_h; y++) {
+ dst = (uint16_t *)out->data[component] + y * dst_linesize + emin[y - offset];
+ dst[0] = limit;
+ dst = (uint16_t *)out->data[component] + y * dst_linesize + emax[y - offset];
+ dst[0] = limit;
+ }
+ }
+}
+
+static void envelope_peak(WaveformContext *s, AVFrame *out, int plane, int component, int offset)
+{
+ const int dst_linesize = out->linesize[component];
+ const int bg = s->bg_color[component];
+ const int dst_h = s->display == PARADE ? out->height / s->acomp : out->height;
+ const int dst_w = s->display == PARADE ? out->width / s->acomp : out->width;
+ const int start = s->estart[plane];
+ const int end = s->eend[plane];
+ int *emax = s->emax[plane][component];
+ int *emin = s->emin[plane][component];
+ uint8_t *dst;
+ int x, y;
+
+ if (s->mode) {
+ for (x = offset; x < offset + dst_w; x++) {
+ for (y = start; y < end && y < emin[x - offset]; y++) {
+ dst = out->data[component] + y * dst_linesize + x;
+ if (dst[0] != bg) {
+ emin[x - offset] = y;
+ break;
+ }
+ }
+ for (y = end - 1; y >= start && y >= emax[x - offset]; y--) {
+ dst = out->data[component] + y * dst_linesize + x;
+ if (dst[0] != bg) {
+ emax[x - offset] = y;
+ break;
+ }
+ }
+ }
+
+ if (s->envelope == 3)
+ envelope_instant(s, out, plane, component, offset);
+
+ for (x = offset; x < offset + dst_w; x++) {
+ dst = out->data[component] + emin[x - offset] * dst_linesize + x;
+ dst[0] = 255;
+ dst = out->data[component] + emax[x - offset] * dst_linesize + x;
+ dst[0] = 255;
+ }
+ } else {
+ for (y = offset; y < offset + dst_h; y++) {
+ dst = out->data[component] + y * dst_linesize;
+ for (x = start; x < end && x < emin[y - offset]; x++) {
+ if (dst[x] != bg) {
+ emin[y - offset] = x;
+ break;
+ }
+ }
+ for (x = end - 1; x >= start && x >= emax[y - offset]; x--) {
+ if (dst[x] != bg) {
+ emax[y - offset] = x;
+ break;
+ }
+ }
+ }
+
+ if (s->envelope == 3)
+ envelope_instant(s, out, plane, component, offset);
+
+ for (y = offset; y < offset + dst_h; y++) {
+ dst = out->data[component] + y * dst_linesize + emin[y - offset];
+ dst[0] = 255;
+ dst = out->data[component] + y * dst_linesize + emax[y - offset];
+ dst[0] = 255;
+ }
+ }
+}
+
+static void envelope16(WaveformContext *s, AVFrame *out, int plane, int component, int offset)
+{
+ if (s->envelope == 0) {
+ return;
+ } else if (s->envelope == 1) {
+ envelope_instant16(s, out, plane, component, offset);
+ } else {
+ envelope_peak16(s, out, plane, component, offset);
+ }
+}
+
+static void envelope(WaveformContext *s, AVFrame *out, int plane, int component, int offset)
+{
+ if (s->envelope == 0) {
+ return;
+ } else if (s->envelope == 1) {
+ envelope_instant(s, out, plane, component, offset);
+ } else {
+ envelope_peak(s, out, plane, component, offset);
+ }
+}
+
+static void update16(uint16_t *target, int max, int intensity, int limit)
+{
+ if (*target <= max)
+ *target += intensity;
+ else
+ *target = limit;
+}
+
+static void update(uint8_t *target, int max, int intensity)
+{
+ if (*target <= max)
+ *target += intensity;
+ else
+ *target = 255;
+}
+
+static av_always_inline void lowpass16(WaveformContext *s,
+ AVFrame *in, AVFrame *out,
+ int component, int intensity,
+ int offset_y, int offset_x,
+ int column, int mirror)
+{
+ const int plane = s->desc->comp[component].plane;
+ const int shift_w = s->shift_w[component];
+ const int shift_h = s->shift_h[component];
+ const int src_linesize = in->linesize[plane] / 2;
+ const int dst_linesize = out->linesize[plane] / 2;
+ const int dst_signed_linesize = dst_linesize * (mirror == 1 ? -1 : 1);
+ const int limit = s->max - 1;
+ const int max = limit - intensity;
+ const int src_h = AV_CEIL_RSHIFT(in->height, shift_h);
+ const int src_w = AV_CEIL_RSHIFT(in->width, shift_w);
+ const uint16_t *src_data = (const uint16_t *)in->data[plane];
+ uint16_t *dst_data = (uint16_t *)out->data[plane] + offset_y * dst_linesize + offset_x;
+ uint16_t * const dst_bottom_line = dst_data + dst_linesize * (s->size - 1);
+ uint16_t * const dst_line = (mirror ? dst_bottom_line : dst_data);
+ const int step = column ? 1 << shift_w : 1 << shift_h;
+ const uint16_t *p;
+ int y;
+
+ if (!column && mirror)
+ dst_data += s->size;
+
+ for (y = 0; y < src_h; y++) {
+ const uint16_t *src_data_end = src_data + src_w;
+ uint16_t *dst = dst_line;
+
+ for (p = src_data; p < src_data_end; p++) {
+ uint16_t *target;
+ int i = 0, v = FFMIN(*p, limit);
+
+ if (column) {
+ do {
+ target = dst++ + dst_signed_linesize * v;
+ update16(target, max, intensity, limit);
+ } while (++i < step);
+ } else {
+ uint16_t *row = dst_data;
+ do {
+ if (mirror)
+ target = row - v - 1;
+ else
+ target = row + v;
+ update16(target, max, intensity, limit);
+ row += dst_linesize;
+ } while (++i < step);
+ }
+ }
+ src_data += src_linesize;
+ dst_data += dst_linesize * step;
+ }
+
+ envelope16(s, out, plane, plane, column ? offset_x : offset_y);
+}
+
+#define LOWPASS16_FUNC(name, column, mirror) \
+static void lowpass16_##name(WaveformContext *s, \
+ AVFrame *in, AVFrame *out, \
+ int component, int intensity, \
+ int offset_y, int offset_x, \
+ int unused1, int unused2) \
+{ \
+ lowpass16(s, in, out, component, intensity, \
+ offset_y, offset_x, column, mirror); \
+}
+
+LOWPASS16_FUNC(column_mirror, 1, 1)
+LOWPASS16_FUNC(column, 1, 0)
+LOWPASS16_FUNC(row_mirror, 0, 1)
+LOWPASS16_FUNC(row, 0, 0)
+
+static av_always_inline void lowpass(WaveformContext *s,
+ AVFrame *in, AVFrame *out,
+ int component, int intensity,
+ int offset_y, int offset_x,
+ int column, int mirror)
+{
+ const int plane = s->desc->comp[component].plane;
+ const int shift_w = s->shift_w[component];
+ const int shift_h = s->shift_h[component];
+ const int src_linesize = in->linesize[plane];
+ const int dst_linesize = out->linesize[plane];
+ const int dst_signed_linesize = dst_linesize * (mirror == 1 ? -1 : 1);
+ const int max = 255 - intensity;
+ const int src_h = AV_CEIL_RSHIFT(in->height, shift_h);
+ const int src_w = AV_CEIL_RSHIFT(in->width, shift_w);
+ const uint8_t *src_data = in->data[plane];
+ uint8_t *dst_data = out->data[plane] + offset_y * dst_linesize + offset_x;
+ uint8_t * const dst_bottom_line = dst_data + dst_linesize * (s->size - 1);
+ uint8_t * const dst_line = (mirror ? dst_bottom_line : dst_data);
+ const int step = column ? 1 << shift_w : 1 << shift_h;
+ const uint8_t *p;
+ int y;
+
+ if (!column && mirror)
+ dst_data += s->size;
+
+ for (y = 0; y < src_h; y++) {
+ const uint8_t *src_data_end = src_data + src_w;
+ uint8_t *dst = dst_line;
+
+ for (p = src_data; p < src_data_end; p++) {
+ uint8_t *target;
+ if (column) {
+ target = dst + dst_signed_linesize * *p;
+ dst += step;
+ update(target, max, intensity);
+ } else {
+ uint8_t *row = dst_data;
+ if (mirror)
+ target = row - *p - 1;
+ else
+ target = row + *p;
+ update(target, max, intensity);
+ row += dst_linesize;
+ }
+ }
+ src_data += src_linesize;
+ dst_data += dst_linesize * step;
+ }
+
+ if (column && step > 1) {
+ const int dst_w = s->display == PARADE ? out->width / s->acomp : out->width;
+ const int dst_h = 256;
+ uint8_t *dst;
+ int x, z;
+
+ dst = out->data[plane] + offset_y * dst_linesize + offset_x;
+ for (y = 0; y < dst_h; y++) {
+ for (x = 0; x < dst_w; x+=step) {
+ for (z = 1; z < step; z++) {
+ dst[x + z] = dst[x];
+ }
+ }
+ dst += dst_linesize;
+ }
+ } else if (step > 1) {
+ const int dst_h = s->display == PARADE ? out->height / s->acomp : out->height;
+ const int dst_w = 256;
+ uint8_t *dst;
+ int z;
+
+ dst = out->data[plane] + offset_y * dst_linesize + offset_x;
+ for (y = 0; y < dst_h; y+=step) {
+ for (z = 1; z < step; z++)
+ memcpy(dst + dst_linesize * z, dst, dst_w);
+ dst += dst_linesize * step;
+ }
+ }
+
+ envelope(s, out, plane, plane, column ? offset_x : offset_y);
+}
+
+#define LOWPASS_FUNC(name, column, mirror) \
+static void lowpass_##name(WaveformContext *s, \
+ AVFrame *in, AVFrame *out, \
+ int component, int intensity, \
+ int offset_y, int offset_x, \
+ int unused1, int unused2) \
+{ \
+ lowpass(s, in, out, component, intensity, \
+ offset_y, offset_x, column, mirror); \
+}
+
+LOWPASS_FUNC(column_mirror, 1, 1)
+LOWPASS_FUNC(column, 1, 0)
+LOWPASS_FUNC(row_mirror, 0, 1)
+LOWPASS_FUNC(row, 0, 0)
+
+static av_always_inline void flat16(WaveformContext *s,
+ AVFrame *in, AVFrame *out,
+ int component, int intensity,
+ int offset_y, int offset_x,
+ int column, int mirror)
+{
+ const int plane = s->desc->comp[component].plane;
+ const int c0_linesize = in->linesize[ plane + 0 ] / 2;
+ const int c1_linesize = in->linesize[(plane + 1) % s->ncomp] / 2;
+ const int c2_linesize = in->linesize[(plane + 2) % s->ncomp] / 2;
+ const int c0_shift_w = s->shift_w[ component + 0 ];
+ const int c1_shift_w = s->shift_w[(component + 1) % s->ncomp];
+ const int c2_shift_w = s->shift_w[(component + 2) % s->ncomp];
+ const int c0_shift_h = s->shift_h[ component + 0 ];
+ const int c1_shift_h = s->shift_h[(component + 1) % s->ncomp];
+ const int c2_shift_h = s->shift_h[(component + 2) % s->ncomp];
+ const int d0_linesize = out->linesize[ plane + 0 ] / 2;
+ const int d1_linesize = out->linesize[(plane + 1) % s->ncomp] / 2;
+ const int limit = s->max - 1;
+ const int max = limit - intensity;
+ const int mid = s->max / 2;
+ const int src_h = in->height;
+ const int src_w = in->width;
+ int x, y;
+
+ if (column) {
+ const int d0_signed_linesize = d0_linesize * (mirror == 1 ? -1 : 1);
+ const int d1_signed_linesize = d1_linesize * (mirror == 1 ? -1 : 1);
+
+ for (x = 0; x < src_w; x++) {
+ const uint16_t *c0_data = (uint16_t *)in->data[plane + 0];
+ const uint16_t *c1_data = (uint16_t *)in->data[(plane + 1) % s->ncomp];
+ const uint16_t *c2_data = (uint16_t *)in->data[(plane + 2) % s->ncomp];
+ uint16_t *d0_data = (uint16_t *)(out->data[plane]) + offset_y * d0_linesize + offset_x;
+ uint16_t *d1_data = (uint16_t *)(out->data[(plane + 1) % s->ncomp]) + offset_y * d1_linesize + offset_x;
+ uint16_t * const d0_bottom_line = d0_data + d0_linesize * (s->size - 1);
+ uint16_t * const d0 = (mirror ? d0_bottom_line : d0_data);
+ uint16_t * const d1_bottom_line = d1_data + d1_linesize * (s->size - 1);
+ uint16_t * const d1 = (mirror ? d1_bottom_line : d1_data);
+
+ for (y = 0; y < src_h; y++) {
+ const int c0 = FFMIN(c0_data[x >> c0_shift_w], limit) + s->max;
+ const int c1 = FFMIN(FFABS(c1_data[x >> c1_shift_w] - mid) + FFABS(c2_data[x >> c2_shift_w] - mid), limit);
+ uint16_t *target;
+
+ target = d0 + x + d0_signed_linesize * c0;
+ update16(target, max, intensity, limit);
+ target = d1 + x + d1_signed_linesize * (c0 - c1);
+ update16(target, max, intensity, limit);
+ target = d1 + x + d1_signed_linesize * (c0 + c1);
+ update16(target, max, intensity, limit);
+
+ if (!c0_shift_h || (y & c0_shift_h))
+ c0_data += c0_linesize;
+ if (!c1_shift_h || (y & c1_shift_h))
+ c1_data += c1_linesize;
+ if (!c2_shift_h || (y & c2_shift_h))
+ c2_data += c2_linesize;
+ d0_data += d0_linesize;
+ d1_data += d1_linesize;
+ }
+ }
+ } else {
+ const uint16_t *c0_data = (uint16_t *)in->data[plane];
+ const uint16_t *c1_data = (uint16_t *)in->data[(plane + 1) % s->ncomp];
+ const uint16_t *c2_data = (uint16_t *)in->data[(plane + 2) % s->ncomp];
+ uint16_t *d0_data = (uint16_t *)(out->data[plane]) + offset_y * d0_linesize + offset_x;
+ uint16_t *d1_data = (uint16_t *)(out->data[(plane + 1) % s->ncomp]) + offset_y * d1_linesize + offset_x;
+
+ if (mirror) {
+ d0_data += s->size - 1;
+ d1_data += s->size - 1;
+ }
+
+ for (y = 0; y < src_h; y++) {
+ for (x = 0; x < src_w; x++) {
+ const int c0 = FFMIN(c0_data[x >> c0_shift_w], limit) + s->max;
+ const int c1 = FFMIN(FFABS(c1_data[x >> c1_shift_w] - mid) + FFABS(c2_data[x >> c2_shift_w] - mid), limit);
+ uint16_t *target;
+
+ if (mirror) {
+ target = d0_data - c0;
+ update16(target, max, intensity, limit);
+ target = d1_data - (c0 - c1);
+ update16(target, max, intensity, limit);
+ target = d1_data - (c0 + c1);
+ update16(target, max, intensity, limit);
+ } else {
+ target = d0_data + c0;
+ update16(target, max, intensity, limit);
+ target = d1_data + (c0 - c1);
+ update16(target, max, intensity, limit);
+ target = d1_data + (c0 + c1);
+ update16(target, max, intensity, limit);
+ }
+ }
+
+ if (!c0_shift_h || (y & c0_shift_h))
+ c0_data += c0_linesize;
+ if (!c1_shift_h || (y & c1_shift_h))
+ c1_data += c1_linesize;
+ if (!c2_shift_h || (y & c2_shift_h))
+ c2_data += c2_linesize;
+ d0_data += d0_linesize;
+ d1_data += d1_linesize;
+ }
+ }
+
+ envelope16(s, out, plane, plane, column ? offset_x : offset_y);
+ envelope16(s, out, plane, (plane + 1) % s->ncomp, column ? offset_x : offset_y);
+}
+
+static av_always_inline void flat(WaveformContext *s,
+ AVFrame *in, AVFrame *out,
+ int component, int intensity,
+ int offset_y, int offset_x,
+ int column, int mirror)
+{
+ const int plane = s->desc->comp[component].plane;
+ const int c0_linesize = in->linesize[ plane + 0 ];
+ const int c1_linesize = in->linesize[(plane + 1) % s->ncomp];
+ const int c2_linesize = in->linesize[(plane + 2) % s->ncomp];
+ const int c0_shift_w = s->shift_w[ component + 0 ];
+ const int c1_shift_w = s->shift_w[(component + 1) % s->ncomp];
+ const int c2_shift_w = s->shift_w[(component + 2) % s->ncomp];
+ const int c0_shift_h = s->shift_h[ component + 0 ];
+ const int c1_shift_h = s->shift_h[(component + 1) % s->ncomp];
+ const int c2_shift_h = s->shift_h[(component + 2) % s->ncomp];
+ const int d0_linesize = out->linesize[ plane + 0 ];
+ const int d1_linesize = out->linesize[(plane + 1) % s->ncomp];
+ const int max = 255 - intensity;
+ const int src_h = in->height;
+ const int src_w = in->width;
+ int x, y;
+
+ if (column) {
+ const int d0_signed_linesize = d0_linesize * (mirror == 1 ? -1 : 1);
+ const int d1_signed_linesize = d1_linesize * (mirror == 1 ? -1 : 1);
+
+ for (x = 0; x < src_w; x++) {
+ const uint8_t *c0_data = in->data[plane + 0];
+ const uint8_t *c1_data = in->data[(plane + 1) % s->ncomp];
+ const uint8_t *c2_data = in->data[(plane + 2) % s->ncomp];
+ uint8_t *d0_data = out->data[plane] + offset_y * d0_linesize + offset_x;
+ uint8_t *d1_data = out->data[(plane + 1) % s->ncomp] + offset_y * d1_linesize + offset_x;
+ uint8_t * const d0_bottom_line = d0_data + d0_linesize * (s->size - 1);
+ uint8_t * const d0 = (mirror ? d0_bottom_line : d0_data);
+ uint8_t * const d1_bottom_line = d1_data + d1_linesize * (s->size - 1);
+ uint8_t * const d1 = (mirror ? d1_bottom_line : d1_data);
+
+ for (y = 0; y < src_h; y++) {
+ const int c0 = c0_data[x >> c0_shift_w] + 256;
+ const int c1 = FFABS(c1_data[x >> c1_shift_w] - 128) + FFABS(c2_data[x >> c2_shift_w] - 128);
+ uint8_t *target;
+
+ target = d0 + x + d0_signed_linesize * c0;
+ update(target, max, intensity);
+ target = d1 + x + d1_signed_linesize * (c0 - c1);
+ update(target, max, intensity);
+ target = d1 + x + d1_signed_linesize * (c0 + c1);
+ update(target, max, intensity);
+
+ if (!c0_shift_h || (y & c0_shift_h))
+ c0_data += c0_linesize;
+ if (!c1_shift_h || (y & c1_shift_h))
+ c1_data += c1_linesize;
+ if (!c2_shift_h || (y & c2_shift_h))
+ c2_data += c2_linesize;
+ d0_data += d0_linesize;
+ d1_data += d1_linesize;
+ }
+ }
+ } else {
+ const uint8_t *c0_data = in->data[plane];
+ const uint8_t *c1_data = in->data[(plane + 1) % s->ncomp];
+ const uint8_t *c2_data = in->data[(plane + 2) % s->ncomp];
+ uint8_t *d0_data = out->data[plane] + offset_y * d0_linesize + offset_x;
+ uint8_t *d1_data = out->data[(plane + 1) % s->ncomp] + offset_y * d1_linesize + offset_x;
+
+ if (mirror) {
+ d0_data += s->size - 1;
+ d1_data += s->size - 1;
+ }
+
+ for (y = 0; y < src_h; y++) {
+ for (x = 0; x < src_w; x++) {
+ int c0 = c0_data[x >> c0_shift_w] + 256;
+ const int c1 = FFABS(c1_data[x >> c1_shift_w] - 128) + FFABS(c2_data[x >> c2_shift_w] - 128);
+ uint8_t *target;
+
+ if (mirror) {
+ target = d0_data - c0;
+ update(target, max, intensity);
+ target = d1_data - (c0 - c1);
+ update(target, max, intensity);
+ target = d1_data - (c0 + c1);
+ update(target, max, intensity);
+ } else {
+ target = d0_data + c0;
+ update(target, max, intensity);
+ target = d1_data + (c0 - c1);
+ update(target, max, intensity);
+ target = d1_data + (c0 + c1);
+ update(target, max, intensity);
+ }
+ }
+
+ if (!c0_shift_h || (y & c0_shift_h))
+ c0_data += c0_linesize;
+ if (!c1_shift_h || (y & c1_shift_h))
+ c1_data += c1_linesize;
+ if (!c2_shift_h || (y & c2_shift_h))
+ c2_data += c2_linesize;
+ d0_data += d0_linesize;
+ d1_data += d1_linesize;
+ }
+ }
+
+ envelope(s, out, plane, plane, column ? offset_x : offset_y);
+ envelope(s, out, plane, (plane + 1) % s->ncomp, column ? offset_x : offset_y);
+}
+
+static av_always_inline void aflat16(WaveformContext *s,
+ AVFrame *in, AVFrame *out,
+ int component, int intensity,
+ int offset_y, int offset_x,
+ int column, int mirror)
+{
+ const int plane = s->desc->comp[component].plane;
+ const int c0_linesize = in->linesize[ plane + 0 ] / 2;
+ const int c1_linesize = in->linesize[(plane + 1) % s->ncomp] / 2;
+ const int c2_linesize = in->linesize[(plane + 2) % s->ncomp] / 2;
+ const int c0_shift_w = s->shift_w[ component + 0 ];
+ const int c1_shift_w = s->shift_w[(component + 1) % s->ncomp];
+ const int c2_shift_w = s->shift_w[(component + 2) % s->ncomp];
+ const int c0_shift_h = s->shift_h[ component + 0 ];
+ const int c1_shift_h = s->shift_h[(component + 1) % s->ncomp];
+ const int c2_shift_h = s->shift_h[(component + 2) % s->ncomp];
+ const int d0_linesize = out->linesize[ plane + 0 ] / 2;
+ const int d1_linesize = out->linesize[(plane + 1) % s->ncomp] / 2;
+ const int d2_linesize = out->linesize[(plane + 2) % s->ncomp] / 2;
+ const int limit = s->max - 1;
+ const int max = limit - intensity;
+ const int mid = s->max / 2;
+ const int src_h = in->height;
+ const int src_w = in->width;
+ int x, y;
+
+ if (column) {
+ const int d0_signed_linesize = d0_linesize * (mirror == 1 ? -1 : 1);
+ const int d1_signed_linesize = d1_linesize * (mirror == 1 ? -1 : 1);
+ const int d2_signed_linesize = d2_linesize * (mirror == 1 ? -1 : 1);
+
+ for (x = 0; x < src_w; x++) {
+ const uint16_t *c0_data = (uint16_t *)in->data[plane + 0];
+ const uint16_t *c1_data = (uint16_t *)in->data[(plane + 1) % s->ncomp];
+ const uint16_t *c2_data = (uint16_t *)in->data[(plane + 2) % s->ncomp];
+ uint16_t *d0_data = (uint16_t *)out->data[plane] + offset_y * d0_linesize + offset_x;
+ uint16_t *d1_data = (uint16_t *)out->data[(plane + 1) % s->ncomp] + offset_y * d1_linesize + offset_x;
+ uint16_t *d2_data = (uint16_t *)out->data[(plane + 2) % s->ncomp] + offset_y * d2_linesize + offset_x;
+ uint16_t * const d0_bottom_line = d0_data + d0_linesize * (s->size - 1);
+ uint16_t * const d0 = (mirror ? d0_bottom_line : d0_data);
+ uint16_t * const d1_bottom_line = d1_data + d1_linesize * (s->size - 1);
+ uint16_t * const d1 = (mirror ? d1_bottom_line : d1_data);
+ uint16_t * const d2_bottom_line = d2_data + d2_linesize * (s->size - 1);
+ uint16_t * const d2 = (mirror ? d2_bottom_line : d2_data);
+
+ for (y = 0; y < src_h; y++) {
+ const int c0 = FFMIN(c0_data[x >> c0_shift_w], limit) + mid;
+ const int c1 = FFMIN(c1_data[x >> c1_shift_w], limit) - mid;
+ const int c2 = FFMIN(c2_data[x >> c2_shift_w], limit) - mid;
+ uint16_t *target;
+
+ target = d0 + x + d0_signed_linesize * c0;
+ update16(target, max, intensity, limit);
+
+ target = d1 + x + d1_signed_linesize * (c0 + c1);
+ update16(target, max, intensity, limit);
+
+ target = d2 + x + d2_signed_linesize * (c0 + c2);
+ update16(target, max, intensity, limit);
+
+ if (!c0_shift_h || (y & c0_shift_h))
+ c0_data += c0_linesize;
+ if (!c1_shift_h || (y & c1_shift_h))
+ c1_data += c1_linesize;
+ if (!c2_shift_h || (y & c2_shift_h))
+ c2_data += c2_linesize;
+ d0_data += d0_linesize;
+ d1_data += d1_linesize;
+ d2_data += d2_linesize;
+ }
+ }
+ } else {
+ const uint16_t *c0_data = (uint16_t *)in->data[plane];
+ const uint16_t *c1_data = (uint16_t *)in->data[(plane + 1) % s->ncomp];
+ const uint16_t *c2_data = (uint16_t *)in->data[(plane + 2) % s->ncomp];
+ uint16_t *d0_data = (uint16_t *)out->data[plane] + offset_y * d0_linesize + offset_x;
+ uint16_t *d1_data = (uint16_t *)out->data[(plane + 1) % s->ncomp] + offset_y * d1_linesize + offset_x;
+ uint16_t *d2_data = (uint16_t *)out->data[(plane + 2) % s->ncomp] + offset_y * d2_linesize + offset_x;
+
+ if (mirror) {
+ d0_data += s->size - 1;
+ d1_data += s->size - 1;
+ d2_data += s->size - 1;
+ }
+
+ for (y = 0; y < src_h; y++) {
+ for (x = 0; x < src_w; x++) {
+ const int c0 = FFMIN(c0_data[x >> c0_shift_w], limit) + mid;
+ const int c1 = FFMIN(c1_data[x >> c1_shift_w], limit) - mid;
+ const int c2 = FFMIN(c2_data[x >> c2_shift_w], limit) - mid;
+ uint16_t *target;
+
+ if (mirror) {
+ target = d0_data - c0;
+ update16(target, max, intensity, limit);
+ target = d1_data - (c0 + c1);
+ update16(target, max, intensity, limit);
+ target = d2_data - (c0 + c2);
+ update16(target, max, intensity, limit);
+ } else {
+ target = d0_data + c0;
+ update16(target, max, intensity, limit);
+ target = d1_data + (c0 + c1);
+ update16(target, max, intensity, limit);
+ target = d2_data + (c0 + c2);
+ update16(target, max, intensity, limit);
+ }
+ }
+
+ if (!c0_shift_h || (y & c0_shift_h))
+ c0_data += c0_linesize;
+ if (!c1_shift_h || (y & c1_shift_h))
+ c1_data += c1_linesize;
+ if (!c2_shift_h || (y & c2_shift_h))
+ c2_data += c2_linesize;
+ d0_data += d0_linesize;
+ d1_data += d1_linesize;
+ d2_data += d2_linesize;
+ }
+ }
+
+ envelope16(s, out, plane, (plane + 0) % s->ncomp, column ? offset_x : offset_y);
+ envelope16(s, out, plane, (plane + 1) % s->ncomp, column ? offset_x : offset_y);
+ envelope16(s, out, plane, (plane + 2) % s->ncomp, column ? offset_x : offset_y);
+}
+
+static av_always_inline void aflat(WaveformContext *s,
+ AVFrame *in, AVFrame *out,
+ int component, int intensity,
+ int offset_y, int offset_x,
+ int column, int mirror)
+{
+ const int plane = s->desc->comp[component].plane;
+ const int c0_linesize = in->linesize[ plane + 0 ];
+ const int c1_linesize = in->linesize[(plane + 1) % s->ncomp];
+ const int c2_linesize = in->linesize[(plane + 2) % s->ncomp];
+ const int c0_shift_w = s->shift_w[ component + 0 ];
+ const int c1_shift_w = s->shift_w[(component + 1) % s->ncomp];
+ const int c2_shift_w = s->shift_w[(component + 2) % s->ncomp];
+ const int c0_shift_h = s->shift_h[ component + 0 ];
+ const int c1_shift_h = s->shift_h[(component + 1) % s->ncomp];
+ const int c2_shift_h = s->shift_h[(component + 2) % s->ncomp];
+ const int d0_linesize = out->linesize[ plane + 0 ];
+ const int d1_linesize = out->linesize[(plane + 1) % s->ncomp];
+ const int d2_linesize = out->linesize[(plane + 2) % s->ncomp];
+ const int max = 255 - intensity;
+ const int src_h = in->height;
+ const int src_w = in->width;
+ int x, y;
+
+ if (column) {
+ const int d0_signed_linesize = d0_linesize * (mirror == 1 ? -1 : 1);
+ const int d1_signed_linesize = d1_linesize * (mirror == 1 ? -1 : 1);
+ const int d2_signed_linesize = d2_linesize * (mirror == 1 ? -1 : 1);
+
+ for (x = 0; x < src_w; x++) {
+ const uint8_t *c0_data = in->data[plane + 0];
+ const uint8_t *c1_data = in->data[(plane + 1) % s->ncomp];
+ const uint8_t *c2_data = in->data[(plane + 2) % s->ncomp];
+ uint8_t *d0_data = out->data[plane] + offset_y * d0_linesize + offset_x;
+ uint8_t *d1_data = out->data[(plane + 1) % s->ncomp] + offset_y * d1_linesize + offset_x;
+ uint8_t *d2_data = out->data[(plane + 2) % s->ncomp] + offset_y * d2_linesize + offset_x;
+ uint8_t * const d0_bottom_line = d0_data + d0_linesize * (s->size - 1);
+ uint8_t * const d0 = (mirror ? d0_bottom_line : d0_data);
+ uint8_t * const d1_bottom_line = d1_data + d1_linesize * (s->size - 1);
+ uint8_t * const d1 = (mirror ? d1_bottom_line : d1_data);
+ uint8_t * const d2_bottom_line = d2_data + d2_linesize * (s->size - 1);
+ uint8_t * const d2 = (mirror ? d2_bottom_line : d2_data);
+
+ for (y = 0; y < src_h; y++) {
+ const int c0 = c0_data[x >> c0_shift_w] + 128;
+ const int c1 = c1_data[x >> c1_shift_w] - 128;
+ const int c2 = c2_data[x >> c2_shift_w] - 128;
+ uint8_t *target;
+
+ target = d0 + x + d0_signed_linesize * c0;
+ update(target, max, intensity);
+
+ target = d1 + x + d1_signed_linesize * (c0 + c1);
+ update(target, max, intensity);
+
+ target = d2 + x + d2_signed_linesize * (c0 + c2);
+ update(target, max, intensity);
+
+ if (!c0_shift_h || (y & c0_shift_h))
+ c0_data += c0_linesize;
+ if (!c1_shift_h || (y & c1_shift_h))
+ c1_data += c1_linesize;
+ if (!c1_shift_h || (y & c1_shift_h))
+ c2_data += c1_linesize;
+ d0_data += d0_linesize;
+ d1_data += d1_linesize;
+ d2_data += d2_linesize;
+ }
+ }
+ } else {
+ const uint8_t *c0_data = in->data[plane];
+ const uint8_t *c1_data = in->data[(plane + 1) % s->ncomp];
+ const uint8_t *c2_data = in->data[(plane + 2) % s->ncomp];
+ uint8_t *d0_data = out->data[plane] + offset_y * d0_linesize + offset_x;
+ uint8_t *d1_data = out->data[(plane + 1) % s->ncomp] + offset_y * d1_linesize + offset_x;
+ uint8_t *d2_data = out->data[(plane + 2) % s->ncomp] + offset_y * d2_linesize + offset_x;
+
+ if (mirror) {
+ d0_data += s->size - 1;
+ d1_data += s->size - 1;
+ d2_data += s->size - 1;
+ }
+
+ for (y = 0; y < src_h; y++) {
+ for (x = 0; x < src_w; x++) {
+ const int c0 = c0_data[x >> c0_shift_w] + 128;
+ const int c1 = c1_data[x >> c1_shift_w] - 128;
+ const int c2 = c2_data[x >> c2_shift_w] - 128;
+ uint8_t *target;
+
+ if (mirror) {
+ target = d0_data - c0;
+ update(target, max, intensity);
+ target = d1_data - (c0 + c1);
+ update(target, max, intensity);
+ target = d2_data - (c0 + c2);
+ update(target, max, intensity);
+ } else {
+ target = d0_data + c0;
+ update(target, max, intensity);
+ target = d1_data + (c0 + c1);
+ update(target, max, intensity);
+ target = d2_data + (c0 + c2);
+ update(target, max, intensity);
+ }
+ }
+
+ if (!c0_shift_h || (y & c0_shift_h))
+ c0_data += c0_linesize;
+ if (!c1_shift_h || (y & c1_shift_h))
+ c1_data += c1_linesize;
+ if (!c2_shift_h || (y & c2_shift_h))
+ c2_data += c2_linesize;
+ d0_data += d0_linesize;
+ d1_data += d1_linesize;
+ d2_data += d2_linesize;
+ }
+ }
+
+ envelope(s, out, plane, (plane + 0) % s->ncomp, column ? offset_x : offset_y);
+ envelope(s, out, plane, (plane + 1) % s->ncomp, column ? offset_x : offset_y);
+ envelope(s, out, plane, (plane + 2) % s->ncomp, column ? offset_x : offset_y);
+}
+
+static av_always_inline void chroma16(WaveformContext *s,
+ AVFrame *in, AVFrame *out,
+ int component, int intensity,
+ int offset_y, int offset_x,
+ int column, int mirror)
+{
+ const int plane = s->desc->comp[component].plane;
+ const int c0_linesize = in->linesize[(plane + 1) % s->ncomp] / 2;
+ const int c1_linesize = in->linesize[(plane + 2) % s->ncomp] / 2;
+ const int dst_linesize = out->linesize[plane] / 2;
+ const int limit = s->max - 1;
+ const int max = limit - intensity;
+ const int mid = s->max / 2;
+ const int c0_shift_w = s->shift_w[(component + 1) % s->ncomp];
+ const int c1_shift_w = s->shift_w[(component + 2) % s->ncomp];
+ const int c0_shift_h = s->shift_h[(component + 1) % s->ncomp];
+ const int c1_shift_h = s->shift_h[(component + 2) % s->ncomp];
+ const int src_h = in->height;
+ const int src_w = in->width;
+ int x, y;
+
+ if (column) {
+ const int dst_signed_linesize = dst_linesize * (mirror == 1 ? -1 : 1);
+
+ for (x = 0; x < src_w; x++) {
+ const uint16_t *c0_data = (uint16_t *)in->data[(plane + 1) % s->ncomp];
+ const uint16_t *c1_data = (uint16_t *)in->data[(plane + 2) % s->ncomp];
+ uint16_t *dst_data = (uint16_t *)out->data[plane] + offset_y * dst_linesize + offset_x;
+ uint16_t * const dst_bottom_line = dst_data + dst_linesize * (s->size - 1);
+ uint16_t * const dst_line = (mirror ? dst_bottom_line : dst_data);
+ uint16_t *dst = dst_line;
+
+ for (y = 0; y < src_h; y++) {
+ const int sum = FFMIN(FFABS(c0_data[x >> c0_shift_w] - mid) + FFABS(c1_data[x >> c1_shift_w] - mid - 1), limit);
+ uint16_t *target;
+
+ target = dst + x + dst_signed_linesize * sum;
+ update16(target, max, intensity, limit);
+
+ if (!c0_shift_h || (y & c0_shift_h))
+ c0_data += c0_linesize;
+ if (!c1_shift_h || (y & c1_shift_h))
+ c1_data += c1_linesize;
+ dst_data += dst_linesize;
+ }
+ }
+ } else {
+ const uint16_t *c0_data = (uint16_t *)in->data[(plane + 1) % s->ncomp];
+ const uint16_t *c1_data = (uint16_t *)in->data[(plane + 2) % s->ncomp];
+ uint16_t *dst_data = (uint16_t *)out->data[plane] + offset_y * dst_linesize + offset_x;
+
+ if (mirror)
+ dst_data += s->size - 1;
+ for (y = 0; y < src_h; y++) {
+ for (x = 0; x < src_w; x++) {
+ const int sum = FFMIN(FFABS(c0_data[x >> c0_shift_w] - mid) + FFABS(c1_data[x >> c1_shift_w] - mid - 1), limit);
+ uint16_t *target;
+
+ if (mirror) {
+ target = dst_data - sum;
+ update16(target, max, intensity, limit);
+ } else {
+ target = dst_data + sum;
+ update16(target, max, intensity, limit);
+ }
+ }
+
+ if (!c0_shift_h || (y & c0_shift_h))
+ c0_data += c0_linesize;
+ if (!c1_shift_h || (y & c1_shift_h))
+ c1_data += c1_linesize;
+ dst_data += dst_linesize;
+ }
+ }
+
+ envelope16(s, out, plane, plane, column ? offset_x : offset_y);
+}
+
+static av_always_inline void chroma(WaveformContext *s,
+ AVFrame *in, AVFrame *out,
+ int component, int intensity,
+ int offset_y, int offset_x,
+ int column, int mirror)
+{
+ const int plane = s->desc->comp[component].plane;
+ const int c0_linesize = in->linesize[(plane + 1) % s->ncomp];
+ const int c1_linesize = in->linesize[(plane + 2) % s->ncomp];
+ const int dst_linesize = out->linesize[plane];
+ const int max = 255 - intensity;
+ const int c0_shift_w = s->shift_w[(component + 1) % s->ncomp];
+ const int c1_shift_w = s->shift_w[(component + 2) % s->ncomp];
+ const int c0_shift_h = s->shift_h[(component + 1) % s->ncomp];
+ const int c1_shift_h = s->shift_h[(component + 2) % s->ncomp];
+ const int src_h = in->height;
+ const int src_w = in->width;
+ int x, y;
+
+ if (column) {
+ const int dst_signed_linesize = dst_linesize * (mirror == 1 ? -1 : 1);
+
+ for (x = 0; x < src_w; x++) {
+ const uint8_t *c0_data = in->data[(plane + 1) % s->ncomp];
+ const uint8_t *c1_data = in->data[(plane + 2) % s->ncomp];
+ uint8_t *dst_data = out->data[plane] + offset_y * dst_linesize + offset_x;
+ uint8_t * const dst_bottom_line = dst_data + dst_linesize * (s->size - 1);
+ uint8_t * const dst_line = (mirror ? dst_bottom_line : dst_data);
+ uint8_t *dst = dst_line;
+
+ for (y = 0; y < src_h; y++) {
+ const int sum = FFABS(c0_data[x >> c0_shift_w] - 128) + FFABS(c1_data[x >> c1_shift_w] - 127);
+ uint8_t *target;
+
+ target = dst + x + dst_signed_linesize * sum;
+ update(target, max, intensity);
+
+ if (!c0_shift_h || (y & c0_shift_h))
+ c0_data += c0_linesize;
+ if (!c1_shift_h || (y & c1_shift_h))
+ c1_data += c1_linesize;
+ dst_data += dst_linesize;
+ }
+ }
+ } else {
+ const uint8_t *c0_data = in->data[(plane + 1) % s->ncomp];
+ const uint8_t *c1_data = in->data[(plane + 2) % s->ncomp];
+ uint8_t *dst_data = out->data[plane] + offset_y * dst_linesize + offset_x;
+
+ if (mirror)
+ dst_data += s->size - 1;
+ for (y = 0; y < src_h; y++) {
+ for (x = 0; x < src_w; x++) {
+ const int sum = FFABS(c0_data[x >> c0_shift_w] - 128) + FFABS(c1_data[x >> c1_shift_w] - 127);
+ uint8_t *target;
+
+ if (mirror) {
+ target = dst_data - sum;
+ update(target, max, intensity);
+ } else {
+ target = dst_data + sum;
+ update(target, max, intensity);
+ }
+ }
+
+ if (!c0_shift_h || (y & c0_shift_h))
+ c0_data += c0_linesize;
+ if (!c1_shift_h || (y & c1_shift_h))
+ c1_data += c1_linesize;
+ dst_data += dst_linesize;
+ }
+ }
+
+ envelope(s, out, plane, plane, column ? offset_x : offset_y);
+}
+
+static av_always_inline void color16(WaveformContext *s,
+ AVFrame *in, AVFrame *out,
+ int component, int intensity,
+ int offset_y, int offset_x,
+ int column, int mirror)
+{
+ const int plane = s->desc->comp[component].plane;
+ const int limit = s->max - 1;
+ const uint16_t *c0_data = (const uint16_t *)in->data[plane + 0];
+ const uint16_t *c1_data = (const uint16_t *)in->data[(plane + 1) % s->ncomp];
+ const uint16_t *c2_data = (const uint16_t *)in->data[(plane + 2) % s->ncomp];
+ const int c0_linesize = in->linesize[ plane + 0 ] / 2;
+ const int c1_linesize = in->linesize[(plane + 1) % s->ncomp] / 2;
+ const int c2_linesize = in->linesize[(plane + 2) % s->ncomp] / 2;
+ const int d0_linesize = out->linesize[ plane + 0 ] / 2;
+ const int d1_linesize = out->linesize[(plane + 1) % s->ncomp] / 2;
+ const int d2_linesize = out->linesize[(plane + 2) % s->ncomp] / 2;
+ const int c0_shift_w = s->shift_w[ component + 0 ];
+ const int c1_shift_w = s->shift_w[(component + 1) % s->ncomp];
+ const int c2_shift_w = s->shift_w[(component + 2) % s->ncomp];
+ const int c0_shift_h = s->shift_h[ component + 0 ];
+ const int c1_shift_h = s->shift_h[(component + 1) % s->ncomp];
+ const int c2_shift_h = s->shift_h[(component + 2) % s->ncomp];
+ const int src_h = in->height;
+ const int src_w = in->width;
+ int x, y;
+
+ if (column) {
+ const int d0_signed_linesize = d0_linesize * (mirror == 1 ? -1 : 1);
+ const int d1_signed_linesize = d1_linesize * (mirror == 1 ? -1 : 1);
+ const int d2_signed_linesize = d2_linesize * (mirror == 1 ? -1 : 1);
+ uint16_t *d0_data = (uint16_t *)out->data[plane] + offset_y * d0_linesize + offset_x;
+ uint16_t *d1_data = (uint16_t *)out->data[(plane + 1) % s->ncomp] + offset_y * d1_linesize + offset_x;
+ uint16_t *d2_data = (uint16_t *)out->data[(plane + 2) % s->ncomp] + offset_y * d2_linesize + offset_x;
+ uint16_t * const d0_bottom_line = d0_data + d0_linesize * (s->size - 1);
+ uint16_t * const d0 = (mirror ? d0_bottom_line : d0_data);
+ uint16_t * const d1_bottom_line = d1_data + d1_linesize * (s->size - 1);
+ uint16_t * const d1 = (mirror ? d1_bottom_line : d1_data);
+ uint16_t * const d2_bottom_line = d2_data + d2_linesize * (s->size - 1);
+ uint16_t * const d2 = (mirror ? d2_bottom_line : d2_data);
+
+ for (y = 0; y < src_h; y++) {
+ for (x = 0; x < src_w; x++) {
+ const int c0 = FFMIN(c0_data[x >> c0_shift_w], limit);
+ const int c1 = c1_data[x >> c1_shift_w];
+ const int c2 = c2_data[x >> c2_shift_w];
+
+ *(d0 + d0_signed_linesize * c0 + x) = c0;
+ *(d1 + d1_signed_linesize * c0 + x) = c1;
+ *(d2 + d2_signed_linesize * c0 + x) = c2;
+ }
+
+ if (!c0_shift_h || (y & c0_shift_h))
+ c0_data += c0_linesize;
+ if (!c1_shift_h || (y & c1_shift_h))
+ c1_data += c1_linesize;
+ if (!c2_shift_h || (y & c2_shift_h))
+ c2_data += c2_linesize;
+ d0_data += d0_linesize;
+ d1_data += d1_linesize;
+ d2_data += d2_linesize;
+ }
+ } else {
+ uint16_t *d0_data = (uint16_t *)out->data[plane] + offset_y * d0_linesize + offset_x;
+ uint16_t *d1_data = (uint16_t *)out->data[(plane + 1) % s->ncomp] + offset_y * d1_linesize + offset_x;
+ uint16_t *d2_data = (uint16_t *)out->data[(plane + 2) % s->ncomp] + offset_y * d2_linesize + offset_x;
+
+ if (mirror) {
+ d0_data += s->size - 1;
+ d1_data += s->size - 1;
+ d2_data += s->size - 1;
+ }
+
+ for (y = 0; y < src_h; y++) {
+ for (x = 0; x < src_w; x++) {
+ const int c0 = FFMIN(c0_data[x >> c0_shift_w], limit);
+ const int c1 = c1_data[x >> c1_shift_w];
+ const int c2 = c2_data[x >> c2_shift_w];
+
+ if (mirror) {
+ *(d0_data - c0) = c0;
+ *(d1_data - c0) = c1;
+ *(d2_data - c0) = c2;
+ } else {
+ *(d0_data + c0) = c0;
+ *(d1_data + c0) = c1;
+ *(d2_data + c0) = c2;
+ }
+ }
+
+ if (!c0_shift_h || (y & c0_shift_h))
+ c0_data += c0_linesize;
+ if (!c1_shift_h || (y & c1_shift_h))
+ c1_data += c1_linesize;
+ if (!c2_shift_h || (y & c2_shift_h))
+ c2_data += c2_linesize;
+ d0_data += d0_linesize;
+ d1_data += d1_linesize;
+ d2_data += d2_linesize;
+ }
+ }
+
+ envelope16(s, out, plane, plane, column ? offset_x : offset_y);
+}
+
+static av_always_inline void color(WaveformContext *s,
+ AVFrame *in, AVFrame *out,
+ int component, int intensity,
+ int offset_y, int offset_x,
+ int column, int mirror)
+{
+ const int plane = s->desc->comp[component].plane;
+ const uint8_t *c0_data = in->data[plane + 0];
+ const uint8_t *c1_data = in->data[(plane + 1) % s->ncomp];
+ const uint8_t *c2_data = in->data[(plane + 2) % s->ncomp];
+ const int c0_linesize = in->linesize[ plane + 0 ];
+ const int c1_linesize = in->linesize[(plane + 1) % s->ncomp];
+ const int c2_linesize = in->linesize[(plane + 2) % s->ncomp];
+ const int d0_linesize = out->linesize[ plane + 0 ];
+ const int d1_linesize = out->linesize[(plane + 1) % s->ncomp];
+ const int d2_linesize = out->linesize[(plane + 2) % s->ncomp];
+ const int c0_shift_w = s->shift_w[ component + 0 ];
+ const int c1_shift_w = s->shift_w[(component + 1) % s->ncomp];
+ const int c2_shift_w = s->shift_w[(component + 2) % s->ncomp];
+ const int c0_shift_h = s->shift_h[ component + 0 ];
+ const int c1_shift_h = s->shift_h[(component + 1) % s->ncomp];
+ const int c2_shift_h = s->shift_h[(component + 2) % s->ncomp];
+ const int src_h = in->height;
+ const int src_w = in->width;
+ int x, y;
+
+ if (s->mode) {
+ const int d0_signed_linesize = d0_linesize * (mirror == 1 ? -1 : 1);
+ const int d1_signed_linesize = d1_linesize * (mirror == 1 ? -1 : 1);
+ const int d2_signed_linesize = d2_linesize * (mirror == 1 ? -1 : 1);
+ uint8_t *d0_data = out->data[plane] + offset_y * d0_linesize + offset_x;
+ uint8_t *d1_data = out->data[(plane + 1) % s->ncomp] + offset_y * d1_linesize + offset_x;
+ uint8_t *d2_data = out->data[(plane + 2) % s->ncomp] + offset_y * d2_linesize + offset_x;
+ uint8_t * const d0_bottom_line = d0_data + d0_linesize * (s->size - 1);
+ uint8_t * const d0 = (mirror ? d0_bottom_line : d0_data);
+ uint8_t * const d1_bottom_line = d1_data + d1_linesize * (s->size - 1);
+ uint8_t * const d1 = (mirror ? d1_bottom_line : d1_data);
+ uint8_t * const d2_bottom_line = d2_data + d2_linesize * (s->size - 1);
+ uint8_t * const d2 = (mirror ? d2_bottom_line : d2_data);
+
+ for (y = 0; y < src_h; y++) {
+ for (x = 0; x < src_w; x++) {
+ const int c0 = c0_data[x >> c0_shift_w];
+ const int c1 = c1_data[x >> c1_shift_w];
+ const int c2 = c2_data[x >> c2_shift_w];
+
+ *(d0 + d0_signed_linesize * c0 + x) = c0;
+ *(d1 + d1_signed_linesize * c0 + x) = c1;
+ *(d2 + d2_signed_linesize * c0 + x) = c2;
+ }
+
+ if (!c0_shift_h || (y & c0_shift_h))
+ c0_data += c0_linesize;
+ if (!c1_shift_h || (y & c1_shift_h))
+ c1_data += c1_linesize;
+ if (!c2_shift_h || (y & c2_shift_h))
+ c2_data += c2_linesize;
+ d0_data += d0_linesize;
+ d1_data += d1_linesize;
+ d2_data += d2_linesize;
+ }
+ } else {
+ uint8_t *d0_data = out->data[plane] + offset_y * d0_linesize + offset_x;
+ uint8_t *d1_data = out->data[(plane + 1) % s->ncomp] + offset_y * d1_linesize + offset_x;
+ uint8_t *d2_data = out->data[(plane + 2) % s->ncomp] + offset_y * d2_linesize + offset_x;
+
+ if (mirror) {
+ d0_data += s->size - 1;
+ d1_data += s->size - 1;
+ d2_data += s->size - 1;
+ }
+
+ for (y = 0; y < src_h; y++) {
+ for (x = 0; x < src_w; x++) {
+ const int c0 = c0_data[x >> c0_shift_w];
+ const int c1 = c1_data[x >> c1_shift_w];
+ const int c2 = c2_data[x >> c2_shift_w];
+
+ if (mirror) {
+ *(d0_data - c0) = c0;
+ *(d1_data - c0) = c1;
+ *(d2_data - c0) = c2;
+ } else {
+ *(d0_data + c0) = c0;
+ *(d1_data + c0) = c1;
+ *(d2_data + c0) = c2;
+ }
+ }
+
+ if (!c0_shift_h || (y & c0_shift_h))
+ c0_data += c0_linesize;
+ if (!c1_shift_h || (y & c1_shift_h))
+ c1_data += c1_linesize;
+ if (!c2_shift_h || (y & c2_shift_h))
+ c2_data += c2_linesize;
+ d0_data += d0_linesize;
+ d1_data += d1_linesize;
+ d2_data += d2_linesize;
+ }
+ }
+
+ envelope(s, out, plane, plane, column ? offset_x : offset_y);
+}
+
+static av_always_inline void acolor16(WaveformContext *s,
+ AVFrame *in, AVFrame *out,
+ int component, int intensity,
+ int offset_y, int offset_x,
+ int column, int mirror)
+{
+ const int plane = s->desc->comp[component].plane;
+ const int limit = s->max - 1;
+ const int max = limit - intensity;
+ const uint16_t *c0_data = (const uint16_t *)in->data[plane + 0];
+ const uint16_t *c1_data = (const uint16_t *)in->data[(plane + 1) % s->ncomp];
+ const uint16_t *c2_data = (const uint16_t *)in->data[(plane + 2) % s->ncomp];
+ const int c0_linesize = in->linesize[ plane + 0 ] / 2;
+ const int c1_linesize = in->linesize[(plane + 1) % s->ncomp] / 2;
+ const int c2_linesize = in->linesize[(plane + 2) % s->ncomp] / 2;
+ const int d0_linesize = out->linesize[ plane + 0 ] / 2;
+ const int d1_linesize = out->linesize[(plane + 1) % s->ncomp] / 2;
+ const int d2_linesize = out->linesize[(plane + 2) % s->ncomp] / 2;
+ const int c0_shift_w = s->shift_w[ component + 0 ];
+ const int c1_shift_w = s->shift_w[(component + 1) % s->ncomp];
+ const int c2_shift_w = s->shift_w[(component + 2) % s->ncomp];
+ const int c0_shift_h = s->shift_h[ component + 0 ];
+ const int c1_shift_h = s->shift_h[(component + 1) % s->ncomp];
+ const int c2_shift_h = s->shift_h[(component + 2) % s->ncomp];
+ const int src_h = in->height;
+ const int src_w = in->width;
+ int x, y;
+
+ if (s->mode) {
+ const int d0_signed_linesize = d0_linesize * (mirror == 1 ? -1 : 1);
+ const int d1_signed_linesize = d1_linesize * (mirror == 1 ? -1 : 1);
+ const int d2_signed_linesize = d2_linesize * (mirror == 1 ? -1 : 1);
+ uint16_t *d0_data = (uint16_t *)out->data[plane] + offset_y * d0_linesize + offset_x;
+ uint16_t *d1_data = (uint16_t *)out->data[(plane + 1) % s->ncomp] + offset_y * d1_linesize + offset_x;
+ uint16_t *d2_data = (uint16_t *)out->data[(plane + 2) % s->ncomp] + offset_y * d2_linesize + offset_x;
+ uint16_t * const d0_bottom_line = d0_data + d0_linesize * (s->size - 1);
+ uint16_t * const d0 = (mirror ? d0_bottom_line : d0_data);
+ uint16_t * const d1_bottom_line = d1_data + d1_linesize * (s->size - 1);
+ uint16_t * const d1 = (mirror ? d1_bottom_line : d1_data);
+ uint16_t * const d2_bottom_line = d2_data + d2_linesize * (s->size - 1);
+ uint16_t * const d2 = (mirror ? d2_bottom_line : d2_data);
+
+ for (y = 0; y < src_h; y++) {
+ for (x = 0; x < src_w; x++) {
+ const int c0 = FFMIN(c0_data[x >> c0_shift_w], limit);
+ const int c1 = c1_data[x >> c1_shift_w];
+ const int c2 = c2_data[x >> c2_shift_w];
+
+ update16(d0 + d0_signed_linesize * c0 + x, max, intensity, limit);
+ *(d1 + d1_signed_linesize * c0 + x) = c1;
+ *(d2 + d2_signed_linesize * c0 + x) = c2;
+ }
+
+ if (!c0_shift_h || (y & c0_shift_h))
+ c0_data += c0_linesize;
+ if (!c1_shift_h || (y & c1_shift_h))
+ c1_data += c1_linesize;
+ if (!c2_shift_h || (y & c2_shift_h))
+ c2_data += c2_linesize;
+ d0_data += d0_linesize;
+ d1_data += d1_linesize;
+ d2_data += d2_linesize;
+ }
+ } else {
+ uint16_t *d0_data = (uint16_t *)out->data[plane] + offset_y * d0_linesize + offset_x;
+ uint16_t *d1_data = (uint16_t *)out->data[(plane + 1) % s->ncomp] + offset_y * d1_linesize + offset_x;
+ uint16_t *d2_data = (uint16_t *)out->data[(plane + 2) % s->ncomp] + offset_y * d2_linesize + offset_x;
+
+ if (mirror) {
+ d0_data += s->size - 1;
+ d1_data += s->size - 1;
+ d2_data += s->size - 1;
+ }
+
+ for (y = 0; y < src_h; y++) {
+ for (x = 0; x < src_w; x++) {
+ const int c0 = FFMIN(c0_data[x >> c0_shift_w], limit);
+ const int c1 = c1_data[x >> c1_shift_w];
+ const int c2 = c2_data[x >> c2_shift_w];
+
+ if (mirror) {
+ update16(d0_data - c0, max, intensity, limit);
+ *(d1_data - c0) = c1;
+ *(d2_data - c0) = c2;
+ } else {
+ update16(d0_data + c0, max, intensity, limit);
+ *(d1_data + c0) = c1;
+ *(d2_data + c0) = c2;
+ }
+ }
+
+ if (!c0_shift_h || (y & c0_shift_h))
+ c0_data += c0_linesize;
+ if (!c1_shift_h || (y & c1_shift_h))
+ c1_data += c1_linesize;
+ if (!c2_shift_h || (y & c2_shift_h))
+ c2_data += c2_linesize;
+ d0_data += d0_linesize;
+ d1_data += d1_linesize;
+ d2_data += d2_linesize;
+ }
+ }
+
+ envelope16(s, out, plane, plane, column ? offset_x : offset_y);
+}
+
+static av_always_inline void acolor(WaveformContext *s,
+ AVFrame *in, AVFrame *out,
+ int component, int intensity,
+ int offset_y, int offset_x,
+ int column, int mirror)
+{
+ const int plane = s->desc->comp[component].plane;
+ const uint8_t *c0_data = in->data[plane + 0];
+ const uint8_t *c1_data = in->data[(plane + 1) % s->ncomp];
+ const uint8_t *c2_data = in->data[(plane + 2) % s->ncomp];
+ const int c0_linesize = in->linesize[ plane + 0 ];
+ const int c1_linesize = in->linesize[(plane + 1) % s->ncomp];
+ const int c2_linesize = in->linesize[(plane + 2) % s->ncomp];
+ const int d0_linesize = out->linesize[ plane + 0 ];
+ const int d1_linesize = out->linesize[(plane + 1) % s->ncomp];
+ const int d2_linesize = out->linesize[(plane + 2) % s->ncomp];
+ const int c0_shift_w = s->shift_w[ component + 0 ];
+ const int c1_shift_w = s->shift_w[(component + 1) % s->ncomp];
+ const int c2_shift_w = s->shift_w[(component + 2) % s->ncomp];
+ const int c0_shift_h = s->shift_h[ component + 0 ];
+ const int c1_shift_h = s->shift_h[(component + 1) % s->ncomp];
+ const int c2_shift_h = s->shift_h[(component + 2) % s->ncomp];
+ const int max = 255 - intensity;
+ const int src_h = in->height;
+ const int src_w = in->width;
+ int x, y;
+
+ if (s->mode) {
+ const int d0_signed_linesize = d0_linesize * (mirror == 1 ? -1 : 1);
+ const int d1_signed_linesize = d1_linesize * (mirror == 1 ? -1 : 1);
+ const int d2_signed_linesize = d2_linesize * (mirror == 1 ? -1 : 1);
+ uint8_t *d0_data = out->data[plane] + offset_y * d0_linesize + offset_x;
+ uint8_t *d1_data = out->data[(plane + 1) % s->ncomp] + offset_y * d1_linesize + offset_x;
+ uint8_t *d2_data = out->data[(plane + 2) % s->ncomp] + offset_y * d2_linesize + offset_x;
+ uint8_t * const d0_bottom_line = d0_data + d0_linesize * (s->size - 1);
+ uint8_t * const d0 = (mirror ? d0_bottom_line : d0_data);
+ uint8_t * const d1_bottom_line = d1_data + d1_linesize * (s->size - 1);
+ uint8_t * const d1 = (mirror ? d1_bottom_line : d1_data);
+ uint8_t * const d2_bottom_line = d2_data + d2_linesize * (s->size - 1);
+ uint8_t * const d2 = (mirror ? d2_bottom_line : d2_data);
+
+ for (y = 0; y < src_h; y++) {
+ for (x = 0; x < src_w; x++) {
+ const int c0 = c0_data[x >> c0_shift_w];
+ const int c1 = c1_data[x >> c1_shift_w];
+ const int c2 = c2_data[x >> c2_shift_w];
+
+ update(d0 + d0_signed_linesize * c0 + x, max, intensity);
+ *(d1 + d1_signed_linesize * c0 + x) = c1;
+ *(d2 + d2_signed_linesize * c0 + x) = c2;
+ }
+
+ if (!c0_shift_h || (y & c0_shift_h))
+ c0_data += c0_linesize;
+ if (!c1_shift_h || (y & c1_shift_h))
+ c1_data += c1_linesize;
+ if (!c2_shift_h || (y & c2_shift_h))
+ c2_data += c2_linesize;
+ d0_data += d0_linesize;
+ d1_data += d1_linesize;
+ d2_data += d2_linesize;
+ }
+ } else {
+ uint8_t *d0_data = out->data[plane] + offset_y * d0_linesize + offset_x;
+ uint8_t *d1_data = out->data[(plane + 1) % s->ncomp] + offset_y * d1_linesize + offset_x;
+ uint8_t *d2_data = out->data[(plane + 2) % s->ncomp] + offset_y * d2_linesize + offset_x;
+
+ if (mirror) {
+ d0_data += s->size - 1;
+ d1_data += s->size - 1;
+ d2_data += s->size - 1;
+ }
+
+ for (y = 0; y < src_h; y++) {
+ for (x = 0; x < src_w; x++) {
+ const int c0 = c0_data[x >> c0_shift_w];
+ const int c1 = c1_data[x >> c1_shift_w];
+ const int c2 = c2_data[x >> c2_shift_w];
+
+ if (mirror) {
+ update(d0_data - c0, max, intensity);
+ *(d1_data - c0) = c1;
+ *(d2_data - c0) = c2;
+ } else {
+ update(d0_data + c0, max, intensity);
+ *(d1_data + c0) = c1;
+ *(d2_data + c0) = c2;
+ }
+ }
+
+ if (!c0_shift_h || (y & c0_shift_h))
+ c0_data += c0_linesize;
+ if (!c1_shift_h || (y & c1_shift_h))
+ c1_data += c1_linesize;
+ if (!c2_shift_h || (y & c2_shift_h))
+ c2_data += c2_linesize;
+ d0_data += d0_linesize;
+ d1_data += d1_linesize;
+ d2_data += d2_linesize;
+ }
+ }
+
+ envelope(s, out, plane, plane, column ? offset_x : offset_y);
+}
+
+static const uint8_t black_yuva_color[4] = { 0, 127, 127, 255 };
+static const uint8_t green_yuva_color[4] = { 255, 0, 0, 255 };
+static const uint8_t black_gbrp_color[4] = { 0, 0, 0, 255 };
+
+static const GraticuleLines aflat_digital8[] = {
+ { { { "16", 16+128 }, { "16", 16+128 }, { "16", 16+128 }, { "0", 0+128 } } },
+ { { { "128", 128+128 }, { "128", 128+128 }, { "128", 128+128 }, { "128", 128+128 } } },
+ { { { "235", 235+128 }, { "240", 240+128 }, { "240", 240+128 }, { "255", 255+128 } } },
+};
+
+static const GraticuleLines aflat_digital9[] = {
+ { { { "32", 32+256 }, { "32", 32+256 }, { "32", 32+256 }, { "0", 0+256 } } },
+ { { { "256", 256+256 }, { "256", 256+256 }, { "256", 256+256 }, { "256", 256+256 } } },
+ { { { "470", 470+256 }, { "480", 480+256 }, { "480", 480+256 }, { "511", 511+256 } } },
+};
+
+static const GraticuleLines aflat_digital10[] = {
+ { { { "64", 64+512 }, { "64", 64+512 }, { "64", 64+512 }, { "0", 0+512 } } },
+ { { { "512", 512+512 }, { "512", 512+512 }, { "512", 512+512 }, { "512", 512+512 } } },
+ { { { "940", 940+512 }, { "960", 960+512 }, { "960", 960+512 }, { "1023", 1023+512 } } },
+};
+
+static const GraticuleLines aflat_digital12[] = {
+ { { { "256", 256+2048 }, { "256", 256+2048 }, { "256", 256+2048 }, { "0", 0+2048 } } },
+ { { { "2048", 2048+2048 }, { "2048", 2048+2048 }, { "2048", 2048+2048 }, { "2048", 2048+2048 } } },
+ { { { "3760", 3760+2048 }, { "3840", 3840+2048 }, { "3840", 3840+2048 }, { "4095", 4095+2048 } } },
+};
+
+static const GraticuleLines aflat_millivolts8[] = {
+ { { { "0", 16+128 }, { "0", 16+128 }, { "0", 16+128 }, { "0", 0+128 } } },
+ { { { "175", 71+128 }, { "175", 72+128 }, { "175", 72+128 }, { "175", 64+128 } } },
+ { { { "350", 126+128 }, { "350", 128+128 }, { "350", 128+128 }, { "350", 128+128 } } },
+ { { { "525", 180+128 }, { "525", 184+128 }, { "525", 184+128 }, { "525", 192+128 } } },
+ { { { "700", 235+128 }, { "700", 240+128 }, { "700", 240+128 }, { "700", 255+128 } } },
+};
+
+static const GraticuleLines aflat_millivolts9[] = {
+ { { { "0", 32+256 }, { "0", 32+256 }, { "0", 32+256 }, { "0", 0+256 } } },
+ { { { "175", 142+256 }, { "175", 144+256 }, { "175", 144+256 }, { "175", 128+256 } } },
+ { { { "350", 251+256 }, { "350", 256+256 }, { "350", 256+256 }, { "350", 256+256 } } },
+ { { { "525", 361+256 }, { "525", 368+256 }, { "525", 368+256 }, { "525", 384+256 } } },
+ { { { "700", 470+256 }, { "700", 480+256 }, { "700", 480+256 }, { "700", 511+256 } } },
+};
+
+static const GraticuleLines aflat_millivolts10[] = {
+ { { { "0", 64+512 }, { "0", 64+512 }, { "0", 64+512 }, { "0", 0+512 } } },
+ { { { "175", 283+512 }, { "175", 288+512 }, { "175", 288+512 }, { "175", 256+512 } } },
+ { { { "350", 502+512 }, { "350", 512+512 }, { "350", 512+512 }, { "350", 512+512 } } },
+ { { { "525", 721+512 }, { "525", 736+512 }, { "525", 736+512 }, { "525", 768+512 } } },
+ { { { "700", 940+512 }, { "700", 960+512 }, { "700", 960+512 }, { "700", 1023+512 } } },
+};
+
+static const GraticuleLines aflat_millivolts12[] = {
+ { { { "0", 256+2048 }, { "0", 256+2048 }, { "0", 256+2048 }, { "0", 0+2048 } } },
+ { { { "175", 1132+2048 }, { "175", 1152+2048 }, { "175", 1152+2048 }, { "175", 1024+2048 } } },
+ { { { "350", 2008+2048 }, { "350", 2048+2048 }, { "350", 2048+2048 }, { "350", 2048+2048 } } },
+ { { { "525", 2884+2048 }, { "525", 2944+2048 }, { "525", 2944+2048 }, { "525", 3072+2048 } } },
+ { { { "700", 3760+2048 }, { "700", 3840+2048 }, { "700", 3840+2048 }, { "700", 4095+2048 } } },
+};
+
+static const GraticuleLines aflat_ire8[] = {
+ { { { "-25", -39+128 }, { "-25", -40+128 }, { "-25", -40+128 }, { "-25", -64+128 } } },
+ { { { "0", 16+128 }, { "0", 16+128 }, { "0", 16+128 }, { "0", 0+128 } } },
+ { { { "25", 71+128 }, { "25", 72+128 }, { "25", 72+128 }, { "25", 64+128 } } },
+ { { { "50", 126+128 }, { "50", 128+128 }, { "50", 128+128 }, { "50", 128+128 } } },
+ { { { "75", 180+128 }, { "75", 184+128 }, { "75", 184+128 }, { "75", 192+128 } } },
+ { { { "100", 235+128 }, { "100", 240+128 }, { "100", 240+128 }, { "100", 256+128 } } },
+ { { { "125", 290+128 }, { "125", 296+128 }, { "125", 296+128 }, { "125", 320+128 } } },
+};
+
+static const GraticuleLines aflat_ire9[] = {
+ { { { "-25", -78+256 }, { "-25", -80+256 }, { "-25", -80+256 }, { "-25",-128+256 } } },
+ { { { "0", 32+256 }, { "0", 32+256 }, { "0", 32+256 }, { "0", 0+256 } } },
+ { { { "25", 142+256 }, { "25", 144+256 }, { "25", 144+256 }, { "25", 128+256 } } },
+ { { { "50", 251+256 }, { "50", 256+256 }, { "50", 256+256 }, { "50", 256+256 } } },
+ { { { "75", 361+256 }, { "75", 368+256 }, { "75", 368+256 }, { "75", 384+256 } } },
+ { { { "100", 470+256 }, { "100", 480+256 }, { "100", 480+256 }, { "100", 512+256 } } },
+ { { { "125", 580+256 }, { "125", 592+256 }, { "125", 592+256 }, { "125", 640+256 } } },
+};
+
+static const GraticuleLines aflat_ire10[] = {
+ { { { "-25",-156+512 }, { "-25",-160+512 }, { "-25",-160+512 }, { "-25", -256+512 } } },
+ { { { "0", 64+512 }, { "0", 64+512 }, { "0", 64+512 }, { "0", 0+512 } } },
+ { { { "25", 283+512 }, { "25", 288+512 }, { "25", 288+512 }, { "25", 256+512 } } },
+ { { { "50", 502+512 }, { "50", 512+512 }, { "50", 512+512 }, { "50", 512+512 } } },
+ { { { "75", 721+512 }, { "75", 736+512 }, { "75", 736+512 }, { "75", 768+512 } } },
+ { { { "100", 940+512 }, { "100", 960+512 }, { "100", 960+512 }, { "100", 1024+512 } } },
+ { { { "125",1160+512 }, { "125",1184+512 }, { "125",1184+512 }, { "125", 1280+512 } } },
+};
+
+static const GraticuleLines aflat_ire12[] = {
+ { { { "-25", -624+2048 }, { "-25", -640+2048 }, { "-25", -640+2048 }, { "-25",-1024+2048 } } },
+ { { { "0", 256+2048 }, { "0", 256+2048 }, { "0", 256+2048 }, { "0", 0+2048 } } },
+ { { { "25", 1132+2048 }, { "25", 1152+2048 }, { "25", 1152+2048 }, { "25", 1024+2048 } } },
+ { { { "50", 2008+2048 }, { "50", 2048+2048 }, { "50", 2048+2048 }, { "50", 2048+2048 } } },
+ { { { "75", 2884+2048 }, { "75", 2944+2048 }, { "75", 2944+2048 }, { "75", 3072+2048 } } },
+ { { { "100", 3760+2048 }, { "100", 3840+2048 }, { "100", 3840+2048 }, { "100", 4096+2048 } } },
+ { { { "125", 4640+2048 }, { "125", 4736+2048 }, { "125", 4736+2048 }, { "125", 5120+2048 } } },
+};
+
+static const GraticuleLines flat_digital8[] = {
+ { { { "16", 16+256 }, { "16", 16+256 }, { "16", 16+256 }, { "0", 0+256 } } },
+ { { { "128", 128+256 }, { "128", 128+256 }, { "128", 128+256 }, { "128", 128+256 } } },
+ { { { "235", 235+256 }, { "240", 240+256 }, { "240", 240+256 }, { "255", 255+256 } } },
+};
+
+static const GraticuleLines flat_digital9[] = {
+ { { { "32", 32+512 }, { "32", 32+512 }, { "32", 32+512 }, { "0", 0+512 } } },
+ { { { "256", 256+512 }, { "256", 256+512 }, { "256", 256+512 }, { "256", 256+512 } } },
+ { { { "470", 470+512 }, { "480", 480+512 }, { "480", 480+512 }, { "511", 511+512 } } },
+};
+
+static const GraticuleLines flat_digital10[] = {
+ { { { "64", 64+1024 }, { "64", 64+1024 }, { "64", 64+1024 }, { "0", 0+1024 } } },
+ { { { "512", 512+1024 }, { "512", 512+1024 }, { "512", 512+1024 }, { "512", 512+1024 } } },
+ { { { "940", 940+1024 }, { "960", 960+1024 }, { "960", 960+1024 }, { "1023", 1023+1024 } } },
+};
+
+static const GraticuleLines flat_digital12[] = {
+ { { { "256", 256+4096 }, { "256", 256+4096 }, { "256", 256+4096 }, { "0", 0+4096 } } },
+ { { { "2048", 2048+4096 }, { "2048", 2048+4096 }, { "2048", 2048+4096 }, { "2048", 2048+4096 } } },
+ { { { "3760", 3760+4096 }, { "3840", 3840+4096 }, { "3840", 3840+4096 }, { "4095", 4095+4096 } } },
+};
+
+static const GraticuleLines flat_millivolts8[] = {
+ { { { "0", 16+256 }, { "0", 16+256 }, { "0", 16+256 }, { "0", 0+256 } } },
+ { { { "175", 71+256 }, { "175", 72+256 }, { "175", 72+256 }, { "175", 64+256 } } },
+ { { { "350", 126+256 }, { "350", 128+256 }, { "350", 128+256 }, { "350", 128+256 } } },
+ { { { "525", 180+256 }, { "525", 184+256 }, { "525", 184+256 }, { "525", 192+256 } } },
+ { { { "700", 235+256 }, { "700", 240+256 }, { "700", 240+256 }, { "700", 255+256 } } },
+};
+
+static const GraticuleLines flat_millivolts9[] = {
+ { { { "0", 32+512 }, { "0", 32+512 }, { "0", 32+512 }, { "0", 0+512 } } },
+ { { { "175", 142+512 }, { "175", 144+512 }, { "175", 144+512 }, { "175", 128+512 } } },
+ { { { "350", 251+512 }, { "350", 256+512 }, { "350", 256+512 }, { "350", 256+512 } } },
+ { { { "525", 361+512 }, { "525", 368+512 }, { "525", 368+512 }, { "525", 384+512 } } },
+ { { { "700", 470+512 }, { "700", 480+512 }, { "700", 480+512 }, { "700", 511+512 } } },
+};
+
+static const GraticuleLines flat_millivolts10[] = {
+ { { { "0", 64+1024 }, { "0", 64+1024 }, { "0", 64+1024 }, { "0", 0+1024 } } },
+ { { { "175", 283+1024 }, { "175", 288+1024 }, { "175", 288+1024 }, { "175", 256+1024 } } },
+ { { { "350", 502+1024 }, { "350", 512+1024 }, { "350", 512+1024 }, { "350", 512+1024 } } },
+ { { { "525", 721+1024 }, { "525", 736+1024 }, { "525", 736+1024 }, { "525", 768+1024 } } },
+ { { { "700", 940+1024 }, { "700", 960+1024 }, { "700", 960+1024 }, { "700", 1023+1024 } } },
+};
+
+static const GraticuleLines flat_millivolts12[] = {
+ { { { "0", 256+4096 }, { "0", 256+4096 }, { "0", 256+4096 }, { "0", 0+4096 } } },
+ { { { "175", 1132+4096 }, { "175", 1152+4096 }, { "175", 1152+4096 }, { "175", 1024+4096 } } },
+ { { { "350", 2008+4096 }, { "350", 2048+4096 }, { "350", 2048+4096 }, { "350", 2048+4096 } } },
+ { { { "525", 2884+4096 }, { "525", 2944+4096 }, { "525", 2944+4096 }, { "525", 3072+4096 } } },
+ { { { "700", 3760+4096 }, { "700", 3840+4096 }, { "700", 3840+4096 }, { "700", 4095+4096 } } },
+};
+
+static const GraticuleLines flat_ire8[] = {
+ { { { "-25", -39+256 }, { "-25", -40+256 }, { "-25", -40+256 }, { "-25", -64+256 } } },
+ { { { "0", 16+256 }, { "0", 16+256 }, { "0", 16+256 }, { "0", 0+256 } } },
+ { { { "25", 71+256 }, { "25", 72+256 }, { "25", 72+256 }, { "25", 64+256 } } },
+ { { { "50", 126+256 }, { "50", 128+256 }, { "50", 128+256 }, { "50", 128+256 } } },
+ { { { "75", 180+256 }, { "75", 184+256 }, { "75", 184+256 }, { "75", 192+256 } } },
+ { { { "100", 235+256 }, { "100", 240+256 }, { "100", 240+256 }, { "100", 256+256 } } },
+ { { { "125", 290+256 }, { "125", 296+256 }, { "125", 296+256 }, { "125", 320+256 } } },
+};
+
+static const GraticuleLines flat_ire9[] = {
+ { { { "-25", -78+512 }, { "-25", -80+512 }, { "-25", -80+512 }, { "-25",-128+512 } } },
+ { { { "0", 32+512 }, { "0", 32+512 }, { "0", 32+512 }, { "0", 0+512 } } },
+ { { { "25", 142+512 }, { "25", 144+512 }, { "25", 144+512 }, { "25", 128+512 } } },
+ { { { "50", 251+512 }, { "50", 256+512 }, { "50", 256+512 }, { "50", 256+512 } } },
+ { { { "75", 361+512 }, { "75", 368+512 }, { "75", 368+512 }, { "75", 384+512 } } },
+ { { { "100", 470+512 }, { "100", 480+512 }, { "100", 480+512 }, { "100", 512+512 } } },
+ { { { "125", 580+512 }, { "125", 592+512 }, { "125", 592+512 }, { "125", 640+512 } } },
+};
+
+static const GraticuleLines flat_ire10[] = {
+ { { { "-25",-156+1024 }, { "-25",-160+1024 }, { "-25",-160+1024 }, { "-25", -256+1024 } } },
+ { { { "0", 64+1024 }, { "0", 64+1024 }, { "0", 64+1024 }, { "0", 0+1024 } } },
+ { { { "25", 283+1024 }, { "25", 288+1024 }, { "25", 288+1024 }, { "25", 256+1024 } } },
+ { { { "50", 502+1024 }, { "50", 512+1024 }, { "50", 512+1024 }, { "50", 512+1024 } } },
+ { { { "75", 721+1024 }, { "75", 736+1024 }, { "75", 736+1024 }, { "75", 768+1024 } } },
+ { { { "100", 940+1024 }, { "100", 960+1024 }, { "100", 960+1024 }, { "100", 1024+1024 } } },
+ { { { "125",1160+1024 }, { "125",1184+1024 }, { "125",1184+1024 }, { "125", 1280+1024 } } },
+};
+
+static const GraticuleLines flat_ire12[] = {
+ { { { "-25", -624+4096 }, { "-25", -640+4096 }, { "-25", -640+4096 }, { "-25",-1024+4096 } } },
+ { { { "0", 256+4096 }, { "0", 256+4096 }, { "0", 256+4096 }, { "0", 0+4096 } } },
+ { { { "25", 1132+4096 }, { "25", 1152+4096 }, { "25", 1152+4096 }, { "25", 1024+4096 } } },
+ { { { "50", 2008+4096 }, { "50", 2048+4096 }, { "50", 2048+4096 }, { "50", 2048+4096 } } },
+ { { { "75", 2884+4096 }, { "75", 2944+4096 }, { "75", 2944+4096 }, { "75", 3072+4096 } } },
+ { { { "100", 3760+4096 }, { "100", 3840+4096 }, { "100", 3840+4096 }, { "100", 4096+4096 } } },
+ { { { "125", 4640+4096 }, { "125", 4736+4096 }, { "125", 4736+4096 }, { "125", 5120+4096 } } },
+};
+
+static const GraticuleLines digital8[] = {
+ { { { "16", 16 }, { "16", 16 }, { "16", 16 }, { "0", 0 } } },
+ { { { "128", 128 }, { "128", 128 }, { "128", 128 }, { "128", 128 } } },
+ { { { "235", 235 }, { "240", 240 }, { "240", 240 }, { "255", 255 } } },
+};
+
+static const GraticuleLines digital9[] = {
+ { { { "32", 32 }, { "32", 32 }, { "32", 32 }, { "0", 0 } } },
+ { { { "256", 256 }, { "256", 256 }, { "256", 256 }, { "256", 256 } } },
+ { { { "470", 470 }, { "480", 480 }, { "480", 480 }, { "511", 511 } } },
+};
+
+static const GraticuleLines digital10[] = {
+ { { { "64", 64 }, { "64", 64 }, { "64", 64 }, { "0", 0 } } },
+ { { { "512", 512 }, { "512", 512 }, { "512", 512 }, { "512", 512 } } },
+ { { { "940", 940 }, { "960", 960 }, { "960", 960 }, { "1023", 1023 } } },
+};
+
+static const GraticuleLines digital12[] = {
+ { { { "256", 256 }, { "256", 256 }, { "256", 256 }, { "0", 0 } } },
+ { { { "2048", 2048 }, { "2048", 2048 }, { "2048", 2048 }, { "2048", 2048 } } },
+ { { { "3760", 3760 }, { "3840", 3840 }, { "3840", 3840 }, { "4095", 4095 } } },
+};
+
+static const GraticuleLines millivolts8[] = {
+ { { { "0", 16 }, { "0", 16 }, { "0", 16 }, { "0", 0 } } },
+ { { { "175", 71 }, { "175", 72 }, { "175", 72 }, { "175", 64 } } },
+ { { { "350", 126 }, { "350", 128 }, { "350", 128 }, { "350", 128 } } },
+ { { { "525", 180 }, { "525", 184 }, { "525", 184 }, { "525", 192 } } },
+ { { { "700", 235 }, { "700", 240 }, { "700", 240 }, { "700", 255 } } },
+};
+
+static const GraticuleLines millivolts9[] = {
+ { { { "0", 32 }, { "0", 32 }, { "0", 32 }, { "0", 0 } } },
+ { { { "175", 142 }, { "175", 144 }, { "175", 144 }, { "175", 128 } } },
+ { { { "350", 251 }, { "350", 256 }, { "350", 256 }, { "350", 256 } } },
+ { { { "525", 361 }, { "525", 368 }, { "525", 368 }, { "525", 384 } } },
+ { { { "700", 470 }, { "700", 480 }, { "700", 480 }, { "700", 511 } } },
+};
+
+static const GraticuleLines millivolts10[] = {
+ { { { "0", 64 }, { "0", 64 }, { "0", 64 }, { "0", 0 } } },
+ { { { "175", 283 }, { "175", 288 }, { "175", 288 }, { "175", 256 } } },
+ { { { "350", 502 }, { "350", 512 }, { "350", 512 }, { "350", 512 } } },
+ { { { "525", 721 }, { "525", 736 }, { "525", 736 }, { "525", 768 } } },
+ { { { "700", 940 }, { "700", 960 }, { "700", 960 }, { "700", 1023 } } },
+};
+
+static const GraticuleLines millivolts12[] = {
+ { { { "0", 256 }, { "0", 256 }, { "0", 256 }, { "0", 0 } } },
+ { { { "175", 1132 }, { "175", 1152 }, { "175", 1152 }, { "175", 1024 } } },
+ { { { "350", 2008 }, { "350", 2048 }, { "350", 2048 }, { "350", 2048 } } },
+ { { { "525", 2884 }, { "525", 2944 }, { "525", 2944 }, { "525", 3072 } } },
+ { { { "700", 3760 }, { "700", 3840 }, { "700", 3840 }, { "700", 4095 } } },
+};
+
+static const GraticuleLines ire8[] = {
+ { { { "0", 16 }, { "0", 16 }, { "0", 16 }, { "0", 0 } } },
+ { { { "25", 71 }, { "25", 72 }, { "25", 72 }, { "25", 64 } } },
+ { { { "50", 126 }, { "50", 128 }, { "50", 128 }, { "50", 128 } } },
+ { { { "75", 180 }, { "75", 184 }, { "75", 184 }, { "75", 192 } } },
+ { { { "100", 235 }, { "100", 240 }, { "100", 240 }, { "100", 255 } } },
+};
+
+static const GraticuleLines ire9[] = {
+ { { { "0", 32 }, { "0", 32 }, { "0", 32 }, { "0", 0 } } },
+ { { { "25", 142 }, { "25", 144 }, { "25", 144 }, { "25", 128 } } },
+ { { { "50", 251 }, { "50", 256 }, { "50", 256 }, { "50", 256 } } },
+ { { { "75", 361 }, { "75", 368 }, { "75", 368 }, { "75", 384 } } },
+ { { { "100", 470 }, { "100", 480 }, { "100", 480 }, { "100", 511 } } },
+};
+
+static const GraticuleLines ire10[] = {
+ { { { "0", 64 }, { "0", 64 }, { "0", 64 }, { "0", 0 } } },
+ { { { "25", 283 }, { "25", 288 }, { "25", 288 }, { "25", 256 } } },
+ { { { "50", 502 }, { "50", 512 }, { "50", 512 }, { "50", 512 } } },
+ { { { "75", 721 }, { "75", 736 }, { "75", 736 }, { "75", 768 } } },
+ { { { "100", 940 }, { "100", 960 }, { "100", 960 }, { "100", 1023 } } },
+};
+
+static const GraticuleLines ire12[] = {
+ { { { "0", 256 }, { "0", 256 }, { "0", 256 }, { "0", 0 } } },
+ { { { "25", 1132 }, { "25", 1152 }, { "25", 1152 }, { "25", 1024 } } },
+ { { { "50", 2008 }, { "50", 2048 }, { "50", 2048 }, { "50", 2048 } } },
+ { { { "75", 2884 }, { "75", 2944 }, { "75", 2944 }, { "75", 3072 } } },
+ { { { "100", 3760 }, { "100", 3840 }, { "100", 3840 }, { "100", 4095 } } },
+};
+
+static const GraticuleLines chroma_digital8[] = {
+ { { { "50", 50 }, { "50", 50 }, { "50", 50 }, { "50", 50 } } },
+ { { { "100", 100 }, { "100", 100 }, { "100", 100 }, { "100", 100 } } },
+ { { { "150", 150 }, { "150", 150 }, { "150", 150 }, { "150", 150 } } },
+ { { { "200", 200 }, { "200", 200 }, { "200", 200 }, { "200", 200 } } },
+ { { { "255", 255 }, { "255", 255 }, { "255", 255 }, { "255", 255 } } },
+};
+
+static const GraticuleLines chroma_digital9[] = {
+ { { { "100", 100 }, { "100", 100 }, { "100", 100 }, { "100", 100 } } },
+ { { { "200", 200 }, { "200", 200 }, { "200", 200 }, { "200", 200 } } },
+ { { { "300", 300 }, { "300", 300 }, { "300", 300 }, { "300", 300 } } },
+ { { { "400", 400 }, { "400", 400 }, { "400", 400 }, { "400", 400 } } },
+ { { { "500", 500 }, { "500", 500 }, { "500", 500 }, { "500", 500 } } },
+};
+
+static const GraticuleLines chroma_digital10[] = {
+ { { { "200", 200 }, { "200", 200 }, { "200", 200 }, { "200", 200 } } },
+ { { { "400", 400 }, { "400", 400 }, { "400", 400 }, { "400", 400 } } },
+ { { { "600", 600 }, { "600", 600 }, { "600", 600 }, { "600", 600 } } },
+ { { { "800", 800 }, { "800", 800 }, { "800", 800 }, { "800", 800 } } },
+ { { {"1000",1000 }, {"1000",1000 }, {"1000",1000 }, {"1000",1000 } } },
+};
+
+static const GraticuleLines chroma_digital12[] = {
+ { { { "800", 800 }, { "800", 800 }, { "800", 800 }, { "800", 800 } } },
+ { { { "1600", 1600 }, { "1600", 1600 }, { "1600", 1600 }, { "1600", 1600 } } },
+ { { { "2400", 2400 }, { "2400", 2400 }, { "2400", 2400 }, { "2400", 2400 } } },
+ { { { "3200", 3200 }, { "3200", 3200 }, { "3200", 3200 }, { "3200", 3200 } } },
+ { { { "4000", 4000 }, { "4000", 4000 }, { "4000", 4000 }, { "4000", 4000 } } },
+};
+
+static void blend_vline(uint8_t *dst, int height, int linesize, float o1, float o2, int v, int step)
+{
+ int y;
+
+ for (y = 0; y < height; y += step) {
+ dst[0] = v * o1 + dst[0] * o2;
+
+ dst += linesize * step;
+ }
+}
+
+static void blend_vline16(uint16_t *dst, int height, int linesize, float o1, float o2, int v, int step)
+{
+ int y;
+
+ for (y = 0; y < height; y += step) {
+ dst[0] = v * o1 + dst[0] * o2;
+
+ dst += (linesize / 2) * step;
+ }
+}
+
+static void blend_hline(uint8_t *dst, int width, float o1, float o2, int v, int step)
+{
+ int x;
+
+ for (x = 0; x < width; x += step) {
+ dst[x] = v * o1 + dst[x] * o2;
+ }
+}
+
+static void blend_hline16(uint16_t *dst, int width, float o1, float o2, int v, int step)
+{
+ int x;
+
+ for (x = 0; x < width; x += step) {
+ dst[x] = v * o1 + dst[x] * o2;
+ }
+}
+
+static void draw_htext(AVFrame *out, int x, int y, float o1, float o2, const char *txt, const uint8_t color[4])
+{
+ const uint8_t *font;
+ int font_height;
+ int i, plane;
+
+ font = avpriv_cga_font, font_height = 8;
+
+ for (plane = 0; plane < 4 && out->data[plane]; plane++) {
+ for (i = 0; txt[i]; i++) {
+ int char_y, mask;
+ int v = color[plane];
+
+ uint8_t *p = out->data[plane] + y * out->linesize[plane] + (x + i * 8);
+ for (char_y = 0; char_y < font_height; char_y++) {
+ for (mask = 0x80; mask; mask >>= 1) {
+ if (font[txt[i] * font_height + char_y] & mask)
+ p[0] = p[0] * o2 + v * o1;
+ p++;
+ }
+ p += out->linesize[plane] - 8;
+ }
+ }
+ }
+}
+
+static void draw_htext16(AVFrame *out, int x, int y, int mult, float o1, float o2, const char *txt, const uint8_t color[4])
+{
+ const uint8_t *font;
+ int font_height;
+ int i, plane;
+
+ font = avpriv_cga_font, font_height = 8;
+
+ for (plane = 0; plane < 4 && out->data[plane]; plane++) {
+ for (i = 0; txt[i]; i++) {
+ int char_y, mask;
+ int v = color[plane] * mult;
+
+ uint16_t *p = (uint16_t *)(out->data[plane] + y * out->linesize[plane]) + (x + i * 8);
+ for (char_y = 0; char_y < font_height; char_y++) {
+ for (mask = 0x80; mask; mask >>= 1) {
+ if (font[txt[i] * font_height + char_y] & mask)
+ p[0] = p[0] * o2 + v * o1;
+ p++;
+ }
+ p += out->linesize[plane] / 2 - 8;
+ }
+ }
+ }
+}
+
+static void draw_vtext(AVFrame *out, int x, int y, float o1, float o2, const char *txt, const uint8_t color[4])
+{
+ const uint8_t *font;
+ int font_height;
+ int i, plane;
+
+ font = avpriv_cga_font, font_height = 8;
+
+ for (plane = 0; plane < 4 && out->data[plane]; plane++) {
+ for (i = 0; txt[i]; i++) {
+ int char_y, mask;
+ int v = color[plane];
+
+ for (char_y = font_height - 1; char_y >= 0; char_y--) {
+ uint8_t *p = out->data[plane] + (y + i * 10) * out->linesize[plane] + x;
+ for (mask = 0x80; mask; mask >>= 1) {
+ if (font[txt[i] * font_height + font_height - 1 - char_y] & mask)
+ p[char_y] = p[char_y] * o2 + v * o1;
+ p += out->linesize[plane];
+ }
+ }
+ }
+ }
+}
+
+static void draw_vtext16(AVFrame *out, int x, int y, int mult, float o1, float o2, const char *txt, const uint8_t color[4])
+{
+ const uint8_t *font;
+ int font_height;
+ int i, plane;
+
+ font = avpriv_cga_font, font_height = 8;
+
+ for (plane = 0; plane < 4 && out->data[plane]; plane++) {
+ for (i = 0; txt[i]; i++) {
+ int char_y, mask;
+ int v = color[plane] * mult;
+
+ for (char_y = 0; char_y < font_height; char_y++) {
+ uint16_t *p = (uint16_t *)(out->data[plane] + (y + i * 10) * out->linesize[plane]) + x;
+ for (mask = 0x80; mask; mask >>= 1) {
+ if (font[txt[i] * font_height + font_height - 1 - char_y] & mask)
+ p[char_y] = p[char_y] * o2 + v * o1;
+ p += out->linesize[plane] / 2;
+ }
+ }
+ }
+ }
+}
+
+static void graticule_none(WaveformContext *s, AVFrame *out)
+{
+}
+
+static void graticule_green_row(WaveformContext *s, AVFrame *out)
+{
+ const int step = (s->flags & 2) + 1;
+ const float o1 = s->opacity;
+ const float o2 = 1. - o1;
+ const int height = s->display == PARADE ? out->height / s->acomp : out->height;
+ int k = 0, c, p, l, offset_x = 0, offset_y = 0;
+
+ for (c = 0; c < s->ncomp; c++) {
+ if (!((1 << c) & s->pcomp) || (!s->display && k > 0))
+ continue;
+
+ k++;
+ for (p = 0; p < s->ncomp; p++) {
+ const int v = green_yuva_color[p];
+ for (l = 0; l < s->nb_glines; l++) {
+ const uint16_t pos = s->glines[l].line[c].pos;
+ int x = offset_x + (s->mirror ? s->size - 1 - pos : pos);
+ uint8_t *dst = out->data[p] + offset_y * out->linesize[p] + x;
+
+ blend_vline(dst, height, out->linesize[p], o1, o2, v, step);
+ }
+ }
+
+ for (l = 0; l < s->nb_glines && (s->flags & 1); l++) {
+ const char *name = s->glines[l].line[c].name;
+ const uint16_t pos = s->glines[l].line[c].pos;
+ int x = offset_x + (s->mirror ? s->size - 1 - pos : pos) - 10;
+
+ if (x < 0)
+ x = 4;
+
+ draw_vtext(out, x, offset_y + 2, o1, o2, name, green_yuva_color);
+ }
+
+ offset_x += s->size * (s->display == STACK);
+ offset_y += height * (s->display == PARADE);
+ }
+}
+
+static void graticule16_green_row(WaveformContext *s, AVFrame *out)
+{
+ const int step = (s->flags & 2) + 1;
+ const float o1 = s->opacity;
+ const float o2 = 1. - o1;
+ const int mult = s->size / 256;
+ const int height = s->display == PARADE ? out->height / s->acomp : out->height;
+ int k = 0, c, p, l, offset_x = 0, offset_y = 0;
+
+ for (c = 0; c < s->ncomp; c++) {
+ if (!((1 << c) & s->pcomp) || (!s->display && k > 0))
+ continue;
+
+ k++;
+ for (p = 0; p < s->ncomp; p++) {
+ const int v = green_yuva_color[p] * mult;
+ for (l = 0; l < s->nb_glines ; l++) {
+ const uint16_t pos = s->glines[l].line[c].pos;
+ int x = offset_x + (s->mirror ? s->size - 1 - pos : pos);
+ uint16_t *dst = (uint16_t *)(out->data[p] + offset_y * out->linesize[p]) + x;
+
+ blend_vline16(dst, height, out->linesize[p], o1, o2, v, step);
+ }
+ }
+
+ for (l = 0; l < s->nb_glines && (s->flags & 1); l++) {
+ const char *name = s->glines[l].line[c].name;
+ const uint16_t pos = s->glines[l].line[c].pos;
+ int x = offset_x + (s->mirror ? s->size - 1 - pos : pos) - 10;
+
+ if (x < 0)
+ x = 4;
+
+ draw_vtext16(out, x, offset_y + 2, mult, o1, o2, name, green_yuva_color);
+ }
+
+ offset_x += s->size * (s->display == STACK);
+ offset_y += height * (s->display == PARADE);
+ }
+}
+
+static void graticule_green_column(WaveformContext *s, AVFrame *out)
+{
+ const int step = (s->flags & 2) + 1;
+ const float o1 = s->opacity;
+ const float o2 = 1. - o1;
+ const int width = s->display == PARADE ? out->width / s->acomp : out->width;
+ int k = 0, c, p, l, offset_y = 0, offset_x = 0;
+
+ for (c = 0; c < s->ncomp; c++) {
+ if ((!((1 << c) & s->pcomp) || (!s->display && k > 0)))
+ continue;
+
+ k++;
+ for (p = 0; p < s->ncomp; p++) {
+ const int v = green_yuva_color[p];
+ for (l = 0; l < s->nb_glines ; l++) {
+ const uint16_t pos = s->glines[l].line[c].pos;
+ int y = offset_y + (s->mirror ? s->size - 1 - pos : pos);
+ uint8_t *dst = out->data[p] + y * out->linesize[p] + offset_x;
+
+ blend_hline(dst, width, o1, o2, v, step);
+ }
+ }
+
+ for (l = 0; l < s->nb_glines && (s->flags & 1); l++) {
+ const char *name = s->glines[l].line[c].name;
+ const uint16_t pos = s->glines[l].line[c].pos;
+ int y = offset_y + (s->mirror ? s->size - 1 - pos : pos) - 10;
+
+ if (y < 0)
+ y = 4;
+
+ draw_htext(out, 2 + offset_x, y, o1, o2, name, green_yuva_color);
+ }
+
+ offset_y += s->size * (s->display == STACK);
+ offset_x += width * (s->display == PARADE);
+ }
+}
+
+static void graticule16_green_column(WaveformContext *s, AVFrame *out)
+{
+ const int step = (s->flags & 2) + 1;
+ const float o1 = s->opacity;
+ const float o2 = 1. - o1;
+ const int mult = s->size / 256;
+ const int width = s->display == PARADE ? out->width / s->acomp : out->width;
+ int k = 0, c, p, l, offset_x = 0, offset_y = 0;
+
+ for (c = 0; c < s->ncomp; c++) {
+ if ((!((1 << c) & s->pcomp) || (!s->display && k > 0)))
+ continue;
+
+ k++;
+ for (p = 0; p < s->ncomp; p++) {
+ const int v = green_yuva_color[p] * mult;
+ for (l = 0; l < s->nb_glines ; l++) {
+ const uint16_t pos = s->glines[l].line[c].pos;
+ int y = offset_y + (s->mirror ? s->size - 1 - pos : pos);
+ uint16_t *dst = (uint16_t *)(out->data[p] + y * out->linesize[p]) + offset_x;
+
+ blend_hline16(dst, width, o1, o2, v, step);
+ }
+ }
+
+ for (l = 0; l < s->nb_glines && (s->flags & 1); l++) {
+ const char *name = s->glines[l].line[c].name;
+ const uint16_t pos = s->glines[l].line[c].pos;
+ int y = offset_y + (s->mirror ? s->size - 1 - pos: pos) - 10;
+
+ if (y < 0)
+ y = 4;
+
+ draw_htext16(out, 2 + offset_x, y, mult, o1, o2, name, green_yuva_color);
+ }
+
+ offset_y += s->size * (s->display == STACK);
+ offset_x += width * (s->display == PARADE);
+ }
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ WaveformContext *s = ctx->priv;
+
+ s->desc = av_pix_fmt_desc_get(inlink->format);
+ s->ncomp = s->desc->nb_components;
+ s->bits = s->desc->comp[0].depth;
+ s->max = 1 << s->bits;
+ s->intensity = s->fintensity * (s->max - 1);
+
+ s->shift_w[0] = s->shift_w[3] = 0;
+ s->shift_h[0] = s->shift_h[3] = 0;
+ s->shift_w[1] = s->shift_w[2] = s->desc->log2_chroma_w;
+ s->shift_h[1] = s->shift_h[2] = s->desc->log2_chroma_h;
+
+ s->graticulef = graticule_none;
+
+ switch (s->filter) {
+ case AFLAT: s->size = 256 * 2; break;
+ case FLAT: s->size = 256 * 3; break;
+ default: s->size = 256; break;
+ }
+
+ switch (s->filter | ((s->bits > 8) << 4) |
+ (s->mode << 8) | (s->mirror << 12)) {
+ case 0x1100: s->waveform = lowpass_column_mirror; break;
+ case 0x1000: s->waveform = lowpass_row_mirror; break;
+ case 0x0100: s->waveform = lowpass_column; break;
+ case 0x0000: s->waveform = lowpass_row; break;
+ case 0x1110: s->waveform = lowpass16_column_mirror; break;
+ case 0x1010: s->waveform = lowpass16_row_mirror; break;
+ case 0x0110: s->waveform = lowpass16_column; break;
+ case 0x0010: s->waveform = lowpass16_row; break;
+ case 0x1101:
+ case 0x1001:
+ case 0x0101:
+ case 0x0001: s->waveform = flat; break;
+ case 0x1111:
+ case 0x1011:
+ case 0x0111:
+ case 0x0011: s->waveform = flat16; break;
+ case 0x1102:
+ case 0x1002:
+ case 0x0102:
+ case 0x0002: s->waveform = aflat; break;
+ case 0x1112:
+ case 0x1012:
+ case 0x0112:
+ case 0x0012: s->waveform = aflat16; break;
+ case 0x1103:
+ case 0x1003:
+ case 0x0103:
+ case 0x0003: s->waveform = chroma; break;
+ case 0x1113:
+ case 0x1013:
+ case 0x0113:
+ case 0x0013: s->waveform = chroma16; break;
+ case 0x1104:
+ case 0x1004:
+ case 0x0104:
+ case 0x0004: s->waveform = color; break;
+ case 0x1114:
+ case 0x1014:
+ case 0x0114:
+ case 0x0014: s->waveform = color16; break;
+ case 0x1105:
+ case 0x1005:
+ case 0x0105:
+ case 0x0005: s->waveform = acolor; break;
+ case 0x1115:
+ case 0x1015:
+ case 0x0115:
+ case 0x0015: s->waveform = acolor16; break;
+ }
+
+ switch (s->filter) {
+ case LOWPASS:
+ case COLOR:
+ case ACOLOR:
+ case CHROMA:
+ case AFLAT:
+ case FLAT:
+ if (s->graticule && s->mode == 1)
+ s->graticulef = s->bits > 8 ? graticule16_green_column : graticule_green_column;
+ else if (s->graticule && s->mode == 0)
+ s->graticulef = s->bits > 8 ? graticule16_green_row : graticule_green_row;
+ break;
+ }
+
+ switch (s->filter) {
+ case COLOR:
+ case ACOLOR:
+ case LOWPASS:
+ switch (s->scale) {
+ case DIGITAL:
+ switch (s->bits) {
+ case 8: s->glines = (GraticuleLines *)digital8; s->nb_glines = FF_ARRAY_ELEMS(digital8); break;
+ case 9: s->glines = (GraticuleLines *)digital9; s->nb_glines = FF_ARRAY_ELEMS(digital9); break;
+ case 10: s->glines = (GraticuleLines *)digital10; s->nb_glines = FF_ARRAY_ELEMS(digital10); break;
+ case 12: s->glines = (GraticuleLines *)digital12; s->nb_glines = FF_ARRAY_ELEMS(digital12); break;
+ }
+ break;
+ case MILLIVOLTS:
+ switch (s->bits) {
+ case 8: s->glines = (GraticuleLines *)millivolts8; s->nb_glines = FF_ARRAY_ELEMS(millivolts8); break;
+ case 9: s->glines = (GraticuleLines *)millivolts9; s->nb_glines = FF_ARRAY_ELEMS(millivolts9); break;
+ case 10: s->glines = (GraticuleLines *)millivolts10; s->nb_glines = FF_ARRAY_ELEMS(millivolts10); break;
+ case 12: s->glines = (GraticuleLines *)millivolts12; s->nb_glines = FF_ARRAY_ELEMS(millivolts12); break;
+ }
+ break;
+ case IRE:
+ switch (s->bits) {
+ case 8: s->glines = (GraticuleLines *)ire8; s->nb_glines = FF_ARRAY_ELEMS(ire8); break;
+ case 9: s->glines = (GraticuleLines *)ire9; s->nb_glines = FF_ARRAY_ELEMS(ire9); break;
+ case 10: s->glines = (GraticuleLines *)ire10; s->nb_glines = FF_ARRAY_ELEMS(ire10); break;
+ case 12: s->glines = (GraticuleLines *)ire12; s->nb_glines = FF_ARRAY_ELEMS(ire12); break;
+ }
+ break;
+ }
+ break;
+ case CHROMA:
+ switch (s->scale) {
+ case DIGITAL:
+ switch (s->bits) {
+ case 8: s->glines = (GraticuleLines *)chroma_digital8; s->nb_glines = FF_ARRAY_ELEMS(chroma_digital8); break;
+ case 9: s->glines = (GraticuleLines *)chroma_digital9; s->nb_glines = FF_ARRAY_ELEMS(chroma_digital9); break;
+ case 10: s->glines = (GraticuleLines *)chroma_digital10; s->nb_glines = FF_ARRAY_ELEMS(chroma_digital10); break;
+ case 12: s->glines = (GraticuleLines *)chroma_digital12; s->nb_glines = FF_ARRAY_ELEMS(chroma_digital12); break;
+ }
+ break;
+ case MILLIVOLTS:
+ switch (s->bits) {
+ case 8: s->glines = (GraticuleLines *)millivolts8; s->nb_glines = FF_ARRAY_ELEMS(millivolts8); break;
+ case 9: s->glines = (GraticuleLines *)millivolts9; s->nb_glines = FF_ARRAY_ELEMS(millivolts9); break;
+ case 10: s->glines = (GraticuleLines *)millivolts10; s->nb_glines = FF_ARRAY_ELEMS(millivolts10); break;
+ case 12: s->glines = (GraticuleLines *)millivolts12; s->nb_glines = FF_ARRAY_ELEMS(millivolts12); break;
+ }
+ break;
+ case IRE:
+ switch (s->bits) {
+ case 8: s->glines = (GraticuleLines *)ire8; s->nb_glines = FF_ARRAY_ELEMS(ire8); break;
+ case 9: s->glines = (GraticuleLines *)ire9; s->nb_glines = FF_ARRAY_ELEMS(ire9); break;
+ case 10: s->glines = (GraticuleLines *)ire10; s->nb_glines = FF_ARRAY_ELEMS(ire10); break;
+ case 12: s->glines = (GraticuleLines *)ire12; s->nb_glines = FF_ARRAY_ELEMS(ire12); break;
+ }
+ break;
+ }
+ break;
+ case AFLAT:
+ switch (s->scale) {
+ case DIGITAL:
+ switch (s->bits) {
+ case 8: s->glines = (GraticuleLines *)aflat_digital8; s->nb_glines = FF_ARRAY_ELEMS(aflat_digital8); break;
+ case 9: s->glines = (GraticuleLines *)aflat_digital9; s->nb_glines = FF_ARRAY_ELEMS(aflat_digital9); break;
+ case 10: s->glines = (GraticuleLines *)aflat_digital10; s->nb_glines = FF_ARRAY_ELEMS(aflat_digital10); break;
+ case 12: s->glines = (GraticuleLines *)aflat_digital12; s->nb_glines = FF_ARRAY_ELEMS(aflat_digital12); break;
+ }
+ break;
+ case MILLIVOLTS:
+ switch (s->bits) {
+ case 8: s->glines = (GraticuleLines *)aflat_millivolts8; s->nb_glines = FF_ARRAY_ELEMS(aflat_millivolts8); break;
+ case 9: s->glines = (GraticuleLines *)aflat_millivolts9; s->nb_glines = FF_ARRAY_ELEMS(aflat_millivolts9); break;
+ case 10: s->glines = (GraticuleLines *)aflat_millivolts10; s->nb_glines = FF_ARRAY_ELEMS(aflat_millivolts10); break;
+ case 12: s->glines = (GraticuleLines *)aflat_millivolts12; s->nb_glines = FF_ARRAY_ELEMS(aflat_millivolts12); break;
+ }
+ break;
+ case IRE:
+ switch (s->bits) {
+ case 8: s->glines = (GraticuleLines *)aflat_ire8; s->nb_glines = FF_ARRAY_ELEMS(aflat_ire8); break;
+ case 9: s->glines = (GraticuleLines *)aflat_ire9; s->nb_glines = FF_ARRAY_ELEMS(aflat_ire9); break;
+ case 10: s->glines = (GraticuleLines *)aflat_ire10; s->nb_glines = FF_ARRAY_ELEMS(aflat_ire10); break;
+ case 12: s->glines = (GraticuleLines *)aflat_ire12; s->nb_glines = FF_ARRAY_ELEMS(aflat_ire12); break;
+ }
+ break;
+ }
+ break;
+ case FLAT:
+ switch (s->scale) {
+ case DIGITAL:
+ switch (s->bits) {
+ case 8: s->glines = (GraticuleLines *)flat_digital8; s->nb_glines = FF_ARRAY_ELEMS(flat_digital8); break;
+ case 9: s->glines = (GraticuleLines *)flat_digital9; s->nb_glines = FF_ARRAY_ELEMS(flat_digital9); break;
+ case 10: s->glines = (GraticuleLines *)flat_digital10; s->nb_glines = FF_ARRAY_ELEMS(flat_digital10); break;
+ case 12: s->glines = (GraticuleLines *)flat_digital12; s->nb_glines = FF_ARRAY_ELEMS(flat_digital12); break;
+ }
+ break;
+ case MILLIVOLTS:
+ switch (s->bits) {
+ case 8: s->glines = (GraticuleLines *)flat_millivolts8; s->nb_glines = FF_ARRAY_ELEMS(flat_millivolts8); break;
+ case 9: s->glines = (GraticuleLines *)flat_millivolts9; s->nb_glines = FF_ARRAY_ELEMS(flat_millivolts9); break;
+ case 10: s->glines = (GraticuleLines *)flat_millivolts10; s->nb_glines = FF_ARRAY_ELEMS(flat_millivolts10); break;
+ case 12: s->glines = (GraticuleLines *)flat_millivolts12; s->nb_glines = FF_ARRAY_ELEMS(flat_millivolts12); break;
+ }
+ break;
+ case IRE:
+ switch (s->bits) {
+ case 8: s->glines = (GraticuleLines *)flat_ire8; s->nb_glines = FF_ARRAY_ELEMS(flat_ire8); break;
+ case 9: s->glines = (GraticuleLines *)flat_ire9; s->nb_glines = FF_ARRAY_ELEMS(flat_ire9); break;
+ case 10: s->glines = (GraticuleLines *)flat_ire10; s->nb_glines = FF_ARRAY_ELEMS(flat_ire10); break;
+ case 12: s->glines = (GraticuleLines *)flat_ire12; s->nb_glines = FF_ARRAY_ELEMS(flat_ire12); break;
+ }
+ break;
+ }
+ break;
+ }
+
+ s->size = s->size << (s->bits - 8);
+
+ switch (inlink->format) {
+ case AV_PIX_FMT_GBRAP:
+ case AV_PIX_FMT_GBRP:
+ case AV_PIX_FMT_GBRP9:
+ case AV_PIX_FMT_GBRP10:
+ case AV_PIX_FMT_GBRP12:
+ memcpy(s->bg_color, black_gbrp_color, sizeof(s->bg_color));
+ s->graticulef = graticule_none;
+ break;
+ default:
+ memcpy(s->bg_color, black_yuva_color, sizeof(s->bg_color));
+ }
+
+ s->bg_color[3] *= s->bgopacity;
+
+ return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ AVFilterLink *inlink = ctx->inputs[0];
+ WaveformContext *s = ctx->priv;
+ int comp = 0, i, j = 0, k, p, size;
+
+ for (i = 0; i < s->ncomp; i++) {
+ if ((1 << i) & s->pcomp)
+ comp++;
+ }
+ s->acomp = comp;
+ s->odesc = av_pix_fmt_desc_get(outlink->format);
+ s->dcomp = s->odesc->nb_components;
+
+ av_freep(&s->peak);
+
+ if (s->mode) {
+ outlink->h = s->size * FFMAX(comp * (s->display == STACK), 1);
+ outlink->w = inlink->w * FFMAX(comp * (s->display == PARADE), 1);
+ size = inlink->w;
+ } else {
+ outlink->w = s->size * FFMAX(comp * (s->display == STACK), 1);
+ outlink->h = inlink->h * FFMAX(comp * (s->display == PARADE), 1);
+ size = inlink->h;
+ }
+
+ s->peak = av_malloc_array(size, 32 * sizeof(*s->peak));
+ if (!s->peak)
+ return AVERROR(ENOMEM);
+
+ for (p = 0; p < s->ncomp; p++) {
+ const int plane = s->desc->comp[p].plane;
+ int offset;
+
+ if (!((1 << p) & s->pcomp))
+ continue;
+
+ for (k = 0; k < 4; k++) {
+ s->emax[plane][k] = s->peak + size * (plane * 4 + k + 0);
+ s->emin[plane][k] = s->peak + size * (plane * 4 + k + 16);
+ }
+
+ offset = j++ * s->size * (s->display == STACK);
+ s->estart[plane] = offset;
+ s->eend[plane] = (offset + s->size - 1);
+ for (i = 0; i < size; i++) {
+ for (k = 0; k < 4; k++) {
+ s->emax[plane][k][i] = s->estart[plane];
+ s->emin[plane][k][i] = s->eend[plane];
+ }
+ }
+ }
+
+ outlink->sample_aspect_ratio = (AVRational){1,1};
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ WaveformContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFrame *out;
+ int i, j, k;
+
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ out->pts = in->pts;
+ av_frame_set_color_range(out, AVCOL_RANGE_JPEG);
+
+ for (k = 0; k < s->dcomp; k++) {
+ if (s->bits <= 8) {
+ for (i = 0; i < outlink->h ; i++)
+ memset(out->data[s->odesc->comp[k].plane] +
+ i * out->linesize[s->odesc->comp[k].plane],
+ s->bg_color[k], outlink->w);
+ } else {
+ const int mult = s->max / 256;
+ uint16_t *dst = (uint16_t *)out->data[s->odesc->comp[k].plane];
+
+ for (i = 0; i < outlink->h ; i++) {
+ for (j = 0; j < outlink->w; j++)
+ dst[j] = s->bg_color[k] * mult;
+ dst += out->linesize[s->odesc->comp[k].plane] / 2;
+ }
+ }
+ }
+
+ for (k = 0, i = 0; k < s->ncomp; k++) {
+ if ((1 << k) & s->pcomp) {
+ int offset_y;
+ int offset_x;
+
+ if (s->display == PARADE) {
+ offset_x = s->mode ? i++ * inlink->w : 0;
+ offset_y = s->mode ? 0 : i++ * inlink->h;
+ } else {
+ offset_y = s->mode ? i++ * s->size * !!s->display : 0;
+ offset_x = s->mode ? 0 : i++ * s->size * !!s->display;
+ }
+ s->waveform(s, in, out, k, s->intensity, offset_y, offset_x, s->mode, s->mirror);
+ }
+ }
+ s->graticulef(s, out);
+
+ av_frame_free(&in);
+ return ff_filter_frame(outlink, out);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ WaveformContext *s = ctx->priv;
+
+ av_freep(&s->peak);
+}
+
+static const AVFilterPad inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ { NULL }
+};
+
+static const AVFilterPad outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_waveform = {
+ .name = "waveform",
+ .description = NULL_IF_CONFIG_SMALL("Video waveform monitor."),
+ .priv_size = sizeof(WaveformContext),
+ .priv_class = &waveform_class,
+ .query_formats = query_formats,
+ .uninit = uninit,
+ .inputs = inputs,
+ .outputs = outputs,
+};
diff --git a/libavfilter/vf_weave.c b/libavfilter/vf_weave.c
new file mode 100644
index 0000000000..a5fc1b7403
--- /dev/null
+++ b/libavfilter/vf_weave.c
@@ -0,0 +1,151 @@
+/*
+ * Copyright (c) 2013 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/imgutils.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+#include "internal.h"
+
+typedef struct WeaveContext {
+ const AVClass *class;
+ int first_field;
+ int nb_planes;
+ int planeheight[4];
+ int linesize[4];
+
+ AVFrame *prev;
+} WeaveContext;
+
+#define OFFSET(x) offsetof(WeaveContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption weave_options[] = {
+ { "first_field", "set first field", OFFSET(first_field), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS, "field"},
+ { "top", "set top field first", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "field"},
+ { "t", "set top field first", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "field"},
+ { "bottom", "set bottom field first", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "field"},
+ { "b", "set bottom field first", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "field"},
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(weave);
+
+static int config_props_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ WeaveContext *s = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ int ret;
+
+ outlink->time_base.num = inlink->time_base.num * 2;
+ outlink->time_base.den = inlink->time_base.den;
+ outlink->frame_rate.num = inlink->frame_rate.num;
+ outlink->frame_rate.den = inlink->frame_rate.den * 2;
+ outlink->w = inlink->w;
+ outlink->h = inlink->h * 2;
+
+ if ((ret = av_image_fill_linesizes(s->linesize, inlink->format, inlink->w)) < 0)
+ return ret;
+
+ s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
+ s->planeheight[0] = s->planeheight[3] = inlink->h;
+
+ s->nb_planes = av_pix_fmt_count_planes(inlink->format);
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ WeaveContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFrame *out;
+ int i;
+
+ if (!s->prev) {
+ s->prev = in;
+ return 0;
+ }
+
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ av_frame_free(&s->prev);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+
+ for (i = 0; i < s->nb_planes; i++) {
+ av_image_copy_plane(out->data[i] + out->linesize[i] * s->first_field,
+ out->linesize[i] * 2,
+ in->data[i], in->linesize[i],
+ s->linesize[i], s->planeheight[i]);
+ av_image_copy_plane(out->data[i] + out->linesize[i] * !s->first_field,
+ out->linesize[i] * 2,
+ s->prev->data[i], s->prev->linesize[i],
+ s->linesize[i], s->planeheight[i]);
+ }
+
+ out->pts = in->pts / 2;
+ out->interlaced_frame = 1;
+ out->top_field_first = !s->first_field;
+
+ av_frame_free(&in);
+ av_frame_free(&s->prev);
+ return ff_filter_frame(outlink, out);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ WeaveContext *s = ctx->priv;
+
+ av_frame_free(&s->prev);
+}
+
+static const AVFilterPad weave_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad weave_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_props_output,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_weave = {
+ .name = "weave",
+ .description = NULL_IF_CONFIG_SMALL("Weave input video fields into frames."),
+ .priv_size = sizeof(WeaveContext),
+ .priv_class = &weave_class,
+ .uninit = uninit,
+ .inputs = weave_inputs,
+ .outputs = weave_outputs,
+};
diff --git a/libavfilter/vf_xbr.c b/libavfilter/vf_xbr.c
new file mode 100644
index 0000000000..9893e0cc80
--- /dev/null
+++ b/libavfilter/vf_xbr.c
@@ -0,0 +1,434 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * Copyright (c) 2011, 2012 Hyllian/Jararaca <sergiogdb@gmail.com>
+ * Copyright (c) 2014 Arwa Arif <arwaarif1994@gmail.com>
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * XBR Filter is used for depixelization of image.
+ * This is based on Hyllian's xBR shader.
+ *
+ * @see http://www.libretro.com/forums/viewtopic.php?f=6&t=134
+ * @see https://github.com/yoyofr/iFBA/blob/master/fba_src/src/intf/video/scalers/xbr.cpp
+ */
+
+#include "libavutil/opt.h"
+#include "libavutil/avassert.h"
+#include "libavutil/pixdesc.h"
+#include "internal.h"
+
+#define LB_MASK 0x00FEFEFE
+#define RED_BLUE_MASK 0x00FF00FF
+#define GREEN_MASK 0x0000FF00
+
+typedef int (*xbrfunc_t)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs);
+
+typedef struct {
+ const AVClass *class;
+ int n;
+ xbrfunc_t func;
+ uint32_t rgbtoyuv[1<<24];
+} XBRContext;
+
+typedef struct ThreadData {
+ AVFrame *in, *out;
+ const uint32_t *rgbtoyuv;
+} ThreadData;
+
+#define OFFSET(x) offsetof(XBRContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption xbr_options[] = {
+ { "n", "set scale factor", OFFSET(n), AV_OPT_TYPE_INT, {.i64 = 3}, 2, 4, .flags = FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(xbr);
+
+static uint32_t pixel_diff(uint32_t x, uint32_t y, const uint32_t *r2y)
+{
+#define YMASK 0xff0000
+#define UMASK 0x00ff00
+#define VMASK 0x0000ff
+#define ABSDIFF(a,b) (abs((int)(a)-(int)(b)))
+
+ uint32_t yuv1 = r2y[x & 0xffffff];
+ uint32_t yuv2 = r2y[y & 0xffffff];
+
+ return (ABSDIFF(yuv1 & YMASK, yuv2 & YMASK) >> 16) +
+ (ABSDIFF(yuv1 & UMASK, yuv2 & UMASK) >> 8) +
+ ABSDIFF(yuv1 & VMASK, yuv2 & VMASK);
+}
+
+#define ALPHA_BLEND_128_W(a, b) ((((a) & LB_MASK) >> 1) + (((b) & LB_MASK) >> 1))
+#define ALPHA_BLEND_BASE(a, b, m, s) ( (RED_BLUE_MASK & (((a) & RED_BLUE_MASK) + (((((b) & RED_BLUE_MASK) - ((a) & RED_BLUE_MASK)) * (m)) >> (s)))) \
+ | (GREEN_MASK & (((a) & GREEN_MASK) + (((((b) & GREEN_MASK) - ((a) & GREEN_MASK)) * (m)) >> (s)))))
+#define ALPHA_BLEND_32_W(a, b) ALPHA_BLEND_BASE(a, b, 1, 3)
+#define ALPHA_BLEND_64_W(a, b) ALPHA_BLEND_BASE(a, b, 1, 2)
+#define ALPHA_BLEND_192_W(a, b) ALPHA_BLEND_BASE(a, b, 3, 2)
+#define ALPHA_BLEND_224_W(a, b) ALPHA_BLEND_BASE(a, b, 7, 3)
+
+#define df(A, B) pixel_diff(A, B, r2y)
+#define eq(A, B) (df(A, B) < 155)
+
+#define FILT2(PE, PI, PH, PF, PG, PC, PD, PB, PA, G5, C4, G0, D0, C1, B1, F4, I4, H5, I5, A0, A1, \
+ N0, N1, N2, N3) do { \
+ if (PE != PH && PE != PF) { \
+ const unsigned e = df(PE,PC) + df(PE,PG) + df(PI,H5) + df(PI,F4) + (df(PH,PF)<<2); \
+ const unsigned i = df(PH,PD) + df(PH,I5) + df(PF,I4) + df(PF,PB) + (df(PE,PI)<<2); \
+ if (e <= i) { \
+ const unsigned px = df(PE,PF) <= df(PE,PH) ? PF : PH; \
+ if (e < i && (!eq(PF,PB) && !eq(PH,PD) || eq(PE,PI) \
+ && (!eq(PF,I4) && !eq(PH,I5)) \
+ || eq(PE,PG) || eq(PE,PC))) { \
+ const unsigned ke = df(PF,PG); \
+ const unsigned ki = df(PH,PC); \
+ const int left = ke<<1 <= ki && PE != PG && PD != PG; \
+ const int up = ke >= ki<<1 && PE != PC && PB != PC; \
+ if (left && up) { \
+ E[N3] = ALPHA_BLEND_224_W(E[N3], px); \
+ E[N2] = ALPHA_BLEND_64_W( E[N2], px); \
+ E[N1] = E[N2]; \
+ } else if (left) { \
+ E[N3] = ALPHA_BLEND_192_W(E[N3], px); \
+ E[N2] = ALPHA_BLEND_64_W( E[N2], px); \
+ } else if (up) { \
+ E[N3] = ALPHA_BLEND_192_W(E[N3], px); \
+ E[N1] = ALPHA_BLEND_64_W( E[N1], px); \
+ } else { /* diagonal */ \
+ E[N3] = ALPHA_BLEND_128_W(E[N3], px); \
+ } \
+ } else { \
+ E[N3] = ALPHA_BLEND_128_W(E[N3], px); \
+ } \
+ } \
+ } \
+} while (0)
+
+#define FILT3(PE, PI, PH, PF, PG, PC, PD, PB, PA, G5, C4, G0, D0, C1, B1, F4, I4, H5, I5, A0, A1, \
+ N0, N1, N2, N3, N4, N5, N6, N7, N8) do { \
+ if (PE != PH && PE != PF) { \
+ const unsigned e = df(PE,PC) + df(PE,PG) + df(PI,H5) + df(PI,F4) + (df(PH,PF)<<2); \
+ const unsigned i = df(PH,PD) + df(PH,I5) + df(PF,I4) + df(PF,PB) + (df(PE,PI)<<2); \
+ if (e <= i) { \
+ const unsigned px = df(PE,PF) <= df(PE,PH) ? PF : PH; \
+ if (e < i && (!eq(PF,PB) && !eq(PF,PC) || !eq(PH,PD) && !eq(PH,PG) || eq(PE,PI) \
+ && (!eq(PF,F4) && !eq(PF,I4) || !eq(PH,H5) && !eq(PH,I5)) \
+ || eq(PE,PG) || eq(PE,PC))) { \
+ const unsigned ke = df(PF,PG); \
+ const unsigned ki = df(PH,PC); \
+ const int left = ke<<1 <= ki && PE != PG && PD != PG; \
+ const int up = ke >= ki<<1 && PE != PC && PB != PC; \
+ if (left && up) { \
+ E[N7] = ALPHA_BLEND_192_W(E[N7], px); \
+ E[N6] = ALPHA_BLEND_64_W( E[N6], px); \
+ E[N5] = E[N7]; \
+ E[N2] = E[N6]; \
+ E[N8] = px; \
+ } else if (left) { \
+ E[N7] = ALPHA_BLEND_192_W(E[N7], px); \
+ E[N5] = ALPHA_BLEND_64_W( E[N5], px); \
+ E[N6] = ALPHA_BLEND_64_W( E[N6], px); \
+ E[N8] = px; \
+ } else if (up) { \
+ E[N5] = ALPHA_BLEND_192_W(E[N5], px); \
+ E[N7] = ALPHA_BLEND_64_W( E[N7], px); \
+ E[N2] = ALPHA_BLEND_64_W( E[N2], px); \
+ E[N8] = px; \
+ } else { /* diagonal */ \
+ E[N8] = ALPHA_BLEND_224_W(E[N8], px); \
+ E[N5] = ALPHA_BLEND_32_W( E[N5], px); \
+ E[N7] = ALPHA_BLEND_32_W( E[N7], px); \
+ } \
+ } else { \
+ E[N8] = ALPHA_BLEND_128_W(E[N8], px); \
+ } \
+ } \
+ } \
+} while (0)
+
+#define FILT4(PE, PI, PH, PF, PG, PC, PD, PB, PA, G5, C4, G0, D0, C1, B1, F4, I4, H5, I5, A0, A1, \
+ N15, N14, N11, N3, N7, N10, N13, N12, N9, N6, N2, N1, N5, N8, N4, N0) do { \
+ if (PE != PH && PE != PF) { \
+ const unsigned e = df(PE,PC) + df(PE,PG) + df(PI,H5) + df(PI,F4) + (df(PH,PF)<<2); \
+ const unsigned i = df(PH,PD) + df(PH,I5) + df(PF,I4) + df(PF,PB) + (df(PE,PI)<<2); \
+ if (e <= i) { \
+ const unsigned px = df(PE,PF) <= df(PE,PH) ? PF : PH; \
+ if (e < i && (!eq(PF,PB) && !eq(PH,PD) || eq(PE,PI) \
+ && (!eq(PF,I4) && !eq(PH,I5)) \
+ || eq(PE,PG) || eq(PE,PC))) { \
+ const unsigned ke = df(PF,PG); \
+ const unsigned ki = df(PH,PC); \
+ const int left = ke<<1 <= ki && PE != PG && PD != PG; \
+ const int up = ke >= ki<<1 && PE != PC && PB != PC; \
+ if (left && up) { \
+ E[N13] = ALPHA_BLEND_192_W(E[N13], px); \
+ E[N12] = ALPHA_BLEND_64_W( E[N12], px); \
+ E[N15] = E[N14] = E[N11] = px; \
+ E[N10] = E[N3] = E[N12]; \
+ E[N7] = E[N13]; \
+ } else if (left) { \
+ E[N11] = ALPHA_BLEND_192_W(E[N11], px); \
+ E[N13] = ALPHA_BLEND_192_W(E[N13], px); \
+ E[N10] = ALPHA_BLEND_64_W( E[N10], px); \
+ E[N12] = ALPHA_BLEND_64_W( E[N12], px); \
+ E[N14] = px; \
+ E[N15] = px; \
+ } else if (up) { \
+ E[N14] = ALPHA_BLEND_192_W(E[N14], px); \
+ E[N7 ] = ALPHA_BLEND_192_W(E[N7 ], px); \
+ E[N10] = ALPHA_BLEND_64_W( E[N10], px); \
+ E[N3 ] = ALPHA_BLEND_64_W( E[N3 ], px); \
+ E[N11] = px; \
+ E[N15] = px; \
+ } else { /* diagonal */ \
+ E[N11] = ALPHA_BLEND_128_W(E[N11], px); \
+ E[N14] = ALPHA_BLEND_128_W(E[N14], px); \
+ E[N15] = px; \
+ } \
+ } else { \
+ E[N15] = ALPHA_BLEND_128_W(E[N15], px); \
+ } \
+ } \
+ } \
+} while (0)
+
+static av_always_inline void xbr_filter(const ThreadData *td, int jobnr, int nb_jobs, int n)
+{
+ int x, y;
+ const AVFrame *input = td->in;
+ AVFrame *output = td->out;
+ const uint32_t *r2y = td->rgbtoyuv;
+ const int slice_start = (input->height * jobnr ) / nb_jobs;
+ const int slice_end = (input->height * (jobnr+1)) / nb_jobs;
+ const int nl = output->linesize[0] >> 2;
+ const int nl1 = nl + nl;
+ const int nl2 = nl1 + nl;
+
+ for (y = slice_start; y < slice_end; y++) {
+
+ uint32_t *E = (uint32_t *)(output->data[0] + y * output->linesize[0] * n);
+ const uint32_t *sa2 = (uint32_t *)(input->data[0] + y * input->linesize[0] - 8); /* center */
+ const uint32_t *sa1 = sa2 - (input->linesize[0]>>2); /* up x1 */
+ const uint32_t *sa0 = sa1 - (input->linesize[0]>>2); /* up x2 */
+ const uint32_t *sa3 = sa2 + (input->linesize[0]>>2); /* down x1 */
+ const uint32_t *sa4 = sa3 + (input->linesize[0]>>2); /* down x2 */
+
+ if (y <= 1) {
+ sa0 = sa1;
+ if (y == 0) {
+ sa0 = sa1 = sa2;
+ }
+ }
+
+ if (y >= input->height - 2) {
+ sa4 = sa3;
+ if (y == input->height - 1) {
+ sa4 = sa3 = sa2;
+ }
+ }
+
+ for (x = 0; x < input->width; x++) {
+ const uint32_t B1 = sa0[2];
+ const uint32_t PB = sa1[2];
+ const uint32_t PE = sa2[2];
+ const uint32_t PH = sa3[2];
+ const uint32_t H5 = sa4[2];
+
+ const int pprev = 2 - (x > 0);
+ const uint32_t A1 = sa0[pprev];
+ const uint32_t PA = sa1[pprev];
+ const uint32_t PD = sa2[pprev];
+ const uint32_t PG = sa3[pprev];
+ const uint32_t G5 = sa4[pprev];
+
+ const int pprev2 = pprev - (x > 1);
+ const uint32_t A0 = sa1[pprev2];
+ const uint32_t D0 = sa2[pprev2];
+ const uint32_t G0 = sa3[pprev2];
+
+ const int pnext = 3 - (x == input->width - 1);
+ const uint32_t C1 = sa0[pnext];
+ const uint32_t PC = sa1[pnext];
+ const uint32_t PF = sa2[pnext];
+ const uint32_t PI = sa3[pnext];
+ const uint32_t I5 = sa4[pnext];
+
+ const int pnext2 = pnext + 1 - (x >= input->width - 2);
+ const uint32_t C4 = sa1[pnext2];
+ const uint32_t F4 = sa2[pnext2];
+ const uint32_t I4 = sa3[pnext2];
+
+ if (n == 2) {
+ E[0] = E[1] = // 0, 1
+ E[nl] = E[nl + 1] = PE; // 2, 3
+
+ FILT2(PE, PI, PH, PF, PG, PC, PD, PB, PA, G5, C4, G0, D0, C1, B1, F4, I4, H5, I5, A0, A1, 0, 1, nl, nl+1);
+ FILT2(PE, PC, PF, PB, PI, PA, PH, PD, PG, I4, A1, I5, H5, A0, D0, B1, C1, F4, C4, G5, G0, nl, 0, nl+1, 1);
+ FILT2(PE, PA, PB, PD, PC, PG, PF, PH, PI, C1, G0, C4, F4, G5, H5, D0, A0, B1, A1, I4, I5, nl+1, nl, 1, 0);
+ FILT2(PE, PG, PD, PH, PA, PI, PB, PF, PC, A0, I5, A1, B1, I4, F4, H5, G5, D0, G0, C1, C4, 1, nl+1, 0, nl);
+ } else if (n == 3) {
+ E[0] = E[1] = E[2] = // 0, 1, 2
+ E[nl] = E[nl+1] = E[nl+2] = // 3, 4, 5
+ E[nl1] = E[nl1+1] = E[nl1+2] = PE; // 6, 7, 8
+
+ FILT3(PE, PI, PH, PF, PG, PC, PD, PB, PA, G5, C4, G0, D0, C1, B1, F4, I4, H5, I5, A0, A1, 0, 1, 2, nl, nl+1, nl+2, nl1, nl1+1, nl1+2);
+ FILT3(PE, PC, PF, PB, PI, PA, PH, PD, PG, I4, A1, I5, H5, A0, D0, B1, C1, F4, C4, G5, G0, nl1, nl, 0, nl1+1, nl+1, 1, nl1+2, nl+2, 2);
+ FILT3(PE, PA, PB, PD, PC, PG, PF, PH, PI, C1, G0, C4, F4, G5, H5, D0, A0, B1, A1, I4, I5, nl1+2, nl1+1, nl1, nl+2, nl+1, nl, 2, 1, 0);
+ FILT3(PE, PG, PD, PH, PA, PI, PB, PF, PC, A0, I5, A1, B1, I4, F4, H5, G5, D0, G0, C1, C4, 2, nl+2, nl1+2, 1, nl+1, nl1+1, 0, nl, nl1);
+ } else if (n == 4) {
+ E[0] = E[1] = E[2] = E[3] = // 0, 1, 2, 3
+ E[nl] = E[nl+1] = E[nl+2] = E[nl+3] = // 4, 5, 6, 7
+ E[nl1] = E[nl1+1] = E[nl1+2] = E[nl1+3] = // 8, 9, 10, 11
+ E[nl2] = E[nl2+1] = E[nl2+2] = E[nl2+3] = PE; // 12, 13, 14, 15
+
+ FILT4(PE, PI, PH, PF, PG, PC, PD, PB, PA, G5, C4, G0, D0, C1, B1, F4, I4, H5, I5, A0, A1, nl2+3, nl2+2, nl1+3, 3, nl+3, nl1+2, nl2+1, nl2, nl1+1, nl+2, 2, 1, nl+1, nl1, nl, 0);
+ FILT4(PE, PC, PF, PB, PI, PA, PH, PD, PG, I4, A1, I5, H5, A0, D0, B1, C1, F4, C4, G5, G0, 3, nl+3, 2, 0, 1, nl+2, nl1+3, nl2+3, nl1+2, nl+1, nl, nl1, nl1+1, nl2+2, nl2+1, nl2);
+ FILT4(PE, PA, PB, PD, PC, PG, PF, PH, PI, C1, G0, C4, F4, G5, H5, D0, A0, B1, A1, I4, I5, 0, 1, nl, nl2, nl1, nl+1, 2, 3, nl+2, nl1+1, nl2+1, nl2+2, nl1+2, nl+3, nl1+3, nl2+3);
+ FILT4(PE, PG, PD, PH, PA, PI, PB, PF, PC, A0, I5, A1, B1, I4, F4, H5, G5, D0, G0, C1, C4, nl2, nl1, nl2+1, nl2+3, nl2+2, nl1+1, nl, 0, nl+1, nl1+2, nl1+3, nl+3, nl+2, 1, 2, 3);
+ }
+
+ sa0 += 1;
+ sa1 += 1;
+ sa2 += 1;
+ sa3 += 1;
+ sa4 += 1;
+
+ E += n;
+ }
+ }
+}
+
+#define XBR_FUNC(size) \
+static int xbr##size##x(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs) \
+{ \
+ xbr_filter(arg, jobnr, nb_jobs, size); \
+ return 0; \
+}
+
+XBR_FUNC(2)
+XBR_FUNC(3)
+XBR_FUNC(4)
+
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ XBRContext *s = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
+
+ outlink->w = inlink->w * s->n;
+ outlink->h = inlink->h * s->n;
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_0RGB32, AV_PIX_FMT_NONE,
+ };
+
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ XBRContext *s = ctx->priv;
+ ThreadData td;
+
+ AVFrame *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+
+ av_frame_copy_props(out, in);
+
+ td.in = in;
+ td.out = out;
+ td.rgbtoyuv = s->rgbtoyuv;
+ ctx->internal->execute(ctx, s->func, &td, NULL, FFMIN(inlink->h, ff_filter_get_nb_threads(ctx)));
+
+ out->width = outlink->w;
+ out->height = outlink->h;
+
+ av_frame_free(&in);
+ return ff_filter_frame(outlink, out);
+}
+
+static int init(AVFilterContext *ctx)
+{
+ XBRContext *s = ctx->priv;
+ static const xbrfunc_t xbrfuncs[] = {xbr2x, xbr3x, xbr4x};
+
+ uint32_t c;
+ int bg, rg, g;
+
+ for (bg = -255; bg < 256; bg++) {
+ for (rg = -255; rg < 256; rg++) {
+ const uint32_t u = (uint32_t)((-169*rg + 500*bg)/1000) + 128;
+ const uint32_t v = (uint32_t)(( 500*rg - 81*bg)/1000) + 128;
+ int startg = FFMAX3(-bg, -rg, 0);
+ int endg = FFMIN3(255-bg, 255-rg, 255);
+ uint32_t y = (uint32_t)(( 299*rg + 1000*startg + 114*bg)/1000);
+ c = bg + (rg<<16) + 0x010101 * startg;
+ for (g = startg; g <= endg; g++) {
+ s->rgbtoyuv[c] = ((y++) << 16) + (u << 8) + v;
+ c+= 0x010101;
+ }
+ }
+ }
+
+ s->func = xbrfuncs[s->n - 2];
+ return 0;
+}
+
+static const AVFilterPad xbr_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad xbr_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_xbr = {
+ .name = "xbr",
+ .description = NULL_IF_CONFIG_SMALL("Scale the input using xBR algorithm."),
+ .inputs = xbr_inputs,
+ .outputs = xbr_outputs,
+ .query_formats = query_formats,
+ .priv_size = sizeof(XBRContext),
+ .priv_class = &xbr_class,
+ .init = init,
+ .flags = AVFILTER_FLAG_SLICE_THREADS,
+};
diff --git a/libavfilter/vf_yadif.c b/libavfilter/vf_yadif.c
index 75f2d17758..694ac44999 100644
--- a/libavfilter/vf_yadif.c
+++ b/libavfilter/vf_yadif.c
@@ -1,37 +1,36 @@
/*
- * Copyright (C) 2006-2010 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (C) 2006-2011 Michael Niedermayer <michaelni@gmx.at>
* 2010 James Darnley <james.darnley@gmail.com>
+
+ * This file is part of FFmpeg.
*
- * This file is part of Libav.
- *
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include "libavutil/avassert.h"
#include "libavutil/cpu.h"
#include "libavutil/common.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
+#include "libavutil/imgutils.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
#include "yadif.h"
-#undef NDEBUG
-#include <assert.h>
-
typedef struct ThreadData {
AVFrame *frame;
int plane;
@@ -69,7 +68,7 @@ typedef struct ThreadData {
CHECK( 1) CHECK( 2) }} }} \
}\
\
- if (mode < 2) { \
+ if (!(mode&2)) { \
int b = (prev2[2 * mrefs] + next2[2 * mrefs])>>1; \
int f = (prev2[2 * prefs] + next2[2 * prefs])>>1; \
int max = FFMAX3(d - e, d - c, FFMIN(b - c, f - e)); \
@@ -112,6 +111,7 @@ static void filter_line_c(void *dst1,
FILTER(0, w, 1)
}
+#define MAX_ALIGN 8
static void filter_edges(void *dst1, void *prev1, void *cur1, void *next1,
int w, int prefs, int mrefs, int parity, int mode)
{
@@ -127,13 +127,14 @@ static void filter_edges(void *dst1, void *prev1, void *cur1, void *next1,
* for is_not_edge should let the compiler ignore the whole branch. */
FILTER(0, 3, 0)
- dst = (uint8_t*)dst1 + w - 3;
- prev = (uint8_t*)prev1 + w - 3;
- cur = (uint8_t*)cur1 + w - 3;
- next = (uint8_t*)next1 + w - 3;
+ dst = (uint8_t*)dst1 + w - (MAX_ALIGN-1);
+ prev = (uint8_t*)prev1 + w - (MAX_ALIGN-1);
+ cur = (uint8_t*)cur1 + w - (MAX_ALIGN-1);
+ next = (uint8_t*)next1 + w - (MAX_ALIGN-1);
prev2 = (uint8_t*)(parity ? prev : cur);
next2 = (uint8_t*)(parity ? cur : next);
+ FILTER(w - (MAX_ALIGN-1), w - 3, 1)
FILTER(w - 3, w, 0)
}
@@ -171,13 +172,14 @@ static void filter_edges_16bit(void *dst1, void *prev1, void *cur1, void *next1,
FILTER(0, 3, 0)
- dst = (uint16_t*)dst1 + w - 3;
- prev = (uint16_t*)prev1 + w - 3;
- cur = (uint16_t*)cur1 + w - 3;
- next = (uint16_t*)next1 + w - 3;
+ dst = (uint16_t*)dst1 + w - (MAX_ALIGN/2-1);
+ prev = (uint16_t*)prev1 + w - (MAX_ALIGN/2-1);
+ cur = (uint16_t*)cur1 + w - (MAX_ALIGN/2-1);
+ next = (uint16_t*)next1 + w - (MAX_ALIGN/2-1);
prev2 = (uint16_t*)(parity ? prev : cur);
next2 = (uint16_t*)(parity ? cur : next);
+ FILTER(w - (MAX_ALIGN/2-1), w - 3, 1)
FILTER(w - 3, w, 0)
}
@@ -188,9 +190,8 @@ static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
int refs = s->cur->linesize[td->plane];
int df = (s->csp->comp[td->plane].depth + 7) / 8;
int pix_3 = 3 * df;
- int slice_h = td->h / nb_jobs;
- int slice_start = jobnr * slice_h;
- int slice_end = (jobnr == nb_jobs - 1) ? td->h : (jobnr + 1) * slice_h;
+ int slice_start = (td->h * jobnr ) / nb_jobs;
+ int slice_end = (td->h * (jobnr+1)) / nb_jobs;
int y;
/* filtering reads 3 pixels to the left/right; to avoid invalid reads,
@@ -204,7 +205,7 @@ static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
uint8_t *dst = &td->frame->data[td->plane][y * td->frame->linesize[td->plane]];
int mode = y == 1 || y + 2 == td->h ? 2 : s->mode;
s->filter_line(dst + pix_3, prev + pix_3, cur + pix_3,
- next + pix_3, td->w - 6,
+ next + pix_3, td->w - (3 + MAX_ALIGN/df-1),
y + 1 < td->h ? refs : -refs,
y ? -refs : refs,
td->parity ^ td->tff, mode);
@@ -232,8 +233,8 @@ static void filter(AVFilterContext *ctx, AVFrame *dstpic,
int h = dstpic->height;
if (i == 1 || i == 2) {
- w >>= yadif->csp->log2_chroma_w;
- h >>= yadif->csp->log2_chroma_h;
+ w = AV_CEIL_RSHIFT(w, yadif->csp->log2_chroma_w);
+ h = AV_CEIL_RSHIFT(h, yadif->csp->log2_chroma_h);
}
@@ -241,30 +242,12 @@ static void filter(AVFilterContext *ctx, AVFrame *dstpic,
td.h = h;
td.plane = i;
- ctx->internal->execute(ctx, filter_slice, &td, NULL, FFMIN(h, ctx->graph->nb_threads));
+ ctx->internal->execute(ctx, filter_slice, &td, NULL, FFMIN(h, ff_filter_get_nb_threads(ctx)));
}
emms_c();
}
-static AVFrame *get_video_buffer(AVFilterLink *link, int w, int h)
-{
- AVFrame *frame;
- int width = FFALIGN(w, 32);
- int height = FFALIGN(h + 2, 32);
- int i;
-
- frame = ff_default_get_video_buffer(link, width, height);
-
- frame->width = w;
- frame->height = h;
-
- for (i = 0; i < 3; i++)
- frame->data[i] += frame->linesize[i];
-
- return frame;
-}
-
static int return_frame(AVFilterContext *ctx, int is_second)
{
YADIFContext *yadif = ctx->priv;
@@ -305,11 +288,36 @@ static int return_frame(AVFilterContext *ctx, int is_second)
return ret;
}
+static int checkstride(YADIFContext *yadif, const AVFrame *a, const AVFrame *b)
+{
+ int i;
+ for (i = 0; i < yadif->csp->nb_components; i++)
+ if (a->linesize[i] != b->linesize[i])
+ return 1;
+ return 0;
+}
+
+static void fixstride(AVFilterLink *link, AVFrame *f)
+{
+ AVFrame *dst = ff_default_get_video_buffer(link, f->width, f->height);
+ if(!dst)
+ return;
+ av_frame_copy_props(dst, f);
+ av_image_copy(dst->data, dst->linesize,
+ (const uint8_t **)f->data, f->linesize,
+ dst->format, dst->width, dst->height);
+ av_frame_unref(f);
+ av_frame_move_ref(f, dst);
+ av_frame_free(&dst);
+}
+
static int filter_frame(AVFilterLink *link, AVFrame *frame)
{
AVFilterContext *ctx = link->dst;
YADIFContext *yadif = ctx->priv;
+ av_assert0(frame);
+
if (yadif->frame_pending)
return_frame(ctx, 1);
@@ -319,10 +327,31 @@ static int filter_frame(AVFilterLink *link, AVFrame *frame)
yadif->cur = yadif->next;
yadif->next = frame;
- if (!yadif->cur)
+ if (!yadif->cur &&
+ !(yadif->cur = av_frame_clone(yadif->next)))
+ return AVERROR(ENOMEM);
+
+ if (checkstride(yadif, yadif->next, yadif->cur)) {
+ av_log(ctx, AV_LOG_VERBOSE, "Reallocating frame due to differing stride\n");
+ fixstride(link, yadif->next);
+ }
+ if (checkstride(yadif, yadif->next, yadif->cur))
+ fixstride(link, yadif->cur);
+ if (yadif->prev && checkstride(yadif, yadif->next, yadif->prev))
+ fixstride(link, yadif->prev);
+ if (checkstride(yadif, yadif->next, yadif->cur) || (yadif->prev && checkstride(yadif, yadif->next, yadif->prev))) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to reallocate frame\n");
+ return -1;
+ }
+
+ if (!yadif->prev)
return 0;
- if (yadif->auto_enable && !yadif->cur->interlaced_frame) {
+ if ((yadif->deint && !yadif->cur->interlaced_frame) ||
+ ctx->is_disabled ||
+ (yadif->deint && !yadif->prev->interlaced_frame && yadif->prev->repeat_pict) ||
+ (yadif->deint && !yadif->next->interlaced_frame && yadif->next->repeat_pict)
+ ) {
yadif->out = av_frame_clone(yadif->cur);
if (!yadif->out)
return AVERROR(ENOMEM);
@@ -333,10 +362,6 @@ static int filter_frame(AVFilterLink *link, AVFrame *frame)
return ff_filter_frame(ctx->outputs[0], yadif->out);
}
- if (!yadif->prev &&
- !(yadif->prev = av_frame_clone(yadif->cur)))
- return AVERROR(ENOMEM);
-
yadif->out = ff_get_video_buffer(ctx->outputs[0], link->w, link->h);
if (!yadif->out)
return AVERROR(ENOMEM);
@@ -354,73 +379,42 @@ static int request_frame(AVFilterLink *link)
{
AVFilterContext *ctx = link->src;
YADIFContext *yadif = ctx->priv;
+ int ret;
if (yadif->frame_pending) {
return_frame(ctx, 1);
return 0;
}
- do {
- int ret;
+ if (yadif->eof)
+ return AVERROR_EOF;
- if (yadif->eof)
- return AVERROR_EOF;
+ ret = ff_request_frame(ctx->inputs[0]);
- ret = ff_request_frame(link->src->inputs[0]);
+ if (ret == AVERROR_EOF && yadif->cur) {
+ AVFrame *next = av_frame_clone(yadif->next);
- if (ret == AVERROR_EOF && yadif->next) {
- AVFrame *next = av_frame_clone(yadif->next);
-
- if (!next)
- return AVERROR(ENOMEM);
-
- next->pts = yadif->next->pts * 2 - yadif->cur->pts;
-
- filter_frame(link->src->inputs[0], next);
- yadif->eof = 1;
- } else if (ret < 0) {
- return ret;
- }
- } while (!yadif->cur);
-
- return 0;
-}
+ if (!next)
+ return AVERROR(ENOMEM);
-static int poll_frame(AVFilterLink *link)
-{
- YADIFContext *yadif = link->src->priv;
- int ret, val;
+ next->pts = yadif->next->pts * 2 - yadif->cur->pts;
- if (yadif->frame_pending)
- return 1;
-
- val = ff_poll_frame(link->src->inputs[0]);
- if (val <= 0)
- return val;
-
- //FIXME change API to not require this red tape
- if (val == 1 && !yadif->next) {
- if ((ret = ff_request_frame(link->src->inputs[0])) < 0)
- return ret;
- val = ff_poll_frame(link->src->inputs[0]);
- if (val <= 0)
- return val;
+ filter_frame(ctx->inputs[0], next);
+ yadif->eof = 1;
+ } else if (ret < 0) {
+ return ret;
}
- assert(yadif->next || !val);
- if (yadif->auto_enable && yadif->next && !yadif->next->interlaced_frame)
- return val;
-
- return val * ((yadif->mode&1)+1);
+ return 0;
}
static av_cold void uninit(AVFilterContext *ctx)
{
YADIFContext *yadif = ctx->priv;
- if (yadif->prev) av_frame_free(&yadif->prev);
- if (yadif->cur ) av_frame_free(&yadif->cur );
- if (yadif->next) av_frame_free(&yadif->next);
+ av_frame_free(&yadif->prev);
+ av_frame_free(&yadif->cur );
+ av_frame_free(&yadif->next);
}
static int query_formats(AVFilterContext *ctx)
@@ -435,37 +429,62 @@ static int query_formats(AVFilterContext *ctx)
AV_PIX_FMT_YUVJ420P,
AV_PIX_FMT_YUVJ422P,
AV_PIX_FMT_YUVJ444P,
- AV_NE( AV_PIX_FMT_GRAY16BE, AV_PIX_FMT_GRAY16LE ),
+ AV_PIX_FMT_GRAY16,
AV_PIX_FMT_YUV440P,
AV_PIX_FMT_YUVJ440P,
- AV_NE( AV_PIX_FMT_YUV420P10BE, AV_PIX_FMT_YUV420P10LE ),
- AV_NE( AV_PIX_FMT_YUV422P10BE, AV_PIX_FMT_YUV422P10LE ),
- AV_NE( AV_PIX_FMT_YUV444P10BE, AV_PIX_FMT_YUV444P10LE ),
- AV_NE( AV_PIX_FMT_YUV420P16BE, AV_PIX_FMT_YUV420P16LE ),
- AV_NE( AV_PIX_FMT_YUV422P16BE, AV_PIX_FMT_YUV422P16LE ),
- AV_NE( AV_PIX_FMT_YUV444P16BE, AV_PIX_FMT_YUV444P16LE ),
+ AV_PIX_FMT_YUV420P9,
+ AV_PIX_FMT_YUV422P9,
+ AV_PIX_FMT_YUV444P9,
+ AV_PIX_FMT_YUV420P10,
+ AV_PIX_FMT_YUV422P10,
+ AV_PIX_FMT_YUV444P10,
+ AV_PIX_FMT_YUV420P12,
+ AV_PIX_FMT_YUV422P12,
+ AV_PIX_FMT_YUV444P12,
+ AV_PIX_FMT_YUV420P14,
+ AV_PIX_FMT_YUV422P14,
+ AV_PIX_FMT_YUV444P14,
+ AV_PIX_FMT_YUV420P16,
+ AV_PIX_FMT_YUV422P16,
+ AV_PIX_FMT_YUV444P16,
AV_PIX_FMT_YUVA420P,
+ AV_PIX_FMT_YUVA422P,
+ AV_PIX_FMT_YUVA444P,
+ AV_PIX_FMT_GBRP,
+ AV_PIX_FMT_GBRP9,
+ AV_PIX_FMT_GBRP10,
+ AV_PIX_FMT_GBRP12,
+ AV_PIX_FMT_GBRP14,
+ AV_PIX_FMT_GBRP16,
+ AV_PIX_FMT_GBRAP,
AV_PIX_FMT_NONE
};
- ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
-
- return 0;
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
}
static int config_props(AVFilterLink *link)
{
- YADIFContext *s = link->src->priv;
+ AVFilterContext *ctx = link->src;
+ YADIFContext *s = ctx->priv;
- link->time_base.num = link->src->inputs[0]->time_base.num;
- link->time_base.den = link->src->inputs[0]->time_base.den * 2;
- link->w = link->src->inputs[0]->w;
- link->h = link->src->inputs[0]->h;
+ link->time_base.num = ctx->inputs[0]->time_base.num;
+ link->time_base.den = ctx->inputs[0]->time_base.den * 2;
+ link->w = ctx->inputs[0]->w;
+ link->h = ctx->inputs[0]->h;
- if (s->mode & 1)
- link->frame_rate = av_mul_q(link->src->inputs[0]->frame_rate,
+ if(s->mode & 1)
+ link->frame_rate = av_mul_q(ctx->inputs[0]->frame_rate,
(AVRational){2, 1});
+ if (link->w < 3 || link->h < 3) {
+ av_log(ctx, AV_LOG_ERROR, "Video of less than 3 columns or lines is not supported\n");
+ return AVERROR(EINVAL);
+ }
+
s->csp = av_pix_fmt_desc_get(link->format);
if (s->csp->comp[0].depth > 8) {
s->filter_line = filter_line_c_16bit;
@@ -473,39 +492,46 @@ static int config_props(AVFilterLink *link)
} else {
s->filter_line = filter_line_c;
s->filter_edges = filter_edges;
-
- if (ARCH_X86)
- ff_yadif_init_x86(s);
}
+ if (ARCH_X86)
+ ff_yadif_init_x86(s);
+
return 0;
}
+
#define OFFSET(x) offsetof(YADIFContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
-static const AVOption options[] = {
- { "mode", NULL, OFFSET(mode), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 3, FLAGS },
- { "parity", NULL, OFFSET(parity), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 1, FLAGS, "parity" },
- { "auto", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = -1 }, .unit = "parity" },
- { "tff", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 0 }, .unit = "parity" },
- { "bff", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 1 }, .unit = "parity" },
- { "auto", NULL, OFFSET(auto_enable), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, FLAGS },
- { NULL },
-};
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+#define CONST(name, help, val, unit) { name, help, 0, AV_OPT_TYPE_CONST, {.i64=val}, INT_MIN, INT_MAX, FLAGS, unit }
-static const AVClass yadif_class = {
- .class_name = "yadif",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
+static const AVOption yadif_options[] = {
+ { "mode", "specify the interlacing mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=YADIF_MODE_SEND_FRAME}, 0, 3, FLAGS, "mode"},
+ CONST("send_frame", "send one frame for each frame", YADIF_MODE_SEND_FRAME, "mode"),
+ CONST("send_field", "send one frame for each field", YADIF_MODE_SEND_FIELD, "mode"),
+ CONST("send_frame_nospatial", "send one frame for each frame, but skip spatial interlacing check", YADIF_MODE_SEND_FRAME_NOSPATIAL, "mode"),
+ CONST("send_field_nospatial", "send one frame for each field, but skip spatial interlacing check", YADIF_MODE_SEND_FIELD_NOSPATIAL, "mode"),
+
+ { "parity", "specify the assumed picture field parity", OFFSET(parity), AV_OPT_TYPE_INT, {.i64=YADIF_PARITY_AUTO}, -1, 1, FLAGS, "parity" },
+ CONST("tff", "assume top field first", YADIF_PARITY_TFF, "parity"),
+ CONST("bff", "assume bottom field first", YADIF_PARITY_BFF, "parity"),
+ CONST("auto", "auto detect parity", YADIF_PARITY_AUTO, "parity"),
+
+ { "deint", "specify which frames to deinterlace", OFFSET(deint), AV_OPT_TYPE_INT, {.i64=YADIF_DEINT_ALL}, 0, 1, FLAGS, "deint" },
+ CONST("all", "deinterlace all frames", YADIF_DEINT_ALL, "deint"),
+ CONST("interlaced", "only deinterlace frames marked as interlaced", YADIF_DEINT_INTERLACED, "deint"),
+
+ { NULL }
};
+AVFILTER_DEFINE_CLASS(yadif);
+
static const AVFilterPad avfilter_vf_yadif_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .get_video_buffer = get_video_buffer,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
},
{ NULL }
};
@@ -514,7 +540,6 @@ static const AVFilterPad avfilter_vf_yadif_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
- .poll_frame = poll_frame,
.request_frame = request_frame,
.config_props = config_props,
},
@@ -523,16 +548,12 @@ static const AVFilterPad avfilter_vf_yadif_outputs[] = {
AVFilter ff_vf_yadif = {
.name = "yadif",
- .description = NULL_IF_CONFIG_SMALL("Deinterlace the input image"),
-
+ .description = NULL_IF_CONFIG_SMALL("Deinterlace the input image."),
.priv_size = sizeof(YADIFContext),
.priv_class = &yadif_class,
.uninit = uninit,
.query_formats = query_formats,
-
- .inputs = avfilter_vf_yadif_inputs,
-
- .outputs = avfilter_vf_yadif_outputs,
-
- .flags = AVFILTER_FLAG_SLICE_THREADS,
+ .inputs = avfilter_vf_yadif_inputs,
+ .outputs = avfilter_vf_yadif_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
};
diff --git a/libavfilter/vf_zoompan.c b/libavfilter/vf_zoompan.c
new file mode 100644
index 0000000000..136d6c83fd
--- /dev/null
+++ b/libavfilter/vf_zoompan.c
@@ -0,0 +1,372 @@
+/*
+ * Copyright (c) 2013 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/eval.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+#include "libswscale/swscale.h"
+
+static const char *const var_names[] = {
+ "in_w", "iw",
+ "in_h", "ih",
+ "out_w", "ow",
+ "out_h", "oh",
+ "in",
+ "on",
+ "duration",
+ "pduration",
+ "time",
+ "frame",
+ "zoom",
+ "pzoom",
+ "x", "px",
+ "y", "py",
+ "a",
+ "sar",
+ "dar",
+ "hsub",
+ "vsub",
+ NULL
+};
+
+enum var_name {
+ VAR_IN_W, VAR_IW,
+ VAR_IN_H, VAR_IH,
+ VAR_OUT_W, VAR_OW,
+ VAR_OUT_H, VAR_OH,
+ VAR_IN,
+ VAR_ON,
+ VAR_DURATION,
+ VAR_PDURATION,
+ VAR_TIME,
+ VAR_FRAME,
+ VAR_ZOOM,
+ VAR_PZOOM,
+ VAR_X, VAR_PX,
+ VAR_Y, VAR_PY,
+ VAR_A,
+ VAR_SAR,
+ VAR_DAR,
+ VAR_HSUB,
+ VAR_VSUB,
+ VARS_NB
+};
+
+typedef struct ZPcontext {
+ const AVClass *class;
+ char *zoom_expr_str;
+ char *x_expr_str;
+ char *y_expr_str;
+ char *duration_expr_str;
+ int w, h;
+ double x, y;
+ double prev_zoom;
+ int prev_nb_frames;
+ struct SwsContext *sws;
+ int64_t frame_count;
+ const AVPixFmtDescriptor *desc;
+ AVFrame *in;
+ double var_values[VARS_NB];
+ int nb_frames;
+ int current_frame;
+ int finished;
+ AVRational framerate;
+} ZPContext;
+
+#define OFFSET(x) offsetof(ZPContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+static const AVOption zoompan_options[] = {
+ { "zoom", "set the zoom expression", OFFSET(zoom_expr_str), AV_OPT_TYPE_STRING, {.str = "1" }, .flags = FLAGS },
+ { "z", "set the zoom expression", OFFSET(zoom_expr_str), AV_OPT_TYPE_STRING, {.str = "1" }, .flags = FLAGS },
+ { "x", "set the x expression", OFFSET(x_expr_str), AV_OPT_TYPE_STRING, {.str="0"}, .flags = FLAGS },
+ { "y", "set the y expression", OFFSET(y_expr_str), AV_OPT_TYPE_STRING, {.str="0"}, .flags = FLAGS },
+ { "d", "set the duration expression", OFFSET(duration_expr_str), AV_OPT_TYPE_STRING, {.str="90"}, .flags = FLAGS },
+ { "s", "set the output image size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="hd720"}, .flags = FLAGS },
+ { "fps", "set the output framerate", OFFSET(framerate), AV_OPT_TYPE_VIDEO_RATE, { .str = "25" }, 0, INT_MAX, .flags = FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(zoompan);
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ ZPContext *s = ctx->priv;
+
+ s->prev_zoom = 1;
+ return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ ZPContext *s = ctx->priv;
+
+ outlink->w = s->w;
+ outlink->h = s->h;
+ outlink->time_base = av_inv_q(s->framerate);
+ outlink->frame_rate = s->framerate;
+ s->desc = av_pix_fmt_desc_get(outlink->format);
+
+ return 0;
+}
+
+static int output_single_frame(AVFilterContext *ctx, AVFrame *in, double *var_values, int i,
+ double *zoom, double *dx, double *dy)
+{
+ ZPContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ int64_t pts = s->frame_count;
+ int k, x, y, w, h, ret = 0;
+ uint8_t *input[4];
+ int px[4], py[4];
+ AVFrame *out;
+
+ var_values[VAR_PX] = s->x;
+ var_values[VAR_PY] = s->y;
+ var_values[VAR_PZOOM] = s->prev_zoom;
+ var_values[VAR_PDURATION] = s->prev_nb_frames;
+ var_values[VAR_TIME] = pts * av_q2d(outlink->time_base);
+ var_values[VAR_FRAME] = i;
+ var_values[VAR_ON] = outlink->frame_count_in + 1;
+ if ((ret = av_expr_parse_and_eval(zoom, s->zoom_expr_str,
+ var_names, var_values,
+ NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
+ return ret;
+
+ *zoom = av_clipd(*zoom, 1, 10);
+ var_values[VAR_ZOOM] = *zoom;
+ w = in->width * (1.0 / *zoom);
+ h = in->height * (1.0 / *zoom);
+
+ if ((ret = av_expr_parse_and_eval(dx, s->x_expr_str,
+ var_names, var_values,
+ NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
+ return ret;
+ x = *dx = av_clipd(*dx, 0, FFMAX(in->width - w, 0));
+ var_values[VAR_X] = *dx;
+ x &= ~((1 << s->desc->log2_chroma_w) - 1);
+
+ if ((ret = av_expr_parse_and_eval(dy, s->y_expr_str,
+ var_names, var_values,
+ NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
+ return ret;
+ y = *dy = av_clipd(*dy, 0, FFMAX(in->height - h, 0));
+ var_values[VAR_Y] = *dy;
+ y &= ~((1 << s->desc->log2_chroma_h) - 1);
+
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ ret = AVERROR(ENOMEM);
+ return ret;
+ }
+
+ px[1] = px[2] = AV_CEIL_RSHIFT(x, s->desc->log2_chroma_w);
+ px[0] = px[3] = x;
+
+ py[1] = py[2] = AV_CEIL_RSHIFT(y, s->desc->log2_chroma_h);
+ py[0] = py[3] = y;
+
+ s->sws = sws_alloc_context();
+ if (!s->sws) {
+ ret = AVERROR(ENOMEM);
+ return ret;
+ }
+
+ for (k = 0; in->data[k]; k++)
+ input[k] = in->data[k] + py[k] * in->linesize[k] + px[k];
+
+ av_opt_set_int(s->sws, "srcw", w, 0);
+ av_opt_set_int(s->sws, "srch", h, 0);
+ av_opt_set_int(s->sws, "src_format", in->format, 0);
+ av_opt_set_int(s->sws, "dstw", outlink->w, 0);
+ av_opt_set_int(s->sws, "dsth", outlink->h, 0);
+ av_opt_set_int(s->sws, "dst_format", outlink->format, 0);
+ av_opt_set_int(s->sws, "sws_flags", SWS_BICUBIC, 0);
+
+ if ((ret = sws_init_context(s->sws, NULL, NULL)) < 0)
+ return ret;
+
+ sws_scale(s->sws, (const uint8_t *const *)&input, in->linesize, 0, h, out->data, out->linesize);
+
+ out->pts = pts;
+ s->frame_count++;
+
+ ret = ff_filter_frame(outlink, out);
+ sws_freeContext(s->sws);
+ s->sws = NULL;
+ s->current_frame++;
+ return ret;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ ZPContext *s = ctx->priv;
+ double nb_frames;
+ int ret;
+
+ av_assert0(s->in == NULL);
+
+ s->finished = 0;
+ s->var_values[VAR_IN_W] = s->var_values[VAR_IW] = in->width;
+ s->var_values[VAR_IN_H] = s->var_values[VAR_IH] = in->height;
+ s->var_values[VAR_OUT_W] = s->var_values[VAR_OW] = s->w;
+ s->var_values[VAR_OUT_H] = s->var_values[VAR_OH] = s->h;
+ s->var_values[VAR_IN] = inlink->frame_count_out + 1;
+ s->var_values[VAR_ON] = outlink->frame_count_in + 1;
+ s->var_values[VAR_PX] = s->x;
+ s->var_values[VAR_PY] = s->y;
+ s->var_values[VAR_X] = 0;
+ s->var_values[VAR_Y] = 0;
+ s->var_values[VAR_PZOOM] = s->prev_zoom;
+ s->var_values[VAR_ZOOM] = 1;
+ s->var_values[VAR_PDURATION] = s->prev_nb_frames;
+ s->var_values[VAR_A] = (double) in->width / in->height;
+ s->var_values[VAR_SAR] = inlink->sample_aspect_ratio.num ?
+ (double) inlink->sample_aspect_ratio.num / inlink->sample_aspect_ratio.den : 1;
+ s->var_values[VAR_DAR] = s->var_values[VAR_A] * s->var_values[VAR_SAR];
+ s->var_values[VAR_HSUB] = 1 << s->desc->log2_chroma_w;
+ s->var_values[VAR_VSUB] = 1 << s->desc->log2_chroma_h;
+
+ if ((ret = av_expr_parse_and_eval(&nb_frames, s->duration_expr_str,
+ var_names, s->var_values,
+ NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0) {
+ av_frame_free(&in);
+ return ret;
+ }
+
+ s->var_values[VAR_DURATION] = s->nb_frames = nb_frames;
+ s->in = in;
+
+ return 0;
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ ZPContext *s = ctx->priv;
+ AVFrame *in = s->in;
+ double zoom=-1, dx=-1, dy=-1;
+ int ret = -1;
+
+ if (in) {
+ ret = output_single_frame(ctx, in, s->var_values, s->current_frame,
+ &zoom, &dx, &dy);
+ if (ret < 0)
+ goto fail;
+ }
+
+ if (s->current_frame >= s->nb_frames) {
+ if (dx != -1)
+ s->x = dx;
+ if (dy != -1)
+ s->y = dy;
+ if (zoom != -1)
+ s->prev_zoom = zoom;
+ s->prev_nb_frames = s->nb_frames;
+ s->nb_frames = 0;
+ s->current_frame = 0;
+ av_frame_free(&s->in);
+ s->finished = 1;
+ ret = ff_request_frame(ctx->inputs[0]);
+ }
+
+fail:
+ sws_freeContext(s->sws);
+ s->sws = NULL;
+
+ return ret;
+}
+
+static int poll_frame(AVFilterLink *link)
+{
+ ZPContext *s = link->src->priv;
+ return s->nb_frames - s->current_frame;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P,
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
+ AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA422P,
+ AV_PIX_FMT_YUVA420P,
+ AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
+ AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
+ AV_PIX_FMT_YUVJ411P,
+ AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP,
+ AV_PIX_FMT_GRAY8,
+ AV_PIX_FMT_NONE
+ };
+
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ ZPContext *s = ctx->priv;
+
+ sws_freeContext(s->sws);
+ s->sws = NULL;
+}
+
+static const AVFilterPad inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .needs_fifo = 1,
+ },
+ { NULL }
+};
+
+static const AVFilterPad outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ .poll_frame = poll_frame,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_zoompan = {
+ .name = "zoompan",
+ .description = NULL_IF_CONFIG_SMALL("Apply Zoom & Pan effect."),
+ .priv_size = sizeof(ZPContext),
+ .priv_class = &zoompan_class,
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = inputs,
+ .outputs = outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+};
diff --git a/libavfilter/vf_zscale.c b/libavfilter/vf_zscale.c
new file mode 100644
index 0000000000..1675278935
--- /dev/null
+++ b/libavfilter/vf_zscale.c
@@ -0,0 +1,808 @@
+/*
+ * Copyright (c) 2015 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * zscale video filter using z.lib library
+ */
+
+#include <float.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <zimg.h>
+
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+#include "libavutil/avstring.h"
+#include "libavutil/eval.h"
+#include "libavutil/internal.h"
+#include "libavutil/mathematics.h"
+#include "libavutil/opt.h"
+#include "libavutil/parseutils.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/avassert.h"
+
+static const char *const var_names[] = {
+ "in_w", "iw",
+ "in_h", "ih",
+ "out_w", "ow",
+ "out_h", "oh",
+ "a",
+ "sar",
+ "dar",
+ "hsub",
+ "vsub",
+ "ohsub",
+ "ovsub",
+ NULL
+};
+
+enum var_name {
+ VAR_IN_W, VAR_IW,
+ VAR_IN_H, VAR_IH,
+ VAR_OUT_W, VAR_OW,
+ VAR_OUT_H, VAR_OH,
+ VAR_A,
+ VAR_SAR,
+ VAR_DAR,
+ VAR_HSUB,
+ VAR_VSUB,
+ VAR_OHSUB,
+ VAR_OVSUB,
+ VARS_NB
+};
+
+typedef struct ZScaleContext {
+ const AVClass *class;
+
+ /**
+ * New dimensions. Special values are:
+ * 0 = original width/height
+ * -1 = keep original aspect
+ * -N = try to keep aspect but make sure it is divisible by N
+ */
+ int w, h;
+ int dither;
+ int filter;
+ int colorspace;
+ int trc;
+ int primaries;
+ int range;
+ int chromal;
+ int colorspace_in;
+ int trc_in;
+ int primaries_in;
+ int range_in;
+ int chromal_in;
+ char *size_str;
+ double nominal_peak_luminance;
+ int approximate_gamma;
+
+ char *w_expr; ///< width expression string
+ char *h_expr; ///< height expression string
+
+ int out_h_chr_pos;
+ int out_v_chr_pos;
+ int in_h_chr_pos;
+ int in_v_chr_pos;
+
+ int force_original_aspect_ratio;
+
+ void *tmp;
+ size_t tmp_size;
+
+ zimg_image_format src_format, dst_format;
+ zimg_image_format alpha_src_format, alpha_dst_format;
+ zimg_graph_builder_params alpha_params, params;
+ zimg_filter_graph *alpha_graph, *graph;
+
+ enum AVColorSpace in_colorspace, out_colorspace;
+ enum AVColorTransferCharacteristic in_trc, out_trc;
+ enum AVColorPrimaries in_primaries, out_primaries;
+ enum AVColorRange in_range, out_range;
+ enum AVChromaLocation in_chromal, out_chromal;
+} ZScaleContext;
+
+static av_cold int init_dict(AVFilterContext *ctx, AVDictionary **opts)
+{
+ ZScaleContext *s = ctx->priv;
+ int ret;
+
+ if (s->size_str && (s->w_expr || s->h_expr)) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Size and width/height expressions cannot be set at the same time.\n");
+ return AVERROR(EINVAL);
+ }
+
+ if (s->w_expr && !s->h_expr)
+ FFSWAP(char *, s->w_expr, s->size_str);
+
+ if (s->size_str) {
+ char buf[32];
+ if ((ret = av_parse_video_size(&s->w, &s->h, s->size_str)) < 0) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Invalid size '%s'\n", s->size_str);
+ return ret;
+ }
+ snprintf(buf, sizeof(buf)-1, "%d", s->w);
+ av_opt_set(s, "w", buf, 0);
+ snprintf(buf, sizeof(buf)-1, "%d", s->h);
+ av_opt_set(s, "h", buf, 0);
+ }
+ if (!s->w_expr)
+ av_opt_set(s, "w", "iw", 0);
+ if (!s->h_expr)
+ av_opt_set(s, "h", "ih", 0);
+
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pixel_fmts[] = {
+ AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
+ AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P,
+ AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P,
+ AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ444P,
+ AV_PIX_FMT_YUVJ411P,
+ AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
+ AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
+ AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12,
+ AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV444P14,
+ AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
+ AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P,
+ AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9,
+ AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10,
+ AV_PIX_FMT_YUVA420P16, AV_PIX_FMT_YUVA422P16, AV_PIX_FMT_YUVA444P16,
+ AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
+ AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16,
+ AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRAP16,
+ AV_PIX_FMT_NONE
+ };
+ int ret;
+
+ ret = ff_formats_ref(ff_make_format_list(pixel_fmts), &ctx->inputs[0]->out_formats);
+ if (ret < 0)
+ return ret;
+ return ff_formats_ref(ff_make_format_list(pixel_fmts), &ctx->outputs[0]->in_formats);
+}
+
+static int config_props(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ AVFilterLink *inlink = outlink->src->inputs[0];
+ ZScaleContext *s = ctx->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ const AVPixFmtDescriptor *out_desc = av_pix_fmt_desc_get(outlink->format);
+ int64_t w, h;
+ double var_values[VARS_NB], res;
+ char *expr;
+ int ret;
+ int factor_w, factor_h;
+
+ var_values[VAR_IN_W] = var_values[VAR_IW] = inlink->w;
+ var_values[VAR_IN_H] = var_values[VAR_IH] = inlink->h;
+ var_values[VAR_OUT_W] = var_values[VAR_OW] = NAN;
+ var_values[VAR_OUT_H] = var_values[VAR_OH] = NAN;
+ var_values[VAR_A] = (double) inlink->w / inlink->h;
+ var_values[VAR_SAR] = inlink->sample_aspect_ratio.num ?
+ (double) inlink->sample_aspect_ratio.num / inlink->sample_aspect_ratio.den : 1;
+ var_values[VAR_DAR] = var_values[VAR_A] * var_values[VAR_SAR];
+ var_values[VAR_HSUB] = 1 << desc->log2_chroma_w;
+ var_values[VAR_VSUB] = 1 << desc->log2_chroma_h;
+ var_values[VAR_OHSUB] = 1 << out_desc->log2_chroma_w;
+ var_values[VAR_OVSUB] = 1 << out_desc->log2_chroma_h;
+
+ /* evaluate width and height */
+ av_expr_parse_and_eval(&res, (expr = s->w_expr),
+ var_names, var_values,
+ NULL, NULL, NULL, NULL, NULL, 0, ctx);
+ s->w = var_values[VAR_OUT_W] = var_values[VAR_OW] = res;
+ if ((ret = av_expr_parse_and_eval(&res, (expr = s->h_expr),
+ var_names, var_values,
+ NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
+ goto fail;
+ s->h = var_values[VAR_OUT_H] = var_values[VAR_OH] = res;
+ /* evaluate again the width, as it may depend on the output height */
+ if ((ret = av_expr_parse_and_eval(&res, (expr = s->w_expr),
+ var_names, var_values,
+ NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
+ goto fail;
+ s->w = res;
+
+ w = s->w;
+ h = s->h;
+
+ /* Check if it is requested that the result has to be divisible by a some
+ * factor (w or h = -n with n being the factor). */
+ factor_w = 1;
+ factor_h = 1;
+ if (w < -1) {
+ factor_w = -w;
+ }
+ if (h < -1) {
+ factor_h = -h;
+ }
+
+ if (w < 0 && h < 0)
+ s->w = s->h = 0;
+
+ if (!(w = s->w))
+ w = inlink->w;
+ if (!(h = s->h))
+ h = inlink->h;
+
+ /* Make sure that the result is divisible by the factor we determined
+ * earlier. If no factor was set, it is nothing will happen as the default
+ * factor is 1 */
+ if (w < 0)
+ w = av_rescale(h, inlink->w, inlink->h * factor_w) * factor_w;
+ if (h < 0)
+ h = av_rescale(w, inlink->h, inlink->w * factor_h) * factor_h;
+
+ /* Note that force_original_aspect_ratio may overwrite the previous set
+ * dimensions so that it is not divisible by the set factors anymore. */
+ if (s->force_original_aspect_ratio) {
+ int tmp_w = av_rescale(h, inlink->w, inlink->h);
+ int tmp_h = av_rescale(w, inlink->h, inlink->w);
+
+ if (s->force_original_aspect_ratio == 1) {
+ w = FFMIN(tmp_w, w);
+ h = FFMIN(tmp_h, h);
+ } else {
+ w = FFMAX(tmp_w, w);
+ h = FFMAX(tmp_h, h);
+ }
+ }
+
+ if (w > INT_MAX || h > INT_MAX ||
+ (h * inlink->w) > INT_MAX ||
+ (w * inlink->h) > INT_MAX)
+ av_log(ctx, AV_LOG_ERROR, "Rescaled value for width or height is too big.\n");
+
+ outlink->w = w;
+ outlink->h = h;
+
+ if (inlink->w == outlink->w &&
+ inlink->h == outlink->h &&
+ inlink->format == outlink->format)
+ ;
+ else {
+ }
+
+ if (inlink->sample_aspect_ratio.num){
+ outlink->sample_aspect_ratio = av_mul_q((AVRational){outlink->h * inlink->w, outlink->w * inlink->h}, inlink->sample_aspect_ratio);
+ } else
+ outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
+
+ av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d fmt:%s sar:%d/%d -> w:%d h:%d fmt:%s sar:%d/%d\n",
+ inlink ->w, inlink ->h, av_get_pix_fmt_name( inlink->format),
+ inlink->sample_aspect_ratio.num, inlink->sample_aspect_ratio.den,
+ outlink->w, outlink->h, av_get_pix_fmt_name(outlink->format),
+ outlink->sample_aspect_ratio.num, outlink->sample_aspect_ratio.den);
+ return 0;
+
+fail:
+ av_log(ctx, AV_LOG_ERROR,
+ "Error when evaluating the expression '%s'.\n"
+ "Maybe the expression for out_w:'%s' or for out_h:'%s' is self-referencing.\n",
+ expr, s->w_expr, s->h_expr);
+ return ret;
+}
+
+static int print_zimg_error(AVFilterContext *ctx)
+{
+ char err_msg[1024];
+ int err_code = zimg_get_last_error(err_msg, sizeof(err_msg));
+
+ av_log(ctx, AV_LOG_ERROR, "code %d: %s\n", err_code, err_msg);
+
+ return err_code;
+}
+
+static int convert_chroma_location(enum AVChromaLocation chroma_location)
+{
+ switch (chroma_location) {
+ case AVCHROMA_LOC_UNSPECIFIED:
+ case AVCHROMA_LOC_LEFT:
+ return ZIMG_CHROMA_LEFT;
+ case AVCHROMA_LOC_CENTER:
+ return ZIMG_CHROMA_CENTER;
+ case AVCHROMA_LOC_TOPLEFT:
+ return ZIMG_CHROMA_TOP_LEFT;
+ case AVCHROMA_LOC_TOP:
+ return ZIMG_CHROMA_TOP;
+ case AVCHROMA_LOC_BOTTOMLEFT:
+ return ZIMG_CHROMA_BOTTOM_LEFT;
+ case AVCHROMA_LOC_BOTTOM:
+ return ZIMG_CHROMA_BOTTOM;
+ }
+ return ZIMG_CHROMA_LEFT;
+}
+
+static int convert_matrix(enum AVColorSpace colorspace)
+{
+ switch (colorspace) {
+ case AVCOL_SPC_RGB:
+ return ZIMG_MATRIX_RGB;
+ case AVCOL_SPC_BT709:
+ return ZIMG_MATRIX_709;
+ case AVCOL_SPC_UNSPECIFIED:
+ return ZIMG_MATRIX_UNSPECIFIED;
+ case AVCOL_SPC_BT470BG:
+ return ZIMG_MATRIX_470BG;
+ case AVCOL_SPC_SMPTE170M:
+ return ZIMG_MATRIX_170M;
+ case AVCOL_SPC_YCGCO:
+ return ZIMG_MATRIX_YCGCO;
+ case AVCOL_SPC_BT2020_NCL:
+ return ZIMG_MATRIX_2020_NCL;
+ case AVCOL_SPC_BT2020_CL:
+ return ZIMG_MATRIX_2020_CL;
+ }
+ return ZIMG_MATRIX_UNSPECIFIED;
+}
+
+static int convert_trc(enum AVColorTransferCharacteristic color_trc)
+{
+ switch (color_trc) {
+ case AVCOL_TRC_UNSPECIFIED:
+ return ZIMG_TRANSFER_UNSPECIFIED;
+ case AVCOL_TRC_BT709:
+ return ZIMG_TRANSFER_709;
+ case AVCOL_TRC_SMPTE170M:
+ return ZIMG_TRANSFER_601;
+ case AVCOL_TRC_LINEAR:
+ return ZIMG_TRANSFER_LINEAR;
+ case AVCOL_TRC_BT2020_10:
+ return ZIMG_TRANSFER_2020_10;
+ case AVCOL_TRC_BT2020_12:
+ return ZIMG_TRANSFER_2020_12;
+ case AVCOL_TRC_SMPTE2084:
+ return ZIMG_TRANSFER_ST2084;
+ case AVCOL_TRC_ARIB_STD_B67:
+ return ZIMG_TRANSFER_ARIB_B67;
+ case AVCOL_TRC_IEC61966_2_1:
+ return ZIMG_TRANSFER_IEC_61966_2_1;
+ }
+ return ZIMG_TRANSFER_UNSPECIFIED;
+}
+
+static int convert_primaries(enum AVColorPrimaries color_primaries)
+{
+ switch (color_primaries) {
+ case AVCOL_PRI_UNSPECIFIED:
+ return ZIMG_PRIMARIES_UNSPECIFIED;
+ case AVCOL_PRI_BT709:
+ return ZIMG_PRIMARIES_709;
+ case AVCOL_PRI_SMPTE170M:
+ return ZIMG_PRIMARIES_170M;
+ case AVCOL_PRI_SMPTE240M:
+ return ZIMG_PRIMARIES_240M;
+ case AVCOL_PRI_BT2020:
+ return ZIMG_PRIMARIES_2020;
+ case AVCOL_PRI_SMPTE432:
+ return ZIMG_PRIMARIES_ST432_1;
+ }
+ return ZIMG_PRIMARIES_UNSPECIFIED;
+}
+
+static int convert_range(enum AVColorRange color_range)
+{
+ switch (color_range) {
+ case AVCOL_RANGE_UNSPECIFIED:
+ case AVCOL_RANGE_MPEG:
+ return ZIMG_RANGE_LIMITED;
+ case AVCOL_RANGE_JPEG:
+ return ZIMG_RANGE_FULL;
+ }
+ return ZIMG_RANGE_LIMITED;
+}
+
+static int filter_frame(AVFilterLink *link, AVFrame *in)
+{
+ ZScaleContext *s = link->dst->priv;
+ AVFilterLink *outlink = link->dst->outputs[0];
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format);
+ const AVPixFmtDescriptor *odesc = av_pix_fmt_desc_get(outlink->format);
+ zimg_image_buffer_const src_buf = { ZIMG_API_VERSION };
+ zimg_image_buffer dst_buf = { ZIMG_API_VERSION };
+ char buf[32];
+ size_t tmp_size;
+ int ret = 0, plane;
+ AVFrame *out;
+
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+
+ av_frame_copy_props(out, in);
+ out->width = outlink->w;
+ out->height = outlink->h;
+
+ if( in->width != link->w
+ || in->height != link->h
+ || in->format != link->format
+ || s->in_colorspace != in->colorspace
+ || s->in_trc != in->color_trc
+ || s->in_primaries != in->color_primaries
+ || s->in_range != in->color_range
+ || s->out_colorspace != out->colorspace
+ || s->out_trc != out->color_trc
+ || s->out_primaries != out->color_primaries
+ || s->out_range != out->color_range
+ || s->in_chromal != in->chroma_location
+ || s->out_chromal != out->chroma_location) {
+ snprintf(buf, sizeof(buf)-1, "%d", outlink->w);
+ av_opt_set(s, "w", buf, 0);
+ snprintf(buf, sizeof(buf)-1, "%d", outlink->h);
+ av_opt_set(s, "h", buf, 0);
+
+ link->dst->inputs[0]->format = in->format;
+ link->dst->inputs[0]->w = in->width;
+ link->dst->inputs[0]->h = in->height;
+
+ if ((ret = config_props(outlink)) < 0) {
+ av_frame_free(&in);
+ av_frame_free(&out);
+ return ret;
+ }
+
+ zimg_image_format_default(&s->src_format, ZIMG_API_VERSION);
+ zimg_image_format_default(&s->dst_format, ZIMG_API_VERSION);
+ zimg_graph_builder_params_default(&s->params, ZIMG_API_VERSION);
+
+ s->params.dither_type = s->dither;
+ s->params.cpu_type = ZIMG_CPU_AUTO;
+ s->params.resample_filter = s->filter;
+ s->params.resample_filter_uv = s->filter;
+ s->params.nominal_peak_luminance = s->nominal_peak_luminance;
+ s->params.allow_approximate_gamma = s->approximate_gamma;
+
+ s->src_format.width = in->width;
+ s->src_format.height = in->height;
+ s->src_format.subsample_w = desc->log2_chroma_w;
+ s->src_format.subsample_h = desc->log2_chroma_h;
+ s->src_format.depth = desc->comp[0].depth;
+ s->src_format.pixel_type = desc->comp[0].depth > 8 ? ZIMG_PIXEL_WORD : ZIMG_PIXEL_BYTE;
+ s->src_format.color_family = (desc->flags & AV_PIX_FMT_FLAG_RGB) ? ZIMG_COLOR_RGB : ZIMG_COLOR_YUV;
+ s->src_format.matrix_coefficients = (desc->flags & AV_PIX_FMT_FLAG_RGB) ? ZIMG_MATRIX_RGB : s->colorspace_in == -1 ? convert_matrix(in->colorspace) : s->colorspace_in;
+ s->src_format.transfer_characteristics = s->trc_in == - 1 ? convert_trc(in->color_trc) : s->trc_in;
+ s->src_format.color_primaries = s->primaries_in == -1 ? convert_primaries(in->color_primaries) : s->primaries_in;
+ s->src_format.pixel_range = (desc->flags & AV_PIX_FMT_FLAG_RGB) ? ZIMG_RANGE_FULL : s->range_in == -1 ? convert_range(in->color_range) : s->range_in;
+ s->src_format.chroma_location = s->chromal_in == -1 ? convert_chroma_location(in->chroma_location) : s->chromal_in;
+
+ s->dst_format.width = out->width;
+ s->dst_format.height = out->height;
+ s->dst_format.subsample_w = odesc->log2_chroma_w;
+ s->dst_format.subsample_h = odesc->log2_chroma_h;
+ s->dst_format.depth = odesc->comp[0].depth;
+ s->dst_format.pixel_type = odesc->comp[0].depth > 8 ? ZIMG_PIXEL_WORD : ZIMG_PIXEL_BYTE;
+ s->dst_format.color_family = (odesc->flags & AV_PIX_FMT_FLAG_RGB) ? ZIMG_COLOR_RGB : ZIMG_COLOR_YUV;
+ s->dst_format.matrix_coefficients = (odesc->flags & AV_PIX_FMT_FLAG_RGB) ? ZIMG_MATRIX_RGB : s->colorspace == -1 ? convert_matrix(out->colorspace) : s->colorspace;
+ s->dst_format.transfer_characteristics = s->trc == -1 ? convert_trc(out->color_trc) : s->trc;
+ s->dst_format.color_primaries = s->primaries == -1 ? convert_primaries(out->color_primaries) : s->primaries;
+ s->dst_format.pixel_range = (odesc->flags & AV_PIX_FMT_FLAG_RGB) ? ZIMG_RANGE_FULL : s->range == -1 ? convert_range(out->color_range) : s->range;
+ s->dst_format.chroma_location = s->chromal == -1 ? convert_chroma_location(out->chroma_location) : s->chromal;
+
+ if (s->colorspace != -1)
+ out->colorspace = (int)s->dst_format.matrix_coefficients;
+
+ if (s->primaries != -1)
+ out->color_primaries = (int)s->dst_format.color_primaries;
+
+ if (s->range != -1)
+ out->color_range = (int)s->dst_format.pixel_range + 1;
+
+ if (s->trc != -1)
+ out->color_trc = (int)s->dst_format.transfer_characteristics;
+
+ if (s->chromal != -1)
+ out->chroma_location = (int)s->dst_format.chroma_location - 1;
+
+ zimg_filter_graph_free(s->graph);
+ s->graph = zimg_filter_graph_build(&s->src_format, &s->dst_format, &s->params);
+ if (!s->graph) {
+ ret = print_zimg_error(link->dst);
+ goto fail;
+ }
+
+ if ((ret = zimg_filter_graph_get_tmp_size(s->graph, &tmp_size))) {
+ ret = print_zimg_error(link->dst);
+ goto fail;
+ }
+
+ if (tmp_size > s->tmp_size) {
+ av_freep(&s->tmp);
+ s->tmp = av_malloc(tmp_size);
+ if (!s->tmp) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+ s->tmp_size = tmp_size;
+ }
+
+ s->in_colorspace = in->colorspace;
+ s->in_trc = in->color_trc;
+ s->in_primaries = in->color_primaries;
+ s->in_range = in->color_range;
+ s->out_colorspace = out->colorspace;
+ s->out_trc = out->color_trc;
+ s->out_primaries = out->color_primaries;
+ s->out_range = out->color_range;
+
+ if (desc->flags & AV_PIX_FMT_FLAG_ALPHA && odesc->flags & AV_PIX_FMT_FLAG_ALPHA) {
+ zimg_image_format_default(&s->alpha_src_format, ZIMG_API_VERSION);
+ zimg_image_format_default(&s->alpha_dst_format, ZIMG_API_VERSION);
+ zimg_graph_builder_params_default(&s->alpha_params, ZIMG_API_VERSION);
+
+ s->alpha_params.dither_type = s->dither;
+ s->alpha_params.cpu_type = ZIMG_CPU_AUTO;
+ s->alpha_params.resample_filter = s->filter;
+
+ s->alpha_src_format.width = in->width;
+ s->alpha_src_format.height = in->height;
+ s->alpha_src_format.depth = desc->comp[0].depth;
+ s->alpha_src_format.pixel_type = desc->comp[0].depth > 8 ? ZIMG_PIXEL_WORD : ZIMG_PIXEL_BYTE;
+ s->alpha_src_format.color_family = ZIMG_COLOR_GREY;
+
+ s->alpha_dst_format.width = out->width;
+ s->alpha_dst_format.height = out->height;
+ s->alpha_dst_format.depth = odesc->comp[0].depth;
+ s->alpha_dst_format.pixel_type = odesc->comp[0].depth > 8 ? ZIMG_PIXEL_WORD : ZIMG_PIXEL_BYTE;
+ s->alpha_dst_format.color_family = ZIMG_COLOR_GREY;
+
+ zimg_filter_graph_free(s->alpha_graph);
+ s->alpha_graph = zimg_filter_graph_build(&s->alpha_src_format, &s->alpha_dst_format, &s->alpha_params);
+ if (!s->alpha_graph) {
+ ret = print_zimg_error(link->dst);
+ goto fail;
+ }
+ }
+ }
+
+ if (s->colorspace != -1)
+ out->colorspace = (int)s->dst_format.matrix_coefficients;
+
+ if (s->primaries != -1)
+ out->color_primaries = (int)s->dst_format.color_primaries;
+
+ if (s->range != -1)
+ out->color_range = (int)s->dst_format.pixel_range;
+
+ if (s->trc != -1)
+ out->color_trc = (int)s->dst_format.transfer_characteristics;
+
+ av_reduce(&out->sample_aspect_ratio.num, &out->sample_aspect_ratio.den,
+ (int64_t)in->sample_aspect_ratio.num * outlink->h * link->w,
+ (int64_t)in->sample_aspect_ratio.den * outlink->w * link->h,
+ INT_MAX);
+
+ for (plane = 0; plane < 3; plane++) {
+ int p = desc->comp[plane].plane;
+ src_buf.plane[plane].data = in->data[p];
+ src_buf.plane[plane].stride = in->linesize[p];
+ src_buf.plane[plane].mask = -1;
+
+ p = odesc->comp[plane].plane;
+ dst_buf.plane[plane].data = out->data[p];
+ dst_buf.plane[plane].stride = out->linesize[p];
+ dst_buf.plane[plane].mask = -1;
+ }
+
+ ret = zimg_filter_graph_process(s->graph, &src_buf, &dst_buf, s->tmp, 0, 0, 0, 0);
+ if (ret) {
+ print_zimg_error(link->dst);
+ goto fail;
+ }
+
+ if (desc->flags & AV_PIX_FMT_FLAG_ALPHA && odesc->flags & AV_PIX_FMT_FLAG_ALPHA) {
+ src_buf.plane[0].data = in->data[3];
+ src_buf.plane[0].stride = in->linesize[3];
+ src_buf.plane[0].mask = -1;
+
+ dst_buf.plane[0].data = out->data[3];
+ dst_buf.plane[0].stride = out->linesize[3];
+ dst_buf.plane[0].mask = -1;
+
+ ret = zimg_filter_graph_process(s->alpha_graph, &src_buf, &dst_buf, s->tmp, 0, 0, 0, 0);
+ if (ret) {
+ print_zimg_error(link->dst);
+ goto fail;
+ }
+ } else if (odesc->flags & AV_PIX_FMT_FLAG_ALPHA) {
+ int y;
+
+ for (y = 0; y < outlink->h; y++)
+ memset(out->data[3] + y * out->linesize[3], 0xff, outlink->w);
+ }
+
+fail:
+ av_frame_free(&in);
+ if (ret) {
+ av_frame_free(&out);
+ return ret;
+ }
+
+ return ff_filter_frame(outlink, out);
+}
+
+static void uninit(AVFilterContext *ctx)
+{
+ ZScaleContext *s = ctx->priv;
+
+ zimg_filter_graph_free(s->graph);
+ av_freep(&s->tmp);
+ s->tmp_size = 0;
+}
+
+static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
+ char *res, int res_len, int flags)
+{
+ ZScaleContext *s = ctx->priv;
+ int ret;
+
+ if ( !strcmp(cmd, "width") || !strcmp(cmd, "w")
+ || !strcmp(cmd, "height") || !strcmp(cmd, "h")) {
+
+ int old_w = s->w;
+ int old_h = s->h;
+ AVFilterLink *outlink = ctx->outputs[0];
+
+ av_opt_set(s, cmd, args, 0);
+ if ((ret = config_props(outlink)) < 0) {
+ s->w = old_w;
+ s->h = old_h;
+ }
+ } else
+ ret = AVERROR(ENOSYS);
+
+ return ret;
+}
+
+#define OFFSET(x) offsetof(ZScaleContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption zscale_options[] = {
+ { "w", "Output video width", OFFSET(w_expr), AV_OPT_TYPE_STRING, .flags = FLAGS },
+ { "width", "Output video width", OFFSET(w_expr), AV_OPT_TYPE_STRING, .flags = FLAGS },
+ { "h", "Output video height", OFFSET(h_expr), AV_OPT_TYPE_STRING, .flags = FLAGS },
+ { "height", "Output video height", OFFSET(h_expr), AV_OPT_TYPE_STRING, .flags = FLAGS },
+ { "size", "set video size", OFFSET(size_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
+ { "s", "set video size", OFFSET(size_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
+ { "dither", "set dither type", OFFSET(dither), AV_OPT_TYPE_INT, {.i64 = 0}, 0, ZIMG_DITHER_ERROR_DIFFUSION, FLAGS, "dither" },
+ { "d", "set dither type", OFFSET(dither), AV_OPT_TYPE_INT, {.i64 = 0}, 0, ZIMG_DITHER_ERROR_DIFFUSION, FLAGS, "dither" },
+ { "none", 0, 0, AV_OPT_TYPE_CONST, {.i64 = ZIMG_DITHER_NONE}, 0, 0, FLAGS, "dither" },
+ { "ordered", 0, 0, AV_OPT_TYPE_CONST, {.i64 = ZIMG_DITHER_ORDERED}, 0, 0, FLAGS, "dither" },
+ { "random", 0, 0, AV_OPT_TYPE_CONST, {.i64 = ZIMG_DITHER_RANDOM}, 0, 0, FLAGS, "dither" },
+ { "error_diffusion", 0, 0, AV_OPT_TYPE_CONST, {.i64 = ZIMG_DITHER_ERROR_DIFFUSION}, 0, 0, FLAGS, "dither" },
+ { "filter", "set filter type", OFFSET(filter), AV_OPT_TYPE_INT, {.i64 = ZIMG_RESIZE_BILINEAR}, 0, ZIMG_RESIZE_LANCZOS, FLAGS, "filter" },
+ { "f", "set filter type", OFFSET(filter), AV_OPT_TYPE_INT, {.i64 = ZIMG_RESIZE_BILINEAR}, 0, ZIMG_RESIZE_LANCZOS, FLAGS, "filter" },
+ { "point", 0, 0, AV_OPT_TYPE_CONST, {.i64 = ZIMG_RESIZE_POINT}, 0, 0, FLAGS, "filter" },
+ { "bilinear", 0, 0, AV_OPT_TYPE_CONST, {.i64 = ZIMG_RESIZE_BILINEAR}, 0, 0, FLAGS, "filter" },
+ { "bicubic", 0, 0, AV_OPT_TYPE_CONST, {.i64 = ZIMG_RESIZE_BICUBIC}, 0, 0, FLAGS, "filter" },
+ { "spline16", 0, 0, AV_OPT_TYPE_CONST, {.i64 = ZIMG_RESIZE_SPLINE16}, 0, 0, FLAGS, "filter" },
+ { "spline36", 0, 0, AV_OPT_TYPE_CONST, {.i64 = ZIMG_RESIZE_SPLINE36}, 0, 0, FLAGS, "filter" },
+ { "lanczos", 0, 0, AV_OPT_TYPE_CONST, {.i64 = ZIMG_RESIZE_LANCZOS}, 0, 0, FLAGS, "filter" },
+ { "range", "set color range", OFFSET(range), AV_OPT_TYPE_INT, {.i64 = -1}, -1, ZIMG_RANGE_FULL, FLAGS, "range" },
+ { "r", "set color range", OFFSET(range), AV_OPT_TYPE_INT, {.i64 = -1}, -1, ZIMG_RANGE_FULL, FLAGS, "range" },
+ { "input", 0, 0, AV_OPT_TYPE_CONST, {.i64 = -1}, 0, 0, FLAGS, "range" },
+ { "limited", 0, 0, AV_OPT_TYPE_CONST, {.i64 = ZIMG_RANGE_LIMITED}, 0, 0, FLAGS, "range" },
+ { "full", 0, 0, AV_OPT_TYPE_CONST, {.i64 = ZIMG_RANGE_FULL}, 0, 0, FLAGS, "range" },
+ { "primaries", "set color primaries", OFFSET(primaries), AV_OPT_TYPE_INT, {.i64 = -1}, -1, ZIMG_PRIMARIES_ST432_1, FLAGS, "primaries" },
+ { "p", "set color primaries", OFFSET(primaries), AV_OPT_TYPE_INT, {.i64 = -1}, -1, ZIMG_PRIMARIES_ST432_1, FLAGS, "primaries" },
+ { "input", 0, 0, AV_OPT_TYPE_CONST, {.i64 = -1}, 0, 0, FLAGS, "primaries" },
+ { "709", 0, 0, AV_OPT_TYPE_CONST, {.i64 = ZIMG_PRIMARIES_709}, 0, 0, FLAGS, "primaries" },
+ { "unspecified", 0, 0, AV_OPT_TYPE_CONST, {.i64 = ZIMG_PRIMARIES_UNSPECIFIED}, 0, 0, FLAGS, "primaries" },
+ { "170m", 0, 0, AV_OPT_TYPE_CONST, {.i64 = ZIMG_PRIMARIES_170M}, 0, 0, FLAGS, "primaries" },
+ { "240m", 0, 0, AV_OPT_TYPE_CONST, {.i64 = ZIMG_PRIMARIES_240M}, 0, 0, FLAGS, "primaries" },
+ { "2020", 0, 0, AV_OPT_TYPE_CONST, {.i64 = ZIMG_PRIMARIES_2020}, 0, 0, FLAGS, "primaries" },
+ { "smpte432", 0, 0, AV_OPT_TYPE_CONST, {.i64 = ZIMG_PRIMARIES_ST432_1}, 0, 0, FLAGS, "primaries" },
+ { "transfer", "set transfer characteristic", OFFSET(trc), AV_OPT_TYPE_INT, {.i64 = -1}, -1, ZIMG_TRANSFER_ARIB_B67, FLAGS, "transfer" },
+ { "t", "set transfer characteristic", OFFSET(trc), AV_OPT_TYPE_INT, {.i64 = -1}, -1, ZIMG_TRANSFER_ARIB_B67, FLAGS, "transfer" },
+ { "input", 0, 0, AV_OPT_TYPE_CONST, {.i64 = -1}, 0, 0, FLAGS, "transfer" },
+ { "709", 0, 0, AV_OPT_TYPE_CONST, {.i64 = ZIMG_TRANSFER_709}, 0, 0, FLAGS, "transfer" },
+ { "unspecified", 0, 0, AV_OPT_TYPE_CONST, {.i64 = ZIMG_TRANSFER_UNSPECIFIED}, 0, 0, FLAGS, "transfer" },
+ { "601", 0, 0, AV_OPT_TYPE_CONST, {.i64 = ZIMG_TRANSFER_601}, 0, 0, FLAGS, "transfer" },
+ { "linear", 0, 0, AV_OPT_TYPE_CONST, {.i64 = ZIMG_TRANSFER_LINEAR}, 0, 0, FLAGS, "transfer" },
+ { "2020_10", 0, 0, AV_OPT_TYPE_CONST, {.i64 = ZIMG_TRANSFER_2020_10}, 0, 0, FLAGS, "transfer" },
+ { "2020_12", 0, 0, AV_OPT_TYPE_CONST, {.i64 = ZIMG_TRANSFER_2020_12}, 0, 0, FLAGS, "transfer" },
+ { "smpte2084", 0, 0, AV_OPT_TYPE_CONST, {.i64 = ZIMG_TRANSFER_ST2084}, 0, 0, FLAGS, "transfer" },
+ { "iec61966-2-1", 0, 0, AV_OPT_TYPE_CONST, {.i64 = ZIMG_TRANSFER_IEC_61966_2_1},0, 0, FLAGS, "transfer" },
+ { "arib-std-b67", 0, 0, AV_OPT_TYPE_CONST, {.i64 = ZIMG_TRANSFER_ARIB_B67}, 0, 0, FLAGS, "transfer" },
+ { "matrix", "set colorspace matrix", OFFSET(colorspace), AV_OPT_TYPE_INT, {.i64 = -1}, -1, ZIMG_MATRIX_2020_CL, FLAGS, "matrix" },
+ { "m", "set colorspace matrix", OFFSET(colorspace), AV_OPT_TYPE_INT, {.i64 = -1}, -1, ZIMG_MATRIX_2020_CL, FLAGS, "matrix" },
+ { "input", 0, 0, AV_OPT_TYPE_CONST, {.i64 = -1}, 0, 0, FLAGS, "matrix" },
+ { "709", 0, 0, AV_OPT_TYPE_CONST, {.i64 = ZIMG_MATRIX_709}, 0, 0, FLAGS, "matrix" },
+ { "unspecified", 0, 0, AV_OPT_TYPE_CONST, {.i64 = ZIMG_MATRIX_UNSPECIFIED}, 0, 0, FLAGS, "matrix" },
+ { "470bg", 0, 0, AV_OPT_TYPE_CONST, {.i64 = ZIMG_MATRIX_470BG}, 0, 0, FLAGS, "matrix" },
+ { "170m", 0, 0, AV_OPT_TYPE_CONST, {.i64 = ZIMG_MATRIX_170M}, 0, 0, FLAGS, "matrix" },
+ { "ycgco", 0, 0, AV_OPT_TYPE_CONST, {.i64 = ZIMG_MATRIX_YCGCO}, 0, 0, FLAGS, "matrix" },
+ { "2020_ncl", 0, 0, AV_OPT_TYPE_CONST, {.i64 = ZIMG_MATRIX_2020_NCL}, 0, 0, FLAGS, "matrix" },
+ { "2020_cl", 0, 0, AV_OPT_TYPE_CONST, {.i64 = ZIMG_MATRIX_2020_CL}, 0, 0, FLAGS, "matrix" },
+ { "rangein", "set input color range", OFFSET(range_in), AV_OPT_TYPE_INT, {.i64 = -1}, -1, ZIMG_RANGE_FULL, FLAGS, "range" },
+ { "rin", "set input color range", OFFSET(range_in), AV_OPT_TYPE_INT, {.i64 = -1}, -1, ZIMG_RANGE_FULL, FLAGS, "range" },
+ { "primariesin", "set input color primaries", OFFSET(primaries_in), AV_OPT_TYPE_INT, {.i64 = -1}, -1, ZIMG_PRIMARIES_ST432_1, FLAGS, "primaries" },
+ { "pin", "set input color primaries", OFFSET(primaries_in), AV_OPT_TYPE_INT, {.i64 = -1}, -1, ZIMG_PRIMARIES_ST432_1, FLAGS, "primaries" },
+ { "transferin", "set input transfer characteristic", OFFSET(trc_in), AV_OPT_TYPE_INT, {.i64 = -1}, -1, ZIMG_TRANSFER_ARIB_B67, FLAGS, "transfer" },
+ { "tin", "set input transfer characteristic", OFFSET(trc_in), AV_OPT_TYPE_INT, {.i64 = -1}, -1, ZIMG_TRANSFER_ARIB_B67, FLAGS, "transfer" },
+ { "matrixin", "set input colorspace matrix", OFFSET(colorspace_in), AV_OPT_TYPE_INT, {.i64 = -1}, -1, ZIMG_MATRIX_2020_CL, FLAGS, "matrix" },
+ { "min", "set input colorspace matrix", OFFSET(colorspace_in), AV_OPT_TYPE_INT, {.i64 = -1}, -1, ZIMG_MATRIX_2020_CL, FLAGS, "matrix" },
+ { "chromal", "set output chroma location", OFFSET(chromal), AV_OPT_TYPE_INT, {.i64 = -1}, -1, ZIMG_CHROMA_BOTTOM, FLAGS, "chroma" },
+ { "c", "set output chroma location", OFFSET(chromal), AV_OPT_TYPE_INT, {.i64 = -1}, -1, ZIMG_CHROMA_BOTTOM, FLAGS, "chroma" },
+ { "input", 0, 0, AV_OPT_TYPE_CONST, {.i64 = -1}, 0, 0, FLAGS, "chroma" },
+ { "left", 0, 0, AV_OPT_TYPE_CONST, {.i64 = ZIMG_CHROMA_LEFT}, 0, 0, FLAGS, "chroma" },
+ { "center", 0, 0, AV_OPT_TYPE_CONST, {.i64 = ZIMG_CHROMA_CENTER}, 0, 0, FLAGS, "chroma" },
+ { "topleft", 0, 0, AV_OPT_TYPE_CONST, {.i64 = ZIMG_CHROMA_TOP_LEFT}, 0, 0, FLAGS, "chroma" },
+ { "top", 0, 0, AV_OPT_TYPE_CONST, {.i64 = ZIMG_CHROMA_TOP}, 0, 0, FLAGS, "chroma" },
+ { "bottomleft",0, 0, AV_OPT_TYPE_CONST, {.i64 = ZIMG_CHROMA_BOTTOM_LEFT}, 0, 0, FLAGS, "chroma" },
+ { "bottom", 0, 0, AV_OPT_TYPE_CONST, {.i64 = ZIMG_CHROMA_BOTTOM}, 0, 0, FLAGS, "chroma" },
+ { "chromalin", "set input chroma location", OFFSET(chromal_in), AV_OPT_TYPE_INT, {.i64 = -1}, -1, ZIMG_CHROMA_BOTTOM, FLAGS, "chroma" },
+ { "cin", "set input chroma location", OFFSET(chromal_in), AV_OPT_TYPE_INT, {.i64 = -1}, -1, ZIMG_CHROMA_BOTTOM, FLAGS, "chroma" },
+ { "npl", "set nominal peak luminance", OFFSET(nominal_peak_luminance), AV_OPT_TYPE_DOUBLE, {.dbl = NAN}, 0, DBL_MAX, FLAGS },
+ { "agamma", "allow approximate gamma", OFFSET(approximate_gamma), AV_OPT_TYPE_BOOL, {.i64 = 1}, 0, 1, FLAGS },
+ { NULL }
+};
+
+static const AVClass zscale_class = {
+ .class_name = "zscale",
+ .item_name = av_default_item_name,
+ .option = zscale_options,
+ .version = LIBAVUTIL_VERSION_INT,
+ .category = AV_CLASS_CATEGORY_FILTER,
+};
+
+static const AVFilterPad avfilter_vf_zscale_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad avfilter_vf_zscale_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_props,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_zscale = {
+ .name = "zscale",
+ .description = NULL_IF_CONFIG_SMALL("Apply resizing, colorspace and bit depth conversion."),
+ .init_dict = init_dict,
+ .query_formats = query_formats,
+ .priv_size = sizeof(ZScaleContext),
+ .priv_class = &zscale_class,
+ .uninit = uninit,
+ .inputs = avfilter_vf_zscale_inputs,
+ .outputs = avfilter_vf_zscale_outputs,
+ .process_command = process_command,
+};
diff --git a/libavfilter/video.c b/libavfilter/video.c
index cadac50da8..fabdafd0d9 100644
--- a/libavfilter/video.c
+++ b/libavfilter/video.c
@@ -1,24 +1,29 @@
/*
- * This file is part of Libav.
+ * Copyright 2007 Bobby Bingham
+ * Copyright Stefano Sabatini <stefasab gmail com>
+ * Copyright Vitor Sessak <vitor1001 gmail com>
*
- * Libav is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <string.h>
#include <stdio.h>
+#include "libavutil/avassert.h"
#include "libavutil/buffer.h"
#include "libavutil/imgutils.h"
#include "libavutil/mem.h"
@@ -27,38 +32,52 @@
#include "internal.h"
#include "video.h"
+#define BUFFER_ALIGN 32
+
+
AVFrame *ff_null_get_video_buffer(AVFilterLink *link, int w, int h)
{
return ff_get_video_buffer(link->dst->outputs[0], w, h);
}
-/* TODO: set the buffer's priv member to a context structure for the whole
- * filter chain. This will allow for a buffer pool instead of the constant
- * alloc & free cycle currently implemented. */
AVFrame *ff_default_get_video_buffer(AVFilterLink *link, int w, int h)
{
- AVFrame *frame = av_frame_alloc();
- int ret;
+ int pool_width = 0;
+ int pool_height = 0;
+ int pool_align = 0;
+ enum AVPixelFormat pool_format = AV_PIX_FMT_NONE;
- if (!frame)
- return NULL;
+ if (!link->frame_pool) {
+ link->frame_pool = ff_frame_pool_video_init(av_buffer_allocz, w, h,
+ link->format, BUFFER_ALIGN);
+ if (!link->frame_pool)
+ return NULL;
+ } else {
+ if (ff_frame_pool_get_video_config(link->frame_pool,
+ &pool_width, &pool_height,
+ &pool_format, &pool_align) < 0) {
+ return NULL;
+ }
- frame->width = w;
- frame->height = h;
- frame->format = link->format;
+ if (pool_width != w || pool_height != h ||
+ pool_format != link->format || pool_align != BUFFER_ALIGN) {
- ret = av_frame_get_buffer(frame, 32);
- if (ret < 0)
- av_frame_free(&frame);
+ ff_frame_pool_uninit((FFFramePool **)&link->frame_pool);
+ link->frame_pool = ff_frame_pool_video_init(av_buffer_allocz, w, h,
+ link->format, BUFFER_ALIGN);
+ if (!link->frame_pool)
+ return NULL;
+ }
+ }
- return frame;
+ return ff_frame_pool_get(link->frame_pool);
}
AVFrame *ff_get_video_buffer(AVFilterLink *link, int w, int h)
{
AVFrame *ret = NULL;
- FF_DPRINTF_START(NULL, get_video_buffer); ff_dlog_link(NULL, link, 0);
+ FF_TPRINTF_START(NULL, get_video_buffer); ff_tlog_link(NULL, link, 0);
if (link->dstpad->get_video_buffer)
ret = link->dstpad->get_video_buffer(link, w, h);
diff --git a/libavfilter/video.h b/libavfilter/video.h
index f7e8e34008..56c58d6766 100644
--- a/libavfilter/video.h
+++ b/libavfilter/video.h
@@ -1,18 +1,20 @@
/*
- * This file is part of Libav.
+ * Copyright (c) 2007 Bobby Bingham
*
- * Libav is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/libavfilter/vidstabutils.c b/libavfilter/vidstabutils.c
new file mode 100644
index 0000000000..13544cf573
--- /dev/null
+++ b/libavfilter/vidstabutils.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2013 Georg Martius <georg dot martius at web dot de>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "vidstabutils.h"
+
+/** convert AV's pixelformat to vid.stab pixelformat */
+VSPixelFormat ff_av2vs_pixfmt(AVFilterContext *ctx, enum AVPixelFormat pf)
+{
+ switch (pf) {
+ case AV_PIX_FMT_YUV420P: return PF_YUV420P;
+ case AV_PIX_FMT_YUV422P: return PF_YUV422P;
+ case AV_PIX_FMT_YUV444P: return PF_YUV444P;
+ case AV_PIX_FMT_YUV410P: return PF_YUV410P;
+ case AV_PIX_FMT_YUV411P: return PF_YUV411P;
+ case AV_PIX_FMT_YUV440P: return PF_YUV440P;
+ case AV_PIX_FMT_YUVA420P: return PF_YUVA420P;
+ case AV_PIX_FMT_GRAY8: return PF_GRAY8;
+ case AV_PIX_FMT_RGB24: return PF_RGB24;
+ case AV_PIX_FMT_BGR24: return PF_BGR24;
+ case AV_PIX_FMT_RGBA: return PF_RGBA;
+ default:
+ av_log(ctx, AV_LOG_ERROR, "cannot deal with pixel format %i\n", pf);
+ return PF_NONE;
+ }
+}
+
+/** struct to hold a valid context for logging from within vid.stab lib */
+typedef struct {
+ const AVClass *class;
+} VS2AVLogCtx;
+
+/** wrapper to log vs_log into av_log */
+static int vs2av_log(int type, const char *tag, const char *format, ...)
+{
+ va_list ap;
+ VS2AVLogCtx ctx;
+ AVClass class = {
+ .class_name = tag,
+ .item_name = av_default_item_name,
+ .option = 0,
+ .version = LIBAVUTIL_VERSION_INT,
+ .category = AV_CLASS_CATEGORY_FILTER,
+ };
+ ctx.class = &class;
+ va_start(ap, format);
+ av_vlog(&ctx, type, format, ap);
+ va_end(ap);
+ return VS_OK;
+}
+
+/** sets the memory allocation function and logging constants to av versions */
+void ff_vs_init(void)
+{
+ vs_malloc = av_malloc;
+ vs_zalloc = av_mallocz;
+ vs_realloc = av_realloc;
+ vs_free = av_free;
+
+ VS_ERROR_TYPE = AV_LOG_ERROR;
+ VS_WARN_TYPE = AV_LOG_WARNING;
+ VS_INFO_TYPE = AV_LOG_INFO;
+ VS_MSG_TYPE = AV_LOG_VERBOSE;
+
+ vs_log = vs2av_log;
+
+ VS_ERROR = 0;
+ VS_OK = 1;
+}
diff --git a/libavfilter/vidstabutils.h b/libavfilter/vidstabutils.h
new file mode 100644
index 0000000000..c6d6cedb72
--- /dev/null
+++ b/libavfilter/vidstabutils.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2013 Georg Martius <georg dot martius at web dot de>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_VIDSTABUTILS_H
+#define AVFILTER_VIDSTABUTILS_H
+
+#include <vid.stab/libvidstab.h>
+
+#include "avfilter.h"
+
+/* Conversion routines between libav* and vid.stab */
+
+/**
+ * Converts an AVPixelFormat to a VSPixelFormat.
+ *
+ * @param[in] ctx AVFilterContext used for logging
+ * @param[in] pf AVPixelFormat
+ * @return a corresponding VSPixelFormat
+ */
+VSPixelFormat ff_av2vs_pixfmt(AVFilterContext *ctx, enum AVPixelFormat pf);
+
+/**
+ * Initialize libvidstab
+ *
+ * Sets the memory allocation functions and logging constants to corresponding
+ * av* versions.
+ */
+void ff_vs_init(void);
+
+#endif
diff --git a/libavfilter/vsink_nullsink.c b/libavfilter/vsink_nullsink.c
index 14b6b122ab..281721bc55 100644
--- a/libavfilter/vsink_nullsink.c
+++ b/libavfilter/vsink_nullsink.c
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/libavfilter/vsrc_cellauto.c b/libavfilter/vsrc_cellauto.c
new file mode 100644
index 0000000000..4961f762f4
--- /dev/null
+++ b/libavfilter/vsrc_cellauto.c
@@ -0,0 +1,340 @@
+/*
+ * Copyright (c) Stefano Sabatini 2011
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * cellular automaton video source, based on Stephen Wolfram "experimentus crucis"
+ */
+
+/* #define DEBUG */
+
+#include "libavutil/file.h"
+#include "libavutil/internal.h"
+#include "libavutil/lfg.h"
+#include "libavutil/opt.h"
+#include "libavutil/parseutils.h"
+#include "libavutil/random_seed.h"
+#include "libavutil/avstring.h"
+#include "avfilter.h"
+#include "internal.h"
+#include "formats.h"
+#include "video.h"
+
+typedef struct {
+ const AVClass *class;
+ int w, h;
+ char *filename;
+ char *rule_str;
+ uint8_t *file_buf;
+ size_t file_bufsize;
+ uint8_t *buf;
+ int buf_prev_row_idx, buf_row_idx;
+ uint8_t rule;
+ uint64_t pts;
+ AVRational frame_rate;
+ double random_fill_ratio;
+ uint32_t random_seed;
+ int stitch, scroll, start_full;
+ int64_t generation; ///< the generation number, starting from 0
+ AVLFG lfg;
+ char *pattern;
+} CellAutoContext;
+
+#define OFFSET(x) offsetof(CellAutoContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption cellauto_options[] = {
+ { "filename", "read initial pattern from file", OFFSET(filename), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
+ { "f", "read initial pattern from file", OFFSET(filename), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
+ { "pattern", "set initial pattern", OFFSET(pattern), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
+ { "p", "set initial pattern", OFFSET(pattern), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
+ { "rate", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, FLAGS },
+ { "r", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, FLAGS },
+ { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL}, 0, 0, FLAGS },
+ { "s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL}, 0, 0, FLAGS },
+ { "rule", "set rule", OFFSET(rule), AV_OPT_TYPE_INT, {.i64 = 110}, 0, 255, FLAGS },
+ { "random_fill_ratio", "set fill ratio for filling initial grid randomly", OFFSET(random_fill_ratio), AV_OPT_TYPE_DOUBLE, {.dbl = 1/M_PHI}, 0, 1, FLAGS },
+ { "ratio", "set fill ratio for filling initial grid randomly", OFFSET(random_fill_ratio), AV_OPT_TYPE_DOUBLE, {.dbl = 1/M_PHI}, 0, 1, FLAGS },
+ { "random_seed", "set the seed for filling the initial grid randomly", OFFSET(random_seed), AV_OPT_TYPE_INT, {.i64 = -1}, -1, UINT32_MAX, FLAGS },
+ { "seed", "set the seed for filling the initial grid randomly", OFFSET(random_seed), AV_OPT_TYPE_INT, {.i64 = -1}, -1, UINT32_MAX, FLAGS },
+ { "scroll", "scroll pattern downward", OFFSET(scroll), AV_OPT_TYPE_BOOL, {.i64 = 1}, 0, 1, FLAGS },
+ { "start_full", "start filling the whole video", OFFSET(start_full), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS },
+ { "full", "start filling the whole video", OFFSET(start_full), AV_OPT_TYPE_BOOL, {.i64 = 1}, 0, 1, FLAGS },
+ { "stitch", "stitch boundaries", OFFSET(stitch), AV_OPT_TYPE_BOOL, {.i64 = 1}, 0, 1, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(cellauto);
+
+#ifdef DEBUG
+static void show_cellauto_row(AVFilterContext *ctx)
+{
+ CellAutoContext *s = ctx->priv;
+ int i;
+ uint8_t *row = s->buf + s->w * s->buf_row_idx;
+ char *line = av_malloc(s->w + 1);
+ if (!line)
+ return;
+
+ for (i = 0; i < s->w; i++)
+ line[i] = row[i] ? '@' : ' ';
+ line[i] = 0;
+ av_log(ctx, AV_LOG_DEBUG, "generation:%"PRId64" row:%s|\n", s->generation, line);
+ av_free(line);
+}
+#endif
+
+static int init_pattern_from_string(AVFilterContext *ctx)
+{
+ CellAutoContext *s = ctx->priv;
+ char *p;
+ int i, w = 0;
+
+ w = strlen(s->pattern);
+ av_log(ctx, AV_LOG_DEBUG, "w:%d\n", w);
+
+ if (s->w) {
+ if (w > s->w) {
+ av_log(ctx, AV_LOG_ERROR,
+ "The specified width is %d which cannot contain the provided string width of %d\n",
+ s->w, w);
+ return AVERROR(EINVAL);
+ }
+ } else {
+ /* width was not specified, set it to width of the provided row */
+ s->w = w;
+ s->h = (double)s->w * M_PHI;
+ }
+
+ s->buf = av_mallocz_array(sizeof(uint8_t) * s->w, s->h);
+ if (!s->buf)
+ return AVERROR(ENOMEM);
+
+ /* fill buf */
+ p = s->pattern;
+ for (i = (s->w - w)/2;; i++) {
+ av_log(ctx, AV_LOG_DEBUG, "%d %c\n", i, *p == '\n' ? 'N' : *p);
+ if (*p == '\n' || !*p)
+ break;
+ else
+ s->buf[i] = !!av_isgraph(*(p++));
+ }
+
+ return 0;
+}
+
+static int init_pattern_from_file(AVFilterContext *ctx)
+{
+ CellAutoContext *s = ctx->priv;
+ int ret;
+
+ ret = av_file_map(s->filename,
+ &s->file_buf, &s->file_bufsize, 0, ctx);
+ if (ret < 0)
+ return ret;
+
+ /* create a string based on the read file */
+ s->pattern = av_malloc(s->file_bufsize + 1);
+ if (!s->pattern)
+ return AVERROR(ENOMEM);
+ memcpy(s->pattern, s->file_buf, s->file_bufsize);
+ s->pattern[s->file_bufsize] = 0;
+
+ return init_pattern_from_string(ctx);
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ CellAutoContext *s = ctx->priv;
+ int ret;
+
+ if (!s->w && !s->filename && !s->pattern)
+ av_opt_set(s, "size", "320x518", 0);
+
+ if (s->filename && s->pattern) {
+ av_log(ctx, AV_LOG_ERROR, "Only one of the filename or pattern options can be used\n");
+ return AVERROR(EINVAL);
+ }
+
+ if (s->filename) {
+ if ((ret = init_pattern_from_file(ctx)) < 0)
+ return ret;
+ } else if (s->pattern) {
+ if ((ret = init_pattern_from_string(ctx)) < 0)
+ return ret;
+ } else {
+ /* fill the first row randomly */
+ int i;
+
+ s->buf = av_mallocz_array(sizeof(uint8_t) * s->w, s->h);
+ if (!s->buf)
+ return AVERROR(ENOMEM);
+ if (s->random_seed == -1)
+ s->random_seed = av_get_random_seed();
+
+ av_lfg_init(&s->lfg, s->random_seed);
+
+ for (i = 0; i < s->w; i++) {
+ double r = (double)av_lfg_get(&s->lfg) / UINT32_MAX;
+ if (r <= s->random_fill_ratio)
+ s->buf[i] = 1;
+ }
+ }
+
+ av_log(ctx, AV_LOG_VERBOSE,
+ "s:%dx%d r:%d/%d rule:%d stitch:%d scroll:%d full:%d seed:%u\n",
+ s->w, s->h, s->frame_rate.num, s->frame_rate.den,
+ s->rule, s->stitch, s->scroll, s->start_full,
+ s->random_seed);
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ CellAutoContext *s = ctx->priv;
+
+ av_file_unmap(s->file_buf, s->file_bufsize);
+ av_freep(&s->buf);
+ av_freep(&s->pattern);
+}
+
+static int config_props(AVFilterLink *outlink)
+{
+ CellAutoContext *s = outlink->src->priv;
+
+ outlink->w = s->w;
+ outlink->h = s->h;
+ outlink->time_base = av_inv_q(s->frame_rate);
+
+ return 0;
+}
+
+static void evolve(AVFilterContext *ctx)
+{
+ CellAutoContext *s = ctx->priv;
+ int i, v, pos[3];
+ uint8_t *row, *prev_row = s->buf + s->buf_row_idx * s->w;
+ enum { NW, N, NE };
+
+ s->buf_prev_row_idx = s->buf_row_idx;
+ s->buf_row_idx = s->buf_row_idx == s->h-1 ? 0 : s->buf_row_idx+1;
+ row = s->buf + s->w * s->buf_row_idx;
+
+ for (i = 0; i < s->w; i++) {
+ if (s->stitch) {
+ pos[NW] = i-1 < 0 ? s->w-1 : i-1;
+ pos[N] = i;
+ pos[NE] = i+1 == s->w ? 0 : i+1;
+ v = prev_row[pos[NW]]<<2 | prev_row[pos[N]]<<1 | prev_row[pos[NE]];
+ } else {
+ v = 0;
+ v|= i-1 >= 0 ? prev_row[i-1]<<2 : 0;
+ v|= prev_row[i ]<<1 ;
+ v|= i+1 < s->w ? prev_row[i+1] : 0;
+ }
+ row[i] = !!(s->rule & (1<<v));
+ ff_dlog(ctx, "i:%d context:%c%c%c -> cell:%d\n", i,
+ v&4?'@':' ', v&2?'@':' ', v&1?'@':' ', row[i]);
+ }
+
+ s->generation++;
+}
+
+static void fill_picture(AVFilterContext *ctx, AVFrame *picref)
+{
+ CellAutoContext *s = ctx->priv;
+ int i, j, k, row_idx = 0;
+ uint8_t *p0 = picref->data[0];
+
+ if (s->scroll && s->generation >= s->h)
+ /* show on top the oldest row */
+ row_idx = (s->buf_row_idx + 1) % s->h;
+
+ /* fill the output picture with the whole buffer */
+ for (i = 0; i < s->h; i++) {
+ uint8_t byte = 0;
+ uint8_t *row = s->buf + row_idx*s->w;
+ uint8_t *p = p0;
+ for (k = 0, j = 0; j < s->w; j++) {
+ byte |= row[j]<<(7-k++);
+ if (k==8 || j == s->w-1) {
+ k = 0;
+ *p++ = byte;
+ byte = 0;
+ }
+ }
+ row_idx = (row_idx + 1) % s->h;
+ p0 += picref->linesize[0];
+ }
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ CellAutoContext *s = outlink->src->priv;
+ AVFrame *picref = ff_get_video_buffer(outlink, s->w, s->h);
+ if (!picref)
+ return AVERROR(ENOMEM);
+ picref->sample_aspect_ratio = (AVRational) {1, 1};
+ if (s->generation == 0 && s->start_full) {
+ int i;
+ for (i = 0; i < s->h-1; i++)
+ evolve(outlink->src);
+ }
+ fill_picture(outlink->src, picref);
+ evolve(outlink->src);
+
+ picref->pts = s->pts++;
+
+#ifdef DEBUG
+ show_cellauto_row(outlink->src);
+#endif
+ return ff_filter_frame(outlink, picref);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_MONOBLACK, AV_PIX_FMT_NONE };
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static const AVFilterPad cellauto_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .request_frame = request_frame,
+ .config_props = config_props,
+ },
+ { NULL }
+};
+
+AVFilter ff_vsrc_cellauto = {
+ .name = "cellauto",
+ .description = NULL_IF_CONFIG_SMALL("Create pattern generated by an elementary cellular automaton."),
+ .priv_size = sizeof(CellAutoContext),
+ .priv_class = &cellauto_class,
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = NULL,
+ .outputs = cellauto_outputs,
+};
diff --git a/libavfilter/vsrc_color.c b/libavfilter/vsrc_color.c
deleted file mode 100644
index 8c8ca3fdef..0000000000
--- a/libavfilter/vsrc_color.c
+++ /dev/null
@@ -1,199 +0,0 @@
-/*
- * Copyright (c) 2010 Stefano Sabatini
- *
- * This file is part of Libav.
- *
- * Libav is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * Libav is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-/**
- * @file
- * color source
- */
-
-#include <stdio.h>
-#include <string.h>
-
-#include "avfilter.h"
-#include "formats.h"
-#include "internal.h"
-#include "video.h"
-#include "libavutil/pixdesc.h"
-#include "libavutil/colorspace.h"
-#include "libavutil/imgutils.h"
-#include "libavutil/internal.h"
-#include "libavutil/mathematics.h"
-#include "libavutil/mem.h"
-#include "libavutil/opt.h"
-#include "libavutil/parseutils.h"
-#include "drawutils.h"
-
-typedef struct ColorContext {
- const AVClass *class;
- int w, h;
- uint8_t color[4];
- AVRational frame_rate;
- uint8_t *line[4];
- int line_step[4];
- int hsub, vsub; ///< chroma subsampling values
- uint64_t pts;
- char *color_str;
- char *size_str;
- char *framerate_str;
-} ColorContext;
-
-static av_cold int color_init(AVFilterContext *ctx)
-{
- ColorContext *color = ctx->priv;
- int ret;
-
- if (av_parse_video_size(&color->w, &color->h, color->size_str) < 0) {
- av_log(ctx, AV_LOG_ERROR, "Invalid frame size: %s\n", color->size_str);
- return AVERROR(EINVAL);
- }
-
- if (av_parse_video_rate(&color->frame_rate, color->framerate_str) < 0) {
- av_log(ctx, AV_LOG_ERROR, "Invalid frame rate: %s\n", color->framerate_str);
- return AVERROR(EINVAL);
- }
-
- if ((ret = av_parse_color(color->color, color->color_str, -1, ctx)) < 0)
- return ret;
-
- return 0;
-}
-
-static av_cold void color_uninit(AVFilterContext *ctx)
-{
- ColorContext *color = ctx->priv;
- int i;
-
- for (i = 0; i < 4; i++) {
- av_freep(&color->line[i]);
- color->line_step[i] = 0;
- }
-}
-
-static int query_formats(AVFilterContext *ctx)
-{
- static const enum AVPixelFormat pix_fmts[] = {
- AV_PIX_FMT_ARGB, AV_PIX_FMT_RGBA,
- AV_PIX_FMT_ABGR, AV_PIX_FMT_BGRA,
- AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
-
- AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P,
- AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P,
- AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
- AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P,
- AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ440P,
- AV_PIX_FMT_YUVA420P,
-
- AV_PIX_FMT_NONE
- };
-
- ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
- return 0;
-}
-
-static int color_config_props(AVFilterLink *inlink)
-{
- AVFilterContext *ctx = inlink->src;
- ColorContext *color = ctx->priv;
- uint8_t rgba_color[4];
- int is_packed_rgba;
- const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
-
- color->hsub = pix_desc->log2_chroma_w;
- color->vsub = pix_desc->log2_chroma_h;
-
- color->w &= ~((1 << color->hsub) - 1);
- color->h &= ~((1 << color->vsub) - 1);
- if (av_image_check_size(color->w, color->h, 0, ctx) < 0)
- return AVERROR(EINVAL);
-
- memcpy(rgba_color, color->color, sizeof(rgba_color));
- ff_fill_line_with_color(color->line, color->line_step, color->w, color->color,
- inlink->format, rgba_color, &is_packed_rgba, NULL);
-
- av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d r:%d/%d color:0x%02x%02x%02x%02x[%s]\n",
- color->w, color->h, color->frame_rate.num, color->frame_rate.den,
- color->color[0], color->color[1], color->color[2], color->color[3],
- is_packed_rgba ? "rgba" : "yuva");
- inlink->w = color->w;
- inlink->h = color->h;
- inlink->time_base = av_inv_q(color->frame_rate);
- inlink->frame_rate = color->frame_rate;
-
- return 0;
-}
-
-static int color_request_frame(AVFilterLink *link)
-{
- ColorContext *color = link->src->priv;
- AVFrame *frame = ff_get_video_buffer(link, color->w, color->h);
-
- if (!frame)
- return AVERROR(ENOMEM);
-
- frame->sample_aspect_ratio = (AVRational) {1, 1};
- frame->pts = color->pts++;
-
- ff_draw_rectangle(frame->data, frame->linesize,
- color->line, color->line_step, color->hsub, color->vsub,
- 0, 0, color->w, color->h);
- return ff_filter_frame(link, frame);
-}
-
-#define OFFSET(x) offsetof(ColorContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
-static const AVOption options[] = {
- { "color", "Output video color", OFFSET(color_str), AV_OPT_TYPE_STRING, { .str = "black" }, .flags = FLAGS },
- { "size", "Output video size (wxh or an abbreviation)", OFFSET(size_str), AV_OPT_TYPE_STRING, { .str = "320x240" }, .flags = FLAGS },
- { "framerate", "Output video framerate", OFFSET(framerate_str), AV_OPT_TYPE_STRING, { .str = "25" }, .flags = FLAGS },
- { NULL },
-};
-
-static const AVClass color_class = {
- .class_name = "color",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
-
-static const AVFilterPad avfilter_vsrc_color_outputs[] = {
- {
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .request_frame = color_request_frame,
- .config_props = color_config_props
- },
- { NULL }
-};
-
-AVFilter ff_vsrc_color = {
- .name = "color",
- .description = NULL_IF_CONFIG_SMALL("Provide an uniformly colored input, syntax is: [color[:size[:rate]]]"),
-
- .priv_class = &color_class,
- .priv_size = sizeof(ColorContext),
- .init = color_init,
- .uninit = color_uninit,
-
- .query_formats = query_formats,
-
- .inputs = NULL,
-
- .outputs = avfilter_vsrc_color_outputs,
-};
diff --git a/libavfilter/vsrc_life.c b/libavfilter/vsrc_life.c
new file mode 100644
index 0000000000..8d51051487
--- /dev/null
+++ b/libavfilter/vsrc_life.c
@@ -0,0 +1,454 @@
+/*
+ * Copyright (c) Stefano Sabatini 2010
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * life video source, based on John Conways' Life Game
+ */
+
+/* #define DEBUG */
+
+#include "libavutil/file.h"
+#include "libavutil/internal.h"
+#include "libavutil/intreadwrite.h"
+#include "libavutil/lfg.h"
+#include "libavutil/opt.h"
+#include "libavutil/parseutils.h"
+#include "libavutil/random_seed.h"
+#include "libavutil/avstring.h"
+#include "avfilter.h"
+#include "internal.h"
+#include "formats.h"
+#include "video.h"
+
+typedef struct {
+ const AVClass *class;
+ int w, h;
+ char *filename;
+ char *rule_str;
+ uint8_t *file_buf;
+ size_t file_bufsize;
+
+ /**
+ * The two grid state buffers.
+ *
+ * A 0xFF (ALIVE_CELL) value means the cell is alive (or new born), while
+ * the decreasing values from 0xFE to 0 means the cell is dead; the range
+ * of values is used for the slow death effect, or mold (0xFE means dead,
+ * 0xFD means very dead, 0xFC means very very dead... and 0x00 means
+ * definitely dead/mold).
+ */
+ uint8_t *buf[2];
+
+ uint8_t buf_idx;
+ uint16_t stay_rule; ///< encode the behavior for filled cells
+ uint16_t born_rule; ///< encode the behavior for empty cells
+ uint64_t pts;
+ AVRational frame_rate;
+ double random_fill_ratio;
+ uint32_t random_seed;
+ int stitch;
+ int mold;
+ uint8_t life_color[4];
+ uint8_t death_color[4];
+ uint8_t mold_color[4];
+ AVLFG lfg;
+ void (*draw)(AVFilterContext*, AVFrame*);
+} LifeContext;
+
+#define ALIVE_CELL 0xFF
+#define OFFSET(x) offsetof(LifeContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption life_options[] = {
+ { "filename", "set source file", OFFSET(filename), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
+ { "f", "set source file", OFFSET(filename), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
+ { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL}, 0, 0, FLAGS },
+ { "s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL}, 0, 0, FLAGS },
+ { "rate", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, FLAGS },
+ { "r", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, FLAGS },
+ { "rule", "set rule", OFFSET(rule_str), AV_OPT_TYPE_STRING, {.str = "B3/S23"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "random_fill_ratio", "set fill ratio for filling initial grid randomly", OFFSET(random_fill_ratio), AV_OPT_TYPE_DOUBLE, {.dbl=1/M_PHI}, 0, 1, FLAGS },
+ { "ratio", "set fill ratio for filling initial grid randomly", OFFSET(random_fill_ratio), AV_OPT_TYPE_DOUBLE, {.dbl=1/M_PHI}, 0, 1, FLAGS },
+ { "random_seed", "set the seed for filling the initial grid randomly", OFFSET(random_seed), AV_OPT_TYPE_INT, {.i64=-1}, -1, UINT32_MAX, FLAGS },
+ { "seed", "set the seed for filling the initial grid randomly", OFFSET(random_seed), AV_OPT_TYPE_INT, {.i64=-1}, -1, UINT32_MAX, FLAGS },
+ { "stitch", "stitch boundaries", OFFSET(stitch), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, FLAGS },
+ { "mold", "set mold speed for dead cells", OFFSET(mold), AV_OPT_TYPE_INT, {.i64=0}, 0, 0xFF, FLAGS },
+ { "life_color", "set life color", OFFSET( life_color), AV_OPT_TYPE_COLOR, {.str="white"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "death_color", "set death color", OFFSET(death_color), AV_OPT_TYPE_COLOR, {.str="black"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "mold_color", "set mold color", OFFSET( mold_color), AV_OPT_TYPE_COLOR, {.str="black"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(life);
+
+static int parse_rule(uint16_t *born_rule, uint16_t *stay_rule,
+ const char *rule_str, void *log_ctx)
+{
+ char *tail;
+ const char *p = rule_str;
+ *born_rule = 0;
+ *stay_rule = 0;
+
+ if (strchr("bBsS", *p)) {
+ /* parse rule as a Born / Stay Alive code, see
+ * http://en.wikipedia.org/wiki/Conway%27s_Game_of_Life */
+ do {
+ uint16_t *rule = (*p == 'b' || *p == 'B') ? born_rule : stay_rule;
+ p++;
+ while (*p >= '0' && *p <= '8') {
+ *rule += 1<<(*p - '0');
+ p++;
+ }
+ if (*p != '/')
+ break;
+ p++;
+ } while (strchr("bBsS", *p));
+
+ if (*p)
+ goto error;
+ } else {
+ /* parse rule as a number, expressed in the form STAY|(BORN<<9),
+ * where STAY and BORN encode the corresponding 9-bits rule */
+ long int rule = strtol(rule_str, &tail, 10);
+ if (*tail)
+ goto error;
+ *born_rule = ((1<<9)-1) & rule;
+ *stay_rule = rule >> 9;
+ }
+
+ return 0;
+
+error:
+ av_log(log_ctx, AV_LOG_ERROR, "Invalid rule code '%s' provided\n", rule_str);
+ return AVERROR(EINVAL);
+}
+
+#ifdef DEBUG
+static void show_life_grid(AVFilterContext *ctx)
+{
+ LifeContext *life = ctx->priv;
+ int i, j;
+
+ char *line = av_malloc(life->w + 1);
+ if (!line)
+ return;
+ for (i = 0; i < life->h; i++) {
+ for (j = 0; j < life->w; j++)
+ line[j] = life->buf[life->buf_idx][i*life->w + j] == ALIVE_CELL ? '@' : ' ';
+ line[j] = 0;
+ av_log(ctx, AV_LOG_DEBUG, "%3d: %s\n", i, line);
+ }
+ av_free(line);
+}
+#endif
+
+static int init_pattern_from_file(AVFilterContext *ctx)
+{
+ LifeContext *life = ctx->priv;
+ char *p;
+ int ret, i, i0, j, h = 0, w, max_w = 0;
+
+ if ((ret = av_file_map(life->filename, &life->file_buf, &life->file_bufsize,
+ 0, ctx)) < 0)
+ return ret;
+ av_freep(&life->filename);
+
+ /* prescan file to get the number of lines and the maximum width */
+ w = 0;
+ for (i = 0; i < life->file_bufsize; i++) {
+ if (life->file_buf[i] == '\n') {
+ h++; max_w = FFMAX(w, max_w); w = 0;
+ } else {
+ w++;
+ }
+ }
+ av_log(ctx, AV_LOG_DEBUG, "h:%d max_w:%d\n", h, max_w);
+
+ if (life->w) {
+ if (max_w > life->w || h > life->h) {
+ av_log(ctx, AV_LOG_ERROR,
+ "The specified size is %dx%d which cannot contain the provided file size of %dx%d\n",
+ life->w, life->h, max_w, h);
+ return AVERROR(EINVAL);
+ }
+ } else {
+ /* size was not specified, set it to size of the grid */
+ life->w = max_w;
+ life->h = h;
+ }
+
+ if (!(life->buf[0] = av_calloc(life->h * life->w, sizeof(*life->buf[0]))) ||
+ !(life->buf[1] = av_calloc(life->h * life->w, sizeof(*life->buf[1])))) {
+ av_freep(&life->buf[0]);
+ av_freep(&life->buf[1]);
+ return AVERROR(ENOMEM);
+ }
+
+ /* fill buf[0] */
+ p = life->file_buf;
+ for (i0 = 0, i = (life->h - h)/2; i0 < h; i0++, i++) {
+ for (j = (life->w - max_w)/2;; j++) {
+ av_log(ctx, AV_LOG_DEBUG, "%d:%d %c\n", i, j, *p == '\n' ? 'N' : *p);
+ if (*p == '\n') {
+ p++; break;
+ } else
+ life->buf[0][i*life->w + j] = av_isgraph(*(p++)) ? ALIVE_CELL : 0;
+ }
+ }
+ life->buf_idx = 0;
+
+ return 0;
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ LifeContext *life = ctx->priv;
+ int ret;
+
+ if (!life->w && !life->filename)
+ av_opt_set(life, "size", "320x240", 0);
+
+ if ((ret = parse_rule(&life->born_rule, &life->stay_rule, life->rule_str, ctx)) < 0)
+ return ret;
+
+ if (!life->mold && memcmp(life->mold_color, "\x00\x00\x00", 3))
+ av_log(ctx, AV_LOG_WARNING,
+ "Mold color is set while mold isn't, ignoring the color.\n");
+
+ if (!life->filename) {
+ /* fill the grid randomly */
+ int i;
+
+ if (!(life->buf[0] = av_calloc(life->h * life->w, sizeof(*life->buf[0]))) ||
+ !(life->buf[1] = av_calloc(life->h * life->w, sizeof(*life->buf[1])))) {
+ av_freep(&life->buf[0]);
+ av_freep(&life->buf[1]);
+ return AVERROR(ENOMEM);
+ }
+ if (life->random_seed == -1)
+ life->random_seed = av_get_random_seed();
+
+ av_lfg_init(&life->lfg, life->random_seed);
+
+ for (i = 0; i < life->w * life->h; i++) {
+ double r = (double)av_lfg_get(&life->lfg) / UINT32_MAX;
+ if (r <= life->random_fill_ratio)
+ life->buf[0][i] = ALIVE_CELL;
+ }
+ life->buf_idx = 0;
+ } else {
+ if ((ret = init_pattern_from_file(ctx)) < 0)
+ return ret;
+ }
+
+ av_log(ctx, AV_LOG_VERBOSE,
+ "s:%dx%d r:%d/%d rule:%s stay_rule:%d born_rule:%d stitch:%d seed:%u\n",
+ life->w, life->h, life->frame_rate.num, life->frame_rate.den,
+ life->rule_str, life->stay_rule, life->born_rule, life->stitch,
+ life->random_seed);
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ LifeContext *life = ctx->priv;
+
+ av_file_unmap(life->file_buf, life->file_bufsize);
+ av_freep(&life->rule_str);
+ av_freep(&life->buf[0]);
+ av_freep(&life->buf[1]);
+}
+
+static int config_props(AVFilterLink *outlink)
+{
+ LifeContext *life = outlink->src->priv;
+
+ outlink->w = life->w;
+ outlink->h = life->h;
+ outlink->time_base = av_inv_q(life->frame_rate);
+
+ return 0;
+}
+
+static void evolve(AVFilterContext *ctx)
+{
+ LifeContext *life = ctx->priv;
+ int i, j;
+ uint8_t *oldbuf = life->buf[ life->buf_idx];
+ uint8_t *newbuf = life->buf[!life->buf_idx];
+
+ enum { NW, N, NE, W, E, SW, S, SE };
+
+ /* evolve the grid */
+ for (i = 0; i < life->h; i++) {
+ for (j = 0; j < life->w; j++) {
+ int pos[8][2], n, alive, cell;
+ if (life->stitch) {
+ pos[NW][0] = (i-1) < 0 ? life->h-1 : i-1; pos[NW][1] = (j-1) < 0 ? life->w-1 : j-1;
+ pos[N ][0] = (i-1) < 0 ? life->h-1 : i-1; pos[N ][1] = j ;
+ pos[NE][0] = (i-1) < 0 ? life->h-1 : i-1; pos[NE][1] = (j+1) == life->w ? 0 : j+1;
+ pos[W ][0] = i ; pos[W ][1] = (j-1) < 0 ? life->w-1 : j-1;
+ pos[E ][0] = i ; pos[E ][1] = (j+1) == life->w ? 0 : j+1;
+ pos[SW][0] = (i+1) == life->h ? 0 : i+1; pos[SW][1] = (j-1) < 0 ? life->w-1 : j-1;
+ pos[S ][0] = (i+1) == life->h ? 0 : i+1; pos[S ][1] = j ;
+ pos[SE][0] = (i+1) == life->h ? 0 : i+1; pos[SE][1] = (j+1) == life->w ? 0 : j+1;
+ } else {
+ pos[NW][0] = (i-1) < 0 ? -1 : i-1; pos[NW][1] = (j-1) < 0 ? -1 : j-1;
+ pos[N ][0] = (i-1) < 0 ? -1 : i-1; pos[N ][1] = j ;
+ pos[NE][0] = (i-1) < 0 ? -1 : i-1; pos[NE][1] = (j+1) == life->w ? -1 : j+1;
+ pos[W ][0] = i ; pos[W ][1] = (j-1) < 0 ? -1 : j-1;
+ pos[E ][0] = i ; pos[E ][1] = (j+1) == life->w ? -1 : j+1;
+ pos[SW][0] = (i+1) == life->h ? -1 : i+1; pos[SW][1] = (j-1) < 0 ? -1 : j-1;
+ pos[S ][0] = (i+1) == life->h ? -1 : i+1; pos[S ][1] = j ;
+ pos[SE][0] = (i+1) == life->h ? -1 : i+1; pos[SE][1] = (j+1) == life->w ? -1 : j+1;
+ }
+
+ /* compute the number of live neighbor cells */
+ n = (pos[NW][0] == -1 || pos[NW][1] == -1 ? 0 : oldbuf[pos[NW][0]*life->w + pos[NW][1]] == ALIVE_CELL) +
+ (pos[N ][0] == -1 || pos[N ][1] == -1 ? 0 : oldbuf[pos[N ][0]*life->w + pos[N ][1]] == ALIVE_CELL) +
+ (pos[NE][0] == -1 || pos[NE][1] == -1 ? 0 : oldbuf[pos[NE][0]*life->w + pos[NE][1]] == ALIVE_CELL) +
+ (pos[W ][0] == -1 || pos[W ][1] == -1 ? 0 : oldbuf[pos[W ][0]*life->w + pos[W ][1]] == ALIVE_CELL) +
+ (pos[E ][0] == -1 || pos[E ][1] == -1 ? 0 : oldbuf[pos[E ][0]*life->w + pos[E ][1]] == ALIVE_CELL) +
+ (pos[SW][0] == -1 || pos[SW][1] == -1 ? 0 : oldbuf[pos[SW][0]*life->w + pos[SW][1]] == ALIVE_CELL) +
+ (pos[S ][0] == -1 || pos[S ][1] == -1 ? 0 : oldbuf[pos[S ][0]*life->w + pos[S ][1]] == ALIVE_CELL) +
+ (pos[SE][0] == -1 || pos[SE][1] == -1 ? 0 : oldbuf[pos[SE][0]*life->w + pos[SE][1]] == ALIVE_CELL);
+ cell = oldbuf[i*life->w + j];
+ alive = 1<<n & (cell == ALIVE_CELL ? life->stay_rule : life->born_rule);
+ if (alive) *newbuf = ALIVE_CELL; // new cell is alive
+ else if (cell) *newbuf = cell - 1; // new cell is dead and in the process of mold
+ else *newbuf = 0; // new cell is definitely dead
+ ff_dlog(ctx, "i:%d j:%d live_neighbors:%d cell:%d -> cell:%d\n", i, j, n, cell, *newbuf);
+ newbuf++;
+ }
+ }
+
+ life->buf_idx = !life->buf_idx;
+}
+
+static void fill_picture_monoblack(AVFilterContext *ctx, AVFrame *picref)
+{
+ LifeContext *life = ctx->priv;
+ uint8_t *buf = life->buf[life->buf_idx];
+ int i, j, k;
+
+ /* fill the output picture with the old grid buffer */
+ for (i = 0; i < life->h; i++) {
+ uint8_t byte = 0;
+ uint8_t *p = picref->data[0] + i * picref->linesize[0];
+ for (k = 0, j = 0; j < life->w; j++) {
+ byte |= (buf[i*life->w+j] == ALIVE_CELL)<<(7-k++);
+ if (k==8 || j == life->w-1) {
+ k = 0;
+ *p++ = byte;
+ byte = 0;
+ }
+ }
+ }
+}
+
+// divide by 255 and round to nearest
+// apply a fast variant: (X+127)/255 = ((X+127)*257+257)>>16 = ((X+128)*257)>>16
+#define FAST_DIV255(x) ((((x) + 128) * 257) >> 16)
+
+static void fill_picture_rgb(AVFilterContext *ctx, AVFrame *picref)
+{
+ LifeContext *life = ctx->priv;
+ uint8_t *buf = life->buf[life->buf_idx];
+ int i, j;
+
+ /* fill the output picture with the old grid buffer */
+ for (i = 0; i < life->h; i++) {
+ uint8_t *p = picref->data[0] + i * picref->linesize[0];
+ for (j = 0; j < life->w; j++) {
+ uint8_t v = buf[i*life->w + j];
+ if (life->mold && v != ALIVE_CELL) {
+ const uint8_t *c1 = life-> mold_color;
+ const uint8_t *c2 = life->death_color;
+ int death_age = FFMIN((0xff - v) * life->mold, 0xff);
+ *p++ = FAST_DIV255((c2[0] << 8) + ((int)c1[0] - (int)c2[0]) * death_age);
+ *p++ = FAST_DIV255((c2[1] << 8) + ((int)c1[1] - (int)c2[1]) * death_age);
+ *p++ = FAST_DIV255((c2[2] << 8) + ((int)c1[2] - (int)c2[2]) * death_age);
+ } else {
+ const uint8_t *c = v == ALIVE_CELL ? life->life_color : life->death_color;
+ AV_WB24(p, c[0]<<16 | c[1]<<8 | c[2]);
+ p += 3;
+ }
+ }
+ }
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ LifeContext *life = outlink->src->priv;
+ AVFrame *picref = ff_get_video_buffer(outlink, life->w, life->h);
+ if (!picref)
+ return AVERROR(ENOMEM);
+ picref->sample_aspect_ratio = (AVRational) {1, 1};
+ picref->pts = life->pts++;
+
+ life->draw(outlink->src, picref);
+ evolve(outlink->src);
+#ifdef DEBUG
+ show_life_grid(outlink->src);
+#endif
+ return ff_filter_frame(outlink, picref);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ LifeContext *life = ctx->priv;
+ enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_NONE, AV_PIX_FMT_NONE };
+ AVFilterFormats *fmts_list;
+
+ if (life->mold || memcmp(life-> life_color, "\xff\xff\xff", 3)
+ || memcmp(life->death_color, "\x00\x00\x00", 3)) {
+ pix_fmts[0] = AV_PIX_FMT_RGB24;
+ life->draw = fill_picture_rgb;
+ } else {
+ pix_fmts[0] = AV_PIX_FMT_MONOBLACK;
+ life->draw = fill_picture_monoblack;
+ }
+
+ fmts_list = ff_make_format_list(pix_fmts);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static const AVFilterPad life_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .request_frame = request_frame,
+ .config_props = config_props,
+ },
+ { NULL}
+};
+
+AVFilter ff_vsrc_life = {
+ .name = "life",
+ .description = NULL_IF_CONFIG_SMALL("Create life."),
+ .priv_size = sizeof(LifeContext),
+ .priv_class = &life_class,
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = NULL,
+ .outputs = life_outputs,
+};
diff --git a/libavfilter/vsrc_mandelbrot.c b/libavfilter/vsrc_mandelbrot.c
new file mode 100644
index 0000000000..2f6944f02f
--- /dev/null
+++ b/libavfilter/vsrc_mandelbrot.c
@@ -0,0 +1,432 @@
+/*
+ * Copyright (c) 2011 Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * The vsrc_color filter from Stefano Sabatini was used as template to create
+ * this
+ */
+
+/**
+ * @file
+ * Mandelbrot fractal renderer
+ */
+
+#include "avfilter.h"
+#include "formats.h"
+#include "video.h"
+#include "internal.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/opt.h"
+#include "libavutil/parseutils.h"
+#include <float.h>
+#include <math.h>
+
+#define SQR(a) ((a)*(a))
+
+enum Outer{
+ ITERATION_COUNT,
+ NORMALIZED_ITERATION_COUNT,
+ WHITE,
+ OUTZ,
+};
+
+enum Inner{
+ BLACK,
+ PERIOD,
+ CONVTIME,
+ MINCOL,
+};
+
+typedef struct Point {
+ double p[2];
+ uint32_t val;
+} Point;
+
+typedef struct {
+ const AVClass *class;
+ int w, h;
+ AVRational frame_rate;
+ uint64_t pts;
+ int maxiter;
+ double start_x;
+ double start_y;
+ double start_scale;
+ double end_scale;
+ double end_pts;
+ double bailout;
+ int outer;
+ int inner;
+ int cache_allocated;
+ int cache_used;
+ Point *point_cache;
+ Point *next_cache;
+ double (*zyklus)[2];
+ uint32_t dither;
+
+ double morphxf;
+ double morphyf;
+ double morphamp;
+} MBContext;
+
+#define OFFSET(x) offsetof(MBContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption mandelbrot_options[] = {
+ {"size", "set frame size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="640x480"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ {"s", "set frame size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="640x480"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ {"rate", "set frame rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str="25"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ {"r", "set frame rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str="25"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ {"maxiter", "set max iterations number", OFFSET(maxiter), AV_OPT_TYPE_INT, {.i64=7189}, 1, INT_MAX, FLAGS },
+ {"start_x", "set the initial x position", OFFSET(start_x), AV_OPT_TYPE_DOUBLE, {.dbl=-0.743643887037158704752191506114774}, -100, 100, FLAGS },
+ {"start_y", "set the initial y position", OFFSET(start_y), AV_OPT_TYPE_DOUBLE, {.dbl=-0.131825904205311970493132056385139}, -100, 100, FLAGS },
+ {"start_scale", "set the initial scale value", OFFSET(start_scale), AV_OPT_TYPE_DOUBLE, {.dbl=3.0}, 0, FLT_MAX, FLAGS },
+ {"end_scale", "set the terminal scale value", OFFSET(end_scale), AV_OPT_TYPE_DOUBLE, {.dbl=0.3}, 0, FLT_MAX, FLAGS },
+ {"end_pts", "set the terminal pts value", OFFSET(end_pts), AV_OPT_TYPE_DOUBLE, {.dbl=400}, 0, INT64_MAX, FLAGS },
+ {"bailout", "set the bailout value", OFFSET(bailout), AV_OPT_TYPE_DOUBLE, {.dbl=10}, 0, FLT_MAX, FLAGS },
+ {"morphxf", "set morph x frequency", OFFSET(morphxf), AV_OPT_TYPE_DOUBLE, {.dbl=0.01}, -FLT_MAX, FLT_MAX, FLAGS },
+ {"morphyf", "set morph y frequency", OFFSET(morphyf), AV_OPT_TYPE_DOUBLE, {.dbl=0.0123}, -FLT_MAX, FLT_MAX, FLAGS },
+ {"morphamp", "set morph amplitude", OFFSET(morphamp), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -FLT_MAX, FLT_MAX, FLAGS },
+
+ {"outer", "set outer coloring mode", OFFSET(outer), AV_OPT_TYPE_INT, {.i64=NORMALIZED_ITERATION_COUNT}, 0, INT_MAX, FLAGS, "outer" },
+ {"iteration_count", "set iteration count mode", 0, AV_OPT_TYPE_CONST, {.i64=ITERATION_COUNT}, INT_MIN, INT_MAX, FLAGS, "outer" },
+ {"normalized_iteration_count", "set normalized iteration count mode", 0, AV_OPT_TYPE_CONST, {.i64=NORMALIZED_ITERATION_COUNT}, INT_MIN, INT_MAX, FLAGS, "outer" },
+ {"white", "set white mode", 0, AV_OPT_TYPE_CONST, {.i64=WHITE}, INT_MIN, INT_MAX, FLAGS, "outer" },
+ {"outz", "set outz mode", 0, AV_OPT_TYPE_CONST, {.i64=OUTZ}, INT_MIN, INT_MAX, FLAGS, "outer" },
+
+ {"inner", "set inner coloring mode", OFFSET(inner), AV_OPT_TYPE_INT, {.i64=MINCOL}, 0, INT_MAX, FLAGS, "inner" },
+ {"black", "set black mode", 0, AV_OPT_TYPE_CONST, {.i64=BLACK}, INT_MIN, INT_MAX, FLAGS, "inner"},
+ {"period", "set period mode", 0, AV_OPT_TYPE_CONST, {.i64=PERIOD}, INT_MIN, INT_MAX, FLAGS, "inner"},
+ {"convergence", "show time until convergence", 0, AV_OPT_TYPE_CONST, {.i64=CONVTIME}, INT_MIN, INT_MAX, FLAGS, "inner"},
+ {"mincol", "color based on point closest to the origin of the iterations", 0, AV_OPT_TYPE_CONST, {.i64=MINCOL}, INT_MIN, INT_MAX, FLAGS, "inner"},
+
+ {NULL},
+};
+
+AVFILTER_DEFINE_CLASS(mandelbrot);
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ MBContext *s = ctx->priv;
+
+ s->bailout *= s->bailout;
+
+ s->start_scale /=s->h;
+ s->end_scale /=s->h;
+
+ s->cache_allocated = s->w * s->h * 3;
+ s->cache_used = 0;
+ s->point_cache= av_malloc_array(s->cache_allocated, sizeof(*s->point_cache));
+ s-> next_cache= av_malloc_array(s->cache_allocated, sizeof(*s-> next_cache));
+ s-> zyklus = av_malloc_array(s->maxiter + 16, sizeof(*s->zyklus));
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ MBContext *s = ctx->priv;
+
+ av_freep(&s->point_cache);
+ av_freep(&s-> next_cache);
+ av_freep(&s->zyklus);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_0BGR32,
+ AV_PIX_FMT_NONE
+ };
+
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static int config_props(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->src;
+ MBContext *s = ctx->priv;
+
+ if (av_image_check_size(s->w, s->h, 0, ctx) < 0)
+ return AVERROR(EINVAL);
+
+ inlink->w = s->w;
+ inlink->h = s->h;
+ inlink->time_base = av_inv_q(s->frame_rate);
+
+ return 0;
+}
+
+static void fill_from_cache(AVFilterContext *ctx, uint32_t *color, int *in_cidx, int *out_cidx, double py, double scale){
+ MBContext *s = ctx->priv;
+ if(s->morphamp)
+ return;
+ for(; *in_cidx < s->cache_used; (*in_cidx)++){
+ Point *p= &s->point_cache[*in_cidx];
+ int x;
+ if(p->p[1] > py)
+ break;
+ x= lrint((p->p[0] - s->start_x) / scale + s->w/2);
+ if(x<0 || x >= s->w)
+ continue;
+ if(color) color[x] = p->val;
+ if(out_cidx && *out_cidx < s->cache_allocated)
+ s->next_cache[(*out_cidx)++]= *p;
+ }
+}
+
+static int interpol(MBContext *s, uint32_t *color, int x, int y, int linesize)
+{
+ uint32_t a,b,c,d, i;
+ uint32_t ipol=0xFF000000;
+ int dist;
+
+ if(!x || !y || x+1==s->w || y+1==s->h)
+ return 0;
+
+ dist= FFMAX(FFABS(x-(s->w>>1))*s->h, FFABS(y-(s->h>>1))*s->w);
+
+ if(dist<(s->w*s->h>>3))
+ return 0;
+
+ a=color[(x+1) + (y+0)*linesize];
+ b=color[(x-1) + (y+1)*linesize];
+ c=color[(x+0) + (y+1)*linesize];
+ d=color[(x+1) + (y+1)*linesize];
+
+ if(a&&c){
+ b= color[(x-1) + (y+0)*linesize];
+ d= color[(x+0) + (y-1)*linesize];
+ }else if(b&&d){
+ a= color[(x+1) + (y-1)*linesize];
+ c= color[(x-1) + (y-1)*linesize];
+ }else if(c){
+ d= color[(x+0) + (y-1)*linesize];
+ a= color[(x-1) + (y+0)*linesize];
+ b= color[(x+1) + (y-1)*linesize];
+ }else if(d){
+ c= color[(x-1) + (y-1)*linesize];
+ a= color[(x-1) + (y+0)*linesize];
+ b= color[(x+1) + (y-1)*linesize];
+ }else
+ return 0;
+
+ for(i=0; i<3; i++){
+ int s= 8*i;
+ uint8_t ac= a>>s;
+ uint8_t bc= b>>s;
+ uint8_t cc= c>>s;
+ uint8_t dc= d>>s;
+ int ipolab= (ac + bc);
+ int ipolcd= (cc + dc);
+ if(FFABS(ipolab - ipolcd) > 5)
+ return 0;
+ if(FFABS(ac-bc)+FFABS(cc-dc) > 20)
+ return 0;
+ ipol |= ((ipolab + ipolcd + 2)/4)<<s;
+ }
+ color[x + y*linesize]= ipol;
+ return 1;
+}
+
+static void draw_mandelbrot(AVFilterContext *ctx, uint32_t *color, int linesize, int64_t pts)
+{
+ MBContext *s = ctx->priv;
+ int x,y,i, in_cidx=0, next_cidx=0, tmp_cidx;
+ double scale= s->start_scale*pow(s->end_scale/s->start_scale, pts/s->end_pts);
+ int use_zyklus=0;
+ fill_from_cache(ctx, NULL, &in_cidx, NULL, s->start_y+scale*(-s->h/2-0.5), scale);
+ tmp_cidx= in_cidx;
+ memset(color, 0, sizeof(*color)*s->w);
+ for(y=0; y<s->h; y++){
+ int y1= y+1;
+ const double ci=s->start_y+scale*(y-s->h/2);
+ fill_from_cache(ctx, NULL, &in_cidx, &next_cidx, ci, scale);
+ if(y1<s->h){
+ memset(color+linesize*y1, 0, sizeof(*color)*s->w);
+ fill_from_cache(ctx, color+linesize*y1, &tmp_cidx, NULL, ci + 3*scale/2, scale);
+ }
+
+ for(x=0; x<s->w; x++){
+ float av_uninit(epsilon);
+ const double cr=s->start_x+scale*(x-s->w/2);
+ double zr=cr;
+ double zi=ci;
+ uint32_t c=0;
+ double dv= s->dither / (double)(1LL<<32);
+ s->dither= s->dither*1664525+1013904223;
+
+ if(color[x + y*linesize] & 0xFF000000)
+ continue;
+ if(!s->morphamp){
+ if(interpol(s, color, x, y, linesize)){
+ if(next_cidx < s->cache_allocated){
+ s->next_cache[next_cidx ].p[0]= cr;
+ s->next_cache[next_cidx ].p[1]= ci;
+ s->next_cache[next_cidx++].val = color[x + y*linesize];
+ }
+ continue;
+ }
+ }else{
+ zr += cos(pts * s->morphxf) * s->morphamp;
+ zi += sin(pts * s->morphyf) * s->morphamp;
+ }
+
+ use_zyklus= (x==0 || s->inner!=BLACK ||color[x-1 + y*linesize] == 0xFF000000);
+ if(use_zyklus)
+ epsilon= scale*(abs(x-s->w/2) + abs(y-s->h/2))/s->w;
+
+#define Z_Z2_C(outr,outi,inr,ini)\
+ outr= inr*inr - ini*ini + cr;\
+ outi= 2*inr*ini + ci;
+
+#define Z_Z2_C_ZYKLUS(outr,outi,inr,ini, Z)\
+ Z_Z2_C(outr,outi,inr,ini)\
+ if(use_zyklus){\
+ if(Z && fabs(s->zyklus[i>>1][0]-outr)+fabs(s->zyklus[i>>1][1]-outi) <= epsilon)\
+ break;\
+ }\
+ s->zyklus[i][0]= outr;\
+ s->zyklus[i][1]= outi;\
+
+
+
+ for(i=0; i<s->maxiter-8; i++){
+ double t;
+ Z_Z2_C_ZYKLUS(t, zi, zr, zi, 0)
+ i++;
+ Z_Z2_C_ZYKLUS(zr, zi, t, zi, 1)
+ i++;
+ Z_Z2_C_ZYKLUS(t, zi, zr, zi, 0)
+ i++;
+ Z_Z2_C_ZYKLUS(zr, zi, t, zi, 1)
+ i++;
+ Z_Z2_C_ZYKLUS(t, zi, zr, zi, 0)
+ i++;
+ Z_Z2_C_ZYKLUS(zr, zi, t, zi, 1)
+ i++;
+ Z_Z2_C_ZYKLUS(t, zi, zr, zi, 0)
+ i++;
+ Z_Z2_C_ZYKLUS(zr, zi, t, zi, 1)
+ if(zr*zr + zi*zi > s->bailout){
+ i-= FFMIN(7, i);
+ for(; i<s->maxiter; i++){
+ zr= s->zyklus[i][0];
+ zi= s->zyklus[i][1];
+ if(zr*zr + zi*zi > s->bailout){
+ switch(s->outer){
+ case ITERATION_COUNT:
+ zr = i;
+ c = lrintf((sinf(zr)+1)*127) + lrintf((sinf(zr/1.234)+1)*127)*256*256 + lrintf((sinf(zr/100)+1)*127)*256;
+ break;
+ case NORMALIZED_ITERATION_COUNT:
+ zr = i + log2(log(s->bailout) / log(zr*zr + zi*zi));
+ c = lrintf((sinf(zr)+1)*127) + lrintf((sinf(zr/1.234)+1)*127)*256*256 + lrintf((sinf(zr/100)+1)*127)*256;
+ break;
+ case WHITE:
+ c = 0xFFFFFF;
+ break;
+ case OUTZ:
+ zr /= s->bailout;
+ zi /= s->bailout;
+ c = (((int)(zr*128+128))&0xFF)*256 + (((int)(zi*128+128))&0xFF);
+ }
+ break;
+ }
+ }
+ break;
+ }
+ }
+ if(!c){
+ if(s->inner==PERIOD){
+ int j;
+ for(j=i-1; j; j--)
+ if(SQR(s->zyklus[j][0]-zr) + SQR(s->zyklus[j][1]-zi) < epsilon*epsilon*10)
+ break;
+ if(j){
+ c= i-j;
+ c= ((c<<5)&0xE0) + ((c<<10)&0xE000) + ((c<<15)&0xE00000);
+ }
+ }else if(s->inner==CONVTIME){
+ c= floor(i*255.0/s->maxiter+dv)*0x010101;
+ } else if(s->inner==MINCOL){
+ int j;
+ double closest=9999;
+ int closest_index=0;
+ for(j=i-1; j>=0; j--)
+ if(SQR(s->zyklus[j][0]) + SQR(s->zyklus[j][1]) < closest){
+ closest= SQR(s->zyklus[j][0]) + SQR(s->zyklus[j][1]);
+ closest_index= j;
+ }
+ closest = sqrt(closest);
+ c= lrintf((s->zyklus[closest_index][0]/closest+1)*127+dv) + lrintf((s->zyklus[closest_index][1]/closest+1)*127+dv)*256;
+ }
+ }
+ c |= 0xFF000000;
+ color[x + y*linesize]= c;
+ if(next_cidx < s->cache_allocated){
+ s->next_cache[next_cidx ].p[0]= cr;
+ s->next_cache[next_cidx ].p[1]= ci;
+ s->next_cache[next_cidx++].val = c;
+ }
+ }
+ fill_from_cache(ctx, NULL, &in_cidx, &next_cidx, ci + scale/2, scale);
+ }
+ FFSWAP(void*, s->next_cache, s->point_cache);
+ s->cache_used = next_cidx;
+ if(s->cache_used == s->cache_allocated)
+ av_log(ctx, AV_LOG_INFO, "Mandelbrot cache is too small!\n");
+}
+
+static int request_frame(AVFilterLink *link)
+{
+ MBContext *s = link->src->priv;
+ AVFrame *picref = ff_get_video_buffer(link, s->w, s->h);
+ if (!picref)
+ return AVERROR(ENOMEM);
+
+ picref->sample_aspect_ratio = (AVRational) {1, 1};
+ picref->pts = s->pts++;
+
+ draw_mandelbrot(link->src, (uint32_t*)picref->data[0], picref->linesize[0]/4, picref->pts);
+ return ff_filter_frame(link, picref);
+}
+
+static const AVFilterPad mandelbrot_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .request_frame = request_frame,
+ .config_props = config_props,
+ },
+ { NULL }
+};
+
+AVFilter ff_vsrc_mandelbrot = {
+ .name = "mandelbrot",
+ .description = NULL_IF_CONFIG_SMALL("Render a Mandelbrot fractal."),
+ .priv_size = sizeof(MBContext),
+ .priv_class = &mandelbrot_class,
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = NULL,
+ .outputs = mandelbrot_outputs,
+};
diff --git a/libavfilter/vsrc_movie.c b/libavfilter/vsrc_movie.c
deleted file mode 100644
index 5989a59190..0000000000
--- a/libavfilter/vsrc_movie.c
+++ /dev/null
@@ -1,292 +0,0 @@
-/*
- * Copyright (c) 2010 Stefano Sabatini
- * Copyright (c) 2008 Victor Paesa
- *
- * This file is part of Libav.
- *
- * Libav is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * Libav is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-/**
- * @file
- * movie video source
- *
- * @todo use direct rendering (no allocation of a new frame)
- * @todo support a PTS correction mechanism
- * @todo support more than one output stream
- */
-
-#include <float.h>
-#include <stdint.h>
-
-#include "libavutil/attributes.h"
-#include "libavutil/avstring.h"
-#include "libavutil/opt.h"
-#include "libavutil/imgutils.h"
-#include "libavformat/avformat.h"
-#include "avfilter.h"
-#include "formats.h"
-#include "internal.h"
-#include "video.h"
-
-typedef struct MovieContext {
- const AVClass *class;
- int64_t seek_point; ///< seekpoint in microseconds
- double seek_point_d;
- char *format_name;
- char *file_name;
- int stream_index;
-
- AVFormatContext *format_ctx;
- AVCodecContext *codec_ctx;
- int is_done;
- AVFrame *frame; ///< video frame to store the decoded images in
-
- int w, h;
-} MovieContext;
-
-#define OFFSET(x) offsetof(MovieContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
-
-static const AVOption movie_options[]= {
- { "filename", NULL, OFFSET(file_name), AV_OPT_TYPE_STRING, .flags = FLAGS },
- { "format_name", "set format name", OFFSET(format_name), AV_OPT_TYPE_STRING, .flags = FLAGS },
- { "f", "set format name", OFFSET(format_name), AV_OPT_TYPE_STRING, .flags = FLAGS },
- { "stream_index", "set stream index", OFFSET(stream_index), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS },
- { "si", "set stream index", OFFSET(stream_index), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS },
- { "seek_point", "set seekpoint (seconds)", OFFSET(seek_point_d), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, 0, (INT64_MAX-1) / 1000000, FLAGS },
- { "sp", "set seekpoint (seconds)", OFFSET(seek_point_d), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, 0, (INT64_MAX-1) / 1000000, FLAGS },
- { NULL },
-};
-
-static const char *movie_get_name(void *ctx)
-{
- return "movie";
-}
-
-static const AVClass movie_class = {
- "MovieContext",
- movie_get_name,
- movie_options
-};
-
-static av_cold int movie_init(AVFilterContext *ctx)
-{
- MovieContext *movie = ctx->priv;
- AVInputFormat *iformat = NULL;
- AVStream *st;
- AVCodec *codec;
- int ret;
- int64_t timestamp;
-
- av_register_all();
-
- // Try to find the movie format (container)
- iformat = movie->format_name ? av_find_input_format(movie->format_name) : NULL;
-
- movie->format_ctx = NULL;
- if ((ret = avformat_open_input(&movie->format_ctx, movie->file_name, iformat, NULL)) < 0) {
- av_log(ctx, AV_LOG_ERROR,
- "Failed to avformat_open_input '%s'\n", movie->file_name);
- return ret;
- }
- if ((ret = avformat_find_stream_info(movie->format_ctx, NULL)) < 0)
- av_log(ctx, AV_LOG_WARNING, "Failed to find stream info\n");
-
- // if seeking requested, we execute it
- if (movie->seek_point > 0) {
- timestamp = movie->seek_point;
- // add the stream start time, should it exist
- if (movie->format_ctx->start_time != AV_NOPTS_VALUE) {
- if (timestamp > INT64_MAX - movie->format_ctx->start_time) {
- av_log(ctx, AV_LOG_ERROR,
- "%s: seek value overflow with start_time:%"PRId64" seek_point:%"PRId64"\n",
- movie->file_name, movie->format_ctx->start_time, movie->seek_point);
- return AVERROR(EINVAL);
- }
- timestamp += movie->format_ctx->start_time;
- }
- if ((ret = av_seek_frame(movie->format_ctx, -1, timestamp, AVSEEK_FLAG_BACKWARD)) < 0) {
- av_log(ctx, AV_LOG_ERROR, "%s: could not seek to position %"PRId64"\n",
- movie->file_name, timestamp);
- return ret;
- }
- }
-
- /* select the video stream */
- if ((ret = av_find_best_stream(movie->format_ctx, AVMEDIA_TYPE_VIDEO,
- movie->stream_index, -1, NULL, 0)) < 0) {
- av_log(ctx, AV_LOG_ERROR, "No video stream with index '%d' found\n",
- movie->stream_index);
- return ret;
- }
- movie->stream_index = ret;
- st = movie->format_ctx->streams[movie->stream_index];
-
- /*
- * So now we've got a pointer to the so-called codec context for our video
- * stream, but we still have to find the actual codec and open it.
- */
- codec = avcodec_find_decoder(st->codecpar->codec_id);
- if (!codec) {
- av_log(ctx, AV_LOG_ERROR, "Failed to find any codec\n");
- return AVERROR(EINVAL);
- }
-
- movie->codec_ctx = avcodec_alloc_context3(codec);
- if (!movie->codec_ctx)
- return AVERROR(ENOMEM);
-
- ret = avcodec_parameters_to_context(movie->codec_ctx, st->codecpar);
- if (ret < 0)
- return ret;
-
- movie->codec_ctx->refcounted_frames = 1;
-
- if ((ret = avcodec_open2(movie->codec_ctx, codec, NULL)) < 0) {
- av_log(ctx, AV_LOG_ERROR, "Failed to open codec\n");
- return ret;
- }
-
- movie->w = movie->codec_ctx->width;
- movie->h = movie->codec_ctx->height;
-
- av_log(ctx, AV_LOG_VERBOSE, "seek_point:%"PRIi64" format_name:%s file_name:%s stream_index:%d\n",
- movie->seek_point, movie->format_name, movie->file_name,
- movie->stream_index);
-
- return 0;
-}
-
-static av_cold int init(AVFilterContext *ctx)
-{
- MovieContext *movie = ctx->priv;
-
- movie->seek_point = movie->seek_point_d * 1000000 + 0.5;
-
- return movie_init(ctx);
-}
-
-static av_cold void uninit(AVFilterContext *ctx)
-{
- MovieContext *movie = ctx->priv;
-
- avcodec_free_context(&movie->codec_ctx);
- if (movie->format_ctx)
- avformat_close_input(&movie->format_ctx);
- av_frame_free(&movie->frame);
-}
-
-static int query_formats(AVFilterContext *ctx)
-{
- MovieContext *movie = ctx->priv;
- enum AVPixelFormat pix_fmts[] = { movie->codec_ctx->pix_fmt, AV_PIX_FMT_NONE };
-
- ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
- return 0;
-}
-
-static int config_output_props(AVFilterLink *outlink)
-{
- MovieContext *movie = outlink->src->priv;
-
- outlink->w = movie->w;
- outlink->h = movie->h;
- outlink->time_base = movie->format_ctx->streams[movie->stream_index]->time_base;
-
- return 0;
-}
-
-static int movie_get_frame(AVFilterLink *outlink)
-{
- MovieContext *movie = outlink->src->priv;
- AVPacket pkt;
- int ret, frame_decoded;
-
- if (movie->is_done == 1)
- return 0;
-
- movie->frame = av_frame_alloc();
- if (!movie->frame)
- return AVERROR(ENOMEM);
-
- while ((ret = av_read_frame(movie->format_ctx, &pkt)) >= 0) {
- // Is this a packet from the video stream?
- if (pkt.stream_index == movie->stream_index) {
- avcodec_decode_video2(movie->codec_ctx, movie->frame, &frame_decoded, &pkt);
-
- if (frame_decoded) {
- av_log(outlink->src, AV_LOG_TRACE,
- "movie_get_frame(): file:'%s' pts:%"PRId64" time:%f aspect:%d/%d\n",
- movie->file_name, movie->frame->pts,
- (double)movie->frame->pts *
- av_q2d(movie->format_ctx->streams[movie->stream_index]->time_base),
- movie->frame->sample_aspect_ratio.num,
- movie->frame->sample_aspect_ratio.den);
- // We got it. Free the packet since we are returning
- av_packet_unref(&pkt);
-
- return 0;
- }
- }
- // Free the packet that was allocated by av_read_frame
- av_packet_unref(&pkt);
- }
-
- // On multi-frame source we should stop the mixing process when
- // the movie source does not have more frames
- if (ret == AVERROR_EOF)
- movie->is_done = 1;
- return ret;
-}
-
-static int request_frame(AVFilterLink *outlink)
-{
- MovieContext *movie = outlink->src->priv;
- int ret;
-
- if (movie->is_done)
- return AVERROR_EOF;
- if ((ret = movie_get_frame(outlink)) < 0)
- return ret;
-
- ret = ff_filter_frame(outlink, movie->frame);
- movie->frame = NULL;
-
- return ret;
-}
-
-static const AVFilterPad avfilter_vsrc_movie_outputs[] = {
- {
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .request_frame = request_frame,
- .config_props = config_output_props,
- },
- { NULL }
-};
-
-AVFilter ff_vsrc_movie = {
- .name = "movie",
- .description = NULL_IF_CONFIG_SMALL("Read from a movie source."),
- .priv_size = sizeof(MovieContext),
- .priv_class = &movie_class,
- .init = init,
- .uninit = uninit,
- .query_formats = query_formats,
-
- .inputs = NULL,
- .outputs = avfilter_vsrc_movie_outputs,
-};
diff --git a/libavfilter/vsrc_mptestsrc.c b/libavfilter/vsrc_mptestsrc.c
new file mode 100644
index 0000000000..c5fdea75dc
--- /dev/null
+++ b/libavfilter/vsrc_mptestsrc.c
@@ -0,0 +1,363 @@
+/*
+ * Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/**
+ * @file
+ * MP test source, ported from MPlayer libmpcodecs/vf_test.c
+ */
+
+#include "libavutil/avstring.h"
+#include "libavutil/opt.h"
+#include "libavutil/parseutils.h"
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+#include "internal.h"
+#include "formats.h"
+#include "video.h"
+
+#define WIDTH 512
+#define HEIGHT 512
+
+enum test_type {
+ TEST_DC_LUMA,
+ TEST_DC_CHROMA,
+ TEST_FREQ_LUMA,
+ TEST_FREQ_CHROMA,
+ TEST_AMP_LUMA,
+ TEST_AMP_CHROMA,
+ TEST_CBP,
+ TEST_MV,
+ TEST_RING1,
+ TEST_RING2,
+ TEST_ALL,
+ TEST_NB
+};
+
+typedef struct MPTestContext {
+ const AVClass *class;
+ AVRational frame_rate;
+ int64_t pts, max_pts, duration;
+ int hsub, vsub;
+ int test; ///< test_type
+} MPTestContext;
+
+#define OFFSET(x) offsetof(MPTestContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption mptestsrc_options[]= {
+ { "rate", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, FLAGS },
+ { "r", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, FLAGS },
+ { "duration", "set video duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = -1}, -1, INT64_MAX, FLAGS },
+ { "d", "set video duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = -1}, -1, INT64_MAX, FLAGS },
+
+ { "test", "set test to perform", OFFSET(test), AV_OPT_TYPE_INT, {.i64=TEST_ALL}, 0, INT_MAX, FLAGS, "test" },
+ { "t", "set test to perform", OFFSET(test), AV_OPT_TYPE_INT, {.i64=TEST_ALL}, 0, INT_MAX, FLAGS, "test" },
+ { "dc_luma", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_DC_LUMA}, INT_MIN, INT_MAX, FLAGS, "test" },
+ { "dc_chroma", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_DC_CHROMA}, INT_MIN, INT_MAX, FLAGS, "test" },
+ { "freq_luma", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_FREQ_LUMA}, INT_MIN, INT_MAX, FLAGS, "test" },
+ { "freq_chroma", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_FREQ_CHROMA}, INT_MIN, INT_MAX, FLAGS, "test" },
+ { "amp_luma", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_AMP_LUMA}, INT_MIN, INT_MAX, FLAGS, "test" },
+ { "amp_chroma", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_AMP_CHROMA}, INT_MIN, INT_MAX, FLAGS, "test" },
+ { "cbp", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_CBP}, INT_MIN, INT_MAX, FLAGS, "test" },
+ { "mv", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_MV}, INT_MIN, INT_MAX, FLAGS, "test" },
+ { "ring1", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_RING1}, INT_MIN, INT_MAX, FLAGS, "test" },
+ { "ring2", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_RING2}, INT_MIN, INT_MAX, FLAGS, "test" },
+ { "all", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_ALL}, INT_MIN, INT_MAX, FLAGS, "test" },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(mptestsrc);
+
+static double c[64];
+
+static void init_idct(void)
+{
+ int i, j;
+
+ for (i = 0; i < 8; i++) {
+ double s = i == 0 ? sqrt(0.125) : 0.5;
+
+ for (j = 0; j < 8; j++)
+ c[i*8+j] = s*cos((M_PI/8.0)*i*(j+0.5));
+ }
+}
+
+static void idct(uint8_t *dst, int dst_linesize, int src[64])
+{
+ int i, j, k;
+ double tmp[64];
+
+ for (i = 0; i < 8; i++) {
+ for (j = 0; j < 8; j++) {
+ double sum = 0.0;
+
+ for (k = 0; k < 8; k++)
+ sum += c[k*8+j] * src[8*i+k];
+
+ tmp[8*i+j] = sum;
+ }
+ }
+
+ for (j = 0; j < 8; j++) {
+ for (i = 0; i < 8; i++) {
+ double sum = 0.0;
+
+ for (k = 0; k < 8; k++)
+ sum += c[k*8+i]*tmp[8*k+j];
+
+ dst[dst_linesize*i + j] = av_clip_uint8(lrint(sum));
+ }
+ }
+}
+
+static void draw_dc(uint8_t *dst, int dst_linesize, int color, int w, int h)
+{
+ int x, y;
+
+ for (y = 0; y < h; y++)
+ for (x = 0; x < w; x++)
+ dst[x + y*dst_linesize] = color;
+}
+
+static void draw_basis(uint8_t *dst, int dst_linesize, int amp, int freq, int dc)
+{
+ int src[64];
+
+ memset(src, 0, 64*sizeof(int));
+ src[0] = dc;
+ if (amp)
+ src[freq] = amp;
+ idct(dst, dst_linesize, src);
+}
+
+static void draw_cbp(uint8_t *dst[3], int dst_linesize[3], int cbp, int amp, int dc)
+{
+ if (cbp&1) draw_basis(dst[0] , dst_linesize[0], amp, 1, dc);
+ if (cbp&2) draw_basis(dst[0]+8 , dst_linesize[0], amp, 1, dc);
+ if (cbp&4) draw_basis(dst[0]+ 8*dst_linesize[0], dst_linesize[0], amp, 1, dc);
+ if (cbp&8) draw_basis(dst[0]+8+8*dst_linesize[0], dst_linesize[0], amp, 1, dc);
+ if (cbp&16) draw_basis(dst[1] , dst_linesize[1], amp, 1, dc);
+ if (cbp&32) draw_basis(dst[2] , dst_linesize[2], amp, 1, dc);
+}
+
+static void dc_test(uint8_t *dst, int dst_linesize, int w, int h, int off)
+{
+ const int step = FFMAX(256/(w*h/256), 1);
+ int x, y, color = off;
+
+ for (y = 0; y < h; y += 16) {
+ for (x = 0; x < w; x += 16) {
+ draw_dc(dst + x + y*dst_linesize, dst_linesize, color, 8, 8);
+ color += step;
+ }
+ }
+}
+
+static void freq_test(uint8_t *dst, int dst_linesize, int off)
+{
+ int x, y, freq = 0;
+
+ for (y = 0; y < 8*16; y += 16) {
+ for (x = 0; x < 8*16; x += 16) {
+ draw_basis(dst + x + y*dst_linesize, dst_linesize, 4*(96+off), freq, 128*8);
+ freq++;
+ }
+ }
+}
+
+static void amp_test(uint8_t *dst, int dst_linesize, int off)
+{
+ int x, y, amp = off;
+
+ for (y = 0; y < 16*16; y += 16) {
+ for (x = 0; x < 16*16; x += 16) {
+ draw_basis(dst + x + y*dst_linesize, dst_linesize, 4*amp, 1, 128*8);
+ amp++;
+ }
+ }
+}
+
+static void cbp_test(uint8_t *dst[3], int dst_linesize[3], int off)
+{
+ int x, y, cbp = 0;
+
+ for (y = 0; y < 16*8; y += 16) {
+ for (x = 0; x < 16*8; x += 16) {
+ uint8_t *dst1[3];
+ dst1[0] = dst[0] + x*2 + y*2*dst_linesize[0];
+ dst1[1] = dst[1] + x + y* dst_linesize[1];
+ dst1[2] = dst[2] + x + y* dst_linesize[2];
+
+ draw_cbp(dst1, dst_linesize, cbp, (64+off)*4, 128*8);
+ cbp++;
+ }
+ }
+}
+
+static void mv_test(uint8_t *dst, int dst_linesize, int off)
+{
+ int x, y;
+
+ for (y = 0; y < 16*16; y++) {
+ if (y&16)
+ continue;
+ for (x = 0; x < 16*16; x++)
+ dst[x + y*dst_linesize] = x + off*8/(y/32+1);
+ }
+}
+
+static void ring1_test(uint8_t *dst, int dst_linesize, int off)
+{
+ int x, y, color = 0;
+
+ for (y = off; y < 16*16; y += 16) {
+ for (x = off; x < 16*16; x += 16) {
+ draw_dc(dst + x + y*dst_linesize, dst_linesize, ((x+y)&16) ? color : -color, 16, 16);
+ color++;
+ }
+ }
+}
+
+static void ring2_test(uint8_t *dst, int dst_linesize, int off)
+{
+ int x, y;
+
+ for (y = 0; y < 16*16; y++) {
+ for (x = 0; x < 16*16; x++) {
+ double d = hypot(x-8*16, y-8*16);
+ double r = d/20 - (int)(d/20);
+ if (r < off/30.0) {
+ dst[x + y*dst_linesize] = 255;
+ dst[x + y*dst_linesize+256] = 0;
+ } else {
+ dst[x + y*dst_linesize] = x;
+ dst[x + y*dst_linesize+256] = x;
+ }
+ }
+ }
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ MPTestContext *test = ctx->priv;
+
+ test->max_pts = test->duration >= 0 ?
+ av_rescale_q(test->duration, AV_TIME_BASE_Q, av_inv_q(test->frame_rate)) : -1;
+ test->pts = 0;
+
+ av_log(ctx, AV_LOG_VERBOSE, "rate:%d/%d duration:%f\n",
+ test->frame_rate.num, test->frame_rate.den,
+ test->duration < 0 ? -1 : test->max_pts * av_q2d(av_inv_q(test->frame_rate)));
+ init_idct();
+
+ return 0;
+}
+
+static int config_props(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ MPTestContext *test = ctx->priv;
+ const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(outlink->format);
+
+ test->hsub = pix_desc->log2_chroma_w;
+ test->vsub = pix_desc->log2_chroma_h;
+
+ outlink->w = WIDTH;
+ outlink->h = HEIGHT;
+ outlink->time_base = av_inv_q(test->frame_rate);
+
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE
+ };
+
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ MPTestContext *test = outlink->src->priv;
+ AVFrame *picref;
+ int w = WIDTH, h = HEIGHT,
+ cw = AV_CEIL_RSHIFT(w, test->hsub), ch = AV_CEIL_RSHIFT(h, test->vsub);
+ unsigned int frame = outlink->frame_count_in;
+ enum test_type tt = test->test;
+ int i;
+
+ if (test->max_pts >= 0 && test->pts > test->max_pts)
+ return AVERROR_EOF;
+ picref = ff_get_video_buffer(outlink, w, h);
+ if (!picref)
+ return AVERROR(ENOMEM);
+ picref->pts = test->pts++;
+
+ // clean image
+ for (i = 0; i < h; i++)
+ memset(picref->data[0] + i*picref->linesize[0], 0, w);
+ for (i = 0; i < ch; i++) {
+ memset(picref->data[1] + i*picref->linesize[1], 128, cw);
+ memset(picref->data[2] + i*picref->linesize[2], 128, cw);
+ }
+
+ if (tt == TEST_ALL && frame%30) /* draw a black frame at the beginning of each test */
+ tt = (frame/30)%(TEST_NB-1);
+
+ switch (tt) {
+ case TEST_DC_LUMA: dc_test(picref->data[0], picref->linesize[0], 256, 256, frame%30); break;
+ case TEST_DC_CHROMA: dc_test(picref->data[1], picref->linesize[1], 256, 256, frame%30); break;
+ case TEST_FREQ_LUMA: freq_test(picref->data[0], picref->linesize[0], frame%30); break;
+ case TEST_FREQ_CHROMA: freq_test(picref->data[1], picref->linesize[1], frame%30); break;
+ case TEST_AMP_LUMA: amp_test(picref->data[0], picref->linesize[0], frame%30); break;
+ case TEST_AMP_CHROMA: amp_test(picref->data[1], picref->linesize[1], frame%30); break;
+ case TEST_CBP: cbp_test(picref->data , picref->linesize , frame%30); break;
+ case TEST_MV: mv_test(picref->data[0], picref->linesize[0], frame%30); break;
+ case TEST_RING1: ring1_test(picref->data[0], picref->linesize[0], frame%30); break;
+ case TEST_RING2: ring2_test(picref->data[0], picref->linesize[0], frame%30); break;
+ }
+
+ return ff_filter_frame(outlink, picref);
+}
+
+static const AVFilterPad mptestsrc_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .request_frame = request_frame,
+ .config_props = config_props,
+ },
+ { NULL }
+};
+
+AVFilter ff_vsrc_mptestsrc = {
+ .name = "mptestsrc",
+ .description = NULL_IF_CONFIG_SMALL("Generate various test pattern."),
+ .priv_size = sizeof(MPTestContext),
+ .priv_class = &mptestsrc_class,
+ .init = init,
+ .query_formats = query_formats,
+ .inputs = NULL,
+ .outputs = mptestsrc_outputs,
+};
diff --git a/libavfilter/vsrc_nullsrc.c b/libavfilter/vsrc_nullsrc.c
deleted file mode 100644
index 63e90fdd1a..0000000000
--- a/libavfilter/vsrc_nullsrc.c
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * This file is part of Libav.
- *
- * Libav is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * Libav is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-/**
- * @file
- * null video source
- */
-
-#include <stdio.h>
-
-#include "libavutil/avstring.h"
-#include "libavutil/eval.h"
-#include "libavutil/internal.h"
-#include "libavutil/mathematics.h"
-#include "libavutil/opt.h"
-#include "libavutil/parseutils.h"
-#include "avfilter.h"
-#include "formats.h"
-#include "internal.h"
-
-static const char *const var_names[] = {
- "E",
- "PHI",
- "PI",
- "AVTB", /* default timebase 1/AV_TIME_BASE */
- NULL
-};
-
-enum var_name {
- VAR_E,
- VAR_PHI,
- VAR_PI,
- VAR_AVTB,
- VAR_VARS_NB
-};
-
-typedef struct NullContext {
- const AVClass *class;
- int w, h;
- char *tb_expr;
- double var_values[VAR_VARS_NB];
-} NullContext;
-
-static int config_props(AVFilterLink *outlink)
-{
- AVFilterContext *ctx = outlink->src;
- NullContext *priv = ctx->priv;
- AVRational tb;
- int ret;
- double res;
-
- priv->var_values[VAR_E] = M_E;
- priv->var_values[VAR_PHI] = M_PHI;
- priv->var_values[VAR_PI] = M_PI;
- priv->var_values[VAR_AVTB] = av_q2d(AV_TIME_BASE_Q);
-
- if ((ret = av_expr_parse_and_eval(&res, priv->tb_expr, var_names, priv->var_values,
- NULL, NULL, NULL, NULL, NULL, 0, NULL)) < 0) {
- av_log(ctx, AV_LOG_ERROR, "Invalid expression '%s' for timebase.\n", priv->tb_expr);
- return ret;
- }
- tb = av_d2q(res, INT_MAX);
- if (tb.num <= 0 || tb.den <= 0) {
- av_log(ctx, AV_LOG_ERROR,
- "Invalid non-positive value for the timebase %d/%d.\n",
- tb.num, tb.den);
- return AVERROR(EINVAL);
- }
-
- outlink->w = priv->w;
- outlink->h = priv->h;
- outlink->time_base = tb;
-
- av_log(outlink->src, AV_LOG_VERBOSE, "w:%d h:%d tb:%d/%d\n", priv->w, priv->h,
- tb.num, tb.den);
-
- return 0;
-}
-
-static int request_frame(AVFilterLink *link)
-{
- return -1;
-}
-
-#define OFFSET(x) offsetof(NullContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
-static const AVOption options[] = {
- { "width", NULL, OFFSET(w), AV_OPT_TYPE_INT, { .i64 = 352 }, 1, INT_MAX, FLAGS },
- { "height", NULL, OFFSET(h), AV_OPT_TYPE_INT, { .i64 = 288 }, 1, INT_MAX, FLAGS },
- { "timebase", NULL, OFFSET(tb_expr), AV_OPT_TYPE_STRING, { .str = "AVTB" }, 0, 0, FLAGS },
- { NULL },
-};
-
-static const AVClass nullsrc_class = {
- .class_name = "nullsrc",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
-
-static const AVFilterPad avfilter_vsrc_nullsrc_outputs[] = {
- {
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .config_props = config_props,
- .request_frame = request_frame,
- },
- { NULL }
-};
-
-AVFilter ff_vsrc_nullsrc = {
- .name = "nullsrc",
- .description = NULL_IF_CONFIG_SMALL("Null video source, never return images."),
-
- .priv_size = sizeof(NullContext),
- .priv_class = &nullsrc_class,
-
- .inputs = NULL,
-
- .outputs = avfilter_vsrc_nullsrc_outputs,
-};
diff --git a/libavfilter/vsrc_testsrc.c b/libavfilter/vsrc_testsrc.c
index 5bd458c997..422f6d8c54 100644
--- a/libavfilter/vsrc_testsrc.c
+++ b/libavfilter/vsrc_testsrc.c
@@ -1,21 +1,22 @@
/*
* Copyright (c) 2007 Nicolas George <nicolas.george@normalesup.org>
* Copyright (c) 2011 Stefano Sabatini
+ * Copyright (c) 2012 Paul B Mahol
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -28,84 +29,98 @@
*
* rgbtestsrc is ported from MPlayer libmpcodecs/vf_rgbtest.c by
* Michael Niedermayer.
+ *
+ * allyuv, smptebars and smptehdbars are by Paul B Mahol.
*/
#include <float.h>
+#include "libavutil/avassert.h"
#include "libavutil/common.h"
-#include "libavutil/mathematics.h"
+#include "libavutil/ffmath.h"
#include "libavutil/opt.h"
+#include "libavutil/imgutils.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/parseutils.h"
+#include "libavutil/xga_font_data.h"
#include "avfilter.h"
+#include "drawutils.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
typedef struct TestSourceContext {
const AVClass *class;
- int h, w;
+ int w, h;
unsigned int nb_frame;
AVRational time_base, frame_rate;
- int64_t pts, max_pts;
- char *size; ///< video frame size
- char *rate; ///< video frame rate
- char *duration; ///< total duration of the generated video
+ int64_t pts;
+ int64_t duration; ///< duration expressed in microseconds
AVRational sar; ///< sample aspect ratio
+ int draw_once; ///< draw only the first frame, always put out the same picture
+ int draw_once_reset; ///< draw only the first frame or in case of reset
+ AVFrame *picref; ///< cached reference containing the painted picture
void (* fill_picture_fn)(AVFilterContext *ctx, AVFrame *frame);
+ /* only used by testsrc */
+ int nb_decimals;
+
+ /* only used by color */
+ FFDrawContext draw;
+ FFDrawColor color;
+ uint8_t color_rgba[4];
+
/* only used by rgbtest */
- int rgba_map[4];
+ uint8_t rgba_map[4];
+
+ /* only used by haldclut */
+ int level;
} TestSourceContext;
#define OFFSET(x) offsetof(TestSourceContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
-static const AVOption testsrc_options[] = {
- { "size", "set video size", OFFSET(size), AV_OPT_TYPE_STRING, {.str = "320x240"}, .flags = FLAGS },
- { "s", "set video size", OFFSET(size), AV_OPT_TYPE_STRING, {.str = "320x240"}, .flags = FLAGS },
- { "rate", "set video rate", OFFSET(rate), AV_OPT_TYPE_STRING, {.str = "25"}, .flags = FLAGS },
- { "r", "set video rate", OFFSET(rate), AV_OPT_TYPE_STRING, {.str = "25"}, .flags = FLAGS },
- { "duration", "set video duration", OFFSET(duration), AV_OPT_TYPE_STRING, {.str = NULL}, .flags = FLAGS },
- { "sar", "set video sample aspect ratio", OFFSET(sar), AV_OPT_TYPE_RATIONAL, {.dbl = 1}, 0, INT_MAX, FLAGS },
- { NULL },
-};
+#define SIZE_OPTIONS \
+ { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "320x240"}, 0, 0, FLAGS },\
+ { "s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "320x240"}, 0, 0, FLAGS },\
-static av_cold int init_common(AVFilterContext *ctx)
-{
- TestSourceContext *test = ctx->priv;
- int64_t duration = -1;
- int ret = 0;
+#define COMMON_OPTIONS_NOSIZE \
+ { "rate", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, FLAGS },\
+ { "r", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, FLAGS },\
+ { "duration", "set video duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = -1}, -1, INT64_MAX, FLAGS },\
+ { "d", "set video duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = -1}, -1, INT64_MAX, FLAGS },\
+ { "sar", "set video sample aspect ratio", OFFSET(sar), AV_OPT_TYPE_RATIONAL, {.dbl= 1}, 0, INT_MAX, FLAGS },
- if ((ret = av_parse_video_size(&test->w, &test->h, test->size)) < 0) {
- av_log(ctx, AV_LOG_ERROR, "Invalid frame size: '%s'\n", test->size);
- return ret;
- }
+#define COMMON_OPTIONS SIZE_OPTIONS COMMON_OPTIONS_NOSIZE
- if ((ret = av_parse_video_rate(&test->frame_rate, test->rate)) < 0) {
- av_log(ctx, AV_LOG_ERROR, "Invalid frame rate: '%s'\n", test->rate);
- return ret;
- }
+static const AVOption options[] = {
+ COMMON_OPTIONS
+ { NULL }
+};
- if ((test->duration) && (ret = av_parse_time(&duration, test->duration, 1)) < 0) {
- av_log(ctx, AV_LOG_ERROR, "Invalid duration: '%s'\n", test->duration);
- return ret;
- }
+static av_cold int init(AVFilterContext *ctx)
+{
+ TestSourceContext *test = ctx->priv;
test->time_base = av_inv_q(test->frame_rate);
- test->max_pts = duration >= 0 ?
- av_rescale_q(duration, AV_TIME_BASE_Q, test->time_base) : -1;
test->nb_frame = 0;
test->pts = 0;
- av_log(ctx, AV_LOG_DEBUG, "size:%dx%d rate:%d/%d duration:%f sar:%d/%d\n",
+ av_log(ctx, AV_LOG_VERBOSE, "size:%dx%d rate:%d/%d duration:%f sar:%d/%d\n",
test->w, test->h, test->frame_rate.num, test->frame_rate.den,
- duration < 0 ? -1 : test->max_pts * av_q2d(test->time_base),
+ test->duration < 0 ? -1 : (double)test->duration/1000000,
test->sar.num, test->sar.den);
return 0;
}
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ TestSourceContext *test = ctx->priv;
+
+ av_frame_free(&test->picref);
+}
+
static int config_props(AVFilterLink *outlink)
{
TestSourceContext *test = outlink->src->priv;
@@ -124,36 +139,322 @@ static int request_frame(AVFilterLink *outlink)
TestSourceContext *test = outlink->src->priv;
AVFrame *frame;
- if (test->max_pts >= 0 && test->pts > test->max_pts)
+ if (test->duration >= 0 &&
+ av_rescale_q(test->pts, test->time_base, AV_TIME_BASE_Q) >= test->duration)
return AVERROR_EOF;
- frame = ff_get_video_buffer(outlink, test->w, test->h);
+
+ if (test->draw_once) {
+ if (test->draw_once_reset) {
+ av_frame_free(&test->picref);
+ test->draw_once_reset = 0;
+ }
+ if (!test->picref) {
+ test->picref =
+ ff_get_video_buffer(outlink, test->w, test->h);
+ if (!test->picref)
+ return AVERROR(ENOMEM);
+ test->fill_picture_fn(outlink->src, test->picref);
+ }
+ frame = av_frame_clone(test->picref);
+ } else
+ frame = ff_get_video_buffer(outlink, test->w, test->h);
+
if (!frame)
return AVERROR(ENOMEM);
-
- frame->pts = test->pts++;
+ frame->pts = test->pts;
frame->key_frame = 1;
frame->interlaced_frame = 0;
frame->pict_type = AV_PICTURE_TYPE_I;
frame->sample_aspect_ratio = test->sar;
+ if (!test->draw_once)
+ test->fill_picture_fn(outlink->src, frame);
+
+ test->pts++;
test->nb_frame++;
- test->fill_picture_fn(outlink->src, frame);
return ff_filter_frame(outlink, frame);
}
-#if CONFIG_TESTSRC_FILTER
+#if CONFIG_COLOR_FILTER
+
+static const AVOption color_options[] = {
+ { "color", "set color", OFFSET(color_rgba), AV_OPT_TYPE_COLOR, {.str = "black"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "c", "set color", OFFSET(color_rgba), AV_OPT_TYPE_COLOR, {.str = "black"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ COMMON_OPTIONS
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(color);
+
+static void color_fill_picture(AVFilterContext *ctx, AVFrame *picref)
+{
+ TestSourceContext *test = ctx->priv;
+ ff_fill_rectangle(&test->draw, &test->color,
+ picref->data, picref->linesize,
+ 0, 0, test->w, test->h);
+}
+
+static av_cold int color_init(AVFilterContext *ctx)
+{
+ TestSourceContext *test = ctx->priv;
+ test->fill_picture_fn = color_fill_picture;
+ test->draw_once = 1;
+ return init(ctx);
+}
+
+static int color_query_formats(AVFilterContext *ctx)
+{
+ return ff_set_common_formats(ctx, ff_draw_supported_pixel_formats(0));
+}
-static const char *testsrc_get_name(void *ctx)
+static int color_config_props(AVFilterLink *inlink)
{
- return "testsrc";
+ AVFilterContext *ctx = inlink->src;
+ TestSourceContext *test = ctx->priv;
+ int ret;
+
+ ff_draw_init(&test->draw, inlink->format, 0);
+ ff_draw_color(&test->draw, &test->color, test->color_rgba);
+
+ test->w = ff_draw_round_to_sub(&test->draw, 0, -1, test->w);
+ test->h = ff_draw_round_to_sub(&test->draw, 1, -1, test->h);
+ if (av_image_check_size(test->w, test->h, 0, ctx) < 0)
+ return AVERROR(EINVAL);
+
+ if ((ret = config_props(inlink)) < 0)
+ return ret;
+
+ return 0;
}
-static const AVClass testsrc_class = {
- .class_name = "TestSourceContext",
- .item_name = testsrc_get_name,
- .option = testsrc_options,
+static int color_process_command(AVFilterContext *ctx, const char *cmd, const char *args,
+ char *res, int res_len, int flags)
+{
+ TestSourceContext *test = ctx->priv;
+ int ret;
+
+ if (!strcmp(cmd, "color") || !strcmp(cmd, "c")) {
+ uint8_t color_rgba[4];
+
+ ret = av_parse_color(color_rgba, args, -1, ctx);
+ if (ret < 0)
+ return ret;
+
+ memcpy(test->color_rgba, color_rgba, sizeof(color_rgba));
+ ff_draw_color(&test->draw, &test->color, test->color_rgba);
+ test->draw_once_reset = 1;
+ return 0;
+ }
+
+ return AVERROR(ENOSYS);
+}
+
+static const AVFilterPad color_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .request_frame = request_frame,
+ .config_props = color_config_props,
+ },
+ { NULL }
+};
+
+AVFilter ff_vsrc_color = {
+ .name = "color",
+ .description = NULL_IF_CONFIG_SMALL("Provide an uniformly colored input."),
+ .priv_class = &color_class,
+ .priv_size = sizeof(TestSourceContext),
+ .init = color_init,
+ .uninit = uninit,
+ .query_formats = color_query_formats,
+ .inputs = NULL,
+ .outputs = color_outputs,
+ .process_command = color_process_command,
+};
+
+#endif /* CONFIG_COLOR_FILTER */
+
+#if CONFIG_HALDCLUTSRC_FILTER
+
+static const AVOption haldclutsrc_options[] = {
+ { "level", "set level", OFFSET(level), AV_OPT_TYPE_INT, {.i64 = 6}, 2, 8, FLAGS },
+ COMMON_OPTIONS_NOSIZE
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(haldclutsrc);
+
+static void haldclutsrc_fill_picture(AVFilterContext *ctx, AVFrame *frame)
+{
+ int i, j, k, x = 0, y = 0, is16bit = 0, step;
+ uint32_t alpha = 0;
+ const TestSourceContext *hc = ctx->priv;
+ int level = hc->level;
+ float scale;
+ const int w = frame->width;
+ const int h = frame->height;
+ const uint8_t *data = frame->data[0];
+ const int linesize = frame->linesize[0];
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
+ uint8_t rgba_map[4];
+
+ av_assert0(w == h && w == level*level*level);
+
+ ff_fill_rgba_map(rgba_map, frame->format);
+
+ switch (frame->format) {
+ case AV_PIX_FMT_RGB48:
+ case AV_PIX_FMT_BGR48:
+ case AV_PIX_FMT_RGBA64:
+ case AV_PIX_FMT_BGRA64:
+ is16bit = 1;
+ alpha = 0xffff;
+ break;
+ case AV_PIX_FMT_RGBA:
+ case AV_PIX_FMT_BGRA:
+ case AV_PIX_FMT_ARGB:
+ case AV_PIX_FMT_ABGR:
+ alpha = 0xff;
+ break;
+ }
+
+ step = av_get_padded_bits_per_pixel(desc) >> (3 + is16bit);
+ scale = ((float)(1 << (8*(is16bit+1))) - 1) / (level*level - 1);
+
+#define LOAD_CLUT(nbits) do { \
+ uint##nbits##_t *dst = ((uint##nbits##_t *)(data + y*linesize)) + x*step; \
+ dst[rgba_map[0]] = av_clip_uint##nbits(i * scale); \
+ dst[rgba_map[1]] = av_clip_uint##nbits(j * scale); \
+ dst[rgba_map[2]] = av_clip_uint##nbits(k * scale); \
+ if (step == 4) \
+ dst[rgba_map[3]] = alpha; \
+} while (0)
+
+ level *= level;
+ for (k = 0; k < level; k++) {
+ for (j = 0; j < level; j++) {
+ for (i = 0; i < level; i++) {
+ if (!is16bit)
+ LOAD_CLUT(8);
+ else
+ LOAD_CLUT(16);
+ if (++x == w) {
+ x = 0;
+ y++;
+ }
+ }
+ }
+ }
+}
+
+static av_cold int haldclutsrc_init(AVFilterContext *ctx)
+{
+ TestSourceContext *hc = ctx->priv;
+ hc->fill_picture_fn = haldclutsrc_fill_picture;
+ hc->draw_once = 1;
+ return init(ctx);
+}
+
+static int haldclutsrc_query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
+ AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA,
+ AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR,
+ AV_PIX_FMT_0RGB, AV_PIX_FMT_0BGR,
+ AV_PIX_FMT_RGB0, AV_PIX_FMT_BGR0,
+ AV_PIX_FMT_RGB48, AV_PIX_FMT_BGR48,
+ AV_PIX_FMT_RGBA64, AV_PIX_FMT_BGRA64,
+ AV_PIX_FMT_NONE,
+ };
+
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static int haldclutsrc_config_props(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ TestSourceContext *hc = ctx->priv;
+
+ hc->w = hc->h = hc->level * hc->level * hc->level;
+ return config_props(outlink);
+}
+
+static const AVFilterPad haldclutsrc_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .request_frame = request_frame,
+ .config_props = haldclutsrc_config_props,
+ },
+ { NULL }
};
+AVFilter ff_vsrc_haldclutsrc = {
+ .name = "haldclutsrc",
+ .description = NULL_IF_CONFIG_SMALL("Provide an identity Hald CLUT."),
+ .priv_class = &haldclutsrc_class,
+ .priv_size = sizeof(TestSourceContext),
+ .init = haldclutsrc_init,
+ .uninit = uninit,
+ .query_formats = haldclutsrc_query_formats,
+ .inputs = NULL,
+ .outputs = haldclutsrc_outputs,
+};
+#endif /* CONFIG_HALDCLUTSRC_FILTER */
+
+#if CONFIG_NULLSRC_FILTER
+
+#define nullsrc_options options
+AVFILTER_DEFINE_CLASS(nullsrc);
+
+static void nullsrc_fill_picture(AVFilterContext *ctx, AVFrame *picref) { }
+
+static av_cold int nullsrc_init(AVFilterContext *ctx)
+{
+ TestSourceContext *test = ctx->priv;
+
+ test->fill_picture_fn = nullsrc_fill_picture;
+ return init(ctx);
+}
+
+static const AVFilterPad nullsrc_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .request_frame = request_frame,
+ .config_props = config_props,
+ },
+ { NULL },
+};
+
+AVFilter ff_vsrc_nullsrc = {
+ .name = "nullsrc",
+ .description = NULL_IF_CONFIG_SMALL("Null video source, return unprocessed video frames."),
+ .init = nullsrc_init,
+ .uninit = uninit,
+ .priv_size = sizeof(TestSourceContext),
+ .priv_class = &nullsrc_class,
+ .inputs = NULL,
+ .outputs = nullsrc_outputs,
+};
+
+#endif /* CONFIG_NULLSRC_FILTER */
+
+#if CONFIG_TESTSRC_FILTER
+
+static const AVOption testsrc_options[] = {
+ COMMON_OPTIONS
+ { "decimals", "set number of decimals to show", OFFSET(nb_decimals), AV_OPT_TYPE_INT, {.i64=0}, 0, 17, FLAGS },
+ { "n", "set number of decimals to show", OFFSET(nb_decimals), AV_OPT_TYPE_INT, {.i64=0}, 0, 17, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(testsrc);
+
/**
* Fill a rectangle with value val.
*
@@ -166,8 +467,8 @@ static const AVClass testsrc_class = {
* @param w width of the rectangle to draw, expressed as a number of segment_width units
* @param h height of the rectangle to draw, expressed as a number of segment_width units
*/
-static void draw_rectangle(unsigned val, uint8_t *dst, int dst_linesize, unsigned segment_width,
- unsigned x, unsigned y, unsigned w, unsigned h)
+static void draw_rectangle(unsigned val, uint8_t *dst, int dst_linesize, int segment_width,
+ int x, int y, int w, int h)
{
int i;
int step = 3;
@@ -181,8 +482,8 @@ static void draw_rectangle(unsigned val, uint8_t *dst, int dst_linesize, unsigne
}
}
-static void draw_digit(int digit, uint8_t *dst, unsigned dst_linesize,
- unsigned segment_width)
+static void draw_digit(int digit, uint8_t *dst, int dst_linesize,
+ int segment_width)
{
#define TOP_HBAR 1
#define MID_HBAR 2
@@ -276,7 +577,7 @@ static void test_fill_picture(AVFilterContext *ctx, AVFrame *frame)
}
/* draw sliding color line */
- p = data + frame->linesize[0] * height * 3/4;
+ p0 = p = data + frame->linesize[0] * (height * 3/4);
grad = (256 * test->nb_frame * test->time_base.num / test->time_base.den) %
GRADIENT_SIZE;
rgrad = 0;
@@ -304,15 +605,25 @@ static void test_fill_picture(AVFilterContext *ctx, AVFrame *frame)
if (grad >= GRADIENT_SIZE)
grad -= GRADIENT_SIZE;
}
+ p = p0;
for (y = height / 8; y > 0; y--) {
- memcpy(p, p - frame->linesize[0], 3 * width);
+ memcpy(p+frame->linesize[0], p, 3 * width);
p += frame->linesize[0];
}
/* draw digits */
seg_size = width / 80;
if (seg_size >= 1 && height >= 13 * seg_size) {
- second = test->nb_frame * test->time_base.num / test->time_base.den;
+ int64_t p10decimals = 1;
+ double time = av_q2d(test->time_base) * test->nb_frame *
+ ff_exp10(test->nb_decimals);
+ if (time >= INT_MAX)
+ return;
+
+ for (x = 0; x < test->nb_decimals; x++)
+ p10decimals *= 10;
+
+ second = av_rescale_rnd(test->nb_frame * test->time_base.num, p10decimals, test->time_base.den, AV_ROUND_ZERO);
x = width - (width - seg_size * 64) / 2;
y = (height - seg_size * 13) / 2;
p = data + (x*3 + y * frame->linesize[0]);
@@ -331,7 +642,7 @@ static av_cold int test_init(AVFilterContext *ctx)
TestSourceContext *test = ctx->priv;
test->fill_picture_fn = test_fill_picture;
- return init_common(ctx);
+ return init(ctx);
}
static int test_query_formats(AVFilterContext *ctx)
@@ -339,8 +650,11 @@ static int test_query_formats(AVFilterContext *ctx)
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_RGB24, AV_PIX_FMT_NONE
};
- ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
- return 0;
+
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
}
static const AVFilterPad avfilter_vsrc_testsrc_outputs[] = {
@@ -359,29 +673,289 @@ AVFilter ff_vsrc_testsrc = {
.priv_size = sizeof(TestSourceContext),
.priv_class = &testsrc_class,
.init = test_init,
-
+ .uninit = uninit,
.query_formats = test_query_formats,
+ .inputs = NULL,
+ .outputs = avfilter_vsrc_testsrc_outputs,
+};
+
+#endif /* CONFIG_TESTSRC_FILTER */
- .inputs = NULL,
+#if CONFIG_TESTSRC2_FILTER
- .outputs = avfilter_vsrc_testsrc_outputs,
+static const AVOption testsrc2_options[] = {
+ COMMON_OPTIONS
+ { NULL }
};
-#endif /* CONFIG_TESTSRC_FILTER */
+AVFILTER_DEFINE_CLASS(testsrc2);
-#if CONFIG_RGBTESTSRC_FILTER
+static void set_color(TestSourceContext *s, FFDrawColor *color, uint32_t argb)
+{
+ uint8_t rgba[4] = { (argb >> 16) & 0xFF,
+ (argb >> 8) & 0xFF,
+ (argb >> 0) & 0xFF,
+ (argb >> 24) & 0xFF, };
+ ff_draw_color(&s->draw, color, rgba);
+}
+
+static uint32_t color_gradient(unsigned index)
+{
+ unsigned si = index & 0xFF, sd = 0xFF - si;
+ switch (index >> 8) {
+ case 0: return 0xFF0000 + (si << 8);
+ case 1: return 0x00FF00 + (sd << 16);
+ case 2: return 0x00FF00 + (si << 0);
+ case 3: return 0x0000FF + (sd << 8);
+ case 4: return 0x0000FF + (si << 16);
+ case 5: return 0xFF0000 + (sd << 0);
+ }
+ av_assert0(0);
+}
+
+static void draw_text(TestSourceContext *s, AVFrame *frame, FFDrawColor *color,
+ int x0, int y0, const uint8_t *text)
+{
+ int x = x0;
+
+ for (; *text; text++) {
+ if (*text == '\n') {
+ x = x0;
+ y0 += 16;
+ continue;
+ }
+ ff_blend_mask(&s->draw, color, frame->data, frame->linesize,
+ frame->width, frame->height,
+ avpriv_vga16_font + *text * 16, 1, 8, 16, 0, 0, x, y0);
+ x += 8;
+ }
+}
+
+static void test2_fill_picture(AVFilterContext *ctx, AVFrame *frame)
+{
+ TestSourceContext *s = ctx->priv;
+ FFDrawColor color;
+
+ /* colored background */
+ {
+ unsigned i, x = 0, x2;
+
+ x = 0;
+ for (i = 1; i < 7; i++) {
+ x2 = av_rescale(i, s->w, 6);
+ x2 = ff_draw_round_to_sub(&s->draw, 0, 0, x2);
+ set_color(s, &color, ((i & 1) ? 0xFF0000 : 0) |
+ ((i & 2) ? 0x00FF00 : 0) |
+ ((i & 4) ? 0x0000FF : 0));
+ ff_fill_rectangle(&s->draw, &color, frame->data, frame->linesize,
+ x, 0, x2 - x, frame->height);
+ x = x2;
+ }
+ }
+
+ /* oblique gradient */
+ /* note: too slow if using blending */
+ if (s->h >= 64) {
+ unsigned x, dx, y0, y, g0, g;
+
+ dx = ff_draw_round_to_sub(&s->draw, 0, +1, 1);
+ y0 = av_rescale_q(s->pts, s->time_base, av_make_q(2, s->h - 16));
+ g0 = av_rescale_q(s->pts, s->time_base, av_make_q(1, 128));
+ for (x = 0; x < s->w; x += dx) {
+ g = (av_rescale(x, 6 * 256, s->w) + g0) % (6 * 256);
+ set_color(s, &color, color_gradient(g));
+ y = y0 + av_rescale(x, s->h / 2, s->w);
+ y %= 2 * (s->h - 16);
+ if (y > s->h - 16)
+ y = 2 * (s->h - 16) - y;
+ y = ff_draw_round_to_sub(&s->draw, 1, 0, y);
+ ff_fill_rectangle(&s->draw, &color, frame->data, frame->linesize,
+ x, y, dx, 16);
+ }
+ }
+
+ /* top right: draw clock hands */
+ if (s->w >= 64 && s->h >= 64) {
+ int l = (FFMIN(s->w, s->h) - 32) >> 1;
+ int steps = FFMAX(4, l >> 5);
+ int xc = (s->w >> 2) + (s->w >> 1);
+ int yc = (s->h >> 2);
+ int cycle = l << 2;
+ int pos, xh, yh;
+ int c, i;
+
+ for (c = 0; c < 3; c++) {
+ set_color(s, &color, 0xBBBBBB ^ (0xFF << (c << 3)));
+ pos = av_rescale_q(s->pts, s->time_base, av_make_q(64 >> (c << 1), cycle)) % cycle;
+ xh = pos < 1 * l ? pos :
+ pos < 2 * l ? l :
+ pos < 3 * l ? 3 * l - pos : 0;
+ yh = pos < 1 * l ? 0 :
+ pos < 2 * l ? pos - l :
+ pos < 3 * l ? l :
+ cycle - pos;
+ xh -= l >> 1;
+ yh -= l >> 1;
+ for (i = 1; i <= steps; i++) {
+ int x = av_rescale(xh, i, steps) + xc;
+ int y = av_rescale(yh, i, steps) + yc;
+ x = ff_draw_round_to_sub(&s->draw, 0, -1, x);
+ y = ff_draw_round_to_sub(&s->draw, 1, -1, y);
+ ff_fill_rectangle(&s->draw, &color, frame->data, frame->linesize,
+ x, y, 8, 8);
+ }
+ }
+ }
+
+ /* bottom left: beating rectangles */
+ if (s->w >= 64 && s->h >= 64) {
+ int l = (FFMIN(s->w, s->h) - 16) >> 2;
+ int cycle = l << 3;
+ int xc = (s->w >> 2);
+ int yc = (s->h >> 2) + (s->h >> 1);
+ int xm1 = ff_draw_round_to_sub(&s->draw, 0, -1, xc - 8);
+ int xm2 = ff_draw_round_to_sub(&s->draw, 0, +1, xc + 8);
+ int ym1 = ff_draw_round_to_sub(&s->draw, 1, -1, yc - 8);
+ int ym2 = ff_draw_round_to_sub(&s->draw, 1, +1, yc + 8);
+ int size, step, x1, x2, y1, y2;
+
+ size = av_rescale_q(s->pts, s->time_base, av_make_q(4, cycle));
+ step = size / l;
+ size %= l;
+ if (step & 1)
+ size = l - size;
+ step = (step >> 1) & 3;
+ set_color(s, &color, 0xFF808080);
+ x1 = ff_draw_round_to_sub(&s->draw, 0, -1, xc - 4 - size);
+ x2 = ff_draw_round_to_sub(&s->draw, 0, +1, xc + 4 + size);
+ y1 = ff_draw_round_to_sub(&s->draw, 1, -1, yc - 4 - size);
+ y2 = ff_draw_round_to_sub(&s->draw, 1, +1, yc + 4 + size);
+ if (step == 0 || step == 2)
+ ff_fill_rectangle(&s->draw, &color, frame->data, frame->linesize,
+ x1, ym1, x2 - x1, ym2 - ym1);
+ if (step == 1 || step == 2)
+ ff_fill_rectangle(&s->draw, &color, frame->data, frame->linesize,
+ xm1, y1, xm2 - xm1, y2 - y1);
+ if (step == 3)
+ ff_fill_rectangle(&s->draw, &color, frame->data, frame->linesize,
+ x1, y1, x2 - x1, y2 - y1);
+ }
+
+ /* bottom right: checker with random noise */
+ {
+ unsigned xmin = av_rescale(5, s->w, 8);
+ unsigned xmax = av_rescale(7, s->w, 8);
+ unsigned ymin = av_rescale(5, s->h, 8);
+ unsigned ymax = av_rescale(7, s->h, 8);
+ unsigned x, y, i, r;
+ uint8_t alpha[256];
+
+ r = s->pts;
+ for (y = ymin; y < ymax - 15; y += 16) {
+ for (x = xmin; x < xmax - 15; x += 16) {
+ if ((x ^ y) & 16)
+ continue;
+ for (i = 0; i < 256; i++) {
+ r = r * 1664525 + 1013904223;
+ alpha[i] = r >> 24;
+ }
+ set_color(s, &color, 0xFF00FF80);
+ ff_blend_mask(&s->draw, &color, frame->data, frame->linesize,
+ frame->width, frame->height,
+ alpha, 16, 16, 16, 3, 0, x, y);
+ }
+ }
+ }
+
+ /* bouncing square */
+ if (s->w >= 16 && s->h >= 16) {
+ unsigned w = s->w - 8;
+ unsigned h = s->h - 8;
+ unsigned x = av_rescale_q(s->pts, s->time_base, av_make_q(233, 55 * w)) % (w << 1);
+ unsigned y = av_rescale_q(s->pts, s->time_base, av_make_q(233, 89 * h)) % (h << 1);
+ if (x > w)
+ x = (w << 1) - x;
+ if (y > h)
+ y = (h << 1) - y;
+ x = ff_draw_round_to_sub(&s->draw, 0, -1, x);
+ y = ff_draw_round_to_sub(&s->draw, 1, -1, y);
+ set_color(s, &color, 0xFF8000FF);
+ ff_fill_rectangle(&s->draw, &color, frame->data, frame->linesize,
+ x, y, 8, 8);
+ }
+
+ /* top right: draw frame time and frame number */
+ {
+ char buf[256];
+ unsigned time;
+
+ time = av_rescale_q(s->pts, s->time_base, av_make_q(1, 1000)) % 86400000;
+ set_color(s, &color, 0xC0000000);
+ ff_blend_rectangle(&s->draw, &color, frame->data, frame->linesize,
+ frame->width, frame->height,
+ 2, 2, 100, 36);
+ set_color(s, &color, 0xFFFF8000);
+ snprintf(buf, sizeof(buf), "%02d:%02d:%02d.%03d\n%12"PRIi64,
+ time / 3600000, (time / 60000) % 60, (time / 1000) % 60,
+ time % 1000, s->pts);
+ draw_text(s, frame, &color, 4, 4, buf);
+ }
+}
+static av_cold int test2_init(AVFilterContext *ctx)
+{
+ TestSourceContext *s = ctx->priv;
+
+ s->fill_picture_fn = test2_fill_picture;
+ return init(ctx);
+}
-static const char *rgbtestsrc_get_name(void *ctx)
+static int test2_query_formats(AVFilterContext *ctx)
{
- return "rgbtestsrc";
+ return ff_set_common_formats(ctx, ff_draw_supported_pixel_formats(0));
}
-static const AVClass rgbtestsrc_class = {
- .class_name = "RGBTestSourceContext",
- .item_name = rgbtestsrc_get_name,
- .option = testsrc_options,
+static int test2_config_props(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->src;
+ TestSourceContext *s = ctx->priv;
+
+ av_assert0(ff_draw_init(&s->draw, inlink->format, 0) >= 0);
+ s->w = ff_draw_round_to_sub(&s->draw, 0, -1, s->w);
+ s->h = ff_draw_round_to_sub(&s->draw, 1, -1, s->h);
+ if (av_image_check_size(s->w, s->h, 0, ctx) < 0)
+ return AVERROR(EINVAL);
+ return config_props(inlink);
+}
+
+static const AVFilterPad avfilter_vsrc_testsrc2_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .request_frame = request_frame,
+ .config_props = test2_config_props,
+ },
+ { NULL }
};
+AVFilter ff_vsrc_testsrc2 = {
+ .name = "testsrc2",
+ .description = NULL_IF_CONFIG_SMALL("Generate another test pattern."),
+ .priv_size = sizeof(TestSourceContext),
+ .priv_class = &testsrc2_class,
+ .init = test2_init,
+ .uninit = uninit,
+ .query_formats = test2_query_formats,
+ .inputs = NULL,
+ .outputs = avfilter_vsrc_testsrc2_outputs,
+};
+
+#endif /* CONFIG_TESTSRC2_FILTER */
+
+#if CONFIG_RGBTESTSRC_FILTER
+
+#define rgbtestsrc_options options
+AVFILTER_DEFINE_CLASS(rgbtestsrc);
+
#define R 0
#define G 1
#define B 2
@@ -389,7 +963,7 @@ static const AVClass rgbtestsrc_class = {
static void rgbtest_put_pixel(uint8_t *dst, int dst_linesize,
int x, int y, int r, int g, int b, enum AVPixelFormat fmt,
- int rgba_map[4])
+ uint8_t rgba_map[4])
{
int32_t v;
uint8_t *p;
@@ -411,7 +985,7 @@ static void rgbtest_put_pixel(uint8_t *dst, int dst_linesize,
case AV_PIX_FMT_BGRA:
case AV_PIX_FMT_ARGB:
case AV_PIX_FMT_ABGR:
- v = (r << (rgba_map[R]*8)) + (g << (rgba_map[G]*8)) + (b << (rgba_map[B]*8));
+ v = (r << (rgba_map[R]*8)) + (g << (rgba_map[G]*8)) + (b << (rgba_map[B]*8)) + (255 << (rgba_map[A]*8));
p = dst + 4*x + y*dst_linesize;
AV_WL32(p, v);
break;
@@ -442,8 +1016,9 @@ static av_cold int rgbtest_init(AVFilterContext *ctx)
{
TestSourceContext *test = ctx->priv;
+ test->draw_once = 1;
test->fill_picture_fn = rgbtest_fill_picture;
- return init_common(ctx);
+ return init(ctx);
}
static int rgbtest_query_formats(AVFilterContext *ctx)
@@ -456,23 +1031,18 @@ static int rgbtest_query_formats(AVFilterContext *ctx)
AV_PIX_FMT_RGB555, AV_PIX_FMT_BGR555,
AV_PIX_FMT_NONE
};
- ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
- return 0;
+
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
}
static int rgbtest_config_props(AVFilterLink *outlink)
{
TestSourceContext *test = outlink->src->priv;
- switch (outlink->format) {
- case AV_PIX_FMT_ARGB: test->rgba_map[A] = 0; test->rgba_map[R] = 1; test->rgba_map[G] = 2; test->rgba_map[B] = 3; break;
- case AV_PIX_FMT_ABGR: test->rgba_map[A] = 0; test->rgba_map[B] = 1; test->rgba_map[G] = 2; test->rgba_map[R] = 3; break;
- case AV_PIX_FMT_RGBA:
- case AV_PIX_FMT_RGB24: test->rgba_map[R] = 0; test->rgba_map[G] = 1; test->rgba_map[B] = 2; test->rgba_map[A] = 3; break;
- case AV_PIX_FMT_BGRA:
- case AV_PIX_FMT_BGR24: test->rgba_map[B] = 0; test->rgba_map[G] = 1; test->rgba_map[R] = 2; test->rgba_map[A] = 3; break;
- }
-
+ ff_fill_rgba_map(test->rgba_map, outlink->format);
return config_props(outlink);
}
@@ -492,12 +1062,640 @@ AVFilter ff_vsrc_rgbtestsrc = {
.priv_size = sizeof(TestSourceContext),
.priv_class = &rgbtestsrc_class,
.init = rgbtest_init,
-
+ .uninit = uninit,
.query_formats = rgbtest_query_formats,
+ .inputs = NULL,
+ .outputs = avfilter_vsrc_rgbtestsrc_outputs,
+};
+
+#endif /* CONFIG_RGBTESTSRC_FILTER */
+
+#if CONFIG_YUVTESTSRC_FILTER
+
+#define yuvtestsrc_options options
+AVFILTER_DEFINE_CLASS(yuvtestsrc);
+
+static void yuvtest_fill_picture8(AVFilterContext *ctx, AVFrame *frame)
+{
+ int x, y, w = frame->width, h = frame->height / 3;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
+ const int factor = 1 << desc->comp[0].depth;
+ const int mid = 1 << (desc->comp[0].depth - 1);
+ uint8_t *ydst = frame->data[0];
+ uint8_t *udst = frame->data[1];
+ uint8_t *vdst = frame->data[2];
+ int ylinesize = frame->linesize[0];
+ int ulinesize = frame->linesize[1];
+ int vlinesize = frame->linesize[2];
+
+ for (y = 0; y < h; y++) {
+ for (x = 0; x < w; x++) {
+ int c = factor * x / w;
+
+ ydst[x] = c;
+ udst[x] = mid;
+ vdst[x] = mid;
+ }
+
+ ydst += ylinesize;
+ udst += ulinesize;
+ vdst += vlinesize;
+ }
+
+ h += h;
+ for (; y < h; y++) {
+ for (x = 0; x < w; x++) {
+ int c = factor * x / w;
+
+ ydst[x] = mid;
+ udst[x] = c;
+ vdst[x] = mid;
+ }
+
+ ydst += ylinesize;
+ udst += ulinesize;
+ vdst += vlinesize;
+ }
+
+ for (; y < frame->height; y++) {
+ for (x = 0; x < w; x++) {
+ int c = factor * x / w;
+
+ ydst[x] = mid;
+ udst[x] = mid;
+ vdst[x] = c;
+ }
+
+ ydst += ylinesize;
+ udst += ulinesize;
+ vdst += vlinesize;
+ }
+}
+
+static void yuvtest_fill_picture16(AVFilterContext *ctx, AVFrame *frame)
+{
+ int x, y, w = frame->width, h = frame->height / 3;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
+ const int factor = 1 << desc->comp[0].depth;
+ const int mid = 1 << (desc->comp[0].depth - 1);
+ uint16_t *ydst = (uint16_t *)frame->data[0];
+ uint16_t *udst = (uint16_t *)frame->data[1];
+ uint16_t *vdst = (uint16_t *)frame->data[2];
+ int ylinesize = frame->linesize[0] / 2;
+ int ulinesize = frame->linesize[1] / 2;
+ int vlinesize = frame->linesize[2] / 2;
+
+ for (y = 0; y < h; y++) {
+ for (x = 0; x < w; x++) {
+ int c = factor * x / w;
+
+ ydst[x] = c;
+ udst[x] = mid;
+ vdst[x] = mid;
+ }
- .inputs = NULL,
+ ydst += ylinesize;
+ udst += ulinesize;
+ vdst += vlinesize;
+ }
+
+ h += h;
+ for (; y < h; y++) {
+ for (x = 0; x < w; x++) {
+ int c = factor * x / w;
+
+ ydst[x] = mid;
+ udst[x] = c;
+ vdst[x] = mid;
+ }
+
+ ydst += ylinesize;
+ udst += ulinesize;
+ vdst += vlinesize;
+ }
+
+ for (; y < frame->height; y++) {
+ for (x = 0; x < w; x++) {
+ int c = factor * x / w;
+
+ ydst[x] = mid;
+ udst[x] = mid;
+ vdst[x] = c;
+ }
+
+ ydst += ylinesize;
+ udst += ulinesize;
+ vdst += vlinesize;
+ }
+}
+
+static av_cold int yuvtest_init(AVFilterContext *ctx)
+{
+ TestSourceContext *test = ctx->priv;
+
+ test->draw_once = 1;
+ return init(ctx);
+}
+
+static int yuvtest_query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUVJ444P,
+ AV_PIX_FMT_YUV444P9, AV_PIX_FMT_YUV444P10,
+ AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV444P14,
+ AV_PIX_FMT_YUV444P16,
+ AV_PIX_FMT_NONE
+ };
+
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static int yuvtest_config_props(AVFilterLink *outlink)
+{
+ TestSourceContext *test = outlink->src->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(outlink->format);
- .outputs = avfilter_vsrc_rgbtestsrc_outputs,
+ test->fill_picture_fn = desc->comp[0].depth > 8 ? yuvtest_fill_picture16 : yuvtest_fill_picture8;
+ return config_props(outlink);
+}
+
+static const AVFilterPad avfilter_vsrc_yuvtestsrc_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .request_frame = request_frame,
+ .config_props = yuvtest_config_props,
+ },
+ { NULL }
};
-#endif /* CONFIG_RGBTESTSRC_FILTER */
+AVFilter ff_vsrc_yuvtestsrc = {
+ .name = "yuvtestsrc",
+ .description = NULL_IF_CONFIG_SMALL("Generate YUV test pattern."),
+ .priv_size = sizeof(TestSourceContext),
+ .priv_class = &yuvtestsrc_class,
+ .init = yuvtest_init,
+ .uninit = uninit,
+ .query_formats = yuvtest_query_formats,
+ .inputs = NULL,
+ .outputs = avfilter_vsrc_yuvtestsrc_outputs,
+};
+
+#endif /* CONFIG_YUVTESTSRC_FILTER */
+
+#if CONFIG_SMPTEBARS_FILTER || CONFIG_SMPTEHDBARS_FILTER
+
+static const uint8_t rainbow[7][4] = {
+ { 180, 128, 128, 255 }, /* 75% white */
+ { 162, 44, 142, 255 }, /* 75% yellow */
+ { 131, 156, 44, 255 }, /* 75% cyan */
+ { 112, 72, 58, 255 }, /* 75% green */
+ { 84, 184, 198, 255 }, /* 75% magenta */
+ { 65, 100, 212, 255 }, /* 75% red */
+ { 35, 212, 114, 255 }, /* 75% blue */
+};
+
+static const uint8_t rainbowhd[7][4] = {
+ { 180, 128, 128, 255 }, /* 75% white */
+ { 168, 44, 136, 255 }, /* 75% yellow */
+ { 145, 147, 44, 255 }, /* 75% cyan */
+ { 133, 63, 52, 255 }, /* 75% green */
+ { 63, 193, 204, 255 }, /* 75% magenta */
+ { 51, 109, 212, 255 }, /* 75% red */
+ { 28, 212, 120, 255 }, /* 75% blue */
+};
+
+static const uint8_t wobnair[7][4] = {
+ { 35, 212, 114, 255 }, /* 75% blue */
+ { 19, 128, 128, 255 }, /* 7.5% intensity black */
+ { 84, 184, 198, 255 }, /* 75% magenta */
+ { 19, 128, 128, 255 }, /* 7.5% intensity black */
+ { 131, 156, 44, 255 }, /* 75% cyan */
+ { 19, 128, 128, 255 }, /* 7.5% intensity black */
+ { 180, 128, 128, 255 }, /* 75% white */
+};
+
+static const uint8_t white[4] = { 235, 128, 128, 255 };
+
+/* pluge pulses */
+static const uint8_t neg4ire[4] = { 7, 128, 128, 255 };
+static const uint8_t pos4ire[4] = { 24, 128, 128, 255 };
+
+/* fudged Q/-I */
+static const uint8_t i_pixel[4] = { 57, 156, 97, 255 };
+static const uint8_t q_pixel[4] = { 44, 171, 147, 255 };
+
+static const uint8_t gray40[4] = { 104, 128, 128, 255 };
+static const uint8_t gray15[4] = { 49, 128, 128, 255 };
+static const uint8_t cyan[4] = { 188, 154, 16, 255 };
+static const uint8_t yellow[4] = { 219, 16, 138, 255 };
+static const uint8_t blue[4] = { 32, 240, 118, 255 };
+static const uint8_t red[4] = { 63, 102, 240, 255 };
+static const uint8_t black0[4] = { 16, 128, 128, 255 };
+static const uint8_t black2[4] = { 20, 128, 128, 255 };
+static const uint8_t black4[4] = { 25, 128, 128, 255 };
+static const uint8_t neg2[4] = { 12, 128, 128, 255 };
+
+static void draw_bar(TestSourceContext *test, const uint8_t color[4],
+ int x, int y, int w, int h,
+ AVFrame *frame)
+{
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
+ uint8_t *p, *p0;
+ int plane;
+
+ x = FFMIN(x, test->w - 1);
+ y = FFMIN(y, test->h - 1);
+ w = FFMAX(FFMIN(w, test->w - x), 0);
+ h = FFMAX(FFMIN(h, test->h - y), 0);
+
+ av_assert0(x + w <= test->w);
+ av_assert0(y + h <= test->h);
+
+ for (plane = 0; frame->data[plane]; plane++) {
+ const int c = color[plane];
+ const int linesize = frame->linesize[plane];
+ int i, px, py, pw, ph;
+
+ if (plane == 1 || plane == 2) {
+ px = x >> desc->log2_chroma_w;
+ pw = AV_CEIL_RSHIFT(w, desc->log2_chroma_w);
+ py = y >> desc->log2_chroma_h;
+ ph = AV_CEIL_RSHIFT(h, desc->log2_chroma_h);
+ } else {
+ px = x;
+ pw = w;
+ py = y;
+ ph = h;
+ }
+
+ p0 = p = frame->data[plane] + py * linesize + px;
+ memset(p, c, pw);
+ p += linesize;
+ for (i = 1; i < ph; i++, p += linesize)
+ memcpy(p, p0, pw);
+ }
+}
+
+static int smptebars_query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
+ AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P,
+ AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_NONE,
+ };
+
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static const AVFilterPad smptebars_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .request_frame = request_frame,
+ .config_props = config_props,
+ },
+ { NULL }
+};
+
+#if CONFIG_SMPTEBARS_FILTER
+
+#define smptebars_options options
+AVFILTER_DEFINE_CLASS(smptebars);
+
+static void smptebars_fill_picture(AVFilterContext *ctx, AVFrame *picref)
+{
+ TestSourceContext *test = ctx->priv;
+ int r_w, r_h, w_h, p_w, p_h, i, tmp, x = 0;
+ const AVPixFmtDescriptor *pixdesc = av_pix_fmt_desc_get(picref->format);
+
+ av_frame_set_colorspace(picref, AVCOL_SPC_BT470BG);
+
+ r_w = FFALIGN((test->w + 6) / 7, 1 << pixdesc->log2_chroma_w);
+ r_h = FFALIGN(test->h * 2 / 3, 1 << pixdesc->log2_chroma_h);
+ w_h = FFALIGN(test->h * 3 / 4 - r_h, 1 << pixdesc->log2_chroma_h);
+ p_w = FFALIGN(r_w * 5 / 4, 1 << pixdesc->log2_chroma_w);
+ p_h = test->h - w_h - r_h;
+
+ for (i = 0; i < 7; i++) {
+ draw_bar(test, rainbow[i], x, 0, r_w, r_h, picref);
+ draw_bar(test, wobnair[i], x, r_h, r_w, w_h, picref);
+ x += r_w;
+ }
+ x = 0;
+ draw_bar(test, i_pixel, x, r_h + w_h, p_w, p_h, picref);
+ x += p_w;
+ draw_bar(test, white, x, r_h + w_h, p_w, p_h, picref);
+ x += p_w;
+ draw_bar(test, q_pixel, x, r_h + w_h, p_w, p_h, picref);
+ x += p_w;
+ tmp = FFALIGN(5 * r_w - x, 1 << pixdesc->log2_chroma_w);
+ draw_bar(test, black0, x, r_h + w_h, tmp, p_h, picref);
+ x += tmp;
+ tmp = FFALIGN(r_w / 3, 1 << pixdesc->log2_chroma_w);
+ draw_bar(test, neg4ire, x, r_h + w_h, tmp, p_h, picref);
+ x += tmp;
+ draw_bar(test, black0, x, r_h + w_h, tmp, p_h, picref);
+ x += tmp;
+ draw_bar(test, pos4ire, x, r_h + w_h, tmp, p_h, picref);
+ x += tmp;
+ draw_bar(test, black0, x, r_h + w_h, test->w - x, p_h, picref);
+}
+
+static av_cold int smptebars_init(AVFilterContext *ctx)
+{
+ TestSourceContext *test = ctx->priv;
+
+ test->fill_picture_fn = smptebars_fill_picture;
+ test->draw_once = 1;
+ return init(ctx);
+}
+
+AVFilter ff_vsrc_smptebars = {
+ .name = "smptebars",
+ .description = NULL_IF_CONFIG_SMALL("Generate SMPTE color bars."),
+ .priv_size = sizeof(TestSourceContext),
+ .priv_class = &smptebars_class,
+ .init = smptebars_init,
+ .uninit = uninit,
+ .query_formats = smptebars_query_formats,
+ .inputs = NULL,
+ .outputs = smptebars_outputs,
+};
+
+#endif /* CONFIG_SMPTEBARS_FILTER */
+
+#if CONFIG_SMPTEHDBARS_FILTER
+
+#define smptehdbars_options options
+AVFILTER_DEFINE_CLASS(smptehdbars);
+
+static void smptehdbars_fill_picture(AVFilterContext *ctx, AVFrame *picref)
+{
+ TestSourceContext *test = ctx->priv;
+ int d_w, r_w, r_h, l_w, i, tmp, x = 0, y = 0;
+ const AVPixFmtDescriptor *pixdesc = av_pix_fmt_desc_get(picref->format);
+
+ av_frame_set_colorspace(picref, AVCOL_SPC_BT709);
+
+ d_w = FFALIGN(test->w / 8, 1 << pixdesc->log2_chroma_w);
+ r_h = FFALIGN(test->h * 7 / 12, 1 << pixdesc->log2_chroma_h);
+ draw_bar(test, gray40, x, 0, d_w, r_h, picref);
+ x += d_w;
+
+ r_w = FFALIGN((((test->w + 3) / 4) * 3) / 7, 1 << pixdesc->log2_chroma_w);
+ for (i = 0; i < 7; i++) {
+ draw_bar(test, rainbowhd[i], x, 0, r_w, r_h, picref);
+ x += r_w;
+ }
+ draw_bar(test, gray40, x, 0, test->w - x, r_h, picref);
+ y = r_h;
+ r_h = FFALIGN(test->h / 12, 1 << pixdesc->log2_chroma_h);
+ draw_bar(test, cyan, 0, y, d_w, r_h, picref);
+ x = d_w;
+ draw_bar(test, i_pixel, x, y, r_w, r_h, picref);
+ x += r_w;
+ tmp = r_w * 6;
+ draw_bar(test, rainbowhd[0], x, y, tmp, r_h, picref);
+ x += tmp;
+ l_w = x;
+ draw_bar(test, blue, x, y, test->w - x, r_h, picref);
+ y += r_h;
+ draw_bar(test, yellow, 0, y, d_w, r_h, picref);
+ x = d_w;
+ draw_bar(test, q_pixel, x, y, r_w, r_h, picref);
+ x += r_w;
+
+ for (i = 0; i < tmp; i += 1 << pixdesc->log2_chroma_w) {
+ uint8_t yramp[4] = {0};
+
+ yramp[0] = i * 255 / tmp;
+ yramp[1] = 128;
+ yramp[2] = 128;
+ yramp[3] = 255;
+
+ draw_bar(test, yramp, x, y, 1 << pixdesc->log2_chroma_w, r_h, picref);
+ x += 1 << pixdesc->log2_chroma_w;
+ }
+ draw_bar(test, red, x, y, test->w - x, r_h, picref);
+ y += r_h;
+ draw_bar(test, gray15, 0, y, d_w, test->h - y, picref);
+ x = d_w;
+ tmp = FFALIGN(r_w * 3 / 2, 1 << pixdesc->log2_chroma_w);
+ draw_bar(test, black0, x, y, tmp, test->h - y, picref);
+ x += tmp;
+ tmp = FFALIGN(r_w * 2, 1 << pixdesc->log2_chroma_w);
+ draw_bar(test, white, x, y, tmp, test->h - y, picref);
+ x += tmp;
+ tmp = FFALIGN(r_w * 5 / 6, 1 << pixdesc->log2_chroma_w);
+ draw_bar(test, black0, x, y, tmp, test->h - y, picref);
+ x += tmp;
+ tmp = FFALIGN(r_w / 3, 1 << pixdesc->log2_chroma_w);
+ draw_bar(test, neg2, x, y, tmp, test->h - y, picref);
+ x += tmp;
+ draw_bar(test, black0, x, y, tmp, test->h - y, picref);
+ x += tmp;
+ draw_bar(test, black2, x, y, tmp, test->h - y, picref);
+ x += tmp;
+ draw_bar(test, black0, x, y, tmp, test->h - y, picref);
+ x += tmp;
+ draw_bar(test, black4, x, y, tmp, test->h - y, picref);
+ x += tmp;
+ r_w = l_w - x;
+ draw_bar(test, black0, x, y, r_w, test->h - y, picref);
+ x += r_w;
+ draw_bar(test, gray15, x, y, test->w - x, test->h - y, picref);
+}
+
+static av_cold int smptehdbars_init(AVFilterContext *ctx)
+{
+ TestSourceContext *test = ctx->priv;
+
+ test->fill_picture_fn = smptehdbars_fill_picture;
+ test->draw_once = 1;
+ return init(ctx);
+}
+
+AVFilter ff_vsrc_smptehdbars = {
+ .name = "smptehdbars",
+ .description = NULL_IF_CONFIG_SMALL("Generate SMPTE HD color bars."),
+ .priv_size = sizeof(TestSourceContext),
+ .priv_class = &smptehdbars_class,
+ .init = smptehdbars_init,
+ .uninit = uninit,
+ .query_formats = smptebars_query_formats,
+ .inputs = NULL,
+ .outputs = smptebars_outputs,
+};
+
+#endif /* CONFIG_SMPTEHDBARS_FILTER */
+#endif /* CONFIG_SMPTEBARS_FILTER || CONFIG_SMPTEHDBARS_FILTER */
+
+#if CONFIG_ALLYUV_FILTER
+
+static const AVOption allyuv_options[] = {
+ COMMON_OPTIONS_NOSIZE
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(allyuv);
+
+static void allyuv_fill_picture(AVFilterContext *ctx, AVFrame *frame)
+{
+ const int ys = frame->linesize[0];
+ const int us = frame->linesize[1];
+ const int vs = frame->linesize[2];
+ int x, y, j;
+
+ for (y = 0; y < 4096; y++) {
+ for (x = 0; x < 2048; x++) {
+ frame->data[0][y * ys + x] = ((x / 8) % 256);
+ frame->data[0][y * ys + 4095 - x] = ((x / 8) % 256);
+ }
+
+ for (x = 0; x < 2048; x+=8) {
+ for (j = 0; j < 8; j++) {
+ frame->data[1][vs * y + x + j] = (y%16 + (j % 8) * 16);
+ frame->data[1][vs * y + 4095 - x - j] = (128 + y%16 + (j % 8) * 16);
+ }
+ }
+
+ for (x = 0; x < 4096; x++)
+ frame->data[2][y * us + x] = 256 * y / 4096;
+ }
+}
+
+static av_cold int allyuv_init(AVFilterContext *ctx)
+{
+ TestSourceContext *test = ctx->priv;
+
+ test->w = test->h = 4096;
+ test->draw_once = 1;
+ test->fill_picture_fn = allyuv_fill_picture;
+ return init(ctx);
+}
+
+static int allyuv_query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_GBRP,
+ AV_PIX_FMT_NONE
+ };
+
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static const AVFilterPad avfilter_vsrc_allyuv_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .request_frame = request_frame,
+ .config_props = config_props,
+ },
+ { NULL }
+};
+
+AVFilter ff_vsrc_allyuv = {
+ .name = "allyuv",
+ .description = NULL_IF_CONFIG_SMALL("Generate all yuv colors."),
+ .priv_size = sizeof(TestSourceContext),
+ .priv_class = &allyuv_class,
+ .init = allyuv_init,
+ .uninit = uninit,
+ .query_formats = allyuv_query_formats,
+ .inputs = NULL,
+ .outputs = avfilter_vsrc_allyuv_outputs,
+};
+
+#endif /* CONFIG_ALLYUV_FILTER */
+
+#if CONFIG_ALLRGB_FILTER
+
+static const AVOption allrgb_options[] = {
+ COMMON_OPTIONS_NOSIZE
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(allrgb);
+
+static void allrgb_fill_picture(AVFilterContext *ctx, AVFrame *frame)
+{
+ unsigned x, y;
+ const int linesize = frame->linesize[0];
+ uint8_t *line = frame->data[0];
+
+ for (y = 0; y < 4096; y++) {
+ uint8_t *dst = line;
+
+ for (x = 0; x < 4096; x++) {
+ *dst++ = x;
+ *dst++ = y;
+ *dst++ = (x >> 8) | ((y >> 8) << 4);
+ }
+ line += linesize;
+ }
+}
+
+static av_cold int allrgb_init(AVFilterContext *ctx)
+{
+ TestSourceContext *test = ctx->priv;
+
+ test->w = test->h = 4096;
+ test->draw_once = 1;
+ test->fill_picture_fn = allrgb_fill_picture;
+ return init(ctx);
+}
+
+static int allrgb_config_props(AVFilterLink *outlink)
+{
+ TestSourceContext *test = outlink->src->priv;
+
+ ff_fill_rgba_map(test->rgba_map, outlink->format);
+ return config_props(outlink);
+}
+
+static int allrgb_query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_RGB24, AV_PIX_FMT_NONE
+ };
+
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static const AVFilterPad avfilter_vsrc_allrgb_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .request_frame = request_frame,
+ .config_props = allrgb_config_props,
+ },
+ { NULL }
+};
+
+AVFilter ff_vsrc_allrgb = {
+ .name = "allrgb",
+ .description = NULL_IF_CONFIG_SMALL("Generate all RGB colors."),
+ .priv_size = sizeof(TestSourceContext),
+ .priv_class = &allrgb_class,
+ .init = allrgb_init,
+ .uninit = uninit,
+ .query_formats = allrgb_query_formats,
+ .inputs = NULL,
+ .outputs = avfilter_vsrc_allrgb_outputs,
+};
+
+#endif /* CONFIG_ALLRGB_FILTER */
diff --git a/libavfilter/w3fdif.h b/libavfilter/w3fdif.h
new file mode 100644
index 0000000000..9c0b723a02
--- /dev/null
+++ b/libavfilter/w3fdif.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2015 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_W3FDIF_H
+#define AVFILTER_W3FDIF_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+typedef struct W3FDIFDSPContext {
+ void (*filter_simple_low)(int32_t *work_line,
+ uint8_t *in_lines_cur[2],
+ const int16_t *coef, int linesize);
+ void (*filter_complex_low)(int32_t *work_line,
+ uint8_t *in_lines_cur[4],
+ const int16_t *coef, int linesize);
+ void (*filter_simple_high)(int32_t *work_line,
+ uint8_t *in_lines_cur[3],
+ uint8_t *in_lines_adj[3],
+ const int16_t *coef, int linesize);
+ void (*filter_complex_high)(int32_t *work_line,
+ uint8_t *in_lines_cur[5],
+ uint8_t *in_lines_adj[5],
+ const int16_t *coef, int linesize);
+ void (*filter_scale)(uint8_t *out_pixel, const int32_t *work_pixel,
+ int linesize, int max);
+} W3FDIFDSPContext;
+
+void ff_w3fdif_init_x86(W3FDIFDSPContext *dsp, int depth);
+
+#endif /* AVFILTER_W3FDIF_H */
diff --git a/libavfilter/window_func.c b/libavfilter/window_func.c
new file mode 100644
index 0000000000..acf1b20847
--- /dev/null
+++ b/libavfilter/window_func.c
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 2015 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <math.h>
+
+#include "libavutil/avassert.h"
+#include "window_func.h"
+
+void ff_generate_window_func(float *lut, int N, int win_func, float *overlap)
+{
+ int n;
+
+ switch (win_func) {
+ case WFUNC_RECT:
+ for (n = 0; n < N; n++)
+ lut[n] = 1.;
+ *overlap = 0.;
+ break;
+ case WFUNC_BARTLETT:
+ for (n = 0; n < N; n++)
+ lut[n] = 1.-fabs((n-(N-1)/2.)/((N-1)/2.));
+ *overlap = 0.5;
+ break;
+ case WFUNC_HANNING:
+ for (n = 0; n < N; n++)
+ lut[n] = .5*(1-cos(2*M_PI*n/(N-1)));
+ *overlap = 0.5;
+ break;
+ case WFUNC_HAMMING:
+ for (n = 0; n < N; n++)
+ lut[n] = .54-.46*cos(2*M_PI*n/(N-1));
+ *overlap = 0.5;
+ break;
+ case WFUNC_BLACKMAN:
+ for (n = 0; n < N; n++)
+ lut[n] = .42659-.49656*cos(2*M_PI*n/(N-1))+.076849*cos(4*M_PI*n/(N-1));
+ *overlap = 0.661;
+ break;
+ case WFUNC_WELCH:
+ for (n = 0; n < N; n++)
+ lut[n] = 1.-(n-(N-1)/2.)/((N-1)/2.)*(n-(N-1)/2.)/((N-1)/2.);
+ *overlap = 0.293;
+ break;
+ case WFUNC_FLATTOP:
+ for (n = 0; n < N; n++)
+ lut[n] = 1.-1.985844164102*cos( 2*M_PI*n/(N-1))+1.791176438506*cos( 4*M_PI*n/(N-1))-
+ 1.282075284005*cos( 6*M_PI*n/(N-1))+0.667777530266*cos( 8*M_PI*n/(N-1))-
+ 0.240160796576*cos(10*M_PI*n/(N-1))+0.056656381764*cos(12*M_PI*n/(N-1))-
+ 0.008134974479*cos(14*M_PI*n/(N-1))+0.000624544650*cos(16*M_PI*n/(N-1))-
+ 0.000019808998*cos(18*M_PI*n/(N-1))+0.000000132974*cos(20*M_PI*n/(N-1));
+ *overlap = 0.841;
+ break;
+ case WFUNC_BHARRIS:
+ for (n = 0; n < N; n++)
+ lut[n] = 0.35875-0.48829*cos(2*M_PI*n/(N-1))+0.14128*cos(4*M_PI*n/(N-1))-0.01168*cos(6*M_PI*n/(N-1));
+ *overlap = 0.661;
+ break;
+ case WFUNC_BNUTTALL:
+ for (n = 0; n < N; n++)
+ lut[n] = 0.3635819-0.4891775*cos(2*M_PI*n/(N-1))+0.1365995*cos(4*M_PI*n/(N-1))-0.0106411*cos(6*M_PI*n/(N-1));
+ *overlap = 0.661;
+ break;
+ case WFUNC_BHANN:
+ for (n = 0; n < N; n++)
+ lut[n] = 0.62-0.48*fabs(n/(double)(N-1)-.5)-0.38*cos(2*M_PI*n/(N-1));
+ *overlap = 0.5;
+ break;
+ case WFUNC_SINE:
+ for (n = 0; n < N; n++)
+ lut[n] = sin(M_PI*n/(N-1));
+ *overlap = 0.75;
+ break;
+ case WFUNC_NUTTALL:
+ for (n = 0; n < N; n++)
+ lut[n] = 0.355768-0.487396*cos(2*M_PI*n/(N-1))+0.144232*cos(4*M_PI*n/(N-1))-0.012604*cos(6*M_PI*n/(N-1));
+ *overlap = 0.663;
+ break;
+ case WFUNC_LANCZOS:
+#define SINC(x) (!(x)) ? 1 : sin(M_PI * (x))/(M_PI * (x));
+ for (n = 0; n < N; n++)
+ lut[n] = SINC((2.*n)/(N-1)-1);
+ *overlap = 0.75;
+ break;
+ case WFUNC_GAUSS:
+#define SQR(x) ((x)*(x))
+ for (n = 0; n < N; n++)
+ lut[n] = exp(-0.5 * SQR((n-(N-1)/2)/(0.4*(N-1)/2.f)));
+ *overlap = 0.75;
+ break;
+ case WFUNC_TUKEY:
+ for (n = 0; n < N; n++) {
+ float M = (N-1)/2.;
+
+ if (FFABS(n - M) >= 0.3 * M) {
+ lut[n] = 0.5 * (1 + cos((M_PI*(FFABS(n - M) - 0.3 * M))/((1 - 0.3) * M)));
+ } else {
+ lut[n] = 1;
+ }
+ }
+ *overlap = 0.33;
+ break;
+ case WFUNC_DOLPH: {
+ double b = cosh(7.6009022095419887 / (N-1)), sum, t, c, norm = 0;
+ int j;
+ for (c = 1 - 1 / (b*b), n = (N-1) / 2; n >= 0; --n) {
+ for (sum = !n, b = t = j = 1; j <= n && sum != t; b *= (n-j) * (1./j), ++j)
+ t = sum, sum += (b *= c * (N - n - j) * (1./j));
+ sum /= (N - 1 - n), sum /= (norm = norm ? norm : sum);
+ lut[n] = sum;
+ lut[N - 1 - n] = sum;
+ }
+ *overlap = 0.5;}
+ break;
+ case WFUNC_CAUCHY:
+ for (n = 0; n < N; n++) {
+ double x = 2 * ((n / (double)(N - 1)) - .5);
+
+ if (x <= -.5 || x >= .5) {
+ lut[n] = 0;
+ } else {
+ lut[n] = FFMIN(1, fabs(1/(1+4*16*x*x)));
+ }
+ }
+ *overlap = 0.75;
+ break;
+ case WFUNC_PARZEN:
+ for (n = 0; n < N; n++) {
+ double x = 2 * ((n / (double)(N - 1)) - .5);
+
+ if (x > 0.25 && x <= 0.5) {
+ lut[n] = -2 * powf(-1 + 2 * x, 3);
+ } else if (x >= -.5 && x < -.25) {
+ lut[n] = 2 * powf(1 + 2 * x, 3);
+ } else if (x >= -.25 && x < 0) {
+ lut[n] = 1 - 24 * x * x - 48 * x * x * x;
+ } else if (x >= 0 && x <= .25) {
+ lut[n] = 1 - 24 * x * x + 48 * x * x * x;
+ } else {
+ lut[n] = 0;
+ }
+ }
+ *overlap = 0.75;
+ break;
+ case WFUNC_POISSON:
+ for (n = 0; n < N; n++) {
+ double x = 2 * ((n / (double)(N - 1)) - .5);
+
+ if (x >= 0 && x <= .5) {
+ lut[n] = exp(-6*x);
+ } else if (x < 0 && x >= -.5) {
+ lut[n] = exp(6*x);
+ } else {
+ lut[n] = 0;
+ }
+ }
+ *overlap = 0.75;
+ break;
+ default:
+ av_assert0(0);
+ }
+}
diff --git a/libavfilter/window_func.h b/libavfilter/window_func.h
new file mode 100644
index 0000000000..4611498d47
--- /dev/null
+++ b/libavfilter/window_func.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2015 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+
+#ifndef AVFILTER_WINDOW_FUNC_H
+#define AVFILTER_WINDOW_FUNC_H
+
+enum WindowFunc { WFUNC_RECT, WFUNC_HANNING, WFUNC_HAMMING, WFUNC_BLACKMAN,
+ WFUNC_BARTLETT, WFUNC_WELCH, WFUNC_FLATTOP,
+ WFUNC_BHARRIS, WFUNC_BNUTTALL, WFUNC_SINE, WFUNC_NUTTALL,
+ WFUNC_BHANN, WFUNC_LANCZOS, WFUNC_GAUSS, WFUNC_TUKEY,
+ WFUNC_DOLPH, WFUNC_CAUCHY, WFUNC_PARZEN, WFUNC_POISSON,
+ NB_WFUNC };
+
+void ff_generate_window_func(float *lut, int N, int win_func, float *overlap);
+
+#endif /* AVFILTER_WINDOW_FUNC_H */
diff --git a/libavfilter/x86/Makefile b/libavfilter/x86/Makefile
index 13b5d318ec..b6195f84c4 100644
--- a/libavfilter/x86/Makefile
+++ b/libavfilter/x86/Makefile
@@ -1,11 +1,48 @@
+OBJS-$(CONFIG_BLEND_FILTER) += x86/vf_blend_init.o
+OBJS-$(CONFIG_BWDIF_FILTER) += x86/vf_bwdif_init.o
+OBJS-$(CONFIG_COLORSPACE_FILTER) += x86/colorspacedsp_init.o
+OBJS-$(CONFIG_EQ_FILTER) += x86/vf_eq.o
+OBJS-$(CONFIG_FSPP_FILTER) += x86/vf_fspp_init.o
OBJS-$(CONFIG_GRADFUN_FILTER) += x86/vf_gradfun_init.o
OBJS-$(CONFIG_HQDN3D_FILTER) += x86/vf_hqdn3d_init.o
+OBJS-$(CONFIG_IDET_FILTER) += x86/vf_idet_init.o
OBJS-$(CONFIG_INTERLACE_FILTER) += x86/vf_interlace_init.o
+OBJS-$(CONFIG_MASKEDMERGE_FILTER) += x86/vf_maskedmerge_init.o
+OBJS-$(CONFIG_NOISE_FILTER) += x86/vf_noise.o
+OBJS-$(CONFIG_PP7_FILTER) += x86/vf_pp7_init.o
+OBJS-$(CONFIG_PSNR_FILTER) += x86/vf_psnr_init.o
+OBJS-$(CONFIG_PULLUP_FILTER) += x86/vf_pullup_init.o
+OBJS-$(CONFIG_REMOVEGRAIN_FILTER) += x86/vf_removegrain_init.o
+OBJS-$(CONFIG_SHOWCQT_FILTER) += x86/avf_showcqt_init.o
+OBJS-$(CONFIG_SPP_FILTER) += x86/vf_spp.o
+OBJS-$(CONFIG_SSIM_FILTER) += x86/vf_ssim_init.o
+OBJS-$(CONFIG_STEREO3D_FILTER) += x86/vf_stereo3d_init.o
+OBJS-$(CONFIG_TBLEND_FILTER) += x86/vf_blend_init.o
+OBJS-$(CONFIG_TINTERLACE_FILTER) += x86/vf_tinterlace_init.o
OBJS-$(CONFIG_VOLUME_FILTER) += x86/af_volume_init.o
+OBJS-$(CONFIG_W3FDIF_FILTER) += x86/vf_w3fdif_init.o
OBJS-$(CONFIG_YADIF_FILTER) += x86/vf_yadif_init.o
+YASM-OBJS-$(CONFIG_BLEND_FILTER) += x86/vf_blend.o
+YASM-OBJS-$(CONFIG_BWDIF_FILTER) += x86/vf_bwdif.o
+YASM-OBJS-$(CONFIG_COLORSPACE_FILTER) += x86/colorspacedsp.o
+YASM-OBJS-$(CONFIG_FSPP_FILTER) += x86/vf_fspp.o
YASM-OBJS-$(CONFIG_GRADFUN_FILTER) += x86/vf_gradfun.o
YASM-OBJS-$(CONFIG_HQDN3D_FILTER) += x86/vf_hqdn3d.o
+YASM-OBJS-$(CONFIG_IDET_FILTER) += x86/vf_idet.o
YASM-OBJS-$(CONFIG_INTERLACE_FILTER) += x86/vf_interlace.o
+YASM-OBJS-$(CONFIG_MASKEDMERGE_FILTER) += x86/vf_maskedmerge.o
+YASM-OBJS-$(CONFIG_PP7_FILTER) += x86/vf_pp7.o
+YASM-OBJS-$(CONFIG_PSNR_FILTER) += x86/vf_psnr.o
+YASM-OBJS-$(CONFIG_PULLUP_FILTER) += x86/vf_pullup.o
+ifdef CONFIG_GPL
+YASM-OBJS-$(CONFIG_REMOVEGRAIN_FILTER) += x86/vf_removegrain.o
+endif
+YASM-OBJS-$(CONFIG_SHOWCQT_FILTER) += x86/avf_showcqt.o
+YASM-OBJS-$(CONFIG_SSIM_FILTER) += x86/vf_ssim.o
+YASM-OBJS-$(CONFIG_STEREO3D_FILTER) += x86/vf_stereo3d.o
+YASM-OBJS-$(CONFIG_TBLEND_FILTER) += x86/vf_blend.o
+YASM-OBJS-$(CONFIG_TINTERLACE_FILTER) += x86/vf_interlace.o
YASM-OBJS-$(CONFIG_VOLUME_FILTER) += x86/af_volume.o
-YASM-OBJS-$(CONFIG_YADIF_FILTER) += x86/vf_yadif.o
+YASM-OBJS-$(CONFIG_W3FDIF_FILTER) += x86/vf_w3fdif.o
+YASM-OBJS-$(CONFIG_YADIF_FILTER) += x86/vf_yadif.o x86/yadif-16.o x86/yadif-10.o
diff --git a/libavfilter/x86/af_volume.asm b/libavfilter/x86/af_volume.asm
index 25ba9234e5..723ab1f8fb 100644
--- a/libavfilter/x86/af_volume.asm
+++ b/libavfilter/x86/af_volume.asm
@@ -2,20 +2,20 @@
;* x86-optimized functions for volume filter
;* Copyright (c) 2012 Justin Ruggles <justin.ruggles@gmail.com>
;*
-;* This file is part of Libav.
+;* This file is part of FFmpeg.
;*
-;* Libav is free software; you can redistribute it and/or
+;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
-;* Libav is distributed in the hope that it will be useful,
+;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
-;* License along with Libav; if not, write to the Free Software
+;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
@@ -99,9 +99,11 @@ cglobal scale_samples_s32, 4,4,4, dst, src, len, volume
INIT_XMM sse2
%define CVTDQ2PD cvtdq2pd
SCALE_SAMPLES_S32
+%if HAVE_AVX_EXTERNAL
%define CVTDQ2PD vcvtdq2pd
INIT_YMM avx
SCALE_SAMPLES_S32
+%endif
%undef CVTDQ2PD
; NOTE: This is not bit-identical with the C version because it clips to
diff --git a/libavfilter/x86/af_volume_init.c b/libavfilter/x86/af_volume_init.c
index 26605fb2ce..88f5a9679a 100644
--- a/libavfilter/x86/af_volume_init.c
+++ b/libavfilter/x86/af_volume_init.c
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/libavfilter/x86/avf_showcqt.asm b/libavfilter/x86/avf_showcqt.asm
new file mode 100644
index 0000000000..63e58408cd
--- /dev/null
+++ b/libavfilter/x86/avf_showcqt.asm
@@ -0,0 +1,192 @@
+;*****************************************************************************
+;* x86-optimized functions for showcqt filter
+;*
+;* Copyright (C) 2016 Muhammad Faiz <mfcc64@gmail.com>
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+%include "libavutil/x86/x86util.asm"
+
+%if ARCH_X86_64
+%define pointer resq
+%else
+%define pointer resd
+%endif
+
+struc Coeffs
+ .val: pointer 1
+ .start: resd 1
+ .len: resd 1
+ .sizeof:
+endstruc
+
+%macro CQT_CALC 9
+; %1 = a_re, %2 = a_im, %3 = b_re, %4 = b_im
+; %5 = m_re, %6 = m_im, %7 = tmp, %8 = coeffval, %9 = coeffsq_offset
+ mov id, xd
+ add id, [coeffsq + Coeffs.start + %9]
+ movaps m%5, [srcq + 8 * iq]
+ movaps m%7, [srcq + 8 * iq + mmsize]
+ shufps m%6, m%5, m%7, q3131
+ shufps m%5, m%5, m%7, q2020
+ sub id, fft_lend
+ FMULADD_PS m%2, m%6, m%8, m%2, m%6
+ neg id
+ FMULADD_PS m%1, m%5, m%8, m%1, m%5
+ movups m%5, [srcq + 8 * iq - mmsize + 8]
+ movups m%7, [srcq + 8 * iq - 2*mmsize + 8]
+ %if mmsize == 32
+ vperm2f128 m%5, m%5, m%5, 1
+ vperm2f128 m%7, m%7, m%7, 1
+ %endif
+ shufps m%6, m%5, m%7, q1313
+ shufps m%5, m%5, m%7, q0202
+ FMULADD_PS m%4, m%6, m%8, m%4, m%6
+ FMULADD_PS m%3, m%5, m%8, m%3, m%5
+%endmacro ; CQT_CALC
+
+%macro CQT_SEPARATE 6 ; a_re, a_im, b_re, b_im, tmp, tmp2
+ addps m%5, m%4, m%2
+ subps m%6, m%3, m%1
+ addps m%1, m%1, m%3
+ subps m%2, m%2, m%4
+ HADDPS m%5, m%6, m%3
+ HADDPS m%1, m%2, m%3
+ HADDPS m%1, m%5, m%2
+ %if mmsize == 32
+ vextractf128 xmm%2, m%1, 1
+ addps xmm%1, xmm%2
+ %endif
+%endmacro ; CQT_SEPARATE
+
+%macro DECLARE_CQT_CALC 0
+; ff_showcqt_cqt_calc_*(dst, src, coeffs, len, fft_len)
+%if ARCH_X86_64
+cglobal showcqt_cqt_calc, 5, 10, 12, dst, src, coeffs, len, fft_len, x, coeffs_val, coeffs_val2, i, coeffs_len
+ align 16
+ .loop_k:
+ mov xd, [coeffsq + Coeffs.len]
+ xorps m0, m0, m0
+ movaps m1, m0
+ movaps m2, m0
+ mov coeffs_lend, [coeffsq + Coeffs.len + Coeffs.sizeof]
+ movaps m3, m0
+ movaps m8, m0
+ cmp coeffs_lend, xd
+ movaps m9, m0
+ movaps m10, m0
+ movaps m11, m0
+ cmova coeffs_lend, xd
+ xor xd, xd
+ test coeffs_lend, coeffs_lend
+ jz .check_loop_b
+ mov coeffs_valq, [coeffsq + Coeffs.val]
+ mov coeffs_val2q, [coeffsq + Coeffs.val + Coeffs.sizeof]
+ align 16
+ .loop_ab:
+ movaps m7, [coeffs_valq + 4 * xq]
+ CQT_CALC 0, 1, 2, 3, 4, 5, 6, 7, 0
+ movaps m7, [coeffs_val2q + 4 * xq]
+ CQT_CALC 8, 9, 10, 11, 4, 5, 6, 7, Coeffs.sizeof
+ add xd, mmsize/4
+ cmp xd, coeffs_lend
+ jb .loop_ab
+ .check_loop_b:
+ cmp xd, [coeffsq + Coeffs.len + Coeffs.sizeof]
+ jae .check_loop_a
+ align 16
+ .loop_b:
+ movaps m7, [coeffs_val2q + 4 * xq]
+ CQT_CALC 8, 9, 10, 11, 4, 5, 6, 7, Coeffs.sizeof
+ add xd, mmsize/4
+ cmp xd, [coeffsq + Coeffs.len + Coeffs.sizeof]
+ jb .loop_b
+ .loop_end:
+ CQT_SEPARATE 0, 1, 2, 3, 4, 5
+ CQT_SEPARATE 8, 9, 10, 11, 4, 5
+ mulps xmm0, xmm0
+ mulps xmm8, xmm8
+ HADDPS xmm0, xmm8, xmm1
+ movaps [dstq], xmm0
+ sub lend, 2
+ lea dstq, [dstq + 16]
+ lea coeffsq, [coeffsq + 2*Coeffs.sizeof]
+ jnz .loop_k
+ REP_RET
+ align 16
+ .check_loop_a:
+ cmp xd, [coeffsq + Coeffs.len]
+ jae .loop_end
+ align 16
+ .loop_a:
+ movaps m7, [coeffs_valq + 4 * xq]
+ CQT_CALC 0, 1, 2, 3, 4, 5, 6, 7, 0
+ add xd, mmsize/4
+ cmp xd, [coeffsq + Coeffs.len]
+ jb .loop_a
+ jmp .loop_end
+%else
+cglobal showcqt_cqt_calc, 4, 7, 8, dst, src, coeffs, len, x, coeffs_val, i
+%define fft_lend r4m
+ align 16
+ .loop_k:
+ mov xd, [coeffsq + Coeffs.len]
+ xorps m0, m0, m0
+ movaps m1, m0
+ movaps m2, m0
+ movaps m3, m0
+ test xd, xd
+ jz .store
+ mov coeffs_valq, [coeffsq + Coeffs.val]
+ xor xd, xd
+ align 16
+ .loop_x:
+ movaps m7, [coeffs_valq + 4 * xq]
+ CQT_CALC 0, 1, 2, 3, 4, 5, 6, 7, 0
+ add xd, mmsize/4
+ cmp xd, [coeffsq + Coeffs.len]
+ jb .loop_x
+ CQT_SEPARATE 0, 1, 2, 3, 4, 5
+ mulps xmm0, xmm0
+ HADDPS xmm0, xmm0, xmm1
+ .store:
+ movlps [dstq], xmm0
+ sub lend, 1
+ lea dstq, [dstq + 8]
+ lea coeffsq, [coeffsq + Coeffs.sizeof]
+ jnz .loop_k
+ REP_RET
+%endif ; ARCH_X86_64
+%endmacro ; DECLARE_CQT_CALC
+
+INIT_XMM sse
+DECLARE_CQT_CALC
+INIT_XMM sse3
+DECLARE_CQT_CALC
+%if HAVE_AVX_EXTERNAL
+INIT_YMM avx
+DECLARE_CQT_CALC
+%endif
+%if HAVE_FMA3_EXTERNAL
+INIT_YMM fma3
+DECLARE_CQT_CALC
+%endif
+%if HAVE_FMA4_EXTERNAL
+INIT_XMM fma4
+DECLARE_CQT_CALC
+%endif
diff --git a/libavfilter/x86/avf_showcqt_init.c b/libavfilter/x86/avf_showcqt_init.c
new file mode 100644
index 0000000000..0cc164c352
--- /dev/null
+++ b/libavfilter/x86/avf_showcqt_init.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2016 Muhammad Faiz <mfcc64@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/attributes.h"
+#include "libavutil/cpu.h"
+#include "libavutil/x86/cpu.h"
+#include "libavfilter/avf_showcqt.h"
+
+#define DECLARE_CQT_CALC(type) \
+void ff_showcqt_cqt_calc_##type(FFTComplex *dst, const FFTComplex *src, \
+ const Coeffs *coeffs, int len, int fft_len)
+
+DECLARE_CQT_CALC(sse);
+DECLARE_CQT_CALC(sse3);
+DECLARE_CQT_CALC(avx);
+DECLARE_CQT_CALC(fma3);
+DECLARE_CQT_CALC(fma4);
+
+#define permute_coeffs_0 NULL
+
+static void permute_coeffs_01452367(float *v, int len)
+{
+ int k;
+ for (k = 0; k < len; k += 8) {
+ FFSWAP(float, v[k+2], v[k+4]);
+ FFSWAP(float, v[k+3], v[k+5]);
+ }
+}
+
+av_cold void ff_showcqt_init_x86(ShowCQTContext *s)
+{
+ int cpuflags = av_get_cpu_flags();
+
+#define SELECT_CQT_CALC(type, TYPE, align, perm) \
+if (EXTERNAL_##TYPE(cpuflags)) { \
+ s->cqt_calc = ff_showcqt_cqt_calc_##type; \
+ s->cqt_align = align; \
+ s->permute_coeffs = permute_coeffs_##perm; \
+}
+
+ SELECT_CQT_CALC(sse, SSE, 4, 0);
+ SELECT_CQT_CALC(sse3, SSE3_FAST, 4, 0);
+ SELECT_CQT_CALC(fma4, FMA4, 4, 0); // using xmm
+ SELECT_CQT_CALC(avx, AVX_FAST, 8, 01452367);
+ SELECT_CQT_CALC(fma3, FMA3_FAST, 8, 01452367);
+}
diff --git a/libavfilter/x86/colorspacedsp.asm b/libavfilter/x86/colorspacedsp.asm
new file mode 100644
index 0000000000..67d851abf4
--- /dev/null
+++ b/libavfilter/x86/colorspacedsp.asm
@@ -0,0 +1,1097 @@
+;*****************************************************************************
+;* x86-optimized functions for colorspace filter
+;*
+;* Copyright (C) 2016 Ronald S. Bultje <rsbultje@gmail.com>
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+%include "libavutil/x86/x86util.asm"
+
+SECTION_RODATA
+
+pw_1: times 8 dw 1
+pw_2: times 8 dw 2
+pw_4: times 8 dw 4
+pw_8: times 8 dw 8
+pw_16: times 8 dw 16
+pw_64: times 8 dw 64
+pw_128: times 8 dw 128
+pw_256: times 8 dw 256
+pw_512: times 8 dw 512
+pw_1023: times 8 dw 1023
+pw_1024: times 8 dw 1024
+pw_2048: times 8 dw 2048
+pw_4095: times 8 dw 4095
+pw_8192: times 8 dw 8192
+pw_16384: times 8 dw 16384
+
+pd_1: times 4 dd 1
+pd_2: times 4 dd 2
+pd_128: times 4 dd 128
+pd_512: times 4 dd 512
+pd_2048: times 4 dd 2048
+pd_8192: times 4 dd 8192
+pd_32768: times 4 dd 32768
+pd_131072: times 4 dd 131072
+
+SECTION .text
+
+; void ff_yuv2yuv_420p8to8_sse2(uint8_t *yuv_out[3], ptrdiff_t yuv_out_stride[3],
+; uint8_t *yuv_in[3], ptrdiff_t yuv_in_stride[3],
+; int w, int h, const int16_t yuv2yuv_coeffs[3][3][8],
+; const int16_t yuv_offset[2][8])
+
+%if ARCH_X86_64
+%macro YUV2YUV_FN 4 ; in_bitdepth, out_bitdepth, log2_chroma_w (horiz), log2_chroma_h (vert)
+
+%assign %%sh (14 + %1 - %2)
+%assign %%rnd (1 << (%%sh - 1))
+%assign %%uvinoff (128 << (%1 - 8))
+%assign %%uvoutoff (128 << (%2 - 8))
+%if %3 == 0
+%assign %%ss 444
+%elif %4 == 0
+%assign %%ss 422
+%else ; %4 == 1
+%assign %%ss 420
+%endif ; %3/%4
+%if %2 != 8
+%assign %%maxval (1 << %2) - 1
+%endif ; %2 != 8
+
+%assign %%ypsh %%sh - 1
+%if %%ypsh > 14
+%assign %%yoffsh %%ypsh - 13
+%assign %%ypsh 14
+%else
+%assign %%yoffsh 1
+%endif
+%assign %%yprnd (1 << (%%yoffsh - 1))
+%assign %%ypmul (1 << %%ypsh)
+
+cglobal yuv2yuv_ %+ %%ss %+ p%1to%2, 8, 14, 16, 0 - (4 * mmsize), \
+ yo, yos, yi, yis, w, h, c, yoff, ui, vi, uo, vo
+%if %3 == 1
+ inc wd
+ sar wd, 1
+%if %4 == 1
+ inc hd
+ sar hd, 1
+%endif ; %4 == 1
+%endif ; %3 == 1
+ mov [rsp+3*mmsize+0], wd
+ mov [rsp+3*mmsize+4], hd
+
+ mova m10, [cq]
+ pxor m11, m11
+ mova m12, [pd_ %+ %%uvoutoff]
+ pslld m12, %%sh
+ paddd m12, [pd_ %+ %%rnd]
+ mova m13, [pw_ %+ %%uvinoff]
+ mova m14, [yoffq+ 0] ; y_off_in
+ mova m15, [yoffq+16] ; y_off_out
+%if %%yoffsh != 0
+ psllw m15, %%yoffsh
+%endif
+ paddw m15, [pw_ %+ %%yprnd]
+ punpcklwd m10, m15
+ mova m15, [pw_ %+ %%ypmul]
+ movh m0, [cq+1*16] ; cyu
+ movh m1, [cq+2*16] ; cyv
+ movh m2, [cq+4*16] ; cuu
+ movh m3, [cq+5*16] ; cuv
+ movh m4, [cq+7*16] ; cvu
+ movh m5, [cq+8*16] ; cvv
+ punpcklwd m0, m1
+ punpcklwd m2, m3
+ punpcklwd m4, m5
+ mova [rsp+0*mmsize], m0
+ mova [rsp+1*mmsize], m2
+ mova [rsp+2*mmsize], m4
+
+ DEFINE_ARGS yo, yos, yi, yis, ui, vi, uo, vo, uis, vis, uos, vos, x, tmp
+
+ mov uiq, [yiq+gprsize*1]
+ mov viq, [yiq+gprsize*2]
+ mov yiq, [yiq+gprsize*0]
+ mov uoq, [yoq+gprsize*1]
+ mov voq, [yoq+gprsize*2]
+ mov yoq, [yoq+gprsize*0]
+ mov uisq, [yisq+gprsize*1]
+ mov visq, [yisq+gprsize*2]
+ mov yisq, [yisq+gprsize*0]
+ mov uosq, [yosq+gprsize*1]
+ mov vosq, [yosq+gprsize*2]
+ mov yosq, [yosq+gprsize*0]
+
+.loop_v:
+ xor xq, xq
+
+.loop_h:
+%if %4 == 1
+ lea tmpq, [yiq+yisq]
+%endif ; %4 == 1
+%if %1 == 8
+ movu m0, [yiq+xq*(1<<%3)] ; y00/01
+%if %4 == 1
+ movu m2, [tmpq+xq*2] ; y10/11
+%endif ; %4 == 1
+%if %3 == 1
+ movh m4, [uiq+xq] ; u
+ movh m5, [viq+xq] ; v
+%else ; %3 != 1
+ movu m4, [uiq+xq] ; u
+ movu m5, [viq+xq] ; v
+%endif ; %3 ==/!= 1
+ punpckhbw m1, m0, m11
+ punpcklbw m0, m11
+%if %4 == 1
+ punpckhbw m3, m2, m11
+ punpcklbw m2, m11
+%endif ; %4 == 1
+%if %3 == 0
+ punpckhbw m2, m4, m11
+ punpckhbw m3, m5, m11
+%endif ; %3 == 0
+ punpcklbw m4, m11
+ punpcklbw m5, m11
+%else ; %1 != 8
+ movu m0, [yiq+xq*(2<<%3)] ; y00/01
+ movu m1, [yiq+xq*(2<<%3)+mmsize] ; y00/01
+%if %4 == 1
+ movu m2, [tmpq+xq*4] ; y10/11
+ movu m3, [tmpq+xq*4+mmsize] ; y10/11
+%endif ; %4 == 1
+ movu m4, [uiq+xq*2] ; u
+ movu m5, [viq+xq*2] ; v
+%if %3 == 0
+ movu m2, [uiq+xq*2+mmsize]
+ movu m3, [viq+xq*2+mmsize]
+%endif ; %3 == 0
+%endif ; %1 ==/!= 8
+ psubw m0, m14
+ psubw m1, m14
+%if %4 == 1
+ psubw m2, m14
+ psubw m3, m14
+%endif ; %4 == 1
+ psubw m4, m13
+ psubw m5, m13
+%if %3 == 0
+ psubw m2, m13
+ psubw m3, m13
+%endif ; %3 == 0
+
+ SBUTTERFLY wd, 4, 5, 6
+ pmaddwd m6, m4, [rsp+1*mmsize]
+ pmaddwd m7, m5, [rsp+1*mmsize]
+%if %3 == 0
+ SBUTTERFLY wd, 2, 3, 8
+ pmaddwd m8, m2, [rsp+1*mmsize]
+ pmaddwd m9, m3, [rsp+1*mmsize]
+%else ; %3 != 0
+ pmaddwd m8, m4, [rsp+2*mmsize]
+ pmaddwd m9, m5, [rsp+2*mmsize]
+%endif
+ paddd m6, m12
+ paddd m7, m12
+ paddd m8, m12
+ paddd m9, m12
+ psrad m6, %%sh
+ psrad m7, %%sh
+ psrad m8, %%sh
+ psrad m9, %%sh
+ packssdw m6, m7
+ packssdw m8, m9
+%if %2 == 8
+ packuswb m6, m8
+%if %3 == 0
+ movu [uoq+xq], m6
+%else ; %3 != 0
+ movh [uoq+xq], m6
+ movhps [voq+xq], m6
+%endif ; %3 ==/!= 0
+%else ; %2 != 8
+ CLIPW m6, m11, [pw_ %+ %%maxval]
+ CLIPW m8, m11, [pw_ %+ %%maxval]
+ movu [uoq+xq*2], m6
+%if %3 == 0
+ movu [uoq+xq*2+mmsize], m8
+%else ; %3 != 0
+ movu [voq+xq*2], m8
+%endif ; %3 ==/!= 0
+%endif ; %2 ==/!= 8
+
+%if %3 == 0
+ pmaddwd m6, m4, [rsp+2*mmsize]
+ pmaddwd m7, m5, [rsp+2*mmsize]
+ pmaddwd m8, m2, [rsp+2*mmsize]
+ pmaddwd m9, m3, [rsp+2*mmsize]
+ paddd m6, m12
+ paddd m7, m12
+ paddd m8, m12
+ paddd m9, m12
+ psrad m6, %%sh
+ psrad m7, %%sh
+ psrad m8, %%sh
+ psrad m9, %%sh
+ packssdw m6, m7
+ packssdw m8, m9
+%if %2 == 8
+ packuswb m6, m8
+ movu [voq+xq], m6
+%else ; %2 != 8
+ CLIPW m6, m11, [pw_ %+ %%maxval]
+ CLIPW m8, m11, [pw_ %+ %%maxval]
+ movu [voq+xq*2], m6
+ movu [voq+xq*2+mmsize], m8
+%endif ; %2 ==/!= 8
+%endif ; %3 == 0
+
+ pmaddwd m4, [rsp+0*mmsize]
+ pmaddwd m5, [rsp+0*mmsize] ; uv_val
+%if %3 == 0
+ pmaddwd m2, [rsp+0*mmsize]
+ pmaddwd m3, [rsp+0*mmsize]
+%endif ; %3 == 0
+
+ ; unpack y pixels with m15 (shifted round + offset), then multiply
+ ; by m10, add uv pixels, and we're done!
+%if %3 == 1
+ punpckhdq m8, m4, m4
+ punpckldq m4, m4
+ punpckhdq m9, m5, m5
+ punpckldq m5, m5
+%else ; %3 != 1
+ SWAP 8, 5, 2
+ SWAP 3, 9
+%endif ; %3 ==/!= 1
+%if %4 == 1
+ punpckhwd m6, m2, m15
+ punpcklwd m2, m15
+ punpckhwd m7, m3, m15
+ punpcklwd m3, m15
+ pmaddwd m2, m10
+ pmaddwd m6, m10
+ pmaddwd m3, m10
+ pmaddwd m7, m10
+ paddd m2, m4
+ paddd m6, m8
+ paddd m3, m5
+ paddd m7, m9
+ psrad m2, %%sh
+ psrad m6, %%sh
+ psrad m3, %%sh
+ psrad m7, %%sh
+ packssdw m2, m6
+ packssdw m3, m7
+
+ lea tmpq, [yoq+yosq]
+%if %2 == 8
+ packuswb m2, m3
+ movu [tmpq+xq*2], m2
+%else ; %2 != 8
+ CLIPW m2, m11, [pw_ %+ %%maxval]
+ CLIPW m3, m11, [pw_ %+ %%maxval]
+ movu [tmpq+xq*4], m2
+ movu [tmpq+xq*4+mmsize], m3
+%endif ; %2 ==/!= 8
+%endif ; %4 == 1
+
+ punpckhwd m6, m0, m15
+ punpcklwd m0, m15
+ punpckhwd m7, m1, m15
+ punpcklwd m1, m15
+ pmaddwd m0, m10
+ pmaddwd m6, m10
+ pmaddwd m1, m10
+ pmaddwd m7, m10
+ paddd m0, m4
+ paddd m6, m8
+ paddd m1, m5
+ paddd m7, m9
+ psrad m0, %%sh
+ psrad m6, %%sh
+ psrad m1, %%sh
+ psrad m7, %%sh
+ packssdw m0, m6
+ packssdw m1, m7
+
+%if %2 == 8
+ packuswb m0, m1
+ movu [yoq+xq*(1<<%3)], m0
+%else ; %2 != 8
+ CLIPW m0, m11, [pw_ %+ %%maxval]
+ CLIPW m1, m11, [pw_ %+ %%maxval]
+ movu [yoq+xq*(2<<%3)], m0
+ movu [yoq+xq*(2<<%3)+mmsize], m1
+%endif ; %2 ==/!= 8
+
+ add xq, mmsize >> %3
+ cmp xd, dword [rsp+3*mmsize+0]
+ jl .loop_h
+
+%if %4 == 1
+ lea yiq, [yiq+yisq*2]
+ lea yoq, [yoq+yosq*2]
+%else ; %4 != 1
+ add yiq, yisq
+ add yoq, yosq
+%endif ; %4 ==/!= 1
+ add uiq, uisq
+ add viq, visq
+ add uoq, uosq
+ add voq, vosq
+ dec dword [rsp+3*mmsize+4]
+ jg .loop_v
+
+ RET
+%endmacro
+
+%macro YUV2YUV_FNS 2 ; ss_w, ss_h
+YUV2YUV_FN 8, 8, %1, %2
+YUV2YUV_FN 10, 8, %1, %2
+YUV2YUV_FN 12, 8, %1, %2
+YUV2YUV_FN 8, 10, %1, %2
+YUV2YUV_FN 10, 10, %1, %2
+YUV2YUV_FN 12, 10, %1, %2
+YUV2YUV_FN 8, 12, %1, %2
+YUV2YUV_FN 10, 12, %1, %2
+YUV2YUV_FN 12, 12, %1, %2
+%endmacro
+
+INIT_XMM sse2
+YUV2YUV_FNS 0, 0
+YUV2YUV_FNS 1, 0
+YUV2YUV_FNS 1, 1
+
+; void ff_yuv2rgb_420p8_sse2(int16_t *rgb[3], ptrdiff_t rgb_stride,
+; uint8_t *yuv[3], ptrdiff_t yuv_stride[3],
+; int w, int h, const int16_t yuv2rgb_coeffs[3][3][8],
+; const int16_t yuv_offset[8])
+%macro YUV2RGB_FN 3 ; depth, log2_chroma_w (horiz), log2_chroma_h (vert)
+%assign %%sh (%1 - 1)
+%assign %%rnd (1 << (%%sh - 1))
+%assign %%uvoff (1 << (%1 - 1))
+%if %2 == 0
+%assign %%ss 444
+%elif %3 == 0
+%assign %%ss 422
+%else ; %3 == 1
+%assign %%ss 420
+%endif ; %2/%3
+
+cglobal yuv2rgb_ %+ %%ss %+ p%1, 8, 14, 16, 0 - 8 * mmsize, \
+ rgb, rgbs, yuv, yuvs, ww, h, c, yoff
+%if %2 == 1
+ inc wwd
+ sar wwd, 1
+%endif ; %2 == 1
+%if %3 == 1
+ inc hd
+ sar hd, 1
+%endif ; %3 == 1
+ pxor m11, m11
+ mova m15, [yoffq] ; yoff
+ movh m14, [cq+ 0] ; cy
+ movh m10, [cq+ 32] ; crv
+ movh m13, [cq+112] ; cbu
+ movh m12, [cq+ 64] ; cgu
+ movh m9, [cq+ 80] ; cgv
+ punpcklwd m14, [pw_ %+ %%rnd] ; cy, rnd
+ punpcklwd m13, m11 ; cbu, 0
+ punpcklwd m11, m10 ; 0, crv
+ punpcklwd m12, m9 ; cgu, cgv
+ mova [rsp+0*mmsize], m11
+ mova [rsp+1*mmsize], m12
+ mova [rsp+2*mmsize], m13
+ mova [rsp+3*mmsize], m14
+ pxor m14, m14
+
+ DEFINE_ARGS r, rgbs, y, ys, ww, h, g, b, u, v, us, vs, x, tmp
+
+ mov gq, [rq+1*gprsize]
+ mov bq, [rq+2*gprsize]
+ mov rq, [rq+0*gprsize]
+ mov uq, [yq+1*gprsize]
+ mov vq, [yq+2*gprsize]
+ mov yq, [yq+0*gprsize]
+ mov usq, [ysq+1*gprsize]
+ mov vsq, [ysq+2*gprsize]
+ mov ysq, [ysq+0*gprsize]
+
+.loop_v:
+ xor xq, xq
+
+.loop_h:
+%if %3 == 1
+ lea tmpq, [yq+ysq]
+%endif ; %3 == 1
+%if %1 == 8
+ movu m0, [yq+xq*(1<<%2)]
+%if %3 == 1
+ movu m2, [tmpq+xq*2]
+%endif ; %3 == 1
+%if %2 == 1
+ movh m4, [uq+xq]
+ movh m5, [vq+xq]
+%else ; %2 != 1
+ movu m4, [uq+xq]
+ movu m5, [vq+xq]
+%endif ; %2 ==/!= 1
+ punpckhbw m1, m0, m14
+ punpcklbw m0, m14
+%if %3 == 1
+ punpckhbw m3, m2, m14
+ punpcklbw m2, m14
+%endif ; %3 == 1
+%if %2 == 0
+ punpckhbw m2, m4, m14
+ punpckhbw m3, m5, m14
+%endif ; %2 == 0
+ punpcklbw m4, m14
+ punpcklbw m5, m14
+%else ; %1 != 8
+ movu m0, [yq+xq*(2<<%2)]
+ movu m1, [yq+xq*(2<<%2)+mmsize]
+%if %3 == 1
+ movu m2, [tmpq+xq*4]
+ movu m3, [tmpq+xq*4+mmsize]
+%endif ; %3 == 1
+ movu m4, [uq+xq*2]
+ movu m5, [vq+xq*2]
+%if %2 == 0
+ movu m2, [uq+xq*2+mmsize]
+ movu m3, [vq+xq*2+mmsize]
+%endif ; %2 == 0
+%endif ; %1 ==/!= 8
+ psubw m0, m15
+ psubw m1, m15
+%if %3 == 1
+ psubw m2, m15
+ psubw m3, m15
+%endif ; %3 == 1
+ psubw m4, [pw_ %+ %%uvoff]
+ psubw m5, [pw_ %+ %%uvoff]
+ SBUTTERFLY wd, 4, 5, 6
+%if %2 == 0
+ psubw m2, [pw_ %+ %%uvoff]
+ psubw m3, [pw_ %+ %%uvoff]
+ SBUTTERFLY wd, 2, 3, 6
+%endif ; %2 == 0
+
+ ; calculate y+rnd full-resolution [0-3,6-9]
+ punpckhwd m6, m0, [pw_1] ; y, 1
+ punpcklwd m0, [pw_1] ; y, 1
+ punpckhwd m7, m1, [pw_1] ; y, 1
+ punpcklwd m1, [pw_1] ; y, 1
+ pmaddwd m0, [rsp+3*mmsize]
+ pmaddwd m6, [rsp+3*mmsize]
+ pmaddwd m1, [rsp+3*mmsize]
+ pmaddwd m7, [rsp+3*mmsize]
+%if %3 == 1
+ punpckhwd m8, m2, [pw_1] ; y, 1
+ punpcklwd m2, [pw_1] ; y, 1
+ punpckhwd m9, m3, [pw_1] ; y, 1
+ punpcklwd m3, [pw_1] ; y, 1
+ pmaddwd m2, [rsp+3*mmsize]
+ pmaddwd m8, [rsp+3*mmsize]
+ pmaddwd m3, [rsp+3*mmsize]
+ pmaddwd m9, [rsp+3*mmsize]
+ mova [rsp+4*mmsize], m2
+ mova [rsp+5*mmsize], m8
+ mova [rsp+6*mmsize], m3
+ mova [rsp+7*mmsize], m9
+%endif ; %3 == 1
+
+ ; calculate r offsets (un-subsampled, then duplicate)
+ pmaddwd m10, m4, [rsp+0*mmsize]
+%if %2 == 1
+ pmaddwd m12, m5, [rsp+0*mmsize]
+ punpckhdq m11, m10, m10
+ punpckldq m10, m10
+ punpckhdq m13, m12, m12
+ punpckldq m12, m12
+%else ; %2 != 1
+ pmaddwd m11, m5, [rsp+0*mmsize]
+ pmaddwd m12, m2, [rsp+0*mmsize]
+ pmaddwd m13, m3, [rsp+0*mmsize]
+%endif ; %2 ==/!= 1
+%if %3 == 1
+ paddd m2, m10, [rsp+4*mmsize]
+ paddd m3, m11, [rsp+5*mmsize]
+ paddd m8, m12, [rsp+6*mmsize]
+ paddd m9, m13, [rsp+7*mmsize]
+%endif
+ paddd m10, m0
+ paddd m11, m6
+ paddd m12, m1
+ paddd m13, m7
+%if %3 == 1
+ psrad m2, %%sh
+ psrad m3, %%sh
+ psrad m8, %%sh
+ psrad m9, %%sh
+%endif ; %3 == 1
+ psrad m10, %%sh
+ psrad m11, %%sh
+ psrad m12, %%sh
+ psrad m13, %%sh
+%if %3 == 1
+ lea tmpq, [rq+rgbsq*2]
+ packssdw m2, m3
+ packssdw m8, m9
+ mova [tmpq+xq*4], m2
+ mova [tmpq+xq*4+mmsize], m8
+%endif ; %3 == 1
+ packssdw m10, m11
+ packssdw m12, m13
+ mova [rq+xq*(2 << %2)], m10
+ mova [rq+xq*(2 << %2)+mmsize], m12
+
+ ; calculate g offsets (un-subsampled, then duplicate)
+ pmaddwd m10, m4, [rsp+1*mmsize]
+%if %2 == 1
+ pmaddwd m12, m5, [rsp+1*mmsize]
+ punpckhdq m11, m10, m10
+ punpckldq m10, m10
+ punpckhdq m13, m12, m12
+ punpckldq m12, m12
+%else ; %2 != 1
+ pmaddwd m11, m5, [rsp+1*mmsize]
+ pmaddwd m12, m2, [rsp+1*mmsize]
+ pmaddwd m13, m3, [rsp+1*mmsize]
+%endif ; %2 ==/!= 1
+%if %3 == 1
+ paddd m2, m10, [rsp+4*mmsize]
+ paddd m3, m11, [rsp+5*mmsize]
+ paddd m8, m12, [rsp+6*mmsize]
+ paddd m9, m13, [rsp+7*mmsize]
+%endif ; %3 == 1
+ paddd m10, m0
+ paddd m11, m6
+ paddd m12, m1
+ paddd m13, m7
+%if %3 == 1
+ psrad m2, %%sh
+ psrad m3, %%sh
+ psrad m8, %%sh
+ psrad m9, %%sh
+%endif ; %3 == 1
+ psrad m10, %%sh
+ psrad m11, %%sh
+ psrad m12, %%sh
+ psrad m13, %%sh
+%if %3 == 1
+ lea tmpq, [gq+rgbsq*2]
+ packssdw m2, m3
+ packssdw m8, m9
+ mova [tmpq+xq*4], m2
+ mova [tmpq+xq*4+mmsize], m8
+%endif ; %3 == 1
+ packssdw m10, m11
+ packssdw m12, m13
+ mova [gq+xq*(2 << %2)], m10
+ mova [gq+xq*(2 << %2)+mmsize], m12
+
+ ; calculate b offsets (un-subsampled, then duplicate)
+ pmaddwd m4, [rsp+2*mmsize]
+ pmaddwd m5, [rsp+2*mmsize]
+%if %2 == 1
+ punpckhdq m2, m4, m4
+ punpckldq m4, m4
+ punpckhdq m3, m5, m5
+ punpckldq m5, m5
+%else ; %2 != 1
+ pmaddwd m2, [rsp+2*mmsize]
+ pmaddwd m3, [rsp+2*mmsize]
+ SWAP 2, 5
+%endif ; %2 ==/!= 1
+ paddd m0, m4
+ paddd m6, m2
+ paddd m1, m5
+ paddd m7, m3
+%if %3 == 1
+ paddd m4, [rsp+4*mmsize]
+ paddd m2, [rsp+5*mmsize]
+ paddd m5, [rsp+6*mmsize]
+ paddd m3, [rsp+7*mmsize]
+%endif ; %3 == 1
+ psrad m0, %%sh
+ psrad m6, %%sh
+ psrad m1, %%sh
+ psrad m7, %%sh
+%if %3 == 1
+ psrad m4, %%sh
+ psrad m2, %%sh
+ psrad m5, %%sh
+ psrad m3, %%sh
+%endif ; %3 == 1
+ packssdw m0, m6
+ packssdw m1, m7
+ movu [bq+xq*(2 << %2)], m0
+ movu [bq+xq*(2 << %2)+mmsize], m1
+%if %3 == 1
+ lea tmpq, [bq+rgbsq*2]
+ packssdw m4, m2
+ packssdw m5, m3
+ movu [tmpq+xq*4], m4
+ movu [tmpq+xq*4+mmsize], m5
+%endif ; %3 == 1
+
+ add xd, mmsize >> %2
+ cmp xd, wwd
+ jl .loop_h
+
+ lea rq, [rq+rgbsq*(2 << %3)]
+ lea gq, [gq+rgbsq*(2 << %3)]
+ lea bq, [bq+rgbsq*(2 << %3)]
+%if %3 == 1
+ lea yq, [yq+ysq*2]
+%else ; %3 != 0
+ add yq, ysq
+%endif ; %3 ==/!= 1
+ add uq, usq
+ add vq, vsq
+ dec hd
+ jg .loop_v
+
+ RET
+%endmacro
+
+%macro YUV2RGB_FNS 2
+YUV2RGB_FN 8, %1, %2
+YUV2RGB_FN 10, %1, %2
+YUV2RGB_FN 12, %1, %2
+%endmacro
+
+INIT_XMM sse2
+YUV2RGB_FNS 0, 0
+YUV2RGB_FNS 1, 0
+YUV2RGB_FNS 1, 1
+
+%macro RGB2YUV_FN 3 ; depth, log2_chroma_w (horiz), log2_chroma_h (vert)
+%assign %%sh 29 - %1
+%assign %%rnd (1 << (%%sh - 15))
+%assign %%uvrnd ((128 << (%1 - 8)) << (%%sh - 14))
+%if %1 != 8
+%assign %%maxval ((1 << %1) - 1)
+%endif ; %1 != 8
+%if %2 == 0
+%assign %%ss 444
+%elif %3 == 0
+%assign %%ss 422
+%else ; %3 == 1
+%assign %%ss 420
+%endif ; %2/%3
+
+cglobal rgb2yuv_ %+ %%ss %+ p%1, 8, 14, 16, 0 - 6 * mmsize, \
+ yuv, yuvs, rgb, rgbs, ww, h, c, off
+%if %2 == 1
+ inc wwd
+ sar wwd, 1
+%endif ; %2 == 1
+%if %3 == 1
+ inc hd
+ sar hd, 1
+%endif ; %3 == 1
+
+ ; prepare coeffs
+ movh m8, [offq]
+ movh m9, [pw_ %+ %%uvrnd]
+ psllw m8, %%sh - 14
+ paddw m9, [pw_ %+ %%rnd]
+ paddw m8, [pw_ %+ %%rnd]
+ movh m0, [cq+ 0]
+ movh m1, [cq+ 16]
+ movh m2, [cq+ 32]
+ movh m3, [cq+ 48]
+ movh m4, [cq+ 64]
+ movh m5, [cq+ 80]
+ movh m6, [cq+112]
+ movh m7, [cq+128]
+ punpcklwd m0, m1
+ punpcklwd m2, m8
+ punpcklwd m3, m4
+ punpcklwd m4, m5, m9
+ punpcklwd m5, m6
+ punpcklwd m7, m9
+
+ mova [rsp+0*mmsize], m0 ; cry, cgy
+ mova [rsp+1*mmsize], m2 ; cby, off + rnd
+ mova [rsp+2*mmsize], m3 ; cru, cgu
+ mova [rsp+3*mmsize], m4 ; cburv, uvoff + rnd
+ mova [rsp+4*mmsize], m5 ; cburv, cgv
+ mova [rsp+5*mmsize], m7 ; cbv, uvoff + rnd
+
+
+ DEFINE_ARGS y, ys, r, rgbs, ww, h, u, v, us, vs, g, b, tmp, x
+ mov gq, [rq+gprsize*1]
+ mov bq, [rq+gprsize*2]
+ mov rq, [rq+gprsize*0]
+ mov uq, [yq+gprsize*1]
+ mov vq, [yq+gprsize*2]
+ mov yq, [yq+gprsize*0]
+ mov usq, [ysq+gprsize*1]
+ mov vsq, [ysq+gprsize*2]
+ mov ysq, [ysq+gprsize*0]
+
+ pxor m15, m15
+.loop_v:
+ xor xd, xd
+
+.loop_h:
+ ; top line y
+ mova m0, [rq+xq*(2<<%2)]
+ mova m3, [rq+xq*(2<<%2)+mmsize]
+ mova m1, [gq+xq*(2<<%2)]
+ mova m4, [gq+xq*(2<<%2)+mmsize]
+ mova m2, [bq+xq*(2<<%2)]
+ mova m5, [bq+xq*(2<<%2)+mmsize]
+
+ punpcklwd m6, m0, m1
+ punpckhwd m7, m0, m1
+ punpcklwd m8, m3, m4
+ punpckhwd m9, m3, m4
+ punpcklwd m10, m2, [pw_16384]
+ punpckhwd m11, m2, [pw_16384]
+ punpcklwd m12, m5, [pw_16384]
+ punpckhwd m13, m5, [pw_16384]
+
+ pmaddwd m6, [rsp+0*mmsize]
+ pmaddwd m7, [rsp+0*mmsize]
+ pmaddwd m8, [rsp+0*mmsize]
+ pmaddwd m9, [rsp+0*mmsize]
+ pmaddwd m10, [rsp+1*mmsize]
+ pmaddwd m11, [rsp+1*mmsize]
+ pmaddwd m12, [rsp+1*mmsize]
+ pmaddwd m13, [rsp+1*mmsize]
+ paddd m6, m10
+ paddd m7, m11
+ paddd m8, m12
+ paddd m9, m13
+ psrad m6, %%sh
+ psrad m7, %%sh
+ psrad m8, %%sh
+ psrad m9, %%sh
+ packssdw m6, m7
+ packssdw m8, m9
+%if %1 == 8
+ packuswb m6, m8
+ movu [yq+xq*(1<<%2)], m6
+%else
+ CLIPW m6, m15, [pw_ %+ %%maxval]
+ CLIPW m8, m15, [pw_ %+ %%maxval]
+ movu [yq+xq*(2<<%2)], m6
+ movu [yq+xq*(2<<%2)+mmsize], m8
+%endif
+
+%if %2 == 1
+ ; subsampling cached data
+ pmaddwd m0, [pw_1]
+ pmaddwd m1, [pw_1]
+ pmaddwd m2, [pw_1]
+ pmaddwd m3, [pw_1]
+ pmaddwd m4, [pw_1]
+ pmaddwd m5, [pw_1]
+
+%if %3 == 1
+ ; bottom line y, r/g portion only
+ lea tmpq, [rgbsq+xq*2]
+ mova m6, [rq+tmpq*2]
+ mova m9, [rq+tmpq*2+mmsize]
+ mova m7, [gq+tmpq*2]
+ mova m10, [gq+tmpq*2+mmsize]
+ mova m8, [bq+tmpq*2]
+ mova m11, [bq+tmpq*2+mmsize]
+
+ punpcklwd m12, m6, m7
+ punpckhwd m13, m6, m7
+ punpcklwd m14, m9, m10
+ punpckhwd m15, m9, m10
+
+ ; release two more registers
+ pmaddwd m6, [pw_1]
+ pmaddwd m7, [pw_1]
+ pmaddwd m9, [pw_1]
+ pmaddwd m10, [pw_1]
+ paddd m0, m6
+ paddd m3, m9
+ paddd m1, m7
+ paddd m4, m10
+
+ ; bottom line y, b/rnd portion only
+ punpcklwd m6, m8, [pw_16384]
+ punpckhwd m7, m8, [pw_16384]
+ punpcklwd m9, m11, [pw_16384]
+ punpckhwd m10, m11, [pw_16384]
+
+ pmaddwd m12, [rsp+0*mmsize]
+ pmaddwd m13, [rsp+0*mmsize]
+ pmaddwd m14, [rsp+0*mmsize]
+ pmaddwd m15, [rsp+0*mmsize]
+ pmaddwd m6, [rsp+1*mmsize]
+ pmaddwd m7, [rsp+1*mmsize]
+ pmaddwd m9, [rsp+1*mmsize]
+ pmaddwd m10, [rsp+1*mmsize]
+ paddd m12, m6
+ paddd m13, m7
+ paddd m14, m9
+ paddd m15, m10
+ psrad m12, %%sh
+ psrad m13, %%sh
+ psrad m14, %%sh
+ psrad m15, %%sh
+ packssdw m12, m13
+ packssdw m14, m15
+ lea tmpq, [yq+ysq]
+%if %1 == 8
+ packuswb m12, m14
+ movu [tmpq+xq*2], m12
+%else
+ pxor m15, m15
+ CLIPW m12, m15, [pw_ %+ %%maxval]
+ CLIPW m14, m15, [pw_ %+ %%maxval]
+ movu [tmpq+xq*4], m12
+ movu [tmpq+xq*4+mmsize], m14
+%endif
+
+ ; complete subsampling of r/g/b pixels for u/v
+ pmaddwd m8, [pw_1]
+ pmaddwd m11, [pw_1]
+ paddd m2, m8
+ paddd m5, m11
+ paddd m0, [pd_2]
+ paddd m1, [pd_2]
+ paddd m2, [pd_2]
+ paddd m3, [pd_2]
+ paddd m4, [pd_2]
+ paddd m5, [pd_2]
+ psrad m0, 2
+ psrad m1, 2
+ psrad m2, 2
+ psrad m3, 2
+ psrad m4, 2
+ psrad m5, 2
+%else ; %3 != 1
+ paddd m0, [pd_1]
+ paddd m1, [pd_1]
+ paddd m2, [pd_1]
+ paddd m3, [pd_1]
+ paddd m4, [pd_1]
+ paddd m5, [pd_1]
+ psrad m0, 1
+ psrad m1, 1
+ psrad m2, 1
+ psrad m3, 1
+ psrad m4, 1
+ psrad m5, 1
+%endif ; %3 ==/!= 1
+ packssdw m0, m3
+ packssdw m1, m4
+ packssdw m2, m5
+%endif ; %2 == 1
+
+ ; convert u/v pixels
+ SBUTTERFLY wd, 0, 1, 6
+ punpckhwd m6, m2, [pw_16384]
+ punpcklwd m2, [pw_16384]
+
+ pmaddwd m7, m0, [rsp+2*mmsize]
+ pmaddwd m8, m1, [rsp+2*mmsize]
+ pmaddwd m9, m2, [rsp+3*mmsize]
+ pmaddwd m10, m6, [rsp+3*mmsize]
+ pmaddwd m0, [rsp+4*mmsize]
+ pmaddwd m1, [rsp+4*mmsize]
+ pmaddwd m2, [rsp+5*mmsize]
+ pmaddwd m6, [rsp+5*mmsize]
+ paddd m7, m9
+ paddd m8, m10
+ paddd m0, m2
+ paddd m1, m6
+ psrad m7, %%sh
+ psrad m8, %%sh
+ psrad m0, %%sh
+ psrad m1, %%sh
+ packssdw m7, m8
+ packssdw m0, m1
+%if %2 == 1
+%if %1 == 8
+ packuswb m7, m0
+ movh [uq+xq], m7
+ movhps [vq+xq], m7
+%else
+ CLIPW m7, m15, [pw_ %+ %%maxval]
+ CLIPW m0, m15, [pw_ %+ %%maxval]
+ movu [uq+xq*2], m7
+ movu [vq+xq*2], m0
+%endif
+%else ; %2 != 1
+ ; second set of u/v pixels
+ SBUTTERFLY wd, 3, 4, 6
+ punpckhwd m6, m5, [pw_16384]
+ punpcklwd m5, [pw_16384]
+
+ pmaddwd m8, m3, [rsp+2*mmsize]
+ pmaddwd m9, m4, [rsp+2*mmsize]
+ pmaddwd m10, m5, [rsp+3*mmsize]
+ pmaddwd m11, m6, [rsp+3*mmsize]
+ pmaddwd m3, [rsp+4*mmsize]
+ pmaddwd m4, [rsp+4*mmsize]
+ pmaddwd m5, [rsp+5*mmsize]
+ pmaddwd m6, [rsp+5*mmsize]
+ paddd m8, m10
+ paddd m9, m11
+ paddd m3, m5
+ paddd m4, m6
+ psrad m8, %%sh
+ psrad m9, %%sh
+ psrad m3, %%sh
+ psrad m4, %%sh
+ packssdw m8, m9
+ packssdw m3, m4
+
+%if %1 == 8
+ packuswb m7, m8
+ packuswb m0, m3
+ movu [uq+xq], m7
+ movu [vq+xq], m0
+%else
+ CLIPW m7, m15, [pw_ %+ %%maxval]
+ CLIPW m0, m15, [pw_ %+ %%maxval]
+ CLIPW m8, m15, [pw_ %+ %%maxval]
+ CLIPW m3, m15, [pw_ %+ %%maxval]
+ movu [uq+xq*2], m7
+ movu [uq+xq*2+mmsize], m8
+ movu [vq+xq*2], m0
+ movu [vq+xq*2+mmsize], m3
+%endif
+%endif ; %2 ==/!= 1
+
+ add xq, mmsize >> %2
+ cmp xd, wwd
+ jl .loop_h
+
+%if %3 == 0
+ add yq, ysq
+%else ; %3 != 0
+ lea yq, [yq+ysq*2]
+%endif ; %3 ==/!= 0
+ add uq, usq
+ add vq, vsq
+ lea rq, [rq+rgbsq*(2<<%3)]
+ lea gq, [gq+rgbsq*(2<<%3)]
+ lea bq, [bq+rgbsq*(2<<%3)]
+ dec hd
+ jg .loop_v
+
+ RET
+%endmacro
+
+%macro RGB2YUV_FNS 2
+RGB2YUV_FN 8, %1, %2
+RGB2YUV_FN 10, %1, %2
+RGB2YUV_FN 12, %1, %2
+%endmacro
+
+INIT_XMM sse2
+RGB2YUV_FNS 0, 0
+RGB2YUV_FNS 1, 0
+RGB2YUV_FNS 1, 1
+
+; void ff_multiply3x3_sse2(int16_t *data[3], ptrdiff_t stride,
+; int w, int h, const int16_t coeff[3][3][8])
+INIT_XMM sse2
+cglobal multiply3x3, 5, 7, 16, data, stride, ww, h, c
+ movh m0, [cq+ 0]
+ movh m1, [cq+ 32]
+ movh m2, [cq+ 48]
+ movh m3, [cq+ 80]
+ movh m4, [cq+ 96]
+ movh m5, [cq+128]
+ punpcklwd m0, [cq+ 16]
+ punpcklwd m1, [pw_8192]
+ punpcklwd m2, [cq+ 64]
+ punpcklwd m3, [pw_8192]
+ punpcklwd m4, [cq+112]
+ punpcklwd m5, [pw_8192]
+
+ DEFINE_ARGS data0, stride, ww, h, data1, data2, x
+ shl strideq, 1
+ mov data1q, [data0q+gprsize*1]
+ mov data2q, [data0q+gprsize*2]
+ mov data0q, [data0q+gprsize*0]
+
+.loop_v:
+ xor xd, xd
+
+.loop_h:
+ mova m6, [data0q+xq*2]
+ mova m7, [data1q+xq*2]
+ mova m8, [data2q+xq*2]
+ SBUTTERFLY wd, 6, 7, 9
+ punpckhwd m9, m8, [pw_1]
+ punpcklwd m8, [pw_1]
+
+ pmaddwd m10, m6, m0
+ pmaddwd m11, m7, m0
+ pmaddwd m12, m8, m1
+ pmaddwd m13, m9, m1
+ paddd m10, m12
+ paddd m11, m13
+ psrad m10, 14
+ psrad m11, 14
+
+ pmaddwd m12, m6, m2
+ pmaddwd m13, m7, m2
+ pmaddwd m14, m8, m3
+ pmaddwd m15, m9, m3
+ paddd m12, m14
+ paddd m13, m15
+ psrad m12, 14
+ psrad m13, 14
+
+ pmaddwd m6, m4
+ pmaddwd m7, m4
+ pmaddwd m8, m5
+ pmaddwd m9, m5
+ paddd m6, m8
+ paddd m7, m9
+ psrad m6, 14
+ psrad m7, 14
+
+ packssdw m10, m11
+ packssdw m12, m13
+ packssdw m6, m7
+
+ mova [data0q+xq*2], m10
+ mova [data1q+xq*2], m12
+ mova [data2q+xq*2], m6
+
+ add xd, mmsize / 2
+ cmp xd, wwd
+ jl .loop_h
+
+ add data0q, strideq
+ add data1q, strideq
+ add data2q, strideq
+ dec hd
+ jg .loop_v
+
+ RET
+%endif
diff --git a/libavfilter/x86/colorspacedsp_init.c b/libavfilter/x86/colorspacedsp_init.c
new file mode 100644
index 0000000000..b5006ac295
--- /dev/null
+++ b/libavfilter/x86/colorspacedsp_init.c
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2016 Ronald S. Bultje <rsbultje@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/x86/cpu.h"
+
+#include "libavfilter/colorspacedsp.h"
+
+#define decl_yuv2yuv_fn(t) \
+void ff_yuv2yuv_##t##_sse2(uint8_t *yuv_out[3], const ptrdiff_t yuv_out_stride[3], \
+ uint8_t *yuv_in[3], const ptrdiff_t yuv_in_stride[3], \
+ int w, int h, const int16_t yuv2yuv_coeffs[3][3][8], \
+ const int16_t yuv_offset[2][8])
+
+#define decl_yuv2yuv_fns(ss) \
+decl_yuv2yuv_fn(ss##p8to8); \
+decl_yuv2yuv_fn(ss##p10to8); \
+decl_yuv2yuv_fn(ss##p12to8); \
+decl_yuv2yuv_fn(ss##p8to10); \
+decl_yuv2yuv_fn(ss##p10to10); \
+decl_yuv2yuv_fn(ss##p12to10); \
+decl_yuv2yuv_fn(ss##p8to12); \
+decl_yuv2yuv_fn(ss##p10to12); \
+decl_yuv2yuv_fn(ss##p12to12)
+
+decl_yuv2yuv_fns(420);
+decl_yuv2yuv_fns(422);
+decl_yuv2yuv_fns(444);
+
+#define decl_yuv2rgb_fn(t) \
+void ff_yuv2rgb_##t##_sse2(int16_t *rgb_out[3], ptrdiff_t rgb_stride, \
+ uint8_t *yuv_in[3], const ptrdiff_t yuv_stride[3], \
+ int w, int h, const int16_t coeff[3][3][8], \
+ const int16_t yuv_offset[8])
+
+#define decl_yuv2rgb_fns(ss) \
+decl_yuv2rgb_fn(ss##p8); \
+decl_yuv2rgb_fn(ss##p10); \
+decl_yuv2rgb_fn(ss##p12)
+
+decl_yuv2rgb_fns(420);
+decl_yuv2rgb_fns(422);
+decl_yuv2rgb_fns(444);
+
+#define decl_rgb2yuv_fn(t) \
+void ff_rgb2yuv_##t##_sse2(uint8_t *yuv_out[3], const ptrdiff_t yuv_stride[3], \
+ int16_t *rgb_in[3], ptrdiff_t rgb_stride, \
+ int w, int h, const int16_t coeff[3][3][8], \
+ const int16_t yuv_offset[8])
+
+#define decl_rgb2yuv_fns(ss) \
+decl_rgb2yuv_fn(ss##p8); \
+decl_rgb2yuv_fn(ss##p10); \
+decl_rgb2yuv_fn(ss##p12)
+
+decl_rgb2yuv_fns(420);
+decl_rgb2yuv_fns(422);
+decl_rgb2yuv_fns(444);
+
+void ff_multiply3x3_sse2(int16_t *data[3], ptrdiff_t stride, int w, int h,
+ const int16_t coeff[3][3][8]);
+
+void ff_colorspacedsp_x86_init(ColorSpaceDSPContext *dsp)
+{
+ int cpu_flags = av_get_cpu_flags();
+
+ if (ARCH_X86_64 && EXTERNAL_SSE2(cpu_flags)) {
+#define assign_yuv2yuv_fns(ss) \
+ dsp->yuv2yuv[BPP_8 ][BPP_8 ][SS_##ss] = ff_yuv2yuv_##ss##p8to8_sse2; \
+ dsp->yuv2yuv[BPP_8 ][BPP_10][SS_##ss] = ff_yuv2yuv_##ss##p8to10_sse2; \
+ dsp->yuv2yuv[BPP_8 ][BPP_12][SS_##ss] = ff_yuv2yuv_##ss##p8to12_sse2; \
+ dsp->yuv2yuv[BPP_10][BPP_8 ][SS_##ss] = ff_yuv2yuv_##ss##p10to8_sse2; \
+ dsp->yuv2yuv[BPP_10][BPP_10][SS_##ss] = ff_yuv2yuv_##ss##p10to10_sse2; \
+ dsp->yuv2yuv[BPP_10][BPP_12][SS_##ss] = ff_yuv2yuv_##ss##p10to12_sse2; \
+ dsp->yuv2yuv[BPP_12][BPP_8 ][SS_##ss] = ff_yuv2yuv_##ss##p12to8_sse2; \
+ dsp->yuv2yuv[BPP_12][BPP_10][SS_##ss] = ff_yuv2yuv_##ss##p12to10_sse2; \
+ dsp->yuv2yuv[BPP_12][BPP_12][SS_##ss] = ff_yuv2yuv_##ss##p12to12_sse2
+
+ assign_yuv2yuv_fns(420);
+ assign_yuv2yuv_fns(422);
+ assign_yuv2yuv_fns(444);
+
+#define assign_yuv2rgb_fns(ss) \
+ dsp->yuv2rgb[BPP_8 ][SS_##ss] = ff_yuv2rgb_##ss##p8_sse2; \
+ dsp->yuv2rgb[BPP_10][SS_##ss] = ff_yuv2rgb_##ss##p10_sse2; \
+ dsp->yuv2rgb[BPP_12][SS_##ss] = ff_yuv2rgb_##ss##p12_sse2
+
+ assign_yuv2rgb_fns(420);
+ assign_yuv2rgb_fns(422);
+ assign_yuv2rgb_fns(444);
+
+#define assign_rgb2yuv_fns(ss) \
+ dsp->rgb2yuv[BPP_8 ][SS_##ss] = ff_rgb2yuv_##ss##p8_sse2; \
+ dsp->rgb2yuv[BPP_10][SS_##ss] = ff_rgb2yuv_##ss##p10_sse2; \
+ dsp->rgb2yuv[BPP_12][SS_##ss] = ff_rgb2yuv_##ss##p12_sse2
+
+ assign_rgb2yuv_fns(420);
+ assign_rgb2yuv_fns(422);
+ assign_rgb2yuv_fns(444);
+
+ dsp->multiply3x3 = ff_multiply3x3_sse2;
+ }
+}
diff --git a/libavfilter/x86/vf_blend.asm b/libavfilter/x86/vf_blend.asm
new file mode 100644
index 0000000000..33b1ad1496
--- /dev/null
+++ b/libavfilter/x86/vf_blend.asm
@@ -0,0 +1,316 @@
+;*****************************************************************************
+;* x86-optimized functions for blend filter
+;*
+;* Copyright (C) 2015 Paul B Mahol
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+%include "libavutil/x86/x86util.asm"
+
+SECTION_RODATA
+
+ps_255: times 4 dd 255.0
+pw_1: times 8 dw 1
+pw_128: times 8 dw 128
+pw_255: times 8 dw 255
+pb_127: times 16 db 127
+pb_128: times 16 db 128
+pb_255: times 16 db 255
+
+SECTION .text
+
+%macro BLEND_INIT 2
+%if ARCH_X86_64
+cglobal blend_%1, 6, 9, %2, top, top_linesize, bottom, bottom_linesize, dst, dst_linesize, width, end, x
+ mov widthd, dword widthm
+%else
+cglobal blend_%1, 5, 7, %2, top, top_linesize, bottom, bottom_linesize, dst, end, x
+%define dst_linesizeq r5mp
+%define widthq r6mp
+%endif
+ mov endd, dword r7m
+ add topq, widthq
+ add bottomq, widthq
+ add dstq, widthq
+ neg widthq
+%endmacro
+
+%macro BLEND_END 0
+ add topq, top_linesizeq
+ add bottomq, bottom_linesizeq
+ add dstq, dst_linesizeq
+ sub endd, 1
+ jg .nextrow
+REP_RET
+%endmacro
+
+%macro BLEND_SIMPLE 2
+BLEND_INIT %1, 2
+.nextrow:
+ mov xq, widthq
+
+ .loop:
+ movu m0, [topq + xq]
+ movu m1, [bottomq + xq]
+ p%2 m0, m1
+ mova [dstq + xq], m0
+ add xq, mmsize
+ jl .loop
+BLEND_END
+%endmacro
+
+INIT_XMM sse2
+BLEND_SIMPLE xor, xor
+BLEND_SIMPLE or, or
+BLEND_SIMPLE and, and
+BLEND_SIMPLE addition, addusb
+BLEND_SIMPLE subtract, subusb
+BLEND_SIMPLE darken, minub
+BLEND_SIMPLE lighten, maxub
+
+BLEND_INIT difference128, 4
+ pxor m2, m2
+ mova m3, [pw_128]
+.nextrow:
+ mov xq, widthq
+
+ .loop:
+ movh m0, [topq + xq]
+ movh m1, [bottomq + xq]
+ punpcklbw m0, m2
+ punpcklbw m1, m2
+ paddw m0, m3
+ psubw m0, m1
+ packuswb m0, m0
+ movh [dstq + xq], m0
+ add xq, mmsize / 2
+ jl .loop
+BLEND_END
+
+%macro MULTIPLY 3 ; a, b, pw_1
+ pmullw %1, %2 ; xxxxxxxx a * b
+ paddw %1, %3
+ mova %2, %1
+ psrlw %2, 8
+ paddw %1, %2
+ psrlw %1, 8 ; 00xx00xx a * b / 255
+%endmacro
+
+%macro SCREEN 4 ; a, b, pw_1, pw_255
+ pxor %1, %4 ; 00xx00xx 255 - a
+ pxor %2, %4
+ MULTIPLY %1, %2, %3
+ pxor %1, %4 ; 00xx00xx 255 - x / 255
+%endmacro
+
+BLEND_INIT multiply, 4
+ pxor m2, m2
+ mova m3, [pw_1]
+.nextrow:
+ mov xq, widthq
+
+ .loop:
+ ; word
+ ; |--|
+ movh m0, [topq + xq] ; 0000xxxx
+ movh m1, [bottomq + xq]
+ punpcklbw m0, m2 ; 00xx00xx
+ punpcklbw m1, m2
+
+ MULTIPLY m0, m1, m3
+
+ packuswb m0, m0 ; 0000xxxx
+ movh [dstq + xq], m0
+ add xq, mmsize / 2
+
+ jl .loop
+BLEND_END
+
+BLEND_INIT screen, 5
+ pxor m2, m2
+ mova m3, [pw_1]
+ mova m4, [pw_255]
+.nextrow:
+ mov xq, widthq
+
+ .loop:
+ movh m0, [topq + xq] ; 0000xxxx
+ movh m1, [bottomq + xq]
+ punpcklbw m0, m2 ; 00xx00xx
+ punpcklbw m1, m2
+
+ SCREEN m0, m1, m3, m4
+
+ packuswb m0, m0 ; 0000xxxx
+ movh [dstq + xq], m0
+ add xq, mmsize / 2
+
+ jl .loop
+BLEND_END
+
+BLEND_INIT average, 3
+ pxor m2, m2
+.nextrow:
+ mov xq, widthq
+
+ .loop:
+ movh m0, [topq + xq]
+ movh m1, [bottomq + xq]
+ punpcklbw m0, m2
+ punpcklbw m1, m2
+ paddw m0, m1
+ psrlw m0, 1
+ packuswb m0, m0
+ movh [dstq + xq], m0
+ add xq, mmsize / 2
+ jl .loop
+BLEND_END
+
+BLEND_INIT addition128, 4
+ pxor m2, m2
+ mova m3, [pw_128]
+.nextrow:
+ mov xq, widthq
+
+ .loop:
+ movh m0, [topq + xq]
+ movh m1, [bottomq + xq]
+ punpcklbw m0, m2
+ punpcklbw m1, m2
+ paddw m0, m1
+ psubw m0, m3
+ packuswb m0, m0
+ movh [dstq + xq], m0
+ add xq, mmsize / 2
+ jl .loop
+BLEND_END
+
+BLEND_INIT hardmix, 5
+ mova m2, [pb_255]
+ mova m3, [pb_128]
+ mova m4, [pb_127]
+.nextrow:
+ mov xq, widthq
+
+ .loop:
+ movu m0, [topq + xq]
+ movu m1, [bottomq + xq]
+ pxor m1, m4
+ pxor m0, m3
+ pcmpgtb m1, m0
+ pxor m1, m2
+ mova [dstq + xq], m1
+ add xq, mmsize
+ jl .loop
+BLEND_END
+
+BLEND_INIT divide, 4
+ pxor m2, m2
+ mova m3, [ps_255]
+.nextrow:
+ mov xq, widthq
+
+ .loop:
+ movd m0, [topq + xq] ; 000000xx
+ movd m1, [bottomq + xq]
+ punpcklbw m0, m2 ; 00000x0x
+ punpcklbw m1, m2
+ punpcklwd m0, m2 ; 000x000x
+ punpcklwd m1, m2
+
+ cvtdq2ps m0, m0
+ cvtdq2ps m1, m1
+ divps m0, m1 ; a / b
+ mulps m0, m3 ; a / b * 255
+ minps m0, m3
+ cvttps2dq m0, m0
+
+ packssdw m0, m0 ; 00000x0x
+ packuswb m0, m0 ; 000000xx
+ movd [dstq + xq], m0
+ add xq, mmsize / 4
+
+ jl .loop
+BLEND_END
+
+BLEND_INIT phoenix, 4
+ mova m3, [pb_255]
+.nextrow:
+ mov xq, widthq
+
+ .loop:
+ movu m0, [topq + xq]
+ movu m1, [bottomq + xq]
+ mova m2, m0
+ pminub m0, m1
+ pmaxub m1, m2
+ mova m2, m3
+ psubusb m2, m1
+ paddusb m2, m0
+ mova [dstq + xq], m2
+ add xq, mmsize
+ jl .loop
+BLEND_END
+
+%macro BLEND_ABS 0
+BLEND_INIT difference, 3
+ pxor m2, m2
+.nextrow:
+ mov xq, widthq
+
+ .loop:
+ movh m0, [topq + xq]
+ movh m1, [bottomq + xq]
+ punpcklbw m0, m2
+ punpcklbw m1, m2
+ psubw m0, m1
+ ABS1 m0, m1
+ packuswb m0, m0
+ movh [dstq + xq], m0
+ add xq, mmsize / 2
+ jl .loop
+BLEND_END
+
+BLEND_INIT negation, 5
+ pxor m2, m2
+ mova m4, [pw_255]
+.nextrow:
+ mov xq, widthq
+
+ .loop:
+ movh m0, [topq + xq]
+ movh m1, [bottomq + xq]
+ punpcklbw m0, m2
+ punpcklbw m1, m2
+ mova m3, m4
+ psubw m3, m0
+ psubw m3, m1
+ ABS1 m3, m1
+ mova m0, m4
+ psubw m0, m3
+ packuswb m0, m0
+ movh [dstq + xq], m0
+ add xq, mmsize / 2
+ jl .loop
+BLEND_END
+%endmacro
+
+INIT_XMM sse2
+BLEND_ABS
+INIT_XMM ssse3
+BLEND_ABS
diff --git a/libavfilter/x86/vf_blend_init.c b/libavfilter/x86/vf_blend_init.c
new file mode 100644
index 0000000000..96fe3d8baa
--- /dev/null
+++ b/libavfilter/x86/vf_blend_init.c
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2015 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/attributes.h"
+#include "libavutil/cpu.h"
+#include "libavutil/x86/cpu.h"
+#include "libavfilter/blend.h"
+
+#define BLEND_FUNC(name, opt) \
+void ff_blend_##name##_##opt(const uint8_t *top, ptrdiff_t top_linesize, \
+ const uint8_t *bottom, ptrdiff_t bottom_linesize, \
+ uint8_t *dst, ptrdiff_t dst_linesize, \
+ ptrdiff_t width, ptrdiff_t height, \
+ struct FilterParams *param, double *values, int starty);
+
+BLEND_FUNC(addition, sse2)
+BLEND_FUNC(addition128, sse2)
+BLEND_FUNC(average, sse2)
+BLEND_FUNC(and, sse2)
+BLEND_FUNC(darken, sse2)
+BLEND_FUNC(difference128, sse2)
+BLEND_FUNC(multiply, sse2)
+BLEND_FUNC(screen, sse2)
+BLEND_FUNC(hardmix, sse2)
+BLEND_FUNC(divide, sse2)
+BLEND_FUNC(lighten, sse2)
+BLEND_FUNC(or, sse2)
+BLEND_FUNC(phoenix, sse2)
+BLEND_FUNC(subtract, sse2)
+BLEND_FUNC(xor, sse2)
+BLEND_FUNC(difference, sse2)
+BLEND_FUNC(difference, ssse3)
+BLEND_FUNC(negation, sse2)
+BLEND_FUNC(negation, ssse3)
+
+av_cold void ff_blend_init_x86(FilterParams *param, int is_16bit)
+{
+ int cpu_flags = av_get_cpu_flags();
+
+ if (EXTERNAL_SSE2(cpu_flags) && param->opacity == 1 && !is_16bit) {
+ switch (param->mode) {
+ case BLEND_ADDITION: param->blend = ff_blend_addition_sse2; break;
+ case BLEND_ADDITION128: param->blend = ff_blend_addition128_sse2; break;
+ case BLEND_AND: param->blend = ff_blend_and_sse2; break;
+ case BLEND_AVERAGE: param->blend = ff_blend_average_sse2; break;
+ case BLEND_DARKEN: param->blend = ff_blend_darken_sse2; break;
+ case BLEND_DIFFERENCE128: param->blend = ff_blend_difference128_sse2; break;
+ case BLEND_DIVIDE: param->blend = ff_blend_divide_sse2; break;
+ case BLEND_HARDMIX: param->blend = ff_blend_hardmix_sse2; break;
+ case BLEND_LIGHTEN: param->blend = ff_blend_lighten_sse2; break;
+ case BLEND_MULTIPLY: param->blend = ff_blend_multiply_sse2; break;
+ case BLEND_OR: param->blend = ff_blend_or_sse2; break;
+ case BLEND_PHOENIX: param->blend = ff_blend_phoenix_sse2; break;
+ case BLEND_SCREEN: param->blend = ff_blend_screen_sse2; break;
+ case BLEND_SUBTRACT: param->blend = ff_blend_subtract_sse2; break;
+ case BLEND_XOR: param->blend = ff_blend_xor_sse2; break;
+ case BLEND_DIFFERENCE: param->blend = ff_blend_difference_sse2; break;
+ case BLEND_NEGATION: param->blend = ff_blend_negation_sse2; break;
+ }
+ }
+ if (EXTERNAL_SSSE3(cpu_flags) && param->opacity == 1 && !is_16bit) {
+ switch (param->mode) {
+ case BLEND_DIFFERENCE: param->blend = ff_blend_difference_ssse3; break;
+ case BLEND_NEGATION: param->blend = ff_blend_negation_ssse3; break;
+ }
+ }
+}
diff --git a/libavfilter/x86/vf_bwdif.asm b/libavfilter/x86/vf_bwdif.asm
new file mode 100644
index 0000000000..147b7c6ac6
--- /dev/null
+++ b/libavfilter/x86/vf_bwdif.asm
@@ -0,0 +1,270 @@
+;*****************************************************************************
+;* x86-optimized functions for bwdif filter
+;*
+;* Copyright (C) 2016 Thomas Mundt <loudmax@yahoo.de>
+;*
+;* Based on yadif simd code
+;* Copyright (C) 2006 Michael Niedermayer <michaelni@gmx.at>
+;* 2013 Daniel Kang <daniel.d.kang@gmail.com>
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+%include "libavutil/x86/x86util.asm"
+
+SECTION_RODATA
+
+pw_coefhf: times 4 dw 1016, 5570
+pw_coefhf1: times 8 dw -3801
+pw_coefsp: times 4 dw 5077, -981
+pw_splfdif: times 4 dw -768, 768
+
+SECTION .text
+
+%macro LOAD8 2
+ movh %1, %2
+ punpcklbw %1, m7
+%endmacro
+
+%macro LOAD12 2
+ movu %1, %2
+%endmacro
+
+%macro DISP8 0
+ packuswb m2, m2
+ movh [dstq], m2
+%endmacro
+
+%macro DISP12 0
+ CLIPW m2, m7, m12
+ movu [dstq], m2
+%endmacro
+
+%macro FILTER 5
+ pxor m7, m7
+.loop%1:
+ LOAD%4 m0, [curq+t0*%5]
+ LOAD%4 m1, [curq+t1*%5]
+ LOAD%4 m2, [%2]
+ LOAD%4 m3, [%3]
+ mova m4, m3
+ paddw m3, m2
+ psubw m2, m4
+ ABS1 m2, m4
+ mova m8, m3
+ mova m9, m2
+ LOAD%4 m3, [prevq+t0*%5]
+ LOAD%4 m4, [prevq+t1*%5]
+ psubw m3, m0
+ psubw m4, m1
+ ABS2 m3, m4, m5, m6
+ paddw m3, m4
+ psrlw m2, 1
+ psrlw m3, 1
+ pmaxsw m2, m3
+ LOAD%4 m3, [nextq+t0*%5]
+ LOAD%4 m4, [nextq+t1*%5]
+ psubw m3, m0
+ psubw m4, m1
+ ABS2 m3, m4, m5, m6
+ paddw m3, m4
+ psrlw m3, 1
+ pmaxsw m2, m3
+
+ LOAD%4 m3, [%2+t0*2*%5]
+ LOAD%4 m4, [%3+t0*2*%5]
+ LOAD%4 m5, [%2+t1*2*%5]
+ LOAD%4 m6, [%3+t1*2*%5]
+ paddw m3, m4
+ paddw m5, m6
+ mova m6, m3
+ paddw m6, m5
+ mova m10, m6
+ psrlw m3, 1
+ psrlw m5, 1
+ psubw m3, m0
+ psubw m5, m1
+ mova m6, m3
+ pminsw m3, m5
+ pmaxsw m5, m6
+ mova m4, m8
+ psraw m4, 1
+ mova m6, m4
+ psubw m6, m0
+ psubw m4, m1
+ pmaxsw m3, m6
+ pminsw m5, m6
+ pmaxsw m3, m4
+ pminsw m5, m4
+ mova m6, m7
+ psubw m6, m3
+ pmaxsw m6, m5
+ mova m3, m2
+ pcmpgtw m3, m7
+ pand m6, m3
+ pmaxsw m2, m6
+ mova m11, m2
+
+ LOAD%4 m2, [%2+t0*4*%5]
+ LOAD%4 m3, [%3+t0*4*%5]
+ LOAD%4 m4, [%2+t1*4*%5]
+ LOAD%4 m5, [%3+t1*4*%5]
+ paddw m2, m3
+ paddw m4, m5
+ paddw m2, m4
+ mova m3, m2
+ punpcklwd m2, m8
+ punpckhwd m3, m8
+ pmaddwd m2, [pw_coefhf]
+ pmaddwd m3, [pw_coefhf]
+ mova m4, m10
+ mova m6, m4
+ pmullw m4, [pw_coefhf1]
+ pmulhw m6, [pw_coefhf1]
+ mova m5, m4
+ punpcklwd m4, m6
+ punpckhwd m5, m6
+ paddd m2, m4
+ paddd m3, m5
+ psrad m2, 2
+ psrad m3, 2
+
+ mova m4, m0
+ paddw m0, m1
+%if ARCH_X86_64
+ LOAD%4 m5, [curq+t2*%5]
+ LOAD%4 m6, [curq+t3*%5]
+%else
+ mov r4, prefs3mp
+ mov r5, mrefs3mp
+ LOAD%4 m5, [curq+t0*%5]
+ LOAD%4 m6, [curq+t1*%5]
+ mov r4, prefsmp
+ mov r5, mrefsmp
+%endif
+ paddw m6, m5
+ psubw m1, m4
+ ABS1 m1, m4
+ pcmpgtw m1, m9
+ mova m4, m1
+ punpcklwd m1, m4
+ punpckhwd m4, m4
+ pand m2, m1
+ pand m3, m4
+ mova m5, [pw_splfdif]
+ mova m7, m5
+ pand m5, m1
+ pand m7, m4
+ paddw m5, [pw_coefsp]
+ paddw m7, [pw_coefsp]
+ mova m4, m0
+ punpcklwd m0, m6
+ punpckhwd m4, m6
+ pmaddwd m0, m5
+ pmaddwd m4, m7
+ paddd m2, m0
+ paddd m3, m4
+ psrad m2, 13
+ psrad m3, 13
+ packssdw m2, m3
+
+ mova m4, m8
+ psraw m4, 1
+ mova m0, m11
+ mova m3, m4
+ psubw m4, m0
+ paddw m3, m0
+ CLIPW m2, m4, m3
+ pxor m7, m7
+ DISP%4
+
+ add dstq, STEP
+ add prevq, STEP
+ add curq, STEP
+ add nextq, STEP
+ sub DWORD wm, mmsize/2
+ jg .loop%1
+%endmacro
+
+%macro PROC 2
+%if ARCH_X86_64
+ movsxd r5, DWORD prefsm
+ movsxd r6, DWORD mrefsm
+ movsxd r7, DWORD prefs3m
+ movsxd r8, DWORD mrefs3m
+ DECLARE_REG_TMP 5, 6, 7, 8
+%else
+ %define m8 [rsp+ 0]
+ %define m9 [rsp+16]
+ %define m10 [rsp+32]
+ %define m11 [rsp+48]
+ mov r4, prefsmp
+ mov r5, mrefsmp
+ DECLARE_REG_TMP 4, 5
+%endif
+ cmp DWORD paritym, 0
+ je .parity0
+ FILTER 1, prevq, curq, %1, %2
+ jmp .ret
+.parity0:
+ FILTER 0, curq, nextq, %1, %2
+.ret:
+ RET
+%endmacro
+
+%macro BWDIF 0
+%if ARCH_X86_64
+cglobal bwdif_filter_line, 4, 9, 12, 0, dst, prev, cur, next, w, prefs, \
+ mrefs, prefs2, mrefs2, prefs3, mrefs3, \
+ prefs4, mrefs4, parity, clip_max
+%else
+cglobal bwdif_filter_line, 4, 6, 8, 64, dst, prev, cur, next, w, prefs, \
+ mrefs, prefs2, mrefs2, prefs3, mrefs3, \
+ prefs4, mrefs4, parity, clip_max
+%endif
+ %define STEP mmsize/2
+ PROC 8, 1
+
+%if ARCH_X86_64
+cglobal bwdif_filter_line_12bit, 4, 9, 13, 0, dst, prev, cur, next, w, \
+ prefs, mrefs, prefs2, mrefs2, \
+ prefs3, mrefs3, prefs4, \
+ mrefs4, parity, clip_max
+ movd m12, DWORD clip_maxm
+ SPLATW m12, m12, 0
+%else
+cglobal bwdif_filter_line_12bit, 4, 6, 8, 80, dst, prev, cur, next, w, \
+ prefs, mrefs, prefs2, mrefs2, \
+ prefs3, mrefs3, prefs4, \
+ mrefs4, parity, clip_max
+ %define m12 [rsp+64]
+ movd m0, DWORD clip_maxm
+ SPLATW m0, m0, 0
+ mova m12, m0
+%endif
+ %define STEP mmsize
+ PROC 12, 2
+%endmacro
+
+INIT_XMM ssse3
+BWDIF
+INIT_XMM sse2
+BWDIF
+%if ARCH_X86_32
+INIT_MMX mmxext
+BWDIF
+%endif
diff --git a/libavfilter/x86/vf_bwdif_init.c b/libavfilter/x86/vf_bwdif_init.c
new file mode 100644
index 0000000000..1cb8438e5f
--- /dev/null
+++ b/libavfilter/x86/vf_bwdif_init.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2016 Thomas Mundt <loudmax@yahoo.de>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/attributes.h"
+#include "libavutil/cpu.h"
+#include "libavutil/mem.h"
+#include "libavutil/x86/asm.h"
+#include "libavutil/x86/cpu.h"
+#include "libavfilter/bwdif.h"
+
+void ff_bwdif_filter_line_mmxext(void *dst, void *prev, void *cur, void *next,
+ int w, int prefs, int mrefs, int prefs2,
+ int mrefs2, int prefs3, int mrefs3, int prefs4,
+ int mrefs4, int parity, int clip_max);
+void ff_bwdif_filter_line_sse2(void *dst, void *prev, void *cur, void *next,
+ int w, int prefs, int mrefs, int prefs2,
+ int mrefs2, int prefs3, int mrefs3, int prefs4,
+ int mrefs4, int parity, int clip_max);
+void ff_bwdif_filter_line_ssse3(void *dst, void *prev, void *cur, void *next,
+ int w, int prefs, int mrefs, int prefs2,
+ int mrefs2, int prefs3, int mrefs3, int prefs4,
+ int mrefs4, int parity, int clip_max);
+
+void ff_bwdif_filter_line_12bit_mmxext(void *dst, void *prev, void *cur, void *next,
+ int w, int prefs, int mrefs, int prefs2,
+ int mrefs2, int prefs3, int mrefs3, int prefs4,
+ int mrefs4, int parity, int clip_max);
+void ff_bwdif_filter_line_12bit_sse2(void *dst, void *prev, void *cur, void *next,
+ int w, int prefs, int mrefs, int prefs2,
+ int mrefs2, int prefs3, int mrefs3, int prefs4,
+ int mrefs4, int parity, int clip_max);
+void ff_bwdif_filter_line_12bit_ssse3(void *dst, void *prev, void *cur, void *next,
+ int w, int prefs, int mrefs, int prefs2,
+ int mrefs2, int prefs3, int mrefs3, int prefs4,
+ int mrefs4, int parity, int clip_max);
+
+av_cold void ff_bwdif_init_x86(BWDIFContext *bwdif)
+{
+ int cpu_flags = av_get_cpu_flags();
+ int bit_depth = (!bwdif->csp) ? 8 : bwdif->csp->comp[0].depth;
+
+ if (bit_depth <= 8) {
+#if ARCH_X86_32
+ if (EXTERNAL_MMXEXT(cpu_flags))
+ bwdif->filter_line = ff_bwdif_filter_line_mmxext;
+#endif /* ARCH_X86_32 */
+ if (EXTERNAL_SSE2(cpu_flags))
+ bwdif->filter_line = ff_bwdif_filter_line_sse2;
+ if (EXTERNAL_SSSE3(cpu_flags))
+ bwdif->filter_line = ff_bwdif_filter_line_ssse3;
+ } else if (bit_depth <= 12) {
+#if ARCH_X86_32
+ if (EXTERNAL_MMXEXT(cpu_flags))
+ bwdif->filter_line = ff_bwdif_filter_line_12bit_mmxext;
+#endif /* ARCH_X86_32 */
+ if (EXTERNAL_SSE2(cpu_flags))
+ bwdif->filter_line = ff_bwdif_filter_line_12bit_sse2;
+ if (EXTERNAL_SSSE3(cpu_flags))
+ bwdif->filter_line = ff_bwdif_filter_line_12bit_ssse3;
+ }
+}
diff --git a/libavfilter/x86/vf_eq.c b/libavfilter/x86/vf_eq.c
new file mode 100644
index 0000000000..16f399505f
--- /dev/null
+++ b/libavfilter/x86/vf_eq.c
@@ -0,0 +1,96 @@
+/*
+ *
+ * Original MPlayer filters by Richard Felker.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include "libavutil/attributes.h"
+#include "libavutil/cpu.h"
+#include "libavutil/mem.h"
+#include "libavutil/x86/asm.h"
+#include "libavfilter/vf_eq.h"
+
+#if HAVE_MMX_INLINE && HAVE_6REGS
+static void process_MMX(EQParameters *param, uint8_t *dst, int dst_stride,
+ const uint8_t *src, int src_stride, int w, int h)
+{
+ int i;
+ int pel;
+ int dstep = dst_stride - w;
+ int sstep = src_stride - w;
+ short brvec[4];
+ short contvec[4];
+ int contrast = (int) (param->contrast * 256 * 16);
+ int brightness = ((int) (100.0 * param->brightness + 100.0) * 511) / 200 - 128 - contrast / 32;
+
+ brvec[0] = brvec[1] = brvec[2] = brvec[3] = brightness;
+ contvec[0] = contvec[1] = contvec[2] = contvec[3] = contrast;
+
+ while (h--) {
+ __asm__ volatile (
+ "movq (%5), %%mm3 \n\t"
+ "movq (%6), %%mm4 \n\t"
+ "pxor %%mm0, %%mm0 \n\t"
+ "movl %4, %%eax \n\t"
+ ".p2align 4 \n\t"
+ "1: \n\t"
+ "movq (%0), %%mm1 \n\t"
+ "movq (%0), %%mm2 \n\t"
+ "punpcklbw %%mm0, %%mm1\n\t"
+ "punpckhbw %%mm0, %%mm2\n\t"
+ "psllw $4, %%mm1 \n\t"
+ "psllw $4, %%mm2 \n\t"
+ "pmulhw %%mm4, %%mm1 \n\t"
+ "pmulhw %%mm4, %%mm2 \n\t"
+ "paddw %%mm3, %%mm1 \n\t"
+ "paddw %%mm3, %%mm2 \n\t"
+ "packuswb %%mm2, %%mm1 \n\t"
+ "add $8, %0 \n\t"
+ "movq %%mm1, (%1) \n\t"
+ "add $8, %1 \n\t"
+ "decl %%eax \n\t"
+ "jnz 1b \n\t"
+ : "=r" (src), "=r" (dst)
+ : "0" (src), "1" (dst), "r" (w>>3), "r" (brvec), "r" (contvec)
+ : "%eax"
+ );
+
+ for (i = w&7; i; i--) {
+ pel = ((*src++ * contrast) >> 12) + brightness;
+ if (pel & ~255)
+ pel = (-pel) >> 31;
+ *dst++ = pel;
+ }
+
+ src += sstep;
+ dst += dstep;
+ }
+ __asm__ volatile ( "emms \n\t" ::: "memory" );
+}
+#endif
+
+av_cold void ff_eq_init_x86(EQContext *eq)
+{
+#if HAVE_MMX_INLINE && HAVE_6REGS
+ int cpu_flags = av_get_cpu_flags();
+
+ if (cpu_flags & AV_CPU_FLAG_MMX) {
+ eq->process = process_MMX;
+ }
+#endif
+}
diff --git a/libavfilter/x86/vf_fspp.asm b/libavfilter/x86/vf_fspp.asm
new file mode 100644
index 0000000000..c7f8f64f1b
--- /dev/null
+++ b/libavfilter/x86/vf_fspp.asm
@@ -0,0 +1,727 @@
+;*****************************************************************************
+;* x86-optimized functions for fspp filter
+;*
+;* Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+;* Copyright (C) 2005 Nikolaj Poroshin <porosh3@psu.ru>
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or modify
+;* it under the terms of the GNU General Public License as published by
+;* the Free Software Foundation; either version 2 of the License, or
+;* (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;* GNU General Public License for more details.
+;*
+;* You should have received a copy of the GNU General Public License along
+;* with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+;* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+;******************************************************************************
+
+%include "libavutil/x86/x86util.asm"
+
+SECTION_RODATA
+
+pb_dither: db 0, 48, 12, 60, 3, 51, 15, 63, 32, 16, 44, 28, 35, 19, 47, 31, \
+ 8, 56, 4, 52, 11, 59, 7, 55, 40, 24, 36, 20, 43, 27, 39, 23, \
+ 2, 50, 14, 62, 1, 49, 13, 61, 34, 18, 46, 30, 33, 17, 45, 29, \
+ 10, 58, 6, 54, 9, 57, 5, 53, 42, 26, 38, 22, 41, 25, 37, 21
+pw_187E: times 4 dw 0x187E ; FIX64(0.382683433, 14)
+pw_22A3: times 4 dw 0x22A3 ; FIX64(1.082392200, 13)
+pw_2D41: times 4 dw 0x2D41 ; FIX64(1.414213562, 13)
+pw_539F: times 4 dw 0x539F ; FIX64(1.306562965, 14)
+pw_5A82: times 4 dw 0x5A82 ; FIX64(1.414213562, 14)
+pw_3B21: times 4 dw 0x3B21 ; FIX64(1.847759065, 13)
+pw_AC62: times 4 dw 0xAC62 ; FIX64(-2.613125930, 13)
+pw_3642: times 4 dw 0x3642 ; FIX64(0.847759065, 14)
+pw_2441: times 4 dw 0x2441 ; FIX64(0.566454497, 14)
+pw_0CBB: times 4 dw 0x0CBB ; FIX64(0.198912367, 14)
+pw_4: times 4 dw 4
+pw_2: times 4 dw 2
+
+SECTION .text
+
+%define DCTSIZE 8
+
+INIT_MMX mmx
+
+;void ff_store_slice_mmx(uint8_t *dst, int16_t *src,
+; ptrdiff_t dst_stride, ptrdiff_t src_stride,
+; ptrdiff_t width, ptrdiff_t height, ptrdiff_t log2_scale)
+%if ARCH_X86_64
+cglobal store_slice, 7, 9, 0, dst, src, dst_stride, src_stride, width, dither_height, dither, tmp, tmp2
+%else
+cglobal store_slice, 2, 7, 0, dst, src, width, dither_height, dither, tmp, tmp2
+%define dst_strideq r2m
+%define src_strideq r3m
+ mov widthq, r4m
+ mov dither_heightq, r5m
+ mov ditherq, r6m ; log2_scale
+%endif
+ add widthq, 7
+ mov tmpq, src_strideq
+ and widthq, ~7
+ sub dst_strideq, widthq
+ movd m5, ditherd ; log2_scale
+ xor ditherq, -1 ; log2_scale
+ mov tmp2q, tmpq
+ add ditherq, 7 ; log2_scale
+ neg tmpq
+ sub tmp2q, widthq
+ movd m2, ditherd ; log2_scale
+ add tmp2q, tmp2q
+ lea ditherq, [pb_dither]
+ mov src_strideq, tmp2q
+ shl tmpq, 4
+ lea dither_heightq, [ditherq+dither_heightq*8]
+ pxor m7, m7
+
+.loop_height:
+ movq m3, [ditherq]
+ movq m4, m3
+ punpcklbw m3, m7
+ punpckhbw m4, m7
+ mov tmp2q, widthq
+ psraw m3, m5
+ psraw m4, m5
+
+.loop_width:
+ movq [srcq+tmpq], m7
+ movq m0, [srcq]
+ movq m1, [srcq+8]
+ movq [srcq+tmpq+8], m7
+ paddw m0, m3
+ paddw m1, m4
+ movq [srcq], m7
+ psraw m0, m2
+ psraw m1, m2
+ movq [srcq+8], m7
+ packuswb m0, m1
+ add srcq, 16
+ movq [dstq], m0
+ add dstq, 8
+ sub tmp2q, 8
+ jg .loop_width
+
+ add srcq, src_strideq
+ add ditherq, 8
+ add dstq, dst_strideq
+ cmp ditherq, dither_heightq
+ jl .loop_height
+ RET
+
+;void ff_store_slice2_mmx(uint8_t *dst, int16_t *src,
+; ptrdiff_t dst_stride, ptrdiff_t src_stride,
+; ptrdiff_t width, ptrdiff_t height, ptrdiff_t log2_scale)
+%if ARCH_X86_64
+cglobal store_slice2, 7, 9, 0, dst, src, dst_stride, src_stride, width, dither_height, dither, tmp, tmp2
+%else
+cglobal store_slice2, 0, 7, 0, dst, src, width, dither_height, dither, tmp, tmp2
+%define dst_strideq r2m
+%define src_strideq r3m
+ mov dstq, dstm
+ mov srcq, srcm
+ mov widthq, r4m
+ mov dither_heightq, r5m
+ mov ditherq, r6m ; log2_scale
+%endif
+ add widthq, 7
+ mov tmpq, src_strideq
+ and widthq, ~7
+ sub dst_strideq, widthq
+ movd m5, ditherd ; log2_scale
+ xor ditherq, -1 ; log2_scale
+ mov tmp2q, tmpq
+ add ditherq, 7 ; log2_scale
+ sub tmp2q, widthq
+ movd m2, ditherd ; log2_scale
+ add tmp2q, tmp2q
+ lea ditherq, [pb_dither]
+ mov src_strideq, tmp2q
+ shl tmpq, 5
+ lea dither_heightq, [ditherq+dither_heightq*8]
+ pxor m7, m7
+
+.loop_height:
+ movq m3, [ditherq]
+ movq m4, m3
+ punpcklbw m3, m7
+ punpckhbw m4, m7
+ mov tmp2q,widthq
+ psraw m3, m5
+ psraw m4, m5
+
+.loop_width:
+ movq m0, [srcq]
+ movq m1, [srcq+8]
+ paddw m0, m3
+ paddw m0, [srcq+tmpq]
+ paddw m1, m4
+ movq m6, [srcq+tmpq+8]
+ movq [srcq+tmpq], m7
+ psraw m0, m2
+ paddw m1, m6
+ movq [srcq+tmpq+8], m7
+ psraw m1, m2
+ packuswb m0, m1
+ movq [dstq], m0
+ add srcq, 16
+ add dstq, 8
+ sub tmp2q, 8
+ jg .loop_width
+
+ add srcq, src_strideq
+ add ditherq, 8
+ add dstq, dst_strideq
+ cmp ditherq, dither_heightq
+ jl .loop_height
+ RET
+
+;void ff_mul_thrmat_mmx(int16_t *thr_adr_noq, int16_t *thr_adr, int q);
+cglobal mul_thrmat, 3, 3, 0, thrn, thr, q
+ movd m7, qd
+ movq m0, [thrnq]
+ punpcklwd m7, m7
+ movq m1, [thrnq+8]
+ punpckldq m7, m7
+ pmullw m0, m7
+ movq m2, [thrnq+8*2]
+ pmullw m1, m7
+ movq m3, [thrnq+8*3]
+ pmullw m2, m7
+ movq [thrq], m0
+ movq m4, [thrnq+8*4]
+ pmullw m3, m7
+ movq [thrq+8], m1
+ movq m5, [thrnq+8*5]
+ pmullw m4, m7
+ movq [thrq+8*2], m2
+ movq m6, [thrnq+8*6]
+ pmullw m5, m7
+ movq [thrq+8*3], m3
+ movq m0, [thrnq+8*7]
+ pmullw m6, m7
+ movq [thrq+8*4], m4
+ movq m1, [thrnq+8*7+8]
+ pmullw m0, m7
+ movq [thrq+8*5], m5
+ movq m2, [thrnq+8*7+8*2]
+ pmullw m1, m7
+ movq [thrq+8*6], m6
+ movq m3, [thrnq+8*7+8*3]
+ pmullw m2, m7
+ movq [thrq+8*7], m0
+ movq m4, [thrnq+8*7+8*4]
+ pmullw m3, m7
+ movq [thrq+8*7+8], m1
+ movq m5, [thrnq+8*7+8*5]
+ pmullw m4, m7
+ movq [thrq+8*7+8*2], m2
+ movq m6, [thrnq+8*7+8*6]
+ pmullw m5, m7
+ movq [thrq+8*7+8*3], m3
+ movq m0, [thrnq+14*8]
+ pmullw m6, m7
+ movq [thrq+8*7+8*4], m4
+ movq m1, [thrnq+14*8+8]
+ pmullw m0, m7
+ movq [thrq+8*7+8*5], m5
+ pmullw m1, m7
+ movq [thrq+8*7+8*6], m6
+ movq [thrq+14*8], m0
+ movq [thrq+14*8+8], m1
+ RET
+
+%macro COLUMN_FDCT 1-3 0, 0
+ movq m1, [srcq+DCTSIZE*0*2]
+ movq m7, [srcq+DCTSIZE*3*2]
+ movq m0, m1
+ paddw m1, [srcq+DCTSIZE*7*2]
+ movq m3, m7
+ paddw m7, [srcq+DCTSIZE*4*2]
+ movq m5, m1
+ movq m6, [srcq+DCTSIZE*1*2]
+ psubw m1, m7
+ movq m2, [srcq+DCTSIZE*2*2]
+ movq m4, m6
+ paddw m6, [srcq+DCTSIZE*6*2]
+ paddw m5, m7
+ paddw m2, [srcq+DCTSIZE*5*2]
+ movq m7, m6
+ paddw m6, m2
+ psubw m7, m2
+ movq m2, m5
+ paddw m5, m6
+ psubw m2, m6
+ paddw m7, m1
+ movq m6, [thrq+4*16+%2]
+ psllw m7, 2
+ psubw m5, [thrq+%2]
+ psubw m2, m6
+ paddusw m5, [thrq+%2]
+ paddusw m2, m6
+ pmulhw m7, [pw_2D41]
+ paddw m5, [thrq+%2]
+ paddw m2, m6
+ psubusw m5, [thrq+%2]
+ psubusw m2, m6
+ paddw m5, [pw_2]
+ movq m6, m2
+ paddw m2, m5
+ psubw m5, m6
+ movq m6, m1
+ paddw m1, m7
+ psubw m1, [thrq+2*16+%2]
+ psubw m6, m7
+ movq m7, [thrq+6*16+%2]
+ psraw m5, 2
+ paddusw m1, [thrq+2*16+%2]
+ psubw m6, m7
+ paddw m1, [thrq+2*16+%2]
+ paddusw m6, m7
+ psubusw m1, [thrq+2*16+%2]
+ paddw m6, m7
+ psubw m3, [srcq+DCTSIZE*4*2]
+ psubusw m6, m7
+ movq m7, m1
+ psraw m2, 2
+ psubw m4, [srcq+DCTSIZE*6*2]
+ psubw m1, m6
+ psubw m0, [srcq+DCTSIZE*7*2]
+ paddw m6, m7
+ psraw m6, 2
+ movq m7, m2
+ pmulhw m1, [pw_5A82]
+ paddw m2, m6
+ movq [rsp], m2
+ psubw m7, m6
+ movq m2, [srcq+DCTSIZE*2*2]
+ psubw m1, m6
+ psubw m2, [srcq+DCTSIZE*5*2]
+ movq m6, m5
+ movq [rsp+8*3], m7
+ paddw m3, m2
+ paddw m2, m4
+ paddw m4, m0
+ movq m7, m3
+ psubw m3, m4
+ psllw m3, 2
+ psllw m7, 2
+ pmulhw m3, [pw_187E]
+ psllw m4, 2
+ pmulhw m7, [pw_22A3]
+ psllw m2, 2
+ pmulhw m4, [pw_539F]
+ paddw m5, m1
+ pmulhw m2, [pw_2D41]
+ psubw m6, m1
+ paddw m7, m3
+ movq [rsp+8], m5
+ paddw m4, m3
+ movq m3, [thrq+3*16+%2]
+ movq m1, m0
+ movq [rsp+8*2], m6
+ psubw m1, m2
+ paddw m0, m2
+ movq m5, m1
+ movq m2, [thrq+5*16+%2]
+ psubw m1, m7
+ paddw m5, m7
+ psubw m1, m3
+ movq m7, [thrq+16+%2]
+ psubw m5, m2
+ movq m6, m0
+ paddw m0, m4
+ paddusw m1, m3
+ psubw m6, m4
+ movq m4, [thrq+7*16+%2]
+ psubw m0, m7
+ psubw m6, m4
+ paddusw m5, m2
+ paddusw m6, m4
+ paddw m1, m3
+ paddw m5, m2
+ paddw m6, m4
+ psubusw m1, m3
+ psubusw m5, m2
+ psubusw m6, m4
+ movq m4, m1
+ por m4, m5
+ paddusw m0, m7
+ por m4, m6
+ paddw m0, m7
+ packssdw m4, m4
+ psubusw m0, m7
+ movd tmpd, m4
+ or tmpd, tmpd
+ jnz %1
+ movq m4, [rsp]
+ movq m1, m0
+ pmulhw m0, [pw_3642]
+ movq m2, m1
+ movq m5, [outq+DCTSIZE*0*2]
+ movq m3, m2
+ pmulhw m1, [pw_2441]
+ paddw m5, m4
+ movq m6, [rsp+8]
+ psraw m3, 2
+ pmulhw m2, [pw_0CBB]
+ psubw m4, m3
+ movq m7, [outq+DCTSIZE*1*2]
+ paddw m5, m3
+ movq [outq+DCTSIZE*7*2], m4
+ paddw m7, m6
+ movq m3, [rsp+8*2]
+ psubw m6, m0
+ movq m4, [outq+DCTSIZE*2*2]
+ paddw m7, m0
+ movq [outq], m5
+ paddw m4, m3
+ movq [outq+DCTSIZE*6*2], m6
+ psubw m3, m1
+ movq m5, [outq+DCTSIZE*5*2]
+ paddw m4, m1
+ movq m6, [outq+DCTSIZE*3*2]
+ paddw m5, m3
+ movq m0, [rsp+8*3]
+ add srcq, 8+%3
+ movq [outq+DCTSIZE*1*2], m7
+ paddw m6, m0
+ movq [outq+DCTSIZE*2*2], m4
+ psubw m0, m2
+ movq m7, [outq+DCTSIZE*4*2]
+ paddw m6, m2
+ movq [outq+DCTSIZE*5*2], m5
+ paddw m7, m0
+ movq [outq+DCTSIZE*3*2], m6
+ movq [outq+DCTSIZE*4*2], m7
+ add outq, 8+%3
+%endmacro
+
+%macro COLUMN_IDCT 0-1 0
+ movq m3, m5
+ psubw m5, m1
+ psllw m5, 1
+ paddw m3, m1
+ movq m2, m0
+ psubw m0, m6
+ movq m1, m5
+ psllw m0, 1
+ pmulhw m1, [pw_AC62]
+ paddw m5, m0
+ pmulhw m5, [pw_3B21]
+ paddw m2, m6
+ pmulhw m0, [pw_22A3]
+ movq m7, m2
+ movq m4, [rsp]
+ psubw m2, m3
+ psllw m2, 1
+ paddw m7, m3
+ pmulhw m2, [pw_2D41]
+ movq m6, m4
+ psraw m7, 2
+ paddw m4, [outq]
+ psubw m6, m7
+ movq m3, [rsp+8]
+ paddw m4, m7
+ movq [outq+DCTSIZE*7*2], m6
+ paddw m1, m5
+ movq [outq], m4
+ psubw m1, m7
+ movq m7, [rsp+8*2]
+ psubw m0, m5
+ movq m6, [rsp+8*3]
+ movq m5, m3
+ paddw m3, [outq+DCTSIZE*1*2]
+ psubw m5, m1
+ psubw m2, m1
+ paddw m3, m1
+ movq [outq+DCTSIZE*6*2], m5
+ movq m4, m7
+ paddw m7, [outq+DCTSIZE*2*2]
+ psubw m4, m2
+ paddw m4, [outq+DCTSIZE*5*2]
+ paddw m7, m2
+ movq [outq+DCTSIZE*1*2], m3
+ paddw m0, m2
+ movq [outq+DCTSIZE*2*2], m7
+ movq m1, m6
+ paddw m6, [outq+DCTSIZE*4*2]
+ psubw m1, m0
+ paddw m1, [outq+DCTSIZE*3*2]
+ paddw m6, m0
+ movq [outq+DCTSIZE*5*2], m4
+ add srcq, 8+%1
+ movq [outq+DCTSIZE*4*2], m6
+ movq [outq+DCTSIZE*3*2], m1
+ add outq, 8+%1
+%endmacro
+
+;void ff_column_fidct_mmx(int16_t *thr_adr, int16_t *data, int16_t *output, int cnt);
+cglobal column_fidct, 4, 5, 0, 32, thr, src, out, cnt, tmp
+.fdct1:
+ COLUMN_FDCT .idct1
+ jmp .fdct2
+
+.idct1:
+ COLUMN_IDCT
+
+.fdct2:
+ COLUMN_FDCT .idct2, 8, 16
+ sub cntd, 2
+ jg .fdct1
+ RET
+
+.idct2:
+ COLUMN_IDCT 16
+ sub cntd, 2
+ jg .fdct1
+ RET
+
+;void ff_row_idct_mmx(int16_t *workspace, int16_t *output_adr, ptrdiff_t output_stride, int cnt);
+cglobal row_idct, 4, 5, 0, 16, src, dst, stride, cnt, stride3
+ add strideq, strideq
+ lea stride3q, [strideq+strideq*2]
+.loop:
+ movq m0, [srcq+DCTSIZE*0*2]
+ movq m1, [srcq+DCTSIZE*1*2]
+ movq m4, m0
+ movq m2, [srcq+DCTSIZE*2*2]
+ punpcklwd m0, m1
+ movq m3, [srcq+DCTSIZE*3*2]
+ punpckhwd m4, m1
+ movq m7, m2
+ punpcklwd m2, m3
+ movq m6, m0
+ punpckldq m0, m2
+ punpckhdq m6, m2
+ movq m5, m0
+ punpckhwd m7, m3
+ psubw m0, m6
+ pmulhw m0, [pw_5A82]
+ movq m2, m4
+ punpckldq m4, m7
+ paddw m5, m6
+ punpckhdq m2, m7
+ movq m1, m4
+ psllw m0, 2
+ paddw m4, m2
+ movq m3, [srcq+DCTSIZE*0*2+8]
+ psubw m1, m2
+ movq m2, [srcq+DCTSIZE*1*2+8]
+ psubw m0, m5
+ movq m6, m4
+ paddw m4, m5
+ psubw m6, m5
+ movq m7, m1
+ movq m5, [srcq+DCTSIZE*2*2+8]
+ paddw m1, m0
+ movq [rsp], m4
+ movq m4, m3
+ movq [rsp+8], m6
+ punpcklwd m3, m2
+ movq m6, [srcq+DCTSIZE*3*2+8]
+ punpckhwd m4, m2
+ movq m2, m5
+ punpcklwd m5, m6
+ psubw m7, m0
+ punpckhwd m2, m6
+ movq m0, m3
+ punpckldq m3, m5
+ punpckhdq m0, m5
+ movq m5, m4
+ movq m6, m3
+ punpckldq m4, m2
+ psubw m3, m0
+ punpckhdq m5, m2
+ paddw m6, m0
+ movq m2, m4
+ movq m0, m3
+ psubw m4, m5
+ pmulhw m0, [pw_AC62]
+ paddw m3, m4
+ pmulhw m3, [pw_3B21]
+ paddw m2, m5
+ pmulhw m4, [pw_22A3]
+ movq m5, m2
+ psubw m2, m6
+ paddw m5, m6
+ pmulhw m2, [pw_2D41]
+ paddw m0, m3
+ psllw m0, 3
+ psubw m4, m3
+ movq m6, [rsp]
+ movq m3, m1
+ psllw m4, 3
+ psubw m0, m5
+ psllw m2, 3
+ paddw m1, m0
+ psubw m2, m0
+ psubw m3, m0
+ paddw m4, m2
+ movq m0, m7
+ paddw m7, m2
+ psubw m0, m2
+ movq m2, [pw_4]
+ psubw m6, m5
+ paddw m5, [rsp]
+ paddw m1, m2
+ paddw m5, m2
+ psraw m1, 3
+ paddw m7, m2
+ psraw m5, 3
+ paddw m5, [dstq]
+ psraw m7, 3
+ paddw m1, [dstq+strideq*1]
+ paddw m0, m2
+ paddw m7, [dstq+strideq*2]
+ paddw m3, m2
+ movq [dstq], m5
+ paddw m6, m2
+ movq [dstq+strideq*1], m1
+ psraw m0, 3
+ movq [dstq+strideq*2], m7
+ add dstq, stride3q
+ movq m5, [rsp+8]
+ psraw m3, 3
+ paddw m0, [dstq+strideq*2]
+ psubw m5, m4
+ paddw m3, [dstq+stride3q*1]
+ psraw m6, 3
+ paddw m4, [rsp+8]
+ paddw m5, m2
+ paddw m6, [dstq+strideq*4]
+ paddw m4, m2
+ movq [dstq+strideq*2], m0
+ psraw m5, 3
+ paddw m5, [dstq]
+ psraw m4, 3
+ paddw m4, [dstq+strideq*1]
+ add srcq, DCTSIZE*2*4
+ movq [dstq+stride3q*1], m3
+ movq [dstq+strideq*4], m6
+ movq [dstq], m5
+ movq [dstq+strideq*1], m4
+ sub dstq, stride3q
+ add dstq, 8
+ dec r3d
+ jnz .loop
+ RET
+
+;void ff_row_fdct_mmx(int16_t *data, const uint8_t *pixels, ptrdiff_t line_size, int cnt);
+cglobal row_fdct, 4, 5, 0, 16, src, pix, stride, cnt, stride3
+ lea stride3q, [strideq+strideq*2]
+.loop:
+ movd m0, [pixq]
+ pxor m7, m7
+ movd m1, [pixq+strideq*1]
+ punpcklbw m0, m7
+ movd m2, [pixq+strideq*2]
+ punpcklbw m1, m7
+ punpcklbw m2, m7
+ add pixq,stride3q
+ movq m5, m0
+ movd m3, [pixq+strideq*4]
+ movq m6, m1
+ movd m4, [pixq+stride3q*1]
+ punpcklbw m3, m7
+ psubw m5, m3
+ punpcklbw m4, m7
+ paddw m0, m3
+ psubw m6, m4
+ movd m3, [pixq+strideq*2]
+ paddw m1, m4
+ movq [rsp], m5
+ punpcklbw m3, m7
+ movq [rsp+8], m6
+ movq m4, m2
+ movd m5, [pixq]
+ paddw m2, m3
+ movd m6, [pixq+strideq*1]
+ punpcklbw m5, m7
+ psubw m4, m3
+ punpcklbw m6, m7
+ movq m3, m5
+ paddw m5, m6
+ psubw m3, m6
+ movq m6, m0
+ movq m7, m1
+ psubw m0, m5
+ psubw m1, m2
+ paddw m7, m2
+ paddw m1, m0
+ movq m2, m7
+ psllw m1, 2
+ paddw m6, m5
+ pmulhw m1, [pw_2D41]
+ paddw m7, m6
+ psubw m6, m2
+ movq m5, m0
+ movq m2, m7
+ punpcklwd m7, m6
+ paddw m0, m1
+ punpckhwd m2, m6
+ psubw m5, m1
+ movq m6, m0
+ movq m1, [rsp+8]
+ punpcklwd m0, m5
+ punpckhwd m6, m5
+ movq m5, m0
+ punpckldq m0, m7
+ paddw m3, m4
+ punpckhdq m5, m7
+ movq m7, m6
+ movq [srcq+DCTSIZE*0*2], m0
+ punpckldq m6, m2
+ movq [srcq+DCTSIZE*1*2], m5
+ punpckhdq m7, m2
+ movq [srcq+DCTSIZE*2*2], m6
+ paddw m4, m1
+ movq [srcq+DCTSIZE*3*2], m7
+ psllw m3, 2
+ movq m2, [rsp]
+ psllw m4, 2
+ pmulhw m4, [pw_2D41]
+ paddw m1, m2
+ psllw m1, 2
+ movq m0, m3
+ pmulhw m0, [pw_22A3]
+ psubw m3, m1
+ pmulhw m3, [pw_187E]
+ movq m5, m2
+ pmulhw m1, [pw_539F]
+ psubw m2, m4
+ paddw m5, m4
+ movq m6, m2
+ paddw m0, m3
+ movq m7, m5
+ paddw m2, m0
+ psubw m6, m0
+ movq m4, m2
+ paddw m1, m3
+ punpcklwd m2, m6
+ paddw m5, m1
+ punpckhwd m4, m6
+ psubw m7, m1
+ movq m6, m5
+ punpcklwd m5, m7
+ punpckhwd m6, m7
+ movq m7, m2
+ punpckldq m2, m5
+ sub pixq, stride3q
+ punpckhdq m7, m5
+ movq m5, m4
+ movq [srcq+DCTSIZE*0*2+8], m2
+ punpckldq m4, m6
+ movq [srcq+DCTSIZE*1*2+8], m7
+ punpckhdq m5, m6
+ movq [srcq+DCTSIZE*2*2+8], m4
+ add pixq, 4
+ movq [srcq+DCTSIZE*3*2+8], m5
+ add srcq, DCTSIZE*4*2
+ dec cntd
+ jnz .loop
+ RET
diff --git a/libavfilter/x86/vf_fspp_init.c b/libavfilter/x86/vf_fspp_init.c
new file mode 100644
index 0000000000..8e00317cb7
--- /dev/null
+++ b/libavfilter/x86/vf_fspp_init.c
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (C) 2005 Nikolaj Poroshin <porosh3@psu.ru>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include "libavutil/attributes.h"
+#include "libavutil/x86/cpu.h"
+#include "libavfilter/vf_fspp.h"
+
+void ff_store_slice_mmx(uint8_t *dst, int16_t *src,
+ ptrdiff_t dst_stride, ptrdiff_t src_stride,
+ ptrdiff_t width, ptrdiff_t height, ptrdiff_t log2_scale);
+void ff_store_slice2_mmx(uint8_t *dst, int16_t *src,
+ ptrdiff_t dst_stride, ptrdiff_t src_stride,
+ ptrdiff_t width, ptrdiff_t height, ptrdiff_t log2_scale);
+void ff_mul_thrmat_mmx(int16_t *thr_adr_noq, int16_t *thr_adr, int q);
+void ff_column_fidct_mmx(int16_t *thr_adr, int16_t *data, int16_t *output, int cnt);
+void ff_row_idct_mmx(int16_t *workspace, int16_t *output_adr, ptrdiff_t output_stride, int cnt);
+void ff_row_fdct_mmx(int16_t *data, const uint8_t *pixels, ptrdiff_t line_size, int cnt);
+
+av_cold void ff_fspp_init_x86(FSPPContext *s)
+{
+ int cpu_flags = av_get_cpu_flags();
+
+ if (EXTERNAL_MMX(cpu_flags)) {
+ s->store_slice = ff_store_slice_mmx;
+ s->store_slice2 = ff_store_slice2_mmx;
+ s->mul_thrmat = ff_mul_thrmat_mmx;
+ s->column_fidct = ff_column_fidct_mmx;
+ s->row_idct = ff_row_idct_mmx;
+ s->row_fdct = ff_row_fdct_mmx;
+ }
+}
diff --git a/libavfilter/x86/vf_gradfun.asm b/libavfilter/x86/vf_gradfun.asm
index 00fcb166fb..3581f89fe8 100644
--- a/libavfilter/x86/vf_gradfun.asm
+++ b/libavfilter/x86/vf_gradfun.asm
@@ -1,20 +1,20 @@
;******************************************************************************
;* x86-optimized functions for gradfun filter
;*
-;* This file is part of Libav.
+;* This file is part of FFmpeg.
;*
-;* Libav is free software; you can redistribute it and/or
+;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
-;* Libav is distributed in the hope that it will be useful,
+;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
-;* License along with Libav; if not, write to the Free Software
+;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
diff --git a/libavfilter/x86/vf_gradfun_init.c b/libavfilter/x86/vf_gradfun_init.c
index f8d85c7120..3f515622b6 100644
--- a/libavfilter/x86/vf_gradfun_init.c
+++ b/libavfilter/x86/vf_gradfun_init.c
@@ -1,20 +1,20 @@
/*
* Copyright (C) 2009 Loren Merritt <lorenm@u.washington.edu>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -25,29 +25,29 @@
#include "libavutil/x86/cpu.h"
#include "libavfilter/gradfun.h"
-void ff_gradfun_filter_line_mmxext(intptr_t x, uint8_t *dst, uint8_t *src,
- uint16_t *dc, int thresh,
+void ff_gradfun_filter_line_mmxext(intptr_t x, uint8_t *dst, const uint8_t *src,
+ const uint16_t *dc, int thresh,
const uint16_t *dithers);
-
-void ff_gradfun_filter_line_ssse3(intptr_t x, uint8_t *dst, uint8_t *src,
- uint16_t *dc, int thresh,
+void ff_gradfun_filter_line_ssse3(intptr_t x, uint8_t *dst, const uint8_t *src,
+ const uint16_t *dc, int thresh,
const uint16_t *dithers);
void ff_gradfun_blur_line_movdqa_sse2(intptr_t x, uint16_t *buf,
- uint16_t *buf1, uint16_t *dc,
- uint8_t *src1, uint8_t *src2);
+ const uint16_t *buf1, uint16_t *dc,
+ const uint8_t *src1, const uint8_t *src2);
void ff_gradfun_blur_line_movdqu_sse2(intptr_t x, uint16_t *buf,
- uint16_t *buf1, uint16_t *dc,
- uint8_t *src1, uint8_t *src2);
+ const uint16_t *buf1, uint16_t *dc,
+ const uint8_t *src1, const uint8_t *src2);
#if HAVE_YASM
-static void gradfun_filter_line(uint8_t *dst, uint8_t *src, uint16_t *dc,
- int width, int thresh, const uint16_t *dithers,
- int alignment)
+static void gradfun_filter_line_mmxext(uint8_t *dst, const uint8_t *src,
+ const uint16_t *dc,
+ int width, int thresh,
+ const uint16_t *dithers)
{
intptr_t x;
- if (width & alignment) {
- x = width & ~alignment;
+ if (width & 3) {
+ x = width & ~3;
ff_gradfun_filter_line_c(dst + x, src + x, dc + x / 2,
width - x, thresh, dithers);
width = x;
@@ -57,22 +57,25 @@ static void gradfun_filter_line(uint8_t *dst, uint8_t *src, uint16_t *dc,
thresh, dithers);
}
-static void gradfun_filter_line_mmxext(uint8_t *dst, uint8_t *src, uint16_t *dc,
- int width, int thresh,
- const uint16_t *dithers)
-{
- gradfun_filter_line(dst, src, dc, width, thresh, dithers, 3);
-}
-
-static void gradfun_filter_line_ssse3(uint8_t *dst, uint8_t *src, uint16_t *dc,
+static void gradfun_filter_line_ssse3(uint8_t *dst, const uint8_t *src, const uint16_t *dc,
int width, int thresh,
const uint16_t *dithers)
{
- gradfun_filter_line(dst, src, dc, width, thresh, dithers, 7);
+ intptr_t x;
+ if (width & 7) {
+ // could be 10% faster if I somehow eliminated this
+ x = width & ~7;
+ ff_gradfun_filter_line_c(dst + x, src + x, dc + x / 2,
+ width - x, thresh, dithers);
+ width = x;
+ }
+ x = -width;
+ ff_gradfun_filter_line_ssse3(x, dst + width, src + width, dc + width / 2,
+ thresh, dithers);
}
-static void gradfun_blur_line_sse2(uint16_t *dc, uint16_t *buf, uint16_t *buf1,
- uint8_t *src, int src_linesize, int width)
+static void gradfun_blur_line_sse2(uint16_t *dc, uint16_t *buf, const uint16_t *buf1,
+ const uint8_t *src, int src_linesize, int width)
{
intptr_t x = -2 * width;
if (((intptr_t) src | src_linesize) & 15)
diff --git a/libavfilter/x86/vf_hqdn3d.asm b/libavfilter/x86/vf_hqdn3d.asm
index 02632a1f09..e3b1bdca53 100644
--- a/libavfilter/x86/vf_hqdn3d.asm
+++ b/libavfilter/x86/vf_hqdn3d.asm
@@ -1,20 +1,20 @@
;******************************************************************************
;* Copyright (c) 2012 Loren Merritt
;*
-;* This file is part of Libav.
+;* This file is part of FFmpeg.
;*
-;* Libav is free software; you can redistribute it and/or
+;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
-;* Libav is distributed in the hope that it will be useful,
+;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
-;* License along with Libav; if not, write to the Free Software
+;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
@@ -27,8 +27,8 @@ SECTION .text
%if lut_bits != 8
sar %1q, 8-lut_bits
%endif
- movsx %1d, word [%3q+%1q*2]
- add %1d, %2d
+ movsx %1q, word [%3q+%1q*2]
+ add %1q, %2q
%endmacro
%macro LOAD 3 ; dstreg, x, bitdepth
diff --git a/libavfilter/x86/vf_hqdn3d_init.c b/libavfilter/x86/vf_hqdn3d_init.c
index 06f9e00ec9..b63916b674 100644
--- a/libavfilter/x86/vf_hqdn3d_init.c
+++ b/libavfilter/x86/vf_hqdn3d_init.c
@@ -1,18 +1,20 @@
/*
- * This file is part of Libav.
+ * Copyright (c) 2012 Loren Merritt
*
- * Libav is free software; you can redistribute it and/or modify
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
- * with Libav; if not, write to the Free Software Foundation, Inc.,
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
diff --git a/libavfilter/x86/vf_idet.asm b/libavfilter/x86/vf_idet.asm
new file mode 100644
index 0000000000..9596abd7e2
--- /dev/null
+++ b/libavfilter/x86/vf_idet.asm
@@ -0,0 +1,170 @@
+;*****************************************************************************
+;* x86-optimized functions for idet filter
+;*
+;* Copyright (C) 2014 Pascal Massimino (pascal.massimino@gmail.com)
+;* Copyright (c) 2014 Neil Birkbeck (birkbeck@google.com)
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+%include "libavutil/x86/x86util.asm"
+
+SECTION .text
+
+; Implementation that does 8-bytes at a time using single-word operations.
+%macro IDET_FILTER_LINE 1
+INIT_MMX %1
+cglobal idet_filter_line, 4, 5, 0, a, b, c, width, index
+ xor indexq, indexq
+%define m_zero m2
+%define m_sum m5
+ pxor m_sum, m_sum
+ pxor m_zero, m_zero
+
+.loop:
+ movu m0, [aq + indexq*1]
+ punpckhbw m1, m0, m_zero
+ punpcklbw m0, m_zero
+
+ movu m3, [cq + indexq*1]
+ punpckhbw m4, m3, m_zero
+ punpcklbw m3, m_zero
+
+ paddsw m1, m4
+ paddsw m0, m3
+
+ movu m3, [bq + indexq*1]
+ punpckhbw m4, m3, m_zero
+ punpcklbw m3, m_zero
+
+ paddw m4, m4
+ paddw m3, m3
+ psubsw m1, m4
+ psubsw m0, m3
+
+ ABS2 m1, m0, m4, m3
+
+ paddw m0, m1
+ punpckhwd m1, m0, m_zero
+ punpcklwd m0, m_zero
+
+ paddd m0, m1
+ paddd m_sum, m0
+
+ add indexq, 0x8
+ CMP widthd, indexd
+ jg .loop
+
+ HADDD m_sum, m0
+ movd eax, m_sum
+ RET
+%endmacro
+
+%if ARCH_X86_32
+IDET_FILTER_LINE mmxext
+IDET_FILTER_LINE mmx
+%endif
+
+;******************************************************************************
+; 16bit implementation that does 4/8-pixels at a time
+
+%macro PABS_DIFF_WD 3 ; a, b, junk , output=a
+ psubusw %3, %2, %1
+ psubusw %1, %2
+ por %1, %3
+
+ mova %2, %1
+ punpcklwd %1, m_zero
+ punpckhwd %2, m_zero
+ paddd %1, %2
+%endmacro
+
+%macro IDET_FILTER_LINE_16BIT 1 ; %1=increment (4 or 8 words)
+cglobal idet_filter_line_16bit, 4, 5, 8, a, b, c, width, index
+ xor indexq, indexq
+%define m_zero m1
+%define m_sum m0
+ pxor m_sum, m_sum
+ pxor m_zero, m_zero
+
+.loop_16bit:
+ movu m2, [bq + indexq * 2] ; B
+ movu m3, [aq + indexq * 2] ; A
+ mova m6, m2
+ psubusw m5, m2, m3 ; ba
+
+ movu m4, [cq + indexq * 2] ; C
+ add indexq, %1
+ psubusw m3, m2 ; ab
+ CMP indexd, widthd
+
+ psubusw m6, m4 ; bc
+ psubusw m4, m2 ; cb
+
+ PABS_DIFF_WD m3, m6, m7 ; |ab - bc|
+ PABS_DIFF_WD m5, m4, m7 ; |ba - cb|
+ paddd m_sum, m3
+ paddd m_sum, m5
+ jl .loop_16bit
+
+ HADDD m_sum, m2
+ movd eax, m_sum
+ RET
+%endmacro
+
+INIT_XMM sse2
+IDET_FILTER_LINE_16BIT 8
+%if ARCH_X86_32
+INIT_MMX mmx
+IDET_FILTER_LINE_16BIT 4
+%endif
+
+;******************************************************************************
+; SSE2 8-bit implementation that does 16-bytes at a time:
+
+INIT_XMM sse2
+cglobal idet_filter_line, 4, 6, 7, a, b, c, width, index, total
+ xor indexq, indexq
+ pxor m0, m0
+ pxor m1, m1
+
+.sse2_loop:
+ movu m2, [bq + indexq*1] ; B
+ movu m3, [aq + indexq*1] ; A
+ mova m6, m2
+ mova m4, m3
+ psubusb m5, m2, m3 ; ba
+
+ movu m3, [cq + indexq*1] ; C
+ add indexq, 0x10
+ psubusb m4, m2 ; ab
+ CMP indexd, widthd
+
+ psubusb m6, m3 ; bc
+ psubusb m3, m2 ; cb
+
+ psadbw m4, m6 ; |ab - bc|
+ paddq m0, m4
+ psadbw m5, m3 ; |ba - cb|
+ paddq m1, m5
+ jl .sse2_loop
+
+ paddq m0, m1
+ movhlps m1, m0
+ paddq m0, m1
+ movd eax, m0
+ RET
diff --git a/libavfilter/x86/vf_idet_init.c b/libavfilter/x86/vf_idet_init.c
new file mode 100644
index 0000000000..1147ca8ba8
--- /dev/null
+++ b/libavfilter/x86/vf_idet_init.c
@@ -0,0 +1,87 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/attributes.h"
+#include "libavutil/cpu.h"
+#include "libavutil/mem.h"
+#include "libavutil/x86/asm.h"
+#include "libavutil/x86/cpu.h"
+#include "libavfilter/vf_idet.h"
+
+#if HAVE_YASM
+
+/* declares main callable idet_filter_line_{mmx,mmxext,sse2}() */
+#define FUNC_MAIN_DECL(KIND, SPAN) \
+int ff_idet_filter_line_##KIND(const uint8_t *a, const uint8_t *b, \
+ const uint8_t *c, int w); \
+static int idet_filter_line_##KIND(const uint8_t *a, const uint8_t *b, \
+ const uint8_t *c, int w) { \
+ int sum = 0; \
+ const int left_over = w & (SPAN - 1); \
+ w -= left_over; \
+ if (w > 0) \
+ sum += ff_idet_filter_line_##KIND(a, b, c, w); \
+ if (left_over > 0) \
+ sum += ff_idet_filter_line_c(a + w, b + w, c + w, left_over); \
+ return sum; \
+}
+
+
+#define FUNC_MAIN_DECL_16bit(KIND, SPAN) \
+int ff_idet_filter_line_16bit_##KIND(const uint16_t *a, const uint16_t *b, \
+ const uint16_t *c, int w); \
+static int idet_filter_line_16bit_##KIND(const uint16_t *a, const uint16_t *b, \
+ const uint16_t *c, int w) { \
+ int sum = 0; \
+ const int left_over = w & (SPAN - 1); \
+ w -= left_over; \
+ if (w > 0) \
+ sum += ff_idet_filter_line_16bit_##KIND(a, b, c, w); \
+ if (left_over > 0) \
+ sum += ff_idet_filter_line_c_16bit(a + w, b + w, c + w, left_over); \
+ return sum; \
+}
+
+FUNC_MAIN_DECL(sse2, 16)
+FUNC_MAIN_DECL_16bit(sse2, 8)
+#if ARCH_X86_32
+FUNC_MAIN_DECL(mmx, 8)
+FUNC_MAIN_DECL(mmxext, 8)
+FUNC_MAIN_DECL_16bit(mmx, 4)
+#endif
+
+#endif
+av_cold void ff_idet_init_x86(IDETContext *idet, int for_16b)
+{
+#if HAVE_YASM
+ const int cpu_flags = av_get_cpu_flags();
+
+#if ARCH_X86_32
+ if (EXTERNAL_MMX(cpu_flags)) {
+ idet->filter_line = for_16b ? (ff_idet_filter_func)idet_filter_line_16bit_mmx : idet_filter_line_mmx;
+ }
+ if (EXTERNAL_MMXEXT(cpu_flags)) {
+ idet->filter_line = for_16b ? (ff_idet_filter_func)idet_filter_line_16bit_mmx : idet_filter_line_mmxext;
+ }
+#endif // ARCH_x86_32
+
+ if (EXTERNAL_SSE2(cpu_flags)) {
+ idet->filter_line = for_16b ? (ff_idet_filter_func)idet_filter_line_16bit_sse2 : idet_filter_line_sse2;
+ }
+#endif // HAVE_YASM
+}
diff --git a/libavfilter/x86/vf_interlace.asm b/libavfilter/x86/vf_interlace.asm
index f2344216b3..f70c700965 100644
--- a/libavfilter/x86/vf_interlace.asm
+++ b/libavfilter/x86/vf_interlace.asm
@@ -4,20 +4,20 @@
;* Copyright (C) 2014 Kieran Kunhya <kierank@obe.tv>
;* Copyright (c) 2014 Michael Niedermayer <michaelni@gmx.at>
;*
-;* This file is part of Libav.
+;* This file is part of FFmpeg.
;*
-;* Libav is free software; you can redistribute it and/or modify
+;* FFmpeg is free software; you can redistribute it and/or modify
;* it under the terms of the GNU General Public License as published by
;* the Free Software Foundation; either version 2 of the License, or
;* (at your option) any later version.
;*
-;* Libav is distributed in the hope that it will be useful,
+;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
;* GNU General Public License for more details.
;*
;* You should have received a copy of the GNU General Public License along
-;* with Libav; if not, write to the Free Software Foundation, Inc.,
+;* with FFmpeg; if not, write to the Free Software Foundation, Inc.,
;* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
;******************************************************************************
diff --git a/libavfilter/x86/vf_interlace_init.c b/libavfilter/x86/vf_interlace_init.c
index 105eeb6920..52a22f80c7 100644
--- a/libavfilter/x86/vf_interlace_init.c
+++ b/libavfilter/x86/vf_interlace_init.c
@@ -1,20 +1,20 @@
/*
* Copyright (C) 2014 Kieran Kunhya <kierank@obe.tv>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or modify
+ * FFmpeg is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
- * with Libav; if not, write to the Free Software Foundation, Inc.,
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
diff --git a/libavfilter/x86/vf_maskedmerge.asm b/libavfilter/x86/vf_maskedmerge.asm
new file mode 100644
index 0000000000..7e61935b97
--- /dev/null
+++ b/libavfilter/x86/vf_maskedmerge.asm
@@ -0,0 +1,81 @@
+;*****************************************************************************
+;* x86-optimized functions for maskedmerge filter
+;*
+;* Copyright (C) 2015 Paul B Mahol
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;*****************************************************************************
+
+%include "libavutil/x86/x86util.asm"
+
+SECTION_RODATA
+
+pw_128: times 8 dw 128
+pw_256: times 8 dw 256
+
+SECTION .text
+
+INIT_XMM sse2
+%if ARCH_X86_64
+cglobal maskedmerge8, 8, 11, 7, bsrc, osrc, msrc, dst, blinesize, olinesize, mlinesize, dlinesize, w, h, x
+ mov wd, dword wm
+ mov hd, dword hm
+%else
+cglobal maskedmerge8, 5, 7, 7, bsrc, osrc, msrc, dst, blinesize, w, x
+ mov wd, r8m
+%define olinesizeq r5mp
+%define mlinesizeq r6mp
+%define dlinesizeq r7mp
+%define hd r9mp
+%endif
+ mova m4, [pw_256]
+ mova m5, [pw_128]
+ pxor m6, m6
+ add bsrcq, wq
+ add osrcq, wq
+ add msrcq, wq
+ add dstq, wq
+ neg wq
+.nextrow:
+ mov xq, wq
+
+ .loop:
+ movh m0, [bsrcq + xq]
+ movh m1, [osrcq + xq]
+ movh m3, [msrcq + xq]
+ mova m2, m4
+ punpcklbw m0, m6
+ punpcklbw m1, m6
+ punpcklbw m3, m6
+ psubw m2, m3
+ pmullw m2, m0
+ pmullw m1, m3
+ paddw m1, m2
+ paddw m1, m5
+ psrlw m1, 8
+ packuswb m1, m1
+ movh [dstq + xq], m1
+ add xq, mmsize / 2
+ jl .loop
+
+ add bsrcq, blinesizeq
+ add osrcq, olinesizeq
+ add msrcq, mlinesizeq
+ add dstq, dlinesizeq
+ sub hd, 1
+ jg .nextrow
+REP_RET
diff --git a/libavfilter/x86/vf_maskedmerge_init.c b/libavfilter/x86/vf_maskedmerge_init.c
new file mode 100644
index 0000000000..73ab888083
--- /dev/null
+++ b/libavfilter/x86/vf_maskedmerge_init.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2015 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/attributes.h"
+#include "libavutil/cpu.h"
+#include "libavutil/x86/cpu.h"
+#include "libavfilter/maskedmerge.h"
+
+void ff_maskedmerge8_sse2(const uint8_t *bsrc, const uint8_t *osrc,
+ const uint8_t *msrc, uint8_t *dst,
+ ptrdiff_t blinesize, ptrdiff_t olinesize,
+ ptrdiff_t mlinesize, ptrdiff_t dlinesize,
+ int w, int h,
+ int half, int shift);
+
+av_cold void ff_maskedmerge_init_x86(MaskedMergeContext *s)
+{
+ int cpu_flags = av_get_cpu_flags();
+
+ if (EXTERNAL_SSE2(cpu_flags) && s->depth == 8) {
+ s->maskedmerge = ff_maskedmerge8_sse2;
+ }
+}
diff --git a/libavfilter/x86/vf_noise.c b/libavfilter/x86/vf_noise.c
new file mode 100644
index 0000000000..f7a4d00336
--- /dev/null
+++ b/libavfilter/x86/vf_noise.c
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (c) 2013 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/attributes.h"
+#include "libavutil/x86/cpu.h"
+#include "libavutil/x86/asm.h"
+#include "libavfilter/vf_noise.h"
+
+#if HAVE_INLINE_ASM
+static void line_noise_mmx(uint8_t *dst, const uint8_t *src,
+ const int8_t *noise, int len, int shift)
+{
+ x86_reg mmx_len= len & (~7);
+ noise += shift;
+
+ __asm__ volatile(
+ "mov %3, %%"FF_REG_a" \n\t"
+ "pcmpeqb %%mm7, %%mm7 \n\t"
+ "psllw $15, %%mm7 \n\t"
+ "packsswb %%mm7, %%mm7 \n\t"
+ ".p2align 4 \n\t"
+ "1: \n\t"
+ "movq (%0, %%"FF_REG_a"), %%mm0 \n\t"
+ "movq (%1, %%"FF_REG_a"), %%mm1 \n\t"
+ "pxor %%mm7, %%mm0 \n\t"
+ "paddsb %%mm1, %%mm0 \n\t"
+ "pxor %%mm7, %%mm0 \n\t"
+ "movq %%mm0, (%2, %%"FF_REG_a") \n\t"
+ "add $8, %%"FF_REG_a" \n\t"
+ " js 1b \n\t"
+ :: "r" (src+mmx_len), "r" (noise+mmx_len), "r" (dst+mmx_len), "g" (-mmx_len)
+ : "%"FF_REG_a
+ );
+ if (mmx_len != len)
+ ff_line_noise_c(dst+mmx_len, src+mmx_len, noise+mmx_len, len-mmx_len, 0);
+}
+
+#if HAVE_6REGS
+static void line_noise_avg_mmx(uint8_t *dst, const uint8_t *src,
+ int len, const int8_t * const *shift)
+{
+ x86_reg mmx_len = len & (~7);
+
+ __asm__ volatile(
+ "mov %5, %%"FF_REG_a" \n\t"
+ ".p2align 4 \n\t"
+ "1: \n\t"
+ "movq (%1, %%"FF_REG_a"), %%mm1 \n\t"
+ "movq (%0, %%"FF_REG_a"), %%mm0 \n\t"
+ "paddb (%2, %%"FF_REG_a"), %%mm1\n\t"
+ "paddb (%3, %%"FF_REG_a"), %%mm1\n\t"
+ "movq %%mm0, %%mm2 \n\t"
+ "movq %%mm1, %%mm3 \n\t"
+ "punpcklbw %%mm0, %%mm0 \n\t"
+ "punpckhbw %%mm2, %%mm2 \n\t"
+ "punpcklbw %%mm1, %%mm1 \n\t"
+ "punpckhbw %%mm3, %%mm3 \n\t"
+ "pmulhw %%mm0, %%mm1 \n\t"
+ "pmulhw %%mm2, %%mm3 \n\t"
+ "paddw %%mm1, %%mm1 \n\t"
+ "paddw %%mm3, %%mm3 \n\t"
+ "paddw %%mm0, %%mm1 \n\t"
+ "paddw %%mm2, %%mm3 \n\t"
+ "psrlw $8, %%mm1 \n\t"
+ "psrlw $8, %%mm3 \n\t"
+ "packuswb %%mm3, %%mm1 \n\t"
+ "movq %%mm1, (%4, %%"FF_REG_a") \n\t"
+ "add $8, %%"FF_REG_a" \n\t"
+ " js 1b \n\t"
+ :: "r" (src+mmx_len), "r" (shift[0]+mmx_len), "r" (shift[1]+mmx_len), "r" (shift[2]+mmx_len),
+ "r" (dst+mmx_len), "g" (-mmx_len)
+ : "%"FF_REG_a
+ );
+
+ if (mmx_len != len){
+ const int8_t *shift2[3] = { shift[0]+mmx_len, shift[1]+mmx_len, shift[2]+mmx_len };
+ ff_line_noise_avg_c(dst+mmx_len, src+mmx_len, len-mmx_len, shift2);
+ }
+}
+#endif /* HAVE_6REGS */
+
+static void line_noise_mmxext(uint8_t *dst, const uint8_t *src,
+ const int8_t *noise, int len, int shift)
+{
+ x86_reg mmx_len = len & (~7);
+ noise += shift;
+
+ __asm__ volatile(
+ "mov %3, %%"FF_REG_a" \n\t"
+ "pcmpeqb %%mm7, %%mm7 \n\t"
+ "psllw $15, %%mm7 \n\t"
+ "packsswb %%mm7, %%mm7 \n\t"
+ ".p2align 4 \n\t"
+ "1: \n\t"
+ "movq (%0, %%"FF_REG_a"), %%mm0 \n\t"
+ "movq (%1, %%"FF_REG_a"), %%mm1 \n\t"
+ "pxor %%mm7, %%mm0 \n\t"
+ "paddsb %%mm1, %%mm0 \n\t"
+ "pxor %%mm7, %%mm0 \n\t"
+ "movntq %%mm0, (%2, %%"FF_REG_a") \n\t"
+ "add $8, %%"FF_REG_a" \n\t"
+ " js 1b \n\t"
+ :: "r" (src+mmx_len), "r" (noise+mmx_len), "r" (dst+mmx_len), "g" (-mmx_len)
+ : "%"FF_REG_a
+ );
+ if (mmx_len != len)
+ ff_line_noise_c(dst+mmx_len, src+mmx_len, noise+mmx_len, len-mmx_len, 0);
+}
+#endif /* HAVE_INLINE_ASM */
+
+av_cold void ff_noise_init_x86(NoiseContext *n)
+{
+#if HAVE_INLINE_ASM
+ int cpu_flags = av_get_cpu_flags();
+
+ if (INLINE_MMX(cpu_flags)) {
+ n->line_noise = line_noise_mmx;
+#if HAVE_6REGS
+ n->line_noise_avg = line_noise_avg_mmx;
+#endif
+ }
+ if (INLINE_MMXEXT(cpu_flags)) {
+ n->line_noise = line_noise_mmxext;
+ }
+#endif
+}
diff --git a/libavfilter/x86/vf_pp7.asm b/libavfilter/x86/vf_pp7.asm
new file mode 100644
index 0000000000..7b3e5cf5e3
--- /dev/null
+++ b/libavfilter/x86/vf_pp7.asm
@@ -0,0 +1,57 @@
+;*****************************************************************************
+;* x86-optimized functions for pp7 filter
+;*
+;* Copyright (c) 2005 Michael Niedermayer <michaelni@gmx.at>
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or modify
+;* it under the terms of the GNU General Public License as published by
+;* the Free Software Foundation; either version 2 of the License, or
+;* (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;* GNU General Public License for more details.
+;*
+;* You should have received a copy of the GNU General Public License along
+;* with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+;* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+;******************************************************************************
+
+%include "libavutil/x86/x86util.asm"
+
+SECTION .text
+
+INIT_MMX mmx
+
+;void ff_pp7_dctB_mmx(int16_t *dst, int16_t *src)
+cglobal pp7_dctB, 2, 2, 0, dst, src
+ movq m0, [srcq]
+ movq m1, [srcq+mmsize*1]
+ paddw m0, [srcq+mmsize*6]
+ paddw m1, [srcq+mmsize*5]
+ movq m2, [srcq+mmsize*2]
+ movq m3, [srcq+mmsize*3]
+ paddw m2, [srcq+mmsize*4]
+ paddw m3, m3
+ movq m4, m3
+ psubw m3, m0
+ paddw m4, m0
+ movq m0, m2
+ psubw m2, m1
+ paddw m0, m1
+ movq m1, m4
+ psubw m4, m0
+ paddw m1, m0
+ movq m0, m3
+ psubw m3, m2
+ psubw m3, m2
+ paddw m2, m0
+ paddw m2, m0
+ movq [dstq], m1
+ movq [dstq+mmsize*2], m4
+ movq [dstq+mmsize*1], m2
+ movq [dstq+mmsize*3], m3
+ RET
diff --git a/libavfilter/x86/vf_pp7_init.c b/libavfilter/x86/vf_pp7_init.c
new file mode 100644
index 0000000000..165b0dd5d0
--- /dev/null
+++ b/libavfilter/x86/vf_pp7_init.c
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2005 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include "libavutil/attributes.h"
+#include "libavutil/cpu.h"
+#include "libavutil/x86/cpu.h"
+#include "libavfilter/vf_pp7.h"
+
+void ff_pp7_dctB_mmx(int16_t *dst, int16_t *src);
+
+av_cold void ff_pp7_init_x86(PP7Context *p)
+{
+ int cpu_flags = av_get_cpu_flags();
+
+ if (EXTERNAL_MMX(cpu_flags))
+ p->dctB = ff_pp7_dctB_mmx;
+}
diff --git a/libavfilter/x86/vf_psnr.asm b/libavfilter/x86/vf_psnr.asm
new file mode 100644
index 0000000000..11eb81a225
--- /dev/null
+++ b/libavfilter/x86/vf_psnr.asm
@@ -0,0 +1,140 @@
+;*****************************************************************************
+;* x86-optimized functions for psnr filter
+;*
+;* Copyright (C) 2015 Ronald S. Bultje <rsbultje@gmail.com>
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+%include "libavutil/x86/x86util.asm"
+
+SECTION .text
+
+%macro SSE_LINE_FN 2 ; 8 or 16, byte or word
+INIT_XMM sse2
+%if ARCH_X86_32
+%if %1 == 8
+cglobal sse_line_%1 %+ bit, 0, 6, 8, res, buf, w, px1, px2, ref
+%else
+cglobal sse_line_%1 %+ bit, 0, 7, 8, res, buf, reshigh, w, px1, px2, ref
+%endif
+ mov bufq, r0mp
+ mov refq, r1mp
+ mov wd, r2m
+%else
+cglobal sse_line_%1 %+ bit, 3, 5, 8, buf, ref, w, px1, px2
+%endif
+ pxor m6, m6
+ pxor m7, m7
+ sub wd, mmsize*2
+ jl .end
+
+.loop:
+ movu m0, [bufq+mmsize*0]
+ movu m1, [bufq+mmsize*1]
+ movu m2, [refq+mmsize*0]
+ movu m3, [refq+mmsize*1]
+%if %1 == 8
+ add bufq, mmsize*2
+ add refq, mmsize*2
+ psubusb m4, m0, m2
+ psubusb m5, m1, m3
+ psubusb m2, m0
+ psubusb m3, m1
+ por m2, m4
+ por m3, m5
+ punpcklbw m0, m2, m6
+ punpcklbw m1, m3, m6
+ punpckhbw m2, m6
+ punpckhbw m3, m6
+%else
+ psubw m0, m2
+ psubw m1, m3
+ movu m2, [bufq+mmsize*2]
+ movu m3, [bufq+mmsize*3]
+ movu m4, [refq+mmsize*2]
+ movu m5, [refq+mmsize*3]
+ psubw m2, m4
+ psubw m3, m5
+ add bufq, mmsize*4
+ add refq, mmsize*4
+%endif
+ pmaddwd m0, m0
+ pmaddwd m1, m1
+ pmaddwd m2, m2
+ pmaddwd m3, m3
+ paddd m0, m1
+ paddd m2, m3
+%if %1 == 8
+ paddd m7, m0
+ paddd m7, m2
+%else
+ paddd m0, m2
+ punpckldq m2, m0, m6
+ punpckhdq m0, m6
+ paddq m7, m0
+ paddq m7, m2
+%endif
+ sub wd, mmsize*2
+ jge .loop
+
+.end:
+ add wd, mmsize*2
+ movhlps m0, m7
+%if %1 == 8
+ paddd m7, m0
+ pshufd m0, m7, 1
+ paddd m7, m0
+ movd eax, m7
+%else
+ paddq m7, m0
+%if ARCH_X86_32
+ movd eax, m7
+ psrldq m7, 4
+ movd edx, m7
+%else
+ movq rax, m7
+%endif
+%endif
+
+ ; deal with cases where w % 32 != 0
+ test wd, wd
+ jz .end_scalar
+.loop_scalar:
+ movzx px1d, %2 [bufq+wq*(%1/8)-(%1/8)]
+ movzx px2d, %2 [refq+wq*(%1/8)-(%1/8)]
+ sub px1d, px2d
+ imul px1d, px1d
+%if %1 == 8
+ add eax, px1d
+%elif ARCH_X86_64
+ add rax, px1q
+%else
+ add eax, px1d
+ adc edx, 0
+%endif
+ dec wd
+ jg .loop_scalar
+
+.end_scalar:
+ ; for %1=8, no need to zero edx on x86-32, since edx=wd, which is zero
+ RET
+%endmacro
+
+INIT_XMM sse2
+SSE_LINE_FN 8, byte
+SSE_LINE_FN 16, word
diff --git a/libavfilter/x86/vf_psnr_init.c b/libavfilter/x86/vf_psnr_init.c
new file mode 100644
index 0000000000..c387812204
--- /dev/null
+++ b/libavfilter/x86/vf_psnr_init.c
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2015 Ronald S. Bultje <rsbultje@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/x86/cpu.h"
+
+#include "libavfilter/psnr.h"
+
+uint64_t ff_sse_line_8bit_sse2(const uint8_t *buf, const uint8_t *ref, int w);
+uint64_t ff_sse_line_16bit_sse2(const uint8_t *buf, const uint8_t *ref, int w);
+
+void ff_psnr_init_x86(PSNRDSPContext *dsp, int bpp)
+{
+ int cpu_flags = av_get_cpu_flags();
+
+ if (EXTERNAL_SSE2(cpu_flags)) {
+ if (bpp <= 8) {
+ dsp->sse_line = ff_sse_line_8bit_sse2;
+ } else if (bpp <= 15) {
+ dsp->sse_line = ff_sse_line_16bit_sse2;
+ }
+ }
+}
diff --git a/libavfilter/x86/vf_pullup.asm b/libavfilter/x86/vf_pullup.asm
new file mode 100644
index 0000000000..26c2a27d37
--- /dev/null
+++ b/libavfilter/x86/vf_pullup.asm
@@ -0,0 +1,178 @@
+;*****************************************************************************
+;* x86-optimized functions for pullup filter
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or modify
+;* it under the terms of the GNU General Public License as published by
+;* the Free Software Foundation; either version 2 of the License, or
+;* (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;* GNU General Public License for more details.
+;*
+;* You should have received a copy of the GNU General Public License along
+;* with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+;* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+;******************************************************************************
+
+%include "libavutil/x86/x86util.asm"
+
+SECTION .text
+
+INIT_MMX mmx
+cglobal pullup_filter_diff, 3, 5, 8, first, second, size
+ mov r3, 4
+ pxor m4, m4
+ pxor m7, m7
+
+.loop:
+ movq m0, [firstq]
+ movq m2, [firstq]
+ add firstq, sizeq
+ movq m1, [secondq]
+ add secondq, sizeq
+ psubusb m2, m1
+ psubusb m1, m0
+ movq m0, m2
+ movq m3, m1
+ punpcklbw m0, m7
+ punpcklbw m1, m7
+ punpckhbw m2, m7
+ punpckhbw m3, m7
+ paddw m4, m0
+ paddw m4, m1
+ paddw m4, m2
+ paddw m4, m3
+
+ dec r3
+ jnz .loop
+
+ movq m3, m4
+ punpcklwd m4, m7
+ punpckhwd m3, m7
+ paddd m3, m4
+ movd eax, m3
+ psrlq m3, 32
+ movd r4d, m3
+ add eax, r4d
+ RET
+
+INIT_MMX mmx
+cglobal pullup_filter_comb, 3, 5, 8, first, second, size
+ mov r3, 4
+ pxor m6, m6
+ pxor m7, m7
+ sub secondq, sizeq
+
+.loop:
+ movq m0, [firstq]
+ movq m1, [secondq]
+ punpcklbw m0, m7
+ movq m2, [secondq+sizeq]
+ punpcklbw m1, m7
+ punpcklbw m2, m7
+ paddw m0, m0
+ paddw m1, m2
+ movq m2, m0
+ psubusw m0, m1
+ psubusw m1, m2
+ paddw m6, m0
+ paddw m6, m1
+
+ movq m0, [firstq]
+ movq m1, [secondq]
+ punpckhbw m0, m7
+ movq m2, [secondq+sizeq]
+ punpckhbw m1, m7
+ punpckhbw m2, m7
+ paddw m0, m0
+ paddw m1, m2
+ movq m2, m0
+ psubusw m0, m1
+ psubusw m1, m2
+ paddw m6, m0
+ paddw m6, m1
+
+ movq m0, [secondq+sizeq]
+ movq m1, [firstq]
+ punpcklbw m0, m7
+ movq m2, [firstq+sizeq]
+ punpcklbw m1, m7
+ punpcklbw m2, m7
+ paddw m0, m0
+ paddw m1, m2
+ movq m2, m0
+ psubusw m0, m1
+ psubusw m1, m2
+ paddw m6, m0
+ paddw m6, m1
+
+ movq m0, [secondq+sizeq]
+ movq m1, [firstq]
+ punpckhbw m0, m7
+ movq m2, [firstq+sizeq]
+ punpckhbw m1, m7
+ punpckhbw m2, m7
+ paddw m0, m0
+ paddw m1, m2
+ movq m2, m0
+ psubusw m0, m1
+ psubusw m1, m2
+ paddw m6, m0
+ paddw m6, m1
+
+ add firstq, sizeq
+ add secondq, sizeq
+ dec r3
+ jnz .loop
+
+ movq m5, m6
+ punpcklwd m6, m7
+ punpckhwd m5, m7
+ paddd m5, m6
+ movd eax, m5
+ psrlq m5, 32
+ movd r4d, m5
+ add eax, r4d
+ RET
+
+INIT_MMX mmx
+cglobal pullup_filter_var, 3, 5, 8, first, second, size
+ mov r3, 3
+ pxor m4, m4
+ pxor m7, m7
+
+.loop:
+ movq m0, [firstq]
+ movq m2, [firstq]
+ movq m1, [firstq+sizeq]
+ add firstq, sizeq
+ psubusb m2, m1
+ psubusb m1, m0
+ movq m0, m2
+ movq m3, m1
+ punpcklbw m0, m7
+ punpcklbw m1, m7
+ punpckhbw m2, m7
+ punpckhbw m3, m7
+ paddw m4, m0
+ paddw m4, m1
+ paddw m4, m2
+ paddw m4, m3
+
+ dec r3
+ jnz .loop
+
+ movq m3, m4
+ punpcklwd m4, m7
+ punpckhwd m3, m7
+ paddd m3, m4
+ movd eax, m3
+ psrlq m3, 32
+ movd r4d, m3
+ add eax, r4d
+ shl eax, 2
+ RET
diff --git a/libavfilter/x86/vf_pullup_init.c b/libavfilter/x86/vf_pullup_init.c
new file mode 100644
index 0000000000..5b36b68e51
--- /dev/null
+++ b/libavfilter/x86/vf_pullup_init.c
@@ -0,0 +1,41 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+*/
+
+#include "libavutil/attributes.h"
+#include "libavutil/cpu.h"
+#include "libavutil/mem.h"
+#include "libavutil/x86/asm.h"
+#include "libavutil/x86/cpu.h"
+#include "libavfilter/vf_pullup.h"
+
+int ff_pullup_filter_diff_mmx(const uint8_t *a, const uint8_t *b, ptrdiff_t s);
+int ff_pullup_filter_comb_mmx(const uint8_t *a, const uint8_t *b, ptrdiff_t s);
+int ff_pullup_filter_var_mmx (const uint8_t *a, const uint8_t *b, ptrdiff_t s);
+
+av_cold void ff_pullup_init_x86(PullupContext *s)
+{
+#if HAVE_YASM
+ int cpu_flags = av_get_cpu_flags();
+
+ if (EXTERNAL_MMX(cpu_flags)) {
+ s->diff = ff_pullup_filter_diff_mmx;
+ s->comb = ff_pullup_filter_comb_mmx;
+ s->var = ff_pullup_filter_var_mmx;
+ }
+#endif
+}
diff --git a/libavfilter/x86/vf_removegrain.asm b/libavfilter/x86/vf_removegrain.asm
new file mode 100644
index 0000000000..d049bf257d
--- /dev/null
+++ b/libavfilter/x86/vf_removegrain.asm
@@ -0,0 +1,1218 @@
+;*****************************************************************************
+;* x86-optimized functions for removegrain filter
+;*
+;* Copyright (C) 2015 James Darnley
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or modify
+;* it under the terms of the GNU General Public License as published by
+;* the Free Software Foundation; either version 2 of the License, or
+;* (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;* GNU General Public License for more details.
+;*
+;* You should have received a copy of the GNU General Public License along
+;* with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+;* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+;*****************************************************************************
+
+; column: -1 0 +1
+; row -1: a1 a2 a3
+; row 0: a4 c a5
+; row +1: a6 a7 a8
+
+%include "libavutil/x86/x86util.asm"
+
+SECTION_RODATA 32
+
+pw_4: times 16 dw 4
+pw_8: times 16 dw 8
+pw_div9: times 16 dw ((1<<16)+4)/9
+
+SECTION .text
+
+;*** Preprocessor helpers
+
+%define a1 srcq+stride_n-1
+%define a2 srcq+stride_n
+%define a3 srcq+stride_n+1
+%define a4 srcq-1
+%define c srcq
+%define a5 srcq+1
+%define a6 srcq+stride_p-1
+%define a7 srcq+stride_p
+%define a8 srcq+stride_p+1
+
+; %1 dest simd register
+; %2 source memory location
+; %3 zero location (simd register/memory)
+%macro LOAD 3
+ movh %1, %2
+ punpcklbw %1, %3
+%endmacro
+
+%macro LOAD_SQUARE 0
+ movu m1, [a1]
+ movu m2, [a2]
+ movu m3, [a3]
+ movu m4, [a4]
+ movu m0, [c]
+ movu m5, [a5]
+ movu m6, [a6]
+ movu m7, [a7]
+ movu m8, [a8]
+%endmacro
+
+; %1 zero location (simd register/memory)
+%macro LOAD_SQUARE_16 1
+ LOAD m1, [a1], %1
+ LOAD m2, [a2], %1
+ LOAD m3, [a3], %1
+ LOAD m4, [a4], %1
+ LOAD m0, [c], %1
+ LOAD m5, [a5], %1
+ LOAD m6, [a6], %1
+ LOAD m7, [a7], %1
+ LOAD m8, [a8], %1
+%endmacro
+
+; %1 data type
+; %2 simd register to hold maximums
+; %3 simd register to hold minimums
+; %4 temp location (simd register/memory)
+%macro SORT_PAIR 4
+ mova %4, %2
+ pmin%1 %2, %3
+ pmax%1 %3, %4
+%endmacro
+
+%macro SORT_AXIS 0
+ SORT_PAIR ub, m1, m8, m9
+ SORT_PAIR ub, m2, m7, m10
+ SORT_PAIR ub, m3, m6, m11
+ SORT_PAIR ub, m4, m5, m12
+%endmacro
+
+
+%macro SORT_AXIS_16 0
+ SORT_PAIR sw, m1, m8, m9
+ SORT_PAIR sw, m2, m7, m10
+ SORT_PAIR sw, m3, m6, m11
+ SORT_PAIR sw, m4, m5, m12
+%endmacro
+
+; The loop doesn't need to do all the iterations. It could stop when the right
+; pixels are in the right registers.
+%macro SORT_SQUARE 0
+ %assign k 7
+ %rep 7
+ %assign i 1
+ %assign j 2
+ %rep k
+ SORT_PAIR ub, m %+ i , m %+ j , m9
+ %assign i i+1
+ %assign j j+1
+ %endrep
+ %assign k k-1
+ %endrep
+%endmacro
+
+; %1 dest simd register
+; %2 source (simd register/memory)
+; %3 temp simd register
+%macro ABS_DIFF 3
+ mova %3, %2
+ psubusb %3, %1
+ psubusb %1, %2
+ por %1, %3
+%endmacro
+
+; %1 dest simd register
+; %2 source (simd register/memory)
+; %3 temp simd register
+%macro ABS_DIFF_W 3
+ mova %3, %2
+ psubusw %3, %1
+ psubusw %1, %2
+ por %1, %3
+%endmacro
+
+; %1 simd register that holds the "false" values and will hold the result
+; %2 simd register that holds the "true" values
+; %3 location (simd register/memory) that hold the mask
+%macro BLEND 3
+%if cpuflag(avx2)
+ vpblendvb %1, %1, %2, %3
+%else
+ pand %2, %3
+ pandn %3, %1
+ por %3, %2
+ SWAP %1, %3
+%endif
+%endmacro
+
+; Functions
+
+INIT_XMM sse2
+cglobal rg_fl_mode_1, 4, 5, 3, 0, dst, src, stride, pixels
+ mov r4q, strideq
+ neg r4q
+ %define stride_p strideq
+ %define stride_n r4q
+
+ .loop:
+ movu m0, [a1]
+ mova m1, m0
+
+ movu m2, [a2]
+ pmaxub m0, m2
+ pminub m1, m2
+
+ movu m2, [a3]
+ pmaxub m0, m2
+ pminub m1, m2
+
+ movu m2, [a4]
+ pmaxub m0, m2
+ pminub m1, m2
+
+ movu m2, [a5]
+ pmaxub m0, m2
+ pminub m1, m2
+
+ movu m2, [a6]
+ pmaxub m0, m2
+ pminub m1, m2
+
+ movu m2, [a7]
+ pmaxub m0, m2
+ pminub m1, m2
+
+ movu m2, [a8]
+ pmaxub m0, m2
+ pminub m1, m2
+
+ movu m2, [c]
+ pminub m2, m0
+ pmaxub m2, m1
+
+ movu [dstq], m2
+ add srcq, mmsize
+ add dstq, mmsize
+ sub pixelsd, mmsize
+ jg .loop
+RET
+
+%if ARCH_X86_64
+cglobal rg_fl_mode_2, 4, 5, 10, 0, dst, src, stride, pixels
+ mov r4q, strideq
+ neg r4q
+ %define stride_p strideq
+ %define stride_n r4q
+
+ .loop:
+ LOAD_SQUARE
+ SORT_SQUARE
+
+ CLIPUB m0, m2, m7
+
+ movu [dstq], m0
+ add srcq, mmsize
+ add dstq, mmsize
+ sub pixelsd, mmsize
+ jg .loop
+RET
+
+cglobal rg_fl_mode_3, 4, 5, 10, 0, dst, src, stride, pixels
+ mov r4q, strideq
+ neg r4q
+ %define stride_p strideq
+ %define stride_n r4q
+
+ .loop:
+ LOAD_SQUARE
+ SORT_SQUARE
+
+ CLIPUB m0, m3, m6
+
+ movu [dstq], m0
+ add srcq, mmsize
+ add dstq, mmsize
+ sub pixelsd, mmsize
+ jg .loop
+RET
+
+cglobal rg_fl_mode_4, 4, 5, 10, 0, dst, src, stride, pixels
+ mov r4q, strideq
+ neg r4q
+ %define stride_p strideq
+ %define stride_n r4q
+
+ .loop:
+ LOAD_SQUARE
+ SORT_SQUARE
+
+ CLIPUB m0, m4, m5
+
+ movu [dstq], m0
+ add srcq, mmsize
+ add dstq, mmsize
+ sub pixelsd, mmsize
+ jg .loop
+RET
+
+cglobal rg_fl_mode_5, 4, 5, 13, 0, dst, src, stride, pixels
+ mov r4q, strideq
+ neg r4q
+ %define stride_p strideq
+ %define stride_n r4q
+
+ .loop:
+ LOAD_SQUARE
+ SORT_AXIS
+
+ mova m9, m0
+ mova m10, m0
+ mova m11, m0
+ mova m12, m0
+
+ CLIPUB m9, m1, m8
+ CLIPUB m10, m2, m7
+ CLIPUB m11, m3, m6
+ CLIPUB m12, m4, m5
+
+ mova m8, m9 ; clip1
+ mova m7, m10 ; clip2
+ mova m6, m11 ; clip3
+ mova m5, m12 ; clip4
+
+ ABS_DIFF m9, m0, m1 ; c1
+ ABS_DIFF m10, m0, m2 ; c2
+ ABS_DIFF m11, m0, m3 ; c3
+ ABS_DIFF m12, m0, m4 ; c4
+
+ pminub m9, m10
+ pminub m9, m11
+ pminub m9, m12 ; mindiff
+
+ pcmpeqb m10, m9
+ pcmpeqb m11, m9
+ pcmpeqb m12, m9
+
+ ; Notice the order here: c1, c3, c2, c4
+ BLEND m8, m6, m11
+ BLEND m8, m7, m10
+ BLEND m8, m5, m12
+
+ movu [dstq], m8
+ add srcq, mmsize
+ add dstq, mmsize
+ sub pixelsd, mmsize
+ jg .loop
+RET
+
+cglobal rg_fl_mode_6, 4, 5, 16, 0, dst, src, stride, pixels
+ mov r4q, strideq
+ neg r4q
+ %define stride_p strideq
+ %define stride_n r4q
+
+ ; Some register saving suggestions: the zero can be somewhere other than a
+ ; register, the center pixels could be on the stack.
+
+ pxor m15, m15
+ .loop:
+ LOAD_SQUARE_16 m15
+ SORT_AXIS_16
+
+ mova m9, m0
+ mova m10, m0
+ mova m11, m0
+ mova m12, m0
+ CLIPW m9, m1, m8 ; clip1
+ CLIPW m10, m2, m7 ; clip2
+ CLIPW m11, m3, m6 ; clip3
+ CLIPW m12, m4, m5 ; clip4
+
+ psubw m8, m1 ; d1
+ psubw m7, m2 ; d2
+ psubw m6, m3 ; d3
+ psubw m5, m4 ; d4
+
+ mova m1, m9
+ mova m2, m10
+ mova m3, m11
+ mova m4, m12
+ ABS_DIFF_W m1, m0, m13
+ ABS_DIFF_W m2, m0, m14
+ ABS_DIFF_W m3, m0, m13
+ ABS_DIFF_W m4, m0, m14
+ psllw m1, 1
+ psllw m2, 1
+ psllw m3, 1
+ psllw m4, 1
+ paddw m1, m8 ; c1
+ paddw m2, m7 ; c2
+ paddw m3, m6 ; c3
+ paddw m4, m5 ; c4
+ ; As the differences (d1..d4) can only be positive, there is no need to
+ ; clip to zero. Also, the maximum positive value is less than 768.
+
+ pminsw m1, m2
+ pminsw m1, m3
+ pminsw m1, m4
+
+ pcmpeqw m2, m1
+ pcmpeqw m3, m1
+ pcmpeqw m4, m1
+
+ BLEND m9, m11, m3
+ BLEND m9, m10, m2
+ BLEND m9, m12, m4
+ packuswb m9, m9
+
+ movh [dstq], m9
+ add srcq, mmsize/2
+ add dstq, mmsize/2
+ sub pixelsd, mmsize/2
+ jg .loop
+RET
+
+; This is just copy-pasted straight from mode 6 with the left shifts removed.
+cglobal rg_fl_mode_7, 4, 5, 16, 0, dst, src, stride, pixels
+ mov r4q, strideq
+ neg r4q
+ %define stride_p strideq
+ %define stride_n r4q
+
+ ; Can this be done without unpacking?
+
+ pxor m15, m15
+ .loop:
+ LOAD_SQUARE_16 m15
+ SORT_AXIS_16
+
+ mova m9, m0
+ mova m10, m0
+ mova m11, m0
+ mova m12, m0
+ CLIPW m9, m1, m8 ; clip1
+ CLIPW m10, m2, m7 ; clip2
+ CLIPW m11, m3, m6 ; clip3
+ CLIPW m12, m4, m5 ; clip4
+
+ psubw m8, m1 ; d1
+ psubw m7, m2 ; d2
+ psubw m6, m3 ; d3
+ psubw m5, m4 ; d4
+
+ mova m1, m9
+ mova m2, m10
+ mova m3, m11
+ mova m4, m12
+ ABS_DIFF_W m1, m0, m13
+ ABS_DIFF_W m2, m0, m14
+ ABS_DIFF_W m3, m0, m13
+ ABS_DIFF_W m4, m0, m14
+ paddw m1, m8 ; c1
+ paddw m2, m7 ; c2
+ paddw m3, m6 ; c3
+ paddw m4, m5 ; c4
+
+ pminsw m1, m2
+ pminsw m1, m3
+ pminsw m1, m4
+
+ pcmpeqw m2, m1
+ pcmpeqw m3, m1
+ pcmpeqw m4, m1
+
+ BLEND m9, m11, m3
+ BLEND m9, m10, m2
+ BLEND m9, m12, m4
+ packuswb m9, m9
+
+ movh [dstq], m9
+ add srcq, mmsize/2
+ add dstq, mmsize/2
+ sub pixelsd, mmsize/2
+ jg .loop
+RET
+
+; This is just copy-pasted straight from mode 6 with a few changes.
+cglobal rg_fl_mode_8, 4, 5, 16, 0, dst, src, stride, pixels
+ mov r4q, strideq
+ neg r4q
+ %define stride_p strideq
+ %define stride_n r4q
+
+ pxor m15, m15
+ .loop:
+ LOAD_SQUARE_16 m15
+ SORT_AXIS_16
+
+ mova m9, m0
+ mova m10, m0
+ mova m11, m0
+ mova m12, m0
+ CLIPW m9, m1, m8 ; clip1
+ CLIPW m10, m2, m7 ; clip2
+ CLIPW m11, m3, m6 ; clip3
+ CLIPW m12, m4, m5 ; clip4
+
+ psubw m8, m1 ; d1
+ psubw m7, m2 ; d2
+ psubw m6, m3 ; d3
+ psubw m5, m4 ; d4
+ psllw m8, 1
+ psllw m7, 1
+ psllw m6, 1
+ psllw m5, 1
+
+ mova m1, m9
+ mova m2, m10
+ mova m3, m11
+ mova m4, m12
+ ABS_DIFF_W m1, m0, m13
+ ABS_DIFF_W m2, m0, m14
+ ABS_DIFF_W m3, m0, m13
+ ABS_DIFF_W m4, m0, m14
+ paddw m1, m8 ; c1
+ paddw m2, m7 ; c1
+ paddw m3, m6 ; c1
+ paddw m4, m5 ; c1
+ ; As the differences (d1..d4) can only be positive, there is no need to
+ ; clip to zero. Also, the maximum positive value is less than 768.
+
+ pminsw m1, m2
+ pminsw m1, m3
+ pminsw m1, m4
+
+ pcmpeqw m2, m1
+ pcmpeqw m3, m1
+ pcmpeqw m4, m1
+
+ BLEND m9, m11, m3
+ BLEND m9, m10, m2
+ BLEND m9, m12, m4
+ packuswb m9, m9
+
+ movh [dstq], m9
+ add srcq, mmsize/2
+ add dstq, mmsize/2
+ sub pixelsd, mmsize/2
+ jg .loop
+RET
+
+cglobal rg_fl_mode_9, 4, 5, 13, 0, dst, src, stride, pixels
+ mov r4q, strideq
+ neg r4q
+ %define stride_p strideq
+ %define stride_n r4q
+
+ .loop:
+ LOAD_SQUARE
+ SORT_AXIS
+
+ mova m9, m0
+ mova m10, m0
+ mova m11, m0
+ mova m12, m0
+ CLIPUB m9, m1, m8 ; clip1
+ CLIPUB m10, m2, m7 ; clip2
+ CLIPUB m11, m3, m6 ; clip3
+ CLIPUB m12, m4, m5 ; clip4
+
+ psubb m8, m1 ; d1
+ psubb m7, m2 ; d2
+ psubb m6, m3 ; d3
+ psubb m5, m4 ; d4
+
+ pminub m8, m7
+ pminub m8, m6
+ pminub m8, m5
+
+ pcmpeqb m7, m8
+ pcmpeqb m6, m8
+ pcmpeqb m5, m8
+
+ BLEND m9, m11, m6
+ BLEND m9, m10, m7
+ BLEND m9, m12, m5
+
+ movu [dstq], m9
+ add srcq, mmsize
+ add dstq, mmsize
+ sub pixelsd, mmsize
+ jg .loop
+RET
+%endif
+
+cglobal rg_fl_mode_10, 4, 5, 8, 0, dst, src, stride, pixels
+ mov r4q, strideq
+ neg r4q
+ %define stride_p strideq
+ %define stride_n r4q
+
+ .loop:
+ movu m0, [c]
+
+ movu m1, [a4]
+ mova m2, m1
+ ABS_DIFF m1, m0, m7
+
+ movu m3, [a5] ; load pixel
+ mova m4, m3
+ ABS_DIFF m4, m0, m7 ; absolute difference from center
+ pminub m1, m4 ; mindiff
+ pcmpeqb m4, m1 ; if (difference == mindiff)
+ BLEND m2, m3, m4 ; return pixel
+
+ movu m5, [a1]
+ mova m6, m5
+ ABS_DIFF m6, m0, m7
+ pminub m1, m6
+ pcmpeqb m6, m1
+ BLEND m2, m5, m6
+
+ movu m3, [a3]
+ mova m4, m3
+ ABS_DIFF m4, m0, m7
+ pminub m1, m4
+ pcmpeqb m4, m1
+ BLEND m2, m3, m4
+
+ movu m5, [a2]
+ mova m6, m5
+ ABS_DIFF m6, m0, m7
+ pminub m1, m6
+ pcmpeqb m6, m1
+ BLEND m2, m5, m6
+
+ movu m3, [a6]
+ mova m4, m3
+ ABS_DIFF m4, m0, m7
+ pminub m1, m4
+ pcmpeqb m4, m1
+ BLEND m2, m3, m4
+
+ movu m5, [a8]
+ mova m6, m5
+ ABS_DIFF m6, m0, m7
+ pminub m1, m6
+ pcmpeqb m6, m1
+ BLEND m2, m5, m6
+
+ movu m3, [a7]
+ mova m4, m3
+ ABS_DIFF m4, m0, m7
+ pminub m1, m4
+ pcmpeqb m4, m1
+ BLEND m2, m3, m4
+
+ movu [dstq], m2
+ add srcq, mmsize
+ add dstq, mmsize
+ sub pixelsd, mmsize
+ jg .loop
+RET
+
+cglobal rg_fl_mode_11_12, 4, 5, 7, 0, dst, src, stride, pixels
+ mov r4q, strideq
+ neg r4q
+ %define stride_p strideq
+ %define stride_n r4q
+
+ pxor m0, m0
+ .loop:
+ LOAD m1, [c], m0
+ LOAD m2, [a2], m0
+ LOAD m3, [a4], m0
+ LOAD m4, [a5], m0
+ LOAD m5, [a7], m0
+
+ psllw m1, 2
+ paddw m2, m3
+ paddw m4, m5
+ paddw m2, m4
+ psllw m2, 1
+
+ LOAD m3, [a1], m0
+ LOAD m4, [a3], m0
+ LOAD m5, [a6], m0
+ LOAD m6, [a8], m0
+ paddw m1, m2
+ paddw m3, m4
+ paddw m5, m6
+ paddw m1, m3
+ paddw m1, m5
+
+ paddw m1, [pw_8]
+ psraw m1, 4
+
+ packuswb m1, m1
+
+ movh [dstq], m1
+ add srcq, mmsize/2
+ add dstq, mmsize/2
+ sub pixelsd, mmsize/2
+ jg .loop
+RET
+
+cglobal rg_fl_mode_13_14, 4, 5, 8, 0, dst, src, stride, pixels
+ mov r4q, strideq
+ neg r4q
+ %define stride_p strideq
+ %define stride_n r4q
+
+ .loop:
+ movu m1, [a1]
+ movu m2, [a8]
+ mova m0, m1
+ pavgb m1, m2
+ ABS_DIFF m0, m2, m6
+
+ movu m3, [a3]
+ movu m4, [a6]
+ mova m5, m3
+ pavgb m3, m4
+ ABS_DIFF m5, m4, m7
+ pminub m0, m5
+ pcmpeqb m5, m0
+ BLEND m1, m3, m5
+
+ movu m2, [a2]
+ movu m3, [a7]
+ mova m4, m2
+ pavgb m2, m3
+ ABS_DIFF m4, m3, m6
+ pminub m0, m4
+ pcmpeqb m4, m0
+ BLEND m1, m2, m4
+
+ movu [dstq], m1
+ add srcq, mmsize
+ add dstq, mmsize
+ sub pixelsd, mmsize
+ jg .loop
+RET
+
+%if ARCH_X86_64
+cglobal rg_fl_mode_15_16, 4, 5, 16, 0, dst, src, stride, pixels
+ mov r4q, strideq
+ neg r4q
+ %define stride_p strideq
+ %define stride_n r4q
+
+ pxor m15, m15
+ .loop:
+ LOAD_SQUARE_16 m15
+
+ mova m9, m1
+ mova m10, m2
+ mova m11, m3
+ ABS_DIFF_W m9, m8, m12
+ ABS_DIFF_W m10, m7, m13
+ ABS_DIFF_W m11, m6, m14
+ pminsw m9, m10
+ pminsw m9, m11
+ pcmpeqw m10, m9
+ pcmpeqw m11, m9
+
+ mova m12, m2
+ mova m13, m1
+ mova m14, m6
+ paddw m12, m7
+ psllw m12, 1
+ paddw m13, m3
+ paddw m14, m8
+ paddw m12, [pw_4]
+ paddw m13, m14
+ paddw m12, m13
+ psrlw m12, 3
+
+ SORT_PAIR ub, m1, m8, m0
+ SORT_PAIR ub, m2, m7, m9
+ SORT_PAIR ub, m3, m6, m14
+ mova m4, m12
+ mova m5, m12
+ CLIPW m4, m1, m8
+ CLIPW m5, m2, m7
+ CLIPW m12, m3, m6
+
+ BLEND m4, m12, m11
+ BLEND m4, m5, m10
+ packuswb m4, m4
+
+ movh [dstq], m4
+ add srcq, mmsize/2
+ add dstq, mmsize/2
+ sub pixelsd, mmsize/2
+ jg .loop
+RET
+
+cglobal rg_fl_mode_17, 4, 5, 9, 0, dst, src, stride, pixels
+ mov r4q, strideq
+ neg r4q
+ %define stride_p strideq
+ %define stride_n r4q
+
+ .loop:
+ LOAD_SQUARE
+ SORT_AXIS
+
+ pmaxub m1, m2
+ pmaxub m3, m4
+
+ pminub m8, m7
+ pminub m5, m6
+
+ pmaxub m1, m3
+ pminub m8, m5
+
+ mova m2, m1
+ pminub m1, m8
+ pmaxub m8, m2
+
+ CLIPUB m0, m1, m8
+
+ movu [dstq], m0
+ add srcq, mmsize
+ add dstq, mmsize
+ sub pixelsd, mmsize
+ jg .loop
+RET
+
+cglobal rg_fl_mode_18, 4, 5, 16, 0, dst, src, stride, pixels
+ mov r4q, strideq
+ neg r4q
+ %define stride_p strideq
+ %define stride_n r4q
+
+ .loop:
+ LOAD_SQUARE
+
+ mova m9, m1
+ mova m10, m8
+ ABS_DIFF m9, m0, m11
+ ABS_DIFF m10, m0, m12
+ pmaxub m9, m10 ; m9 = d1
+
+ mova m10, m2
+ mova m11, m7
+ ABS_DIFF m10, m0, m12
+ ABS_DIFF m11, m0, m13
+ pmaxub m10, m11 ; m10 = d2
+
+ mova m11, m3
+ mova m12, m6
+ ABS_DIFF m11, m0, m13
+ ABS_DIFF m12, m0, m14
+ pmaxub m11, m12 ; m11 = d3
+
+ mova m12, m4
+ mova m13, m5
+ ABS_DIFF m12, m0, m14
+ ABS_DIFF m13, m0, m15
+ pmaxub m12, m13 ; m12 = d4
+
+ mova m13, m9
+ pminub m13, m10
+ pminub m13, m11
+ pminub m13, m12 ; m13 = mindiff
+
+ pcmpeqb m10, m13
+ pcmpeqb m11, m13
+ pcmpeqb m12, m13
+
+ mova m14, m1
+ pminub m1, m8
+ pmaxub m8, m14
+
+ mova m13, m0
+ mova m14, m1
+ pminub m1, m8
+ pmaxub m8, m14
+ CLIPUB m13, m1, m8 ; m13 = ret...d1
+
+ mova m14, m0
+ mova m15, m3
+ pminub m3, m6
+ pmaxub m6, m15
+ CLIPUB m14, m3, m6
+ pand m14, m11
+ pandn m11, m13
+ por m14, m11 ; m14 = ret...d3
+
+ mova m15, m0
+ mova m1, m2
+ pminub m2, m7
+ pmaxub m7, m1
+ CLIPUB m15, m2, m7
+ pand m15, m10
+ pandn m10, m14
+ por m15, m10 ; m15 = ret...d2
+
+ mova m1, m0
+ mova m2, m4
+ pminub m4, m5
+ pmaxub m5, m2
+ CLIPUB m1, m4, m5
+ pand m1, m12
+ pandn m12, m15
+ por m1, m12 ; m15 = ret...d4
+
+ movu [dstq], m1
+ add srcq, mmsize
+ add dstq, mmsize
+ sub pixelsd, mmsize
+ jg .loop
+RET
+%endif
+
+cglobal rg_fl_mode_19, 4, 5, 7, 0, dst, src, stride, pixels
+ mov r4q, strideq
+ neg r4q
+ %define stride_p strideq
+ %define stride_n r4q
+
+ pxor m0, m0
+ .loop:
+ LOAD m1, [a1], m0
+ LOAD m2, [a2], m0
+ paddw m1, m2
+
+ LOAD m3, [a3], m0
+ LOAD m4, [a4], m0
+ paddw m3, m4
+
+ LOAD m5, [a5], m0
+ LOAD m6, [a6], m0
+ paddw m5, m6
+
+ LOAD m2, [a7], m0
+ LOAD m4, [a8], m0
+ paddw m2, m4
+
+ paddw m1, m3
+ paddw m2, m5
+ paddw m1, m2
+
+ paddw m1, [pw_4]
+ psraw m1, 3
+
+ packuswb m1, m1
+
+ movh [dstq], m1
+ add srcq, mmsize/2
+ add dstq, mmsize/2
+ sub pixelsd, mmsize/2
+ jg .loop
+RET
+
+cglobal rg_fl_mode_20, 4, 5, 7, 0, dst, src, stride, pixels
+ mov r4q, strideq
+ neg r4q
+ %define stride_p strideq
+ %define stride_n r4q
+
+ pxor m0, m0
+ .loop:
+ LOAD m1, [a1], m0
+ LOAD m2, [a2], m0
+ paddw m1, m2
+
+ LOAD m3, [a3], m0
+ LOAD m4, [a4], m0
+ paddw m3, m4
+
+ LOAD m5, [a5], m0
+ LOAD m6, [a6], m0
+ paddw m5, m6
+
+ LOAD m2, [a7], m0
+ LOAD m4, [a8], m0
+ paddw m2, m4
+
+ LOAD m6, [c], m0
+ paddw m1, m3
+ paddw m2, m5
+ paddw m6, [pw_4]
+
+ paddw m1, m2
+ paddw m1, m6
+
+ pmulhuw m1, [pw_div9]
+
+ packuswb m1, m1
+
+ movh [dstq], m1
+ add srcq, mmsize/2
+ add dstq, mmsize/2
+ sub pixelsd, mmsize/2
+ jg .loop
+RET
+
+cglobal rg_fl_mode_21, 4, 5, 8, 0, dst, src, stride, pixels
+ mov r4q, strideq
+ neg r4q
+ %define stride_p strideq
+ %define stride_n r4q
+
+ pxor m0, m0
+ .loop:
+ movu m1, [a1]
+ movu m2, [a8]
+ pavgb m7, m1, m2
+ punpckhbw m3, m1, m0
+ punpcklbw m1, m0
+ punpckhbw m4, m2, m0
+ punpcklbw m2, m0
+ paddw m3, m4
+ paddw m1, m2
+ psrlw m3, 1
+ psrlw m1, 1
+ packuswb m1, m3
+
+ movu m2, [a2]
+ movu m3, [a7]
+ pavgb m6, m2, m3
+ punpckhbw m4, m2, m0
+ punpcklbw m2, m0
+ punpckhbw m5, m3, m0
+ punpcklbw m3, m0
+ paddw m4, m5
+ paddw m2, m3
+ psrlw m4, 1
+ psrlw m2, 1
+ packuswb m2, m4
+
+ pminub m1, m2
+ pmaxub m7, m6
+
+ movu m2, [a3]
+ movu m3, [a6]
+ pavgb m6, m2, m3
+ punpckhbw m4, m2, m0
+ punpcklbw m2, m0
+ punpckhbw m5, m3, m0
+ punpcklbw m3, m0
+ paddw m4, m5
+ paddw m2, m3
+ psrlw m4, 1
+ psrlw m2, 1
+ packuswb m2, m4
+
+ pminub m1, m2
+ pmaxub m7, m6
+
+ movu m2, [a4]
+ movu m3, [a5]
+ pavgb m6, m2, m3
+ punpckhbw m4, m2, m0
+ punpcklbw m2, m0
+ punpckhbw m5, m3, m0
+ punpcklbw m3, m0
+ paddw m4, m5
+ paddw m2, m3
+ psrlw m4, 1
+ psrlw m2, 1
+ packuswb m2, m4
+
+ pminub m1, m2
+ pmaxub m7, m6
+
+ movu m3, [c]
+ CLIPUB m3, m1, m7
+
+ movu [dstq], m3
+ add srcq, mmsize
+ add dstq, mmsize
+ sub pixelsd, mmsize
+ jg .loop
+RET
+
+cglobal rg_fl_mode_22, 4, 5, 8, 0, dst, src, stride, pixels
+ mov r4q, strideq
+ neg r4q
+ %define stride_p strideq
+ %define stride_n r4q
+
+ .loop:
+ movu m0, [a1]
+ movu m1, [a8]
+ pavgb m0, m1
+ movu m2, [a2]
+ movu m3, [a7]
+ pavgb m2, m3
+ movu m4, [a3]
+ movu m5, [a6]
+ pavgb m4, m5
+ movu m6, [a4]
+ movu m7, [a5]
+ pavgb m6, m7
+
+ mova m1, m0
+ mova m3, m2
+ mova m5, m4
+ mova m7, m6
+ pminub m0, m2
+ pminub m4, m6
+ pmaxub m1, m3
+ pmaxub m5, m7
+ pminub m0, m4
+ pmaxub m1, m5
+
+ movu m2, [c]
+ CLIPUB m2, m0, m1
+
+ movu [dstq], m2
+ add srcq, mmsize
+ add dstq, mmsize
+ sub pixelsd, mmsize
+ jg .loop
+RET
+
+%if ARCH_X86_64
+cglobal rg_fl_mode_23, 4, 5, 16, 0, dst, src, stride, pixels
+ mov r4q, strideq
+ neg r4q
+ %define stride_p strideq
+ %define stride_n r4q
+
+ pxor m15, m15
+ .loop:
+ LOAD_SQUARE_16 m15
+ SORT_AXIS_16
+
+ mova m9, m8
+ mova m10, m7
+ mova m11, m6
+ mova m12, m5
+ psubw m9, m1 ; linediff1
+ psubw m10, m2 ; linediff2
+ psubw m11, m3 ; linediff3
+ psubw m12, m4 ; linediff4
+
+ psubw m1, m0
+ psubw m2, m0
+ psubw m3, m0
+ psubw m4, m0
+ pminsw m1, m9 ; d1
+ pminsw m2, m10 ; d2
+ pminsw m3, m11 ; d3
+ pminsw m4, m12 ; d4
+ pmaxsw m1, m2
+ pmaxsw m3, m4
+ pmaxsw m1, m3
+ pmaxsw m1, m15 ; d
+
+ mova m13, m0
+ mova m14, m0
+ mova m2, m0
+ mova m4, m0
+ psubw m13, m8
+ psubw m14, m7
+ psubw m2, m6
+ psubw m4, m5
+ pminsw m9, m13 ; u1
+ pminsw m10, m14 ; u2
+ pminsw m11, m2 ; u3
+ pminsw m12, m4 ; u4
+ pmaxsw m9, m10
+ pmaxsw m11, m12
+ pmaxsw m9, m11
+ pmaxsw m9, m15 ; u
+
+ paddw m0, m1
+ psubw m0, m9
+ packuswb m0, m0
+
+ movh [dstq], m0
+ add srcq, mmsize/2
+ add dstq, mmsize/2
+ sub pixelsd, mmsize/2
+ jg .loop
+RET
+
+cglobal rg_fl_mode_24, 4, 5, 16, mmsize, dst, src, stride, pixels
+ mov r4q, strideq
+ neg r4q
+ %define stride_p strideq
+ %define stride_n r4q
+
+ pxor m15, m15
+ .loop:
+ LOAD_SQUARE_16 m15
+ mova [rsp], m0
+ SORT_AXIS_16
+
+ mova m9, m8
+ mova m10, m7
+ mova m11, m6
+ mova m12, m5
+ psubw m9, m1 ; linediff1
+ psubw m10, m2 ; linediff2
+ psubw m11, m3 ; linediff3
+ psubw m12, m4 ; linediff4
+
+ psubw m1, [rsp] ; td1
+ psubw m2, [rsp] ; td2
+ psubw m3, [rsp] ; td3
+ psubw m4, [rsp] ; td4
+ mova m0, m9
+ mova m13, m10
+ mova m14, m11
+ mova m15, m12
+ psubw m0, m1
+ psubw m13, m2
+ psubw m14, m3
+ psubw m15, m4
+ pminsw m1, m0 ; d1
+ pminsw m2, m13 ; d2
+ pminsw m3, m14 ; d3
+ pminsw m4, m15 ; d4
+ pmaxsw m1, m2
+ pmaxsw m3, m4
+
+ mova m0, [rsp]
+ mova m13, [rsp]
+ mova m14, [rsp]
+ mova m15, [rsp]
+ psubw m0, m8 ; tu1
+ psubw m13, m7 ; tu2
+ psubw m14, m6 ; tu3
+ psubw m15, m5 ; tu4
+ psubw m9, m0
+ psubw m10, m13
+ psubw m11, m14
+ psubw m12, m15
+ pminsw m9, m0 ; u1
+ pminsw m10, m13 ; u2
+ pminsw m11, m14 ; u3
+ pminsw m12, m15 ; u4
+ pmaxsw m9, m10
+ pmaxsw m11, m12
+
+ pmaxsw m1, m3 ; d without max(d,0)
+ pmaxsw m9, m11 ; u without max(u,0)
+ pxor m15, m15
+ pmaxsw m1, m15
+ pmaxsw m9, m15
+
+ mova m0, [rsp]
+ paddw m0, m1
+ psubw m0, m9
+ packuswb m0, m0
+
+ movh [dstq], m0
+ add srcq, mmsize/2
+ add dstq, mmsize/2
+ sub pixelsd, mmsize/2
+ jg .loop
+RET
+%endif
diff --git a/libavfilter/x86/vf_removegrain_init.c b/libavfilter/x86/vf_removegrain_init.c
new file mode 100644
index 0000000000..07314b3244
--- /dev/null
+++ b/libavfilter/x86/vf_removegrain_init.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2015 James Darnley
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/attributes.h"
+#include "libavutil/cpu.h"
+#include "libavutil/x86/cpu.h"
+#include "libavfilter/removegrain.h"
+
+void ff_rg_fl_mode_1_sse2(uint8_t *dst, uint8_t *src, ptrdiff_t stride, int pixels);
+void ff_rg_fl_mode_10_sse2(uint8_t *dst, uint8_t *src, ptrdiff_t stride, int pixels);
+void ff_rg_fl_mode_11_12_sse2(uint8_t *dst, uint8_t *src, ptrdiff_t stride, int pixels);
+void ff_rg_fl_mode_13_14_sse2(uint8_t *dst, uint8_t *src, ptrdiff_t stride, int pixels);
+void ff_rg_fl_mode_19_sse2(uint8_t *dst, uint8_t *src, ptrdiff_t stride, int pixels);
+void ff_rg_fl_mode_20_sse2(uint8_t *dst, uint8_t *src, ptrdiff_t stride, int pixels);
+void ff_rg_fl_mode_21_sse2(uint8_t *dst, uint8_t *src, ptrdiff_t stride, int pixels);
+void ff_rg_fl_mode_22_sse2(uint8_t *dst, uint8_t *src, ptrdiff_t stride, int pixels);
+#if ARCH_X86_64
+void ff_rg_fl_mode_2_sse2(uint8_t *dst, uint8_t *src, ptrdiff_t stride, int pixels);
+void ff_rg_fl_mode_3_sse2(uint8_t *dst, uint8_t *src, ptrdiff_t stride, int pixels);
+void ff_rg_fl_mode_4_sse2(uint8_t *dst, uint8_t *src, ptrdiff_t stride, int pixels);
+void ff_rg_fl_mode_5_sse2(uint8_t *dst, uint8_t *src, ptrdiff_t stride, int pixels);
+void ff_rg_fl_mode_6_sse2(uint8_t *dst, uint8_t *src, ptrdiff_t stride, int pixels);
+void ff_rg_fl_mode_7_sse2(uint8_t *dst, uint8_t *src, ptrdiff_t stride, int pixels);
+void ff_rg_fl_mode_8_sse2(uint8_t *dst, uint8_t *src, ptrdiff_t stride, int pixels);
+void ff_rg_fl_mode_9_sse2(uint8_t *dst, uint8_t *src, ptrdiff_t stride, int pixels);
+void ff_rg_fl_mode_15_16_sse2(uint8_t *dst, uint8_t *src, ptrdiff_t stride, int pixels);
+void ff_rg_fl_mode_17_sse2(uint8_t *dst, uint8_t *src, ptrdiff_t stride, int pixels);
+void ff_rg_fl_mode_18_sse2(uint8_t *dst, uint8_t *src, ptrdiff_t stride, int pixels);
+void ff_rg_fl_mode_23_sse2(uint8_t *dst, uint8_t *src, ptrdiff_t stride, int pixels);
+void ff_rg_fl_mode_24_sse2(uint8_t *dst, uint8_t *src, ptrdiff_t stride, int pixels);
+#endif
+
+av_cold void ff_removegrain_init_x86(RemoveGrainContext *rg)
+{
+#if CONFIG_GPL
+ int cpu_flags = av_get_cpu_flags();
+ int i;
+
+ for (i = 0; i < rg->nb_planes; i++) {
+ if (EXTERNAL_SSE2(cpu_flags))
+ switch (rg->mode[i]) {
+ case 1: rg->fl[i] = ff_rg_fl_mode_1_sse2; break;
+ case 10: rg->fl[i] = ff_rg_fl_mode_10_sse2; break;
+ case 11: /* fall through */
+ case 12: rg->fl[i] = ff_rg_fl_mode_11_12_sse2; break;
+ case 13: /* fall through */
+ case 14: rg->fl[i] = ff_rg_fl_mode_13_14_sse2; break;
+ case 19: rg->fl[i] = ff_rg_fl_mode_19_sse2; break;
+ case 20: rg->fl[i] = ff_rg_fl_mode_20_sse2; break;
+ case 21: rg->fl[i] = ff_rg_fl_mode_21_sse2; break;
+ case 22: rg->fl[i] = ff_rg_fl_mode_22_sse2; break;
+#if ARCH_X86_64
+ case 2: rg->fl[i] = ff_rg_fl_mode_2_sse2; break;
+ case 3: rg->fl[i] = ff_rg_fl_mode_3_sse2; break;
+ case 4: rg->fl[i] = ff_rg_fl_mode_4_sse2; break;
+ case 5: rg->fl[i] = ff_rg_fl_mode_5_sse2; break;
+ case 6: rg->fl[i] = ff_rg_fl_mode_6_sse2; break;
+ case 7: rg->fl[i] = ff_rg_fl_mode_7_sse2; break;
+ case 8: rg->fl[i] = ff_rg_fl_mode_8_sse2; break;
+ case 9: rg->fl[i] = ff_rg_fl_mode_9_sse2; break;
+ case 15: /* fall through */
+ case 16: rg->fl[i] = ff_rg_fl_mode_15_16_sse2; break;
+ case 17: rg->fl[i] = ff_rg_fl_mode_17_sse2; break;
+ case 18: rg->fl[i] = ff_rg_fl_mode_18_sse2; break;
+ case 23: rg->fl[i] = ff_rg_fl_mode_23_sse2; break;
+ case 24: rg->fl[i] = ff_rg_fl_mode_24_sse2; break;
+#endif /* ARCH_x86_64 */
+ }
+ }
+#endif /* CONFIG_GPL */
+}
diff --git a/libavfilter/x86/vf_spp.c b/libavfilter/x86/vf_spp.c
new file mode 100644
index 0000000000..45a9eb068c
--- /dev/null
+++ b/libavfilter/x86/vf_spp.c
@@ -0,0 +1,237 @@
+/*
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+
+#include "libavutil/attributes.h"
+#include "libavutil/cpu.h"
+#include "libavutil/mem.h"
+#include "libavutil/x86/asm.h"
+#include "libavfilter/vf_spp.h"
+
+#if HAVE_MMX_INLINE
+static void hardthresh_mmx(int16_t dst[64], const int16_t src[64],
+ int qp, const uint8_t *permutation)
+{
+ int bias = 0; //FIXME
+ unsigned int threshold1;
+
+ threshold1 = qp * ((1<<4) - bias) - 1;
+
+#define REQUANT_CORE(dst0, dst1, dst2, dst3, src0, src1, src2, src3) \
+ "movq " #src0 ", %%mm0 \n" \
+ "movq " #src1 ", %%mm1 \n" \
+ "movq " #src2 ", %%mm2 \n" \
+ "movq " #src3 ", %%mm3 \n" \
+ "psubw %%mm4, %%mm0 \n" \
+ "psubw %%mm4, %%mm1 \n" \
+ "psubw %%mm4, %%mm2 \n" \
+ "psubw %%mm4, %%mm3 \n" \
+ "paddusw %%mm5, %%mm0 \n" \
+ "paddusw %%mm5, %%mm1 \n" \
+ "paddusw %%mm5, %%mm2 \n" \
+ "paddusw %%mm5, %%mm3 \n" \
+ "paddw %%mm6, %%mm0 \n" \
+ "paddw %%mm6, %%mm1 \n" \
+ "paddw %%mm6, %%mm2 \n" \
+ "paddw %%mm6, %%mm3 \n" \
+ "psubusw %%mm6, %%mm0 \n" \
+ "psubusw %%mm6, %%mm1 \n" \
+ "psubusw %%mm6, %%mm2 \n" \
+ "psubusw %%mm6, %%mm3 \n" \
+ "psraw $3, %%mm0 \n" \
+ "psraw $3, %%mm1 \n" \
+ "psraw $3, %%mm2 \n" \
+ "psraw $3, %%mm3 \n" \
+ \
+ "movq %%mm0, %%mm7 \n" \
+ "punpcklwd %%mm2, %%mm0 \n" /*A*/ \
+ "punpckhwd %%mm2, %%mm7 \n" /*C*/ \
+ "movq %%mm1, %%mm2 \n" \
+ "punpcklwd %%mm3, %%mm1 \n" /*B*/ \
+ "punpckhwd %%mm3, %%mm2 \n" /*D*/ \
+ "movq %%mm0, %%mm3 \n" \
+ "punpcklwd %%mm1, %%mm0 \n" /*A*/ \
+ "punpckhwd %%mm7, %%mm3 \n" /*C*/ \
+ "punpcklwd %%mm2, %%mm7 \n" /*B*/ \
+ "punpckhwd %%mm2, %%mm1 \n" /*D*/ \
+ \
+ "movq %%mm0, " #dst0 " \n" \
+ "movq %%mm7, " #dst1 " \n" \
+ "movq %%mm3, " #dst2 " \n" \
+ "movq %%mm1, " #dst3 " \n"
+
+ __asm__ volatile(
+ "movd %2, %%mm4 \n"
+ "movd %3, %%mm5 \n"
+ "movd %4, %%mm6 \n"
+ "packssdw %%mm4, %%mm4 \n"
+ "packssdw %%mm5, %%mm5 \n"
+ "packssdw %%mm6, %%mm6 \n"
+ "packssdw %%mm4, %%mm4 \n"
+ "packssdw %%mm5, %%mm5 \n"
+ "packssdw %%mm6, %%mm6 \n"
+ REQUANT_CORE( (%1), 8(%1), 16(%1), 24(%1), (%0), 8(%0), 64(%0), 72(%0))
+ REQUANT_CORE(32(%1), 40(%1), 48(%1), 56(%1),16(%0),24(%0), 48(%0), 56(%0))
+ REQUANT_CORE(64(%1), 72(%1), 80(%1), 88(%1),32(%0),40(%0), 96(%0),104(%0))
+ REQUANT_CORE(96(%1),104(%1),112(%1),120(%1),80(%0),88(%0),112(%0),120(%0))
+ : : "r" (src), "r" (dst), "g" (threshold1+1), "g" (threshold1+5), "g" (threshold1-4) //FIXME maybe more accurate then needed?
+ );
+ dst[0] = (src[0] + 4) >> 3;
+}
+
+static void softthresh_mmx(int16_t dst[64], const int16_t src[64],
+ int qp, const uint8_t *permutation)
+{
+ int bias = 0; //FIXME
+ unsigned int threshold1;
+
+ threshold1 = qp*((1<<4) - bias) - 1;
+
+#undef REQUANT_CORE
+#define REQUANT_CORE(dst0, dst1, dst2, dst3, src0, src1, src2, src3) \
+ "movq " #src0 ", %%mm0 \n" \
+ "movq " #src1 ", %%mm1 \n" \
+ "pxor %%mm6, %%mm6 \n" \
+ "pxor %%mm7, %%mm7 \n" \
+ "pcmpgtw %%mm0, %%mm6 \n" \
+ "pcmpgtw %%mm1, %%mm7 \n" \
+ "pxor %%mm6, %%mm0 \n" \
+ "pxor %%mm7, %%mm1 \n" \
+ "psubusw %%mm4, %%mm0 \n" \
+ "psubusw %%mm4, %%mm1 \n" \
+ "pxor %%mm6, %%mm0 \n" \
+ "pxor %%mm7, %%mm1 \n" \
+ "movq " #src2 ", %%mm2 \n" \
+ "movq " #src3 ", %%mm3 \n" \
+ "pxor %%mm6, %%mm6 \n" \
+ "pxor %%mm7, %%mm7 \n" \
+ "pcmpgtw %%mm2, %%mm6 \n" \
+ "pcmpgtw %%mm3, %%mm7 \n" \
+ "pxor %%mm6, %%mm2 \n" \
+ "pxor %%mm7, %%mm3 \n" \
+ "psubusw %%mm4, %%mm2 \n" \
+ "psubusw %%mm4, %%mm3 \n" \
+ "pxor %%mm6, %%mm2 \n" \
+ "pxor %%mm7, %%mm3 \n" \
+ \
+ "paddsw %%mm5, %%mm0 \n" \
+ "paddsw %%mm5, %%mm1 \n" \
+ "paddsw %%mm5, %%mm2 \n" \
+ "paddsw %%mm5, %%mm3 \n" \
+ "psraw $3, %%mm0 \n" \
+ "psraw $3, %%mm1 \n" \
+ "psraw $3, %%mm2 \n" \
+ "psraw $3, %%mm3 \n" \
+ \
+ "movq %%mm0, %%mm7 \n" \
+ "punpcklwd %%mm2, %%mm0 \n" /*A*/ \
+ "punpckhwd %%mm2, %%mm7 \n" /*C*/ \
+ "movq %%mm1, %%mm2 \n" \
+ "punpcklwd %%mm3, %%mm1 \n" /*B*/ \
+ "punpckhwd %%mm3, %%mm2 \n" /*D*/ \
+ "movq %%mm0, %%mm3 \n" \
+ "punpcklwd %%mm1, %%mm0 \n" /*A*/ \
+ "punpckhwd %%mm7, %%mm3 \n" /*C*/ \
+ "punpcklwd %%mm2, %%mm7 \n" /*B*/ \
+ "punpckhwd %%mm2, %%mm1 \n" /*D*/ \
+ \
+ "movq %%mm0, " #dst0 " \n" \
+ "movq %%mm7, " #dst1 " \n" \
+ "movq %%mm3, " #dst2 " \n" \
+ "movq %%mm1, " #dst3 " \n"
+
+ __asm__ volatile(
+ "movd %2, %%mm4 \n"
+ "movd %3, %%mm5 \n"
+ "packssdw %%mm4, %%mm4 \n"
+ "packssdw %%mm5, %%mm5 \n"
+ "packssdw %%mm4, %%mm4 \n"
+ "packssdw %%mm5, %%mm5 \n"
+ REQUANT_CORE( (%1), 8(%1), 16(%1), 24(%1), (%0), 8(%0), 64(%0), 72(%0))
+ REQUANT_CORE(32(%1), 40(%1), 48(%1), 56(%1),16(%0),24(%0), 48(%0), 56(%0))
+ REQUANT_CORE(64(%1), 72(%1), 80(%1), 88(%1),32(%0),40(%0), 96(%0),104(%0))
+ REQUANT_CORE(96(%1),104(%1),112(%1),120(%1),80(%0),88(%0),112(%0),120(%0))
+ : : "r" (src), "r" (dst), "g" (threshold1), "rm" (4) //FIXME maybe more accurate then needed?
+ );
+
+ dst[0] = (src[0] + 4) >> 3;
+}
+
+static void store_slice_mmx(uint8_t *dst, const int16_t *src,
+ int dst_stride, int src_stride,
+ int width, int height, int log2_scale,
+ const uint8_t dither[8][8])
+{
+ int y;
+
+ for (y = 0; y < height; y++) {
+ uint8_t *dst1 = dst;
+ const int16_t *src1 = src;
+ __asm__ volatile(
+ "movq (%3), %%mm3 \n"
+ "movq (%3), %%mm4 \n"
+ "movd %4, %%mm2 \n"
+ "pxor %%mm0, %%mm0 \n"
+ "punpcklbw %%mm0, %%mm3 \n"
+ "punpckhbw %%mm0, %%mm4 \n"
+ "psraw %%mm2, %%mm3 \n"
+ "psraw %%mm2, %%mm4 \n"
+ "movd %5, %%mm2 \n"
+ "1: \n"
+ "movq (%0), %%mm0 \n"
+ "movq 8(%0), %%mm1 \n"
+ "paddw %%mm3, %%mm0 \n"
+ "paddw %%mm4, %%mm1 \n"
+ "psraw %%mm2, %%mm0 \n"
+ "psraw %%mm2, %%mm1 \n"
+ "packuswb %%mm1, %%mm0 \n"
+ "movq %%mm0, (%1) \n"
+ "add $16, %0 \n"
+ "add $8, %1 \n"
+ "cmp %2, %1 \n"
+ " jb 1b \n"
+ : "+r" (src1), "+r"(dst1)
+ : "r"(dst + width), "r"(dither[y]), "g"(log2_scale), "g"(MAX_LEVEL - log2_scale)
+ );
+ src += src_stride;
+ dst += dst_stride;
+ }
+}
+
+#endif /* HAVE_MMX_INLINE */
+
+av_cold void ff_spp_init_x86(SPPContext *s)
+{
+#if HAVE_MMX_INLINE
+ int cpu_flags = av_get_cpu_flags();
+
+ if (cpu_flags & AV_CPU_FLAG_MMX) {
+ int64_t bps;
+ s->store_slice = store_slice_mmx;
+ av_opt_get_int(s->dct, "bits_per_sample", 0, &bps);
+ if (bps <= 8) {
+ switch (s->mode) {
+ case 0: s->requantize = hardthresh_mmx; break;
+ case 1: s->requantize = softthresh_mmx; break;
+ }
+ }
+ }
+#endif
+}
diff --git a/libavfilter/x86/vf_ssim.asm b/libavfilter/x86/vf_ssim.asm
new file mode 100644
index 0000000000..3293e66701
--- /dev/null
+++ b/libavfilter/x86/vf_ssim.asm
@@ -0,0 +1,247 @@
+;*****************************************************************************
+;* x86-optimized functions for ssim filter
+;*
+;* Copyright (C) 2015 Ronald S. Bultje <rsbultje@gmail.com>
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+%include "libavutil/x86/x86util.asm"
+
+SECTION_RODATA
+
+pw_1: times 8 dw 1
+ssim_c1: times 4 dd 416 ;(.01*.01*255*255*64 + .5)
+ssim_c2: times 4 dd 235963 ;(.03*.03*255*255*64*63 + .5)
+
+SECTION .text
+
+%macro SSIM_4X4_LINE 1
+%if ARCH_X86_64
+cglobal ssim_4x4_line, 6, 8, %1, buf, buf_stride, ref, ref_stride, sums, w, buf_stride3, ref_stride3
+%else
+cglobal ssim_4x4_line, 5, 7, %1, buf, buf_stride, ref, ref_stride, sums, buf_stride3, ref_stride3
+%define wd r5mp
+%endif
+ lea ref_stride3q, [ref_strideq*3]
+ lea buf_stride3q, [buf_strideq*3]
+%if notcpuflag(xop)
+ pxor m7, m7
+ mova m15, [pw_1]
+%endif
+
+.loop:
+%if cpuflag(xop)
+ pmovzxbw m0, [bufq+buf_strideq*0]
+ pmovzxbw m1, [refq+ref_strideq*0]
+ pmaddwd m4, m0, m0
+ pmaddwd m6, m0, m1
+ pmovzxbw m2, [bufq+buf_strideq*1]
+ vpmadcswd m4, m1, m1, m4
+ pmovzxbw m3, [refq+ref_strideq*1]
+ paddw m0, m2
+ vpmadcswd m4, m2, m2, m4
+ vpmadcswd m6, m2, m3, m6
+ paddw m1, m3
+ vpmadcswd m4, m3, m3, m4
+
+ pmovzxbw m2, [bufq+buf_strideq*2]
+ pmovzxbw m3, [refq+ref_strideq*2]
+ vpmadcswd m4, m2, m2, m4
+ vpmadcswd m6, m2, m3, m6
+ pmovzxbw m5, [bufq+buf_stride3q]
+ pmovzxbw m7, [refq+ref_stride3q]
+ vpmadcswd m4, m3, m3, m4
+ vpmadcswd m6, m5, m7, m6
+ paddw m0, m2
+ paddw m1, m3
+ vpmadcswd m4, m5, m5, m4
+ paddw m0, m5
+ paddw m1, m7
+ vpmadcswd m4, m7, m7, m4
+%else
+ movh m0, [bufq+buf_strideq*0] ; a1
+ movh m1, [refq+ref_strideq*0] ; b1
+ movh m2, [bufq+buf_strideq*1] ; a2
+ movh m3, [refq+ref_strideq*1] ; b2
+ punpcklbw m0, m7 ; s1 [word]
+ punpcklbw m1, m7 ; s2 [word]
+ punpcklbw m2, m7 ; s1 [word]
+ punpcklbw m3, m7 ; s2 [word]
+ pmaddwd m4, m0, m0 ; a1 * a1
+ pmaddwd m5, m1, m1 ; b1 * b1
+ pmaddwd m8, m2, m2 ; a2 * a2
+ pmaddwd m9, m3, m3 ; b2 * b2
+ paddd m4, m5 ; ss
+ paddd m8, m9 ; ss
+ pmaddwd m6, m0, m1 ; a1 * b1 = ss12
+ pmaddwd m5, m2, m3 ; a2 * b2 = ss12
+ paddw m0, m2
+ paddw m1, m3
+ paddd m6, m5 ; s12
+ paddd m4, m8 ; ss
+
+ movh m2, [bufq+buf_strideq*2] ; a3
+ movh m3, [refq+ref_strideq*2] ; b3
+ movh m5, [bufq+buf_stride3q] ; a4
+ movh m8, [refq+ref_stride3q] ; b4
+ punpcklbw m2, m7 ; s1 [word]
+ punpcklbw m3, m7 ; s2 [word]
+ punpcklbw m5, m7 ; s1 [word]
+ punpcklbw m8, m7 ; s2 [word]
+ pmaddwd m9, m2, m2 ; a3 * a3
+ pmaddwd m10, m3, m3 ; b3 * b3
+ pmaddwd m12, m5, m5 ; a4 * a4
+ pmaddwd m13, m8, m8 ; b4 * b4
+ pmaddwd m11, m2, m3 ; a3 * b3 = ss12
+ pmaddwd m14, m5, m8 ; a4 * b4 = ss12
+ paddd m9, m10
+ paddd m12, m13
+ paddw m0, m2
+ paddw m1, m3
+ paddw m0, m5
+ paddw m1, m8
+ paddd m6, m11
+ paddd m4, m9
+ paddd m6, m14
+ paddd m4, m12
+%endif
+
+ ; m0 = [word] s1 a,a,a,a,b,b,b,b
+ ; m1 = [word] s2 a,a,a,a,b,b,b,b
+ ; m4 = [dword] ss a,a,b,b
+ ; m6 = [dword] s12 a,a,b,b
+
+%if cpuflag(xop)
+ vphaddwq m0, m0 ; [dword] s1 a, 0, b, 0
+ vphaddwq m1, m1 ; [dword] s2 a, 0, b, 0
+ vphadddq m4, m4 ; [dword] ss a, 0, b, 0
+ vphadddq m6, m6 ; [dword] s12 a, 0, b, 0
+ punpckhdq m2, m0, m1 ; [dword] s1 b, s2 b, 0, 0
+ punpckldq m0, m1 ; [dword] s1 a, s2 a, 0, 0
+ punpckhdq m3, m4, m6 ; [dword] ss b, s12 b, 0, 0
+ punpckldq m4, m6 ; [dword] ss a, s12 a, 0, 0
+ punpcklqdq m1, m2, m3 ; [dword] b s1, s2, ss, s12
+ punpcklqdq m0, m4 ; [dword] a s1, s2, ss, s12
+%else
+ pmaddwd m0, m15 ; [dword] s1 a,a,b,b
+ pmaddwd m1, m15 ; [dword] s2 a,a,b,b
+ phaddd m0, m4 ; [dword] s1 a, b, ss a, b
+ phaddd m1, m6 ; [dword] s2 a, b, s12 a, b
+ punpckhdq m2, m0, m1 ; [dword] ss a, s12 a, ss b, s12 b
+ punpckldq m0, m1 ; [dword] s1 a, s2 a, s1 b, s2 b
+ punpckhqdq m1, m0, m2 ; [dword] b s1, s2, ss, s12
+ punpcklqdq m0, m2 ; [dword] a s1, s2, ss, s12
+%endif
+
+ mova [sumsq+ 0], m0
+ mova [sumsq+mmsize], m1
+
+ add bufq, mmsize/2
+ add refq, mmsize/2
+ add sumsq, mmsize*2
+ sub wd, mmsize/8
+ jg .loop
+ RET
+%endmacro
+
+%if ARCH_X86_64
+INIT_XMM ssse3
+SSIM_4X4_LINE 16
+%endif
+%if HAVE_XOP_EXTERNAL
+INIT_XMM xop
+SSIM_4X4_LINE 8
+%endif
+
+INIT_XMM sse4
+cglobal ssim_end_line, 3, 3, 6, sum0, sum1, w
+ pxor m0, m0
+.loop:
+ mova m1, [sum0q+mmsize*0]
+ mova m2, [sum0q+mmsize*1]
+ mova m3, [sum0q+mmsize*2]
+ mova m4, [sum0q+mmsize*3]
+ paddd m1, [sum1q+mmsize*0]
+ paddd m2, [sum1q+mmsize*1]
+ paddd m3, [sum1q+mmsize*2]
+ paddd m4, [sum1q+mmsize*3]
+ paddd m1, m2
+ paddd m2, m3
+ paddd m3, m4
+ paddd m4, [sum0q+mmsize*4]
+ paddd m4, [sum1q+mmsize*4]
+ TRANSPOSE4x4D 1, 2, 3, 4, 5
+
+ ; m1 = fs1, m2 = fs2, m3 = fss, m4 = fs12
+ pslld m3, 6
+ pslld m4, 6
+ pmulld m5, m1, m2 ; fs1 * fs2
+ pmulld m1, m1 ; fs1 * fs1
+ pmulld m2, m2 ; fs2 * fs2
+ psubd m3, m1
+ psubd m4, m5 ; covariance
+ psubd m3, m2 ; variance
+
+ ; m1 = fs1 * fs1, m2 = fs2 * fs2, m3 = variance, m4 = covariance, m5 = fs1 * fs2
+ paddd m4, m4 ; 2 * covariance
+ paddd m5, m5 ; 2 * fs1 * fs2
+ paddd m1, m2 ; fs1 * fs1 + fs2 * fs2
+ paddd m3, [ssim_c2] ; variance + ssim_c2
+ paddd m4, [ssim_c2] ; 2 * covariance + ssim_c2
+ paddd m5, [ssim_c1] ; 2 * fs1 * fs2 + ssim_c1
+ paddd m1, [ssim_c1] ; fs1 * fs1 + fs2 * fs2 + ssim_c1
+
+ ; convert to float
+ cvtdq2ps m3, m3
+ cvtdq2ps m4, m4
+ cvtdq2ps m5, m5
+ cvtdq2ps m1, m1
+ mulps m4, m5
+ mulps m3, m1
+ divps m4, m3 ; ssim_endl
+ addps m0, m4 ; ssim
+ add sum0q, mmsize*4
+ add sum1q, mmsize*4
+ sub wd, 4
+ jg .loop
+
+ ; subps the ones we added too much
+ test wd, wd
+ jz .end
+ add wd, 4
+ test wd, 2
+ jz .skip2
+ psrldq m4, 8
+.skip2:
+ test wd, 1
+ jz .skip1
+ psrldq m4, 4
+.skip1:
+ subps m0, m4
+
+.end:
+ movhlps m4, m0
+ addps m0, m4
+ movss m4, m0
+ shufps m0, m0, 1
+ addss m0, m4
+%if ARCH_X86_32
+ movss r0m, m0
+ fld r0mp
+%endif
+ RET
diff --git a/libavfilter/x86/vf_ssim_init.c b/libavfilter/x86/vf_ssim_init.c
new file mode 100644
index 0000000000..599c928403
--- /dev/null
+++ b/libavfilter/x86/vf_ssim_init.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2015 Ronald S. Bultje <rsbultje@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/x86/cpu.h"
+
+#include "libavfilter/ssim.h"
+
+void ff_ssim_4x4_line_ssse3(const uint8_t *buf, ptrdiff_t buf_stride,
+ const uint8_t *ref, ptrdiff_t ref_stride,
+ int (*sums)[4], int w);
+void ff_ssim_4x4_line_xop (const uint8_t *buf, ptrdiff_t buf_stride,
+ const uint8_t *ref, ptrdiff_t ref_stride,
+ int (*sums)[4], int w);
+float ff_ssim_end_line_sse4(const int (*sum0)[4], const int (*sum1)[4], int w);
+
+void ff_ssim_init_x86(SSIMDSPContext *dsp)
+{
+ int cpu_flags = av_get_cpu_flags();
+
+ if (ARCH_X86_64 && EXTERNAL_SSSE3(cpu_flags))
+ dsp->ssim_4x4_line = ff_ssim_4x4_line_ssse3;
+ if (EXTERNAL_SSE4(cpu_flags))
+ dsp->ssim_end_line = ff_ssim_end_line_sse4;
+ if (EXTERNAL_XOP(cpu_flags))
+ dsp->ssim_4x4_line = ff_ssim_4x4_line_xop;
+}
diff --git a/libavfilter/x86/vf_stereo3d.asm b/libavfilter/x86/vf_stereo3d.asm
new file mode 100644
index 0000000000..a057e495f1
--- /dev/null
+++ b/libavfilter/x86/vf_stereo3d.asm
@@ -0,0 +1,216 @@
+;*****************************************************************************
+;* x86-optimized functions for stereo3d filter
+;*
+;* Copyright (C) 2015 Paul B Mahol
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;*****************************************************************************
+
+%include "libavutil/x86/x86util.asm"
+
+SECTION_RODATA
+
+; rgbrgbrgbrgb
+; rrrrggggbbbb
+
+shuf: db 0, 4, 8, 1,5, 9, 2, 6,10,3, 7,11,-1,-1,-1,-1
+ex_r: db 0,-1,-1,-1,3,-1,-1,-1,6,-1,-1,-1, 9,-1,-1,-1
+ex_g: db 1,-1,-1,-1,4,-1,-1,-1,7,-1,-1,-1,10,-1,-1,-1
+ex_b: db 2,-1,-1,-1,5,-1,-1,-1,8,-1,-1,-1,11,-1,-1,-1
+
+SECTION .text
+
+INIT_XMM sse4
+%if ARCH_X86_64
+cglobal anaglyph, 6, 10, 14, 2*6*mmsize, dst, lsrc, rsrc, dst_linesize, l_linesize, r_linesize, width, height, o, cnt
+%define ana_matrix_rq r6q
+%define ana_matrix_gq r7q
+%define ana_matrix_bq r8q
+
+%else ; ARCH_X86_32
+%if HAVE_ALIGNED_STACK
+cglobal anaglyph, 3, 7, 8, 2*9*mmsize, dst, lsrc, rsrc, dst_linesize, l_linesize, o, cnt
+%else
+cglobal anaglyph, 3, 6, 8, 2*9*mmsize, dst, lsrc, rsrc, dst_linesize, o, cnt
+%define l_linesizeq r4mp
+%endif ; HAVE_ALIGNED_STACK
+%define ana_matrix_rq r3q
+%define ana_matrix_gq r4q
+%define ana_matrix_bq r5q
+%define r_linesizeq r5mp
+%define widthd r6mp
+%define heightd r7mp
+%define m8 [rsp+mmsize*12]
+%define m9 [rsp+mmsize*13]
+%define m10 [rsp+mmsize*14]
+%define m11 [rsp+mmsize*15]
+%define m12 [rsp+mmsize*16]
+%define m13 [rsp+mmsize*17]
+%endif ; ARCH
+
+ mov ana_matrix_rq, r8m
+ mov ana_matrix_gq, r9m
+ mov ana_matrix_bq, r10m
+ movu m3, [ana_matrix_rq+ 0]
+ movq m5, [ana_matrix_rq+16]
+ pshufd m0, m3, q0000
+ pshufd m1, m3, q1111
+ pshufd m2, m3, q2222
+ pshufd m3, m3, q3333
+ pshufd m4, m5, q0000
+ pshufd m5, m5, q1111
+ mova [rsp+mmsize*0], m0
+ mova [rsp+mmsize*1], m1
+ mova [rsp+mmsize*2], m2
+ mova [rsp+mmsize*3], m3
+ mova [rsp+mmsize*4], m4
+ mova [rsp+mmsize*5], m5
+
+ movu m3, [ana_matrix_gq+ 0]
+ movq m5, [ana_matrix_gq+16]
+ pshufd m0, m3, q0000
+ pshufd m1, m3, q1111
+ pshufd m2, m3, q2222
+ pshufd m3, m3, q3333
+ pshufd m4, m5, q0000
+ pshufd m5, m5, q1111
+ mova [rsp+mmsize*6 ], m0
+ mova [rsp+mmsize*7 ], m1
+ mova [rsp+mmsize*8 ], m2
+ mova [rsp+mmsize*9 ], m3
+ mova [rsp+mmsize*10], m4
+ mova [rsp+mmsize*11], m5
+
+%if ARCH_X86_64
+ movu m11, [ana_matrix_bq+ 0]
+ movq m13, [ana_matrix_bq+16]
+ pshufd m8, m11, q0000
+ pshufd m9, m11, q1111
+ pshufd m10, m11, q2222
+ pshufd m11, m11, q3333
+ pshufd m12, m13, q0000
+ pshufd m13, m13, q1111
+ mov widthd, dword widthm
+ mov heightd, dword heightm
+%else
+ movu m3, [ana_matrix_bq+ 0]
+ movq m5, [ana_matrix_bq+16]
+ pshufd m0, m3, q0000
+ pshufd m1, m3, q1111
+ pshufd m2, m3, q2222
+ pshufd m3, m3, q3333
+ pshufd m4, m5, q0000
+ pshufd m5, m5, q1111
+ mova [rsp+mmsize*12], m0
+ mova [rsp+mmsize*13], m1
+ mova [rsp+mmsize*14], m2
+ mova [rsp+mmsize*15], m3
+ mova [rsp+mmsize*16], m4
+ mova [rsp+mmsize*17], m5
+ mov dst_linesizeq, r3m
+%if HAVE_ALIGNED_STACK
+ mov l_linesizeq, r4m
+%endif
+%endif ; ARCH
+
+.nextrow:
+ mov od, widthd
+ xor cntd, cntd
+
+ .loop:
+ movu m3, [lsrcq+cntq]
+ pshufb m1, m3, [ex_r]
+ pshufb m2, m3, [ex_g]
+ pshufb m3, [ex_b]
+ movu m0, [rsrcq+cntq]
+ pshufb m4, m0, [ex_r]
+ pshufb m5, m0, [ex_g]
+ pshufb m0, [ex_b]
+ pmulld m1, [rsp+mmsize*0]
+ pmulld m2, [rsp+mmsize*1]
+ pmulld m3, [rsp+mmsize*2]
+ pmulld m4, [rsp+mmsize*3]
+ pmulld m5, [rsp+mmsize*4]
+ pmulld m0, [rsp+mmsize*5]
+ paddd m1, m2
+ paddd m3, m4
+ paddd m5, m0
+ paddd m1, m3
+ paddd m1, m5
+
+ movu m3, [lsrcq+cntq]
+ pshufb m7, m3, [ex_r]
+ pshufb m2, m3, [ex_g]
+ pshufb m3, [ex_b]
+ movu m0, [rsrcq+cntq]
+ pshufb m4, m0, [ex_r]
+ pshufb m5, m0, [ex_g]
+ pshufb m0, [ex_b]
+ pmulld m7, [rsp+mmsize*6]
+ pmulld m2, [rsp+mmsize*7]
+ pmulld m3, [rsp+mmsize*8]
+ pmulld m4, [rsp+mmsize*9]
+ pmulld m5, [rsp+mmsize*10]
+ pmulld m0, [rsp+mmsize*11]
+ paddd m7, m2
+ paddd m3, m4
+ paddd m5, m0
+ paddd m7, m3
+ paddd m7, m5
+
+ movu m4, [lsrcq+cntq]
+ pshufb m2, m4, [ex_r]
+ pshufb m3, m4, [ex_g]
+ pshufb m4, [ex_b]
+ movu m0, [rsrcq+cntq]
+ pshufb m5, m0, [ex_r]
+ pshufb m6, m0, [ex_g]
+ pshufb m0, [ex_b]
+ pmulld m2, m8
+ pmulld m3, m9
+ pmulld m4, m10
+ pmulld m5, m11
+ pmulld m6, m12
+ pmulld m0, m13
+ paddd m2, m3
+ paddd m4, m5
+ paddd m6, m0
+ paddd m2, m4
+ paddd m2, m6
+
+ psrld m1, 16
+ psrld m7, 16
+ psrld m2, 16
+
+ packusdw m1, m7
+ packusdw m2, m2
+ packuswb m1, m2
+ pshufb m1, [shuf]
+
+ movq [dstq+cntq+0], m1
+ psrldq m1, 8
+ movd [dstq+cntq+8], m1
+ add cntd, 12
+ sub od, 4
+ jg .loop
+
+ add dstq, dst_linesizeq
+ add lsrcq, l_linesizeq
+ add rsrcq, r_linesizeq
+ sub heightd, 1
+ jg .nextrow
+REP_RET
diff --git a/libavfilter/x86/vf_stereo3d_init.c b/libavfilter/x86/vf_stereo3d_init.c
new file mode 100644
index 0000000000..da160a89a0
--- /dev/null
+++ b/libavfilter/x86/vf_stereo3d_init.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2015 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/x86/cpu.h"
+
+#include "libavfilter/stereo3d.h"
+
+void ff_anaglyph_sse4(uint8_t *dst, uint8_t *lsrc, uint8_t *rsrc,
+ ptrdiff_t dst_linesize, ptrdiff_t l_linesize, ptrdiff_t r_linesize,
+ int width, int height,
+ const int *ana_matrix_r, const int *ana_matrix_g, const int *ana_matrix_b);
+
+void ff_stereo3d_init_x86(Stereo3DDSPContext *dsp)
+{
+ int cpu_flags = av_get_cpu_flags();
+
+ if (EXTERNAL_SSE4(cpu_flags)) {
+ dsp->anaglyph = ff_anaglyph_sse4;
+ }
+}
diff --git a/libavfilter/x86/vf_tinterlace_init.c b/libavfilter/x86/vf_tinterlace_init.c
new file mode 100644
index 0000000000..ddb0cced36
--- /dev/null
+++ b/libavfilter/x86/vf_tinterlace_init.c
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2014 Kieran Kunhya <kierank@obe.tv>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include "libavutil/attributes.h"
+#include "libavutil/cpu.h"
+#include "libavutil/internal.h"
+#include "libavutil/mem.h"
+#include "libavutil/x86/asm.h"
+#include "libavutil/x86/cpu.h"
+
+#include "libavfilter/tinterlace.h"
+
+void ff_lowpass_line_sse2(uint8_t *dstp, ptrdiff_t linesize,
+ const uint8_t *srcp,
+ const uint8_t *srcp_above,
+ const uint8_t *srcp_below);
+void ff_lowpass_line_avx (uint8_t *dstp, ptrdiff_t linesize,
+ const uint8_t *srcp,
+ const uint8_t *srcp_above,
+ const uint8_t *srcp_below);
+
+av_cold void ff_tinterlace_init_x86(TInterlaceContext *s)
+{
+ int cpu_flags = av_get_cpu_flags();
+
+ if (EXTERNAL_SSE2(cpu_flags))
+ s->lowpass_line = ff_lowpass_line_sse2;
+ if (EXTERNAL_AVX(cpu_flags))
+ s->lowpass_line = ff_lowpass_line_avx;
+}
diff --git a/libavfilter/x86/vf_w3fdif.asm b/libavfilter/x86/vf_w3fdif.asm
new file mode 100644
index 0000000000..52628c38d7
--- /dev/null
+++ b/libavfilter/x86/vf_w3fdif.asm
@@ -0,0 +1,259 @@
+;*****************************************************************************
+;* x86-optimized functions for w3fdif filter
+;*
+;* Copyright (c) 2015 Paul B Mahol
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+%include "libavutil/x86/x86util.asm"
+
+SECTION .text
+
+INIT_XMM sse2
+cglobal w3fdif_scale, 3, 3, 2, 0, out_pixel, work_pixel, linesize
+.loop:
+ mova m0, [work_pixelq]
+ mova m1, [work_pixelq+mmsize]
+ psrad m0, 15
+ psrad m1, 15
+ packssdw m0, m1
+ packuswb m0, m0
+ movh [out_pixelq], m0
+ add out_pixelq, mmsize/2
+ add work_pixelq, mmsize*2
+ sub linesized, mmsize/2
+ jg .loop
+REP_RET
+
+cglobal w3fdif_simple_low, 4, 5, 6, 0, work_line, in_lines_cur0, coef, linesize, offset
+ movd m1, [coefq]
+ DEFINE_ARGS work_line, in_lines_cur0, in_lines_cur1, linesize, offset
+ SPLATW m0, m1, 0
+ SPLATW m1, m1, 1
+ pxor m4, m4
+ mov offsetq, 0
+ mov in_lines_cur1q, [in_lines_cur0q + gprsize]
+ mov in_lines_cur0q, [in_lines_cur0q]
+
+.loop:
+ movh m2, [in_lines_cur0q+offsetq]
+ movh m3, [in_lines_cur1q+offsetq]
+ punpcklbw m2, m4
+ punpcklbw m3, m4
+ SBUTTERFLY wd, 2, 3, 5
+ pmaddwd m2, m0
+ pmaddwd m3, m1
+ mova [work_lineq+offsetq*4], m2
+ mova [work_lineq+offsetq*4+mmsize], m3
+ add offsetq, mmsize/2
+ sub linesized, mmsize/2
+ jg .loop
+REP_RET
+
+cglobal w3fdif_complex_low, 4, 7, 8, 0, work_line, in_lines_cur0, coef, linesize
+ movq m0, [coefq]
+ DEFINE_ARGS work_line, in_lines_cur0, in_lines_cur1, linesize, offset, in_lines_cur2, in_lines_cur3
+ pshufd m2, m0, q1111
+ SPLATD m0
+ pxor m1, m1
+ mov offsetq, 0
+ mov in_lines_cur3q, [in_lines_cur0q+gprsize*3]
+ mov in_lines_cur2q, [in_lines_cur0q+gprsize*2]
+ mov in_lines_cur1q, [in_lines_cur0q+gprsize]
+ mov in_lines_cur0q, [in_lines_cur0q]
+
+.loop:
+ movh m4, [in_lines_cur0q+offsetq]
+ movh m5, [in_lines_cur1q+offsetq]
+ punpcklbw m4, m1
+ punpcklbw m5, m1
+ SBUTTERFLY wd, 4, 5, 7
+ pmaddwd m4, m0
+ pmaddwd m5, m0
+ movh m6, [in_lines_cur2q+offsetq]
+ movh m3, [in_lines_cur3q+offsetq]
+ punpcklbw m6, m1
+ punpcklbw m3, m1
+ SBUTTERFLY wd, 6, 3, 7
+ pmaddwd m6, m2
+ pmaddwd m3, m2
+ paddd m4, m6
+ paddd m5, m3
+ mova [work_lineq+offsetq*4], m4
+ mova [work_lineq+offsetq*4+mmsize], m5
+ add offsetq, mmsize/2
+ sub linesized, mmsize/2
+ jg .loop
+REP_RET
+
+%if ARCH_X86_64
+cglobal w3fdif_simple_high, 5, 9, 8, 0, work_line, in_lines_cur0, in_lines_adj0, coef, linesize
+%else
+cglobal w3fdif_simple_high, 4, 7, 8, 0, work_line, in_lines_cur0, in_lines_adj0, coef, linesize
+%endif
+ movq m2, [coefq]
+%if ARCH_X86_64
+ DEFINE_ARGS work_line, in_lines_cur0, in_lines_adj0, in_lines_cur1, linesize, offset, in_lines_cur2, in_lines_adj1, in_lines_adj2
+ xor offsetq, offsetq
+%else
+ DEFINE_ARGS work_line, in_lines_cur0, in_lines_adj0, in_lines_cur1, in_lines_cur2, in_lines_adj1, in_lines_adj2
+ %define linesized r4mp
+%endif
+
+ pshufd m0, m2, q0000
+ SPLATW m2, m2, 2
+ pxor m7, m7
+ mov in_lines_cur2q, [in_lines_cur0q+gprsize*2]
+ mov in_lines_cur1q, [in_lines_cur0q+gprsize]
+ mov in_lines_cur0q, [in_lines_cur0q]
+ mov in_lines_adj2q, [in_lines_adj0q+gprsize*2]
+ mov in_lines_adj1q, [in_lines_adj0q+gprsize]
+ mov in_lines_adj0q, [in_lines_adj0q]
+
+%if ARCH_X86_32
+ sub in_lines_cur1q, in_lines_cur0q
+ sub in_lines_cur2q, in_lines_cur0q
+ sub in_lines_adj0q, in_lines_cur0q
+ sub in_lines_adj1q, in_lines_cur0q
+ sub in_lines_adj2q, in_lines_cur0q
+ %define offsetq in_lines_cur0q
+%endif
+
+.loop:
+%if ARCH_X86_64
+ movh m3, [in_lines_cur0q+offsetq]
+%else
+ movh m3, [in_lines_cur0q]
+%endif
+ movh m4, [in_lines_cur1q+offsetq]
+ punpcklbw m3, m7
+ punpcklbw m4, m7
+ SBUTTERFLY wd, 3, 4, 1
+ pmaddwd m3, m0
+ pmaddwd m4, m0
+ movh m5, [in_lines_adj0q+offsetq]
+ movh m6, [in_lines_adj1q+offsetq]
+ punpcklbw m5, m7
+ punpcklbw m6, m7
+ SBUTTERFLY wd, 5, 6, 1
+ pmaddwd m5, m0
+ pmaddwd m6, m0
+ paddd m3, m5
+ paddd m4, m6
+ movh m5, [in_lines_cur2q+offsetq]
+ movh m6, [in_lines_adj2q+offsetq]
+ punpcklbw m5, m7
+ punpcklbw m6, m7
+ SBUTTERFLY wd, 5, 6, 1
+ pmaddwd m5, m2
+ pmaddwd m6, m2
+ paddd m3, m5
+ paddd m4, m6
+%if ARCH_X86_64
+ paddd m3, [work_lineq+offsetq*4]
+ paddd m4, [work_lineq+offsetq*4+mmsize]
+ mova [work_lineq+offsetq*4], m3
+ mova [work_lineq+offsetq*4+mmsize], m4
+%else
+ paddd m3, [work_lineq]
+ paddd m4, [work_lineq+mmsize]
+ mova [work_lineq], m3
+ mova [work_lineq+mmsize], m4
+ add work_lineq, mmsize*2
+%endif
+ add offsetq, mmsize/2
+ sub linesized, mmsize/2
+ jg .loop
+REP_RET
+
+%if ARCH_X86_64
+
+cglobal w3fdif_complex_high, 5, 13, 10, 0, work_line, in_lines_cur0, in_lines_adj0, coef, linesize
+ movq m0, [coefq+0]
+ movd m4, [coefq+8]
+ DEFINE_ARGS work_line, in_lines_cur0, in_lines_adj0, in_lines_cur1, linesize, offset, in_lines_cur2, in_lines_cur3, in_lines_cur4, in_lines_adj1, in_lines_adj2, in_lines_adj3, in_lines_adj4
+ pshufd m1, m0, q1111
+ SPLATD m0
+ SPLATW m4, m4
+ pxor m3, m3
+ mov offsetq, 0
+ mov in_lines_cur4q, [in_lines_cur0q+gprsize*4]
+ mov in_lines_cur3q, [in_lines_cur0q+gprsize*3]
+ mov in_lines_cur2q, [in_lines_cur0q+gprsize*2]
+ mov in_lines_cur1q, [in_lines_cur0q+gprsize]
+ mov in_lines_cur0q, [in_lines_cur0q]
+ mov in_lines_adj4q, [in_lines_adj0q+gprsize*4]
+ mov in_lines_adj3q, [in_lines_adj0q+gprsize*3]
+ mov in_lines_adj2q, [in_lines_adj0q+gprsize*2]
+ mov in_lines_adj1q, [in_lines_adj0q+gprsize]
+ mov in_lines_adj0q, [in_lines_adj0q]
+
+.loop:
+ movh m5, [in_lines_cur0q+offsetq]
+ movh m6, [in_lines_cur1q+offsetq]
+ punpcklbw m5, m3
+ punpcklbw m6, m3
+ SBUTTERFLY wd, 5, 6, 2
+ pmaddwd m5, m0
+ pmaddwd m6, m0
+ movh m8, [in_lines_cur2q+offsetq]
+ movh m9, [in_lines_cur3q+offsetq]
+ punpcklbw m8, m3
+ punpcklbw m9, m3
+ SBUTTERFLY wd, 8, 9, 2
+ pmaddwd m8, m1
+ pmaddwd m9, m1
+ paddd m5, m8
+ paddd m6, m9
+ movh m8, [in_lines_adj0q+offsetq]
+ movh m9, [in_lines_adj1q+offsetq]
+ punpcklbw m8, m3
+ punpcklbw m9, m3
+ SBUTTERFLY wd, 8, 9, 2
+ pmaddwd m8, m0
+ pmaddwd m9, m0
+ paddd m5, m8
+ paddd m6, m9
+ movh m8, [in_lines_adj2q+offsetq]
+ movh m9, [in_lines_adj3q+offsetq]
+ punpcklbw m8, m3
+ punpcklbw m9, m3
+ SBUTTERFLY wd, 8, 9, 2
+ pmaddwd m8, m1
+ pmaddwd m9, m1
+ paddd m5, m8
+ paddd m6, m9
+ movh m8, [in_lines_cur4q+offsetq]
+ movh m9, [in_lines_adj4q+offsetq]
+ punpcklbw m8, m3
+ punpcklbw m9, m3
+ SBUTTERFLY wd, 8, 9, 2
+ pmaddwd m8, m4
+ pmaddwd m9, m4
+ paddd m5, m8
+ paddd m6, m9
+ paddd m5, [work_lineq+offsetq*4]
+ paddd m6, [work_lineq+offsetq*4+mmsize]
+ mova [work_lineq+offsetq*4], m5
+ mova [work_lineq+offsetq*4+mmsize], m6
+ add offsetq, mmsize/2
+ sub linesized, mmsize/2
+ jg .loop
+REP_RET
+
+%endif
diff --git a/libavfilter/x86/vf_w3fdif_init.c b/libavfilter/x86/vf_w3fdif_init.c
new file mode 100644
index 0000000000..d4534bb1ed
--- /dev/null
+++ b/libavfilter/x86/vf_w3fdif_init.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2015 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/attributes.h"
+#include "libavutil/cpu.h"
+#include "libavutil/mem.h"
+#include "libavutil/x86/asm.h"
+#include "libavutil/x86/cpu.h"
+#include "libavfilter/w3fdif.h"
+
+void ff_w3fdif_simple_low_sse2(int32_t *work_line,
+ uint8_t *in_lines_cur[2],
+ const int16_t *coef, int linesize);
+
+void ff_w3fdif_simple_high_sse2(int32_t *work_line,
+ uint8_t *in_lines_cur[3],
+ uint8_t *in_lines_adj[3],
+ const int16_t *coef, int linesize);
+
+void ff_w3fdif_complex_low_sse2(int32_t *work_line,
+ uint8_t *in_lines_cur[4],
+ const int16_t *coef, int linesize);
+
+void ff_w3fdif_complex_high_sse2(int32_t *work_line,
+ uint8_t *in_lines_cur[5],
+ uint8_t *in_lines_adj[5],
+ const int16_t *coef, int linesize);
+
+void ff_w3fdif_scale_sse2(uint8_t *out_pixel, const int32_t *work_pixel,
+ int linesize, int max);
+
+av_cold void ff_w3fdif_init_x86(W3FDIFDSPContext *dsp, int depth)
+{
+ int cpu_flags = av_get_cpu_flags();
+
+ if (EXTERNAL_SSE2(cpu_flags) && depth <= 8) {
+ dsp->filter_simple_low = ff_w3fdif_simple_low_sse2;
+ dsp->filter_simple_high = ff_w3fdif_simple_high_sse2;
+ dsp->filter_complex_low = ff_w3fdif_complex_low_sse2;
+ dsp->filter_scale = ff_w3fdif_scale_sse2;
+ }
+
+ if (ARCH_X86_64 && EXTERNAL_SSE2(cpu_flags) && depth <= 8) {
+ dsp->filter_complex_high = ff_w3fdif_complex_high_sse2;
+ }
+}
diff --git a/libavfilter/x86/vf_yadif.asm b/libavfilter/x86/vf_yadif.asm
index 3d8b2bc180..a29620ce55 100644
--- a/libavfilter/x86/vf_yadif.asm
+++ b/libavfilter/x86/vf_yadif.asm
@@ -4,20 +4,20 @@
;* Copyright (C) 2006 Michael Niedermayer <michaelni@gmx.at>
;* Copyright (c) 2013 Daniel Kang <daniel.d.kang@gmail.com>
;*
-;* This file is part of Libav.
+;* This file is part of FFmpeg.
;*
-;* Libav is free software; you can redistribute it and/or
+;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
-;* Libav is distributed in the hope that it will be useful,
+;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
-;* License along with Libav; if not, write to the Free Software
+;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
@@ -39,11 +39,7 @@ SECTION .text
pavgb m5, m3
pand m4, [pb_1]
psubusb m5, m4
-%if mmsize == 16
- psrldq m5, 1
-%else
- psrlq m5, 8
-%endif
+ RSHIFT m5, 1
punpcklbw m5, m7
mova m4, m2
psubusb m2, m3
@@ -51,13 +47,8 @@ SECTION .text
pmaxub m2, m3
mova m3, m2
mova m4, m2
-%if mmsize == 16
- psrldq m3, 1
- psrldq m4, 2
-%else
- psrlq m3, 8
- psrlq m4, 16
-%endif
+ RSHIFT m3, 1
+ RSHIFT m4, 2
punpcklbw m2, m7
punpcklbw m3, m7
punpcklbw m4, m7
@@ -90,17 +81,17 @@ SECTION .text
%endmacro
%macro LOAD 2
- movh m%1, %2
- punpcklbw m%1, m7
+ movh %1, %2
+ punpcklbw %1, m7
%endmacro
%macro FILTER 3
.loop%1:
pxor m7, m7
- LOAD 0, [curq+t1]
- LOAD 1, [curq+t0]
- LOAD 2, [%2]
- LOAD 3, [%3]
+ LOAD m0, [curq+t1]
+ LOAD m1, [curq+t0]
+ LOAD m2, [%2]
+ LOAD m3, [%3]
mova m4, m3
paddw m3, m2
psraw m3, 1
@@ -109,8 +100,8 @@ SECTION .text
mova [rsp+32], m1
psubw m2, m4
ABS1 m2, m4
- LOAD 3, [prevq+t1]
- LOAD 4, [prevq+t0]
+ LOAD m3, [prevq+t1]
+ LOAD m4, [prevq+t0]
psubw m3, m0
psubw m4, m1
ABS1 m3, m5
@@ -119,8 +110,8 @@ SECTION .text
psrlw m2, 1
psrlw m3, 1
pmaxsw m2, m3
- LOAD 3, [nextq+t1]
- LOAD 4, [nextq+t0]
+ LOAD m3, [nextq+t1]
+ LOAD m4, [nextq+t0]
psubw m3, m0
psubw m4, m1
ABS1 m3, m5
@@ -166,10 +157,10 @@ SECTION .text
mova m6, [rsp+48]
cmp DWORD r8m, 2
jge .end%1
- LOAD 2, [%2+t1*2]
- LOAD 4, [%3+t1*2]
- LOAD 3, [%2+t0*2]
- LOAD 5, [%3+t0*2]
+ LOAD m2, [%2+t1*2]
+ LOAD m4, [%3+t1*2]
+ LOAD m3, [%2+t0*2]
+ LOAD m5, [%3+t0*2]
paddw m2, m4
paddw m3, m5
psrlw m2, 1
@@ -220,8 +211,6 @@ cglobal yadif_filter_line, 4, 6, 8, 80, dst, prev, cur, next, w, prefs, \
cglobal yadif_filter_line, 4, 7, 8, 80, dst, prev, cur, next, w, prefs, \
mrefs, parity, mode
%endif
- cmp DWORD wm, 0
- jle .ret
%if ARCH_X86_32
mov r4, r5mp
mov r5, r6mp
diff --git a/libavfilter/x86/vf_yadif_init.c b/libavfilter/x86/vf_yadif_init.c
index 863e7df87b..c39bc44da6 100644
--- a/libavfilter/x86/vf_yadif_init.c
+++ b/libavfilter/x86/vf_yadif_init.c
@@ -1,26 +1,25 @@
/*
* Copyright (C) 2006 Michael Niedermayer <michaelni@gmx.at>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
-#include "libavutil/internal.h"
#include "libavutil/mem.h"
#include "libavutil/x86/cpu.h"
#include "libavfilter/yadif.h"
@@ -35,16 +34,63 @@ void ff_yadif_filter_line_ssse3(void *dst, void *prev, void *cur,
void *next, int w, int prefs,
int mrefs, int parity, int mode);
+void ff_yadif_filter_line_16bit_mmxext(void *dst, void *prev, void *cur,
+ void *next, int w, int prefs,
+ int mrefs, int parity, int mode);
+void ff_yadif_filter_line_16bit_sse2(void *dst, void *prev, void *cur,
+ void *next, int w, int prefs,
+ int mrefs, int parity, int mode);
+void ff_yadif_filter_line_16bit_ssse3(void *dst, void *prev, void *cur,
+ void *next, int w, int prefs,
+ int mrefs, int parity, int mode);
+void ff_yadif_filter_line_16bit_sse4(void *dst, void *prev, void *cur,
+ void *next, int w, int prefs,
+ int mrefs, int parity, int mode);
+
+void ff_yadif_filter_line_10bit_mmxext(void *dst, void *prev, void *cur,
+ void *next, int w, int prefs,
+ int mrefs, int parity, int mode);
+void ff_yadif_filter_line_10bit_sse2(void *dst, void *prev, void *cur,
+ void *next, int w, int prefs,
+ int mrefs, int parity, int mode);
+void ff_yadif_filter_line_10bit_ssse3(void *dst, void *prev, void *cur,
+ void *next, int w, int prefs,
+ int mrefs, int parity, int mode);
+
av_cold void ff_yadif_init_x86(YADIFContext *yadif)
{
int cpu_flags = av_get_cpu_flags();
+ int bit_depth = (!yadif->csp) ? 8
+ : yadif->csp->comp[0].depth;
+ if (bit_depth >= 15) {
+#if ARCH_X86_32
+ if (EXTERNAL_MMXEXT(cpu_flags))
+ yadif->filter_line = ff_yadif_filter_line_16bit_mmxext;
+#endif /* ARCH_X86_32 */
+ if (EXTERNAL_SSE2(cpu_flags))
+ yadif->filter_line = ff_yadif_filter_line_16bit_sse2;
+ if (EXTERNAL_SSSE3(cpu_flags))
+ yadif->filter_line = ff_yadif_filter_line_16bit_ssse3;
+ if (EXTERNAL_SSE4(cpu_flags))
+ yadif->filter_line = ff_yadif_filter_line_16bit_sse4;
+ } else if ( bit_depth >= 9 && bit_depth <= 14) {
+#if ARCH_X86_32
+ if (EXTERNAL_MMXEXT(cpu_flags))
+ yadif->filter_line = ff_yadif_filter_line_10bit_mmxext;
+#endif /* ARCH_X86_32 */
+ if (EXTERNAL_SSE2(cpu_flags))
+ yadif->filter_line = ff_yadif_filter_line_10bit_sse2;
+ if (EXTERNAL_SSSE3(cpu_flags))
+ yadif->filter_line = ff_yadif_filter_line_10bit_ssse3;
+ } else {
#if ARCH_X86_32
- if (EXTERNAL_MMXEXT(cpu_flags))
- yadif->filter_line = ff_yadif_filter_line_mmxext;
+ if (EXTERNAL_MMXEXT(cpu_flags))
+ yadif->filter_line = ff_yadif_filter_line_mmxext;
#endif /* ARCH_X86_32 */
- if (EXTERNAL_SSE2(cpu_flags))
- yadif->filter_line = ff_yadif_filter_line_sse2;
- if (EXTERNAL_SSSE3(cpu_flags))
- yadif->filter_line = ff_yadif_filter_line_ssse3;
+ if (EXTERNAL_SSE2(cpu_flags))
+ yadif->filter_line = ff_yadif_filter_line_sse2;
+ if (EXTERNAL_SSSE3(cpu_flags))
+ yadif->filter_line = ff_yadif_filter_line_ssse3;
+ }
}
diff --git a/libavfilter/x86/yadif-10.asm b/libavfilter/x86/yadif-10.asm
new file mode 100644
index 0000000000..8853e0d2c7
--- /dev/null
+++ b/libavfilter/x86/yadif-10.asm
@@ -0,0 +1,255 @@
+;*****************************************************************************
+;* x86-optimized functions for yadif filter
+;*
+;* Copyright (C) 2006 Michael Niedermayer <michaelni@gmx.at>
+;* Copyright (c) 2013 Daniel Kang <daniel.d.kang@gmail.com>
+;* Copyright (c) 2011-2013 James Darnley <james.darnley@gmail.com>
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+%include "libavutil/x86/x86util.asm"
+
+SECTION_RODATA
+
+pw_1: times 8 dw 1
+
+SECTION .text
+
+%macro PMAXUW 2
+%if cpuflag(sse4)
+ pmaxuw %1, %2
+%else
+ psubusw %1, %2
+ paddusw %1, %2
+%endif
+%endmacro
+
+%macro CHECK 2
+ movu m2, [curq+t1+%1*2]
+ movu m3, [curq+t0+%2*2]
+ mova m4, m2
+ mova m5, m2
+ pxor m4, m3
+ pavgw m5, m3
+ pand m4, [pw_1]
+ psubusw m5, m4
+ RSHIFT m5, 2
+ mova m4, m2
+ psubusw m2, m3
+ psubusw m3, m4
+ PMAXUW m2, m3
+ mova m3, m2
+ mova m4, m2
+ RSHIFT m3, 2
+ RSHIFT m4, 4
+ paddw m2, m3
+ paddw m2, m4
+%endmacro
+
+%macro CHECK1 0
+ mova m3, m0
+ pcmpgtw m3, m2
+ pminsw m0, m2
+ mova m6, m3
+ pand m5, m3
+ pandn m3, m1
+ por m3, m5
+ mova m1, m3
+%endmacro
+
+; %macro CHECK2 0
+; paddw m6, [pw_1]
+; psllw m6, 14
+; paddsw m2, m6
+; mova m3, m0
+; pcmpgtw m3, m2
+; pminsw m0, m2
+; pand m5, m3
+; pandn m3, m1
+; por m3, m5
+; mova m1, m3
+; %endmacro
+
+; This version of CHECK2 is required for 14-bit samples. The left-shift trick
+; in the old code is not large enough to correctly select pixels or scores.
+
+%macro CHECK2 0
+ mova m3, m0
+ pcmpgtw m0, m2
+ pand m0, m6
+ mova m6, m0
+ pand m5, m6
+ pand m2, m0
+ pandn m6, m1
+ pandn m0, m3
+ por m6, m5
+ por m0, m2
+ mova m1, m6
+%endmacro
+
+%macro LOAD 2
+ movu %1, %2
+%endmacro
+
+%macro FILTER 3
+.loop%1:
+ pxor m7, m7
+ LOAD m0, [curq+t1]
+ LOAD m1, [curq+t0]
+ LOAD m2, [%2]
+ LOAD m3, [%3]
+ mova m4, m3
+ paddw m3, m2
+ psraw m3, 1
+ mova [rsp+ 0], m0
+ mova [rsp+16], m3
+ mova [rsp+32], m1
+ psubw m2, m4
+ ABS1 m2, m4
+ LOAD m3, [prevq+t1]
+ LOAD m4, [prevq+t0]
+ psubw m3, m0
+ psubw m4, m1
+ ABS2 m3, m4, m5, m6
+ paddw m3, m4
+ psrlw m2, 1
+ psrlw m3, 1
+ pmaxsw m2, m3
+ LOAD m3, [nextq+t1]
+ LOAD m4, [nextq+t0]
+ psubw m3, m0
+ psubw m4, m1
+ ABS2 m3, m4, m5, m6
+ paddw m3, m4
+ psrlw m3, 1
+ pmaxsw m2, m3
+ mova [rsp+48], m2
+
+ paddw m1, m0
+ paddw m0, m0
+ psubw m0, m1
+ psrlw m1, 1
+ ABS1 m0, m2
+
+ movu m2, [curq+t1-1*2]
+ movu m3, [curq+t0-1*2]
+ mova m4, m2
+ psubusw m2, m3
+ psubusw m3, m4
+ PMAXUW m2, m3
+ mova m3, m2
+ RSHIFT m3, 4
+ paddw m0, m2
+ paddw m0, m3
+ psubw m0, [pw_1]
+
+ CHECK -2, 0
+ CHECK1
+ CHECK -3, 1
+ CHECK2
+ CHECK 0, -2
+ CHECK1
+ CHECK 1, -3
+ CHECK2
+
+ mova m6, [rsp+48]
+ cmp DWORD r8m, 2
+ jge .end%1
+ LOAD m2, [%2+t1*2]
+ LOAD m4, [%3+t1*2]
+ LOAD m3, [%2+t0*2]
+ LOAD m5, [%3+t0*2]
+ paddw m2, m4
+ paddw m3, m5
+ psrlw m2, 1
+ psrlw m3, 1
+ mova m4, [rsp+ 0]
+ mova m5, [rsp+16]
+ mova m7, [rsp+32]
+ psubw m2, m4
+ psubw m3, m7
+ mova m0, m5
+ psubw m5, m4
+ psubw m0, m7
+ mova m4, m2
+ pminsw m2, m3
+ pmaxsw m3, m4
+ pmaxsw m2, m5
+ pminsw m3, m5
+ pmaxsw m2, m0
+ pminsw m3, m0
+ pxor m4, m4
+ pmaxsw m6, m3
+ psubw m4, m2
+ pmaxsw m6, m4
+
+.end%1:
+ mova m2, [rsp+16]
+ mova m3, m2
+ psubw m2, m6
+ paddw m3, m6
+ pmaxsw m1, m2
+ pminsw m1, m3
+
+ movu [dstq], m1
+ add dstq, mmsize-4
+ add prevq, mmsize-4
+ add curq, mmsize-4
+ add nextq, mmsize-4
+ sub DWORD r4m, mmsize/2-2
+ jg .loop%1
+%endmacro
+
+%macro YADIF 0
+%if ARCH_X86_32
+cglobal yadif_filter_line_10bit, 4, 6, 8, 80, dst, prev, cur, next, w, \
+ prefs, mrefs, parity, mode
+%else
+cglobal yadif_filter_line_10bit, 4, 7, 8, 80, dst, prev, cur, next, w, \
+ prefs, mrefs, parity, mode
+%endif
+%if ARCH_X86_32
+ mov r4, r5mp
+ mov r5, r6mp
+ DECLARE_REG_TMP 4,5
+%else
+ movsxd r5, DWORD r5m
+ movsxd r6, DWORD r6m
+ DECLARE_REG_TMP 5,6
+%endif
+
+ cmp DWORD paritym, 0
+ je .parity0
+ FILTER 1, prevq, curq
+ jmp .ret
+
+.parity0:
+ FILTER 0, curq, nextq
+
+.ret:
+ RET
+%endmacro
+
+INIT_XMM ssse3
+YADIF
+INIT_XMM sse2
+YADIF
+%if ARCH_X86_32
+INIT_MMX mmxext
+YADIF
+%endif
diff --git a/libavfilter/x86/yadif-16.asm b/libavfilter/x86/yadif-16.asm
new file mode 100644
index 0000000000..79d127dfaa
--- /dev/null
+++ b/libavfilter/x86/yadif-16.asm
@@ -0,0 +1,317 @@
+;*****************************************************************************
+;* x86-optimized functions for yadif filter
+;*
+;* Copyright (C) 2006 Michael Niedermayer <michaelni@gmx.at>
+;* Copyright (c) 2013 Daniel Kang <daniel.d.kang@gmail.com>
+;* Copyright (c) 2011-2013 James Darnley <james.darnley@gmail.com>
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+%include "libavutil/x86/x86util.asm"
+
+SECTION_RODATA
+
+pw_1: times 8 dw 1
+pw_8000: times 8 dw 0x8000
+pd_1: times 4 dd 1
+pd_8000: times 4 dd 0x8000
+
+SECTION .text
+
+%macro PABS 2
+%if cpuflag(ssse3)
+ pabsd %1, %1
+%else
+ pxor %2, %2
+ pcmpgtd %2, %1
+ pxor %1, %2
+ psubd %1, %2
+%endif
+%endmacro
+
+%macro PACK 1
+%if cpuflag(sse4)
+ packusdw %1, %1
+%else
+ psubd %1, [pd_8000]
+ packssdw %1, %1
+ paddw %1, [pw_8000]
+%endif
+%endmacro
+
+%macro PMINSD 3
+%if cpuflag(sse4)
+ pminsd %1, %2
+%else
+ mova %3, %2
+ pcmpgtd %3, %1
+ pand %1, %3
+ pandn %3, %2
+ por %1, %3
+%endif
+%endmacro
+
+%macro PMAXSD 3
+%if cpuflag(sse4)
+ pmaxsd %1, %2
+%else
+ mova %3, %1
+ pcmpgtd %3, %2
+ pand %1, %3
+ pandn %3, %2
+ por %1, %3
+%endif
+%endmacro
+
+%macro PMAXUW 2
+%if cpuflag(sse4)
+ pmaxuw %1, %2
+%else
+ psubusw %1, %2
+ paddusw %1, %2
+%endif
+%endmacro
+
+%macro CHECK 2
+ movu m2, [curq+t1+%1*2]
+ movu m3, [curq+t0+%2*2]
+ mova m4, m2
+ mova m5, m2
+ pxor m4, m3
+ pavgw m5, m3
+ pand m4, [pw_1]
+ psubusw m5, m4
+ RSHIFT m5, 2
+ punpcklwd m5, m7
+ mova m4, m2
+ psubusw m2, m3
+ psubusw m3, m4
+ PMAXUW m2, m3
+ mova m3, m2
+ mova m4, m2
+ RSHIFT m3, 2
+ RSHIFT m4, 4
+ punpcklwd m2, m7
+ punpcklwd m3, m7
+ punpcklwd m4, m7
+ paddd m2, m3
+ paddd m2, m4
+%endmacro
+
+%macro CHECK1 0
+ mova m3, m0
+ pcmpgtd m3, m2
+ PMINSD m0, m2, m6
+ mova m6, m3
+ pand m5, m3
+ pandn m3, m1
+ por m3, m5
+ mova m1, m3
+%endmacro
+
+%macro CHECK2 0
+ paddd m6, [pd_1]
+ pslld m6, 30
+ paddd m2, m6
+ mova m3, m0
+ pcmpgtd m3, m2
+ PMINSD m0, m2, m4
+ pand m5, m3
+ pandn m3, m1
+ por m3, m5
+ mova m1, m3
+%endmacro
+
+; This version of CHECK2 has 3 fewer instructions on sets older than SSE4 but I
+; am not sure whether it is any faster. A rewrite or refactor of the filter
+; code should make it possible to eliminate the move instruction at the end. It
+; exists to satisfy the expectation that the "score" values are in m1.
+
+; %macro CHECK2 0
+; mova m3, m0
+; pcmpgtd m0, m2
+; pand m0, m6
+; mova m6, m0
+; pand m5, m6
+; pand m2, m0
+; pandn m6, m1
+; pandn m0, m3
+; por m6, m5
+; por m0, m2
+; mova m1, m6
+; %endmacro
+
+%macro LOAD 2
+ movh %1, %2
+ punpcklwd %1, m7
+%endmacro
+
+%macro FILTER 3
+.loop%1:
+ pxor m7, m7
+ LOAD m0, [curq+t1]
+ LOAD m1, [curq+t0]
+ LOAD m2, [%2]
+ LOAD m3, [%3]
+ mova m4, m3
+ paddd m3, m2
+ psrad m3, 1
+ mova [rsp+ 0], m0
+ mova [rsp+16], m3
+ mova [rsp+32], m1
+ psubd m2, m4
+ PABS m2, m4
+ LOAD m3, [prevq+t1]
+ LOAD m4, [prevq+t0]
+ psubd m3, m0
+ psubd m4, m1
+ PABS m3, m5
+ PABS m4, m5
+ paddd m3, m4
+ psrld m2, 1
+ psrld m3, 1
+ PMAXSD m2, m3, m6
+ LOAD m3, [nextq+t1]
+ LOAD m4, [nextq+t0]
+ psubd m3, m0
+ psubd m4, m1
+ PABS m3, m5
+ PABS m4, m5
+ paddd m3, m4
+ psrld m3, 1
+ PMAXSD m2, m3, m6
+ mova [rsp+48], m2
+
+ paddd m1, m0
+ paddd m0, m0
+ psubd m0, m1
+ psrld m1, 1
+ PABS m0, m2
+
+ movu m2, [curq+t1-1*2]
+ movu m3, [curq+t0-1*2]
+ mova m4, m2
+ psubusw m2, m3
+ psubusw m3, m4
+ PMAXUW m2, m3
+ mova m3, m2
+ RSHIFT m3, 4
+ punpcklwd m2, m7
+ punpcklwd m3, m7
+ paddd m0, m2
+ paddd m0, m3
+ psubd m0, [pd_1]
+
+ CHECK -2, 0
+ CHECK1
+ CHECK -3, 1
+ CHECK2
+ CHECK 0, -2
+ CHECK1
+ CHECK 1, -3
+ CHECK2
+
+ mova m6, [rsp+48]
+ cmp DWORD r8m, 2
+ jge .end%1
+ LOAD m2, [%2+t1*2]
+ LOAD m4, [%3+t1*2]
+ LOAD m3, [%2+t0*2]
+ LOAD m5, [%3+t0*2]
+ paddd m2, m4
+ paddd m3, m5
+ psrld m2, 1
+ psrld m3, 1
+ mova m4, [rsp+ 0]
+ mova m5, [rsp+16]
+ mova m7, [rsp+32]
+ psubd m2, m4
+ psubd m3, m7
+ mova m0, m5
+ psubd m5, m4
+ psubd m0, m7
+ mova m4, m2
+ PMINSD m2, m3, m7
+ PMAXSD m3, m4, m7
+ PMAXSD m2, m5, m7
+ PMINSD m3, m5, m7
+ PMAXSD m2, m0, m7
+ PMINSD m3, m0, m7
+ pxor m4, m4
+ PMAXSD m6, m3, m7
+ psubd m4, m2
+ PMAXSD m6, m4, m7
+
+.end%1:
+ mova m2, [rsp+16]
+ mova m3, m2
+ psubd m2, m6
+ paddd m3, m6
+ PMAXSD m1, m2, m7
+ PMINSD m1, m3, m7
+ PACK m1
+
+ movh [dstq], m1
+ add dstq, mmsize/2
+ add prevq, mmsize/2
+ add curq, mmsize/2
+ add nextq, mmsize/2
+ sub DWORD r4m, mmsize/4
+ jg .loop%1
+%endmacro
+
+%macro YADIF 0
+%if ARCH_X86_32
+cglobal yadif_filter_line_16bit, 4, 6, 8, 80, dst, prev, cur, next, w, \
+ prefs, mrefs, parity, mode
+%else
+cglobal yadif_filter_line_16bit, 4, 7, 8, 80, dst, prev, cur, next, w, \
+ prefs, mrefs, parity, mode
+%endif
+%if ARCH_X86_32
+ mov r4, r5mp
+ mov r5, r6mp
+ DECLARE_REG_TMP 4,5
+%else
+ movsxd r5, DWORD r5m
+ movsxd r6, DWORD r6m
+ DECLARE_REG_TMP 5,6
+%endif
+
+ cmp DWORD paritym, 0
+ je .parity0
+ FILTER 1, prevq, curq
+ jmp .ret
+
+.parity0:
+ FILTER 0, curq, nextq
+
+.ret:
+ RET
+%endmacro
+
+INIT_XMM sse4
+YADIF
+INIT_XMM ssse3
+YADIF
+INIT_XMM sse2
+YADIF
+%if ARCH_X86_32
+INIT_MMX mmxext
+YADIF
+%endif
diff --git a/libavfilter/yadif.h b/libavfilter/yadif.h
index 75e35c4bcf..d23d1380d0 100644
--- a/libavfilter/yadif.h
+++ b/libavfilter/yadif.h
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -22,31 +22,33 @@
#include "libavutil/pixdesc.h"
#include "avfilter.h"
+enum YADIFMode {
+ YADIF_MODE_SEND_FRAME = 0, ///< send 1 frame for each frame
+ YADIF_MODE_SEND_FIELD = 1, ///< send 1 frame for each field
+ YADIF_MODE_SEND_FRAME_NOSPATIAL = 2, ///< send 1 frame for each frame but skips spatial interlacing check
+ YADIF_MODE_SEND_FIELD_NOSPATIAL = 3, ///< send 1 frame for each field but skips spatial interlacing check
+};
+
+enum YADIFParity {
+ YADIF_PARITY_TFF = 0, ///< top field first
+ YADIF_PARITY_BFF = 1, ///< bottom field first
+ YADIF_PARITY_AUTO = -1, ///< auto detection
+};
+
+enum YADIFDeint {
+ YADIF_DEINT_ALL = 0, ///< deinterlace all frames
+ YADIF_DEINT_INTERLACED = 1, ///< only deinterlace frames marked as interlaced
+};
+
typedef struct YADIFContext {
const AVClass *class;
- /**
- * 0: send 1 frame for each frame
- * 1: send 1 frame for each field
- * 2: like 0 but skips spatial interlacing check
- * 3: like 1 but skips spatial interlacing check
- */
- int mode;
- /**
- * 0: top field first
- * 1: bottom field first
- * -1: auto-detection
- */
- int parity;
+ int mode; ///< YADIFMode
+ int parity; ///< YADIFParity
+ int deint; ///< YADIFDeint
int frame_pending;
- /**
- * 0: deinterlace all frames
- * 1: only deinterlace frames marked as interlaced
- */
- int auto_enable;
-
AVFrame *cur;
AVFrame *next;
AVFrame *prev;
@@ -63,6 +65,8 @@ typedef struct YADIFContext {
const AVPixFmtDescriptor *csp;
int eof;
+ uint8_t *temp_line;
+ int temp_line_size;
} YADIFContext;
void ff_yadif_init_x86(YADIFContext *yadif);