Diffstat (limited to 'libavdevice')
-rw-r--r--  libavdevice/Makefile                                         |   47
-rw-r--r--  libavdevice/alldevices.c                                     |   27
-rw-r--r--  libavdevice/alsa.c                                           |  143
-rw-r--r--  libavdevice/alsa.h                                           |   28
-rw-r--r--  libavdevice/alsa_dec.c                                       |   72
-rw-r--r--  libavdevice/alsa_enc.c                                       |   71
-rw-r--r--  libavdevice/avdevice.c                                       |  247
-rw-r--r--  libavdevice/avdevice.h                                       |  450
-rw-r--r--  libavdevice/avdeviceres.rc                                   |   55
-rw-r--r--  libavdevice/avfoundation.m                                   | 1049
-rw-r--r--  libavdevice/avfoundation_dec.m                               |  686
-rw-r--r--  libavdevice/bktr.c                                           |   35
-rw-r--r--  libavdevice/caca.c                                           |  241
-rw-r--r--  libavdevice/decklink_common.cpp                              |  241
-rw-r--r--  libavdevice/decklink_common.h                                |  109
-rw-r--r--  libavdevice/decklink_common_c.h                              |   39
-rw-r--r--  libavdevice/decklink_dec.cpp                                 |  646
-rw-r--r--  libavdevice/decklink_dec.h                                   |   37
-rw-r--r--  libavdevice/decklink_dec_c.c                                 |   59
-rw-r--r--  libavdevice/decklink_enc.cpp                                 |  426
-rw-r--r--  libavdevice/decklink_enc.h                                   |   37
-rw-r--r--  libavdevice/decklink_enc_c.c                                 |   57
-rw-r--r--  libavdevice/dshow.c                                          | 1310
-rw-r--r--  libavdevice/dshow_capture.h                                  |  352
-rw-r--r--  libavdevice/dshow_common.c                                   |  190
-rw-r--r--  libavdevice/dshow_crossbar.c                                 |  208
-rw-r--r--  libavdevice/dshow_enummediatypes.c                           |  105
-rw-r--r--  libavdevice/dshow_enumpins.c                                 |  105
-rw-r--r--  libavdevice/dshow_filter.c                                   |  202
-rw-r--r--  libavdevice/dshow_pin.c                                      |  384
-rw-r--r--  libavdevice/dv1394.c                                         |   15
-rw-r--r--  libavdevice/dv1394.h                                         |    8
-rw-r--r--  libavdevice/fbdev_common.c                                   |  134
-rw-r--r--  libavdevice/fbdev_common.h                                   |   38
-rw-r--r--  libavdevice/fbdev_dec.c (renamed from libavdevice/fbdev.c)   |  114
-rw-r--r--  libavdevice/fbdev_enc.c                                      |  220
-rw-r--r--  libavdevice/gdigrab.c                                        |  649
-rw-r--r--  libavdevice/iec61883.c                                       |  500
-rw-r--r--  libavdevice/internal.h                                       |   28
-rw-r--r--  libavdevice/jack.c                                           |   43
-rw-r--r--  libavdevice/lavfi.c                                          |  517
-rw-r--r--  libavdevice/libavdevice.v                                    |    2
-rw-r--r--  libavdevice/libcdio.c                                        |   23
-rw-r--r--  libavdevice/libdc1394.c                                      |   23
-rw-r--r--  libavdevice/openal-dec.c                                     |  254
-rw-r--r--  libavdevice/opengl_enc.c                                     | 1307
-rw-r--r--  libavdevice/opengl_enc_shaders.h                             |  188
-rw-r--r--  libavdevice/oss.c                                            |   31
-rw-r--r--  libavdevice/oss.h                                            |    8
-rw-r--r--  libavdevice/oss_dec.c                                        |   13
-rw-r--r--  libavdevice/oss_enc.c                                        |   20
-rw-r--r--  libavdevice/pulse.c                                          |  197
-rw-r--r--  libavdevice/pulse_audio_common.c                             |  249
-rw-r--r--  libavdevice/pulse_audio_common.h                             |   40
-rw-r--r--  libavdevice/pulse_audio_dec.c                                |  376
-rw-r--r--  libavdevice/pulse_audio_enc.c                                |  796
-rw-r--r--  libavdevice/qtkit.m                                          |  362
-rw-r--r--  libavdevice/sdl.c                                            |  377
-rw-r--r--  libavdevice/sndio.c                                          |   10
-rw-r--r--  libavdevice/sndio.h                                          |   10
-rw-r--r--  libavdevice/sndio_dec.c                                      |    9
-rw-r--r--  libavdevice/sndio_enc.c                                      |   18
-rw-r--r--  libavdevice/timefilter-test.c                                |   24
-rw-r--r--  libavdevice/timefilter.c                                     |   36
-rw-r--r--  libavdevice/timefilter.h                                     |   27
-rw-r--r--  libavdevice/utils.c                                          |   59
-rw-r--r--  libavdevice/v4l.c                                            |  364
-rw-r--r--  libavdevice/v4l2-common.c                                    |  105
-rw-r--r--  libavdevice/v4l2-common.h                                    |   62
-rw-r--r--  libavdevice/v4l2.c                                           | 1039
-rw-r--r--  libavdevice/v4l2enc.c                                        |  119
-rw-r--r--  libavdevice/version.h                                        |   14
-rw-r--r--  libavdevice/vfwcap.c                                         |   51
-rw-r--r--  libavdevice/x11grab.c                                        |  164
-rw-r--r--  libavdevice/xcbgrab.c                                        |   49
-rw-r--r--  libavdevice/xv.c                                             |  386
76 files changed, 14975 insertions, 1761 deletions
diff --git a/libavdevice/Makefile b/libavdevice/Makefile
index b3b53da8f2..585827be82 100644
--- a/libavdevice/Makefile
+++ b/libavdevice/Makefile
@@ -1,3 +1,5 @@
+include $(SUBDIR)../config.mak
+
NAME = avdevice
HEADERS = avdevice.h \
@@ -5,31 +7,66 @@ HEADERS = avdevice.h \
OBJS = alldevices.o \
avdevice.o \
+ utils.o \
OBJS-$(HAVE_LIBC_MSVCRT) += file_open.o
# input/output devices
-OBJS-$(CONFIG_ALSA_INDEV) += alsa_dec.o alsa.o
+OBJS-$(CONFIG_ALSA_INDEV) += alsa_dec.o alsa.o timefilter.o
OBJS-$(CONFIG_ALSA_OUTDEV) += alsa_enc.o alsa.o
-OBJS-$(CONFIG_AVFOUNDATION_INDEV) += avfoundation_dec.o
+OBJS-$(CONFIG_AVFOUNDATION_INDEV) += avfoundation.o
OBJS-$(CONFIG_BKTR_INDEV) += bktr.o
+OBJS-$(CONFIG_CACA_OUTDEV) += caca.o
+OBJS-$(CONFIG_DECKLINK_OUTDEV) += decklink_enc.o decklink_enc_c.o decklink_common.o
+OBJS-$(CONFIG_DECKLINK_INDEV) += decklink_dec.o decklink_dec_c.o decklink_common.o
+OBJS-$(CONFIG_DSHOW_INDEV) += dshow_crossbar.o dshow.o dshow_enummediatypes.o \
+ dshow_enumpins.o dshow_filter.o \
+ dshow_pin.o dshow_common.o
OBJS-$(CONFIG_DV1394_INDEV) += dv1394.o
-OBJS-$(CONFIG_FBDEV_INDEV) += fbdev.o
+OBJS-$(CONFIG_FBDEV_INDEV) += fbdev_dec.o \
+ fbdev_common.o
+OBJS-$(CONFIG_FBDEV_OUTDEV) += fbdev_enc.o \
+ fbdev_common.o
+OBJS-$(CONFIG_GDIGRAB_INDEV) += gdigrab.o
+OBJS-$(CONFIG_IEC61883_INDEV) += iec61883.o
OBJS-$(CONFIG_JACK_INDEV) += jack.o timefilter.o
+OBJS-$(CONFIG_LAVFI_INDEV) += lavfi.o
+OBJS-$(CONFIG_OPENAL_INDEV) += openal-dec.o
+OBJS-$(CONFIG_OPENGL_OUTDEV) += opengl_enc.o
OBJS-$(CONFIG_OSS_INDEV) += oss_dec.o oss.o
OBJS-$(CONFIG_OSS_OUTDEV) += oss_enc.o oss.o
-OBJS-$(CONFIG_PULSE_INDEV) += pulse.o
+OBJS-$(CONFIG_PULSE_INDEV) += pulse_audio_dec.o \
+ pulse_audio_common.o timefilter.o
+OBJS-$(CONFIG_PULSE_OUTDEV) += pulse_audio_enc.o \
+ pulse_audio_common.o
+OBJS-$(CONFIG_QTKIT_INDEV) += qtkit.o
+OBJS-$(CONFIG_SDL_OUTDEV) += sdl.o
OBJS-$(CONFIG_SNDIO_INDEV) += sndio_dec.o sndio.o
OBJS-$(CONFIG_SNDIO_OUTDEV) += sndio_enc.o sndio.o
-OBJS-$(CONFIG_V4L2_INDEV) += v4l2.o
+OBJS-$(CONFIG_V4L2_INDEV) += v4l2.o v4l2-common.o timefilter.o
+OBJS-$(CONFIG_V4L2_OUTDEV) += v4l2enc.o v4l2-common.o
+OBJS-$(CONFIG_V4L_INDEV) += v4l.o
OBJS-$(CONFIG_VFWCAP_INDEV) += vfwcap.o
OBJS-$(CONFIG_X11GRAB_INDEV) += x11grab.o
OBJS-$(CONFIG_X11GRAB_XCB_INDEV) += xcbgrab.o
+OBJS-$(CONFIG_XV_OUTDEV) += xv.o
# external libraries
OBJS-$(CONFIG_LIBCDIO_INDEV) += libcdio.o
OBJS-$(CONFIG_LIBDC1394_INDEV) += libdc1394.o
+# Windows resource file
+SLIBOBJS-$(HAVE_GNU_WINDRES) += avdeviceres.o
+
+SKIPHEADERS += decklink_common.h
+SKIPHEADERS-$(CONFIG_DECKLINK) += decklink_enc.h decklink_dec.h \
+ decklink_common_c.h
+SKIPHEADERS-$(CONFIG_DSHOW_INDEV) += dshow_capture.h
+SKIPHEADERS-$(CONFIG_FBDEV_INDEV) += fbdev_common.h
+SKIPHEADERS-$(CONFIG_FBDEV_OUTDEV) += fbdev_common.h
+SKIPHEADERS-$(CONFIG_LIBPULSE) += pulse_audio_common.h
+SKIPHEADERS-$(CONFIG_V4L2_INDEV) += v4l2-common.h
+SKIPHEADERS-$(CONFIG_V4L2_OUTDEV) += v4l2-common.h
SKIPHEADERS-$(HAVE_ALSA_ASOUNDLIB_H) += alsa.h
SKIPHEADERS-$(HAVE_SNDIO_H) += sndio.h
diff --git a/libavdevice/alldevices.c b/libavdevice/alldevices.c
index 8439b5b6e3..26aecf29d9 100644
--- a/libavdevice/alldevices.c
+++ b/libavdevice/alldevices.c
@@ -1,25 +1,24 @@
/*
* Register all the grabbing devices.
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "config.h"
-#include "libavformat/avformat.h"
#include "avdevice.h"
#define REGISTER_OUTDEV(X, x) \
@@ -50,16 +49,28 @@ void avdevice_register_all(void)
REGISTER_INOUTDEV(ALSA, alsa);
REGISTER_INDEV (AVFOUNDATION, avfoundation);
REGISTER_INDEV (BKTR, bktr);
+ REGISTER_OUTDEV (CACA, caca);
+ REGISTER_INOUTDEV(DECKLINK, decklink);
+ REGISTER_INDEV (DSHOW, dshow);
REGISTER_INDEV (DV1394, dv1394);
- REGISTER_INDEV (FBDEV, fbdev);
+ REGISTER_INOUTDEV(FBDEV, fbdev);
+ REGISTER_INDEV (GDIGRAB, gdigrab);
+ REGISTER_INDEV (IEC61883, iec61883);
REGISTER_INDEV (JACK, jack);
+ REGISTER_INDEV (LAVFI, lavfi);
+ REGISTER_INDEV (OPENAL, openal);
+ REGISTER_OUTDEV (OPENGL, opengl);
REGISTER_INOUTDEV(OSS, oss);
- REGISTER_INDEV (PULSE, pulse);
+ REGISTER_INOUTDEV(PULSE, pulse);
+ REGISTER_INDEV (QTKIT, qtkit);
+ REGISTER_OUTDEV (SDL, sdl);
REGISTER_INOUTDEV(SNDIO, sndio);
- REGISTER_INDEV (V4L2, v4l2);
+ REGISTER_INOUTDEV(V4L2, v4l2);
+// REGISTER_INDEV (V4L, v4l);
REGISTER_INDEV (VFWCAP, vfwcap);
REGISTER_INDEV (X11GRAB, x11grab);
REGISTER_INDEV (X11GRAB_XCB, x11grab_xcb);
+ REGISTER_OUTDEV (XV, xv);
/* external libraries */
REGISTER_INDEV (LIBCDIO, libcdio);
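The registration hunk above turns several formerly input-only entries (fbdev, pulse, v4l2) into input/output pairs and registers the newly merged devices. For context, a minimal sketch (not part of the patch) of how an application can walk the registered devices with the iterators this commit adds in the avdevice.c/avdevice.h hunks below; it assumes a build with this tree applied:

    #include <stdio.h>
    #include <libavdevice/avdevice.h>

    int main(void)
    {
        AVInputFormat  *in  = NULL;
        AVOutputFormat *out = NULL;

        avdevice_register_all();

        /* iterate by device category, skipping plain (de)muxers */
        while ((in = av_input_audio_device_next(in)))
            printf("audio input : %s\n", in->name);
        while ((out = av_output_video_device_next(out)))
            printf("video output: %s\n", out->name);
        return 0;
    }

This iteration keys off priv_class->category, which is why the ALSA hunks below add AV_CLASS_CATEGORY_DEVICE_AUDIO_INPUT/OUTPUT to their AVClass definitions.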
diff --git a/libavdevice/alsa.c b/libavdevice/alsa.c
index d394e4377d..8d27913a1a 100644
--- a/libavdevice/alsa.c
+++ b/libavdevice/alsa.c
@@ -3,20 +3,20 @@
* Copyright (c) 2007 Luca Abeni ( lucabe72 email it )
* Copyright (c) 2007 Benoit Fouet ( benoit fouet free fr )
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -29,7 +29,7 @@
*/
#include <alsa/asoundlib.h>
-#include "libavformat/avformat.h"
+#include "avdevice.h"
#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
@@ -62,48 +62,45 @@ static av_cold snd_pcm_format_t codec_id_to_pcm_format(int codec_id)
}
}
-#define REORDER_OUT_50(NAME, TYPE) \
-static void alsa_reorder_ ## NAME ## _out_50(const void *in_v, void *out_v, int n) \
-{ \
- const TYPE *in = in_v; \
- TYPE *out = out_v; \
-\
- while (n-- > 0) { \
+#define MAKE_REORDER_FUNC(NAME, TYPE, CHANNELS, LAYOUT, MAP) \
+static void alsa_reorder_ ## NAME ## _ ## LAYOUT(const void *in_v, \
+ void *out_v, \
+ int n) \
+{ \
+ const TYPE *in = in_v; \
+ TYPE *out = out_v; \
+ \
+ while (n-- > 0) { \
+ MAP \
+ in += CHANNELS; \
+ out += CHANNELS; \
+ } \
+}
+
+#define MAKE_REORDER_FUNCS(CHANNELS, LAYOUT, MAP) \
+ MAKE_REORDER_FUNC(int8, int8_t, CHANNELS, LAYOUT, MAP) \
+ MAKE_REORDER_FUNC(int16, int16_t, CHANNELS, LAYOUT, MAP) \
+ MAKE_REORDER_FUNC(int32, int32_t, CHANNELS, LAYOUT, MAP) \
+ MAKE_REORDER_FUNC(f32, float, CHANNELS, LAYOUT, MAP)
+
+MAKE_REORDER_FUNCS(5, out_50, \
out[0] = in[0]; \
out[1] = in[1]; \
out[2] = in[3]; \
out[3] = in[4]; \
out[4] = in[2]; \
- in += 5; \
- out += 5; \
- } \
-}
+ )
-#define REORDER_OUT_51(NAME, TYPE) \
-static void alsa_reorder_ ## NAME ## _out_51(const void *in_v, void *out_v, int n) \
-{ \
- const TYPE *in = in_v; \
- TYPE *out = out_v; \
-\
- while (n-- > 0) { \
+MAKE_REORDER_FUNCS(6, out_51, \
out[0] = in[0]; \
out[1] = in[1]; \
out[2] = in[4]; \
out[3] = in[5]; \
out[4] = in[2]; \
out[5] = in[3]; \
- in += 6; \
- out += 6; \
- } \
-}
+ )
-#define REORDER_OUT_71(NAME, TYPE) \
-static void alsa_reorder_ ## NAME ## _out_71(const void *in_v, void *out_v, int n) \
-{ \
- const TYPE *in = in_v; \
- TYPE *out = out_v; \
-\
- while (n-- > 0) { \
+MAKE_REORDER_FUNCS(8, out_71, \
out[0] = in[0]; \
out[1] = in[1]; \
out[2] = in[4]; \
@@ -112,23 +109,7 @@ static void alsa_reorder_ ## NAME ## _out_71(const void *in_v, void *out_v, int
out[5] = in[3]; \
out[6] = in[6]; \
out[7] = in[7]; \
- in += 8; \
- out += 8; \
- } \
-}
-
-REORDER_OUT_50(int8, int8_t)
-REORDER_OUT_51(int8, int8_t)
-REORDER_OUT_71(int8, int8_t)
-REORDER_OUT_50(int16, int16_t)
-REORDER_OUT_51(int16, int16_t)
-REORDER_OUT_71(int16, int16_t)
-REORDER_OUT_50(int32, int32_t)
-REORDER_OUT_51(int32, int32_t)
-REORDER_OUT_71(int32, int32_t)
-REORDER_OUT_50(f32, float)
-REORDER_OUT_51(f32, float)
-REORDER_OUT_71(f32, float)
+ )
#define FORMAT_I8 0
#define FORMAT_I16 1
@@ -299,7 +280,7 @@ av_cold int ff_alsa_open(AVFormatContext *ctx, snd_pcm_stream_t mode,
}
if (s->reorder_func) {
s->reorder_buf_size = buffer_size;
- s->reorder_buf = av_malloc(s->reorder_buf_size * s->frame_size);
+ s->reorder_buf = av_malloc_array(s->reorder_buf_size, s->frame_size);
if (!s->reorder_buf)
goto fail1;
}
@@ -320,6 +301,8 @@ av_cold int ff_alsa_close(AVFormatContext *s1)
AlsaData *s = s1->priv_data;
av_freep(&s->reorder_buf);
+ if (CONFIG_ALSA_INDEV)
+ ff_timefilter_destroy(s->timefilter);
snd_pcm_close(s->h);
return 0;
}
@@ -353,10 +336,64 @@ int ff_alsa_extend_reorder_buf(AlsaData *s, int min_size)
av_assert0(size != 0);
while (size < min_size)
size *= 2;
- r = av_realloc(s->reorder_buf, size * s->frame_size);
+ r = av_realloc_array(s->reorder_buf, size, s->frame_size);
if (!r)
return AVERROR(ENOMEM);
s->reorder_buf = r;
s->reorder_buf_size = size;
return 0;
}
+
+/* ported from alsa-utils/aplay.c */
+int ff_alsa_get_device_list(AVDeviceInfoList *device_list, snd_pcm_stream_t stream_type)
+{
+ int ret = 0;
+ void **hints, **n;
+ char *name = NULL, *descr = NULL, *io = NULL, *tmp;
+ AVDeviceInfo *new_device = NULL;
+ const char *filter = stream_type == SND_PCM_STREAM_PLAYBACK ? "Output" : "Input";
+
+ if (snd_device_name_hint(-1, "pcm", &hints) < 0)
+ return AVERROR_EXTERNAL;
+ n = hints;
+ while (*n && !ret) {
+ name = snd_device_name_get_hint(*n, "NAME");
+ descr = snd_device_name_get_hint(*n, "DESC");
+ io = snd_device_name_get_hint(*n, "IOID");
+ if (!io || !strcmp(io, filter)) {
+ new_device = av_mallocz(sizeof(AVDeviceInfo));
+ if (!new_device) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+ new_device->device_name = av_strdup(name);
+ if ((tmp = strrchr(descr, '\n')) && tmp[1])
+ new_device->device_description = av_strdup(&tmp[1]);
+ else
+ new_device->device_description = av_strdup(descr);
+ if (!new_device->device_description || !new_device->device_name) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+ if ((ret = av_dynarray_add_nofree(&device_list->devices,
+ &device_list->nb_devices, new_device)) < 0) {
+ goto fail;
+ }
+ if (!strcmp(new_device->device_name, "default"))
+ device_list->default_device = device_list->nb_devices - 1;
+ new_device = NULL;
+ }
+ fail:
+ free(io);
+ free(name);
+ free(descr);
+ n++;
+ }
+ if (new_device) {
+ av_free(new_device->device_description);
+ av_free(new_device->device_name);
+ av_free(new_device);
+ }
+ snd_device_name_free_hint(hints);
+ return ret;
+}
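ff_alsa_get_device_list() is library-internal; applications reach it through the get_device_list callbacks wired up in alsa_dec.c and alsa_enc.c below. A sketch of the public path, assuming the alsa indev is compiled in (not part of the patch):

    #include <stdio.h>
    #include <libavdevice/avdevice.h>

    static void print_alsa_captures(void)
    {
        AVDeviceInfoList *list = NULL;
        int i, n;

        avdevice_register_all();
        n = avdevice_list_input_sources(av_find_input_format("alsa"),
                                        NULL, NULL, &list);
        if (n < 0)
            return; /* e.g. AVERROR(ENOSYS) if listing is unsupported */
        for (i = 0; i < n; i++)
            printf("%s%s: %s\n",
                   i == list->default_device ? "* " : "  ",
                   list->devices[i]->device_name,
                   list->devices[i]->device_description);
        avdevice_free_list_devices(&list);
    }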
diff --git a/libavdevice/alsa.h b/libavdevice/alsa.h
index 590b354053..cd41d965f7 100644
--- a/libavdevice/alsa.h
+++ b/libavdevice/alsa.h
@@ -3,20 +3,20 @@
* Copyright (c) 2007 Luca Abeni ( lucabe72 email it )
* Copyright (c) 2007 Benoit Fouet ( benoit fouet free fr )
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -32,26 +32,32 @@
#include <alsa/asoundlib.h>
#include "config.h"
-#include "libavformat/avformat.h"
#include "libavutil/log.h"
+#include "timefilter.h"
+#include "avdevice.h"
/* XXX: we make the assumption that the soundcard accepts this format */
/* XXX: find better solution with "preinit" method, needed also in
other formats */
#define DEFAULT_CODEC_ID AV_NE(AV_CODEC_ID_PCM_S16BE, AV_CODEC_ID_PCM_S16LE)
-#define ALSA_BUFFER_SIZE_MAX 32768
+typedef void (*ff_reorder_func)(const void *, void *, int);
+
+#define ALSA_BUFFER_SIZE_MAX 65536
typedef struct AlsaData {
AVClass *class;
snd_pcm_t *h;
- int frame_size; ///< preferred size for reads and writes
- int period_size; ///< bytes per sample * channels
+ int frame_size; ///< bytes per sample * channels
+ int period_size; ///< preferred size for reads and writes, in frames
int sample_rate; ///< sample rate set by user
int channels; ///< number of channels set by user
+ int last_period;
+ TimeFilter *timefilter;
void (*reorder_func)(const void *, void *, int);
void *reorder_buf;
int reorder_buf_size; ///< in frames
+ int64_t timestamp; ///< current timestamp, without latency applied.
} AlsaData;
/**
@@ -68,6 +74,7 @@ typedef struct AlsaData {
*
* @return 0 if OK, AVERROR_xxx on error
*/
+av_warn_unused_result
int ff_alsa_open(AVFormatContext *s, snd_pcm_stream_t mode,
unsigned int *sample_rate,
int channels, enum AVCodecID *codec_id);
@@ -89,8 +96,13 @@ int ff_alsa_close(AVFormatContext *s1);
*
* @return 0 if OK, AVERROR_xxx on error
*/
+av_warn_unused_result
int ff_alsa_xrun_recover(AVFormatContext *s1, int err);
+av_warn_unused_result
int ff_alsa_extend_reorder_buf(AlsaData *s, int size);
+av_warn_unused_result
+int ff_alsa_get_device_list(AVDeviceInfoList *device_list, snd_pcm_stream_t stream_type);
+
#endif /* AVDEVICE_ALSA_H */
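Note that the header hunk swaps the previously misdocumented meanings of frame_size and period_size. With the corrected semantics, the packet sizing in alsa_dec.c below works out as in this small worked example (illustrative values, not code from the patch):

    /* S16 stereo capture, illustrative numbers only */
    int channels         = 2;
    int bytes_per_sample = 2;                           /* S16            */
    int frame_size       = bytes_per_sample * channels; /* 4 bytes/frame  */
    int period_size      = 1024;                        /* frames per read */
    int packet_bytes     = period_size * frame_size;    /* 4096 bytes, the
                                                           size passed to
                                                           av_new_packet() in
                                                           audio_read_packet() */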
diff --git a/libavdevice/alsa_dec.c b/libavdevice/alsa_dec.c
index 58bf1dd6a1..c50ce71506 100644
--- a/libavdevice/alsa_dec.c
+++ b/libavdevice/alsa_dec.c
@@ -3,20 +3,20 @@
* Copyright (c) 2007 Luca Abeni ( lucabe72 email it )
* Copyright (c) 2007 Benoit Fouet ( benoit fouet free fr )
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -27,7 +27,7 @@
* @author Benoit Fouet ( benoit fouet free fr )
* @author Nicolas George ( nicolas george normalesup org )
*
- * This avdevice decoder allows to capture audio from an ALSA (Advanced
+ * This avdevice decoder can capture audio from an ALSA (Advanced
* Linux Sound Architecture) device.
*
* The filename parameter is the name of an ALSA PCM device capable of
@@ -48,11 +48,13 @@
#include <alsa/asoundlib.h>
#include "libavutil/internal.h"
+#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
+#include "libavutil/time.h"
-#include "libavformat/avformat.h"
#include "libavformat/internal.h"
+#include "avdevice.h"
#include "alsa.h"
static av_cold int audio_read_header(AVFormatContext *s1)
@@ -61,7 +63,6 @@ static av_cold int audio_read_header(AVFormatContext *s1)
AVStream *st;
int ret;
enum AVCodecID codec_id;
- snd_pcm_sw_params_t *sw_params;
st = avformat_new_stream(s1, NULL);
if (!st) {
@@ -77,35 +78,18 @@ static av_cold int audio_read_header(AVFormatContext *s1)
return AVERROR(EIO);
}
- if (snd_pcm_type(s->h) != SND_PCM_TYPE_HW)
- av_log(s1, AV_LOG_WARNING,
- "capture with some ALSA plugins, especially dsnoop, "
- "may hang.\n");
-
- ret = snd_pcm_sw_params_malloc(&sw_params);
- if (ret < 0) {
- av_log(s1, AV_LOG_ERROR, "cannot allocate software parameters structure (%s)\n",
- snd_strerror(ret));
- goto fail;
- }
-
- snd_pcm_sw_params_current(s->h, sw_params);
- snd_pcm_sw_params_set_tstamp_mode(s->h, sw_params, SND_PCM_TSTAMP_ENABLE);
-
- ret = snd_pcm_sw_params(s->h, sw_params);
- snd_pcm_sw_params_free(sw_params);
- if (ret < 0) {
- av_log(s1, AV_LOG_ERROR, "cannot install ALSA software parameters (%s)\n",
- snd_strerror(ret));
- goto fail;
- }
-
/* take real parameters */
st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
st->codecpar->codec_id = codec_id;
st->codecpar->sample_rate = s->sample_rate;
st->codecpar->channels = s->channels;
+ st->codecpar->frame_size = s->frame_size;
avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
+ /* microseconds instead of seconds, MHz instead of Hz */
+ s->timefilter = ff_timefilter_new(1000000.0 / s->sample_rate,
+ s->period_size, 1.5E-6);
+ if (!s->timefilter)
+ goto fail;
return 0;
@@ -117,16 +101,15 @@ fail:
static int audio_read_packet(AVFormatContext *s1, AVPacket *pkt)
{
AlsaData *s = s1->priv_data;
- AVStream *st = s1->streams[0];
int res;
- snd_htimestamp_t timestamp;
- snd_pcm_uframes_t ts_delay;
+ int64_t dts;
+ snd_pcm_sframes_t delay = 0;
- if (av_new_packet(pkt, s->period_size) < 0) {
+ if (av_new_packet(pkt, s->period_size * s->frame_size) < 0) {
return AVERROR(EIO);
}
- while ((res = snd_pcm_readi(s->h, pkt->data, pkt->size / s->frame_size)) < 0) {
+ while ((res = snd_pcm_readi(s->h, pkt->data, s->period_size)) < 0) {
if (res == -EAGAIN) {
av_packet_unref(pkt);
@@ -139,20 +122,25 @@ static int audio_read_packet(AVFormatContext *s1, AVPacket *pkt)
return AVERROR(EIO);
}
+ ff_timefilter_reset(s->timefilter);
}
- snd_pcm_htimestamp(s->h, &ts_delay, &timestamp);
- ts_delay += res;
- pkt->pts = timestamp.tv_sec * 1000000LL
- + (timestamp.tv_nsec * st->codecpar->sample_rate
- - (int64_t)ts_delay * 1000000000LL + st->codecpar->sample_rate * 500LL)
- / (st->codecpar->sample_rate * 1000LL);
+ dts = av_gettime();
+ snd_pcm_delay(s->h, &delay);
+ dts -= av_rescale(delay + res, 1000000, s->sample_rate);
+ pkt->pts = ff_timefilter_update(s->timefilter, dts, s->last_period);
+ s->last_period = res;
pkt->size = res * s->frame_size;
return 0;
}
+static int audio_get_device_list(AVFormatContext *h, AVDeviceInfoList *device_list)
+{
+ return ff_alsa_get_device_list(device_list, SND_PCM_STREAM_CAPTURE);
+}
+
static const AVOption options[] = {
{ "sample_rate", "", offsetof(AlsaData, sample_rate), AV_OPT_TYPE_INT, {.i64 = 48000}, 1, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
{ "channels", "", offsetof(AlsaData, channels), AV_OPT_TYPE_INT, {.i64 = 2}, 1, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
@@ -164,6 +152,7 @@ static const AVClass alsa_demuxer_class = {
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
+ .category = AV_CLASS_CATEGORY_DEVICE_AUDIO_INPUT,
};
AVInputFormat ff_alsa_demuxer = {
@@ -173,6 +162,7 @@ AVInputFormat ff_alsa_demuxer = {
.read_header = audio_read_header,
.read_packet = audio_read_packet,
.read_close = ff_alsa_close,
+ .get_device_list = audio_get_device_list,
.flags = AVFMT_NOFILE,
.priv_class = &alsa_demuxer_class,
};
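With these changes the demuxer derives pts from the wall clock minus the device delay and smooths the result with the timefilter, instead of relying on ALSA's htimestamp. A sketch of reading from the device (not part of the patch; the option names are the ones declared in the options[] table above):

    AVFormatContext *fmt = NULL;
    AVDictionary *opts = NULL;
    AVPacket pkt;

    avdevice_register_all();
    av_dict_set(&opts, "sample_rate", "44100", 0);
    av_dict_set(&opts, "channels", "2", 0);
    if (avformat_open_input(&fmt, "default",
                            av_find_input_format("alsa"), &opts) == 0) {
        if (av_read_frame(fmt, &pkt) == 0) {
            /* pkt.pts is in 1/1000000 units (see avpriv_set_pts_info above)
             * and has been jitter-filtered by ff_timefilter_update() */
            av_packet_unref(&pkt);
        }
        avformat_close_input(&fmt);
    }
    av_dict_free(&opts);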
diff --git a/libavdevice/alsa_enc.c b/libavdevice/alsa_enc.c
index 3094b5043e..bddc61f4aa 100644
--- a/libavdevice/alsa_enc.c
+++ b/libavdevice/alsa_enc.c
@@ -3,20 +3,20 @@
* Copyright (c) 2007 Luca Abeni ( lucabe72 email it )
* Copyright (c) 2007 Benoit Fouet ( benoit fouet free fr )
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -26,7 +26,7 @@
* @author Luca Abeni ( lucabe72 email it )
* @author Benoit Fouet ( benoit fouet free fr )
*
- * This avdevice encoder allows to play audio to an ALSA (Advanced Linux
+ * This avdevice encoder can play audio to an ALSA (Advanced Linux
* Sound Architecture) device.
*
* The filename parameter is the name of an ALSA PCM device capable of
@@ -40,20 +40,27 @@
#include <alsa/asoundlib.h>
#include "libavutil/internal.h"
+#include "libavutil/time.h"
-#include "libavformat/avformat.h"
+#include "libavformat/internal.h"
+#include "avdevice.h"
#include "alsa.h"
static av_cold int audio_write_header(AVFormatContext *s1)
{
AlsaData *s = s1->priv_data;
- AVStream *st;
+ AVStream *st = NULL;
unsigned int sample_rate;
enum AVCodecID codec_id;
int res;
+ if (s1->nb_streams != 1 || s1->streams[0]->codecpar->codec_type != AVMEDIA_TYPE_AUDIO) {
+ av_log(s1, AV_LOG_ERROR, "Only a single audio stream is supported.\n");
+ return AVERROR(EINVAL);
+ }
st = s1->streams[0];
+
sample_rate = st->codecpar->sample_rate;
codec_id = st->codecpar->codec_id;
res = ff_alsa_open(s1, SND_PCM_STREAM_PLAYBACK, &sample_rate,
@@ -64,6 +71,7 @@ static av_cold int audio_write_header(AVFormatContext *s1)
st->codecpar->sample_rate, sample_rate);
goto fail;
}
+ avpriv_set_pts_info(st, 64, 1, sample_rate);
return res;
@@ -80,6 +88,10 @@ static int audio_write_packet(AVFormatContext *s1, AVPacket *pkt)
uint8_t *buf = pkt->data;
size /= s->frame_size;
+ if (pkt->dts != AV_NOPTS_VALUE)
+ s->timestamp = pkt->dts;
+ s->timestamp += pkt->duration ? pkt->duration : size;
+
if (s->reorder_func) {
if (size > s->reorder_buf_size)
if (ff_alsa_extend_reorder_buf(s, size))
@@ -104,6 +116,47 @@ static int audio_write_packet(AVFormatContext *s1, AVPacket *pkt)
return 0;
}
+static int audio_write_frame(AVFormatContext *s1, int stream_index,
+ AVFrame **frame, unsigned flags)
+{
+ AlsaData *s = s1->priv_data;
+ AVPacket pkt;
+
+ /* ff_alsa_open() should have accepted only supported formats */
+ if ((flags & AV_WRITE_UNCODED_FRAME_QUERY))
+ return av_sample_fmt_is_planar(s1->streams[stream_index]->codecpar->format) ?
+ AVERROR(EINVAL) : 0;
+ /* set only used fields */
+ pkt.data = (*frame)->data[0];
+ pkt.size = (*frame)->nb_samples * s->frame_size;
+ pkt.dts = (*frame)->pkt_dts;
+ pkt.duration = av_frame_get_pkt_duration(*frame);
+ return audio_write_packet(s1, &pkt);
+}
+
+static void
+audio_get_output_timestamp(AVFormatContext *s1, int stream,
+ int64_t *dts, int64_t *wall)
+{
+ AlsaData *s = s1->priv_data;
+ snd_pcm_sframes_t delay = 0;
+ *wall = av_gettime();
+ snd_pcm_delay(s->h, &delay);
+ *dts = s->timestamp - delay;
+}
+
+static int audio_get_device_list(AVFormatContext *h, AVDeviceInfoList *device_list)
+{
+ return ff_alsa_get_device_list(device_list, SND_PCM_STREAM_PLAYBACK);
+}
+
+static const AVClass alsa_muxer_class = {
+ .class_name = "ALSA muxer",
+ .item_name = av_default_item_name,
+ .version = LIBAVUTIL_VERSION_INT,
+ .category = AV_CLASS_CATEGORY_DEVICE_AUDIO_OUTPUT,
+};
+
AVOutputFormat ff_alsa_muxer = {
.name = "alsa",
.long_name = NULL_IF_CONFIG_SMALL("ALSA audio output"),
@@ -113,5 +166,9 @@ AVOutputFormat ff_alsa_muxer = {
.write_header = audio_write_header,
.write_packet = audio_write_packet,
.write_trailer = ff_alsa_close,
+ .write_uncoded_frame = audio_write_frame,
+ .get_device_list = audio_get_device_list,
+ .get_output_timestamp = audio_get_output_timestamp,
.flags = AVFMT_NOFILE,
+ .priv_class = &alsa_muxer_class,
};
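audio_write_frame() plugs the muxer into libavformat's uncoded-frame path, and audio_get_output_timestamp() backs av_get_output_timestamp(). A sketch of the caller side, assuming 'oc' is an opened "alsa" output context with one packed-PCM stream (not part of the patch):

    /* query first: planar sample formats are rejected with EINVAL */
    if (av_write_uncoded_frame_query(oc, 0) >= 0) {
        /* the muxer takes ownership of 'frame' on success */
        int ret = av_interleaved_write_uncoded_frame(oc, 0, frame);
        if (ret < 0) {
            /* fall back to sending PCM packets via av_write_frame() */
        }
    }

    int64_t dts, wall;
    if (av_get_output_timestamp(oc, 0, &dts, &wall) >= 0) {
        /* dts : last played position in the stream time base (1/sample_rate
         *       here, set by audio_write_header above)
         * wall: av_gettime() at the moment of the query */
    }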
diff --git a/libavdevice/avdevice.c b/libavdevice/avdevice.c
index 5a5c762c8b..01c46924d1 100644
--- a/libavdevice/avdevice.c
+++ b/libavdevice/avdevice.c
@@ -1,36 +1,271 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include "libavutil/avassert.h"
+#include "libavutil/samplefmt.h"
+#include "libavutil/pixfmt.h"
+#include "libavcodec/avcodec.h"
#include "avdevice.h"
+#include "internal.h"
#include "config.h"
+#include "libavutil/ffversion.h"
+const char av_device_ffversion[] = "FFmpeg version " FFMPEG_VERSION;
+
+#define E AV_OPT_FLAG_ENCODING_PARAM
+#define D AV_OPT_FLAG_DECODING_PARAM
+#define A AV_OPT_FLAG_AUDIO_PARAM
+#define V AV_OPT_FLAG_VIDEO_PARAM
+#define OFFSET(x) offsetof(AVDeviceCapabilitiesQuery, x)
+
+const AVOption av_device_capabilities[] = {
+ { "codec", "codec", OFFSET(codec), AV_OPT_TYPE_INT,
+ {.i64 = AV_CODEC_ID_NONE}, AV_CODEC_ID_NONE, INT_MAX, E|D|A|V },
+ { "sample_format", "sample format", OFFSET(sample_format), AV_OPT_TYPE_SAMPLE_FMT,
+ {.i64 = AV_SAMPLE_FMT_NONE}, AV_SAMPLE_FMT_NONE, INT_MAX, E|D|A },
+ { "sample_rate", "sample rate", OFFSET(sample_rate), AV_OPT_TYPE_INT,
+ {.i64 = -1}, -1, INT_MAX, E|D|A },
+ { "channels", "channels", OFFSET(channels), AV_OPT_TYPE_INT,
+ {.i64 = -1}, -1, INT_MAX, E|D|A },
+ { "channel_layout", "channel layout", OFFSET(channel_layout), AV_OPT_TYPE_CHANNEL_LAYOUT,
+ {.i64 = -1}, -1, INT_MAX, E|D|A },
+ { "pixel_format", "pixel format", OFFSET(pixel_format), AV_OPT_TYPE_PIXEL_FMT,
+ {.i64 = AV_PIX_FMT_NONE}, AV_PIX_FMT_NONE, INT_MAX, E|D|V },
+ { "window_size", "window size", OFFSET(window_width), AV_OPT_TYPE_IMAGE_SIZE,
+ {.str = NULL}, -1, INT_MAX, E|D|V },
+ { "frame_size", "frame size", OFFSET(frame_width), AV_OPT_TYPE_IMAGE_SIZE,
+ {.str = NULL}, -1, INT_MAX, E|D|V },
+ { "fps", "fps", OFFSET(fps), AV_OPT_TYPE_RATIONAL,
+ {.dbl = -1}, -1, INT_MAX, E|D|V },
+ { NULL }
+};
+
+#undef E
+#undef D
+#undef A
+#undef V
+#undef OFFSET
+
unsigned avdevice_version(void)
{
+ av_assert0(LIBAVDEVICE_VERSION_MICRO >= 100);
return LIBAVDEVICE_VERSION_INT;
}
const char * avdevice_configuration(void)
{
- return LIBAV_CONFIGURATION;
+ return FFMPEG_CONFIGURATION;
}
const char * avdevice_license(void)
{
#define LICENSE_PREFIX "libavdevice license: "
- return LICENSE_PREFIX LIBAV_LICENSE + sizeof(LICENSE_PREFIX) - 1;
+ return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1;
+}
+
+static void *device_next(void *prev, int output,
+ AVClassCategory c1, AVClassCategory c2)
+{
+ const AVClass *pc;
+ AVClassCategory category = AV_CLASS_CATEGORY_NA;
+ do {
+ if (output) {
+ if (!(prev = av_oformat_next(prev)))
+ break;
+ pc = ((AVOutputFormat *)prev)->priv_class;
+ } else {
+ if (!(prev = av_iformat_next(prev)))
+ break;
+ pc = ((AVInputFormat *)prev)->priv_class;
+ }
+ if (!pc)
+ continue;
+ category = pc->category;
+ } while (category != c1 && category != c2);
+ return prev;
+}
+
+AVInputFormat *av_input_audio_device_next(AVInputFormat *d)
+{
+ return device_next(d, 0, AV_CLASS_CATEGORY_DEVICE_AUDIO_INPUT,
+ AV_CLASS_CATEGORY_DEVICE_INPUT);
+}
+
+AVInputFormat *av_input_video_device_next(AVInputFormat *d)
+{
+ return device_next(d, 0, AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT,
+ AV_CLASS_CATEGORY_DEVICE_INPUT);
+}
+
+AVOutputFormat *av_output_audio_device_next(AVOutputFormat *d)
+{
+ return device_next(d, 1, AV_CLASS_CATEGORY_DEVICE_AUDIO_OUTPUT,
+ AV_CLASS_CATEGORY_DEVICE_OUTPUT);
+}
+
+AVOutputFormat *av_output_video_device_next(AVOutputFormat *d)
+{
+ return device_next(d, 1, AV_CLASS_CATEGORY_DEVICE_VIDEO_OUTPUT,
+ AV_CLASS_CATEGORY_DEVICE_OUTPUT);
+}
+
+int avdevice_app_to_dev_control_message(struct AVFormatContext *s, enum AVAppToDevMessageType type,
+ void *data, size_t data_size)
+{
+ if (!s->oformat || !s->oformat->control_message)
+ return AVERROR(ENOSYS);
+ return s->oformat->control_message(s, type, data, data_size);
+}
+
+int avdevice_dev_to_app_control_message(struct AVFormatContext *s, enum AVDevToAppMessageType type,
+ void *data, size_t data_size)
+{
+ if (!av_format_get_control_message_cb(s))
+ return AVERROR(ENOSYS);
+ return av_format_get_control_message_cb(s)(s, type, data, data_size);
+}
+
+int avdevice_capabilities_create(AVDeviceCapabilitiesQuery **caps, AVFormatContext *s,
+ AVDictionary **device_options)
+{
+ int ret;
+ av_assert0(s && caps);
+ av_assert0(s->iformat || s->oformat);
+ if ((s->oformat && !s->oformat->create_device_capabilities) ||
+ (s->iformat && !s->iformat->create_device_capabilities))
+ return AVERROR(ENOSYS);
+ *caps = av_mallocz(sizeof(**caps));
+ if (!(*caps))
+ return AVERROR(ENOMEM);
+ (*caps)->device_context = s;
+ if (((ret = av_opt_set_dict(s->priv_data, device_options)) < 0))
+ goto fail;
+ if (s->iformat) {
+ if ((ret = s->iformat->create_device_capabilities(s, *caps)) < 0)
+ goto fail;
+ } else {
+ if ((ret = s->oformat->create_device_capabilities(s, *caps)) < 0)
+ goto fail;
+ }
+ av_opt_set_defaults(*caps);
+ return 0;
+ fail:
+ av_freep(caps);
+ return ret;
+}
+
+void avdevice_capabilities_free(AVDeviceCapabilitiesQuery **caps, AVFormatContext *s)
+{
+ if (!s || !caps || !(*caps))
+ return;
+ av_assert0(s->iformat || s->oformat);
+ if (s->iformat) {
+ if (s->iformat->free_device_capabilities)
+ s->iformat->free_device_capabilities(s, *caps);
+ } else {
+ if (s->oformat->free_device_capabilities)
+ s->oformat->free_device_capabilities(s, *caps);
+ }
+ av_freep(caps);
+}
+
+int avdevice_list_devices(AVFormatContext *s, AVDeviceInfoList **device_list)
+{
+ int ret;
+ av_assert0(s);
+ av_assert0(device_list);
+ av_assert0(s->oformat || s->iformat);
+ if ((s->oformat && !s->oformat->get_device_list) ||
+ (s->iformat && !s->iformat->get_device_list)) {
+ *device_list = NULL;
+ return AVERROR(ENOSYS);
+ }
+ *device_list = av_mallocz(sizeof(AVDeviceInfoList));
+ if (!(*device_list))
+ return AVERROR(ENOMEM);
+ /* no default device by default */
+ (*device_list)->default_device = -1;
+ if (s->oformat)
+ ret = s->oformat->get_device_list(s, *device_list);
+ else
+ ret = s->iformat->get_device_list(s, *device_list);
+ if (ret < 0)
+ avdevice_free_list_devices(device_list);
+ return ret;
+}
+
+static int list_devices_for_context(AVFormatContext *s, AVDictionary *options,
+ AVDeviceInfoList **device_list)
+{
+ AVDictionary *tmp = NULL;
+ int ret;
+
+ av_dict_copy(&tmp, options, 0);
+ if ((ret = av_opt_set_dict2(s, &tmp, AV_OPT_SEARCH_CHILDREN)) < 0)
+ goto fail;
+ ret = avdevice_list_devices(s, device_list);
+ fail:
+ av_dict_free(&tmp);
+ avformat_free_context(s);
+ return ret;
+}
+
+int avdevice_list_input_sources(AVInputFormat *device, const char *device_name,
+ AVDictionary *device_options, AVDeviceInfoList **device_list)
+{
+ AVFormatContext *s = NULL;
+ int ret;
+
+ if ((ret = ff_alloc_input_device_context(&s, device, device_name)) < 0)
+ return ret;
+ return list_devices_for_context(s, device_options, device_list);
+}
+
+int avdevice_list_output_sinks(AVOutputFormat *device, const char *device_name,
+ AVDictionary *device_options, AVDeviceInfoList **device_list)
+{
+ AVFormatContext *s = NULL;
+ int ret;
+
+ if ((ret = avformat_alloc_output_context2(&s, device, device_name, NULL)) < 0)
+ return ret;
+ return list_devices_for_context(s, device_options, device_list);
+}
+
+void avdevice_free_list_devices(AVDeviceInfoList **device_list)
+{
+ AVDeviceInfoList *list;
+ AVDeviceInfo *dev;
+ int i;
+
+ av_assert0(device_list);
+ list = *device_list;
+ if (!list)
+ return;
+
+ for (i = 0; i < list->nb_devices; i++) {
+ dev = list->devices[i];
+ if (dev) {
+ av_freep(&dev->device_name);
+ av_freep(&dev->device_description);
+ av_free(dev);
+ }
+ }
+ av_freep(&list->devices);
+ av_freep(device_list);
}
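The dev-to-app direction relies on a callback the application registers on the format context, which avdevice_dev_to_app_control_message() above looks up via av_format_get_control_message_cb(). A sketch of such a handler (not part of the patch; the handler name is hypothetical):

    static int on_dev_message(struct AVFormatContext *s, int type,
                              void *data, size_t data_size)
    {
        if (type == AV_DEV_TO_APP_VOLUME_LEVEL_CHANGED && data)
            printf("device volume changed to %f\n", *(double *)data);
        return 0;
    }

    /* ... after allocating the output-device context 'oc': */
    av_format_set_control_message_cb(oc, on_dev_message);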
diff --git a/libavdevice/avdevice.h b/libavdevice/avdevice.h
index 39166a570a..6b0446cde7 100644
--- a/libavdevice/avdevice.h
+++ b/libavdevice/avdevice.h
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -43,6 +43,11 @@
* @}
*/
+#include "libavutil/log.h"
+#include "libavutil/opt.h"
+#include "libavutil/dict.h"
+#include "libavformat/avformat.h"
+
/**
* Return the LIBAVDEVICE_VERSION_INT constant.
*/
@@ -64,4 +69,441 @@ const char *avdevice_license(void);
*/
void avdevice_register_all(void);
+/**
+ * Audio input devices iterator.
+ *
+ * If d is NULL, returns the first registered input audio device;
+ * if d is non-NULL, returns the next registered input audio device after d,
+ * or NULL if d is the last one.
+ */
+AVInputFormat *av_input_audio_device_next(AVInputFormat *d);
+
+/**
+ * Video input devices iterator.
+ *
+ * If d is NULL, returns the first registered input video device;
+ * if d is non-NULL, returns the next registered input video device after d,
+ * or NULL if d is the last one.
+ */
+AVInputFormat *av_input_video_device_next(AVInputFormat *d);
+
+/**
+ * Audio output devices iterator.
+ *
+ * If d is NULL, returns the first registered output audio device;
+ * if d is non-NULL, returns the next registered output audio device after d,
+ * or NULL if d is the last one.
+ */
+AVOutputFormat *av_output_audio_device_next(AVOutputFormat *d);
+
+/**
+ * Video output devices iterator.
+ *
+ * If d is NULL, returns the first registered output video device;
+ * if d is non-NULL, returns the next registered output video device after d,
+ * or NULL if d is the last one.
+ */
+AVOutputFormat *av_output_video_device_next(AVOutputFormat *d);
+
+typedef struct AVDeviceRect {
+ int x; /**< x coordinate of top left corner */
+ int y; /**< y coordinate of top left corner */
+ int width; /**< width */
+ int height; /**< height */
+} AVDeviceRect;
+
+/**
+ * Message types used by avdevice_app_to_dev_control_message().
+ */
+enum AVAppToDevMessageType {
+ /**
+ * Dummy message.
+ */
+ AV_APP_TO_DEV_NONE = MKBETAG('N','O','N','E'),
+
+ /**
+ * Window size change message.
+ *
+ * Message is sent to the device every time the application changes the size
+ * of the window the device renders to.
+ * Message should also be sent right after the window is created.
+ *
+ * data: AVDeviceRect: new window size.
+ */
+ AV_APP_TO_DEV_WINDOW_SIZE = MKBETAG('G','E','O','M'),
+
+ /**
+ * Repaint request message.
+ *
+ * Message is sent to the device when window has to be repainted.
+ *
+ * data: AVDeviceRect: area required to be repainted.
+ * NULL: whole area is required to be repainted.
+ */
+ AV_APP_TO_DEV_WINDOW_REPAINT = MKBETAG('R','E','P','A'),
+
+ /**
+ * Request pause/play.
+ *
+ * Application requests pause/unpause playback.
+ * Mostly usable with devices that have internal buffer.
+ * By default devices are not paused.
+ *
+ * data: NULL
+ */
+ AV_APP_TO_DEV_PAUSE = MKBETAG('P', 'A', 'U', ' '),
+ AV_APP_TO_DEV_PLAY = MKBETAG('P', 'L', 'A', 'Y'),
+ AV_APP_TO_DEV_TOGGLE_PAUSE = MKBETAG('P', 'A', 'U', 'T'),
+
+ /**
+ * Volume control message.
+ *
+ * Set volume level. It may be device-dependent if volume
+ * is changed per stream or system wide. Per stream volume
+ * change is expected when possible.
+ *
+ * data: double: new volume with range of 0.0 - 1.0.
+ */
+ AV_APP_TO_DEV_SET_VOLUME = MKBETAG('S', 'V', 'O', 'L'),
+
+ /**
+ * Mute control messages.
+ *
+ * Change mute state. It may be device-dependent if mute status
+ * is changed per stream or system wide. Per stream mute status
+ * change is expected when possible.
+ *
+ * data: NULL.
+ */
+ AV_APP_TO_DEV_MUTE = MKBETAG(' ', 'M', 'U', 'T'),
+ AV_APP_TO_DEV_UNMUTE = MKBETAG('U', 'M', 'U', 'T'),
+ AV_APP_TO_DEV_TOGGLE_MUTE = MKBETAG('T', 'M', 'U', 'T'),
+
+ /**
+ * Get volume/mute messages.
+ *
+ * Force the device to send AV_DEV_TO_APP_VOLUME_LEVEL_CHANGED or
+ * AV_DEV_TO_APP_MUTE_STATE_CHANGED command respectively.
+ *
+ * data: NULL.
+ */
+ AV_APP_TO_DEV_GET_VOLUME = MKBETAG('G', 'V', 'O', 'L'),
+ AV_APP_TO_DEV_GET_MUTE = MKBETAG('G', 'M', 'U', 'T'),
+};
+
+/**
+ * Message types used by avdevice_dev_to_app_control_message().
+ */
+enum AVDevToAppMessageType {
+ /**
+ * Dummy message.
+ */
+ AV_DEV_TO_APP_NONE = MKBETAG('N','O','N','E'),
+
+ /**
+ * Create window buffer message.
+ *
+ * Device requests to create a window buffer. Exact meaning is device-
+ * and application-dependent. Message is sent before rendering the first
+ * frame, and all one-shot initializations should be done here.
+ * Application is allowed to ignore preferred window buffer size.
+ *
+ * @note: Application is obligated to inform about window buffer size
+ * with AV_APP_TO_DEV_WINDOW_SIZE message.
+ *
+ * data: AVDeviceRect: preferred size of the window buffer.
+ * NULL: no preferred size of the window buffer.
+ */
+ AV_DEV_TO_APP_CREATE_WINDOW_BUFFER = MKBETAG('B','C','R','E'),
+
+ /**
+ * Prepare window buffer message.
+ *
+ * Device requests to prepare a window buffer for rendering.
+ * Exact meaning is device- and application-dependent.
+ * Message is sent before rendering of each frame.
+ *
+ * data: NULL.
+ */
+ AV_DEV_TO_APP_PREPARE_WINDOW_BUFFER = MKBETAG('B','P','R','E'),
+
+ /**
+ * Display window buffer message.
+ *
+ * Device requests to display a window buffer.
+ * Message is sent when new frame is ready to be displayed.
+ * Usually buffers need to be swapped in handler of this message.
+ *
+ * data: NULL.
+ */
+ AV_DEV_TO_APP_DISPLAY_WINDOW_BUFFER = MKBETAG('B','D','I','S'),
+
+ /**
+ * Destroy window buffer message.
+ *
+ * Device requests to destroy a window buffer.
+ * Message is sent when device is about to be destroyed and window
+ * buffer is not required anymore.
+ *
+ * data: NULL.
+ */
+ AV_DEV_TO_APP_DESTROY_WINDOW_BUFFER = MKBETAG('B','D','E','S'),
+
+ /**
+ * Buffer fullness status messages.
+ *
+ * Device signals buffer overflow/underflow.
+ *
+ * data: NULL.
+ */
+ AV_DEV_TO_APP_BUFFER_OVERFLOW = MKBETAG('B','O','F','L'),
+ AV_DEV_TO_APP_BUFFER_UNDERFLOW = MKBETAG('B','U','F','L'),
+
+ /**
+ * Buffer readable/writable.
+ *
+ * Device informs that buffer is readable/writable.
+ * When possible, the device reports how many bytes can be read or written.
+ *
+ * @warning The device may not send a new message when the number of bytes
+ * that can be read or written changes.
+ *
+ * data: int64_t: amount of bytes available to read/write.
+ * NULL: amount of bytes available to read/write is not known.
+ */
+ AV_DEV_TO_APP_BUFFER_READABLE = MKBETAG('B','R','D',' '),
+ AV_DEV_TO_APP_BUFFER_WRITABLE = MKBETAG('B','W','R',' '),
+
+ /**
+ * Mute state change message.
+ *
+ * Device informs that mute state has changed.
+ *
+ * data: int: 0 for not muted state, non-zero for muted state.
+ */
+ AV_DEV_TO_APP_MUTE_STATE_CHANGED = MKBETAG('C','M','U','T'),
+
+ /**
+ * Volume level change message.
+ *
+ * Device informs that volume level has changed.
+ *
+ * data: double: new volume with range of 0.0 - 1.0.
+ */
+ AV_DEV_TO_APP_VOLUME_LEVEL_CHANGED = MKBETAG('C','V','O','L'),
+};
+
+/**
+ * Send control message from application to device.
+ *
+ * @param s device context.
+ * @param type message type.
+ * @param data message data. Exact type depends on message type.
+ * @param data_size size of message data.
+ * @return >= 0 on success, negative on error.
+ * AVERROR(ENOSYS) when device doesn't implement handler of the message.
+ */
+int avdevice_app_to_dev_control_message(struct AVFormatContext *s,
+ enum AVAppToDevMessageType type,
+ void *data, size_t data_size);
+
+/**
+ * Send control message from device to application.
+ *
+ * @param s device context.
+ * @param type message type.
+ * @param data message data. Can be NULL.
+ * @param data_size size of message data.
+ * @return >= 0 on success, negative on error.
+ * AVERROR(ENOSYS) when application doesn't implement handler of the message.
+ */
+int avdevice_dev_to_app_control_message(struct AVFormatContext *s,
+ enum AVDevToAppMessageType type,
+ void *data, size_t data_size);
+
+/**
+ * The following API allows the user to probe device capabilities (supported
+ * codecs, pixel formats, sample formats, resolutions, channel counts, etc).
+ * It is built on top of the AVOption API.
+ * Queried capabilities make it possible to set up converters of video or audio
+ * parameters that fit to the device.
+ *
+ * List of capabilities that can be queried:
+ * - Capabilities valid for both audio and video devices:
+ * - codec: supported audio/video codecs.
+ * type: AV_OPT_TYPE_INT (AVCodecID value)
+ * - Capabilities valid for audio devices:
+ * - sample_format: supported sample formats.
+ * type: AV_OPT_TYPE_INT (AVSampleFormat value)
+ * - sample_rate: supported sample rates.
+ * type: AV_OPT_TYPE_INT
+ * - channels: supported number of channels.
+ * type: AV_OPT_TYPE_INT
+ * - channel_layout: supported channel layouts.
+ * type: AV_OPT_TYPE_INT64
+ * - Capabilities valid for video devices:
+ * - pixel_format: supported pixel formats.
+ * type: AV_OPT_TYPE_INT (AVPixelFormat value)
+ * - window_size: supported window sizes (describes size of the window size presented to the user).
+ * type: AV_OPT_TYPE_IMAGE_SIZE
+ * - frame_size: supported frame sizes (describes size of provided video frames).
+ * type: AV_OPT_TYPE_IMAGE_SIZE
+ * - fps: supported fps values
+ * type: AV_OPT_TYPE_RATIONAL
+ *
+ * The value of a capability may be set by the user via av_opt_set() on the
+ * AVDeviceCapabilitiesQuery object. Subsequent queries will limit results
+ * to values matching the already-set capabilities. For example, setting a
+ * codec may reduce the number of formats or fps values returned by the next
+ * query. Setting an invalid value may limit results to zero.
+ *
+ * Example usage, based on the opengl output device:
+ *
+ * @code
+ * AVFormatContext *oc = NULL;
+ * AVDeviceCapabilitiesQuery *caps = NULL;
+ * AVOptionRanges *ranges;
+ * int ret;
+ *
+ * if ((ret = avformat_alloc_output_context2(&oc, NULL, "opengl", NULL)) < 0)
+ * goto fail;
+ * if (avdevice_capabilities_create(&caps, oc, NULL) < 0)
+ * goto fail;
+ *
+ * //query codecs
+ * if ((ret = av_opt_query_ranges(&ranges, caps, "codec", AV_OPT_MULTI_COMPONENT_RANGE)) < 0)
+ * goto fail;
+ * //pick codec here and set it
+ * av_opt_set_int(caps, "codec", AV_CODEC_ID_RAWVIDEO, 0);
+ *
+ * //query format
+ * if ((ret = av_opt_query_ranges(&ranges, caps, "pixel_format", AV_OPT_MULTI_COMPONENT_RANGE)) < 0)
+ * goto fail;
+ * //pick format here and set it
+ * av_opt_set_pixel_fmt(caps, "pixel_format", AV_PIX_FMT_YUV420P, 0);
+ *
+ * //query and set more capabilities
+ *
+ * fail:
+ * //clean up code
+ * avdevice_capabilities_free(&caps, oc);
+ * avformat_free_context(oc);
+ * @endcode
+ */
+
+/**
+ * Structure describes device capabilities.
+ *
+ * It is used by devices in conjunction with av_device_capabilities AVOption table
+ * to implement capabilities probing API based on AVOption API. Should not be used directly.
+ */
+typedef struct AVDeviceCapabilitiesQuery {
+ const AVClass *av_class;
+ AVFormatContext *device_context;
+ enum AVCodecID codec;
+ enum AVSampleFormat sample_format;
+ enum AVPixelFormat pixel_format;
+ int sample_rate;
+ int channels;
+ int64_t channel_layout;
+ int window_width;
+ int window_height;
+ int frame_width;
+ int frame_height;
+ AVRational fps;
+} AVDeviceCapabilitiesQuery;
+
+/**
+ * AVOption table used by devices to implement device capabilities API. Should not be used by a user.
+ */
+extern const AVOption av_device_capabilities[];
+
+/**
+ * Initialize capabilities probing API based on AVOption API.
+ *
+ * avdevice_capabilities_free() must be called when query capabilities API is
+ * not used anymore.
+ *
+ * @param[out] caps Device capabilities data. Pointer to a NULL pointer must be passed.
+ * @param s Context of the device.
+ * @param device_options An AVDictionary filled with device-private options.
+ * On return this parameter will be destroyed and replaced with a dict
+ * containing options that were not found. May be NULL.
+ * The same options must be passed later to avformat_write_header() for output
+ * devices or avformat_open_input() for input devices, or at any other place
+ * that affects device-private options.
+ *
+ * @return >= 0 on success, negative otherwise.
+ */
+int avdevice_capabilities_create(AVDeviceCapabilitiesQuery **caps, AVFormatContext *s,
+ AVDictionary **device_options);
+
+/**
+ * Free resources created by avdevice_capabilities_create()
+ *
+ * @param caps Device capabilities data to be freed.
+ * @param s Context of the device.
+ */
+void avdevice_capabilities_free(AVDeviceCapabilitiesQuery **caps, AVFormatContext *s);
+
+/**
+ * Structure describes basic parameters of the device.
+ */
+typedef struct AVDeviceInfo {
+ char *device_name; /**< device name, format depends on device */
+ char *device_description; /**< human friendly name */
+} AVDeviceInfo;
+
+/**
+ * List of devices.
+ */
+typedef struct AVDeviceInfoList {
+ AVDeviceInfo **devices; /**< list of autodetected devices */
+ int nb_devices; /**< number of autodetected devices */
+ int default_device; /**< index of default device or -1 if no default */
+} AVDeviceInfoList;
+
+/**
+ * List devices.
+ *
+ * Returns available device names and their parameters.
+ *
+ * @note: Some devices may accept system-dependent device names that cannot be
+ * autodetected. The list returned by this function cannot be assumed to
+ * always be complete.
+ *
+ * @param s device context.
+ * @param[out] device_list list of autodetected devices.
+ * @return count of autodetected devices, negative on error.
+ */
+int avdevice_list_devices(struct AVFormatContext *s, AVDeviceInfoList **device_list);
+
+/**
+ * Convenient function to free result of avdevice_list_devices().
+ *
+ * @param devices device list to be freed.
+ */
+void avdevice_free_list_devices(AVDeviceInfoList **device_list);
+
+/**
+ * List devices.
+ *
+ * Returns available device names and their parameters.
+ * These are convenient wrappers for avdevice_list_devices().
+ * Device context is allocated and deallocated internally.
+ *
+ * @param device device format. May be NULL if device name is set.
+ * @param device_name device name. May be NULL if device format is set.
+ * @param device_options An AVDictionary filled with device-private options. May be NULL.
+ * The same options must be passed later to avformat_write_header() for output
+ * devices or avformat_open_input() for input devices, or at any other place
+ * that affects device-private options.
+ * @param[out] device_list list of autodetected devices
+ * @return count of autodetected devices, negative on error.
+ * @note device argument takes precedence over device_name when both are set.
+ */
+int avdevice_list_input_sources(struct AVInputFormat *device, const char *device_name,
+ AVDictionary *device_options, AVDeviceInfoList **device_list);
+int avdevice_list_output_sinks(struct AVOutputFormat *device, const char *device_name,
+ AVDictionary *device_options, AVDeviceInfoList **device_list);
+
#endif /* AVDEVICE_AVDEVICE_H */
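A sketch of the app-to-dev direction declared above (not part of the patch): 'oc' is assumed to be an opened output-device context, e.g. the sdl or opengl outdev, and either call may still return AVERROR(ENOSYS) if the device does not handle the message:

    AVDeviceRect rect = { .x = 0, .y = 0, .width = 1280, .height = 720 };
    double volume = 0.5;

    avdevice_app_to_dev_control_message(oc, AV_APP_TO_DEV_WINDOW_SIZE,
                                        &rect, sizeof(rect));
    avdevice_app_to_dev_control_message(oc, AV_APP_TO_DEV_SET_VOLUME,
                                        &volume, sizeof(volume));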
diff --git a/libavdevice/avdeviceres.rc b/libavdevice/avdeviceres.rc
new file mode 100644
index 0000000000..e13e73d57e
--- /dev/null
+++ b/libavdevice/avdeviceres.rc
@@ -0,0 +1,55 @@
+/*
+ * Windows resource file for libavdevice
+ *
+ * Copyright (C) 2012 James Almer
+ * Copyright (C) 2013 Tiancheng "Timothy" Gu
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <windows.h>
+#include "libavdevice/version.h"
+#include "libavutil/ffversion.h"
+#include "config.h"
+
+1 VERSIONINFO
+FILEVERSION LIBAVDEVICE_VERSION_MAJOR, LIBAVDEVICE_VERSION_MINOR, LIBAVDEVICE_VERSION_MICRO, 0
+PRODUCTVERSION LIBAVDEVICE_VERSION_MAJOR, LIBAVDEVICE_VERSION_MINOR, LIBAVDEVICE_VERSION_MICRO, 0
+FILEFLAGSMASK VS_FFI_FILEFLAGSMASK
+FILEOS VOS_NT_WINDOWS32
+FILETYPE VFT_DLL
+{
+ BLOCK "StringFileInfo"
+ {
+ BLOCK "040904B0"
+ {
+ VALUE "CompanyName", "FFmpeg Project"
+ VALUE "FileDescription", "FFmpeg device handling library"
+ VALUE "FileVersion", AV_STRINGIFY(LIBAVDEVICE_VERSION)
+ VALUE "InternalName", "libavdevice"
+ VALUE "LegalCopyright", "Copyright (C) 2000-" AV_STRINGIFY(CONFIG_THIS_YEAR) " FFmpeg Project"
+ VALUE "OriginalFilename", "avdevice" BUILDSUF "-" AV_STRINGIFY(LIBAVDEVICE_VERSION_MAJOR) SLIBSUF
+ VALUE "ProductName", "FFmpeg"
+ VALUE "ProductVersion", FFMPEG_VERSION
+ }
+ }
+
+ BLOCK "VarFileInfo"
+ {
+ VALUE "Translation", 0x0409, 0x04B0
+ }
+}
diff --git a/libavdevice/avfoundation.m b/libavdevice/avfoundation.m
new file mode 100644
index 0000000000..8132278bdf
--- /dev/null
+++ b/libavdevice/avfoundation.m
@@ -0,0 +1,1049 @@
+/*
+ * AVFoundation input device
+ * Copyright (c) 2014 Thilo Borgmann <thilo.borgmann@mail.de>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * AVFoundation input device
+ * @author Thilo Borgmann <thilo.borgmann@mail.de>
+ */
+
+#import <AVFoundation/AVFoundation.h>
+#include <pthread.h>
+
+#include "libavutil/pixdesc.h"
+#include "libavutil/opt.h"
+#include "libavutil/avstring.h"
+#include "libavformat/internal.h"
+#include "libavutil/internal.h"
+#include "libavutil/parseutils.h"
+#include "libavutil/time.h"
+#include "avdevice.h"
+
+static const int avf_time_base = 1000000;
+
+static const AVRational avf_time_base_q = {
+ .num = 1,
+ .den = avf_time_base
+};
+
+struct AVFPixelFormatSpec {
+ enum AVPixelFormat ff_id;
+ OSType avf_id;
+};
+
+static const struct AVFPixelFormatSpec avf_pixel_formats[] = {
+ { AV_PIX_FMT_MONOBLACK, kCVPixelFormatType_1Monochrome },
+ { AV_PIX_FMT_RGB555BE, kCVPixelFormatType_16BE555 },
+ { AV_PIX_FMT_RGB555LE, kCVPixelFormatType_16LE555 },
+ { AV_PIX_FMT_RGB565BE, kCVPixelFormatType_16BE565 },
+ { AV_PIX_FMT_RGB565LE, kCVPixelFormatType_16LE565 },
+ { AV_PIX_FMT_RGB24, kCVPixelFormatType_24RGB },
+ { AV_PIX_FMT_BGR24, kCVPixelFormatType_24BGR },
+ { AV_PIX_FMT_0RGB, kCVPixelFormatType_32ARGB },
+ { AV_PIX_FMT_BGR0, kCVPixelFormatType_32BGRA },
+ { AV_PIX_FMT_0BGR, kCVPixelFormatType_32ABGR },
+ { AV_PIX_FMT_RGB0, kCVPixelFormatType_32RGBA },
+ { AV_PIX_FMT_BGR48BE, kCVPixelFormatType_48RGB },
+ { AV_PIX_FMT_UYVY422, kCVPixelFormatType_422YpCbCr8 },
+ { AV_PIX_FMT_YUVA444P, kCVPixelFormatType_4444YpCbCrA8R },
+ { AV_PIX_FMT_YUVA444P16LE, kCVPixelFormatType_4444AYpCbCr16 },
+ { AV_PIX_FMT_YUV444P, kCVPixelFormatType_444YpCbCr8 },
+ { AV_PIX_FMT_YUV422P16, kCVPixelFormatType_422YpCbCr16 },
+ { AV_PIX_FMT_YUV422P10, kCVPixelFormatType_422YpCbCr10 },
+ { AV_PIX_FMT_YUV444P10, kCVPixelFormatType_444YpCbCr10 },
+ { AV_PIX_FMT_YUV420P, kCVPixelFormatType_420YpCbCr8Planar },
+ { AV_PIX_FMT_NV12, kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange },
+ { AV_PIX_FMT_YUYV422, kCVPixelFormatType_422YpCbCr8_yuvs },
+#if !TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1080
+ { AV_PIX_FMT_GRAY8, kCVPixelFormatType_OneComponent8 },
+#endif
+ { AV_PIX_FMT_NONE, 0 }
+};
+
+typedef struct
+{
+ AVClass* class;
+
+ int frames_captured;
+ int audio_frames_captured;
+ int64_t first_pts;
+ int64_t first_audio_pts;
+ pthread_mutex_t frame_lock;
+ pthread_cond_t frame_wait_cond;
+ id avf_delegate;
+ id avf_audio_delegate;
+
+ AVRational framerate;
+ int width, height;
+
+ int capture_cursor;
+ int capture_mouse_clicks;
+
+ int list_devices;
+ int video_device_index;
+ int video_stream_index;
+ int audio_device_index;
+ int audio_stream_index;
+
+ char *video_filename;
+ char *audio_filename;
+
+ int num_video_devices;
+
+ int audio_channels;
+ int audio_bits_per_sample;
+ int audio_float;
+ int audio_be;
+ int audio_signed_integer;
+ int audio_packed;
+ int audio_non_interleaved;
+
+ int32_t *audio_buffer;
+ int audio_buffer_size;
+
+ enum AVPixelFormat pixel_format;
+
+ AVCaptureSession *capture_session;
+ AVCaptureVideoDataOutput *video_output;
+ AVCaptureAudioDataOutput *audio_output;
+ CMSampleBufferRef current_frame;
+ CMSampleBufferRef current_audio_frame;
+} AVFContext;
+
+static void lock_frames(AVFContext* ctx)
+{
+ pthread_mutex_lock(&ctx->frame_lock);
+}
+
+static void unlock_frames(AVFContext* ctx)
+{
+ pthread_mutex_unlock(&ctx->frame_lock);
+}
+
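+/* The capture delegates below run on private dispatch queues: each callback
+ * stores the most recent sample buffer in the context and signals
+ * frame_wait_cond, on which avf_read_packet() blocks in the demuxer thread. */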
+/** FrameReceiver class - delegate for AVCaptureSession
+ */
+@interface AVFFrameReceiver : NSObject
+{
+ AVFContext* _context;
+}
+
+- (id)initWithContext:(AVFContext*)context;
+
+- (void) captureOutput:(AVCaptureOutput *)captureOutput
+ didOutputSampleBuffer:(CMSampleBufferRef)videoFrame
+ fromConnection:(AVCaptureConnection *)connection;
+
+@end
+
+@implementation AVFFrameReceiver
+
+- (id)initWithContext:(AVFContext*)context
+{
+ if (self = [super init]) {
+ _context = context;
+ }
+ return self;
+}
+
+- (void) captureOutput:(AVCaptureOutput *)captureOutput
+ didOutputSampleBuffer:(CMSampleBufferRef)videoFrame
+ fromConnection:(AVCaptureConnection *)connection
+{
+ lock_frames(_context);
+
+ if (_context->current_frame != nil) {
+ CFRelease(_context->current_frame);
+ }
+
+ _context->current_frame = (CMSampleBufferRef)CFRetain(videoFrame);
+
+ pthread_cond_signal(&_context->frame_wait_cond);
+
+ unlock_frames(_context);
+
+ ++_context->frames_captured;
+}
+
+@end
+
+/** AudioReceiver class - delegate for AVCaptureSession
+ */
+@interface AVFAudioReceiver : NSObject
+{
+ AVFContext* _context;
+}
+
+- (id)initWithContext:(AVFContext*)context;
+
+- (void) captureOutput:(AVCaptureOutput *)captureOutput
+ didOutputSampleBuffer:(CMSampleBufferRef)audioFrame
+ fromConnection:(AVCaptureConnection *)connection;
+
+@end
+
+@implementation AVFAudioReceiver
+
+- (id)initWithContext:(AVFContext*)context
+{
+ if (self = [super init]) {
+ _context = context;
+ }
+ return self;
+}
+
+- (void) captureOutput:(AVCaptureOutput *)captureOutput
+ didOutputSampleBuffer:(CMSampleBufferRef)audioFrame
+ fromConnection:(AVCaptureConnection *)connection
+{
+ lock_frames(_context);
+
+ if (_context->current_audio_frame != nil) {
+ CFRelease(_context->current_audio_frame);
+ }
+
+ _context->current_audio_frame = (CMSampleBufferRef)CFRetain(audioFrame);
+
+ pthread_cond_signal(&_context->frame_wait_cond);
+
+ unlock_frames(_context);
+
+ ++_context->audio_frames_captured;
+}
+
+@end
+
+static void destroy_context(AVFContext* ctx)
+{
+ [ctx->capture_session stopRunning];
+
+ [ctx->capture_session release];
+ [ctx->video_output release];
+ [ctx->audio_output release];
+ [ctx->avf_delegate release];
+ [ctx->avf_audio_delegate release];
+
+ ctx->capture_session = NULL;
+ ctx->video_output = NULL;
+ ctx->audio_output = NULL;
+ ctx->avf_delegate = NULL;
+ ctx->avf_audio_delegate = NULL;
+
+ av_freep(&ctx->audio_buffer);
+
+ pthread_mutex_destroy(&ctx->frame_lock);
+ pthread_cond_destroy(&ctx->frame_wait_cond);
+
+ if (ctx->current_frame) {
+ CFRelease(ctx->current_frame);
+ }
+}
+
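+/* Split the input filename of the form "<video>:<audio>" into its two device
+ * specifiers; a leading ':' selects audio only. Each part is later resolved
+ * as a device index, a device name prefix, "default" or "none" (e.g. "0:0"
+ * or "default:none"). */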
+static void parse_device_name(AVFormatContext *s)
+{
+ AVFContext *ctx = (AVFContext*)s->priv_data;
+ char *tmp = av_strdup(s->filename);
+ char *save;
+
+ if (tmp[0] != ':') {
+ ctx->video_filename = av_strtok(tmp, ":", &save);
+ ctx->audio_filename = av_strtok(NULL, ":", &save);
+ } else {
+ ctx->audio_filename = av_strtok(tmp, ":", &save);
+ }
+}
+
+/**
+ * Configure the video device.
+ *
+ * Configure the video device using a run-time (key-value coding) approach to
+ * access its properties, since formats and activeFormat are only available
+ * from iOS >= 7.0 or OSX >= 10.7, and activeVideoMaxFrameDuration is only
+ * available from iOS >= 7.0 and OSX >= 10.9.
+ *
+ * The NSUndefinedKeyException must be handled by the caller of this function.
+ *
+ */
+static int configure_video_device(AVFormatContext *s, AVCaptureDevice *video_device)
+{
+ AVFContext *ctx = (AVFContext*)s->priv_data;
+
+ double framerate = av_q2d(ctx->framerate);
+ NSObject *range = nil;
+ NSObject *format = nil;
+ NSObject *selected_range = nil;
+ NSObject *selected_format = nil;
+
+ for (format in [video_device valueForKey:@"formats"]) {
+ CMFormatDescriptionRef formatDescription;
+ CMVideoDimensions dimensions;
+
+ formatDescription = (CMFormatDescriptionRef) [format performSelector:@selector(formatDescription)];
+ dimensions = CMVideoFormatDescriptionGetDimensions(formatDescription);
+
+ if ((ctx->width == 0 && ctx->height == 0) ||
+ (dimensions.width == ctx->width && dimensions.height == ctx->height)) {
+
+ selected_format = format;
+
+ for (range in [format valueForKey:@"videoSupportedFrameRateRanges"]) {
+ double max_framerate;
+
+ [[range valueForKey:@"maxFrameRate"] getValue:&max_framerate];
+ if (fabs (framerate - max_framerate) < 0.01) {
+ selected_range = range;
+ break;
+ }
+ }
+ }
+ }
+
+ if (!selected_format) {
+ av_log(s, AV_LOG_ERROR, "Selected video size (%dx%d) is not supported by the device\n",
+ ctx->width, ctx->height);
+ goto unsupported_format;
+ }
+
+ if (!selected_range) {
+ av_log(s, AV_LOG_ERROR, "Selected framerate (%f) is not supported by the device\n",
+ framerate);
+ goto unsupported_format;
+ }
+
+ if ([video_device lockForConfiguration:NULL] == YES) {
+ NSValue *min_frame_duration = [selected_range valueForKey:@"minFrameDuration"];
+
+ [video_device setValue:selected_format forKey:@"activeFormat"];
+ [video_device setValue:min_frame_duration forKey:@"activeVideoMinFrameDuration"];
+ [video_device setValue:min_frame_duration forKey:@"activeVideoMaxFrameDuration"];
+ } else {
+ av_log(s, AV_LOG_ERROR, "Could not lock device for configuration");
+ return AVERROR(EINVAL);
+ }
+
+ return 0;
+
+unsupported_format:
+
+ av_log(s, AV_LOG_ERROR, "Supported modes:\n");
+ for (format in [video_device valueForKey:@"formats"]) {
+ CMFormatDescriptionRef formatDescription;
+ CMVideoDimensions dimensions;
+
+ formatDescription = (CMFormatDescriptionRef) [format performSelector:@selector(formatDescription)];
+ dimensions = CMVideoFormatDescriptionGetDimensions(formatDescription);
+
+ for (range in [format valueForKey:@"videoSupportedFrameRateRanges"]) {
+ double min_framerate;
+ double max_framerate;
+
+ [[range valueForKey:@"minFrameRate"] getValue:&min_framerate];
+ [[range valueForKey:@"maxFrameRate"] getValue:&max_framerate];
+ av_log(s, AV_LOG_ERROR, " %dx%d@[%f %f]fps\n",
+ dimensions.width, dimensions.height,
+ min_framerate, max_framerate);
+ }
+ }
+ return AVERROR(EINVAL);
+}
+
+static int add_video_device(AVFormatContext *s, AVCaptureDevice *video_device)
+{
+ AVFContext *ctx = (AVFContext*)s->priv_data;
+ int ret;
+ NSError *error = nil;
+ AVCaptureInput* capture_input = nil;
+ struct AVFPixelFormatSpec pxl_fmt_spec;
+ NSNumber *pixel_format;
+ NSDictionary *capture_dict;
+ dispatch_queue_t queue;
+
+ if (ctx->video_device_index < ctx->num_video_devices) {
+ capture_input = (AVCaptureInput*) [[[AVCaptureDeviceInput alloc] initWithDevice:video_device error:&error] autorelease];
+ } else {
+ capture_input = (AVCaptureInput*) video_device;
+ }
+
+ if (!capture_input) {
+ av_log(s, AV_LOG_ERROR, "Failed to create AV capture input device: %s\n",
+ [[error localizedDescription] UTF8String]);
+ return 1;
+ }
+
+ if ([ctx->capture_session canAddInput:capture_input]) {
+ [ctx->capture_session addInput:capture_input];
+ } else {
+ av_log(s, AV_LOG_ERROR, "can't add video input to capture session\n");
+ return 1;
+ }
+
+ // Attaching output
+ ctx->video_output = [[AVCaptureVideoDataOutput alloc] init];
+
+ if (!ctx->video_output) {
+ av_log(s, AV_LOG_ERROR, "Failed to init AV video output\n");
+ return 1;
+ }
+
+ // Configure device framerate and video size
+ @try {
+ if ((ret = configure_video_device(s, video_device)) < 0) {
+ return ret;
+ }
+ } @catch (NSException *exception) {
+ if (![[exception name] isEqualToString:NSUndefinedKeyException]) {
+            av_log(s, AV_LOG_ERROR, "An error occurred: %s\n", [exception.reason UTF8String]);
+ return AVERROR_EXTERNAL;
+ }
+ }
+
+ // select pixel format
+ pxl_fmt_spec.ff_id = AV_PIX_FMT_NONE;
+
+ for (int i = 0; avf_pixel_formats[i].ff_id != AV_PIX_FMT_NONE; i++) {
+ if (ctx->pixel_format == avf_pixel_formats[i].ff_id) {
+ pxl_fmt_spec = avf_pixel_formats[i];
+ break;
+ }
+ }
+
+ // check if selected pixel format is supported by AVFoundation
+ if (pxl_fmt_spec.ff_id == AV_PIX_FMT_NONE) {
+ av_log(s, AV_LOG_ERROR, "Selected pixel format (%s) is not supported by AVFoundation.\n",
+               av_get_pix_fmt_name(ctx->pixel_format));
+ return 1;
+ }
+
+ // check if the pixel format is available for this device
+ if ([[ctx->video_output availableVideoCVPixelFormatTypes] indexOfObject:[NSNumber numberWithInt:pxl_fmt_spec.avf_id]] == NSNotFound) {
+ av_log(s, AV_LOG_ERROR, "Selected pixel format (%s) is not supported by the input device.\n",
+ av_get_pix_fmt_name(pxl_fmt_spec.ff_id));
+
+ pxl_fmt_spec.ff_id = AV_PIX_FMT_NONE;
+
+ av_log(s, AV_LOG_ERROR, "Supported pixel formats:\n");
+ for (NSNumber *pxl_fmt in [ctx->video_output availableVideoCVPixelFormatTypes]) {
+ struct AVFPixelFormatSpec pxl_fmt_dummy;
+ pxl_fmt_dummy.ff_id = AV_PIX_FMT_NONE;
+ for (int i = 0; avf_pixel_formats[i].ff_id != AV_PIX_FMT_NONE; i++) {
+ if ([pxl_fmt intValue] == avf_pixel_formats[i].avf_id) {
+ pxl_fmt_dummy = avf_pixel_formats[i];
+ break;
+ }
+ }
+
+ if (pxl_fmt_dummy.ff_id != AV_PIX_FMT_NONE) {
+ av_log(s, AV_LOG_ERROR, " %s\n", av_get_pix_fmt_name(pxl_fmt_dummy.ff_id));
+
+ // select first supported pixel format instead of user selected (or default) pixel format
+ if (pxl_fmt_spec.ff_id == AV_PIX_FMT_NONE) {
+ pxl_fmt_spec = pxl_fmt_dummy;
+ }
+ }
+ }
+
+ // fail if there is no appropriate pixel format or print a warning about overriding the pixel format
+ if (pxl_fmt_spec.ff_id == AV_PIX_FMT_NONE) {
+ return 1;
+ } else {
+ av_log(s, AV_LOG_WARNING, "Overriding selected pixel format to use %s instead.\n",
+ av_get_pix_fmt_name(pxl_fmt_spec.ff_id));
+ }
+ }
+
+ ctx->pixel_format = pxl_fmt_spec.ff_id;
+ pixel_format = [NSNumber numberWithUnsignedInt:pxl_fmt_spec.avf_id];
+ capture_dict = [NSDictionary dictionaryWithObject:pixel_format
+ forKey:(id)kCVPixelBufferPixelFormatTypeKey];
+
+ [ctx->video_output setVideoSettings:capture_dict];
+ [ctx->video_output setAlwaysDiscardsLateVideoFrames:YES];
+
+ ctx->avf_delegate = [[AVFFrameReceiver alloc] initWithContext:ctx];
+
+ queue = dispatch_queue_create("avf_queue", NULL);
+ [ctx->video_output setSampleBufferDelegate:ctx->avf_delegate queue:queue];
+ dispatch_release(queue);
+
+ if ([ctx->capture_session canAddOutput:ctx->video_output]) {
+ [ctx->capture_session addOutput:ctx->video_output];
+ } else {
+ av_log(s, AV_LOG_ERROR, "can't add video output to capture session\n");
+ return 1;
+ }
+
+ return 0;
+}
+
+static int add_audio_device(AVFormatContext *s, AVCaptureDevice *audio_device)
+{
+ AVFContext *ctx = (AVFContext*)s->priv_data;
+ NSError *error = nil;
+ AVCaptureDeviceInput* audio_dev_input = [[[AVCaptureDeviceInput alloc] initWithDevice:audio_device error:&error] autorelease];
+ dispatch_queue_t queue;
+
+ if (!audio_dev_input) {
+ av_log(s, AV_LOG_ERROR, "Failed to create AV capture input device: %s\n",
+ [[error localizedDescription] UTF8String]);
+ return 1;
+ }
+
+ if ([ctx->capture_session canAddInput:audio_dev_input]) {
+ [ctx->capture_session addInput:audio_dev_input];
+ } else {
+ av_log(s, AV_LOG_ERROR, "can't add audio input to capture session\n");
+ return 1;
+ }
+
+ // Attaching output
+ ctx->audio_output = [[AVCaptureAudioDataOutput alloc] init];
+
+ if (!ctx->audio_output) {
+ av_log(s, AV_LOG_ERROR, "Failed to init AV audio output\n");
+ return 1;
+ }
+
+ ctx->avf_audio_delegate = [[AVFAudioReceiver alloc] initWithContext:ctx];
+
+ queue = dispatch_queue_create("avf_audio_queue", NULL);
+ [ctx->audio_output setSampleBufferDelegate:ctx->avf_audio_delegate queue:queue];
+ dispatch_release(queue);
+
+ if ([ctx->capture_session canAddOutput:ctx->audio_output]) {
+ [ctx->capture_session addOutput:ctx->audio_output];
+ } else {
+ av_log(s, AV_LOG_ERROR, "adding audio output to capture session failed\n");
+ return 1;
+ }
+
+ return 0;
+}
+
+static int get_video_config(AVFormatContext *s)
+{
+ AVFContext *ctx = (AVFContext*)s->priv_data;
+ CVImageBufferRef image_buffer;
+ CGSize image_buffer_size;
+ AVStream* stream = avformat_new_stream(s, NULL);
+
+ if (!stream) {
+ return 1;
+ }
+
+ // Take stream info from the first frame.
+ while (ctx->frames_captured < 1) {
+ CFRunLoopRunInMode(kCFRunLoopDefaultMode, 0.1, YES);
+ }
+
+ lock_frames(ctx);
+
+ ctx->video_stream_index = stream->index;
+
+ avpriv_set_pts_info(stream, 64, 1, avf_time_base);
+
+ image_buffer = CMSampleBufferGetImageBuffer(ctx->current_frame);
+ image_buffer_size = CVImageBufferGetEncodedSize(image_buffer);
+
+ stream->codecpar->codec_id = AV_CODEC_ID_RAWVIDEO;
+ stream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
+ stream->codecpar->width = (int)image_buffer_size.width;
+ stream->codecpar->height = (int)image_buffer_size.height;
+ stream->codecpar->format = ctx->pixel_format;
+
+ CFRelease(ctx->current_frame);
+ ctx->current_frame = nil;
+
+ unlock_frames(ctx);
+
+ return 0;
+}
+
+static int get_audio_config(AVFormatContext *s)
+{
+ AVFContext *ctx = (AVFContext*)s->priv_data;
+ CMFormatDescriptionRef format_desc;
+ AVStream* stream = avformat_new_stream(s, NULL);
+
+ if (!stream) {
+ return 1;
+ }
+
+ // Take stream info from the first frame.
+ while (ctx->audio_frames_captured < 1) {
+ CFRunLoopRunInMode(kCFRunLoopDefaultMode, 0.1, YES);
+ }
+
+ lock_frames(ctx);
+
+ ctx->audio_stream_index = stream->index;
+
+ avpriv_set_pts_info(stream, 64, 1, avf_time_base);
+
+ format_desc = CMSampleBufferGetFormatDescription(ctx->current_audio_frame);
+ const AudioStreamBasicDescription *basic_desc = CMAudioFormatDescriptionGetStreamBasicDescription(format_desc);
+
+    if (!basic_desc) {
+        av_log(s, AV_LOG_ERROR, "audio format not available\n");
+        unlock_frames(ctx);
+        return 1;
+    }
+
+ stream->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
+ stream->codecpar->sample_rate = basic_desc->mSampleRate;
+ stream->codecpar->channels = basic_desc->mChannelsPerFrame;
+ stream->codecpar->channel_layout = av_get_default_channel_layout(stream->codecpar->channels);
+
+ ctx->audio_channels = basic_desc->mChannelsPerFrame;
+ ctx->audio_bits_per_sample = basic_desc->mBitsPerChannel;
+ ctx->audio_float = basic_desc->mFormatFlags & kAudioFormatFlagIsFloat;
+ ctx->audio_be = basic_desc->mFormatFlags & kAudioFormatFlagIsBigEndian;
+ ctx->audio_signed_integer = basic_desc->mFormatFlags & kAudioFormatFlagIsSignedInteger;
+ ctx->audio_packed = basic_desc->mFormatFlags & kAudioFormatFlagIsPacked;
+ ctx->audio_non_interleaved = basic_desc->mFormatFlags & kAudioFormatFlagIsNonInterleaved;
+
+ if (basic_desc->mFormatID == kAudioFormatLinearPCM &&
+ ctx->audio_float &&
+ ctx->audio_bits_per_sample == 32 &&
+ ctx->audio_packed) {
+ stream->codecpar->codec_id = ctx->audio_be ? AV_CODEC_ID_PCM_F32BE : AV_CODEC_ID_PCM_F32LE;
+ } else if (basic_desc->mFormatID == kAudioFormatLinearPCM &&
+ ctx->audio_signed_integer &&
+ ctx->audio_bits_per_sample == 16 &&
+ ctx->audio_packed) {
+ stream->codecpar->codec_id = ctx->audio_be ? AV_CODEC_ID_PCM_S16BE : AV_CODEC_ID_PCM_S16LE;
+ } else if (basic_desc->mFormatID == kAudioFormatLinearPCM &&
+ ctx->audio_signed_integer &&
+ ctx->audio_bits_per_sample == 24 &&
+ ctx->audio_packed) {
+ stream->codecpar->codec_id = ctx->audio_be ? AV_CODEC_ID_PCM_S24BE : AV_CODEC_ID_PCM_S24LE;
+ } else if (basic_desc->mFormatID == kAudioFormatLinearPCM &&
+ ctx->audio_signed_integer &&
+ ctx->audio_bits_per_sample == 32 &&
+ ctx->audio_packed) {
+ stream->codecpar->codec_id = ctx->audio_be ? AV_CODEC_ID_PCM_S32BE : AV_CODEC_ID_PCM_S32LE;
+    } else {
+        av_log(s, AV_LOG_ERROR, "audio format is not supported\n");
+        unlock_frames(ctx);
+        return 1;
+    }
+
+ if (ctx->audio_non_interleaved) {
+ CMBlockBufferRef block_buffer = CMSampleBufferGetDataBuffer(ctx->current_audio_frame);
+ ctx->audio_buffer_size = CMBlockBufferGetDataLength(block_buffer);
+ ctx->audio_buffer = av_malloc(ctx->audio_buffer_size);
+        if (!ctx->audio_buffer) {
+            av_log(s, AV_LOG_ERROR, "error allocating audio buffer\n");
+            unlock_frames(ctx);
+            return 1;
+        }
+ }
+
+ CFRelease(ctx->current_audio_frame);
+ ctx->current_audio_frame = nil;
+
+ unlock_frames(ctx);
+
+ return 0;
+}
+
+static int avf_read_header(AVFormatContext *s)
+{
+ NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
+ int capture_screen = 0;
+ uint32_t num_screens = 0;
+ AVFContext *ctx = (AVFContext*)s->priv_data;
+ AVCaptureDevice *video_device = nil;
+ AVCaptureDevice *audio_device = nil;
+ // Find capture device
+ NSArray *devices = [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo];
+ ctx->num_video_devices = [devices count];
+
+ ctx->first_pts = av_gettime();
+ ctx->first_audio_pts = av_gettime();
+
+ pthread_mutex_init(&ctx->frame_lock, NULL);
+ pthread_cond_init(&ctx->frame_wait_cond, NULL);
+
+#if !TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1070
+ CGGetActiveDisplayList(0, NULL, &num_screens);
+#endif
+
+ // List devices if requested
+ if (ctx->list_devices) {
+ int index = 0;
+ av_log(ctx, AV_LOG_INFO, "AVFoundation video devices:\n");
+ for (AVCaptureDevice *device in devices) {
+ const char *name = [[device localizedName] UTF8String];
+ index = [devices indexOfObject:device];
+ av_log(ctx, AV_LOG_INFO, "[%d] %s\n", index, name);
+ index++;
+ }
+#if !TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1070
+ if (num_screens > 0) {
+ CGDirectDisplayID screens[num_screens];
+ CGGetActiveDisplayList(num_screens, screens, &num_screens);
+ for (int i = 0; i < num_screens; i++) {
+ av_log(ctx, AV_LOG_INFO, "[%d] Capture screen %d\n", index + i, i);
+ }
+ }
+#endif
+
+ av_log(ctx, AV_LOG_INFO, "AVFoundation audio devices:\n");
+ devices = [AVCaptureDevice devicesWithMediaType:AVMediaTypeAudio];
+ for (AVCaptureDevice *device in devices) {
+ const char *name = [[device localizedName] UTF8String];
+ int index = [devices indexOfObject:device];
+ av_log(ctx, AV_LOG_INFO, "[%d] %s\n", index, name);
+ }
+ goto fail;
+ }
+
+ // parse input filename for video and audio device
+ parse_device_name(s);
+
+ // check for device index given in filename
+ if (ctx->video_device_index == -1 && ctx->video_filename) {
+ sscanf(ctx->video_filename, "%d", &ctx->video_device_index);
+ }
+ if (ctx->audio_device_index == -1 && ctx->audio_filename) {
+ sscanf(ctx->audio_filename, "%d", &ctx->audio_device_index);
+ }
+
+ if (ctx->video_device_index >= 0) {
+ if (ctx->video_device_index < ctx->num_video_devices) {
+ video_device = [devices objectAtIndex:ctx->video_device_index];
+ } else if (ctx->video_device_index < ctx->num_video_devices + num_screens) {
+#if !TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1070
+ CGDirectDisplayID screens[num_screens];
+ CGGetActiveDisplayList(num_screens, screens, &num_screens);
+ AVCaptureScreenInput* capture_screen_input = [[[AVCaptureScreenInput alloc] initWithDisplayID:screens[ctx->video_device_index - ctx->num_video_devices]] autorelease];
+
+ if (ctx->framerate.num > 0) {
+ capture_screen_input.minFrameDuration = CMTimeMake(ctx->framerate.den, ctx->framerate.num);
+ }
+
+#if !TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1080
+ if (ctx->capture_cursor) {
+ capture_screen_input.capturesCursor = YES;
+ } else {
+ capture_screen_input.capturesCursor = NO;
+ }
+#endif
+
+ if (ctx->capture_mouse_clicks) {
+ capture_screen_input.capturesMouseClicks = YES;
+ } else {
+ capture_screen_input.capturesMouseClicks = NO;
+ }
+
+ video_device = (AVCaptureDevice*) capture_screen_input;
+ capture_screen = 1;
+#endif
+ } else {
+ av_log(ctx, AV_LOG_ERROR, "Invalid device index\n");
+ goto fail;
+ }
+ } else if (ctx->video_filename &&
+ strncmp(ctx->video_filename, "none", 4)) {
+ if (!strncmp(ctx->video_filename, "default", 7)) {
+ video_device = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
+ } else {
+ // looking for video inputs
+ for (AVCaptureDevice *device in devices) {
+ if (!strncmp(ctx->video_filename, [[device localizedName] UTF8String], strlen(ctx->video_filename))) {
+ video_device = device;
+ break;
+ }
+ }
+
+#if !TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1070
+ // looking for screen inputs
+ if (!video_device) {
+ int idx;
+                if (sscanf(ctx->video_filename, "Capture screen %d", &idx) && idx < num_screens) {
+ CGDirectDisplayID screens[num_screens];
+ CGGetActiveDisplayList(num_screens, screens, &num_screens);
+ AVCaptureScreenInput* capture_screen_input = [[[AVCaptureScreenInput alloc] initWithDisplayID:screens[idx]] autorelease];
+ video_device = (AVCaptureDevice*) capture_screen_input;
+ ctx->video_device_index = ctx->num_video_devices + idx;
+ capture_screen = 1;
+
+ if (ctx->framerate.num > 0) {
+ capture_screen_input.minFrameDuration = CMTimeMake(ctx->framerate.den, ctx->framerate.num);
+ }
+
+#if !TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1080
+ if (ctx->capture_cursor) {
+ capture_screen_input.capturesCursor = YES;
+ } else {
+ capture_screen_input.capturesCursor = NO;
+ }
+#endif
+
+ if (ctx->capture_mouse_clicks) {
+ capture_screen_input.capturesMouseClicks = YES;
+ } else {
+ capture_screen_input.capturesMouseClicks = NO;
+ }
+ }
+ }
+#endif
+ }
+
+ if (!video_device) {
+ av_log(ctx, AV_LOG_ERROR, "Video device not found\n");
+ goto fail;
+ }
+ }
+
+ // get audio device
+ if (ctx->audio_device_index >= 0) {
+ NSArray *devices = [AVCaptureDevice devicesWithMediaType:AVMediaTypeAudio];
+
+ if (ctx->audio_device_index >= [devices count]) {
+ av_log(ctx, AV_LOG_ERROR, "Invalid audio device index\n");
+ goto fail;
+ }
+
+ audio_device = [devices objectAtIndex:ctx->audio_device_index];
+ } else if (ctx->audio_filename &&
+ strncmp(ctx->audio_filename, "none", 4)) {
+ if (!strncmp(ctx->audio_filename, "default", 7)) {
+ audio_device = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeAudio];
+ } else {
+ NSArray *devices = [AVCaptureDevice devicesWithMediaType:AVMediaTypeAudio];
+
+ for (AVCaptureDevice *device in devices) {
+ if (!strncmp(ctx->audio_filename, [[device localizedName] UTF8String], strlen(ctx->audio_filename))) {
+ audio_device = device;
+ break;
+ }
+ }
+ }
+
+ if (!audio_device) {
+ av_log(ctx, AV_LOG_ERROR, "Audio device not found\n");
+ goto fail;
+ }
+ }
+
+    // Fail if neither a video nor an audio capture device was found
+ if (!video_device && !audio_device) {
+ av_log(s, AV_LOG_ERROR, "No AV capture device found\n");
+ goto fail;
+ }
+
+ if (video_device) {
+ if (ctx->video_device_index < ctx->num_video_devices) {
+ av_log(s, AV_LOG_DEBUG, "'%s' opened\n", [[video_device localizedName] UTF8String]);
+ } else {
+ av_log(s, AV_LOG_DEBUG, "'%s' opened\n", [[video_device description] UTF8String]);
+ }
+ }
+ if (audio_device) {
+ av_log(s, AV_LOG_DEBUG, "audio device '%s' opened\n", [[audio_device localizedName] UTF8String]);
+ }
+
+ // Initialize capture session
+ ctx->capture_session = [[AVCaptureSession alloc] init];
+
+ if (video_device && add_video_device(s, video_device)) {
+ goto fail;
+ }
+    if (audio_device && add_audio_device(s, audio_device)) {
+        goto fail;
+    }
+
+ [ctx->capture_session startRunning];
+
+ /* Unlock device configuration only after the session is started so it
+ * does not reset the capture formats */
+ if (!capture_screen) {
+ [video_device unlockForConfiguration];
+ }
+
+ if (video_device && get_video_config(s)) {
+ goto fail;
+ }
+
+ // set audio stream
+ if (audio_device && get_audio_config(s)) {
+ goto fail;
+ }
+
+ [pool release];
+ return 0;
+
+fail:
+ [pool release];
+ destroy_context(ctx);
+ return AVERROR(EIO);
+}
+
+static int avf_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ AVFContext* ctx = (AVFContext*)s->priv_data;
+
+ do {
+ CVImageBufferRef image_buffer;
+ lock_frames(ctx);
+
+        if (ctx->current_frame != nil) {
+            void *data;
+            image_buffer = CMSampleBufferGetImageBuffer(ctx->current_frame);
+            if (av_new_packet(pkt, (int)CVPixelBufferGetDataSize(image_buffer)) < 0) {
+                unlock_frames(ctx);
+                return AVERROR(EIO);
+            }
+
+ CMItemCount count;
+ CMSampleTimingInfo timing_info;
+
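+            // Rescale the buffer's native presentation timestamp (given in
+            // its own timescale) to the microsecond stream timebase.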
+ if (CMSampleBufferGetOutputSampleTimingInfoArray(ctx->current_frame, 1, &timing_info, &count) == noErr) {
+ AVRational timebase_q = av_make_q(1, timing_info.presentationTimeStamp.timescale);
+ pkt->pts = pkt->dts = av_rescale_q(timing_info.presentationTimeStamp.value, timebase_q, avf_time_base_q);
+ }
+
+ pkt->stream_index = ctx->video_stream_index;
+ pkt->flags |= AV_PKT_FLAG_KEY;
+
+ CVPixelBufferLockBaseAddress(image_buffer, 0);
+
+ data = CVPixelBufferGetBaseAddress(image_buffer);
+ memcpy(pkt->data, data, pkt->size);
+
+ CVPixelBufferUnlockBaseAddress(image_buffer, 0);
+ CFRelease(ctx->current_frame);
+ ctx->current_frame = nil;
+ } else if (ctx->current_audio_frame != nil) {
+ CMBlockBufferRef block_buffer = CMSampleBufferGetDataBuffer(ctx->current_audio_frame);
+ int block_buffer_size = CMBlockBufferGetDataLength(block_buffer);
+
+            if (!block_buffer || !block_buffer_size) {
+                unlock_frames(ctx);
+                return AVERROR(EIO);
+            }
+
+            if (ctx->audio_non_interleaved && block_buffer_size > ctx->audio_buffer_size) {
+                unlock_frames(ctx);
+                return AVERROR_BUFFER_TOO_SMALL;
+            }
+
+            if (av_new_packet(pkt, block_buffer_size) < 0) {
+                unlock_frames(ctx);
+                return AVERROR(EIO);
+            }
+
+ CMItemCount count;
+ CMSampleTimingInfo timing_info;
+
+ if (CMSampleBufferGetOutputSampleTimingInfoArray(ctx->current_audio_frame, 1, &timing_info, &count) == noErr) {
+ AVRational timebase_q = av_make_q(1, timing_info.presentationTimeStamp.timescale);
+ pkt->pts = pkt->dts = av_rescale_q(timing_info.presentationTimeStamp.value, timebase_q, avf_time_base_q);
+ }
+
+ pkt->stream_index = ctx->audio_stream_index;
+ pkt->flags |= AV_PKT_FLAG_KEY;
+
+ if (ctx->audio_non_interleaved) {
+ int sample, c, shift, num_samples;
+
+ OSStatus ret = CMBlockBufferCopyDataBytes(block_buffer, 0, pkt->size, ctx->audio_buffer);
+                if (ret != kCMBlockBufferNoErr) {
+                    unlock_frames(ctx);
+                    return AVERROR(EIO);
+                }
+
+ num_samples = pkt->size / (ctx->audio_channels * (ctx->audio_bits_per_sample >> 3));
+
+ // transform decoded frame into output format
+ #define INTERLEAVE_OUTPUT(bps) \
+ { \
+ int##bps##_t **src; \
+ int##bps##_t *dest; \
+ src = av_malloc(ctx->audio_channels * sizeof(int##bps##_t*)); \
+ if (!src) return AVERROR(EIO); \
+ for (c = 0; c < ctx->audio_channels; c++) { \
+ src[c] = ((int##bps##_t*)ctx->audio_buffer) + c * num_samples; \
+ } \
+ dest = (int##bps##_t*)pkt->data; \
+ shift = bps - ctx->audio_bits_per_sample; \
+ for (sample = 0; sample < num_samples; sample++) \
+ for (c = 0; c < ctx->audio_channels; c++) \
+ *dest++ = src[c][sample] << shift; \
+ av_freep(&src); \
+ }
+
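+                // Interleave the planar buffer: for 2-channel input,
+                // L0 L1 ... R0 R1 ... becomes L0 R0 L1 R1 ..., with each
+                // sample left-shifted so that e.g. 24-bit values fill their
+                // 32-bit output container.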
+ if (ctx->audio_bits_per_sample <= 16) {
+ INTERLEAVE_OUTPUT(16)
+ } else {
+ INTERLEAVE_OUTPUT(32)
+ }
+ } else {
+ OSStatus ret = CMBlockBufferCopyDataBytes(block_buffer, 0, pkt->size, pkt->data);
+ if (ret != kCMBlockBufferNoErr) {
+ return AVERROR(EIO);
+ }
+ }
+
+ CFRelease(ctx->current_audio_frame);
+ ctx->current_audio_frame = nil;
+ } else {
+ pkt->data = NULL;
+ pthread_cond_wait(&ctx->frame_wait_cond, &ctx->frame_lock);
+ }
+
+ unlock_frames(ctx);
+ } while (!pkt->data);
+
+ return 0;
+}
+
+static int avf_close(AVFormatContext *s)
+{
+ AVFContext* ctx = (AVFContext*)s->priv_data;
+ destroy_context(ctx);
+ return 0;
+}
+
+static const AVOption options[] = {
+ { "list_devices", "list available devices", offsetof(AVFContext, list_devices), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, AV_OPT_FLAG_DECODING_PARAM, "list_devices" },
+ { "true", "", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "list_devices" },
+ { "false", "", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "list_devices" },
+ { "video_device_index", "select video device by index for devices with same name (starts at 0)", offsetof(AVFContext, video_device_index), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
+ { "audio_device_index", "select audio device by index for devices with same name (starts at 0)", offsetof(AVFContext, audio_device_index), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
+ { "pixel_format", "set pixel format", offsetof(AVFContext, pixel_format), AV_OPT_TYPE_PIXEL_FMT, {.i64 = AV_PIX_FMT_YUV420P}, 0, INT_MAX, AV_OPT_FLAG_DECODING_PARAM},
+ { "framerate", "set frame rate", offsetof(AVFContext, framerate), AV_OPT_TYPE_VIDEO_RATE, {.str = "ntsc"}, 0, 0, AV_OPT_FLAG_DECODING_PARAM },
+ { "video_size", "set video size", offsetof(AVFContext, width), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL}, 0, 0, AV_OPT_FLAG_DECODING_PARAM },
+ { "capture_cursor", "capture the screen cursor", offsetof(AVFContext, capture_cursor), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, AV_OPT_FLAG_DECODING_PARAM },
+ { "capture_mouse_clicks", "capture the screen mouse clicks", offsetof(AVFContext, capture_mouse_clicks), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, AV_OPT_FLAG_DECODING_PARAM },
+
+ { NULL },
+};
+
+static const AVClass avf_class = {
+ .class_name = "AVFoundation input device",
+ .item_name = av_default_item_name,
+ .option = options,
+ .version = LIBAVUTIL_VERSION_INT,
+ .category = AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT,
+};
+
+AVInputFormat ff_avfoundation_demuxer = {
+ .name = "avfoundation",
+ .long_name = NULL_IF_CONFIG_SMALL("AVFoundation input device"),
+ .priv_data_size = sizeof(AVFContext),
+ .read_header = avf_read_header,
+ .read_packet = avf_read_packet,
+ .read_close = avf_close,
+ .flags = AVFMT_NOFILE,
+ .priv_class = &avf_class,
+};
diff --git a/libavdevice/avfoundation_dec.m b/libavdevice/avfoundation_dec.m
deleted file mode 100644
index f01484c48f..0000000000
--- a/libavdevice/avfoundation_dec.m
+++ /dev/null
@@ -1,686 +0,0 @@
-/*
- * AVFoundation input device
- * Copyright (c) 2015 Luca Barbato
- * Alexandre Lision
- *
- * This file is part of Libav.
- *
- * Libav is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * Libav is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#import <AVFoundation/AVFoundation.h>
-#include <pthread.h>
-
-#include "libavformat/avformat.h"
-#include "libavformat/internal.h"
-
-#include "libavutil/log.h"
-#include "libavutil/mathematics.h"
-#include "libavutil/opt.h"
-#include "libavutil/parseutils.h"
-#include "libavutil/pixdesc.h"
-#include "libavutil/time.h"
-
-#include "avdevice.h"
-
-struct AVPixelFormatMap {
- enum AVPixelFormat pix_fmt;
- OSType core_video_fmt;
-};
-
-static const struct AVPixelFormatMap pixel_format_map[] = {
- { AV_PIX_FMT_ABGR, kCVPixelFormatType_32ABGR },
- { AV_PIX_FMT_ARGB, kCVPixelFormatType_32ARGB },
- { AV_PIX_FMT_BGR24, kCVPixelFormatType_24BGR },
- { AV_PIX_FMT_BGR48BE, kCVPixelFormatType_48RGB },
- { AV_PIX_FMT_BGRA, kCVPixelFormatType_32BGRA },
- { AV_PIX_FMT_MONOBLACK, kCVPixelFormatType_1Monochrome },
- { AV_PIX_FMT_NV12, kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange },
- { AV_PIX_FMT_RGB24, kCVPixelFormatType_24RGB },
- { AV_PIX_FMT_RGB555BE, kCVPixelFormatType_16BE555 },
- { AV_PIX_FMT_RGB555LE, kCVPixelFormatType_16LE555 },
- { AV_PIX_FMT_RGB565BE, kCVPixelFormatType_16BE565 },
- { AV_PIX_FMT_RGB565LE, kCVPixelFormatType_16LE565 },
- { AV_PIX_FMT_RGBA, kCVPixelFormatType_32RGBA },
- { AV_PIX_FMT_UYVY422, kCVPixelFormatType_422YpCbCr8 },
- { AV_PIX_FMT_YUV420P, kCVPixelFormatType_420YpCbCr8Planar },
- { AV_PIX_FMT_YUV422P10, kCVPixelFormatType_422YpCbCr10 },
- { AV_PIX_FMT_YUV422P16, kCVPixelFormatType_422YpCbCr16 },
- { AV_PIX_FMT_YUV444P, kCVPixelFormatType_444YpCbCr8 },
- { AV_PIX_FMT_YUV444P10, kCVPixelFormatType_444YpCbCr10 },
- { AV_PIX_FMT_YUVA444P, kCVPixelFormatType_4444YpCbCrA8R },
- { AV_PIX_FMT_YUVA444P16LE, kCVPixelFormatType_4444AYpCbCr16 },
- { AV_PIX_FMT_YUYV422, kCVPixelFormatType_422YpCbCr8_yuvs },
-#if __MAC_OS_X_VERSION_MIN_REQUIRED >= 1080
- { AV_PIX_FMT_GRAY8, kCVPixelFormatType_OneComponent8 },
-#endif
- { AV_PIX_FMT_NONE, 0 }
-};
-
-static enum AVPixelFormat core_video_to_pix_fmt(OSType core_video_fmt)
-{
- int i;
- for (i = 0; pixel_format_map[i].pix_fmt != AV_PIX_FMT_NONE; i++)
- if (core_video_fmt == pixel_format_map[i].core_video_fmt)
- return pixel_format_map[i].pix_fmt;
- return AV_PIX_FMT_NONE;
-}
-
-static OSType pix_fmt_to_core_video(enum AVPixelFormat pix_fmt)
-{
- int i;
- for (i = 0; pixel_format_map[i].pix_fmt != AV_PIX_FMT_NONE; i++)
- if (pix_fmt == pixel_format_map[i].pix_fmt)
- return pixel_format_map[i].core_video_fmt;
- return 0;
-}
-
-typedef struct AVFoundationCaptureContext {
- AVClass *class;
- /* AVOptions */
- int list_devices;
- int list_formats;
- char *pixel_format;
- char *video_size; /* String describing video size */
- char *framerate; /* String describing the framerate */
-
- int video_stream_index;
- int width, height;
- AVRational internal_framerate;
-
- int64_t first_pts;
- int frames_captured;
- pthread_mutex_t frame_lock;
- pthread_cond_t frame_wait_cond;
-
- /* ARC-compatible pointers to ObjC objects */
- CFTypeRef session; /* AVCaptureSession */
- CFTypeRef video_output;
- CFTypeRef video_delegate;
- CVImageBufferRef current_frame;
-} AVFoundationCaptureContext;
-
-#define AUDIO_DEVICES 1
-#define VIDEO_DEVICES 2
-#define ALL_DEVICES AUDIO_DEVICES | VIDEO_DEVICES
-
-#define OFFSET(x) offsetof(AVFoundationCaptureContext, x)
-#define DEC AV_OPT_FLAG_DECODING_PARAM
-static const AVOption options[] = {
- { "list_devices", "List available devices and exit", OFFSET(list_devices), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, DEC, "list_devices" },
- { "all", "Show all the supported devices", OFFSET(list_devices), AV_OPT_TYPE_CONST, { .i64 = ALL_DEVICES }, 0, INT_MAX, DEC, "list_devices" },
- { "audio", "Show only the audio devices", OFFSET(list_devices), AV_OPT_TYPE_CONST, { .i64 = AUDIO_DEVICES }, 0, INT_MAX, DEC, "list_devices" },
- { "video", "Show only the video devices", OFFSET(list_devices), AV_OPT_TYPE_CONST, { .i64 = VIDEO_DEVICES }, 0, INT_MAX, DEC, "list_devices" },
- { "list_formats", "List available formats and exit", OFFSET(list_formats), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, DEC, "list_formats" },
- { "pixel_format", "Preferred pixel format", OFFSET(pixel_format), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, DEC },
- { "video_size", "A string describing frame size, such as 640x480 or hd720.", OFFSET(video_size), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, DEC },
- { "framerate", "A string representing desired framerate", OFFSET(framerate), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, DEC },
- { NULL },
-};
-
-static void list_capture_devices_by_type(AVFormatContext *s, NSString *type)
-{
- NSArray *devices = [AVCaptureDevice devicesWithMediaType:type];
-
- av_log(s, AV_LOG_INFO, "Type: %s\n", [type UTF8String]);
- for (AVCaptureDevice *device in devices) {
- av_log(s, AV_LOG_INFO, "uniqueID: %s\nname: %s\nformat:\n",
- [[device uniqueID] UTF8String],
- [[device localizedName] UTF8String]);
-
- for (AVCaptureDeviceFormat *format in device.formats)
- av_log(s, AV_LOG_INFO, "\t%s\n",
- [[NSString stringWithFormat: @ "%@", format] UTF8String]);
- }
-}
-
-static int avfoundation_list_capture_devices(AVFormatContext *s)
-{
- AVFoundationCaptureContext *ctx = s->priv_data;
-
- if (ctx->list_devices & AUDIO_DEVICES)
- list_capture_devices_by_type(s, AVMediaTypeAudio);
-
- if (ctx->list_devices & VIDEO_DEVICES)
- list_capture_devices_by_type(s, AVMediaTypeVideo);
-
- return AVERROR_EXIT;
-}
-
-static int list_formats(AVFormatContext *s)
-{
- av_log(s, AV_LOG_VERBOSE, "Supported pixel formats (first is more efficient):\n");
- AVCaptureVideoDataOutput *out = [[AVCaptureVideoDataOutput alloc] init];
-
- for (NSNumber *cv_pixel_format in[out availableVideoCVPixelFormatTypes]) {
- OSType cv_fmt = [cv_pixel_format intValue];
- enum AVPixelFormat pix_fmt = core_video_to_pix_fmt(cv_fmt);
- if (pix_fmt != AV_PIX_FMT_NONE) {
- av_log(s, AV_LOG_VERBOSE, " %s: %d\n",
- av_get_pix_fmt_name(pix_fmt),
- cv_fmt);
- }
- }
- return AVERROR_EXIT;
-}
-
-static void lock_frames(AVFoundationCaptureContext *ctx)
-{
- pthread_mutex_lock(&ctx->frame_lock);
-}
-
-static void unlock_frames(AVFoundationCaptureContext *ctx)
-{
- pthread_mutex_unlock(&ctx->frame_lock);
-}
-
-@interface VideoCapture : NSObject <AVCaptureVideoDataOutputSampleBufferDelegate>
-{
- AVFoundationCaptureContext *_context;
-}
-
-- (id)initWithContext:(AVFoundationCaptureContext *)context;
-
-- (void)captureOutput:(AVCaptureOutput *)captureOutput
- didOutputSampleBuffer:(CMSampleBufferRef)videoFrame
- fromConnection:(AVCaptureConnection *)connection;
-
-@end
-
-@implementation VideoCapture
-
-- (id)initWithContext:(AVFoundationCaptureContext *)context
-{
- if (self = [super init]) {
- _context = context;
- }
- return self;
-}
-
-- (void)captureOutput:(AVCaptureOutput *)captureOutput
- didOutputSampleBuffer:(CMSampleBufferRef)videoFrame
- fromConnection:(AVCaptureConnection *)connection
-{
- CVImageBufferRef buf;
- lock_frames(_context);
-
- if (_context->current_frame != nil) {
- CFRelease(_context->current_frame);
- }
-
- buf = CMSampleBufferGetImageBuffer(videoFrame);
- if (!buf)
- return;
-
- CFRetain(buf);
-
- _context->current_frame = buf;
-
- pthread_cond_signal(&_context->frame_wait_cond);
-
- unlock_frames(_context);
-
- ++_context->frames_captured;
-}
-
-@end
-
-/**
- * Configure the video device.
- */
-static bool configure_video_device(AVFormatContext *s, AVCaptureDevice *video_device)
-{
- AVFoundationCaptureContext *ctx = s->priv_data;
- AVCaptureDeviceFormat *selected_format = nil;
- AVFrameRateRange *selected_range = nil;
- double framerate = av_q2d(ctx->internal_framerate);
- double epsilon = 0.00000001;
-
- for (AVCaptureDeviceFormat *format in[video_device formats]) {
- CMFormatDescriptionRef formatDescription;
- CMVideoDimensions dimensions;
-
- formatDescription = (CMFormatDescriptionRef)format.formatDescription;
- dimensions = CMVideoFormatDescriptionGetDimensions(formatDescription);
-
- if ((ctx->width == 0 && ctx->height == 0) ||
- (dimensions.width == ctx->width && dimensions.height == ctx->height)) {
- av_log(s, AV_LOG_VERBOSE, "Trying video size %dx%d\n",
- dimensions.width, dimensions.height);
- ctx->width = dimensions.width;
- ctx->height = dimensions.height;
- selected_format = format;
- if (framerate) {
- av_log(s, AV_LOG_VERBOSE, "Checking support for framerate %f\n",
- framerate);
- for (AVFrameRateRange *range in format.videoSupportedFrameRateRanges)
- if (range.minFrameRate <= (framerate + epsilon) &&
- range.maxFrameRate >= (framerate - epsilon)) {
- selected_range = range;
- break;
- }
- } else {
- selected_range = format.videoSupportedFrameRateRanges[0];
- framerate = selected_range.maxFrameRate;
- break;
- }
-
- if (selected_format && selected_range)
- break;
- }
- }
-
- if (!selected_format) {
- av_log(s, AV_LOG_ERROR, "Selected video size (%dx%d) is not supported by the device\n",
- ctx->width, ctx->height);
- return false;
- } else {
- av_log(s, AV_LOG_VERBOSE, "Setting video size to %dx%d\n",
- ctx->width, ctx->height);
- }
-
- if (framerate && !selected_range) {
- av_log(s, AV_LOG_ERROR, "Selected framerate (%f) is not supported by the device\n",
- framerate);
- return false;
- } else {
- av_log(s, AV_LOG_VERBOSE, "Setting framerate to %f\n",
- framerate);
- }
-
- if ([video_device lockForConfiguration : NULL] == YES) {
- [video_device setActiveFormat : selected_format];
- [video_device setActiveVideoMinFrameDuration : CMTimeMake(1, framerate)];
- [video_device setActiveVideoMaxFrameDuration : CMTimeMake(1, framerate)];
- } else {
- av_log(s, AV_LOG_ERROR, "Could not lock device for configuration\n");
- return false;
- }
- return true;
-}
-
-static void print_supported_formats(AVFormatContext *s, AVCaptureDevice *device)
-{
- av_log(s, AV_LOG_WARNING, "Supported modes:\n");
- for (AVCaptureDeviceFormat *format in[device formats]) {
- CMFormatDescriptionRef formatDescription;
- CMVideoDimensions dimensions;
-
- formatDescription = (CMFormatDescriptionRef)format.formatDescription;
- dimensions = CMVideoFormatDescriptionGetDimensions(formatDescription);
-
- for (AVFrameRateRange *range in format.videoSupportedFrameRateRanges)
- av_log(s, AV_LOG_WARNING, " %dx%d@[%f %f]fps\n",
- dimensions.width, dimensions.height,
- range.minFrameRate, range.maxFrameRate);
- }
-}
-
-static int setup_stream(AVFormatContext *s, AVCaptureDevice *device)
-{
- AVFoundationCaptureContext *ctx = s->priv_data;
- NSError *__autoreleasing error = nil;
- AVCaptureDeviceInput *input;
- AVCaptureSession *session = (__bridge AVCaptureSession *)ctx->session;
-
- av_log(s, AV_LOG_VERBOSE, "Setting up stream for device %s\n", [[device uniqueID] UTF8String]);
-
- if (!configure_video_device(s, device)) {
- av_log(s, AV_LOG_ERROR, "device configuration failed\n");
- print_supported_formats(s, device);
- return AVERROR(EINVAL);
- }
-
- // add the input devices
- input = [AVCaptureDeviceInput deviceInputWithDevice:device
- error:&error];
- if (!input) {
- av_log(s, AV_LOG_ERROR, "%s\n",
- [[error localizedDescription] UTF8String]);
- return AVERROR_UNKNOWN;
- }
-
- if ([session canAddInput : input]) {
- [session addInput : input];
- } else {
- av_log(s, AV_LOG_ERROR, "Cannot add video input to capture session\n");
- return AVERROR(EINVAL);
- }
-
- // add the output devices
- if ([device hasMediaType : AVMediaTypeVideo]) {
- AVCaptureVideoDataOutput *out = [[AVCaptureVideoDataOutput alloc] init];
- NSNumber *core_video_fmt = nil;
- if (!out) {
- av_log(s, AV_LOG_ERROR, "Failed to init AV video output\n");
- return AVERROR(EINVAL);
- }
-
- [out setAlwaysDiscardsLateVideoFrames : YES];
-
- if (ctx->pixel_format) {
- // Try to use specified pixel format
- core_video_fmt = [NSNumber numberWithInt:pix_fmt_to_core_video(av_get_pix_fmt(ctx->pixel_format))];
- if ([[out availableVideoCVPixelFormatTypes] indexOfObject : core_video_fmt] != NSNotFound) {
- av_log(s, AV_LOG_VERBOSE, "Pixel format %s supported!\n", ctx->pixel_format);
- } else {
- core_video_fmt = nil;
- }
- }
-
- if (!ctx->pixel_format || !core_video_fmt) {
- av_log(s, AV_LOG_VERBOSE, "Pixel format not supported or not provided, overriding...\n");
- for (NSNumber *cv_pixel_format in[out availableVideoCVPixelFormatTypes]) {
- OSType cv_fmt = [cv_pixel_format intValue];
- enum AVPixelFormat pix_fmt = core_video_to_pix_fmt(cv_fmt);
- // Use the first one in the list, it will be the most effective
- if (pix_fmt != AV_PIX_FMT_NONE) {
- core_video_fmt = cv_pixel_format;
- ctx->pixel_format = av_strdup(av_get_pix_fmt_name(pix_fmt));
- break;
- }
- }
- }
-
- // fail if there is no appropriate pixel format
- if (!core_video_fmt) {
- return AVERROR(EINVAL);
- } else {
- av_log(s, AV_LOG_VERBOSE, "Using %s.\n",
- ctx->pixel_format);
- }
-
- NSDictionary *capture_dict = [NSDictionary dictionaryWithObject:core_video_fmt
- forKey:(const NSString *)kCVPixelBufferPixelFormatTypeKey];
- [out setVideoSettings : capture_dict];
-
- VideoCapture *delegate = [[VideoCapture alloc] initWithContext:ctx];
-
- dispatch_queue_t queue = dispatch_queue_create("avf_queue", NULL);
- [out setSampleBufferDelegate : delegate queue : queue];
-
- if ([session canAddOutput : out]) {
- [session addOutput : out];
- ctx->video_output = (__bridge_retained CFTypeRef)out;
- ctx->video_delegate = (__bridge_retained CFTypeRef)delegate;
- } else {
- av_log(s, AV_LOG_ERROR, "can't add video output to capture session\n");
- return AVERROR(EINVAL);
- }
- }
-
- return 0;
-}
-
-static int get_video_config(AVFormatContext *s)
-{
- AVFoundationCaptureContext *ctx = (AVFoundationCaptureContext *)s->priv_data;
- CVImageBufferRef image_buffer;
- CGSize image_buffer_size;
- AVStream *stream = avformat_new_stream(s, NULL);
-
- if (!stream) {
- av_log(s, AV_LOG_ERROR, "Failed to create AVStream\n");
- return AVERROR(EINVAL);
- }
-
- // Take stream info from the first frame.
- while (ctx->frames_captured < 1)
- CFRunLoopRunInMode(kCFRunLoopDefaultMode, 0.1, YES);
-
- lock_frames(ctx);
-
- ctx->video_stream_index = stream->index;
-
- avpriv_set_pts_info(stream, 64, 1, 1000000);
-
- image_buffer = ctx->current_frame;
- image_buffer_size = CVImageBufferGetEncodedSize(image_buffer);
-
- stream->codec->codec_id = AV_CODEC_ID_RAWVIDEO;
- stream->codec->codec_type = AVMEDIA_TYPE_VIDEO;
- stream->codec->width = (int)image_buffer_size.width;
- stream->codec->height = (int)image_buffer_size.height;
- stream->codec->pix_fmt = av_get_pix_fmt(ctx->pixel_format);
-
- CFRelease(ctx->current_frame);
- ctx->current_frame = nil;
-
- unlock_frames(ctx);
-
- return 0;
-}
-
-static void destroy_context(AVFoundationCaptureContext *ctx)
-{
- AVCaptureSession *session = (__bridge AVCaptureSession *)ctx->session;
- [session stopRunning];
-
- ctx->session = NULL;
-
- pthread_mutex_destroy(&ctx->frame_lock);
- pthread_cond_destroy(&ctx->frame_wait_cond);
-
- if (ctx->current_frame) {
- CFRelease(ctx->current_frame);
- }
-}
-
-static int setup_default_stream(AVFormatContext *s)
-{
- AVCaptureDevice *device;
- for (NSString *type in @[AVMediaTypeVideo]) {
- device = [AVCaptureDevice defaultDeviceWithMediaType:type];
- if (device) {
- av_log(s, AV_LOG_VERBOSE, "Using default device %s\n",
- [[device uniqueID] UTF8String]);
- return setup_stream(s, device);
- }
- }
- return AVERROR(EINVAL);
-}
-
-/**
- * Try to open device given in filename
- * Two supported formats: "device_unique_id" or "[device_unique_id]"
- */
-static AVCaptureDevice *create_device(AVFormatContext *s)
-{
- NSString *filename;
- NSError *__autoreleasing error = nil;
- NSRegularExpression *exp;
- NSArray *matches;
- AVCaptureDevice *device;
-
- filename = [NSString stringWithFormat:@ "%s", s->filename];
-
- if ((device = [AVCaptureDevice deviceWithUniqueID:filename])) {
- av_log(s, AV_LOG_VERBOSE, "Device with name %s found\n", [filename UTF8String]);
- return device;
- }
-
- // Remove '[]' from the device name
- NSString *pat = @"(?<=\\[).*?(?=\\])";
- exp = [NSRegularExpression regularExpressionWithPattern:pat
- options:0
- error:&error];
- if (!exp) {
- av_log(s, AV_LOG_ERROR, "%s\n",
- [[error localizedDescription] UTF8String]);
- return NULL;
- }
-
- matches = [exp matchesInString:filename options:0
- range:NSMakeRange(0, [filename length])];
-
- if (matches.count > 0) {
- for (NSTextCheckingResult *match in matches) {
- NSRange range = [match rangeAtIndex:0];
- NSString *uniqueID = [filename substringWithRange:NSMakeRange(range.location, range.length)];
- av_log(s, AV_LOG_VERBOSE, "opening device with ID: %s\n", [uniqueID UTF8String]);
- if (!(device = [AVCaptureDevice deviceWithUniqueID:uniqueID])) {
- av_log(s, AV_LOG_ERROR, "Device with name %s not found", [filename UTF8String]);
- return NULL;
- }
- return device;
- }
- }
- return NULL;
-}
-
-static int setup_streams(AVFormatContext *s)
-{
- AVFoundationCaptureContext *ctx = s->priv_data;
- int ret;
- AVCaptureDevice *device;
-
- pthread_mutex_init(&ctx->frame_lock, NULL);
- pthread_cond_init(&ctx->frame_wait_cond, NULL);
-
- ctx->session = (__bridge_retained CFTypeRef)[[AVCaptureSession alloc] init];
-
- if (!strncmp(s->filename, "default", 7)) {
- ret = setup_default_stream(s);
- } else {
- device = create_device(s);
- if (device) {
- ret = setup_stream(s, device);
- } else {
- av_log(s, AV_LOG_ERROR, "No matches for %s\n", s->filename);
- ret = setup_default_stream(s);
- }
- }
-
- if (ret < 0) {
- av_log(s, AV_LOG_ERROR, "No device could be added\n");
- return ret;
- }
-
- av_log(s, AV_LOG_VERBOSE, "Starting session!\n");
- [(__bridge AVCaptureSession *)ctx->session startRunning];
-
- // Session is started, unlock device
- [device unlockForConfiguration];
-
- av_log(s, AV_LOG_VERBOSE, "Checking video config\n");
- if (get_video_config(s)) {
- destroy_context(ctx);
- return AVERROR(EIO);
- }
-
- return 0;
-}
-
-static int avfoundation_read_header(AVFormatContext *s)
-{
- AVFoundationCaptureContext *ctx = s->priv_data;
- ctx->first_pts = av_gettime();
-
- AVRational framerate_q = { 0, 1 };
- ctx->internal_framerate = framerate_q;
-
- if (ctx->list_devices)
- return avfoundation_list_capture_devices(s);
- if (ctx->list_formats) {
- return list_formats(s);
- }
-
- if (ctx->pixel_format) {
- if (av_get_pix_fmt(ctx->pixel_format) == AV_PIX_FMT_NONE) {
- av_log(s, AV_LOG_ERROR, "No such input format: %s.\n",
- ctx->pixel_format);
- return AVERROR(EINVAL);
- }
- }
-
- if (ctx->video_size &&
- (av_parse_video_size(&ctx->width, &ctx->height, ctx->video_size)) < 0) {
- av_log(s, AV_LOG_ERROR, "Could not parse video size '%s'.\n",
- ctx->video_size);
- return AVERROR(EINVAL);
- }
-
- if (ctx->framerate &&
- (av_parse_video_rate(&ctx->internal_framerate, ctx->framerate)) < 0) {
- av_log(s, AV_LOG_ERROR, "Could not parse framerate '%s'.\n",
- ctx->framerate);
- return AVERROR(EINVAL);
- }
-
- return setup_streams(s);
-}
-
-static int avfoundation_read_packet(AVFormatContext *s, AVPacket *pkt)
-{
- AVFoundationCaptureContext *ctx = (AVFoundationCaptureContext *)s->priv_data;
-
- do {
- lock_frames(ctx);
-
- if (ctx->current_frame != nil) {
- if (av_new_packet(pkt, (int)CVPixelBufferGetDataSize(ctx->current_frame)) < 0) {
- return AVERROR(EIO);
- }
-
- pkt->pts = pkt->dts = av_rescale_q(av_gettime() - ctx->first_pts,
- AV_TIME_BASE_Q,
- (AVRational) {1, 1000000 });
- pkt->stream_index = ctx->video_stream_index;
- pkt->flags |= AV_PKT_FLAG_KEY;
-
- CVPixelBufferLockBaseAddress(ctx->current_frame, 0);
-
- void *data = CVPixelBufferGetBaseAddress(ctx->current_frame);
- memcpy(pkt->data, data, pkt->size);
-
- CVPixelBufferUnlockBaseAddress(ctx->current_frame, 0);
- CFRelease(ctx->current_frame);
- ctx->current_frame = nil;
- } else {
- pkt->data = NULL;
- pthread_cond_wait(&ctx->frame_wait_cond, &ctx->frame_lock);
- }
-
- unlock_frames(ctx);
- } while (!pkt->data);
-
- return 0;
-}
-
-static int avfoundation_read_close(AVFormatContext *s)
-{
- av_log(s, AV_LOG_VERBOSE, "Closing session...\n");
- AVFoundationCaptureContext *ctx = s->priv_data;
- destroy_context(ctx);
- return 0;
-}
-
-static const AVClass avfoundation_class = {
- .class_name = "AVFoundation AVCaptureDevice indev",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
-
-AVInputFormat ff_avfoundation_demuxer = {
- .name = "avfoundation",
- .long_name = NULL_IF_CONFIG_SMALL("AVFoundation AVCaptureDevice grab"),
- .priv_data_size = sizeof(AVFoundationCaptureContext),
- .read_header = avfoundation_read_header,
- .read_packet = avfoundation_read_packet,
- .read_close = avfoundation_read_close,
- .flags = AVFMT_NOFILE,
- .priv_class = &avfoundation_class,
-};
diff --git a/libavdevice/bktr.c b/libavdevice/bktr.c
index f76a1636c6..2902425b4d 100644
--- a/libavdevice/bktr.c
+++ b/libavdevice/bktr.c
@@ -3,28 +3,27 @@
* Copyright (c) 2002 Steve O'Hara-Smith
* based on
* Linux video grab interface
- * Copyright (c) 2000,2001 Gerard Lantau
+ * Copyright (c) 2000, 2001 Fabrice Bellard
* and
* simple_grab.c Copyright (c) 1999 Roger Hardiman
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
-#include "libavformat/avformat.h"
#include "libavformat/internal.h"
#include "libavutil/internal.h"
#include "libavutil/log.h"
@@ -50,6 +49,7 @@
#include <sys/time.h>
#include <signal.h>
#include <stdint.h>
+#include "avdevice.h"
typedef struct VideoData {
AVClass *class;
@@ -58,7 +58,6 @@ typedef struct VideoData {
int width, height;
uint64_t per_frame;
int standard;
- char *video_size; /**< String describing video size, set by a private option. */
char *framerate; /**< Set by a private option. */
} VideoData;
@@ -81,7 +80,7 @@ typedef struct VideoData {
#define VIDEO_FORMAT NTSC
#endif
-static int bktr_dev[] = { METEOR_DEV0, METEOR_DEV1, METEOR_DEV2,
+static const int bktr_dev[] = { METEOR_DEV0, METEOR_DEV1, METEOR_DEV2,
METEOR_DEV3, METEOR_DEV_SVIDEO };
uint8_t *video_buf;
@@ -104,7 +103,7 @@ static av_cold int bktr_init(const char *video_device, int width, int height,
long ioctl_frequency;
char *arg;
int c;
- struct sigaction act = { 0 }, old;
+ struct sigaction act = { {0} }, old;
int ret;
char errbuf[128];
@@ -260,15 +259,9 @@ static int grab_read_header(AVFormatContext *s1)
{
VideoData *s = s1->priv_data;
AVStream *st;
- int width, height;
AVRational framerate;
int ret = 0;
- if ((ret = av_parse_video_size(&width, &height, s->video_size)) < 0) {
- av_log(s1, AV_LOG_ERROR, "Could not parse video size '%s'.\n", s->video_size);
- goto out;
- }
-
if (!s->framerate)
switch (s->standard) {
case PAL: s->framerate = av_strdup("pal"); break;
@@ -291,19 +284,16 @@ static int grab_read_header(AVFormatContext *s1)
}
avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in use */
- s->width = width;
- s->height = height;
s->per_frame = ((uint64_t)1000000 * framerate.den) / framerate.num;
st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
st->codecpar->format = AV_PIX_FMT_YUV420P;
st->codecpar->codec_id = AV_CODEC_ID_RAWVIDEO;
- st->codecpar->width = width;
- st->codecpar->height = height;
+ st->codecpar->width = s->width;
+ st->codecpar->height = s->height;
st->avg_frame_rate = framerate;
-
- if (bktr_init(s1->filename, width, height, s->standard,
+ if (bktr_init(s1->filename, s->width, s->height, s->standard,
&s->video_fd, &s->tuner_fd, -1, 0.0) < 0) {
ret = AVERROR(EIO);
goto out;
@@ -344,7 +334,7 @@ static const AVOption options[] = {
{ "PALN", "", 0, AV_OPT_TYPE_CONST, {.i64 = PALN}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "standard" },
{ "PALM", "", 0, AV_OPT_TYPE_CONST, {.i64 = PALM}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "standard" },
{ "NTSCJ", "", 0, AV_OPT_TYPE_CONST, {.i64 = NTSCJ}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "standard" },
- { "video_size", "A string describing frame size, such as 640x480 or hd720.", OFFSET(video_size), AV_OPT_TYPE_STRING, {.str = "vga"}, 0, 0, DEC },
+ { "video_size", "A string describing frame size, such as 640x480 or hd720.", OFFSET(width), AV_OPT_TYPE_IMAGE_SIZE, {.str = "vga"}, 0, 0, DEC },
{ "framerate", "", OFFSET(framerate), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC },
{ NULL },
};
@@ -354,6 +344,7 @@ static const AVClass bktr_class = {
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
+ .category = AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT,
};
AVInputFormat ff_bktr_demuxer = {
diff --git a/libavdevice/caca.c b/libavdevice/caca.c
new file mode 100644
index 0000000000..93cc0ffd25
--- /dev/null
+++ b/libavdevice/caca.c
@@ -0,0 +1,241 @@
+/*
+ * Copyright (c) 2012 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <caca.h>
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "avdevice.h"
+
+typedef struct CACAContext {
+ AVClass *class;
+ AVFormatContext *ctx;
+ char *window_title;
+ int window_width, window_height;
+
+ caca_canvas_t *canvas;
+ caca_display_t *display;
+ caca_dither_t *dither;
+
+ char *algorithm, *antialias;
+ char *charset, *color;
+ char *driver;
+
+ char *list_dither;
+ int list_drivers;
+} CACAContext;
+
+static int caca_write_trailer(AVFormatContext *s)
+{
+ CACAContext *c = s->priv_data;
+
+ av_freep(&c->window_title);
+
+ if (c->display) {
+ caca_free_display(c->display);
+ c->display = NULL;
+ }
+ if (c->dither) {
+ caca_free_dither(c->dither);
+ c->dither = NULL;
+ }
+ if (c->canvas) {
+ caca_free_canvas(c->canvas);
+ c->canvas = NULL;
+ }
+ return 0;
+}
+
+static void list_drivers(CACAContext *c)
+{
+ const char *const *drivers = caca_get_display_driver_list();
+ int i;
+
+ av_log(c->ctx, AV_LOG_INFO, "Available drivers:\n");
+ for (i = 0; drivers[i]; i += 2)
+ av_log(c->ctx, AV_LOG_INFO, "%s: %s\n", drivers[i], drivers[i + 1]);
+}
+
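+/* Each caca_get_dither_*_list() returns a NULL-terminated array of
+ * (value, description) string pairs, hence the i += 2 stride in the
+ * helpers this macro generates. */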
+#define DEFINE_LIST_DITHER(thing, thing_str) \
+static void list_dither_## thing(CACAContext *c) \
+{ \
+ const char *const *thing = caca_get_dither_## thing ##_list(c->dither); \
+ int i; \
+ \
+ av_log(c->ctx, AV_LOG_INFO, "Available %s:\n", thing_str); \
+ for (i = 0; thing[i]; i += 2) \
+ av_log(c->ctx, AV_LOG_INFO, "%s: %s\n", thing[i], thing[i + 1]); \
+}
+
+DEFINE_LIST_DITHER(color, "colors");
+DEFINE_LIST_DITHER(charset, "charsets");
+DEFINE_LIST_DITHER(algorithm, "algorithms");
+DEFINE_LIST_DITHER(antialias, "antialias");
+
+static int caca_write_header(AVFormatContext *s)
+{
+ CACAContext *c = s->priv_data;
+ AVStream *st = s->streams[0];
+ AVCodecParameters *encctx = st->codecpar;
+ int ret, bpp;
+
+ c->ctx = s;
+ if (c->list_drivers) {
+ list_drivers(c);
+ return AVERROR_EXIT;
+ }
+ if (c->list_dither) {
+ if (!strcmp(c->list_dither, "colors")) {
+ list_dither_color(c);
+ } else if (!strcmp(c->list_dither, "charsets")) {
+ list_dither_charset(c);
+ } else if (!strcmp(c->list_dither, "algorithms")) {
+ list_dither_algorithm(c);
+ } else if (!strcmp(c->list_dither, "antialiases")) {
+ list_dither_antialias(c);
+ } else {
+ av_log(s, AV_LOG_ERROR,
+               "Invalid argument '%s' for 'list_dither' option\n"
+               "Argument must be one of 'algorithms', 'antialiases', 'charsets', 'colors'\n",
+ c->list_dither);
+ return AVERROR(EINVAL);
+ }
+ return AVERROR_EXIT;
+ }
+
+ if ( s->nb_streams > 1
+ || encctx->codec_type != AVMEDIA_TYPE_VIDEO
+ || encctx->codec_id != AV_CODEC_ID_RAWVIDEO) {
+ av_log(s, AV_LOG_ERROR, "Only supports one rawvideo stream\n");
+ return AVERROR(EINVAL);
+ }
+
+ if (encctx->format != AV_PIX_FMT_RGB24) {
+ av_log(s, AV_LOG_ERROR,
+ "Unsupported pixel format '%s', choose rgb24\n",
+ av_get_pix_fmt_name(encctx->format));
+ return AVERROR(EINVAL);
+ }
+
+ c->canvas = caca_create_canvas(c->window_width, c->window_height);
+ if (!c->canvas) {
+ ret = AVERROR(errno);
+ av_log(s, AV_LOG_ERROR, "Failed to create canvas\n");
+ goto fail;
+ }
+
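+    /* AV_PIX_FMT_RGB24 stores bytes in R,G,B order; read as little-endian
+     * words the per-channel masks are R=0x0000ff, G=0x00ff00, B=0xff0000,
+     * which is what caca_create_dither() is given below. */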
+ bpp = av_get_bits_per_pixel(av_pix_fmt_desc_get(encctx->format));
+ c->dither = caca_create_dither(bpp, encctx->width, encctx->height,
+ bpp / 8 * encctx->width,
+ 0x0000ff, 0x00ff00, 0xff0000, 0);
+ if (!c->dither) {
+ ret = AVERROR(errno);
+ av_log(s, AV_LOG_ERROR, "Failed to create dither\n");
+ goto fail;
+ }
+
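+/* Map each string option onto the matching caca_set_dither_*() setter and
+ * turn a failure into an errno-based error with a uniform message. */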
+#define CHECK_DITHER_OPT(opt) do { \
+ if (caca_set_dither_##opt(c->dither, c->opt) < 0) { \
+ ret = AVERROR(errno); \
+ av_log(s, AV_LOG_ERROR, "Failed to set value '%s' for option '%s'\n", \
+ c->opt, #opt); \
+ goto fail; \
+ } \
+} while (0)
+
+ CHECK_DITHER_OPT(algorithm);
+ CHECK_DITHER_OPT(antialias);
+ CHECK_DITHER_OPT(charset);
+ CHECK_DITHER_OPT(color);
+
+ c->display = caca_create_display_with_driver(c->canvas, c->driver);
+ if (!c->display) {
+ ret = AVERROR(errno);
+ av_log(s, AV_LOG_ERROR, "Failed to create display\n");
+ list_drivers(c);
+ goto fail;
+ }
+
+ if (!c->window_width || !c->window_height) {
+ c->window_width = caca_get_canvas_width(c->canvas);
+ c->window_height = caca_get_canvas_height(c->canvas);
+ }
+
+ if (!c->window_title)
+ c->window_title = av_strdup(s->filename);
+ caca_set_display_title(c->display, c->window_title);
+ caca_set_display_time(c->display, av_rescale_q(1, st->codec->time_base, AV_TIME_BASE_Q));
+
+ return 0;
+
+fail:
+ caca_write_trailer(s);
+ return ret;
+}
+
+static int caca_write_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ CACAContext *c = s->priv_data;
+
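+    /* pkt->data holds one complete rgb24 frame; dither it onto the canvas
+     * and push the result to the display. */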
+ caca_dither_bitmap(c->canvas, 0, 0, c->window_width, c->window_height, c->dither, pkt->data);
+ caca_refresh_display(c->display);
+
+ return 0;
+}
+
+#define OFFSET(x) offsetof(CACAContext,x)
+#define ENC AV_OPT_FLAG_ENCODING_PARAM
+
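+/* AV_OPT_TYPE_IMAGE_SIZE parses "WxH" and writes width and height into two
+ * consecutive ints, so window_width must be immediately followed by
+ * window_height in CACAContext. */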
+static const AVOption options[] = {
+ { "window_size", "set window forced size", OFFSET(window_width), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL }, 0, 0, ENC},
+ { "window_title", "set window title", OFFSET(window_title), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, ENC },
+ { "driver", "set display driver", OFFSET(driver), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, ENC },
+ { "algorithm", "set dithering algorithm", OFFSET(algorithm), AV_OPT_TYPE_STRING, {.str = "default" }, 0, 0, ENC },
+ { "antialias", "set antialias method", OFFSET(antialias), AV_OPT_TYPE_STRING, {.str = "default" }, 0, 0, ENC },
+ { "charset", "set charset used to render output", OFFSET(charset), AV_OPT_TYPE_STRING, {.str = "default" }, 0, 0, ENC },
+ { "color", "set color used to render output", OFFSET(color), AV_OPT_TYPE_STRING, {.str = "default" }, 0, 0, ENC },
+ { "list_drivers", "list available drivers", OFFSET(list_drivers), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, ENC },
+ { "list_dither", "list available dither options", OFFSET(list_dither), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 1, ENC, "list_dither" },
+ { "algorithms", NULL, 0, AV_OPT_TYPE_CONST, {.str = "algorithms"}, 0, 0, ENC, "list_dither" },
+ { "antialiases", NULL, 0, AV_OPT_TYPE_CONST, {.str = "antialiases"},0, 0, ENC, "list_dither" },
+ { "charsets", NULL, 0, AV_OPT_TYPE_CONST, {.str = "charsets"}, 0, 0, ENC, "list_dither" },
+ { "colors", NULL, 0, AV_OPT_TYPE_CONST, {.str = "colors"}, 0, 0, ENC, "list_dither" },
+ { NULL },
+};
+
+static const AVClass caca_class = {
+ .class_name = "caca_outdev",
+ .item_name = av_default_item_name,
+ .option = options,
+ .version = LIBAVUTIL_VERSION_INT,
+ .category = AV_CLASS_CATEGORY_DEVICE_VIDEO_OUTPUT,
+};
+
+AVOutputFormat ff_caca_muxer = {
+ .name = "caca",
+ .long_name = NULL_IF_CONFIG_SMALL("caca (color ASCII art) output device"),
+ .priv_data_size = sizeof(CACAContext),
+ .audio_codec = AV_CODEC_ID_NONE,
+ .video_codec = AV_CODEC_ID_RAWVIDEO,
+ .write_header = caca_write_header,
+ .write_packet = caca_write_packet,
+ .write_trailer = caca_write_trailer,
+ .flags = AVFMT_NOFILE,
+ .priv_class = &caca_class,
+};
diff --git a/libavdevice/decklink_common.cpp b/libavdevice/decklink_common.cpp
new file mode 100644
index 0000000000..ac7964cd17
--- /dev/null
+++ b/libavdevice/decklink_common.cpp
@@ -0,0 +1,241 @@
+/*
+ * Blackmagic DeckLink common code
+ * Copyright (c) 2013-2014 Ramiro Polla, Luca Barbato, Deti Fliegl
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <DeckLinkAPI.h>
+#ifdef _WIN32
+#include <DeckLinkAPI_i.c>
+#else
+#include <DeckLinkAPIDispatch.cpp>
+#endif
+
+#include <pthread.h>
+#include <semaphore.h>
+
+extern "C" {
+#include "libavformat/avformat.h"
+#include "libavformat/internal.h"
+#include "libavutil/imgutils.h"
+}
+
+#include "decklink_common.h"
+
+#ifdef _WIN32
+IDeckLinkIterator *CreateDeckLinkIteratorInstance(void)
+{
+ IDeckLinkIterator *iter;
+
+ if (CoInitialize(NULL) < 0) {
+ av_log(NULL, AV_LOG_ERROR, "COM initialization failed.\n");
+ return NULL;
+ }
+
+ if (CoCreateInstance(CLSID_CDeckLinkIterator, NULL, CLSCTX_ALL,
+ IID_IDeckLinkIterator, (void**) &iter) != S_OK) {
+ av_log(NULL, AV_LOG_ERROR, "DeckLink drivers not installed.\n");
+ return NULL;
+ }
+
+ return iter;
+}
+#endif
+
+#ifdef _WIN32
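+/* Convert a UTF-16 string from the DeckLink API to UTF-8: the first
+ * WideCharToMultiByte() call computes the required buffer size, the
+ * second performs the conversion. */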
+static char *dup_wchar_to_utf8(wchar_t *w)
+{
+ char *s = NULL;
+ int l = WideCharToMultiByte(CP_UTF8, 0, w, -1, 0, 0, 0, 0);
+ s = (char *) av_malloc(l);
+ if (s)
+ WideCharToMultiByte(CP_UTF8, 0, w, -1, s, l, 0, 0);
+ return s;
+}
+#define DECKLINK_STR OLECHAR *
+#define DECKLINK_STRDUP dup_wchar_to_utf8
+#define DECKLINK_FREE(s) SysFreeString(s)
+#elif defined(__APPLE__)
+static char *dup_cfstring_to_utf8(CFStringRef w)
+{
+ char s[256];
+ CFStringGetCString(w, s, 255, kCFStringEncodingUTF8);
+ return av_strdup(s);
+}
+#define DECKLINK_STR const __CFString *
+#define DECKLINK_STRDUP dup_cfstring_to_utf8
+#define DECKLINK_FREE(s) free((void *) s)
+#else
+#define DECKLINK_STR const char *
+#define DECKLINK_STRDUP av_strdup
+/* free() is needed for a string returned by the DeckLink SDK. */
+#define DECKLINK_FREE(s) free((void *) s)
+#endif
+
+HRESULT ff_decklink_get_display_name(IDeckLink *This, const char **displayName)
+{
+ DECKLINK_STR tmpDisplayName;
+ HRESULT hr = This->GetDisplayName(&tmpDisplayName);
+ if (hr != S_OK)
+ return hr;
+ *displayName = DECKLINK_STRDUP(tmpDisplayName);
+ DECKLINK_FREE(tmpDisplayName);
+ return hr;
+}
+
+int ff_decklink_set_format(AVFormatContext *avctx,
+ int width, int height,
+ int tb_num, int tb_den,
+ decklink_direction_t direction, int num)
+{
+ struct decklink_cctx *cctx = (struct decklink_cctx *) avctx->priv_data;
+ struct decklink_ctx *ctx = (struct decklink_ctx *)cctx->ctx;
+ BMDDisplayModeSupport support;
+ IDeckLinkDisplayModeIterator *itermode;
+ IDeckLinkDisplayMode *mode;
+ int i = 1;
+ HRESULT res;
+
+ if (direction == DIRECTION_IN) {
+ res = ctx->dli->GetDisplayModeIterator (&itermode);
+ } else {
+ res = ctx->dlo->GetDisplayModeIterator (&itermode);
+ }
+
+    if (res != S_OK) {
+ av_log(avctx, AV_LOG_ERROR, "Could not get Display Mode Iterator\n");
+ return AVERROR(EIO);
+ }
+
+
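+    /* The DeckLink SDK reports frame durations with a x1000 timescale
+     * (e.g. 1000/25000 for 25 fps), so plain integer rates are scaled up
+     * before comparing. */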
+ if (tb_num == 1) {
+ tb_num *= 1000;
+ tb_den *= 1000;
+ }
+ ctx->bmd_mode = bmdModeUnknown;
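+    /* Pick the first mode matching the requested geometry and rate exactly,
+     * or the num-th mode (1-based, as printed by -list_formats). */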
+ while ((ctx->bmd_mode == bmdModeUnknown) && itermode->Next(&mode) == S_OK) {
+ BMDTimeValue bmd_tb_num, bmd_tb_den;
+ int bmd_width = mode->GetWidth();
+ int bmd_height = mode->GetHeight();
+
+ mode->GetFrameRate(&bmd_tb_num, &bmd_tb_den);
+
+ if ((bmd_width == width && bmd_height == height &&
+ bmd_tb_num == tb_num && bmd_tb_den == tb_den) || i == num) {
+ ctx->bmd_mode = mode->GetDisplayMode();
+ ctx->bmd_width = bmd_width;
+ ctx->bmd_height = bmd_height;
+ ctx->bmd_tb_den = bmd_tb_den;
+ ctx->bmd_tb_num = bmd_tb_num;
+ ctx->bmd_field_dominance = mode->GetFieldDominance();
+ av_log(avctx, AV_LOG_INFO, "Found Decklink mode %d x %d with rate %.2f%s\n",
+                   bmd_width, bmd_height, (float)bmd_tb_den / (float)bmd_tb_num,
+                   (ctx->bmd_field_dominance == bmdLowerFieldFirst || ctx->bmd_field_dominance == bmdUpperFieldFirst) ? "(i)" : "");
+ }
+
+ mode->Release();
+ i++;
+ }
+
+ itermode->Release();
+
+ if (ctx->bmd_mode == bmdModeUnknown)
+ return -1;
+ if (direction == DIRECTION_IN) {
+ if (ctx->dli->DoesSupportVideoMode(ctx->bmd_mode, bmdFormat8BitYUV,
+ bmdVideoOutputFlagDefault,
+ &support, NULL) != S_OK)
+ return -1;
+ } else {
+ if (ctx->dlo->DoesSupportVideoMode(ctx->bmd_mode, bmdFormat8BitYUV,
+ bmdVideoOutputFlagDefault,
+ &support, NULL) != S_OK)
+ return -1;
+ }
+ if (support == bmdDisplayModeSupported)
+ return 0;
+
+ return -1;
+}
+
+int ff_decklink_set_format(AVFormatContext *avctx, decklink_direction_t direction, int num) {
+ return ff_decklink_set_format(avctx, 0, 0, 0, 0, direction, num);
+}
+
+int ff_decklink_list_devices(AVFormatContext *avctx)
+{
+ IDeckLink *dl = NULL;
+ IDeckLinkIterator *iter = CreateDeckLinkIteratorInstance();
+ if (!iter) {
+ av_log(avctx, AV_LOG_ERROR, "Could not create DeckLink iterator\n");
+ return AVERROR(EIO);
+ }
+ av_log(avctx, AV_LOG_INFO, "Blackmagic DeckLink devices:\n");
+ while (iter->Next(&dl) == S_OK) {
+ const char *displayName;
+ ff_decklink_get_display_name(dl, &displayName);
+ av_log(avctx, AV_LOG_INFO, "\t'%s'\n", displayName);
+ av_free((void *) displayName);
+ dl->Release();
+ }
+ iter->Release();
+ return 0;
+}
+
+int ff_decklink_list_formats(AVFormatContext *avctx, decklink_direction_t direction)
+{
+ struct decklink_cctx *cctx = (struct decklink_cctx *) avctx->priv_data;
+ struct decklink_ctx *ctx = (struct decklink_ctx *)cctx->ctx;
+ IDeckLinkDisplayModeIterator *itermode;
+ IDeckLinkDisplayMode *mode;
+    int i = 0;
+ HRESULT res;
+
+ if (direction == DIRECTION_IN) {
+ res = ctx->dli->GetDisplayModeIterator (&itermode);
+ } else {
+ res = ctx->dlo->GetDisplayModeIterator (&itermode);
+ }
+
+    if (res != S_OK) {
+ av_log(avctx, AV_LOG_ERROR, "Could not get Display Mode Iterator\n");
+ return AVERROR(EIO);
+ }
+
+ av_log(avctx, AV_LOG_INFO, "Supported formats for '%s':\n",
+ avctx->filename);
+ while (itermode->Next(&mode) == S_OK) {
+ BMDTimeValue tb_num, tb_den;
+ mode->GetFrameRate(&tb_num, &tb_den);
+ av_log(avctx, AV_LOG_INFO, "\t%d\t%ldx%ld at %d/%d fps",
+               ++i, mode->GetWidth(), mode->GetHeight(),
+ (int) tb_den, (int) tb_num);
+ switch (mode->GetFieldDominance()) {
+ case bmdLowerFieldFirst:
+ av_log(avctx, AV_LOG_INFO, " (interlaced, lower field first)"); break;
+ case bmdUpperFieldFirst:
+ av_log(avctx, AV_LOG_INFO, " (interlaced, upper field first)"); break;
+ }
+ av_log(avctx, AV_LOG_INFO, "\n");
+ mode->Release();
+ }
+
+ itermode->Release();
+
+ return 0;
+}
diff --git a/libavdevice/decklink_common.h b/libavdevice/decklink_common.h
new file mode 100644
index 0000000000..dff4fc1cec
--- /dev/null
+++ b/libavdevice/decklink_common.h
@@ -0,0 +1,109 @@
+/*
+ * Blackmagic DeckLink common code
+ * Copyright (c) 2013-2014 Ramiro Polla, Luca Barbato, Deti Fliegl
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVDEVICE_DECKLINK_COMMON_H
+#define AVDEVICE_DECKLINK_COMMON_H
+
+#include <DeckLinkAPIVersion.h>
+
+#include "decklink_common_c.h"
+
+class decklink_output_callback;
+class decklink_input_callback;
+
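+/* Minimal blocking packet FIFO: the DeckLink capture callback pushes
+ * packets, the demuxer's read_packet pops them; size counts queued bytes
+ * and drives the overrun check. */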
+typedef struct AVPacketQueue {
+ AVPacketList *first_pkt, *last_pkt;
+ int nb_packets;
+ unsigned long long size;
+ int abort_request;
+ pthread_mutex_t mutex;
+ pthread_cond_t cond;
+ AVFormatContext *avctx;
+} AVPacketQueue;
+
+struct decklink_ctx {
+ /* DeckLink SDK interfaces */
+ IDeckLink *dl;
+ IDeckLinkOutput *dlo;
+ IDeckLinkInput *dli;
+ decklink_output_callback *output_callback;
+ decklink_input_callback *input_callback;
+
+ /* DeckLink mode information */
+ BMDTimeValue bmd_tb_den;
+ BMDTimeValue bmd_tb_num;
+ BMDDisplayMode bmd_mode;
+ int bmd_width;
+ int bmd_height;
+ int bmd_field_dominance;
+
+ /* Capture buffer queue */
+ AVPacketQueue queue;
+
+ /* Streams present */
+ int audio;
+ int video;
+
+ /* Status */
+ int playback_started;
+ int capture_started;
+ int64_t last_pts;
+ unsigned long frameCount;
+ unsigned int dropped;
+ AVStream *audio_st;
+ AVStream *video_st;
+ AVStream *teletext_st;
+
+ /* Options */
+ int list_devices;
+ int list_formats;
+ int64_t teletext_lines;
+ double preroll;
+
+ int frames_preroll;
+ int frames_buffer;
+
+ sem_t semaphore;
+
+ int channels;
+};
+
+typedef enum { DIRECTION_IN, DIRECTION_OUT} decklink_direction_t;
+
+#ifdef _WIN32
+#if BLACKMAGIC_DECKLINK_API_VERSION < 0x0a040000
+typedef unsigned long buffercount_type;
+#else
+typedef unsigned int buffercount_type;
+#endif
+IDeckLinkIterator *CreateDeckLinkIteratorInstance(void);
+#else
+typedef uint32_t buffercount_type;
+#endif
+
+
+HRESULT ff_decklink_get_display_name(IDeckLink *This, const char **displayName);
+int ff_decklink_set_format(AVFormatContext *avctx, int width, int height, int tb_num, int tb_den, decklink_direction_t direction = DIRECTION_OUT, int num = 0);
+int ff_decklink_set_format(AVFormatContext *avctx, decklink_direction_t direction, int num);
+int ff_decklink_list_devices(AVFormatContext *avctx);
+int ff_decklink_list_formats(AVFormatContext *avctx, decklink_direction_t direction = DIRECTION_OUT);
+
+#endif /* AVDEVICE_DECKLINK_COMMON_H */
diff --git a/libavdevice/decklink_common_c.h b/libavdevice/decklink_common_c.h
new file mode 100644
index 0000000000..2b5d92f250
--- /dev/null
+++ b/libavdevice/decklink_common_c.h
@@ -0,0 +1,39 @@
+/*
+ * Blackmagic DeckLink common code
+ * Copyright (c) 2013-2014 Ramiro Polla
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVDEVICE_DECKLINK_COMMON_C_H
+#define AVDEVICE_DECKLINK_COMMON_C_H
+
+struct decklink_cctx {
+ const AVClass *cclass;
+
+ void *ctx;
+
+ /* Options */
+ int list_devices;
+ int list_formats;
+ int64_t teletext_lines;
+ double preroll;
+ int v210;
+ int audio_channels;
+};
+
+#endif /* AVDEVICE_DECKLINK_COMMON_C_H */
diff --git a/libavdevice/decklink_dec.cpp b/libavdevice/decklink_dec.cpp
new file mode 100644
index 0000000000..1c305f3f00
--- /dev/null
+++ b/libavdevice/decklink_dec.cpp
@@ -0,0 +1,646 @@
+/*
+ * Blackmagic DeckLink input
+ * Copyright (c) 2013-2014 Luca Barbato, Deti Fliegl
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <DeckLinkAPI.h>
+
+#include <pthread.h>
+#include <semaphore.h>
+
+extern "C" {
+#include "config.h"
+#include "libavformat/avformat.h"
+#include "libavformat/internal.h"
+#include "libavutil/avstring.h"
+#include "libavutil/imgutils.h"
+#if CONFIG_LIBZVBI
+#include <libzvbi.h>
+#endif
+}
+
+#include "decklink_common.h"
+#include "decklink_dec.h"
+
+#if CONFIG_LIBZVBI
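+/* Build the field_parity/line_offset byte of an EBU teletext data unit:
+ * bit 5 marks the first field (lines below 313), the low bits carry the
+ * VBI line offset within that field. */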
+static uint8_t calc_parity_and_line_offset(int line)
+{
+ uint8_t ret = (line < 313) << 5;
+ if (line >= 7 && line <= 22)
+ ret += line;
+ if (line >= 320 && line <= 335)
+ ret += (line - 313);
+ return ret;
+}
+
+static int teletext_data_unit_from_vbi_data(int line, uint8_t *src, uint8_t *tgt)
+{
+ vbi_bit_slicer slicer;
+
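+    /* Bit-slice one 720-pixel UYVY line sampled at 13.5 MHz into a 42-byte
+     * teletext payload; 6937500 is the teletext bit rate in Hz and
+     * 0x00aaaae4 the clock run-in plus framing code sequence. */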
+ vbi_bit_slicer_init(&slicer, 720, 13500000, 6937500, 6937500, 0x00aaaae4, 0xffff, 18, 6, 42 * 8, VBI_MODULATION_NRZ_MSB, VBI_PIXFMT_UYVY);
+
+ if (vbi_bit_slice(&slicer, src, tgt + 4) == FALSE)
+ return -1;
+
+ tgt[0] = 0x02; // data_unit_id
+ tgt[1] = 0x2c; // data_unit_length
+ tgt[2] = calc_parity_and_line_offset(line); // field_parity, line_offset
+ tgt[3] = 0xe4; // framing code
+
+ return 0;
+}
+#endif
+
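+/* The packet queue below decouples the SDK's streaming thread from the
+ * demuxer: frames arrive on a driver thread and are drained by
+ * ff_decklink_read_packet(). */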
+static void avpacket_queue_init(AVFormatContext *avctx, AVPacketQueue *q)
+{
+ memset(q, 0, sizeof(AVPacketQueue));
+ pthread_mutex_init(&q->mutex, NULL);
+ pthread_cond_init(&q->cond, NULL);
+ q->avctx = avctx;
+}
+
+static void avpacket_queue_flush(AVPacketQueue *q)
+{
+ AVPacketList *pkt, *pkt1;
+
+ pthread_mutex_lock(&q->mutex);
+ for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
+ pkt1 = pkt->next;
+ av_packet_unref(&pkt->pkt);
+ av_freep(&pkt);
+ }
+ q->last_pkt = NULL;
+ q->first_pkt = NULL;
+ q->nb_packets = 0;
+ q->size = 0;
+ pthread_mutex_unlock(&q->mutex);
+}
+
+static void avpacket_queue_end(AVPacketQueue *q)
+{
+ avpacket_queue_flush(q);
+ pthread_mutex_destroy(&q->mutex);
+ pthread_cond_destroy(&q->cond);
+}
+
+static unsigned long long avpacket_queue_size(AVPacketQueue *q)
+{
+ unsigned long long size;
+ pthread_mutex_lock(&q->mutex);
+ size = q->size;
+ pthread_mutex_unlock(&q->mutex);
+ return size;
+}
+
+static int avpacket_queue_put(AVPacketQueue *q, AVPacket *pkt)
+{
+ AVPacketList *pkt1;
+
+ // Drop Packet if queue size is > 1GB
+ if (avpacket_queue_size(q) > 1024 * 1024 * 1024 ) {
+ av_log(q->avctx, AV_LOG_WARNING, "Decklink input buffer overrun!\n");
+ return -1;
+ }
+ /* duplicate the packet */
+ if (av_dup_packet(pkt) < 0) {
+ return -1;
+ }
+
+ pkt1 = (AVPacketList *)av_malloc(sizeof(AVPacketList));
+ if (!pkt1) {
+ return -1;
+ }
+ pkt1->pkt = *pkt;
+ pkt1->next = NULL;
+
+ pthread_mutex_lock(&q->mutex);
+
+ if (!q->last_pkt) {
+ q->first_pkt = pkt1;
+ } else {
+ q->last_pkt->next = pkt1;
+ }
+
+ q->last_pkt = pkt1;
+ q->nb_packets++;
+ q->size += pkt1->pkt.size + sizeof(*pkt1);
+
+ pthread_cond_signal(&q->cond);
+
+ pthread_mutex_unlock(&q->mutex);
+ return 0;
+}
+
+static int avpacket_queue_get(AVPacketQueue *q, AVPacket *pkt, int block)
+{
+ AVPacketList *pkt1;
+ int ret;
+
+ pthread_mutex_lock(&q->mutex);
+
+ for (;; ) {
+ pkt1 = q->first_pkt;
+ if (pkt1) {
+ q->first_pkt = pkt1->next;
+ if (!q->first_pkt) {
+ q->last_pkt = NULL;
+ }
+ q->nb_packets--;
+ q->size -= pkt1->pkt.size + sizeof(*pkt1);
+ *pkt = pkt1->pkt;
+ av_free(pkt1);
+ ret = 1;
+ break;
+ } else if (!block) {
+ ret = 0;
+ break;
+ } else {
+ pthread_cond_wait(&q->cond, &q->mutex);
+ }
+ }
+ pthread_mutex_unlock(&q->mutex);
+ return ret;
+}
+
+class decklink_input_callback : public IDeckLinkInputCallback
+{
+public:
+ decklink_input_callback(AVFormatContext *_avctx);
+ ~decklink_input_callback();
+
+ virtual HRESULT STDMETHODCALLTYPE QueryInterface(REFIID iid, LPVOID *ppv) { return E_NOINTERFACE; }
+ virtual ULONG STDMETHODCALLTYPE AddRef(void);
+ virtual ULONG STDMETHODCALLTYPE Release(void);
+ virtual HRESULT STDMETHODCALLTYPE VideoInputFormatChanged(BMDVideoInputFormatChangedEvents, IDeckLinkDisplayMode*, BMDDetectedVideoInputFormatFlags);
+ virtual HRESULT STDMETHODCALLTYPE VideoInputFrameArrived(IDeckLinkVideoInputFrame*, IDeckLinkAudioInputPacket*);
+
+private:
+ ULONG m_refCount;
+ pthread_mutex_t m_mutex;
+ AVFormatContext *avctx;
+ decklink_ctx *ctx;
+ int no_video;
+ int64_t initial_video_pts;
+ int64_t initial_audio_pts;
+};
+
+decklink_input_callback::decklink_input_callback(AVFormatContext *_avctx) : m_refCount(0)
+{
+ avctx = _avctx;
+ decklink_cctx *cctx = (struct decklink_cctx *) avctx->priv_data;
+ ctx = (struct decklink_ctx *) cctx->ctx;
+    no_video = 0;
+    initial_audio_pts = initial_video_pts = AV_NOPTS_VALUE;
+ pthread_mutex_init(&m_mutex, NULL);
+}
+
+decklink_input_callback::~decklink_input_callback()
+{
+ pthread_mutex_destroy(&m_mutex);
+}
+
+ULONG decklink_input_callback::AddRef(void)
+{
+ pthread_mutex_lock(&m_mutex);
+ m_refCount++;
+ pthread_mutex_unlock(&m_mutex);
+
+ return (ULONG)m_refCount;
+}
+
+ULONG decklink_input_callback::Release(void)
+{
+ pthread_mutex_lock(&m_mutex);
+ m_refCount--;
+ pthread_mutex_unlock(&m_mutex);
+
+ if (m_refCount == 0) {
+ delete this;
+ return 0;
+ }
+
+ return (ULONG)m_refCount;
+}
+
+HRESULT decklink_input_callback::VideoInputFrameArrived(
+ IDeckLinkVideoInputFrame *videoFrame, IDeckLinkAudioInputPacket *audioFrame)
+{
+ void *frameBytes;
+ void *audioFrameBytes;
+ BMDTimeValue frameTime;
+ BMDTimeValue frameDuration;
+
+ ctx->frameCount++;
+
+ // Handle Video Frame
+ if (videoFrame) {
+ AVPacket pkt;
+ av_init_packet(&pkt);
+ if (ctx->frameCount % 25 == 0) {
+ unsigned long long qsize = avpacket_queue_size(&ctx->queue);
+ av_log(avctx, AV_LOG_DEBUG,
+ "Frame received (#%lu) - Valid (%liB) - QSize %fMB\n",
+ ctx->frameCount,
+ videoFrame->GetRowBytes() * videoFrame->GetHeight(),
+ (double)qsize / 1024 / 1024);
+ }
+
+ videoFrame->GetBytes(&frameBytes);
+ videoFrame->GetStreamTime(&frameTime, &frameDuration,
+ ctx->video_st->time_base.den);
+
+ if (videoFrame->GetFlags() & bmdFrameHasNoInputSource) {
+ if (videoFrame->GetPixelFormat() == bmdFormat8BitYUV) {
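+                /* Paint standard colour bars so the output stays valid while
+                 * no signal is present; each 32-bit word packs two UYVY pixels. */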
+ unsigned bars[8] = {
+ 0xEA80EA80, 0xD292D210, 0xA910A9A5, 0x90229035,
+ 0x6ADD6ACA, 0x51EF515A, 0x286D28EF, 0x10801080 };
+ int width = videoFrame->GetWidth();
+ int height = videoFrame->GetHeight();
+ unsigned *p = (unsigned *)frameBytes;
+
+ for (int y = 0; y < height; y++) {
+ for (int x = 0; x < width; x += 2)
+ *p++ = bars[(x * 8) / width];
+ }
+ }
+
+ if (!no_video) {
+ av_log(avctx, AV_LOG_WARNING, "Frame received (#%lu) - No input signal detected "
+ "- Frames dropped %u\n", ctx->frameCount, ++ctx->dropped);
+ }
+ no_video = 1;
+ } else {
+ if (no_video) {
+ av_log(avctx, AV_LOG_WARNING, "Frame received (#%lu) - Input returned "
+ "- Frames dropped %u\n", ctx->frameCount, ++ctx->dropped);
+ }
+ no_video = 0;
+ }
+
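+        /* GetStreamTime() returns ticks of a time_base.den timescale, so
+         * dividing by time_base.num yields a pts in the stream time base. */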
+ pkt.pts = frameTime / ctx->video_st->time_base.num;
+
+ if (initial_video_pts == AV_NOPTS_VALUE) {
+ initial_video_pts = pkt.pts;
+ }
+
+ pkt.pts -= initial_video_pts;
+ pkt.dts = pkt.pts;
+
+ pkt.duration = frameDuration;
+        // Raw video frames are all keyframes (TODO: confirm this still applies)
+ pkt.flags |= AV_PKT_FLAG_KEY;
+ pkt.stream_index = ctx->video_st->index;
+ pkt.data = (uint8_t *)frameBytes;
+ pkt.size = videoFrame->GetRowBytes() *
+ videoFrame->GetHeight();
+ //fprintf(stderr,"Video Frame size %d ts %d\n", pkt.size, pkt.pts);
+
+#if CONFIG_LIBZVBI
+ if (!no_video && ctx->teletext_lines && videoFrame->GetPixelFormat() == bmdFormat8BitYUV && videoFrame->GetWidth() == 720) {
+ IDeckLinkVideoFrameAncillary *vanc;
+ AVPacket txt_pkt;
+ uint8_t txt_buf0[1611]; // max 35 * 46 bytes decoded teletext lines + 1 byte data_identifier
+ uint8_t *txt_buf = txt_buf0;
+
+ if (videoFrame->GetAncillaryData(&vanc) == S_OK) {
+ int i;
+ int64_t line_mask = 1;
+ txt_buf[0] = 0x10; // data_identifier - EBU_data
+ txt_buf++;
+ for (i = 6; i < 336; i++, line_mask <<= 1) {
+ uint8_t *buf;
+ if ((ctx->teletext_lines & line_mask) && vanc->GetBufferForVerticalBlankingLine(i, (void**)&buf) == S_OK) {
+ if (teletext_data_unit_from_vbi_data(i, buf, txt_buf) >= 0)
+ txt_buf += 46;
+ }
+ if (i == 22)
+ i = 317;
+ }
+ vanc->Release();
+ if (txt_buf - txt_buf0 > 1) {
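+                /* Pad with 0xff stuffing units so the number of 46-byte data
+                 * units is a multiple of 4, matching the payload size DVB
+                 * teletext PES packets expect. */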
+ int stuffing_units = (4 - ((45 + txt_buf - txt_buf0) / 46) % 4) % 4;
+ while (stuffing_units--) {
+ memset(txt_buf, 0xff, 46);
+ txt_buf[1] = 0x2c; // data_unit_length
+ txt_buf += 46;
+ }
+ av_init_packet(&txt_pkt);
+ txt_pkt.pts = pkt.pts;
+ txt_pkt.dts = pkt.dts;
+ txt_pkt.stream_index = ctx->teletext_st->index;
+ txt_pkt.data = txt_buf0;
+ txt_pkt.size = txt_buf - txt_buf0;
+ if (avpacket_queue_put(&ctx->queue, &txt_pkt) < 0) {
+ ++ctx->dropped;
+ }
+ }
+ }
+ }
+#endif
+
+ if (avpacket_queue_put(&ctx->queue, &pkt) < 0) {
+ ++ctx->dropped;
+ }
+ }
+
+ // Handle Audio Frame
+ if (audioFrame) {
+ AVPacket pkt;
+ BMDTimeValue audio_pts;
+ av_init_packet(&pkt);
+
+        // 16-bit integer samples: bytes = sample frames * channels * 2
+ pkt.size = audioFrame->GetSampleFrameCount() * ctx->audio_st->codecpar->channels * (16 / 8);
+ audioFrame->GetBytes(&audioFrameBytes);
+ audioFrame->GetPacketTime(&audio_pts, ctx->audio_st->time_base.den);
+ pkt.pts = audio_pts / ctx->audio_st->time_base.num;
+
+ if (initial_audio_pts == AV_NOPTS_VALUE) {
+ initial_audio_pts = pkt.pts;
+ }
+
+ pkt.pts -= initial_audio_pts;
+ pkt.dts = pkt.pts;
+
+ //fprintf(stderr,"Audio Frame size %d ts %d\n", pkt.size, pkt.pts);
+ pkt.flags |= AV_PKT_FLAG_KEY;
+ pkt.stream_index = ctx->audio_st->index;
+ pkt.data = (uint8_t *)audioFrameBytes;
+
+ if (avpacket_queue_put(&ctx->queue, &pkt) < 0) {
+ ++ctx->dropped;
+ }
+ }
+
+ return S_OK;
+}
+
+HRESULT decklink_input_callback::VideoInputFormatChanged(
+ BMDVideoInputFormatChangedEvents events, IDeckLinkDisplayMode *mode,
+ BMDDetectedVideoInputFormatFlags)
+{
+ return S_OK;
+}
+
+static HRESULT decklink_start_input(AVFormatContext *avctx)
+{
+ struct decklink_cctx *cctx = (struct decklink_cctx *) avctx->priv_data;
+ struct decklink_ctx *ctx = (struct decklink_ctx *) cctx->ctx;
+
+ ctx->input_callback = new decklink_input_callback(avctx);
+ ctx->dli->SetCallback(ctx->input_callback);
+ return ctx->dli->StartStreams();
+}
+
+extern "C" {
+
+av_cold int ff_decklink_read_close(AVFormatContext *avctx)
+{
+ struct decklink_cctx *cctx = (struct decklink_cctx *) avctx->priv_data;
+ struct decklink_ctx *ctx = (struct decklink_ctx *) cctx->ctx;
+
+ if (ctx->capture_started) {
+ ctx->dli->StopStreams();
+ ctx->dli->DisableVideoInput();
+ ctx->dli->DisableAudioInput();
+ }
+
+ if (ctx->dli)
+ ctx->dli->Release();
+ if (ctx->dl)
+ ctx->dl->Release();
+
+ avpacket_queue_end(&ctx->queue);
+
+ av_freep(&cctx->ctx);
+
+ return 0;
+}
+
+av_cold int ff_decklink_read_header(AVFormatContext *avctx)
+{
+ struct decklink_cctx *cctx = (struct decklink_cctx *) avctx->priv_data;
+ struct decklink_ctx *ctx;
+ IDeckLinkDisplayModeIterator *itermode;
+ IDeckLinkIterator *iter;
+ IDeckLink *dl = NULL;
+ AVStream *st;
+ HRESULT result;
+ char fname[1024];
+ char *tmp;
+ int mode_num = 0;
+
+ ctx = (struct decklink_ctx *) av_mallocz(sizeof(struct decklink_ctx));
+ if (!ctx)
+ return AVERROR(ENOMEM);
+ ctx->list_devices = cctx->list_devices;
+ ctx->list_formats = cctx->list_formats;
+ ctx->teletext_lines = cctx->teletext_lines;
+ ctx->preroll = cctx->preroll;
+ cctx->ctx = ctx;
+
+#if !CONFIG_LIBZVBI
+ if (ctx->teletext_lines) {
+ av_log(avctx, AV_LOG_ERROR, "Libzvbi support is needed for capturing teletext, please recompile FFmpeg.\n");
+ return AVERROR(ENOSYS);
+ }
+#endif
+
+ /* Check audio channel option for valid values: 2, 8 or 16 */
+ switch (cctx->audio_channels) {
+ case 2:
+ case 8:
+ case 16:
+ break;
+ default:
+ av_log(avctx, AV_LOG_ERROR, "Value of channels option must be one of 2, 8 or 16\n");
+ return AVERROR(EINVAL);
+ }
+
+ iter = CreateDeckLinkIteratorInstance();
+ if (!iter) {
+ av_log(avctx, AV_LOG_ERROR, "Could not create DeckLink iterator\n");
+ return AVERROR(EIO);
+ }
+
+ /* List available devices. */
+ if (ctx->list_devices) {
+ ff_decklink_list_devices(avctx);
+ return AVERROR_EXIT;
+ }
+
+    av_strlcpy(fname, avctx->filename, sizeof(fname));
+    tmp = strchr(fname, '@');
+    if (tmp != NULL) {
+        mode_num = atoi(tmp + 1);
+        *tmp = 0;
+    }
+
+ /* Open device. */
+ while (iter->Next(&dl) == S_OK) {
+ const char *displayName;
+ ff_decklink_get_display_name(dl, &displayName);
+ if (!strcmp(fname, displayName)) {
+ av_free((void *) displayName);
+ ctx->dl = dl;
+ break;
+ }
+ av_free((void *) displayName);
+ dl->Release();
+ }
+ iter->Release();
+ if (!ctx->dl) {
+ av_log(avctx, AV_LOG_ERROR, "Could not open '%s'\n", fname);
+ return AVERROR(EIO);
+ }
+
+ /* Get input device. */
+ if (ctx->dl->QueryInterface(IID_IDeckLinkInput, (void **) &ctx->dli) != S_OK) {
+        av_log(avctx, AV_LOG_ERROR, "Could not open input device from '%s'\n",
+ avctx->filename);
+ ctx->dl->Release();
+ return AVERROR(EIO);
+ }
+
+ /* List supported formats. */
+ if (ctx->list_formats) {
+ ff_decklink_list_formats(avctx, DIRECTION_IN);
+ ctx->dli->Release();
+ ctx->dl->Release();
+ return AVERROR_EXIT;
+ }
+
+ if (ctx->dli->GetDisplayModeIterator(&itermode) != S_OK) {
+ av_log(avctx, AV_LOG_ERROR, "Could not get Display Mode Iterator\n");
+ ctx->dl->Release();
+ return AVERROR(EIO);
+ }
+
+ if (mode_num > 0) {
+ if (ff_decklink_set_format(avctx, DIRECTION_IN, mode_num) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Could not set mode %d for %s\n", mode_num, fname);
+ goto error;
+ }
+ }
+
+ itermode->Release();
+
+ /* Setup streams. */
+ st = avformat_new_stream(avctx, NULL);
+ if (!st) {
+ av_log(avctx, AV_LOG_ERROR, "Cannot add stream\n");
+ goto error;
+ }
+ st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
+ st->codecpar->codec_id = AV_CODEC_ID_PCM_S16LE;
+ st->codecpar->sample_rate = bmdAudioSampleRate48kHz;
+ st->codecpar->channels = cctx->audio_channels;
+ avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
+    ctx->audio_st = st;
+
+ st = avformat_new_stream(avctx, NULL);
+ if (!st) {
+ av_log(avctx, AV_LOG_ERROR, "Cannot add stream\n");
+ goto error;
+ }
+ st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
+ st->codecpar->width = ctx->bmd_width;
+ st->codecpar->height = ctx->bmd_height;
+
+ st->time_base.den = ctx->bmd_tb_den;
+ st->time_base.num = ctx->bmd_tb_num;
+ st->codecpar->bit_rate = av_image_get_buffer_size((AVPixelFormat)st->codecpar->format, ctx->bmd_width, ctx->bmd_height, 1) * 1/av_q2d(st->time_base) * 8;
+
+ if (cctx->v210) {
+ st->codecpar->codec_id = AV_CODEC_ID_V210;
+ st->codecpar->codec_tag = MKTAG('V', '2', '1', '0');
+ } else {
+ st->codecpar->codec_id = AV_CODEC_ID_RAWVIDEO;
+ st->codecpar->format = AV_PIX_FMT_UYVY422;
+ st->codecpar->codec_tag = MKTAG('U', 'Y', 'V', 'Y');
+ }
+
+ avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
+
+    ctx->video_st = st;
+
+ if (ctx->teletext_lines) {
+ st = avformat_new_stream(avctx, NULL);
+ if (!st) {
+ av_log(avctx, AV_LOG_ERROR, "Cannot add stream\n");
+ goto error;
+ }
+ st->codecpar->codec_type = AVMEDIA_TYPE_SUBTITLE;
+ st->time_base.den = ctx->bmd_tb_den;
+ st->time_base.num = ctx->bmd_tb_num;
+ st->codecpar->codec_id = AV_CODEC_ID_DVB_TELETEXT;
+ avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
+ ctx->teletext_st = st;
+ }
+
+ av_log(avctx, AV_LOG_VERBOSE, "Using %d input audio channels\n", ctx->audio_st->codecpar->channels);
+ result = ctx->dli->EnableAudioInput(bmdAudioSampleRate48kHz, bmdAudioSampleType16bitInteger, ctx->audio_st->codecpar->channels);
+
+ if (result != S_OK) {
+ av_log(avctx, AV_LOG_ERROR, "Cannot enable audio input\n");
+ goto error;
+ }
+
+ result = ctx->dli->EnableVideoInput(ctx->bmd_mode,
+ cctx->v210 ? bmdFormat10BitYUV : bmdFormat8BitYUV,
+ bmdVideoInputFlagDefault);
+
+ if (result != S_OK) {
+ av_log(avctx, AV_LOG_ERROR, "Cannot enable video input\n");
+ goto error;
+ }
+
+    avpacket_queue_init(avctx, &ctx->queue);
+
+    if (decklink_start_input(avctx) != S_OK) {
+ av_log(avctx, AV_LOG_ERROR, "Cannot start input stream\n");
+ goto error;
+ }
+
+ return 0;
+
+error:
+
+ ctx->dli->Release();
+ ctx->dl->Release();
+
+ return AVERROR(EIO);
+}
+
+int ff_decklink_read_packet(AVFormatContext *avctx, AVPacket *pkt)
+{
+ struct decklink_cctx *cctx = (struct decklink_cctx *) avctx->priv_data;
+ struct decklink_ctx *ctx = (struct decklink_ctx *) cctx->ctx;
+ AVFrame *frame = ctx->video_st->codec->coded_frame;
+
+ avpacket_queue_get(&ctx->queue, pkt, 1);
+ if (frame && (ctx->bmd_field_dominance == bmdUpperFieldFirst || ctx->bmd_field_dominance == bmdLowerFieldFirst)) {
+ frame->interlaced_frame = 1;
+ if (ctx->bmd_field_dominance == bmdUpperFieldFirst) {
+ frame->top_field_first = 1;
+ }
+ }
+
+ return 0;
+}
+
+} /* extern "C" */
diff --git a/libavdevice/decklink_dec.h b/libavdevice/decklink_dec.h
new file mode 100644
index 0000000000..c02344efc3
--- /dev/null
+++ b/libavdevice/decklink_dec.h
@@ -0,0 +1,37 @@
+/*
+ * Blackmagic DeckLink input
+ * Copyright (c) 2013-2014 Ramiro Polla
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVDEVICE_DECKLINK_DEC_H
+#define AVDEVICE_DECKLINK_DEC_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int ff_decklink_read_header(AVFormatContext *avctx);
+int ff_decklink_read_packet(AVFormatContext *avctx, AVPacket *pkt);
+int ff_decklink_read_close(AVFormatContext *avctx);
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif /* AVDEVICE_DECKLINK_DEC_H */
diff --git a/libavdevice/decklink_dec_c.c b/libavdevice/decklink_dec_c.c
new file mode 100644
index 0000000000..40c21a753b
--- /dev/null
+++ b/libavdevice/decklink_dec_c.c
@@ -0,0 +1,59 @@
+/*
+ * Blackmagic DeckLink input
+ * Copyright (c) 2014 Deti Fliegl
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavformat/avformat.h"
+#include "libavutil/opt.h"
+
+#include "decklink_common_c.h"
+#include "decklink_dec.h"
+
+#define OFFSET(x) offsetof(struct decklink_cctx, x)
+#define DEC AV_OPT_FLAG_DECODING_PARAM
+
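+/* teletext_lines is a bitmask of VBI lines to capture (bit n selects line n,
+ * covering both fields); the named constants below select the standard
+ * teletext lines or all of them. */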
+static const AVOption options[] = {
+ { "list_devices", "list available devices" , OFFSET(list_devices), AV_OPT_TYPE_INT , { .i64 = 0 }, 0, 1, DEC },
+ { "list_formats", "list supported formats" , OFFSET(list_formats), AV_OPT_TYPE_INT , { .i64 = 0 }, 0, 1, DEC },
+    { "bm_v210", "capture 10-bit v210 video" , OFFSET(v210), AV_OPT_TYPE_INT , { .i64 = 0 }, 0, 1, DEC },
+ { "teletext_lines", "teletext lines bitmask", OFFSET(teletext_lines), AV_OPT_TYPE_INT64, { .i64 = 0 }, 0, 0x7ffffffffLL, DEC, "teletext_lines"},
+ { "standard", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 0x7fff9fffeLL}, 0, 0, DEC, "teletext_lines"},
+ { "all", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 0x7ffffffffLL}, 0, 0, DEC, "teletext_lines"},
+ { "channels", "number of audio channels", OFFSET(audio_channels), AV_OPT_TYPE_INT , { .i64 = 2 }, 2, 16, DEC },
+ { NULL },
+};
+
+static const AVClass decklink_demuxer_class = {
+ .class_name = "Blackmagic DeckLink demuxer",
+ .item_name = av_default_item_name,
+ .option = options,
+ .version = LIBAVUTIL_VERSION_INT,
+ .category = AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT,
+};
+
+AVInputFormat ff_decklink_demuxer = {
+ .name = "decklink",
+ .long_name = NULL_IF_CONFIG_SMALL("Blackmagic DeckLink input"),
+ .flags = AVFMT_NOFILE | AVFMT_RAWPICTURE,
+ .priv_class = &decklink_demuxer_class,
+ .priv_data_size = sizeof(struct decklink_cctx),
+ .read_header = ff_decklink_read_header,
+ .read_packet = ff_decklink_read_packet,
+ .read_close = ff_decklink_read_close,
+};
diff --git a/libavdevice/decklink_enc.cpp b/libavdevice/decklink_enc.cpp
new file mode 100644
index 0000000000..6c5450f4ec
--- /dev/null
+++ b/libavdevice/decklink_enc.cpp
@@ -0,0 +1,426 @@
+/*
+ * Blackmagic DeckLink output
+ * Copyright (c) 2013-2014 Ramiro Polla
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <DeckLinkAPI.h>
+
+#include <pthread.h>
+#include <semaphore.h>
+
+extern "C" {
+#include "libavformat/avformat.h"
+#include "libavformat/internal.h"
+#include "libavutil/imgutils.h"
+}
+
+#include "decklink_common.h"
+#include "decklink_enc.h"
+
+
+/* DeckLink callback class declaration */
+class decklink_frame : public IDeckLinkVideoFrame
+{
+public:
+ decklink_frame(struct decklink_ctx *ctx, AVFrame *avframe, long width,
+ long height, void *buffer) :
+ _ctx(ctx), _avframe(avframe), _width(width),
+ _height(height), _buffer(buffer), _refs(0) { }
+
+ virtual long STDMETHODCALLTYPE GetWidth (void) { return _width; }
+ virtual long STDMETHODCALLTYPE GetHeight (void) { return _height; }
+ virtual long STDMETHODCALLTYPE GetRowBytes (void) { return _width<<1; }
+ virtual BMDPixelFormat STDMETHODCALLTYPE GetPixelFormat(void) { return bmdFormat8BitYUV; }
+ virtual BMDFrameFlags STDMETHODCALLTYPE GetFlags (void) { return bmdVideoOutputFlagDefault; }
+ virtual HRESULT STDMETHODCALLTYPE GetBytes (void **buffer) { *buffer = _buffer; return S_OK; }
+
+ virtual HRESULT STDMETHODCALLTYPE GetTimecode (BMDTimecodeFormat format, IDeckLinkTimecode **timecode) { return S_FALSE; }
+ virtual HRESULT STDMETHODCALLTYPE GetAncillaryData(IDeckLinkVideoFrameAncillary **ancillary) { return S_FALSE; }
+
+ virtual HRESULT STDMETHODCALLTYPE QueryInterface(REFIID iid, LPVOID *ppv) { return E_NOINTERFACE; }
+ virtual ULONG STDMETHODCALLTYPE AddRef(void) { return ++_refs; }
+ virtual ULONG STDMETHODCALLTYPE Release(void) { if (!--_refs) delete this; return _refs; }
+
+ struct decklink_ctx *_ctx;
+ AVFrame *_avframe;
+
+private:
+ long _width;
+ long _height;
+ void *_buffer;
+ int _refs;
+};
+
+class decklink_output_callback : public IDeckLinkVideoOutputCallback
+{
+public:
+ virtual HRESULT STDMETHODCALLTYPE ScheduledFrameCompleted(IDeckLinkVideoFrame *_frame, BMDOutputFrameCompletionResult result)
+ {
+ decklink_frame *frame = static_cast<decklink_frame *>(_frame);
+ struct decklink_ctx *ctx = frame->_ctx;
+ AVFrame *avframe = frame->_avframe;
+
+ av_frame_free(&avframe);
+
+ sem_post(&ctx->semaphore);
+
+ return S_OK;
+ }
+ virtual HRESULT STDMETHODCALLTYPE ScheduledPlaybackHasStopped(void) { return S_OK; }
+ virtual HRESULT STDMETHODCALLTYPE QueryInterface(REFIID iid, LPVOID *ppv) { return E_NOINTERFACE; }
+ virtual ULONG STDMETHODCALLTYPE AddRef(void) { return 1; }
+ virtual ULONG STDMETHODCALLTYPE Release(void) { return 1; }
+};
+
+static int decklink_setup_video(AVFormatContext *avctx, AVStream *st)
+{
+ struct decklink_cctx *cctx = (struct decklink_cctx *) avctx->priv_data;
+ struct decklink_ctx *ctx = (struct decklink_ctx *) cctx->ctx;
+ AVCodecContext *c = st->codec;
+
+ if (ctx->video) {
+ av_log(avctx, AV_LOG_ERROR, "Only one video stream is supported!\n");
+ return -1;
+ }
+
+ if (c->pix_fmt != AV_PIX_FMT_UYVY422) {
+ av_log(avctx, AV_LOG_ERROR, "Unsupported pixel format!"
+ " Only AV_PIX_FMT_UYVY422 is supported.\n");
+ return -1;
+ }
+ if (ff_decklink_set_format(avctx, c->width, c->height,
+ c->time_base.num, c->time_base.den)) {
+ av_log(avctx, AV_LOG_ERROR, "Unsupported video size or framerate!"
+ " Check available formats with -list_formats 1.\n");
+ return -1;
+ }
+ if (ctx->dlo->EnableVideoOutput(ctx->bmd_mode,
+ bmdVideoOutputFlagDefault) != S_OK) {
+ av_log(avctx, AV_LOG_ERROR, "Could not enable video output!\n");
+ return -1;
+ }
+
+ /* Set callback. */
+ ctx->output_callback = new decklink_output_callback();
+ ctx->dlo->SetScheduledFrameCompletionCallback(ctx->output_callback);
+
+ /* Start video semaphore. */
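+    /* time_base is 1/fps; NTSC-style rates (e.g. 1001/30000) carry a x1000
+     * timescale, hence the division below. */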
+ ctx->frames_preroll = c->time_base.den * ctx->preroll;
+ if (c->time_base.den > 1000)
+ ctx->frames_preroll /= 1000;
+
+ /* Buffer twice as many frames as the preroll. */
+ ctx->frames_buffer = ctx->frames_preroll * 2;
+ ctx->frames_buffer = FFMIN(ctx->frames_buffer, 60);
+ sem_init(&ctx->semaphore, 0, ctx->frames_buffer);
+
+ /* The device expects the framerate to be fixed. */
+ avpriv_set_pts_info(st, 64, c->time_base.num, c->time_base.den);
+
+ ctx->video = 1;
+
+ return 0;
+}
+
+static int decklink_setup_audio(AVFormatContext *avctx, AVStream *st)
+{
+ struct decklink_cctx *cctx = (struct decklink_cctx *) avctx->priv_data;
+ struct decklink_ctx *ctx = (struct decklink_ctx *) cctx->ctx;
+ AVCodecContext *c = st->codec;
+
+ if (ctx->audio) {
+ av_log(avctx, AV_LOG_ERROR, "Only one audio stream is supported!\n");
+ return -1;
+ }
+ if (c->sample_rate != 48000) {
+ av_log(avctx, AV_LOG_ERROR, "Unsupported sample rate!"
+ " Only 48kHz is supported.\n");
+ return -1;
+ }
+ if (c->channels != 2 && c->channels != 8) {
+ av_log(avctx, AV_LOG_ERROR, "Unsupported number of channels!"
+ " Only stereo and 7.1 are supported.\n");
+ return -1;
+ }
+ if (ctx->dlo->EnableAudioOutput(bmdAudioSampleRate48kHz,
+ bmdAudioSampleType16bitInteger,
+ c->channels,
+ bmdAudioOutputStreamTimestamped) != S_OK) {
+ av_log(avctx, AV_LOG_ERROR, "Could not enable audio output!\n");
+ return -1;
+ }
+ if (ctx->dlo->BeginAudioPreroll() != S_OK) {
+ av_log(avctx, AV_LOG_ERROR, "Could not begin audio preroll!\n");
+ return -1;
+ }
+
+ /* The device expects the sample rate to be fixed. */
+ avpriv_set_pts_info(st, 64, 1, c->sample_rate);
+ ctx->channels = c->channels;
+
+ ctx->audio = 1;
+
+ return 0;
+}
+
+av_cold int ff_decklink_write_trailer(AVFormatContext *avctx)
+{
+ struct decklink_cctx *cctx = (struct decklink_cctx *) avctx->priv_data;
+ struct decklink_ctx *ctx = (struct decklink_ctx *) cctx->ctx;
+
+ if (ctx->playback_started) {
+ BMDTimeValue actual;
+ ctx->dlo->StopScheduledPlayback(ctx->last_pts * ctx->bmd_tb_num,
+ &actual, ctx->bmd_tb_den);
+ ctx->dlo->DisableVideoOutput();
+ if (ctx->audio)
+ ctx->dlo->DisableAudioOutput();
+ }
+
+ if (ctx->dlo)
+ ctx->dlo->Release();
+ if (ctx->dl)
+ ctx->dl->Release();
+
+ if (ctx->output_callback)
+ delete ctx->output_callback;
+
+ sem_destroy(&ctx->semaphore);
+
+ av_freep(&cctx->ctx);
+
+ return 0;
+}
+
+static int decklink_write_video_packet(AVFormatContext *avctx, AVPacket *pkt)
+{
+ struct decklink_cctx *cctx = (struct decklink_cctx *) avctx->priv_data;
+ struct decklink_ctx *ctx = (struct decklink_ctx *) cctx->ctx;
+ AVPicture *avpicture = (AVPicture *) pkt->data;
+ AVFrame *avframe, *tmp;
+ decklink_frame *frame;
+ buffercount_type buffered;
+ HRESULT hr;
+
+ /* HACK while av_uncoded_frame() isn't implemented */
+ int ret;
+
+ tmp = av_frame_alloc();
+ if (!tmp)
+ return AVERROR(ENOMEM);
+ tmp->format = AV_PIX_FMT_UYVY422;
+ tmp->width = ctx->bmd_width;
+ tmp->height = ctx->bmd_height;
+ ret = av_frame_get_buffer(tmp, 32);
+ if (ret < 0) {
+ av_frame_free(&tmp);
+ return ret;
+ }
+ av_image_copy(tmp->data, tmp->linesize, (const uint8_t **) avpicture->data,
+ avpicture->linesize, (AVPixelFormat) tmp->format, tmp->width,
+ tmp->height);
+ avframe = av_frame_clone(tmp);
+ av_frame_free(&tmp);
+ if (!avframe) {
+ av_log(avctx, AV_LOG_ERROR, "Could not clone video frame.\n");
+ return AVERROR(EIO);
+ }
+ /* end HACK */
+
+ frame = new decklink_frame(ctx, avframe, ctx->bmd_width, ctx->bmd_height,
+ (void *) avframe->data[0]);
+ if (!frame) {
+ av_log(avctx, AV_LOG_ERROR, "Could not create new frame.\n");
+ return AVERROR(EIO);
+ }
+
+ /* Always keep at most one second of frames buffered. */
+ sem_wait(&ctx->semaphore);
+
+ /* Schedule frame for playback. */
+ hr = ctx->dlo->ScheduleVideoFrame((struct IDeckLinkVideoFrame *) frame,
+ pkt->pts * ctx->bmd_tb_num,
+ ctx->bmd_tb_num, ctx->bmd_tb_den);
+ if (hr != S_OK) {
+ av_log(avctx, AV_LOG_ERROR, "Could not schedule video frame."
+ " error %08x.\n", (uint32_t) hr);
+ frame->Release();
+ return AVERROR(EIO);
+ }
+
+ ctx->dlo->GetBufferedVideoFrameCount(&buffered);
+ av_log(avctx, AV_LOG_DEBUG, "Buffered video frames: %d.\n", (int) buffered);
+ if (pkt->pts > 2 && buffered <= 2)
+ av_log(avctx, AV_LOG_WARNING, "There are not enough buffered video frames."
+ " Video may misbehave!\n");
+
+ /* Preroll video frames. */
+ if (!ctx->playback_started && pkt->pts > ctx->frames_preroll) {
+ av_log(avctx, AV_LOG_DEBUG, "Ending audio preroll.\n");
+ if (ctx->audio && ctx->dlo->EndAudioPreroll() != S_OK) {
+ av_log(avctx, AV_LOG_ERROR, "Could not end audio preroll!\n");
+ return AVERROR(EIO);
+ }
+ av_log(avctx, AV_LOG_DEBUG, "Starting scheduled playback.\n");
+ if (ctx->dlo->StartScheduledPlayback(0, ctx->bmd_tb_den, 1.0) != S_OK) {
+ av_log(avctx, AV_LOG_ERROR, "Could not start scheduled playback!\n");
+ return AVERROR(EIO);
+ }
+ ctx->playback_started = 1;
+ }
+
+ return 0;
+}
+
+static int decklink_write_audio_packet(AVFormatContext *avctx, AVPacket *pkt)
+{
+ struct decklink_cctx *cctx = (struct decklink_cctx *) avctx->priv_data;
+ struct decklink_ctx *ctx = (struct decklink_ctx *) cctx->ctx;
+ int sample_count = pkt->size / (ctx->channels << 1);
+ buffercount_type buffered;
+
+ ctx->dlo->GetBufferedAudioSampleFrameCount(&buffered);
+ if (pkt->pts > 1 && !buffered)
+ av_log(avctx, AV_LOG_WARNING, "There's no buffered audio."
+ " Audio will misbehave!\n");
+
+ if (ctx->dlo->ScheduleAudioSamples(pkt->data, sample_count, pkt->pts,
+ bmdAudioSampleRate48kHz, NULL) != S_OK) {
+ av_log(avctx, AV_LOG_ERROR, "Could not schedule audio samples.\n");
+ return AVERROR(EIO);
+ }
+
+ return 0;
+}
+
+extern "C" {
+
+av_cold int ff_decklink_write_header(AVFormatContext *avctx)
+{
+ struct decklink_cctx *cctx = (struct decklink_cctx *) avctx->priv_data;
+ struct decklink_ctx *ctx;
+ IDeckLinkDisplayModeIterator *itermode;
+ IDeckLinkIterator *iter;
+ IDeckLink *dl = NULL;
+ unsigned int n;
+
+ ctx = (struct decklink_ctx *) av_mallocz(sizeof(struct decklink_ctx));
+ if (!ctx)
+ return AVERROR(ENOMEM);
+ ctx->list_devices = cctx->list_devices;
+ ctx->list_formats = cctx->list_formats;
+ ctx->preroll = cctx->preroll;
+ cctx->ctx = ctx;
+
+ iter = CreateDeckLinkIteratorInstance();
+ if (!iter) {
+ av_log(avctx, AV_LOG_ERROR, "Could not create DeckLink iterator\n");
+ return AVERROR(EIO);
+ }
+
+ /* List available devices. */
+ if (ctx->list_devices) {
+ ff_decklink_list_devices(avctx);
+ return AVERROR_EXIT;
+ }
+
+ /* Open device. */
+ while (iter->Next(&dl) == S_OK) {
+ const char *displayName;
+ ff_decklink_get_display_name(dl, &displayName);
+ if (!strcmp(avctx->filename, displayName)) {
+ av_free((void *) displayName);
+ ctx->dl = dl;
+ break;
+ }
+ av_free((void *) displayName);
+ dl->Release();
+ }
+ iter->Release();
+ if (!ctx->dl) {
+ av_log(avctx, AV_LOG_ERROR, "Could not open '%s'\n", avctx->filename);
+ return AVERROR(EIO);
+ }
+
+ /* Get output device. */
+ if (ctx->dl->QueryInterface(IID_IDeckLinkOutput, (void **) &ctx->dlo) != S_OK) {
+ av_log(avctx, AV_LOG_ERROR, "Could not open output device from '%s'\n",
+ avctx->filename);
+ ctx->dl->Release();
+ return AVERROR(EIO);
+ }
+
+ /* List supported formats. */
+ if (ctx->list_formats) {
+ ff_decklink_list_formats(avctx);
+ ctx->dlo->Release();
+ ctx->dl->Release();
+ return AVERROR_EXIT;
+ }
+
+ if (ctx->dlo->GetDisplayModeIterator(&itermode) != S_OK) {
+ av_log(avctx, AV_LOG_ERROR, "Could not get Display Mode Iterator\n");
+ ctx->dl->Release();
+ return AVERROR(EIO);
+ }
+
+ /* Setup streams. */
+ for (n = 0; n < avctx->nb_streams; n++) {
+ AVStream *st = avctx->streams[n];
+ AVCodecContext *c = st->codec;
+ if (c->codec_type == AVMEDIA_TYPE_AUDIO) {
+ if (decklink_setup_audio(avctx, st))
+ goto error;
+ } else if (c->codec_type == AVMEDIA_TYPE_VIDEO) {
+ if (decklink_setup_video(avctx, st))
+ goto error;
+ } else {
+ av_log(avctx, AV_LOG_ERROR, "Unsupported stream type.\n");
+ goto error;
+ }
+ }
+ itermode->Release();
+
+ return 0;
+
+error:
+
+ ctx->dlo->Release();
+ ctx->dl->Release();
+
+ return AVERROR(EIO);
+}
+
+int ff_decklink_write_packet(AVFormatContext *avctx, AVPacket *pkt)
+{
+ struct decklink_cctx *cctx = (struct decklink_cctx *) avctx->priv_data;
+ struct decklink_ctx *ctx = (struct decklink_ctx *) cctx->ctx;
+ AVStream *st = avctx->streams[pkt->stream_index];
+
+ ctx->last_pts = FFMAX(ctx->last_pts, pkt->pts);
+
+ if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
+ return decklink_write_video_packet(avctx, pkt);
+ else if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
+ return decklink_write_audio_packet(avctx, pkt);
+
+ return AVERROR(EIO);
+}
+
+} /* extern "C" */
diff --git a/libavdevice/decklink_enc.h b/libavdevice/decklink_enc.h
new file mode 100644
index 0000000000..5ffc05cd68
--- /dev/null
+++ b/libavdevice/decklink_enc.h
@@ -0,0 +1,37 @@
+/*
+ * Blackmagic DeckLink output
+ * Copyright (c) 2013-2014 Ramiro Polla
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVDEVICE_DECKLINK_ENC_H
+#define AVDEVICE_DECKLINK_ENC_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int ff_decklink_write_header(AVFormatContext *avctx);
+int ff_decklink_write_packet(AVFormatContext *avctx, AVPacket *pkt);
+int ff_decklink_write_trailer(AVFormatContext *avctx);
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif /* AVDEVICE_DECKLINK_ENC_H */
diff --git a/libavdevice/decklink_enc_c.c b/libavdevice/decklink_enc_c.c
new file mode 100644
index 0000000000..c3c90184f8
--- /dev/null
+++ b/libavdevice/decklink_enc_c.c
@@ -0,0 +1,57 @@
+/*
+ * Blackmagic DeckLink output
+ * Copyright (c) 2013-2014 Ramiro Polla
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavformat/avformat.h"
+#include "libavutil/opt.h"
+
+#include "decklink_common_c.h"
+#include "decklink_enc.h"
+
+#define OFFSET(x) offsetof(struct decklink_cctx, x)
+#define ENC AV_OPT_FLAG_ENCODING_PARAM
+static const AVOption options[] = {
+ { "list_devices", "list available devices" , OFFSET(list_devices), AV_OPT_TYPE_INT , { .i64 = 0 }, 0, 1, ENC },
+ { "list_formats", "list supported formats" , OFFSET(list_formats), AV_OPT_TYPE_INT , { .i64 = 0 }, 0, 1, ENC },
+ { "preroll" , "video preroll in seconds", OFFSET(preroll ), AV_OPT_TYPE_DOUBLE, { .dbl = 0.5 }, 0, 5, ENC },
+ { NULL },
+};
+
+static const AVClass decklink_muxer_class = {
+ .class_name = "Blackmagic DeckLink muxer",
+ .item_name = av_default_item_name,
+ .option = options,
+ .version = LIBAVUTIL_VERSION_INT,
+ .category = AV_CLASS_CATEGORY_DEVICE_VIDEO_OUTPUT,
+};
+
+AVOutputFormat ff_decklink_muxer = {
+ .name = "decklink",
+ .long_name = NULL_IF_CONFIG_SMALL("Blackmagic DeckLink output"),
+ .audio_codec = AV_CODEC_ID_PCM_S16LE,
+ .video_codec = AV_CODEC_ID_RAWVIDEO,
+ .subtitle_codec = AV_CODEC_ID_NONE,
+ .flags = AVFMT_NOFILE | AVFMT_RAWPICTURE,
+ .priv_class = &decklink_muxer_class,
+ .priv_data_size = sizeof(struct decklink_cctx),
+ .write_header = ff_decklink_write_header,
+ .write_packet = ff_decklink_write_packet,
+ .write_trailer = ff_decklink_write_trailer,
+};
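For context, the muxer registered above is reached through the ordinary
libavformat API, and the options in the table land in struct decklink_cctx.
A minimal open sketch, assuming FFmpeg was built with DeckLink support and
that a device with the (hypothetical) name "DeckLink Mini Monitor" exists:

    #include <libavformat/avformat.h>
    #include <libavutil/opt.h>

    /* Allocate a DeckLink output context and request 1 s of video preroll. */
    static int open_decklink_output(AVFormatContext **poc)
    {
        AVFormatContext *oc = NULL;
        int ret = avformat_alloc_output_context2(&oc, NULL, "decklink",
                                                 "DeckLink Mini Monitor");
        if (ret < 0)
            return ret;
        /* "preroll" is the AVOption declared above (0..5 seconds). */
        ret = av_opt_set_double(oc->priv_data, "preroll", 1.0, 0);
        if (ret < 0) {
            avformat_free_context(oc);
            return ret;
        }
        *poc = oc;
        return 0;
    }

Streams added to this context must use the codecs pinned in the table above
(pcm_s16le audio, rawvideo video).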
diff --git a/libavdevice/dshow.c b/libavdevice/dshow.c
new file mode 100644
index 0000000000..678861da4b
--- /dev/null
+++ b/libavdevice/dshow.c
@@ -0,0 +1,1310 @@
+/*
+ * Directshow capture interface
+ * Copyright (c) 2010 Ramiro Polla
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "dshow_capture.h"
+#include "libavutil/parseutils.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/opt.h"
+#include "libavformat/internal.h"
+#include "libavformat/riff.h"
+#include "avdevice.h"
+#include "libavcodec/raw.h"
+#include "objidl.h"
+#include "shlwapi.h"
+
+
+static enum AVPixelFormat dshow_pixfmt(DWORD biCompression, WORD biBitCount)
+{
+ switch(biCompression) {
+ case BI_BITFIELDS:
+ case BI_RGB:
+ switch(biBitCount) { /* 1-8 are untested */
+ case 1:
+ return AV_PIX_FMT_MONOWHITE;
+ case 4:
+ return AV_PIX_FMT_RGB4;
+ case 8:
+ return AV_PIX_FMT_RGB8;
+ case 16:
+ return AV_PIX_FMT_RGB555;
+ case 24:
+ return AV_PIX_FMT_BGR24;
+ case 32:
+ return AV_PIX_FMT_0RGB32;
+ }
+ }
+ return avpriv_find_pix_fmt(avpriv_get_raw_pix_fmt_tags(), biCompression); // all others
+}
+
+static int
+dshow_read_close(AVFormatContext *s)
+{
+ struct dshow_ctx *ctx = s->priv_data;
+ AVPacketList *pktl;
+
+ if (ctx->control) {
+ IMediaControl_Stop(ctx->control);
+ IMediaControl_Release(ctx->control);
+ }
+
+ if (ctx->media_event)
+ IMediaEvent_Release(ctx->media_event);
+
+ if (ctx->graph) {
+ IEnumFilters *fenum;
+ int r;
+ r = IGraphBuilder_EnumFilters(ctx->graph, &fenum);
+ if (r == S_OK) {
+ IBaseFilter *f;
+ IEnumFilters_Reset(fenum);
+ while (IEnumFilters_Next(fenum, 1, &f, NULL) == S_OK) {
+ if (IGraphBuilder_RemoveFilter(ctx->graph, f) == S_OK)
+ IEnumFilters_Reset(fenum); /* When a filter is removed,
+ * the list must be reset. */
+ IBaseFilter_Release(f);
+ }
+ IEnumFilters_Release(fenum);
+ }
+ IGraphBuilder_Release(ctx->graph);
+ }
+
+ if (ctx->capture_pin[VideoDevice])
+ libAVPin_Release(ctx->capture_pin[VideoDevice]);
+ if (ctx->capture_pin[AudioDevice])
+ libAVPin_Release(ctx->capture_pin[AudioDevice]);
+ if (ctx->capture_filter[VideoDevice])
+ libAVFilter_Release(ctx->capture_filter[VideoDevice]);
+ if (ctx->capture_filter[AudioDevice])
+ libAVFilter_Release(ctx->capture_filter[AudioDevice]);
+
+ if (ctx->device_pin[VideoDevice])
+ IPin_Release(ctx->device_pin[VideoDevice]);
+ if (ctx->device_pin[AudioDevice])
+ IPin_Release(ctx->device_pin[AudioDevice]);
+ if (ctx->device_filter[VideoDevice])
+ IBaseFilter_Release(ctx->device_filter[VideoDevice]);
+ if (ctx->device_filter[AudioDevice])
+ IBaseFilter_Release(ctx->device_filter[AudioDevice]);
+
+ if (ctx->device_name[0])
+ av_freep(&ctx->device_name[0]);
+ if (ctx->device_name[1])
+ av_freep(&ctx->device_name[1]);
+
+ if(ctx->mutex)
+ CloseHandle(ctx->mutex);
+ if(ctx->event[0])
+ CloseHandle(ctx->event[0]);
+ if(ctx->event[1])
+ CloseHandle(ctx->event[1]);
+
+ pktl = ctx->pktl;
+ while (pktl) {
+ AVPacketList *next = pktl->next;
+ av_packet_unref(&pktl->pkt);
+ av_free(pktl);
+ pktl = next;
+ }
+
+ CoUninitialize();
+
+ return 0;
+}
+
+static char *dup_wchar_to_utf8(wchar_t *w)
+{
+ char *s = NULL;
+ int l = WideCharToMultiByte(CP_UTF8, 0, w, -1, 0, 0, 0, 0);
+ s = av_malloc(l);
+ if (s)
+ WideCharToMultiByte(CP_UTF8, 0, w, -1, s, l, 0, 0);
+ return s;
+}
+
+static int shall_we_drop(AVFormatContext *s, int index, enum dshowDeviceType devtype)
+{
+ struct dshow_ctx *ctx = s->priv_data;
+ static const uint8_t dropscore[] = {62, 75, 87, 100};
+ const int ndropscores = FF_ARRAY_ELEMS(dropscore);
+ unsigned int buffer_fullness = (ctx->curbufsize[index]*100)/s->max_picture_buffer;
+ const char *devtypename = (devtype == VideoDevice) ? "video" : "audio";
+
+ if(dropscore[++ctx->video_frame_num%ndropscores] <= buffer_fullness) {
+ av_log(s, AV_LOG_ERROR,
+ "real-time buffer [%s] [%s input] too full or near too full (%d%% of size: %d [rtbufsize parameter])! frame dropped!\n",
+ ctx->device_name[devtype], devtypename, buffer_fullness, s->max_picture_buffer);
+ return 1;
+ }
+
+ return 0;
+}
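The heuristic above compares buffer fullness (in percent) against a rotating
threshold, so drops ramp up gradually as the real-time buffer fills instead
of switching all-or-nothing. A standalone sketch of the arithmetic, assuming
the stock max_picture_buffer of 3041280 bytes (tunable via rtbufsize):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        static const uint8_t dropscore[] = {62, 75, 87, 100};
        int64_t curbufsize         = 2800000;  /* bytes currently queued */
        int64_t max_picture_buffer = 3041280;  /* assumed rtbufsize default */
        unsigned fullness = curbufsize * 100 / max_picture_buffer;  /* 92 */
        unsigned frame;

        for (frame = 1; frame <= 4; frame++)
            printf("frame %u: threshold %d -> %s\n", frame,
                   dropscore[frame % 4],
                   dropscore[frame % 4] <= fullness ? "drop" : "keep");
        return 0;
    }

At 92% fullness three of every four frames are dropped; below 62% none are.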
+
+static void
+callback(void *priv_data, int index, uint8_t *buf, int buf_size, int64_t time, enum dshowDeviceType devtype)
+{
+ AVFormatContext *s = priv_data;
+ struct dshow_ctx *ctx = s->priv_data;
+ AVPacketList **ppktl, *pktl_next;
+
+// dump_videohdr(s, vdhdr);
+
+ WaitForSingleObject(ctx->mutex, INFINITE);
+
+ if(shall_we_drop(s, index, devtype))
+ goto fail;
+
+ pktl_next = av_mallocz(sizeof(AVPacketList));
+ if(!pktl_next)
+ goto fail;
+
+ if(av_new_packet(&pktl_next->pkt, buf_size) < 0) {
+ av_free(pktl_next);
+ goto fail;
+ }
+
+ pktl_next->pkt.stream_index = index;
+ pktl_next->pkt.pts = time;
+ memcpy(pktl_next->pkt.data, buf, buf_size);
+
+ for(ppktl = &ctx->pktl ; *ppktl ; ppktl = &(*ppktl)->next);
+ *ppktl = pktl_next;
+ ctx->curbufsize[index] += buf_size;
+
+ SetEvent(ctx->event[1]);
+ ReleaseMutex(ctx->mutex);
+
+ return;
+fail:
+ ReleaseMutex(ctx->mutex);
+ return;
+}
+
+/**
+ * Cycle through available devices using the device enumerator devenum,
+ * retrieve the device of the type specified by devtype and return a
+ * pointer to it in *pfilter.
+ * If pfilter is NULL, list all device names instead.
+ */
+static int
+dshow_cycle_devices(AVFormatContext *avctx, ICreateDevEnum *devenum,
+ enum dshowDeviceType devtype, enum dshowSourceFilterType sourcetype, IBaseFilter **pfilter)
+{
+ struct dshow_ctx *ctx = avctx->priv_data;
+ IBaseFilter *device_filter = NULL;
+ IEnumMoniker *classenum = NULL;
+ IMoniker *m = NULL;
+ const char *device_name = ctx->device_name[devtype];
+ int skip = (devtype == VideoDevice) ? ctx->video_device_number
+ : ctx->audio_device_number;
+ int r;
+
+ const GUID *device_guid[2] = { &CLSID_VideoInputDeviceCategory,
+ &CLSID_AudioInputDeviceCategory };
+ const char *devtypename = (devtype == VideoDevice) ? "video" : "audio only";
+ const char *sourcetypename = (sourcetype == VideoSourceDevice) ? "video" : "audio";
+
+ r = ICreateDevEnum_CreateClassEnumerator(devenum, device_guid[sourcetype],
+ (IEnumMoniker **) &classenum, 0);
+ if (r != S_OK) {
+ av_log(avctx, AV_LOG_ERROR, "Could not enumerate %s devices (or none found).\n",
+ devtypename);
+ return AVERROR(EIO);
+ }
+
+ while (!device_filter && IEnumMoniker_Next(classenum, 1, &m, NULL) == S_OK) {
+ IPropertyBag *bag = NULL;
+ char *friendly_name = NULL;
+ char *unique_name = NULL;
+ VARIANT var;
+ IBindCtx *bind_ctx = NULL;
+ LPOLESTR olestr = NULL;
+ LPMALLOC co_malloc = NULL;
+ int i;
+
+ r = CoGetMalloc(1, &co_malloc);
+ if (r != S_OK)
+ goto fail1;
+ r = CreateBindCtx(0, &bind_ctx);
+ if (r != S_OK)
+ goto fail1;
+ /* GetDisplayname works for both video and audio, DevicePath doesn't */
+ r = IMoniker_GetDisplayName(m, bind_ctx, NULL, &olestr);
+ if (r != S_OK)
+ goto fail1;
+        unique_name = dup_wchar_to_utf8(olestr);
+        if (!unique_name)
+            goto fail1;
+        /* replace ':' with '_' since we use : to delineate between sources */
+        for (i = 0; i < strlen(unique_name); i++) {
+            if (unique_name[i] == ':')
+                unique_name[i] = '_';
+        }
+
+ r = IMoniker_BindToStorage(m, 0, 0, &IID_IPropertyBag, (void *) &bag);
+ if (r != S_OK)
+ goto fail1;
+
+ var.vt = VT_BSTR;
+ r = IPropertyBag_Read(bag, L"FriendlyName", &var, NULL);
+ if (r != S_OK)
+ goto fail1;
+        friendly_name = dup_wchar_to_utf8(var.bstrVal);
+        if (!friendly_name)
+            goto fail1;
+
+ if (pfilter) {
+ if (strcmp(device_name, friendly_name) && strcmp(device_name, unique_name))
+ goto fail1;
+
+ if (!skip--) {
+ r = IMoniker_BindToObject(m, 0, 0, &IID_IBaseFilter, (void *) &device_filter);
+ if (r != S_OK) {
+ av_log(avctx, AV_LOG_ERROR, "Unable to BindToObject for %s\n", device_name);
+ goto fail1;
+ }
+ }
+ } else {
+ av_log(avctx, AV_LOG_INFO, " \"%s\"\n", friendly_name);
+ av_log(avctx, AV_LOG_INFO, " Alternative name \"%s\"\n", unique_name);
+ }
+
+fail1:
+ if (olestr && co_malloc)
+ IMalloc_Free(co_malloc, olestr);
+ if (bind_ctx)
+ IBindCtx_Release(bind_ctx);
+ av_free(friendly_name);
+ av_free(unique_name);
+ if (bag)
+ IPropertyBag_Release(bag);
+ IMoniker_Release(m);
+ }
+
+ IEnumMoniker_Release(classenum);
+
+ if (pfilter) {
+ if (!device_filter) {
+ av_log(avctx, AV_LOG_ERROR, "Could not find %s device with name [%s] among source devices of type %s.\n",
+ devtypename, device_name, sourcetypename);
+ return AVERROR(EIO);
+ }
+ *pfilter = device_filter;
+ }
+
+ return 0;
+}
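The pfilter == NULL path above is what the list_devices option (see the
AVOption table at the end of this file) ultimately reaches: each device is
logged with its friendly name and its moniker-derived alternative name, and
either string may be used in the input filename. A sketch of triggering the
listing through the public API; by design the open call then fails with
AVERROR_EXIT:

    #include <libavdevice/avdevice.h>
    #include <libavformat/avformat.h>
    #include <libavutil/dict.h>

    static void list_dshow_devices(void)
    {
        AVFormatContext *s = NULL;
        AVDictionary *opts = NULL;

        avdevice_register_all();
        av_dict_set(&opts, "list_devices", "true", 0);
        /* The filename is ignored while listing; device names are logged
         * and the call returns AVERROR_EXIT. */
        avformat_open_input(&s, "dummy", av_find_input_format("dshow"), &opts);
        av_dict_free(&opts);
    }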
+
+/**
+ * Cycle through available formats using the specified pin,
+ * try to set the parameters specified through AVOptions and, if successful,
+ * return 1 in *pformat_set.
+ * If pformat_set is NULL, list all pin capabilities.
+ */
+static void
+dshow_cycle_formats(AVFormatContext *avctx, enum dshowDeviceType devtype,
+ IPin *pin, int *pformat_set)
+{
+ struct dshow_ctx *ctx = avctx->priv_data;
+ IAMStreamConfig *config = NULL;
+ AM_MEDIA_TYPE *type = NULL;
+ int format_set = 0;
+ void *caps = NULL;
+ int i, n, size, r;
+
+ if (IPin_QueryInterface(pin, &IID_IAMStreamConfig, (void **) &config) != S_OK)
+ return;
+ if (IAMStreamConfig_GetNumberOfCapabilities(config, &n, &size) != S_OK)
+ goto end;
+
+ caps = av_malloc(size);
+ if (!caps)
+ goto end;
+
+ for (i = 0; i < n && !format_set; i++) {
+ r = IAMStreamConfig_GetStreamCaps(config, i, &type, (void *) caps);
+ if (r != S_OK)
+ goto next;
+#if DSHOWDEBUG
+ ff_print_AM_MEDIA_TYPE(type);
+#endif
+
+ if (devtype == VideoDevice) {
+ VIDEO_STREAM_CONFIG_CAPS *vcaps = caps;
+ BITMAPINFOHEADER *bih;
+ int64_t *fr;
+ const AVCodecTag *const tags[] = { avformat_get_riff_video_tags(), NULL };
+#if DSHOWDEBUG
+ ff_print_VIDEO_STREAM_CONFIG_CAPS(vcaps);
+#endif
+ if (IsEqualGUID(&type->formattype, &FORMAT_VideoInfo)) {
+ VIDEOINFOHEADER *v = (void *) type->pbFormat;
+ fr = &v->AvgTimePerFrame;
+ bih = &v->bmiHeader;
+ } else if (IsEqualGUID(&type->formattype, &FORMAT_VideoInfo2)) {
+ VIDEOINFOHEADER2 *v = (void *) type->pbFormat;
+ fr = &v->AvgTimePerFrame;
+ bih = &v->bmiHeader;
+ } else {
+ goto next;
+ }
+ if (!pformat_set) {
+ enum AVPixelFormat pix_fmt = dshow_pixfmt(bih->biCompression, bih->biBitCount);
+ if (pix_fmt == AV_PIX_FMT_NONE) {
+ enum AVCodecID codec_id = av_codec_get_id(tags, bih->biCompression);
+ AVCodec *codec = avcodec_find_decoder(codec_id);
+ if (codec_id == AV_CODEC_ID_NONE || !codec) {
+ av_log(avctx, AV_LOG_INFO, " unknown compression type 0x%X", (int) bih->biCompression);
+ } else {
+ av_log(avctx, AV_LOG_INFO, " vcodec=%s", codec->name);
+ }
+ } else {
+ av_log(avctx, AV_LOG_INFO, " pixel_format=%s", av_get_pix_fmt_name(pix_fmt));
+ }
+ av_log(avctx, AV_LOG_INFO, " min s=%ldx%ld fps=%g max s=%ldx%ld fps=%g\n",
+ vcaps->MinOutputSize.cx, vcaps->MinOutputSize.cy,
+ 1e7 / vcaps->MaxFrameInterval,
+ vcaps->MaxOutputSize.cx, vcaps->MaxOutputSize.cy,
+ 1e7 / vcaps->MinFrameInterval);
+ continue;
+ }
+ if (ctx->video_codec_id != AV_CODEC_ID_RAWVIDEO) {
+ if (ctx->video_codec_id != av_codec_get_id(tags, bih->biCompression))
+ goto next;
+ }
+ if (ctx->pixel_format != AV_PIX_FMT_NONE &&
+ ctx->pixel_format != dshow_pixfmt(bih->biCompression, bih->biBitCount)) {
+ goto next;
+ }
+ if (ctx->framerate) {
+ int64_t framerate = ((int64_t) ctx->requested_framerate.den*10000000)
+ / ctx->requested_framerate.num;
+ if (framerate > vcaps->MaxFrameInterval ||
+ framerate < vcaps->MinFrameInterval)
+ goto next;
+ *fr = framerate;
+ }
+ if (ctx->requested_width && ctx->requested_height) {
+ if (ctx->requested_width > vcaps->MaxOutputSize.cx ||
+ ctx->requested_width < vcaps->MinOutputSize.cx ||
+ ctx->requested_height > vcaps->MaxOutputSize.cy ||
+ ctx->requested_height < vcaps->MinOutputSize.cy)
+ goto next;
+ bih->biWidth = ctx->requested_width;
+ bih->biHeight = ctx->requested_height;
+ }
+ } else {
+ AUDIO_STREAM_CONFIG_CAPS *acaps = caps;
+ WAVEFORMATEX *fx;
+#if DSHOWDEBUG
+ ff_print_AUDIO_STREAM_CONFIG_CAPS(acaps);
+#endif
+ if (IsEqualGUID(&type->formattype, &FORMAT_WaveFormatEx)) {
+ fx = (void *) type->pbFormat;
+ } else {
+ goto next;
+ }
+ if (!pformat_set) {
+ av_log(avctx, AV_LOG_INFO, " min ch=%lu bits=%lu rate=%6lu max ch=%lu bits=%lu rate=%6lu\n",
+ acaps->MinimumChannels, acaps->MinimumBitsPerSample, acaps->MinimumSampleFrequency,
+ acaps->MaximumChannels, acaps->MaximumBitsPerSample, acaps->MaximumSampleFrequency);
+ continue;
+ }
+ if (ctx->sample_rate) {
+ if (ctx->sample_rate > acaps->MaximumSampleFrequency ||
+ ctx->sample_rate < acaps->MinimumSampleFrequency)
+ goto next;
+ fx->nSamplesPerSec = ctx->sample_rate;
+ }
+ if (ctx->sample_size) {
+ if (ctx->sample_size > acaps->MaximumBitsPerSample ||
+ ctx->sample_size < acaps->MinimumBitsPerSample)
+ goto next;
+ fx->wBitsPerSample = ctx->sample_size;
+ }
+ if (ctx->channels) {
+ if (ctx->channels > acaps->MaximumChannels ||
+ ctx->channels < acaps->MinimumChannels)
+ goto next;
+ fx->nChannels = ctx->channels;
+ }
+ }
+ if (IAMStreamConfig_SetFormat(config, type) != S_OK)
+ goto next;
+ format_set = 1;
+next:
+ if (type->pbFormat)
+ CoTaskMemFree(type->pbFormat);
+ CoTaskMemFree(type);
+ }
+end:
+ IAMStreamConfig_Release(config);
+ av_free(caps);
+ if (pformat_set)
+ *pformat_set = format_set;
+}
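All frame-rate negotiation above happens in DirectShow REFERENCE_TIME units
of 100 nanoseconds: a requested rate num/den becomes an AvgTimePerFrame of
den * 1e7 / num, and the 1e7 / interval expressions convert back to fps for
the log output. A worked sketch of the conversion with assumed NTSC values:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int num = 30000, den = 1001;  /* 29.97 fps requested via -framerate */
        int64_t interval = (int64_t)den * 10000000 / num;  /* 100 ns units */

        /* Prints: AvgTimePerFrame = 333666 (29.970 fps) */
        printf("AvgTimePerFrame = %" PRId64 " (%.3f fps)\n",
               interval, 1e7 / (double)interval);
        return 0;
    }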
+
+/**
+ * Set audio device buffer size in milliseconds (which can directly impact
+ * latency, depending on the device).
+ */
+static int
+dshow_set_audio_buffer_size(AVFormatContext *avctx, IPin *pin)
+{
+ struct dshow_ctx *ctx = avctx->priv_data;
+ IAMBufferNegotiation *buffer_negotiation = NULL;
+ ALLOCATOR_PROPERTIES props = { -1, -1, -1, -1 };
+ IAMStreamConfig *config = NULL;
+ AM_MEDIA_TYPE *type = NULL;
+ int ret = AVERROR(EIO);
+
+ if (IPin_QueryInterface(pin, &IID_IAMStreamConfig, (void **) &config) != S_OK)
+ goto end;
+ if (IAMStreamConfig_GetFormat(config, &type) != S_OK)
+ goto end;
+ if (!IsEqualGUID(&type->formattype, &FORMAT_WaveFormatEx))
+ goto end;
+
+ props.cbBuffer = (((WAVEFORMATEX *) type->pbFormat)->nAvgBytesPerSec)
+ * ctx->audio_buffer_size / 1000;
+
+ if (IPin_QueryInterface(pin, &IID_IAMBufferNegotiation, (void **) &buffer_negotiation) != S_OK)
+ goto end;
+ if (IAMBufferNegotiation_SuggestAllocatorProperties(buffer_negotiation, &props) != S_OK)
+ goto end;
+
+ ret = 0;
+
+end:
+ if (buffer_negotiation)
+ IAMBufferNegotiation_Release(buffer_negotiation);
+ if (type) {
+ if (type->pbFormat)
+ CoTaskMemFree(type->pbFormat);
+ CoTaskMemFree(type);
+ }
+ if (config)
+ IAMStreamConfig_Release(config);
+
+ return ret;
+}
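The suggested allocator size above is simply nAvgBytesPerSec scaled by the
requested latency in milliseconds. A quick sketch of that arithmetic for
16-bit stereo at 48 kHz (assumed values):

    #include <stdio.h>

    int main(void)
    {
        int sample_rate = 48000, channels = 2, bytes_per_sample = 2;
        int avg_bytes_per_sec = sample_rate * channels * bytes_per_sample;
        int audio_buffer_size = 80;  /* the audio_buffer_size option, in ms */

        /* Matches props.cbBuffer above: 192000 * 80 / 1000 = 15360 bytes. */
        printf("cbBuffer = %d bytes\n",
               avg_bytes_per_sec * audio_buffer_size / 1000);
        return 0;
    }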
+
+/**
+ * Pop up a dialog allowing the user to adjust properties for the given filter, if possible.
+ */
+void
+dshow_show_filter_properties(IBaseFilter *device_filter, AVFormatContext *avctx)
+{
+ ISpecifyPropertyPages *property_pages = NULL;
+ IUnknown *device_filter_iunknown = NULL;
+ HRESULT hr;
+    FILTER_INFO filter_info = {0}; /* a warning on this line is a false positive; GCC bug 53119, AFAICT */
+ CAUUID ca_guid = {0};
+
+ hr = IBaseFilter_QueryInterface(device_filter, &IID_ISpecifyPropertyPages, (void **)&property_pages);
+ if (hr != S_OK) {
+ av_log(avctx, AV_LOG_WARNING, "requested filter does not have a property page to show");
+ goto end;
+ }
+ hr = IBaseFilter_QueryFilterInfo(device_filter, &filter_info);
+ if (hr != S_OK) {
+ goto fail;
+ }
+ hr = IBaseFilter_QueryInterface(device_filter, &IID_IUnknown, (void **)&device_filter_iunknown);
+ if (hr != S_OK) {
+ goto fail;
+ }
+ hr = ISpecifyPropertyPages_GetPages(property_pages, &ca_guid);
+ if (hr != S_OK) {
+ goto fail;
+ }
+ hr = OleCreatePropertyFrame(NULL, 0, 0, filter_info.achName, 1, &device_filter_iunknown, ca_guid.cElems,
+ ca_guid.pElems, 0, 0, NULL);
+ if (hr != S_OK) {
+ goto fail;
+ }
+ goto end;
+fail:
+ av_log(avctx, AV_LOG_ERROR, "Failure showing property pages for filter");
+end:
+ if (property_pages)
+ ISpecifyPropertyPages_Release(property_pages);
+ if (device_filter_iunknown)
+ IUnknown_Release(device_filter_iunknown);
+ if (filter_info.pGraph)
+ IFilterGraph_Release(filter_info.pGraph);
+ if (ca_guid.pElems)
+ CoTaskMemFree(ca_guid.pElems);
+}
+
+/**
+ * Cycle through the available pins of device_filter (a device of type
+ * devtype), retrieve the first matching output pin and return a pointer
+ * to it in *ppin.
+ * If ppin is NULL, cycle through all pins listing audio/video capabilities.
+ */
+static int
+dshow_cycle_pins(AVFormatContext *avctx, enum dshowDeviceType devtype,
+ enum dshowSourceFilterType sourcetype, IBaseFilter *device_filter, IPin **ppin)
+{
+ struct dshow_ctx *ctx = avctx->priv_data;
+ IEnumPins *pins = 0;
+ IPin *device_pin = NULL;
+ IPin *pin;
+ int r;
+
+ const GUID *mediatype[2] = { &MEDIATYPE_Video, &MEDIATYPE_Audio };
+ const char *devtypename = (devtype == VideoDevice) ? "video" : "audio only";
+ const char *sourcetypename = (sourcetype == VideoSourceDevice) ? "video" : "audio";
+
+ int set_format = (devtype == VideoDevice && (ctx->framerate ||
+ (ctx->requested_width && ctx->requested_height) ||
+ ctx->pixel_format != AV_PIX_FMT_NONE ||
+ ctx->video_codec_id != AV_CODEC_ID_RAWVIDEO))
+ || (devtype == AudioDevice && (ctx->channels || ctx->sample_rate));
+ int format_set = 0;
+ int should_show_properties = (devtype == VideoDevice) ? ctx->show_video_device_dialog : ctx->show_audio_device_dialog;
+
+ if (should_show_properties)
+ dshow_show_filter_properties(device_filter, avctx);
+
+ r = IBaseFilter_EnumPins(device_filter, &pins);
+ if (r != S_OK) {
+ av_log(avctx, AV_LOG_ERROR, "Could not enumerate pins.\n");
+ return AVERROR(EIO);
+ }
+
+ if (!ppin) {
+ av_log(avctx, AV_LOG_INFO, "DirectShow %s device options (from %s devices)\n",
+ devtypename, sourcetypename);
+ }
+
+ while (!device_pin && IEnumPins_Next(pins, 1, &pin, NULL) == S_OK) {
+ IKsPropertySet *p = NULL;
+ IEnumMediaTypes *types = NULL;
+ PIN_INFO info = {0};
+ AM_MEDIA_TYPE *type;
+ GUID category;
+ DWORD r2;
+ char *name_buf = NULL;
+ wchar_t *pin_id = NULL;
+ char *pin_buf = NULL;
+ char *desired_pin_name = devtype == VideoDevice ? ctx->video_pin_name : ctx->audio_pin_name;
+
+ IPin_QueryPinInfo(pin, &info);
+ IBaseFilter_Release(info.pFilter);
+
+ if (info.dir != PINDIR_OUTPUT)
+ goto next;
+ if (IPin_QueryInterface(pin, &IID_IKsPropertySet, (void **) &p) != S_OK)
+ goto next;
+ if (IKsPropertySet_Get(p, &AMPROPSETID_Pin, AMPROPERTY_PIN_CATEGORY,
+ NULL, 0, &category, sizeof(GUID), &r2) != S_OK)
+ goto next;
+ if (!IsEqualGUID(&category, &PIN_CATEGORY_CAPTURE))
+ goto next;
+ name_buf = dup_wchar_to_utf8(info.achName);
+
+ r = IPin_QueryId(pin, &pin_id);
+ if (r != S_OK) {
+ av_log(avctx, AV_LOG_ERROR, "Could not query pin id\n");
+ return AVERROR(EIO);
+ }
+ pin_buf = dup_wchar_to_utf8(pin_id);
+
+ if (!ppin) {
+ av_log(avctx, AV_LOG_INFO, " Pin \"%s\" (alternative pin name \"%s\")\n", name_buf, pin_buf);
+ dshow_cycle_formats(avctx, devtype, pin, NULL);
+ goto next;
+ }
+
+ if (desired_pin_name) {
+ if(strcmp(name_buf, desired_pin_name) && strcmp(pin_buf, desired_pin_name)) {
+ av_log(avctx, AV_LOG_DEBUG, "skipping pin \"%s\" (\"%s\") != requested \"%s\"\n",
+ name_buf, pin_buf, desired_pin_name);
+ goto next;
+ }
+ }
+
+ if (set_format) {
+ dshow_cycle_formats(avctx, devtype, pin, &format_set);
+ if (!format_set) {
+ goto next;
+ }
+ }
+ if (devtype == AudioDevice && ctx->audio_buffer_size) {
+ if (dshow_set_audio_buffer_size(avctx, pin) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "unable to set audio buffer size %d to pin, using pin anyway...", ctx->audio_buffer_size);
+ }
+ }
+
+ if (IPin_EnumMediaTypes(pin, &types) != S_OK)
+ goto next;
+
+ IEnumMediaTypes_Reset(types);
+ /* in case format_set was not called, just verify the majortype */
+ while (!device_pin && IEnumMediaTypes_Next(types, 1, &type, NULL) == S_OK) {
+ if (IsEqualGUID(&type->majortype, mediatype[devtype])) {
+ device_pin = pin;
+ av_log(avctx, AV_LOG_DEBUG, "Selecting pin %s on %s\n", name_buf, devtypename);
+ goto next;
+ }
+ CoTaskMemFree(type);
+ }
+
+next:
+ if (types)
+ IEnumMediaTypes_Release(types);
+ if (p)
+ IKsPropertySet_Release(p);
+ if (device_pin != pin)
+ IPin_Release(pin);
+ av_free(name_buf);
+ av_free(pin_buf);
+ if (pin_id)
+ CoTaskMemFree(pin_id);
+ }
+
+ IEnumPins_Release(pins);
+
+ if (ppin) {
+ if (set_format && !format_set) {
+ av_log(avctx, AV_LOG_ERROR, "Could not set %s options\n", devtypename);
+ return AVERROR(EIO);
+ }
+ if (!device_pin) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Could not find output pin from %s capture device.\n", devtypename);
+ return AVERROR(EIO);
+ }
+ *ppin = device_pin;
+ }
+
+ return 0;
+}
+
+/**
+ * List options for the device of type devtype and source filter type sourcetype.
+ *
+ * @param devenum device enumerator used for accessing the device
+ */
+static int
+dshow_list_device_options(AVFormatContext *avctx, ICreateDevEnum *devenum,
+ enum dshowDeviceType devtype, enum dshowSourceFilterType sourcetype)
+{
+ struct dshow_ctx *ctx = avctx->priv_data;
+ IBaseFilter *device_filter = NULL;
+ int r;
+
+ if ((r = dshow_cycle_devices(avctx, devenum, devtype, sourcetype, &device_filter)) < 0)
+ return r;
+ ctx->device_filter[devtype] = device_filter;
+ if ((r = dshow_cycle_pins(avctx, devtype, sourcetype, device_filter, NULL)) < 0)
+ return r;
+
+ return 0;
+}
+
+static int
+dshow_open_device(AVFormatContext *avctx, ICreateDevEnum *devenum,
+ enum dshowDeviceType devtype, enum dshowSourceFilterType sourcetype)
+{
+ struct dshow_ctx *ctx = avctx->priv_data;
+ IBaseFilter *device_filter = NULL;
+ IGraphBuilder *graph = ctx->graph;
+ IPin *device_pin = NULL;
+ libAVPin *capture_pin = NULL;
+ libAVFilter *capture_filter = NULL;
+ ICaptureGraphBuilder2 *graph_builder2 = NULL;
+ int ret = AVERROR(EIO);
+ int r;
+ IStream *ifile_stream = NULL;
+ IStream *ofile_stream = NULL;
+ IPersistStream *pers_stream = NULL;
+
+ const wchar_t *filter_name[2] = { L"Audio capture filter", L"Video capture filter" };
+
+
+ if ( ((ctx->audio_filter_load_file) && (strlen(ctx->audio_filter_load_file)>0) && (sourcetype == AudioSourceDevice)) ||
+ ((ctx->video_filter_load_file) && (strlen(ctx->video_filter_load_file)>0) && (sourcetype == VideoSourceDevice)) ) {
+ HRESULT hr;
+ char *filename = NULL;
+
+ if (sourcetype == AudioSourceDevice)
+ filename = ctx->audio_filter_load_file;
+ else
+ filename = ctx->video_filter_load_file;
+
+ hr = SHCreateStreamOnFile ((LPCSTR) filename, STGM_READ, &ifile_stream);
+ if (S_OK != hr) {
+ av_log(avctx, AV_LOG_ERROR, "Could not open capture filter description file.\n");
+ goto error;
+ }
+
+ hr = OleLoadFromStream(ifile_stream, &IID_IBaseFilter, (void **) &device_filter);
+ if (hr != S_OK) {
+ av_log(avctx, AV_LOG_ERROR, "Could not load capture filter from file.\n");
+ goto error;
+ }
+
+ if (sourcetype == AudioSourceDevice)
+ av_log(avctx, AV_LOG_INFO, "Audio-");
+ else
+ av_log(avctx, AV_LOG_INFO, "Video-");
+ av_log(avctx, AV_LOG_INFO, "Capture filter loaded successfully from file \"%s\".\n", filename);
+ } else {
+
+ if ((r = dshow_cycle_devices(avctx, devenum, devtype, sourcetype, &device_filter)) < 0) {
+ ret = r;
+ goto error;
+ }
+ }
+
+ ctx->device_filter [devtype] = device_filter;
+
+ r = IGraphBuilder_AddFilter(graph, device_filter, NULL);
+ if (r != S_OK) {
+ av_log(avctx, AV_LOG_ERROR, "Could not add device filter to graph.\n");
+ goto error;
+ }
+
+ if ((r = dshow_cycle_pins(avctx, devtype, sourcetype, device_filter, &device_pin)) < 0) {
+ ret = r;
+ goto error;
+ }
+
+ ctx->device_pin[devtype] = device_pin;
+
+ capture_filter = libAVFilter_Create(avctx, callback, devtype);
+ if (!capture_filter) {
+ av_log(avctx, AV_LOG_ERROR, "Could not create grabber filter.\n");
+ goto error;
+ }
+ ctx->capture_filter[devtype] = capture_filter;
+
+ if ( ((ctx->audio_filter_save_file) && (strlen(ctx->audio_filter_save_file)>0) && (sourcetype == AudioSourceDevice)) ||
+ ((ctx->video_filter_save_file) && (strlen(ctx->video_filter_save_file)>0) && (sourcetype == VideoSourceDevice)) ) {
+
+ HRESULT hr;
+ char *filename = NULL;
+
+ if (sourcetype == AudioSourceDevice)
+ filename = ctx->audio_filter_save_file;
+ else
+ filename = ctx->video_filter_save_file;
+
+ hr = SHCreateStreamOnFile ((LPCSTR) filename, STGM_CREATE | STGM_READWRITE, &ofile_stream);
+ if (S_OK != hr) {
+ av_log(avctx, AV_LOG_ERROR, "Could not create capture filter description file.\n");
+ goto error;
+ }
+
+ hr = IBaseFilter_QueryInterface(device_filter, &IID_IPersistStream, (void **) &pers_stream);
+ if (hr != S_OK) {
+ av_log(avctx, AV_LOG_ERROR, "Query for IPersistStream failed.\n");
+ goto error;
+ }
+
+ hr = OleSaveToStream(pers_stream, ofile_stream);
+ if (hr != S_OK) {
+ av_log(avctx, AV_LOG_ERROR, "Could not save capture filter \n");
+ goto error;
+ }
+
+ hr = IStream_Commit(ofile_stream, STGC_DEFAULT);
+ if (S_OK != hr) {
+ av_log(avctx, AV_LOG_ERROR, "Could not commit capture filter data to file.\n");
+ goto error;
+ }
+
+ if (sourcetype == AudioSourceDevice)
+ av_log(avctx, AV_LOG_INFO, "Audio-");
+ else
+ av_log(avctx, AV_LOG_INFO, "Video-");
+ av_log(avctx, AV_LOG_INFO, "Capture filter saved successfully to file \"%s\".\n", filename);
+ }
+
+ r = IGraphBuilder_AddFilter(graph, (IBaseFilter *) capture_filter,
+ filter_name[devtype]);
+ if (r != S_OK) {
+ av_log(avctx, AV_LOG_ERROR, "Could not add capture filter to graph\n");
+ goto error;
+ }
+
+ libAVPin_AddRef(capture_filter->pin);
+ capture_pin = capture_filter->pin;
+ ctx->capture_pin[devtype] = capture_pin;
+
+ r = CoCreateInstance(&CLSID_CaptureGraphBuilder2, NULL, CLSCTX_INPROC_SERVER,
+ &IID_ICaptureGraphBuilder2, (void **) &graph_builder2);
+ if (r != S_OK) {
+ av_log(avctx, AV_LOG_ERROR, "Could not create CaptureGraphBuilder2\n");
+ goto error;
+ }
+    r = ICaptureGraphBuilder2_SetFiltergraph(graph_builder2, graph);
+ if (r != S_OK) {
+ av_log(avctx, AV_LOG_ERROR, "Could not set graph for CaptureGraphBuilder2\n");
+ goto error;
+ }
+
+ r = ICaptureGraphBuilder2_RenderStream(graph_builder2, NULL, NULL, (IUnknown *) device_pin, NULL /* no intermediate filter */,
+ (IBaseFilter *) capture_filter); /* connect pins, optionally insert intermediate filters like crossbar if necessary */
+
+ if (r != S_OK) {
+ av_log(avctx, AV_LOG_ERROR, "Could not RenderStream to connect pins\n");
+ goto error;
+ }
+
+ r = dshow_try_setup_crossbar_options(graph_builder2, device_filter, devtype, avctx);
+
+ if (r != S_OK) {
+ av_log(avctx, AV_LOG_ERROR, "Could not setup CrossBar\n");
+ goto error;
+ }
+
+ ret = 0;
+
+error:
+ if (graph_builder2 != NULL)
+ ICaptureGraphBuilder2_Release(graph_builder2);
+
+ if (pers_stream)
+ IPersistStream_Release(pers_stream);
+
+ if (ifile_stream)
+ IStream_Release(ifile_stream);
+
+ if (ofile_stream)
+ IStream_Release(ofile_stream);
+
+ return ret;
+}
+
+static enum AVCodecID waveform_codec_id(enum AVSampleFormat sample_fmt)
+{
+ switch (sample_fmt) {
+ case AV_SAMPLE_FMT_U8: return AV_CODEC_ID_PCM_U8;
+ case AV_SAMPLE_FMT_S16: return AV_CODEC_ID_PCM_S16LE;
+ case AV_SAMPLE_FMT_S32: return AV_CODEC_ID_PCM_S32LE;
+ default: return AV_CODEC_ID_NONE; /* Should never happen. */
+ }
+}
+
+static enum AVSampleFormat sample_fmt_bits_per_sample(int bits)
+{
+ switch (bits) {
+ case 8: return AV_SAMPLE_FMT_U8;
+ case 16: return AV_SAMPLE_FMT_S16;
+ case 32: return AV_SAMPLE_FMT_S32;
+ default: return AV_SAMPLE_FMT_NONE; /* Should never happen. */
+ }
+}
+
+static int
+dshow_add_device(AVFormatContext *avctx,
+ enum dshowDeviceType devtype)
+{
+ struct dshow_ctx *ctx = avctx->priv_data;
+ AM_MEDIA_TYPE type;
+ AVCodecParameters *par;
+ AVStream *st;
+ int ret = AVERROR(EIO);
+
+ st = avformat_new_stream(avctx, NULL);
+ if (!st) {
+ ret = AVERROR(ENOMEM);
+ goto error;
+ }
+ st->id = devtype;
+
+ ctx->capture_filter[devtype]->stream_index = st->index;
+
+ libAVPin_ConnectionMediaType(ctx->capture_pin[devtype], &type);
+
+ par = st->codecpar;
+ if (devtype == VideoDevice) {
+ BITMAPINFOHEADER *bih = NULL;
+ AVRational time_base;
+
+ if (IsEqualGUID(&type.formattype, &FORMAT_VideoInfo)) {
+ VIDEOINFOHEADER *v = (void *) type.pbFormat;
+ time_base = (AVRational) { v->AvgTimePerFrame, 10000000 };
+ bih = &v->bmiHeader;
+ } else if (IsEqualGUID(&type.formattype, &FORMAT_VideoInfo2)) {
+ VIDEOINFOHEADER2 *v = (void *) type.pbFormat;
+ time_base = (AVRational) { v->AvgTimePerFrame, 10000000 };
+ bih = &v->bmiHeader;
+ }
+ if (!bih) {
+ av_log(avctx, AV_LOG_ERROR, "Could not get media type.\n");
+ goto error;
+ }
+
+ st->avg_frame_rate = av_inv_q(time_base);
+
+ par->codec_type = AVMEDIA_TYPE_VIDEO;
+ par->width = bih->biWidth;
+ par->height = bih->biHeight;
+ par->codec_tag = bih->biCompression;
+ par->format = dshow_pixfmt(bih->biCompression, bih->biBitCount);
+ if (bih->biCompression == MKTAG('H', 'D', 'Y', 'C')) {
+ av_log(avctx, AV_LOG_DEBUG, "attempt to use full range for HDYC...\n");
+ par->color_range = AVCOL_RANGE_MPEG; // just in case it needs this...
+ }
+ if (par->format == AV_PIX_FMT_NONE) {
+ const AVCodecTag *const tags[] = { avformat_get_riff_video_tags(), NULL };
+ par->codec_id = av_codec_get_id(tags, bih->biCompression);
+ if (par->codec_id == AV_CODEC_ID_NONE) {
+ av_log(avctx, AV_LOG_ERROR, "Unknown compression type. "
+ "Please report type 0x%X.\n", (int) bih->biCompression);
+ return AVERROR_PATCHWELCOME;
+ }
+ par->bits_per_coded_sample = bih->biBitCount;
+ } else {
+ par->codec_id = AV_CODEC_ID_RAWVIDEO;
+ if (bih->biCompression == BI_RGB || bih->biCompression == BI_BITFIELDS) {
+ par->bits_per_coded_sample = bih->biBitCount;
+ par->extradata = av_malloc(9 + AV_INPUT_BUFFER_PADDING_SIZE);
+ if (par->extradata) {
+ par->extradata_size = 9;
+ memcpy(par->extradata, "BottomUp", 9);
+ }
+ }
+ }
+ } else {
+ WAVEFORMATEX *fx = NULL;
+
+ if (IsEqualGUID(&type.formattype, &FORMAT_WaveFormatEx)) {
+ fx = (void *) type.pbFormat;
+ }
+ if (!fx) {
+ av_log(avctx, AV_LOG_ERROR, "Could not get media type.\n");
+ goto error;
+ }
+
+ par->codec_type = AVMEDIA_TYPE_AUDIO;
+ par->format = sample_fmt_bits_per_sample(fx->wBitsPerSample);
+ par->codec_id = waveform_codec_id(par->format);
+ par->sample_rate = fx->nSamplesPerSec;
+ par->channels = fx->nChannels;
+ }
+
+ avpriv_set_pts_info(st, 64, 1, 10000000);
+
+ ret = 0;
+
+error:
+ return ret;
+}
+
+static int parse_device_name(AVFormatContext *avctx)
+{
+ struct dshow_ctx *ctx = avctx->priv_data;
+ char **device_name = ctx->device_name;
+ char *name = av_strdup(avctx->filename);
+ char *tmp = name;
+ int ret = 1;
+ char *type;
+
+ while ((type = strtok(tmp, "="))) {
+ char *token = strtok(NULL, ":");
+ tmp = NULL;
+
+ if (!strcmp(type, "video")) {
+ device_name[0] = token;
+ } else if (!strcmp(type, "audio")) {
+ device_name[1] = token;
+ } else {
+ device_name[0] = NULL;
+ device_name[1] = NULL;
+ break;
+ }
+ }
+
+ if (!device_name[0] && !device_name[1]) {
+ ret = 0;
+ } else {
+ if (device_name[0])
+ device_name[0] = av_strdup(device_name[0]);
+ if (device_name[1])
+ device_name[1] = av_strdup(device_name[1]);
+ }
+
+ av_free(name);
+ return ret;
+}
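The accepted filename syntax is thus "video=NAME", "audio=NAME", or both
joined with ':'; any other key clears both names and the parse fails. A
standalone sketch of the same strtok() walk over a hypothetical device
string:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        char *name = strdup("video=Integrated Camera:audio=Microphone");
        char *tmp = name, *type;

        while ((type = strtok(tmp, "="))) {
            char *token = strtok(NULL, ":");
            tmp = NULL;
            printf("%s device: \"%s\"\n", type, token ? token : "(none)");
        }
        free(name);
        return 0;
    }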
+
+static int dshow_read_header(AVFormatContext *avctx)
+{
+ struct dshow_ctx *ctx = avctx->priv_data;
+ IGraphBuilder *graph = NULL;
+ ICreateDevEnum *devenum = NULL;
+ IMediaControl *control = NULL;
+ IMediaEvent *media_event = NULL;
+ HANDLE media_event_handle;
+ HANDLE proc;
+ int ret = AVERROR(EIO);
+ int r;
+
+ CoInitialize(0);
+
+ if (!ctx->list_devices && !parse_device_name(avctx)) {
+ av_log(avctx, AV_LOG_ERROR, "Malformed dshow input string.\n");
+ goto error;
+ }
+
+ ctx->video_codec_id = avctx->video_codec_id ? avctx->video_codec_id
+ : AV_CODEC_ID_RAWVIDEO;
+ if (ctx->pixel_format != AV_PIX_FMT_NONE) {
+ if (ctx->video_codec_id != AV_CODEC_ID_RAWVIDEO) {
+ av_log(avctx, AV_LOG_ERROR, "Pixel format may only be set when "
+ "video codec is not set or set to rawvideo\n");
+ ret = AVERROR(EINVAL);
+ goto error;
+ }
+ }
+ if (ctx->framerate) {
+ r = av_parse_video_rate(&ctx->requested_framerate, ctx->framerate);
+ if (r < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Could not parse framerate '%s'.\n", ctx->framerate);
+ goto error;
+ }
+ }
+
+ r = CoCreateInstance(&CLSID_FilterGraph, NULL, CLSCTX_INPROC_SERVER,
+ &IID_IGraphBuilder, (void **) &graph);
+ if (r != S_OK) {
+ av_log(avctx, AV_LOG_ERROR, "Could not create capture graph.\n");
+ goto error;
+ }
+ ctx->graph = graph;
+
+ r = CoCreateInstance(&CLSID_SystemDeviceEnum, NULL, CLSCTX_INPROC_SERVER,
+ &IID_ICreateDevEnum, (void **) &devenum);
+ if (r != S_OK) {
+ av_log(avctx, AV_LOG_ERROR, "Could not enumerate system devices.\n");
+ goto error;
+ }
+
+ if (ctx->list_devices) {
+ av_log(avctx, AV_LOG_INFO, "DirectShow video devices (some may be both video and audio devices)\n");
+ dshow_cycle_devices(avctx, devenum, VideoDevice, VideoSourceDevice, NULL);
+ av_log(avctx, AV_LOG_INFO, "DirectShow audio devices\n");
+ dshow_cycle_devices(avctx, devenum, AudioDevice, AudioSourceDevice, NULL);
+ ret = AVERROR_EXIT;
+ goto error;
+ }
+ if (ctx->list_options) {
+ if (ctx->device_name[VideoDevice])
+ if ((r = dshow_list_device_options(avctx, devenum, VideoDevice, VideoSourceDevice))) {
+ ret = r;
+ goto error;
+ }
+ if (ctx->device_name[AudioDevice]) {
+ if (dshow_list_device_options(avctx, devenum, AudioDevice, AudioSourceDevice)) {
+ /* show audio options from combined video+audio sources as fallback */
+ if ((r = dshow_list_device_options(avctx, devenum, AudioDevice, VideoSourceDevice))) {
+ ret = r;
+ goto error;
+ }
+ }
+ }
+ }
+ if (ctx->device_name[VideoDevice]) {
+ if ((r = dshow_open_device(avctx, devenum, VideoDevice, VideoSourceDevice)) < 0 ||
+ (r = dshow_add_device(avctx, VideoDevice)) < 0) {
+ ret = r;
+ goto error;
+ }
+ }
+ if (ctx->device_name[AudioDevice]) {
+ if ((r = dshow_open_device(avctx, devenum, AudioDevice, AudioSourceDevice)) < 0 ||
+ (r = dshow_add_device(avctx, AudioDevice)) < 0) {
+ av_log(avctx, AV_LOG_INFO, "Searching for audio device within video devices for %s\n", ctx->device_name[AudioDevice]);
+ /* see if there's a video source with an audio pin with the given audio name */
+ if ((r = dshow_open_device(avctx, devenum, AudioDevice, VideoSourceDevice)) < 0 ||
+ (r = dshow_add_device(avctx, AudioDevice)) < 0) {
+ ret = r;
+ goto error;
+ }
+ }
+ }
+ if (ctx->list_options) {
+ /* allow it to list crossbar options in dshow_open_device */
+ ret = AVERROR_EXIT;
+ goto error;
+ }
+ ctx->curbufsize[0] = 0;
+ ctx->curbufsize[1] = 0;
+ ctx->mutex = CreateMutex(NULL, 0, NULL);
+ if (!ctx->mutex) {
+ av_log(avctx, AV_LOG_ERROR, "Could not create Mutex\n");
+ goto error;
+ }
+ ctx->event[1] = CreateEvent(NULL, 1, 0, NULL);
+ if (!ctx->event[1]) {
+ av_log(avctx, AV_LOG_ERROR, "Could not create Event\n");
+ goto error;
+ }
+
+ r = IGraphBuilder_QueryInterface(graph, &IID_IMediaControl, (void **) &control);
+ if (r != S_OK) {
+ av_log(avctx, AV_LOG_ERROR, "Could not get media control.\n");
+ goto error;
+ }
+ ctx->control = control;
+
+ r = IGraphBuilder_QueryInterface(graph, &IID_IMediaEvent, (void **) &media_event);
+ if (r != S_OK) {
+ av_log(avctx, AV_LOG_ERROR, "Could not get media event.\n");
+ goto error;
+ }
+ ctx->media_event = media_event;
+
+ r = IMediaEvent_GetEventHandle(media_event, (void *) &media_event_handle);
+ if (r != S_OK) {
+ av_log(avctx, AV_LOG_ERROR, "Could not get media event handle.\n");
+ goto error;
+ }
+ proc = GetCurrentProcess();
+ r = DuplicateHandle(proc, media_event_handle, proc, &ctx->event[0],
+ 0, 0, DUPLICATE_SAME_ACCESS);
+ if (!r) {
+ av_log(avctx, AV_LOG_ERROR, "Could not duplicate media event handle.\n");
+ goto error;
+ }
+
+ r = IMediaControl_Run(control);
+ if (r == S_FALSE) {
+ OAFilterState pfs;
+ r = IMediaControl_GetState(control, 0, &pfs);
+ }
+ if (r != S_OK) {
+ av_log(avctx, AV_LOG_ERROR, "Could not run graph (sometimes caused by a device already in use by other application)\n");
+ goto error;
+ }
+
+ ret = 0;
+
+error:
+
+ if (devenum)
+ ICreateDevEnum_Release(devenum);
+
+ if (ret < 0)
+ dshow_read_close(avctx);
+
+ return ret;
+}
+
+/**
+ * Check media events from DirectShow and return -1 on error or EOF. Also
+ * purge any events left in the event queue so that event notification is
+ * not triggered again.
+ */
+static int dshow_check_event_queue(IMediaEvent *media_event)
+{
+ LONG_PTR p1, p2;
+ long code;
+ int ret = 0;
+
+ while (IMediaEvent_GetEvent(media_event, &code, &p1, &p2, 0) != E_ABORT) {
+ if (code == EC_COMPLETE || code == EC_DEVICE_LOST || code == EC_ERRORABORT)
+ ret = -1;
+ IMediaEvent_FreeEventParams(media_event, code, p1, p2);
+ }
+
+ return ret;
+}
+
+static int dshow_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ struct dshow_ctx *ctx = s->priv_data;
+ AVPacketList *pktl = NULL;
+
+ while (!ctx->eof && !pktl) {
+ WaitForSingleObject(ctx->mutex, INFINITE);
+ pktl = ctx->pktl;
+ if (pktl) {
+ *pkt = pktl->pkt;
+ ctx->pktl = ctx->pktl->next;
+ av_free(pktl);
+ ctx->curbufsize[pkt->stream_index] -= pkt->size;
+ }
+ ResetEvent(ctx->event[1]);
+ ReleaseMutex(ctx->mutex);
+ if (!pktl) {
+ if (dshow_check_event_queue(ctx->media_event) < 0) {
+ ctx->eof = 1;
+ } else if (s->flags & AVFMT_FLAG_NONBLOCK) {
+ return AVERROR(EAGAIN);
+ } else {
+ WaitForMultipleObjects(2, ctx->event, 0, INFINITE);
+ }
+ }
+ }
+
+ return ctx->eof ? AVERROR(EIO) : pkt->size;
+}
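Seen from the caller, the loop above means av_read_frame() either blocks on
the two event handles or, with AVFMT_FLAG_NONBLOCK set, returns EAGAIN until
the callback has queued a packet. A consumption sketch, assuming an already
opened dshow context:

    #include <libavformat/avformat.h>
    #include <libavutil/time.h>

    static void read_loop(AVFormatContext *s)
    {
        AVPacket pkt;

        for (;;) {
            int ret = av_read_frame(s, &pkt);
            if (ret == AVERROR(EAGAIN)) {  /* non-blocking: nothing queued */
                av_usleep(1000);
                continue;
            }
            if (ret < 0)
                break;                     /* device lost, aborted, or EOF */
            /* ... consume pkt.data / pkt.pts ... */
            av_packet_unref(&pkt);
        }
    }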
+
+#define OFFSET(x) offsetof(struct dshow_ctx, x)
+#define DEC AV_OPT_FLAG_DECODING_PARAM
+static const AVOption options[] = {
+ { "video_size", "set video size given a string such as 640x480 or hd720.", OFFSET(requested_width), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL}, 0, 0, DEC },
+ { "pixel_format", "set video pixel format", OFFSET(pixel_format), AV_OPT_TYPE_PIXEL_FMT, {.i64 = AV_PIX_FMT_NONE}, -1, INT_MAX, DEC },
+ { "framerate", "set video frame rate", OFFSET(framerate), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC },
+ { "sample_rate", "set audio sample rate", OFFSET(sample_rate), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, DEC },
+ { "sample_size", "set audio sample size", OFFSET(sample_size), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 16, DEC },
+ { "channels", "set number of audio channels, such as 1 or 2", OFFSET(channels), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, DEC },
+ { "audio_buffer_size", "set audio device buffer latency size in milliseconds (default is the device's default)", OFFSET(audio_buffer_size), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, DEC },
+ { "list_devices", "list available devices", OFFSET(list_devices), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, DEC },
+ { "list_options", "list available options for specified device", OFFSET(list_options), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, DEC },
+ { "video_device_number", "set video device number for devices with same name (starts at 0)", OFFSET(video_device_number), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, DEC },
+ { "audio_device_number", "set audio device number for devices with same name (starts at 0)", OFFSET(audio_device_number), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, DEC },
+ { "video_pin_name", "select video capture pin by name", OFFSET(video_pin_name),AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, AV_OPT_FLAG_ENCODING_PARAM },
+ { "audio_pin_name", "select audio capture pin by name", OFFSET(audio_pin_name),AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, AV_OPT_FLAG_ENCODING_PARAM },
+ { "crossbar_video_input_pin_number", "set video input pin number for crossbar device", OFFSET(crossbar_video_input_pin_number), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, DEC },
+ { "crossbar_audio_input_pin_number", "set audio input pin number for crossbar device", OFFSET(crossbar_audio_input_pin_number), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, DEC },
+ { "show_video_device_dialog", "display property dialog for video capture device", OFFSET(show_video_device_dialog), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, DEC },
+ { "show_audio_device_dialog", "display property dialog for audio capture device", OFFSET(show_audio_device_dialog), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, DEC },
+ { "show_video_crossbar_connection_dialog", "display property dialog for crossbar connecting pins filter on video device", OFFSET(show_video_crossbar_connection_dialog), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, DEC },
+ { "show_audio_crossbar_connection_dialog", "display property dialog for crossbar connecting pins filter on audio device", OFFSET(show_audio_crossbar_connection_dialog), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, DEC },
+ { "show_analog_tv_tuner_dialog", "display property dialog for analog tuner filter", OFFSET(show_analog_tv_tuner_dialog), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, DEC },
+ { "show_analog_tv_tuner_audio_dialog", "display property dialog for analog tuner audio filter", OFFSET(show_analog_tv_tuner_audio_dialog), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, DEC },
+ { "audio_device_load", "load audio capture filter device (and properties) from file", OFFSET(audio_filter_load_file), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC },
+ { "audio_device_save", "save audio capture filter device (and properties) to file", OFFSET(audio_filter_save_file), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC },
+ { "video_device_load", "load video capture filter device (and properties) from file", OFFSET(video_filter_load_file), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC },
+ { "video_device_save", "save video capture filter device (and properties) to file", OFFSET(video_filter_save_file), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC },
+ { NULL },
+};
+
+static const AVClass dshow_class = {
+ .class_name = "dshow indev",
+ .item_name = av_default_item_name,
+ .option = options,
+ .version = LIBAVUTIL_VERSION_INT,
+ .category = AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT,
+};
+
+AVInputFormat ff_dshow_demuxer = {
+ .name = "dshow",
+ .long_name = NULL_IF_CONFIG_SMALL("DirectShow capture"),
+ .priv_data_size = sizeof(struct dshow_ctx),
+ .read_header = dshow_read_header,
+ .read_packet = dshow_read_packet,
+ .read_close = dshow_read_close,
+ .flags = AVFMT_NOFILE,
+ .priv_class = &dshow_class,
+};
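And a minimal capture-open sketch against the demuxer registered above; the
device name is hypothetical, and the option strings map onto the AVOption
table:

    #include <libavdevice/avdevice.h>
    #include <libavformat/avformat.h>
    #include <libavutil/dict.h>

    static int open_dshow_camera(AVFormatContext **ps)
    {
        AVInputFormat *ifmt;
        AVDictionary *opts = NULL;
        int ret;

        avdevice_register_all();
        ifmt = av_find_input_format("dshow");
        if (!ifmt)
            return AVERROR_DEMUXER_NOT_FOUND;

        av_dict_set(&opts, "video_size", "640x480", 0);
        av_dict_set(&opts, "framerate", "30", 0);

        ret = avformat_open_input(ps, "video=Integrated Camera", ifmt, &opts);
        av_dict_free(&opts);
        return ret;
    }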
diff --git a/libavdevice/dshow_capture.h b/libavdevice/dshow_capture.h
new file mode 100644
index 0000000000..f26eaf9a64
--- /dev/null
+++ b/libavdevice/dshow_capture.h
@@ -0,0 +1,352 @@
+/*
+ * DirectShow capture interface
+ * Copyright (c) 2010 Ramiro Polla
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVDEVICE_DSHOW_CAPTURE_H
+#define AVDEVICE_DSHOW_CAPTURE_H
+
+#define DSHOWDEBUG 0
+
+#include "avdevice.h"
+
+#define COBJMACROS
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#define NO_DSHOW_STRSAFE
+#include <dshow.h>
+#include <dvdmedia.h>
+
+#include "libavcodec/internal.h"
+
+/* EC_DEVICE_LOST is not defined in MinGW dshow headers. */
+#ifndef EC_DEVICE_LOST
+#define EC_DEVICE_LOST 0x1f
+#endif
+
+long ff_copy_dshow_media_type(AM_MEDIA_TYPE *dst, const AM_MEDIA_TYPE *src);
+void ff_print_VIDEO_STREAM_CONFIG_CAPS(const VIDEO_STREAM_CONFIG_CAPS *caps);
+void ff_print_AUDIO_STREAM_CONFIG_CAPS(const AUDIO_STREAM_CONFIG_CAPS *caps);
+void ff_print_AM_MEDIA_TYPE(const AM_MEDIA_TYPE *type);
+void ff_printGUID(const GUID *g);
+
+extern const AVClass *ff_dshow_context_class_ptr;
+#define dshowdebug(...) ff_dlog(&ff_dshow_context_class_ptr, __VA_ARGS__)
+
+static inline void nothing(void *foo)
+{
+}
+
+struct GUIDoffset {
+ const GUID *iid;
+ int offset;
+};
+
+enum dshowDeviceType {
+ VideoDevice = 0,
+ AudioDevice = 1,
+};
+
+enum dshowSourceFilterType {
+ VideoSourceDevice = 0,
+ AudioSourceDevice = 1,
+};
+
+#define DECLARE_QUERYINTERFACE(class, ...) \
+long WINAPI \
+class##_QueryInterface(class *this, const GUID *riid, void **ppvObject) \
+{ \
+ struct GUIDoffset ifaces[] = __VA_ARGS__; \
+ int i; \
+ dshowdebug(AV_STRINGIFY(class)"_QueryInterface(%p, %p, %p)\n", this, riid, ppvObject); \
+ ff_printGUID(riid); \
+ if (!ppvObject) \
+ return E_POINTER; \
+ for (i = 0; i < sizeof(ifaces)/sizeof(ifaces[0]); i++) { \
+ if (IsEqualGUID(riid, ifaces[i].iid)) { \
+ void *obj = (void *) ((uint8_t *) this + ifaces[i].offset); \
+ class##_AddRef(this); \
+ dshowdebug("\tfound %d with offset %d\n", i, ifaces[i].offset); \
+ *ppvObject = (void *) obj; \
+ return S_OK; \
+ } \
+ } \
+ dshowdebug("\tE_NOINTERFACE\n"); \
+ *ppvObject = NULL; \
+ return E_NOINTERFACE; \
+}
+#define DECLARE_ADDREF(class) \
+unsigned long WINAPI \
+class##_AddRef(class *this) \
+{ \
+ dshowdebug(AV_STRINGIFY(class)"_AddRef(%p)\t%ld\n", this, this->ref+1); \
+ return InterlockedIncrement(&this->ref); \
+}
+#define DECLARE_RELEASE(class) \
+unsigned long WINAPI \
+class##_Release(class *this) \
+{ \
+ long ref = InterlockedDecrement(&this->ref); \
+ dshowdebug(AV_STRINGIFY(class)"_Release(%p)\t%ld\n", this, ref); \
+ if (!ref) \
+ class##_Destroy(this); \
+ return ref; \
+}
+
+#define DECLARE_DESTROY(class, func) \
+void class##_Destroy(class *this) \
+{ \
+ dshowdebug(AV_STRINGIFY(class)"_Destroy(%p)\n", this); \
+ func(this); \
+ if (this) { \
+ if (this->vtbl) \
+ CoTaskMemFree(this->vtbl); \
+ CoTaskMemFree(this); \
+ } \
+}
+#define DECLARE_CREATE(class, setup, ...) \
+class *class##_Create(__VA_ARGS__) \
+{ \
+ class *this = CoTaskMemAlloc(sizeof(class)); \
+ void *vtbl = CoTaskMemAlloc(sizeof(*this->vtbl)); \
+ dshowdebug(AV_STRINGIFY(class)"_Create(%p)\n", this); \
+ if (!this || !vtbl) \
+ goto fail; \
+ ZeroMemory(this, sizeof(class)); \
+ ZeroMemory(vtbl, sizeof(*this->vtbl)); \
+ this->ref = 1; \
+ this->vtbl = vtbl; \
+ if (!setup) \
+ goto fail; \
+ dshowdebug("created "AV_STRINGIFY(class)" %p\n", this); \
+ return this; \
+fail: \
+ class##_Destroy(this); \
+ dshowdebug("could not create "AV_STRINGIFY(class)"\n"); \
+ return NULL; \
+}
+
+#define SETVTBL(vtbl, class, fn) \
+ do { (vtbl)->fn = (void *) class##_##fn; } while(0)
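The DECLARE_* macros above stamp out the COM boilerplate shared by every
object below: creation starts the object at ref = 1, AddRef/Release adjust
the count atomically, and the Destroy hook runs when it reaches zero. A
portable, single-threaded sketch of the same lifetime pattern (plain C, no
COM; ordinary arithmetic stands in for InterlockedIncrement/Decrement):

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct Obj { long ref; } Obj;

    static Obj *obj_create(void)
    {
        Obj *o = calloc(1, sizeof(*o));
        if (o)
            o->ref = 1;    /* the creator holds the first reference */
        return o;
    }

    static long obj_addref(Obj *o)  { return ++o->ref; }

    static long obj_release(Obj *o)
    {
        long ref = --o->ref;
        if (!ref)
            free(o);       /* the Destroy hook in the macros above */
        return ref;
    }

    int main(void)
    {
        Obj *o = obj_create();
        if (!o)
            return 1;
        obj_addref(o);     /* ref = 2 */
        obj_release(o);    /* ref = 1 */
        obj_release(o);    /* ref = 0 -> destroyed */
        return 0;
    }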
+
+/*****************************************************************************
+ * Forward Declarations
+ ****************************************************************************/
+typedef struct libAVPin libAVPin;
+typedef struct libAVMemInputPin libAVMemInputPin;
+typedef struct libAVEnumPins libAVEnumPins;
+typedef struct libAVEnumMediaTypes libAVEnumMediaTypes;
+typedef struct libAVFilter libAVFilter;
+
+/*****************************************************************************
+ * libAVPin
+ ****************************************************************************/
+struct libAVPin {
+ IPinVtbl *vtbl;
+ long ref;
+ libAVFilter *filter;
+ IPin *connectedto;
+ AM_MEDIA_TYPE type;
+ IMemInputPinVtbl *imemvtbl;
+};
+
+long WINAPI libAVPin_QueryInterface (libAVPin *, const GUID *, void **);
+unsigned long WINAPI libAVPin_AddRef (libAVPin *);
+unsigned long WINAPI libAVPin_Release (libAVPin *);
+long WINAPI libAVPin_Connect (libAVPin *, IPin *, const AM_MEDIA_TYPE *);
+long WINAPI libAVPin_ReceiveConnection (libAVPin *, IPin *, const AM_MEDIA_TYPE *);
+long WINAPI libAVPin_Disconnect (libAVPin *);
+long WINAPI libAVPin_ConnectedTo (libAVPin *, IPin **);
+long WINAPI libAVPin_ConnectionMediaType (libAVPin *, AM_MEDIA_TYPE *);
+long WINAPI libAVPin_QueryPinInfo (libAVPin *, PIN_INFO *);
+long WINAPI libAVPin_QueryDirection (libAVPin *, PIN_DIRECTION *);
+long WINAPI libAVPin_QueryId (libAVPin *, wchar_t **);
+long WINAPI libAVPin_QueryAccept (libAVPin *, const AM_MEDIA_TYPE *);
+long WINAPI libAVPin_EnumMediaTypes (libAVPin *, IEnumMediaTypes **);
+long WINAPI libAVPin_QueryInternalConnections(libAVPin *, IPin **, unsigned long *);
+long WINAPI libAVPin_EndOfStream (libAVPin *);
+long WINAPI libAVPin_BeginFlush (libAVPin *);
+long WINAPI libAVPin_EndFlush (libAVPin *);
+long WINAPI libAVPin_NewSegment (libAVPin *, REFERENCE_TIME, REFERENCE_TIME, double);
+
+long WINAPI libAVMemInputPin_QueryInterface (libAVMemInputPin *, const GUID *, void **);
+unsigned long WINAPI libAVMemInputPin_AddRef (libAVMemInputPin *);
+unsigned long WINAPI libAVMemInputPin_Release (libAVMemInputPin *);
+long WINAPI libAVMemInputPin_GetAllocator (libAVMemInputPin *, IMemAllocator **);
+long WINAPI libAVMemInputPin_NotifyAllocator (libAVMemInputPin *, IMemAllocator *, BOOL);
+long WINAPI libAVMemInputPin_GetAllocatorRequirements(libAVMemInputPin *, ALLOCATOR_PROPERTIES *);
+long WINAPI libAVMemInputPin_Receive (libAVMemInputPin *, IMediaSample *);
+long WINAPI libAVMemInputPin_ReceiveMultiple (libAVMemInputPin *, IMediaSample **, long, long *);
+long WINAPI libAVMemInputPin_ReceiveCanBlock (libAVMemInputPin *);
+
+void libAVPin_Destroy(libAVPin *);
+libAVPin *libAVPin_Create (libAVFilter *filter);
+
+void libAVMemInputPin_Destroy(libAVMemInputPin *);
+
+/*****************************************************************************
+ * libAVEnumPins
+ ****************************************************************************/
+struct libAVEnumPins {
+ IEnumPinsVtbl *vtbl;
+ long ref;
+ int pos;
+ libAVPin *pin;
+ libAVFilter *filter;
+};
+
+long WINAPI libAVEnumPins_QueryInterface(libAVEnumPins *, const GUID *, void **);
+unsigned long WINAPI libAVEnumPins_AddRef (libAVEnumPins *);
+unsigned long WINAPI libAVEnumPins_Release (libAVEnumPins *);
+long WINAPI libAVEnumPins_Next (libAVEnumPins *, unsigned long, IPin **, unsigned long *);
+long WINAPI libAVEnumPins_Skip (libAVEnumPins *, unsigned long);
+long WINAPI libAVEnumPins_Reset (libAVEnumPins *);
+long WINAPI libAVEnumPins_Clone (libAVEnumPins *, libAVEnumPins **);
+
+void libAVEnumPins_Destroy(libAVEnumPins *);
+libAVEnumPins *libAVEnumPins_Create (libAVPin *pin, libAVFilter *filter);
+
+/*****************************************************************************
+ * libAVEnumMediaTypes
+ ****************************************************************************/
+struct libAVEnumMediaTypes {
+ IEnumMediaTypesVtbl *vtbl;
+ long ref;
+ int pos;
+ AM_MEDIA_TYPE type;
+};
+
+long WINAPI libAVEnumMediaTypes_QueryInterface(libAVEnumMediaTypes *, const GUID *, void **);
+unsigned long WINAPI libAVEnumMediaTypes_AddRef (libAVEnumMediaTypes *);
+unsigned long WINAPI libAVEnumMediaTypes_Release (libAVEnumMediaTypes *);
+long WINAPI libAVEnumMediaTypes_Next (libAVEnumMediaTypes *, unsigned long, AM_MEDIA_TYPE **, unsigned long *);
+long WINAPI libAVEnumMediaTypes_Skip (libAVEnumMediaTypes *, unsigned long);
+long WINAPI libAVEnumMediaTypes_Reset (libAVEnumMediaTypes *);
+long WINAPI libAVEnumMediaTypes_Clone (libAVEnumMediaTypes *, libAVEnumMediaTypes **);
+
+void libAVEnumMediaTypes_Destroy(libAVEnumMediaTypes *);
+libAVEnumMediaTypes *libAVEnumMediaTypes_Create(const AM_MEDIA_TYPE *type);
+
+/*****************************************************************************
+ * libAVFilter
+ ****************************************************************************/
+struct libAVFilter {
+ IBaseFilterVtbl *vtbl;
+ long ref;
+ const wchar_t *name;
+ libAVPin *pin;
+ FILTER_INFO info;
+ FILTER_STATE state;
+ IReferenceClock *clock;
+ enum dshowDeviceType type;
+ void *priv_data;
+ int stream_index;
+ int64_t start_time;
+ void (*callback)(void *priv_data, int index, uint8_t *buf, int buf_size, int64_t time, enum dshowDeviceType type);
+};
+
+long WINAPI libAVFilter_QueryInterface (libAVFilter *, const GUID *, void **);
+unsigned long WINAPI libAVFilter_AddRef (libAVFilter *);
+unsigned long WINAPI libAVFilter_Release (libAVFilter *);
+long WINAPI libAVFilter_GetClassID (libAVFilter *, CLSID *);
+long WINAPI libAVFilter_Stop (libAVFilter *);
+long WINAPI libAVFilter_Pause (libAVFilter *);
+long WINAPI libAVFilter_Run (libAVFilter *, REFERENCE_TIME);
+long WINAPI libAVFilter_GetState (libAVFilter *, DWORD, FILTER_STATE *);
+long WINAPI libAVFilter_SetSyncSource (libAVFilter *, IReferenceClock *);
+long WINAPI libAVFilter_GetSyncSource (libAVFilter *, IReferenceClock **);
+long WINAPI libAVFilter_EnumPins (libAVFilter *, IEnumPins **);
+long WINAPI libAVFilter_FindPin (libAVFilter *, const wchar_t *, IPin **);
+long WINAPI libAVFilter_QueryFilterInfo(libAVFilter *, FILTER_INFO *);
+long WINAPI libAVFilter_JoinFilterGraph(libAVFilter *, IFilterGraph *, const wchar_t *);
+long WINAPI libAVFilter_QueryVendorInfo(libAVFilter *, wchar_t **);
+
+void libAVFilter_Destroy(libAVFilter *);
+libAVFilter *libAVFilter_Create (void *, void *, enum dshowDeviceType);
+
+/*****************************************************************************
+ * dshow_ctx
+ ****************************************************************************/
+struct dshow_ctx {
+ const AVClass *class;
+
+ IGraphBuilder *graph;
+
+ char *device_name[2];
+ int video_device_number;
+ int audio_device_number;
+
+ int list_options;
+ int list_devices;
+ int audio_buffer_size;
+ int crossbar_video_input_pin_number;
+ int crossbar_audio_input_pin_number;
+ char *video_pin_name;
+ char *audio_pin_name;
+ int show_video_device_dialog;
+ int show_audio_device_dialog;
+ int show_video_crossbar_connection_dialog;
+ int show_audio_crossbar_connection_dialog;
+ int show_analog_tv_tuner_dialog;
+ int show_analog_tv_tuner_audio_dialog;
+ char *audio_filter_load_file;
+ char *audio_filter_save_file;
+ char *video_filter_load_file;
+ char *video_filter_save_file;
+
+ IBaseFilter *device_filter[2];
+ IPin *device_pin[2];
+ libAVFilter *capture_filter[2];
+ libAVPin *capture_pin[2];
+
+ HANDLE mutex;
+ HANDLE event[2]; /* event[0] is set by DirectShow
+ * event[1] is set by callback() */
+ AVPacketList *pktl;
+
+ int eof;
+
+ int64_t curbufsize[2];
+ unsigned int video_frame_num;
+
+ IMediaControl *control;
+ IMediaEvent *media_event;
+
+ enum AVPixelFormat pixel_format;
+ enum AVCodecID video_codec_id;
+ char *framerate;
+
+ int requested_width;
+ int requested_height;
+ AVRational requested_framerate;
+
+ int sample_rate;
+ int sample_size;
+ int channels;
+};
+
+/*****************************************************************************
+ * CrossBar
+ ****************************************************************************/
+HRESULT dshow_try_setup_crossbar_options(ICaptureGraphBuilder2 *graph_builder2,
+ IBaseFilter *device_filter, enum dshowDeviceType devtype, AVFormatContext *avctx);
+
+void dshow_show_filter_properties(IBaseFilter *pFilter, AVFormatContext *avctx);
+
+#endif /* AVDEVICE_DSHOW_CAPTURE_H */
diff --git a/libavdevice/dshow_common.c b/libavdevice/dshow_common.c
new file mode 100644
index 0000000000..f7f0dfbdbb
--- /dev/null
+++ b/libavdevice/dshow_common.c
@@ -0,0 +1,190 @@
+/*
+ * Directshow capture interface
+ * Copyright (c) 2010 Ramiro Polla
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "dshow_capture.h"
+
+long ff_copy_dshow_media_type(AM_MEDIA_TYPE *dst, const AM_MEDIA_TYPE *src)
+{
+ uint8_t *pbFormat = NULL;
+
+ if (src->cbFormat) {
+ pbFormat = CoTaskMemAlloc(src->cbFormat);
+ if (!pbFormat)
+ return E_OUTOFMEMORY;
+ memcpy(pbFormat, src->pbFormat, src->cbFormat);
+ }
+
+ *dst = *src;
+ dst->pUnk = NULL;
+ dst->pbFormat = pbFormat;
+
+ return S_OK;
+}
+
+void ff_printGUID(const GUID *g)
+{
+#if DSHOWDEBUG
+ const uint32_t *d = (const uint32_t *) &g->Data1;
+ const uint16_t *w = (const uint16_t *) &g->Data2;
+ const uint8_t *c = (const uint8_t *) &g->Data4;
+
+ dshowdebug("0x%08x 0x%04x 0x%04x %02x%02x%02x%02x%02x%02x%02x%02x",
+ d[0], w[0], w[1],
+ c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
+#endif
+}
+
+static const char *dshow_context_to_name(void *ptr)
+{
+ return "dshow";
+}
+static const AVClass ff_dshow_context_class = { "DirectShow", dshow_context_to_name };
+const AVClass *ff_dshow_context_class_ptr = &ff_dshow_context_class;
+
+#define dstruct(pctx, sname, var, type) \
+ dshowdebug(" "#var":\t%"type"\n", sname->var)
+
+#if DSHOWDEBUG
+static void dump_bih(void *s, BITMAPINFOHEADER *bih)
+{
+ dshowdebug(" BITMAPINFOHEADER\n");
+ dstruct(s, bih, biSize, "lu");
+ dstruct(s, bih, biWidth, "ld");
+ dstruct(s, bih, biHeight, "ld");
+ dstruct(s, bih, biPlanes, "d");
+ dstruct(s, bih, biBitCount, "d");
+ dstruct(s, bih, biCompression, "lu");
+ dshowdebug(" biCompression:\t\"%.4s\"\n",
+ (char*) &bih->biCompression);
+ dstruct(s, bih, biSizeImage, "lu");
+ dstruct(s, bih, biXPelsPerMeter, "lu");
+ dstruct(s, bih, biYPelsPerMeter, "lu");
+ dstruct(s, bih, biClrUsed, "lu");
+ dstruct(s, bih, biClrImportant, "lu");
+}
+#endif
+
+void ff_print_VIDEO_STREAM_CONFIG_CAPS(const VIDEO_STREAM_CONFIG_CAPS *caps)
+{
+#if DSHOWDEBUG
+ dshowdebug(" VIDEO_STREAM_CONFIG_CAPS\n");
+ dshowdebug(" guid\t");
+ ff_printGUID(&caps->guid);
+ dshowdebug("\n");
+ dshowdebug(" VideoStandard\t%lu\n", caps->VideoStandard);
+ dshowdebug(" InputSize %ld\t%ld\n", caps->InputSize.cx, caps->InputSize.cy);
+ dshowdebug(" MinCroppingSize %ld\t%ld\n", caps->MinCroppingSize.cx, caps->MinCroppingSize.cy);
+ dshowdebug(" MaxCroppingSize %ld\t%ld\n", caps->MaxCroppingSize.cx, caps->MaxCroppingSize.cy);
+ dshowdebug(" CropGranularityX\t%d\n", caps->CropGranularityX);
+ dshowdebug(" CropGranularityY\t%d\n", caps->CropGranularityY);
+ dshowdebug(" CropAlignX\t%d\n", caps->CropAlignX);
+ dshowdebug(" CropAlignY\t%d\n", caps->CropAlignY);
+ dshowdebug(" MinOutputSize %ld\t%ld\n", caps->MinOutputSize.cx, caps->MinOutputSize.cy);
+ dshowdebug(" MaxOutputSize %ld\t%ld\n", caps->MaxOutputSize.cx, caps->MaxOutputSize.cy);
+ dshowdebug(" OutputGranularityX\t%d\n", caps->OutputGranularityX);
+ dshowdebug(" OutputGranularityY\t%d\n", caps->OutputGranularityY);
+ dshowdebug(" StretchTapsX\t%d\n", caps->StretchTapsX);
+ dshowdebug(" StretchTapsY\t%d\n", caps->StretchTapsY);
+ dshowdebug(" ShrinkTapsX\t%d\n", caps->ShrinkTapsX);
+ dshowdebug(" ShrinkTapsY\t%d\n", caps->ShrinkTapsY);
+ dshowdebug(" MinFrameInterval\t%"PRId64"\n", caps->MinFrameInterval);
+ dshowdebug(" MaxFrameInterval\t%"PRId64"\n", caps->MaxFrameInterval);
+ dshowdebug(" MinBitsPerSecond\t%ld\n", caps->MinBitsPerSecond);
+ dshowdebug(" MaxBitsPerSecond\t%ld\n", caps->MaxBitsPerSecond);
+#endif
+}
+
+void ff_print_AUDIO_STREAM_CONFIG_CAPS(const AUDIO_STREAM_CONFIG_CAPS *caps)
+{
+#if DSHOWDEBUG
+ dshowdebug(" AUDIO_STREAM_CONFIG_CAPS\n");
+ dshowdebug(" guid\t");
+ ff_printGUID(&caps->guid);
+ dshowdebug("\n");
+ dshowdebug(" MinimumChannels\t%lu\n", caps->MinimumChannels);
+ dshowdebug(" MaximumChannels\t%lu\n", caps->MaximumChannels);
+ dshowdebug(" ChannelsGranularity\t%lu\n", caps->ChannelsGranularity);
+ dshowdebug(" MinimumBitsPerSample\t%lu\n", caps->MinimumBitsPerSample);
+ dshowdebug(" MaximumBitsPerSample\t%lu\n", caps->MaximumBitsPerSample);
+ dshowdebug(" BitsPerSampleGranularity\t%lu\n", caps->BitsPerSampleGranularity);
+ dshowdebug(" MinimumSampleFrequency\t%lu\n", caps->MinimumSampleFrequency);
+ dshowdebug(" MaximumSampleFrequency\t%lu\n", caps->MaximumSampleFrequency);
+ dshowdebug(" SampleFrequencyGranularity\t%lu\n", caps->SampleFrequencyGranularity);
+#endif
+}
+
+void ff_print_AM_MEDIA_TYPE(const AM_MEDIA_TYPE *type)
+{
+#if DSHOWDEBUG
+ dshowdebug(" majortype\t");
+ ff_printGUID(&type->majortype);
+ dshowdebug("\n");
+ dshowdebug(" subtype\t");
+ ff_printGUID(&type->subtype);
+ dshowdebug("\n");
+ dshowdebug(" bFixedSizeSamples\t%d\n", type->bFixedSizeSamples);
+ dshowdebug(" bTemporalCompression\t%d\n", type->bTemporalCompression);
+ dshowdebug(" lSampleSize\t%lu\n", type->lSampleSize);
+ dshowdebug(" formattype\t");
+ ff_printGUID(&type->formattype);
+ dshowdebug("\n");
+ dshowdebug(" pUnk\t%p\n", type->pUnk);
+ dshowdebug(" cbFormat\t%lu\n", type->cbFormat);
+ dshowdebug(" pbFormat\t%p\n", type->pbFormat);
+
+ if (IsEqualGUID(&type->formattype, &FORMAT_VideoInfo)) {
+ VIDEOINFOHEADER *v = (void *) type->pbFormat;
+ dshowdebug(" rcSource: left %ld top %ld right %ld bottom %ld\n",
+ v->rcSource.left, v->rcSource.top, v->rcSource.right, v->rcSource.bottom);
+ dshowdebug(" rcTarget: left %ld top %ld right %ld bottom %ld\n",
+ v->rcTarget.left, v->rcTarget.top, v->rcTarget.right, v->rcTarget.bottom);
+ dshowdebug(" dwBitRate: %lu\n", v->dwBitRate);
+ dshowdebug(" dwBitErrorRate: %lu\n", v->dwBitErrorRate);
+ dshowdebug(" AvgTimePerFrame: %"PRId64"\n", v->AvgTimePerFrame);
+ dump_bih(NULL, &v->bmiHeader);
+ } else if (IsEqualGUID(&type->formattype, &FORMAT_VideoInfo2)) {
+ VIDEOINFOHEADER2 *v = (void *) type->pbFormat;
+ dshowdebug(" rcSource: left %ld top %ld right %ld bottom %ld\n",
+ v->rcSource.left, v->rcSource.top, v->rcSource.right, v->rcSource.bottom);
+ dshowdebug(" rcTarget: left %ld top %ld right %ld bottom %ld\n",
+ v->rcTarget.left, v->rcTarget.top, v->rcTarget.right, v->rcTarget.bottom);
+ dshowdebug(" dwBitRate: %lu\n", v->dwBitRate);
+ dshowdebug(" dwBitErrorRate: %lu\n", v->dwBitErrorRate);
+ dshowdebug(" AvgTimePerFrame: %"PRId64"\n", v->AvgTimePerFrame);
+ dshowdebug(" dwInterlaceFlags: %lu\n", v->dwInterlaceFlags);
+ dshowdebug(" dwCopyProtectFlags: %lu\n", v->dwCopyProtectFlags);
+ dshowdebug(" dwPictAspectRatioX: %lu\n", v->dwPictAspectRatioX);
+ dshowdebug(" dwPictAspectRatioY: %lu\n", v->dwPictAspectRatioY);
+// dshowdebug(" dwReserved1: %lu\n", v->u.dwReserved1); /* mingw-w64 is buggy and doesn't name unnamed unions */
+ dshowdebug(" dwReserved2: %lu\n", v->dwReserved2);
+ dump_bih(NULL, &v->bmiHeader);
+ } else if (IsEqualGUID(&type->formattype, &FORMAT_WaveFormatEx)) {
+ WAVEFORMATEX *fx = (void *) type->pbFormat;
+ dshowdebug(" wFormatTag: %u\n", fx->wFormatTag);
+ dshowdebug(" nChannels: %u\n", fx->nChannels);
+ dshowdebug(" nSamplesPerSec: %lu\n", fx->nSamplesPerSec);
+ dshowdebug(" nAvgBytesPerSec: %lu\n", fx->nAvgBytesPerSec);
+ dshowdebug(" nBlockAlign: %u\n", fx->nBlockAlign);
+ dshowdebug(" wBitsPerSample: %u\n", fx->wBitsPerSample);
+ dshowdebug(" cbSize: %u\n", fx->cbSize);
+ }
+#endif
+}
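ff_copy_dshow_media_type() above deep-copies the variable-size format block with CoTaskMemAlloc(), so the owner of the copy must release it through the COM allocator. A minimal sketch of the matching cleanup (hypothetical helper; FFmpeg's dshow code does this inline where needed):

    static void free_dshow_media_type(AM_MEDIA_TYPE *type)
    {
        /* pbFormat came from CoTaskMemAlloc() in the copy above */
        if (type->pbFormat) {
            CoTaskMemFree(type->pbFormat);
            type->pbFormat = NULL;
            type->cbFormat = 0;
        }
        /* pUnk is always NULL'ed by the copy, so nothing to Release */
    }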
diff --git a/libavdevice/dshow_crossbar.c b/libavdevice/dshow_crossbar.c
new file mode 100644
index 0000000000..95fb466f4e
--- /dev/null
+++ b/libavdevice/dshow_crossbar.c
@@ -0,0 +1,208 @@
+/*
+ * DirectShow capture interface
+ * Copyright (c) 2015 Roger Pack
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "dshow_capture.h"
+
+static const char *
+GetPhysicalPinName(long pin_type)
+{
+ switch (pin_type)
+ {
+ case PhysConn_Video_Tuner: return "Video Tuner";
+ case PhysConn_Video_Composite: return "Video Composite";
+ case PhysConn_Video_SVideo: return "S-Video";
+ case PhysConn_Video_RGB: return "Video RGB";
+ case PhysConn_Video_YRYBY: return "Video YRYBY";
+ case PhysConn_Video_SerialDigital: return "Video Serial Digital";
+ case PhysConn_Video_ParallelDigital: return "Video Parallel Digital";
+ case PhysConn_Video_SCSI: return "Video SCSI";
+ case PhysConn_Video_AUX: return "Video AUX";
+ case PhysConn_Video_1394: return "Video 1394";
+ case PhysConn_Video_USB: return "Video USB";
+ case PhysConn_Video_VideoDecoder: return "Video Decoder";
+ case PhysConn_Video_VideoEncoder: return "Video Encoder";
+
+ case PhysConn_Audio_Tuner: return "Audio Tuner";
+ case PhysConn_Audio_Line: return "Audio Line";
+ case PhysConn_Audio_Mic: return "Audio Microphone";
+ case PhysConn_Audio_AESDigital: return "Audio AES/EBU Digital";
+ case PhysConn_Audio_SPDIFDigital: return "Audio S/PDIF";
+ case PhysConn_Audio_SCSI: return "Audio SCSI";
+ case PhysConn_Audio_AUX: return "Audio AUX";
+ case PhysConn_Audio_1394: return "Audio 1394";
+ case PhysConn_Audio_USB: return "Audio USB";
+ case PhysConn_Audio_AudioDecoder: return "Audio Decoder";
+ default: return "Unknown Crossbar Pin Type—Please report!";
+ }
+}
+
+static HRESULT
+setup_crossbar_options(IAMCrossbar *cross_bar, enum dshowDeviceType devtype, AVFormatContext *avctx)
+{
+ struct dshow_ctx *ctx = avctx->priv_data;
+ long count_output_pins, count_input_pins;
+ int i;
+ int log_level = ctx->list_options ? AV_LOG_INFO : AV_LOG_DEBUG;
+ int video_input_pin = ctx->crossbar_video_input_pin_number;
+ int audio_input_pin = ctx->crossbar_audio_input_pin_number;
+ const char *device_name = ctx->device_name[devtype];
+ HRESULT hr;
+
+ av_log(avctx, log_level, "Crossbar Switching Information for %s:\n", device_name);
+ hr = IAMCrossbar_get_PinCounts(cross_bar, &count_output_pins, &count_input_pins);
+ if (hr != S_OK) {
+ av_log(avctx, AV_LOG_ERROR, "Unable to get crossbar pin counts\n");
+ return hr;
+ }
+
+ for (i = 0; i < count_output_pins; i++)
+ {
+ int j;
+ long related_pin, pin_type, route_to_pin;
+ hr = IAMCrossbar_get_CrossbarPinInfo(cross_bar, FALSE, i, &related_pin, &pin_type);
+ if (pin_type == PhysConn_Video_VideoDecoder) {
+ /* assume there is only one "Video Decoder" output pin (and one "Audio Decoder" pin), and for now it is the only one we care about routing to */
+ if (video_input_pin != -1) {
+ av_log(avctx, log_level, "Routing video input from pin %d\n", video_input_pin);
+ hr = IAMCrossbar_Route(cross_bar, i, video_input_pin);
+ if (hr != S_OK) {
+ av_log(avctx, AV_LOG_ERROR, "Unable to route video input from pin %d\n", video_input_pin);
+ return AVERROR(EIO);
+ }
+ }
+ } else if (pin_type == PhysConn_Audio_AudioDecoder) {
+ if (audio_input_pin != -1) {
+ av_log(avctx, log_level, "Routing audio input from pin %d\n", audio_input_pin);
+ hr = IAMCrossbar_Route(cross_bar, i, audio_input_pin);
+ if (hr != S_OK) {
+ av_log(avctx, AV_LOG_ERROR, "Unable to route audio input from pin %d\n", audio_input_pin);
+ return hr;
+ }
+ }
+ } else {
+ av_log(avctx, AV_LOG_WARNING, "Unexpected output pin type, please report the type if you want to use this (%s)", GetPhysicalPinName(pin_type));
+ }
+
+ hr = IAMCrossbar_get_IsRoutedTo(cross_bar, i, &route_to_pin);
+ if (hr != S_OK) {
+ av_log(avctx, AV_LOG_ERROR, "Unable to get crossbar is routed to from pin %d\n", i);
+ return hr;
+ }
+ av_log(avctx, log_level, " Crossbar Output pin %d: \"%s\" related output pin: %ld ", i, GetPhysicalPinName(pin_type), related_pin);
+ av_log(avctx, log_level, "current input pin: %ld ", route_to_pin);
+ av_log(avctx, log_level, "compatible input pins: ");
+
+ for (j = 0; j < count_input_pins; j++)
+ {
+ hr = IAMCrossbar_CanRoute(cross_bar, i, j);
+ if (hr == S_OK)
+ av_log(avctx, log_level ,"%d ", j);
+ }
+ av_log(avctx, log_level, "\n");
+ }
+
+ for (i = 0; i < count_input_pins; i++)
+ {
+ long related_pin, pin_type;
+ hr = IAMCrossbar_get_CrossbarPinInfo(cross_bar, TRUE, i, &related_pin, &pin_type);
+ if (hr != S_OK) {
+ av_log(avctx, AV_LOG_ERROR, "unable to get crossbar info audio input from pin %d\n", i);
+ return hr;
+ }
+ av_log(avctx, log_level, " Crossbar Input pin %d - \"%s\" ", i, GetPhysicalPinName(pin_type));
+ av_log(avctx, log_level, "related input pin: %ld\n", related_pin);
+ }
+ return S_OK;
+}
+
+/**
+ * Given a fully constructed graph, check if there is a cross bar filter, and configure its pins if so.
+ */
+HRESULT
+dshow_try_setup_crossbar_options(ICaptureGraphBuilder2 *graph_builder2,
+ IBaseFilter *device_filter, enum dshowDeviceType devtype, AVFormatContext *avctx)
+{
+ struct dshow_ctx *ctx = avctx->priv_data;
+ IAMCrossbar *cross_bar = NULL;
+ IBaseFilter *cross_bar_base_filter = NULL;
+ IAMTVTuner *tv_tuner_filter = NULL;
+ IBaseFilter *tv_tuner_base_filter = NULL;
+ IAMAudioInputMixer *tv_audio_filter = NULL;
+ IBaseFilter *tv_audio_base_filter = NULL;
+ HRESULT hr;
+
+ hr = ICaptureGraphBuilder2_FindInterface(graph_builder2, &LOOK_UPSTREAM_ONLY, (const GUID *) NULL,
+ device_filter, &IID_IAMCrossbar, (void**) &cross_bar);
+ if (hr != S_OK) {
+ /* no crossbar found */
+ hr = S_OK;
+ goto end;
+ }
+ /* TODO some TV tuners apparently have multiple crossbars? */
+
+ if (devtype == VideoDevice && ctx->show_video_crossbar_connection_dialog ||
+ devtype == AudioDevice && ctx->show_audio_crossbar_connection_dialog) {
+ hr = IAMCrossbar_QueryInterface(cross_bar, &IID_IBaseFilter, (void **) &cross_bar_base_filter);
+ if (hr != S_OK)
+ goto end;
+ dshow_show_filter_properties(cross_bar_base_filter, avctx);
+ }
+
+ if (devtype == VideoDevice && ctx->show_analog_tv_tuner_dialog) {
+ hr = ICaptureGraphBuilder2_FindInterface(graph_builder2, &LOOK_UPSTREAM_ONLY, NULL,
+ device_filter, &IID_IAMTVTuner, (void**) &tv_tuner_filter);
+ if (hr == S_OK) {
+ hr = IAMCrossbar_QueryInterface(tv_tuner_filter, &IID_IBaseFilter, (void **) &tv_tuner_base_filter);
+ if (hr != S_OK)
+ goto end;
+ dshow_show_filter_properties(tv_tuner_base_filter, avctx);
+ } else {
+ av_log(avctx, AV_LOG_WARNING, "unable to find a tv tuner to display dialog for!");
+ }
+ }
+ if (devtype == AudioDevice && ctx->show_analog_tv_tuner_audio_dialog) {
+ hr = ICaptureGraphBuilder2_FindInterface(graph_builder2, &LOOK_UPSTREAM_ONLY, NULL,
+ device_filter, &IID_IAMTVAudio, (void**) &tv_audio_filter);
+ if (hr == S_OK) {
+ hr = IAMCrossbar_QueryInterface(tv_audio_filter, &IID_IBaseFilter, (void **) &tv_audio_base_filter);
+ if (hr != S_OK)
+ goto end;
+ dshow_show_filter_properties(tv_audio_base_filter, avctx);
+ } else {
+ av_log(avctx, AV_LOG_WARNING, "unable to find a tv audio tuner to display dialog for!");
+ }
+ }
+
+ hr = setup_crossbar_options(cross_bar, devtype, avctx);
+ if (hr != S_OK)
+ goto end;
+
+end:
+ if (cross_bar)
+ IAMCrossbar_Release(cross_bar);
+ if (cross_bar_base_filter)
+ IBaseFilter_Release(cross_bar_base_filter);
+ if (tv_tuner_filter)
+ IAMTVTuner_Release(tv_tuner_filter);
+ if (tv_tuner_base_filter)
+ IBaseFilter_Release(tv_tuner_base_filter);
+ return hr;
+}
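The routing above is driven entirely by the private options added to dshow_ctx earlier in this patch (crossbar_video_input_pin_number and friends). A plausible invocation, with the device name as a placeholder:

    ffmpeg -f dshow -list_options true -i video="Analog Capture Device"
    ffmpeg -f dshow -crossbar_video_input_pin_number 2 \
           -crossbar_audio_input_pin_number 3 \
           -i video="Analog Capture Device"

The first command prints the "Crossbar Switching Information" listing at info level (list_options raises log_level in setup_crossbar_options()); the second routes crossbar input pins 2 and 3 to the video and audio decoder output pins.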
diff --git a/libavdevice/dshow_enummediatypes.c b/libavdevice/dshow_enummediatypes.c
new file mode 100644
index 0000000000..3a66a4de14
--- /dev/null
+++ b/libavdevice/dshow_enummediatypes.c
@@ -0,0 +1,105 @@
+/*
+ * DirectShow capture interface
+ * Copyright (c) 2010 Ramiro Polla
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "dshow_capture.h"
+
+DECLARE_QUERYINTERFACE(libAVEnumMediaTypes,
+ { {&IID_IUnknown,0}, {&IID_IEnumMediaTypes,0} })
+DECLARE_ADDREF(libAVEnumMediaTypes)
+DECLARE_RELEASE(libAVEnumMediaTypes)
+
+long WINAPI
+libAVEnumMediaTypes_Next(libAVEnumMediaTypes *this, unsigned long n,
+ AM_MEDIA_TYPE **types, unsigned long *fetched)
+{
+ int count = 0;
+ dshowdebug("libAVEnumMediaTypes_Next(%p)\n", this);
+ if (!types)
+ return E_POINTER;
+ if (!this->pos && n == 1) {
+ if (!IsEqualGUID(&this->type.majortype, &GUID_NULL)) {
+ AM_MEDIA_TYPE *type = av_malloc(sizeof(AM_MEDIA_TYPE));
+ if (!type)
+ return E_OUTOFMEMORY;
+ ff_copy_dshow_media_type(type, &this->type);
+ *types = type;
+ count = 1;
+ }
+ this->pos = 1;
+ }
+ if (fetched)
+ *fetched = count;
+ if (!count)
+ return S_FALSE;
+ return S_OK;
+}
+long WINAPI
+libAVEnumMediaTypes_Skip(libAVEnumMediaTypes *this, unsigned long n)
+{
+ dshowdebug("libAVEnumMediaTypes_Skip(%p)\n", this);
+ if (n) /* Any skip will always fall outside of the only valid type. */
+ return S_FALSE;
+ return S_OK;
+}
+long WINAPI
+libAVEnumMediaTypes_Reset(libAVEnumMediaTypes *this)
+{
+ dshowdebug("libAVEnumMediaTypes_Reset(%p)\n", this);
+ this->pos = 0;
+ return S_OK;
+}
+long WINAPI
+libAVEnumMediaTypes_Clone(libAVEnumMediaTypes *this, libAVEnumMediaTypes **enums)
+{
+ libAVEnumMediaTypes *new;
+ dshowdebug("libAVEnumMediaTypes_Clone(%p)\n", this);
+ if (!enums)
+ return E_POINTER;
+ new = libAVEnumMediaTypes_Create(&this->type);
+ if (!new)
+ return E_OUTOFMEMORY;
+ new->pos = this->pos;
+ *enums = new;
+ return S_OK;
+}
+
+static int
+libAVEnumMediaTypes_Setup(libAVEnumMediaTypes *this, const AM_MEDIA_TYPE *type)
+{
+ IEnumMediaTypesVtbl *vtbl = this->vtbl;
+ SETVTBL(vtbl, libAVEnumMediaTypes, QueryInterface);
+ SETVTBL(vtbl, libAVEnumMediaTypes, AddRef);
+ SETVTBL(vtbl, libAVEnumMediaTypes, Release);
+ SETVTBL(vtbl, libAVEnumMediaTypes, Next);
+ SETVTBL(vtbl, libAVEnumMediaTypes, Skip);
+ SETVTBL(vtbl, libAVEnumMediaTypes, Reset);
+ SETVTBL(vtbl, libAVEnumMediaTypes, Clone);
+
+ if (!type) {
+ this->type.majortype = GUID_NULL;
+ } else {
+ ff_copy_dshow_media_type(&this->type, type);
+ }
+
+ return 1;
+}
+DECLARE_CREATE(libAVEnumMediaTypes, libAVEnumMediaTypes_Setup(this, type), const AM_MEDIA_TYPE *type)
+DECLARE_DESTROY(libAVEnumMediaTypes, nothing)
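This enumerator deliberately exposes at most one media type, i.e. the type the pin was created with, so Next() only honors a single-item request from position 0. The consuming side, sketched under the usual COM calling conventions (note the returned AM_MEDIA_TYPE is allocated with av_malloc() rather than the CoTaskMemAlloc() a generic DirectShow client would assume; this only works because FFmpeg's own code is the consumer):

    IEnumMediaTypes *et = NULL;
    AM_MEDIA_TYPE *type = NULL;
    unsigned long fetched = 0;

    if (IPin_EnumMediaTypes(pin, &et) == S_OK) {
        while (IEnumMediaTypes_Next(et, 1, &type, &fetched) == S_OK) {
            /* inspect type->majortype / type->subtype / type->pbFormat;
             * freeing elided, see the allocator note above */
        }
        IEnumMediaTypes_Release(et);
    }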
diff --git a/libavdevice/dshow_enumpins.c b/libavdevice/dshow_enumpins.c
new file mode 100644
index 0000000000..e5c11cb54e
--- /dev/null
+++ b/libavdevice/dshow_enumpins.c
@@ -0,0 +1,105 @@
+/*
+ * DirectShow capture interface
+ * Copyright (c) 2010 Ramiro Polla
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "dshow_capture.h"
+
+DECLARE_QUERYINTERFACE(libAVEnumPins,
+ { {&IID_IUnknown,0}, {&IID_IEnumPins,0} })
+DECLARE_ADDREF(libAVEnumPins)
+DECLARE_RELEASE(libAVEnumPins)
+
+long WINAPI
+libAVEnumPins_Next(libAVEnumPins *this, unsigned long n, IPin **pins,
+ unsigned long *fetched)
+{
+ int count = 0;
+ dshowdebug("libAVEnumPins_Next(%p)\n", this);
+ if (!pins)
+ return E_POINTER;
+ if (!this->pos && n == 1) {
+ libAVPin_AddRef(this->pin);
+ *pins = (IPin *) this->pin;
+ count = 1;
+ this->pos = 1;
+ }
+ if (fetched)
+ *fetched = count;
+ if (!count)
+ return S_FALSE;
+ return S_OK;
+}
+long WINAPI
+libAVEnumPins_Skip(libAVEnumPins *this, unsigned long n)
+{
+ dshowdebug("libAVEnumPins_Skip(%p)\n", this);
+ if (n) /* Any skip will always fall outside of the only valid pin. */
+ return S_FALSE;
+ return S_OK;
+}
+long WINAPI
+libAVEnumPins_Reset(libAVEnumPins *this)
+{
+ dshowdebug("libAVEnumPins_Reset(%p)\n", this);
+ this->pos = 0;
+ return S_OK;
+}
+long WINAPI
+libAVEnumPins_Clone(libAVEnumPins *this, libAVEnumPins **pins)
+{
+ libAVEnumPins *new;
+ dshowdebug("libAVEnumPins_Clone(%p)\n", this);
+ if (!pins)
+ return E_POINTER;
+ new = libAVEnumPins_Create(this->pin, this->filter);
+ if (!new)
+ return E_OUTOFMEMORY;
+ new->pos = this->pos;
+ *pins = new;
+ return S_OK;
+}
+
+static int
+libAVEnumPins_Setup(libAVEnumPins *this, libAVPin *pin, libAVFilter *filter)
+{
+ IEnumPinsVtbl *vtbl = this->vtbl;
+ SETVTBL(vtbl, libAVEnumPins, QueryInterface);
+ SETVTBL(vtbl, libAVEnumPins, AddRef);
+ SETVTBL(vtbl, libAVEnumPins, Release);
+ SETVTBL(vtbl, libAVEnumPins, Next);
+ SETVTBL(vtbl, libAVEnumPins, Skip);
+ SETVTBL(vtbl, libAVEnumPins, Reset);
+ SETVTBL(vtbl, libAVEnumPins, Clone);
+
+ this->pin = pin;
+ this->filter = filter;
+ libAVFilter_AddRef(this->filter);
+
+ return 1;
+}
+static int
+libAVEnumPins_Cleanup(libAVEnumPins *this)
+{
+ libAVFilter_Release(this->filter);
+ return 1;
+}
+DECLARE_CREATE(libAVEnumPins, libAVEnumPins_Setup(this, pin, filter),
+ libAVPin *pin, libAVFilter *filter)
+DECLARE_DESTROY(libAVEnumPins, libAVEnumPins_Cleanup)
diff --git a/libavdevice/dshow_filter.c b/libavdevice/dshow_filter.c
new file mode 100644
index 0000000000..7360adcfcd
--- /dev/null
+++ b/libavdevice/dshow_filter.c
@@ -0,0 +1,202 @@
+/*
+ * DirectShow capture interface
+ * Copyright (c) 2010 Ramiro Polla
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "dshow_capture.h"
+
+DECLARE_QUERYINTERFACE(libAVFilter,
+ { {&IID_IUnknown,0}, {&IID_IBaseFilter,0} })
+DECLARE_ADDREF(libAVFilter)
+DECLARE_RELEASE(libAVFilter)
+
+long WINAPI
+libAVFilter_GetClassID(libAVFilter *this, CLSID *id)
+{
+ dshowdebug("libAVFilter_GetClassID(%p)\n", this);
+ /* I'm not creating a ClassID just for this. */
+ return E_FAIL;
+}
+long WINAPI
+libAVFilter_Stop(libAVFilter *this)
+{
+ dshowdebug("libAVFilter_Stop(%p)\n", this);
+ this->state = State_Stopped;
+ return S_OK;
+}
+long WINAPI
+libAVFilter_Pause(libAVFilter *this)
+{
+ dshowdebug("libAVFilter_Pause(%p)\n", this);
+ this->state = State_Paused;
+ return S_OK;
+}
+long WINAPI
+libAVFilter_Run(libAVFilter *this, REFERENCE_TIME start)
+{
+ dshowdebug("libAVFilter_Run(%p) %"PRId64"\n", this, start);
+ this->state = State_Running;
+ this->start_time = start;
+ return S_OK;
+}
+long WINAPI
+libAVFilter_GetState(libAVFilter *this, DWORD ms, FILTER_STATE *state)
+{
+ dshowdebug("libAVFilter_GetState(%p)\n", this);
+ if (!state)
+ return E_POINTER;
+ *state = this->state;
+ return S_OK;
+}
+long WINAPI
+libAVFilter_SetSyncSource(libAVFilter *this, IReferenceClock *clock)
+{
+ dshowdebug("libAVFilter_SetSyncSource(%p)\n", this);
+
+ if (this->clock != clock) {
+ if (this->clock)
+ IReferenceClock_Release(this->clock);
+ this->clock = clock;
+ if (clock)
+ IReferenceClock_AddRef(clock);
+ }
+
+ return S_OK;
+}
+long WINAPI
+libAVFilter_GetSyncSource(libAVFilter *this, IReferenceClock **clock)
+{
+ dshowdebug("libAVFilter_GetSyncSource(%p)\n", this);
+
+ if (!clock)
+ return E_POINTER;
+ if (this->clock)
+ IReferenceClock_AddRef(this->clock);
+ *clock = this->clock;
+
+ return S_OK;
+}
+long WINAPI
+libAVFilter_EnumPins(libAVFilter *this, IEnumPins **enumpin)
+{
+ libAVEnumPins *new;
+ dshowdebug("libAVFilter_EnumPins(%p)\n", this);
+
+ if (!enumpin)
+ return E_POINTER;
+ new = libAVEnumPins_Create(this->pin, this);
+ if (!new)
+ return E_OUTOFMEMORY;
+
+ *enumpin = (IEnumPins *) new;
+ return S_OK;
+}
+long WINAPI
+libAVFilter_FindPin(libAVFilter *this, const wchar_t *id, IPin **pin)
+{
+ libAVPin *found = NULL;
+ dshowdebug("libAVFilter_FindPin(%p)\n", this);
+
+ if (!id || !pin)
+ return E_POINTER;
+ if (!wcscmp(id, L"In")) {
+ found = this->pin;
+ libAVPin_AddRef(found);
+ }
+ *pin = (IPin *) found;
+ if (!found)
+ return VFW_E_NOT_FOUND;
+
+ return S_OK;
+}
+long WINAPI
+libAVFilter_QueryFilterInfo(libAVFilter *this, FILTER_INFO *info)
+{
+ dshowdebug("libAVFilter_QueryFilterInfo(%p)\n", this);
+
+ if (!info)
+ return E_POINTER;
+ if (this->info.pGraph)
+ IFilterGraph_AddRef(this->info.pGraph);
+ *info = this->info;
+
+ return S_OK;
+}
+long WINAPI
+libAVFilter_JoinFilterGraph(libAVFilter *this, IFilterGraph *graph,
+ const wchar_t *name)
+{
+ dshowdebug("libAVFilter_JoinFilterGraph(%p)\n", this);
+
+ this->info.pGraph = graph;
+ if (name)
+ wcscpy(this->info.achName, name);
+
+ return S_OK;
+}
+long WINAPI
+libAVFilter_QueryVendorInfo(libAVFilter *this, wchar_t **info)
+{
+ dshowdebug("libAVFilter_QueryVendorInfo(%p)\n", this);
+
+ if (!info)
+ return E_POINTER;
+ *info = wcsdup(L"libAV");
+
+ return S_OK;
+}
+
+static int
+libAVFilter_Setup(libAVFilter *this, void *priv_data, void *callback,
+ enum dshowDeviceType type)
+{
+ IBaseFilterVtbl *vtbl = this->vtbl;
+ SETVTBL(vtbl, libAVFilter, QueryInterface);
+ SETVTBL(vtbl, libAVFilter, AddRef);
+ SETVTBL(vtbl, libAVFilter, Release);
+ SETVTBL(vtbl, libAVFilter, GetClassID);
+ SETVTBL(vtbl, libAVFilter, Stop);
+ SETVTBL(vtbl, libAVFilter, Pause);
+ SETVTBL(vtbl, libAVFilter, Run);
+ SETVTBL(vtbl, libAVFilter, GetState);
+ SETVTBL(vtbl, libAVFilter, SetSyncSource);
+ SETVTBL(vtbl, libAVFilter, GetSyncSource);
+ SETVTBL(vtbl, libAVFilter, EnumPins);
+ SETVTBL(vtbl, libAVFilter, FindPin);
+ SETVTBL(vtbl, libAVFilter, QueryFilterInfo);
+ SETVTBL(vtbl, libAVFilter, JoinFilterGraph);
+ SETVTBL(vtbl, libAVFilter, QueryVendorInfo);
+
+ this->pin = libAVPin_Create(this);
+
+ this->priv_data = priv_data;
+ this->callback = callback;
+ this->type = type;
+
+ return 1;
+}
+static int
+libAVFilter_Cleanup(libAVFilter *this)
+{
+ libAVPin_Release(this->pin);
+ return 1;
+}
+DECLARE_CREATE(libAVFilter, libAVFilter_Setup(this, priv_data, callback, type),
+ void *priv_data, void *callback, enum dshowDeviceType type)
+DECLARE_DESTROY(libAVFilter, libAVFilter_Cleanup)
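All of the above leans on the DECLARE_*/SETVTBL macros from dshow_capture.h, which hand-roll COM objects in plain C. Roughly what the pattern amounts to (an illustrative sketch; the real macros differ in detail):

    /* A COM object in C: the first member points at a function table,
     * so a libAVFilter * can be handed out as an IBaseFilter *. */
    struct com_object_sketch {
        IBaseFilterVtbl *vtbl;  /* must be first */
        long ref;               /* managed by AddRef/Release */
        /* ... object state ... */
    };

    /* SETVTBL(vtbl, libAVFilter, Stop) then expands to roughly
     *     vtbl->Stop = (void *) libAVFilter_Stop;
     * so a client's filter->vtbl->Stop(filter) call lands in the
     * implementation defined in this file. */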
diff --git a/libavdevice/dshow_pin.c b/libavdevice/dshow_pin.c
new file mode 100644
index 0000000000..664246da92
--- /dev/null
+++ b/libavdevice/dshow_pin.c
@@ -0,0 +1,384 @@
+/*
+ * DirectShow capture interface
+ * Copyright (c) 2010 Ramiro Polla
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "dshow_capture.h"
+
+#include <stddef.h>
+#define imemoffset offsetof(libAVPin, imemvtbl)
+
+DECLARE_QUERYINTERFACE(libAVPin,
+ { {&IID_IUnknown,0}, {&IID_IPin,0}, {&IID_IMemInputPin,imemoffset} })
+DECLARE_ADDREF(libAVPin)
+DECLARE_RELEASE(libAVPin)
+
+long WINAPI
+libAVPin_Connect(libAVPin *this, IPin *pin, const AM_MEDIA_TYPE *type)
+{
+ dshowdebug("libAVPin_Connect(%p, %p, %p)\n", this, pin, type);
+ /* Input pins receive connections. */
+ return S_FALSE;
+}
+long WINAPI
+libAVPin_ReceiveConnection(libAVPin *this, IPin *pin,
+ const AM_MEDIA_TYPE *type)
+{
+ enum dshowDeviceType devtype = this->filter->type;
+ dshowdebug("libAVPin_ReceiveConnection(%p)\n", this);
+
+ if (!pin)
+ return E_POINTER;
+ if (this->connectedto)
+ return VFW_E_ALREADY_CONNECTED;
+
+ ff_print_AM_MEDIA_TYPE(type);
+ if (devtype == VideoDevice) {
+ if (!IsEqualGUID(&type->majortype, &MEDIATYPE_Video))
+ return VFW_E_TYPE_NOT_ACCEPTED;
+ } else {
+ if (!IsEqualGUID(&type->majortype, &MEDIATYPE_Audio))
+ return VFW_E_TYPE_NOT_ACCEPTED;
+ }
+
+ IPin_AddRef(pin);
+ this->connectedto = pin;
+
+ ff_copy_dshow_media_type(&this->type, type);
+
+ return S_OK;
+}
+long WINAPI
+libAVPin_Disconnect(libAVPin *this)
+{
+ dshowdebug("libAVPin_Disconnect(%p)\n", this);
+
+ if (this->filter->state != State_Stopped)
+ return VFW_E_NOT_STOPPED;
+ if (!this->connectedto)
+ return S_FALSE;
+ IPin_Release(this->connectedto);
+ this->connectedto = NULL;
+
+ return S_OK;
+}
+long WINAPI
+libAVPin_ConnectedTo(libAVPin *this, IPin **pin)
+{
+ dshowdebug("libAVPin_ConnectedTo(%p)\n", this);
+
+ if (!pin)
+ return E_POINTER;
+ if (!this->connectedto)
+ return VFW_E_NOT_CONNECTED;
+ IPin_AddRef(this->connectedto);
+ *pin = this->connectedto;
+
+ return S_OK;
+}
+long WINAPI
+libAVPin_ConnectionMediaType(libAVPin *this, AM_MEDIA_TYPE *type)
+{
+ dshowdebug("libAVPin_ConnectionMediaType(%p)\n", this);
+
+ if (!type)
+ return E_POINTER;
+ if (!this->connectedto)
+ return VFW_E_NOT_CONNECTED;
+
+ return ff_copy_dshow_media_type(type, &this->type);
+}
+long WINAPI
+libAVPin_QueryPinInfo(libAVPin *this, PIN_INFO *info)
+{
+ dshowdebug("libAVPin_QueryPinInfo(%p)\n", this);
+
+ if (!info)
+ return E_POINTER;
+
+ if (this->filter)
+ libAVFilter_AddRef(this->filter);
+
+ info->pFilter = (IBaseFilter *) this->filter;
+ info->dir = PINDIR_INPUT;
+ wcscpy(info->achName, L"Capture");
+
+ return S_OK;
+}
+long WINAPI
+libAVPin_QueryDirection(libAVPin *this, PIN_DIRECTION *dir)
+{
+ dshowdebug("libAVPin_QueryDirection(%p)\n", this);
+ if (!dir)
+ return E_POINTER;
+ *dir = PINDIR_INPUT;
+ return S_OK;
+}
+long WINAPI
+libAVPin_QueryId(libAVPin *this, wchar_t **id)
+{
+ dshowdebug("libAVPin_QueryId(%p)\n", this);
+
+ if (!id)
+ return E_POINTER;
+
+ *id = wcsdup(L"libAV Pin");
+
+ return S_OK;
+}
+long WINAPI
+libAVPin_QueryAccept(libAVPin *this, const AM_MEDIA_TYPE *type)
+{
+ dshowdebug("libAVPin_QueryAccept(%p)\n", this);
+ return S_FALSE;
+}
+long WINAPI
+libAVPin_EnumMediaTypes(libAVPin *this, IEnumMediaTypes **enumtypes)
+{
+ const AM_MEDIA_TYPE *type = NULL;
+ libAVEnumMediaTypes *new;
+ dshowdebug("libAVPin_EnumMediaTypes(%p)\n", this);
+
+ if (!enumtypes)
+ return E_POINTER;
+ new = libAVEnumMediaTypes_Create(type);
+ if (!new)
+ return E_OUTOFMEMORY;
+
+ *enumtypes = (IEnumMediaTypes *) new;
+ return S_OK;
+}
+long WINAPI
+libAVPin_QueryInternalConnections(libAVPin *this, IPin **pin,
+ unsigned long *npin)
+{
+ dshowdebug("libAVPin_QueryInternalConnections(%p)\n", this);
+ return E_NOTIMPL;
+}
+long WINAPI
+libAVPin_EndOfStream(libAVPin *this)
+{
+ dshowdebug("libAVPin_EndOfStream(%p)\n", this);
+ /* I don't care. */
+ return S_OK;
+}
+long WINAPI
+libAVPin_BeginFlush(libAVPin *this)
+{
+ dshowdebug("libAVPin_BeginFlush(%p)\n", this);
+ /* I don't care. */
+ return S_OK;
+}
+long WINAPI
+libAVPin_EndFlush(libAVPin *this)
+{
+ dshowdebug("libAVPin_EndFlush(%p)\n", this);
+ /* I don't care. */
+ return S_OK;
+}
+long WINAPI
+libAVPin_NewSegment(libAVPin *this, REFERENCE_TIME start, REFERENCE_TIME stop,
+ double rate)
+{
+ dshowdebug("libAVPin_NewSegment(%p)\n", this);
+ /* I don't care. */
+ return S_OK;
+}
+
+static int
+libAVPin_Setup(libAVPin *this, libAVFilter *filter)
+{
+ IPinVtbl *vtbl = this->vtbl;
+ IMemInputPinVtbl *imemvtbl;
+
+ if (!filter)
+ return 0;
+
+ imemvtbl = av_malloc(sizeof(IMemInputPinVtbl));
+ if (!imemvtbl)
+ return 0;
+
+ SETVTBL(imemvtbl, libAVMemInputPin, QueryInterface);
+ SETVTBL(imemvtbl, libAVMemInputPin, AddRef);
+ SETVTBL(imemvtbl, libAVMemInputPin, Release);
+ SETVTBL(imemvtbl, libAVMemInputPin, GetAllocator);
+ SETVTBL(imemvtbl, libAVMemInputPin, NotifyAllocator);
+ SETVTBL(imemvtbl, libAVMemInputPin, GetAllocatorRequirements);
+ SETVTBL(imemvtbl, libAVMemInputPin, Receive);
+ SETVTBL(imemvtbl, libAVMemInputPin, ReceiveMultiple);
+ SETVTBL(imemvtbl, libAVMemInputPin, ReceiveCanBlock);
+
+ this->imemvtbl = imemvtbl;
+
+ SETVTBL(vtbl, libAVPin, QueryInterface);
+ SETVTBL(vtbl, libAVPin, AddRef);
+ SETVTBL(vtbl, libAVPin, Release);
+ SETVTBL(vtbl, libAVPin, Connect);
+ SETVTBL(vtbl, libAVPin, ReceiveConnection);
+ SETVTBL(vtbl, libAVPin, Disconnect);
+ SETVTBL(vtbl, libAVPin, ConnectedTo);
+ SETVTBL(vtbl, libAVPin, ConnectionMediaType);
+ SETVTBL(vtbl, libAVPin, QueryPinInfo);
+ SETVTBL(vtbl, libAVPin, QueryDirection);
+ SETVTBL(vtbl, libAVPin, QueryId);
+ SETVTBL(vtbl, libAVPin, QueryAccept);
+ SETVTBL(vtbl, libAVPin, EnumMediaTypes);
+ SETVTBL(vtbl, libAVPin, QueryInternalConnections);
+ SETVTBL(vtbl, libAVPin, EndOfStream);
+ SETVTBL(vtbl, libAVPin, BeginFlush);
+ SETVTBL(vtbl, libAVPin, EndFlush);
+ SETVTBL(vtbl, libAVPin, NewSegment);
+
+ this->filter = filter;
+
+ return 1;
+}
+DECLARE_CREATE(libAVPin, libAVPin_Setup(this, filter), libAVFilter *filter)
+DECLARE_DESTROY(libAVPin, nothing)
+
+/*****************************************************************************
+ * libAVMemInputPin
+ ****************************************************************************/
+long WINAPI
+libAVMemInputPin_QueryInterface(libAVMemInputPin *this, const GUID *riid,
+ void **ppvObject)
+{
+ libAVPin *pin = (libAVPin *) ((uint8_t *) this - imemoffset);
+ dshowdebug("libAVMemInputPin_QueryInterface(%p)\n", this);
+ return libAVPin_QueryInterface(pin, riid, ppvObject);
+}
+unsigned long WINAPI
+libAVMemInputPin_AddRef(libAVMemInputPin *this)
+{
+ libAVPin *pin = (libAVPin *) ((uint8_t *) this - imemoffset);
+ dshowdebug("libAVMemInputPin_AddRef(%p)\n", this);
+ return libAVPin_AddRef(pin);
+}
+unsigned long WINAPI
+libAVMemInputPin_Release(libAVMemInputPin *this)
+{
+ libAVPin *pin = (libAVPin *) ((uint8_t *) this - imemoffset);
+ dshowdebug("libAVMemInputPin_Release(%p)\n", this);
+ return libAVPin_Release(pin);
+}
+long WINAPI
+libAVMemInputPin_GetAllocator(libAVMemInputPin *this, IMemAllocator **alloc)
+{
+ dshowdebug("libAVMemInputPin_GetAllocator(%p)\n", this);
+ return VFW_E_NO_ALLOCATOR;
+}
+long WINAPI
+libAVMemInputPin_NotifyAllocator(libAVMemInputPin *this, IMemAllocator *alloc,
+ BOOL rdwr)
+{
+ dshowdebug("libAVMemInputPin_NotifyAllocator(%p)\n", this);
+ return S_OK;
+}
+long WINAPI
+libAVMemInputPin_GetAllocatorRequirements(libAVMemInputPin *this,
+ ALLOCATOR_PROPERTIES *props)
+{
+ dshowdebug("libAVMemInputPin_GetAllocatorRequirements(%p)\n", this);
+ return E_NOTIMPL;
+}
+long WINAPI
+libAVMemInputPin_Receive(libAVMemInputPin *this, IMediaSample *sample)
+{
+ libAVPin *pin = (libAVPin *) ((uint8_t *) this - imemoffset);
+ enum dshowDeviceType devtype = pin->filter->type;
+ void *priv_data;
+ AVFormatContext *s;
+ uint8_t *buf;
+ int buf_size; /* TODO: should this be a long? */
+ int index;
+ int64_t curtime;
+ int64_t orig_curtime;
+ int64_t graphtime;
+ const char *devtypename = (devtype == VideoDevice) ? "video" : "audio";
+ IReferenceClock *clock = pin->filter->clock;
+ int64_t dummy;
+ struct dshow_ctx *ctx;
+
+ dshowdebug("libAVMemInputPin_Receive(%p)\n", this);
+
+ if (!sample)
+ return E_POINTER;
+
+ IMediaSample_GetTime(sample, &orig_curtime, &dummy);
+ orig_curtime += pin->filter->start_time;
+ IReferenceClock_GetTime(clock, &graphtime);
+ if (devtype == VideoDevice) {
+ /* PTS from video devices is unreliable. */
+ IReferenceClock_GetTime(clock, &curtime);
+ } else {
+ IMediaSample_GetTime(sample, &curtime, &dummy);
+ if (curtime > 400000000000000000LL) {
+ /* initial frames sometimes start < 0 (which shows up here as a very
+ large number, e.g. 437650244077016960) and FFmpeg doesn't handle
+ that. TODO: figure out the math; for now just drop these frames. */
+ av_log(NULL, AV_LOG_DEBUG,
+ "dshow dropping initial (or trailing) audio frame with implausibly high PTS %"PRId64"\n", curtime);
+ return S_OK;
+ }
+ curtime += pin->filter->start_time;
+ }
+
+ buf_size = IMediaSample_GetActualDataLength(sample);
+ IMediaSample_GetPointer(sample, &buf);
+ priv_data = pin->filter->priv_data;
+ s = priv_data;
+ ctx = s->priv_data;
+ index = pin->filter->stream_index;
+
+ av_log(NULL, AV_LOG_VERBOSE, "dshow passing through packet of type %s size %8d "
+ "timestamp %"PRId64" orig timestamp %"PRId64" graph timestamp %"PRId64" diff %"PRId64" %s\n",
+ devtypename, buf_size, curtime, orig_curtime, graphtime, graphtime - orig_curtime, ctx->device_name[devtype]);
+ pin->filter->callback(priv_data, index, buf, buf_size, curtime, devtype);
+
+ return S_OK;
+}
+long WINAPI
+libAVMemInputPin_ReceiveMultiple(libAVMemInputPin *this,
+ IMediaSample **samples, long n, long *nproc)
+{
+ int i;
+ dshowdebug("libAVMemInputPin_ReceiveMultiple(%p)\n", this);
+
+ for (i = 0; i < n; i++)
+ libAVMemInputPin_Receive(this, samples[i]);
+
+ *nproc = n;
+ return S_OK;
+}
+long WINAPI
+libAVMemInputPin_ReceiveCanBlock(libAVMemInputPin *this)
+{
+ dshowdebug("libAVMemInputPin_ReceiveCanBlock(%p)\n", this);
+ /* I swear I will not block. */
+ return S_FALSE;
+}
+
+void
+libAVMemInputPin_Destroy(libAVMemInputPin *this)
+{
+ libAVPin *pin = (libAVPin *) ((uint8_t *) this - imemoffset);
+ dshowdebug("libAVMemInputPin_Destroy(%p)\n", this);
+ libAVPin_Destroy(pin);
+}
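The (uint8_t *) this - imemoffset arithmetic used by the libAVMemInputPin methods is the container_of idiom: the IMemInputPin vtable pointer sits at a fixed offset inside libAVPin, so subtracting that offset recovers the owning pin. The same trick in its generic form (hypothetical macro, shown only for clarity):

    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *) ((char *) (ptr) - offsetof(type, member)))

    /* so that
     *     libAVPin *pin = container_of(this, libAVPin, imemvtbl);
     * is equivalent to
     *     libAVPin *pin = (libAVPin *) ((uint8_t *) this - imemoffset); */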
diff --git a/libavdevice/dv1394.c b/libavdevice/dv1394.c
index addf1ade67..9f02780e72 100644
--- a/libavdevice/dv1394.c
+++ b/libavdevice/dv1394.c
@@ -2,20 +2,20 @@
* Linux DV1394 interface
* Copyright (c) 2003 Max Krasnyansky <maxk@qualcomm.com>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -30,7 +30,7 @@
#include "libavutil/internal.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
-#include "libavformat/avformat.h"
+#include "avdevice.h"
#include "libavformat/dv.h"
#include "dv1394.h"
@@ -186,7 +186,7 @@ restart_poll:
size = avpriv_dv_produce_packet(dv->dv_demux, pkt,
dv->ring + (dv->index * DV1394_PAL_FRAME_SIZE),
- DV1394_PAL_FRAME_SIZE);
+ DV1394_PAL_FRAME_SIZE, -1);
dv->index = (dv->index + 1) % DV1394_RING_FRAMES;
dv->done++; dv->avail--;
@@ -206,7 +206,7 @@ static int dv1394_close(AVFormatContext * context)
av_log(context, AV_LOG_ERROR, "Failed to munmap DV1394 ring buffer: %s\n", strerror(errno));
close(dv->fd);
- av_free(dv->dv_demux);
+ av_freep(&dv->dv_demux);
return 0;
}
@@ -224,6 +224,7 @@ static const AVClass dv1394_class = {
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
+ .category = AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT,
};
AVInputFormat ff_dv1394_demuxer = {
diff --git a/libavdevice/dv1394.h b/libavdevice/dv1394.h
index 9710ff56ea..b76d633ef6 100644
--- a/libavdevice/dv1394.h
+++ b/libavdevice/dv1394.h
@@ -8,20 +8,20 @@
* Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
* Peter Schlaile <udbz@rz.uni-karlsruhe.de>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/libavdevice/fbdev_common.c b/libavdevice/fbdev_common.c
new file mode 100644
index 0000000000..91bd8e1a91
--- /dev/null
+++ b/libavdevice/fbdev_common.c
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2011 Stefano Sabatini
+ * Copyright (c) 2009 Giliard B. de Freitas <giliarde@gmail.com>
+ * Copyright (C) 2002 Gunnar Monell <gmo@linux.nu>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <stdlib.h>
+#include "fbdev_common.h"
+#include "libavutil/common.h"
+#include "avdevice.h"
+
+struct rgb_pixfmt_map_entry {
+ int bits_per_pixel;
+ int red_offset, green_offset, blue_offset, alpha_offset;
+ enum AVPixelFormat pixfmt;
+};
+
+static const struct rgb_pixfmt_map_entry rgb_pixfmt_map[] = {
+ // bpp, red_offset, green_offset, blue_offset, alpha_offset, pixfmt
+ { 32, 0, 8, 16, 24, AV_PIX_FMT_RGBA },
+ { 32, 16, 8, 0, 24, AV_PIX_FMT_BGRA },
+ { 32, 8, 16, 24, 0, AV_PIX_FMT_ARGB },
+ { 32, 3, 2, 8, 0, AV_PIX_FMT_ABGR },
+ { 24, 0, 8, 16, 0, AV_PIX_FMT_RGB24 },
+ { 24, 16, 8, 0, 0, AV_PIX_FMT_BGR24 },
+ { 16, 11, 5, 0, 0, AV_PIX_FMT_RGB565 },
+};
+
+enum AVPixelFormat ff_get_pixfmt_from_fb_varinfo(struct fb_var_screeninfo *varinfo)
+{
+ int i;
+
+ for (i = 0; i < FF_ARRAY_ELEMS(rgb_pixfmt_map); i++) {
+ const struct rgb_pixfmt_map_entry *entry = &rgb_pixfmt_map[i];
+ if (entry->bits_per_pixel == varinfo->bits_per_pixel &&
+ entry->red_offset == varinfo->red.offset &&
+ entry->green_offset == varinfo->green.offset &&
+ entry->blue_offset == varinfo->blue.offset)
+ return entry->pixfmt;
+ }
+
+ return AV_PIX_FMT_NONE;
+}
+
+const char* ff_fbdev_default_device()
+{
+ const char *dev = getenv("FRAMEBUFFER");
+ if (!dev)
+ dev = "/dev/fb0";
+ return dev;
+}
+
+int ff_fbdev_get_device_list(AVDeviceInfoList *device_list)
+{
+ struct fb_var_screeninfo varinfo;
+ struct fb_fix_screeninfo fixinfo;
+ char device_file[12];
+ AVDeviceInfo *device = NULL;
+ int i, fd, ret = 0;
+ const char *default_device = ff_fbdev_default_device();
+
+ if (!device_list)
+ return AVERROR(EINVAL);
+
+ for (i = 0; i <= 31; i++) {
+ snprintf(device_file, sizeof(device_file), "/dev/fb%d", i);
+
+ if ((fd = avpriv_open(device_file, O_RDWR)) < 0) {
+ int err = AVERROR(errno);
+ if (err != AVERROR(ENOENT))
+ av_log(NULL, AV_LOG_ERROR, "Could not open framebuffer device '%s': %s\n",
+ device_file, av_err2str(err));
+ continue;
+ }
+ if (ioctl(fd, FBIOGET_VSCREENINFO, &varinfo) == -1)
+ goto fail_device;
+ if (ioctl(fd, FBIOGET_FSCREENINFO, &fixinfo) == -1)
+ goto fail_device;
+
+ device = av_mallocz(sizeof(AVDeviceInfo));
+ if (!device) {
+ ret = AVERROR(ENOMEM);
+ goto fail_device;
+ }
+ device->device_name = av_strdup(device_file);
+ device->device_description = av_strdup(fixinfo.id);
+ if (!device->device_name || !device->device_description) {
+ ret = AVERROR(ENOMEM);
+ goto fail_device;
+ }
+
+ if ((ret = av_dynarray_add_nofree(&device_list->devices,
+ &device_list->nb_devices, device)) < 0)
+ goto fail_device;
+
+ if (default_device && !strcmp(device->device_name, default_device)) {
+ device_list->default_device = device_list->nb_devices - 1;
+ default_device = NULL;
+ }
+ close(fd);
+ continue;
+
+ fail_device:
+ if (device) {
+ av_freep(&device->device_name);
+ av_freep(&device->device_description);
+ av_freep(&device);
+ }
+ if (fd >= 0)
+ close(fd);
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
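ff_get_pixfmt_from_fb_varinfo() identifies the format purely from the bit depth and the red/green/blue channel offsets reported by the kernel (the alpha offset is not compared). A worked example with values typical of a 32 bpp little-endian framebuffer (actual offsets depend on the driver):

    struct fb_var_screeninfo vi = {
        .bits_per_pixel = 32,
        .red   = { .offset = 16, .length = 8 },
        .green = { .offset =  8, .length = 8 },
        .blue  = { .offset =  0, .length = 8 },
    };
    /* matches the { 32, 16, 8, 0, 24, AV_PIX_FMT_BGRA } table row,
     * so ff_get_pixfmt_from_fb_varinfo(&vi) returns AV_PIX_FMT_BGRA */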
diff --git a/libavdevice/fbdev_common.h b/libavdevice/fbdev_common.h
new file mode 100644
index 0000000000..7b81a8daeb
--- /dev/null
+++ b/libavdevice/fbdev_common.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2011 Stefano Sabatini
+ * Copyright (c) 2009 Giliard B. de Freitas <giliarde@gmail.com>
+ * Copyright (C) 2002 Gunnar Monell <gmo@linux.nu>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVDEVICE_FBDEV_COMMON_H
+#define AVDEVICE_FBDEV_COMMON_H
+
+#include <features.h>
+#include <linux/fb.h>
+#include "libavutil/pixfmt.h"
+
+struct AVDeviceInfoList;
+
+enum AVPixelFormat ff_get_pixfmt_from_fb_varinfo(struct fb_var_screeninfo *varinfo);
+
+const char* ff_fbdev_default_device(void);
+
+int ff_fbdev_get_device_list(struct AVDeviceInfoList *device_list);
+
+#endif /* AVDEVICE_FBDEV_COMMON_H */
diff --git a/libavdevice/fbdev.c b/libavdevice/fbdev_dec.c
index 16469c56d3..1505b2557d 100644
--- a/libavdevice/fbdev.c
+++ b/libavdevice/fbdev_dec.c
@@ -3,20 +3,20 @@
* Copyright (c) 2009 Giliard B. de Freitas <giliarde@gmail.com>
* Copyright (C) 2002 Gunnar Monell <gmo@linux.nu>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -41,47 +41,14 @@
#include "libavutil/time.h"
#include "libavutil/parseutils.h"
#include "libavutil/pixdesc.h"
-#include "libavformat/avformat.h"
#include "libavformat/internal.h"
-
-struct rgb_pixfmt_map_entry {
- int bits_per_pixel;
- int red_offset, green_offset, blue_offset, alpha_offset;
- enum AVPixelFormat pixfmt;
-};
-
-static struct rgb_pixfmt_map_entry rgb_pixfmt_map[] = {
- // bpp, red_offset, green_offset, blue_offset, alpha_offset, pixfmt
- { 32, 0, 8, 16, 24, AV_PIX_FMT_RGBA },
- { 32, 16, 8, 0, 24, AV_PIX_FMT_BGRA },
- { 32, 8, 16, 24, 0, AV_PIX_FMT_ARGB },
- { 32, 3, 2, 8, 0, AV_PIX_FMT_ABGR },
- { 24, 0, 8, 16, 0, AV_PIX_FMT_RGB24 },
- { 24, 16, 8, 0, 0, AV_PIX_FMT_BGR24 },
- { 16, 11, 5, 0, 0, AV_PIX_FMT_RGB565 },
-};
-
-static enum AVPixelFormat get_pixfmt_from_fb_varinfo(struct fb_var_screeninfo *varinfo)
-{
- int i;
-
- for (i = 0; i < FF_ARRAY_ELEMS(rgb_pixfmt_map); i++) {
- struct rgb_pixfmt_map_entry *entry = &rgb_pixfmt_map[i];
- if (entry->bits_per_pixel == varinfo->bits_per_pixel &&
- entry->red_offset == varinfo->red.offset &&
- entry->green_offset == varinfo->green.offset &&
- entry->blue_offset == varinfo->blue.offset)
- return entry->pixfmt;
- }
-
- return AV_PIX_FMT_NONE;
-}
+#include "avdevice.h"
+#include "fbdev_common.h"
typedef struct FBDevContext {
AVClass *class; ///< class for private options
int frame_size; ///< size in bytes of a grabbed frame
AVRational framerate_q; ///< framerate
- char *framerate; ///< framerate string set by a private option
int64_t time_frame; ///< time for the next frame to output (in 1/1000000 units)
int fd; ///< framebuffer device file descriptor
@@ -101,13 +68,7 @@ static av_cold int fbdev_read_header(AVFormatContext *avctx)
AVStream *st = NULL;
enum AVPixelFormat pix_fmt;
int ret, flags = O_RDONLY;
- char errbuf[128];
-
- ret = av_parse_video_rate(&fbdev->framerate_q, fbdev->framerate);
- if (ret < 0) {
- av_log(avctx, AV_LOG_ERROR, "Could not parse framerate '%s'.\n", fbdev->framerate);
- return ret;
- }
+ const char* device;
if (!(st = avformat_new_stream(avctx, NULL)))
return AVERROR(ENOMEM);
@@ -117,32 +78,34 @@ static av_cold int fbdev_read_header(AVFormatContext *avctx)
if (avctx->flags & AVFMT_FLAG_NONBLOCK)
flags |= O_NONBLOCK;
- if ((fbdev->fd = avpriv_open(avctx->filename, flags)) == -1) {
+ if (avctx->filename[0])
+ device = avctx->filename;
+ else
+ device = ff_fbdev_default_device();
+
+ if ((fbdev->fd = avpriv_open(device, flags)) == -1) {
ret = AVERROR(errno);
- av_strerror(ret, errbuf, sizeof(errbuf));
av_log(avctx, AV_LOG_ERROR,
"Could not open framebuffer device '%s': %s\n",
- avctx->filename, errbuf);
+ device, av_err2str(ret));
return ret;
}
if (ioctl(fbdev->fd, FBIOGET_VSCREENINFO, &fbdev->varinfo) < 0) {
ret = AVERROR(errno);
- av_strerror(ret, errbuf, sizeof(errbuf));
av_log(avctx, AV_LOG_ERROR,
- "FBIOGET_VSCREENINFO: %s\n", errbuf);
+ "FBIOGET_VSCREENINFO: %s\n", av_err2str(ret));
goto fail;
}
if (ioctl(fbdev->fd, FBIOGET_FSCREENINFO, &fbdev->fixinfo) < 0) {
ret = AVERROR(errno);
- av_strerror(ret, errbuf, sizeof(errbuf));
av_log(avctx, AV_LOG_ERROR,
- "FBIOGET_FSCREENINFO: %s\n", errbuf);
+ "FBIOGET_FSCREENINFO: %s\n", av_err2str(ret));
goto fail;
}
- pix_fmt = get_pixfmt_from_fb_varinfo(&fbdev->varinfo);
+ pix_fmt = ff_get_pixfmt_from_fb_varinfo(&fbdev->varinfo);
if (pix_fmt == AV_PIX_FMT_NONE) {
ret = AVERROR(EINVAL);
av_log(avctx, AV_LOG_ERROR,
@@ -159,8 +122,7 @@ static av_cold int fbdev_read_header(AVFormatContext *avctx)
fbdev->data = mmap(NULL, fbdev->fixinfo.smem_len, PROT_READ, MAP_SHARED, fbdev->fd, 0);
if (fbdev->data == MAP_FAILED) {
ret = AVERROR(errno);
- av_strerror(ret, errbuf, sizeof(errbuf));
- av_log(avctx, AV_LOG_ERROR, "Error in mmap(): %s\n", errbuf);
+ av_log(avctx, AV_LOG_ERROR, "Error in mmap(): %s\n", av_err2str(ret));
goto fail;
}
@@ -169,16 +131,16 @@ static av_cold int fbdev_read_header(AVFormatContext *avctx)
st->codecpar->width = fbdev->width;
st->codecpar->height = fbdev->height;
st->codecpar->format = pix_fmt;
+ st->avg_frame_rate = fbdev->framerate_q;
st->codecpar->bit_rate =
fbdev->width * fbdev->height * fbdev->bytes_per_pixel * av_q2d(fbdev->framerate_q) * 8;
- st->avg_frame_rate = fbdev->framerate_q;
av_log(avctx, AV_LOG_INFO,
- "w:%d h:%d bpp:%d pixfmt:%s fps:%d/%d bit_rate:%d\n",
+ "w:%d h:%d bpp:%d pixfmt:%s fps:%d/%d bit_rate:%"PRId64"\n",
fbdev->width, fbdev->height, fbdev->varinfo.bits_per_pixel,
av_get_pix_fmt_name(pix_fmt),
fbdev->framerate_q.num, fbdev->framerate_q.den,
- st->codecpar->bit_rate);
+ (int64_t)st->codecpar->bit_rate);
return 0;
fail:
@@ -198,30 +160,30 @@ static int fbdev_read_packet(AVFormatContext *avctx, AVPacket *pkt)
fbdev->time_frame = av_gettime();
/* wait based on the frame rate */
- curtime = av_gettime();
- delay = fbdev->time_frame - curtime;
- av_log(avctx, AV_LOG_TRACE,
- "time_frame:%"PRId64" curtime:%"PRId64" delay:%"PRId64"\n",
- fbdev->time_frame, curtime, delay);
- if (delay > 0) {
+ while (1) {
+ curtime = av_gettime();
+ delay = fbdev->time_frame - curtime;
+ av_log(avctx, AV_LOG_TRACE,
+ "time_frame:%"PRId64" curtime:%"PRId64" delay:%"PRId64"\n",
+ fbdev->time_frame, curtime, delay);
+ if (delay <= 0) {
+ fbdev->time_frame += INT64_C(1000000) / av_q2d(fbdev->framerate_q);
+ break;
+ }
if (avctx->flags & AVFMT_FLAG_NONBLOCK)
return AVERROR(EAGAIN);
ts.tv_sec = delay / 1000000;
ts.tv_nsec = (delay % 1000000) * 1000;
while (nanosleep(&ts, &ts) < 0 && errno == EINTR);
}
- /* compute the time of the next frame */
- fbdev->time_frame += INT64_C(1000000) / av_q2d(fbdev->framerate_q);
if ((ret = av_new_packet(pkt, fbdev->frame_size)) < 0)
return ret;
/* refresh fbdev->varinfo, visible data position may change at each call */
if (ioctl(fbdev->fd, FBIOGET_VSCREENINFO, &fbdev->varinfo) < 0) {
- char errbuf[128];
- av_strerror(AVERROR(errno), errbuf, sizeof(errbuf));
av_log(avctx, AV_LOG_WARNING,
- "Error refreshing variable info: %s\n", errbuf);
+ "Error refreshing variable info: %s\n", av_err2str(AVERROR(errno)));
}
pkt->pts = curtime;
@@ -231,7 +193,6 @@ static int fbdev_read_packet(AVFormatContext *avctx, AVPacket *pkt)
fbdev->varinfo.yoffset * fbdev->fixinfo.line_length;
pout = pkt->data;
- // TODO it'd be nice if the lines were aligned
for (i = 0; i < fbdev->height; i++) {
memcpy(pout, pin, fbdev->frame_linesize);
pin += fbdev->fixinfo.line_length;
@@ -245,16 +206,21 @@ static av_cold int fbdev_read_close(AVFormatContext *avctx)
{
FBDevContext *fbdev = avctx->priv_data;
- munmap(fbdev->data, fbdev->frame_size);
+ munmap(fbdev->data, fbdev->fixinfo.smem_len);
close(fbdev->fd);
return 0;
}
+static int fbdev_get_device_list(AVFormatContext *s, AVDeviceInfoList *device_list)
+{
+ return ff_fbdev_get_device_list(device_list);
+}
+
#define OFFSET(x) offsetof(FBDevContext, x)
#define DEC AV_OPT_FLAG_DECODING_PARAM
static const AVOption options[] = {
- { "framerate","", OFFSET(framerate), AV_OPT_TYPE_STRING, {.str = "25"}, 0, 0, DEC },
+ { "framerate","", OFFSET(framerate_q), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, 0, DEC },
{ NULL },
};
@@ -263,6 +229,7 @@ static const AVClass fbdev_class = {
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
+ .category = AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT,
};
AVInputFormat ff_fbdev_demuxer = {
@@ -272,6 +239,7 @@ AVInputFormat ff_fbdev_demuxer = {
.read_header = fbdev_read_header,
.read_packet = fbdev_read_packet,
.read_close = fbdev_read_close,
+ .get_device_list = fbdev_get_device_list,
.flags = AVFMT_NOFILE,
.priv_class = &fbdev_class,
};
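The reworked pacing loop in fbdev_read_packet() sleeps until the stored deadline and then advances it by exactly one frame duration, so capture stays locked to the requested rate regardless of how long the frame copy takes. The deadline step in isolation (example values; same expression as the code):

    AVRational fps = { 25, 1 };                      /* the default 25 fps */
    int64_t step = INT64_C(1000000) / av_q2d(fps);   /* 40000 us per frame */
    /* fbdev->time_frame += step;  => one frame every 40 ms of
     * av_gettime() wall-clock time */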
diff --git a/libavdevice/fbdev_enc.c b/libavdevice/fbdev_enc.c
new file mode 100644
index 0000000000..b4e5f84975
--- /dev/null
+++ b/libavdevice/fbdev_enc.c
@@ -0,0 +1,220 @@
+/*
+ * Copyright (c) 2013 Lukasz Marek
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <linux/fb.h>
+#include "libavutil/pixdesc.h"
+#include "libavutil/log.h"
+#include "libavutil/mem.h"
+#include "libavutil/opt.h"
+#include "libavformat/avformat.h"
+#include "fbdev_common.h"
+#include "avdevice.h"
+
+typedef struct {
+ AVClass *class; ///< class for private options
+ int xoffset; ///< x coordinate of top left corner
+ int yoffset; ///< y coordinate of top left corner
+ struct fb_var_screeninfo varinfo; ///< framebuffer variable info
+ struct fb_fix_screeninfo fixinfo; ///< framebuffer fixed info
+ int fd; ///< framebuffer device file descriptor
+ uint8_t *data; ///< framebuffer data
+} FBDevContext;
+
+static av_cold int fbdev_write_header(AVFormatContext *h)
+{
+ FBDevContext *fbdev = h->priv_data;
+ enum AVPixelFormat pix_fmt;
+ int ret, flags = O_RDWR;
+ const char* device;
+
+ if (h->nb_streams != 1 || h->streams[0]->codecpar->codec_type != AVMEDIA_TYPE_VIDEO) {
+ av_log(fbdev, AV_LOG_ERROR, "Only a single video stream is supported.\n");
+ return AVERROR(EINVAL);
+ }
+
+ if (h->filename[0])
+ device = h->filename;
+ else
+ device = ff_fbdev_default_device();
+
+ if ((fbdev->fd = avpriv_open(device, flags)) == -1) {
+ ret = AVERROR(errno);
+ av_log(h, AV_LOG_ERROR,
+ "Could not open framebuffer device '%s': %s\n",
+ device, av_err2str(ret));
+ return ret;
+ }
+
+ if (ioctl(fbdev->fd, FBIOGET_VSCREENINFO, &fbdev->varinfo) < 0) {
+ ret = AVERROR(errno);
+ av_log(h, AV_LOG_ERROR, "FBIOGET_VSCREENINFO: %s\n", av_err2str(ret));
+ goto fail;
+ }
+
+ if (ioctl(fbdev->fd, FBIOGET_FSCREENINFO, &fbdev->fixinfo) < 0) {
+ ret = AVERROR(errno);
+ av_log(h, AV_LOG_ERROR, "FBIOGET_FSCREENINFO: %s\n", av_err2str(ret));
+ goto fail;
+ }
+
+ pix_fmt = ff_get_pixfmt_from_fb_varinfo(&fbdev->varinfo);
+ if (pix_fmt == AV_PIX_FMT_NONE) {
+ ret = AVERROR(EINVAL);
+ av_log(h, AV_LOG_ERROR, "Framebuffer pixel format not supported.\n");
+ goto fail;
+ }
+
+ fbdev->data = mmap(NULL, fbdev->fixinfo.smem_len, PROT_WRITE, MAP_SHARED, fbdev->fd, 0);
+ if (fbdev->data == MAP_FAILED) {
+ ret = AVERROR(errno);
+ av_log(h, AV_LOG_ERROR, "Error in mmap(): %s\n", av_err2str(ret));
+ goto fail;
+ }
+
+ return 0;
+ fail:
+ close(fbdev->fd);
+ return ret;
+}
+
+static int fbdev_write_packet(AVFormatContext *h, AVPacket *pkt)
+{
+ FBDevContext *fbdev = h->priv_data;
+ uint8_t *pin, *pout;
+ enum AVPixelFormat fb_pix_fmt;
+ int disp_height;
+ int bytes_to_copy;
+ AVCodecParameters *par = h->streams[0]->codecpar;
+ enum AVPixelFormat video_pix_fmt = par->format;
+ int video_width = par->width;
+ int video_height = par->height;
+ int bytes_per_pixel = ((par->bits_per_coded_sample + 7) >> 3);
+ int src_line_size = video_width * bytes_per_pixel;
+ int i;
+
+ if (ioctl(fbdev->fd, FBIOGET_VSCREENINFO, &fbdev->varinfo) < 0)
+ av_log(h, AV_LOG_WARNING,
+ "Error refreshing variable info: %s\n", av_err2str(AVERROR(errno)));
+
+ fb_pix_fmt = ff_get_pixfmt_from_fb_varinfo(&fbdev->varinfo);
+
+ if (fb_pix_fmt != video_pix_fmt) {
+ av_log(h, AV_LOG_ERROR, "Pixel format %s is not supported, use %s\n",
+ av_get_pix_fmt_name(video_pix_fmt), av_get_pix_fmt_name(fb_pix_fmt));
+ return AVERROR(EINVAL);
+ }
+
+ disp_height = FFMIN(fbdev->varinfo.yres, video_height);
+ bytes_to_copy = FFMIN(fbdev->varinfo.xres, video_width) * bytes_per_pixel;
+
+ pin = pkt->data;
+ pout = fbdev->data +
+ bytes_per_pixel * fbdev->varinfo.xoffset +
+ fbdev->varinfo.yoffset * fbdev->fixinfo.line_length;
+
+ if (fbdev->xoffset) {
+ if (fbdev->xoffset < 0) {
+ if (-fbdev->xoffset >= video_width) //nothing to display
+ return 0;
+ bytes_to_copy += fbdev->xoffset * bytes_per_pixel;
+ pin -= fbdev->xoffset * bytes_per_pixel;
+ } else {
+ int diff = (video_width + fbdev->xoffset) - fbdev->varinfo.xres;
+ if (diff > 0) {
+ if (diff >= video_width) //nothing to display
+ return 0;
+ bytes_to_copy -= diff * bytes_per_pixel;
+ }
+ pout += bytes_per_pixel * fbdev->xoffset;
+ }
+ }
+
+ if (fbdev->yoffset) {
+ if (fbdev->yoffset < 0) {
+ if (-fbdev->yoffset >= video_height) //nothing to display
+ return 0;
+ disp_height += fbdev->yoffset;
+ pin -= fbdev->yoffset * src_line_size;
+ } else {
+ int diff = (video_height + fbdev->yoffset) - fbdev->varinfo.yres;
+ if (diff > 0) {
+ if (diff >= video_height) //nothing to display
+ return 0;
+ disp_height -= diff;
+ }
+ pout += fbdev->yoffset * fbdev->fixinfo.line_length;
+ }
+ }
+
+ for (i = 0; i < disp_height; i++) {
+ memcpy(pout, pin, bytes_to_copy);
+ pout += fbdev->fixinfo.line_length;
+ pin += src_line_size;
+ }
+
+ return 0;
+}
+
+static av_cold int fbdev_write_trailer(AVFormatContext *h)
+{
+ FBDevContext *fbdev = h->priv_data;
+ munmap(fbdev->data, fbdev->fixinfo.smem_len);
+ close(fbdev->fd);
+ return 0;
+}
+
+static int fbdev_get_device_list(AVFormatContext *s, AVDeviceInfoList *device_list)
+{
+ return ff_fbdev_get_device_list(device_list);
+}
+
+#define OFFSET(x) offsetof(FBDevContext, x)
+#define ENC AV_OPT_FLAG_ENCODING_PARAM
+static const AVOption options[] = {
+ { "xoffset", "set x coordinate of top left corner", OFFSET(xoffset), AV_OPT_TYPE_INT, {.i64 = 0}, INT_MIN, INT_MAX, ENC },
+ { "yoffset", "set y coordinate of top left corner", OFFSET(yoffset), AV_OPT_TYPE_INT, {.i64 = 0}, INT_MIN, INT_MAX, ENC },
+ { NULL }
+};
+
+static const AVClass fbdev_class = {
+ .class_name = "fbdev outdev",
+ .item_name = av_default_item_name,
+ .option = options,
+ .version = LIBAVUTIL_VERSION_INT,
+ .category = AV_CLASS_CATEGORY_DEVICE_VIDEO_OUTPUT,
+};
+
+AVOutputFormat ff_fbdev_muxer = {
+ .name = "fbdev",
+ .long_name = NULL_IF_CONFIG_SMALL("Linux framebuffer"),
+ .priv_data_size = sizeof(FBDevContext),
+ .audio_codec = AV_CODEC_ID_NONE,
+ .video_codec = AV_CODEC_ID_RAWVIDEO,
+ .write_header = fbdev_write_header,
+ .write_packet = fbdev_write_packet,
+ .write_trailer = fbdev_write_trailer,
+ .get_device_list = fbdev_get_device_list,
+ .flags = AVFMT_NOFILE | AVFMT_VARIABLE_FPS | AVFMT_NOTIMESTAMPS,
+ .priv_class = &fbdev_class,
+};
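
A hedged sketch of feeding the new muxer from client code; the 640x480 BGRA frame, the offsets, and the device path are illustrative assumptions, not values from the patch. fbdev_write_packet() derives the source line size from bits_per_coded_sample, so that field must be set:

#include <libavdevice/avdevice.h>
#include <libavformat/avformat.h>

static int show_frame(uint8_t *data, int size)
{
    AVFormatContext *oc = NULL;
    AVStream *st;
    AVDictionary *opts = NULL;
    AVPacket pkt;
    int ret;

    avdevice_register_all();

    ret = avformat_alloc_output_context2(&oc, NULL, "fbdev", "/dev/fb0");
    if (ret < 0)
        return ret;

    st = avformat_new_stream(oc, NULL);
    if (!st) {
        avformat_free_context(oc);
        return AVERROR(ENOMEM);
    }
    st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codecpar->codec_id   = AV_CODEC_ID_RAWVIDEO;
    st->codecpar->format     = AV_PIX_FMT_BGRA;  /* must match the fb */
    st->codecpar->width      = 640;
    st->codecpar->height     = 480;
    /* fbdev_write_packet() computes bytes_per_pixel from this field */
    st->codecpar->bits_per_coded_sample = 32;

    av_dict_set(&opts, "xoffset", "100", 0);  /* private muxer options */
    av_dict_set(&opts, "yoffset", "100", 0);

    ret = avformat_write_header(oc, &opts);   /* AVFMT_NOFILE: no avio_open */
    av_dict_free(&opts);
    if (ret >= 0) {
        av_init_packet(&pkt);
        pkt.data = data;
        pkt.size = size;
        ret = av_write_frame(oc, &pkt);
        av_write_trailer(oc);
    }
    avformat_free_context(oc);
    return ret;
}
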
diff --git a/libavdevice/gdigrab.c b/libavdevice/gdigrab.c
new file mode 100644
index 0000000000..4239ffae11
--- /dev/null
+++ b/libavdevice/gdigrab.c
@@ -0,0 +1,649 @@
+/*
+ * GDI video grab interface
+ *
+ * This file is part of FFmpeg.
+ *
+ * Copyright (C) 2013 Calvin Walton <calvin.walton@kepstin.ca>
+ * Copyright (C) 2007-2010 Christophe Gisquet <word1.word2@gmail.com>
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation; either version 2.1
+ * of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * GDI frame device demuxer
+ * @author Calvin Walton <calvin.walton@kepstin.ca>
+ * @author Christophe Gisquet <word1.word2@gmail.com>
+ */
+
+#include "config.h"
+#include "libavformat/internal.h"
+#include "libavutil/opt.h"
+#include "libavutil/time.h"
+#include <windows.h>
+
+/**
+ * GDI Device Demuxer context
+ */
+struct gdigrab {
+ const AVClass *class; /**< Class for private options */
+
+ int frame_size; /**< Size in bytes of the frame pixel data */
+ int header_size; /**< Size in bytes of the DIB header */
+ AVRational time_base; /**< Time base */
+ int64_t time_frame; /**< Current time */
+
+ int draw_mouse; /**< Draw mouse cursor (private option) */
+ int show_region; /**< Draw border (private option) */
+ AVRational framerate; /**< Capture framerate (private option) */
+ int width; /**< Width of the grab frame (private option) */
+ int height; /**< Height of the grab frame (private option) */
+ int offset_x; /**< Capture x offset (private option) */
+ int offset_y; /**< Capture y offset (private option) */
+
+ HWND hwnd; /**< Handle of the window for the grab */
+ HDC source_hdc; /**< Source device context */
+ HDC dest_hdc; /**< Destination, source-compatible DC */
+ BITMAPINFO bmi; /**< Information describing DIB format */
+ HBITMAP hbmp; /**< Information on the bitmap captured */
+ void *buffer; /**< The buffer containing the bitmap image data */
+ RECT clip_rect; /**< The subarea of the screen or window to clip */
+
+ HWND region_hwnd; /**< Handle of the region border window */
+
+ int cursor_error_printed;
+};
+
+#define WIN32_API_ERROR(str) \
+ av_log(s1, AV_LOG_ERROR, str " (error %li)\n", GetLastError())
+
+#define REGION_WND_BORDER 3
+
+/**
+ * Callback to handle Windows messages for the region outline window.
+ *
+ * In particular, this handles painting the frame rectangle.
+ *
+ * @param hwnd The region outline window handle.
+ * @param msg The Windows message.
+ * @param wparam First Windows message parameter.
+ * @param lparam Second Windows message parameter.
+ * @return 0 success, !0 failure
+ */
+static LRESULT CALLBACK
+gdigrab_region_wnd_proc(HWND hwnd, UINT msg, WPARAM wparam, LPARAM lparam)
+{
+ PAINTSTRUCT ps;
+ HDC hdc;
+ RECT rect;
+
+ switch (msg) {
+ case WM_PAINT:
+ hdc = BeginPaint(hwnd, &ps);
+
+ GetClientRect(hwnd, &rect);
+ FrameRect(hdc, &rect, GetStockObject(BLACK_BRUSH));
+
+ rect.left++; rect.top++; rect.right--; rect.bottom--;
+ FrameRect(hdc, &rect, GetStockObject(WHITE_BRUSH));
+
+ rect.left++; rect.top++; rect.right--; rect.bottom--;
+ FrameRect(hdc, &rect, GetStockObject(BLACK_BRUSH));
+
+ EndPaint(hwnd, &ps);
+ break;
+ default:
+ return DefWindowProc(hwnd, msg, wparam, lparam);
+ }
+ return 0;
+}
+
+/**
+ * Initialize the region outline window.
+ *
+ * @param s1 The format context.
+ * @param gdigrab gdigrab context.
+ * @return 0 success, !0 failure
+ */
+static int
+gdigrab_region_wnd_init(AVFormatContext *s1, struct gdigrab *gdigrab)
+{
+ HWND hwnd;
+ RECT rect = gdigrab->clip_rect;
+ HRGN region = NULL;
+ HRGN region_interior = NULL;
+
+ DWORD style = WS_POPUP | WS_VISIBLE;
+ DWORD ex = WS_EX_TOOLWINDOW | WS_EX_TOPMOST | WS_EX_TRANSPARENT;
+
+ rect.left -= REGION_WND_BORDER; rect.top -= REGION_WND_BORDER;
+ rect.right += REGION_WND_BORDER; rect.bottom += REGION_WND_BORDER;
+
+ AdjustWindowRectEx(&rect, style, FALSE, ex);
+
+ // Create a window with no owner; use WC_DIALOG instead of writing a custom
+ // window class
+ hwnd = CreateWindowEx(ex, WC_DIALOG, NULL, style, rect.left, rect.top,
+ rect.right - rect.left, rect.bottom - rect.top,
+ NULL, NULL, NULL, NULL);
+ if (!hwnd) {
+ WIN32_API_ERROR("Could not create region display window");
+ goto error;
+ }
+
+ // Set the window shape to only include the border area
+ GetClientRect(hwnd, &rect);
+ region = CreateRectRgn(0, 0,
+ rect.right - rect.left, rect.bottom - rect.top);
+ region_interior = CreateRectRgn(REGION_WND_BORDER, REGION_WND_BORDER,
+ rect.right - rect.left - REGION_WND_BORDER,
+ rect.bottom - rect.top - REGION_WND_BORDER);
+ CombineRgn(region, region, region_interior, RGN_DIFF);
+ if (!SetWindowRgn(hwnd, region, FALSE)) {
+ WIN32_API_ERROR("Could not set window region");
+ goto error;
+ }
+ // The "region" memory is now owned by the window
+ region = NULL;
+ DeleteObject(region_interior);
+
+ SetWindowLongPtr(hwnd, GWLP_WNDPROC, (LONG_PTR) gdigrab_region_wnd_proc);
+
+ ShowWindow(hwnd, SW_SHOW);
+
+ gdigrab->region_hwnd = hwnd;
+
+ return 0;
+
+error:
+ if (region)
+ DeleteObject(region);
+ if (region_interior)
+ DeleteObject(region_interior);
+ if (hwnd)
+ DestroyWindow(hwnd);
+ return 1;
+}
+
+/**
+ * Cleanup/free the region outline window.
+ *
+ * @param s1 The format context.
+ * @param gdigrab gdigrab context.
+ */
+static void
+gdigrab_region_wnd_destroy(AVFormatContext *s1, struct gdigrab *gdigrab)
+{
+ if (gdigrab->region_hwnd)
+ DestroyWindow(gdigrab->region_hwnd);
+ gdigrab->region_hwnd = NULL;
+}
+
+/**
+ * Process the Windows message queue.
+ *
+ * This is important to prevent Windows from thinking the window has become
+ * unresponsive. As well, things like WM_PAINT (to actually draw the window
+ * contents) are handled from the message queue context.
+ *
+ * @param s1 The format context.
+ * @param gdigrab gdigrab context.
+ */
+static void
+gdigrab_region_wnd_update(AVFormatContext *s1, struct gdigrab *gdigrab)
+{
+ HWND hwnd = gdigrab->region_hwnd;
+ MSG msg;
+
+ while (PeekMessage(&msg, hwnd, 0, 0, PM_REMOVE)) {
+ DispatchMessage(&msg);
+ }
+}
+
+/**
+ * Initializes the gdi grab device demuxer (public device demuxer API).
+ *
+ * @param s1 Context from avformat core
+ * @return 0 on success, a negative AVERROR code on failure
+ */
+static int
+gdigrab_read_header(AVFormatContext *s1)
+{
+ struct gdigrab *gdigrab = s1->priv_data;
+
+ HWND hwnd;
+ HDC source_hdc = NULL;
+ HDC dest_hdc = NULL;
+ BITMAPINFO bmi;
+ HBITMAP hbmp = NULL;
+ void *buffer = NULL;
+
+ const char *filename = s1->filename;
+ const char *name = NULL;
+ AVStream *st = NULL;
+
+ int bpp;
+ int vertres;
+ int desktopvertres;
+ RECT virtual_rect;
+ RECT clip_rect;
+ BITMAP bmp;
+ int ret;
+
+ if (!strncmp(filename, "title=", 6)) {
+ name = filename + 6;
+ hwnd = FindWindow(NULL, name);
+ if (!hwnd) {
+ av_log(s1, AV_LOG_ERROR,
+ "Can't find window '%s', aborting.\n", name);
+ ret = AVERROR(EIO);
+ goto error;
+ }
+ if (gdigrab->show_region) {
+ av_log(s1, AV_LOG_WARNING,
+ "Can't show region when grabbing a window.\n");
+ gdigrab->show_region = 0;
+ }
+ } else if (!strcmp(filename, "desktop")) {
+ hwnd = NULL;
+ } else {
+ av_log(s1, AV_LOG_ERROR,
+ "Please use \"desktop\" or \"title=<windowname>\" to specify your target.\n");
+ ret = AVERROR(EIO);
+ goto error;
+ }
+
+ /* This will get the device context for the selected window, or if
+ * none, the primary screen */
+ source_hdc = GetDC(hwnd);
+ if (!source_hdc) {
+ WIN32_API_ERROR("Couldn't get window device context");
+ ret = AVERROR(EIO);
+ goto error;
+ }
+ bpp = GetDeviceCaps(source_hdc, BITSPIXEL);
+
+ if (hwnd) {
+ GetClientRect(hwnd, &virtual_rect);
+ } else {
+ /* desktop -- get the right height and width for scaling DPI */
+ vertres = GetDeviceCaps(source_hdc, VERTRES);
+ desktopvertres = GetDeviceCaps(source_hdc, DESKTOPVERTRES);
+ virtual_rect.left = GetSystemMetrics(SM_XVIRTUALSCREEN);
+ virtual_rect.top = GetSystemMetrics(SM_YVIRTUALSCREEN);
+ virtual_rect.right = (virtual_rect.left + GetSystemMetrics(SM_CXVIRTUALSCREEN)) * desktopvertres / vertres;
+ virtual_rect.bottom = (virtual_rect.top + GetSystemMetrics(SM_CYVIRTUALSCREEN)) * desktopvertres / vertres;
+ }
+
+ /* If no width or height set, use full screen/window area */
+ if (!gdigrab->width || !gdigrab->height) {
+ clip_rect.left = virtual_rect.left;
+ clip_rect.top = virtual_rect.top;
+ clip_rect.right = virtual_rect.right;
+ clip_rect.bottom = virtual_rect.bottom;
+ } else {
+ clip_rect.left = gdigrab->offset_x;
+ clip_rect.top = gdigrab->offset_y;
+ clip_rect.right = gdigrab->width + gdigrab->offset_x;
+ clip_rect.bottom = gdigrab->height + gdigrab->offset_y;
+ }
+
+ if (clip_rect.left < virtual_rect.left ||
+ clip_rect.top < virtual_rect.top ||
+ clip_rect.right > virtual_rect.right ||
+ clip_rect.bottom > virtual_rect.bottom) {
+ av_log(s1, AV_LOG_ERROR,
+ "Capture area (%li,%li),(%li,%li) extends outside window area (%li,%li),(%li,%li)",
+ clip_rect.left, clip_rect.top,
+ clip_rect.right, clip_rect.bottom,
+ virtual_rect.left, virtual_rect.top,
+ virtual_rect.right, virtual_rect.bottom);
+ ret = AVERROR(EIO);
+ goto error;
+ }
+
+
+ if (name) {
+ av_log(s1, AV_LOG_INFO,
+ "Found window %s, capturing %lix%lix%i at (%li,%li)\n",
+ name,
+ clip_rect.right - clip_rect.left,
+ clip_rect.bottom - clip_rect.top,
+ bpp, clip_rect.left, clip_rect.top);
+ } else {
+ av_log(s1, AV_LOG_INFO,
+ "Capturing whole desktop as %lix%lix%i at (%li,%li)\n",
+ clip_rect.right - clip_rect.left,
+ clip_rect.bottom - clip_rect.top,
+ bpp, clip_rect.left, clip_rect.top);
+ }
+
+ if (clip_rect.right - clip_rect.left <= 0 ||
+ clip_rect.bottom - clip_rect.top <= 0 || bpp%8) {
+ av_log(s1, AV_LOG_ERROR, "Invalid properties, aborting\n");
+ ret = AVERROR(EIO);
+ goto error;
+ }
+
+ dest_hdc = CreateCompatibleDC(source_hdc);
+ if (!dest_hdc) {
+ WIN32_API_ERROR("Screen DC CreateCompatibleDC");
+ ret = AVERROR(EIO);
+ goto error;
+ }
+
+ /* Create a DIB and select it into the dest_hdc */
+ bmi.bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
+ bmi.bmiHeader.biWidth = clip_rect.right - clip_rect.left;
+ bmi.bmiHeader.biHeight = -(clip_rect.bottom - clip_rect.top);
+ bmi.bmiHeader.biPlanes = 1;
+ bmi.bmiHeader.biBitCount = bpp;
+ bmi.bmiHeader.biCompression = BI_RGB;
+ bmi.bmiHeader.biSizeImage = 0;
+ bmi.bmiHeader.biXPelsPerMeter = 0;
+ bmi.bmiHeader.biYPelsPerMeter = 0;
+ bmi.bmiHeader.biClrUsed = 0;
+ bmi.bmiHeader.biClrImportant = 0;
+ hbmp = CreateDIBSection(dest_hdc, &bmi, DIB_RGB_COLORS,
+ &buffer, NULL, 0);
+ if (!hbmp) {
+ WIN32_API_ERROR("Creating DIB Section");
+ ret = AVERROR(EIO);
+ goto error;
+ }
+
+ if (!SelectObject(dest_hdc, hbmp)) {
+ WIN32_API_ERROR("SelectObject");
+ ret = AVERROR(EIO);
+ goto error;
+ }
+
+ /* Get info from the bitmap */
+ GetObject(hbmp, sizeof(BITMAP), &bmp);
+
+ st = avformat_new_stream(s1, NULL);
+ if (!st) {
+ ret = AVERROR(ENOMEM);
+ goto error;
+ }
+ avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
+
+ gdigrab->frame_size = bmp.bmWidthBytes * bmp.bmHeight * bmp.bmPlanes;
+ gdigrab->header_size = sizeof(BITMAPFILEHEADER) + sizeof(BITMAPINFOHEADER) +
+ (bpp <= 8 ? (1 << bpp) : 0) * sizeof(RGBQUAD) /* palette size */;
+ gdigrab->time_base = av_inv_q(gdigrab->framerate);
+ gdigrab->time_frame = av_gettime() / av_q2d(gdigrab->time_base);
+
+ gdigrab->hwnd = hwnd;
+ gdigrab->source_hdc = source_hdc;
+ gdigrab->dest_hdc = dest_hdc;
+ gdigrab->hbmp = hbmp;
+ gdigrab->bmi = bmi;
+ gdigrab->buffer = buffer;
+ gdigrab->clip_rect = clip_rect;
+
+ gdigrab->cursor_error_printed = 0;
+
+ if (gdigrab->show_region) {
+ if (gdigrab_region_wnd_init(s1, gdigrab)) {
+ ret = AVERROR(EIO);
+ goto error;
+ }
+ }
+
+ st->avg_frame_rate = av_inv_q(gdigrab->time_base);
+
+ st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
+ st->codecpar->codec_id = AV_CODEC_ID_BMP;
+ st->codecpar->bit_rate = (gdigrab->header_size + gdigrab->frame_size) * 1/av_q2d(gdigrab->time_base) * 8;
+
+ return 0;
+
+error:
+ if (source_hdc)
+ ReleaseDC(hwnd, source_hdc);
+ if (dest_hdc)
+ DeleteDC(dest_hdc);
+ if (hbmp)
+ DeleteObject(hbmp);
+ if (source_hdc)
+ DeleteDC(source_hdc);
+ return ret;
+}
+
+/**
+ * Paints a mouse pointer in a Win32 image.
+ *
+ * @param s1 Context of the log information
+ * @param gdigrab Current grab structure
+ */
+static void paint_mouse_pointer(AVFormatContext *s1, struct gdigrab *gdigrab)
+{
+ CURSORINFO ci = {0};
+
+#define CURSOR_ERROR(str) \
+ if (!gdigrab->cursor_error_printed) { \
+ WIN32_API_ERROR(str); \
+ gdigrab->cursor_error_printed = 1; \
+ }
+
+ ci.cbSize = sizeof(ci);
+
+ if (GetCursorInfo(&ci)) {
+ HCURSOR icon = CopyCursor(ci.hCursor);
+ ICONINFO info;
+ POINT pos;
+ RECT clip_rect = gdigrab->clip_rect;
+ HWND hwnd = gdigrab->hwnd;
+ int vertres = GetDeviceCaps(gdigrab->source_hdc, VERTRES);
+ int desktopvertres = GetDeviceCaps(gdigrab->source_hdc, DESKTOPVERTRES);
+ info.hbmMask = NULL;
+ info.hbmColor = NULL;
+
+ if (ci.flags != CURSOR_SHOWING)
+ goto icon_error; /* free the copied cursor instead of leaking it */
+
+ if (!icon) {
+ /* Use the standard arrow cursor as a fallback.
+ * You'll probably only hit this in Wine, which can't fetch
+ * the current system cursor. */
+ icon = CopyCursor(LoadCursor(NULL, IDC_ARROW));
+ }
+
+ if (!GetIconInfo(icon, &info)) {
+ CURSOR_ERROR("Could not get icon info");
+ goto icon_error;
+ }
+
+ pos.x = ci.ptScreenPos.x - clip_rect.left - info.xHotspot;
+ pos.y = ci.ptScreenPos.y - clip_rect.top - info.yHotspot;
+
+ if (hwnd) {
+ RECT rect;
+
+ if (GetWindowRect(hwnd, &rect)) {
+ pos.x -= rect.left;
+ pos.y -= rect.top;
+ } else {
+ CURSOR_ERROR("Couldn't get window rectangle");
+ goto icon_error;
+ }
+ }
+
+ // scale the position to keep the mouse location correct on HiDPI screens
+ pos.x = pos.x * desktopvertres / vertres;
+ pos.y = pos.y * desktopvertres / vertres;
+
+ av_log(s1, AV_LOG_DEBUG, "Cursor pos (%li,%li) -> (%li,%li)\n",
+ ci.ptScreenPos.x, ci.ptScreenPos.y, pos.x, pos.y);
+
+ if (pos.x >= 0 && pos.x <= clip_rect.right - clip_rect.left &&
+ pos.y >= 0 && pos.y <= clip_rect.bottom - clip_rect.top) {
+ if (!DrawIcon(gdigrab->dest_hdc, pos.x, pos.y, icon))
+ CURSOR_ERROR("Couldn't draw icon");
+ }
+
+icon_error:
+ if (info.hbmMask)
+ DeleteObject(info.hbmMask);
+ if (info.hbmColor)
+ DeleteObject(info.hbmColor);
+ if (icon)
+ DestroyCursor(icon);
+ } else {
+ CURSOR_ERROR("Couldn't get cursor info");
+ }
+}
+
+/**
+ * Grabs a frame from gdi (public device demuxer API).
+ *
+ * @param s1 Context from avformat core
+ * @param pkt Packet holding the grabbed frame
+ * @return frame size in bytes
+ */
+static int gdigrab_read_packet(AVFormatContext *s1, AVPacket *pkt)
+{
+ struct gdigrab *gdigrab = s1->priv_data;
+
+ HDC dest_hdc = gdigrab->dest_hdc;
+ HDC source_hdc = gdigrab->source_hdc;
+ RECT clip_rect = gdigrab->clip_rect;
+ AVRational time_base = gdigrab->time_base;
+ int64_t time_frame = gdigrab->time_frame;
+
+ BITMAPFILEHEADER bfh;
+ int file_size = gdigrab->header_size + gdigrab->frame_size;
+
+ int64_t curtime, delay;
+
+ /* Calculate the time of the next frame */
+ time_frame += INT64_C(1000000);
+
+ /* Run Window message processing queue */
+ if (gdigrab->show_region)
+ gdigrab_region_wnd_update(s1, gdigrab);
+
+ /* wait based on the frame rate */
+ for (;;) {
+ curtime = av_gettime();
+ delay = time_frame * av_q2d(time_base) - curtime;
+ if (delay <= 0) {
+ if (delay < INT64_C(-1000000) * av_q2d(time_base)) {
+ time_frame += INT64_C(1000000);
+ }
+ break;
+ }
+ if (s1->flags & AVFMT_FLAG_NONBLOCK) {
+ return AVERROR(EAGAIN);
+ } else {
+ av_usleep(delay);
+ }
+ }
+
+ if (av_new_packet(pkt, file_size) < 0)
+ return AVERROR(ENOMEM);
+ pkt->pts = curtime;
+
+ /* Blit screen grab */
+ if (!BitBlt(dest_hdc, 0, 0,
+ clip_rect.right - clip_rect.left,
+ clip_rect.bottom - clip_rect.top,
+ source_hdc,
+ clip_rect.left, clip_rect.top, SRCCOPY | CAPTUREBLT)) {
+ WIN32_API_ERROR("Failed to capture image");
+ return AVERROR(EIO);
+ }
+ if (gdigrab->draw_mouse)
+ paint_mouse_pointer(s1, gdigrab);
+
+ /* Copy bits to packet data */
+
+ bfh.bfType = 0x4d42; /* "BM" in little-endian */
+ bfh.bfSize = file_size;
+ bfh.bfReserved1 = 0;
+ bfh.bfReserved2 = 0;
+ bfh.bfOffBits = gdigrab->header_size;
+
+ memcpy(pkt->data, &bfh, sizeof(bfh));
+
+ memcpy(pkt->data + sizeof(bfh), &gdigrab->bmi.bmiHeader, sizeof(gdigrab->bmi.bmiHeader));
+
+ if (gdigrab->bmi.bmiHeader.biBitCount <= 8)
+ GetDIBColorTable(dest_hdc, 0, 1 << gdigrab->bmi.bmiHeader.biBitCount,
+ (RGBQUAD *) (pkt->data + sizeof(bfh) + sizeof(gdigrab->bmi.bmiHeader)));
+
+ memcpy(pkt->data + gdigrab->header_size, gdigrab->buffer, gdigrab->frame_size);
+
+ gdigrab->time_frame = time_frame;
+
+ return gdigrab->header_size + gdigrab->frame_size;
+}
+
+/**
+ * Closes gdi frame grabber (public device demuxer API).
+ *
+ * @param s1 Context from avformat core
+ * @return 0 success, !0 failure
+ */
+static int gdigrab_read_close(AVFormatContext *s1)
+{
+ struct gdigrab *s = s1->priv_data;
+
+ if (s->show_region)
+ gdigrab_region_wnd_destroy(s1, s);
+
+ if (s->source_hdc)
+ ReleaseDC(s->hwnd, s->source_hdc);
+ if (s->dest_hdc)
+ DeleteDC(s->dest_hdc);
+ if (s->hbmp)
+ DeleteObject(s->hbmp);
+ if (s->source_hdc)
+ DeleteDC(s->source_hdc);
+
+ return 0;
+}
+
+#define OFFSET(x) offsetof(struct gdigrab, x)
+#define DEC AV_OPT_FLAG_DECODING_PARAM
+static const AVOption options[] = {
+ { "draw_mouse", "draw the mouse pointer", OFFSET(draw_mouse), AV_OPT_TYPE_INT, {.i64 = 1}, 0, 1, DEC },
+ { "show_region", "draw border around capture area", OFFSET(show_region), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, DEC },
+ { "framerate", "set video frame rate", OFFSET(framerate), AV_OPT_TYPE_VIDEO_RATE, {.str = "ntsc"}, 0, 0, DEC },
+ { "video_size", "set video frame size", OFFSET(width), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL}, 0, 0, DEC },
+ { "offset_x", "capture area x offset", OFFSET(offset_x), AV_OPT_TYPE_INT, {.i64 = 0}, INT_MIN, INT_MAX, DEC },
+ { "offset_y", "capture area y offset", OFFSET(offset_y), AV_OPT_TYPE_INT, {.i64 = 0}, INT_MIN, INT_MAX, DEC },
+ { NULL },
+};
+
+static const AVClass gdigrab_class = {
+ .class_name = "GDIgrab indev",
+ .item_name = av_default_item_name,
+ .option = options,
+ .version = LIBAVUTIL_VERSION_INT,
+};
+
+/** gdi grabber device demuxer declaration */
+AVInputFormat ff_gdigrab_demuxer = {
+ .name = "gdigrab",
+ .long_name = NULL_IF_CONFIG_SMALL("GDI API Windows frame grabber"),
+ .priv_data_size = sizeof(struct gdigrab),
+ .read_header = gdigrab_read_header,
+ .read_packet = gdigrab_read_packet,
+ .read_close = gdigrab_read_close,
+ .flags = AVFMT_NOFILE,
+ .priv_class = &gdigrab_class,
+};
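
Each packet the grabber emits is a complete in-memory BMP file: BITMAPFILEHEADER, BITMAPINFOHEADER, an optional palette for modes of 8 bpp or less, then the DIB bits, with bfOffBits pointing at header_size. A worked example of the size arithmetic from gdigrab_read_header() (the 1920x1080, 32 bpp desktop is an assumed example, and 14/40 are the packed Win32 struct sizes):

#include <stdio.h>

int main(void)
{
    int width = 1920, height = 1080, bpp = 32;

    int bfh_size = 14;                      /* sizeof(BITMAPFILEHEADER) */
    int bih_size = 40;                      /* sizeof(BITMAPINFOHEADER) */
    int palette  = (bpp <= 8 ? (1 << bpp) : 0) * 4;  /* RGBQUAD entries */
    int header   = bfh_size + bih_size + palette;
    int stride   = width * (bpp / 8);       /* bmWidthBytes */
    int frame    = stride * height;

    /* bfOffBits in the emitted packet equals header_size */
    printf("header_size=%d frame_size=%d file_size=%d\n",
           header, frame, header + frame);
    return 0;
}
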
diff --git a/libavdevice/iec61883.c b/libavdevice/iec61883.c
new file mode 100644
index 0000000000..c45ae9ae5c
--- /dev/null
+++ b/libavdevice/iec61883.c
@@ -0,0 +1,500 @@
+/*
+ * Copyright (c) 2012 Georg Lippitsch <georg.lippitsch@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * libiec61883 interface
+ */
+
+#include <sys/poll.h>
+#include <libraw1394/raw1394.h>
+#include <libavc1394/avc1394.h>
+#include <libavc1394/rom1394.h>
+#include <libiec61883/iec61883.h>
+#include "libavformat/dv.h"
+#include "libavformat/mpegts.h"
+#include "libavutil/opt.h"
+#include "avdevice.h"
+
+#define THREADS HAVE_PTHREADS
+
+#if THREADS
+#include <pthread.h>
+#endif
+
+#define MOTDCT_SPEC_ID 0x00005068
+#define IEC61883_AUTO 0
+#define IEC61883_DV 1
+#define IEC61883_HDV 2
+
+/**
+ * For DV, one packet corresponds exactly to one frame.
+ * For HDV, these are MPEG-2 transport stream packets.
+ * The queue is implemented as a linked list.
+ */
+typedef struct DVPacket {
+ uint8_t *buf; ///< actual buffer data
+ int len; ///< size of buffer allocated
+ struct DVPacket *next; ///< next DVPacket
+} DVPacket;
+
+struct iec61883_data {
+ AVClass *class;
+ raw1394handle_t raw1394; ///< handle for libraw1394
+ iec61883_dv_fb_t iec61883_dv; ///< handle for libiec61883 when used with DV
+ iec61883_mpeg2_t iec61883_mpeg2; ///< handle for libiec61883 when used with HDV
+
+ DVDemuxContext *dv_demux; ///< generic DV muxing/demuxing context
+ MpegTSContext *mpeg_demux; ///< generic HDV muxing/demuxing context
+
+ DVPacket *queue_first; ///< first element of packet queue
+ DVPacket *queue_last; ///< last element of packet queue
+
+ char *device_guid; ///< to select one of multiple DV devices
+
+ int packets; ///< Number of packets queued
+ int max_packets; ///< Max. number of packets in queue
+
+ int bandwidth; ///< returned by libiec61883
+ int channel; ///< returned by libiec61883
+ int input_port; ///< returned by libiec61883
+ int type; ///< Stream type, to distinguish DV/HDV
+ int node; ///< returned by libiec61883
+ int output_port; ///< returned by libiec61883
+ int thread_loop; ///< Condition for thread while-loop
+ int receiving; ///< True as soon as data from the device is available
+ int receive_error; ///< Set in the receive task in case of error
+ int eof; ///< True as soon as no more data is available
+
+ struct pollfd raw1394_poll; ///< to poll for new data from libraw1394
+
+ /** Parse function for DV/HDV differs, so this is set before packets arrive */
+ int (*parse_queue)(struct iec61883_data *dv, AVPacket *pkt);
+
+#if THREADS
+ pthread_t receive_task_thread;
+ pthread_mutex_t mutex;
+ pthread_cond_t cond;
+#endif
+};
+
+static int iec61883_callback(unsigned char *data, int length,
+ int complete, void *callback_data)
+{
+ struct iec61883_data *dv = callback_data;
+ DVPacket *packet;
+ int ret;
+
+#if THREADS
+ pthread_mutex_lock(&dv->mutex);
+#endif
+
+ if (dv->packets >= dv->max_packets) {
+ av_log(NULL, AV_LOG_ERROR, "DV packet queue overrun, dropping.\n");
+ ret = 0;
+ goto exit;
+ }
+
+ packet = av_mallocz(sizeof(*packet));
+ if (!packet) {
+ ret = -1;
+ goto exit;
+ }
+
+ packet->buf = av_malloc(length);
+ if (!packet->buf) {
+ av_free(packet); /* do not leak the queue node on allocation failure */
+ ret = -1;
+ goto exit;
+ }
+ packet->len = length;
+
+ memcpy(packet->buf, data, length);
+
+ if (dv->queue_first) {
+ dv->queue_last->next = packet;
+ dv->queue_last = packet;
+ } else {
+ dv->queue_first = packet;
+ dv->queue_last = packet;
+ }
+ dv->packets++;
+
+ ret = 0;
+
+exit:
+#if THREADS
+ pthread_cond_broadcast(&dv->cond);
+ pthread_mutex_unlock(&dv->mutex);
+#endif
+ return ret;
+}
+
+static void *iec61883_receive_task(void *opaque)
+{
+ struct iec61883_data *dv = (struct iec61883_data *)opaque;
+ int result;
+
+#if THREADS
+ while (dv->thread_loop)
+#endif
+ {
+ while ((result = poll(&dv->raw1394_poll, 1, 200)) < 0) {
+ if (!(errno == EAGAIN || errno == EINTR)) {
+ av_log(NULL, AV_LOG_ERROR, "Raw1394 poll error occurred.\n");
+ dv->receive_error = AVERROR(EIO);
+ return NULL;
+ }
+ }
+ if (result > 0 && ((dv->raw1394_poll.revents & POLLIN)
+ || (dv->raw1394_poll.revents & POLLPRI))) {
+ dv->receiving = 1;
+ raw1394_loop_iterate(dv->raw1394);
+ } else if (dv->receiving) {
+ av_log(NULL, AV_LOG_ERROR, "No more input data available\n");
+#if THREADS
+ pthread_mutex_lock(&dv->mutex);
+ dv->eof = 1;
+ pthread_cond_broadcast(&dv->cond);
+ pthread_mutex_unlock(&dv->mutex);
+#else
+ dv->eof = 1;
+#endif
+ return NULL;
+ }
+ }
+
+ return NULL;
+}
+
+static int iec61883_parse_queue_dv(struct iec61883_data *dv, AVPacket *pkt)
+{
+ DVPacket *packet;
+ int size;
+
+ size = avpriv_dv_get_packet(dv->dv_demux, pkt);
+ if (size > 0)
+ return size;
+
+ packet = dv->queue_first;
+ if (!packet)
+ return -1;
+
+ size = avpriv_dv_produce_packet(dv->dv_demux, pkt,
+ packet->buf, packet->len, -1);
+ dv->queue_first = packet->next;
+ av_free(packet);
+ dv->packets--;
+
+ if (size > 0)
+ return size;
+
+ return -1;
+}
+
+static int iec61883_parse_queue_hdv(struct iec61883_data *dv, AVPacket *pkt)
+{
+ DVPacket *packet;
+ int size;
+
+ while (dv->queue_first) {
+ packet = dv->queue_first;
+ size = avpriv_mpegts_parse_packet(dv->mpeg_demux, pkt, packet->buf,
+ packet->len);
+ dv->queue_first = packet->next;
+ av_freep(&packet->buf);
+ av_freep(&packet);
+ dv->packets--;
+
+ if (size > 0)
+ return size;
+ }
+
+ return -1;
+}
+
+static int iec61883_read_header(AVFormatContext *context)
+{
+ struct iec61883_data *dv = context->priv_data;
+ struct raw1394_portinfo pinf[16];
+ rom1394_directory rom_dir;
+ char *endptr;
+ int inport;
+ int nb_ports;
+ int port = -1;
+ int response;
+ int i, j = 0;
+ uint64_t guid = 0;
+
+ dv->input_port = -1;
+ dv->output_port = -1;
+ dv->channel = -1;
+
+ dv->raw1394 = raw1394_new_handle();
+
+ if (!dv->raw1394) {
+ av_log(context, AV_LOG_ERROR, "Failed to open IEEE1394 interface.\n");
+ return AVERROR(EIO);
+ }
+
+ if ((nb_ports = raw1394_get_port_info(dv->raw1394, pinf, 16)) < 0) {
+ av_log(context, AV_LOG_ERROR, "Failed to get number of IEEE1394 ports.\n");
+ goto fail;
+ }
+
+ inport = strtol(context->filename, &endptr, 10);
+ if (endptr != context->filename && *endptr == '\0') {
+ av_log(context, AV_LOG_INFO, "Selecting IEEE1394 port: %d\n", inport);
+ j = inport;
+ nb_ports = inport + 1;
+ } else if (strcmp(context->filename, "auto")) {
+ av_log(context, AV_LOG_ERROR, "Invalid input \"%s\", you should specify "
+ "\"auto\" for auto-detection, or the port number.\n", context->filename);
+ goto fail;
+ }
+
+ if (dv->device_guid) {
+ if (sscanf(dv->device_guid, "%"SCNu64, &guid) != 1) {
+ av_log(context, AV_LOG_INFO, "Invalid dvguid parameter: %s\n",
+ dv->device_guid);
+ goto fail;
+ }
+ }
+
+ for (; j < nb_ports && port==-1; ++j) {
+ raw1394_destroy_handle(dv->raw1394);
+
+ if (!(dv->raw1394 = raw1394_new_handle_on_port(j))) {
+ av_log(context, AV_LOG_ERROR, "Failed setting IEEE1394 port.\n");
+ goto fail;
+ }
+
+ for (i=0; i<raw1394_get_nodecount(dv->raw1394); ++i) {
+
+ /* Select device explicitly by GUID */
+
+ if (guid > 1) {
+ if (guid == rom1394_get_guid(dv->raw1394, i)) {
+ dv->node = i;
+ port = j;
+ break;
+ }
+ } else {
+
+ /* Select first AV/C tape recorder player node */
+
+ if (rom1394_get_directory(dv->raw1394, i, &rom_dir) < 0)
+ continue;
+ if (((rom1394_get_node_type(&rom_dir) == ROM1394_NODE_TYPE_AVC) &&
+ avc1394_check_subunit_type(dv->raw1394, i, AVC1394_SUBUNIT_TYPE_VCR)) ||
+ (rom_dir.unit_spec_id == MOTDCT_SPEC_ID)) {
+ rom1394_free_directory(&rom_dir);
+ dv->node = i;
+ port = j;
+ break;
+ }
+ rom1394_free_directory(&rom_dir);
+ }
+ }
+ }
+
+ if (port == -1) {
+ av_log(context, AV_LOG_ERROR, "No AV/C devices found.\n");
+ goto fail;
+ }
+
+ /* Provide bus sanity for multiple connections */
+
+ iec61883_cmp_normalize_output(dv->raw1394, 0xffc0 | dv->node);
+
+ /* Find out if device is DV or HDV */
+
+ if (dv->type == IEC61883_AUTO) {
+ response = avc1394_transaction(dv->raw1394, dv->node,
+ AVC1394_CTYPE_STATUS |
+ AVC1394_SUBUNIT_TYPE_TAPE_RECORDER |
+ AVC1394_SUBUNIT_ID_0 |
+ AVC1394_VCR_COMMAND_OUTPUT_SIGNAL_MODE |
+ 0xFF, 2);
+ response = AVC1394_GET_OPERAND0(response);
+ dv->type = (response == 0x10 || response == 0x90 || response == 0x1A || response == 0x9A) ?
+ IEC61883_HDV : IEC61883_DV;
+ }
+
+ /* Connect to device, and do initialization */
+
+ dv->channel = iec61883_cmp_connect(dv->raw1394, dv->node, &dv->output_port,
+ raw1394_get_local_id(dv->raw1394),
+ &dv->input_port, &dv->bandwidth);
+
+ if (dv->channel < 0)
+ dv->channel = 63;
+
+ if (!dv->max_packets)
+ dv->max_packets = 100;
+
+ if (CONFIG_MPEGTS_DEMUXER && dv->type == IEC61883_HDV) {
+
+ /* Init HDV receive */
+
+ avformat_new_stream(context, NULL);
+
+ dv->mpeg_demux = avpriv_mpegts_parse_open(context);
+ if (!dv->mpeg_demux)
+ goto fail;
+
+ dv->parse_queue = iec61883_parse_queue_hdv;
+
+ dv->iec61883_mpeg2 = iec61883_mpeg2_recv_init(dv->raw1394,
+ (iec61883_mpeg2_recv_t)iec61883_callback,
+ dv);
+
+ dv->max_packets *= 766;
+ } else {
+
+ /* Init DV receive */
+
+ dv->dv_demux = avpriv_dv_init_demux(context);
+ if (!dv->dv_demux)
+ goto fail;
+
+ dv->parse_queue = iec61883_parse_queue_dv;
+
+ dv->iec61883_dv = iec61883_dv_fb_init(dv->raw1394, iec61883_callback, dv);
+ }
+
+ dv->raw1394_poll.fd = raw1394_get_fd(dv->raw1394);
+ dv->raw1394_poll.events = POLLIN | POLLERR | POLLHUP | POLLPRI;
+
+ /* Actually start receiving */
+
+ if (dv->type == IEC61883_HDV)
+ iec61883_mpeg2_recv_start(dv->iec61883_mpeg2, dv->channel);
+ else
+ iec61883_dv_fb_start(dv->iec61883_dv, dv->channel);
+
+#if THREADS
+ dv->thread_loop = 1;
+ if (pthread_mutex_init(&dv->mutex, NULL))
+ goto fail;
+ if (pthread_cond_init(&dv->cond, NULL))
+ goto fail;
+ if (pthread_create(&dv->receive_task_thread, NULL, iec61883_receive_task, dv))
+ goto fail;
+#endif
+
+ return 0;
+
+fail:
+ raw1394_destroy_handle(dv->raw1394);
+ return AVERROR(EIO);
+}
+
+static int iec61883_read_packet(AVFormatContext *context, AVPacket *pkt)
+{
+ struct iec61883_data *dv = context->priv_data;
+ int size;
+
+ /* Try to parse frames from the queue. */
+
+#if THREADS
+ pthread_mutex_lock(&dv->mutex);
+ while ((size = dv->parse_queue(dv, pkt)) == -1)
+ if (!dv->eof)
+ pthread_cond_wait(&dv->cond, &dv->mutex);
+ else
+ break;
+ pthread_mutex_unlock(&dv->mutex);
+#else
+ while ((size = dv->parse_queue(dv, pkt)) == -1) {
+ iec61883_receive_task((void *)dv);
+ if (dv->receive_error)
+ return dv->receive_error;
+ }
+#endif
+
+ return size;
+}
+
+static int iec61883_close(AVFormatContext *context)
+{
+ struct iec61883_data *dv = context->priv_data;
+
+#if THREADS
+ dv->thread_loop = 0;
+ pthread_join(dv->receive_task_thread, NULL);
+ pthread_cond_destroy(&dv->cond);
+ pthread_mutex_destroy(&dv->mutex);
+#endif
+
+ if (CONFIG_MPEGTS_DEMUXER && dv->type == IEC61883_HDV) {
+ iec61883_mpeg2_recv_stop(dv->iec61883_mpeg2);
+ iec61883_mpeg2_close(dv->iec61883_mpeg2);
+ avpriv_mpegts_parse_close(dv->mpeg_demux);
+ } else {
+ iec61883_dv_fb_stop(dv->iec61883_dv);
+ iec61883_dv_fb_close(dv->iec61883_dv);
+ }
+ while (dv->queue_first) {
+ DVPacket *packet = dv->queue_first;
+ dv->queue_first = packet->next;
+ av_freep(&packet->buf);
+ av_freep(&packet);
+ }
+
+ iec61883_cmp_disconnect(dv->raw1394, dv->node, dv->output_port,
+ raw1394_get_local_id(dv->raw1394),
+ dv->input_port, dv->channel, dv->bandwidth);
+
+ raw1394_destroy_handle(dv->raw1394);
+
+ return 0;
+}
+
+static const AVOption options[] = {
+ { "dvtype", "override autodetection of DV/HDV", offsetof(struct iec61883_data, type), AV_OPT_TYPE_INT, {.i64 = IEC61883_AUTO}, IEC61883_AUTO, IEC61883_HDV, AV_OPT_FLAG_DECODING_PARAM, "dvtype" },
+ { "auto", "auto detect DV/HDV", 0, AV_OPT_TYPE_CONST, {.i64 = IEC61883_AUTO}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "dvtype" },
+ { "dv", "force device being treated as DV device", 0, AV_OPT_TYPE_CONST, {.i64 = IEC61883_DV}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "dvtype" },
+ { "hdv" , "force device being treated as HDV device", 0, AV_OPT_TYPE_CONST, {.i64 = IEC61883_HDV}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "dvtype" },
+ { "dvbuffer", "set queue buffer size (in packets)", offsetof(struct iec61883_data, max_packets), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
+ { "dvguid", "select one of multiple DV devices by its GUID", offsetof(struct iec61883_data, device_guid), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, AV_OPT_FLAG_DECODING_PARAM },
+ { NULL },
+};
+
+static const AVClass iec61883_class = {
+ .class_name = "iec61883 indev",
+ .item_name = av_default_item_name,
+ .option = options,
+ .version = LIBAVUTIL_VERSION_INT,
+ .category = AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT,
+};
+
+AVInputFormat ff_iec61883_demuxer = {
+ .name = "iec61883",
+ .long_name = NULL_IF_CONFIG_SMALL("libiec61883 (new DV1394) A/V input device"),
+ .priv_data_size = sizeof(struct iec61883_data),
+ .read_header = iec61883_read_header,
+ .read_packet = iec61883_read_packet,
+ .read_close = iec61883_close,
+ .flags = AVFMT_NOFILE,
+ .priv_class = &iec61883_class,
+};
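
Condensed, the threading above is a classic producer/consumer handoff: the libiec61883 callback appends to a singly linked queue and signals, while read_packet blocks on the condition variable until a packet or EOF arrives. A standalone sketch of that pattern (names are illustrative, not the driver's API):

#include <pthread.h>
#include <stdlib.h>

typedef struct Node { void *buf; struct Node *next; } Node;

static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond  = PTHREAD_COND_INITIALIZER;
static Node *queue_first, *queue_last;
static int eof;

/* producer: runs in the receive thread (cf. iec61883_callback) */
static void enqueue(void *buf)
{
    Node *n = malloc(sizeof(*n));
    if (!n)
        return;
    n->buf  = buf;
    n->next = NULL;
    pthread_mutex_lock(&mutex);
    if (queue_last) queue_last->next = n; else queue_first = n;
    queue_last = n;
    pthread_cond_broadcast(&cond);           /* wake the waiting reader */
    pthread_mutex_unlock(&mutex);
}

/* consumer: runs in the demuxer thread (cf. iec61883_read_packet) */
static void *dequeue(void)
{
    Node *n;
    void *buf = NULL;
    pthread_mutex_lock(&mutex);
    while (!queue_first && !eof)             /* block until data or EOF */
        pthread_cond_wait(&cond, &mutex);
    if ((n = queue_first)) {
        queue_first = n->next;
        if (!queue_first) queue_last = NULL;
        buf = n->buf;
        free(n);
    }
    pthread_mutex_unlock(&mutex);
    return buf;
}
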
diff --git a/libavdevice/internal.h b/libavdevice/internal.h
new file mode 100644
index 0000000000..e222cf204d
--- /dev/null
+++ b/libavdevice/internal.h
@@ -0,0 +1,28 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVDEVICE_INTERNAL_H
+#define AVDEVICE_INTERNAL_H
+
+#include "libavformat/avformat.h"
+
+av_warn_unused_result
+int ff_alloc_input_device_context(struct AVFormatContext **avctx, struct AVInputFormat *iformat,
+ const char *format);
+
+#endif
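
A hypothetical caller sketch, inferred only from the declaration above: the two-parameter signature suggests the helper accepts either an explicit iformat or a format name, and returns a context bound to that input device so its private options can be set before it is opened or queried. None of this is spelled out in the header itself, so treat the comment below as an assumption:

#include "libavdevice/internal.h"

static int probe_device(const char *name)
{
    AVFormatContext *ctx = NULL;
    int ret = ff_alloc_input_device_context(&ctx, NULL, name);
    if (ret < 0)
        return ret;
    /* ctx is presumably bound to the named input device here; private
     * options could be applied before actually opening the device */
    avformat_free_context(ctx);
    return 0;
}
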
diff --git a/libavdevice/jack.c b/libavdevice/jack.c
index 0b4deee01c..34e21527a7 100644
--- a/libavdevice/jack.c
+++ b/libavdevice/jack.c
@@ -3,20 +3,20 @@
* Copyright (c) 2009 Samalyse
* Author: Olivier Guilyardi <olivier samalyse com>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -33,6 +33,17 @@
#include "libavformat/avformat.h"
#include "libavformat/internal.h"
#include "timefilter.h"
+#include "avdevice.h"
+
+#if HAVE_DISPATCH_DISPATCH_H
+#include <dispatch/dispatch.h>
+#define sem_t dispatch_semaphore_t
+#define sem_init(psem,x,val) *psem = dispatch_semaphore_create(val)
+#define sem_post(psem) dispatch_semaphore_signal(*psem)
+#define sem_wait(psem) dispatch_semaphore_wait(*psem, DISPATCH_TIME_FOREVER)
+#define sem_timedwait(psem, val) dispatch_semaphore_wait(*psem, dispatch_walltime(val, 0))
+#define sem_destroy(psem) dispatch_release(*psem)
+#endif
/**
* Size of the internal FIFO buffers as a number of audio packets
@@ -152,7 +163,6 @@ static int start_jack(AVFormatContext *context)
JackData *self = context->priv_data;
jack_status_t status;
int i, test;
- double o, period;
/* Register as a JACK client, using the context filename as client name. */
self->client = jack_client_open(context->filename, JackNullOption, &status);
@@ -164,7 +174,7 @@ static int start_jack(AVFormatContext *context)
sem_init(&self->packet_count, 0, 0);
self->sample_rate = jack_get_sample_rate(self->client);
- self->ports = av_malloc(self->nports * sizeof(*self->ports));
+ self->ports = av_malloc_array(self->nports, sizeof(*self->ports));
if (!self->ports)
return AVERROR(ENOMEM);
self->buffer_size = jack_get_buffer_size(self->client);
@@ -190,18 +200,16 @@ static int start_jack(AVFormatContext *context)
jack_set_xrun_callback(self->client, xrun_callback, self);
/* Create time filter */
- period = (double) self->buffer_size / self->sample_rate;
- o = 2 * M_PI * 1.5 * period; /// bandwidth: 1.5Hz
- self->timefilter = ff_timefilter_new (1.0 / self->sample_rate, sqrt(2 * o), o * o);
+ self->timefilter = ff_timefilter_new (1.0 / self->sample_rate, self->buffer_size, 1.5);
if (!self->timefilter) {
jack_client_close(self->client);
return AVERROR(ENOMEM);
}
/* Create FIFO buffers */
- self->filled_pkts = av_fifo_alloc(FIFO_PACKETS_NUM * sizeof(AVPacket));
+ self->filled_pkts = av_fifo_alloc_array(FIFO_PACKETS_NUM, sizeof(AVPacket));
/* New packets FIFO with one extra packet for safety against underruns */
- self->new_pkts = av_fifo_alloc((FIFO_PACKETS_NUM + 1) * sizeof(AVPacket));
+ self->new_pkts = av_fifo_alloc_array((FIFO_PACKETS_NUM + 1), sizeof(AVPacket));
if (!self->new_pkts) {
jack_client_close(self->client);
return AVERROR(ENOMEM);
@@ -215,14 +223,14 @@ static int start_jack(AVFormatContext *context)
}
-static void free_pkt_fifo(AVFifoBuffer *fifo)
+static void free_pkt_fifo(AVFifoBuffer **fifo)
{
AVPacket pkt;
- while (av_fifo_size(fifo)) {
- av_fifo_generic_read(fifo, &pkt, sizeof(pkt), NULL);
+ while (av_fifo_size(*fifo)) {
+ av_fifo_generic_read(*fifo, &pkt, sizeof(pkt), NULL);
av_packet_unref(&pkt);
}
- av_fifo_free(fifo);
+ av_fifo_freep(fifo);
}
static void stop_jack(JackData *self)
@@ -233,8 +241,8 @@ static void stop_jack(JackData *self)
jack_client_close(self->client);
}
sem_destroy(&self->packet_count);
- free_pkt_fifo(self->new_pkts);
- free_pkt_fifo(self->filled_pkts);
+ free_pkt_fifo(&self->new_pkts);
+ free_pkt_fifo(&self->filled_pkts);
av_freep(&self->ports);
ff_timefilter_destroy(self->timefilter);
}
@@ -345,6 +353,7 @@ static const AVClass jack_indev_class = {
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
+ .category = AV_CLASS_CATEGORY_DEVICE_AUDIO_INPUT,
};
AVInputFormat ff_jack_demuxer = {
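
The timefilter change in start_jack() above replaces hand-computed delay-locked-loop feedback constants with the new ff_timefilter_new(time_base, period, bandwidth) signature, where the period is now given in time_base units (here, the JACK buffer size in samples) and the filter derives the coefficients itself. A standalone sketch of the equivalence, assuming the usual DLL formulas (o = 2 * pi * bandwidth * period * time_base, feedback2 = sqrt(2 * o), feedback3 = o * o); the JACK parameters below are illustrative:

#include <math.h>
#include <stdio.h>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

int main(void)
{
    double sample_rate = 48000.0, buffer_size = 1024.0, bandwidth = 1.5;
    double time_base = 1.0 / sample_rate;

    /* what start_jack() used to compute inline */
    double period_sec = buffer_size / sample_rate;
    double o_old = 2 * M_PI * bandwidth * period_sec;
    printf("old: feedback2=%f feedback3=%f\n",
           sqrt(2 * o_old), o_old * o_old);

    /* what ff_timefilter_new(time_base, buffer_size, bandwidth) is assumed
     * to compute internally; identical, since buffer_size * time_base is
     * the period in seconds */
    double o_new = 2 * M_PI * bandwidth * buffer_size * time_base;
    printf("new: feedback2=%f feedback3=%f\n",
           sqrt(2 * o_new), o_new * o_new);
    return 0;
}
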
diff --git a/libavdevice/lavfi.c b/libavdevice/lavfi.c
new file mode 100644
index 0000000000..a52d4730e5
--- /dev/null
+++ b/libavdevice/lavfi.c
@@ -0,0 +1,517 @@
+/*
+ * Copyright (c) 2011 Stefano Sabatini
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * libavfilter virtual input device
+ */
+
+/* #define DEBUG */
+
+#include <float.h> /* DBL_MIN, DBL_MAX */
+
+#include "libavutil/bprint.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/file.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/internal.h"
+#include "libavutil/log.h"
+#include "libavutil/mem.h"
+#include "libavutil/opt.h"
+#include "libavutil/parseutils.h"
+#include "libavutil/pixdesc.h"
+#include "libavfilter/avfilter.h"
+#include "libavfilter/avfiltergraph.h"
+#include "libavfilter/buffersink.h"
+#include "libavformat/avio_internal.h"
+#include "libavformat/internal.h"
+#include "avdevice.h"
+
+typedef struct {
+ AVClass *class; ///< class for private options
+ char *graph_str;
+ char *graph_filename;
+ char *dump_graph;
+ AVFilterGraph *graph;
+ AVFilterContext **sinks;
+ int *sink_stream_map;
+ int *sink_eof;
+ int *stream_sink_map;
+ int *sink_stream_subcc_map;
+ AVFrame *decoded_frame;
+ int nb_sinks;
+ AVPacket subcc_packet;
+} LavfiContext;
+
+static int *create_all_formats(int n)
+{
+ int i, j, *fmts, count = 0;
+
+ for (i = 0; i < n; i++) {
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(i);
+ if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
+ count++;
+ }
+
+ if (!(fmts = av_malloc((count+1) * sizeof(int))))
+ return NULL;
+ for (j = 0, i = 0; i < n; i++) {
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(i);
+ if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
+ fmts[j++] = i;
+ }
+ fmts[j] = -1;
+ return fmts;
+}
+
+av_cold static int lavfi_read_close(AVFormatContext *avctx)
+{
+ LavfiContext *lavfi = avctx->priv_data;
+
+ av_freep(&lavfi->sink_stream_map);
+ av_freep(&lavfi->sink_eof);
+ av_freep(&lavfi->stream_sink_map);
+ av_freep(&lavfi->sink_stream_subcc_map);
+ av_freep(&lavfi->sinks);
+ avfilter_graph_free(&lavfi->graph);
+ av_frame_free(&lavfi->decoded_frame);
+
+ return 0;
+}
+
+static int create_subcc_streams(AVFormatContext *avctx)
+{
+ LavfiContext *lavfi = avctx->priv_data;
+ AVStream *st;
+ int stream_idx, sink_idx;
+
+ for (stream_idx = 0; stream_idx < lavfi->nb_sinks; stream_idx++) {
+ sink_idx = lavfi->stream_sink_map[stream_idx];
+ if (lavfi->sink_stream_subcc_map[sink_idx]) {
+ lavfi->sink_stream_subcc_map[sink_idx] = avctx->nb_streams;
+ if (!(st = avformat_new_stream(avctx, NULL)))
+ return AVERROR(ENOMEM);
+ st->codecpar->codec_id = AV_CODEC_ID_EIA_608;
+ st->codecpar->codec_type = AVMEDIA_TYPE_SUBTITLE;
+ } else {
+ lavfi->sink_stream_subcc_map[sink_idx] = -1;
+ }
+ }
+ return 0;
+}
+
+av_cold static int lavfi_read_header(AVFormatContext *avctx)
+{
+ LavfiContext *lavfi = avctx->priv_data;
+ AVFilterInOut *input_links = NULL, *output_links = NULL, *inout;
+ AVFilter *buffersink, *abuffersink;
+ int *pix_fmts = create_all_formats(AV_PIX_FMT_NB);
+ enum AVMediaType type;
+ int ret = 0, i, n;
+
+#define FAIL(ERR) { ret = ERR; goto end; }
+
+ if (!pix_fmts)
+ FAIL(AVERROR(ENOMEM));
+
+ avfilter_register_all();
+
+ buffersink = avfilter_get_by_name("buffersink");
+ abuffersink = avfilter_get_by_name("abuffersink");
+
+ if (lavfi->graph_filename && lavfi->graph_str) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Only one of the graph or graph_file options must be specified\n");
+ FAIL(AVERROR(EINVAL));
+ }
+
+ if (lavfi->graph_filename) {
+ AVBPrint graph_file_pb;
+ AVIOContext *avio = NULL;
+ AVDictionary *options = NULL;
+ if (avctx->protocol_whitelist && (ret = av_dict_set(&options, "protocol_whitelist", avctx->protocol_whitelist, 0)) < 0)
+ goto end;
+ ret = avio_open2(&avio, lavfi->graph_filename, AVIO_FLAG_READ, &avctx->interrupt_callback, &options);
+ av_dict_set(&options, "protocol_whitelist", NULL, 0);
+ if (ret < 0)
+ goto end;
+ av_bprint_init(&graph_file_pb, 0, AV_BPRINT_SIZE_UNLIMITED);
+ ret = avio_read_to_bprint(avio, &graph_file_pb, INT_MAX);
+ avio_closep(&avio);
+ av_bprint_chars(&graph_file_pb, '\0', 1);
+ if (!ret && !av_bprint_is_complete(&graph_file_pb))
+ ret = AVERROR(ENOMEM);
+ if (ret) {
+ av_bprint_finalize(&graph_file_pb, NULL);
+ goto end;
+ }
+ if ((ret = av_bprint_finalize(&graph_file_pb, &lavfi->graph_str)))
+ goto end;
+ }
+
+ if (!lavfi->graph_str)
+ lavfi->graph_str = av_strdup(avctx->filename);
+
+ /* parse the graph, create a stream for each open output */
+ if (!(lavfi->graph = avfilter_graph_alloc()))
+ FAIL(AVERROR(ENOMEM));
+
+ if ((ret = avfilter_graph_parse_ptr(lavfi->graph, lavfi->graph_str,
+ &input_links, &output_links, avctx)) < 0)
+ goto end;
+
+ if (input_links) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Open inputs in the filtergraph are not acceptable\n");
+ FAIL(AVERROR(EINVAL));
+ }
+
+ /* count the outputs */
+ for (n = 0, inout = output_links; inout; n++, inout = inout->next);
+ lavfi->nb_sinks = n;
+
+ if (!(lavfi->sink_stream_map = av_malloc(sizeof(int) * n)))
+ FAIL(AVERROR(ENOMEM));
+ if (!(lavfi->sink_eof = av_mallocz(sizeof(int) * n)))
+ FAIL(AVERROR(ENOMEM));
+ if (!(lavfi->stream_sink_map = av_malloc(sizeof(int) * n)))
+ FAIL(AVERROR(ENOMEM));
+ if (!(lavfi->sink_stream_subcc_map = av_malloc(sizeof(int) * n)))
+ FAIL(AVERROR(ENOMEM));
+
+ for (i = 0; i < n; i++)
+ lavfi->stream_sink_map[i] = -1;
+
+ /* parse the output link names - they need to be of the form out0, out1, ...
+ * create a mapping between them and the streams */
+ for (i = 0, inout = output_links; inout; i++, inout = inout->next) {
+ int stream_idx = 0, suffix = 0, use_subcc = 0;
+ sscanf(inout->name, "out%n%d%n", &suffix, &stream_idx, &suffix);
+ if (!suffix) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Invalid outpad name '%s'\n", inout->name);
+ FAIL(AVERROR(EINVAL));
+ }
+ if (inout->name[suffix]) {
+ if (!strcmp(inout->name + suffix, "+subcc")) {
+ use_subcc = 1;
+ } else {
+ av_log(avctx, AV_LOG_ERROR,
+ "Invalid outpad suffix '%s'\n", inout->name);
+ FAIL(AVERROR(EINVAL));
+ }
+ }
+
+ if ((unsigned)stream_idx >= n) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Invalid index was specified in output '%s', "
+ "must be a non-negative value < %d\n",
+ inout->name, n);
+ FAIL(AVERROR(EINVAL));
+ }
+
+ if (lavfi->stream_sink_map[stream_idx] != -1) {
+ av_log(avctx, AV_LOG_ERROR,
+ "An output with stream index %d was already specified\n",
+ stream_idx);
+ FAIL(AVERROR(EINVAL));
+ }
+ lavfi->sink_stream_map[i] = stream_idx;
+ lavfi->stream_sink_map[stream_idx] = i;
+ lavfi->sink_stream_subcc_map[i] = !!use_subcc;
+ }
+
+ /* for each open output create a corresponding stream */
+ for (i = 0, inout = output_links; inout; i++, inout = inout->next) {
+ AVStream *st;
+ if (!(st = avformat_new_stream(avctx, NULL)))
+ FAIL(AVERROR(ENOMEM));
+ st->id = i;
+ }
+
+ /* create a sink for each output and connect them to the graph */
+ lavfi->sinks = av_malloc_array(lavfi->nb_sinks, sizeof(AVFilterContext *));
+ if (!lavfi->sinks)
+ FAIL(AVERROR(ENOMEM));
+
+ for (i = 0, inout = output_links; inout; i++, inout = inout->next) {
+ AVFilterContext *sink;
+
+ type = avfilter_pad_get_type(inout->filter_ctx->output_pads, inout->pad_idx);
+
+ if (type == AVMEDIA_TYPE_VIDEO && ! buffersink ||
+ type == AVMEDIA_TYPE_AUDIO && ! abuffersink) {
+ av_log(avctx, AV_LOG_ERROR, "Missing required buffersink filter, aborting.\n");
+ FAIL(AVERROR_FILTER_NOT_FOUND);
+ }
+
+ if (type == AVMEDIA_TYPE_VIDEO) {
+ ret = avfilter_graph_create_filter(&sink, buffersink,
+ inout->name, NULL,
+ NULL, lavfi->graph);
+ if (ret >= 0)
+ ret = av_opt_set_int_list(sink, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
+ if (ret < 0)
+ goto end;
+ } else if (type == AVMEDIA_TYPE_AUDIO) {
+ enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_U8,
+ AV_SAMPLE_FMT_S16,
+ AV_SAMPLE_FMT_S32,
+ AV_SAMPLE_FMT_FLT,
+ AV_SAMPLE_FMT_DBL, -1 };
+
+ ret = avfilter_graph_create_filter(&sink, abuffersink,
+ inout->name, NULL,
+ NULL, lavfi->graph);
+ if (ret >= 0)
+ ret = av_opt_set_int_list(sink, "sample_fmts", sample_fmts, AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
+ if (ret < 0)
+ goto end;
+ ret = av_opt_set_int(sink, "all_channel_counts", 1,
+ AV_OPT_SEARCH_CHILDREN);
+ if (ret < 0)
+ goto end;
+ } else {
+ av_log(avctx, AV_LOG_ERROR,
+ "Output '%s' is not a video or audio output, not yet supported\n", inout->name);
+ FAIL(AVERROR(EINVAL));
+ }
+
+ lavfi->sinks[i] = sink;
+ if ((ret = avfilter_link(inout->filter_ctx, inout->pad_idx, sink, 0)) < 0)
+ goto end;
+ }
+
+ /* configure the graph */
+ if ((ret = avfilter_graph_config(lavfi->graph, avctx)) < 0)
+ goto end;
+
+ if (lavfi->dump_graph) {
+ char *dump = avfilter_graph_dump(lavfi->graph, lavfi->dump_graph);
+ fputs(dump, stderr);
+ fflush(stderr);
+ av_free(dump);
+ }
+
+ /* fill each stream with the information in the corresponding sink */
+ for (i = 0; i < lavfi->nb_sinks; i++) {
+ AVFilterLink *link = lavfi->sinks[lavfi->stream_sink_map[i]]->inputs[0];
+ AVStream *st = avctx->streams[i];
+ st->codecpar->codec_type = link->type;
+ avpriv_set_pts_info(st, 64, link->time_base.num, link->time_base.den);
+ if (link->type == AVMEDIA_TYPE_VIDEO) {
+ st->codecpar->codec_id = AV_CODEC_ID_RAWVIDEO;
+ st->codecpar->format = link->format;
+ st->avg_frame_rate = av_inv_q(link->time_base);
+ st->codecpar->width = link->w;
+ st->codecpar->height = link->h;
+ st ->sample_aspect_ratio =
+ st->codecpar->sample_aspect_ratio = link->sample_aspect_ratio;
+ avctx->probesize = FFMAX(avctx->probesize,
+ link->w * link->h *
+ av_get_padded_bits_per_pixel(av_pix_fmt_desc_get(link->format)) *
+ 30);
+ } else if (link->type == AVMEDIA_TYPE_AUDIO) {
+ st->codecpar->codec_id = av_get_pcm_codec(link->format, -1);
+ st->codecpar->channels = avfilter_link_get_channels(link);
+ st->codecpar->format = link->format;
+ st->codecpar->sample_rate = link->sample_rate;
+ st->avg_frame_rate = av_inv_q(link->time_base);
+ st->codecpar->channel_layout = link->channel_layout;
+ if (st->codecpar->codec_id == AV_CODEC_ID_NONE)
+ av_log(avctx, AV_LOG_ERROR,
+ "Could not find PCM codec for sample format %s.\n",
+ av_get_sample_fmt_name(link->format));
+ }
+ }
+
+ if ((ret = create_subcc_streams(avctx)) < 0)
+ goto end;
+
+ if (!(lavfi->decoded_frame = av_frame_alloc()))
+ FAIL(AVERROR(ENOMEM));
+
+end:
+ av_free(pix_fmts);
+ avfilter_inout_free(&input_links);
+ avfilter_inout_free(&output_links);
+ if (ret < 0)
+ lavfi_read_close(avctx);
+ return ret;
+}
+
+static int create_subcc_packet(AVFormatContext *avctx, AVFrame *frame,
+ int sink_idx)
+{
+ LavfiContext *lavfi = avctx->priv_data;
+ AVFrameSideData *sd;
+ int stream_idx, i, ret;
+
+ if ((stream_idx = lavfi->sink_stream_subcc_map[sink_idx]) < 0)
+ return 0;
+ for (i = 0; i < frame->nb_side_data; i++)
+ if (frame->side_data[i]->type == AV_FRAME_DATA_A53_CC)
+ break;
+ if (i >= frame->nb_side_data)
+ return 0;
+ sd = frame->side_data[i];
+ if ((ret = av_new_packet(&lavfi->subcc_packet, sd->size)) < 0)
+ return ret;
+ memcpy(lavfi->subcc_packet.data, sd->data, sd->size);
+ lavfi->subcc_packet.stream_index = stream_idx;
+ lavfi->subcc_packet.pts = frame->pts;
+ lavfi->subcc_packet.pos = av_frame_get_pkt_pos(frame);
+ return 0;
+}
+
+static int lavfi_read_packet(AVFormatContext *avctx, AVPacket *pkt)
+{
+ LavfiContext *lavfi = avctx->priv_data;
+ double min_pts = DBL_MAX;
+ int stream_idx, min_pts_sink_idx = 0;
+ AVFrame *frame = lavfi->decoded_frame;
+ AVDictionary *frame_metadata;
+ int ret, i;
+ int size = 0;
+
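+    /* a pending closed-caption packet queued by create_subcc_packet()
+     * takes priority: hand it over before pulling a new frame */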
+ if (lavfi->subcc_packet.size) {
+ *pkt = lavfi->subcc_packet;
+ av_init_packet(&lavfi->subcc_packet);
+ lavfi->subcc_packet.size = 0;
+ lavfi->subcc_packet.data = NULL;
+ return pkt->size;
+ }
+
+ /* iterate through all the graph sinks. Select the sink with the
+ * minimum PTS */
+ for (i = 0; i < lavfi->nb_sinks; i++) {
+ AVRational tb = lavfi->sinks[i]->inputs[0]->time_base;
+ double d;
+ int ret;
+
+ if (lavfi->sink_eof[i])
+ continue;
+
+ ret = av_buffersink_get_frame_flags(lavfi->sinks[i], frame,
+ AV_BUFFERSINK_FLAG_PEEK);
+ if (ret == AVERROR_EOF) {
+ ff_dlog(avctx, "EOF sink_idx:%d\n", i);
+ lavfi->sink_eof[i] = 1;
+ continue;
+ } else if (ret < 0)
+ return ret;
+ d = av_rescale_q_rnd(frame->pts, tb, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
+ ff_dlog(avctx, "sink_idx:%d time:%f\n", i, d);
+ av_frame_unref(frame);
+
+ if (d < min_pts) {
+ min_pts = d;
+ min_pts_sink_idx = i;
+ }
+ }
+ if (min_pts == DBL_MAX)
+ return AVERROR_EOF;
+
+ ff_dlog(avctx, "min_pts_sink_idx:%i\n", min_pts_sink_idx);
+
+ av_buffersink_get_frame_flags(lavfi->sinks[min_pts_sink_idx], frame, 0);
+ stream_idx = lavfi->sink_stream_map[min_pts_sink_idx];
+
+ if (frame->width /* FIXME best way of testing a video */) {
+ size = av_image_get_buffer_size(frame->format, frame->width, frame->height, 1);
+ if ((ret = av_new_packet(pkt, size)) < 0)
+ return ret;
+
+ av_image_copy_to_buffer(pkt->data, size, (const uint8_t **)frame->data, frame->linesize,
+ frame->format, frame->width, frame->height, 1);
+ } else if (av_frame_get_channels(frame) /* FIXME test audio */) {
+ size = frame->nb_samples * av_get_bytes_per_sample(frame->format) *
+ av_frame_get_channels(frame);
+ if ((ret = av_new_packet(pkt, size)) < 0)
+ return ret;
+ memcpy(pkt->data, frame->data[0], size);
+ }
+
+ frame_metadata = av_frame_get_metadata(frame);
+ if (frame_metadata) {
+ uint8_t *metadata;
+ AVDictionaryEntry *e = NULL;
+ AVBPrint meta_buf;
+
+ av_bprint_init(&meta_buf, 0, AV_BPRINT_SIZE_UNLIMITED);
+ while ((e = av_dict_get(frame_metadata, "", e, AV_DICT_IGNORE_SUFFIX))) {
+ av_bprintf(&meta_buf, "%s", e->key);
+ av_bprint_chars(&meta_buf, '\0', 1);
+ av_bprintf(&meta_buf, "%s", e->value);
+ av_bprint_chars(&meta_buf, '\0', 1);
+ }
+ if (!av_bprint_is_complete(&meta_buf) ||
+ !(metadata = av_packet_new_side_data(pkt, AV_PKT_DATA_STRINGS_METADATA,
+ meta_buf.len))) {
+ av_bprint_finalize(&meta_buf, NULL);
+ return AVERROR(ENOMEM);
+ }
+ memcpy(metadata, meta_buf.str, meta_buf.len);
+ av_bprint_finalize(&meta_buf, NULL);
+ }
+
+ if ((ret = create_subcc_packet(avctx, frame, min_pts_sink_idx)) < 0) {
+ av_frame_unref(frame);
+ av_packet_unref(pkt);
+ return ret;
+ }
+
+ pkt->stream_index = stream_idx;
+ pkt->pts = frame->pts;
+ pkt->pos = av_frame_get_pkt_pos(frame);
+ pkt->size = size;
+ av_frame_unref(frame);
+ return size;
+}
+
+#define OFFSET(x) offsetof(LavfiContext, x)
+
+#define DEC AV_OPT_FLAG_DECODING_PARAM
+
+static const AVOption options[] = {
+ { "graph", "set libavfilter graph", OFFSET(graph_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC },
+ { "graph_file","set libavfilter graph filename", OFFSET(graph_filename), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC},
+ { "dumpgraph", "dump graph to stderr", OFFSET(dump_graph), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC },
+ { NULL },
+};
+
+static const AVClass lavfi_class = {
+ .class_name = "lavfi indev",
+ .item_name = av_default_item_name,
+ .option = options,
+ .version = LIBAVUTIL_VERSION_INT,
+ .category = AV_CLASS_CATEGORY_DEVICE_INPUT,
+};
+
+AVInputFormat ff_lavfi_demuxer = {
+ .name = "lavfi",
+ .long_name = NULL_IF_CONFIG_SMALL("Libavfilter virtual input device"),
+ .priv_data_size = sizeof(LavfiContext),
+ .read_header = lavfi_read_header,
+ .read_packet = lavfi_read_packet,
+ .read_close = lavfi_read_close,
+ .flags = AVFMT_NOFILE,
+ .priv_class = &lavfi_class,
+};
diff --git a/libavdevice/libavdevice.v b/libavdevice/libavdevice.v
index 663af85ba8..de7278c193 100644
--- a/libavdevice/libavdevice.v
+++ b/libavdevice/libavdevice.v
@@ -1,4 +1,4 @@
LIBAVDEVICE_$MAJOR {
- global: avdevice_*;
+ global: avdevice_*; av_*;
local: *;
};
diff --git a/libavdevice/libcdio.c b/libavdevice/libcdio.c
index f19ca997b4..f6d4fce256 100644
--- a/libavdevice/libcdio.c
+++ b/libavdevice/libcdio.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2011 Anton Khirnov <anton@khirnov.net>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -41,7 +41,7 @@
#include "libavformat/internal.h"
typedef struct CDIOContext {
- AVClass *class;
+ const AVClass *class;
cdrom_drive_t *drive;
cdrom_paranoia_t *paranoia;
int32_t last_sector;
@@ -164,11 +164,13 @@ static int read_seek(AVFormatContext *ctx, int stream_index, int64_t timestamp,
#define OFFSET(x) offsetof(CDIOContext, x)
#define DEC AV_OPT_FLAG_DECODING_PARAM
static const AVOption options[] = {
- { "speed", "Drive reading speed.", OFFSET(speed), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, DEC },
- { "paranoia_mode", "Error recovery mode.", OFFSET(paranoia_mode), AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT_MIN, INT_MAX, DEC, "paranoia_mode" },
- { "verify", "Verify data integrity in overlap area", 0, AV_OPT_TYPE_CONST, { .i64 = PARANOIA_MODE_VERIFY }, 0, 0, DEC, "paranoia_mode" },
- { "overlap", "Perform overlapped reads.", 0, AV_OPT_TYPE_CONST, { .i64 = PARANOIA_MODE_OVERLAP }, 0, 0, DEC, "paranoia_mode" },
- { "neverskip", "Do not skip failed reads.", 0, AV_OPT_TYPE_CONST, { .i64 = PARANOIA_MODE_NEVERSKIP }, 0, 0, DEC, "paranoia_mode" },
+ { "speed", "set drive reading speed", OFFSET(speed), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, DEC },
+ { "paranoia_mode", "set error recovery mode", OFFSET(paranoia_mode), AV_OPT_TYPE_FLAGS, { .i64 = PARANOIA_MODE_DISABLE }, INT_MIN, INT_MAX, DEC, "paranoia_mode" },
+ { "disable", "apply no fixups", 0, AV_OPT_TYPE_CONST, { .i64 = PARANOIA_MODE_DISABLE }, 0, 0, DEC, "paranoia_mode" },
+ { "verify", "verify data integrity in overlap area", 0, AV_OPT_TYPE_CONST, { .i64 = PARANOIA_MODE_VERIFY }, 0, 0, DEC, "paranoia_mode" },
+ { "overlap", "perform overlapped reads", 0, AV_OPT_TYPE_CONST, { .i64 = PARANOIA_MODE_OVERLAP }, 0, 0, DEC, "paranoia_mode" },
+ { "neverskip", "do not skip failed reads", 0, AV_OPT_TYPE_CONST, { .i64 = PARANOIA_MODE_NEVERSKIP }, 0, 0, DEC, "paranoia_mode" },
+ { "full", "apply all recovery modes", 0, AV_OPT_TYPE_CONST, { .i64 = PARANOIA_MODE_FULL }, 0, 0, DEC, "paranoia_mode" },
{ NULL },
};
@@ -177,6 +179,7 @@ static const AVClass libcdio_class = {
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
+ .category = AV_CLASS_CATEGORY_DEVICE_AUDIO_INPUT,
};
AVInputFormat ff_libcdio_demuxer = {
diff --git a/libavdevice/libdc1394.c b/libavdevice/libdc1394.c
index 72e2e8bcc8..43fa232922 100644
--- a/libavdevice/libdc1394.c
+++ b/libavdevice/libdc1394.c
@@ -3,20 +3,20 @@
* Copyright (c) 2004 Roman Shaposhnik
* Copyright (c) 2008 Alessandro Sappia
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -72,7 +72,7 @@ typedef struct dc1394_data {
AVPacket packet;
} dc1394_data;
-struct dc1394_frame_format {
+static const struct dc1394_frame_format {
int width;
int height;
enum AVPixelFormat pix_fmt;
@@ -85,7 +85,7 @@ struct dc1394_frame_format {
{ 0, 0, 0, 0 } /* gotta be the last one */
};
-struct dc1394_frame_rate {
+static const struct dc1394_frame_rate {
int frame_rate;
int frame_rate_id;
} dc1394_frame_rates[] = {
@@ -117,16 +117,17 @@ static const AVClass libdc1394_class = {
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
+ .category = AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT,
};
static inline int dc1394_read_common(AVFormatContext *c,
- struct dc1394_frame_format **select_fmt, struct dc1394_frame_rate **select_fps)
+ const struct dc1394_frame_format **select_fmt, const struct dc1394_frame_rate **select_fps)
{
dc1394_data* dc1394 = c->priv_data;
AVStream* vst;
- struct dc1394_frame_format *fmt;
- struct dc1394_frame_rate *fps;
+ const struct dc1394_frame_format *fmt;
+ const struct dc1394_frame_rate *fps;
enum AVPixelFormat pix_fmt;
int width, height;
AVRational framerate;
@@ -293,8 +294,8 @@ static int dc1394_v2_read_header(AVFormatContext *c)
dc1394_data* dc1394 = c->priv_data;
dc1394camera_list_t *list;
int res, i;
- struct dc1394_frame_format *fmt = NULL;
- struct dc1394_frame_rate *fps = NULL;
+ const struct dc1394_frame_format *fmt = NULL;
+ const struct dc1394_frame_rate *fps = NULL;
if (dc1394_read_common(c, &fmt, &fps) != 0)
return -1;
diff --git a/libavdevice/openal-dec.c b/libavdevice/openal-dec.c
new file mode 100644
index 0000000000..0647952f9c
--- /dev/null
+++ b/libavdevice/openal-dec.c
@@ -0,0 +1,254 @@
+/*
+ * Copyright (c) 2011 Jonathan Baldwin
+ *
+ * This file is part of FFmpeg.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
+ * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
+ * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+ * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/**
+ * @file
+ * OpenAL 1.1 capture device for libavdevice
+ **/
+
+#include <AL/al.h>
+#include <AL/alc.h>
+
+#include "libavutil/opt.h"
+#include "libavutil/time.h"
+#include "libavformat/internal.h"
+#include "avdevice.h"
+
+typedef struct {
+ AVClass *class;
+ /** OpenAL capture device context. **/
+ ALCdevice *device;
+ /** The number of channels in the captured audio. **/
+ int channels;
+ /** The sample rate (in Hz) of the captured audio. **/
+ int sample_rate;
+ /** The sample size (in bits) of the captured audio. **/
+ int sample_size;
+ /** The OpenAL sample format of the captured audio. **/
+ ALCenum sample_format;
+ /** The number of bytes between two consecutive samples of the same channel/component. **/
+ ALCint sample_step;
+ /** If true, print a list of capture devices on this system and exit. **/
+ int list_devices;
+} al_data;
+
+typedef struct {
+ ALCenum al_fmt;
+ enum AVCodecID codec_id;
+ int channels;
+} al_format_info;
+
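+/* AL_FORMAT_MONO8/16 and AL_FORMAT_STEREO8/16 have consecutive enum values;
+ * rebasing them on LOWEST_AL_FORMAT lets them index a small lookup table. */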
+#define LOWEST_AL_FORMAT FFMIN(FFMIN(AL_FORMAT_MONO8,AL_FORMAT_MONO16),FFMIN(AL_FORMAT_STEREO8,AL_FORMAT_STEREO16))
+
+/**
+ * Get information about an AL_FORMAT value.
+ * @param al_fmt the AL_FORMAT value to find information about.
+ * @return A pointer to a structure containing information about the AL_FORMAT value.
+ */
+static inline const al_format_info* get_al_format_info(ALCenum al_fmt)
+{
+ static const al_format_info info_table[] = {
+ [AL_FORMAT_MONO8-LOWEST_AL_FORMAT] = {AL_FORMAT_MONO8, AV_CODEC_ID_PCM_U8, 1},
+ [AL_FORMAT_MONO16-LOWEST_AL_FORMAT] = {AL_FORMAT_MONO16, AV_NE (AV_CODEC_ID_PCM_S16BE, AV_CODEC_ID_PCM_S16LE), 1},
+ [AL_FORMAT_STEREO8-LOWEST_AL_FORMAT] = {AL_FORMAT_STEREO8, AV_CODEC_ID_PCM_U8, 2},
+ [AL_FORMAT_STEREO16-LOWEST_AL_FORMAT] = {AL_FORMAT_STEREO16, AV_NE (AV_CODEC_ID_PCM_S16BE, AV_CODEC_ID_PCM_S16LE), 2},
+ };
+
+ return &info_table[al_fmt-LOWEST_AL_FORMAT];
+}
+
+/**
+ * Get the OpenAL error code, translated into an av/errno error code.
+ * @param device The ALC device to check for errors.
+ * @param error_msg_ret A pointer to a char* in which to return the error message, or NULL if desired.
+ * @return The error code, or 0 if there is no error.
+ */
+static inline int al_get_error(ALCdevice *device, const char** error_msg_ret)
+{
+ ALCenum error = alcGetError(device);
+ if (error_msg_ret)
+ *error_msg_ret = (const char*) alcGetString(device, error);
+ switch (error) {
+ case ALC_NO_ERROR:
+ return 0;
+ case ALC_INVALID_DEVICE:
+ return AVERROR(ENODEV);
+ break;
+ case ALC_INVALID_CONTEXT:
+ case ALC_INVALID_ENUM:
+ case ALC_INVALID_VALUE:
+ return AVERROR(EINVAL);
+ break;
+ case ALC_OUT_OF_MEMORY:
+ return AVERROR(ENOMEM);
+ break;
+ default:
+ return AVERROR(EIO);
+ }
+}
+
+/**
+ * Print out a list of OpenAL capture devices on this system.
+ */
+static inline void print_al_capture_devices(void *log_ctx)
+{
+ const char *devices;
+
+ if (!(devices = alcGetString(NULL, ALC_CAPTURE_DEVICE_SPECIFIER)))
+ return;
+
+ av_log(log_ctx, AV_LOG_INFO, "List of OpenAL capture devices on this system:\n");
+
+ for (; *devices != '\0'; devices += strlen(devices) + 1)
+ av_log(log_ctx, AV_LOG_INFO, " %s\n", devices);
+}
+
+static int read_header(AVFormatContext *ctx)
+{
+ al_data *ad = ctx->priv_data;
+ static const ALCenum sample_formats[2][2] = {
+ { AL_FORMAT_MONO8, AL_FORMAT_STEREO8 },
+ { AL_FORMAT_MONO16, AL_FORMAT_STEREO16 }
+ };
+ int error = 0;
+    const char *error_msg = NULL;
+ AVStream *st = NULL;
+ AVCodecParameters *par = NULL;
+
+ if (ad->list_devices) {
+ print_al_capture_devices(ctx);
+ return AVERROR_EXIT;
+ }
+
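+    /* row: bytes per sample minus one (8/16 bit), column: channels minus one (mono/stereo) */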
+ ad->sample_format = sample_formats[ad->sample_size/8-1][ad->channels-1];
+
+ /* Open device for capture */
+ ad->device =
+ alcCaptureOpenDevice(ctx->filename[0] ? ctx->filename : NULL,
+ ad->sample_rate,
+ ad->sample_format,
+ ad->sample_rate); /* Maximum 1 second of sample data to be read at once */
+
+    if ((error = al_get_error(ad->device, &error_msg))) goto fail;
+
+ /* Create stream */
+ if (!(st = avformat_new_stream(ctx, NULL))) {
+ error = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ /* We work in microseconds */
+ avpriv_set_pts_info(st, 64, 1, 1000000);
+
+ /* Set codec parameters */
+ par = st->codecpar;
+ par->codec_type = AVMEDIA_TYPE_AUDIO;
+ par->sample_rate = ad->sample_rate;
+ par->channels = get_al_format_info(ad->sample_format)->channels;
+ par->codec_id = get_al_format_info(ad->sample_format)->codec_id;
+
+ /* This is needed to read the audio data */
+ ad->sample_step = (av_get_bits_per_sample(get_al_format_info(ad->sample_format)->codec_id) *
+ get_al_format_info(ad->sample_format)->channels) / 8;
+
+ /* Finally, start the capture process */
+ alcCaptureStart(ad->device);
+
+ return 0;
+
+fail:
+ /* Handle failure */
+ if (ad->device)
+ alcCaptureCloseDevice(ad->device);
+ if (error_msg)
+ av_log(ctx, AV_LOG_ERROR, "Cannot open device: %s\n", error_msg);
+ return error;
+}
+
+static int read_packet(AVFormatContext* ctx, AVPacket *pkt)
+{
+ al_data *ad = ctx->priv_data;
+    int error = 0;
+    const char *error_msg = NULL;
+ ALCint nb_samples;
+
+ /* Get number of samples available */
+ alcGetIntegerv(ad->device, ALC_CAPTURE_SAMPLES, (ALCsizei) sizeof(ALCint), &nb_samples);
+    if ((error = al_get_error(ad->device, &error_msg))) goto fail;
+
+ /* Create a packet of appropriate size */
+ if ((error = av_new_packet(pkt, nb_samples*ad->sample_step)) < 0)
+ goto fail;
+ pkt->pts = av_gettime();
+
+ /* Fill the packet with the available samples */
+ alcCaptureSamples(ad->device, pkt->data, nb_samples);
+    if ((error = al_get_error(ad->device, &error_msg))) goto fail;
+
+ return pkt->size;
+fail:
+ /* Handle failure */
+ if (pkt->data)
+ av_packet_unref(pkt);
+ if (error_msg)
+ av_log(ctx, AV_LOG_ERROR, "Error: %s\n", error_msg);
+ return error;
+}
+
+static int read_close(AVFormatContext* ctx)
+{
+ al_data *ad = ctx->priv_data;
+
+ if (ad->device) {
+ alcCaptureStop(ad->device);
+ alcCaptureCloseDevice(ad->device);
+ }
+ return 0;
+}
+
+#define OFFSET(x) offsetof(al_data, x)
+
+static const AVOption options[] = {
+ {"channels", "set number of channels", OFFSET(channels), AV_OPT_TYPE_INT, {.i64=2}, 1, 2, AV_OPT_FLAG_DECODING_PARAM },
+ {"sample_rate", "set sample rate", OFFSET(sample_rate), AV_OPT_TYPE_INT, {.i64=44100}, 1, 192000, AV_OPT_FLAG_DECODING_PARAM },
+ {"sample_size", "set sample size", OFFSET(sample_size), AV_OPT_TYPE_INT, {.i64=16}, 8, 16, AV_OPT_FLAG_DECODING_PARAM },
+ {"list_devices", "list available devices", OFFSET(list_devices), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, AV_OPT_FLAG_DECODING_PARAM, "list_devices" },
+ {"true", "", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "list_devices" },
+ {"false", "", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "list_devices" },
+ {NULL},
+};
+
+static const AVClass class = {
+ .class_name = "openal",
+ .item_name = av_default_item_name,
+ .option = options,
+ .version = LIBAVUTIL_VERSION_INT,
+ .category = AV_CLASS_CATEGORY_DEVICE_AUDIO_INPUT,
+};
+
+AVInputFormat ff_openal_demuxer = {
+ .name = "openal",
+ .long_name = NULL_IF_CONFIG_SMALL("OpenAL audio capture device"),
+ .priv_data_size = sizeof(al_data),
+ .read_probe = NULL,
+ .read_header = read_header,
+ .read_packet = read_packet,
+ .read_close = read_close,
+ .flags = AVFMT_NOFILE,
+ .priv_class = &class
+};
diff --git a/libavdevice/opengl_enc.c b/libavdevice/opengl_enc.c
new file mode 100644
index 0000000000..1dbbb80f44
--- /dev/null
+++ b/libavdevice/opengl_enc.c
@@ -0,0 +1,1307 @@
+/*
+ * Copyright (c) 2014 Lukasz Marek
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+//TODO: support for more formats
+//TODO: support for more systems.
+//TODO: implement X11, Windows, Mac OS native default window. SDL 1.2 doesn't allow rendering from a custom thread.
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <stddef.h>
+
+#include "config.h"
+
+#if HAVE_WINDOWS_H
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#endif
+#if HAVE_OPENGL_GL3_H
+#include <OpenGL/gl3.h>
+#elif HAVE_ES2_GL_H
+#include <ES2/gl.h>
+#else
+#include <GL/gl.h>
+#include <GL/glext.h>
+#endif
+#if HAVE_GLXGETPROCADDRESS
+#include <GL/glx.h>
+#endif
+
+#if HAVE_SDL
+#include <SDL.h>
+#endif
+
+#include "libavutil/common.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/log.h"
+#include "libavutil/opt.h"
+#include "libavutil/avassert.h"
+#include "libavutil/avstring.h"
+#include "libavformat/avformat.h"
+#include "libavformat/internal.h"
+#include "libavdevice/avdevice.h"
+#include "opengl_enc_shaders.h"
+
+#ifndef APIENTRY
+#define APIENTRY
+#endif
+
+/* FF_GL_RED_COMPONENT is used for planar pixel types.
+ * Only the red component is sampled in shaders.
+ * On some platforms GL_RED is not available and GL_LUMINANCE has to be used,
+ * but since OpenGL 3.0 GL_LUMINANCE is deprecated.
+ * GL_RED produces RGBA = value, 0, 0, 1.
+ * GL_LUMINANCE produces RGBA = value, value, value, 1.
+ * Note: GL_INTENSITY may also be used, which produces RGBA = value, value, value, value. */
+#if defined(GL_RED)
+#define FF_GL_RED_COMPONENT GL_RED
+#elif defined(GL_LUMINANCE)
+#define FF_GL_RED_COMPONENT GL_LUMINANCE
+#else
+#define FF_GL_RED_COMPONENT 0x1903 //GL_RED
+#endif
+
+/* Constants not defined for iOS */
+#define FF_GL_UNSIGNED_BYTE_3_3_2 0x8032
+#define FF_GL_UNSIGNED_BYTE_2_3_3_REV 0x8362
+#define FF_GL_UNSIGNED_SHORT_1_5_5_5_REV 0x8366
+#define FF_GL_UNPACK_ROW_LENGTH 0x0CF2
+
+/* MinGW exposes only OpenGL 1.1 API */
+#define FF_GL_ARRAY_BUFFER 0x8892
+#define FF_GL_ELEMENT_ARRAY_BUFFER 0x8893
+#define FF_GL_STATIC_DRAW 0x88E4
+#define FF_GL_FRAGMENT_SHADER 0x8B30
+#define FF_GL_VERTEX_SHADER 0x8B31
+#define FF_GL_COMPILE_STATUS 0x8B81
+#define FF_GL_LINK_STATUS 0x8B82
+#define FF_GL_INFO_LOG_LENGTH 0x8B84
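+/* Function pointer types for entry points beyond OpenGL 1.1; they are
+ * resolved at runtime in the opengl_load_procedures() variants below. */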
+typedef void (APIENTRY *FF_PFNGLACTIVETEXTUREPROC) (GLenum texture);
+typedef void (APIENTRY *FF_PFNGLGENBUFFERSPROC) (GLsizei n, GLuint *buffers);
+typedef void (APIENTRY *FF_PFNGLDELETEBUFFERSPROC) (GLsizei n, const GLuint *buffers);
+typedef void (APIENTRY *FF_PFNGLBUFFERDATAPROC) (GLenum target, ptrdiff_t size, const GLvoid *data, GLenum usage);
+typedef void (APIENTRY *FF_PFNGLBINDBUFFERPROC) (GLenum target, GLuint buffer);
+typedef GLint (APIENTRY *FF_PFNGLGETATTRIBLOCATIONPROC) (GLuint program, const char *name);
+typedef void (APIENTRY *FF_PFNGLENABLEVERTEXATTRIBARRAYPROC) (GLuint index);
+typedef void (APIENTRY *FF_PFNGLVERTEXATTRIBPOINTERPROC) (GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride, uintptr_t pointer);
+typedef GLint (APIENTRY *FF_PFNGLGETUNIFORMLOCATIONPROC) (GLuint program, const char *name);
+typedef void (APIENTRY *FF_PFNGLUNIFORM1FPROC) (GLint location, GLfloat v0);
+typedef void (APIENTRY *FF_PFNGLUNIFORM1IPROC) (GLint location, GLint v0);
+typedef void (APIENTRY *FF_PFNGLUNIFORMMATRIX4FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+typedef GLuint (APIENTRY *FF_PFNGLCREATEPROGRAMPROC) (void);
+typedef void (APIENTRY *FF_PFNGLDELETEPROGRAMPROC) (GLuint program);
+typedef void (APIENTRY *FF_PFNGLUSEPROGRAMPROC) (GLuint program);
+typedef void (APIENTRY *FF_PFNGLLINKPROGRAMPROC) (GLuint program);
+typedef void (APIENTRY *FF_PFNGLGETPROGRAMIVPROC) (GLuint program, GLenum pname, GLint *params);
+typedef void (APIENTRY *FF_PFNGLGETPROGRAMINFOLOGPROC) (GLuint program, GLsizei bufSize, GLsizei *length, char *infoLog);
+typedef void (APIENTRY *FF_PFNGLATTACHSHADERPROC) (GLuint program, GLuint shader);
+typedef GLuint (APIENTRY *FF_PFNGLCREATESHADERPROC) (GLenum type);
+typedef void (APIENTRY *FF_PFNGLDELETESHADERPROC) (GLuint shader);
+typedef void (APIENTRY *FF_PFNGLCOMPILESHADERPROC) (GLuint shader);
+typedef void (APIENTRY *FF_PFNGLSHADERSOURCEPROC) (GLuint shader, GLsizei count, const char* *string, const GLint *length);
+typedef void (APIENTRY *FF_PFNGLGETSHADERIVPROC) (GLuint shader, GLenum pname, GLint *params);
+typedef void (APIENTRY *FF_PFNGLGETSHADERINFOLOGPROC) (GLuint shader, GLsizei bufSize, GLsizei *length, char *infoLog);
+
+typedef struct FFOpenGLFunctions {
+    FF_PFNGLACTIVETEXTUREPROC glActiveTexture; //Require GL_ARB_multitexture
+ FF_PFNGLGENBUFFERSPROC glGenBuffers; //Require GL_ARB_vertex_buffer_object
+ FF_PFNGLDELETEBUFFERSPROC glDeleteBuffers; //Require GL_ARB_vertex_buffer_object
+ FF_PFNGLBUFFERDATAPROC glBufferData; //Require GL_ARB_vertex_buffer_object
+ FF_PFNGLBINDBUFFERPROC glBindBuffer; //Require GL_ARB_vertex_buffer_object
+ FF_PFNGLGETATTRIBLOCATIONPROC glGetAttribLocation; //Require GL_ARB_vertex_shader
+ FF_PFNGLENABLEVERTEXATTRIBARRAYPROC glEnableVertexAttribArray; //Require GL_ARB_vertex_shader
+ FF_PFNGLVERTEXATTRIBPOINTERPROC glVertexAttribPointer; //Require GL_ARB_vertex_shader
+ FF_PFNGLGETUNIFORMLOCATIONPROC glGetUniformLocation; //Require GL_ARB_shader_objects
+ FF_PFNGLUNIFORM1FPROC glUniform1f; //Require GL_ARB_shader_objects
+ FF_PFNGLUNIFORM1IPROC glUniform1i; //Require GL_ARB_shader_objects
+ FF_PFNGLUNIFORMMATRIX4FVPROC glUniformMatrix4fv; //Require GL_ARB_shader_objects
+ FF_PFNGLCREATEPROGRAMPROC glCreateProgram; //Require GL_ARB_shader_objects
+ FF_PFNGLDELETEPROGRAMPROC glDeleteProgram; //Require GL_ARB_shader_objects
+ FF_PFNGLUSEPROGRAMPROC glUseProgram; //Require GL_ARB_shader_objects
+ FF_PFNGLLINKPROGRAMPROC glLinkProgram; //Require GL_ARB_shader_objects
+ FF_PFNGLGETPROGRAMIVPROC glGetProgramiv; //Require GL_ARB_shader_objects
+ FF_PFNGLGETPROGRAMINFOLOGPROC glGetProgramInfoLog; //Require GL_ARB_shader_objects
+ FF_PFNGLATTACHSHADERPROC glAttachShader; //Require GL_ARB_shader_objects
+ FF_PFNGLCREATESHADERPROC glCreateShader; //Require GL_ARB_shader_objects
+ FF_PFNGLDELETESHADERPROC glDeleteShader; //Require GL_ARB_shader_objects
+ FF_PFNGLCOMPILESHADERPROC glCompileShader; //Require GL_ARB_shader_objects
+ FF_PFNGLSHADERSOURCEPROC glShaderSource; //Require GL_ARB_shader_objects
+ FF_PFNGLGETSHADERIVPROC glGetShaderiv; //Require GL_ARB_shader_objects
+ FF_PFNGLGETSHADERINFOLOGPROC glGetShaderInfoLog; //Require GL_ARB_shader_objects
+} FFOpenGLFunctions;
+
+#define OPENGL_ERROR_CHECK(ctx) \
+{\
+ GLenum err_code; \
+ if ((err_code = glGetError()) != GL_NO_ERROR) { \
+ av_log(ctx, AV_LOG_ERROR, "OpenGL error occurred in '%s', line %d: %d\n", __FUNCTION__, __LINE__, err_code); \
+ goto fail; \
+ } \
+}\
+
+typedef struct OpenGLVertexInfo
+{
+ float x, y, z; ///<Position
+ float s0, t0; ///<Texture coords
+} OpenGLVertexInfo;
+
+/* defines 2 triangles to display */
+static const GLushort g_index[6] =
+{
+ 0, 1, 2,
+ 0, 3, 2,
+};
+
+typedef struct OpenGLContext {
+ AVClass *class; ///< class for private options
+
+#if HAVE_SDL
+ SDL_Surface *surface;
+#endif
+ FFOpenGLFunctions glprocs;
+
+ int inited; ///< Set to 1 when write_header was successfully called.
+ uint8_t background[4]; ///< Background color
+ int no_window; ///< 0 for create default window
+ char *window_title; ///< Title of the window
+
+ /* OpenGL implementation limits */
+ GLint max_texture_size; ///< Maximum texture size
+ GLint max_viewport_width; ///< Maximum viewport size
+ GLint max_viewport_height; ///< Maximum viewport size
+ int non_pow_2_textures; ///< 1 when non power of 2 textures are supported
+ int unpack_subimage; ///< 1 when GL_EXT_unpack_subimage is available
+
+ /* Current OpenGL configuration */
+ GLuint program; ///< Shader program
+ GLuint vertex_shader; ///< Vertex shader
+ GLuint fragment_shader; ///< Fragment shader for current pix_pmt
+ GLuint texture_name[4]; ///< Textures' IDs
+ GLuint index_buffer; ///< Index buffer
+ GLuint vertex_buffer; ///< Vertex buffer
+ OpenGLVertexInfo vertex[4]; ///< VBO
+ GLint projection_matrix_location; ///< Uniforms' locations
+ GLint model_view_matrix_location;
+ GLint color_map_location;
+ GLint chroma_div_w_location;
+ GLint chroma_div_h_location;
+ GLint texture_location[4];
+    GLint position_attrib;              ///< Attributes' locations
+ GLint texture_coords_attrib;
+
+ GLfloat projection_matrix[16]; ///< Projection matrix
+    GLfloat model_view_matrix[16];      ///< Model view matrix
+ GLfloat color_map[16]; ///< RGBA color map matrix
+ GLfloat chroma_div_w; ///< Chroma subsampling w ratio
+ GLfloat chroma_div_h; ///< Chroma subsampling h ratio
+
+ /* Stream information */
+ GLenum format;
+ GLenum type;
+ int width; ///< Stream width
+ int height; ///< Stream height
+ enum AVPixelFormat pix_fmt; ///< Stream pixel format
+ int picture_width; ///< Rendered width
+ int picture_height; ///< Rendered height
+ int window_width;
+ int window_height;
+} OpenGLContext;
+
+static const struct OpenGLFormatDesc {
+    enum AVPixelFormat pixel_format;
+ const char * const * fragment_shader;
+ GLenum format;
+ GLenum type;
+} opengl_format_desc[] = {
+ { AV_PIX_FMT_YUV420P, &FF_OPENGL_FRAGMENT_SHADER_YUV_PLANAR, FF_GL_RED_COMPONENT, GL_UNSIGNED_BYTE },
+ { AV_PIX_FMT_YUV444P, &FF_OPENGL_FRAGMENT_SHADER_YUV_PLANAR, FF_GL_RED_COMPONENT, GL_UNSIGNED_BYTE },
+ { AV_PIX_FMT_YUV422P, &FF_OPENGL_FRAGMENT_SHADER_YUV_PLANAR, FF_GL_RED_COMPONENT, GL_UNSIGNED_BYTE },
+ { AV_PIX_FMT_YUV410P, &FF_OPENGL_FRAGMENT_SHADER_YUV_PLANAR, FF_GL_RED_COMPONENT, GL_UNSIGNED_BYTE },
+ { AV_PIX_FMT_YUV411P, &FF_OPENGL_FRAGMENT_SHADER_YUV_PLANAR, FF_GL_RED_COMPONENT, GL_UNSIGNED_BYTE },
+ { AV_PIX_FMT_YUV440P, &FF_OPENGL_FRAGMENT_SHADER_YUV_PLANAR, FF_GL_RED_COMPONENT, GL_UNSIGNED_BYTE },
+ { AV_PIX_FMT_YUV420P16, &FF_OPENGL_FRAGMENT_SHADER_YUV_PLANAR, FF_GL_RED_COMPONENT, GL_UNSIGNED_SHORT },
+ { AV_PIX_FMT_YUV422P16, &FF_OPENGL_FRAGMENT_SHADER_YUV_PLANAR, FF_GL_RED_COMPONENT, GL_UNSIGNED_SHORT },
+ { AV_PIX_FMT_YUV444P16, &FF_OPENGL_FRAGMENT_SHADER_YUV_PLANAR, FF_GL_RED_COMPONENT, GL_UNSIGNED_SHORT },
+ { AV_PIX_FMT_YUVA420P, &FF_OPENGL_FRAGMENT_SHADER_YUVA_PLANAR, FF_GL_RED_COMPONENT, GL_UNSIGNED_BYTE },
+ { AV_PIX_FMT_YUVA444P, &FF_OPENGL_FRAGMENT_SHADER_YUVA_PLANAR, FF_GL_RED_COMPONENT, GL_UNSIGNED_BYTE },
+ { AV_PIX_FMT_YUVA422P, &FF_OPENGL_FRAGMENT_SHADER_YUVA_PLANAR, FF_GL_RED_COMPONENT, GL_UNSIGNED_BYTE },
+ { AV_PIX_FMT_YUVA420P16, &FF_OPENGL_FRAGMENT_SHADER_YUVA_PLANAR, FF_GL_RED_COMPONENT, GL_UNSIGNED_SHORT },
+ { AV_PIX_FMT_YUVA422P16, &FF_OPENGL_FRAGMENT_SHADER_YUVA_PLANAR, FF_GL_RED_COMPONENT, GL_UNSIGNED_SHORT },
+ { AV_PIX_FMT_YUVA444P16, &FF_OPENGL_FRAGMENT_SHADER_YUVA_PLANAR, FF_GL_RED_COMPONENT, GL_UNSIGNED_SHORT },
+ { AV_PIX_FMT_RGB24, &FF_OPENGL_FRAGMENT_SHADER_RGB_PACKET, GL_RGB, GL_UNSIGNED_BYTE },
+ { AV_PIX_FMT_BGR24, &FF_OPENGL_FRAGMENT_SHADER_RGB_PACKET, GL_RGB, GL_UNSIGNED_BYTE },
+ { AV_PIX_FMT_0RGB, &FF_OPENGL_FRAGMENT_SHADER_RGB_PACKET, GL_RGBA, GL_UNSIGNED_BYTE },
+ { AV_PIX_FMT_RGB0, &FF_OPENGL_FRAGMENT_SHADER_RGB_PACKET, GL_RGBA, GL_UNSIGNED_BYTE },
+ { AV_PIX_FMT_0BGR, &FF_OPENGL_FRAGMENT_SHADER_RGB_PACKET, GL_RGBA, GL_UNSIGNED_BYTE },
+ { AV_PIX_FMT_BGR0, &FF_OPENGL_FRAGMENT_SHADER_RGB_PACKET, GL_RGBA, GL_UNSIGNED_BYTE },
+ { AV_PIX_FMT_RGB565, &FF_OPENGL_FRAGMENT_SHADER_RGB_PACKET, GL_RGB, GL_UNSIGNED_SHORT_5_6_5 },
+ { AV_PIX_FMT_BGR565, &FF_OPENGL_FRAGMENT_SHADER_RGB_PACKET, GL_RGB, GL_UNSIGNED_SHORT_5_6_5 },
+ { AV_PIX_FMT_RGB555, &FF_OPENGL_FRAGMENT_SHADER_RGB_PACKET, GL_RGBA, FF_GL_UNSIGNED_SHORT_1_5_5_5_REV },
+ { AV_PIX_FMT_BGR555, &FF_OPENGL_FRAGMENT_SHADER_RGB_PACKET, GL_RGBA, FF_GL_UNSIGNED_SHORT_1_5_5_5_REV },
+ { AV_PIX_FMT_RGB8, &FF_OPENGL_FRAGMENT_SHADER_RGB_PACKET, GL_RGB, FF_GL_UNSIGNED_BYTE_3_3_2 },
+ { AV_PIX_FMT_BGR8, &FF_OPENGL_FRAGMENT_SHADER_RGB_PACKET, GL_RGB, FF_GL_UNSIGNED_BYTE_2_3_3_REV },
+ { AV_PIX_FMT_RGB48, &FF_OPENGL_FRAGMENT_SHADER_RGB_PACKET, GL_RGB, GL_UNSIGNED_SHORT },
+ { AV_PIX_FMT_ARGB, &FF_OPENGL_FRAGMENT_SHADER_RGBA_PACKET, GL_RGBA, GL_UNSIGNED_BYTE },
+ { AV_PIX_FMT_RGBA, &FF_OPENGL_FRAGMENT_SHADER_RGBA_PACKET, GL_RGBA, GL_UNSIGNED_BYTE },
+ { AV_PIX_FMT_ABGR, &FF_OPENGL_FRAGMENT_SHADER_RGBA_PACKET, GL_RGBA, GL_UNSIGNED_BYTE },
+ { AV_PIX_FMT_BGRA, &FF_OPENGL_FRAGMENT_SHADER_RGBA_PACKET, GL_RGBA, GL_UNSIGNED_BYTE },
+ { AV_PIX_FMT_RGBA64, &FF_OPENGL_FRAGMENT_SHADER_RGBA_PACKET, GL_RGBA, GL_UNSIGNED_SHORT },
+ { AV_PIX_FMT_BGRA64, &FF_OPENGL_FRAGMENT_SHADER_RGBA_PACKET, GL_RGBA, GL_UNSIGNED_SHORT },
+ { AV_PIX_FMT_GBRP, &FF_OPENGL_FRAGMENT_SHADER_RGB_PLANAR, FF_GL_RED_COMPONENT, GL_UNSIGNED_BYTE },
+ { AV_PIX_FMT_GBRP16, &FF_OPENGL_FRAGMENT_SHADER_RGB_PLANAR, FF_GL_RED_COMPONENT, GL_UNSIGNED_SHORT },
+ { AV_PIX_FMT_GBRAP, &FF_OPENGL_FRAGMENT_SHADER_RGBA_PLANAR, FF_GL_RED_COMPONENT, GL_UNSIGNED_BYTE },
+ { AV_PIX_FMT_GBRAP16, &FF_OPENGL_FRAGMENT_SHADER_RGBA_PLANAR, FF_GL_RED_COMPONENT, GL_UNSIGNED_SHORT },
+ { AV_PIX_FMT_GRAY8, &FF_OPENGL_FRAGMENT_SHADER_GRAY, FF_GL_RED_COMPONENT, GL_UNSIGNED_BYTE },
+ { AV_PIX_FMT_GRAY16, &FF_OPENGL_FRAGMENT_SHADER_GRAY, FF_GL_RED_COMPONENT, GL_UNSIGNED_SHORT },
+ { AV_PIX_FMT_NONE, NULL }
+};
+
+static av_cold int opengl_prepare_vertex(AVFormatContext *s);
+static int opengl_draw(AVFormatContext *h, void *input, int repaint, int is_pkt);
+static av_cold int opengl_init_context(OpenGLContext *opengl);
+
+static av_cold void opengl_deinit_context(OpenGLContext *opengl)
+{
+ glDeleteTextures(4, opengl->texture_name);
+ opengl->texture_name[0] = opengl->texture_name[1] =
+ opengl->texture_name[2] = opengl->texture_name[3] = 0;
+ if (opengl->glprocs.glUseProgram)
+ opengl->glprocs.glUseProgram(0);
+ if (opengl->glprocs.glDeleteProgram) {
+ opengl->glprocs.glDeleteProgram(opengl->program);
+ opengl->program = 0;
+ }
+ if (opengl->glprocs.glDeleteShader) {
+ opengl->glprocs.glDeleteShader(opengl->vertex_shader);
+ opengl->glprocs.glDeleteShader(opengl->fragment_shader);
+ opengl->vertex_shader = opengl->fragment_shader = 0;
+ }
+ if (opengl->glprocs.glBindBuffer) {
+ opengl->glprocs.glBindBuffer(FF_GL_ARRAY_BUFFER, 0);
+ opengl->glprocs.glBindBuffer(FF_GL_ELEMENT_ARRAY_BUFFER, 0);
+ }
+ if (opengl->glprocs.glDeleteBuffers) {
+ opengl->glprocs.glDeleteBuffers(2, &opengl->index_buffer);
+ opengl->vertex_buffer = opengl->index_buffer = 0;
+ }
+}
+
+static int opengl_resize(AVFormatContext *h, int width, int height)
+{
+ int ret = 0;
+ OpenGLContext *opengl = h->priv_data;
+ opengl->window_width = width;
+ opengl->window_height = height;
+ if (opengl->inited) {
+ if (opengl->no_window &&
+ (ret = avdevice_dev_to_app_control_message(h, AV_DEV_TO_APP_PREPARE_WINDOW_BUFFER, NULL , 0)) < 0) {
+ av_log(opengl, AV_LOG_ERROR, "Application failed to prepare window buffer.\n");
+ goto end;
+ }
+ if ((ret = opengl_prepare_vertex(h)) < 0)
+ goto end;
+ ret = opengl_draw(h, NULL, 1, 0);
+ }
+ end:
+ return ret;
+}
+
+static int opengl_control_message(AVFormatContext *h, int type, void *data, size_t data_size)
+{
+ OpenGLContext *opengl = h->priv_data;
+ switch(type) {
+ case AV_APP_TO_DEV_WINDOW_SIZE:
+ if (data) {
+ AVDeviceRect *message = data;
+ return opengl_resize(h, message->width, message->height);
+ }
+ return AVERROR(EINVAL);
+ case AV_APP_TO_DEV_WINDOW_REPAINT:
+ return opengl_resize(h, opengl->window_width, opengl->window_height);
+ }
+ return AVERROR(ENOSYS);
+}
+
+#if HAVE_SDL
+static int opengl_sdl_recreate_window(OpenGLContext *opengl, int width, int height)
+{
+ opengl->surface = SDL_SetVideoMode(width, height,
+ 32, SDL_OPENGL | SDL_RESIZABLE);
+ if (!opengl->surface) {
+ av_log(opengl, AV_LOG_ERROR, "Unable to set video mode: %s\n", SDL_GetError());
+ return AVERROR_EXTERNAL;
+ }
+ SDL_GL_SetAttribute(SDL_GL_RED_SIZE, 8);
+ SDL_GL_SetAttribute(SDL_GL_GREEN_SIZE, 8);
+ SDL_GL_SetAttribute(SDL_GL_BLUE_SIZE, 8);
+ SDL_GL_SetAttribute(SDL_GL_ALPHA_SIZE, 8);
+ SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, 1);
+ return 0;
+}
+
+static int opengl_sdl_process_events(AVFormatContext *h)
+{
+ int ret;
+ OpenGLContext *opengl = h->priv_data;
+ SDL_Event event;
+ SDL_PumpEvents();
+ while (SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_ALLEVENTS) > 0) {
+ switch (event.type) {
+ case SDL_QUIT:
+ return AVERROR(EIO);
+ case SDL_KEYDOWN:
+ switch (event.key.keysym.sym) {
+ case SDLK_ESCAPE:
+ case SDLK_q:
+ return AVERROR(EIO);
+ }
+ return 0;
+ case SDL_VIDEORESIZE: {
+ char buffer[100];
+ int reinit;
+ AVDeviceRect message;
+ /* clean up old context because SDL_SetVideoMode may lose its state. */
+ SDL_VideoDriverName(buffer, sizeof(buffer));
+ reinit = !av_strncasecmp(buffer, "quartz", sizeof(buffer));
+ if (reinit) {
+ opengl_deinit_context(opengl);
+ }
+ if ((ret = opengl_sdl_recreate_window(opengl, event.resize.w, event.resize.h)) < 0)
+ return ret;
+ if (reinit && (ret = opengl_init_context(opengl)) < 0)
+ return ret;
+ message.width = opengl->surface->w;
+ message.height = opengl->surface->h;
+ return opengl_control_message(h, AV_APP_TO_DEV_WINDOW_SIZE, &message, sizeof(AVDeviceRect));
+ }
+ }
+ }
+ return 0;
+}
+
+static av_cold int opengl_sdl_create_window(AVFormatContext *h)
+{
+ int ret;
+ char buffer[100];
+ OpenGLContext *opengl = h->priv_data;
+ AVDeviceRect message;
+ if (SDL_Init(SDL_INIT_VIDEO)) {
+ av_log(opengl, AV_LOG_ERROR, "Unable to initialize SDL: %s\n", SDL_GetError());
+ return AVERROR_EXTERNAL;
+ }
+ if ((ret = opengl_sdl_recreate_window(opengl, opengl->window_width,
+ opengl->window_height)) < 0)
+ return ret;
+ av_log(opengl, AV_LOG_INFO, "SDL driver: '%s'.\n", SDL_VideoDriverName(buffer, sizeof(buffer)));
+ message.width = opengl->surface->w;
+ message.height = opengl->surface->h;
+ SDL_WM_SetCaption(opengl->window_title, NULL);
+ opengl_control_message(h, AV_APP_TO_DEV_WINDOW_SIZE, &message, sizeof(AVDeviceRect));
+ return 0;
+}
+
+static av_cold int opengl_sdl_load_procedures(OpenGLContext *opengl)
+{
+ FFOpenGLFunctions *procs = &opengl->glprocs;
+
+#define LOAD_OPENGL_FUN(name, type) \
+ procs->name = (type)SDL_GL_GetProcAddress(#name); \
+ if (!procs->name) { \
+ av_log(opengl, AV_LOG_ERROR, "Cannot load OpenGL function: '%s'\n", #name); \
+ return AVERROR(ENOSYS); \
+ }
+
+ LOAD_OPENGL_FUN(glActiveTexture, FF_PFNGLACTIVETEXTUREPROC)
+ LOAD_OPENGL_FUN(glGenBuffers, FF_PFNGLGENBUFFERSPROC)
+ LOAD_OPENGL_FUN(glDeleteBuffers, FF_PFNGLDELETEBUFFERSPROC)
+ LOAD_OPENGL_FUN(glBufferData, FF_PFNGLBUFFERDATAPROC)
+ LOAD_OPENGL_FUN(glBindBuffer, FF_PFNGLBINDBUFFERPROC)
+ LOAD_OPENGL_FUN(glGetAttribLocation, FF_PFNGLGETATTRIBLOCATIONPROC)
+ LOAD_OPENGL_FUN(glGetUniformLocation, FF_PFNGLGETUNIFORMLOCATIONPROC)
+ LOAD_OPENGL_FUN(glUniform1f, FF_PFNGLUNIFORM1FPROC)
+ LOAD_OPENGL_FUN(glUniform1i, FF_PFNGLUNIFORM1IPROC)
+ LOAD_OPENGL_FUN(glUniformMatrix4fv, FF_PFNGLUNIFORMMATRIX4FVPROC)
+ LOAD_OPENGL_FUN(glCreateProgram, FF_PFNGLCREATEPROGRAMPROC)
+ LOAD_OPENGL_FUN(glDeleteProgram, FF_PFNGLDELETEPROGRAMPROC)
+ LOAD_OPENGL_FUN(glUseProgram, FF_PFNGLUSEPROGRAMPROC)
+ LOAD_OPENGL_FUN(glLinkProgram, FF_PFNGLLINKPROGRAMPROC)
+ LOAD_OPENGL_FUN(glGetProgramiv, FF_PFNGLGETPROGRAMIVPROC)
+ LOAD_OPENGL_FUN(glGetProgramInfoLog, FF_PFNGLGETPROGRAMINFOLOGPROC)
+ LOAD_OPENGL_FUN(glAttachShader, FF_PFNGLATTACHSHADERPROC)
+ LOAD_OPENGL_FUN(glCreateShader, FF_PFNGLCREATESHADERPROC)
+ LOAD_OPENGL_FUN(glDeleteShader, FF_PFNGLDELETESHADERPROC)
+ LOAD_OPENGL_FUN(glCompileShader, FF_PFNGLCOMPILESHADERPROC)
+ LOAD_OPENGL_FUN(glShaderSource, FF_PFNGLSHADERSOURCEPROC)
+ LOAD_OPENGL_FUN(glGetShaderiv, FF_PFNGLGETSHADERIVPROC)
+ LOAD_OPENGL_FUN(glGetShaderInfoLog, FF_PFNGLGETSHADERINFOLOGPROC)
+ LOAD_OPENGL_FUN(glEnableVertexAttribArray, FF_PFNGLENABLEVERTEXATTRIBARRAYPROC)
+ LOAD_OPENGL_FUN(glVertexAttribPointer, FF_PFNGLVERTEXATTRIBPOINTERPROC)
+
+ return 0;
+
+#undef LOAD_OPENGL_FUN
+}
+#endif /* HAVE_SDL */
+
+#if defined(__APPLE__)
+static av_cold int opengl_load_procedures(OpenGLContext *opengl)
+{
+ FFOpenGLFunctions *procs = &opengl->glprocs;
+
+#if HAVE_SDL
+ if (!opengl->no_window)
+ return opengl_sdl_load_procedures(opengl);
+#endif
+
+ procs->glActiveTexture = glActiveTexture;
+ procs->glGenBuffers = glGenBuffers;
+ procs->glDeleteBuffers = glDeleteBuffers;
+ procs->glBufferData = glBufferData;
+ procs->glBindBuffer = glBindBuffer;
+ procs->glGetAttribLocation = glGetAttribLocation;
+ procs->glGetUniformLocation = glGetUniformLocation;
+ procs->glUniform1f = glUniform1f;
+ procs->glUniform1i = glUniform1i;
+ procs->glUniformMatrix4fv = glUniformMatrix4fv;
+ procs->glCreateProgram = glCreateProgram;
+ procs->glDeleteProgram = glDeleteProgram;
+ procs->glUseProgram = glUseProgram;
+ procs->glLinkProgram = glLinkProgram;
+ procs->glGetProgramiv = glGetProgramiv;
+ procs->glGetProgramInfoLog = glGetProgramInfoLog;
+ procs->glAttachShader = glAttachShader;
+ procs->glCreateShader = glCreateShader;
+ procs->glDeleteShader = glDeleteShader;
+ procs->glCompileShader = glCompileShader;
+ procs->glShaderSource = glShaderSource;
+ procs->glGetShaderiv = glGetShaderiv;
+ procs->glGetShaderInfoLog = glGetShaderInfoLog;
+ procs->glEnableVertexAttribArray = glEnableVertexAttribArray;
+ procs->glVertexAttribPointer = (FF_PFNGLVERTEXATTRIBPOINTERPROC) glVertexAttribPointer;
+ return 0;
+}
+#else
+static av_cold int opengl_load_procedures(OpenGLContext *opengl)
+{
+ FFOpenGLFunctions *procs = &opengl->glprocs;
+
+#if HAVE_GLXGETPROCADDRESS
+#define SelectedGetProcAddress glXGetProcAddress
+#elif HAVE_WGLGETPROCADDRESS
+#define SelectedGetProcAddress wglGetProcAddress
+#endif
+
+#define LOAD_OPENGL_FUN(name, type) \
+ procs->name = (type)SelectedGetProcAddress(#name); \
+ if (!procs->name) { \
+ av_log(opengl, AV_LOG_ERROR, "Cannot load OpenGL function: '%s'\n", #name); \
+ return AVERROR(ENOSYS); \
+ }
+
+#if HAVE_SDL
+ if (!opengl->no_window)
+ return opengl_sdl_load_procedures(opengl);
+#endif
+
+ LOAD_OPENGL_FUN(glActiveTexture, FF_PFNGLACTIVETEXTUREPROC)
+ LOAD_OPENGL_FUN(glGenBuffers, FF_PFNGLGENBUFFERSPROC)
+ LOAD_OPENGL_FUN(glDeleteBuffers, FF_PFNGLDELETEBUFFERSPROC)
+ LOAD_OPENGL_FUN(glBufferData, FF_PFNGLBUFFERDATAPROC)
+ LOAD_OPENGL_FUN(glBindBuffer, FF_PFNGLBINDBUFFERPROC)
+ LOAD_OPENGL_FUN(glGetAttribLocation, FF_PFNGLGETATTRIBLOCATIONPROC)
+ LOAD_OPENGL_FUN(glGetUniformLocation, FF_PFNGLGETUNIFORMLOCATIONPROC)
+ LOAD_OPENGL_FUN(glUniform1f, FF_PFNGLUNIFORM1FPROC)
+ LOAD_OPENGL_FUN(glUniform1i, FF_PFNGLUNIFORM1IPROC)
+ LOAD_OPENGL_FUN(glUniformMatrix4fv, FF_PFNGLUNIFORMMATRIX4FVPROC)
+ LOAD_OPENGL_FUN(glCreateProgram, FF_PFNGLCREATEPROGRAMPROC)
+ LOAD_OPENGL_FUN(glDeleteProgram, FF_PFNGLDELETEPROGRAMPROC)
+ LOAD_OPENGL_FUN(glUseProgram, FF_PFNGLUSEPROGRAMPROC)
+ LOAD_OPENGL_FUN(glLinkProgram, FF_PFNGLLINKPROGRAMPROC)
+ LOAD_OPENGL_FUN(glGetProgramiv, FF_PFNGLGETPROGRAMIVPROC)
+ LOAD_OPENGL_FUN(glGetProgramInfoLog, FF_PFNGLGETPROGRAMINFOLOGPROC)
+ LOAD_OPENGL_FUN(glAttachShader, FF_PFNGLATTACHSHADERPROC)
+ LOAD_OPENGL_FUN(glCreateShader, FF_PFNGLCREATESHADERPROC)
+ LOAD_OPENGL_FUN(glDeleteShader, FF_PFNGLDELETESHADERPROC)
+ LOAD_OPENGL_FUN(glCompileShader, FF_PFNGLCOMPILESHADERPROC)
+ LOAD_OPENGL_FUN(glShaderSource, FF_PFNGLSHADERSOURCEPROC)
+ LOAD_OPENGL_FUN(glGetShaderiv, FF_PFNGLGETSHADERIVPROC)
+ LOAD_OPENGL_FUN(glGetShaderInfoLog, FF_PFNGLGETSHADERINFOLOGPROC)
+ LOAD_OPENGL_FUN(glEnableVertexAttribArray, FF_PFNGLENABLEVERTEXATTRIBARRAYPROC)
+ LOAD_OPENGL_FUN(glVertexAttribPointer, FF_PFNGLVERTEXATTRIBPOINTERPROC)
+
+ return 0;
+
+#undef SelectedGetProcAddress
+#undef LOAD_OPENGL_FUN
+}
+#endif
+
+static void opengl_make_identity(float matrix[16])
+{
+ memset(matrix, 0, 16 * sizeof(float));
+ matrix[0] = matrix[5] = matrix[10] = matrix[15] = 1.0f;
+}
+
+static void opengl_make_ortho(float matrix[16], float left, float right,
+ float bottom, float top, float nearZ, float farZ)
+{
+ float ral = right + left;
+ float rsl = right - left;
+ float tab = top + bottom;
+ float tsb = top - bottom;
+ float fan = farZ + nearZ;
+ float fsn = farZ - nearZ;
+
+ memset(matrix, 0, 16 * sizeof(float));
+ matrix[0] = 2.0f / rsl;
+ matrix[5] = 2.0f / tsb;
+ matrix[10] = -2.0f / fsn;
+ matrix[12] = -ral / rsl;
+ matrix[13] = -tab / tsb;
+ matrix[14] = -fan / fsn;
+ matrix[15] = 1.0f;
+}
+
+static av_cold int opengl_read_limits(OpenGLContext *opengl)
+{
+ static const struct{
+ const char *extension;
+ int major;
+ int minor;
+ } required_extensions[] = {
+ { "GL_ARB_multitexture", 1, 3 },
+ { "GL_ARB_vertex_buffer_object", 1, 5 }, //GLX_ARB_vertex_buffer_object
+ { "GL_ARB_vertex_shader", 2, 0 },
+ { "GL_ARB_fragment_shader", 2, 0 },
+ { "GL_ARB_shader_objects", 2, 0 },
+ { NULL, 0, 0 }
+ };
+ int i, major, minor;
+ const char *extensions, *version;
+
+    version = (const char *)glGetString(GL_VERSION);
+    extensions = (const char *)glGetString(GL_EXTENSIONS);
+
+ av_log(opengl, AV_LOG_DEBUG, "OpenGL version: %s\n", version);
+ sscanf(version, "%d.%d", &major, &minor);
+
+ for (i = 0; required_extensions[i].extension; i++) {
+        if ((major < required_extensions[i].major ||
+             (major == required_extensions[i].major && minor < required_extensions[i].minor)) &&
+            !strstr(extensions, required_extensions[i].extension)) {
+ av_log(opengl, AV_LOG_ERROR, "Required extension %s is not supported.\n",
+ required_extensions[i].extension);
+ av_log(opengl, AV_LOG_DEBUG, "Supported extensions are: %s\n", extensions);
+ return AVERROR(ENOSYS);
+ }
+ }
+ glGetIntegerv(GL_MAX_TEXTURE_SIZE, &opengl->max_texture_size);
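+    /* GL_MAX_VIEWPORT_DIMS yields two integers; this write relies on
+     * max_viewport_width and max_viewport_height being adjacent in the struct */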
+ glGetIntegerv(GL_MAX_VIEWPORT_DIMS, &opengl->max_viewport_width);
+ opengl->non_pow_2_textures = major >= 2 || strstr(extensions, "GL_ARB_texture_non_power_of_two");
+#if defined(GL_ES_VERSION_2_0)
+ opengl->unpack_subimage = !!strstr(extensions, "GL_EXT_unpack_subimage");
+#else
+ opengl->unpack_subimage = 1;
+#endif
+
+ av_log(opengl, AV_LOG_DEBUG, "Non Power of 2 textures support: %s\n", opengl->non_pow_2_textures ? "Yes" : "No");
+ av_log(opengl, AV_LOG_DEBUG, "Unpack Subimage extension support: %s\n", opengl->unpack_subimage ? "Yes" : "No");
+ av_log(opengl, AV_LOG_DEBUG, "Max texture size: %dx%d\n", opengl->max_texture_size, opengl->max_texture_size);
+ av_log(opengl, AV_LOG_DEBUG, "Max viewport size: %dx%d\n",
+ opengl->max_viewport_width, opengl->max_viewport_height);
+
+ OPENGL_ERROR_CHECK(opengl);
+ return 0;
+ fail:
+ return AVERROR_EXTERNAL;
+}
+
+static const char* opengl_get_fragment_shader_code(enum AVPixelFormat format)
+{
+ int i;
+ for (i = 0; i < FF_ARRAY_ELEMS(opengl_format_desc); i++) {
+        if (opengl_format_desc[i].pixel_format == format)
+ return *opengl_format_desc[i].fragment_shader;
+ }
+ return NULL;
+}
+
+static int opengl_type_size(GLenum type)
+{
+ switch(type) {
+ case GL_UNSIGNED_SHORT:
+ case FF_GL_UNSIGNED_SHORT_1_5_5_5_REV:
+ case GL_UNSIGNED_SHORT_5_6_5:
+ return 2;
+ case GL_UNSIGNED_BYTE:
+ case FF_GL_UNSIGNED_BYTE_3_3_2:
+ case FF_GL_UNSIGNED_BYTE_2_3_3_REV:
+ default:
+ break;
+ }
+ return 1;
+}
+
+static av_cold void opengl_get_texture_params(OpenGLContext *opengl)
+{
+ int i;
+ for (i = 0; i < FF_ARRAY_ELEMS(opengl_format_desc); i++) {
+        if (opengl_format_desc[i].pixel_format == opengl->pix_fmt) {
+ opengl->format = opengl_format_desc[i].format;
+ opengl->type = opengl_format_desc[i].type;
+ break;
+ }
+ }
+}
+
+static void opengl_compute_display_area(AVFormatContext *s)
+{
+ AVRational sar, dar; /* sample and display aspect ratios */
+ OpenGLContext *opengl = s->priv_data;
+ AVStream *st = s->streams[0];
+ AVCodecParameters *par = st->codecpar;
+
+ /* compute overlay width and height from the codec context information */
+ sar = st->sample_aspect_ratio.num ? st->sample_aspect_ratio : (AVRational){ 1, 1 };
+ dar = av_mul_q(sar, (AVRational){ par->width, par->height });
+
+ /* we suppose the screen has a 1/1 sample aspect ratio */
+ /* fit in the window */
+ if (av_cmp_q(dar, (AVRational){ opengl->window_width, opengl->window_height }) > 0) {
+ /* fit in width */
+ opengl->picture_width = opengl->window_width;
+ opengl->picture_height = av_rescale(opengl->picture_width, dar.den, dar.num);
+ } else {
+ /* fit in height */
+ opengl->picture_height = opengl->window_height;
+ opengl->picture_width = av_rescale(opengl->picture_height, dar.num, dar.den);
+ }
+}
+
+static av_cold void opengl_get_texture_size(OpenGLContext *opengl, int in_width, int in_height,
+ int *out_width, int *out_height)
+{
+ if (opengl->non_pow_2_textures) {
+ *out_width = in_width;
+ *out_height = in_height;
+ } else {
+ int max = FFMIN(FFMAX(in_width, in_height), opengl->max_texture_size);
+ unsigned power_of_2 = 1;
+ while (power_of_2 < max)
+ power_of_2 *= 2;
+ *out_height = power_of_2;
+ *out_width = power_of_2;
+ av_log(opengl, AV_LOG_DEBUG, "Texture size calculated from %dx%d into %dx%d\n",
+ in_width, in_height, *out_width, *out_height);
+ }
+}
+
+static av_cold void opengl_fill_color_map(OpenGLContext *opengl)
+{
+ const AVPixFmtDescriptor *desc;
+ int shift;
+ enum AVPixelFormat pix_fmt = opengl->pix_fmt;
+
+ /* We need order of components, not exact position, some minor HACKs here */
+ if (pix_fmt == AV_PIX_FMT_RGB565 || pix_fmt == AV_PIX_FMT_BGR555 ||
+ pix_fmt == AV_PIX_FMT_BGR8 || pix_fmt == AV_PIX_FMT_RGB8)
+ pix_fmt = AV_PIX_FMT_RGB24;
+ else if (pix_fmt == AV_PIX_FMT_BGR565 || pix_fmt == AV_PIX_FMT_RGB555)
+ pix_fmt = AV_PIX_FMT_BGR24;
+
+ desc = av_pix_fmt_desc_get(pix_fmt);
+ if (!(desc->flags & AV_PIX_FMT_FLAG_RGB))
+ return;
+
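+/* For component i, derive its byte-order position (byte offset, halved for
+ * 16-bit components) and set that entry in row i of the 4x4 color map, so
+ * the fragment shader can reorder the sampled channels into RGBA. */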
+#define FILL_COMPONENT(i) { \
+ shift = (desc->comp[i].depth - 1) >> 3; \
+ opengl->color_map[(i << 2) + (desc->comp[i].offset >> shift)] = 1.0; \
+ }
+
+ memset(opengl->color_map, 0, sizeof(opengl->color_map));
+ FILL_COMPONENT(0);
+ FILL_COMPONENT(1);
+ FILL_COMPONENT(2);
+ if (desc->flags & AV_PIX_FMT_FLAG_ALPHA)
+ FILL_COMPONENT(3);
+
+#undef FILL_COMPONENT
+}
+
+static av_cold GLuint opengl_load_shader(OpenGLContext *opengl, GLenum type, const char *source)
+{
+ GLuint shader = opengl->glprocs.glCreateShader(type);
+ GLint result;
+ if (!shader) {
+ av_log(opengl, AV_LOG_ERROR, "glCreateShader() failed\n");
+ return 0;
+ }
+ opengl->glprocs.glShaderSource(shader, 1, &source, NULL);
+ opengl->glprocs.glCompileShader(shader);
+
+ opengl->glprocs.glGetShaderiv(shader, FF_GL_COMPILE_STATUS, &result);
+ if (!result) {
+ char *log;
+ opengl->glprocs.glGetShaderiv(shader, FF_GL_INFO_LOG_LENGTH, &result);
+ if (result) {
+ if ((log = av_malloc(result))) {
+ opengl->glprocs.glGetShaderInfoLog(shader, result, NULL, log);
+ av_log(opengl, AV_LOG_ERROR, "Compile error: %s\n", log);
+ av_free(log);
+ }
+ }
+ goto fail;
+ }
+ OPENGL_ERROR_CHECK(opengl);
+ return shader;
+ fail:
+ opengl->glprocs.glDeleteShader(shader);
+ return 0;
+}
+
+static av_cold int opengl_compile_shaders(OpenGLContext *opengl, enum AVPixelFormat pix_fmt)
+{
+ GLint result;
+ const char *fragment_shader_code = opengl_get_fragment_shader_code(pix_fmt);
+
+ if (!fragment_shader_code) {
+ av_log(opengl, AV_LOG_ERROR, "Provided pixel format '%s' is not supported\n",
+ av_get_pix_fmt_name(pix_fmt));
+ return AVERROR(EINVAL);
+ }
+
+ opengl->vertex_shader = opengl_load_shader(opengl, FF_GL_VERTEX_SHADER,
+ FF_OPENGL_VERTEX_SHADER);
+ if (!opengl->vertex_shader) {
+ av_log(opengl, AV_LOG_ERROR, "Vertex shader loading failed.\n");
+ goto fail;
+ }
+ opengl->fragment_shader = opengl_load_shader(opengl, FF_GL_FRAGMENT_SHADER,
+ fragment_shader_code);
+ if (!opengl->fragment_shader) {
+ av_log(opengl, AV_LOG_ERROR, "Fragment shader loading failed.\n");
+ goto fail;
+ }
+
+ opengl->program = opengl->glprocs.glCreateProgram();
+ if (!opengl->program)
+ goto fail;
+
+ opengl->glprocs.glAttachShader(opengl->program, opengl->vertex_shader);
+ opengl->glprocs.glAttachShader(opengl->program, opengl->fragment_shader);
+ opengl->glprocs.glLinkProgram(opengl->program);
+
+ opengl->glprocs.glGetProgramiv(opengl->program, FF_GL_LINK_STATUS, &result);
+ if (!result) {
+ char *log;
+ opengl->glprocs.glGetProgramiv(opengl->program, FF_GL_INFO_LOG_LENGTH, &result);
+ if (result) {
+ log = av_malloc(result);
+ if (!log)
+ goto fail;
+ opengl->glprocs.glGetProgramInfoLog(opengl->program, result, NULL, log);
+ av_log(opengl, AV_LOG_ERROR, "Link error: %s\n", log);
+ av_free(log);
+ }
+ goto fail;
+ }
+
+ opengl->position_attrib = opengl->glprocs.glGetAttribLocation(opengl->program, "a_position");
+ opengl->texture_coords_attrib = opengl->glprocs.glGetAttribLocation(opengl->program, "a_textureCoords");
+ opengl->projection_matrix_location = opengl->glprocs.glGetUniformLocation(opengl->program, "u_projectionMatrix");
+ opengl->model_view_matrix_location = opengl->glprocs.glGetUniformLocation(opengl->program, "u_modelViewMatrix");
+ opengl->color_map_location = opengl->glprocs.glGetUniformLocation(opengl->program, "u_colorMap");
+ opengl->texture_location[0] = opengl->glprocs.glGetUniformLocation(opengl->program, "u_texture0");
+ opengl->texture_location[1] = opengl->glprocs.glGetUniformLocation(opengl->program, "u_texture1");
+ opengl->texture_location[2] = opengl->glprocs.glGetUniformLocation(opengl->program, "u_texture2");
+ opengl->texture_location[3] = opengl->glprocs.glGetUniformLocation(opengl->program, "u_texture3");
+ opengl->chroma_div_w_location = opengl->glprocs.glGetUniformLocation(opengl->program, "u_chroma_div_w");
+ opengl->chroma_div_h_location = opengl->glprocs.glGetUniformLocation(opengl->program, "u_chroma_div_h");
+
+ OPENGL_ERROR_CHECK(opengl);
+ return 0;
+ fail:
+ opengl->glprocs.glDeleteShader(opengl->vertex_shader);
+ opengl->glprocs.glDeleteShader(opengl->fragment_shader);
+ opengl->glprocs.glDeleteProgram(opengl->program);
+ opengl->fragment_shader = opengl->vertex_shader = opengl->program = 0;
+ return AVERROR_EXTERNAL;
+}
+
+static av_cold int opengl_configure_texture(OpenGLContext *opengl, GLuint texture,
+ GLsizei width, GLsizei height)
+{
+ if (texture) {
+ int new_width, new_height;
+ opengl_get_texture_size(opengl, width, height, &new_width, &new_height);
+ glBindTexture(GL_TEXTURE_2D, texture);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ glTexImage2D(GL_TEXTURE_2D, 0, opengl->format, new_width, new_height, 0,
+ opengl->format, opengl->type, NULL);
+ OPENGL_ERROR_CHECK(NULL);
+ }
+ return 0;
+ fail:
+ return AVERROR_EXTERNAL;
+}
+
+static av_cold int opengl_prepare_vertex(AVFormatContext *s)
+{
+ OpenGLContext *opengl = s->priv_data;
+ int tex_w, tex_h;
+
+ if (opengl->window_width > opengl->max_viewport_width || opengl->window_height > opengl->max_viewport_height) {
+ opengl->window_width = FFMIN(opengl->window_width, opengl->max_viewport_width);
+ opengl->window_height = FFMIN(opengl->window_height, opengl->max_viewport_height);
+        av_log(opengl, AV_LOG_WARNING, "Requested viewport is too big, limited to %dx%d\n", opengl->window_width, opengl->window_height);
+ }
+ glViewport(0, 0, opengl->window_width, opengl->window_height);
+ opengl_make_ortho(opengl->projection_matrix,
+ - (float)opengl->window_width / 2.0f, (float)opengl->window_width / 2.0f,
+ - (float)opengl->window_height / 2.0f, (float)opengl->window_height / 2.0f,
+ 1.0f, -1.0f);
+ opengl_make_identity(opengl->model_view_matrix);
+
+ opengl_compute_display_area(s);
+
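+    /* quad centered at the origin with the computed picture size;
+     * vertex order matches the two triangles defined in g_index */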
+ opengl->vertex[0].z = opengl->vertex[1].z = opengl->vertex[2].z = opengl->vertex[3].z = 0.0f;
+ opengl->vertex[0].x = opengl->vertex[1].x = - (float)opengl->picture_width / 2.0f;
+ opengl->vertex[2].x = opengl->vertex[3].x = (float)opengl->picture_width / 2.0f;
+ opengl->vertex[1].y = opengl->vertex[2].y = - (float)opengl->picture_height / 2.0f;
+ opengl->vertex[0].y = opengl->vertex[3].y = (float)opengl->picture_height / 2.0f;
+
+ opengl_get_texture_size(opengl, opengl->width, opengl->height, &tex_w, &tex_h);
+
+ opengl->vertex[0].s0 = 0.0f;
+ opengl->vertex[0].t0 = 0.0f;
+ opengl->vertex[1].s0 = 0.0f;
+ opengl->vertex[1].t0 = (float)opengl->height / (float)tex_h;
+ opengl->vertex[2].s0 = (float)opengl->width / (float)tex_w;
+ opengl->vertex[2].t0 = (float)opengl->height / (float)tex_h;
+ opengl->vertex[3].s0 = (float)opengl->width / (float)tex_w;
+ opengl->vertex[3].t0 = 0.0f;
+
+ opengl->glprocs.glBindBuffer(FF_GL_ARRAY_BUFFER, opengl->vertex_buffer);
+ opengl->glprocs.glBufferData(FF_GL_ARRAY_BUFFER, sizeof(opengl->vertex), opengl->vertex, FF_GL_STATIC_DRAW);
+ opengl->glprocs.glBindBuffer(FF_GL_ARRAY_BUFFER, 0);
+ OPENGL_ERROR_CHECK(opengl);
+ return 0;
+ fail:
+ return AVERROR_EXTERNAL;
+}
+
+static int opengl_prepare(OpenGLContext *opengl)
+{
+ int i;
+ opengl->glprocs.glUseProgram(opengl->program);
+ opengl->glprocs.glUniformMatrix4fv(opengl->projection_matrix_location, 1, GL_FALSE, opengl->projection_matrix);
+ opengl->glprocs.glUniformMatrix4fv(opengl->model_view_matrix_location, 1, GL_FALSE, opengl->model_view_matrix);
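+ /* Bind each plane texture to its own texture unit and point the matching
+ * sampler uniform at that unit. */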
+ for (i = 0; i < 4; i++)
+ if (opengl->texture_location[i] != -1) {
+ opengl->glprocs.glActiveTexture(GL_TEXTURE0 + i);
+ glBindTexture(GL_TEXTURE_2D, opengl->texture_name[i]);
+ opengl->glprocs.glUniform1i(opengl->texture_location[i], i);
+ }
+ if (opengl->color_map_location != -1)
+ opengl->glprocs.glUniformMatrix4fv(opengl->color_map_location, 1, GL_FALSE, opengl->color_map);
+ if (opengl->chroma_div_h_location != -1)
+ opengl->glprocs.glUniform1f(opengl->chroma_div_h_location, opengl->chroma_div_h);
+ if (opengl->chroma_div_w_location != -1)
+ opengl->glprocs.glUniform1f(opengl->chroma_div_w_location, opengl->chroma_div_w);
+
+ OPENGL_ERROR_CHECK(opengl);
+ return 0;
+ fail:
+ return AVERROR_EXTERNAL;
+}
+
+static int opengl_create_window(AVFormatContext *h)
+{
+ OpenGLContext *opengl = h->priv_data;
+ int ret;
+
+ if (!opengl->no_window) {
+#if HAVE_SDL
+ if ((ret = opengl_sdl_create_window(h)) < 0) {
+ av_log(opengl, AV_LOG_ERROR, "Cannot create default SDL window.\n");
+ return ret;
+ }
+#else
+ av_log(opengl, AV_LOG_ERROR, "FFmpeg is compiled without SDL. Cannot create default window.\n");
+ return AVERROR(ENOSYS);
+#endif
+ } else {
+ AVDeviceRect message;
+ message.x = message.y = 0;
+ message.width = opengl->window_width;
+ message.height = opengl->window_height;
+ if ((ret = avdevice_dev_to_app_control_message(h, AV_DEV_TO_APP_CREATE_WINDOW_BUFFER,
+ &message, sizeof(message))) < 0) {
+ av_log(opengl, AV_LOG_ERROR, "Application failed to create window buffer.\n");
+ return ret;
+ }
+ if ((ret = avdevice_dev_to_app_control_message(h, AV_DEV_TO_APP_PREPARE_WINDOW_BUFFER, NULL, 0)) < 0) {
+ av_log(opengl, AV_LOG_ERROR, "Application failed to prepare window buffer.\n");
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static int opengl_release_window(AVFormatContext *h)
+{
+ int ret;
+ OpenGLContext *opengl = h->priv_data;
+ if (!opengl->no_window) {
+#if HAVE_SDL
+ SDL_Quit();
+#endif
+ } else if ((ret = avdevice_dev_to_app_control_message(h, AV_DEV_TO_APP_DESTROY_WINDOW_BUFFER, NULL, 0)) < 0) {
+ av_log(opengl, AV_LOG_ERROR, "Application failed to release window buffer.\n");
+ return ret;
+ }
+ return 0;
+}
+
+static av_cold int opengl_write_trailer(AVFormatContext *h)
+{
+ OpenGLContext *opengl = h->priv_data;
+
+ if (opengl->no_window &&
+ avdevice_dev_to_app_control_message(h, AV_DEV_TO_APP_PREPARE_WINDOW_BUFFER, NULL, 0) < 0)
+ av_log(opengl, AV_LOG_ERROR, "Application failed to prepare window buffer.\n");
+
+ opengl_deinit_context(opengl);
+ opengl_release_window(h);
+
+ return 0;
+}
+
+static av_cold int opengl_init_context(OpenGLContext *opengl)
+{
+ int i, ret;
+ const AVPixFmtDescriptor *desc;
+
+ if ((ret = opengl_compile_shaders(opengl, opengl->pix_fmt)) < 0)
+ goto fail;
+
+ desc = av_pix_fmt_desc_get(opengl->pix_fmt);
+ av_assert0(desc->nb_components > 0 && desc->nb_components <= 4);
+ glGenTextures(desc->nb_components, opengl->texture_name);
+
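+ /* A single call fills both names: this relies on index_buffer and
+ * vertex_buffer being adjacent GLuint fields of OpenGLContext. */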
+ opengl->glprocs.glGenBuffers(2, &opengl->index_buffer);
+ if (!opengl->index_buffer || !opengl->vertex_buffer) {
+ av_log(opengl, AV_LOG_ERROR, "Buffer generation failed.\n");
+ ret = AVERROR_EXTERNAL;
+ goto fail;
+ }
+
+ opengl_configure_texture(opengl, opengl->texture_name[0], opengl->width, opengl->height);
+ if (desc->nb_components > 1) {
+ int has_alpha = desc->flags & AV_PIX_FMT_FLAG_ALPHA;
+ int num_planes = desc->nb_components - (has_alpha ? 1 : 0);
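+ /* Exact-size (NPOT) chroma textures need no coordinate scaling. With
+ * power-of-two textures every plane is allocated at luma size, so the
+ * shaders divide the chroma coordinates by the subsampling factors. */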
+ if (opengl->non_pow_2_textures) {
+ opengl->chroma_div_w = 1.0f;
+ opengl->chroma_div_h = 1.0f;
+ } else {
+ opengl->chroma_div_w = 1 << desc->log2_chroma_w;
+ opengl->chroma_div_h = 1 << desc->log2_chroma_h;
+ }
+ for (i = 1; i < num_planes; i++)
+ if (opengl->non_pow_2_textures)
+ opengl_configure_texture(opengl, opengl->texture_name[i],
+ AV_CEIL_RSHIFT(opengl->width, desc->log2_chroma_w),
+ AV_CEIL_RSHIFT(opengl->height, desc->log2_chroma_h));
+ else
+ opengl_configure_texture(opengl, opengl->texture_name[i], opengl->width, opengl->height);
+ if (has_alpha)
+ opengl_configure_texture(opengl, opengl->texture_name[3], opengl->width, opengl->height);
+ }
+
+ opengl->glprocs.glBindBuffer(FF_GL_ELEMENT_ARRAY_BUFFER, opengl->index_buffer);
+ opengl->glprocs.glBufferData(FF_GL_ELEMENT_ARRAY_BUFFER, sizeof(g_index), g_index, FF_GL_STATIC_DRAW);
+ opengl->glprocs.glBindBuffer(FF_GL_ELEMENT_ARRAY_BUFFER, 0);
+
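+ /* Blend pictures with an alpha component/plane over the background color. */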
+ glEnable(GL_BLEND);
+ glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
+
+ glClearColor((float)opengl->background[0] / 255.0f, (float)opengl->background[1] / 255.0f,
+ (float)opengl->background[2] / 255.0f, 1.0f);
+
+ ret = AVERROR_EXTERNAL;
+ OPENGL_ERROR_CHECK(opengl);
+
+ return 0;
+ fail:
+ return ret;
+}
+
+static av_cold int opengl_write_header(AVFormatContext *h)
+{
+ OpenGLContext *opengl = h->priv_data;
+ AVStream *st;
+ int ret;
+
+ if (h->nb_streams != 1 ||
+ h->streams[0]->codecpar->codec_type != AVMEDIA_TYPE_VIDEO ||
+ h->streams[0]->codecpar->codec_id != AV_CODEC_ID_RAWVIDEO) {
+ av_log(opengl, AV_LOG_ERROR, "Only a single video stream is supported.\n");
+ return AVERROR(EINVAL);
+ }
+ st = h->streams[0];
+ opengl->width = st->codecpar->width;
+ opengl->height = st->codecpar->height;
+ opengl->pix_fmt = st->codecpar->format;
+ if (!opengl->window_width)
+ opengl->window_width = opengl->width;
+ if (!opengl->window_height)
+ opengl->window_height = opengl->height;
+
+ if (!opengl->window_title && !opengl->no_window)
+ opengl->window_title = av_strdup(h->filename);
+
+ if ((ret = opengl_create_window(h)))
+ goto fail;
+
+ if ((ret = opengl_read_limits(opengl)) < 0)
+ goto fail;
+
+ if (opengl->width > opengl->max_texture_size || opengl->height > opengl->max_texture_size) {
+ av_log(opengl, AV_LOG_ERROR, "Too big picture %dx%d, max supported size is %dx%d\n",
+ opengl->width, opengl->height, opengl->max_texture_size, opengl->max_texture_size);
+ ret = AVERROR(EINVAL);
+ goto fail;
+ }
+
+ if ((ret = opengl_load_procedures(opengl)) < 0)
+ goto fail;
+
+ opengl_fill_color_map(opengl);
+ opengl_get_texture_params(opengl);
+
+ if ((ret = opengl_init_context(opengl)) < 0)
+ goto fail;
+
+ if ((ret = opengl_prepare_vertex(h)) < 0)
+ goto fail;
+
+ glClear(GL_COLOR_BUFFER_BIT);
+
+#if HAVE_SDL
+ if (!opengl->no_window)
+ SDL_GL_SwapBuffers();
+#endif
+ if (opengl->no_window &&
+ (ret = avdevice_dev_to_app_control_message(h, AV_DEV_TO_APP_DISPLAY_WINDOW_BUFFER, NULL, 0)) < 0) {
+ av_log(opengl, AV_LOG_ERROR, "Application failed to display window buffer.\n");
+ goto fail;
+ }
+
+ ret = AVERROR_EXTERNAL;
+ OPENGL_ERROR_CHECK(opengl);
+
+ opengl->inited = 1;
+ return 0;
+
+ fail:
+ opengl_write_trailer(h);
+ return ret;
+}
+
+static uint8_t* opengl_get_plane_pointer(OpenGLContext *opengl, AVPacket *pkt, int comp_index,
+ const AVPixFmtDescriptor *desc)
+{
+ uint8_t *data = pkt->data;
+ int wordsize = opengl_type_size(opengl->type);
+ int width_chroma = AV_CEIL_RSHIFT(opengl->width, desc->log2_chroma_w);
+ int height_chroma = AV_CEIL_RSHIFT(opengl->height, desc->log2_chroma_h);
+ int plane = desc->comp[comp_index].plane;
+
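+ /* Raw video packets carry their planes contiguously: luma first, then the
+ * subsampled chroma planes, then (optionally) a full-size alpha plane. */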
+ switch(plane) {
+ case 0:
+ break;
+ case 1:
+ data += opengl->width * opengl->height * wordsize;
+ break;
+ case 2:
+ data += opengl->width * opengl->height * wordsize;
+ data += width_chroma * height_chroma * wordsize;
+ break;
+ case 3:
+ data += opengl->width * opengl->height * wordsize;
+ data += 2 * width_chroma * height_chroma * wordsize;
+ break;
+ default:
+ return NULL;
+ }
+ return data;
+}
+
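+/* Upload one plane into its texture. AVFrame input honors linesize, either
+ * via FF_GL_UNPACK_ROW_LENGTH (when supported) or with a row-by-row upload;
+ * packet input is assumed to be tightly packed and is uploaded in one call. */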
+#define LOAD_TEXTURE_DATA(comp_index, sub) \
+{ \
+ int width = sub ? AV_CEIL_RSHIFT(opengl->width, desc->log2_chroma_w) : opengl->width; \
+ int height = sub ? AV_CEIL_RSHIFT(opengl->height, desc->log2_chroma_h): opengl->height; \
+ uint8_t *data; \
+ int plane = desc->comp[comp_index].plane; \
+ \
+ glBindTexture(GL_TEXTURE_2D, opengl->texture_name[comp_index]); \
+ if (!is_pkt) { \
+ GLint length = ((AVFrame *)input)->linesize[plane]; \
+ int bytes_per_pixel = opengl_type_size(opengl->type); \
+ if (!(desc->flags & AV_PIX_FMT_FLAG_PLANAR)) \
+ bytes_per_pixel *= desc->nb_components; \
+ data = ((AVFrame *)input)->data[plane]; \
+ if (!(length % bytes_per_pixel) && \
+ (opengl->unpack_subimage || ((length / bytes_per_pixel) == width))) { \
+ length /= bytes_per_pixel; \
+ if (length != width) \
+ glPixelStorei(FF_GL_UNPACK_ROW_LENGTH, length); \
+ glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height, \
+ opengl->format, opengl->type, data); \
+ if (length != width) \
+ glPixelStorei(FF_GL_UNPACK_ROW_LENGTH, 0); \
+ } else { \
+ int h; \
+ for (h = 0; h < height; h++) { \
+ glTexSubImage2D(GL_TEXTURE_2D, 0, 0, h, width, 1, \
+ opengl->format, opengl->type, data); \
+ data += length; \
+ } \
+ } \
+ } else { \
+ data = opengl_get_plane_pointer(opengl, input, comp_index, desc); \
+ glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height, \
+ opengl->format, opengl->type, data); \
+ } \
+}
+
+static int opengl_draw(AVFormatContext *h, void *input, int repaint, int is_pkt)
+{
+ OpenGLContext *opengl = h->priv_data;
+ enum AVPixelFormat pix_fmt = h->streams[0]->codecpar->format;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
+ int ret;
+
+#if HAVE_SDL
+ if (!opengl->no_window && (ret = opengl_sdl_process_events(h)) < 0)
+ goto fail;
+#endif
+ if (opengl->no_window &&
+ (ret = avdevice_dev_to_app_control_message(h, AV_DEV_TO_APP_PREPARE_WINDOW_BUFFER, NULL, 0)) < 0) {
+ av_log(opengl, AV_LOG_ERROR, "Application failed to prepare window buffer.\n");
+ goto fail;
+ }
+
+ glClear(GL_COLOR_BUFFER_BIT);
+
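+ /* A plain repaint (e.g. window expose) reuses the plane data already
+ * uploaded to the textures; otherwise upload the new picture first. */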
+ if (!repaint) {
+ if (is_pkt)
+ glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
+ LOAD_TEXTURE_DATA(0, 0)
+ if (desc->flags & AV_PIX_FMT_FLAG_PLANAR) {
+ LOAD_TEXTURE_DATA(1, 1)
+ LOAD_TEXTURE_DATA(2, 1)
+ if (desc->flags & AV_PIX_FMT_FLAG_ALPHA)
+ LOAD_TEXTURE_DATA(3, 0)
+ }
+ }
+ ret = AVERROR_EXTERNAL;
+ OPENGL_ERROR_CHECK(opengl);
+
+ if ((ret = opengl_prepare(opengl)) < 0)
+ goto fail;
+
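+ /* OpenGLVertexInfo packs 3 position floats followed by 2 texture
+ * coordinates, hence the 12-byte offset for the second attribute. */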
+ opengl->glprocs.glBindBuffer(FF_GL_ARRAY_BUFFER, opengl->vertex_buffer);
+ opengl->glprocs.glBindBuffer(FF_GL_ELEMENT_ARRAY_BUFFER, opengl->index_buffer);
+ opengl->glprocs.glVertexAttribPointer(opengl->position_attrib, 3, GL_FLOAT, GL_FALSE, sizeof(OpenGLVertexInfo), 0);
+ opengl->glprocs.glEnableVertexAttribArray(opengl->position_attrib);
+ opengl->glprocs.glVertexAttribPointer(opengl->texture_coords_attrib, 2, GL_FLOAT, GL_FALSE, sizeof(OpenGLVertexInfo), 12);
+ opengl->glprocs.glEnableVertexAttribArray(opengl->texture_coords_attrib);
+
+ glDrawElements(GL_TRIANGLES, FF_ARRAY_ELEMS(g_index), GL_UNSIGNED_SHORT, 0);
+
+ ret = AVERROR_EXTERNAL;
+ OPENGL_ERROR_CHECK(opengl);
+
+#if HAVE_SDL
+ if (!opengl->no_window)
+ SDL_GL_SwapBuffers();
+#endif
+ if (opengl->no_window &&
+ (ret = avdevice_dev_to_app_control_message(h, AV_DEV_TO_APP_DISPLAY_WINDOW_BUFFER, NULL, 0)) < 0) {
+ av_log(opengl, AV_LOG_ERROR, "Application failed to display window buffer.\n");
+ goto fail;
+ }
+
+ return 0;
+ fail:
+ return ret;
+}
+
+static int opengl_write_packet(AVFormatContext *h, AVPacket *pkt)
+{
+ return opengl_draw(h, pkt, 0, 1);
+}
+
+static int opengl_write_frame(AVFormatContext *h, int stream_index,
+ AVFrame **frame, unsigned flags)
+{
+ if ((flags & AV_WRITE_UNCODED_FRAME_QUERY))
+ return 0;
+ return opengl_draw(h, *frame, 0, 0);
+}
+
+#define OFFSET(x) offsetof(OpenGLContext, x)
+#define ENC AV_OPT_FLAG_ENCODING_PARAM
+static const AVOption options[] = {
+ { "background", "set background color", OFFSET(background), AV_OPT_TYPE_COLOR, {.str = "black"}, CHAR_MIN, CHAR_MAX, ENC },
+ { "no_window", "disable default window", OFFSET(no_window), AV_OPT_TYPE_INT, {.i64 = 0}, INT_MIN, INT_MAX, ENC },
+ { "window_title", "set window title", OFFSET(window_title), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, ENC },
+ { "window_size", "set window size", OFFSET(window_width), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL}, 0, 0, ENC },
+ { NULL }
+};
+
+static const AVClass opengl_class = {
+ .class_name = "opengl outdev",
+ .item_name = av_default_item_name,
+ .option = options,
+ .version = LIBAVUTIL_VERSION_INT,
+ .category = AV_CLASS_CATEGORY_DEVICE_VIDEO_OUTPUT,
+};
+
+AVOutputFormat ff_opengl_muxer = {
+ .name = "opengl",
+ .long_name = NULL_IF_CONFIG_SMALL("OpenGL output"),
+ .priv_data_size = sizeof(OpenGLContext),
+ .audio_codec = AV_CODEC_ID_NONE,
+ .video_codec = AV_CODEC_ID_RAWVIDEO,
+ .write_header = opengl_write_header,
+ .write_packet = opengl_write_packet,
+ .write_uncoded_frame = opengl_write_frame,
+ .write_trailer = opengl_write_trailer,
+ .control_message = opengl_control_message,
+ .flags = AVFMT_NOFILE | AVFMT_VARIABLE_FPS | AVFMT_NOTIMESTAMPS,
+ .priv_class = &opengl_class,
+};
diff --git a/libavdevice/opengl_enc_shaders.h b/libavdevice/opengl_enc_shaders.h
new file mode 100644
index 0000000000..67ee0ae7b4
--- /dev/null
+++ b/libavdevice/opengl_enc_shaders.h
@@ -0,0 +1,188 @@
+/*
+ * Copyright (c) 2014 Lukasz Marek
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVDEVICE_OPENGL_ENC_SHADERS_H
+#define AVDEVICE_OPENGL_ENC_SHADERS_H
+
+#include "libavutil/pixfmt.h"
+
+static const char * const FF_OPENGL_VERTEX_SHADER =
+ "uniform mat4 u_projectionMatrix;"
+ "uniform mat4 u_modelViewMatrix;"
+
+ "attribute vec4 a_position;"
+ "attribute vec2 a_textureCoords;"
+
+ "varying vec2 texture_coordinate;"
+
+ "void main()"
+ "{"
+ "gl_Position = u_projectionMatrix * (a_position * u_modelViewMatrix);"
+ "texture_coordinate = a_textureCoords;"
+ "}";
+
+/**
+ * Fragment shader for packed RGBA formats.
+ */
+static const char * const FF_OPENGL_FRAGMENT_SHADER_RGBA_PACKET =
+#if defined(GL_ES_VERSION_2_0)
+ "precision mediump float;"
+#endif
+ "uniform sampler2D u_texture0;"
+ "uniform mat4 u_colorMap;"
+
+ "varying vec2 texture_coordinate;"
+
+ "void main()"
+ "{"
+ "gl_FragColor = texture2D(u_texture0, texture_coordinate) * u_colorMap;"
+ "}";
+
+/**
+ * Fragment shader for packed RGB formats.
+ */
+static const char * const FF_OPENGL_FRAGMENT_SHADER_RGB_PACKET =
+#if defined(GL_ES_VERSION_2_0)
+ "precision mediump float;"
+#endif
+ "uniform sampler2D u_texture0;"
+ "uniform mat4 u_colorMap;"
+
+ "varying vec2 texture_coordinate;"
+
+ "void main()"
+ "{"
+ "gl_FragColor = vec4((texture2D(u_texture0, texture_coordinate) * u_colorMap).rgb, 1.0);"
+ "}";
+
+/**
+ * Fragment shader for planar RGBA formats.
+ */
+static const char * const FF_OPENGL_FRAGMENT_SHADER_RGBA_PLANAR =
+#if defined(GL_ES_VERSION_2_0)
+ "precision mediump float;"
+#endif
+ "uniform sampler2D u_texture0;"
+ "uniform sampler2D u_texture1;"
+ "uniform sampler2D u_texture2;"
+ "uniform sampler2D u_texture3;"
+
+ "varying vec2 texture_coordinate;"
+
+ "void main()"
+ "{"
+ "gl_FragColor = vec4(texture2D(u_texture0, texture_coordinate).r,"
+ "texture2D(u_texture1, texture_coordinate).r,"
+ "texture2D(u_texture2, texture_coordinate).r,"
+ "texture2D(u_texture3, texture_coordinate).r);"
+ "}";
+
+/**
+ * Fragment shader for planar RGB formats.
+ */
+static const char * const FF_OPENGL_FRAGMENT_SHADER_RGB_PLANAR =
+#if defined(GL_ES_VERSION_2_0)
+ "precision mediump float;"
+#endif
+ "uniform sampler2D u_texture0;"
+ "uniform sampler2D u_texture1;"
+ "uniform sampler2D u_texture2;"
+
+ "varying vec2 texture_coordinate;"
+
+ "void main()"
+ "{"
+ "gl_FragColor = vec4(texture2D(u_texture0, texture_coordinate).r,"
+ "texture2D(u_texture1, texture_coordinate).r,"
+ "texture2D(u_texture2, texture_coordinate).r,"
+ "1.0);"
+ "}";
+
+/**
+ * Fragment shader for planar YUV formats.
+ */
+static const char * const FF_OPENGL_FRAGMENT_SHADER_YUV_PLANAR =
+#if defined(GL_ES_VERSION_2_0)
+ "precision mediump float;"
+#endif
+ "uniform sampler2D u_texture0;"
+ "uniform sampler2D u_texture1;"
+ "uniform sampler2D u_texture2;"
+ "uniform float u_chroma_div_w;"
+ "uniform float u_chroma_div_h;"
+
+ "varying vec2 texture_coordinate;"
+
+ "void main()"
+ "{"
+ "vec3 yuv;"
+
+ "yuv.r = texture2D(u_texture0, texture_coordinate).r - 0.0625;"
+ "yuv.g = texture2D(u_texture1, vec2(texture_coordinate.x / u_chroma_div_w, texture_coordinate.y / u_chroma_div_h)).r - 0.5;"
+ "yuv.b = texture2D(u_texture2, vec2(texture_coordinate.x / u_chroma_div_w, texture_coordinate.y / u_chroma_div_h)).r - 0.5;"
+
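+ /* BT.601 limited-range YUV -> RGB; GLSL mat3 takes its entries in
+ * column-major order. */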
+ "gl_FragColor = clamp(vec4(mat3(1.1643, 1.16430, 1.1643,"
+ "0.0, -0.39173, 2.0170,"
+ "1.5958, -0.81290, 0.0) * yuv, 1.0), 0.0, 1.0);"
+
+ "}";
+
+/**
+ * Fragment shader for planar YUVA formats.
+ */
+static const char * const FF_OPENGL_FRAGMENT_SHADER_YUVA_PLANAR =
+#if defined(GL_ES_VERSION_2_0)
+ "precision mediump float;"
+#endif
+ "uniform sampler2D u_texture0;"
+ "uniform sampler2D u_texture1;"
+ "uniform sampler2D u_texture2;"
+ "uniform sampler2D u_texture3;"
+ "uniform float u_chroma_div_w;"
+ "uniform float u_chroma_div_h;"
+
+ "varying vec2 texture_coordinate;"
+
+ "void main()"
+ "{"
+ "vec3 yuv;"
+
+ "yuv.r = texture2D(u_texture0, texture_coordinate).r - 0.0625;"
+ "yuv.g = texture2D(u_texture1, vec2(texture_coordinate.x / u_chroma_div_w, texture_coordinate.y / u_chroma_div_h)).r - 0.5;"
+ "yuv.b = texture2D(u_texture2, vec2(texture_coordinate.x / u_chroma_div_w, texture_coordinate.y / u_chroma_div_h)).r - 0.5;"
+
+ "gl_FragColor = clamp(vec4(mat3(1.1643, 1.16430, 1.1643,"
+ "0.0, -0.39173, 2.0170,"
+ "1.5958, -0.81290, 0.0) * yuv, texture2D(u_texture3, texture_coordinate).r), 0.0, 1.0);"
+ "}";
+
+static const char * const FF_OPENGL_FRAGMENT_SHADER_GRAY =
+#if defined(GL_ES_VERSION_2_0)
+ "precision mediump float;"
+#endif
+ "uniform sampler2D u_texture0;"
+ "varying vec2 texture_coordinate;"
+ "void main()"
+ "{"
+ "float c = texture2D(u_texture0, texture_coordinate).r;"
+ "gl_FragColor = vec4(c, c, c, 1.0);"
+ "}";
+
+#endif /* AVDEVICE_OPENGL_ENC_SHADERS_H */
diff --git a/libavdevice/oss.c b/libavdevice/oss.c
index eb8d454422..d74112825b 100644
--- a/libavdevice/oss.c
+++ b/libavdevice/oss.c
@@ -2,20 +2,20 @@
* Linux audio play and grab interface
* Copyright (c) 2000, 2001 Fabrice Bellard
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -29,15 +29,16 @@
#include <sys/soundcard.h>
#endif
+#if HAVE_UNISTD_H
#include <unistd.h>
+#endif
#include <fcntl.h>
#include <sys/ioctl.h>
#include "libavutil/log.h"
#include "libavcodec/avcodec.h"
-
-#include "libavformat/avformat.h"
+#include "avdevice.h"
#include "oss.h"
@@ -48,14 +49,13 @@ int ff_oss_audio_open(AVFormatContext *s1, int is_output,
int audio_fd;
int tmp, err;
char *flip = getenv("AUDIO_FLIP_LEFT");
- char errbuff[128];
if (is_output)
audio_fd = avpriv_open(audio_device, O_WRONLY);
else
audio_fd = avpriv_open(audio_device, O_RDONLY);
if (audio_fd < 0) {
- av_log(s1, AV_LOG_ERROR, "%s: %s\n", audio_device, strerror(errno));
+ av_log(s1, AV_LOG_ERROR, "%s: %s\n", audio_device, av_err2str(AVERROR(errno)));
return AVERROR(EIO);
}
@@ -64,15 +64,17 @@ int ff_oss_audio_open(AVFormatContext *s1, int is_output,
}
/* non blocking mode */
- if (!is_output)
- fcntl(audio_fd, F_SETFL, O_NONBLOCK);
+ if (!is_output) {
+ if (fcntl(audio_fd, F_SETFL, O_NONBLOCK) < 0) {
+ av_log(s1, AV_LOG_WARNING, "%s: Could not enable non block mode (%s)\n", audio_device, av_err2str(AVERROR(errno)));
+ }
+ }
s->frame_size = OSS_AUDIO_BLOCK_SIZE;
#define CHECK_IOCTL_ERROR(event) \
if (err < 0) { \
- av_strerror(AVERROR(errno), errbuff, sizeof(errbuff)); \
- av_log(s1, AV_LOG_ERROR, #event ": %s\n", errbuff); \
+ av_log(s1, AV_LOG_ERROR, #event ": %s\n", av_err2str(AVERROR(errno)));\
goto fail; \
}
@@ -80,7 +82,10 @@ int ff_oss_audio_open(AVFormatContext *s1, int is_output,
* We don't CHECK_IOCTL_ERROR here because even if failed OSS still may be
* usable. If OSS is not usable the SNDCTL_DSP_SETFMTS later is going to
* fail anyway. */
- (void) ioctl(audio_fd, SNDCTL_DSP_GETFMTS, &tmp);
+ err = ioctl(audio_fd, SNDCTL_DSP_GETFMTS, &tmp);
+ if (err < 0) {
+ av_log(s1, AV_LOG_WARNING, "SNDCTL_DSP_GETFMTS: %s\n", av_err2str(AVERROR(errno)));
+ }
#if HAVE_BIGENDIAN
if (tmp & AFMT_S16_BE) {
diff --git a/libavdevice/oss.h b/libavdevice/oss.h
index 0fbe14b3ec..1f3f5e4e83 100644
--- a/libavdevice/oss.h
+++ b/libavdevice/oss.h
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/libavdevice/oss_dec.c b/libavdevice/oss_dec.c
index 6f51a30662..9f748f2bc3 100644
--- a/libavdevice/oss_dec.c
+++ b/libavdevice/oss_dec.c
@@ -2,20 +2,20 @@
* Linux audio play interface
* Copyright (c) 2000, 2001 Fabrice Bellard
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -29,7 +29,9 @@
#include <sys/soundcard.h>
#endif
+#if HAVE_UNISTD_H
#include <unistd.h>
+#endif
#include <fcntl.h>
#include <sys/ioctl.h>
@@ -39,7 +41,7 @@
#include "libavcodec/avcodec.h"
-#include "libavformat/avformat.h"
+#include "avdevice.h"
#include "libavformat/internal.h"
#include "oss.h"
@@ -132,6 +134,7 @@ static const AVClass oss_demuxer_class = {
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
+ .category = AV_CLASS_CATEGORY_DEVICE_AUDIO_INPUT,
};
AVInputFormat ff_oss_demuxer = {
diff --git a/libavdevice/oss_enc.c b/libavdevice/oss_enc.c
index eb6432ced0..2268b4cfe4 100644
--- a/libavdevice/oss_enc.c
+++ b/libavdevice/oss_enc.c
@@ -2,20 +2,20 @@
* Linux audio grab interface
* Copyright (c) 2000, 2001 Fabrice Bellard
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -27,7 +27,9 @@
#include <sys/soundcard.h>
#endif
+#if HAVE_UNISTD_H
#include <unistd.h>
+#endif
#include <fcntl.h>
#include <sys/ioctl.h>
@@ -35,7 +37,7 @@
#include "libavcodec/avcodec.h"
-#include "libavformat/avformat.h"
+#include "avdevice.h"
#include "libavformat/internal.h"
#include "oss.h"
@@ -92,6 +94,13 @@ static int audio_write_trailer(AVFormatContext *s1)
return 0;
}
+static const AVClass oss_muxer_class = {
+ .class_name = "OSS muxer",
+ .item_name = av_default_item_name,
+ .version = LIBAVUTIL_VERSION_INT,
+ .category = AV_CLASS_CATEGORY_DEVICE_AUDIO_OUTPUT,
+};
+
AVOutputFormat ff_oss_muxer = {
.name = "oss",
.long_name = NULL_IF_CONFIG_SMALL("OSS (Open Sound System) playback"),
@@ -105,4 +114,5 @@ AVOutputFormat ff_oss_muxer = {
.write_packet = audio_write_packet,
.write_trailer = audio_write_trailer,
.flags = AVFMT_NOFILE,
+ .priv_class = &oss_muxer_class,
};
diff --git a/libavdevice/pulse.c b/libavdevice/pulse.c
deleted file mode 100644
index c4d939a0d3..0000000000
--- a/libavdevice/pulse.c
+++ /dev/null
@@ -1,197 +0,0 @@
-/*
- * Pulseaudio input
- * Copyright (c) 2011 Luca Barbato <lu_zero@gentoo.org>
- *
- * This file is part of Libav.
- *
- * Libav is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * Libav is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-/**
- * @file
- * PulseAudio input using the simple API.
- * @author Luca Barbato <lu_zero@gentoo.org>
- */
-
-#include <pulse/simple.h>
-#include <pulse/rtclock.h>
-#include <pulse/error.h>
-
-#include "libavutil/internal.h"
-#include "libavutil/opt.h"
-#include "libavutil/time.h"
-
-#include "libavformat/avformat.h"
-#include "libavformat/internal.h"
-
-#define DEFAULT_CODEC_ID AV_NE(AV_CODEC_ID_PCM_S16BE, AV_CODEC_ID_PCM_S16LE)
-
-typedef struct PulseData {
- AVClass *class;
- char *server;
- char *name;
- char *stream_name;
- int sample_rate;
- int channels;
- int frame_size;
- int fragment_size;
- pa_simple *s;
- int64_t pts;
- int64_t frame_duration;
- int wallclock;
-} PulseData;
-
-static pa_sample_format_t codec_id_to_pulse_format(int codec_id) {
- switch (codec_id) {
- case AV_CODEC_ID_PCM_U8: return PA_SAMPLE_U8;
- case AV_CODEC_ID_PCM_ALAW: return PA_SAMPLE_ALAW;
- case AV_CODEC_ID_PCM_MULAW: return PA_SAMPLE_ULAW;
- case AV_CODEC_ID_PCM_S16LE: return PA_SAMPLE_S16LE;
- case AV_CODEC_ID_PCM_S16BE: return PA_SAMPLE_S16BE;
- case AV_CODEC_ID_PCM_F32LE: return PA_SAMPLE_FLOAT32LE;
- case AV_CODEC_ID_PCM_F32BE: return PA_SAMPLE_FLOAT32BE;
- case AV_CODEC_ID_PCM_S32LE: return PA_SAMPLE_S32LE;
- case AV_CODEC_ID_PCM_S32BE: return PA_SAMPLE_S32BE;
- case AV_CODEC_ID_PCM_S24LE: return PA_SAMPLE_S24LE;
- case AV_CODEC_ID_PCM_S24BE: return PA_SAMPLE_S24BE;
- default: return PA_SAMPLE_INVALID;
- }
-}
-
-static av_cold int pulse_read_header(AVFormatContext *s)
-{
- PulseData *pd = s->priv_data;
- AVStream *st;
- char *device = NULL;
- int ret;
- enum AVCodecID codec_id =
- s->audio_codec_id == AV_CODEC_ID_NONE ? DEFAULT_CODEC_ID : s->audio_codec_id;
- const pa_sample_spec ss = { codec_id_to_pulse_format(codec_id),
- pd->sample_rate,
- pd->channels };
-
- pa_buffer_attr attr = { -1 };
-
- st = avformat_new_stream(s, NULL);
-
- if (!st) {
- av_log(s, AV_LOG_ERROR, "Cannot add stream\n");
- return AVERROR(ENOMEM);
- }
-
- attr.fragsize = pd->fragment_size;
-
- if (strcmp(s->filename, "default"))
- device = s->filename;
-
- pd->s = pa_simple_new(pd->server, pd->name,
- PA_STREAM_RECORD,
- device, pd->stream_name, &ss,
- NULL, &attr, &ret);
-
- if (!pd->s) {
- av_log(s, AV_LOG_ERROR, "pa_simple_new failed: %s\n",
- pa_strerror(ret));
- return AVERROR(EIO);
- }
- /* take real parameters */
- st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
- st->codecpar->codec_id = codec_id;
- st->codecpar->sample_rate = pd->sample_rate;
- st->codecpar->channels = pd->channels;
- avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
-
- pd->pts = AV_NOPTS_VALUE;
- pd->frame_duration = (pd->frame_size * 1000000LL * 8) /
- (pd->sample_rate * pd->channels * av_get_bits_per_sample(codec_id));
-
- return 0;
-}
-
-static int pulse_read_packet(AVFormatContext *s, AVPacket *pkt)
-{
- PulseData *pd = s->priv_data;
- int res;
- pa_usec_t latency;
-
- if (av_new_packet(pkt, pd->frame_size) < 0) {
- return AVERROR(ENOMEM);
- }
-
- if ((pa_simple_read(pd->s, pkt->data, pkt->size, &res)) < 0) {
- av_log(s, AV_LOG_ERROR, "pa_simple_read failed: %s\n",
- pa_strerror(res));
- av_packet_unref(pkt);
- return AVERROR(EIO);
- }
-
- if ((latency = pa_simple_get_latency(pd->s, &res)) == (pa_usec_t) -1) {
- av_log(s, AV_LOG_ERROR, "pa_simple_get_latency() failed: %s\n",
- pa_strerror(res));
- return AVERROR(EIO);
- }
-
- if (pd->pts == AV_NOPTS_VALUE) {
- pd->pts = -latency;
- if (pd->wallclock)
- pd->pts += av_gettime();
- }
-
- pkt->pts = pd->pts;
-
- pd->pts += pd->frame_duration;
-
- return 0;
-}
-
-static av_cold int pulse_close(AVFormatContext *s)
-{
- PulseData *pd = s->priv_data;
- pa_simple_free(pd->s);
- return 0;
-}
-
-#define OFFSET(a) offsetof(PulseData, a)
-#define D AV_OPT_FLAG_DECODING_PARAM
-
-static const AVOption options[] = {
- { "server", "pulse server name", OFFSET(server), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, D },
- { "name", "application name", OFFSET(name), AV_OPT_TYPE_STRING, {.str = "libav"}, 0, 0, D },
- { "stream_name", "stream description", OFFSET(stream_name), AV_OPT_TYPE_STRING, {.str = "record"}, 0, 0, D },
- { "sample_rate", "sample rate in Hz", OFFSET(sample_rate), AV_OPT_TYPE_INT, {.i64 = 48000}, 1, INT_MAX, D },
- { "channels", "number of audio channels", OFFSET(channels), AV_OPT_TYPE_INT, {.i64 = 2}, 1, INT_MAX, D },
- { "frame_size", "number of bytes per frame", OFFSET(frame_size), AV_OPT_TYPE_INT, {.i64 = 1024}, 1, INT_MAX, D },
- { "fragment_size", "buffering size, affects latency and cpu usage", OFFSET(fragment_size), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, D },
- { "wallclock", "set the initial pts using the current time", OFFSET(wallclock), AV_OPT_TYPE_INT, {.i64 = 1}, -1, 1, D },
- { NULL },
-};
-
-static const AVClass pulse_demuxer_class = {
- .class_name = "Pulse demuxer",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
-
-AVInputFormat ff_pulse_demuxer = {
- .name = "pulse",
- .long_name = NULL_IF_CONFIG_SMALL("Pulse audio input"),
- .priv_data_size = sizeof(PulseData),
- .read_header = pulse_read_header,
- .read_packet = pulse_read_packet,
- .read_close = pulse_close,
- .flags = AVFMT_NOFILE,
- .priv_class = &pulse_demuxer_class,
-};
diff --git a/libavdevice/pulse_audio_common.c b/libavdevice/pulse_audio_common.c
new file mode 100644
index 0000000000..4046641479
--- /dev/null
+++ b/libavdevice/pulse_audio_common.c
@@ -0,0 +1,249 @@
+/*
+ * Pulseaudio common
+ * Copyright (c) 2014 Lukasz Marek
+ * Copyright (c) 2011 Luca Barbato <lu_zero@gentoo.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "pulse_audio_common.h"
+#include "libavutil/attributes.h"
+#include "libavutil/avstring.h"
+#include "libavutil/mem.h"
+#include "libavutil/avassert.h"
+
+pa_sample_format_t av_cold ff_codec_id_to_pulse_format(enum AVCodecID codec_id)
+{
+ switch (codec_id) {
+ case AV_CODEC_ID_PCM_U8: return PA_SAMPLE_U8;
+ case AV_CODEC_ID_PCM_ALAW: return PA_SAMPLE_ALAW;
+ case AV_CODEC_ID_PCM_MULAW: return PA_SAMPLE_ULAW;
+ case AV_CODEC_ID_PCM_S16LE: return PA_SAMPLE_S16LE;
+ case AV_CODEC_ID_PCM_S16BE: return PA_SAMPLE_S16BE;
+ case AV_CODEC_ID_PCM_F32LE: return PA_SAMPLE_FLOAT32LE;
+ case AV_CODEC_ID_PCM_F32BE: return PA_SAMPLE_FLOAT32BE;
+ case AV_CODEC_ID_PCM_S32LE: return PA_SAMPLE_S32LE;
+ case AV_CODEC_ID_PCM_S32BE: return PA_SAMPLE_S32BE;
+ case AV_CODEC_ID_PCM_S24LE: return PA_SAMPLE_S24LE;
+ case AV_CODEC_ID_PCM_S24BE: return PA_SAMPLE_S24BE;
+ default: return PA_SAMPLE_INVALID;
+ }
+}
+
+enum PulseAudioContextState {
+ PULSE_CONTEXT_INITIALIZING,
+ PULSE_CONTEXT_READY,
+ PULSE_CONTEXT_FINISHED
+};
+
+typedef struct PulseAudioDeviceList {
+ AVDeviceInfoList *devices;
+ int error_code;
+ int output;
+ char *default_device;
+} PulseAudioDeviceList;
+
+static void pa_state_cb(pa_context *c, void *userdata)
+{
+ enum PulseAudioContextState *context_state = userdata;
+
+ switch (pa_context_get_state(c)) {
+ case PA_CONTEXT_FAILED:
+ case PA_CONTEXT_TERMINATED:
+ *context_state = PULSE_CONTEXT_FINISHED;
+ break;
+ case PA_CONTEXT_READY:
+ *context_state = PULSE_CONTEXT_READY;
+ break;
+ default:
+ break;
+ }
+}
+
+void ff_pulse_audio_disconnect_context(pa_mainloop **pa_ml, pa_context **pa_ctx)
+{
+ av_assert0(pa_ml);
+ av_assert0(pa_ctx);
+
+ if (*pa_ctx) {
+ pa_context_set_state_callback(*pa_ctx, NULL, NULL);
+ pa_context_disconnect(*pa_ctx);
+ pa_context_unref(*pa_ctx);
+ }
+ if (*pa_ml)
+ pa_mainloop_free(*pa_ml);
+ *pa_ml = NULL;
+ *pa_ctx = NULL;
+}
+
+int ff_pulse_audio_connect_context(pa_mainloop **pa_ml, pa_context **pa_ctx,
+ const char *server, const char *description)
+{
+ int ret;
+ pa_mainloop_api *pa_mlapi = NULL;
+ enum PulseAudioContextState context_state = PULSE_CONTEXT_INITIALIZING;
+
+ av_assert0(pa_ml);
+ av_assert0(pa_ctx);
+
+ *pa_ml = NULL;
+ *pa_ctx = NULL;
+
+ if (!(*pa_ml = pa_mainloop_new()))
+ return AVERROR(ENOMEM);
+ if (!(pa_mlapi = pa_mainloop_get_api(*pa_ml))) {
+ ret = AVERROR_EXTERNAL;
+ goto fail;
+ }
+ if (!(*pa_ctx = pa_context_new(pa_mlapi, description))) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+ pa_context_set_state_callback(*pa_ctx, pa_state_cb, &context_state);
+ if (pa_context_connect(*pa_ctx, server, 0, NULL) < 0) {
+ ret = AVERROR_EXTERNAL;
+ goto fail;
+ }
+
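+ /* Pump the mainloop until pa_state_cb() reports ready or failure. */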
+ while (context_state == PULSE_CONTEXT_INITIALIZING)
+ pa_mainloop_iterate(*pa_ml, 1, NULL);
+ if (context_state == PULSE_CONTEXT_FINISHED) {
+ ret = AVERROR_EXTERNAL;
+ goto fail;
+ }
+ return 0;
+
+ fail:
+ ff_pulse_audio_disconnect_context(pa_ml, pa_ctx);
+ return ret;
+}
+
+static void pulse_add_detected_device(PulseAudioDeviceList *info,
+ const char *name, const char *description)
+{
+ int ret;
+ AVDeviceInfo *new_device = NULL;
+
+ if (info->error_code)
+ return;
+
+ new_device = av_mallocz(sizeof(AVDeviceInfo));
+ if (!new_device) {
+ info->error_code = AVERROR(ENOMEM);
+ return;
+ }
+
+ new_device->device_description = av_strdup(description);
+ new_device->device_name = av_strdup(name);
+
+ if (!new_device->device_description || !new_device->device_name) {
+ info->error_code = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ if ((ret = av_dynarray_add_nofree(&info->devices->devices,
+ &info->devices->nb_devices, new_device)) < 0) {
+ info->error_code = ret;
+ goto fail;
+ }
+ return;
+
+ fail:
+ av_freep(&new_device->device_description);
+ av_freep(&new_device->device_name);
+ av_free(new_device);
+}
+
+static void pulse_audio_source_device_cb(pa_context *c, const pa_source_info *dev,
+ int eol, void *userdata)
+{
+ if (!eol)
+ pulse_add_detected_device(userdata, dev->name, dev->description);
+}
+
+static void pulse_audio_sink_device_cb(pa_context *c, const pa_sink_info *dev,
+ int eol, void *userdata)
+{
+ if (!eol)
+ pulse_add_detected_device(userdata, dev->name, dev->description);
+}
+
+static void pulse_server_info_cb(pa_context *c, const pa_server_info *i, void *userdata)
+{
+ PulseAudioDeviceList *info = userdata;
+ if (info->output)
+ info->default_device = av_strdup(i->default_sink_name);
+ else
+ info->default_device = av_strdup(i->default_source_name);
+ if (!info->default_device)
+ info->error_code = AVERROR(ENOMEM);
+}
+
+int ff_pulse_audio_get_devices(AVDeviceInfoList *devices, const char *server, int output)
+{
+ pa_mainloop *pa_ml = NULL;
+ pa_operation *pa_op = NULL;
+ pa_context *pa_ctx = NULL;
+ enum pa_operation_state op_state;
+ PulseAudioDeviceList dev_list = { 0 };
+ int i;
+
+ dev_list.output = output;
+ dev_list.devices = devices;
+ if (!devices)
+ return AVERROR(EINVAL);
+ devices->nb_devices = 0;
+ devices->devices = NULL;
+
+ if ((dev_list.error_code = ff_pulse_audio_connect_context(&pa_ml, &pa_ctx, server, "Query devices")) < 0)
+ goto fail;
+
+ if (output)
+ pa_op = pa_context_get_sink_info_list(pa_ctx, pulse_audio_sink_device_cb, &dev_list);
+ else
+ pa_op = pa_context_get_source_info_list(pa_ctx, pulse_audio_source_device_cb, &dev_list);
+ while ((op_state = pa_operation_get_state(pa_op)) == PA_OPERATION_RUNNING)
+ pa_mainloop_iterate(pa_ml, 1, NULL);
+ if (op_state != PA_OPERATION_DONE)
+ dev_list.error_code = AVERROR_EXTERNAL;
+ pa_operation_unref(pa_op);
+ if (dev_list.error_code < 0)
+ goto fail;
+
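+ /* Ask the server for the default sink/source name so it can be matched
+ * against the devices enumerated above. */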
+ pa_op = pa_context_get_server_info(pa_ctx, pulse_server_info_cb, &dev_list);
+ while ((op_state = pa_operation_get_state(pa_op)) == PA_OPERATION_RUNNING)
+ pa_mainloop_iterate(pa_ml, 1, NULL);
+ if (op_state != PA_OPERATION_DONE)
+ dev_list.error_code = AVERROR_EXTERNAL;
+ pa_operation_unref(pa_op);
+ if (dev_list.error_code < 0)
+ goto fail;
+
+ devices->default_device = -1;
+ for (i = 0; i < devices->nb_devices; i++) {
+ if (!strcmp(devices->devices[i]->device_name, dev_list.default_device)) {
+ devices->default_device = i;
+ break;
+ }
+ }
+
+ fail:
+ av_free(dev_list.default_device);
+ ff_pulse_audio_disconnect_context(&pa_ml, &pa_ctx);
+ return dev_list.error_code;
+}
diff --git a/libavdevice/pulse_audio_common.h b/libavdevice/pulse_audio_common.h
new file mode 100644
index 0000000000..902795e4f7
--- /dev/null
+++ b/libavdevice/pulse_audio_common.h
@@ -0,0 +1,40 @@
+/*
+ * Pulseaudio input
+ * Copyright (c) 2011 Luca Barbato <lu_zero@gentoo.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVDEVICE_PULSE_AUDIO_COMMON_H
+#define AVDEVICE_PULSE_AUDIO_COMMON_H
+
+#include <pulse/pulseaudio.h>
+#include "libavcodec/avcodec.h"
+#include "avdevice.h"
+
+pa_sample_format_t ff_codec_id_to_pulse_format(enum AVCodecID codec_id);
+
+av_warn_unused_result
+int ff_pulse_audio_get_devices(AVDeviceInfoList *devices, const char *server, int output);
+
+av_warn_unused_result
+int ff_pulse_audio_connect_context(pa_mainloop **pa_ml, pa_context **pa_ctx,
+ const char *server, const char *description);
+
+void ff_pulse_audio_disconnect_context(pa_mainloop **pa_ml, pa_context **pa_ctx);
+
+#endif /* AVDEVICE_PULSE_AUDIO_COMMON_H */
diff --git a/libavdevice/pulse_audio_dec.c b/libavdevice/pulse_audio_dec.c
new file mode 100644
index 0000000000..95a1d6ecfa
--- /dev/null
+++ b/libavdevice/pulse_audio_dec.c
@@ -0,0 +1,376 @@
+/*
+ * Pulseaudio input
+ * Copyright (c) 2011 Luca Barbato <lu_zero@gentoo.org>
+ * Copyright 2004-2006 Lennart Poettering
+ * Copyright (c) 2014 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <pulse/rtclock.h>
+#include <pulse/error.h>
+
+#include "libavutil/internal.h"
+#include "libavutil/opt.h"
+#include "libavutil/time.h"
+
+#include "libavformat/avformat.h"
+#include "libavformat/internal.h"
+#include "pulse_audio_common.h"
+#include "timefilter.h"
+
+#define DEFAULT_CODEC_ID AV_NE(AV_CODEC_ID_PCM_S16BE, AV_CODEC_ID_PCM_S16LE)
+
+typedef struct PulseData {
+ AVClass *class;
+ char *server;
+ char *name;
+ char *stream_name;
+ int sample_rate;
+ int channels;
+ int frame_size;
+ int fragment_size;
+
+ pa_threaded_mainloop *mainloop;
+ pa_context *context;
+ pa_stream *stream;
+
+ TimeFilter *timefilter;
+ int last_period;
+ int wallclock;
+} PulseData;
+
+
+#define CHECK_SUCCESS_GOTO(rerror, expression, label) \
+ do { \
+ if (!(expression)) { \
+ rerror = AVERROR_EXTERNAL; \
+ goto label; \
+ } \
+ } while (0)
+
+#define CHECK_DEAD_GOTO(p, rerror, label) \
+ do { \
+ if (!(p)->context || !PA_CONTEXT_IS_GOOD(pa_context_get_state((p)->context)) || \
+ !(p)->stream || !PA_STREAM_IS_GOOD(pa_stream_get_state((p)->stream))) { \
+ rerror = AVERROR_EXTERNAL; \
+ goto label; \
+ } \
+ } while (0)
+
+static void context_state_cb(pa_context *c, void *userdata) {
+ PulseData *p = userdata;
+
+ switch (pa_context_get_state(c)) {
+ case PA_CONTEXT_READY:
+ case PA_CONTEXT_TERMINATED:
+ case PA_CONTEXT_FAILED:
+ pa_threaded_mainloop_signal(p->mainloop, 0);
+ break;
+ }
+}
+
+static void stream_state_cb(pa_stream *s, void * userdata) {
+ PulseData *p = userdata;
+
+ switch (pa_stream_get_state(s)) {
+ case PA_STREAM_READY:
+ case PA_STREAM_FAILED:
+ case PA_STREAM_TERMINATED:
+ pa_threaded_mainloop_signal(p->mainloop, 0);
+ break;
+ }
+}
+
+static void stream_request_cb(pa_stream *s, size_t length, void *userdata) {
+ PulseData *p = userdata;
+
+ pa_threaded_mainloop_signal(p->mainloop, 0);
+}
+
+static void stream_latency_update_cb(pa_stream *s, void *userdata) {
+ PulseData *p = userdata;
+
+ pa_threaded_mainloop_signal(p->mainloop, 0);
+}
+
+static av_cold int pulse_close(AVFormatContext *s)
+{
+ PulseData *pd = s->priv_data;
+
+ if (pd->mainloop)
+ pa_threaded_mainloop_stop(pd->mainloop);
+
+ if (pd->stream)
+ pa_stream_unref(pd->stream);
+ pd->stream = NULL;
+
+ if (pd->context) {
+ pa_context_disconnect(pd->context);
+ pa_context_unref(pd->context);
+ }
+ pd->context = NULL;
+
+ if (pd->mainloop)
+ pa_threaded_mainloop_free(pd->mainloop);
+ pd->mainloop = NULL;
+
+ ff_timefilter_destroy(pd->timefilter);
+ pd->timefilter = NULL;
+
+ return 0;
+}
+
+static av_cold int pulse_read_header(AVFormatContext *s)
+{
+ PulseData *pd = s->priv_data;
+ AVStream *st;
+ char *device = NULL;
+ int ret;
+ enum AVCodecID codec_id =
+ s->audio_codec_id == AV_CODEC_ID_NONE ? DEFAULT_CODEC_ID : s->audio_codec_id;
+ const pa_sample_spec ss = { ff_codec_id_to_pulse_format(codec_id),
+ pd->sample_rate,
+ pd->channels };
+
+ pa_buffer_attr attr = { -1 };
+
+ st = avformat_new_stream(s, NULL);
+
+ if (!st) {
+ av_log(s, AV_LOG_ERROR, "Cannot add stream\n");
+ return AVERROR(ENOMEM);
+ }
+
+ attr.fragsize = pd->fragment_size;
+
+ if (s->filename[0] != '\0' && strcmp(s->filename, "default"))
+ device = s->filename;
+
+ if (!(pd->mainloop = pa_threaded_mainloop_new())) {
+ pulse_close(s);
+ return AVERROR_EXTERNAL;
+ }
+
+ if (!(pd->context = pa_context_new(pa_threaded_mainloop_get_api(pd->mainloop), pd->name))) {
+ pulse_close(s);
+ return AVERROR_EXTERNAL;
+ }
+
+ pa_context_set_state_callback(pd->context, context_state_cb, pd);
+
+ if (pa_context_connect(pd->context, pd->server, 0, NULL) < 0) {
+ pulse_close(s);
+ return AVERROR(pa_context_errno(pd->context));
+ }
+
+ pa_threaded_mainloop_lock(pd->mainloop);
+
+ if (pa_threaded_mainloop_start(pd->mainloop) < 0) {
+ ret = AVERROR_EXTERNAL;
+ goto unlock_and_fail;
+ }
+
+ for (;;) {
+ pa_context_state_t state;
+
+ state = pa_context_get_state(pd->context);
+
+ if (state == PA_CONTEXT_READY)
+ break;
+
+ if (!PA_CONTEXT_IS_GOOD(state)) {
+ ret = AVERROR(pa_context_errno(pd->context));
+ goto unlock_and_fail;
+ }
+
+ /* Wait until the context is ready */
+ pa_threaded_mainloop_wait(pd->mainloop);
+ }
+
+ if (!(pd->stream = pa_stream_new(pd->context, pd->stream_name, &ss, NULL))) {
+ ret = AVERROR(pa_context_errno(pd->context));
+ goto unlock_and_fail;
+ }
+
+ pa_stream_set_state_callback(pd->stream, stream_state_cb, pd);
+ pa_stream_set_read_callback(pd->stream, stream_request_cb, pd);
+ pa_stream_set_write_callback(pd->stream, stream_request_cb, pd);
+ pa_stream_set_latency_update_callback(pd->stream, stream_latency_update_cb, pd);
+
+ ret = pa_stream_connect_record(pd->stream, device, &attr,
+ PA_STREAM_INTERPOLATE_TIMING
+ |PA_STREAM_ADJUST_LATENCY
+ |PA_STREAM_AUTO_TIMING_UPDATE);
+
+ if (ret < 0) {
+ ret = AVERROR(pa_context_errno(pd->context));
+ goto unlock_and_fail;
+ }
+
+ for (;;) {
+ pa_stream_state_t state;
+
+ state = pa_stream_get_state(pd->stream);
+
+ if (state == PA_STREAM_READY)
+ break;
+
+ if (!PA_STREAM_IS_GOOD(state)) {
+ ret = AVERROR(pa_context_errno(pd->context));
+ goto unlock_and_fail;
+ }
+
+ /* Wait until the stream is ready */
+ pa_threaded_mainloop_wait(pd->mainloop);
+ }
+
+ pa_threaded_mainloop_unlock(pd->mainloop);
+
+ /* take real parameters */
+ st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
+ st->codecpar->codec_id = codec_id;
+ st->codecpar->sample_rate = pd->sample_rate;
+ st->codecpar->channels = pd->channels;
+ avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
+
+ pd->timefilter = ff_timefilter_new(1000000.0 / pd->sample_rate,
+ 1000, 1.5E-6);
+
+ if (!pd->timefilter) {
+ pulse_close(s);
+ return AVERROR(ENOMEM);
+ }
+
+ return 0;
+
+unlock_and_fail:
+ pa_threaded_mainloop_unlock(pd->mainloop);
+
+ pulse_close(s);
+ return ret;
+}
+
+static int pulse_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ PulseData *pd = s->priv_data;
+ int ret;
+ size_t read_length;
+ const void *read_data = NULL;
+ int64_t dts;
+ pa_usec_t latency;
+ int negative;
+
+ pa_threaded_mainloop_lock(pd->mainloop);
+
+ CHECK_DEAD_GOTO(pd, ret, unlock_and_fail);
+
+ while (!read_data) {
+ int r;
+
+ r = pa_stream_peek(pd->stream, &read_data, &read_length);
+ CHECK_SUCCESS_GOTO(ret, r == 0, unlock_and_fail);
+
+ if (read_length <= 0) {
+ pa_threaded_mainloop_wait(pd->mainloop);
+ CHECK_DEAD_GOTO(pd, ret, unlock_and_fail);
+ } else if (!read_data) {
+ /* There's a hole in the stream, skip it. We could generate
+ * silence, but that wouldn't work for compressed streams. */
+ r = pa_stream_drop(pd->stream);
+ CHECK_SUCCESS_GOTO(ret, r == 0, unlock_and_fail);
+ }
+ }
+
+ if (av_new_packet(pkt, read_length) < 0) {
+ ret = AVERROR(ENOMEM);
+ goto unlock_and_fail;
+ }
+
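+ /* Derive the pts from the wallclock, corrected by the reported stream
+ * latency and smoothed by the timefilter. */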
+ dts = av_gettime();
+ pa_operation_unref(pa_stream_update_timing_info(pd->stream, NULL, NULL));
+
+ if (pa_stream_get_latency(pd->stream, &latency, &negative) >= 0) {
+ enum AVCodecID codec_id =
+ s->audio_codec_id == AV_CODEC_ID_NONE ? DEFAULT_CODEC_ID : s->audio_codec_id;
+ int frame_size = ((av_get_bits_per_sample(codec_id) >> 3) * pd->channels);
+ int frame_duration = read_length / frame_size;
+
+ if (negative)
+ dts += latency;
+ else
+ dts -= latency;
+ if (pd->wallclock)
+ pkt->pts = ff_timefilter_update(pd->timefilter, dts, pd->last_period);
+
+ pd->last_period = frame_duration;
+ } else {
+ av_log(s, AV_LOG_WARNING, "pa_stream_get_latency() failed\n");
+ }
+
+ memcpy(pkt->data, read_data, read_length);
+ pa_stream_drop(pd->stream);
+
+ pa_threaded_mainloop_unlock(pd->mainloop);
+ return 0;
+
+unlock_and_fail:
+ pa_threaded_mainloop_unlock(pd->mainloop);
+ return ret;
+}
+
+static int pulse_get_device_list(AVFormatContext *h, AVDeviceInfoList *device_list)
+{
+ PulseData *s = h->priv_data;
+ return ff_pulse_audio_get_devices(device_list, s->server, 0);
+}
+
+#define OFFSET(a) offsetof(PulseData, a)
+#define D AV_OPT_FLAG_DECODING_PARAM
+
+static const AVOption options[] = {
+ { "server", "set PulseAudio server", OFFSET(server), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, D },
+ { "name", "set application name", OFFSET(name), AV_OPT_TYPE_STRING, {.str = LIBAVFORMAT_IDENT}, 0, 0, D },
+ { "stream_name", "set stream description", OFFSET(stream_name), AV_OPT_TYPE_STRING, {.str = "record"}, 0, 0, D },
+ { "sample_rate", "set sample rate in Hz", OFFSET(sample_rate), AV_OPT_TYPE_INT, {.i64 = 48000}, 1, INT_MAX, D },
+ { "channels", "set number of audio channels", OFFSET(channels), AV_OPT_TYPE_INT, {.i64 = 2}, 1, INT_MAX, D },
+ { "frame_size", "set number of bytes per frame", OFFSET(frame_size), AV_OPT_TYPE_INT, {.i64 = 1024}, 1, INT_MAX, D },
+ { "fragment_size", "set buffering size, affects latency and cpu usage", OFFSET(fragment_size), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, D },
+ { "wallclock", "set the initial pts using the current time", OFFSET(wallclock), AV_OPT_TYPE_INT, {.i64 = 1}, -1, 1, D },
+ { NULL },
+};
+
+static const AVClass pulse_demuxer_class = {
+ .class_name = "Pulse demuxer",
+ .item_name = av_default_item_name,
+ .option = options,
+ .version = LIBAVUTIL_VERSION_INT,
+ .category = AV_CLASS_CATEGORY_DEVICE_AUDIO_INPUT,
+};
+
+AVInputFormat ff_pulse_demuxer = {
+ .name = "pulse",
+ .long_name = NULL_IF_CONFIG_SMALL("Pulse audio input"),
+ .priv_data_size = sizeof(PulseData),
+ .read_header = pulse_read_header,
+ .read_packet = pulse_read_packet,
+ .read_close = pulse_close,
+ .get_device_list = pulse_get_device_list,
+ .flags = AVFMT_NOFILE,
+ .priv_class = &pulse_demuxer_class,
+};
diff --git a/libavdevice/pulse_audio_enc.c b/libavdevice/pulse_audio_enc.c
new file mode 100644
index 0000000000..6fb634ee2b
--- /dev/null
+++ b/libavdevice/pulse_audio_enc.c
@@ -0,0 +1,796 @@
+/*
+ * Copyright (c) 2013 Lukasz Marek <lukasz.m.luki@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <math.h>
+#include <pulse/pulseaudio.h>
+#include <pulse/error.h>
+#include "libavformat/avformat.h"
+#include "libavformat/internal.h"
+#include "libavutil/internal.h"
+#include "libavutil/opt.h"
+#include "libavutil/time.h"
+#include "libavutil/log.h"
+#include "libavutil/attributes.h"
+#include "pulse_audio_common.h"
+
+typedef struct PulseData {
+ AVClass *class;
+ const char *server;
+ const char *name;
+ const char *stream_name;
+ const char *device;
+ int64_t timestamp;
+ int buffer_size; /**< Buffer size in bytes */
+ int buffer_duration; /**< Buffer duration in ms; converted into buffer_size */
+ int prebuf;
+ int minreq;
+ int last_result;
+ pa_threaded_mainloop *mainloop;
+ pa_context *ctx;
+ pa_stream *stream;
+ int nonblocking;
+ int mute;
+ pa_volume_t base_volume;
+ pa_volume_t last_volume;
+} PulseData;
+
+static void pulse_audio_sink_device_cb(pa_context *ctx, const pa_sink_info *dev,
+ int eol, void *userdata)
+{
+ PulseData *s = userdata;
+
+ if (s->ctx != ctx)
+ return;
+
+ if (eol) {
+ pa_threaded_mainloop_signal(s->mainloop, 0);
+ } else {
+ if (dev->flags & PA_SINK_FLAT_VOLUME)
+ s->base_volume = dev->base_volume;
+ else
+ s->base_volume = PA_VOLUME_NORM;
+ av_log(s, AV_LOG_DEBUG, "base volume: %u\n", s->base_volume);
+ }
+}
+
+/* Mainloop must be locked before calling this function as it uses pa_threaded_mainloop_wait. */
+static int pulse_update_sink_info(AVFormatContext *h)
+{
+ PulseData *s = h->priv_data;
+ pa_operation *op;
+ if (!(op = pa_context_get_sink_info_by_name(s->ctx, s->device,
+ pulse_audio_sink_device_cb, s))) {
+ av_log(s, AV_LOG_ERROR, "pa_context_get_sink_info_by_name failed.\n");
+ return AVERROR_EXTERNAL;
+ }
+ while (pa_operation_get_state(op) == PA_OPERATION_RUNNING)
+ pa_threaded_mainloop_wait(s->mainloop);
+ pa_operation_unref(op);
+ return 0;
+}
+
+static void pulse_audio_sink_input_cb(pa_context *ctx, const pa_sink_input_info *i,
+ int eol, void *userdata)
+{
+ AVFormatContext *h = userdata;
+ PulseData *s = h->priv_data;
+
+ if (s->ctx != ctx)
+ return;
+
+ if (!eol) {
+ double val;
+ pa_volume_t vol = pa_cvolume_avg(&i->volume);
+ if (s->mute < 0 || (s->mute && !i->mute) || (!s->mute && i->mute)) {
+ s->mute = i->mute;
+ avdevice_dev_to_app_control_message(h, AV_DEV_TO_APP_MUTE_STATE_CHANGED, &s->mute, sizeof(s->mute));
+ }
+
+ vol = pa_sw_volume_divide(vol, s->base_volume);
+ if (s->last_volume != vol) {
+ val = (double)vol / PA_VOLUME_NORM;
+ avdevice_dev_to_app_control_message(h, AV_DEV_TO_APP_VOLUME_LEVEL_CHANGED, &val, sizeof(val));
+ s->last_volume = vol;
+ }
+ }
+}
+
+/* This function creates a mainloop of its own, so it may safely be called from
+ PA callbacks. The threaded mainloop must be locked before calling this
+ function as it operates on streams. */
+static int pulse_update_sink_input_info(AVFormatContext *h)
+{
+ PulseData *s = h->priv_data;
+ pa_operation *op;
+ enum pa_operation_state op_state;
+ pa_mainloop *ml = NULL;
+ pa_context *ctx = NULL;
+ int ret = 0;
+
+ if ((ret = ff_pulse_audio_connect_context(&ml, &ctx, s->server, "Update sink input information")) < 0)
+ return ret;
+
+ if (!(op = pa_context_get_sink_input_info(ctx, pa_stream_get_index(s->stream),
+ pulse_audio_sink_input_cb, h))) {
+ ret = AVERROR_EXTERNAL;
+ goto fail;
+ }
+
+ while ((op_state = pa_operation_get_state(op)) == PA_OPERATION_RUNNING)
+ pa_mainloop_iterate(ml, 1, NULL);
+ pa_operation_unref(op);
+ if (op_state != PA_OPERATION_DONE) {
+ ret = AVERROR_EXTERNAL;
+ goto fail;
+ }
+
+ fail:
+ ff_pulse_audio_disconnect_context(&ml, &ctx);
+ if (ret)
+ av_log(s, AV_LOG_ERROR, "pa_context_get_sink_input_info failed.\n");
+ return ret;
+}
+
+static void pulse_event(pa_context *ctx, pa_subscription_event_type_t t,
+ uint32_t idx, void *userdata)
+{
+ AVFormatContext *h = userdata;
+ PulseData *s = h->priv_data;
+
+ if (s->ctx != ctx)
+ return;
+
+ if ((t & PA_SUBSCRIPTION_EVENT_FACILITY_MASK) == PA_SUBSCRIPTION_EVENT_SINK_INPUT) {
+ if ((t & PA_SUBSCRIPTION_EVENT_TYPE_MASK) == PA_SUBSCRIPTION_EVENT_CHANGE)
+ // Calling from mainloop callback. No need to lock mainloop.
+ pulse_update_sink_input_info(h);
+ }
+}
+
+static void pulse_stream_writable(pa_stream *stream, size_t nbytes, void *userdata)
+{
+ AVFormatContext *h = userdata;
+ PulseData *s = h->priv_data;
+ int64_t val = nbytes;
+
+ if (stream != s->stream)
+ return;
+
+ avdevice_dev_to_app_control_message(h, AV_DEV_TO_APP_BUFFER_WRITABLE, &val, sizeof(val));
+ pa_threaded_mainloop_signal(s->mainloop, 0);
+}
+
+static void pulse_overflow(pa_stream *stream, void *userdata)
+{
+ AVFormatContext *h = userdata;
+ avdevice_dev_to_app_control_message(h, AV_DEV_TO_APP_BUFFER_OVERFLOW, NULL, 0);
+}
+
+static void pulse_underflow(pa_stream *stream, void *userdata)
+{
+ AVFormatContext *h = userdata;
+ avdevice_dev_to_app_control_message(h, AV_DEV_TO_APP_BUFFER_UNDERFLOW, NULL, 0);
+}
+
+static void pulse_stream_state(pa_stream *stream, void *userdata)
+{
+ PulseData *s = userdata;
+
+ if (stream != s->stream)
+ return;
+
+ switch (pa_stream_get_state(s->stream)) {
+ case PA_STREAM_READY:
+ case PA_STREAM_FAILED:
+ case PA_STREAM_TERMINATED:
+ pa_threaded_mainloop_signal(s->mainloop, 0);
+ default:
+ break;
+ }
+}
+
+static int pulse_stream_wait(PulseData *s)
+{
+ pa_stream_state_t state;
+
+ while ((state = pa_stream_get_state(s->stream)) != PA_STREAM_READY) {
+ if (state == PA_STREAM_FAILED || state == PA_STREAM_TERMINATED)
+ return AVERROR_EXTERNAL;
+ pa_threaded_mainloop_wait(s->mainloop);
+ }
+ return 0;
+}
+
+static void pulse_context_state(pa_context *ctx, void *userdata)
+{
+ PulseData *s = userdata;
+
+ if (s->ctx != ctx)
+ return;
+
+ switch (pa_context_get_state(ctx)) {
+ case PA_CONTEXT_READY:
+ case PA_CONTEXT_FAILED:
+ case PA_CONTEXT_TERMINATED:
+ pa_threaded_mainloop_signal(s->mainloop, 0);
+ default:
+ break;
+ }
+}
+
+static int pulse_context_wait(PulseData *s)
+{
+ pa_context_state_t state;
+
+ while ((state = pa_context_get_state(s->ctx)) != PA_CONTEXT_READY) {
+ if (state == PA_CONTEXT_FAILED || state == PA_CONTEXT_TERMINATED)
+ return AVERROR_EXTERNAL;
+ pa_threaded_mainloop_wait(s->mainloop);
+ }
+ return 0;
+}
+
+static void pulse_stream_result(pa_stream *stream, int success, void *userdata)
+{
+ PulseData *s = userdata;
+
+ if (stream != s->stream)
+ return;
+
+ s->last_result = success ? 0 : AVERROR_EXTERNAL;
+ pa_threaded_mainloop_signal(s->mainloop, 0);
+}
+
+static int pulse_finish_stream_operation(PulseData *s, pa_operation *op, const char *name)
+{
+ if (!op) {
+ pa_threaded_mainloop_unlock(s->mainloop);
+ av_log(s, AV_LOG_ERROR, "%s failed.\n", name);
+ return AVERROR_EXTERNAL;
+ }
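+    /* 2 is an in-progress sentinel value; pulse_stream_result() replaces it
+     * with 0 on success or AVERROR_EXTERNAL on failure. */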
+ s->last_result = 2;
+ while (s->last_result == 2)
+ pa_threaded_mainloop_wait(s->mainloop);
+ pa_operation_unref(op);
+ pa_threaded_mainloop_unlock(s->mainloop);
+ if (s->last_result != 0)
+ av_log(s, AV_LOG_ERROR, "%s failed.\n", name);
+ return s->last_result;
+}
+
+static int pulse_set_pause(PulseData *s, int pause)
+{
+ pa_operation *op;
+ pa_threaded_mainloop_lock(s->mainloop);
+ op = pa_stream_cork(s->stream, pause, pulse_stream_result, s);
+ return pulse_finish_stream_operation(s, op, "pa_stream_cork");
+}
+
+static int pulse_flush_stream(PulseData *s)
+{
+ pa_operation *op;
+ pa_threaded_mainloop_lock(s->mainloop);
+ op = pa_stream_flush(s->stream, pulse_stream_result, s);
+ return pulse_finish_stream_operation(s, op, "pa_stream_flush");
+}
+
+static void pulse_context_result(pa_context *ctx, int success, void *userdata)
+{
+ PulseData *s = userdata;
+
+ if (s->ctx != ctx)
+ return;
+
+ s->last_result = success ? 0 : AVERROR_EXTERNAL;
+ pa_threaded_mainloop_signal(s->mainloop, 0);
+}
+
+static int pulse_finish_context_operation(PulseData *s, pa_operation *op, const char *name)
+{
+ if (!op) {
+ pa_threaded_mainloop_unlock(s->mainloop);
+ av_log(s, AV_LOG_ERROR, "%s failed.\n", name);
+ return AVERROR_EXTERNAL;
+ }
+ s->last_result = 2;
+ while (s->last_result == 2)
+ pa_threaded_mainloop_wait(s->mainloop);
+ pa_operation_unref(op);
+ pa_threaded_mainloop_unlock(s->mainloop);
+ if (s->last_result != 0)
+ av_log(s, AV_LOG_ERROR, "%s failed.\n", name);
+ return s->last_result;
+}
+
+static int pulse_set_mute(PulseData *s)
+{
+ pa_operation *op;
+ pa_threaded_mainloop_lock(s->mainloop);
+ op = pa_context_set_sink_input_mute(s->ctx, pa_stream_get_index(s->stream),
+ s->mute, pulse_context_result, s);
+ return pulse_finish_context_operation(s, op, "pa_context_set_sink_input_mute");
+}
+
+static int pulse_set_volume(PulseData *s, double volume)
+{
+ pa_operation *op;
+ pa_cvolume cvol;
+ pa_volume_t vol;
+ const pa_sample_spec *ss = pa_stream_get_sample_spec(s->stream);
+
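+    /* Scale the linear [0,1] volume to PulseAudio units, then by the sink's
+     * base volume so that 1.0 corresponds to the device's nominal level. */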
+ vol = pa_sw_volume_multiply(lrint(volume * PA_VOLUME_NORM), s->base_volume);
+ pa_cvolume_set(&cvol, ss->channels, PA_VOLUME_NORM);
+ pa_sw_cvolume_multiply_scalar(&cvol, &cvol, vol);
+ pa_threaded_mainloop_lock(s->mainloop);
+ op = pa_context_set_sink_input_volume(s->ctx, pa_stream_get_index(s->stream),
+ &cvol, pulse_context_result, s);
+ return pulse_finish_context_operation(s, op, "pa_context_set_sink_input_volume");
+}
+
+static int pulse_subscribe_events(PulseData *s)
+{
+ pa_operation *op;
+
+ pa_threaded_mainloop_lock(s->mainloop);
+ op = pa_context_subscribe(s->ctx, PA_SUBSCRIPTION_MASK_SINK_INPUT, pulse_context_result, s);
+ return pulse_finish_context_operation(s, op, "pa_context_subscribe");
+}
+
+static void pulse_map_channels_to_pulse(int64_t channel_layout, pa_channel_map *channel_map)
+{
+ channel_map->channels = 0;
+ if (channel_layout & AV_CH_FRONT_LEFT)
+ channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_FRONT_LEFT;
+ if (channel_layout & AV_CH_FRONT_RIGHT)
+ channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_FRONT_RIGHT;
+ if (channel_layout & AV_CH_FRONT_CENTER)
+ channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_FRONT_CENTER;
+ if (channel_layout & AV_CH_LOW_FREQUENCY)
+ channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_LFE;
+ if (channel_layout & AV_CH_BACK_LEFT)
+ channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_REAR_LEFT;
+ if (channel_layout & AV_CH_BACK_RIGHT)
+ channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_REAR_RIGHT;
+ if (channel_layout & AV_CH_FRONT_LEFT_OF_CENTER)
+ channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER;
+ if (channel_layout & AV_CH_FRONT_RIGHT_OF_CENTER)
+ channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER;
+ if (channel_layout & AV_CH_BACK_CENTER)
+ channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_REAR_CENTER;
+ if (channel_layout & AV_CH_SIDE_LEFT)
+ channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_SIDE_LEFT;
+ if (channel_layout & AV_CH_SIDE_RIGHT)
+ channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_SIDE_RIGHT;
+ if (channel_layout & AV_CH_TOP_CENTER)
+ channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_TOP_CENTER;
+ if (channel_layout & AV_CH_TOP_FRONT_LEFT)
+ channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_TOP_FRONT_LEFT;
+ if (channel_layout & AV_CH_TOP_FRONT_CENTER)
+ channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_TOP_FRONT_CENTER;
+ if (channel_layout & AV_CH_TOP_FRONT_RIGHT)
+ channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_TOP_FRONT_RIGHT;
+ if (channel_layout & AV_CH_TOP_BACK_LEFT)
+ channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_TOP_REAR_LEFT;
+ if (channel_layout & AV_CH_TOP_BACK_CENTER)
+ channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_TOP_REAR_CENTER;
+ if (channel_layout & AV_CH_TOP_BACK_RIGHT)
+ channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_TOP_REAR_RIGHT;
+ if (channel_layout & AV_CH_STEREO_LEFT)
+ channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_FRONT_LEFT;
+ if (channel_layout & AV_CH_STEREO_RIGHT)
+ channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_FRONT_RIGHT;
+ if (channel_layout & AV_CH_WIDE_LEFT)
+ channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_AUX0;
+ if (channel_layout & AV_CH_WIDE_RIGHT)
+ channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_AUX1;
+ if (channel_layout & AV_CH_SURROUND_DIRECT_LEFT)
+ channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_AUX2;
+ if (channel_layout & AV_CH_SURROUND_DIRECT_RIGHT)
+ channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_AUX3;
+ if (channel_layout & AV_CH_LOW_FREQUENCY_2)
+ channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_LFE;
+}
+
+static av_cold int pulse_write_trailer(AVFormatContext *h)
+{
+ PulseData *s = h->priv_data;
+
+ if (s->mainloop) {
+ pa_threaded_mainloop_lock(s->mainloop);
+ if (s->stream) {
+ pa_stream_disconnect(s->stream);
+ pa_stream_set_state_callback(s->stream, NULL, NULL);
+ pa_stream_set_write_callback(s->stream, NULL, NULL);
+ pa_stream_set_overflow_callback(s->stream, NULL, NULL);
+ pa_stream_set_underflow_callback(s->stream, NULL, NULL);
+ pa_stream_unref(s->stream);
+ s->stream = NULL;
+ }
+ if (s->ctx) {
+ pa_context_disconnect(s->ctx);
+ pa_context_set_state_callback(s->ctx, NULL, NULL);
+ pa_context_set_subscribe_callback(s->ctx, NULL, NULL);
+ pa_context_unref(s->ctx);
+ s->ctx = NULL;
+ }
+ pa_threaded_mainloop_unlock(s->mainloop);
+ pa_threaded_mainloop_stop(s->mainloop);
+ pa_threaded_mainloop_free(s->mainloop);
+ s->mainloop = NULL;
+ }
+
+ return 0;
+}
+
+static av_cold int pulse_write_header(AVFormatContext *h)
+{
+ PulseData *s = h->priv_data;
+ AVStream *st = NULL;
+ int ret;
+ pa_sample_spec sample_spec;
+ pa_buffer_attr buffer_attributes = { -1, -1, -1, -1, -1 };
+ pa_channel_map channel_map;
+ pa_mainloop_api *mainloop_api;
+ const char *stream_name = s->stream_name;
+ static const pa_stream_flags_t stream_flags = PA_STREAM_INTERPOLATE_TIMING |
+ PA_STREAM_AUTO_TIMING_UPDATE |
+ PA_STREAM_NOT_MONOTONIC;
+
+ if (h->nb_streams != 1 || h->streams[0]->codecpar->codec_type != AVMEDIA_TYPE_AUDIO) {
+ av_log(s, AV_LOG_ERROR, "Only a single audio stream is supported.\n");
+ return AVERROR(EINVAL);
+ }
+ st = h->streams[0];
+
+ if (!stream_name) {
+ if (h->filename[0])
+ stream_name = h->filename;
+ else
+ stream_name = "Playback";
+ }
+ s->nonblocking = (h->flags & AVFMT_FLAG_NONBLOCK);
+
+ if (s->buffer_duration) {
+ int64_t bytes = s->buffer_duration;
+ bytes *= st->codecpar->channels * st->codecpar->sample_rate *
+ av_get_bytes_per_sample(st->codecpar->format);
+ bytes /= 1000;
+ buffer_attributes.tlength = FFMAX(s->buffer_size, av_clip64(bytes, 0, UINT32_MAX - 1));
+        av_log(s, AV_LOG_DEBUG,
+               "Buffer duration: %ums recalculated to a %"PRId64" byte buffer.\n",
+               s->buffer_duration, bytes);
+ av_log(s, AV_LOG_DEBUG, "Real buffer length is %u bytes\n", buffer_attributes.tlength);
+ } else if (s->buffer_size)
+ buffer_attributes.tlength = s->buffer_size;
+ if (s->prebuf)
+ buffer_attributes.prebuf = s->prebuf;
+ if (s->minreq)
+ buffer_attributes.minreq = s->minreq;
+
+ sample_spec.format = ff_codec_id_to_pulse_format(st->codecpar->codec_id);
+ sample_spec.rate = st->codecpar->sample_rate;
+ sample_spec.channels = st->codecpar->channels;
+ if (!pa_sample_spec_valid(&sample_spec)) {
+ av_log(s, AV_LOG_ERROR, "Invalid sample spec.\n");
+ return AVERROR(EINVAL);
+ }
+
+ if (sample_spec.channels == 1) {
+ channel_map.channels = 1;
+ channel_map.map[0] = PA_CHANNEL_POSITION_MONO;
+ } else if (st->codecpar->channel_layout) {
+ if (av_get_channel_layout_nb_channels(st->codecpar->channel_layout) != st->codecpar->channels)
+ return AVERROR(EINVAL);
+ pulse_map_channels_to_pulse(st->codecpar->channel_layout, &channel_map);
+        /* An unknown channel is present in channel_layout; let PulseAudio use its default. */
+        if (channel_map.channels != sample_spec.channels) {
+            av_log(s, AV_LOG_WARNING, "Unknown channel. Using default channel map.\n");
+ channel_map.channels = 0;
+ }
+ } else
+ channel_map.channels = 0;
+
+ if (!channel_map.channels)
+ av_log(s, AV_LOG_WARNING, "Using PulseAudio's default channel map.\n");
+ else if (!pa_channel_map_valid(&channel_map)) {
+ av_log(s, AV_LOG_ERROR, "Invalid channel map.\n");
+ return AVERROR(EINVAL);
+ }
+
+ /* start main loop */
+ s->mainloop = pa_threaded_mainloop_new();
+ if (!s->mainloop) {
+ av_log(s, AV_LOG_ERROR, "Cannot create threaded mainloop.\n");
+ return AVERROR(ENOMEM);
+ }
+ if ((ret = pa_threaded_mainloop_start(s->mainloop)) < 0) {
+ av_log(s, AV_LOG_ERROR, "Cannot start threaded mainloop: %s.\n", pa_strerror(ret));
+ pa_threaded_mainloop_free(s->mainloop);
+ s->mainloop = NULL;
+ return AVERROR_EXTERNAL;
+ }
+
+ pa_threaded_mainloop_lock(s->mainloop);
+
+ mainloop_api = pa_threaded_mainloop_get_api(s->mainloop);
+ if (!mainloop_api) {
+ av_log(s, AV_LOG_ERROR, "Cannot get mainloop API.\n");
+ ret = AVERROR_EXTERNAL;
+ goto fail;
+ }
+
+ s->ctx = pa_context_new(mainloop_api, s->name);
+ if (!s->ctx) {
+ av_log(s, AV_LOG_ERROR, "Cannot create context.\n");
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+ pa_context_set_state_callback(s->ctx, pulse_context_state, s);
+ pa_context_set_subscribe_callback(s->ctx, pulse_event, h);
+
+ if ((ret = pa_context_connect(s->ctx, s->server, 0, NULL)) < 0) {
+ av_log(s, AV_LOG_ERROR, "Cannot connect context: %s.\n", pa_strerror(ret));
+ ret = AVERROR_EXTERNAL;
+ goto fail;
+ }
+
+ if ((ret = pulse_context_wait(s)) < 0) {
+ av_log(s, AV_LOG_ERROR, "Context failed.\n");
+ goto fail;
+ }
+
+ s->stream = pa_stream_new(s->ctx, stream_name, &sample_spec,
+ channel_map.channels ? &channel_map : NULL);
+
+ if ((ret = pulse_update_sink_info(h)) < 0) {
+ av_log(s, AV_LOG_ERROR, "Updating sink info failed.\n");
+ goto fail;
+ }
+
+ if (!s->stream) {
+ av_log(s, AV_LOG_ERROR, "Cannot create stream.\n");
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+ pa_stream_set_state_callback(s->stream, pulse_stream_state, s);
+ pa_stream_set_write_callback(s->stream, pulse_stream_writable, h);
+ pa_stream_set_overflow_callback(s->stream, pulse_overflow, h);
+ pa_stream_set_underflow_callback(s->stream, pulse_underflow, h);
+
+ if ((ret = pa_stream_connect_playback(s->stream, s->device, &buffer_attributes,
+ stream_flags, NULL, NULL)) < 0) {
+ av_log(s, AV_LOG_ERROR, "pa_stream_connect_playback failed: %s.\n", pa_strerror(ret));
+ ret = AVERROR_EXTERNAL;
+ goto fail;
+ }
+
+ if ((ret = pulse_stream_wait(s)) < 0) {
+ av_log(s, AV_LOG_ERROR, "Stream failed.\n");
+ goto fail;
+ }
+
+ /* read back buffer attributes for future use */
+ buffer_attributes = *pa_stream_get_buffer_attr(s->stream);
+ s->buffer_size = buffer_attributes.tlength;
+ s->prebuf = buffer_attributes.prebuf;
+ s->minreq = buffer_attributes.minreq;
+ av_log(s, AV_LOG_DEBUG, "Real buffer attributes: size: %d, prebuf: %d, minreq: %d\n",
+ s->buffer_size, s->prebuf, s->minreq);
+
+ pa_threaded_mainloop_unlock(s->mainloop);
+
+ if ((ret = pulse_subscribe_events(s)) < 0) {
+ av_log(s, AV_LOG_ERROR, "Event subscription failed.\n");
+        /* A bit ugly, but re-locking here is the simplest approach. */
+ pa_threaded_mainloop_lock(s->mainloop);
+ goto fail;
+ }
+
+ /* force control messages */
+ s->mute = -1;
+ s->last_volume = PA_VOLUME_INVALID;
+ pa_threaded_mainloop_lock(s->mainloop);
+ if ((ret = pulse_update_sink_input_info(h)) < 0) {
+ av_log(s, AV_LOG_ERROR, "Updating sink input info failed.\n");
+ goto fail;
+ }
+ pa_threaded_mainloop_unlock(s->mainloop);
+
+ avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
+
+ return 0;
+ fail:
+ pa_threaded_mainloop_unlock(s->mainloop);
+ pulse_write_trailer(h);
+ return ret;
+}
+
+static int pulse_write_packet(AVFormatContext *h, AVPacket *pkt)
+{
+ PulseData *s = h->priv_data;
+ int ret;
+ int64_t writable_size;
+
+ if (!pkt)
+        return pulse_flush_stream(s);
+
+ if (pkt->dts != AV_NOPTS_VALUE)
+ s->timestamp = pkt->dts;
+
+ if (pkt->duration) {
+ s->timestamp += pkt->duration;
+ } else {
+ AVStream *st = h->streams[0];
+ AVRational r = { 1, st->codecpar->sample_rate };
+ int64_t samples = pkt->size / (av_get_bytes_per_sample(st->codecpar->format) * st->codecpar->channels);
+ s->timestamp += av_rescale_q(samples, r, st->time_base);
+ }
+
+ pa_threaded_mainloop_lock(s->mainloop);
+ if (!PA_STREAM_IS_GOOD(pa_stream_get_state(s->stream))) {
+ av_log(s, AV_LOG_ERROR, "PulseAudio stream is in invalid state.\n");
+ goto fail;
+ }
+ while (pa_stream_writable_size(s->stream) < s->minreq) {
+ if (s->nonblocking) {
+ pa_threaded_mainloop_unlock(s->mainloop);
+ return AVERROR(EAGAIN);
+ } else
+ pa_threaded_mainloop_wait(s->mainloop);
+ }
+
+ if ((ret = pa_stream_write(s->stream, pkt->data, pkt->size, NULL, 0, PA_SEEK_RELATIVE)) < 0) {
+ av_log(s, AV_LOG_ERROR, "pa_stream_write failed: %s\n", pa_strerror(ret));
+ goto fail;
+ }
+ if ((writable_size = pa_stream_writable_size(s->stream)) >= s->minreq)
+ avdevice_dev_to_app_control_message(h, AV_DEV_TO_APP_BUFFER_WRITABLE, &writable_size, sizeof(writable_size));
+
+ pa_threaded_mainloop_unlock(s->mainloop);
+
+ return 0;
+ fail:
+ pa_threaded_mainloop_unlock(s->mainloop);
+ return AVERROR_EXTERNAL;
+}
+
+static int pulse_write_frame(AVFormatContext *h, int stream_index,
+ AVFrame **frame, unsigned flags)
+{
+ AVPacket pkt;
+
+ /* Planar formats are not supported yet. */
+ if (flags & AV_WRITE_UNCODED_FRAME_QUERY)
+ return av_sample_fmt_is_planar(h->streams[stream_index]->codecpar->format) ?
+ AVERROR(EINVAL) : 0;
+
+ pkt.data = (*frame)->data[0];
+ pkt.size = (*frame)->nb_samples * av_get_bytes_per_sample((*frame)->format) * av_frame_get_channels(*frame);
+ pkt.dts = (*frame)->pkt_dts;
+ pkt.duration = av_frame_get_pkt_duration(*frame);
+ return pulse_write_packet(h, &pkt);
+}
+
+
+static void pulse_get_output_timestamp(AVFormatContext *h, int stream, int64_t *dts, int64_t *wall)
+{
+ PulseData *s = h->priv_data;
+ pa_usec_t latency;
+ int neg;
+ pa_threaded_mainloop_lock(s->mainloop);
+ pa_stream_get_latency(s->stream, &latency, &neg);
+ pa_threaded_mainloop_unlock(s->mainloop);
+ if (wall)
+ *wall = av_gettime();
+ if (dts)
+ *dts = s->timestamp - (neg ? -latency : latency);
+}
+
+static int pulse_get_device_list(AVFormatContext *h, AVDeviceInfoList *device_list)
+{
+ PulseData *s = h->priv_data;
+ return ff_pulse_audio_get_devices(device_list, s->server, 1);
+}
+
+static int pulse_control_message(AVFormatContext *h, int type,
+ void *data, size_t data_size)
+{
+ PulseData *s = h->priv_data;
+ int ret;
+
+ switch(type) {
+ case AV_APP_TO_DEV_PAUSE:
+ return pulse_set_pause(s, 1);
+ case AV_APP_TO_DEV_PLAY:
+ return pulse_set_pause(s, 0);
+ case AV_APP_TO_DEV_TOGGLE_PAUSE:
+ return pulse_set_pause(s, !pa_stream_is_corked(s->stream));
+ case AV_APP_TO_DEV_MUTE:
+ if (!s->mute) {
+ s->mute = 1;
+ return pulse_set_mute(s);
+ }
+ return 0;
+ case AV_APP_TO_DEV_UNMUTE:
+ if (s->mute) {
+ s->mute = 0;
+ return pulse_set_mute(s);
+ }
+ return 0;
+ case AV_APP_TO_DEV_TOGGLE_MUTE:
+ s->mute = !s->mute;
+ return pulse_set_mute(s);
+ case AV_APP_TO_DEV_SET_VOLUME:
+ return pulse_set_volume(s, *(double *)data);
+ case AV_APP_TO_DEV_GET_VOLUME:
+ s->last_volume = PA_VOLUME_INVALID;
+ pa_threaded_mainloop_lock(s->mainloop);
+ ret = pulse_update_sink_input_info(h);
+ pa_threaded_mainloop_unlock(s->mainloop);
+ return ret;
+ case AV_APP_TO_DEV_GET_MUTE:
+ s->mute = -1;
+ pa_threaded_mainloop_lock(s->mainloop);
+ ret = pulse_update_sink_input_info(h);
+ pa_threaded_mainloop_unlock(s->mainloop);
+ return ret;
+ default:
+ break;
+ }
+ return AVERROR(ENOSYS);
+}
+
+#define OFFSET(a) offsetof(PulseData, a)
+#define E AV_OPT_FLAG_ENCODING_PARAM
+static const AVOption options[] = {
+ { "server", "set PulseAudio server", OFFSET(server), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, E },
+ { "name", "set application name", OFFSET(name), AV_OPT_TYPE_STRING, {.str = LIBAVFORMAT_IDENT}, 0, 0, E },
+ { "stream_name", "set stream description", OFFSET(stream_name), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, E },
+ { "device", "set device name", OFFSET(device), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, E },
+ { "buffer_size", "set buffer size in bytes", OFFSET(buffer_size), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, E },
+ { "buffer_duration", "set buffer duration in millisecs", OFFSET(buffer_duration), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, E },
+ { "prebuf", "set pre-buffering size", OFFSET(prebuf), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, E },
+ { "minreq", "set minimum request size", OFFSET(minreq), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, E },
+ { NULL }
+};
+
+static const AVClass pulse_muxer_class = {
+ .class_name = "PulseAudio muxer",
+ .item_name = av_default_item_name,
+ .option = options,
+ .version = LIBAVUTIL_VERSION_INT,
+ .category = AV_CLASS_CATEGORY_DEVICE_AUDIO_OUTPUT,
+};
+
+AVOutputFormat ff_pulse_muxer = {
+ .name = "pulse",
+    .long_name      = NULL_IF_CONFIG_SMALL("PulseAudio output"),
+ .priv_data_size = sizeof(PulseData),
+ .audio_codec = AV_NE(AV_CODEC_ID_PCM_S16BE, AV_CODEC_ID_PCM_S16LE),
+ .video_codec = AV_CODEC_ID_NONE,
+ .write_header = pulse_write_header,
+ .write_packet = pulse_write_packet,
+ .write_uncoded_frame = pulse_write_frame,
+ .write_trailer = pulse_write_trailer,
+ .get_output_timestamp = pulse_get_output_timestamp,
+ .get_device_list = pulse_get_device_list,
+ .control_message = pulse_control_message,
+ .flags = AVFMT_NOFILE | AVFMT_ALLOW_FLUSH,
+ .priv_class = &pulse_muxer_class,
+};
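
The control-message handlers above are reached through the generic avdevice
messaging API. A minimal application-side sketch, assuming an already-opened
pulse muxer context (the helper names here are illustrative, not part of the
API):

    #include "libavdevice/avdevice.h"

    /* Pause playback: routed to pulse_control_message() and from there
     * to pulse_set_pause(s, 1). */
    static int pause_playback(AVFormatContext *mux)
    {
        return avdevice_app_to_dev_control_message(mux, AV_APP_TO_DEV_PAUSE,
                                                   NULL, 0);
    }

    /* Set a linear volume; 1.0 corresponds to PA_VOLUME_NORM after the
     * base-volume scaling performed in pulse_set_volume(). */
    static int set_volume(AVFormatContext *mux, double vol)
    {
        return avdevice_app_to_dev_control_message(mux, AV_APP_TO_DEV_SET_VOLUME,
                                                   &vol, sizeof(vol));
    }

Messages in the opposite direction (buffer writable, mute and volume changes)
arrive through avdevice_dev_to_app_control_message() as shown in the callbacks
above.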
diff --git a/libavdevice/qtkit.m b/libavdevice/qtkit.m
new file mode 100644
index 0000000000..22a94ca561
--- /dev/null
+++ b/libavdevice/qtkit.m
@@ -0,0 +1,362 @@
+/*
+ * QTKit input device
+ * Copyright (c) 2013 Vadim Kalinsky <vadim@kalinsky.ru>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * QTKit input device
+ * @author Vadim Kalinsky <vadim@kalinsky.ru>
+ */
+
+#if defined(__clang__)
+#pragma clang diagnostic ignored "-Wdeprecated-declarations"
+#endif
+
+#import <QTKit/QTKit.h>
+#include <pthread.h>
+
+#include "libavutil/pixdesc.h"
+#include "libavutil/opt.h"
+#include "libavformat/internal.h"
+#include "libavutil/internal.h"
+#include "libavutil/time.h"
+#include "avdevice.h"
+
+#define QTKIT_TIMEBASE 100
+
+static const AVRational kQTKitTimeBase_q = {
+ .num = 1,
+ .den = QTKIT_TIMEBASE
+};
+
+typedef struct
+{
+ AVClass* class;
+
+ float frame_rate;
+ int frames_captured;
+ int64_t first_pts;
+ pthread_mutex_t frame_lock;
+ pthread_cond_t frame_wait_cond;
+ id qt_delegate;
+
+ int list_devices;
+ int video_device_index;
+
+ QTCaptureSession* capture_session;
+ QTCaptureDecompressedVideoOutput* video_output;
+ CVImageBufferRef current_frame;
+} CaptureContext;
+
+static void lock_frames(CaptureContext* ctx)
+{
+ pthread_mutex_lock(&ctx->frame_lock);
+}
+
+static void unlock_frames(CaptureContext* ctx)
+{
+ pthread_mutex_unlock(&ctx->frame_lock);
+}
+
+/** FrameReceiver class - delegate for QTCaptureSession
+ */
+@interface FFMPEG_FrameReceiver : NSObject
+{
+ CaptureContext* _context;
+}
+
+- (id)initWithContext:(CaptureContext*)context;
+
+- (void)captureOutput:(QTCaptureOutput *)captureOutput
+ didOutputVideoFrame:(CVImageBufferRef)videoFrame
+ withSampleBuffer:(QTSampleBuffer *)sampleBuffer
+ fromConnection:(QTCaptureConnection *)connection;
+
+@end
+
+@implementation FFMPEG_FrameReceiver
+
+- (id)initWithContext:(CaptureContext*)context
+{
+ if (self = [super init]) {
+ _context = context;
+ }
+ return self;
+}
+
+- (void)captureOutput:(QTCaptureOutput *)captureOutput
+ didOutputVideoFrame:(CVImageBufferRef)videoFrame
+ withSampleBuffer:(QTSampleBuffer *)sampleBuffer
+ fromConnection:(QTCaptureConnection *)connection
+{
+ lock_frames(_context);
+ if (_context->current_frame != nil) {
+ CVBufferRelease(_context->current_frame);
+ }
+
+ _context->current_frame = CVBufferRetain(videoFrame);
+
+ pthread_cond_signal(&_context->frame_wait_cond);
+
+ unlock_frames(_context);
+
+ ++_context->frames_captured;
+}
+
+@end
+
+static void destroy_context(CaptureContext* ctx)
+{
+ [ctx->capture_session stopRunning];
+
+ [ctx->capture_session release];
+ [ctx->video_output release];
+ [ctx->qt_delegate release];
+
+ ctx->capture_session = NULL;
+ ctx->video_output = NULL;
+ ctx->qt_delegate = NULL;
+
+ pthread_mutex_destroy(&ctx->frame_lock);
+ pthread_cond_destroy(&ctx->frame_wait_cond);
+
+ if (ctx->current_frame)
+ CVBufferRelease(ctx->current_frame);
+}
+
+static int qtkit_read_header(AVFormatContext *s)
+{
+ NSAutoreleasePool* pool = [[NSAutoreleasePool alloc] init];
+
+ CaptureContext* ctx = (CaptureContext*)s->priv_data;
+
+ ctx->first_pts = av_gettime();
+
+ pthread_mutex_init(&ctx->frame_lock, NULL);
+ pthread_cond_init(&ctx->frame_wait_cond, NULL);
+
+ // List devices if requested
+ if (ctx->list_devices) {
+ av_log(ctx, AV_LOG_INFO, "QTKit video devices:\n");
+ NSArray *devices = [QTCaptureDevice inputDevicesWithMediaType:QTMediaTypeVideo];
+ for (QTCaptureDevice *device in devices) {
+ const char *name = [[device localizedDisplayName] UTF8String];
+ int index = [devices indexOfObject:device];
+ av_log(ctx, AV_LOG_INFO, "[%d] %s\n", index, name);
+ }
+ goto fail;
+ }
+
+ // Find capture device
+ QTCaptureDevice *video_device = nil;
+
+ // check for device index given in filename
+ if (ctx->video_device_index == -1) {
+ sscanf(s->filename, "%d", &ctx->video_device_index);
+ }
+
+ if (ctx->video_device_index >= 0) {
+ NSArray *devices = [QTCaptureDevice inputDevicesWithMediaType:QTMediaTypeVideo];
+
+ if (ctx->video_device_index >= [devices count]) {
+ av_log(ctx, AV_LOG_ERROR, "Invalid device index\n");
+ goto fail;
+ }
+
+ video_device = [devices objectAtIndex:ctx->video_device_index];
+ } else if (strncmp(s->filename, "", 1) &&
+ strncmp(s->filename, "default", 7)) {
+ NSArray *devices = [QTCaptureDevice inputDevicesWithMediaType:QTMediaTypeVideo];
+
+ for (QTCaptureDevice *device in devices) {
+ if (!strncmp(s->filename, [[device localizedDisplayName] UTF8String], strlen(s->filename))) {
+ video_device = device;
+ break;
+ }
+ }
+ if (!video_device) {
+ av_log(ctx, AV_LOG_ERROR, "Video device not found\n");
+ goto fail;
+ }
+ } else {
+ video_device = [QTCaptureDevice defaultInputDeviceWithMediaType:QTMediaTypeMuxed];
+ }
+
+ BOOL success = [video_device open:nil];
+
+    // Opening the device failed; fall back to the default QTMediaTypeVideo device
+ if (!success) {
+ video_device = [QTCaptureDevice defaultInputDeviceWithMediaType:QTMediaTypeVideo];
+ success = [video_device open:nil];
+
+ if (!success) {
+ av_log(s, AV_LOG_ERROR, "No QT capture device found\n");
+ goto fail;
+ }
+ }
+
+ NSString* dev_display_name = [video_device localizedDisplayName];
+ av_log (s, AV_LOG_DEBUG, "'%s' opened\n", [dev_display_name UTF8String]);
+
+ // Initialize capture session
+ ctx->capture_session = [[QTCaptureSession alloc] init];
+
+ QTCaptureDeviceInput* capture_dev_input = [[[QTCaptureDeviceInput alloc] initWithDevice:video_device] autorelease];
+ success = [ctx->capture_session addInput:capture_dev_input error:nil];
+
+ if (!success) {
+ av_log (s, AV_LOG_ERROR, "Failed to add QT capture device to session\n");
+ goto fail;
+ }
+
+ // Attaching output
+ // FIXME: Allow for a user defined pixel format
+ ctx->video_output = [[QTCaptureDecompressedVideoOutput alloc] init];
+
+ NSDictionary *captureDictionary = [NSDictionary dictionaryWithObject:
+ [NSNumber numberWithUnsignedInt:kCVPixelFormatType_24RGB]
+ forKey:(id)kCVPixelBufferPixelFormatTypeKey];
+
+ [ctx->video_output setPixelBufferAttributes:captureDictionary];
+
+ ctx->qt_delegate = [[FFMPEG_FrameReceiver alloc] initWithContext:ctx];
+
+ [ctx->video_output setDelegate:ctx->qt_delegate];
+ [ctx->video_output setAutomaticallyDropsLateVideoFrames:YES];
+ [ctx->video_output setMinimumVideoFrameInterval:1.0/ctx->frame_rate];
+
+ success = [ctx->capture_session addOutput:ctx->video_output error:nil];
+
+ if (!success) {
+ av_log (s, AV_LOG_ERROR, "can't add video output to capture session\n");
+ goto fail;
+ }
+
+ [ctx->capture_session startRunning];
+
+ // Take stream info from the first frame.
+ while (ctx->frames_captured < 1) {
+ CFRunLoopRunInMode(kCFRunLoopDefaultMode, 0.1, YES);
+ }
+
+ lock_frames(ctx);
+
+ AVStream* stream = avformat_new_stream(s, NULL);
+
+ if (!stream) {
+ goto fail;
+ }
+
+ avpriv_set_pts_info(stream, 64, 1, QTKIT_TIMEBASE);
+
+ stream->codec->codec_id = AV_CODEC_ID_RAWVIDEO;
+ stream->codec->codec_type = AVMEDIA_TYPE_VIDEO;
+ stream->codec->width = (int)CVPixelBufferGetWidth (ctx->current_frame);
+ stream->codec->height = (int)CVPixelBufferGetHeight(ctx->current_frame);
+ stream->codec->pix_fmt = AV_PIX_FMT_RGB24;
+
+ CVBufferRelease(ctx->current_frame);
+ ctx->current_frame = nil;
+
+ unlock_frames(ctx);
+
+ [pool release];
+
+ return 0;
+
+fail:
+ [pool release];
+
+ destroy_context(ctx);
+
+ return AVERROR(EIO);
+}
+
+static int qtkit_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ CaptureContext* ctx = (CaptureContext*)s->priv_data;
+
+ do {
+ lock_frames(ctx);
+
+ if (ctx->current_frame != nil) {
+            if (av_new_packet(pkt, (int)CVPixelBufferGetDataSize(ctx->current_frame)) < 0) {
+                unlock_frames(ctx);
+                return AVERROR(EIO);
+            }
+
+ pkt->pts = pkt->dts = av_rescale_q(av_gettime() - ctx->first_pts, AV_TIME_BASE_Q, kQTKitTimeBase_q);
+ pkt->stream_index = 0;
+ pkt->flags |= AV_PKT_FLAG_KEY;
+
+ CVPixelBufferLockBaseAddress(ctx->current_frame, 0);
+
+ void* data = CVPixelBufferGetBaseAddress(ctx->current_frame);
+ memcpy(pkt->data, data, pkt->size);
+
+ CVPixelBufferUnlockBaseAddress(ctx->current_frame, 0);
+ CVBufferRelease(ctx->current_frame);
+ ctx->current_frame = nil;
+ } else {
+ pkt->data = NULL;
+ pthread_cond_wait(&ctx->frame_wait_cond, &ctx->frame_lock);
+ }
+
+ unlock_frames(ctx);
+ } while (!pkt->data);
+
+ return 0;
+}
+
+static int qtkit_close(AVFormatContext *s)
+{
+ CaptureContext* ctx = (CaptureContext*)s->priv_data;
+
+ destroy_context(ctx);
+
+ return 0;
+}
+
+static const AVOption options[] = {
+ { "frame_rate", "set frame rate", offsetof(CaptureContext, frame_rate), AV_OPT_TYPE_FLOAT, { .dbl = 30.0 }, 0.1, 30.0, AV_OPT_TYPE_VIDEO_RATE, NULL },
+ { "list_devices", "list available devices", offsetof(CaptureContext, list_devices), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, AV_OPT_FLAG_DECODING_PARAM, "list_devices" },
+ { "true", "", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "list_devices" },
+ { "false", "", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "list_devices" },
+ { "video_device_index", "select video device by index for devices with same name (starts at 0)", offsetof(CaptureContext, video_device_index), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
+ { NULL },
+};
+
+static const AVClass qtkit_class = {
+ .class_name = "QTKit input device",
+ .item_name = av_default_item_name,
+ .option = options,
+ .version = LIBAVUTIL_VERSION_INT,
+ .category = AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT,
+};
+
+AVInputFormat ff_qtkit_demuxer = {
+ .name = "qtkit",
+ .long_name = NULL_IF_CONFIG_SMALL("QTKit input device"),
+ .priv_data_size = sizeof(CaptureContext),
+ .read_header = qtkit_read_header,
+ .read_packet = qtkit_read_packet,
+ .read_close = qtkit_close,
+ .flags = AVFMT_NOFILE,
+ .priv_class = &qtkit_class,
+};
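
The demuxer registers under the name "qtkit", so it is opened like any other
lavf input. A minimal usage sketch, assuming the default capture device
(error handling elided):

    AVInputFormat *fmt = av_find_input_format("qtkit");
    AVDictionary *opts = NULL;
    AVFormatContext *ic = NULL;

    av_dict_set(&opts, "frame_rate", "30", 0);   /* option declared above */
    if (avformat_open_input(&ic, "default", fmt, &opts) < 0)
        /* handle error */;
    av_dict_free(&opts);

Passing a device index (e.g. "0") or a device display name as the filename
selects a specific capture device, per qtkit_read_header() above.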
diff --git a/libavdevice/sdl.c b/libavdevice/sdl.c
new file mode 100644
index 0000000000..432275004c
--- /dev/null
+++ b/libavdevice/sdl.c
@@ -0,0 +1,377 @@
+/*
+ * Copyright (c) 2011 Stefano Sabatini
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * libSDL output device
+ */
+
+#include <SDL.h>
+#include <SDL_thread.h>
+
+#include "libavutil/avstring.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/opt.h"
+#include "libavutil/parseutils.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/time.h"
+#include "avdevice.h"
+
+typedef struct {
+ AVClass *class;
+ SDL_Surface *surface;
+ SDL_Overlay *overlay;
+ char *window_title;
+ char *icon_title;
+ int window_width, window_height; /**< size of the window */
+ int window_fullscreen;
+
+ SDL_Rect overlay_rect;
+ int overlay_fmt;
+
+ int sdl_was_already_inited;
+ SDL_Thread *event_thread;
+ SDL_mutex *mutex;
+ SDL_cond *init_cond;
+ int init_ret; /* return code used to signal initialization errors */
+ int inited;
+ int quit;
+} SDLContext;
+
+static const struct sdl_overlay_pix_fmt_entry {
+ enum AVPixelFormat pix_fmt; int overlay_fmt;
+} sdl_overlay_pix_fmt_map[] = {
+ { AV_PIX_FMT_YUV420P, SDL_IYUV_OVERLAY },
+ { AV_PIX_FMT_YUYV422, SDL_YUY2_OVERLAY },
+ { AV_PIX_FMT_UYVY422, SDL_UYVY_OVERLAY },
+ { AV_PIX_FMT_NONE, 0 },
+};
+
+static int sdl_write_trailer(AVFormatContext *s)
+{
+ SDLContext *sdl = s->priv_data;
+
+ sdl->quit = 1;
+
+ if (sdl->overlay)
+ SDL_FreeYUVOverlay(sdl->overlay);
+ sdl->overlay = NULL;
+ if (sdl->event_thread)
+ SDL_WaitThread(sdl->event_thread, NULL);
+ sdl->event_thread = NULL;
+ if (sdl->mutex)
+ SDL_DestroyMutex(sdl->mutex);
+ sdl->mutex = NULL;
+ if (sdl->init_cond)
+ SDL_DestroyCond(sdl->init_cond);
+ sdl->init_cond = NULL;
+
+ if (!sdl->sdl_was_already_inited)
+ SDL_Quit();
+
+ return 0;
+}
+
+static void compute_overlay_rect(AVFormatContext *s)
+{
+ AVRational sar, dar; /* sample and display aspect ratios */
+ SDLContext *sdl = s->priv_data;
+ AVStream *st = s->streams[0];
+ AVCodecParameters *par = st->codecpar;
+ SDL_Rect *overlay_rect = &sdl->overlay_rect;
+
+ /* compute overlay width and height from the codec context information */
+ sar = st->sample_aspect_ratio.num ? st->sample_aspect_ratio : (AVRational){ 1, 1 };
+ dar = av_mul_q(sar, (AVRational){ par->width, par->height });
+
+ /* we suppose the screen has a 1/1 sample aspect ratio */
+ if (sdl->window_width && sdl->window_height) {
+ /* fit in the window */
+ if (av_cmp_q(dar, (AVRational){ sdl->window_width, sdl->window_height }) > 0) {
+ /* fit in width */
+ overlay_rect->w = sdl->window_width;
+ overlay_rect->h = av_rescale(overlay_rect->w, dar.den, dar.num);
+ } else {
+ /* fit in height */
+ overlay_rect->h = sdl->window_height;
+ overlay_rect->w = av_rescale(overlay_rect->h, dar.num, dar.den);
+ }
+ } else {
+ if (sar.num > sar.den) {
+ overlay_rect->w = par->width;
+ overlay_rect->h = av_rescale(overlay_rect->w, dar.den, dar.num);
+ } else {
+ overlay_rect->h = par->height;
+ overlay_rect->w = av_rescale(overlay_rect->h, dar.num, dar.den);
+ }
+ sdl->window_width = overlay_rect->w;
+ sdl->window_height = overlay_rect->h;
+ }
+
+ overlay_rect->x = (sdl->window_width - overlay_rect->w) / 2;
+ overlay_rect->y = (sdl->window_height - overlay_rect->h) / 2;
+}
+
+#define SDL_BASE_FLAGS (SDL_SWSURFACE|SDL_RESIZABLE)
+
+static int event_thread(void *arg)
+{
+ AVFormatContext *s = arg;
+ SDLContext *sdl = s->priv_data;
+ int flags = SDL_BASE_FLAGS | (sdl->window_fullscreen ? SDL_FULLSCREEN : 0);
+ AVStream *st = s->streams[0];
+ AVCodecParameters *par = st->codecpar;
+
+ /* initialization */
+ if (SDL_Init(SDL_INIT_VIDEO) != 0) {
+ av_log(s, AV_LOG_ERROR, "Unable to initialize SDL: %s\n", SDL_GetError());
+ sdl->init_ret = AVERROR(EINVAL);
+ goto init_end;
+ }
+
+ SDL_WM_SetCaption(sdl->window_title, sdl->icon_title);
+ sdl->surface = SDL_SetVideoMode(sdl->window_width, sdl->window_height,
+ 24, flags);
+ if (!sdl->surface) {
+ av_log(sdl, AV_LOG_ERROR, "Unable to set video mode: %s\n", SDL_GetError());
+ sdl->init_ret = AVERROR(EINVAL);
+ goto init_end;
+ }
+
+ sdl->overlay = SDL_CreateYUVOverlay(par->width, par->height,
+ sdl->overlay_fmt, sdl->surface);
+ if (!sdl->overlay || sdl->overlay->pitches[0] < par->width) {
+ av_log(s, AV_LOG_ERROR,
+ "SDL does not support an overlay with size of %dx%d pixels\n",
+ par->width, par->height);
+ sdl->init_ret = AVERROR(EINVAL);
+ goto init_end;
+ }
+
+ sdl->init_ret = 0;
+ av_log(s, AV_LOG_VERBOSE, "w:%d h:%d fmt:%s -> w:%d h:%d\n",
+ par->width, par->height, av_get_pix_fmt_name(par->format),
+ sdl->overlay_rect.w, sdl->overlay_rect.h);
+
+init_end:
+ SDL_LockMutex(sdl->mutex);
+ sdl->inited = 1;
+ SDL_UnlockMutex(sdl->mutex);
+ SDL_CondSignal(sdl->init_cond);
+
+ if (sdl->init_ret < 0)
+ return sdl->init_ret;
+
+ /* event loop */
+ while (!sdl->quit) {
+ int ret;
+ SDL_Event event;
+ SDL_PumpEvents();
+ ret = SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_ALLEVENTS);
+ if (ret < 0) {
+ av_log(s, AV_LOG_ERROR, "Error when getting SDL event: %s\n", SDL_GetError());
+ continue;
+ }
+ if (ret == 0) {
+ SDL_Delay(10);
+ continue;
+ }
+
+ switch (event.type) {
+ case SDL_KEYDOWN:
+ switch (event.key.keysym.sym) {
+ case SDLK_ESCAPE:
+ case SDLK_q:
+ sdl->quit = 1;
+ break;
+ }
+ break;
+ case SDL_QUIT:
+ sdl->quit = 1;
+ break;
+
+ case SDL_VIDEORESIZE:
+ sdl->window_width = event.resize.w;
+ sdl->window_height = event.resize.h;
+
+ SDL_LockMutex(sdl->mutex);
+ sdl->surface = SDL_SetVideoMode(sdl->window_width, sdl->window_height, 24, SDL_BASE_FLAGS);
+ if (!sdl->surface) {
+ av_log(s, AV_LOG_ERROR, "Failed to set SDL video mode: %s\n", SDL_GetError());
+ sdl->quit = 1;
+ } else {
+ compute_overlay_rect(s);
+ }
+ SDL_UnlockMutex(sdl->mutex);
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int sdl_write_header(AVFormatContext *s)
+{
+ SDLContext *sdl = s->priv_data;
+ AVStream *st = s->streams[0];
+ AVCodecParameters *par = st->codecpar;
+ int i, ret;
+
+ if (!sdl->window_title)
+ sdl->window_title = av_strdup(s->filename);
+ if (!sdl->icon_title)
+ sdl->icon_title = av_strdup(sdl->window_title);
+
+ if (SDL_WasInit(SDL_INIT_VIDEO)) {
+ av_log(s, AV_LOG_ERROR,
+ "SDL video subsystem was already inited, aborting\n");
+ sdl->sdl_was_already_inited = 1;
+ ret = AVERROR(EINVAL);
+ goto fail;
+ }
+
+ if ( s->nb_streams > 1
+ || par->codec_type != AVMEDIA_TYPE_VIDEO
+ || par->codec_id != AV_CODEC_ID_RAWVIDEO) {
+ av_log(s, AV_LOG_ERROR, "Only supports one rawvideo stream\n");
+ ret = AVERROR(EINVAL);
+ goto fail;
+ }
+
+ for (i = 0; sdl_overlay_pix_fmt_map[i].pix_fmt != AV_PIX_FMT_NONE; i++) {
+ if (sdl_overlay_pix_fmt_map[i].pix_fmt == par->format) {
+ sdl->overlay_fmt = sdl_overlay_pix_fmt_map[i].overlay_fmt;
+ break;
+ }
+ }
+
+ if (!sdl->overlay_fmt) {
+ av_log(s, AV_LOG_ERROR,
+ "Unsupported pixel format '%s', choose one of yuv420p, yuyv422, or uyvy422\n",
+ av_get_pix_fmt_name(par->format));
+ ret = AVERROR(EINVAL);
+ goto fail;
+ }
+
+ /* compute overlay width and height from the codec context information */
+ compute_overlay_rect(s);
+
+ sdl->init_cond = SDL_CreateCond();
+ if (!sdl->init_cond) {
+ av_log(s, AV_LOG_ERROR, "Could not create SDL condition variable: %s\n", SDL_GetError());
+ ret = AVERROR_EXTERNAL;
+ goto fail;
+ }
+ sdl->mutex = SDL_CreateMutex();
+ if (!sdl->mutex) {
+ av_log(s, AV_LOG_ERROR, "Could not create SDL mutex: %s\n", SDL_GetError());
+ ret = AVERROR_EXTERNAL;
+ goto fail;
+ }
+ sdl->event_thread = SDL_CreateThread(event_thread, s);
+ if (!sdl->event_thread) {
+ av_log(s, AV_LOG_ERROR, "Could not create SDL event thread: %s\n", SDL_GetError());
+ ret = AVERROR_EXTERNAL;
+ goto fail;
+ }
+
+    /* wait until the video system has been initialized */
+ SDL_LockMutex(sdl->mutex);
+ while (!sdl->inited) {
+ SDL_CondWait(sdl->init_cond, sdl->mutex);
+ }
+ SDL_UnlockMutex(sdl->mutex);
+ if (sdl->init_ret < 0) {
+ ret = sdl->init_ret;
+ goto fail;
+ }
+ return 0;
+
+fail:
+ sdl_write_trailer(s);
+ return ret;
+}
+
+static int sdl_write_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ SDLContext *sdl = s->priv_data;
+ AVCodecParameters *par = s->streams[0]->codecpar;
+ uint8_t *data[4];
+ int linesize[4];
+ int i;
+
+ if (sdl->quit) {
+ sdl_write_trailer(s);
+ return AVERROR(EIO);
+ }
+ av_image_fill_arrays(data, linesize, pkt->data, par->format, par->width, par->height, 1);
+
+ SDL_LockMutex(sdl->mutex);
+ SDL_FillRect(sdl->surface, &sdl->surface->clip_rect,
+ SDL_MapRGB(sdl->surface->format, 0, 0, 0));
+ SDL_LockYUVOverlay(sdl->overlay);
+ for (i = 0; i < 3; i++) {
+ sdl->overlay->pixels [i] = data [i];
+ sdl->overlay->pitches[i] = linesize[i];
+ }
+ SDL_DisplayYUVOverlay(sdl->overlay, &sdl->overlay_rect);
+ SDL_UnlockYUVOverlay(sdl->overlay);
+
+ SDL_UpdateRect(sdl->surface,
+ sdl->overlay_rect.x, sdl->overlay_rect.y,
+ sdl->overlay_rect.w, sdl->overlay_rect.h);
+ SDL_UnlockMutex(sdl->mutex);
+
+ return 0;
+}
+
+#define OFFSET(x) offsetof(SDLContext,x)
+
+static const AVOption options[] = {
+ { "window_title", "set SDL window title", OFFSET(window_title), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, AV_OPT_FLAG_ENCODING_PARAM },
+ { "icon_title", "set SDL iconified window title", OFFSET(icon_title) , AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, AV_OPT_FLAG_ENCODING_PARAM },
+ { "window_size", "set SDL window forced size", OFFSET(window_width), AV_OPT_TYPE_IMAGE_SIZE, { .str = NULL }, 0, 0, AV_OPT_FLAG_ENCODING_PARAM },
+ { "window_fullscreen", "set SDL window fullscreen", OFFSET(window_fullscreen), AV_OPT_TYPE_INT, { .i64 = 0 }, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM },
+ { NULL },
+};
+
+static const AVClass sdl_class = {
+ .class_name = "sdl outdev",
+ .item_name = av_default_item_name,
+ .option = options,
+ .version = LIBAVUTIL_VERSION_INT,
+ .category = AV_CLASS_CATEGORY_DEVICE_VIDEO_OUTPUT,
+};
+
+AVOutputFormat ff_sdl_muxer = {
+ .name = "sdl",
+ .long_name = NULL_IF_CONFIG_SMALL("SDL output device"),
+ .priv_data_size = sizeof(SDLContext),
+ .audio_codec = AV_CODEC_ID_NONE,
+ .video_codec = AV_CODEC_ID_RAWVIDEO,
+ .write_header = sdl_write_header,
+ .write_packet = sdl_write_packet,
+ .write_trailer = sdl_write_trailer,
+ .flags = AVFMT_NOFILE | AVFMT_VARIABLE_FPS | AVFMT_NOTIMESTAMPS,
+ .priv_class = &sdl_class,
+};
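
A minimal sketch of feeding this muxer one rawvideo stream from an
application; the window title is passed in place of a filename, and the
dimensions here are illustrative only:

    AVFormatContext *oc = NULL;
    avformat_alloc_output_context2(&oc, NULL, "sdl", "preview window");

    AVStream *st = avformat_new_stream(oc, NULL);
    st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codecpar->codec_id   = AV_CODEC_ID_RAWVIDEO;
    st->codecpar->format     = AV_PIX_FMT_YUV420P;   /* see the overlay map above */
    st->codecpar->width      = 640;
    st->codecpar->height     = 480;

    avformat_write_header(oc, NULL);   /* spawns the SDL event thread */
    /* ... av_write_frame(oc, pkt) with packed yuv420p frames ... */
    av_write_trailer(oc);
    avformat_free_context(oc);

The same device should be reachable from the command line with something like
"-pix_fmt yuv420p -f sdl" followed by a window title.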
diff --git a/libavdevice/sndio.c b/libavdevice/sndio.c
index 739551b841..46f287588d 100644
--- a/libavdevice/sndio.c
+++ b/libavdevice/sndio.c
@@ -2,27 +2,27 @@
* sndio play and grab interface
* Copyright (c) 2010 Jacob Meuser
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdint.h>
#include <sndio.h>
-#include "libavformat/avformat.h"
+#include "avdevice.h"
#include "libavdevice/sndio.h"
diff --git a/libavdevice/sndio.h b/libavdevice/sndio.h
index cd5c55ecc5..54a5ec3353 100644
--- a/libavdevice/sndio.h
+++ b/libavdevice/sndio.h
@@ -2,20 +2,20 @@
* sndio play and grab interface
* Copyright (c) 2010 Jacob Meuser
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -25,8 +25,8 @@
#include <stdint.h>
#include <sndio.h>
-#include "libavformat/avformat.h"
#include "libavutil/log.h"
+#include "avdevice.h"
typedef struct SndioData {
AVClass *class;
diff --git a/libavdevice/sndio_dec.c b/libavdevice/sndio_dec.c
index a839a6fab2..2d13232bf1 100644
--- a/libavdevice/sndio_dec.c
+++ b/libavdevice/sndio_dec.c
@@ -2,20 +2,20 @@
* sndio play and grab interface
* Copyright (c) 2010 Jacob Meuser
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -106,6 +106,7 @@ static const AVClass sndio_demuxer_class = {
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
+ .category = AV_CLASS_CATEGORY_DEVICE_AUDIO_INPUT,
};
AVInputFormat ff_sndio_demuxer = {
diff --git a/libavdevice/sndio_enc.c b/libavdevice/sndio_enc.c
index 97b1827f82..47f500d71e 100644
--- a/libavdevice/sndio_enc.c
+++ b/libavdevice/sndio_enc.c
@@ -2,20 +2,20 @@
* sndio play and grab interface
* Copyright (c) 2010 Jacob Meuser
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -24,8 +24,8 @@
#include "libavutil/internal.h"
-#include "libavformat/avformat.h"
+#include "libavdevice/avdevice.h"
#include "libavdevice/sndio.h"
static av_cold int audio_write_header(AVFormatContext *s1)
@@ -79,6 +79,13 @@ static int audio_write_trailer(AVFormatContext *s1)
return 0;
}
+static const AVClass sndio_muxer_class = {
+ .class_name = "sndio outdev",
+ .item_name = av_default_item_name,
+ .version = LIBAVUTIL_VERSION_INT,
+ .category = AV_CLASS_CATEGORY_DEVICE_AUDIO_OUTPUT,
+};
+
AVOutputFormat ff_sndio_muxer = {
.name = "sndio",
.long_name = NULL_IF_CONFIG_SMALL("sndio audio playback"),
@@ -92,4 +99,5 @@ AVOutputFormat ff_sndio_muxer = {
.write_packet = audio_write_packet,
.write_trailer = audio_write_trailer,
.flags = AVFMT_NOFILE,
+ .priv_class = &sndio_muxer_class,
};
diff --git a/libavdevice/timefilter-test.c b/libavdevice/timefilter-test.c
index 5e93f3c8d3..39432d5ed7 100644
--- a/libavdevice/timefilter-test.c
+++ b/libavdevice/timefilter-test.c
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -32,17 +32,21 @@ int main(void)
#define SAMPLES 1000
double ideal[SAMPLES];
double samples[SAMPLES];
+ double samplet[SAMPLES];
for (n0 = 0; n0 < 40; n0 = 2 * n0 + 1) {
for (n1 = 0; n1 < 10; n1 = 2 * n1 + 1) {
double best_error = 1000000000;
- double bestpar0 = 1;
- double bestpar1 = 0.001;
+ double bestpar0 = n0 ? 1 : 100000;
+ double bestpar1 = 1;
int better, i;
av_lfg_init(&prng, 123);
for (i = 0; i < SAMPLES; i++) {
- ideal[i] = 10 + i + n1 * i / (1000);
+ samplet[i] = 10 + i + (av_lfg_get(&prng) < LFG_MAX/2 ? 0 : 0.999);
+ ideal[i] = samplet[i] + n1 * i / (1000);
samples[i] = ideal[i] + n0 * (av_lfg_get(&prng) - LFG_MAX / 2) / (LFG_MAX * 10LL);
+ if(i && samples[i]<samples[i-1])
+ samples[i]=samples[i-1]+0.001;
}
do {
@@ -58,7 +62,9 @@ int main(void)
}
for (i = 0; i < SAMPLES; i++) {
double filtered;
- filtered = ff_timefilter_update(tf, samples[i], 1);
+ filtered = ff_timefilter_update(tf, samples[i], i ? (samplet[i] - samplet[i-1]) : 1);
+        if (filtered < 0 || filtered > 1000000000)
+ printf("filter is unstable\n");
error += (filtered - ideal[i]) * (filtered - ideal[i]);
}
ff_timefilter_destroy(tf);
@@ -83,7 +89,7 @@ int main(void)
}
ff_timefilter_destroy(tf);
#else
- printf(" [%f %f %9f]", bestpar0, bestpar1, best_error);
+ printf(" [%12f %11f %9f]", bestpar0, bestpar1, best_error);
#endif
}
printf("\n");
diff --git a/libavdevice/timefilter.c b/libavdevice/timefilter.c
index 4e0d5006d7..ad6485d5e7 100644
--- a/libavdevice/timefilter.c
+++ b/libavdevice/timefilter.c
@@ -5,20 +5,20 @@
* Author: Olivier Guilyardi <olivier samalyse com>
* Michael Niedermayer <michaelni gmx at>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -37,18 +37,25 @@ struct TimeFilter {
int count;
};
-TimeFilter *ff_timefilter_new(double clock_period,
- double feedback2_factor,
- double feedback3_factor)
+/* Approximates 1 - exp(-x) using a third-order power series. */
+static double qexpneg(double x)
{
- TimeFilter *self = av_mallocz(sizeof(TimeFilter));
+ return 1 - 1 / (1 + x * (1 + x / 2 * (1 + x / 3)));
+}
+
+TimeFilter *ff_timefilter_new(double time_base,
+ double period,
+ double bandwidth)
+{
+ TimeFilter *self = av_mallocz(sizeof(TimeFilter));
+ double o = 2 * M_PI * bandwidth * period * time_base;
if (!self)
return NULL;
- self->clock_period = clock_period;
- self->feedback2_factor = feedback2_factor;
- self->feedback3_factor = feedback3_factor;
+ self->clock_period = time_base;
+ self->feedback2_factor = qexpneg(M_SQRT2 * o);
+ self->feedback3_factor = qexpneg(o * o) / period;
return self;
}
@@ -73,7 +80,12 @@ double ff_timefilter_update(TimeFilter *self, double system_time, double period)
loop_error = system_time - self->cycle_time;
self->cycle_time += FFMAX(self->feedback2_factor, 1.0 / self->count) * loop_error;
- self->clock_period += self->feedback3_factor * loop_error / period;
+ self->clock_period += self->feedback3_factor * loop_error;
}
return self->cycle_time;
}
+
+double ff_timefilter_eval(TimeFilter *self, double delta)
+{
+ return self->cycle_time + self->clock_period * delta;
+}
diff --git a/libavdevice/timefilter.h b/libavdevice/timefilter.h
index 2235db60e3..cb3d0a788f 100644
--- a/libavdevice/timefilter.h
+++ b/libavdevice/timefilter.h
@@ -5,20 +5,20 @@
* Author: Olivier Guilyardi <olivier samalyse com>
* Michael Niedermayer <michaelni gmx at>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -45,16 +45,18 @@ typedef struct TimeFilter TimeFilter;
*
* Unless you know what you are doing, you should set these as follow:
*
- * o = 2 * M_PI * bandwidth * period
- * feedback2_factor = sqrt(2 * o)
+ * o = 2 * M_PI * bandwidth * period_in_seconds
+ * feedback2_factor = sqrt(2) * o
* feedback3_factor = o * o
*
* Where bandwidth is up to you to choose. Smaller values will filter out more
* of the jitter, but also take a longer time for the loop to settle. A good
* starting point is something between 0.3 and 3 Hz.
*
- * @param clock_period period of the hardware clock in seconds
- * (for example 1.0/44100)
+ * @param time_base period of the hardware clock in seconds
+ * (for example 1.0/44100)
+ * @param period expected update interval, in input units
+ * @param bandwidth  filtering bandwidth, in Hz
*
* @return a pointer to a TimeFilter struct, or NULL on error
*
@@ -82,6 +84,15 @@ TimeFilter * ff_timefilter_new(double clock_period, double feedback2_factor, dou
double ff_timefilter_update(TimeFilter *self, double system_time, double period);
/**
+ * Evaluate the filter at a specified time
+ *
+ * @param delta difference between the requested time and the current time
+ * (last call to ff_timefilter_update).
+ * @return the filtered time
+ */
+double ff_timefilter_eval(TimeFilter *self, double delta);
+
+/**
* Reset the filter
*
* This function should mainly be called in case of XRUN.
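
For orientation, a minimal caller-side sketch of the reworked API (not part of the patch; the 44.1 kHz rate, 1024-tick period and 1 Hz bandwidth are illustrative assumptions):

    #include "timefilter.h"
    #include "libavutil/time.h"

    static double example_timestamp(void)
    {
        /* time_base: seconds per device tick; period: ticks per update;
         * bandwidth: 1 Hz, inside the 0.3-3 Hz window suggested above */
        TimeFilter *tf = ff_timefilter_new(1.0 / 44100, 1024, 1.0);
        if (!tf)
            return -1.0;
        /* one update per 1024-sample chunk: wall clock (s), elapsed ticks */
        ff_timefilter_update(tf, av_gettime() * 1e-6, 1024);
        /* evaluate the filtered clock 512 ticks past the last update */
        return ff_timefilter_eval(tf, 512);
    }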
diff --git a/libavdevice/utils.c b/libavdevice/utils.c
new file mode 100644
index 0000000000..ccd7318012
--- /dev/null
+++ b/libavdevice/utils.c
@@ -0,0 +1,59 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "internal.h"
+#include "libavutil/opt.h"
+#include "libavformat/avformat.h"
+
+int ff_alloc_input_device_context(AVFormatContext **avctx, AVInputFormat *iformat, const char *format)
+{
+ AVFormatContext *s;
+ int ret = 0;
+
+ *avctx = NULL;
+ if (!iformat && !format)
+ return AVERROR(EINVAL);
+ if (!(s = avformat_alloc_context()))
+ return AVERROR(ENOMEM);
+
+ if (!iformat)
+ iformat = av_find_input_format(format);
+ if (!iformat || !iformat->priv_class || !AV_IS_INPUT_DEVICE(iformat->priv_class->category)) {
+ ret = AVERROR(EINVAL);
+ goto error;
+ }
+ s->iformat = iformat;
+ if (s->iformat->priv_data_size > 0) {
+ s->priv_data = av_mallocz(s->iformat->priv_data_size);
+ if (!s->priv_data) {
+ ret = AVERROR(ENOMEM);
+ goto error;
+ }
+ if (s->iformat->priv_class) {
+ *(const AVClass**)s->priv_data = s->iformat->priv_class;
+ av_opt_set_defaults(s->priv_data);
+ }
+ } else
+ s->priv_data = NULL;
+
+ *avctx = s;
+ return 0;
+ error:
+ avformat_free_context(s);
+ return ret;
+}
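
The helper above lets callers inside the library build a device context whose private options are set before the device is opened. A hedged sketch of that pattern (the "v4l2" name, the "framerate" option and the device path are examples, not mandated by the patch):

    #include "internal.h"
    #include "libavutil/opt.h"
    #include "libavformat/avformat.h"

    static int open_device_example(AVFormatContext **ctx)
    {
        /* allocate a context for the named input device */
        int ret = ff_alloc_input_device_context(ctx, NULL, "v4l2");
        if (ret < 0)
            return ret;
        /* option defaults are in place, so private options can be set now */
        av_opt_set((*ctx)->priv_data, "framerate", "25", 0);
        /* hand the preallocated context to the normal open path */
        return avformat_open_input(ctx, "/dev/video0", (*ctx)->iformat, NULL);
    }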
diff --git a/libavdevice/v4l.c b/libavdevice/v4l.c
new file mode 100644
index 0000000000..81653e02fb
--- /dev/null
+++ b/libavdevice/v4l.c
@@ -0,0 +1,364 @@
+/*
+ * Linux video grab interface
+ * Copyright (c) 2000,2001 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "avdevice.h"
+
+#undef __STRICT_ANSI__ //workaround due to broken kernel headers
+#include "config.h"
+#include "libavutil/rational.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/internal.h"
+#include "libavutil/log.h"
+#include "libavutil/opt.h"
+#include "libavformat/internal.h"
+#include "libavcodec/dsputil.h"
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <sys/time.h>
+#define _LINUX_TIME_H 1
+#include <linux/videodev.h>
+#include <time.h>
+
+typedef struct {
+ AVClass *class;
+ int fd;
+ int frame_format; /* see VIDEO_PALETTE_xxx */
+ int use_mmap;
+ AVRational time_base;
+ int64_t time_frame;
+ int frame_size;
+ struct video_capability video_cap;
+ struct video_audio audio_saved;
+ struct video_window video_win;
+ uint8_t *video_buf;
+ struct video_mbuf gb_buffers;
+ struct video_mmap gb_buf;
+ int gb_frame;
+ int standard;
+} VideoData;
+
+static const struct {
+ int palette;
+ int depth;
+ enum AVPixelFormat pix_fmt;
+} video_formats [] = {
+ {.palette = VIDEO_PALETTE_YUV420P, .depth = 12, .pix_fmt = AV_PIX_FMT_YUV420P },
+ {.palette = VIDEO_PALETTE_YUV422, .depth = 16, .pix_fmt = AV_PIX_FMT_YUYV422 },
+ {.palette = VIDEO_PALETTE_UYVY, .depth = 16, .pix_fmt = AV_PIX_FMT_UYVY422 },
+ {.palette = VIDEO_PALETTE_YUYV, .depth = 16, .pix_fmt = AV_PIX_FMT_YUYV422 },
+ /* NOTE: v4l uses BGR24, not RGB24 */
+ {.palette = VIDEO_PALETTE_RGB24, .depth = 24, .pix_fmt = AV_PIX_FMT_BGR24 },
+ {.palette = VIDEO_PALETTE_RGB565, .depth = 16, .pix_fmt = AV_PIX_FMT_BGR565 },
+ {.palette = VIDEO_PALETTE_GREY, .depth = 8, .pix_fmt = AV_PIX_FMT_GRAY8 },
+};
+
+
+static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
+{
+ VideoData *s = s1->priv_data;
+ AVStream *st;
+ int video_fd;
+ int desired_palette, desired_depth;
+ struct video_tuner tuner;
+ struct video_audio audio;
+ struct video_picture pict;
+ int j;
+ int vformat_num = FF_ARRAY_ELEMS(video_formats);
+
+ av_log(s1, AV_LOG_WARNING, "V4L input device is deprecated and will be removed in the next release.");
+
+ if (ap->time_base.den <= 0) {
+ av_log(s1, AV_LOG_ERROR, "Wrong time base (%d)\n", ap->time_base.den);
+ return -1;
+ }
+ s->time_base = ap->time_base;
+
+ s->video_win.width = ap->width;
+ s->video_win.height = ap->height;
+
+ st = avformat_new_stream(s1, NULL);
+ if (!st)
+ return AVERROR(ENOMEM);
+ avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
+
+ video_fd = open(s1->filename, O_RDWR);
+ if (video_fd < 0) {
+ av_log(s1, AV_LOG_ERROR, "%s: %s\n", s1->filename, strerror(errno));
+ goto fail;
+ }
+
+ if (ioctl(video_fd, VIDIOCGCAP, &s->video_cap) < 0) {
+ av_log(s1, AV_LOG_ERROR, "VIDIOCGCAP: %s\n", strerror(errno));
+ goto fail;
+ }
+
+ if (!(s->video_cap.type & VID_TYPE_CAPTURE)) {
+ av_log(s1, AV_LOG_ERROR, "Fatal: grab device does not handle capture\n");
+ goto fail;
+ }
+
+ /* no values set, autodetect them */
+ if (s->video_win.width <= 0 || s->video_win.height <= 0) {
+ if (ioctl(video_fd, VIDIOCGWIN, &s->video_win, sizeof(s->video_win)) < 0) {
+ av_log(s1, AV_LOG_ERROR, "VIDIOCGWIN: %s\n", strerror(errno));
+ goto fail;
+ }
+ }
+
+ if (av_image_check_size(s->video_win.width, s->video_win.height, 0, s1) < 0)
+ return -1;
+
+ desired_palette = -1;
+ desired_depth = -1;
+ for (j = 0; j < vformat_num; j++) {
+ if (ap->pix_fmt == video_formats[j].pix_fmt) {
+ desired_palette = video_formats[j].palette;
+ desired_depth = video_formats[j].depth;
+ break;
+ }
+ }
+
+ /* set tv standard */
+ if (!ioctl(video_fd, VIDIOCGTUNER, &tuner)) {
+ tuner.mode = s->standard;
+ ioctl(video_fd, VIDIOCSTUNER, &tuner);
+ }
+
+ /* unmute audio */
+ audio.audio = 0;
+ ioctl(video_fd, VIDIOCGAUDIO, &audio);
+ memcpy(&s->audio_saved, &audio, sizeof(audio));
+ audio.flags &= ~VIDEO_AUDIO_MUTE;
+ ioctl(video_fd, VIDIOCSAUDIO, &audio);
+
+ ioctl(video_fd, VIDIOCGPICT, &pict);
+ ff_dlog(s1, "v4l: colour=%d hue=%d brightness=%d constrast=%d whiteness=%d\n",
+ pict.colour, pict.hue, pict.brightness, pict.contrast, pict.whiteness);
+ /* try to choose a suitable video format */
+ pict.palette = desired_palette;
+ pict.depth= desired_depth;
+ if (desired_palette == -1 || ioctl(video_fd, VIDIOCSPICT, &pict) < 0) {
+ for (j = 0; j < vformat_num; j++) {
+ pict.palette = video_formats[j].palette;
+ pict.depth = video_formats[j].depth;
+ if (-1 != ioctl(video_fd, VIDIOCSPICT, &pict))
+ break;
+ }
+ if (j >= vformat_num)
+ goto fail1;
+ }
+
+ if (ioctl(video_fd, VIDIOCGMBUF, &s->gb_buffers) < 0) {
+ /* try to use read based access */
+ int val;
+
+ s->video_win.x = 0;
+ s->video_win.y = 0;
+ s->video_win.chromakey = -1;
+ s->video_win.flags = 0;
+
+ if (ioctl(video_fd, VIDIOCSWIN, &s->video_win) < 0) {
+ av_log(s1, AV_LOG_ERROR, "VIDIOCSWIN: %s\n", strerror(errno));
+ goto fail;
+ }
+
+ s->frame_format = pict.palette;
+
+ val = 1;
+ if (ioctl(video_fd, VIDIOCCAPTURE, &val) < 0) {
+ av_log(s1, AV_LOG_ERROR, "VIDIOCCAPTURE: %s\n", strerror(errno));
+ goto fail;
+ }
+
+ s->time_frame = av_gettime() * s->time_base.den / s->time_base.num;
+ s->use_mmap = 0;
+ } else {
+ s->video_buf = mmap(0, s->gb_buffers.size, PROT_READ|PROT_WRITE, MAP_SHARED, video_fd, 0);
+ if ((unsigned char*)-1 == s->video_buf) {
+ s->video_buf = mmap(0, s->gb_buffers.size, PROT_READ|PROT_WRITE, MAP_PRIVATE, video_fd, 0);
+ if ((unsigned char*)-1 == s->video_buf) {
+ av_log(s1, AV_LOG_ERROR, "mmap: %s\n", strerror(errno));
+ goto fail;
+ }
+ }
+ s->gb_frame = 0;
+ s->time_frame = av_gettime() * s->time_base.den / s->time_base.num;
+
+ /* start to grab the first frame */
+ s->gb_buf.frame = s->gb_frame % s->gb_buffers.frames;
+ s->gb_buf.height = s->video_win.height;
+ s->gb_buf.width = s->video_win.width;
+ s->gb_buf.format = pict.palette;
+
+ if (ioctl(video_fd, VIDIOCMCAPTURE, &s->gb_buf) < 0) {
+ if (errno != EAGAIN) {
+ fail1:
+ av_log(s1, AV_LOG_ERROR, "VIDIOCMCAPTURE: %s\n", strerror(errno));
+ } else {
+ av_log(s1, AV_LOG_ERROR, "Fatal: grab device does not receive any video signal\n");
+ }
+ goto fail;
+ }
+ for (j = 1; j < s->gb_buffers.frames; j++) {
+ s->gb_buf.frame = j;
+ ioctl(video_fd, VIDIOCMCAPTURE, &s->gb_buf);
+ }
+ s->frame_format = s->gb_buf.format;
+ s->use_mmap = 1;
+ }
+
+ for (j = 0; j < vformat_num; j++) {
+ if (s->frame_format == video_formats[j].palette) {
+ s->frame_size = s->video_win.width * s->video_win.height * video_formats[j].depth / 8;
+ st->codec->pix_fmt = video_formats[j].pix_fmt;
+ break;
+ }
+ }
+
+ if (j >= vformat_num)
+ goto fail;
+
+ s->fd = video_fd;
+
+ st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
+ st->codec->codec_id = AV_CODEC_ID_RAWVIDEO;
+ st->codec->width = s->video_win.width;
+ st->codec->height = s->video_win.height;
+ st->codec->time_base = s->time_base;
+ st->codec->bit_rate = s->frame_size * 1/av_q2d(st->codec->time_base) * 8;
+
+ return 0;
+ fail:
+ if (video_fd >= 0)
+ close(video_fd);
+ return AVERROR(EIO);
+}
+
+static int v4l_mm_read_picture(VideoData *s, uint8_t *buf)
+{
+ uint8_t *ptr;
+
+ while (ioctl(s->fd, VIDIOCSYNC, &s->gb_frame) < 0 &&
+ (errno == EAGAIN || errno == EINTR));
+
+ ptr = s->video_buf + s->gb_buffers.offsets[s->gb_frame];
+ memcpy(buf, ptr, s->frame_size);
+
+ /* Setup to capture the next frame */
+ s->gb_buf.frame = s->gb_frame;
+ if (ioctl(s->fd, VIDIOCMCAPTURE, &s->gb_buf) < 0) {
+ if (errno == EAGAIN)
+ av_log(NULL, AV_LOG_ERROR, "Cannot Sync\n");
+ else
+ av_log(NULL, AV_LOG_ERROR, "VIDIOCMCAPTURE: %s\n", strerror(errno));
+ return AVERROR(EIO);
+ }
+
+ /* This is now the grabbing frame */
+ s->gb_frame = (s->gb_frame + 1) % s->gb_buffers.frames;
+
+ return s->frame_size;
+}
+
+static int grab_read_packet(AVFormatContext *s1, AVPacket *pkt)
+{
+ VideoData *s = s1->priv_data;
+ int64_t curtime, delay;
+ struct timespec ts;
+
+ /* Calculate the time of the next frame */
+ s->time_frame += INT64_C(1000000);
+
+ /* wait based on the frame rate */
+ for(;;) {
+ curtime = av_gettime();
+ delay = s->time_frame * s->time_base.num / s->time_base.den - curtime;
+ if (delay <= 0) {
+ if (delay < INT64_C(-1000000) * s->time_base.num / s->time_base.den) {
+ /* printf("grabbing is %d frames late (dropping)\n", (int) -(delay / 16666)); */
+ s->time_frame += INT64_C(1000000);
+ }
+ break;
+ }
+ ts.tv_sec = delay / 1000000;
+ ts.tv_nsec = (delay % 1000000) * 1000;
+ nanosleep(&ts, NULL);
+ }
+
+ if (av_new_packet(pkt, s->frame_size) < 0)
+ return AVERROR(EIO);
+
+ pkt->pts = curtime;
+
+ /* read one frame */
+ if (s->use_mmap) {
+ return v4l_mm_read_picture(s, pkt->data);
+ } else {
+ if (read(s->fd, pkt->data, pkt->size) != pkt->size)
+ return AVERROR(EIO);
+ return s->frame_size;
+ }
+}
+
+static int grab_read_close(AVFormatContext *s1)
+{
+ VideoData *s = s1->priv_data;
+
+ if (s->use_mmap)
+ munmap(s->video_buf, s->gb_buffers.size);
+
+ /* mute audio. we must force it because the BTTV driver does not
+ return its state correctly */
+ s->audio_saved.flags |= VIDEO_AUDIO_MUTE;
+ ioctl(s->fd, VIDIOCSAUDIO, &s->audio_saved);
+
+ close(s->fd);
+ return 0;
+}
+
+static const AVOption options[] = {
+ { "standard", "", offsetof(VideoData, standard), AV_OPT_TYPE_INT, {.i64 = VIDEO_MODE_NTSC}, VIDEO_MODE_PAL, VIDEO_MODE_NTSC, AV_OPT_FLAG_DECODING_PARAM, "standard" },
+ { "PAL", "", 0, AV_OPT_TYPE_CONST, {.i64 = VIDEO_MODE_PAL}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "standard" },
+ { "SECAM", "", 0, AV_OPT_TYPE_CONST, {.i64 = VIDEO_MODE_SECAM}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "standard" },
+ { "NTSC", "", 0, AV_OPT_TYPE_CONST, {.i64 = VIDEO_MODE_NTSC}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "standard" },
+ { NULL },
+};
+
+static const AVClass v4l_class = {
+ .class_name = "V4L indev",
+ .item_name = av_default_item_name,
+ .option = options,
+ .version = LIBAVUTIL_VERSION_INT,
+ .category = AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT,
+};
+
+AVInputFormat ff_v4l_demuxer = {
+ .name = "video4linux,v4l",
+ .long_name = NULL_IF_CONFIG_SMALL("Video4Linux device grab"),
+ .priv_data_size = sizeof(VideoData),
+ .read_header = grab_read_header,
+ .read_packet = grab_read_packet,
+ .read_close = grab_read_close,
+ .flags = AVFMT_NOFILE,
+ .priv_class = &v4l_class,
+};
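
Since the only tunable left on this deprecated demuxer is the "standard" option declared above, a hypothetical caller-side sketch of selecting it (the device path and error handling are placeholders):

    #include "libavformat/avformat.h"
    #include "libavutil/dict.h"

    static int open_v4l_pal_example(AVFormatContext **ic)
    {
        AVDictionary *opts = NULL;
        int ret;
        av_dict_set(&opts, "standard", "PAL", 0);
        ret = avformat_open_input(ic, "/dev/video0",
                                  av_find_input_format("v4l"), &opts);
        av_dict_free(&opts);
        return ret;
    }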
diff --git a/libavdevice/v4l2-common.c b/libavdevice/v4l2-common.c
new file mode 100644
index 0000000000..196c09b7fc
--- /dev/null
+++ b/libavdevice/v4l2-common.c
@@ -0,0 +1,105 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "v4l2-common.h"
+
+const struct fmt_map ff_fmt_conversion_table[] = {
+ //ff_fmt codec_id v4l2_fmt
+ { AV_PIX_FMT_YUV420P, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_YUV420 },
+ { AV_PIX_FMT_YUV420P, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_YVU420 },
+ { AV_PIX_FMT_YUV422P, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_YUV422P },
+ { AV_PIX_FMT_YUYV422, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_YUYV },
+ { AV_PIX_FMT_UYVY422, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_UYVY },
+ { AV_PIX_FMT_YUV411P, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_YUV411P },
+ { AV_PIX_FMT_YUV410P, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_YUV410 },
+ { AV_PIX_FMT_YUV410P, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_YVU410 },
+ { AV_PIX_FMT_RGB555LE,AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_RGB555 },
+ { AV_PIX_FMT_RGB555BE,AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_RGB555X },
+ { AV_PIX_FMT_RGB565LE,AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_RGB565 },
+ { AV_PIX_FMT_RGB565BE,AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_RGB565X },
+ { AV_PIX_FMT_BGR24, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_BGR24 },
+ { AV_PIX_FMT_RGB24, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_RGB24 },
+ { AV_PIX_FMT_BGR0, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_BGR32 },
+ { AV_PIX_FMT_0RGB, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_RGB32 },
+ { AV_PIX_FMT_GRAY8, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_GREY },
+#ifdef V4L2_PIX_FMT_Y16
+ { AV_PIX_FMT_GRAY16LE,AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_Y16 },
+#endif
+ { AV_PIX_FMT_NV12, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_NV12 },
+ { AV_PIX_FMT_NONE, AV_CODEC_ID_MJPEG, V4L2_PIX_FMT_MJPEG },
+ { AV_PIX_FMT_NONE, AV_CODEC_ID_MJPEG, V4L2_PIX_FMT_JPEG },
+#ifdef V4L2_PIX_FMT_H264
+ { AV_PIX_FMT_NONE, AV_CODEC_ID_H264, V4L2_PIX_FMT_H264 },
+#endif
+#ifdef V4L2_PIX_FMT_MPEG4
+ { AV_PIX_FMT_NONE, AV_CODEC_ID_MPEG4, V4L2_PIX_FMT_MPEG4 },
+#endif
+#ifdef V4L2_PIX_FMT_CPIA1
+ { AV_PIX_FMT_NONE, AV_CODEC_ID_CPIA, V4L2_PIX_FMT_CPIA1 },
+#endif
+#ifdef V4L2_PIX_FMT_SRGGB8
+ { AV_PIX_FMT_BAYER_BGGR8, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_SBGGR8 },
+ { AV_PIX_FMT_BAYER_GBRG8, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_SGBRG8 },
+ { AV_PIX_FMT_BAYER_GRBG8, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_SGRBG8 },
+ { AV_PIX_FMT_BAYER_RGGB8, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_SRGGB8 },
+#endif
+ { AV_PIX_FMT_NONE, AV_CODEC_ID_NONE, 0 },
+};
+
+uint32_t ff_fmt_ff2v4l(enum AVPixelFormat pix_fmt, enum AVCodecID codec_id)
+{
+ int i;
+
+ for (i = 0; ff_fmt_conversion_table[i].codec_id != AV_CODEC_ID_NONE; i++) {
+ if ((codec_id == AV_CODEC_ID_NONE ||
+ ff_fmt_conversion_table[i].codec_id == codec_id) &&
+ (pix_fmt == AV_PIX_FMT_NONE ||
+ ff_fmt_conversion_table[i].ff_fmt == pix_fmt)) {
+ return ff_fmt_conversion_table[i].v4l2_fmt;
+ }
+ }
+
+ return 0;
+}
+
+enum AVPixelFormat ff_fmt_v4l2ff(uint32_t v4l2_fmt, enum AVCodecID codec_id)
+{
+ int i;
+
+ for (i = 0; ff_fmt_conversion_table[i].codec_id != AV_CODEC_ID_NONE; i++) {
+ if (ff_fmt_conversion_table[i].v4l2_fmt == v4l2_fmt &&
+ ff_fmt_conversion_table[i].codec_id == codec_id) {
+ return ff_fmt_conversion_table[i].ff_fmt;
+ }
+ }
+
+ return AV_PIX_FMT_NONE;
+}
+
+enum AVCodecID ff_fmt_v4l2codec(uint32_t v4l2_fmt)
+{
+ int i;
+
+ for (i = 0; ff_fmt_conversion_table[i].codec_id != AV_CODEC_ID_NONE; i++) {
+ if (ff_fmt_conversion_table[i].v4l2_fmt == v4l2_fmt) {
+ return ff_fmt_conversion_table[i].codec_id;
+ }
+ }
+
+ return AV_CODEC_ID_NONE;
+}
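
The three lookups are designed to compose; a short illustration of the round trip (the first matching table row wins, which is why duplicate rows such as YVU420 never come back from ff_fmt_ff2v4l):

    #include "v4l2-common.h"

    static void fmt_roundtrip_example(void)
    {
        /* forward: pixel format + codec -> V4L2 fourcc (first match) */
        uint32_t v4l2_fmt = ff_fmt_ff2v4l(AV_PIX_FMT_YUV420P,
                                          AV_CODEC_ID_RAWVIDEO);
        /* reverse: fourcc -> codec, then fourcc + codec -> pixel format */
        enum AVCodecID id      = ff_fmt_v4l2codec(v4l2_fmt); /* RAWVIDEO */
        enum AVPixelFormat fmt = ff_fmt_v4l2ff(v4l2_fmt, id);/* YUV420P  */
        (void)fmt;
    }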
diff --git a/libavdevice/v4l2-common.h b/libavdevice/v4l2-common.h
new file mode 100644
index 0000000000..40c716489f
--- /dev/null
+++ b/libavdevice/v4l2-common.h
@@ -0,0 +1,62 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVDEVICE_V4L2_COMMON_H
+#define AVDEVICE_V4L2_COMMON_H
+
+#undef __STRICT_ANSI__ //workaround due to broken kernel headers
+#include "config.h"
+#include "libavformat/internal.h"
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <sys/time.h>
+#if HAVE_SYS_VIDEOIO_H
+#include <sys/videoio.h>
+#else
+#if HAVE_ASM_TYPES_H
+#include <asm/types.h>
+#endif
+#include <linux/videodev2.h>
+#endif
+#include "libavutil/atomic.h"
+#include "libavutil/avassert.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/log.h"
+#include "libavutil/opt.h"
+#include "avdevice.h"
+#include "timefilter.h"
+#include "libavutil/parseutils.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/time.h"
+#include "libavutil/avstring.h"
+
+struct fmt_map {
+ enum AVPixelFormat ff_fmt;
+ enum AVCodecID codec_id;
+ uint32_t v4l2_fmt;
+};
+
+extern const struct fmt_map ff_fmt_conversion_table[];
+
+uint32_t ff_fmt_ff2v4l(enum AVPixelFormat pix_fmt, enum AVCodecID codec_id);
+enum AVPixelFormat ff_fmt_v4l2ff(uint32_t v4l2_fmt, enum AVCodecID codec_id);
+enum AVCodecID ff_fmt_v4l2codec(uint32_t v4l2_fmt);
+
+#endif /* AVDEVICE_V4L2_COMMON_H */
diff --git a/libavdevice/v4l2.c b/libavdevice/v4l2.c
index 46db25dbd3..103fb105f2 100644
--- a/libavdevice/v4l2.c
+++ b/libavdevice/v4l2.c
@@ -1,57 +1,41 @@
/*
- * Video4Linux2 grab interface
* Copyright (c) 2000,2001 Fabrice Bellard
* Copyright (c) 2006 Luca Abeni
*
- * Part of this file is based on the V4L2 video capture example
- * (http://v4l2spec.bytesex.org/v4l2spec/capture.c)
- *
- * Thanks to Michael Niedermayer for providing the mapping between
- * V4L2_PIX_FMT_* and AV_PIX_FMT_*
+ * This file is part of FFmpeg.
*
- *
- * This file is part of Libav.
- *
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
-#undef __STRICT_ANSI__ //workaround due to broken kernel headers
-#include "config.h"
-#include "libavformat/avformat.h"
-#include "libavformat/internal.h"
-#include <unistd.h>
-#include <fcntl.h>
-#include <sys/ioctl.h>
-#include <sys/mman.h>
-#include <sys/time.h>
-#include <poll.h>
-#if HAVE_SYS_VIDEOIO_H
-#include <sys/videoio.h>
-#else
-#include <linux/videodev2.h>
+/**
+ * @file
+ * Video4Linux2 grab interface
+ *
+ * Part of this file is based on the V4L2 video capture example
+ * (http://linuxtv.org/downloads/v4l-dvb-apis/capture-example.html)
+ *
+ * Thanks to Michael Niedermayer for providing the mapping between
+ * V4L2_PIX_FMT_* and AV_PIX_FMT_*
+ */
+
+#include "v4l2-common.h"
+#include <dirent.h>
+
+#if CONFIG_LIBV4L2
+#include <libv4l2.h>
#endif
-#include "libavutil/atomic.h"
-#include "libavutil/avassert.h"
-#include "libavutil/imgutils.h"
-#include "libavutil/internal.h"
-#include "libavutil/log.h"
-#include "libavutil/opt.h"
-#include "libavutil/parseutils.h"
-#include "libavutil/pixdesc.h"
-#include "libavutil/avstring.h"
-#include "libavutil/mathematics.h"
static const int desired_video_buffers = 256;
@@ -59,103 +43,129 @@ static const int desired_video_buffers = 256;
#define V4L_RAWFORMATS 1
#define V4L_COMPFORMATS 2
+/**
+ * Return timestamps to the user exactly as returned by the kernel
+ */
+#define V4L_TS_DEFAULT 0
+/**
+ * Autodetect the kind of timestamps returned by the kernel and convert to
+ * absolute (wall clock) timestamps.
+ */
+#define V4L_TS_ABS 1
+/**
+ * Assume kernel timestamps are from the monotonic clock and convert to
+ * absolute timestamps.
+ */
+#define V4L_TS_MONO2ABS 2
+
+/**
+ * Once the kind of timestamps returned by the kernel has been detected,
+ * the value of the timefilter (NULL or not) determines whether a conversion
+ * takes place.
+ */
+#define V4L_TS_CONVERT_READY V4L_TS_DEFAULT
+
struct video_data {
AVClass *class;
int fd;
- int frame_format; /* V4L2_PIX_FMT_* */
+ int pixelformat; /* V4L2_PIX_FMT_* */
int width, height;
int frame_size;
- int timeout;
int interlaced;
int top_field_first;
+ int ts_mode;
+ TimeFilter *timefilter;
+ int64_t last_time_m;
int buffers;
volatile int buffers_queued;
void **buf_start;
unsigned int *buf_len;
char *standard;
+ v4l2_std_id std_id;
int channel;
- char *video_size; /**< String describing video size,
- set by a private option. */
char *pixel_format; /**< Set by a private option. */
int list_format; /**< Set by a private option. */
+ int list_standard; /**< Set by a private option. */
char *framerate; /**< Set by a private option. */
+
+ int use_libv4l2;
+ int (*open_f)(const char *file, int oflag, ...);
+ int (*close_f)(int fd);
+ int (*dup_f)(int fd);
+ int (*ioctl_f)(int fd, unsigned long int request, ...);
+ ssize_t (*read_f)(int fd, void *buffer, size_t n);
+ void *(*mmap_f)(void *start, size_t length, int prot, int flags, int fd, int64_t offset);
+ int (*munmap_f)(void *_start, size_t length);
};
struct buff_data {
struct video_data *s;
int index;
- int fd;
-};
-
-struct fmt_map {
- enum AVPixelFormat ff_fmt;
- enum AVCodecID codec_id;
- uint32_t v4l2_fmt;
-};
-
-static struct fmt_map fmt_conversion_table[] = {
- //ff_fmt codec_id v4l2_fmt
- { AV_PIX_FMT_YUV420P, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_YUV420 },
- { AV_PIX_FMT_YUV422P, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_YUV422P },
- { AV_PIX_FMT_YUYV422, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_YUYV },
- { AV_PIX_FMT_UYVY422, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_UYVY },
- { AV_PIX_FMT_YUV411P, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_YUV411P },
- { AV_PIX_FMT_YUV410P, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_YUV410 },
- { AV_PIX_FMT_RGB555, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_RGB555 },
- { AV_PIX_FMT_RGB565, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_RGB565 },
- { AV_PIX_FMT_BGR24, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_BGR24 },
- { AV_PIX_FMT_RGB24, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_RGB24 },
- { AV_PIX_FMT_BGRA, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_BGR32 },
- { AV_PIX_FMT_GRAY8, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_GREY },
- { AV_PIX_FMT_NV12, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_NV12 },
- { AV_PIX_FMT_NONE, AV_CODEC_ID_MJPEG, V4L2_PIX_FMT_MJPEG },
- { AV_PIX_FMT_NONE, AV_CODEC_ID_MJPEG, V4L2_PIX_FMT_JPEG },
-#ifdef V4L2_PIX_FMT_H264
- { AV_PIX_FMT_NONE, AV_CODEC_ID_H264, V4L2_PIX_FMT_H264 },
-#endif
};
static int device_open(AVFormatContext *ctx)
{
+ struct video_data *s = ctx->priv_data;
struct v4l2_capability cap;
int fd;
- int res, err;
+ int err;
int flags = O_RDWR;
- char errbuf[128];
+
+#define SET_WRAPPERS(prefix) do { \
+ s->open_f = prefix ## open; \
+ s->close_f = prefix ## close; \
+ s->dup_f = prefix ## dup; \
+ s->ioctl_f = prefix ## ioctl; \
+ s->read_f = prefix ## read; \
+ s->mmap_f = prefix ## mmap; \
+ s->munmap_f = prefix ## munmap; \
+} while (0)
+
+ if (s->use_libv4l2) {
+#if CONFIG_LIBV4L2
+ SET_WRAPPERS(v4l2_);
+#else
+ av_log(ctx, AV_LOG_ERROR, "libavdevice is not built with libv4l2 support.\n");
+ return AVERROR(EINVAL);
+#endif
+ } else {
+ SET_WRAPPERS();
+ }
+
+#define v4l2_open s->open_f
+#define v4l2_close s->close_f
+#define v4l2_dup s->dup_f
+#define v4l2_ioctl s->ioctl_f
+#define v4l2_read s->read_f
+#define v4l2_mmap s->mmap_f
+#define v4l2_munmap s->munmap_f
if (ctx->flags & AVFMT_FLAG_NONBLOCK) {
flags |= O_NONBLOCK;
}
- fd = avpriv_open(ctx->filename, flags);
+ fd = v4l2_open(ctx->filename, flags, 0);
if (fd < 0) {
err = AVERROR(errno);
- av_strerror(err, errbuf, sizeof(errbuf));
-
- av_log(ctx, AV_LOG_ERROR, "Cannot open video device %s : %s\n",
- ctx->filename, errbuf);
-
+ av_log(ctx, AV_LOG_ERROR, "Cannot open video device %s: %s\n",
+ ctx->filename, av_err2str(err));
return err;
}
- res = ioctl(fd, VIDIOC_QUERYCAP, &cap);
- if (res < 0) {
+ if (v4l2_ioctl(fd, VIDIOC_QUERYCAP, &cap) < 0) {
err = AVERROR(errno);
- av_strerror(err, errbuf, sizeof(errbuf));
av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_QUERYCAP): %s\n",
- errbuf);
-
+ av_err2str(err));
goto fail;
}
- av_log(ctx, AV_LOG_VERBOSE, "[%d]Capabilities: %x\n",
+ av_log(ctx, AV_LOG_VERBOSE, "fd:%d capabilities:%x\n",
fd, cap.capabilities);
if (!(cap.capabilities & V4L2_CAP_VIDEO_CAPTURE)) {
av_log(ctx, AV_LOG_ERROR, "Not a video capture device.\n");
err = AVERROR(ENODEV);
-
goto fail;
}
@@ -163,33 +173,32 @@ static int device_open(AVFormatContext *ctx)
av_log(ctx, AV_LOG_ERROR,
"The device does not support the streaming I/O method.\n");
err = AVERROR(ENOSYS);
-
goto fail;
}
return fd;
fail:
- close(fd);
+ v4l2_close(fd);
return err;
}
static int device_init(AVFormatContext *ctx, int *width, int *height,
- uint32_t pix_fmt)
+ uint32_t pixelformat)
{
struct video_data *s = ctx->priv_data;
- int fd = s->fd;
struct v4l2_format fmt = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE };
- struct v4l2_pix_format *pix = &fmt.fmt.pix;
-
- int res;
+ int res = 0;
- pix->width = *width;
- pix->height = *height;
- pix->pixelformat = pix_fmt;
- pix->field = V4L2_FIELD_ANY;
+ fmt.fmt.pix.width = *width;
+ fmt.fmt.pix.height = *height;
+ fmt.fmt.pix.pixelformat = pixelformat;
+ fmt.fmt.pix.field = V4L2_FIELD_ANY;
- res = ioctl(fd, VIDIOC_S_FMT, &fmt);
+ /* Some drivers will fail and return EINVAL when the pixelformat
+ is not supported (even if the type field is valid and supported) */
+ if (v4l2_ioctl(s->fd, VIDIOC_S_FMT, &fmt) < 0)
+ res = AVERROR(errno);
if ((*width != fmt.fmt.pix.width) || (*height != fmt.fmt.pix.height)) {
av_log(ctx, AV_LOG_INFO,
@@ -199,87 +208,44 @@ static int device_init(AVFormatContext *ctx, int *width, int *height,
*height = fmt.fmt.pix.height;
}
- if (pix_fmt != fmt.fmt.pix.pixelformat) {
+ if (pixelformat != fmt.fmt.pix.pixelformat) {
av_log(ctx, AV_LOG_DEBUG,
"The V4L2 driver changed the pixel format "
"from 0x%08X to 0x%08X\n",
- pix_fmt, fmt.fmt.pix.pixelformat);
- res = -1;
+ pixelformat, fmt.fmt.pix.pixelformat);
+ res = AVERROR(EINVAL);
}
if (fmt.fmt.pix.field == V4L2_FIELD_INTERLACED) {
- av_log(ctx, AV_LOG_DEBUG, "The V4L2 driver using the interlaced mode");
+ av_log(ctx, AV_LOG_DEBUG,
+ "The V4L2 driver is using the interlaced mode\n");
s->interlaced = 1;
}
return res;
}
-static int first_field(int fd)
+static int first_field(const struct video_data *s)
{
int res;
v4l2_std_id std;
- res = ioctl(fd, VIDIOC_G_STD, &std);
- if (res < 0) {
+ res = v4l2_ioctl(s->fd, VIDIOC_G_STD, &std);
+ if (res < 0)
return 0;
- }
- if (std & V4L2_STD_NTSC) {
+ if (std & V4L2_STD_NTSC)
return 0;
- }
return 1;
}
-static uint32_t fmt_ff2v4l(enum AVPixelFormat pix_fmt, enum AVCodecID codec_id)
-{
- int i;
-
- for (i = 0; i < FF_ARRAY_ELEMS(fmt_conversion_table); i++) {
- if ((codec_id == AV_CODEC_ID_NONE ||
- fmt_conversion_table[i].codec_id == codec_id) &&
- (pix_fmt == AV_PIX_FMT_NONE ||
- fmt_conversion_table[i].ff_fmt == pix_fmt)) {
- return fmt_conversion_table[i].v4l2_fmt;
- }
- }
-
- return 0;
-}
-
-static enum AVPixelFormat fmt_v4l2ff(uint32_t v4l2_fmt, enum AVCodecID codec_id)
-{
- int i;
-
- for (i = 0; i < FF_ARRAY_ELEMS(fmt_conversion_table); i++) {
- if (fmt_conversion_table[i].v4l2_fmt == v4l2_fmt &&
- fmt_conversion_table[i].codec_id == codec_id) {
- return fmt_conversion_table[i].ff_fmt;
- }
- }
-
- return AV_PIX_FMT_NONE;
-}
-
-static enum AVCodecID fmt_v4l2codec(uint32_t v4l2_fmt)
-{
- int i;
-
- for (i = 0; i < FF_ARRAY_ELEMS(fmt_conversion_table); i++) {
- if (fmt_conversion_table[i].v4l2_fmt == v4l2_fmt) {
- return fmt_conversion_table[i].codec_id;
- }
- }
-
- return AV_CODEC_ID_NONE;
-}
-
#if HAVE_STRUCT_V4L2_FRMIVALENUM_DISCRETE
-static void list_framesizes(AVFormatContext *ctx, int fd, uint32_t pixelformat)
+static void list_framesizes(AVFormatContext *ctx, uint32_t pixelformat)
{
+ const struct video_data *s = ctx->priv_data;
struct v4l2_frmsizeenum vfse = { .pixel_format = pixelformat };
- while(!ioctl(fd, VIDIOC_ENUM_FRAMESIZES, &vfse)) {
+ while(!v4l2_ioctl(s->fd, VIDIOC_ENUM_FRAMESIZES, &vfse)) {
switch (vfse.type) {
case V4L2_FRMSIZE_TYPE_DISCRETE:
av_log(ctx, AV_LOG_INFO, " %ux%u",
@@ -300,26 +266,27 @@ static void list_framesizes(AVFormatContext *ctx, int fd, uint32_t pixelformat)
}
#endif
-static void list_formats(AVFormatContext *ctx, int fd, int type)
+static void list_formats(AVFormatContext *ctx, int type)
{
+ const struct video_data *s = ctx->priv_data;
struct v4l2_fmtdesc vfd = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE };
- while(!ioctl(fd, VIDIOC_ENUM_FMT, &vfd)) {
- enum AVCodecID codec_id = fmt_v4l2codec(vfd.pixelformat);
- enum AVPixelFormat pix_fmt = fmt_v4l2ff(vfd.pixelformat, codec_id);
+ while(!v4l2_ioctl(s->fd, VIDIOC_ENUM_FMT, &vfd)) {
+ enum AVCodecID codec_id = ff_fmt_v4l2codec(vfd.pixelformat);
+ enum AVPixelFormat pix_fmt = ff_fmt_v4l2ff(vfd.pixelformat, codec_id);
vfd.index++;
if (!(vfd.flags & V4L2_FMT_FLAG_COMPRESSED) &&
type & V4L_RAWFORMATS) {
const char *fmt_name = av_get_pix_fmt_name(pix_fmt);
- av_log(ctx, AV_LOG_INFO, "R : %9s : %20s :",
+ av_log(ctx, AV_LOG_INFO, "Raw : %11s : %20s :",
fmt_name ? fmt_name : "Unsupported",
vfd.description);
} else if (vfd.flags & V4L2_FMT_FLAG_COMPRESSED &&
type & V4L_COMPFORMATS) {
const AVCodecDescriptor *desc = avcodec_descriptor_get(codec_id);
- av_log(ctx, AV_LOG_INFO, "C : %9s : %20s :",
+ av_log(ctx, AV_LOG_INFO, "Compressed: %11s : %20s :",
desc ? desc->name : "Unsupported",
vfd.description);
} else {
@@ -327,18 +294,40 @@ static void list_formats(AVFormatContext *ctx, int fd, int type)
}
#ifdef V4L2_FMT_FLAG_EMULATED
- if (vfd.flags & V4L2_FMT_FLAG_EMULATED) {
- av_log(ctx, AV_LOG_WARNING, "%s", "Emulated");
- continue;
- }
+ if (vfd.flags & V4L2_FMT_FLAG_EMULATED)
+ av_log(ctx, AV_LOG_INFO, " Emulated :");
#endif
#if HAVE_STRUCT_V4L2_FRMIVALENUM_DISCRETE
- list_framesizes(ctx, fd, vfd.pixelformat);
+ list_framesizes(ctx, vfd.pixelformat);
#endif
av_log(ctx, AV_LOG_INFO, "\n");
}
}
+static void list_standards(AVFormatContext *ctx)
+{
+ int ret;
+ struct video_data *s = ctx->priv_data;
+ struct v4l2_standard standard;
+
+ if (s->std_id == 0)
+ return;
+
+ for (standard.index = 0; ; standard.index++) {
+ if (v4l2_ioctl(s->fd, VIDIOC_ENUMSTD, &standard) < 0) {
+ ret = AVERROR(errno);
+ if (ret == AVERROR(EINVAL)) {
+ break;
+ } else {
+ av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_ENUMSTD): %s\n", av_err2str(ret));
+ return;
+ }
+ }
+ av_log(ctx, AV_LOG_INFO, "%2d, %16"PRIx64", %s\n",
+ standard.index, (uint64_t)standard.id, standard.name);
+ }
+}
+
static int mmap_init(AVFormatContext *ctx)
{
int i, res;
@@ -349,35 +338,26 @@ static int mmap_init(AVFormatContext *ctx)
.memory = V4L2_MEMORY_MMAP
};
- res = ioctl(s->fd, VIDIOC_REQBUFS, &req);
- if (res < 0) {
+ if (v4l2_ioctl(s->fd, VIDIOC_REQBUFS, &req) < 0) {
res = AVERROR(errno);
- if (res == AVERROR(EINVAL)) {
- av_log(ctx, AV_LOG_ERROR, "Device does not support mmap\n");
- } else {
- av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_REQBUFS)\n");
- }
-
+ av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_REQBUFS): %s\n", av_err2str(res));
return res;
}
if (req.count < 2) {
av_log(ctx, AV_LOG_ERROR, "Insufficient buffer memory\n");
-
return AVERROR(ENOMEM);
}
s->buffers = req.count;
- s->buf_start = av_malloc(sizeof(void *) * s->buffers);
+ s->buf_start = av_malloc_array(s->buffers, sizeof(void *));
if (!s->buf_start) {
av_log(ctx, AV_LOG_ERROR, "Cannot allocate buffer pointers\n");
-
return AVERROR(ENOMEM);
}
- s->buf_len = av_malloc(sizeof(unsigned int) * s->buffers);
+ s->buf_len = av_malloc_array(s->buffers, sizeof(unsigned int));
if (!s->buf_len) {
av_log(ctx, AV_LOG_ERROR, "Cannot allocate buffer sizes\n");
- av_free(s->buf_start);
-
+ av_freep(&s->buf_start);
return AVERROR(ENOMEM);
}
@@ -387,33 +367,26 @@ static int mmap_init(AVFormatContext *ctx)
.index = i,
.memory = V4L2_MEMORY_MMAP
};
-
- res = ioctl(s->fd, VIDIOC_QUERYBUF, &buf);
- if (res < 0) {
+ if (v4l2_ioctl(s->fd, VIDIOC_QUERYBUF, &buf) < 0) {
res = AVERROR(errno);
- av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_QUERYBUF)\n");
-
+ av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_QUERYBUF): %s\n", av_err2str(res));
return res;
}
s->buf_len[i] = buf.length;
if (s->frame_size > 0 && s->buf_len[i] < s->frame_size) {
av_log(ctx, AV_LOG_ERROR,
- "Buffer len [%d] = %d != %d\n",
+ "buf_len[%d] = %d < expected frame size %d\n",
i, s->buf_len[i], s->frame_size);
-
- return -1;
+ return AVERROR(ENOMEM);
}
- s->buf_start[i] = mmap(NULL, buf.length,
+ s->buf_start[i] = v4l2_mmap(NULL, buf.length,
PROT_READ | PROT_WRITE, MAP_SHARED,
s->fd, buf.m.offset);
if (s->buf_start[i] == MAP_FAILED) {
- char errbuf[128];
res = AVERROR(errno);
- av_strerror(res, errbuf, sizeof(errbuf));
- av_log(ctx, AV_LOG_ERROR, "mmap: %s\n", errbuf);
-
+ av_log(ctx, AV_LOG_ERROR, "mmap: %s\n", av_err2str(res));
return res;
}
}
@@ -421,27 +394,93 @@ static int mmap_init(AVFormatContext *ctx)
return 0;
}
+static int enqueue_buffer(struct video_data *s, struct v4l2_buffer *buf)
+{
+ int res = 0;
+
+ if (v4l2_ioctl(s->fd, VIDIOC_QBUF, buf) < 0) {
+ res = AVERROR(errno);
+ av_log(NULL, AV_LOG_ERROR, "ioctl(VIDIOC_QBUF): %s\n", av_err2str(res));
+ } else {
+ avpriv_atomic_int_add_and_fetch(&s->buffers_queued, 1);
+ }
+
+ return res;
+}
+
static void mmap_release_buffer(void *opaque, uint8_t *data)
{
struct v4l2_buffer buf = { 0 };
- int res, fd;
struct buff_data *buf_descriptor = opaque;
struct video_data *s = buf_descriptor->s;
- char errbuf[128];
buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
buf.memory = V4L2_MEMORY_MMAP;
buf.index = buf_descriptor->index;
- fd = buf_descriptor->fd;
av_free(buf_descriptor);
- res = ioctl(fd, VIDIOC_QBUF, &buf);
- if (res < 0) {
- av_strerror(AVERROR(errno), errbuf, sizeof(errbuf));
- av_log(NULL, AV_LOG_ERROR, "ioctl(VIDIOC_QBUF): %s\n",
- errbuf);
+ enqueue_buffer(s, &buf);
+}
+
+#if HAVE_CLOCK_GETTIME && defined(CLOCK_MONOTONIC)
+static int64_t av_gettime_monotonic(void)
+{
+ return av_gettime_relative();
+}
+#endif
+
+static int init_convert_timestamp(AVFormatContext *ctx, int64_t ts)
+{
+ struct video_data *s = ctx->priv_data;
+ int64_t now;
+
+ now = av_gettime();
+ if (s->ts_mode == V4L_TS_ABS &&
+ ts <= now + 1 * AV_TIME_BASE && ts >= now - 10 * AV_TIME_BASE) {
+ av_log(ctx, AV_LOG_INFO, "Detected absolute timestamps\n");
+ s->ts_mode = V4L_TS_CONVERT_READY;
+ return 0;
+ }
+#if HAVE_CLOCK_GETTIME && defined(CLOCK_MONOTONIC)
+ if (ctx->streams[0]->avg_frame_rate.num) {
+ now = av_gettime_monotonic();
+ if (s->ts_mode == V4L_TS_MONO2ABS ||
+ (ts <= now + 1 * AV_TIME_BASE && ts >= now - 10 * AV_TIME_BASE)) {
+ AVRational tb = {AV_TIME_BASE, 1};
+ int64_t period = av_rescale_q(1, tb, ctx->streams[0]->avg_frame_rate);
+ av_log(ctx, AV_LOG_INFO, "Detected monotonic timestamps, converting\n");
+ /* microseconds instead of seconds, MHz instead of Hz */
+ s->timefilter = ff_timefilter_new(1, period, 1.0E-6);
+ if (!s->timefilter)
+ return AVERROR(ENOMEM);
+ s->ts_mode = V4L_TS_CONVERT_READY;
+ return 0;
+ }
}
- avpriv_atomic_int_add_and_fetch(&s->buffers_queued, 1);
+#endif
+ av_log(ctx, AV_LOG_ERROR, "Unknown timestamps\n");
+ return AVERROR(EIO);
+}
+
+static int convert_timestamp(AVFormatContext *ctx, int64_t *ts)
+{
+ struct video_data *s = ctx->priv_data;
+
+ if (s->ts_mode) {
+ int r = init_convert_timestamp(ctx, *ts);
+ if (r < 0)
+ return r;
+ }
+#if HAVE_CLOCK_GETTIME && defined(CLOCK_MONOTONIC)
+ if (s->timefilter) {
+ int64_t nowa = av_gettime();
+ int64_t nowm = av_gettime_monotonic();
+ ff_timefilter_update(s->timefilter, nowa, nowm - s->last_time_m);
+ s->last_time_m = nowm;
+ *ts = ff_timefilter_eval(s->timefilter, *ts - nowm);
+ }
+#endif
+ return 0;
}
static int mmap_read_frame(AVFormatContext *ctx, AVPacket *pkt)
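
One detail of the hunk above worth spelling out: ff_timefilter_new(1, period, 1.0E-6) keeps every quantity in microseconds, so time_base is 1 (output us per input us), period is the expected frame interval in us, and the bandwidth must be given in cycles per microsecond; 1.0E-6 therefore corresponds to a 1 Hz loop bandwidth. An illustrative restatement with explicit units (it mirrors the code above and is not an addition to it):

    /* ctx/s as in v4l2.c; for a 25 fps stream, period_us == 40000 */
    int64_t period_us = av_rescale_q(1, (AVRational){ AV_TIME_BASE, 1 },
                                     ctx->streams[0]->avg_frame_rate);
    s->timefilter = ff_timefilter_new(/* time_base */ 1.0,
                                      /* period    */ period_us,
                                      /* bandwidth */ 1.0E-6 /* = 1 Hz */);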
@@ -451,30 +490,19 @@ static int mmap_read_frame(AVFormatContext *ctx, AVPacket *pkt)
.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
.memory = V4L2_MEMORY_MMAP
};
- struct pollfd p = { .fd = s->fd, .events = POLLIN };
int res;
- res = poll(&p, 1, s->timeout);
- if (res < 0)
- return AVERROR(errno);
-
- if (!(p.revents & (POLLIN | POLLERR | POLLHUP)))
- return AVERROR(EAGAIN);
+ pkt->size = 0;
/* FIXME: Some special treatment might be needed in case of loss of signal... */
- while ((res = ioctl(s->fd, VIDIOC_DQBUF, &buf)) < 0 && (errno == EINTR));
+ while ((res = v4l2_ioctl(s->fd, VIDIOC_DQBUF, &buf)) < 0 && (errno == EINTR));
if (res < 0) {
- char errbuf[128];
- if (errno == EAGAIN) {
- pkt->size = 0;
-
+ if (errno == EAGAIN)
return AVERROR(EAGAIN);
- }
+
res = AVERROR(errno);
- av_strerror(res, errbuf, sizeof(errbuf));
av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_DQBUF): %s\n",
- errbuf);
-
+ av_err2str(res));
return res;
}
@@ -486,12 +514,27 @@ static int mmap_read_frame(AVFormatContext *ctx, AVPacket *pkt)
// always keep at least one buffer queued
av_assert0(avpriv_atomic_int_get(&s->buffers_queued) >= 1);
- if (s->frame_size > 0 && buf.bytesused != s->frame_size) {
- av_log(ctx, AV_LOG_ERROR,
- "The v4l2 frame is %d bytes, but %d bytes are expected\n",
- buf.bytesused, s->frame_size);
+#ifdef V4L2_BUF_FLAG_ERROR
+ if (buf.flags & V4L2_BUF_FLAG_ERROR) {
+ av_log(ctx, AV_LOG_WARNING,
+ "Dequeued v4l2 buffer contains corrupted data (%d bytes).\n",
+ buf.bytesused);
+ buf.bytesused = 0;
+ } else
+#endif
+ {
+ /* CPIA is a compressed format and we don't know the exact number of bytes
+ * used by a frame, so set it here as the driver announces it. */
+ if (ctx->video_codec_id == AV_CODEC_ID_CPIA)
+ s->frame_size = buf.bytesused;
- return AVERROR_INVALIDDATA;
+ if (s->frame_size > 0 && buf.bytesused != s->frame_size) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Dequeued v4l2 buffer contains %d bytes, but %d were expected. Flags: 0x%08X.\n",
+ buf.bytesused, s->frame_size, buf.flags);
+ enqueue_buffer(s, &buf);
+ return AVERROR_INVALIDDATA;
+ }
}
/* Image is at s->buf_start[buf.index] */
@@ -500,18 +543,16 @@ static int mmap_read_frame(AVFormatContext *ctx, AVPacket *pkt)
res = av_new_packet(pkt, buf.bytesused);
if (res < 0) {
av_log(ctx, AV_LOG_ERROR, "Error allocating a packet.\n");
+ enqueue_buffer(s, &buf);
return res;
}
memcpy(pkt->data, s->buf_start[buf.index], buf.bytesused);
- res = ioctl(s->fd, VIDIOC_QBUF, &buf);
- if (res < 0) {
- res = AVERROR(errno);
- av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_QBUF)\n");
+ res = enqueue_buffer(s, &buf);
+ if (res) {
av_packet_unref(pkt);
return res;
}
- avpriv_atomic_int_add_and_fetch(&s->buffers_queued, 1);
} else {
struct buff_data *buf_descriptor;
@@ -524,32 +565,33 @@ static int mmap_read_frame(AVFormatContext *ctx, AVPacket *pkt)
* allocate a buffer for memcpying into it
*/
av_log(ctx, AV_LOG_ERROR, "Failed to allocate a buffer descriptor\n");
- res = ioctl(s->fd, VIDIOC_QBUF, &buf);
+ enqueue_buffer(s, &buf);
return AVERROR(ENOMEM);
}
- buf_descriptor->fd = s->fd;
buf_descriptor->index = buf.index;
buf_descriptor->s = s;
pkt->buf = av_buffer_create(pkt->data, pkt->size, mmap_release_buffer,
buf_descriptor, 0);
if (!pkt->buf) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to create a buffer\n");
+ enqueue_buffer(s, &buf);
av_freep(&buf_descriptor);
return AVERROR(ENOMEM);
}
}
pkt->pts = buf.timestamp.tv_sec * INT64_C(1000000) + buf.timestamp.tv_usec;
+ convert_timestamp(ctx, &pkt->pts);
- return s->buf_len[buf.index];
+ return pkt->size;
}
static int mmap_start(AVFormatContext *ctx)
{
struct video_data *s = ctx->priv_data;
enum v4l2_buf_type type;
- int i, res, err;
- char errbuf[128];
+ int i, res;
for (i = 0; i < s->buffers; i++) {
struct v4l2_buffer buf = {
@@ -558,27 +600,21 @@ static int mmap_start(AVFormatContext *ctx)
.memory = V4L2_MEMORY_MMAP
};
- res = ioctl(s->fd, VIDIOC_QBUF, &buf);
- if (res < 0) {
- err = AVERROR(errno);
- av_strerror(err, errbuf, sizeof(errbuf));
+ if (v4l2_ioctl(s->fd, VIDIOC_QBUF, &buf) < 0) {
+ res = AVERROR(errno);
av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_QBUF): %s\n",
- errbuf);
-
- return err;
+ av_err2str(res));
+ return res;
}
}
s->buffers_queued = s->buffers;
type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- res = ioctl(s->fd, VIDIOC_STREAMON, &type);
- if (res < 0) {
- err = AVERROR(errno);
- av_strerror(err, errbuf, sizeof(errbuf));
+ if (v4l2_ioctl(s->fd, VIDIOC_STREAMON, &type) < 0) {
+ res = AVERROR(errno);
av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_STREAMON): %s\n",
- errbuf);
-
- return err;
+ av_err2str(res));
+ return res;
}
return 0;
@@ -593,276 +629,361 @@ static void mmap_close(struct video_data *s)
/* We do not check for the result, because we could
* not do anything about it anyway...
*/
- ioctl(s->fd, VIDIOC_STREAMOFF, &type);
+ v4l2_ioctl(s->fd, VIDIOC_STREAMOFF, &type);
for (i = 0; i < s->buffers; i++) {
- munmap(s->buf_start[i], s->buf_len[i]);
+ v4l2_munmap(s->buf_start[i], s->buf_len[i]);
}
- av_free(s->buf_start);
- av_free(s->buf_len);
+ av_freep(&s->buf_start);
+ av_freep(&s->buf_len);
}
-static int v4l2_set_parameters(AVFormatContext *s1)
+static int v4l2_set_parameters(AVFormatContext *ctx)
{
- struct video_data *s = s1->priv_data;
- struct v4l2_input input = { 0 };
+ struct video_data *s = ctx->priv_data;
struct v4l2_standard standard = { 0 };
struct v4l2_streamparm streamparm = { 0 };
- struct v4l2_fract *tpf = &streamparm.parm.capture.timeperframe;
+ struct v4l2_fract *tpf;
AVRational framerate_q = { 0 };
int i, ret;
- streamparm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-
if (s->framerate &&
(ret = av_parse_video_rate(&framerate_q, s->framerate)) < 0) {
- av_log(s1, AV_LOG_ERROR, "Could not parse framerate '%s'.\n",
+ av_log(ctx, AV_LOG_ERROR, "Could not parse framerate '%s'.\n",
s->framerate);
return ret;
}
- /* set tv video input */
- input.index = s->channel;
- if (ioctl(s->fd, VIDIOC_ENUMINPUT, &input) < 0) {
- av_log(s1, AV_LOG_ERROR, "The V4L2 driver ioctl enum input failed:\n");
- return AVERROR(EIO);
- }
+ if (s->standard) {
+ if (s->std_id) {
+ ret = 0;
+ av_log(ctx, AV_LOG_DEBUG, "Setting standard: %s\n", s->standard);
+ /* set tv standard */
+ for (i = 0; ; i++) {
+ standard.index = i;
+ if (v4l2_ioctl(s->fd, VIDIOC_ENUMSTD, &standard) < 0) {
+ ret = AVERROR(errno);
+ break;
+ }
+ if (!av_strcasecmp(standard.name, s->standard))
+ break;
+ }
+ if (ret < 0) {
+ av_log(ctx, AV_LOG_ERROR, "Unknown or unsupported standard '%s'\n", s->standard);
+ return ret;
+ }
- av_log(s1, AV_LOG_DEBUG, "The V4L2 driver set input_id: %d, input: %s\n",
- s->channel, input.name);
- if (ioctl(s->fd, VIDIOC_S_INPUT, &input.index) < 0) {
- av_log(s1, AV_LOG_ERROR,
- "The V4L2 driver ioctl set input(%d) failed\n",
- s->channel);
- return AVERROR(EIO);
+ if (v4l2_ioctl(s->fd, VIDIOC_S_STD, &standard.id) < 0) {
+ ret = AVERROR(errno);
+ av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_S_STD): %s\n", av_err2str(ret));
+ return ret;
+ }
+ } else {
+ av_log(ctx, AV_LOG_WARNING,
+ "This device does not support any standard\n");
+ }
}
- if (s->standard) {
- av_log(s1, AV_LOG_DEBUG, "The V4L2 driver set standard: %s\n",
- s->standard);
- /* set tv standard */
- for(i=0;;i++) {
+ /* get standard */
+ if (v4l2_ioctl(s->fd, VIDIOC_G_STD, &s->std_id) == 0) {
+ tpf = &standard.frameperiod;
+ for (i = 0; ; i++) {
standard.index = i;
- if (ioctl(s->fd, VIDIOC_ENUMSTD, &standard) < 0) {
- av_log(s1, AV_LOG_ERROR,
- "The V4L2 driver ioctl set standard(%s) failed\n",
- s->standard);
- return AVERROR(EIO);
+ if (v4l2_ioctl(s->fd, VIDIOC_ENUMSTD, &standard) < 0) {
+ ret = AVERROR(errno);
+ if (ret == AVERROR(EINVAL)
+#ifdef ENODATA
+ || ret == AVERROR(ENODATA)
+#endif
+ ) {
+ tpf = &streamparm.parm.capture.timeperframe;
+ break;
+ }
+ av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_ENUMSTD): %s\n", av_err2str(ret));
+ return ret;
}
-
- if (!av_strcasecmp(standard.name, s->standard)) {
+ if (standard.id == s->std_id) {
+ av_log(ctx, AV_LOG_DEBUG,
+ "Current standard: %s, id: %"PRIx64", frameperiod: %d/%d\n",
+ standard.name, (uint64_t)standard.id, tpf->numerator, tpf->denominator);
break;
}
}
+ } else {
+ tpf = &streamparm.parm.capture.timeperframe;
+ }
- av_log(s1, AV_LOG_DEBUG,
- "The V4L2 driver set standard: %s, id: %"PRIu64"\n",
- s->standard, (uint64_t)standard.id);
- if (ioctl(s->fd, VIDIOC_S_STD, &standard.id) < 0) {
- av_log(s1, AV_LOG_ERROR,
- "The V4L2 driver ioctl set standard(%s) failed\n",
- s->standard);
- return AVERROR(EIO);
- }
+ streamparm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ if (v4l2_ioctl(s->fd, VIDIOC_G_PARM, &streamparm) < 0) {
+ ret = AVERROR(errno);
+ av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_G_PARM): %s\n", av_err2str(ret));
+ return ret;
}
if (framerate_q.num && framerate_q.den) {
- av_log(s1, AV_LOG_DEBUG, "Setting time per frame to %d/%d\n",
- framerate_q.den, framerate_q.num);
- tpf->numerator = framerate_q.den;
- tpf->denominator = framerate_q.num;
-
- if (ioctl(s->fd, VIDIOC_S_PARM, &streamparm) != 0) {
- av_log(s1, AV_LOG_ERROR,
- "ioctl set time per frame(%d/%d) failed\n",
+ if (streamparm.parm.capture.capability & V4L2_CAP_TIMEPERFRAME) {
+ tpf = &streamparm.parm.capture.timeperframe;
+
+ av_log(ctx, AV_LOG_DEBUG, "Setting time per frame to %d/%d\n",
framerate_q.den, framerate_q.num);
- return AVERROR(EIO);
- }
+ tpf->numerator = framerate_q.den;
+ tpf->denominator = framerate_q.num;
+
+ if (v4l2_ioctl(s->fd, VIDIOC_S_PARM, &streamparm) < 0) {
+ ret = AVERROR(errno);
+ av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_S_PARM): %s\n",
+ av_err2str(ret));
+ return ret;
+ }
- if (framerate_q.num != tpf->denominator ||
- framerate_q.den != tpf->numerator) {
- av_log(s1, AV_LOG_INFO,
- "The driver changed the time per frame from "
- "%d/%d to %d/%d\n",
- framerate_q.den, framerate_q.num,
- tpf->numerator, tpf->denominator);
- }
- } else {
- if (ioctl(s->fd, VIDIOC_G_PARM, &streamparm) != 0) {
- char errbuf[128];
- ret = AVERROR(errno);
- av_strerror(ret, errbuf, sizeof(errbuf));
- av_log(s1, AV_LOG_ERROR, "ioctl(VIDIOC_G_PARM): %s\n",
- errbuf);
- return ret;
+ if (framerate_q.num != tpf->denominator ||
+ framerate_q.den != tpf->numerator) {
+ av_log(ctx, AV_LOG_INFO,
+ "The driver changed the time per frame from "
+ "%d/%d to %d/%d\n",
+ framerate_q.den, framerate_q.num,
+ tpf->numerator, tpf->denominator);
+ }
+ } else {
+ av_log(ctx, AV_LOG_WARNING,
+ "The driver does not permit changing the time per frame\n");
}
}
- s1->streams[0]->avg_frame_rate.num = tpf->denominator;
- s1->streams[0]->avg_frame_rate.den = tpf->numerator;
-
- s->timeout = 100 +
- av_rescale_q(1, s1->streams[0]->avg_frame_rate,
- (AVRational){1, 1000});
+ if (tpf->denominator > 0 && tpf->numerator > 0) {
+ ctx->streams[0]->avg_frame_rate.num = tpf->denominator;
+ ctx->streams[0]->avg_frame_rate.den = tpf->numerator;
+ ctx->streams[0]->r_frame_rate = ctx->streams[0]->avg_frame_rate;
+ } else
+ av_log(ctx, AV_LOG_WARNING, "Time per frame unknown\n");
return 0;
}
-static uint32_t device_try_init(AVFormatContext *s1,
- enum AVPixelFormat pix_fmt,
- int *width,
- int *height,
- enum AVCodecID *codec_id)
+static int device_try_init(AVFormatContext *ctx,
+ enum AVPixelFormat pix_fmt,
+ int *width,
+ int *height,
+ uint32_t *desired_format,
+ enum AVCodecID *codec_id)
{
- uint32_t desired_format = fmt_ff2v4l(pix_fmt, s1->video_codec_id);
-
- if (desired_format == 0 ||
- device_init(s1, width, height, desired_format) < 0) {
- int i;
-
- desired_format = 0;
- for (i = 0; i<FF_ARRAY_ELEMS(fmt_conversion_table); i++) {
- if (s1->video_codec_id == AV_CODEC_ID_NONE ||
- fmt_conversion_table[i].codec_id == s1->video_codec_id) {
- desired_format = fmt_conversion_table[i].v4l2_fmt;
- if (device_init(s1, width, height, desired_format) >= 0) {
+ int ret, i;
+
+ *desired_format = ff_fmt_ff2v4l(pix_fmt, ctx->video_codec_id);
+
+ if (*desired_format) {
+ ret = device_init(ctx, width, height, *desired_format);
+ if (ret < 0) {
+ *desired_format = 0;
+ if (ret != AVERROR(EINVAL))
+ return ret;
+ }
+ }
+
+ if (!*desired_format) {
+ for (i = 0; ff_fmt_conversion_table[i].codec_id != AV_CODEC_ID_NONE; i++) {
+ if (ctx->video_codec_id == AV_CODEC_ID_NONE ||
+ ff_fmt_conversion_table[i].codec_id == ctx->video_codec_id) {
+ av_log(ctx, AV_LOG_DEBUG, "Trying to set codec:%s pix_fmt:%s\n",
+ avcodec_get_name(ff_fmt_conversion_table[i].codec_id),
+ (char *)av_x_if_null(av_get_pix_fmt_name(ff_fmt_conversion_table[i].ff_fmt), "none"));
+
+ *desired_format = ff_fmt_conversion_table[i].v4l2_fmt;
+ ret = device_init(ctx, width, height, *desired_format);
+ if (ret >= 0)
break;
- }
- desired_format = 0;
+ else if (ret != AVERROR(EINVAL))
+ return ret;
+ *desired_format = 0;
}
}
- }
- if (desired_format != 0) {
- *codec_id = fmt_v4l2codec(desired_format);
- assert(*codec_id != AV_CODEC_ID_NONE);
+ if (*desired_format == 0) {
+ av_log(ctx, AV_LOG_ERROR, "Cannot find a proper format for "
+ "codec '%s' (id %d), pixel format '%s' (id %d)\n",
+ avcodec_get_name(ctx->video_codec_id), ctx->video_codec_id,
+ (char *)av_x_if_null(av_get_pix_fmt_name(pix_fmt), "none"), pix_fmt);
+ ret = AVERROR(EINVAL);
+ }
}
- return desired_format;
+ *codec_id = ff_fmt_v4l2codec(*desired_format);
+ av_assert0(*codec_id != AV_CODEC_ID_NONE);
+ return ret;
+}
+
+static int v4l2_read_probe(AVProbeData *p)
+{
+ if (av_strstart(p->filename, "/dev/video", NULL))
+ return AVPROBE_SCORE_MAX - 1;
+ return 0;
}
-static int v4l2_read_header(AVFormatContext *s1)
+static int v4l2_read_header(AVFormatContext *ctx)
{
- struct video_data *s = s1->priv_data;
+ struct video_data *s = ctx->priv_data;
AVStream *st;
int res = 0;
uint32_t desired_format;
- enum AVCodecID codec_id;
+ enum AVCodecID codec_id = AV_CODEC_ID_NONE;
enum AVPixelFormat pix_fmt = AV_PIX_FMT_NONE;
+ struct v4l2_input input = { 0 };
- st = avformat_new_stream(s1, NULL);
+ st = avformat_new_stream(ctx, NULL);
if (!st)
return AVERROR(ENOMEM);
- s->fd = device_open(s1);
+#if CONFIG_LIBV4L2
+ /* silence libv4l2 logging. if fopen() fails v4l2_log_file will be NULL
+ and errors will get sent to stderr */
+ if (s->use_libv4l2)
+ v4l2_log_file = fopen("/dev/null", "w");
+#endif
+
+ s->fd = device_open(ctx);
if (s->fd < 0)
return s->fd;
- if (s->list_format) {
- list_formats(s1, s->fd, s->list_format);
- return AVERROR_EXIT;
+ if (s->channel != -1) {
+ /* set video input */
+ av_log(ctx, AV_LOG_DEBUG, "Selecting input_channel: %d\n", s->channel);
+ if (v4l2_ioctl(s->fd, VIDIOC_S_INPUT, &s->channel) < 0) {
+ res = AVERROR(errno);
+ av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_S_INPUT): %s\n", av_err2str(res));
+ goto fail;
+ }
+ } else {
+ /* get current video input */
+ if (v4l2_ioctl(s->fd, VIDIOC_G_INPUT, &s->channel) < 0) {
+ res = AVERROR(errno);
+ av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_G_INPUT): %s\n", av_err2str(res));
+ goto fail;
+ }
}
- avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
+ /* enum input */
+ input.index = s->channel;
+ if (v4l2_ioctl(s->fd, VIDIOC_ENUMINPUT, &input) < 0) {
+ res = AVERROR(errno);
+ av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_ENUMINPUT): %s\n", av_err2str(res));
+ goto fail;
+ }
+ s->std_id = input.std;
+ av_log(ctx, AV_LOG_DEBUG, "Current input_channel: %d, input_name: %s, input_std: %"PRIx64"\n",
+ s->channel, input.name, (uint64_t)input.std);
- if (s->video_size &&
- (res = av_parse_video_size(&s->width, &s->height, s->video_size)) < 0) {
- av_log(s1, AV_LOG_ERROR, "Could not parse video size '%s'.\n",
- s->video_size);
- return res;
+ if (s->list_format) {
+ list_formats(ctx, s->list_format);
+ res = AVERROR_EXIT;
+ goto fail;
}
+ if (s->list_standard) {
+ list_standards(ctx);
+ res = AVERROR_EXIT;
+ goto fail;
+ }
+
+ avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
+
if (s->pixel_format) {
AVCodec *codec = avcodec_find_decoder_by_name(s->pixel_format);
- if (codec) {
- s1->video_codec_id = codec->id;
- st->need_parsing = AVSTREAM_PARSE_HEADERS;
- }
+ if (codec)
+ ctx->video_codec_id = codec->id;
pix_fmt = av_get_pix_fmt(s->pixel_format);
if (pix_fmt == AV_PIX_FMT_NONE && !codec) {
- av_log(s1, AV_LOG_ERROR, "No such input format: %s.\n",
+ av_log(ctx, AV_LOG_ERROR, "No such input format: %s.\n",
s->pixel_format);
- return AVERROR(EINVAL);
+ res = AVERROR(EINVAL);
+ goto fail;
}
}
if (!s->width && !s->height) {
- struct v4l2_format fmt;
+ struct v4l2_format fmt = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE };
- av_log(s1, AV_LOG_VERBOSE,
+ av_log(ctx, AV_LOG_VERBOSE,
"Querying the device for the current frame size\n");
- fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- if (ioctl(s->fd, VIDIOC_G_FMT, &fmt) < 0) {
- char errbuf[128];
+ if (v4l2_ioctl(s->fd, VIDIOC_G_FMT, &fmt) < 0) {
res = AVERROR(errno);
- av_strerror(res, errbuf, sizeof(errbuf));
- av_log(s1, AV_LOG_ERROR, "ioctl(VIDIOC_G_FMT): %s\n",
- errbuf);
- return res;
+ av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_G_FMT): %s\n",
+ av_err2str(res));
+ goto fail;
}
s->width = fmt.fmt.pix.width;
s->height = fmt.fmt.pix.height;
- av_log(s1, AV_LOG_VERBOSE,
+ av_log(ctx, AV_LOG_VERBOSE,
"Setting frame size to %dx%d\n", s->width, s->height);
}
- desired_format = device_try_init(s1, pix_fmt, &s->width, &s->height,
- &codec_id);
- if (desired_format == 0) {
- av_log(s1, AV_LOG_ERROR, "Cannot find a proper format for "
- "codec_id %d, pix_fmt %d.\n", s1->video_codec_id, pix_fmt);
- close(s->fd);
+ res = device_try_init(ctx, pix_fmt, &s->width, &s->height, &desired_format, &codec_id);
+ if (res < 0)
+ goto fail;
- return AVERROR(EIO);
- }
+ /* If no pixel_format was specified, the codec_id was not known up
+ * until now. Set video_codec_id in the context, as codec_id will
+ * not be available outside this function
+ */
+ if (codec_id != AV_CODEC_ID_NONE && ctx->video_codec_id == AV_CODEC_ID_NONE)
+ ctx->video_codec_id = codec_id;
- if ((res = av_image_check_size(s->width, s->height, 0, s1) < 0))
- return res;
+ if ((res = av_image_check_size(s->width, s->height, 0, ctx)) < 0)
+ goto fail;
- s->frame_format = desired_format;
+ s->pixelformat = desired_format;
- if ((res = v4l2_set_parameters(s1) < 0))
- return res;
+ if ((res = v4l2_set_parameters(ctx)) < 0)
+ goto fail;
- st->codecpar->format = fmt_v4l2ff(desired_format, codec_id);
+ st->codecpar->format = ff_fmt_v4l2ff(desired_format, codec_id);
s->frame_size = av_image_get_buffer_size(st->codecpar->format,
s->width, s->height, 1);
- if ((res = mmap_init(s1)) ||
- (res = mmap_start(s1)) < 0) {
- close(s->fd);
- return res;
- }
+ if ((res = mmap_init(ctx)) ||
+ (res = mmap_start(ctx)) < 0)
+ goto fail;
- s->top_field_first = first_field(s->fd);
+ s->top_field_first = first_field(s);
st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
st->codecpar->codec_id = codec_id;
if (codec_id == AV_CODEC_ID_RAWVIDEO)
st->codecpar->codec_tag =
avcodec_pix_fmt_to_codec_tag(st->codecpar->format);
+ else if (codec_id == AV_CODEC_ID_H264) {
+ st->need_parsing = AVSTREAM_PARSE_FULL_ONCE;
+ }
+ if (desired_format == V4L2_PIX_FMT_YVU420)
+ st->codecpar->codec_tag = MKTAG('Y', 'V', '1', '2');
+ else if (desired_format == V4L2_PIX_FMT_YVU410)
+ st->codecpar->codec_tag = MKTAG('Y', 'V', 'U', '9');
st->codecpar->width = s->width;
st->codecpar->height = s->height;
- st->codecpar->bit_rate = s->frame_size * av_q2d(st->avg_frame_rate) * 8;
+ if (st->avg_frame_rate.den)
+ st->codecpar->bit_rate = s->frame_size * av_q2d(st->avg_frame_rate) * 8;
return 0;
+
+fail:
+ v4l2_close(s->fd);
+ return res;
}
-static int v4l2_read_packet(AVFormatContext *s1, AVPacket *pkt)
+static int v4l2_read_packet(AVFormatContext *ctx, AVPacket *pkt)
{
- struct video_data *s = s1->priv_data;
+ struct video_data *s = ctx->priv_data;
#if FF_API_CODED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
- AVFrame *frame = s1->streams[0]->codec->coded_frame;
+ AVFrame *frame = ctx->streams[0]->codec->coded_frame;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
int res;
av_init_packet(pkt);
- if ((res = mmap_read_frame(s1, pkt)) < 0) {
+ if ((res = mmap_read_frame(ctx, pkt)) < 0) {
return res;
}
@@ -878,33 +999,120 @@ FF_ENABLE_DEPRECATION_WARNINGS
return pkt->size;
}
-static int v4l2_read_close(AVFormatContext *s1)
+static int v4l2_read_close(AVFormatContext *ctx)
{
- struct video_data *s = s1->priv_data;
+ struct video_data *s = ctx->priv_data;
if (avpriv_atomic_int_get(&s->buffers_queued) != s->buffers)
- av_log(s1, AV_LOG_WARNING, "Some buffers are still owned by the caller on "
+ av_log(ctx, AV_LOG_WARNING, "Some buffers are still owned by the caller on "
"close.\n");
mmap_close(s);
- close(s->fd);
+ v4l2_close(s->fd);
return 0;
}
+static int v4l2_is_v4l_dev(const char *name)
+{
+ return !strncmp(name, "video", 5) ||
+ !strncmp(name, "radio", 5) ||
+ !strncmp(name, "vbi", 3) ||
+ !strncmp(name, "v4l-subdev", 10);
+}
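+/* The prefixes above match device nodes such as /dev/video0, /dev/radio0,
+ * /dev/vbi0 and /dev/v4l-subdev0, i.e. the names the kernel gives V4L2
+ * nodes under /dev. */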
+
+static int v4l2_get_device_list(AVFormatContext *ctx, AVDeviceInfoList *device_list)
+{
+ struct video_data *s = ctx->priv_data;
+ DIR *dir;
+ struct dirent *entry;
+ AVDeviceInfo *device = NULL;
+ struct v4l2_capability cap;
+ int ret = 0;
+
+ if (!device_list)
+ return AVERROR(EINVAL);
+
+ dir = opendir("/dev");
+ if (!dir) {
+ ret = AVERROR(errno);
+ av_log(ctx, AV_LOG_ERROR, "Couldn't open the directory: %s\n", av_err2str(ret));
+ return ret;
+ }
+ while ((entry = readdir(dir))) {
+ if (!v4l2_is_v4l_dev(entry->d_name))
+ continue;
+
+ snprintf(ctx->filename, sizeof(ctx->filename), "/dev/%s", entry->d_name);
+ if ((s->fd = device_open(ctx)) < 0)
+ continue;
+
+ if (v4l2_ioctl(s->fd, VIDIOC_QUERYCAP, &cap) < 0) {
+ ret = AVERROR(errno);
+ av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_QUERYCAP): %s\n", av_err2str(ret));
+ goto fail;
+ }
+
+ device = av_mallocz(sizeof(AVDeviceInfo));
+ if (!device) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+ device->device_name = av_strdup(ctx->filename);
+ device->device_description = av_strdup(cap.card);
+ if (!device->device_name || !device->device_description) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ if ((ret = av_dynarray_add_nofree(&device_list->devices,
+ &device_list->nb_devices, device)) < 0)
+ goto fail;
+
+ v4l2_close(s->fd);
+ s->fd = -1;
+ continue;
+
+ fail:
+ if (device) {
+ av_freep(&device->device_name);
+ av_freep(&device->device_description);
+ av_freep(&device);
+ }
+ if (s->fd >= 0)
+ v4l2_close(s->fd);
+ s->fd = -1;
+ break;
+ }
+ closedir(dir);
+ return ret;
+}
+
#define OFFSET(x) offsetof(struct video_data, x)
#define DEC AV_OPT_FLAG_DECODING_PARAM
+
static const AVOption options[] = {
- { "standard", "TV standard, used only by analog frame grabber", OFFSET(standard), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC },
- { "channel", "TV channel, used only by frame grabber", OFFSET(channel), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, INT_MAX, DEC },
- { "video_size", "A string describing frame size, such as 640x480 or hd720.", OFFSET(video_size), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC },
- { "pixel_format", "Preferred pixel format", OFFSET(pixel_format), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC },
- { "input_format", "Preferred pixel format (for raw video) or codec name", OFFSET(pixel_format), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC },
- { "framerate", "", OFFSET(framerate), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC },
- { "list_formats", "List available formats and exit", OFFSET(list_format), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, INT_MAX, DEC, "list_formats" },
- { "all", "Show all available formats", OFFSET(list_format), AV_OPT_TYPE_CONST, {.i64 = V4L_ALLFORMATS }, 0, INT_MAX, DEC, "list_formats" },
- { "raw", "Show only non-compressed formats", OFFSET(list_format), AV_OPT_TYPE_CONST, {.i64 = V4L_RAWFORMATS }, 0, INT_MAX, DEC, "list_formats" },
- { "compressed", "Show only compressed formats", OFFSET(list_format), AV_OPT_TYPE_CONST, {.i64 = V4L_COMPFORMATS }, 0, INT_MAX, DEC, "list_formats" },
+ { "standard", "set TV standard, used only by analog frame grabber", OFFSET(standard), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC },
+ { "channel", "set TV channel, used only by frame grabber", OFFSET(channel), AV_OPT_TYPE_INT, {.i64 = -1 }, -1, INT_MAX, DEC },
+ { "video_size", "set frame size", OFFSET(width), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL}, 0, 0, DEC },
+ { "pixel_format", "set preferred pixel format", OFFSET(pixel_format), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC },
+ { "input_format", "set preferred pixel format (for raw video) or codec name", OFFSET(pixel_format), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC },
+ { "framerate", "set frame rate", OFFSET(framerate), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC },
+
+ { "list_formats", "list available formats and exit", OFFSET(list_format), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, INT_MAX, DEC, "list_formats" },
+ { "all", "show all available formats", OFFSET(list_format), AV_OPT_TYPE_CONST, {.i64 = V4L_ALLFORMATS }, 0, INT_MAX, DEC, "list_formats" },
+ { "raw", "show only non-compressed formats", OFFSET(list_format), AV_OPT_TYPE_CONST, {.i64 = V4L_RAWFORMATS }, 0, INT_MAX, DEC, "list_formats" },
+ { "compressed", "show only compressed formats", OFFSET(list_format), AV_OPT_TYPE_CONST, {.i64 = V4L_COMPFORMATS }, 0, INT_MAX, DEC, "list_formats" },
+
+ { "list_standards", "list supported standards and exit", OFFSET(list_standard), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, 1, DEC, "list_standards" },
+ { "all", "show all supported standards", OFFSET(list_standard), AV_OPT_TYPE_CONST, {.i64 = 1 }, 0, 0, DEC, "list_standards" },
+
+ { "timestamps", "set type of timestamps for grabbed frames", OFFSET(ts_mode), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, 2, DEC, "timestamps" },
+ { "ts", "set type of timestamps for grabbed frames", OFFSET(ts_mode), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, 2, DEC, "timestamps" },
+ { "default", "use timestamps from the kernel", OFFSET(ts_mode), AV_OPT_TYPE_CONST, {.i64 = V4L_TS_DEFAULT }, 0, 2, DEC, "timestamps" },
+ { "abs", "use absolute timestamps (wall clock)", OFFSET(ts_mode), AV_OPT_TYPE_CONST, {.i64 = V4L_TS_ABS }, 0, 2, DEC, "timestamps" },
+ { "mono2abs", "force conversion from monotonic to absolute timestamps", OFFSET(ts_mode), AV_OPT_TYPE_CONST, {.i64 = V4L_TS_MONO2ABS }, 0, 2, DEC, "timestamps" },
+ { "use_libv4l2", "use libv4l2 (v4l-utils) conversion functions", OFFSET(use_libv4l2), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, DEC },
{ NULL },
};
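For context, the get_device_list callback added above plugs this demuxer into the generic avdevice enumeration API. A minimal consumer sketch, assuming the avdevice_list_input_sources()/avdevice_free_list_devices() entry points from avdevice.h, with includes and error handling elided:

    AVDeviceInfoList *list = NULL;
    int i, n;

    avdevice_register_all();
    /* "v4l2" resolves via the demuxer's comma-separated name */
    n = avdevice_list_input_sources(av_find_input_format("v4l2"),
                                    NULL, NULL, &list);
    for (i = 0; i < n; i++)
        printf("%s: %s\n", list->devices[i]->device_name,
                           list->devices[i]->device_description);
    avdevice_free_list_devices(&list);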
@@ -913,15 +1121,18 @@ static const AVClass v4l2_class = {
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
+ .category = AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT,
};
AVInputFormat ff_v4l2_demuxer = {
- .name = "video4linux2",
+ .name = "video4linux2,v4l2",
.long_name = NULL_IF_CONFIG_SMALL("Video4Linux2 device grab"),
.priv_data_size = sizeof(struct video_data),
+ .read_probe = v4l2_read_probe,
.read_header = v4l2_read_header,
.read_packet = v4l2_read_packet,
.read_close = v4l2_read_close,
+ .get_device_list = v4l2_get_device_list,
.flags = AVFMT_NOFILE,
.priv_class = &v4l2_class,
};
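Because .name is now the comma-separated pair "video4linux2,v4l2", av_find_input_format() resolves either alias. A short open sketch (device path and option values are illustrative):

    AVFormatContext *ic = NULL;
    AVDictionary *opts = NULL;

    av_dict_set(&opts, "video_size", "640x480", 0);
    av_dict_set(&opts, "framerate", "30", 0);
    if (avformat_open_input(&ic, "/dev/video0",
                            av_find_input_format("v4l2"), &opts) < 0)
        fprintf(stderr, "open failed\n");
    av_dict_free(&opts);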
diff --git a/libavdevice/v4l2enc.c b/libavdevice/v4l2enc.c
new file mode 100644
index 0000000000..faf6e07f86
--- /dev/null
+++ b/libavdevice/v4l2enc.c
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2013 Clément Bœsch
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "v4l2-common.h"
+#include "avdevice.h"
+
+typedef struct {
+ AVClass *class;
+ int fd;
+} V4L2Context;
+
+static av_cold int write_header(AVFormatContext *s1)
+{
+ int res = 0, flags = O_RDWR;
+ struct v4l2_format fmt = {
+ .type = V4L2_BUF_TYPE_VIDEO_OUTPUT
+ };
+ V4L2Context *s = s1->priv_data;
+ AVCodecParameters *par;
+ uint32_t v4l2_pixfmt;
+
+ if (s1->flags & AVFMT_FLAG_NONBLOCK)
+ flags |= O_NONBLOCK;
+
+ s->fd = open(s1->filename, flags);
+ if (s->fd < 0) {
+ res = AVERROR(errno);
+ av_log(s1, AV_LOG_ERROR, "Unable to open V4L2 device '%s'\n", s1->filename);
+ return res;
+ }
+
+ if (s1->nb_streams != 1 ||
+ s1->streams[0]->codecpar->codec_type != AVMEDIA_TYPE_VIDEO ||
+ s1->streams[0]->codecpar->codec_id != AV_CODEC_ID_RAWVIDEO) {
+        av_log(s1, AV_LOG_ERROR,
+               "V4L2 output device supports only a single raw video stream\n");
+        close(s->fd);
+        return AVERROR(EINVAL);
+    }
+
+ par = s1->streams[0]->codecpar;
+
+ v4l2_pixfmt = ff_fmt_ff2v4l(par->format, AV_CODEC_ID_RAWVIDEO);
+ if (!v4l2_pixfmt) { // XXX: try to force them one by one?
+ av_log(s1, AV_LOG_ERROR, "Unknown V4L2 pixel format equivalent for %s\n",
+               av_get_pix_fmt_name(par->format));
+        close(s->fd);
+        return AVERROR(EINVAL);
+    }
+
+ if (ioctl(s->fd, VIDIOC_G_FMT, &fmt) < 0) {
+ res = AVERROR(errno);
+ av_log(s1, AV_LOG_ERROR, "ioctl(VIDIOC_G_FMT): %s\n", av_err2str(res));
+ return res;
+ }
+
+ fmt.fmt.pix.width = par->width;
+ fmt.fmt.pix.height = par->height;
+ fmt.fmt.pix.pixelformat = v4l2_pixfmt;
+ fmt.fmt.pix.sizeimage = av_image_get_buffer_size(par->format, par->width, par->height, 1);
+
+ if (ioctl(s->fd, VIDIOC_S_FMT, &fmt) < 0) {
+ res = AVERROR(errno);
+ av_log(s1, AV_LOG_ERROR, "ioctl(VIDIOC_S_FMT): %s\n", av_err2str(res));
+ return res;
+ }
+
+ return res;
+}
+
+static int write_packet(AVFormatContext *s1, AVPacket *pkt)
+{
+ const V4L2Context *s = s1->priv_data;
+ if (write(s->fd, pkt->data, pkt->size) == -1)
+ return AVERROR(errno);
+ return 0;
+}
+
+static int write_trailer(AVFormatContext *s1)
+{
+ const V4L2Context *s = s1->priv_data;
+ close(s->fd);
+ return 0;
+}
+
+static const AVClass v4l2_class = {
+ .class_name = "V4L2 outdev",
+ .item_name = av_default_item_name,
+ .version = LIBAVUTIL_VERSION_INT,
+ .category = AV_CLASS_CATEGORY_DEVICE_VIDEO_OUTPUT,
+};
+
+AVOutputFormat ff_v4l2_muxer = {
+ .name = "v4l2",
+ .long_name = NULL_IF_CONFIG_SMALL("Video4Linux2 output device"),
+ .priv_data_size = sizeof(V4L2Context),
+ .audio_codec = AV_CODEC_ID_NONE,
+ .video_codec = AV_CODEC_ID_RAWVIDEO,
+ .write_header = write_header,
+ .write_packet = write_packet,
+ .write_trailer = write_trailer,
+ .flags = AVFMT_NOFILE,
+ .priv_class = &v4l2_class,
+};
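On the write side, the new muxer is driven through the ordinary lavf API. A sketch, assuming /dev/video1 is a writable node such as a v4l2loopback device; the single-rawvideo-stream constraint mirrors the check in write_header():

    AVFormatContext *oc = NULL;
    AVStream *st;

    avformat_alloc_output_context2(&oc, NULL, "v4l2", "/dev/video1");
    st = avformat_new_stream(oc, NULL);
    st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codecpar->codec_id   = AV_CODEC_ID_RAWVIDEO;
    st->codecpar->format     = AV_PIX_FMT_YUV420P;
    st->codecpar->width      = 640;
    st->codecpar->height     = 480;
    if (avformat_write_header(oc, NULL) < 0)
        fprintf(stderr, "write_header failed\n");
    /* then feed packed raw frames with av_write_frame(oc, pkt) */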
diff --git a/libavdevice/version.h b/libavdevice/version.h
index a5297320da..b226a76120 100644
--- a/libavdevice/version.h
+++ b/libavdevice/version.h
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -27,9 +27,9 @@
#include "libavutil/version.h"
-#define LIBAVDEVICE_VERSION_MAJOR 56
-#define LIBAVDEVICE_VERSION_MINOR 1
-#define LIBAVDEVICE_VERSION_MICRO 0
+#define LIBAVDEVICE_VERSION_MAJOR 57
+#define LIBAVDEVICE_VERSION_MINOR 0
+#define LIBAVDEVICE_VERSION_MICRO 101
#define LIBAVDEVICE_VERSION_INT AV_VERSION_INT(LIBAVDEVICE_VERSION_MAJOR, \
LIBAVDEVICE_VERSION_MINOR, \
diff --git a/libavdevice/vfwcap.c b/libavdevice/vfwcap.c
index b0b2086883..2dcf5aa2ed 100644
--- a/libavdevice/vfwcap.c
+++ b/libavdevice/vfwcap.c
@@ -2,20 +2,20 @@
* VFW capture interface
* Copyright (c) 2006-2008 Ramiro Polla
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -24,7 +24,6 @@
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
-#include "libavformat/avformat.h"
#include "libavformat/internal.h"
// windows.h must not be included before winsock2.h, and libavformat internal
@@ -33,6 +32,8 @@
// windows.h needs to be included before vfw.h
#include <vfw.h>
+#include "avdevice.h"
+
/* Some obsolete versions of MinGW32 before 4.0.0 lack this. */
#ifndef HWND_MESSAGE
#define HWND_MESSAGE ((HWND) -3)
@@ -160,7 +161,7 @@ static void dump_bih(AVFormatContext *s, BITMAPINFOHEADER *bih)
static int shall_we_drop(AVFormatContext *s)
{
struct vfw_ctx *ctx = s->priv_data;
- const uint8_t dropscore[] = {62, 75, 87, 100};
+ static const uint8_t dropscore[] = {62, 75, 87, 100};
const int ndropscores = FF_ARRAY_ELEMS(dropscore);
unsigned int buffer_fullness = (ctx->curbufsize*100)/s->max_picture_buffer;
@@ -248,7 +249,7 @@ static int vfw_read_header(AVFormatContext *s)
AVStream *st;
int devnum;
int bisize;
- BITMAPINFO *bi;
+ BITMAPINFO *bi = NULL;
CAPTUREPARMS cparms;
DWORD biCompression;
WORD biBitCount;
@@ -294,7 +295,7 @@ static int vfw_read_header(AVFormatContext *s)
(LPARAM) videostream_cb);
if(!ret) {
av_log(s, AV_LOG_ERROR, "Could not set video stream callback.\n");
- goto fail_io;
+ goto fail;
}
SetWindowLongPtr(ctx->hwnd, GWLP_USERDATA, (LONG_PTR) s);
@@ -308,7 +309,7 @@ static int vfw_read_header(AVFormatContext *s)
/* Set video format */
bisize = SendMessage(ctx->hwnd, WM_CAP_GET_VIDEOFORMAT, 0, 0);
if(!bisize)
- goto fail_io;
+ goto fail;
bi = av_malloc(bisize);
if(!bi) {
vfw_read_close(s);
@@ -316,16 +317,21 @@ static int vfw_read_header(AVFormatContext *s)
}
ret = SendMessage(ctx->hwnd, WM_CAP_GET_VIDEOFORMAT, bisize, (LPARAM) bi);
if(!ret)
- goto fail_bi;
+ goto fail;
dump_bih(s, &bi->bmiHeader);
+ ret = av_parse_video_rate(&framerate_q, ctx->framerate);
+ if (ret < 0) {
+ av_log(s, AV_LOG_ERROR, "Could not parse framerate '%s'.\n", ctx->framerate);
+ goto fail;
+ }
if (ctx->video_size) {
ret = av_parse_video_size(&bi->bmiHeader.biWidth, &bi->bmiHeader.biHeight, ctx->video_size);
if (ret < 0) {
av_log(s, AV_LOG_ERROR, "Couldn't parse video size.\n");
- goto fail_bi;
+ goto fail;
}
}
@@ -344,19 +350,17 @@ static int vfw_read_header(AVFormatContext *s)
ret = SendMessage(ctx->hwnd, WM_CAP_SET_VIDEOFORMAT, bisize, (LPARAM) bi);
if(!ret) {
av_log(s, AV_LOG_ERROR, "Could not set Video Format.\n");
- goto fail_bi;
+ goto fail;
}
biCompression = bi->bmiHeader.biCompression;
biBitCount = bi->bmiHeader.biBitCount;
- av_free(bi);
-
/* Set sequence setup */
ret = SendMessage(ctx->hwnd, WM_CAP_GET_SEQUENCE_SETUP, sizeof(cparms),
(LPARAM) &cparms);
if(!ret)
- goto fail_io;
+ goto fail;
dump_captureparms(s, &cparms);
@@ -371,7 +375,7 @@ static int vfw_read_header(AVFormatContext *s)
ret = SendMessage(ctx->hwnd, WM_CAP_SET_SEQUENCE_SETUP, sizeof(cparms),
(LPARAM) &cparms);
if(!ret)
- goto fail_io;
+ goto fail;
st->avg_frame_rate = framerate_q;
@@ -401,31 +405,31 @@ static int vfw_read_header(AVFormatContext *s)
}
}
+ av_freep(&bi);
+
avpriv_set_pts_info(st, 32, 1, 1000);
ctx->mutex = CreateMutex(NULL, 0, NULL);
if(!ctx->mutex) {
av_log(s, AV_LOG_ERROR, "Could not create Mutex.\n" );
- goto fail_io;
+ goto fail;
}
ctx->event = CreateEvent(NULL, 1, 0, NULL);
if(!ctx->event) {
av_log(s, AV_LOG_ERROR, "Could not create Event.\n" );
- goto fail_io;
+ goto fail;
}
ret = SendMessage(ctx->hwnd, WM_CAP_SEQUENCE_NOFILE, 0, 0);
if(!ret) {
av_log(s, AV_LOG_ERROR, "Could not start capture sequence.\n" );
- goto fail_io;
+ goto fail;
}
return 0;
-fail_bi:
- av_free(bi);
-
-fail_io:
+fail:
+ av_freep(&bi);
vfw_read_close(s);
return AVERROR(EIO);
}
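Collapsing the old fail_bi/fail_io pair into one fail label works because av_freep() nulls its argument, so the label is safe to reach whether or not bi was ever allocated:

    uint8_t *buf = av_malloc(16);
    av_freep(&buf);   /* frees the buffer and sets buf = NULL */
    av_freep(&buf);   /* second call is a safe no-op */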
@@ -472,6 +476,7 @@ static const AVClass vfw_class = {
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
+ .category = AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT
};
AVInputFormat ff_vfwcap_demuxer = {
diff --git a/libavdevice/x11grab.c b/libavdevice/x11grab.c
index 20b299995e..a78e7a47a6 100644
--- a/libavdevice/x11grab.c
+++ b/libavdevice/x11grab.c
@@ -1,9 +1,9 @@
/*
* X11 video grab interface
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav integration:
+ * FFmpeg integration:
* Copyright (C) 2006 Clemens Fruhwirth <clemens@endorphin.org>
* Edouard Gomez <ed.gomez@free.fr>
*
@@ -14,18 +14,18 @@
* Copyright (C) 1997-1998 Rasca, Berlin
* 2003-2004 Karl H. Beckers, Frankfurt
*
- * Libav is free software; you can redistribute it and/or modify
+ * FFmpeg is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with Libav; if not, write to the Free Software
+ * along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -41,6 +41,7 @@
#include <time.h>
#include <sys/shm.h>
+#include <X11/cursorfont.h>
#include <X11/X.h>
#include <X11/Xlib.h>
#include <X11/Xlibint.h>
@@ -57,9 +58,10 @@
#include "libavutil/parseutils.h"
#include "libavutil/time.h"
-#include "libavformat/avformat.h"
#include "libavformat/internal.h"
+#include "avdevice.h"
+
/** X11 device demuxer context */
typedef struct X11GrabContext {
const AVClass *class; /**< Class for private options. */
@@ -67,9 +69,8 @@ typedef struct X11GrabContext {
AVRational time_base; /**< Time base */
int64_t time_frame; /**< Current time */
- char *video_size; /**< String describing video size, set by a private option. */
- int height; /**< Height of the grab frame */
int width; /**< Width of the grab frame */
+ int height; /**< Height of the grab frame */
int x_off; /**< Horizontal top-left corner coordinate */
int y_off; /**< Vertical top-left corner coordinate */
@@ -80,8 +81,11 @@ typedef struct X11GrabContext {
int draw_mouse; /**< Set by a private option. */
int follow_mouse; /**< Set by a private option. */
int show_region; /**< set by a private option. */
- char *framerate; /**< Set by a private option. */
+ AVRational framerate; /**< Set by a private option. */
+ int palette_changed;
+ uint32_t palette[256];
+ Cursor c;
Window region_win; /**< This is used by show_region option. */
} X11GrabContext;
@@ -191,6 +195,8 @@ static int pixfmt_from_image(AVFormatContext *s, XImage *image, int *pix_fmt)
image->blue_mask,
image->bits_per_pixel);
+ *pix_fmt = AV_PIX_FMT_NONE;
+
switch (image->bits_per_pixel) {
case 8:
*pix_fmt = AV_PIX_FMT_PAL8;
@@ -218,9 +224,14 @@ static int pixfmt_from_image(AVFormatContext *s, XImage *image, int *pix_fmt)
}
break;
case 32:
- *pix_fmt = AV_PIX_FMT_RGB32;
+ if (image->red_mask == 0xff0000 &&
+ image->green_mask == 0x00ff00 &&
+ image->blue_mask == 0x0000ff ) {
+ *pix_fmt = AV_PIX_FMT_0RGB32;
+ }
break;
- default:
+ }
+ if (*pix_fmt == AV_PIX_FMT_NONE) {
av_log(s, AV_LOG_ERROR,
"XImages with RGB mask 0x%.6lx 0x%.6lx 0x%.6lx and depth %i "
"are currently not supported.\n",
@@ -251,39 +262,34 @@ static int x11grab_read_header(AVFormatContext *s1)
Display *dpy;
AVStream *st = NULL;
XImage *image;
- int x_off = 0, y_off = 0, ret = 0, screen, use_shm;
- char *param, *offset;
- AVRational framerate;
-
- param = av_strdup(s1->filename);
- if (!param)
+ int x_off = 0, y_off = 0, ret = 0, screen, use_shm = 0;
+ char *dpyname, *offset;
+ Colormap color_map;
+ XColor color[256];
+ int i;
+
+ dpyname = av_strdup(s1->filename);
+    if (!dpyname) {
+        ret = AVERROR(ENOMEM);
+        goto out;
+    }
- offset = strchr(param, '+');
+ offset = strchr(dpyname, '+');
if (offset) {
sscanf(offset, "%d,%d", &x_off, &y_off);
- x11grab->draw_mouse = !strstr(offset, "nomouse");
+ if (strstr(offset, "nomouse")) {
+ av_log(s1, AV_LOG_WARNING,
+ "'nomouse' specification in argument is deprecated: "
+ "use 'draw_mouse' option with value 0 instead\n");
+ x11grab->draw_mouse = 0;
+ }
*offset = 0;
}
- ret = av_parse_video_size(&x11grab->width, &x11grab->height,
- x11grab->video_size);
- if (ret < 0) {
- av_log(s1, AV_LOG_ERROR, "Couldn't parse video size.\n");
- goto out;
- }
-
- ret = av_parse_video_rate(&framerate, x11grab->framerate);
- if (ret < 0) {
- av_log(s1, AV_LOG_ERROR, "Could not parse framerate: %s.\n",
- x11grab->framerate);
- goto out;
- }
av_log(s1, AV_LOG_INFO,
"device: %s -> display: %s x: %d y: %d width: %d height: %d\n",
- s1->filename, param, x_off, y_off, x11grab->width, x11grab->height);
+ s1->filename, dpyname, x_off, y_off, x11grab->width, x11grab->height);
- dpy = XOpenDisplay(param);
+ dpy = XOpenDisplay(dpyname);
+ av_freep(&dpyname);
if (!dpy) {
av_log(s1, AV_LOG_ERROR, "Could not open X display.\n");
ret = AVERROR(EIO);
@@ -309,16 +315,18 @@ static int x11grab_read_header(AVFormatContext *s1)
&ret, &ret, &ret);
x_off -= x11grab->width / 2;
y_off -= x11grab->height / 2;
- x_off = FFMIN(FFMAX(x_off, 0), screen_w - x11grab->width);
- y_off = FFMIN(FFMAX(y_off, 0), screen_h - x11grab->height);
+ x_off = av_clip(x_off, 0, screen_w - x11grab->width);
+ y_off = av_clip(y_off, 0, screen_h - x11grab->height);
av_log(s1, AV_LOG_INFO,
"followmouse is enabled, resetting grabbing region to x: %d y: %d\n",
x_off, y_off);
}
- use_shm = XShmQueryExtension(dpy);
- av_log(s1, AV_LOG_INFO,
- "shared memory extension %sfound\n", use_shm ? "" : "not ");
+ if (x11grab->use_shm) {
+ use_shm = XShmQueryExtension(dpy);
+ av_log(s1, AV_LOG_INFO,
+ "shared memory extension %sfound\n", use_shm ? "" : "not ");
+ }
if (use_shm && setup_shm(s1, dpy, &image) < 0) {
av_log(s1, AV_LOG_WARNING, "Falling back to XGetImage\n");
@@ -340,7 +348,7 @@ static int x11grab_read_header(AVFormatContext *s1)
x11grab->frame_size = x11grab->width * x11grab->height * image->bits_per_pixel / 8;
x11grab->dpy = dpy;
- x11grab->time_base = (AVRational) { framerate.den, framerate.num };
+ x11grab->time_base = av_inv_q(x11grab->framerate);
x11grab->time_frame = av_gettime() / av_q2d(x11grab->time_base);
x11grab->x_off = x_off;
x11grab->y_off = y_off;
@@ -351,6 +359,19 @@ static int x11grab_read_header(AVFormatContext *s1)
if (ret < 0)
goto out;
+ if (st->codecpar->format == AV_PIX_FMT_PAL8) {
+ color_map = DefaultColormap(dpy, screen);
+ for (i = 0; i < 256; ++i)
+ color[i].pixel = i;
+ XQueryColors(dpy, color_map, color, 256);
+ for (i = 0; i < 256; ++i)
+ x11grab->palette[i] = (color[i].red & 0xFF00) << 8 |
+ (color[i].green & 0xFF00) |
+ (color[i].blue & 0xFF00) >> 8;
+ x11grab->palette_changed = 1;
+ }
+
st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
st->codecpar->codec_id = AV_CODEC_ID_RAWVIDEO;
st->codecpar->width = x11grab->width;
@@ -360,7 +381,7 @@ static int x11grab_read_header(AVFormatContext *s1)
st->avg_frame_rate = av_inv_q(x11grab->time_base);
out:
- av_free(param);
+ av_free(dpyname);
return ret;
}
@@ -371,8 +392,9 @@ out:
* @param s context used to retrieve original grabbing rectangle
* coordinates
*/
-static void paint_mouse_pointer(XImage *image, X11GrabContext *s)
+static void paint_mouse_pointer(XImage *image, AVFormatContext *s1)
{
+ X11GrabContext *s = s1->priv_data;
int x_off = s->x_off;
int y_off = s->y_off;
int width = s->width;
@@ -388,14 +410,25 @@ static void paint_mouse_pointer(XImage *image, X11GrabContext *s)
* Anyone who performs further investigation of the xlib API likely risks
* permanent brain damage. */
uint8_t *pix = image->data;
+ Window root;
+ XSetWindowAttributes attr;
/* Code doesn't currently support 16-bit or PAL8 */
if (image->bits_per_pixel != 24 && image->bits_per_pixel != 32)
return;
+ if (!s->c)
+ s->c = XCreateFontCursor(dpy, XC_left_ptr);
+ root = DefaultRootWindow(dpy);
+ attr.cursor = s->c;
+ XChangeWindowAttributes(dpy, root, CWCursor, &attr);
+
xcim = XFixesGetCursorImage(dpy);
- if (!xcim)
+ if (!xcim) {
+ av_log(s1, AV_LOG_WARNING,
+ "XFixesGetCursorImage failed\n");
return;
+ }
x = xcim->x - xcim->xhot;
y = xcim->y - xcim->yhot;
@@ -494,16 +527,11 @@ static int x11grab_read_packet(AVFormatContext *s1, AVPacket *pkt)
int64_t curtime, delay;
struct timespec ts;
- /* Calculate the time of the next frame */
- s->time_frame += INT64_C(1000000);
-
/* wait based on the frame rate */
for (;;) {
curtime = av_gettime();
delay = s->time_frame * av_q2d(s->time_base) - curtime;
if (delay <= 0) {
- if (delay < INT64_C(-1000000) * av_q2d(s->time_base))
- s->time_frame += INT64_C(1000000);
break;
}
ts.tv_sec = delay / 1000000;
@@ -511,10 +539,25 @@ static int x11grab_read_packet(AVFormatContext *s1, AVPacket *pkt)
nanosleep(&ts, NULL);
}
+ /* Calculate the time of the next frame */
+ do {
+ s->time_frame += INT64_C(1000000);
+ } while ((s->time_frame * av_q2d(s->time_base) - curtime) <= 0);
+
av_init_packet(pkt);
pkt->data = image->data;
pkt->size = s->frame_size;
pkt->pts = curtime;
+ if (s->palette_changed) {
+ uint8_t *pal = av_packet_new_side_data(pkt, AV_PKT_DATA_PALETTE,
+ AVPALETTE_SIZE);
+ if (!pal) {
+ av_log(s, AV_LOG_ERROR, "Cannot append palette to packet\n");
+ } else {
+ memcpy(pal, s->palette, AVPALETTE_SIZE);
+ s->palette_changed = 0;
+ }
+ }
screen = DefaultScreen(dpy);
root = RootWindow(dpy, screen);
@@ -545,8 +588,8 @@ static int x11grab_read_packet(AVFormatContext *s1, AVPacket *pkt)
y_off -= (y_off + follow_mouse) - pointer_y;
}
// adjust grabbing region position if it goes out of screen.
- s->x_off = x_off = FFMIN(FFMAX(x_off, 0), screen_w - s->width);
- s->y_off = y_off = FFMIN(FFMAX(y_off, 0), screen_h - s->height);
+ s->x_off = x_off = av_clip(x_off, 0, screen_w - s->width);
+ s->y_off = y_off = av_clip(y_off, 0, screen_h - s->height);
if (s->show_region && s->region_win)
XMoveWindow(dpy, s->region_win,
@@ -577,7 +620,7 @@ static int x11grab_read_packet(AVFormatContext *s1, AVPacket *pkt)
}
if (s->draw_mouse && same_screen)
- paint_mouse_pointer(image, s);
+ paint_mouse_pointer(image, s1);
return s->frame_size;
}
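Downstream, the palette attached above travels as packet side data rather than in the bitstream; a consumer would pick it up roughly like so (sketch; my_palette is an illustrative uint32_t[256]):

    int size;
    uint8_t *pal = av_packet_get_side_data(pkt, AV_PKT_DATA_PALETTE, &size);
    if (pal && size == AVPALETTE_SIZE)
        memcpy(my_palette, pal, AVPALETTE_SIZE);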
@@ -618,13 +661,17 @@ static int x11grab_read_close(AVFormatContext *s1)
static const AVOption options[] = {
{ "grab_x", "Initial x coordinate.", OFFSET(x_off), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, DEC },
{ "grab_y", "Initial y coordinate.", OFFSET(y_off), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, DEC },
- { "video_size", "A string describing frame size, such as 640x480 or hd720.", OFFSET(video_size), AV_OPT_TYPE_STRING, {.str = "vga"}, 0, 0, DEC },
- { "framerate", "", OFFSET(framerate), AV_OPT_TYPE_STRING, {.str = "ntsc"}, 0, 0, DEC },
- { "draw_mouse", "Draw the mouse pointer.", OFFSET(draw_mouse), AV_OPT_TYPE_INT, { .i64 = 1 }, 0, 1, DEC },
- { "follow_mouse", "Move the grabbing region when the mouse pointer reaches within specified amount of pixels to the edge of region.",
- OFFSET(follow_mouse), AV_OPT_TYPE_INT, { .i64 = 0 }, -1, INT_MAX, DEC, "follow_mouse" },
- { "centered", "Keep the mouse pointer at the center of grabbing region when following.", 0, AV_OPT_TYPE_CONST, { .i64 = -1 }, INT_MIN, INT_MAX, DEC, "follow_mouse" },
- { "show_region", "Show the grabbing region.", OFFSET(show_region), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, DEC },
+ { "draw_mouse", "draw the mouse pointer", OFFSET(draw_mouse), AV_OPT_TYPE_INT, {.i64 = 1}, 0, 1, DEC },
+
+ { "follow_mouse", "move the grabbing region when the mouse pointer reaches within specified amount of pixels to the edge of region",
+ OFFSET(follow_mouse), AV_OPT_TYPE_INT, {.i64 = 0}, -1, INT_MAX, DEC, "follow_mouse" },
+ { "centered", "keep the mouse pointer at the center of grabbing region when following",
+ 0, AV_OPT_TYPE_CONST, {.i64 = -1}, INT_MIN, INT_MAX, DEC, "follow_mouse" },
+
+ { "framerate", "set video frame rate", OFFSET(framerate), AV_OPT_TYPE_VIDEO_RATE, {.str = "ntsc"}, 0, 0, DEC },
+ { "show_region", "show the grabbing region", OFFSET(show_region), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, DEC },
+ { "video_size", "set video frame size", OFFSET(width), AV_OPT_TYPE_IMAGE_SIZE, {.str = "vga"}, 0, 0, DEC },
+ { "use_shm", "use MIT-SHM extension", OFFSET(use_shm), AV_OPT_TYPE_INT, {.i64 = 1}, 0, 1, DEC },
{ NULL },
};
@@ -633,6 +680,7 @@ static const AVClass x11_class = {
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
+ .category = AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT,
};
/** x11 grabber device demuxer declaration */
diff --git a/libavdevice/xcbgrab.c b/libavdevice/xcbgrab.c
index 9b85c28e24..9da46c8e0d 100644
--- a/libavdevice/xcbgrab.c
+++ b/libavdevice/xcbgrab.c
@@ -2,20 +2,20 @@
* XCB input grabber
* Copyright (C) 2014 Luca Barbato <lu_zero@gentoo.org>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -23,7 +23,6 @@
#include <stdlib.h>
#include <xcb/xcb.h>
-#include <xcb/shape.h>
#if CONFIG_LIBXCB_XFIXES
#include <xcb/xfixes.h>
@@ -34,6 +33,10 @@
#include <xcb/shm.h>
#endif
+#if CONFIG_LIBXCB_SHAPE
+#include <xcb/shape.h>
+#endif
+
#include "libavutil/internal.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
@@ -97,6 +100,7 @@ static const AVClass xcbgrab_class = {
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
+ .category = AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT,
};
static int xcbgrab_reposition(AVFormatContext *s,
@@ -469,11 +473,11 @@ static int pixfmt_from_pixmap_format(AVFormatContext *s, int depth,
switch (depth) {
case 32:
if (fmt->bits_per_pixel == 32)
- *pix_fmt = AV_PIX_FMT_ARGB;
+ *pix_fmt = AV_PIX_FMT_0RGB;
break;
case 24:
if (fmt->bits_per_pixel == 32)
- *pix_fmt = AV_PIX_FMT_RGB32;
+ *pix_fmt = AV_PIX_FMT_0RGB32;
else if (fmt->bits_per_pixel == 24)
*pix_fmt = AV_PIX_FMT_RGB24;
break;
@@ -586,7 +590,7 @@ static void setup_window(AVFormatContext *s)
uint32_t values[] = { 1,
XCB_EVENT_MASK_EXPOSURE |
XCB_EVENT_MASK_STRUCTURE_NOTIFY };
- xcb_rectangle_t rect = { 0, 0, c->width, c->height };
+ av_unused xcb_rectangle_t rect = { 0, 0, c->width, c->height };
c->window = xcb_generate_id(c->conn);
@@ -602,11 +606,13 @@ static void setup_window(AVFormatContext *s)
XCB_COPY_FROM_PARENT,
mask, values);
+#if CONFIG_LIBXCB_SHAPE
xcb_shape_rectangles(c->conn, XCB_SHAPE_SO_SUBTRACT,
XCB_SHAPE_SK_BOUNDING, XCB_CLIP_ORDERING_UNSORTED,
c->window,
c->region_border, c->region_border,
1, &rect);
+#endif
xcb_map_window(c->conn, c->window);
@@ -618,30 +624,25 @@ static av_cold int xcbgrab_read_header(AVFormatContext *s)
XCBGrabContext *c = s->priv_data;
int screen_num, ret;
const xcb_setup_t *setup;
- char *host = s->filename[0] ? s->filename : NULL;
- const char *opts = strchr(s->filename, '+');
-
- if (opts) {
- sscanf(opts, "%d,%d", &c->x, &c->y);
- host = av_strdup(s->filename);
- if (!host)
- return AVERROR(ENOMEM);
- host[opts - s->filename] = '\0';
+ char *display_name = av_strdup(s->filename);
+
+ if (!display_name)
+ return AVERROR(ENOMEM);
+
+ if (!sscanf(s->filename, "%[^+]+%d,%d", display_name, &c->x, &c->y)) {
+ *display_name = 0;
+ sscanf(s->filename, "+%d,%d", &c->x, &c->y);
}
- c->conn = xcb_connect(host, &screen_num);
+ c->conn = xcb_connect(display_name[0] ? display_name : NULL, &screen_num);
+ av_freep(&display_name);
if ((ret = xcb_connection_has_error(c->conn))) {
av_log(s, AV_LOG_ERROR, "Cannot open display %s, error %d.\n",
- s->filename[0] ? host : "default", ret);
- if (opts)
- av_freep(&host);
+ s->filename[0] ? s->filename : "default", ret);
return AVERROR(EIO);
}
- if (opts)
- av_freep(&host);
-
setup = xcb_get_setup(c->conn);
c->screen = get_screen(setup, screen_num);
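The accepted filename grammar here is "[display]+x,y". When the string begins with '+', the %[^+] conversion matches nothing, sscanf() returns 0, and the fallback branch parses the bare offset. For illustration:

    char name[256] = "";
    int x = 0, y = 0;

    sscanf(":0.0+10,20", "%255[^+]+%d,%d", name, &x, &y); /* name=":0.0", x=10, y=20 */
    sscanf("+10,20", "%255[^+]+%d,%d", name, &x, &y);     /* returns 0, name untouched */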
diff --git a/libavdevice/xv.c b/libavdevice/xv.c
new file mode 100644
index 0000000000..185de7569e
--- /dev/null
+++ b/libavdevice/xv.c
@@ -0,0 +1,386 @@
+/*
+ * Copyright (c) 2013 Jeff Moguillansky
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * XVideo output device
+ *
+ * TODO:
+ * - add support to more formats
+ */
+
+#include <X11/Xlib.h>
+#include <X11/extensions/Xv.h>
+#include <X11/extensions/XShm.h>
+#include <X11/extensions/Xvlib.h>
+#include <sys/shm.h>
+
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/imgutils.h"
+#include "libavformat/internal.h"
+#include "avdevice.h"
+
+typedef struct {
+ AVClass *class;
+ GC gc;
+
+ Window window;
+ int64_t window_id;
+ char *window_title;
+ int window_width, window_height;
+ int window_x, window_y;
+ int dest_x, dest_y; /**< display area position */
+ unsigned int dest_w, dest_h; /**< display area dimensions */
+
+ Display* display;
+ char *display_name;
+
+ XvImage* yuv_image;
+ enum AVPixelFormat image_format;
+ int image_width, image_height;
+ XShmSegmentInfo yuv_shminfo;
+ int xv_port;
+ Atom wm_delete_message;
+} XVContext;
+
+typedef struct XVTagFormatMap
+{
+ int tag;
+ enum AVPixelFormat format;
+} XVTagFormatMap;
+
+static const XVTagFormatMap tag_codec_map[] = {
+ { MKTAG('I','4','2','0'), AV_PIX_FMT_YUV420P },
+ { MKTAG('U','Y','V','Y'), AV_PIX_FMT_UYVY422 },
+ { MKTAG('Y','U','Y','2'), AV_PIX_FMT_YUYV422 },
+ { 0, AV_PIX_FMT_NONE }
+};
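+/* For reference, MKTAG packs four characters little-endian, so
+ * MKTAG('I','4','2','0') == 0x30323449, the FourCC the Xv extension
+ * reports for planar YUV 4:2:0. */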
+
+static int xv_get_tag_from_format(enum AVPixelFormat format)
+{
+    const XVTagFormatMap *m;
+
+    for (m = tag_codec_map; m->tag; m++) {
+        if (m->format == format)
+            return m->tag;
+    }
+    return 0;
+}
+
+static int xv_write_trailer(AVFormatContext *s)
+{
+ XVContext *xv = s->priv_data;
+ if (xv->display) {
+        if (xv->yuv_image) {
+            XShmDetach(xv->display, &xv->yuv_shminfo);
+            shmdt(xv->yuv_image->data);
+            XFree(xv->yuv_image);
+        }
+ if (xv->gc)
+ XFreeGC(xv->display, xv->gc);
+ XCloseDisplay(xv->display);
+ }
+ return 0;
+}
+
+static int xv_write_header(AVFormatContext *s)
+{
+ XVContext *xv = s->priv_data;
+ unsigned int num_adaptors;
+ XvAdaptorInfo *ai;
+ XvImageFormatValues *fv;
+ XColor fgcolor;
+ XWindowAttributes window_attrs;
+    int num_formats = 0, j, tag, ret;
+    AVCodecParameters *par;
+
+    if (s->nb_streams != 1
+        || s->streams[0]->codecpar->codec_type != AVMEDIA_TYPE_VIDEO
+        || s->streams[0]->codecpar->codec_id != AV_CODEC_ID_RAWVIDEO) {
+        av_log(s, AV_LOG_ERROR, "Only a single rawvideo stream is supported\n");
+        return AVERROR(EINVAL);
+    }
+    par = s->streams[0]->codecpar;
+
+ if (!(tag = xv_get_tag_from_format(par->format))) {
+ av_log(s, AV_LOG_ERROR,
+ "Unsupported pixel format '%s', only yuv420p, uyvy422, yuyv422 are currently supported\n",
+ av_get_pix_fmt_name(par->format));
+ return AVERROR_PATCHWELCOME;
+ }
+ xv->image_format = par->format;
+
+ xv->display = XOpenDisplay(xv->display_name);
+ if (!xv->display) {
+ av_log(s, AV_LOG_ERROR, "Could not open the X11 display '%s'\n", xv->display_name);
+ return AVERROR(EINVAL);
+ }
+
+ xv->image_width = par->width;
+ xv->image_height = par->height;
+ if (!xv->window_width && !xv->window_height) {
+ AVRational sar = par->sample_aspect_ratio;
+ xv->window_width = par->width;
+ xv->window_height = par->height;
+ if (sar.num) {
+ if (sar.num > sar.den)
+ xv->window_width = av_rescale(xv->window_width, sar.num, sar.den);
+ if (sar.num < sar.den)
+ xv->window_height = av_rescale(xv->window_height, sar.den, sar.num);
+ }
+ }
+ if (!xv->window_id) {
+ xv->window = XCreateSimpleWindow(xv->display, DefaultRootWindow(xv->display),
+ xv->window_x, xv->window_y,
+ xv->window_width, xv->window_height,
+ 0, 0, 0);
+ if (!xv->window_title) {
+ if (!(xv->window_title = av_strdup(s->filename))) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+ }
+ XStoreName(xv->display, xv->window, xv->window_title);
+ xv->wm_delete_message = XInternAtom(xv->display, "WM_DELETE_WINDOW", False);
+ XSetWMProtocols(xv->display, xv->window, &xv->wm_delete_message, 1);
+ XMapWindow(xv->display, xv->window);
+ } else
+ xv->window = xv->window_id;
+
+ if (XvQueryAdaptors(xv->display, DefaultRootWindow(xv->display), &num_adaptors, &ai) != Success) {
+ ret = AVERROR_EXTERNAL;
+ goto fail;
+ }
+    if (!num_adaptors) {
+        av_log(s, AV_LOG_ERROR, "No X-Video adaptors present\n");
+        ret = AVERROR(ENODEV);
+        goto fail;
+    }
+ xv->xv_port = ai[0].base_id;
+ XvFreeAdaptorInfo(ai);
+
+ fv = XvListImageFormats(xv->display, xv->xv_port, &num_formats);
+ if (!fv) {
+ ret = AVERROR_EXTERNAL;
+ goto fail;
+ }
+ for (j = 0; j < num_formats; j++) {
+ if (fv[j].id == tag) {
+ break;
+ }
+ }
+ XFree(fv);
+
+ if (j >= num_formats) {
+ av_log(s, AV_LOG_ERROR,
+ "Device does not support pixel format %s, aborting\n",
+ av_get_pix_fmt_name(par->format));
+ ret = AVERROR(EINVAL);
+ goto fail;
+ }
+
+ xv->gc = XCreateGC(xv->display, xv->window, 0, 0);
+ xv->image_width = par->width;
+ xv->image_height = par->height;
+ xv->yuv_image = XvShmCreateImage(xv->display, xv->xv_port, tag, 0,
+ xv->image_width, xv->image_height, &xv->yuv_shminfo);
+ xv->yuv_shminfo.shmid = shmget(IPC_PRIVATE, xv->yuv_image->data_size,
+ IPC_CREAT | 0777);
+ xv->yuv_shminfo.shmaddr = (char *)shmat(xv->yuv_shminfo.shmid, 0, 0);
+ xv->yuv_image->data = xv->yuv_shminfo.shmaddr;
+ xv->yuv_shminfo.readOnly = False;
+
+ XShmAttach(xv->display, &xv->yuv_shminfo);
+ XSync(xv->display, False);
+ shmctl(xv->yuv_shminfo.shmid, IPC_RMID, 0);
+
+ XGetWindowAttributes(xv->display, xv->window, &window_attrs);
+ fgcolor.red = fgcolor.green = fgcolor.blue = 0;
+ fgcolor.flags = DoRed | DoGreen | DoBlue;
+ XAllocColor(xv->display, window_attrs.colormap, &fgcolor);
+ XSetForeground(xv->display, xv->gc, fgcolor.pixel);
+ //force display area recalculation at first frame
+ xv->window_width = xv->window_height = 0;
+
+ return 0;
+ fail:
+ xv_write_trailer(s);
+ return ret;
+}
+
+static void compute_display_area(AVFormatContext *s)
+{
+ XVContext *xv = s->priv_data;
+ AVRational sar, dar; /* sample and display aspect ratios */
+ AVStream *st = s->streams[0];
+ AVCodecParameters *par = st->codecpar;
+
+ /* compute overlay width and height from the codec context information */
+ sar = st->sample_aspect_ratio.num ? st->sample_aspect_ratio : (AVRational){ 1, 1 };
+ dar = av_mul_q(sar, (AVRational){ par->width, par->height });
+
+ /* we suppose the screen has a 1/1 sample aspect ratio */
+ /* fit in the window */
+ if (av_cmp_q(dar, (AVRational){ xv->dest_w, xv->dest_h }) > 0) {
+ /* fit in width */
+ xv->dest_y = xv->dest_h;
+ xv->dest_x = 0;
+ xv->dest_h = av_rescale(xv->dest_w, dar.den, dar.num);
+ xv->dest_y -= xv->dest_h;
+ xv->dest_y /= 2;
+ } else {
+ /* fit in height */
+ xv->dest_x = xv->dest_w;
+ xv->dest_y = 0;
+ xv->dest_w = av_rescale(xv->dest_h, dar.num, dar.den);
+ xv->dest_x -= xv->dest_w;
+ xv->dest_x /= 2;
+ }
+}
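+/* Worked example: a 640x480 frame with square pixels (DAR 4:3) shown in a
+ * 1920x1080 window takes the "fit in height" branch above: dest_w becomes
+ * av_rescale(1080, 4, 3) = 1440 and dest_x = (1920 - 1440) / 2 = 240. */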
+
+static int xv_repaint(AVFormatContext *s)
+{
+ XVContext *xv = s->priv_data;
+ XWindowAttributes window_attrs;
+
+ XGetWindowAttributes(xv->display, xv->window, &window_attrs);
+ if (window_attrs.width != xv->window_width || window_attrs.height != xv->window_height) {
+ XRectangle rect[2];
+ xv->dest_w = window_attrs.width;
+ xv->dest_h = window_attrs.height;
+ compute_display_area(s);
+ if (xv->dest_x) {
+ rect[0].width = rect[1].width = xv->dest_x;
+ rect[0].height = rect[1].height = window_attrs.height;
+ rect[0].y = rect[1].y = 0;
+ rect[0].x = 0;
+ rect[1].x = xv->dest_w + xv->dest_x;
+ XFillRectangles(xv->display, xv->window, xv->gc, rect, 2);
+ }
+ if (xv->dest_y) {
+ rect[0].width = rect[1].width = window_attrs.width;
+ rect[0].height = rect[1].height = xv->dest_y;
+ rect[0].x = rect[1].x = 0;
+ rect[0].y = 0;
+ rect[1].y = xv->dest_h + xv->dest_y;
+ XFillRectangles(xv->display, xv->window, xv->gc, rect, 2);
+ }
+ }
+
+ if (XvShmPutImage(xv->display, xv->xv_port, xv->window, xv->gc,
+ xv->yuv_image, 0, 0, xv->image_width, xv->image_height,
+ xv->dest_x, xv->dest_y, xv->dest_w, xv->dest_h, True) != Success) {
+ av_log(s, AV_LOG_ERROR, "Could not copy image to XV shared memory buffer\n");
+ return AVERROR_EXTERNAL;
+ }
+ return 0;
+}
+
+static int write_picture(AVFormatContext *s, uint8_t *input_data[4],
+ int linesize[4])
+{
+ XVContext *xv = s->priv_data;
+ XvImage *img = xv->yuv_image;
+ uint8_t *data[3] = {
+ img->data + img->offsets[0],
+ img->data + img->offsets[1],
+ img->data + img->offsets[2]
+ };
+
+ /* Check messages. Window might get closed. */
+ if (!xv->window_id) {
+ XEvent event;
+ while (XPending(xv->display)) {
+ XNextEvent(xv->display, &event);
+ if (event.type == ClientMessage && event.xclient.data.l[0] == xv->wm_delete_message) {
+ av_log(xv, AV_LOG_DEBUG, "Window close event.\n");
+ return AVERROR(EPIPE);
+ }
+ }
+ }
+
+ av_image_copy(data, img->pitches, (const uint8_t **)input_data, linesize,
+ xv->image_format, img->width, img->height);
+ return xv_repaint(s);
+}
+
+static int xv_write_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ AVCodecParameters *par = s->streams[0]->codecpar;
+ uint8_t *data[4];
+ int linesize[4];
+
+ av_image_fill_arrays(data, linesize, pkt->data, par->format,
+ par->width, par->height, 1);
+ return write_picture(s, data, linesize);
+}
+
+static int xv_write_frame(AVFormatContext *s, int stream_index, AVFrame **frame,
+ unsigned flags)
+{
+ /* xv_write_header() should have accepted only supported formats */
+ if ((flags & AV_WRITE_UNCODED_FRAME_QUERY))
+ return 0;
+ return write_picture(s, (*frame)->data, (*frame)->linesize);
+}
+
+static int xv_control_message(AVFormatContext *s, int type, void *data, size_t data_size)
+{
+ switch(type) {
+ case AV_APP_TO_DEV_WINDOW_REPAINT:
+ return xv_repaint(s);
+ default:
+ break;
+ }
+ return AVERROR(ENOSYS);
+}
+
+#define OFFSET(x) offsetof(XVContext, x)
+static const AVOption options[] = {
+ { "display_name", "set display name", OFFSET(display_name), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, AV_OPT_FLAG_ENCODING_PARAM },
+ { "window_id", "set existing window id", OFFSET(window_id), AV_OPT_TYPE_INT64, {.i64 = 0 }, 0, INT64_MAX, AV_OPT_FLAG_ENCODING_PARAM },
+ { "window_size", "set window forced size", OFFSET(window_width), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL}, 0, 0, AV_OPT_FLAG_ENCODING_PARAM },
+ { "window_title", "set window title", OFFSET(window_title), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, AV_OPT_FLAG_ENCODING_PARAM },
+ { "window_x", "set window x offset", OFFSET(window_x), AV_OPT_TYPE_INT, {.i64 = 0 }, -INT_MAX, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM },
+ { "window_y", "set window y offset", OFFSET(window_y), AV_OPT_TYPE_INT, {.i64 = 0 }, -INT_MAX, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM },
+    { NULL }
+};
+
+static const AVClass xv_class = {
+ .class_name = "xvideo outdev",
+ .item_name = av_default_item_name,
+ .option = options,
+ .version = LIBAVUTIL_VERSION_INT,
+ .category = AV_CLASS_CATEGORY_DEVICE_VIDEO_OUTPUT,
+};
+
+AVOutputFormat ff_xv_muxer = {
+ .name = "xv",
+ .long_name = NULL_IF_CONFIG_SMALL("XV (XVideo) output device"),
+ .priv_data_size = sizeof(XVContext),
+ .audio_codec = AV_CODEC_ID_NONE,
+ .video_codec = AV_CODEC_ID_RAWVIDEO,
+ .write_header = xv_write_header,
+ .write_packet = xv_write_packet,
+ .write_uncoded_frame = xv_write_frame,
+ .write_trailer = xv_write_trailer,
+ .control_message = xv_control_message,
+ .flags = AVFMT_NOFILE | AVFMT_VARIABLE_FPS | AVFMT_NOTIMESTAMPS,
+ .priv_class = &xv_class,
+};
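Finally, a display sketch for the new xv device, driven like any muxer (dimensions and window title are illustrative; the pixel format must be one of the three mapped in tag_codec_map):

    AVFormatContext *oc = NULL;
    AVStream *st;
    AVDictionary *opts = NULL;

    avformat_alloc_output_context2(&oc, NULL, "xv", NULL);
    st = avformat_new_stream(oc, NULL);
    st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codecpar->codec_id   = AV_CODEC_ID_RAWVIDEO;
    st->codecpar->format     = AV_PIX_FMT_YUV420P;   /* maps to the I420 tag */
    st->codecpar->width      = 1280;
    st->codecpar->height     = 720;
    av_dict_set(&opts, "window_title", "preview", 0);
    if (avformat_write_header(oc, &opts) < 0)
        fprintf(stderr, "xv open failed\n");
    av_dict_free(&opts);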