path: root/libavformat
author    Luca Abeni <lucabe72@email.it>    2007-11-22 16:10:02 +0000
committer Luca Abeni <lucabe72@email.it>    2007-11-22 16:10:02 +0000
commit    c721d803cbbaa4e5f35693b3c60f6d17c6434916 (patch)
tree      da952683212c132d54d3c6a44598d7a90acf22e1 /libavformat
parent    489b0d4d9897676877f598a74902237f9d830f79 (diff)
Introduce libavdevice
Originally committed as revision 11077 to svn://svn.ffmpeg.org/ffmpeg/trunk
Diffstat (limited to 'libavformat')
-rw-r--r--  libavformat/Makefile       |  11
-rw-r--r--  libavformat/allformats.c   |   8
-rw-r--r--  libavformat/audio.c        | 344
-rw-r--r--  libavformat/beosaudio.cpp  | 465
-rw-r--r--  libavformat/bktr.c         | 320
-rw-r--r--  libavformat/dv1394.c       | 236
-rw-r--r--  libavformat/dv1394.h       | 356
-rw-r--r--  libavformat/libdc1394.c    | 193
-rw-r--r--  libavformat/v4l.c          | 852
-rw-r--r--  libavformat/v4l2.c         | 643
-rw-r--r--  libavformat/x11grab.c      | 529
11 files changed, 0 insertions, 3957 deletions
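
The device grab code deleted below has been moved out of libavformat and into the new libavdevice library. As a hedged sketch of what that means for callers (not part of this commit; avdevice_register_all() and the header names are assumptions based on the new library, the rest is the libavformat API of this era):

/* Hypothetical sketch: open a grab device after the libavformat ->
 * libavdevice split. Device (de)muxers are assumed to be registered by
 * avdevice_register_all(); everything else is plain libavformat usage. */
#include <string.h>
#include "avformat.h"   /* libavformat */
#include "avdevice.h"   /* assumed header for the new libavdevice */

static int open_grab_device(AVFormatContext **ic, const char *demuxer_name,
                            const char *device_path)
{
    AVInputFormat *fmt;
    AVFormatParameters ap;

    memset(&ap, 0, sizeof(ap));
    av_register_all();        /* regular (de)muxers and protocols */
    avdevice_register_all();  /* grab devices now live in libavdevice */

    fmt = av_find_input_format(demuxer_name);  /* e.g. "oss", "bktr", "dv1394" */
    if (!fmt)
        return -1;

    ap.sample_rate = 44100;   /* audio grab demuxers need these; ignored by video ones */
    ap.channels    = 2;
    return av_open_input_file(ic, device_path, fmt, 0, &ap);
}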
diff --git a/libavformat/Makefile b/libavformat/Makefile
index 378364f0c5..14b2729fde 100644
--- a/libavformat/Makefile
+++ b/libavformat/Makefile
@@ -31,7 +31,6 @@ OBJS-$(CONFIG_AVI_MUXER) += avienc.o riff.o
OBJS-$(CONFIG_AVISYNTH) += avisynth.o
OBJS-$(CONFIG_AVS_DEMUXER) += avs.o vocdec.o voc.o
OBJS-$(CONFIG_BETHSOFTVID_DEMUXER) += bethsoftvid.o
-OBJS-$(CONFIG_BKTR_DEMUXER) += bktr.o
OBJS-$(CONFIG_C93_DEMUXER) += c93.o vocdec.o voc.o
OBJS-$(CONFIG_CRC_MUXER) += crcenc.o
OBJS-$(CONFIG_DAUD_DEMUXER) += daud.o
@@ -39,7 +38,6 @@ OBJS-$(CONFIG_DSICIN_DEMUXER) += dsicin.o
OBJS-$(CONFIG_DTS_DEMUXER) += raw.o
OBJS-$(CONFIG_DV_DEMUXER) += dv.o
OBJS-$(CONFIG_DV_MUXER) += dvenc.o
-OBJS-$(CONFIG_DV1394_DEMUXER) += dv1394.o dv.o
OBJS-$(CONFIG_DXA_DEMUXER) += dxa.o riff.o
OBJS-$(CONFIG_EA_CDATA_DEMUXER) += eacdata.o
OBJS-$(CONFIG_EA_DEMUXER) += electronicarts.o
@@ -114,8 +112,6 @@ OBJS-$(CONFIG_OGG_DEMUXER) += oggdec.o \
oggparseogm.o \
riff.o
OBJS-$(CONFIG_OGG_MUXER) += oggenc.o
-OBJS-$(CONFIG_OSS_DEMUXER) += audio.o
-OBJS-$(CONFIG_OSS_MUXER) += audio.o
OBJS-$(CONFIG_PSP_MUXER) += movenc.o riff.o isom.o
OBJS-$(CONFIG_RAWVIDEO_DEMUXER) += raw.o
OBJS-$(CONFIG_RAWVIDEO_MUXER) += raw.o
@@ -141,8 +137,6 @@ OBJS-$(CONFIG_THP_DEMUXER) += thp.o
OBJS-$(CONFIG_TIERTEXSEQ_DEMUXER) += tiertexseq.o
OBJS-$(CONFIG_TTA_DEMUXER) += tta.o
OBJS-$(CONFIG_TXD_DEMUXER) += txd.o
-OBJS-$(CONFIG_V4L2_DEMUXER) += v4l2.o
-OBJS-$(CONFIG_V4L_DEMUXER) += v4l.o
OBJS-$(CONFIG_VC1_DEMUXER) += raw.o
OBJS-$(CONFIG_VMD_DEMUXER) += sierravmd.o
OBJS-$(CONFIG_VOC_DEMUXER) += vocdec.o voc.o
@@ -153,12 +147,10 @@ OBJS-$(CONFIG_WC3_DEMUXER) += wc3movie.o
OBJS-$(CONFIG_WSAUD_DEMUXER) += westwood.o
OBJS-$(CONFIG_WSVQA_DEMUXER) += westwood.o
OBJS-$(CONFIG_WV_DEMUXER) += wv.o
-OBJS-$(CONFIG_X11_GRAB_DEVICE_DEMUXER) += x11grab.o
OBJS-$(CONFIG_YUV4MPEGPIPE_MUXER) += yuv4mpeg.o
OBJS-$(CONFIG_YUV4MPEGPIPE_DEMUXER) += yuv4mpeg.o
# external libraries
-OBJS-$(CONFIG_LIBDC1394_DEMUXER) += libdc1394.o
OBJS-$(CONFIG_LIBNUT_DEMUXER) += libnut.o riff.o
OBJS-$(CONFIG_LIBNUT_MUXER) += libnut.o riff.o
@@ -167,9 +159,6 @@ OBJS-$(CONFIG_VHOOK) += framehook.o
EXTRALIBS := -L$(BUILD_ROOT)/libavutil -lavutil$(BUILDSUF) \
-lavcodec$(BUILDSUF) -L$(BUILD_ROOT)/libavcodec $(EXTRALIBS)
-CPPOBJS-$(CONFIG_AUDIO_BEOS_DEMUXER) += beosaudio.o
-CPPOBJS-$(CONFIG_AUDIO_BEOS_MUXER) += beosaudio.o
-
# protocols I/O
OBJS+= avio.o aviobuf.o
diff --git a/libavformat/allformats.c b/libavformat/allformats.c
index 52529974d1..b663b315e4 100644
--- a/libavformat/allformats.c
+++ b/libavformat/allformats.c
@@ -60,19 +60,16 @@ void av_register_all(void)
REGISTER_MUXDEMUX (ASF, asf);
REGISTER_MUXER (ASF_STREAM, asf_stream);
REGISTER_MUXDEMUX (AU, au);
- REGISTER_MUXDEMUX (AUDIO_BEOS, audio_beos);
REGISTER_MUXDEMUX (AVI, avi);
REGISTER_DEMUXER (AVISYNTH, avisynth);
REGISTER_DEMUXER (AVS, avs);
REGISTER_DEMUXER (BETHSOFTVID, bethsoftvid);
- REGISTER_DEMUXER (BKTR, bktr);
REGISTER_DEMUXER (C93, c93);
REGISTER_MUXER (CRC, crc);
REGISTER_DEMUXER (DAUD, daud);
REGISTER_DEMUXER (DSICIN, dsicin);
REGISTER_DEMUXER (DTS, dts);
REGISTER_MUXDEMUX (DV, dv);
- REGISTER_DEMUXER (DV1394, dv1394);
REGISTER_DEMUXER (DXA, dxa);
REGISTER_DEMUXER (EA, ea);
REGISTER_DEMUXER (EA_CDATA, ea_cdata);
@@ -123,7 +120,6 @@ void av_register_all(void)
REGISTER_MUXDEMUX (NUT, nut);
REGISTER_DEMUXER (NUV, nuv);
REGISTER_MUXDEMUX (OGG, ogg);
- REGISTER_MUXDEMUX (OSS, oss);
REGISTER_MUXDEMUX (PCM_ALAW, pcm_alaw);
REGISTER_MUXDEMUX (PCM_MULAW, pcm_mulaw);
REGISTER_MUXDEMUX (PCM_S16BE, pcm_s16be);
@@ -156,8 +152,6 @@ void av_register_all(void)
REGISTER_DEMUXER (TIERTEXSEQ, tiertexseq);
REGISTER_DEMUXER (TTA, tta);
REGISTER_DEMUXER (TXD, txd);
- REGISTER_DEMUXER (V4L2, v4l2);
- REGISTER_DEMUXER (V4L, v4l);
REGISTER_DEMUXER (VC1, vc1);
REGISTER_DEMUXER (VMD, vmd);
REGISTER_MUXDEMUX (VOC, voc);
@@ -166,11 +160,9 @@ void av_register_all(void)
REGISTER_DEMUXER (WSAUD, wsaud);
REGISTER_DEMUXER (WSVQA, wsvqa);
REGISTER_DEMUXER (WV, wv);
- REGISTER_DEMUXER (X11_GRAB_DEVICE, x11_grab_device);
REGISTER_MUXDEMUX (YUV4MPEGPIPE, yuv4mpegpipe);
/* external libraries */
- REGISTER_DEMUXER (LIBDC1394, libdc1394);
REGISTER_MUXDEMUX (LIBNUT, libnut);
/* protocols */
diff --git a/libavformat/audio.c b/libavformat/audio.c
deleted file mode 100644
index 151cbffd51..0000000000
--- a/libavformat/audio.c
+++ /dev/null
@@ -1,344 +0,0 @@
-/*
- * Linux audio play and grab interface
- * Copyright (c) 2000, 2001 Fabrice Bellard.
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#include "avformat.h"
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <string.h>
-#ifdef HAVE_SOUNDCARD_H
-#include <soundcard.h>
-#else
-#include <sys/soundcard.h>
-#endif
-#include <unistd.h>
-#include <fcntl.h>
-#include <sys/ioctl.h>
-#include <sys/mman.h>
-#include <sys/time.h>
-
-#define AUDIO_BLOCK_SIZE 4096
-
-typedef struct {
- int fd;
- int sample_rate;
- int channels;
- int frame_size; /* in bytes ! */
- int codec_id;
- int flip_left : 1;
- uint8_t buffer[AUDIO_BLOCK_SIZE];
- int buffer_ptr;
-} AudioData;
-
-static int audio_open(AudioData *s, int is_output, const char *audio_device)
-{
- int audio_fd;
- int tmp, err;
- char *flip = getenv("AUDIO_FLIP_LEFT");
-
- if (is_output)
- audio_fd = open(audio_device, O_WRONLY);
- else
- audio_fd = open(audio_device, O_RDONLY);
- if (audio_fd < 0) {
- av_log(NULL, AV_LOG_ERROR, "%s: %s\n", audio_device, strerror(errno));
- return AVERROR(EIO);
- }
-
- if (flip && *flip == '1') {
- s->flip_left = 1;
- }
-
- /* non blocking mode */
- if (!is_output)
- fcntl(audio_fd, F_SETFL, O_NONBLOCK);
-
- s->frame_size = AUDIO_BLOCK_SIZE;
-#if 0
- tmp = (NB_FRAGMENTS << 16) | FRAGMENT_BITS;
- err = ioctl(audio_fd, SNDCTL_DSP_SETFRAGMENT, &tmp);
- if (err < 0) {
- perror("SNDCTL_DSP_SETFRAGMENT");
- }
-#endif
-
- /* select format : favour native format */
- err = ioctl(audio_fd, SNDCTL_DSP_GETFMTS, &tmp);
-
-#ifdef WORDS_BIGENDIAN
- if (tmp & AFMT_S16_BE) {
- tmp = AFMT_S16_BE;
- } else if (tmp & AFMT_S16_LE) {
- tmp = AFMT_S16_LE;
- } else {
- tmp = 0;
- }
-#else
- if (tmp & AFMT_S16_LE) {
- tmp = AFMT_S16_LE;
- } else if (tmp & AFMT_S16_BE) {
- tmp = AFMT_S16_BE;
- } else {
- tmp = 0;
- }
-#endif
-
- switch(tmp) {
- case AFMT_S16_LE:
- s->codec_id = CODEC_ID_PCM_S16LE;
- break;
- case AFMT_S16_BE:
- s->codec_id = CODEC_ID_PCM_S16BE;
- break;
- default:
- av_log(NULL, AV_LOG_ERROR, "Soundcard does not support 16 bit sample format\n");
- close(audio_fd);
- return AVERROR(EIO);
- }
- err=ioctl(audio_fd, SNDCTL_DSP_SETFMT, &tmp);
- if (err < 0) {
- av_log(NULL, AV_LOG_ERROR, "SNDCTL_DSP_SETFMT: %s\n", strerror(errno));
- goto fail;
- }
-
- tmp = (s->channels == 2);
- err = ioctl(audio_fd, SNDCTL_DSP_STEREO, &tmp);
- if (err < 0) {
- av_log(NULL, AV_LOG_ERROR, "SNDCTL_DSP_STEREO: %s\n", strerror(errno));
- goto fail;
- }
- if (tmp)
- s->channels = 2;
-
- tmp = s->sample_rate;
- err = ioctl(audio_fd, SNDCTL_DSP_SPEED, &tmp);
- if (err < 0) {
- av_log(NULL, AV_LOG_ERROR, "SNDCTL_DSP_SPEED: %s\n", strerror(errno));
- goto fail;
- }
- s->sample_rate = tmp; /* store real sample rate */
- s->fd = audio_fd;
-
- return 0;
- fail:
- close(audio_fd);
- return AVERROR(EIO);
-}
-
-static int audio_close(AudioData *s)
-{
- close(s->fd);
- return 0;
-}
-
-/* sound output support */
-static int audio_write_header(AVFormatContext *s1)
-{
- AudioData *s = s1->priv_data;
- AVStream *st;
- int ret;
-
- st = s1->streams[0];
- s->sample_rate = st->codec->sample_rate;
- s->channels = st->codec->channels;
- ret = audio_open(s, 1, s1->filename);
- if (ret < 0) {
- return AVERROR(EIO);
- } else {
- return 0;
- }
-}
-
-static int audio_write_packet(AVFormatContext *s1, AVPacket *pkt)
-{
- AudioData *s = s1->priv_data;
- int len, ret;
- int size= pkt->size;
- uint8_t *buf= pkt->data;
-
- while (size > 0) {
- len = AUDIO_BLOCK_SIZE - s->buffer_ptr;
- if (len > size)
- len = size;
- memcpy(s->buffer + s->buffer_ptr, buf, len);
- s->buffer_ptr += len;
- if (s->buffer_ptr >= AUDIO_BLOCK_SIZE) {
- for(;;) {
- ret = write(s->fd, s->buffer, AUDIO_BLOCK_SIZE);
- if (ret > 0)
- break;
- if (ret < 0 && (errno != EAGAIN && errno != EINTR))
- return AVERROR(EIO);
- }
- s->buffer_ptr = 0;
- }
- buf += len;
- size -= len;
- }
- return 0;
-}
-
-static int audio_write_trailer(AVFormatContext *s1)
-{
- AudioData *s = s1->priv_data;
-
- audio_close(s);
- return 0;
-}
-
-/* grab support */
-
-static int audio_read_header(AVFormatContext *s1, AVFormatParameters *ap)
-{
- AudioData *s = s1->priv_data;
- AVStream *st;
- int ret;
-
- if (ap->sample_rate <= 0 || ap->channels <= 0)
- return -1;
-
- st = av_new_stream(s1, 0);
- if (!st) {
- return AVERROR(ENOMEM);
- }
- s->sample_rate = ap->sample_rate;
- s->channels = ap->channels;
-
- ret = audio_open(s, 0, s1->filename);
- if (ret < 0) {
- av_free(st);
- return AVERROR(EIO);
- }
-
- /* take real parameters */
- st->codec->codec_type = CODEC_TYPE_AUDIO;
- st->codec->codec_id = s->codec_id;
- st->codec->sample_rate = s->sample_rate;
- st->codec->channels = s->channels;
-
- av_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
- return 0;
-}
-
-static int audio_read_packet(AVFormatContext *s1, AVPacket *pkt)
-{
- AudioData *s = s1->priv_data;
- int ret, bdelay;
- int64_t cur_time;
- struct audio_buf_info abufi;
-
- if (av_new_packet(pkt, s->frame_size) < 0)
- return AVERROR(EIO);
- for(;;) {
- struct timeval tv;
- fd_set fds;
-
- tv.tv_sec = 0;
- tv.tv_usec = 30 * 1000; /* 30 msecs -- a bit shorter than 1 frame at 30fps */
-
- FD_ZERO(&fds);
- FD_SET(s->fd, &fds);
-
- /* This will block until data is available or we get a timeout */
- (void) select(s->fd + 1, &fds, 0, 0, &tv);
-
- ret = read(s->fd, pkt->data, pkt->size);
- if (ret > 0)
- break;
- if (ret == -1 && (errno == EAGAIN || errno == EINTR)) {
- av_free_packet(pkt);
- pkt->size = 0;
- pkt->pts = av_gettime();
- return 0;
- }
- if (!(ret == 0 || (ret == -1 && (errno == EAGAIN || errno == EINTR)))) {
- av_free_packet(pkt);
- return AVERROR(EIO);
- }
- }
- pkt->size = ret;
-
- /* compute pts of the start of the packet */
- cur_time = av_gettime();
- bdelay = ret;
- if (ioctl(s->fd, SNDCTL_DSP_GETISPACE, &abufi) == 0) {
- bdelay += abufi.bytes;
- }
- /* subtract the time represented by the number of bytes in the audio fifo */
- cur_time -= (bdelay * 1000000LL) / (s->sample_rate * s->channels);
-
- /* convert to wanted units */
- pkt->pts = cur_time;
-
- if (s->flip_left && s->channels == 2) {
- int i;
- short *p = (short *) pkt->data;
-
- for (i = 0; i < ret; i += 4) {
- *p = ~*p;
- p += 2;
- }
- }
- return 0;
-}
-
-static int audio_read_close(AVFormatContext *s1)
-{
- AudioData *s = s1->priv_data;
-
- audio_close(s);
- return 0;
-}
-
-#ifdef CONFIG_OSS_DEMUXER
-AVInputFormat oss_demuxer = {
- "oss",
- "audio grab and output",
- sizeof(AudioData),
- NULL,
- audio_read_header,
- audio_read_packet,
- audio_read_close,
- .flags = AVFMT_NOFILE,
-};
-#endif
-
-#ifdef CONFIG_OSS_MUXER
-AVOutputFormat oss_muxer = {
- "oss",
- "audio grab and output",
- "",
- "",
- sizeof(AudioData),
- /* XXX: we make the assumption that the soundcard accepts this format */
- /* XXX: find better solution with "preinit" method, needed also in
- other formats */
-#ifdef WORDS_BIGENDIAN
- CODEC_ID_PCM_S16BE,
-#else
- CODEC_ID_PCM_S16LE,
-#endif
- CODEC_ID_NONE,
- audio_write_header,
- audio_write_packet,
- audio_write_trailer,
- .flags = AVFMT_NOFILE,
-};
-#endif
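
For reference, the OSS demuxer removed above (registered as "oss" and requiring sample_rate and channels in AVFormatParameters) was typically driven from the ffmpeg command line roughly like this; a hedged example, with the device path and rates purely illustrative:

ffmpeg -f oss -ar 44100 -ac 2 -i /dev/dsp capture.wav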
diff --git a/libavformat/beosaudio.cpp b/libavformat/beosaudio.cpp
deleted file mode 100644
index d942d7e45d..0000000000
--- a/libavformat/beosaudio.cpp
+++ /dev/null
@@ -1,465 +0,0 @@
-/*
- * BeOS audio play interface
- * Copyright (c) 2000, 2001 Fabrice Bellard.
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include <signal.h>
-#include <stdlib.h>
-#include <stdio.h>
-#include <string.h>
-#include <unistd.h>
-#include <sys/time.h>
-
-#include <Application.h>
-#include <SoundPlayer.h>
-
-extern "C" {
-#include "avformat.h"
-}
-
-#ifdef HAVE_BSOUNDRECORDER
-#include <SoundRecorder.h>
-using namespace BPrivate::Media::Experimental;
-#endif
-
-/* enable performance checks */
-//#define PERF_CHECK
-
-/* enable Media Kit latency checks */
-//#define LATENCY_CHECK
-
-#define AUDIO_BLOCK_SIZE 4096
-#define AUDIO_BLOCK_COUNT 8
-
-#define AUDIO_BUFFER_SIZE (AUDIO_BLOCK_SIZE*AUDIO_BLOCK_COUNT)
-
-typedef struct {
- int fd; // UNUSED
- int sample_rate;
- int channels;
- int frame_size; /* in bytes ! */
- CodecID codec_id;
- uint8_t buffer[AUDIO_BUFFER_SIZE];
- int buffer_ptr;
- /* ring buffer */
- sem_id input_sem;
- int input_index;
- sem_id output_sem;
- int output_index;
- BSoundPlayer *player;
-#ifdef HAVE_BSOUNDRECORDER
- BSoundRecorder *recorder;
-#endif
- int has_quit; /* signal callbacks not to wait */
- volatile bigtime_t starve_time;
-} AudioData;
-
-static thread_id main_thid;
-static thread_id bapp_thid;
-static int own_BApp_created = 0;
-static int refcount = 0;
-
-/* create the BApplication and Run() it */
-static int32 bapp_thread(void *arg)
-{
- new BApplication("application/x-vnd.ffmpeg");
- own_BApp_created = 1;
- be_app->Run();
- /* kill the process group */
-// kill(0, SIGINT);
-// kill(main_thid, SIGHUP);
- return B_OK;
-}
-
-/* create the BApplication only if needed */
-static void create_bapp_if_needed(void)
-{
- if (refcount++ == 0) {
- /* needed by libmedia */
- if (be_app == NULL) {
- bapp_thid = spawn_thread(bapp_thread, "ffmpeg BApplication", B_NORMAL_PRIORITY, NULL);
- resume_thread(bapp_thid);
- while (!own_BApp_created)
- snooze(50000);
- }
- }
-}
-
-static void destroy_bapp_if_needed(void)
-{
- if (--refcount == 0 && own_BApp_created) {
- be_app->Lock();
- be_app->Quit();
- be_app = NULL;
- }
-}
-
-/* called back by BSoundPlayer */
-static void audioplay_callback(void *cookie, void *buffer, size_t bufferSize, const media_raw_audio_format &format)
-{
- AudioData *s;
- size_t len, amount;
- unsigned char *buf = (unsigned char *)buffer;
-
- s = (AudioData *)cookie;
- if (s->has_quit)
- return;
- while (bufferSize > 0) {
-#ifdef PERF_CHECK
- bigtime_t t;
- t = system_time();
-#endif
- len = MIN(AUDIO_BLOCK_SIZE, bufferSize);
- if (acquire_sem_etc(s->output_sem, len, B_CAN_INTERRUPT, 0LL) < B_OK) {
- s->has_quit = 1;
- s->player->SetHasData(false);
- return;
- }
- amount = MIN(len, (AUDIO_BUFFER_SIZE - s->output_index));
- memcpy(buf, &s->buffer[s->output_index], amount);
- s->output_index += amount;
- if (s->output_index >= AUDIO_BUFFER_SIZE) {
- s->output_index %= AUDIO_BUFFER_SIZE;
- memcpy(buf + amount, &s->buffer[s->output_index], len - amount);
- s->output_index += len-amount;
- s->output_index %= AUDIO_BUFFER_SIZE;
- }
- release_sem_etc(s->input_sem, len, 0);
-#ifdef PERF_CHECK
- t = system_time() - t;
- s->starve_time = MAX(s->starve_time, t);
-#endif
- buf += len;
- bufferSize -= len;
- }
-}
-
-#ifdef HAVE_BSOUNDRECORDER
-/* called back by BSoundRecorder */
-static void audiorecord_callback(void *cookie, bigtime_t timestamp, void *buffer, size_t bufferSize, const media_multi_audio_format &format)
-{
- AudioData *s;
- size_t len, amount;
- unsigned char *buf = (unsigned char *)buffer;
-
- s = (AudioData *)cookie;
- if (s->has_quit)
- return;
-
- while (bufferSize > 0) {
- len = MIN(bufferSize, AUDIO_BLOCK_SIZE);
- //printf("acquire_sem(input, %d)\n", len);
- if (acquire_sem_etc(s->input_sem, len, B_CAN_INTERRUPT, 0LL) < B_OK) {
- s->has_quit = 1;
- return;
- }
- amount = MIN(len, (AUDIO_BUFFER_SIZE - s->input_index));
- memcpy(&s->buffer[s->input_index], buf, amount);
- s->input_index += amount;
- if (s->input_index >= AUDIO_BUFFER_SIZE) {
- s->input_index %= AUDIO_BUFFER_SIZE;
- memcpy(&s->buffer[s->input_index], buf + amount, len - amount);
- s->input_index += len - amount;
- }
- release_sem_etc(s->output_sem, len, 0);
- //printf("release_sem(output, %d)\n", len);
- buf += len;
- bufferSize -= len;
- }
-}
-#endif
-
-static int audio_open(AudioData *s, int is_output, const char *audio_device)
-{
- int p[2];
- int ret;
- media_raw_audio_format format;
- media_multi_audio_format iformat;
-
-#ifndef HAVE_BSOUNDRECORDER
- if (!is_output)
- return AVERROR(EIO); /* not for now */
-#endif
- s->input_sem = create_sem(AUDIO_BUFFER_SIZE, "ffmpeg_ringbuffer_input");
- if (s->input_sem < B_OK)
- return AVERROR(EIO);
- s->output_sem = create_sem(0, "ffmpeg_ringbuffer_output");
- if (s->output_sem < B_OK) {
- delete_sem(s->input_sem);
- return AVERROR(EIO);
- }
- s->input_index = 0;
- s->output_index = 0;
- create_bapp_if_needed();
- s->frame_size = AUDIO_BLOCK_SIZE;
- /* bump up the priority (avoid realtime though) */
- set_thread_priority(find_thread(NULL), B_DISPLAY_PRIORITY+1);
-#ifdef HAVE_BSOUNDRECORDER
- if (!is_output) {
- bool wait_for_input = false;
- if (audio_device && !strcmp(audio_device, "wait:"))
- wait_for_input = true;
- s->recorder = new BSoundRecorder(&iformat, wait_for_input, "ffmpeg input", audiorecord_callback);
- if (wait_for_input && (s->recorder->InitCheck() == B_OK)) {
- s->recorder->WaitForIncomingConnection(&iformat);
- }
- if (s->recorder->InitCheck() != B_OK || iformat.format != media_raw_audio_format::B_AUDIO_SHORT) {
- delete s->recorder;
- s->recorder = NULL;
- if (s->input_sem)
- delete_sem(s->input_sem);
- if (s->output_sem)
- delete_sem(s->output_sem);
- return AVERROR(EIO);
- }
- s->codec_id = (iformat.byte_order == B_MEDIA_LITTLE_ENDIAN)?CODEC_ID_PCM_S16LE:CODEC_ID_PCM_S16BE;
- s->channels = iformat.channel_count;
- s->sample_rate = (int)iformat.frame_rate;
- s->frame_size = iformat.buffer_size;
- s->recorder->SetCookie(s);
- s->recorder->SetVolume(1.0);
- s->recorder->Start();
- return 0;
- }
-#endif
- format = media_raw_audio_format::wildcard;
- format.format = media_raw_audio_format::B_AUDIO_SHORT;
- format.byte_order = B_HOST_IS_LENDIAN ? B_MEDIA_LITTLE_ENDIAN : B_MEDIA_BIG_ENDIAN;
- format.channel_count = s->channels;
- format.buffer_size = s->frame_size;
- format.frame_rate = s->sample_rate;
- s->player = new BSoundPlayer(&format, "ffmpeg output", audioplay_callback);
- if (s->player->InitCheck() != B_OK) {
- delete s->player;
- s->player = NULL;
- if (s->input_sem)
- delete_sem(s->input_sem);
- if (s->output_sem)
- delete_sem(s->output_sem);
- return AVERROR(EIO);
- }
- s->player->SetCookie(s);
- s->player->SetVolume(1.0);
- s->player->Start();
- s->player->SetHasData(true);
- return 0;
-}
-
-static int audio_close(AudioData *s)
-{
- if (s->input_sem)
- delete_sem(s->input_sem);
- if (s->output_sem)
- delete_sem(s->output_sem);
- s->has_quit = 1;
- if (s->player) {
- s->player->Stop();
- }
- if (s->player)
- delete s->player;
-#ifdef HAVE_BSOUNDRECORDER
- if (s->recorder)
- delete s->recorder;
-#endif
- destroy_bapp_if_needed();
- return 0;
-}
-
-/* sound output support */
-static int audio_write_header(AVFormatContext *s1)
-{
- AudioData *s = (AudioData *)s1->priv_data;
- AVStream *st;
- int ret;
-
- st = s1->streams[0];
- s->sample_rate = st->codec->sample_rate;
- s->channels = st->codec->channels;
- ret = audio_open(s, 1, NULL);
- if (ret < 0)
- return AVERROR(EIO);
- return 0;
-}
-
-static int audio_write_packet(AVFormatContext *s1, int stream_index,
- const uint8_t *buf, int size, int64_t force_pts)
-{
- AudioData *s = (AudioData *)s1->priv_data;
- int len, ret;
-#ifdef LATENCY_CHECK
-bigtime_t lat1, lat2;
-lat1 = s->player->Latency();
-#endif
-#ifdef PERF_CHECK
- bigtime_t t = s->starve_time;
- s->starve_time = 0;
- printf("starve_time: %lld \n", t);
-#endif
- while (size > 0) {
- int amount;
- len = MIN(size, AUDIO_BLOCK_SIZE);
- if (acquire_sem_etc(s->input_sem, len, B_CAN_INTERRUPT, 0LL) < B_OK)
- return AVERROR(EIO);
- amount = MIN(len, (AUDIO_BUFFER_SIZE - s->input_index));
- memcpy(&s->buffer[s->input_index], buf, amount);
- s->input_index += amount;
- if (s->input_index >= AUDIO_BUFFER_SIZE) {
- s->input_index %= AUDIO_BUFFER_SIZE;
- memcpy(&s->buffer[s->input_index], buf + amount, len - amount);
- s->input_index += len - amount;
- }
- release_sem_etc(s->output_sem, len, 0);
- buf += len;
- size -= len;
- }
-#ifdef LATENCY_CHECK
-lat2 = s->player->Latency();
-printf("#### BSoundPlayer::Latency(): before= %lld, after= %lld\n", lat1, lat2);
-#endif
- return 0;
-}
-
-static int audio_write_trailer(AVFormatContext *s1)
-{
- AudioData *s = (AudioData *)s1->priv_data;
-
- audio_close(s);
- return 0;
-}
-
-/* grab support */
-
-static int audio_read_header(AVFormatContext *s1, AVFormatParameters *ap)
-{
- AudioData *s = (AudioData *)s1->priv_data;
- AVStream *st;
- int ret;
-
- if (!ap || ap->sample_rate <= 0 || ap->channels <= 0)
- return -1;
-
- st = av_new_stream(s1, 0);
- if (!st) {
- return AVERROR(ENOMEM);
- }
- s->sample_rate = ap->sample_rate;
- s->channels = ap->channels;
-
- ret = audio_open(s, 0, s1->filename);
- if (ret < 0) {
- av_free(st);
- return AVERROR(EIO);
- }
- /* take real parameters */
- st->codec->codec_type = CODEC_TYPE_AUDIO;
- st->codec->codec_id = s->codec_id;
- st->codec->sample_rate = s->sample_rate;
- st->codec->channels = s->channels;
- return 0;
- av_set_pts_info(s1, 48, 1, 1000000); /* 48 bits pts in us */
-}
-
-static int audio_read_packet(AVFormatContext *s1, AVPacket *pkt)
-{
- AudioData *s = (AudioData *)s1->priv_data;
- int size;
- size_t len, amount;
- unsigned char *buf;
- status_t err;
-
- if (av_new_packet(pkt, s->frame_size) < 0)
- return AVERROR(EIO);
- buf = (unsigned char *)pkt->data;
- size = pkt->size;
- while (size > 0) {
- len = MIN(AUDIO_BLOCK_SIZE, size);
- //printf("acquire_sem(output, %d)\n", len);
- while ((err=acquire_sem_etc(s->output_sem, len, B_CAN_INTERRUPT, 0LL)) == B_INTERRUPTED);
- if (err < B_OK) {
- av_free_packet(pkt);
- return AVERROR(EIO);
- }
- amount = MIN(len, (AUDIO_BUFFER_SIZE - s->output_index));
- memcpy(buf, &s->buffer[s->output_index], amount);
- s->output_index += amount;
- if (s->output_index >= AUDIO_BUFFER_SIZE) {
- s->output_index %= AUDIO_BUFFER_SIZE;
- memcpy(buf + amount, &s->buffer[s->output_index], len - amount);
- s->output_index += len-amount;
- s->output_index %= AUDIO_BUFFER_SIZE;
- }
- release_sem_etc(s->input_sem, len, 0);
- //printf("release_sem(input, %d)\n", len);
- buf += len;
- size -= len;
- }
- //XXX: add pts info
- return 0;
-}
-
-static int audio_read_close(AVFormatContext *s1)
-{
- AudioData *s = (AudioData *)s1->priv_data;
-
- audio_close(s);
- return 0;
-}
-
-static AVInputFormat audio_beos_demuxer = {
- "audio_beos",
- "audio grab and output",
- sizeof(AudioData),
- NULL,
- audio_read_header,
- audio_read_packet,
- audio_read_close,
- NULL,
- AVFMT_NOFILE,
-};
-
-AVOutputFormat audio_beos_muxer = {
- "audio_beos",
- "audio grab and output",
- "",
- "",
- sizeof(AudioData),
-#ifdef WORDS_BIGENDIAN
- CODEC_ID_PCM_S16BE,
-#else
- CODEC_ID_PCM_S16LE,
-#endif
- CODEC_ID_NONE,
- audio_write_header,
- audio_write_packet,
- audio_write_trailer,
- AVFMT_NOFILE,
-};
-
-extern "C" {
-
-int audio_init(void)
-{
- main_thid = find_thread(NULL);
- av_register_input_format(&audio_beos_demuxer);
- av_register_output_format(&audio_beos_muxer);
- return 0;
-}
-
-} // "C"
-
diff --git a/libavformat/bktr.c b/libavformat/bktr.c
deleted file mode 100644
index 0ea8dfb02c..0000000000
--- a/libavformat/bktr.c
+++ /dev/null
@@ -1,320 +0,0 @@
-/*
- * *BSD video grab interface
- * Copyright (c) 2002 Steve O'Hara-Smith
- * based on
- * Linux video grab interface
- * Copyright (c) 2000,2001 Gerard Lantau.
- * and
- * simple_grab.c Copyright (c) 1999 Roger Hardiman
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#include "avformat.h"
-#if defined (HAVE_DEV_BKTR_IOCTL_METEOR_H) && defined (HAVE_DEV_BKTR_IOCTL_BT848_H)
-# include <dev/bktr/ioctl_meteor.h>
-# include <dev/bktr/ioctl_bt848.h>
-#elif defined (HAVE_MACHINE_IOCTL_METEOR_H) && defined (HAVE_MACHINE_IOCTL_BT848_H)
-# include <machine/ioctl_meteor.h>
-# include <machine/ioctl_bt848.h>
-#elif defined (HAVE_DEV_VIDEO_METEOR_IOCTL_METEOR_H) && defined (HAVE_DEV_VIDEO_METEOR_IOCTL_BT848_H)
-# include <dev/video/meteor/ioctl_meteor.h>
-# include <dev/video/bktr/ioctl_bt848.h>
-#elif HAVE_DEV_IC_BT8XX_H
-# include <dev/ic/bt8xx.h>
-#endif
-#include <unistd.h>
-#include <fcntl.h>
-#include <sys/ioctl.h>
-#include <sys/mman.h>
-#include <sys/time.h>
-#include <signal.h>
-
-typedef struct {
- int video_fd;
- int tuner_fd;
- int width, height;
- int frame_rate;
- int frame_rate_base;
- u_int64_t per_frame;
-} VideoData;
-
-
-#define PAL 1
-#define PALBDGHI 1
-#define NTSC 2
-#define NTSCM 2
-#define SECAM 3
-#define PALN 4
-#define PALM 5
-#define NTSCJ 6
-
-/* PAL is 768 x 576. NTSC is 640 x 480 */
-#define PAL_HEIGHT 576
-#define SECAM_HEIGHT 576
-#define NTSC_HEIGHT 480
-
-#ifndef VIDEO_FORMAT
-#define VIDEO_FORMAT NTSC
-#endif
-
-static int bktr_dev[] = { METEOR_DEV0, METEOR_DEV1, METEOR_DEV2,
- METEOR_DEV3, METEOR_DEV_SVIDEO };
-
-uint8_t *video_buf;
-size_t video_buf_size;
-u_int64_t last_frame_time;
-volatile sig_atomic_t nsignals;
-
-
-static void catchsignal(int signal)
-{
- nsignals++;
- return;
-}
-
-static int bktr_init(const char *video_device, int width, int height,
- int format, int *video_fd, int *tuner_fd, int idev, double frequency)
-{
- struct meteor_geomet geo;
- int h_max;
- long ioctl_frequency;
- char *arg;
- int c;
- struct sigaction act, old;
-
- if (idev < 0 || idev > 4)
- {
- arg = getenv ("BKTR_DEV");
- if (arg)
- idev = atoi (arg);
- if (idev < 0 || idev > 4)
- idev = 1;
- }
-
- if (format < 1 || format > 6)
- {
- arg = getenv ("BKTR_FORMAT");
- if (arg)
- format = atoi (arg);
- if (format < 1 || format > 6)
- format = VIDEO_FORMAT;
- }
-
- if (frequency <= 0)
- {
- arg = getenv ("BKTR_FREQUENCY");
- if (arg)
- frequency = atof (arg);
- if (frequency <= 0)
- frequency = 0.0;
- }
-
- memset(&act, 0, sizeof(act));
- sigemptyset(&act.sa_mask);
- act.sa_handler = catchsignal;
- sigaction(SIGUSR1, &act, &old);
-
- *tuner_fd = open("/dev/tuner0", O_RDONLY);
- if (*tuner_fd < 0)
- av_log(NULL, AV_LOG_ERROR, "Warning. Tuner not opened, continuing: %s\n", strerror(errno));
-
- *video_fd = open(video_device, O_RDONLY);
- if (*video_fd < 0) {
- av_log(NULL, AV_LOG_ERROR, "%s: %s\n", video_device, strerror(errno));
- return -1;
- }
-
- geo.rows = height;
- geo.columns = width;
- geo.frames = 1;
- geo.oformat = METEOR_GEO_YUV_422 | METEOR_GEO_YUV_12;
-
- switch (format) {
- case PAL: h_max = PAL_HEIGHT; c = BT848_IFORM_F_PALBDGHI; break;
- case PALN: h_max = PAL_HEIGHT; c = BT848_IFORM_F_PALN; break;
- case PALM: h_max = PAL_HEIGHT; c = BT848_IFORM_F_PALM; break;
- case SECAM: h_max = SECAM_HEIGHT; c = BT848_IFORM_F_SECAM; break;
- case NTSC: h_max = NTSC_HEIGHT; c = BT848_IFORM_F_NTSCM; break;
- case NTSCJ: h_max = NTSC_HEIGHT; c = BT848_IFORM_F_NTSCJ; break;
- default: h_max = PAL_HEIGHT; c = BT848_IFORM_F_PALBDGHI; break;
- }
-
- if (height <= h_max / 2)
- geo.oformat |= METEOR_GEO_EVEN_ONLY;
-
- if (ioctl(*video_fd, METEORSETGEO, &geo) < 0) {
- av_log(NULL, AV_LOG_ERROR, "METEORSETGEO: %s\n", strerror(errno));
- return -1;
- }
-
- if (ioctl(*video_fd, BT848SFMT, &c) < 0) {
- av_log(NULL, AV_LOG_ERROR, "BT848SFMT: %s\n", strerror(errno));
- return -1;
- }
-
- c = bktr_dev[idev];
- if (ioctl(*video_fd, METEORSINPUT, &c) < 0) {
- av_log(NULL, AV_LOG_ERROR, "METEORSINPUT: %s\n", strerror(errno));
- return -1;
- }
-
- video_buf_size = width * height * 12 / 8;
-
- video_buf = (uint8_t *)mmap((caddr_t)0, video_buf_size,
- PROT_READ, MAP_SHARED, *video_fd, (off_t)0);
- if (video_buf == MAP_FAILED) {
- av_log(NULL, AV_LOG_ERROR, "mmap: %s\n", strerror(errno));
- return -1;
- }
-
- if (frequency != 0.0) {
- ioctl_frequency = (unsigned long)(frequency*16);
- if (ioctl(*tuner_fd, TVTUNER_SETFREQ, &ioctl_frequency) < 0)
- av_log(NULL, AV_LOG_ERROR, "TVTUNER_SETFREQ: %s\n", strerror(errno));
- }
-
- c = AUDIO_UNMUTE;
- if (ioctl(*tuner_fd, BT848_SAUDIO, &c) < 0)
- av_log(NULL, AV_LOG_ERROR, "TVTUNER_SAUDIO: %s\n", strerror(errno));
-
- c = METEOR_CAP_CONTINOUS;
- ioctl(*video_fd, METEORCAPTUR, &c);
-
- c = SIGUSR1;
- ioctl(*video_fd, METEORSSIGNAL, &c);
-
- return 0;
-}
-
-static void bktr_getframe(u_int64_t per_frame)
-{
- u_int64_t curtime;
-
- curtime = av_gettime();
- if (!last_frame_time
- || ((last_frame_time + per_frame) > curtime)) {
- if (!usleep(last_frame_time + per_frame + per_frame / 8 - curtime)) {
- if (!nsignals)
- av_log(NULL, AV_LOG_INFO,
- "SLEPT NO signals - %d microseconds late\n",
- (int)(av_gettime() - last_frame_time - per_frame));
- }
- }
- nsignals = 0;
- last_frame_time = curtime;
-}
-
-
-/* note: we support only one picture read at a time */
-static int grab_read_packet(AVFormatContext *s1, AVPacket *pkt)
-{
- VideoData *s = s1->priv_data;
-
- if (av_new_packet(pkt, video_buf_size) < 0)
- return AVERROR(EIO);
-
- bktr_getframe(s->per_frame);
-
- pkt->pts = av_gettime();
- memcpy(pkt->data, video_buf, video_buf_size);
-
- return video_buf_size;
-}
-
-static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
-{
- VideoData *s = s1->priv_data;
- AVStream *st;
- int width, height;
- int frame_rate;
- int frame_rate_base;
- int format = -1;
-
- if (ap->width <= 0 || ap->height <= 0 || ap->time_base.den <= 0)
- return -1;
-
- width = ap->width;
- height = ap->height;
- frame_rate = ap->time_base.den;
- frame_rate_base = ap->time_base.num;
-
- st = av_new_stream(s1, 0);
- if (!st)
- return AVERROR(ENOMEM);
- av_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in use */
-
- s->width = width;
- s->height = height;
- s->frame_rate = frame_rate;
- s->frame_rate_base = frame_rate_base;
- s->per_frame = ((u_int64_t)1000000 * s->frame_rate_base) / s->frame_rate;
-
- st->codec->codec_type = CODEC_TYPE_VIDEO;
- st->codec->pix_fmt = PIX_FMT_YUV420P;
- st->codec->codec_id = CODEC_ID_RAWVIDEO;
- st->codec->width = width;
- st->codec->height = height;
- st->codec->time_base.den = frame_rate;
- st->codec->time_base.num = frame_rate_base;
-
- if (ap->standard) {
- if (!strcasecmp(ap->standard, "pal"))
- format = PAL;
- else if (!strcasecmp(ap->standard, "secam"))
- format = SECAM;
- else if (!strcasecmp(ap->standard, "ntsc"))
- format = NTSC;
- }
-
- if (bktr_init(s1->filename, width, height, format,
- &(s->video_fd), &(s->tuner_fd), -1, 0.0) < 0)
- return AVERROR(EIO);
-
- nsignals = 0;
- last_frame_time = 0;
-
- return 0;
-}
-
-static int grab_read_close(AVFormatContext *s1)
-{
- VideoData *s = s1->priv_data;
- int c;
-
- c = METEOR_CAP_STOP_CONT;
- ioctl(s->video_fd, METEORCAPTUR, &c);
- close(s->video_fd);
-
- c = AUDIO_MUTE;
- ioctl(s->tuner_fd, BT848_SAUDIO, &c);
- close(s->tuner_fd);
-
- munmap((caddr_t)video_buf, video_buf_size);
-
- return 0;
-}
-
-AVInputFormat bktr_demuxer = {
- "bktr",
- "video grab",
- sizeof(VideoData),
- NULL,
- grab_read_header,
- grab_read_packet,
- grab_read_close,
- .flags = AVFMT_NOFILE,
-};
diff --git a/libavformat/dv1394.c b/libavformat/dv1394.c
deleted file mode 100644
index 8e2e2f607c..0000000000
--- a/libavformat/dv1394.c
+++ /dev/null
@@ -1,236 +0,0 @@
-/*
- * Linux DV1394 interface
- * Copyright (c) 2003 Max Krasnyansky <maxk@qualcomm.com>
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include <unistd.h>
-#include <fcntl.h>
-#include <errno.h>
-#include <sys/ioctl.h>
-#include <sys/mman.h>
-#include <sys/poll.h>
-#include <sys/time.h>
-#include <time.h>
-
-#include "avformat.h"
-
-#undef DV1394_DEBUG
-
-#include "dv1394.h"
-#include "dv.h"
-
-struct dv1394_data {
- int fd;
- int channel;
- int format;
-
- uint8_t *ring; /* Ring buffer */
- int index; /* Current frame index */
- int avail; /* Number of frames available for reading */
- int done; /* Number of completed frames */
-
- DVDemuxContext* dv_demux; /* Generic DV muxing/demuxing context */
-};
-
-/*
- * The trick here is to kludge around a well-known problem with the kernel
- * Oopsing when you try to capture PAL on a device node configured for NTSC.
- * That's why we have to configure the device node for PAL, and then read
- * only an NTSC amount of data.
- */
-static int dv1394_reset(struct dv1394_data *dv)
-{
- struct dv1394_init init;
-
- init.channel = dv->channel;
- init.api_version = DV1394_API_VERSION;
- init.n_frames = DV1394_RING_FRAMES;
- init.format = DV1394_PAL;
-
- if (ioctl(dv->fd, DV1394_INIT, &init) < 0)
- return -1;
-
- dv->avail = dv->done = 0;
- return 0;
-}
-
-static int dv1394_start(struct dv1394_data *dv)
-{
- /* Tell DV1394 driver to enable receiver */
- if (ioctl(dv->fd, DV1394_START_RECEIVE, 0) < 0) {
- av_log(NULL, AV_LOG_ERROR, "Failed to start receiver: %s\n", strerror(errno));
- return -1;
- }
- return 0;
-}
-
-static int dv1394_read_header(AVFormatContext * context, AVFormatParameters * ap)
-{
- struct dv1394_data *dv = context->priv_data;
-
- dv->dv_demux = dv_init_demux(context);
- if (!dv->dv_demux)
- goto failed;
-
- if (ap->standard && !strcasecmp(ap->standard, "pal"))
- dv->format = DV1394_PAL;
- else
- dv->format = DV1394_NTSC;
-
- if (ap->channel)
- dv->channel = ap->channel;
- else
- dv->channel = DV1394_DEFAULT_CHANNEL;
-
- /* Open and initialize DV1394 device */
- dv->fd = open(context->filename, O_RDONLY);
- if (dv->fd < 0) {
- av_log(context, AV_LOG_ERROR, "Failed to open DV interface: %s\n", strerror(errno));
- goto failed;
- }
-
- if (dv1394_reset(dv) < 0) {
- av_log(context, AV_LOG_ERROR, "Failed to initialize DV interface: %s\n", strerror(errno));
- goto failed;
- }
-
- dv->ring = mmap(NULL, DV1394_PAL_FRAME_SIZE * DV1394_RING_FRAMES,
- PROT_READ, MAP_PRIVATE, dv->fd, 0);
- if (dv->ring == MAP_FAILED) {
- av_log(context, AV_LOG_ERROR, "Failed to mmap DV ring buffer: %s\n", strerror(errno));
- goto failed;
- }
-
- if (dv1394_start(dv) < 0)
- goto failed;
-
- return 0;
-
-failed:
- close(dv->fd);
- return AVERROR(EIO);
-}
-
-static int dv1394_read_packet(AVFormatContext *context, AVPacket *pkt)
-{
- struct dv1394_data *dv = context->priv_data;
- int size;
-
- size = dv_get_packet(dv->dv_demux, pkt);
- if (size > 0)
- return size;
-
- if (!dv->avail) {
- struct dv1394_status s;
- struct pollfd p;
-
- if (dv->done) {
- /* Request more frames */
- if (ioctl(dv->fd, DV1394_RECEIVE_FRAMES, dv->done) < 0) {
- /* This usually means that the ring buffer overflowed.
- * We have to reset :(.
- */
-
- av_log(context, AV_LOG_ERROR, "DV1394: Ring buffer overflow. Reseting ..\n");
-
- dv1394_reset(dv);
- dv1394_start(dv);
- }
- dv->done = 0;
- }
-
- /* Wait until more frames are available */
-restart_poll:
- p.fd = dv->fd;
- p.events = POLLIN | POLLERR | POLLHUP;
- if (poll(&p, 1, -1) < 0) {
- if (errno == EAGAIN || errno == EINTR)
- goto restart_poll;
- av_log(context, AV_LOG_ERROR, "Poll failed: %s\n", strerror(errno));
- return AVERROR(EIO);
- }
-
- if (ioctl(dv->fd, DV1394_GET_STATUS, &s) < 0) {
- av_log(context, AV_LOG_ERROR, "Failed to get status: %s\n", strerror(errno));
- return AVERROR(EIO);
- }
-#ifdef DV1394_DEBUG
- av_log(context, AV_LOG_DEBUG, "DV1394: status\n"
- "\tactive_frame\t%d\n"
- "\tfirst_clear_frame\t%d\n"
- "\tn_clear_frames\t%d\n"
- "\tdropped_frames\t%d\n",
- s.active_frame, s.first_clear_frame,
- s.n_clear_frames, s.dropped_frames);
-#endif
-
- dv->avail = s.n_clear_frames;
- dv->index = s.first_clear_frame;
- dv->done = 0;
-
- if (s.dropped_frames) {
- av_log(context, AV_LOG_ERROR, "DV1394: Frame drop detected (%d). Reseting ..\n",
- s.dropped_frames);
-
- dv1394_reset(dv);
- dv1394_start(dv);
- }
- }
-
-#ifdef DV1394_DEBUG
- av_log(context, AV_LOG_DEBUG, "index %d, avail %d, done %d\n", dv->index, dv->avail,
- dv->done);
-#endif
-
- size = dv_produce_packet(dv->dv_demux, pkt,
- dv->ring + (dv->index * DV1394_PAL_FRAME_SIZE),
- DV1394_PAL_FRAME_SIZE);
- dv->index = (dv->index + 1) % DV1394_RING_FRAMES;
- dv->done++; dv->avail--;
-
- return size;
-}
-
-static int dv1394_close(AVFormatContext * context)
-{
- struct dv1394_data *dv = context->priv_data;
-
- /* Shutdown DV1394 receiver */
- if (ioctl(dv->fd, DV1394_SHUTDOWN, 0) < 0)
- av_log(context, AV_LOG_ERROR, "Failed to shutdown DV1394: %s\n", strerror(errno));
-
- /* Unmap ring buffer */
- if (munmap(dv->ring, DV1394_NTSC_FRAME_SIZE * DV1394_RING_FRAMES) < 0)
- av_log(context, AV_LOG_ERROR, "Failed to munmap DV1394 ring buffer: %s\n", strerror(errno));
-
- close(dv->fd);
- av_free(dv->dv_demux);
-
- return 0;
-}
-
-AVInputFormat dv1394_demuxer = {
- .name = "dv1394",
- .long_name = "dv1394 A/V grab",
- .priv_data_size = sizeof(struct dv1394_data),
- .read_header = dv1394_read_header,
- .read_packet = dv1394_read_packet,
- .read_close = dv1394_close,
- .flags = AVFMT_NOFILE
-};
diff --git a/libavformat/dv1394.h b/libavformat/dv1394.h
deleted file mode 100644
index 7f3521d6e8..0000000000
--- a/libavformat/dv1394.h
+++ /dev/null
@@ -1,356 +0,0 @@
-/*
- * dv1394.h - DV input/output over IEEE 1394 on OHCI chips
- * Copyright (C)2001 Daniel Maas <dmaas@dcine.com>
- * receive, proc_fs by Dan Dennedy <dan@dennedy.org>
- *
- * based on:
- * video1394.h - driver for OHCI 1394 boards
- * Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
- * Peter Schlaile <udbz@rz.uni-karlsruhe.de>
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#ifndef FFMPEG_DV1394_H
-#define FFMPEG_DV1394_H
-
-#define DV1394_DEFAULT_CHANNEL 63
-#define DV1394_DEFAULT_CARD 0
-#define DV1394_RING_FRAMES 20
-
-#define DV1394_WIDTH 720
-#define DV1394_NTSC_HEIGHT 480
-#define DV1394_PAL_HEIGHT 576
-
-/* This is the public user-space interface. Try not to break it. */
-
-#define DV1394_API_VERSION 0x20011127
-
-/* ********************
- ** **
- ** DV1394 API **
- ** **
- ********************
-
- There are two methods of operating the DV1394 DV output device.
-
- 1)
-
- The simplest is an interface based on write(): simply write
- full DV frames of data to the device, and they will be transmitted
- as quickly as possible. The FD may be set for non-blocking I/O,
- in which case you can use select() or poll() to wait for output
- buffer space.
-
- To set the DV output parameters (e.g. whether you want NTSC or PAL
- video), use the DV1394_INIT ioctl, passing in the parameters you
- want in a struct dv1394_init.
-
- Example 1:
- To play a raw .DV file: cat foo.DV > /dev/dv1394
- (cat will use write() internally)
-
- Example 2:
- static struct dv1394_init init = {
- 0x63, (broadcast channel)
- 4, (four-frame ringbuffer)
- DV1394_NTSC, (send NTSC video)
- 0, 0 (default empty packet rate)
- }
-
- ioctl(fd, DV1394_INIT, &init);
-
- while(1) {
- read( <a raw DV file>, buf, DV1394_NTSC_FRAME_SIZE );
- write( <the dv1394 FD>, buf, DV1394_NTSC_FRAME_SIZE );
- }
-
- 2)
-
- For more control over buffering, and to avoid unnecessary copies
- of the DV data, you can use the more sophisticated mmap() interface.
- First, call the DV1394_INIT ioctl to specify your parameters,
- including the number of frames in the ringbuffer. Then, calling mmap()
- on the dv1394 device will give you direct access to the ringbuffer
- from which the DV card reads your frame data.
-
- The ringbuffer is simply one large, contiguous region of memory
- containing two or more frames of packed DV data. Each frame of DV data
- is 120000 bytes (NTSC) or 144000 bytes (PAL).
-
- Fill one or more frames in the ringbuffer, then use the DV1394_SUBMIT_FRAMES
- ioctl to begin I/O. You can use either the DV1394_WAIT_FRAMES ioctl
- or select()/poll() to wait until the frames are transmitted. Next, you'll
- need to call the DV1394_GET_STATUS ioctl to determine which ringbuffer
- frames are clear (ready to be filled with new DV data). Finally, use
- DV1394_SUBMIT_FRAMES again to send the new data to the DV output.
-
-
- Example: here is what a four-frame ringbuffer might look like
- during DV transmission:
-
-
- frame 0 frame 1 frame 2 frame 3
-
- *--------------------------------------*
- | CLEAR | DV data | DV data | CLEAR |
- *--------------------------------------*
- <ACTIVE>
-
- transmission goes in this direction --->>>
-
-
- The DV hardware is currently transmitting the data in frame 1.
- Once frame 1 is finished, it will automatically transmit frame 2.
- (if frame 2 finishes before frame 3 is submitted, the device
- will continue to transmit frame 2, and will increase the dropped_frames
- counter each time it repeats the transmission).
-
-
- If you called DV1394_GET_STATUS at this instant, you would
- receive the following values:
-
- n_frames = 4
- active_frame = 1
- first_clear_frame = 3
- n_clear_frames = 2
-
- At this point, you should write new DV data into frame 3 and optionally
- frame 0. Then call DV1394_SUBMIT_FRAMES to inform the device that
- it may transmit the new frames.
-
- ERROR HANDLING
-
- An error (buffer underflow/overflow or a break in the DV stream due
- to a 1394 bus reset) can be detected by checking the dropped_frames
- field of struct dv1394_status (obtained through the
- DV1394_GET_STATUS ioctl).
-
- The best way to recover from such an error is to re-initialize
- dv1394, either by using the DV1394_INIT ioctl call, or closing the
- file descriptor and opening it again. (note that you must unmap all
- ringbuffer mappings when closing the file descriptor, or else
- dv1394 will still be considered 'in use').
-
- MAIN LOOP
-
- For maximum efficiency and robustness against bus errors, you are
- advised to model the main loop of your application after the
- following pseudo-code example:
-
- (checks of system call return values omitted for brevity; always
- check return values in your code!)
-
- while( frames left ) {
-
- struct pollfd *pfd = ...;
-
- pfd->fd = dv1394_fd;
- pfd->revents = 0;
- pfd->events = POLLOUT | POLLIN; (OUT for transmit, IN for receive)
-
- (add other sources of I/O here)
-
- poll(pfd, 1, -1); (or select(); add a timeout if you want)
-
- if(pfd->revents) {
- struct dv1394_status status;
-
- ioctl(dv1394_fd, DV1394_GET_STATUS, &status);
-
- if(status.dropped_frames > 0) {
- reset_dv1394();
- } else {
- for(int i = 0; i < status.n_clear_frames; i++) {
- copy_DV_frame();
- }
- }
- }
- }
-
- where copy_DV_frame() reads or writes on the dv1394 file descriptor
- (read/write mode) or copies data to/from the mmap ringbuffer and
- then calls ioctl(DV1394_SUBMIT_FRAMES) to notify dv1394 that new
- frames are available (mmap mode).
-
- reset_dv1394() is called in the event of a buffer
- underflow/overflow or a halt in the DV stream (e.g. due to a 1394
- bus reset). To guarantee recovery from the error, this function
- should close the dv1394 file descriptor (and munmap() all
- ringbuffer mappings, if you are using them), then re-open the
- dv1394 device (and re-map the ringbuffer).
-
-*/
-
-
-/* maximum number of frames in the ringbuffer */
-#define DV1394_MAX_FRAMES 32
-
-/* number of *full* isochronous packets per DV frame */
-#define DV1394_NTSC_PACKETS_PER_FRAME 250
-#define DV1394_PAL_PACKETS_PER_FRAME 300
-
-/* size of one frame's worth of DV data, in bytes */
-#define DV1394_NTSC_FRAME_SIZE (480 * DV1394_NTSC_PACKETS_PER_FRAME)
-#define DV1394_PAL_FRAME_SIZE (480 * DV1394_PAL_PACKETS_PER_FRAME)
-
-
-/* ioctl() commands */
-
-enum {
- /* I don't like using 0 as a valid ioctl() */
- DV1394_INVALID = 0,
-
-
- /* get the driver ready to transmit video.
- pass a struct dv1394_init* as the parameter (see below),
- or NULL to get default parameters */
- DV1394_INIT,
-
-
- /* stop transmitting video and free the ringbuffer */
- DV1394_SHUTDOWN,
-
-
- /* submit N new frames to be transmitted, where
- the index of the first new frame is first_clear_buffer,
- and the index of the last new frame is
- (first_clear_buffer + N) % n_frames */
- DV1394_SUBMIT_FRAMES,
-
-
- /* block until N buffers are clear (pass N as the parameter)
- Because we re-transmit the last frame on underrun, there
- will at most be n_frames - 1 clear frames at any time */
- DV1394_WAIT_FRAMES,
-
- /* capture new frames that have been received, where
- the index of the first new frame is first_clear_buffer,
- and the index of the last new frame is
- (first_clear_buffer + N) % n_frames */
- DV1394_RECEIVE_FRAMES,
-
-
- DV1394_START_RECEIVE,
-
-
- /* pass a struct dv1394_status* as the parameter (see below) */
- DV1394_GET_STATUS,
-};
-
-
-
-enum pal_or_ntsc {
- DV1394_NTSC = 0,
- DV1394_PAL
-};
-
-
-
-
-/* this is the argument to DV1394_INIT */
-struct dv1394_init {
- /* DV1394_API_VERSION */
- unsigned int api_version;
-
- /* isochronous transmission channel to use */
- unsigned int channel;
-
- /* number of frames in the ringbuffer. Must be at least 2
- and at most DV1394_MAX_FRAMES. */
- unsigned int n_frames;
-
- /* send/receive PAL or NTSC video format */
- enum pal_or_ntsc format;
-
- /* the following are used only for transmission */
-
- /* set these to zero unless you want a
- non-default empty packet rate (see below) */
- unsigned long cip_n;
- unsigned long cip_d;
-
- /* set this to zero unless you want a
- non-default SYT cycle offset (default = 3 cycles) */
- unsigned int syt_offset;
-};
-
-/* NOTE: you may only allocate the DV frame ringbuffer once each time
- you open the dv1394 device. DV1394_INIT will fail if you call it a
- second time with different 'n_frames' or 'format' arguments (which
- would imply a different size for the ringbuffer). If you need a
- different buffer size, simply close and re-open the device, then
- initialize it with your new settings. */
-
-/* Q: What are cip_n and cip_d? */
-
-/*
- A: DV video streams do not utilize 100% of the potential bandwidth offered
- by IEEE 1394 (FireWire). To achieve the correct rate of data transmission,
- DV devices must periodically insert empty packets into the 1394 data stream.
- Typically there is one empty packet per 14-16 data-carrying packets.
-
- Some DV devices will accept a wide range of empty packet rates, while others
- require a precise rate. If the dv1394 driver produces empty packets at
- a rate that your device does not accept, you may see ugly patterns on the
- DV output, or even no output at all.
-
- The default empty packet insertion rate seems to work for many people; if
- your DV output is stable, you can simply ignore this discussion. However,
- we have exposed the empty packet rate as a parameter to support devices that
- do not work with the default rate.
-
- The decision to insert an empty packet is made with a numerator/denominator
- algorithm. Empty packets are produced at an average rate of CIP_N / CIP_D.
- You can alter the empty packet rate by passing non-zero values for cip_n
- and cip_d to the INIT ioctl.
-
- */
-
-
-
-struct dv1394_status {
- /* this embedded init struct returns the current dv1394
- parameters in use */
- struct dv1394_init init;
-
- /* the ringbuffer frame that is currently being
- displayed. (-1 if the device is not transmitting anything) */
- int active_frame;
-
- /* index of the first buffer (ahead of active_frame) that
- is ready to be filled with data */
- unsigned int first_clear_frame;
-
- /* how many buffers, including first_clear_buffer, are
- ready to be filled with data */
- unsigned int n_clear_frames;
-
- /* how many times the DV stream has underflowed, overflowed,
- or otherwise encountered an error, since the previous call
- to DV1394_GET_STATUS */
- unsigned int dropped_frames;
-
- /* N.B. The dropped_frames counter is only a lower bound on the actual
- number of dropped frames, with the special case that if dropped_frames
- is zero, then it is guaranteed that NO frames have been dropped
- since the last call to DV1394_GET_STATUS.
- */
-};
-
-
-#endif /* FFMPEG_DV1394_H */
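
The MAIN LOOP documentation in the header above is given as pseudo-code. A compilable sketch of the receive direction, under the assumptions noted in the comments (reset_dv1394() and copy_dv_frame() are placeholder helpers, and the ring is mapped PAL-sized as in the removed dv1394.c), might look like this:

/* Sketch of the MAIN LOOP pseudo-code from dv1394.h, receive direction.
 * dv1394.h is the header removed in this commit; it provides the ioctls,
 * struct dv1394_status and DV1394_PAL_FRAME_SIZE used here. */
#include <poll.h>
#include <sys/ioctl.h>
#include "dv1394.h"

static void reset_dv1394(void)
{
    /* placeholder: close the fd, munmap the ring, then re-open and re-run
     * DV1394_INIT, as the ERROR HANDLING section above advises */
}

static void copy_dv_frame(const unsigned char *frame)
{
    /* placeholder: consume one PAL-sized DV frame */
    (void)frame;
}

static int dv1394_receive_loop(int fd, const unsigned char *ring,
                               unsigned int n_frames, int frames_wanted)
{
    while (frames_wanted > 0) {
        struct pollfd pfd;

        pfd.fd      = fd;
        pfd.events  = POLLIN;              /* POLLOUT for the transmit side */
        pfd.revents = 0;

        if (poll(&pfd, 1, -1) < 0)
            return -1;                     /* real code should retry on EINTR */

        if (pfd.revents) {
            struct dv1394_status status;
            unsigned int i;

            if (ioctl(fd, DV1394_GET_STATUS, &status) < 0)
                return -1;

            if (status.dropped_frames > 0) {
                reset_dv1394();            /* recover from overflow/bus reset */
                continue;
            }
            for (i = 0; i < status.n_clear_frames; i++) {
                unsigned int idx = (status.first_clear_frame + i) % n_frames;
                copy_dv_frame(ring + idx * DV1394_PAL_FRAME_SIZE);
            }
            frames_wanted -= status.n_clear_frames;
            /* hand the consumed frames back to the driver */
            if (ioctl(fd, DV1394_RECEIVE_FRAMES, status.n_clear_frames) < 0)
                return -1;
        }
    }
    return 0;
}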
diff --git a/libavformat/libdc1394.c b/libavformat/libdc1394.c
deleted file mode 100644
index 095a35af15..0000000000
--- a/libavformat/libdc1394.c
+++ /dev/null
@@ -1,193 +0,0 @@
-/*
- * IIDC1394 grab interface (uses libdc1394 and libraw1394)
- * Copyright (c) 2004 Roman Shaposhnik
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include "avformat.h"
-
-#include <libraw1394/raw1394.h>
-#include <libdc1394/dc1394_control.h>
-
-#undef free
-
-typedef struct dc1394_data {
- raw1394handle_t handle;
- dc1394_cameracapture camera;
- int current_frame;
- int fps;
-
- AVPacket packet;
-} dc1394_data;
-
-struct dc1394_frame_format {
- int width;
- int height;
- enum PixelFormat pix_fmt;
- int frame_size_id;
-} dc1394_frame_formats[] = {
- { 320, 240, PIX_FMT_UYVY422, MODE_320x240_YUV422 },
- { 640, 480, PIX_FMT_UYYVYY411, MODE_640x480_YUV411 },
- { 640, 480, PIX_FMT_UYVY422, MODE_640x480_YUV422 },
- { 0, 0, 0, MODE_320x240_YUV422 } /* default -- gotta be the last one */
-};
-
-struct dc1394_frame_rate {
- int frame_rate;
- int frame_rate_id;
-} dc1394_frame_rates[] = {
- { 1875, FRAMERATE_1_875 },
- { 3750, FRAMERATE_3_75 },
- { 7500, FRAMERATE_7_5 },
- { 15000, FRAMERATE_15 },
- { 30000, FRAMERATE_30 },
- { 60000, FRAMERATE_60 },
- { 0, FRAMERATE_30 } /* default -- gotta be the last one */
-};
-
-static int dc1394_read_header(AVFormatContext *c, AVFormatParameters * ap)
-{
- dc1394_data* dc1394 = c->priv_data;
- AVStream* vst;
- nodeid_t* camera_nodes;
- int res;
- struct dc1394_frame_format *fmt;
- struct dc1394_frame_rate *fps;
-
- for (fmt = dc1394_frame_formats; fmt->width; fmt++)
- if (fmt->pix_fmt == ap->pix_fmt && fmt->width == ap->width && fmt->height == ap->height)
- break;
-
- for (fps = dc1394_frame_rates; fps->frame_rate; fps++)
- if (fps->frame_rate == av_rescale(1000, ap->time_base.den, ap->time_base.num))
- break;
-
- /* create a video stream */
- vst = av_new_stream(c, 0);
- if (!vst)
- return -1;
- av_set_pts_info(vst, 64, 1, 1000);
- vst->codec->codec_type = CODEC_TYPE_VIDEO;
- vst->codec->codec_id = CODEC_ID_RAWVIDEO;
- vst->codec->time_base.den = fps->frame_rate;
- vst->codec->time_base.num = 1000;
- vst->codec->width = fmt->width;
- vst->codec->height = fmt->height;
- vst->codec->pix_fmt = fmt->pix_fmt;
-
- /* packet init */
- av_init_packet(&dc1394->packet);
- dc1394->packet.size = avpicture_get_size(fmt->pix_fmt, fmt->width, fmt->height);
- dc1394->packet.stream_index = vst->index;
- dc1394->packet.flags |= PKT_FLAG_KEY;
-
- dc1394->current_frame = 0;
- dc1394->fps = fps->frame_rate;
-
- vst->codec->bit_rate = av_rescale(dc1394->packet.size * 8, fps->frame_rate, 1000);
-
- /* Now lets prep the hardware */
- dc1394->handle = dc1394_create_handle(0); /* FIXME: gotta have ap->port */
- if (!dc1394->handle) {
- av_log(c, AV_LOG_ERROR, "Can't acquire dc1394 handle on port %d\n", 0 /* ap->port */);
- goto out;
- }
- camera_nodes = dc1394_get_camera_nodes(dc1394->handle, &res, 1);
- if (!camera_nodes || camera_nodes[ap->channel] == DC1394_NO_CAMERA) {
- av_log(c, AV_LOG_ERROR, "There's no IIDC camera on the channel %d\n", ap->channel);
- goto out_handle;
- }
- res = dc1394_dma_setup_capture(dc1394->handle, camera_nodes[ap->channel],
- 0,
- FORMAT_VGA_NONCOMPRESSED,
- fmt->frame_size_id,
- SPEED_400,
- fps->frame_rate_id, 8, 1,
- c->filename,
- &dc1394->camera);
- dc1394_free_camera_nodes(camera_nodes);
- if (res != DC1394_SUCCESS) {
- av_log(c, AV_LOG_ERROR, "Can't prepare camera for the DMA capture\n");
- goto out_handle;
- }
-
- res = dc1394_start_iso_transmission(dc1394->handle, dc1394->camera.node);
- if (res != DC1394_SUCCESS) {
- av_log(c, AV_LOG_ERROR, "Can't start isochronous transmission\n");
- goto out_handle_dma;
- }
-
- return 0;
-
-out_handle_dma:
- dc1394_dma_unlisten(dc1394->handle, &dc1394->camera);
- dc1394_dma_release_camera(dc1394->handle, &dc1394->camera);
-out_handle:
- dc1394_destroy_handle(dc1394->handle);
-out:
- return -1;
-}
-
-static int dc1394_read_packet(AVFormatContext *c, AVPacket *pkt)
-{
- struct dc1394_data *dc1394 = c->priv_data;
- int res;
-
- /* discard stale frame */
- if (dc1394->current_frame++) {
- if (dc1394_dma_done_with_buffer(&dc1394->camera) != DC1394_SUCCESS)
- av_log(c, AV_LOG_ERROR, "failed to release %d frame\n", dc1394->current_frame);
- }
-
- res = dc1394_dma_single_capture(&dc1394->camera);
-
- if (res == DC1394_SUCCESS) {
- dc1394->packet.data = (uint8_t *)(dc1394->camera.capture_buffer);
- dc1394->packet.pts = (dc1394->current_frame * 1000000) / dc1394->fps;
- res = dc1394->packet.size;
- } else {
- av_log(c, AV_LOG_ERROR, "DMA capture failed\n");
- dc1394->packet.data = NULL;
- res = -1;
- }
-
- *pkt = dc1394->packet;
- return res;
-}
-
-static int dc1394_close(AVFormatContext * context)
-{
- struct dc1394_data *dc1394 = context->priv_data;
-
- dc1394_stop_iso_transmission(dc1394->handle, dc1394->camera.node);
- dc1394_dma_unlisten(dc1394->handle, &dc1394->camera);
- dc1394_dma_release_camera(dc1394->handle, &dc1394->camera);
- dc1394_destroy_handle(dc1394->handle);
-
- return 0;
-}
-
-AVInputFormat libdc1394_demuxer = {
- .name = "libdc1394",
- .long_name = "dc1394 A/V grab",
- .priv_data_size = sizeof(struct dc1394_data),
- .read_header = dc1394_read_header,
- .read_packet = dc1394_read_packet,
- .read_close = dc1394_close,
- .flags = AVFMT_NOFILE
-};
diff --git a/libavformat/v4l.c b/libavformat/v4l.c
deleted file mode 100644
index 8005378559..0000000000
--- a/libavformat/v4l.c
+++ /dev/null
@@ -1,852 +0,0 @@
-/*
- * Linux video grab interface
- * Copyright (c) 2000,2001 Fabrice Bellard.
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#include "avformat.h"
-#include "dsputil.h"
-#include <unistd.h>
-#include <fcntl.h>
-#include <sys/ioctl.h>
-#include <sys/mman.h>
-#include <sys/time.h>
-#define _LINUX_TIME_H 1
-#include <linux/videodev.h>
-#include <time.h>
-
-typedef struct {
- int fd;
- int frame_format; /* see VIDEO_PALETTE_xxx */
- int use_mmap;
- int width, height;
- int frame_rate;
- int frame_rate_base;
- int64_t time_frame;
- int frame_size;
- struct video_capability video_cap;
- struct video_audio audio_saved;
- uint8_t *video_buf;
- struct video_mbuf gb_buffers;
- struct video_mmap gb_buf;
- int gb_frame;
-
- /* ATI All In Wonder specific stuff */
- /* XXX: remove and merge in libavcodec/imgconvert.c */
- int aiw_enabled;
- int deint;
- int halfw;
- uint8_t *src_mem;
- uint8_t *lum_m4_mem;
-} VideoData;
-
-struct {
- int palette;
- int depth;
- enum PixelFormat pix_fmt;
-} video_formats [] = {
- {.palette = VIDEO_PALETTE_YUV420P, .depth = 12, .pix_fmt = PIX_FMT_YUV420P },
- {.palette = VIDEO_PALETTE_YUV422, .depth = 16, .pix_fmt = PIX_FMT_YUYV422 },
- {.palette = VIDEO_PALETTE_UYVY, .depth = 16, .pix_fmt = PIX_FMT_UYVY422 },
- {.palette = VIDEO_PALETTE_YUYV, .depth = 16, .pix_fmt = PIX_FMT_YUYV422 },
- /* NOTE: v4l uses BGR24, not RGB24 */
- {.palette = VIDEO_PALETTE_RGB24, .depth = 24, .pix_fmt = PIX_FMT_BGR24 },
- {.palette = VIDEO_PALETTE_RGB565, .depth = 16, .pix_fmt = PIX_FMT_BGR565 },
- {.palette = VIDEO_PALETTE_GREY, .depth = 8, .pix_fmt = PIX_FMT_GRAY8 },
-};
-
-
-static int aiw_init(VideoData *s);
-static int aiw_read_picture(VideoData *s, uint8_t *data);
-static int aiw_close(VideoData *s);
-
-static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
-{
- VideoData *s = s1->priv_data;
- AVStream *st;
- int width, height;
- int video_fd, frame_size;
- int ret, frame_rate, frame_rate_base;
- int desired_palette, desired_depth;
- struct video_tuner tuner;
- struct video_audio audio;
- struct video_picture pict;
- int j;
- int vformat_num = sizeof(video_formats) / sizeof(video_formats[0]);
-
- if (ap->width <= 0 || ap->height <= 0 || ap->time_base.den <= 0) {
- av_log(s1, AV_LOG_ERROR, "Bad capture size (%dx%d) or wrong time base (%d)\n",
- ap->width, ap->height, ap->time_base.den);
-
- return -1;
- }
-
- width = ap->width;
- height = ap->height;
- frame_rate = ap->time_base.den;
- frame_rate_base = ap->time_base.num;
-
- if((unsigned)width > 32767 || (unsigned)height > 32767) {
- av_log(s1, AV_LOG_ERROR, "Capture size is out of range: %dx%d\n",
- width, height);
-
- return -1;
- }
-
- st = av_new_stream(s1, 0);
- if (!st)
- return AVERROR(ENOMEM);
- av_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
-
- s->width = width;
- s->height = height;
- s->frame_rate = frame_rate;
- s->frame_rate_base = frame_rate_base;
-
- video_fd = open(s1->filename, O_RDWR);
- if (video_fd < 0) {
- av_log(s1, AV_LOG_ERROR, "%s: %s\n", s1->filename, strerror(errno));
- goto fail;
- }
-
- if (ioctl(video_fd,VIDIOCGCAP, &s->video_cap) < 0) {
- av_log(s1, AV_LOG_ERROR, "VIDIOCGCAP: %s\n", strerror(errno));
- goto fail;
- }
-
- if (!(s->video_cap.type & VID_TYPE_CAPTURE)) {
- av_log(s1, AV_LOG_ERROR, "Fatal: grab device does not handle capture\n");
- goto fail;
- }
-
- desired_palette = -1;
- desired_depth = -1;
- for (j = 0; j < vformat_num; j++) {
- if (ap->pix_fmt == video_formats[j].pix_fmt) {
- desired_palette = video_formats[j].palette;
- desired_depth = video_formats[j].depth;
- break;
- }
- }
-
- /* set tv standard */
- if (ap->standard && !ioctl(video_fd, VIDIOCGTUNER, &tuner)) {
- if (!strcasecmp(ap->standard, "pal"))
- tuner.mode = VIDEO_MODE_PAL;
- else if (!strcasecmp(ap->standard, "secam"))
- tuner.mode = VIDEO_MODE_SECAM;
- else
- tuner.mode = VIDEO_MODE_NTSC;
- ioctl(video_fd, VIDIOCSTUNER, &tuner);
- }
-
- /* unmute audio */
- audio.audio = 0;
- ioctl(video_fd, VIDIOCGAUDIO, &audio);
- memcpy(&s->audio_saved, &audio, sizeof(audio));
- audio.flags &= ~VIDEO_AUDIO_MUTE;
- ioctl(video_fd, VIDIOCSAUDIO, &audio);
-
- ioctl(video_fd, VIDIOCGPICT, &pict);
-#if 0
- printf("v4l: colour=%d hue=%d brightness=%d contrast=%d whiteness=%d\n",
- pict.colour,
- pict.hue,
- pict.brightness,
- pict.contrast,
- pict.whiteness);
-#endif
- /* try to choose a suitable video format */
- pict.palette = desired_palette;
- pict.depth= desired_depth;
- if (desired_palette == -1 || (ret = ioctl(video_fd, VIDIOCSPICT, &pict)) < 0) {
- for (j = 0; j < vformat_num; j++) {
- pict.palette = video_formats[j].palette;
- pict.depth = video_formats[j].depth;
- if (-1 != ioctl(video_fd, VIDIOCSPICT, &pict))
- break;
- }
- if (j >= vformat_num)
- goto fail1;
- }
-
- ret = ioctl(video_fd,VIDIOCGMBUF,&s->gb_buffers);
- if (ret < 0) {
- /* try to use read based access */
- struct video_window win;
- int val;
-
- win.x = 0;
- win.y = 0;
- win.width = width;
- win.height = height;
- win.chromakey = -1;
- win.flags = 0;
-
- ioctl(video_fd, VIDIOCSWIN, &win);
-
- s->frame_format = pict.palette;
-
- val = 1;
- ioctl(video_fd, VIDIOCCAPTURE, &val);
-
- s->time_frame = av_gettime() * s->frame_rate / s->frame_rate_base;
- s->use_mmap = 0;
-
- /* ATI All In Wonder automatic activation */
- if (!strcmp(s->video_cap.name, "Km")) {
- if (aiw_init(s) < 0)
- goto fail;
- s->aiw_enabled = 1;
- /* force 420P format because conversion from YUV422 to YUV420P
- is done in this driver (ugly) */
- s->frame_format = VIDEO_PALETTE_YUV420P;
- }
- } else {
- s->video_buf = mmap(0,s->gb_buffers.size,PROT_READ|PROT_WRITE,MAP_SHARED,video_fd,0);
- if ((unsigned char*)-1 == s->video_buf) {
- s->video_buf = mmap(0,s->gb_buffers.size,PROT_READ|PROT_WRITE,MAP_PRIVATE,video_fd,0);
- if ((unsigned char*)-1 == s->video_buf) {
- av_log(s1, AV_LOG_ERROR, "mmap: %s\n", strerror(errno));
- goto fail;
- }
- }
- s->gb_frame = 0;
- s->time_frame = av_gettime() * s->frame_rate / s->frame_rate_base;
-
- /* start to grab the first frame */
- s->gb_buf.frame = s->gb_frame % s->gb_buffers.frames;
- s->gb_buf.height = height;
- s->gb_buf.width = width;
- s->gb_buf.format = pict.palette;
-
- ret = ioctl(video_fd, VIDIOCMCAPTURE, &s->gb_buf);
- if (ret < 0) {
- if (errno != EAGAIN) {
- fail1:
- av_log(s1, AV_LOG_ERROR, "Fatal: grab device does not support suitable format\n");
- } else {
- av_log(s1, AV_LOG_ERROR,"Fatal: grab device does not receive any video signal\n");
- }
- goto fail;
- }
- for (j = 1; j < s->gb_buffers.frames; j++) {
- s->gb_buf.frame = j;
- ioctl(video_fd, VIDIOCMCAPTURE, &s->gb_buf);
- }
- s->frame_format = s->gb_buf.format;
- s->use_mmap = 1;
- }
-
- for (j = 0; j < vformat_num; j++) {
- if (s->frame_format == video_formats[j].palette) {
- frame_size = width * height * video_formats[j].depth / 8;
- st->codec->pix_fmt = video_formats[j].pix_fmt;
- break;
- }
- }
-
- if (j >= vformat_num)
- goto fail;
-
- s->fd = video_fd;
- s->frame_size = frame_size;
-
- st->codec->codec_type = CODEC_TYPE_VIDEO;
- st->codec->codec_id = CODEC_ID_RAWVIDEO;
- st->codec->width = width;
- st->codec->height = height;
- st->codec->time_base.den = frame_rate;
- st->codec->time_base.num = frame_rate_base;
- st->codec->bit_rate = frame_size * 1/av_q2d(st->codec->time_base) * 8;
-
- return 0;
- fail:
- if (video_fd >= 0)
- close(video_fd);
- av_free(st);
- return AVERROR(EIO);
-}
-
-static int v4l_mm_read_picture(VideoData *s, uint8_t *buf)
-{
- uint8_t *ptr;
-
- while (ioctl(s->fd, VIDIOCSYNC, &s->gb_frame) < 0 &&
- (errno == EAGAIN || errno == EINTR));
-
- ptr = s->video_buf + s->gb_buffers.offsets[s->gb_frame];
- memcpy(buf, ptr, s->frame_size);
-
- /* Setup to capture the next frame */
- s->gb_buf.frame = s->gb_frame;
- if (ioctl(s->fd, VIDIOCMCAPTURE, &s->gb_buf) < 0) {
- if (errno == EAGAIN)
- av_log(NULL, AV_LOG_ERROR, "Cannot Sync\n");
- else
- av_log(NULL, AV_LOG_ERROR, "VIDIOCMCAPTURE: %s\n", strerror(errno));
- return AVERROR(EIO);
- }
-
- /* This is now the grabbing frame */
- s->gb_frame = (s->gb_frame + 1) % s->gb_buffers.frames;
-
- return s->frame_size;
-}
-
-static int grab_read_packet(AVFormatContext *s1, AVPacket *pkt)
-{
- VideoData *s = s1->priv_data;
- int64_t curtime, delay;
- struct timespec ts;
-
- /* Calculate the time of the next frame */
- s->time_frame += INT64_C(1000000);
-
- /* wait based on the frame rate */
- for(;;) {
- curtime = av_gettime();
- delay = s->time_frame * s->frame_rate_base / s->frame_rate - curtime;
- if (delay <= 0) {
- if (delay < INT64_C(-1000000) * s->frame_rate_base / s->frame_rate) {
- /* printf("grabbing is %d frames late (dropping)\n", (int) -(delay / 16666)); */
- s->time_frame += INT64_C(1000000);
- }
- break;
- }
- ts.tv_sec = delay / 1000000;
- ts.tv_nsec = (delay % 1000000) * 1000;
- nanosleep(&ts, NULL);
- }
-
- if (av_new_packet(pkt, s->frame_size) < 0)
- return AVERROR(EIO);
-
- pkt->pts = curtime;
-
- /* read one frame */
- if (s->aiw_enabled) {
- return aiw_read_picture(s, pkt->data);
- } else if (s->use_mmap) {
- return v4l_mm_read_picture(s, pkt->data);
- } else {
- if (read(s->fd, pkt->data, pkt->size) != pkt->size)
- return AVERROR(EIO);
- return s->frame_size;
- }
-}
-
-static int grab_read_close(AVFormatContext *s1)
-{
- VideoData *s = s1->priv_data;
-
- if (s->aiw_enabled)
- aiw_close(s);
-
- if (s->use_mmap)
- munmap(s->video_buf, s->gb_buffers.size);
-
- /* mute audio. we must force it because the BTTV driver does not
- return its state correctly */
- s->audio_saved.flags |= VIDEO_AUDIO_MUTE;
- ioctl(s->fd, VIDIOCSAUDIO, &s->audio_saved);
-
- close(s->fd);
- return 0;
-}
-
-AVInputFormat v4l_demuxer = {
- "video4linux",
- "video grab",
- sizeof(VideoData),
- NULL,
- grab_read_header,
- grab_read_packet,
- grab_read_close,
- .flags = AVFMT_NOFILE,
-};
-
-/* All in Wonder specific stuff */
-/* XXX: remove and merge in libavcodec/imgconvert.c */
-
-static int aiw_init(VideoData *s)
-{
- int width, height;
-
- width = s->width;
- height = s->height;
-
- if ((width == s->video_cap.maxwidth && height == s->video_cap.maxheight) ||
- (width == s->video_cap.maxwidth && height == s->video_cap.maxheight*2) ||
- (width == s->video_cap.maxwidth/2 && height == s->video_cap.maxheight)) {
-
- s->deint=0;
- s->halfw=0;
- if (height == s->video_cap.maxheight*2) s->deint=1;
- if (width == s->video_cap.maxwidth/2) s->halfw=1;
- } else {
- av_log(NULL, AV_LOG_ERROR, "\nIncorrect Grab Size Supplied - Supported Sizes Are:\n");
- av_log(NULL, AV_LOG_ERROR, " %dx%d %dx%d %dx%d\n\n",
- s->video_cap.maxwidth,s->video_cap.maxheight,
- s->video_cap.maxwidth,s->video_cap.maxheight*2,
- s->video_cap.maxwidth/2,s->video_cap.maxheight);
- goto fail;
- }
-
- if (s->halfw == 0) {
- s->src_mem = av_malloc(s->width*2);
- } else {
- s->src_mem = av_malloc(s->width*4);
- }
- if (!s->src_mem) goto fail;
-
- s->lum_m4_mem = av_malloc(s->width);
- if (!s->lum_m4_mem)
- goto fail;
- return 0;
- fail:
- av_freep(&s->src_mem);
- av_freep(&s->lum_m4_mem);
- return -1;
-}
-
-#ifdef HAVE_MMX
-#include "i386/mmx.h"
-
-#define LINE_WITH_UV \
- movq_m2r(ptr[0],mm0); \
- movq_m2r(ptr[8],mm1); \
- movq_r2r(mm0, mm4); \
- punpcklbw_r2r(mm1,mm0); \
- punpckhbw_r2r(mm1,mm4); \
- movq_r2r(mm0,mm5); \
- punpcklbw_r2r(mm4,mm0); \
- punpckhbw_r2r(mm4,mm5); \
- movq_r2r(mm0,mm1); \
- punpcklbw_r2r(mm5,mm1); \
- movq_r2m(mm1,lum[0]); \
- movq_m2r(ptr[16],mm2); \
- movq_m2r(ptr[24],mm1); \
- movq_r2r(mm2,mm4); \
- punpcklbw_r2r(mm1,mm2); \
- punpckhbw_r2r(mm1,mm4); \
- movq_r2r(mm2,mm3); \
- punpcklbw_r2r(mm4,mm2); \
- punpckhbw_r2r(mm4,mm3); \
- movq_r2r(mm2,mm1); \
- punpcklbw_r2r(mm3,mm1); \
- movq_r2m(mm1,lum[8]); \
- punpckhdq_r2r(mm2,mm0); \
- punpckhdq_r2r(mm3,mm5); \
- movq_r2m(mm0,cb[0]); \
- movq_r2m(mm5,cr[0]);
-
-#define LINE_NO_UV \
- movq_m2r(ptr[0],mm0);\
- movq_m2r(ptr[8],mm1);\
- movq_r2r(mm0, mm4);\
- punpcklbw_r2r(mm1,mm0); \
- punpckhbw_r2r(mm1,mm4);\
- movq_r2r(mm0,mm5);\
- punpcklbw_r2r(mm4,mm0);\
- punpckhbw_r2r(mm4,mm5);\
- movq_r2r(mm0,mm1);\
- punpcklbw_r2r(mm5,mm1);\
- movq_r2m(mm1,lum[0]);\
- movq_m2r(ptr[16],mm2);\
- movq_m2r(ptr[24],mm1);\
- movq_r2r(mm2,mm4);\
- punpcklbw_r2r(mm1,mm2);\
- punpckhbw_r2r(mm1,mm4);\
- movq_r2r(mm2,mm3);\
- punpcklbw_r2r(mm4,mm2);\
- punpckhbw_r2r(mm4,mm3);\
- movq_r2r(mm2,mm1);\
- punpcklbw_r2r(mm3,mm1);\
- movq_r2m(mm1,lum[8]);
-
-#define LINE_WITHUV_AVG \
- movq_m2r(ptr[0], mm0);\
- movq_m2r(ptr[8], mm1);\
- movq_r2r(mm0, mm4);\
- punpcklbw_r2r(mm1,mm0);\
- punpckhbw_r2r(mm1,mm4);\
- movq_r2r(mm0,mm5);\
- punpcklbw_r2r(mm4,mm0);\
- punpckhbw_r2r(mm4,mm5);\
- movq_r2r(mm0,mm1);\
- movq_r2r(mm5,mm2);\
- punpcklbw_r2r(mm7,mm1);\
- punpcklbw_r2r(mm7,mm2);\
- paddw_r2r(mm6,mm1);\
- paddw_r2r(mm2,mm1);\
- psraw_i2r(1,mm1);\
- packuswb_r2r(mm7,mm1);\
- movd_r2m(mm1,lum[0]);\
- movq_m2r(ptr[16],mm2);\
- movq_m2r(ptr[24],mm1);\
- movq_r2r(mm2,mm4);\
- punpcklbw_r2r(mm1,mm2);\
- punpckhbw_r2r(mm1,mm4);\
- movq_r2r(mm2,mm3);\
- punpcklbw_r2r(mm4,mm2);\
- punpckhbw_r2r(mm4,mm3);\
- movq_r2r(mm2,mm1);\
- movq_r2r(mm3,mm4);\
- punpcklbw_r2r(mm7,mm1);\
- punpcklbw_r2r(mm7,mm4);\
- paddw_r2r(mm6,mm1);\
- paddw_r2r(mm4,mm1);\
- psraw_i2r(1,mm1);\
- packuswb_r2r(mm7,mm1);\
- movd_r2m(mm1,lum[4]);\
- punpckhbw_r2r(mm7,mm0);\
- punpckhbw_r2r(mm7,mm2);\
- paddw_r2r(mm6,mm0);\
- paddw_r2r(mm2,mm0);\
- psraw_i2r(1,mm0);\
- packuswb_r2r(mm7,mm0);\
- punpckhbw_r2r(mm7,mm5);\
- punpckhbw_r2r(mm7,mm3);\
- paddw_r2r(mm6,mm5);\
- paddw_r2r(mm3,mm5);\
- psraw_i2r(1,mm5);\
- packuswb_r2r(mm7,mm5);\
- movd_r2m(mm0,cb[0]);\
- movd_r2m(mm5,cr[0]);
-
-#define LINE_NOUV_AVG \
- movq_m2r(ptr[0],mm0);\
- movq_m2r(ptr[8],mm1);\
- pand_r2r(mm5,mm0);\
- pand_r2r(mm5,mm1);\
- pmaddwd_r2r(mm6,mm0);\
- pmaddwd_r2r(mm6,mm1);\
- packssdw_r2r(mm1,mm0);\
- paddw_r2r(mm6,mm0);\
- psraw_i2r(1,mm0);\
- movq_m2r(ptr[16],mm2);\
- movq_m2r(ptr[24],mm3);\
- pand_r2r(mm5,mm2);\
- pand_r2r(mm5,mm3);\
- pmaddwd_r2r(mm6,mm2);\
- pmaddwd_r2r(mm6,mm3);\
- packssdw_r2r(mm3,mm2);\
- paddw_r2r(mm6,mm2);\
- psraw_i2r(1,mm2);\
- packuswb_r2r(mm2,mm0);\
- movq_r2m(mm0,lum[0]);
-
-#define DEINT_LINE_LUM(ptroff) \
- movd_m2r(lum_m4[(ptroff)],mm0);\
- movd_m2r(lum_m3[(ptroff)],mm1);\
- movd_m2r(lum_m2[(ptroff)],mm2);\
- movd_m2r(lum_m1[(ptroff)],mm3);\
- movd_m2r(lum[(ptroff)],mm4);\
- punpcklbw_r2r(mm7,mm0);\
- movd_r2m(mm2,lum_m4[(ptroff)]);\
- punpcklbw_r2r(mm7,mm1);\
- punpcklbw_r2r(mm7,mm2);\
- punpcklbw_r2r(mm7,mm3);\
- punpcklbw_r2r(mm7,mm4);\
- psllw_i2r(2,mm1);\
- psllw_i2r(1,mm2);\
- paddw_r2r(mm6,mm1);\
- psllw_i2r(2,mm3);\
- paddw_r2r(mm2,mm1);\
- paddw_r2r(mm4,mm0);\
- paddw_r2r(mm3,mm1);\
- psubusw_r2r(mm0,mm1);\
- psrlw_i2r(3,mm1);\
- packuswb_r2r(mm7,mm1);\
- movd_r2m(mm1,lum_m2[(ptroff)]);
-
-#else
-#include "dsputil.h"
-
-#define LINE_WITH_UV \
- lum[0]=ptr[0];lum[1]=ptr[2];lum[2]=ptr[4];lum[3]=ptr[6];\
- cb[0]=ptr[1];cb[1]=ptr[5];\
- cr[0]=ptr[3];cr[1]=ptr[7];\
- lum[4]=ptr[8];lum[5]=ptr[10];lum[6]=ptr[12];lum[7]=ptr[14];\
- cb[2]=ptr[9];cb[3]=ptr[13];\
- cr[2]=ptr[11];cr[3]=ptr[15];\
- lum[8]=ptr[16];lum[9]=ptr[18];lum[10]=ptr[20];lum[11]=ptr[22];\
- cb[4]=ptr[17];cb[5]=ptr[21];\
- cr[4]=ptr[19];cr[5]=ptr[23];\
- lum[12]=ptr[24];lum[13]=ptr[26];lum[14]=ptr[28];lum[15]=ptr[30];\
- cb[6]=ptr[25];cb[7]=ptr[29];\
- cr[6]=ptr[27];cr[7]=ptr[31];
-
-#define LINE_NO_UV \
- lum[0]=ptr[0];lum[1]=ptr[2];lum[2]=ptr[4];lum[3]=ptr[6];\
- lum[4]=ptr[8];lum[5]=ptr[10];lum[6]=ptr[12];lum[7]=ptr[14];\
- lum[8]=ptr[16];lum[9]=ptr[18];lum[10]=ptr[20];lum[11]=ptr[22];\
- lum[12]=ptr[24];lum[13]=ptr[26];lum[14]=ptr[28];lum[15]=ptr[30];
-
-#define LINE_WITHUV_AVG \
- sum=(ptr[0]+ptr[2]+1) >> 1;lum[0]=sum; \
- sum=(ptr[4]+ptr[6]+1) >> 1;lum[1]=sum; \
- sum=(ptr[1]+ptr[5]+1) >> 1;cb[0]=sum; \
- sum=(ptr[3]+ptr[7]+1) >> 1;cr[0]=sum; \
- sum=(ptr[8]+ptr[10]+1) >> 1;lum[2]=sum; \
- sum=(ptr[12]+ptr[14]+1) >> 1;lum[3]=sum; \
- sum=(ptr[9]+ptr[13]+1) >> 1;cb[1]=sum; \
- sum=(ptr[11]+ptr[15]+1) >> 1;cr[1]=sum; \
- sum=(ptr[16]+ptr[18]+1) >> 1;lum[4]=sum; \
- sum=(ptr[20]+ptr[22]+1) >> 1;lum[5]=sum; \
- sum=(ptr[17]+ptr[21]+1) >> 1;cb[2]=sum; \
- sum=(ptr[19]+ptr[23]+1) >> 1;cr[2]=sum; \
- sum=(ptr[24]+ptr[26]+1) >> 1;lum[6]=sum; \
- sum=(ptr[28]+ptr[30]+1) >> 1;lum[7]=sum; \
- sum=(ptr[25]+ptr[29]+1) >> 1;cb[3]=sum; \
- sum=(ptr[27]+ptr[31]+1) >> 1;cr[3]=sum;
-
-#define LINE_NOUV_AVG \
- sum=(ptr[0]+ptr[2]+1) >> 1;lum[0]=sum; \
- sum=(ptr[4]+ptr[6]+1) >> 1;lum[1]=sum; \
- sum=(ptr[8]+ptr[10]+1) >> 1;lum[2]=sum; \
- sum=(ptr[12]+ptr[14]+1) >> 1;lum[3]=sum; \
- sum=(ptr[16]+ptr[18]+1) >> 1;lum[4]=sum; \
- sum=(ptr[20]+ptr[22]+1) >> 1;lum[5]=sum; \
- sum=(ptr[24]+ptr[26]+1) >> 1;lum[6]=sum; \
- sum=(ptr[28]+ptr[30]+1) >> 1;lum[7]=sum;
-
-#define DEINT_LINE_LUM(ptroff) \
- sum=(-lum_m4[(ptroff)]+(lum_m3[(ptroff)]<<2)+(lum_m2[(ptroff)]<<1)+(lum_m1[(ptroff)]<<2)-lum[(ptroff)]); \
- lum_m4[(ptroff)]=lum_m2[(ptroff)];\
- lum_m2[(ptroff)]=cm[(sum+4)>>3];\
- sum=(-lum_m4[(ptroff)+1]+(lum_m3[(ptroff)+1]<<2)+(lum_m2[(ptroff)+1]<<1)+(lum_m1[(ptroff)+1]<<2)-lum[(ptroff)+1]); \
- lum_m4[(ptroff)+1]=lum_m2[(ptroff)+1];\
- lum_m2[(ptroff)+1]=cm[(sum+4)>>3];\
- sum=(-lum_m4[(ptroff)+2]+(lum_m3[(ptroff)+2]<<2)+(lum_m2[(ptroff)+2]<<1)+(lum_m1[(ptroff)+2]<<2)-lum[(ptroff)+2]); \
- lum_m4[(ptroff)+2]=lum_m2[(ptroff)+2];\
- lum_m2[(ptroff)+2]=cm[(sum+4)>>3];\
- sum=(-lum_m4[(ptroff)+3]+(lum_m3[(ptroff)+3]<<2)+(lum_m2[(ptroff)+3]<<1)+(lum_m1[(ptroff)+3]<<2)-lum[(ptroff)+3]); \
- lum_m4[(ptroff)+3]=lum_m2[(ptroff)+3];\
- lum_m2[(ptroff)+3]=cm[(sum+4)>>3];
-
-#endif
-
-
-/* Read two fields separately. */
-static int aiw_read_picture(VideoData *s, uint8_t *data)
-{
- uint8_t *ptr, *lum, *cb, *cr;
- int h;
-#ifndef HAVE_MMX
- int sum;
-#endif
- uint8_t* src = s->src_mem;
- uint8_t *ptrend = &src[s->width*2];
- lum=data;
- cb=&lum[s->width*s->height];
- cr=&cb[(s->width*s->height)/4];
- if (s->deint == 0 && s->halfw == 0) {
- while (read(s->fd,src,s->width*2) < 0) {
- usleep(100);
- }
- for (h = 0; h < s->height-2; h+=2) {
- for (ptr = &src[0]; ptr < ptrend; ptr+=32, lum+=16, cb+=8, cr+=8) {
- LINE_WITH_UV
- }
- read(s->fd,src,s->width*2);
- for (ptr = &src[0]; ptr < ptrend; ptr+=32, lum+=16) {
- LINE_NO_UV
- }
- read(s->fd,src,s->width*2);
- }
- /*
- * Do last two lines
- */
- for (ptr = &src[0]; ptr < ptrend; ptr+=32, lum+=16, cb+=8, cr+=8) {
- LINE_WITH_UV
- }
- read(s->fd,src,s->width*2);
- for (ptr = &src[0]; ptr < ptrend; ptr+=32, lum+=16) {
- LINE_NO_UV
- }
- /* drop second field */
- while (read(s->fd,src,s->width*2) < 0) {
- usleep(100);
- }
- for (h = 0; h < s->height - 1; h++) {
- read(s->fd,src,s->width*2);
- }
- } else if (s->halfw == 1) {
-#ifdef HAVE_MMX
- mmx_t rounder;
- mmx_t masker;
- rounder.uw[0]=1;
- rounder.uw[1]=1;
- rounder.uw[2]=1;
- rounder.uw[3]=1;
- masker.ub[0]=0xff;
- masker.ub[1]=0;
- masker.ub[2]=0xff;
- masker.ub[3]=0;
- masker.ub[4]=0xff;
- masker.ub[5]=0;
- masker.ub[6]=0xff;
- masker.ub[7]=0;
- pxor_r2r(mm7,mm7);
- movq_m2r(rounder,mm6);
-#endif
- while (read(s->fd,src,s->width*4) < 0) {
- usleep(100);
- }
- ptrend = &src[s->width*4];
- for (h = 0; h < s->height-2; h+=2) {
- for (ptr = &src[0]; ptr < ptrend; ptr+=32, lum+=8, cb+=4, cr+=4) {
- LINE_WITHUV_AVG
- }
- read(s->fd,src,s->width*4);
-#ifdef HAVE_MMX
- movq_m2r(masker,mm5);
-#endif
- for (ptr = &src[0]; ptr < ptrend; ptr+=32, lum+=8) {
- LINE_NOUV_AVG
- }
- read(s->fd,src,s->width*4);
- }
- /*
- * Do last two lines
- */
- for (ptr = &src[0]; ptr < ptrend; ptr+=32, lum+=8, cb+=4, cr+=4) {
- LINE_WITHUV_AVG
- }
- read(s->fd,src,s->width*4);
-#ifdef HAVE_MMX
- movq_m2r(masker,mm5);
-#endif
- for (ptr = &src[0]; ptr < ptrend; ptr+=32, lum+=8) {
- LINE_NOUV_AVG
- }
- /* drop second field */
- while (read(s->fd,src,s->width*4) < 0) {
- usleep(100);
- }
- for (h = 0; h < s->height - 1; h++) {
- read(s->fd,src,s->width*4);
- }
- } else {
- uint8_t *lum_m1, *lum_m2, *lum_m3, *lum_m4;
-#ifdef HAVE_MMX
- mmx_t rounder;
- rounder.uw[0]=4;
- rounder.uw[1]=4;
- rounder.uw[2]=4;
- rounder.uw[3]=4;
- movq_m2r(rounder,mm6);
- pxor_r2r(mm7,mm7);
-#else
- uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
-#endif
-
- /* read two fields and deinterlace them */
- while (read(s->fd,src,s->width*2) < 0) {
- usleep(100);
- }
- for (h = 0; h < (s->height/2)-2; h+=2) {
- for (ptr = &src[0]; ptr < ptrend; ptr+=32, lum+=16, cb+=8, cr+=8) {
- LINE_WITH_UV
- }
- read(s->fd,src,s->width*2);
- /* skip a luminance line - will be filled in later */
- lum += s->width;
- for (ptr = &src[0]; ptr < ptrend; ptr+=32, lum+=16, cb+=8, cr+=8) {
- LINE_WITH_UV
- }
- /* skip a luminance line - will be filled in later */
- lum += s->width;
- read(s->fd,src,s->width*2);
- }
- /*
- * Do last two lines
- */
- for (ptr = &src[0]; ptr < ptrend; ptr+=32, lum+=16, cb+=8, cr+=8) {
- LINE_WITH_UV
- }
- /* skip a luminance line - will be filled in later */
- lum += s->width;
- read(s->fd,src,s->width*2);
- for (ptr = &src[0]; ptr < ptrend; ptr+=32, lum+=16, cb+=8, cr+=8) {
- LINE_WITH_UV
- }
- /*
- *
- * SECOND FIELD
- *
- */
- lum=&data[s->width];
- while (read(s->fd,src,s->width*2) < 0) {
- usleep(10);
- }
- /* First (and last) two lines not interlaced */
- for (h = 0; h < 2; h++) {
- for (ptr = &src[0]; ptr < ptrend; ptr+=32, lum+=16) {
- LINE_NO_UV
- }
- read(s->fd,src,s->width*2);
- /* skip a luminance line */
- lum += s->width;
- }
- lum_m1=&lum[-s->width];
- lum_m2=&lum_m1[-s->width];
- lum_m3=&lum_m2[-s->width];
- memmove(s->lum_m4_mem,&lum_m3[-s->width],s->width);
- for (; h < (s->height/2)-1; h++) {
- lum_m4=s->lum_m4_mem;
- for (ptr = &src[0]; ptr < ptrend; ptr+=32, lum+=16,lum_m1+=16,lum_m2+=16,lum_m3+=16,lum_m4+=16) {
- LINE_NO_UV
-
- DEINT_LINE_LUM(0)
- DEINT_LINE_LUM(4)
- DEINT_LINE_LUM(8)
- DEINT_LINE_LUM(12)
- }
- read(s->fd,src,s->width*2);
- /* skip a luminance line */
- lum += s->width;
- lum_m1 += s->width;
- lum_m2 += s->width;
- lum_m3 += s->width;
- // lum_m4 += s->width;
- }
- /*
- * Do last line
- */
- lum_m4=s->lum_m4_mem;
- for (ptr = &src[0]; ptr < ptrend; ptr+=32, lum+=16, lum_m1+=16, lum_m2+=16, lum_m3+=16, lum_m4+=16) {
- LINE_NO_UV
-
- DEINT_LINE_LUM(0)
- DEINT_LINE_LUM(4)
- DEINT_LINE_LUM(8)
- DEINT_LINE_LUM(12)
- }
- }
- emms_c();
- return s->frame_size;
-}
-
-static int aiw_close(VideoData *s)
-{
- av_freep(&s->lum_m4_mem);
- av_freep(&s->src_mem);
- return 0;
-}
diff --git a/libavformat/v4l2.c b/libavformat/v4l2.c
deleted file mode 100644
index d385d57c7c..0000000000
--- a/libavformat/v4l2.c
+++ /dev/null
@@ -1,643 +0,0 @@
-/*
- * Video4Linux2 grab interface
- * Copyright (c) 2000,2001 Fabrice Bellard.
- * Copyright (c) 2006 Luca Abeni.
- *
- * Part of this file is based on the V4L2 video capture example
- * (http://v4l2spec.bytesex.org/v4l2spec/capture.c)
- *
- * Thanks to Michael Niedermayer for providing the mapping between
- * V4L2_PIX_FMT_* and PIX_FMT_*
- *
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#include "avformat.h"
-#include <unistd.h>
-#include <fcntl.h>
-#include <sys/ioctl.h>
-#include <sys/mman.h>
-#include <sys/time.h>
-#include <asm/types.h>
-#include <linux/videodev2.h>
-#include <time.h>
-
-static const int desired_video_buffers = 256;
-
-enum io_method {
- io_read,
- io_mmap,
- io_userptr
-};
-
-struct video_data {
- int fd;
- int frame_format; /* V4L2_PIX_FMT_* */
- enum io_method io_method;
- int width, height;
- int frame_rate;
- int frame_rate_base;
- int frame_size;
- int top_field_first;
-
- int buffers;
- void **buf_start;
- unsigned int *buf_len;
-};
-
-struct buff_data {
- int index;
- int fd;
-};
-
-struct fmt_map {
- enum PixelFormat ff_fmt;
- int32_t v4l2_fmt;
-};
-
-static struct fmt_map fmt_conversion_table[] = {
- {
- .ff_fmt = PIX_FMT_YUV420P,
- .v4l2_fmt = V4L2_PIX_FMT_YUV420,
- },
- {
- .ff_fmt = PIX_FMT_YUV422P,
- .v4l2_fmt = V4L2_PIX_FMT_YUV422P,
- },
- {
- .ff_fmt = PIX_FMT_YUYV422,
- .v4l2_fmt = V4L2_PIX_FMT_YUYV,
- },
- {
- .ff_fmt = PIX_FMT_UYVY422,
- .v4l2_fmt = V4L2_PIX_FMT_UYVY,
- },
- {
- .ff_fmt = PIX_FMT_YUV411P,
- .v4l2_fmt = V4L2_PIX_FMT_YUV411P,
- },
- {
- .ff_fmt = PIX_FMT_YUV410P,
- .v4l2_fmt = V4L2_PIX_FMT_YUV410,
- },
- {
- .ff_fmt = PIX_FMT_BGR24,
- .v4l2_fmt = V4L2_PIX_FMT_BGR24,
- },
- {
- .ff_fmt = PIX_FMT_RGB24,
- .v4l2_fmt = V4L2_PIX_FMT_RGB24,
- },
- /*
- {
- .ff_fmt = PIX_FMT_RGB32,
- .v4l2_fmt = V4L2_PIX_FMT_BGR32,
- },
- */
- {
- .ff_fmt = PIX_FMT_GRAY8,
- .v4l2_fmt = V4L2_PIX_FMT_GREY,
- },
-};
-
-static int device_open(AVFormatContext *ctx, uint32_t *capabilities)
-{
- struct v4l2_capability cap;
- int fd;
- int res;
- int flags = O_RDWR;
-
- if (ctx->flags & AVFMT_FLAG_NONBLOCK) {
- flags |= O_NONBLOCK;
- }
- fd = open(ctx->filename, flags, 0);
- if (fd < 0) {
- av_log(ctx, AV_LOG_ERROR, "Cannot open video device %s : %s\n",
- ctx->filename, strerror(errno));
-
- return -1;
- }
-
- res = ioctl(fd, VIDIOC_QUERYCAP, &cap);
- // ENOIOCTLCMD definition only available on __KERNEL__
- if (res < 0 && errno == 515)
- {
- av_log(ctx, AV_LOG_ERROR, "QUERYCAP not implemented, probably V4L device but not supporting V4L2\n");
- close(fd);
-
- return -1;
- }
- if (res < 0) {
- av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_QUERYCAP): %s\n",
- strerror(errno));
- close(fd);
-
- return -1;
- }
- if ((cap.capabilities & V4L2_CAP_VIDEO_CAPTURE) == 0) {
- av_log(ctx, AV_LOG_ERROR, "Not a video capture device\n");
- close(fd);
-
- return -1;
- }
- *capabilities = cap.capabilities;
-
- return fd;
-}
-
-static int device_init(AVFormatContext *ctx, int *width, int *height, int pix_fmt)
-{
- struct video_data *s = ctx->priv_data;
- int fd = s->fd;
- struct v4l2_format fmt;
- int res;
-
- memset(&fmt, 0, sizeof(struct v4l2_format));
- fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- fmt.fmt.pix.width = *width;
- fmt.fmt.pix.height = *height;
- fmt.fmt.pix.pixelformat = pix_fmt;
- fmt.fmt.pix.field = V4L2_FIELD_INTERLACED;
- res = ioctl(fd, VIDIOC_S_FMT, &fmt);
- if ((*width != fmt.fmt.pix.width) || (*height != fmt.fmt.pix.height)) {
- av_log(ctx, AV_LOG_INFO, "The V4L2 driver changed the video from %dx%d to %dx%d\n", *width, *height, fmt.fmt.pix.width, fmt.fmt.pix.height);
- *width = fmt.fmt.pix.width;
- *height = fmt.fmt.pix.height;
- }
-
- return res;
-}
-
-static int first_field(int fd)
-{
- int res;
- v4l2_std_id std;
-
- res = ioctl(fd, VIDIOC_G_STD, &std);
- if (res < 0) {
- return 0;
- }
- if (std & V4L2_STD_NTSC) {
- return 0;
- }
-
- return 1;
-}
-
-static uint32_t fmt_ff2v4l(enum PixelFormat pix_fmt)
-{
- int i;
-
- for (i = 0; i < sizeof(fmt_conversion_table) / sizeof(struct fmt_map); i++) {
- if (fmt_conversion_table[i].ff_fmt == pix_fmt) {
- return fmt_conversion_table[i].v4l2_fmt;
- }
- }
-
- return 0;
-}
-
-static enum PixelFormat fmt_v4l2ff(uint32_t pix_fmt)
-{
- int i;
-
- for (i = 0; i < sizeof(fmt_conversion_table) / sizeof(struct fmt_map); i++) {
- if (fmt_conversion_table[i].v4l2_fmt == pix_fmt) {
- return fmt_conversion_table[i].ff_fmt;
- }
- }
-
- return -1;
-}
-
-static int mmap_init(AVFormatContext *ctx)
-{
- struct video_data *s = ctx->priv_data;
- struct v4l2_requestbuffers req;
- int i, res;
-
- memset(&req, 0, sizeof(struct v4l2_requestbuffers));
- req.count = desired_video_buffers;
- req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- req.memory = V4L2_MEMORY_MMAP;
- res = ioctl (s->fd, VIDIOC_REQBUFS, &req);
- if (res < 0) {
- if (errno == EINVAL) {
- av_log(ctx, AV_LOG_ERROR, "Device does not support mmap\n");
- } else {
- av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_REQBUFS)\n");
- }
-
- return -1;
- }
-
- if (req.count < 2) {
- av_log(ctx, AV_LOG_ERROR, "Insufficient buffer memory\n");
-
- return -1;
- }
- s->buffers = req.count;
- s->buf_start = av_malloc(sizeof(void *) * s->buffers);
- if (s->buf_start == NULL) {
- av_log(ctx, AV_LOG_ERROR, "Cannot allocate buffer pointers\n");
-
- return -1;
- }
- s->buf_len = av_malloc(sizeof(unsigned int) * s->buffers);
- if (s->buf_len == NULL) {
- av_log(ctx, AV_LOG_ERROR, "Cannot allocate buffer sizes\n");
- av_free(s->buf_start);
-
- return -1;
- }
-
- for (i = 0; i < req.count; i++) {
- struct v4l2_buffer buf;
-
- memset(&buf, 0, sizeof(struct v4l2_buffer));
- buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- buf.memory = V4L2_MEMORY_MMAP;
- buf.index = i;
- res = ioctl (s->fd, VIDIOC_QUERYBUF, &buf);
- if (res < 0) {
- av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_QUERYBUF)\n");
-
- return -1;
- }
-
- s->buf_len[i] = buf.length;
- if (s->buf_len[i] < s->frame_size) {
- av_log(ctx, AV_LOG_ERROR, "Buffer len [%d] = %d != %d\n", i, s->buf_len[i], s->frame_size);
-
- return -1;
- }
- s->buf_start[i] = mmap (NULL, buf.length,
- PROT_READ | PROT_WRITE, MAP_SHARED, s->fd, buf.m.offset);
- if (s->buf_start[i] == MAP_FAILED) {
- av_log(ctx, AV_LOG_ERROR, "mmap: %s\n", strerror(errno));
-
- return -1;
- }
- }
-
- return 0;
-}
-
-static int read_init(AVFormatContext *ctx)
-{
- return -1;
-}
-
-static void mmap_release_buffer(AVPacket *pkt)
-{
- struct v4l2_buffer buf;
- int res, fd;
- struct buff_data *buf_descriptor = pkt->priv;
-
- memset(&buf, 0, sizeof(struct v4l2_buffer));
- buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- buf.memory = V4L2_MEMORY_MMAP;
- buf.index = buf_descriptor->index;
- fd = buf_descriptor->fd;
- av_free(buf_descriptor);
-
- res = ioctl (fd, VIDIOC_QBUF, &buf);
- if (res < 0) {
- av_log(NULL, AV_LOG_ERROR, "ioctl(VIDIOC_QBUF)\n");
- }
- pkt->data = NULL;
- pkt->size = 0;
-}
-
-static int mmap_read_frame(AVFormatContext *ctx, AVPacket *pkt)
-{
- struct video_data *s = ctx->priv_data;
- struct v4l2_buffer buf;
- struct buff_data *buf_descriptor;
- int res;
-
- memset(&buf, 0, sizeof(struct v4l2_buffer));
- buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- buf.memory = V4L2_MEMORY_MMAP;
-
- /* FIXME: Some special treatment might be needed in case of loss of signal... */
- while ((res = ioctl(s->fd, VIDIOC_DQBUF, &buf)) < 0 && (errno == EINTR));
- if (res < 0) {
- if (errno == EAGAIN) {
- pkt->size = 0;
-
- return AVERROR(EAGAIN);
- }
- av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_DQBUF): %s\n", strerror(errno));
-
- return -1;
- }
- assert (buf.index < s->buffers);
- if (buf.bytesused != s->frame_size) {
- av_log(ctx, AV_LOG_ERROR, "The v4l2 frame is %d bytes, but %d bytes are expected\n", buf.bytesused, s->frame_size);
-
- return -1;
- }
-
- /* Image is at s->buf_start[buf.index] */
- pkt->data= s->buf_start[buf.index];
- pkt->size = buf.bytesused;
- pkt->pts = buf.timestamp.tv_sec * INT64_C(1000000) + buf.timestamp.tv_usec;
- pkt->destruct = mmap_release_buffer;
- buf_descriptor = av_malloc(sizeof(struct buff_data));
- if (buf_descriptor == NULL) {
- /* Something went wrong... Since av_malloc() failed, we cannot even
- * allocate a buffer to memcpy the data into
- */
- av_log(ctx, AV_LOG_ERROR, "Failed to allocate a buffer descriptor\n");
- res = ioctl (s->fd, VIDIOC_QBUF, &buf);
-
- return -1;
- }
- buf_descriptor->fd = s->fd;
- buf_descriptor->index = buf.index;
- pkt->priv = buf_descriptor;
-
- return s->buf_len[buf.index];
-}
-
-static int read_frame(AVFormatContext *ctx, AVPacket *pkt)
-{
- return -1;
-}
-
-static int mmap_start(AVFormatContext *ctx)
-{
- struct video_data *s = ctx->priv_data;
- enum v4l2_buf_type type;
- int i, res;
-
- for (i = 0; i < s->buffers; i++) {
- struct v4l2_buffer buf;
-
- memset(&buf, 0, sizeof(struct v4l2_buffer));
- buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- buf.memory = V4L2_MEMORY_MMAP;
- buf.index = i;
-
- res = ioctl (s->fd, VIDIOC_QBUF, &buf);
- if (res < 0) {
- av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_QBUF): %s\n", strerror(errno));
-
- return -1;
- }
- }
-
- type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- res = ioctl (s->fd, VIDIOC_STREAMON, &type);
- if (res < 0) {
- av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_STREAMON): %s\n", strerror(errno));
-
- return -1;
- }
-
- return 0;
-}
-
-static void mmap_close(struct video_data *s)
-{
- enum v4l2_buf_type type;
- int i;
-
- type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- /* We do not check for the result, because we could
- * not do anything about it anyway...
- */
- ioctl(s->fd, VIDIOC_STREAMOFF, &type);
- for (i = 0; i < s->buffers; i++) {
- munmap(s->buf_start[i], s->buf_len[i]);
- }
- av_free(s->buf_start);
- av_free(s->buf_len);
-}
-
-static int v4l2_set_parameters( AVFormatContext *s1, AVFormatParameters *ap )
-{
- struct video_data *s = s1->priv_data;
- struct v4l2_input input;
- struct v4l2_standard standard;
- int i;
-
- if(ap->channel>=0) {
- /* set tv video input */
- memset (&input, 0, sizeof (input));
- input.index = ap->channel;
- if(ioctl (s->fd, VIDIOC_ENUMINPUT, &input) < 0) {
- av_log(s1, AV_LOG_ERROR, "The V4L2 driver ioctl enum input failed:\n");
- return AVERROR(EIO);
- }
-
- av_log(s1, AV_LOG_DEBUG, "The V4L2 driver set input_id: %d, input: %s\n",
- ap->channel, input.name);
- if(ioctl (s->fd, VIDIOC_S_INPUT, &input.index) < 0 ) {
- av_log(s1, AV_LOG_ERROR, "The V4L2 driver ioctl set input(%d) failed\n",
- ap->channel);
- return AVERROR(EIO);
- }
- }
-
- if(ap->standard) {
- av_log(s1, AV_LOG_DEBUG, "The V4L2 driver set standard: %s\n",
- ap->standard );
- /* set tv standard */
- memset (&standard, 0, sizeof (standard));
- for(i=0;;i++) {
- standard.index = i;
- if (ioctl(s->fd, VIDIOC_ENUMSTD, &standard) < 0) {
- av_log(s1, AV_LOG_ERROR, "The V4L2 driver ioctl set standard(%s) failed\n",
- ap->standard);
- return AVERROR(EIO);
- }
-
- if(!strcasecmp(standard.name, ap->standard)) {
- break;
- }
- }
-
- av_log(s1, AV_LOG_DEBUG, "The V4L2 driver set standard: %s, id: %"PRIu64"\n",
- ap->standard, standard.id);
- if (ioctl(s->fd, VIDIOC_S_STD, &standard.id) < 0) {
- av_log(s1, AV_LOG_ERROR, "The V4L2 driver ioctl set standard(%s) failed\n",
- ap->standard);
- return AVERROR(EIO);
- }
- }
-
- return 0;
-}
-
-static int v4l2_read_header(AVFormatContext *s1, AVFormatParameters *ap)
-{
- struct video_data *s = s1->priv_data;
- AVStream *st;
- int width, height;
- int res, frame_rate, frame_rate_base;
- uint32_t desired_format, capabilities;
-
- if (ap->width <= 0 || ap->height <= 0 || ap->time_base.den <= 0) {
- av_log(s1, AV_LOG_ERROR, "Missing/Wrong parameters\n");
-
- return -1;
- }
-
- width = ap->width;
- height = ap->height;
- frame_rate = ap->time_base.den;
- frame_rate_base = ap->time_base.num;
-
- if((unsigned)width > 32767 || (unsigned)height > 32767) {
- av_log(s1, AV_LOG_ERROR, "Wrong size %dx%d\n", width, height);
-
- return -1;
- }
-
- st = av_new_stream(s1, 0);
- if (!st) {
- return AVERROR(ENOMEM);
- }
- av_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
-
- s->width = width;
- s->height = height;
- s->frame_rate = frame_rate;
- s->frame_rate_base = frame_rate_base;
-
- capabilities = 0;
- s->fd = device_open(s1, &capabilities);
- if (s->fd < 0) {
- av_free(st);
-
- return AVERROR(EIO);
- }
- av_log(s1, AV_LOG_INFO, "[%d]Capabilities: %x\n", s->fd, capabilities);
-
- desired_format = fmt_ff2v4l(ap->pix_fmt);
- if (desired_format == 0 || (device_init(s1, &width, &height, desired_format) < 0)) {
- int i, done;
-
- done = 0; i = 0;
- while (!done) {
- desired_format = fmt_conversion_table[i].v4l2_fmt;
- if (device_init(s1, &width, &height, desired_format) < 0) {
- desired_format = 0;
- i++;
- } else {
- done = 1;
- }
- if (i == sizeof(fmt_conversion_table) / sizeof(struct fmt_map)) {
- done = 1;
- }
- }
- }
- if (desired_format == 0) {
- av_log(s1, AV_LOG_ERROR, "Cannot find a proper format.\n");
- close(s->fd);
- av_free(st);
-
- return AVERROR(EIO);
- }
- s->frame_format = desired_format;
-
- if( v4l2_set_parameters( s1, ap ) < 0 )
- return AVERROR(EIO);
-
- st->codec->pix_fmt = fmt_v4l2ff(desired_format);
- s->frame_size = avpicture_get_size(st->codec->pix_fmt, width, height);
- if (capabilities & V4L2_CAP_STREAMING) {
- s->io_method = io_mmap;
- res = mmap_init(s1);
- if (res == 0) {
- res = mmap_start(s1);
- }
- } else {
- s->io_method = io_read;
- res = read_init(s1);
- }
- if (res < 0) {
- close(s->fd);
- av_free(st);
-
- return AVERROR(EIO);
- }
- s->top_field_first = first_field(s->fd);
-
- st->codec->codec_type = CODEC_TYPE_VIDEO;
- st->codec->codec_id = CODEC_ID_RAWVIDEO;
- st->codec->width = width;
- st->codec->height = height;
- st->codec->time_base.den = frame_rate;
- st->codec->time_base.num = frame_rate_base;
- st->codec->bit_rate = s->frame_size * 1/av_q2d(st->codec->time_base) * 8;
-
- return 0;
-}
-
-static int v4l2_read_packet(AVFormatContext *s1, AVPacket *pkt)
-{
- struct video_data *s = s1->priv_data;
- int res;
-
- if (s->io_method == io_mmap) {
- av_init_packet(pkt);
- res = mmap_read_frame(s1, pkt);
- } else if (s->io_method == io_read) {
- if (av_new_packet(pkt, s->frame_size) < 0)
- return AVERROR(EIO);
-
- res = read_frame(s1, pkt);
- } else {
- return AVERROR(EIO);
- }
- if (res < 0) {
- return res;
- }
-
- if (s1->streams[0]->codec->coded_frame) {
- s1->streams[0]->codec->coded_frame->interlaced_frame = 1;
- s1->streams[0]->codec->coded_frame->top_field_first = s->top_field_first;
- }
-
- return s->frame_size;
-}
-
-static int v4l2_read_close(AVFormatContext *s1)
-{
- struct video_data *s = s1->priv_data;
-
- if (s->io_method == io_mmap) {
- mmap_close(s);
- }
-
- close(s->fd);
- return 0;
-}
-
-AVInputFormat v4l2_demuxer = {
- "video4linux2",
- "video grab",
- sizeof(struct video_data),
- NULL,
- v4l2_read_header,
- v4l2_read_packet,
- v4l2_read_close,
- .flags = AVFMT_NOFILE,
-};
diff --git a/libavformat/x11grab.c b/libavformat/x11grab.c
deleted file mode 100644
index 61c14355aa..0000000000
--- a/libavformat/x11grab.c
+++ /dev/null
@@ -1,529 +0,0 @@
-/*
- * X11 video grab interface
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg integration:
- * Copyright (C) 2006 Clemens Fruhwirth <clemens@endorphin.org>
- * Edouard Gomez <ed.gomez@free.fr>
- *
- * This file contains code from grab.c:
- * Copyright (c) 2000-2001 Fabrice Bellard
- *
- * This file contains code from the xvidcap project:
- * Copyright (C) 1997-1998 Rasca, Berlin
- * 2003-2004 Karl H. Beckers, Frankfurt
- *
- * FFmpeg is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-/**
- * @file x11grab.c
- * X11 frame device demuxer by Clemens Fruhwirth <clemens@endorphin.org>
- * and Edouard Gomez <ed.gomez@free.fr>.
- */
-
-#include "avformat.h"
-#include <unistd.h>
-#include <fcntl.h>
-#include <sys/ioctl.h>
-#include <sys/mman.h>
-#include <sys/time.h>
-#define _LINUX_TIME_H 1
-#include <time.h>
-#include <X11/X.h>
-#include <X11/Xlib.h>
-#include <X11/Xlibint.h>
-#include <X11/Xproto.h>
-#include <X11/Xutil.h>
-#include <sys/ipc.h>
-#include <sys/shm.h>
-#include <X11/extensions/XShm.h>
-
-/**
- * X11 Device Demuxer context
- */
-typedef struct x11_grab_s
-{
- int frame_size; /**< Size in bytes of a grabbed frame */
- AVRational time_base; /**< Time base */
- int64_t time_frame; /**< Current time */
-
- int height; /**< Height of the grab frame */
- int width; /**< Width of the grab frame */
- int x_off; /**< Horizontal top-left corner coordinate */
- int y_off; /**< Vertical top-left corner coordinate */
-
- Display *dpy; /**< X11 display from which x11grab grabs frames */
- XImage *image; /**< X11 image holding the grab */
- int use_shm; /**< !0 when using XShm extension */
- XShmSegmentInfo shminfo; /**< When using XShm, keeps track of XShm info */
- int mouse_warning_shown;
-} x11_grab_t;
-
-/**
- * Initializes the x11 grab device demuxer (public device demuxer API).
- *
- * @param s1 Context from avformat core
- * @param ap Parameters from avformat core
- * @return <ul>
- * <li>AVERROR(ENOMEM) no memory left</li>
- * <li>AVERROR(EIO) other failure case</li>
- * <li>0 success</li>
- * </ul>
- */
-static int
-x11grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
-{
- x11_grab_t *x11grab = s1->priv_data;
- Display *dpy;
- AVStream *st = NULL;
- int input_pixfmt;
- XImage *image;
- int x_off = 0;
- int y_off = 0;
- int use_shm;
- char *param, *offset;
-
- param = av_strdup(s1->filename);
- offset = strchr(param, '+');
- if (offset) {
- sscanf(offset, "%d,%d", &x_off, &y_off);
- *offset= 0;
- }
-
- av_log(s1, AV_LOG_INFO, "device: %s -> display: %s x: %d y: %d width: %d height: %d\n", s1->filename, param, x_off, y_off, ap->width, ap->height);
-
- dpy = XOpenDisplay(param);
- if(!dpy) {
- av_log(s1, AV_LOG_ERROR, "Could not open X display.\n");
- return AVERROR(EIO);
- }
-
- if (!ap || ap->width <= 0 || ap->height <= 0 || ap->time_base.den <= 0) {
- av_log(s1, AV_LOG_ERROR, "AVParameters don't have any video size. Use -s.\n");
- return AVERROR(EIO);
- }
-
- st = av_new_stream(s1, 0);
- if (!st) {
- return AVERROR(ENOMEM);
- }
- av_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
-
- use_shm = XShmQueryExtension(dpy);
- av_log(s1, AV_LOG_INFO, "shared memory extension%s found\n", use_shm ? "" : " not");
-
- if(use_shm) {
- int scr = XDefaultScreen(dpy);
- image = XShmCreateImage(dpy,
- DefaultVisual(dpy, scr),
- DefaultDepth(dpy, scr),
- ZPixmap,
- NULL,
- &x11grab->shminfo,
- ap->width, ap->height);
- x11grab->shminfo.shmid = shmget(IPC_PRIVATE,
- image->bytes_per_line * image->height,
- IPC_CREAT|0777);
- if (x11grab->shminfo.shmid == -1) {
- av_log(s1, AV_LOG_ERROR, "Fatal: Can't get shared memory!\n");
- return AVERROR(ENOMEM);
- }
- x11grab->shminfo.shmaddr = image->data = shmat(x11grab->shminfo.shmid, 0, 0);
- x11grab->shminfo.readOnly = False;
-
- if (!XShmAttach(dpy, &x11grab->shminfo)) {
- av_log(s1, AV_LOG_ERROR, "Fatal: Failed to attach shared memory!\n");
- /* needs some better error subroutine :) */
- return AVERROR(EIO);
- }
- } else {
- image = XGetImage(dpy, RootWindow(dpy, DefaultScreen(dpy)),
- x_off,y_off,
- ap->width,ap->height,
- AllPlanes, ZPixmap);
- }
-
- switch (image->bits_per_pixel) {
- case 8:
- av_log (s1, AV_LOG_DEBUG, "8 bit palette\n");
- input_pixfmt = PIX_FMT_PAL8;
- break;
- case 16:
- if ( image->red_mask == 0xf800 &&
- image->green_mask == 0x07e0 &&
- image->blue_mask == 0x001f ) {
- av_log (s1, AV_LOG_DEBUG, "16 bit RGB565\n");
- input_pixfmt = PIX_FMT_RGB565;
- } else if (image->red_mask == 0x7c00 &&
- image->green_mask == 0x03e0 &&
- image->blue_mask == 0x001f ) {
- av_log(s1, AV_LOG_DEBUG, "16 bit RGB555\n");
- input_pixfmt = PIX_FMT_RGB555;
- } else {
- av_log(s1, AV_LOG_ERROR, "RGB ordering at image depth %i not supported ... aborting\n", image->bits_per_pixel);
- av_log(s1, AV_LOG_ERROR, "color masks: r 0x%.6lx g 0x%.6lx b 0x%.6lx\n", image->red_mask, image->green_mask, image->blue_mask);
- return AVERROR(EIO);
- }
- break;
- case 24:
- if ( image->red_mask == 0xff0000 &&
- image->green_mask == 0x00ff00 &&
- image->blue_mask == 0x0000ff ) {
- input_pixfmt = PIX_FMT_BGR24;
- } else if ( image->red_mask == 0x0000ff &&
- image->green_mask == 0x00ff00 &&
- image->blue_mask == 0xff0000 ) {
- input_pixfmt = PIX_FMT_RGB24;
- } else {
- av_log(s1, AV_LOG_ERROR, "RGB ordering at image depth %i not supported ... aborting\n", image->bits_per_pixel);
- av_log(s1, AV_LOG_ERROR, "color masks: r 0x%.6lx g 0x%.6lx b 0x%.6lx\n", image->red_mask, image->green_mask, image->blue_mask);
- return AVERROR(EIO);
- }
- break;
- case 32:
-#if 0
- GetColorInfo (image, &c_info);
- if ( c_info.alpha_mask == 0xff000000 && image->green_mask == 0x0000ff00) {
- /* Byte order is relevant here, not endianness: endianness is
- * handled by avcodec, but there is currently no notion of ABGR
- * (as opposed to ARGB) in a word. Since we need this for
- * Solaris/SPARC and the conversion would be needed for every
- * frame, we do it outside of this loop, cf. below.
- * This matches both ARGB32 and ABGR32. */
- input_pixfmt = PIX_FMT_ARGB32;
- } else {
- av_log(s1, AV_LOG_ERROR,"image depth %i not supported ... aborting\n", image->bits_per_pixel);
- return AVERROR(EIO);
- }
-#endif
- input_pixfmt = PIX_FMT_RGB32;
- break;
- default:
- av_log(s1, AV_LOG_ERROR, "image depth %i not supported ... aborting\n", image->bits_per_pixel);
- return -1;
- }
-
- x11grab->frame_size = ap->width * ap->height * image->bits_per_pixel/8;
- x11grab->dpy = dpy;
- x11grab->width = ap->width;
- x11grab->height = ap->height;
- x11grab->time_base = ap->time_base;
- x11grab->time_frame = av_gettime() / av_q2d(ap->time_base);
- x11grab->x_off = x_off;
- x11grab->y_off = y_off;
- x11grab->image = image;
- x11grab->use_shm = use_shm;
- x11grab->mouse_warning_shown = 0;
-
- st->codec->codec_type = CODEC_TYPE_VIDEO;
- st->codec->codec_id = CODEC_ID_RAWVIDEO;
- st->codec->width = ap->width;
- st->codec->height = ap->height;
- st->codec->pix_fmt = input_pixfmt;
- st->codec->time_base = ap->time_base;
- st->codec->bit_rate = x11grab->frame_size * 1/av_q2d(ap->time_base) * 8;
-
- return 0;
-}
-
-/**
- * Get pointer coordinates from X11.
- *
- * @param x Integer where horizontal coordinate will be returned
- * @param y Integer where vertical coordinate will be returned
- * @param dpy X11 display from where pointer coordinates are retrieved
- * @param s1 Context used for logging errors if necessary
- */
-static void
-get_pointer_coordinates(int *x, int *y, Display *dpy, AVFormatContext *s1)
-{
- Window mrootwindow, childwindow;
- int dummy;
-
- mrootwindow = DefaultRootWindow(dpy);
-
- if (XQueryPointer(dpy, mrootwindow, &mrootwindow, &childwindow,
- x, y, &dummy, &dummy, (unsigned int*)&dummy)) {
- } else {
- x11_grab_t *s = s1->priv_data;
- if (!s->mouse_warning_shown) {
- av_log(s1, AV_LOG_INFO, "couldn't find mouse pointer\n");
- s->mouse_warning_shown = 1;
- }
- *x = -1;
- *y = -1;
- }
-}
-
-/**
- * Mouse painting helper function that applies an 'and' and 'or' mask pair to
- * the '*dst' pixel. It draws a single mouse pointer pixel into the grabbed frame.
- *
- * @param dst Destination pixel
- * @param and Part of the mask that must be applied using a bitwise 'and'
- * operator
- * @param or Part of the mask that must be applied using a bitwise 'or'
- * operator
- * @param bits_per_pixel Bits per pixel used in the grabbed image
- */
-static inline void
-apply_masks(uint8_t *dst, int and, int or, int bits_per_pixel)
-{
- switch (bits_per_pixel) {
- case 32:
- *(uint32_t*)dst = (*(uint32_t*)dst & and) | or;
- break;
- case 16:
- *(uint16_t*)dst = (*(uint16_t*)dst & and) | or;
- break;
- case 8:
- *dst = !!or;
- break;
- }
-}
-
-/**
- * Paints a mouse pointer in an X11 image.
- *
- * @param image Image where to paint the mouse pointer
- * @param s context used to retrieve original grabbing rectangle
- * coordinates
- * @param x Mouse pointer coordinate
- * @param y Mouse pointer coordinate
- */
-static void
-paint_mouse_pointer(XImage *image, x11_grab_t *s, int x, int y)
-{
- /* 16x20x1bpp bitmap for the black channel of the mouse pointer */
- static const uint16_t mousePointerBlack[] =
- {
- 0x0000, 0x0003, 0x0005, 0x0009, 0x0011,
- 0x0021, 0x0041, 0x0081, 0x0101, 0x0201,
- 0x03c1, 0x0049, 0x0095, 0x0093, 0x0120,
- 0x0120, 0x0240, 0x0240, 0x0380, 0x0000
- };
-
- /* 16x20x1bpp bitmap for the white channel of the mouse pointer */
- static const uint16_t mousePointerWhite[] =
- {
- 0x0000, 0x0000, 0x0002, 0x0006, 0x000e,
- 0x001e, 0x003e, 0x007e, 0x00fe, 0x01fe,
- 0x003e, 0x0036, 0x0062, 0x0060, 0x00c0,
- 0x00c0, 0x0180, 0x0180, 0x0000, 0x0000
- };
-
- int x_off = s->x_off;
- int y_off = s->y_off;
- int width = s->width;
- int height = s->height;
-
- if ( x - x_off >= 0 && x < width + x_off
- && y - y_off >= 0 && y < height + y_off) {
- uint8_t *im_data = (uint8_t*)image->data;
- int bytes_per_pixel;
- int line;
- int masks;
-
- /* Select correct masks and pixel size */
- if (image->bits_per_pixel == 8) {
- masks = 1;
- } else {
- masks = (image->red_mask|image->green_mask|image->blue_mask);
- }
- bytes_per_pixel = image->bits_per_pixel>>3;
-
- /* Shift to right line */
- im_data += image->bytes_per_line * (y - y_off);
- /* Shift to right pixel in the line */
- im_data += bytes_per_pixel * (x - x_off);
-
- /* Draw the cursor - proper loop */
- for (line = 0; line < FFMIN(20, (y_off + height) - y); line++) {
- uint8_t *cursor = im_data;
- int column;
- uint16_t bm_b;
- uint16_t bm_w;
-
- bm_b = mousePointerBlack[line];
- bm_w = mousePointerWhite[line];
-
- for (column = 0; column < FFMIN(16, (x_off + width) - x); column++) {
- apply_masks(cursor, ~(masks*(bm_b&1)), masks*(bm_w&1),
- image->bits_per_pixel);
- cursor += bytes_per_pixel;
- bm_b >>= 1;
- bm_w >>= 1;
- }
- im_data += image->bytes_per_line;
- }
- }
-}
-
-
-/**
- * Reads new data in the image structure.
- *
- * @param dpy X11 display to grab from
- * @param d Drawable to grab from
- * @param image Image where the grab will be put
- * @param x Top-Left grabbing rectangle horizontal coordinate
- * @param y Top-Left grabbing rectangle vertical coordinate
- * @return 0 if error, !0 if successful
- */
-static int
-xget_zpixmap(Display *dpy, Drawable d, XImage *image, int x, int y)
-{
- xGetImageReply rep;
- xGetImageReq *req;
- long nbytes;
-
- if (!image) {
- return 0;
- }
-
- LockDisplay(dpy);
- GetReq(GetImage, req);
-
- /* First set up the standard stuff in the request */
- req->drawable = d;
- req->x = x;
- req->y = y;
- req->width = image->width;
- req->height = image->height;
- req->planeMask = (unsigned int)AllPlanes;
- req->format = ZPixmap;
-
- if (!_XReply(dpy, (xReply *)&rep, 0, xFalse) || !rep.length) {
- UnlockDisplay(dpy);
- SyncHandle();
- return 0;
- }
-
- nbytes = (long)rep.length << 2;
- _XReadPad(dpy, image->data, nbytes);
-
- UnlockDisplay(dpy);
- SyncHandle();
- return 1;
-}
-
-/**
- * Grabs a frame from x11 (public device demuxer API).
- *
- * @param s1 Context from avformat core
- * @param pkt Packet holding the grabbed frame
- * @return frame size in bytes
- */
-static int
-x11grab_read_packet(AVFormatContext *s1, AVPacket *pkt)
-{
- x11_grab_t *s = s1->priv_data;
- Display *dpy = s->dpy;
- XImage *image = s->image;
- int x_off = s->x_off;
- int y_off = s->y_off;
-
- int64_t curtime, delay;
- struct timespec ts;
-
- /* Calculate the time of the next frame */
- s->time_frame += INT64_C(1000000);
-
- /* wait based on the frame rate */
- for(;;) {
- curtime = av_gettime();
- delay = s->time_frame * av_q2d(s->time_base) - curtime;
- if (delay <= 0) {
- if (delay < INT64_C(-1000000) * av_q2d(s->time_base)) {
- s->time_frame += INT64_C(1000000);
- }
- break;
- }
- ts.tv_sec = delay / 1000000;
- ts.tv_nsec = (delay % 1000000) * 1000;
- nanosleep(&ts, NULL);
- }
-
- if (av_new_packet(pkt, s->frame_size) < 0) {
- return AVERROR(EIO);
- }
-
- pkt->pts = curtime;
-
- if(s->use_shm) {
- if (!XShmGetImage(dpy, RootWindow(dpy, DefaultScreen(dpy)), image, x_off, y_off, AllPlanes)) {
- av_log (s1, AV_LOG_INFO, "XShmGetImage() failed\n");
- }
- } else {
- if (!xget_zpixmap(dpy, RootWindow(dpy, DefaultScreen(dpy)), image, x_off, y_off)) {
- av_log (s1, AV_LOG_INFO, "XGetZPixmap() failed\n");
- }
- }
-
- {
- int pointer_x, pointer_y;
- get_pointer_coordinates(&pointer_x, &pointer_y, dpy, s1);
- paint_mouse_pointer(image, s, pointer_x, pointer_y);
- }
-
-
- /* XXX: avoid memcpy */
- memcpy(pkt->data, image->data, s->frame_size);
- return s->frame_size;
-}
-
-/**
- * Closes x11 frame grabber (public device demuxer API).
- *
- * @param s1 Context from avformat core
- * @return 0 success, !0 failure
- */
-static int
-x11grab_read_close(AVFormatContext *s1)
-{
- x11_grab_t *x11grab = s1->priv_data;
-
- /* Detach cleanly from shared mem */
- if (x11grab->use_shm) {
- XShmDetach(x11grab->dpy, &x11grab->shminfo);
- shmdt(x11grab->shminfo.shmaddr);
- shmctl(x11grab->shminfo.shmid, IPC_RMID, NULL);
- }
-
- /* Destroy X11 image */
- if (x11grab->image) {
- XDestroyImage(x11grab->image);
- x11grab->image = NULL;
- }
-
- /* Free X11 display */
- XCloseDisplay(x11grab->dpy);
- return 0;
-}
-
-/** x11 grabber device demuxer declaration */
-AVInputFormat x11_grab_device_demuxer =
-{
- "x11grab",
- "X11grab",
- sizeof(x11_grab_t),
- NULL,
- x11grab_read_header,
- x11grab_read_packet,
- x11grab_read_close,
- .flags = AVFMT_NOFILE,
-};