Diffstat (limited to 'libavformat')
-rw-r--r--  libavformat/4xm.c            |   8
-rw-r--r--  libavformat/Makefile         |  12
-rw-r--r--  libavformat/allformats.c     |   4
-rw-r--r--  libavformat/amr.c            |  38
-rw-r--r--  libavformat/asf-enc.c        |  36
-rw-r--r--  libavformat/asf.c            |  30
-rw-r--r--  libavformat/asf.h            |   2
-rw-r--r--  libavformat/au.c             |   6
-rw-r--r--  libavformat/audio.c          |   8
-rw-r--r--  libavformat/avformat.h       |  56
-rw-r--r--  libavformat/avi.h            |   2
-rw-r--r--  libavformat/avidec.c         |  82
-rw-r--r--  libavformat/avienc.c         | 142
-rw-r--r--  libavformat/avio.c           |  14
-rw-r--r--  libavformat/avio.h           |   2
-rw-r--r--  libavformat/aviobuf.c        |  62
-rw-r--r--  libavformat/barpainet.c      |   6
-rw-r--r--  libavformat/crc.c            |   2
-rw-r--r--  libavformat/cutils.c         |  12
-rw-r--r--  libavformat/dc1394.c         |  32
-rw-r--r--  libavformat/dv.c             | 196
-rw-r--r--  libavformat/dv.h             |   4
-rw-r--r--  libavformat/dv1394.c         |  14
-rw-r--r--  libavformat/dv1394.h         |  34
-rw-r--r--  libavformat/electronicarts.c |   4
-rw-r--r--  libavformat/ffm.c            |  14
-rw-r--r--  libavformat/file.c           |   2
-rw-r--r--  libavformat/flic.c           |   6
-rw-r--r--  libavformat/flvdec.c         |  18
-rw-r--r--  libavformat/flvenc.c         |  12
-rw-r--r--  libavformat/framehook.h      |   2
-rw-r--r--  libavformat/gif.c            |  32
-rw-r--r--  libavformat/gifdec.c         |  24
-rw-r--r--  libavformat/grab.c           |  32
-rw-r--r--  libavformat/grab_bktr.c      |   4
-rw-r--r--  libavformat/http.c           |  26
-rw-r--r--  libavformat/idcin.c          |   2
-rw-r--r--  libavformat/idroq.c          |  14
-rw-r--r--  libavformat/img.c            |  24
-rw-r--r--  libavformat/img2.c           |  32
-rw-r--r--  libavformat/ipmovie.c        |   8
-rw-r--r--  libavformat/jpeg.c           |  12
-rw-r--r--  libavformat/matroska.c       |   8
-rw-r--r--  libavformat/mmf.c            |   8
-rw-r--r--  libavformat/mov.c            | 150
-rw-r--r--  libavformat/movenc.c         |  72
-rw-r--r--  libavformat/mp3.c            |  12
-rw-r--r--  libavformat/mpeg.c           | 174
-rw-r--r--  libavformat/mpegts.c         |  94
-rw-r--r--  libavformat/mpegts.h         |   2
-rw-r--r--  libavformat/mpegtsenc.c      |  58
-rw-r--r--  libavformat/nsvdec.c         |  80
-rw-r--r--  libavformat/nut.c            | 156
-rw-r--r--  libavformat/ogg.c            |  38
-rw-r--r--  libavformat/ogg2.c           |   6
-rw-r--r--  libavformat/oggparseflac.c   |   6
-rw-r--r--  libavformat/oggparsetheora.c |   2
-rw-r--r--  libavformat/oggparsevorbis.c |   4
-rw-r--r--  libavformat/os_support.c     |   2
-rw-r--r--  libavformat/png.c            |  74
-rw-r--r--  libavformat/pnm.c            |  42
-rw-r--r--  libavformat/psxstr.c         |  16
-rw-r--r--  libavformat/qtpalette.h      |   4
-rw-r--r--  libavformat/raw.c            |  48
-rw-r--r--  libavformat/rm.c             |  84
-rw-r--r--  libavformat/rtp.c            |  44
-rw-r--r--  libavformat/rtp.h            |   2
-rw-r--r--  libavformat/rtpproto.c       |  20
-rw-r--r--  libavformat/rtsp.c           | 104
-rw-r--r--  libavformat/rtsp.h           |   4
-rw-r--r--  libavformat/segafilm.c       |  12
-rw-r--r--  libavformat/sgi.c            |  70
-rw-r--r--  libavformat/sierravmd.c      |  16
-rw-r--r--  libavformat/sol.c            |  14
-rw-r--r--  libavformat/swf.c            | 112
-rw-r--r--  libavformat/tcp.c            |  12
-rw-r--r--  libavformat/udp.c            |  46
-rw-r--r--  libavformat/utils.c          | 302
-rw-r--r--  libavformat/wav.c            |  16
-rw-r--r--  libavformat/wc3movie.c       |  42
-rw-r--r--  libavformat/westwood.c       |   4
-rw-r--r--  libavformat/yuv.c            |  18
-rw-r--r--  libavformat/yuv4mpeg.c       |  38
83 files changed, 1544 insertions(+), 1544 deletions(-)
diff --git a/libavformat/4xm.c b/libavformat/4xm.c
index 6826511fcb..9ff496e7b3 100644
--- a/libavformat/4xm.c
+++ b/libavformat/4xm.c
@@ -163,7 +163,7 @@ static int fourxm_read_header(AVFormatContext *s,
fourxm->track_count = current_track + 1;
if((unsigned)fourxm->track_count >= UINT_MAX / sizeof(AudioTrack))
return -1;
- fourxm->tracks = av_realloc(fourxm->tracks,
+ fourxm->tracks = av_realloc(fourxm->tracks,
fourxm->track_count * sizeof(AudioTrack));
if (!fourxm->tracks) {
av_free(header);
@@ -277,7 +277,7 @@ static int fourxm_read_packet(AVFormatContext *s,
ret= av_get_packet(&s->pb, pkt, size);
if(ret<0)
return AVERROR_IO;
- pkt->stream_index =
+ pkt->stream_index =
fourxm->tracks[fourxm->selected_track].stream_index;
pkt->pts = fourxm->audio_pts;
packet_read = 1;
@@ -285,13 +285,13 @@ static int fourxm_read_packet(AVFormatContext *s,
/* pts accounting */
audio_frame_count = size;
if (fourxm->tracks[fourxm->selected_track].adpcm)
- audio_frame_count -=
+ audio_frame_count -=
2 * (fourxm->tracks[fourxm->selected_track].channels);
audio_frame_count /=
fourxm->tracks[fourxm->selected_track].channels;
if (fourxm->tracks[fourxm->selected_track].adpcm)
audio_frame_count *= 2;
- else
+ else
audio_frame_count /=
(fourxm->tracks[fourxm->selected_track].bits / 8);
fourxm->audio_pts += audio_frame_count;
diff --git a/libavformat/Makefile b/libavformat/Makefile
index 378c895db7..41927f7035 100644
--- a/libavformat/Makefile
+++ b/libavformat/Makefile
@@ -33,8 +33,8 @@ OBJS+= $(AMROBJS)
# image formats
OBJS+= pnm.o yuv.o png.o jpeg.o gifdec.o sgi.o
# file I/O
-OBJS+= avio.o aviobuf.o file.o
-OBJS+= framehook.o
+OBJS+= avio.o aviobuf.o file.o
+OBJS+= framehook.o
ifeq ($(CONFIG_VIDEO4LINUX),yes)
OBJS+= grab.o
@@ -53,7 +53,7 @@ OBJS+= dc1394.o
endif
ifeq ($(CONFIG_AUDIO_OSS),yes)
-OBJS+= audio.o
+OBJS+= audio.o
endif
EXTRALIBS += -L../libavutil -lavutil$(BUILDSUF)
@@ -135,13 +135,13 @@ install-headers:
install -m 644 ../libavformat.pc "$(libdir)/pkgconfig"
%.o: %.c
- $(CC) $(CFLAGS) $(LIBOBJFLAGS) -c -o $@ $<
+ $(CC) $(CFLAGS) $(LIBOBJFLAGS) -c -o $@ $<
# BeOS: remove -Wall to get rid of all the "multibyte constant" warnings
%.o: %.cpp
- g++ $(subst -Wall,,$(CFLAGS)) -c -o $@ $<
+ g++ $(subst -Wall,,$(CFLAGS)) -c -o $@ $<
-distclean clean:
+distclean clean:
rm -f *.o *.d .depend *~ *.a *$(SLIBSUF) $(LIB)
#
diff --git a/libavformat/allformats.c b/libavformat/allformats.c
index 6603a43d4d..a8c64532d5 100644
--- a/libavformat/allformats.c
+++ b/libavformat/allformats.c
@@ -27,7 +27,7 @@
void av_register_all(void)
{
static int inited = 0;
-
+
if (inited != 0)
return;
inited = 1;
@@ -126,7 +126,7 @@ void av_register_all(void)
#endif
av_register_image_format(&jpeg_image_format);
#endif
- av_register_image_format(&gif_image_format);
+ av_register_image_format(&gif_image_format);
// av_register_image_format(&sgi_image_format); heap corruption, dont enable
#endif //CONFIG_MUXERS
diff --git a/libavformat/amr.c b/libavformat/amr.c
index cbe8695330..b6eaeb6ffc 100644
--- a/libavformat/amr.c
+++ b/libavformat/amr.c
@@ -1,4 +1,4 @@
-/*
+/*
* amr file format
* Copyright (c) 2001 ffmpeg project
*
@@ -65,8 +65,8 @@ static int amr_write_trailer(AVFormatContext *s)
static int amr_probe(AVProbeData *p)
{
- //Only check for "#!AMR" which could be amr-wb, amr-nb.
- //This will also trigger multichannel files: "#!AMR_MC1.0\n" and
+ //Only check for "#!AMR" which could be amr-wb, amr-nb.
+ //This will also trigger multichannel files: "#!AMR_MC1.0\n" and
//"#!AMR-WB_MC1.0\n" (not supported)
if (p->buf_size < 5)
@@ -99,7 +99,7 @@ static int amr_read_header(AVFormatContext *s,
{
return AVERROR_NOMEM;
}
-
+
st->codec->codec_type = CODEC_TYPE_AUDIO;
st->codec->codec_tag = MKTAG('s', 'a', 'w', 'b');
st->codec->codec_id = CODEC_ID_AMR_WB;
@@ -113,7 +113,7 @@ static int amr_read_header(AVFormatContext *s,
{
return AVERROR_NOMEM;
}
-
+
st->codec->codec_type = CODEC_TYPE_AUDIO;
st->codec->codec_tag = MKTAG('s', 'a', 'm', 'r');
st->codec->codec_id = CODEC_ID_AMR_NB;
@@ -137,18 +137,18 @@ static int amr_read_packet(AVFormatContext *s,
uint8_t toc, q, ft;
int read;
int size;
-
+
if (url_feof(&s->pb))
{
return AVERROR_IO;
}
-
+
toc=get_byte(&s->pb);
q = (toc >> 2) & 0x01;
ft = (toc >> 3) & 0x0F;
-
+
size=packed_size[ft];
-
+
if (av_new_packet(pkt, size+1))
{
return AVERROR_IO;
@@ -156,15 +156,15 @@ static int amr_read_packet(AVFormatContext *s,
pkt->stream_index = 0;
pkt->pos= url_ftell(&s->pb);
pkt->data[0]=toc;
-
+
read = get_buffer(&s->pb, pkt->data+1, size);
-
+
if (read != size)
{
av_free_packet(pkt);
return AVERROR_IO;
}
-
+
return 0;
}
else if(enc->codec_id == CODEC_ID_AMR_WB)
@@ -173,33 +173,33 @@ static int amr_read_packet(AVFormatContext *s,
uint8_t toc, mode;
int read;
int size;
-
+
if (url_feof(&s->pb))
{
return AVERROR_IO;
}
-
+
toc=get_byte(&s->pb);
mode = (uint8_t)((toc >> 3) & 0x0F);
size = packed_size[mode];
-
+
if ( (size==0) || av_new_packet(pkt, size))
{
return AVERROR_IO;
}
-
+
pkt->stream_index = 0;
pkt->pos= url_ftell(&s->pb);
pkt->data[0]=toc;
-
+
read = get_buffer(&s->pb, pkt->data+1, size-1);
-
+
if (read != (size-1))
{
av_free_packet(pkt);
return AVERROR_IO;
}
-
+
return 0;
}
else
diff --git a/libavformat/asf-enc.c b/libavformat/asf-enc.c
index adbf89c88e..d2dd03d8b8 100644
--- a/libavformat/asf-enc.c
+++ b/libavformat/asf-enc.c
@@ -164,7 +164,7 @@
ASF_PAYLOAD_REPLICATED_DATA_LENGTH_FIELD_SIZE + \
ASF_PAYLOAD_REPLICATED_DATA_LENGTH \
)
-
+
#define PAYLOAD_HEADER_SIZE_MULTIPLE_PAYLOADS (\
1 + /*Stream Number*/ \
ASF_PAYLOAD_MEDIA_OBJECT_NUMBER_FIELD_SIZE + \
@@ -357,7 +357,7 @@ static int asf_write_header1(AVFormatContext *s, int64_t file_size, int64_t data
asf->streams[n].num = n + 1;
asf->streams[n].seq = 0;
-
+
if (enc->codec_type == CODEC_TYPE_AUDIO) {
if (enc->codec_id == CODEC_ID_ADPCM_G726) {
er_spr = (uint8_t *)error_spread_ADPCM_G726;
@@ -443,8 +443,8 @@ static int asf_write_header1(AVFormatContext *s, int64_t file_size, int64_t data
put_le16(pb, asf->streams[n].num);
put_str16(pb, p ? p->name : enc->codec_name);
put_le16(pb, 0); /* no parameters */
-
-
+
+
/* id */
if (enc->codec_type == CODEC_TYPE_AUDIO) {
put_le16(pb, 2);
@@ -500,7 +500,7 @@ static int asf_write_header(AVFormatContext *s)
asf->packet_size = PACKET_SIZE;
asf->nb_packets = 0;
-
+
asf->last_indexed_pts = 0;
asf->index_ptr = (ASFIndex*)av_malloc( sizeof(ASFIndex) * ASF_INDEX_BLOCK );
asf->nb_index_memory_alloc = ASF_INDEX_BLOCK;
@@ -535,9 +535,9 @@ static int asf_write_stream_header(AVFormatContext *s)
static int put_payload_parsing_info(
AVFormatContext *s,
- unsigned int sendtime,
+ unsigned int sendtime,
unsigned int duration,
- int nb_payloads,
+ int nb_payloads,
int padsize
)
{
@@ -547,7 +547,7 @@ static int put_payload_parsing_info(
unsigned char *start_ppi_ptr = pb->buf_ptr;
int iLengthTypeFlags = ASF_PPI_LENGTH_TYPE_FLAGS;
-
+
put_byte(pb, ASF_PACKET_ERROR_CORRECTION_FLAGS);
for (i = 0; i < ASF_PACKET_ERROR_CORRECTION_DATA_SIZE; i++){
put_byte(pb, 0x0);
@@ -626,25 +626,25 @@ static void put_payload_header(
ASFContext *asf = s->priv_data;
ByteIOContext *pb = &asf->pb;
int val;
-
+
val = stream->num;
if (flags & PKT_FLAG_KEY)
val |= ASF_PL_FLAG_KEY_FRAME;
put_byte(pb, val);
-
+
put_byte(pb, stream->seq); //Media object number
put_le32(pb, m_obj_offset); //Offset Into Media Object
-
+
// Replicated Data shall be at least 8 bytes long.
- // The first 4 bytes of data shall contain the
+ // The first 4 bytes of data shall contain the
// Size of the Media Object that the payload belongs to.
- // The next 4 bytes of data shall contain the
+ // The next 4 bytes of data shall contain the
// Presentation Time for the media object that the payload belongs to.
put_byte(pb, ASF_PAYLOAD_REPLICATED_DATA_LENGTH);
put_le32(pb, m_obj_size); //Replicated Data - Media Object Size
put_le32(pb, presentation_time);//Replicated Data - Presentation Time
-
+
if (asf->multi_payloads_present){
put_le16(pb, payload_len); //payload length
}
@@ -667,7 +667,7 @@ static void put_frame(
payload_len = m_obj_size - m_obj_offset;
if (asf->packet_timestamp_start == -1) {
asf->multi_payloads_present = (payload_len < MULTI_PAYLOAD_CONSTANT);
-
+
if (asf->multi_payloads_present){
asf->packet_size_left = PACKET_SIZE; //For debug
asf->packet_size_left = PACKET_SIZE - PACKET_HEADER_MIN_SIZE - 1;
@@ -696,7 +696,7 @@ static void put_frame(
payload_len = frag_len1;
else if (payload_len == (frag_len1 - 1))
payload_len = frag_len1 - 2; //additional byte need to put padding length
-
+
put_payload_header(s, stream, timestamp+preroll_time, m_obj_size, m_obj_offset, payload_len, flags);
put_buffer(&asf->pb, buf, payload_len);
@@ -705,7 +705,7 @@ static void put_frame(
else
asf->packet_size_left -= (payload_len + PAYLOAD_HEADER_SIZE_SINGLE_PAYLOAD);
asf->packet_timestamp_end = timestamp;
-
+
asf->packet_nb_payloads++;
} else {
payload_len = 0;
@@ -727,7 +727,7 @@ static int asf_write_packet(AVFormatContext *s, AVPacket *pkt)
ASFStream *stream;
int64_t duration;
AVCodecContext *codec;
- int64_t packet_st,pts;
+ int64_t packet_st,pts;
int start_sec,i;
codec = s->streams[pkt->stream_index]->codec;
diff --git a/libavformat/asf.c b/libavformat/asf.c
index 7867713e1e..5822e5e123 100644
--- a/libavformat/asf.c
+++ b/libavformat/asf.c
@@ -25,7 +25,7 @@
#include <assert.h>
#define FRAME_HEADER_SIZE 17
-// Fix Me! FRAME_HEADER_SIZE may be different.
+// Fix Me! FRAME_HEADER_SIZE may be different.
static const GUID index_guid = {
0x33000890, 0xe5b1, 0x11cf, { 0x89, 0xf4, 0x00, 0xa0, 0xc9, 0x03, 0x49, 0xcb },
@@ -198,7 +198,7 @@ static int asf_read_header(AVFormatContext *s, AVFormatParameters *ap)
goto fail;
st->priv_data = asf_st;
st->start_time = asf->hdr.preroll;
- st->duration = asf->hdr.send_time /
+ st->duration = asf->hdr.send_time /
(10000000 / 1000) - st->start_time;
get_guid(pb, &g);
if (!memcmp(&g, &audio_stream, sizeof(GUID))) {
@@ -424,9 +424,9 @@ static int asf_get_packet(AVFormatContext *s)
uint32_t packet_length, padsize;
int rsize = 9;
int c;
-
+
assert((url_ftell(&s->pb) - s->data_offset) % asf->packet_size == 0);
-
+
c = get_byte(pb);
if (c != 0x82) {
if (!url_feof(pb))
@@ -607,12 +607,12 @@ static int asf_read_packet(AVFormatContext *s, AVPacket *pkt)
asf_st->seq = asf->packet_seq;
asf_st->pkt.pts = asf->packet_frag_timestamp;
asf_st->pkt.stream_index = asf->stream_index;
- asf_st->pkt.pos =
- asf_st->packet_pos= asf->packet_pos;
-//printf("new packet: stream:%d key:%d packet_key:%d audio:%d size:%d\n",
+ asf_st->pkt.pos =
+ asf_st->packet_pos= asf->packet_pos;
+//printf("new packet: stream:%d key:%d packet_key:%d audio:%d size:%d\n",
//asf->stream_index, asf->packet_key_frame, asf_st->pkt.flags & PKT_FLAG_KEY,
//s->streams[asf->stream_index]->codec->codec_type == CODEC_TYPE_AUDIO, asf->packet_obj_size);
- if (s->streams[asf->stream_index]->codec->codec_type == CODEC_TYPE_AUDIO)
+ if (s->streams[asf->stream_index]->codec->codec_type == CODEC_TYPE_AUDIO)
asf->packet_key_frame = 1;
if (asf->packet_key_frame)
asf_st->pkt.flags |= PKT_FLAG_KEY;
@@ -705,7 +705,7 @@ static void asf_reset_header(AVFormatContext *s)
asf->packet_obj_size = 0;
asf->packet_time_delta = 0;
asf->packet_time_start = 0;
-
+
for(i=0; i<s->nb_streams; i++){
asf_st= s->streams[i]->priv_data;
av_free_packet(&asf_st->pkt);
@@ -724,15 +724,15 @@ static int64_t asf_read_pts(AVFormatContext *s, int stream_index, int64_t *ppos,
int64_t pos= *ppos;
int i;
int64_t start_pos[s->nb_streams];
-
+
for(i=0; i<s->nb_streams; i++){
start_pos[i]= pos;
}
-
+
pos= (pos+asf->packet_size-1-s->data_offset)/asf->packet_size*asf->packet_size+ s->data_offset;
*ppos= pos;
url_fseek(&s->pb, pos, SEEK_SET);
-
+
//printf("asf_read_pts\n");
asf_reset_header(s);
for(;;){
@@ -740,7 +740,7 @@ static int64_t asf_read_pts(AVFormatContext *s, int stream_index, int64_t *ppos,
av_log(s, AV_LOG_INFO, "seek failed\n");
return AV_NOPTS_VALUE;
}
-
+
pts= pkt->pts * 1000 / AV_TIME_BASE;
av_free_packet(pkt);
@@ -754,7 +754,7 @@ static int64_t asf_read_pts(AVFormatContext *s, int stream_index, int64_t *ppos,
av_add_index_entry(s->streams[i], pos, pts, pos - start_pos[i] + 1, AVINDEX_KEYFRAME);
start_pos[i]= asf_st->packet_pos + 1;
-
+
if(pkt->stream_index == stream_index)
break;
}
@@ -769,7 +769,7 @@ static int64_t asf_read_pts(AVFormatContext *s, int stream_index, int64_t *ppos,
static int asf_read_seek(AVFormatContext *s, int stream_index, int64_t pts, int flags)
{
ASFContext *asf = s->priv_data;
-
+
if (asf->packet_size <= 0)
return -1;
diff --git a/libavformat/asf.h b/libavformat/asf.h
index 2cf976fa2c..58b5349898 100644
--- a/libavformat/asf.h
+++ b/libavformat/asf.h
@@ -31,7 +31,7 @@ typedef struct {
int ds_chunk_size;
int ds_data_size;
int ds_silence_data;
-
+
int packet_pos;
} ASFStream;
diff --git a/libavformat/au.c b/libavformat/au.c
index 055c59a22b..fbf4e98383 100644
--- a/libavformat/au.c
+++ b/libavformat/au.c
@@ -1,4 +1,4 @@
-/*
+/*
* AU encoder and decoder
* Copyright (c) 2001 Fabrice Bellard.
*
@@ -127,11 +127,11 @@ static int au_read_header(AVFormatContext *s,
return -1;
size = get_be32(pb); /* header size */
get_be32(pb); /* data size */
-
+
id = get_be32(pb);
rate = get_be32(pb);
channels = get_be32(pb);
-
+
codec = codec_get_id(codec_au_tags, id);
if (size >= 24) {
diff --git a/libavformat/audio.c b/libavformat/audio.c
index 054ced2266..826554bd02 100644
--- a/libavformat/audio.c
+++ b/libavformat/audio.c
@@ -87,7 +87,7 @@ static int audio_open(AudioData *s, int is_output, const char *audio_device)
/* select format : favour native format */
err = ioctl(audio_fd, SNDCTL_DSP_GETFMTS, &tmp);
-
+
#ifdef WORDS_BIGENDIAN
if (tmp & AFMT_S16_BE) {
tmp = AFMT_S16_BE;
@@ -123,7 +123,7 @@ static int audio_open(AudioData *s, int is_output, const char *audio_device)
perror("SNDCTL_DSP_SETFMT");
goto fail;
}
-
+
tmp = (s->channels == 2);
err = ioctl(audio_fd, SNDCTL_DSP_STEREO, &tmp);
if (err < 0) {
@@ -132,7 +132,7 @@ static int audio_open(AudioData *s, int is_output, const char *audio_device)
}
if (tmp)
s->channels = 2;
-
+
tmp = s->sample_rate;
err = ioctl(audio_fd, SNDCTL_DSP_SPEED, &tmp);
if (err < 0) {
@@ -249,7 +249,7 @@ static int audio_read_packet(AVFormatContext *s1, AVPacket *pkt)
int ret, bdelay;
int64_t cur_time;
struct audio_buf_info abufi;
-
+
if (av_new_packet(pkt, s->frame_size) < 0)
return AVERROR_IO;
for(;;) {
diff --git a/libavformat/avformat.h b/libavformat/avformat.h
index b3c055694e..c9bda50f24 100644
--- a/libavformat/avformat.h
+++ b/libavformat/avformat.h
@@ -38,7 +38,7 @@ typedef struct AVPacket {
void (*destruct)(struct AVPacket *);
void *priv;
int64_t pos; ///< byte position in stream, -1 if unknown
-} AVPacket;
+} AVPacket;
#define PKT_FLAG_KEY 0x0001
void av_destruct_packet_nofree(AVPacket *pkt);
@@ -78,7 +78,7 @@ static inline void av_free_packet(AVPacket *pkt)
/* the exact value of the fractional number is: 'val + num / den'. num
is assumed to be such as 0 <= num < den */
typedef struct AVFrac {
- int64_t val, num, den;
+ int64_t val, num, den;
} AVFrac;
void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den);
@@ -121,7 +121,7 @@ typedef struct AVFormatParameters {
} AVFormatParameters;
#define AVFMT_NOFILE 0x0001 /* no file should be opened */
-#define AVFMT_NEEDNUMBER 0x0002 /* needs '%d' in filename */
+#define AVFMT_NEEDNUMBER 0x0002 /* needs '%d' in filename */
#define AVFMT_SHOW_IDS 0x0008 /* show format stream IDs numbers */
#define AVFMT_RAWPICTURE 0x0020 /* format wants AVPicture structure for
raw picture data */
@@ -169,14 +169,14 @@ typedef struct AVInputFormat {
/* close the stream. The AVFormatContext and AVStreams are not
freed by this function */
int (*read_close)(struct AVFormatContext *);
- /**
- * seek to a given timestamp relative to the frames in
+ /**
+ * seek to a given timestamp relative to the frames in
* stream component stream_index
* @param stream_index must not be -1
- * @param flags selects which direction should be preferred if no exact
+ * @param flags selects which direction should be preferred if no exact
* match is available
*/
- int (*read_seek)(struct AVFormatContext *,
+ int (*read_seek)(struct AVFormatContext *,
int stream_index, int64_t timestamp, int flags);
/**
* gets the next timestamp in AV_TIME_BASE units.
@@ -219,13 +219,13 @@ typedef struct AVStream {
AVCodecContext *codec; /* codec context */
/**
* real base frame rate of the stream.
- * for example if the timebase is 1/90000 and all frames have either
+ * for example if the timebase is 1/90000 and all frames have either
* approximately 3600 or 1800 timer ticks then r_frame_rate will be 50/1
*/
AVRational r_frame_rate;
void *priv_data;
/* internal data used in av_find_stream_info() */
- int64_t codec_info_duration;
+ int64_t codec_info_duration;
int codec_info_nb_frames;
/* encoding: PTS generation when outputing stream */
AVFrac pts;
@@ -244,10 +244,10 @@ typedef struct AVStream {
//FIXME move stuff to a flags field?
/* quality, as it has been removed from AVCodecContext and put in AVVideoFrame
* MN:dunno if thats the right place, for it */
- float quality;
+ float quality;
/* decoding: position of the first frame of the component, in
AV_TIME_BASE fractional seconds. */
- int64_t start_time;
+ int64_t start_time;
/* decoding: duration of the stream, in AV_TIME_BASE fractional
seconds. */
int64_t duration;
@@ -266,7 +266,7 @@ typedef struct AVStream {
support seeking natively */
int nb_index_entries;
int index_entries_allocated_size;
-
+
int64_t nb_frames; ///< number of frames in this stream if known or 0
} AVStream;
@@ -307,7 +307,7 @@ typedef struct AVFormatContext {
/* decoding: position of the first frame of the component, in
AV_TIME_BASE fractional seconds. NEVER set this value directly:
it is deduced from the AVStream values. */
- int64_t start_time;
+ int64_t start_time;
/* decoding: duration of the stream, in AV_TIME_BASE fractional
seconds. NEVER set this value directly: it is deduced from the
AVStream values. */
@@ -328,17 +328,17 @@ typedef struct AVFormatContext {
/* av_seek_frame() support */
int64_t data_offset; /* offset of the first packet */
int index_built;
-
+
int mux_rate;
int packet_size;
int preload;
int max_delay;
-#define AVFMT_NOOUTPUTLOOP -1
-#define AVFMT_INFINITEOUTPUTLOOP 0
+#define AVFMT_NOOUTPUTLOOP -1
+#define AVFMT_INFINITEOUTPUTLOOP 0
/* number of times to loop output in formats that support it */
int loop_output;
-
+
int flags;
#define AVFMT_FLAG_GENPTS 0x0001 ///< generate pts if missing even if it requires parsing future frames
} AVFormatContext;
@@ -375,7 +375,7 @@ typedef struct AVImageFormat {
known so that the caller can allocate the image. If 'allo_cb'
returns non zero, then the parsing is aborted. Return '0' if
OK. */
- int (*img_read)(ByteIOContext *,
+ int (*img_read)(ByteIOContext *,
int (*alloc_cb)(void *, AVImageInfo *info), void *);
/* write the image */
int supported_pixel_formats; /* mask of supported formats for output */
@@ -475,7 +475,7 @@ int ff_wav_init(void);
int ff_mmf_init(void);
/* raw.c */
-int pcm_read_seek(AVFormatContext *s,
+int pcm_read_seek(AVFormatContext *s,
int stream_index, int64_t timestamp, int flags);
int raw_init(void);
@@ -559,11 +559,11 @@ extern AVOutputFormat yuv4mpegpipe_oformat;
/* utils.c */
void av_register_input_format(AVInputFormat *format);
void av_register_output_format(AVOutputFormat *format);
-AVOutputFormat *guess_stream_format(const char *short_name,
+AVOutputFormat *guess_stream_format(const char *short_name,
const char *filename, const char *mime_type);
-AVOutputFormat *guess_format(const char *short_name,
+AVOutputFormat *guess_format(const char *short_name,
const char *filename, const char *mime_type);
-enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
+enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
const char *filename, const char *mime_type, enum CodecType type);
void av_hex_dump(FILE *f, uint8_t *buf, int size);
@@ -587,10 +587,10 @@ void fifo_realloc(FifoBuffer *f, unsigned int size);
/* media file input */
AVInputFormat *av_find_input_format(const char *short_name);
AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened);
-int av_open_input_stream(AVFormatContext **ic_ptr,
- ByteIOContext *pb, const char *filename,
+int av_open_input_stream(AVFormatContext **ic_ptr,
+ ByteIOContext *pb, const char *filename,
AVInputFormat *fmt, AVFormatParameters *ap);
-int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
+int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
AVInputFormat *fmt,
int buf_size,
AVFormatParameters *ap);
@@ -604,7 +604,7 @@ AVFormatContext *av_alloc_format_context(void);
#define AVERROR_NOMEM (-5) /* not enough memory */
#define AVERROR_NOFMT (-6) /* unknown format */
#define AVERROR_NOTSUPP (-7) /* operation not supported */
-
+
int av_find_stream_info(AVFormatContext *ic);
int av_read_packet(AVFormatContext *s, AVPacket *pkt);
int av_read_frame(AVFormatContext *s, AVPacket *pkt);
@@ -635,7 +635,7 @@ int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt);
int av_write_trailer(AVFormatContext *s);
void dump_format(AVFormatContext *ic,
- int index,
+ int index,
const char *url,
int is_output);
int parse_image_size(int *width_ptr, int *height_ptr, const char *str);
@@ -692,7 +692,7 @@ do {\
time_t mktimegm(struct tm *tm);
struct tm *brktimegm(time_t secs, struct tm *tm);
-const char *small_strptime(const char *p, const char *fmt,
+const char *small_strptime(const char *p, const char *fmt,
struct tm *dt);
struct in_addr;
diff --git a/libavformat/avi.h b/libavformat/avi.h
index 0068a9af8b..9dfbf324c8 100644
--- a/libavformat/avi.h
+++ b/libavformat/avi.h
@@ -28,7 +28,7 @@ typedef struct CodecTag {
void put_bmp_header(ByteIOContext *pb, AVCodecContext *enc, const CodecTag *tags, int for_asf);
int put_wav_header(ByteIOContext *pb, AVCodecContext *enc);
int wav_codec_get_id(unsigned int tag, int bps);
-void get_wav_header(ByteIOContext *pb, AVCodecContext *codec, int size);
+void get_wav_header(ByteIOContext *pb, AVCodecContext *codec, int size);
extern const CodecTag codec_bmp_tags[];
extern const CodecTag codec_wav_tags[];
diff --git a/libavformat/avidec.c b/libavformat/avidec.c
index b08711a5f3..8ac71d9a32 100644
--- a/libavformat/avidec.c
+++ b/libavformat/avidec.c
@@ -33,12 +33,12 @@ typedef struct AVIStream {
int packet_size;
int scale;
- int rate;
+ int rate;
int sample_size; /* audio only data */
int start;
-
+
int cum_len; /* temporary storage (used during seek) */
-
+
int prefix; ///< normally 'd'<<8 + 'c' or 'w'<<8 + 'b'
int prefix_count;
} AVIStream;
@@ -70,7 +70,7 @@ static void print_tag(const char *str, unsigned int tag, int size)
static int get_riff(AVIContext *avi, ByteIOContext *pb)
{
- uint32_t tag;
+ uint32_t tag;
/* check RIFF header */
tag = get_le32(pb);
@@ -81,7 +81,7 @@ static int get_riff(AVIContext *avi, ByteIOContext *pb)
tag = get_le32(pb);
if (tag != MKTAG('A', 'V', 'I', ' ') && tag != MKTAG('A', 'V', 'I', 'X'))
return -1;
-
+
return 0;
}
@@ -98,7 +98,7 @@ static int avi_read_header(AVFormatContext *s, AVFormatParameters *ap)
int xan_video = 0; /* hack to support Xan A/V */
avi->stream_index= -1;
-
+
if (get_riff(avi, pb) < 0)
return -1;
@@ -165,18 +165,18 @@ static int avi_read_header(AVFormatContext *s, AVFormatParameters *ap)
print_tag("strh", tag1, -1);
#endif
if(tag1 == MKTAG('i', 'a', 'v', 's') || tag1 == MKTAG('i', 'v', 'a', 's')){
- /*
- * After some consideration -- I don't think we
+ /*
+ * After some consideration -- I don't think we
* have to support anything but DV in a type1 AVIs.
*/
if (s->nb_streams != 1)
goto fail;
-
+
if (handler != MKTAG('d', 'v', 's', 'd') &&
handler != MKTAG('d', 'v', 'h', 'd') &&
handler != MKTAG('d', 'v', 's', 'l'))
goto fail;
-
+
ast = s->streams[0]->priv_data;
av_freep(&s->streams[0]->codec->extradata);
av_freep(&s->streams[0]);
@@ -196,7 +196,7 @@ static int avi_read_header(AVFormatContext *s, AVFormatParameters *ap)
if (stream_index >= s->nb_streams) {
url_fskip(pb, size - 8);
break;
- }
+ }
st = s->streams[stream_index];
ast = st->priv_data;
st->codec->stream_codec_tag= handler;
@@ -216,7 +216,7 @@ static int avi_read_header(AVFormatContext *s, AVFormatParameters *ap)
ast->scale = 1;
}
av_set_pts_info(st, 64, ast->scale, ast->rate);
-
+
ast->start= get_le32(pb); /* start */
nb_frames = get_le32(pb);
@@ -237,7 +237,7 @@ static int avi_read_header(AVFormatContext *s, AVFormatParameters *ap)
codec_type = CODEC_TYPE_AUDIO;
break;
case MKTAG('t', 'x', 't', 's'):
- //FIXME
+ //FIXME
codec_type = CODEC_TYPE_DATA; //CODEC_TYPE_SUB ? FIXME
break;
case MKTAG('p', 'a', 'd', 's'):
@@ -275,7 +275,7 @@ static int avi_read_header(AVFormatContext *s, AVFormatParameters *ap)
st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
get_buffer(pb, st->codec->extradata, st->codec->extradata_size);
}
-
+
if(st->codec->extradata_size & 1) //FIXME check if the encoder really did this correctly
get_byte(pb);
@@ -347,7 +347,7 @@ static int avi_read_header(AVFormatContext *s, AVFormatParameters *ap)
assert(!avi->index_loaded);
avi_load_index(s);
avi->index_loaded = 1;
-
+
return 0;
}
@@ -358,20 +358,20 @@ static int avi_read_packet(AVFormatContext *s, AVPacket *pkt)
int n, d[8], size;
offset_t i, sync;
void* dstr;
-
+
if (avi->dv_demux) {
size = dv_get_packet(avi->dv_demux, pkt);
if (size >= 0)
return size;
}
-
+
if(avi->non_interleaved){
int best_stream_index = 0;
AVStream *best_st= NULL;
AVIStream *best_ast;
int64_t best_ts= INT64_MAX;
int i;
-
+
for(i=0; i<s->nb_streams; i++){
AVStream *st = s->streams[i];
AVIStream *ast = st->priv_data;
@@ -401,23 +401,23 @@ static int avi_read_packet(AVFormatContext *s, AVPacket *pkt)
pos += avi->movi_list + best_ast->packet_size - best_ast->remaining;
url_fseek(&s->pb, pos, SEEK_SET);
// av_log(NULL, AV_LOG_DEBUG, "pos=%Ld\n", pos);
-
+
if(best_ast->remaining)
avi->stream_index= best_stream_index;
else
avi->stream_index= -1;
}
}
-
+
resync:
if(avi->stream_index >= 0){
AVStream *st= s->streams[ avi->stream_index ];
AVIStream *ast= st->priv_data;
int size;
-
+
if(ast->sample_size == 0)
size= INT_MAX;
- else if(ast->sample_size < 32)
+ else if(ast->sample_size < 32)
size= 64*ast->sample_size;
else
size= ast->sample_size;
@@ -425,7 +425,7 @@ resync:
if(size > ast->remaining)
size= ast->remaining;
av_get_packet(pb, pkt, size);
-
+
if (avi->dv_demux) {
dstr = pkt->destruct;
size = dv_produce_packet(avi->dv_demux, pkt,
@@ -448,7 +448,7 @@ resync:
index= av_index_search_timestamp(st, pkt->dts, 0);
e= &st->index_entries[index];
-
+
if(index >= 0 && e->timestamp == ast->frame_offset){
if (e->flags & AVINDEX_KEYFRAME)
pkt->flags |= PKT_FLAG_KEY;
@@ -459,7 +459,7 @@ resync:
pkt->flags |= PKT_FLAG_KEY;
}
} else {
- pkt->flags |= PKT_FLAG_KEY;
+ pkt->flags |= PKT_FLAG_KEY;
}
if(ast->sample_size)
ast->frame_offset += pkt->size;
@@ -494,9 +494,9 @@ resync:
for(j=0; j<7; j++)
d[j]= d[j+1];
d[7]= get_byte(pb);
-
+
size= d[4] + (d[5]<<8) + (d[6]<<16) + (d[7]<<24);
-
+
if( d[2] >= '0' && d[2] <= '9'
&& d[3] >= '0' && d[3] <= '9'){
n= (d[2] - '0') * 10 + (d[3] - '0');
@@ -506,7 +506,7 @@ resync:
//av_log(NULL, AV_LOG_DEBUG, "%X %X %X %X %X %X %X %X %lld %d %d\n", d[0], d[1], d[2], d[3], d[4], d[5], d[6], d[7], i, size, n);
if(i + size > avi->movi_end || d[0]<0)
continue;
-
+
//parse ix##
if( (d[0] == 'i' && d[1] == 'x' && n < s->nb_streams)
//parse JUNK
@@ -522,14 +522,14 @@ resync:
}else{
n= 100; //invalid stream id
}
-
+
//parse ##dc/##wb
if(n < s->nb_streams){
AVStream *st;
AVIStream *ast;
st = s->streams[n];
ast = st->priv_data;
-
+
if( (st->discard >= AVDISCARD_DEFAULT && size==0)
/*|| (st->discard >= AVDISCARD_NONKEY && !(pkt->flags & PKT_FLAG_KEY))*/ //FIXME needs a little reordering
|| st->discard >= AVDISCARD_ALL){
@@ -539,9 +539,9 @@ resync:
goto resync;
}
- if( ((ast->prefix_count<5 || sync+9 > i) && d[2]<128 && d[3]<128) ||
+ if( ((ast->prefix_count<5 || sync+9 > i) && d[2]<128 && d[3]<128) ||
d[2]*256+d[3] == ast->prefix /*||
- (d[2] == 'd' && d[3] == 'c') ||
+ (d[2] == 'd' && d[3] == 'c') ||
(d[2] == 'w' && d[3] == 'b')*/) {
//av_log(NULL, AV_LOG_DEBUG, "OK\n");
@@ -603,7 +603,7 @@ static int avi_read_idx1(AVFormatContext *s, int size)
AVIStream *ast;
unsigned int index, tag, flags, pos, len;
unsigned last_pos= -1;
-
+
nb_index_entries = size / 16;
if (nb_index_entries <= 0)
return -1;
@@ -615,7 +615,7 @@ static int avi_read_idx1(AVFormatContext *s, int size)
pos = get_le32(pb);
len = get_le32(pb);
#if defined(DEBUG_SEEK)
- av_log(NULL, AV_LOG_DEBUG, "%d: tag=0x%x flags=0x%x pos=0x%x len=%d/",
+ av_log(NULL, AV_LOG_DEBUG, "%d: tag=0x%x flags=0x%x pos=0x%x len=%d/",
i, tag, flags, pos, len);
#endif
if(i==0 && pos > avi->movi_list)
@@ -627,7 +627,7 @@ static int avi_read_idx1(AVFormatContext *s, int size)
continue;
st = s->streams[index];
ast = st->priv_data;
-
+
#if defined(DEBUG_SEEK)
av_log(NULL, AV_LOG_DEBUG, "%d cum_len=%d\n", len, ast->cum_len);
#endif
@@ -648,7 +648,7 @@ static int guess_ni_flag(AVFormatContext *s){
int i;
int64_t last_start=0;
int64_t first_end= INT64_MAX;
-
+
for(i=0; i<s->nb_streams; i++){
AVStream *st = s->streams[i];
int n= st->nb_index_entries;
@@ -670,7 +670,7 @@ static int avi_load_index(AVFormatContext *s)
ByteIOContext *pb = &s->pb;
uint32_t tag, size;
offset_t pos= url_ftell(pb);
-
+
url_fseek(pb, avi->movi_end, SEEK_SET);
#ifdef DEBUG_SEEK
printf("movi_end=0x%llx\n", avi->movi_end);
@@ -726,7 +726,7 @@ static int avi_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp
index= av_index_search_timestamp(st, timestamp, flags);
if(index<0)
return -1;
-
+
/* find the position */
pos = st->index_entries[index].pos;
timestamp = st->index_entries[index].timestamp;
@@ -742,17 +742,17 @@ static int avi_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp
if (st2->nb_index_entries <= 0)
continue;
-
+
// assert(st2->codec.block_align);
assert(st2->time_base.den == ast2->rate);
assert(st2->time_base.num == ast2->scale);
index = av_index_search_timestamp(
- st2,
+ st2,
av_rescale(timestamp, st2->time_base.den*(int64_t)st->time_base.num, st->time_base.den * (int64_t)st2->time_base.num),
flags | AVSEEK_FLAG_BACKWARD);
if(index<0)
index=0;
-
+
if(!avi->non_interleaved){
while(index>0 && st2->index_entries[index].pos > pos)
index--;
diff --git a/libavformat/avienc.c b/libavformat/avienc.c
index 7014589527..d9a5274a2e 100644
--- a/libavformat/avienc.c
+++ b/libavformat/avienc.c
@@ -20,7 +20,7 @@
#include "avi.h"
/*
- * TODO:
+ * TODO:
* - fill all fields if non streamed (nb_frames for example)
*/
@@ -48,7 +48,7 @@ typedef struct {
AVIIndex indexes[MAX_STREAMS];
} AVIContext;
-static inline AVIIentry* avi_get_ientry(AVIIndex* idx, int ent_id)
+static inline AVIIentry* avi_get_ientry(AVIIndex* idx, int ent_id)
{
int cl = ent_id / AVI_INDEX_CLUSTER_SIZE;
int id = ent_id % AVI_INDEX_CLUSTER_SIZE;
@@ -108,39 +108,39 @@ const CodecTag codec_bmp_tags[] = {
{ CODEC_ID_MPEG4, MKTAG('W', 'V', '1', 'F') },
{ CODEC_ID_MSMPEG4V3, MKTAG('D', 'I', 'V', '3'), .invalid_asf = 1 }, /* default signature when using MSMPEG4 */
- { CODEC_ID_MSMPEG4V3, MKTAG('M', 'P', '4', '3') },
+ { CODEC_ID_MSMPEG4V3, MKTAG('M', 'P', '4', '3') },
/* added based on MPlayer */
- { CODEC_ID_MSMPEG4V3, MKTAG('M', 'P', 'G', '3') },
- { CODEC_ID_MSMPEG4V3, MKTAG('D', 'I', 'V', '5') },
- { CODEC_ID_MSMPEG4V3, MKTAG('D', 'I', 'V', '6') },
- { CODEC_ID_MSMPEG4V3, MKTAG('D', 'I', 'V', '4') },
- { CODEC_ID_MSMPEG4V3, MKTAG('A', 'P', '4', '1') },
- { CODEC_ID_MSMPEG4V3, MKTAG('C', 'O', 'L', '1') },
- { CODEC_ID_MSMPEG4V3, MKTAG('C', 'O', 'L', '0') },
+ { CODEC_ID_MSMPEG4V3, MKTAG('M', 'P', 'G', '3') },
+ { CODEC_ID_MSMPEG4V3, MKTAG('D', 'I', 'V', '5') },
+ { CODEC_ID_MSMPEG4V3, MKTAG('D', 'I', 'V', '6') },
+ { CODEC_ID_MSMPEG4V3, MKTAG('D', 'I', 'V', '4') },
+ { CODEC_ID_MSMPEG4V3, MKTAG('A', 'P', '4', '1') },
+ { CODEC_ID_MSMPEG4V3, MKTAG('C', 'O', 'L', '1') },
+ { CODEC_ID_MSMPEG4V3, MKTAG('C', 'O', 'L', '0') },
- { CODEC_ID_MSMPEG4V2, MKTAG('M', 'P', '4', '2') },
+ { CODEC_ID_MSMPEG4V2, MKTAG('M', 'P', '4', '2') },
/* added based on MPlayer */
{ CODEC_ID_MSMPEG4V2, MKTAG('D', 'I', 'V', '2') },
-
- { CODEC_ID_MSMPEG4V1, MKTAG('M', 'P', 'G', '4') },
- { CODEC_ID_WMV1, MKTAG('W', 'M', 'V', '1') },
+ { CODEC_ID_MSMPEG4V1, MKTAG('M', 'P', 'G', '4') },
+
+ { CODEC_ID_WMV1, MKTAG('W', 'M', 'V', '1') },
/* added based on MPlayer */
- { CODEC_ID_WMV2, MKTAG('W', 'M', 'V', '2') },
- { CODEC_ID_DVVIDEO, MKTAG('d', 'v', 's', 'd') },
- { CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'h', 'd') },
- { CODEC_ID_DVVIDEO, MKTAG('d', 'v', 's', 'l') },
+ { CODEC_ID_WMV2, MKTAG('W', 'M', 'V', '2') },
+ { CODEC_ID_DVVIDEO, MKTAG('d', 'v', 's', 'd') },
+ { CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'h', 'd') },
+ { CODEC_ID_DVVIDEO, MKTAG('d', 'v', 's', 'l') },
{ CODEC_ID_DVVIDEO, MKTAG('d', 'v', '2', '5') },
- { CODEC_ID_MPEG1VIDEO, MKTAG('m', 'p', 'g', '1') },
- { CODEC_ID_MPEG1VIDEO, MKTAG('m', 'p', 'g', '2') },
- { CODEC_ID_MPEG2VIDEO, MKTAG('m', 'p', 'g', '2') },
- { CODEC_ID_MPEG1VIDEO, MKTAG('P', 'I', 'M', '1') },
- { CODEC_ID_MPEG1VIDEO, MKTAG('V', 'C', 'R', '2') },
- { CODEC_ID_MPEG1VIDEO, 0x10000001 },
- { CODEC_ID_MPEG2VIDEO, 0x10000002 },
+ { CODEC_ID_MPEG1VIDEO, MKTAG('m', 'p', 'g', '1') },
+ { CODEC_ID_MPEG1VIDEO, MKTAG('m', 'p', 'g', '2') },
+ { CODEC_ID_MPEG2VIDEO, MKTAG('m', 'p', 'g', '2') },
+ { CODEC_ID_MPEG1VIDEO, MKTAG('P', 'I', 'M', '1') },
+ { CODEC_ID_MPEG1VIDEO, MKTAG('V', 'C', 'R', '2') },
+ { CODEC_ID_MPEG1VIDEO, 0x10000001 },
+ { CODEC_ID_MPEG2VIDEO, 0x10000002 },
{ CODEC_ID_MPEG2VIDEO, MKTAG('D', 'V', 'R', ' ') },
{ CODEC_ID_MJPEG, MKTAG('M', 'J', 'P', 'G') },
{ CODEC_ID_MJPEG, MKTAG('L', 'J', 'P', 'G') },
@@ -258,7 +258,7 @@ void put_bmp_header(ByteIOContext *pb, AVCodecContext *enc, const CodecTag *tags
put_le32(pb, enc->width);
put_le32(pb, enc->height);
put_le16(pb, 1); /* planes */
-
+
put_le16(pb, enc->bits_per_sample ? enc->bits_per_sample : 24); /* depth */
/* compression type */
put_le32(pb, for_asf ? (enc->codec_tag ? enc->codec_tag : codec_get_asf_tag(tags, enc->codec_id)) : enc->codec_tag); //
@@ -267,7 +267,7 @@ void put_bmp_header(ByteIOContext *pb, AVCodecContext *enc, const CodecTag *tags
put_le32(pb, 0);
put_le32(pb, 0);
put_le32(pb, 0);
-
+
put_buffer(pb, enc->extradata, enc->extradata_size);
if (enc->extradata_size & 1)
@@ -294,16 +294,16 @@ void ff_parse_specific_params(AVCodecContext *stream, int *au_rate, int *au_ssiz
*au_rate /= gcd;
}
-static offset_t avi_start_new_riff(AVIContext *avi, ByteIOContext *pb,
+static offset_t avi_start_new_riff(AVIContext *avi, ByteIOContext *pb,
const char* riff_tag, const char* list_tag)
{
offset_t loff;
int i;
-
+
avi->riff_id++;
for (i=0; i<MAX_STREAMS; i++)
avi->indexes[i].entry = 0;
-
+
avi->riff_start = start_tag(pb, "RIFF");
put_tag(pb, riff_tag);
loff = start_tag(pb, "LIST");
@@ -311,7 +311,7 @@ static offset_t avi_start_new_riff(AVIContext *avi, ByteIOContext *pb,
return loff;
}
-static unsigned char* avi_stream2fourcc(unsigned char* tag, int index,
+static unsigned char* avi_stream2fourcc(unsigned char* tag, int index,
enum CodecType type)
{
tag[0] = '0';
@@ -351,7 +351,7 @@ static int avi_write_header(AVFormatContext *s)
if (stream->codec_type == CODEC_TYPE_VIDEO)
video_enc = stream;
}
-
+
nb_frames = 0;
if(video_enc){
@@ -370,23 +370,23 @@ static int avi_write_header(AVFormatContext *s)
put_le32(pb, 0); /* initial frame */
put_le32(pb, s->nb_streams); /* nb streams */
put_le32(pb, 1024 * 1024); /* suggested buffer size */
- if(video_enc){
+ if(video_enc){
put_le32(pb, video_enc->width);
put_le32(pb, video_enc->height);
} else {
put_le32(pb, 0);
put_le32(pb, 0);
- }
+ }
put_le32(pb, 0); /* reserved */
put_le32(pb, 0); /* reserved */
put_le32(pb, 0); /* reserved */
put_le32(pb, 0); /* reserved */
-
+
/* stream list */
for(i=0;i<n;i++) {
list2 = start_tag(pb, "LIST");
put_tag(pb, "strl");
-
+
stream = s->streams[i]->codec;
/* FourCC should really be set by the codec itself */
@@ -423,14 +423,14 @@ static int avi_write_header(AVFormatContext *s)
put_le32(pb, AVI_MAX_RIFF_SIZE); /* FIXME: this may be broken, but who cares */
else
put_le32(pb, 0); /* length, XXX: filled later */
-
+
/* suggested buffer size */ //FIXME set at the end to largest chunk
if(stream->codec_type == CODEC_TYPE_VIDEO)
- put_le32(pb, 1024 * 1024);
+ put_le32(pb, 1024 * 1024);
else if(stream->codec_type == CODEC_TYPE_AUDIO)
- put_le32(pb, 12 * 1024);
+ put_le32(pb, 12 * 1024);
else
- put_le32(pb, 0);
+ put_le32(pb, 0);
put_le32(pb, -1); /* quality */
put_le32(pb, au_ssize); /* sample size */
put_le32(pb, 0);
@@ -455,18 +455,18 @@ static int avi_write_header(AVFormatContext *s)
}
end_tag(pb, strf);
}
-
+
if (!url_is_streamed(pb)) {
unsigned char tag[5];
int j;
-
- /* Starting to lay out AVI OpenDML master index.
+
+ /* Starting to lay out AVI OpenDML master index.
* We want to make it JUNK entry for now, since we'd
- * like to get away without making AVI an OpenDML one
+ * like to get away without making AVI an OpenDML one
* for compatibility reasons.
*/
avi->indexes[i].entry = avi->indexes[i].ents_allocated = 0;
- avi->indexes[i].indx_start = start_tag(pb, "JUNK");
+ avi->indexes[i].indx_start = start_tag(pb, "JUNK");
put_le16(pb, 4); /* wLongsPerEntry */
put_byte(pb, 0); /* bIndexSubType (0 == frame index) */
put_byte(pb, 0); /* bIndexType (0 == AVI_INDEX_OF_INDEXES) */
@@ -479,10 +479,10 @@ static int avi_write_header(AVFormatContext *s)
put_le64(pb, 0);
end_tag(pb, avi->indexes[i].indx_start);
}
-
+
end_tag(pb, list2);
}
-
+
if (!url_is_streamed(pb)) {
/* AVI could become an OpenDML one, if it grows beyond 2Gb range */
avi->odml_list = start_tag(pb, "JUNK");
@@ -495,7 +495,7 @@ static int avi_write_header(AVFormatContext *s)
}
end_tag(pb, list1);
-
+
avi->movi_list = start_tag(pb, "LIST");
put_tag(pb, "movi");
@@ -511,28 +511,28 @@ static int avi_write_ix(AVFormatContext *s)
unsigned char tag[5];
unsigned char ix_tag[] = "ix00";
int i, j;
-
+
if (url_is_streamed(pb))
return -1;
if (avi->riff_id > AVI_MASTER_INDEX_SIZE)
return -1;
-
+
for (i=0;i<s->nb_streams;i++) {
offset_t ix, pos;
-
+
avi_stream2fourcc(&tag[0], i, s->streams[i]->codec->codec_type);
ix_tag[3] = '0' + i;
-
+
/* Writing AVI OpenDML leaf index chunk */
- ix = url_ftell(pb);
+ ix = url_ftell(pb);
put_tag(pb, &ix_tag[0]); /* ix?? */
- put_le32(pb, avi->indexes[i].entry * 8 + 24);
+ put_le32(pb, avi->indexes[i].entry * 8 + 24);
/* chunk size */
put_le16(pb, 2); /* wLongsPerEntry */
- put_byte(pb, 0); /* bIndexSubType (0 == frame index) */
+ put_byte(pb, 0); /* bIndexSubType (0 == frame index) */
put_byte(pb, 1); /* bIndexType (1 == AVI_INDEX_OF_CHUNKS) */
- put_le32(pb, avi->indexes[i].entry);
+ put_le32(pb, avi->indexes[i].entry);
/* nEntriesInUse */
put_tag(pb, &tag[0]); /* dwChunkId */
put_le64(pb, avi->movi_list);/* qwBaseOffset */
@@ -546,7 +546,7 @@ static int avi_write_ix(AVFormatContext *s)
}
put_flush_packet(pb);
pos = url_ftell(pb);
-
+
/* Updating one entry in the AVI OpenDML master index */
url_fseek(pb, avi->indexes[i].indx_start - 8, SEEK_SET);
put_tag(pb, "indx"); /* enabling this entry */
@@ -583,17 +583,17 @@ static int avi_write_idx1(AVFormatContext *s)
for (i=0; i<s->nb_streams; i++) {
if (avi->indexes[i].entry <= entry[i])
continue;
-
+
tie = avi_get_ientry(&avi->indexes[i], entry[i]);
if (empty || tie->pos < ie->pos) {
- ie = tie;
+ ie = tie;
stream_id = i;
}
empty = 0;
}
if (!empty) {
- avi_stream2fourcc(&tag[0], stream_id,
- s->streams[stream_id]->codec->codec_type);
+ avi_stream2fourcc(&tag[0], stream_id,
+ s->streams[stream_id]->codec->codec_type);
put_tag(pb, &tag[0]);
put_le32(pb, ie->flags);
put_le32(pb, ie->pos);
@@ -621,7 +621,7 @@ static int avi_write_idx1(AVFormatContext *s)
}
if (avi->frames_hdr_all != 0) {
url_fseek(pb, avi->frames_hdr_all, SEEK_SET);
- put_le32(pb, nb_frames);
+ put_le32(pb, nb_frames);
}
url_fseek(pb, file_size, SEEK_SET);
}
@@ -652,19 +652,19 @@ static int avi_write_packet(AVFormatContext *s, AVPacket *pkt)
avi->packet_count[stream_index]++;
// Make sure to put an OpenDML chunk when the file size exceeds the limits
- if (!url_is_streamed(pb) &&
+ if (!url_is_streamed(pb) &&
(url_ftell(pb) - avi->riff_start > AVI_MAX_RIFF_SIZE)) {
-
+
avi_write_ix(s);
end_tag(pb, avi->movi_list);
-
+
if (avi->riff_id == 1)
avi_write_idx1(s);
end_tag(pb, avi->riff_start);
avi->movi_list = avi_start_new_riff(avi, pb, "AVIX", "movi");
}
-
+
avi_stream2fourcc(&tag[0], stream_index, enc->codec_type);
if(pkt->flags&PKT_FLAG_KEY)
flags = 0x10;
@@ -677,7 +677,7 @@ static int avi_write_packet(AVFormatContext *s, AVPacket *pkt)
int cl = idx->entry / AVI_INDEX_CLUSTER_SIZE;
int id = idx->entry % AVI_INDEX_CLUSTER_SIZE;
if (idx->ents_allocated <= idx->entry) {
- idx->cluster = av_realloc(idx->cluster, (cl+1)*sizeof(void*));
+ idx->cluster = av_realloc(idx->cluster, (cl+1)*sizeof(void*));
if (!idx->cluster)
return -1;
idx->cluster[cl] = av_malloc(AVI_INDEX_CLUSTER_SIZE*sizeof(AVIIentry));
@@ -685,13 +685,13 @@ static int avi_write_packet(AVFormatContext *s, AVPacket *pkt)
return -1;
idx->ents_allocated += AVI_INDEX_CLUSTER_SIZE;
}
-
- idx->cluster[cl][id].flags = flags;
+
+ idx->cluster[cl][id].flags = flags;
idx->cluster[cl][id].pos = url_ftell(pb) - avi->movi_list;
idx->cluster[cl][id].len = size;
idx->entry++;
}
-
+
put_buffer(pb, tag, 4);
put_le32(pb, size);
put_buffer(pb, pkt->data, size);
@@ -750,7 +750,7 @@ static int avi_write_trailer(AVFormatContext *s)
avi->indexes[i].cluster = NULL;
avi->indexes[i].ents_allocated = avi->indexes[i].entry = 0;
}
-
+
return res;
}
diff --git a/libavformat/avio.c b/libavformat/avio.c
index 3f5efd62ce..0d85a3a729 100644
--- a/libavformat/avio.c
+++ b/libavformat/avio.c
@@ -58,7 +58,7 @@ int url_open(URLContext **puc, const char *filename, int flags)
} else {
*q = '\0';
}
-
+
up = first_protocol;
while (up != NULL) {
if (!strcmp(proto_str, up->name))
@@ -108,7 +108,7 @@ int url_write(URLContext *h, unsigned char *buf, int size)
return AVERROR_IO;
/* avoid sending too big packets */
if (h->max_packet_size && size > h->max_packet_size)
- return AVERROR_IO;
+ return AVERROR_IO;
ret = h->prot->url_write(h, buf, size);
return ret;
}
@@ -145,18 +145,18 @@ int url_exist(const char *filename)
offset_t url_filesize(URLContext *h)
{
offset_t pos, size;
-
+
pos = url_seek(h, 0, SEEK_CUR);
size = url_seek(h, -1, SEEK_END)+1;
url_seek(h, pos, SEEK_SET);
return size;
}
-/*
+/*
* Return the maximum packet size associated to packetized file
* handle. If the file is not packetized (stream like http or file on
* disk), then 0 is returned.
- *
+ *
* @param h file handle
* @return maximum packet size in bytes
*/
@@ -176,11 +176,11 @@ static int default_interrupt_cb(void)
return 0;
}
-/**
+/**
* The callback is called in blocking functions to test regulary if
* asynchronous interruption is needed. -EINTR is returned in this
* case by the interrupted function. 'NULL' means no interrupt
- * callback is given.
+ * callback is given.
*/
void url_set_interrupt_cb(URLInterruptCB *interrupt_cb)
{
diff --git a/libavformat/avio.h b/libavformat/avio.h
index 51ef472244..f000cfb2c7 100644
--- a/libavformat/avio.h
+++ b/libavformat/avio.h
@@ -9,7 +9,7 @@ typedef int64_t offset_t;
struct URLContext {
struct URLProtocol *prot;
- int flags;
+ int flags;
int is_streamed; /* true if streamed (no seek possible), default = false */
int max_packet_size; /* if non zero, the stream is packetized with this max packet size */
void *priv_data;
diff --git a/libavformat/aviobuf.c b/libavformat/aviobuf.c
index cead878f02..e9b4f679dc 100644
--- a/libavformat/aviobuf.c
+++ b/libavformat/aviobuf.c
@@ -35,7 +35,7 @@ int init_put_byte(ByteIOContext *s,
s->buffer_size = buffer_size;
s->buf_ptr = buffer;
s->write_flag = write_flag;
- if (!s->write_flag)
+ if (!s->write_flag)
s->buf_end = buffer;
else
s->buf_end = buffer + buffer_size;
@@ -52,7 +52,7 @@ int init_put_byte(ByteIOContext *s,
s->update_checksum= NULL;
return 0;
}
-
+
#ifdef CONFIG_MUXERS
static void flush_buffer(ByteIOContext *s)
@@ -76,7 +76,7 @@ static void flush_buffer(ByteIOContext *s)
void put_byte(ByteIOContext *s, int b)
{
*(s->buf_ptr)++ = b;
- if (s->buf_ptr >= s->buf_end)
+ if (s->buf_ptr >= s->buf_end)
flush_buffer(s);
}
@@ -91,7 +91,7 @@ void put_buffer(ByteIOContext *s, const unsigned char *buf, int size)
memcpy(s->buf_ptr, buf, len);
s->buf_ptr += len;
- if (s->buf_ptr >= s->buf_end)
+ if (s->buf_ptr >= s->buf_end)
flush_buffer(s);
buf += len;
@@ -112,7 +112,7 @@ offset_t url_fseek(ByteIOContext *s, offset_t offset, int whence)
if (whence != SEEK_CUR && whence != SEEK_SET)
return -EINVAL;
-
+
#ifdef CONFIG_MUXERS
if (s->write_flag) {
if (whence == SEEK_CUR) {
@@ -122,7 +122,7 @@ offset_t url_fseek(ByteIOContext *s, offset_t offset, int whence)
offset += offset1;
}
offset1 = offset - s->pos;
- if (!s->must_flush &&
+ if (!s->must_flush &&
offset1 >= 0 && offset1 < (s->buf_end - s->buffer)) {
/* can do the seek inside the buffer */
s->buf_ptr = s->buffer + offset1;
@@ -135,7 +135,7 @@ offset_t url_fseek(ByteIOContext *s, offset_t offset, int whence)
s->seek(s->opaque, offset, SEEK_SET);
s->pos = offset;
}
- } else
+ } else
#endif //CONFIG_MUXERS
{
if (whence == SEEK_CUR) {
@@ -175,7 +175,7 @@ offset_t url_ftell(ByteIOContext *s)
offset_t url_fsize(ByteIOContext *s)
{
offset_t size;
-
+
if (!s->seek)
return -EPIPE;
size = s->seek(s->opaque, -1, SEEK_END) + 1;
@@ -375,7 +375,7 @@ int get_buffer(ByteIOContext *s, unsigned char *buf, int size)
int get_partial_buffer(ByteIOContext *s, unsigned char *buf, int size)
{
int len;
-
+
if(size<0)
return -1;
@@ -447,7 +447,7 @@ char *get_strz(ByteIOContext *s, char *buf, int maxlen)
if (i < maxlen-1)
buf[i++] = c;
}
-
+
buf[i] = 0; /* Ensure null terminated, but may be truncated */
return buf;
@@ -491,7 +491,7 @@ int url_fdopen(ByteIOContext *s, URLContext *h)
uint8_t *buffer;
int buffer_size, max_packet_size;
-
+
max_packet_size = url_get_max_packet_size(h);
if (max_packet_size) {
buffer_size = max_packet_size; /* no need to bufferize more than one packet */
@@ -502,7 +502,7 @@ int url_fdopen(ByteIOContext *s, URLContext *h)
if (!buffer)
return -ENOMEM;
- if (init_put_byte(s, buffer, buffer_size,
+ if (init_put_byte(s, buffer, buffer_size,
(h->flags & URL_WRONLY || h->flags & URL_RDWR), h,
url_read_packet, url_write_packet, url_seek_packet) < 0) {
av_free(buffer);
@@ -525,7 +525,7 @@ int url_setbufsize(ByteIOContext *s, int buf_size)
s->buffer = buffer;
s->buffer_size = buf_size;
s->buf_ptr = buffer;
- if (!s->write_flag)
+ if (!s->write_flag)
s->buf_end = buffer;
else
s->buf_end = buffer + buf_size;
@@ -553,7 +553,7 @@ int url_fopen(ByteIOContext *s, const char *filename, int flags)
int url_fclose(ByteIOContext *s)
{
URLContext *h = s->opaque;
-
+
av_free(s->buffer);
memset(s, 0, sizeof(ByteIOContext));
return url_close(h);
@@ -603,11 +603,11 @@ char *url_fgets(ByteIOContext *s, char *buf, int buf_size)
return buf;
}
-/*
+/*
* Return the maximum packet size associated to packetized buffered file
* handle. If the file is not packetized (stream like http or file on
* disk), then 0 is returned.
- *
+ *
* @param h buffered file handle
* @return maximum packet size in bytes
*/
@@ -620,7 +620,7 @@ int url_fget_max_packet_size(ByteIOContext *s)
/* buffer handling */
int url_open_buf(ByteIOContext *s, uint8_t *buf, int buf_size, int flags)
{
- return init_put_byte(s, buf, buf_size,
+ return init_put_byte(s, buf, buf_size,
(flags & URL_WRONLY || flags & URL_RDWR),
NULL, NULL, NULL, NULL);
}
@@ -645,7 +645,7 @@ static int dyn_buf_write(void *opaque, uint8_t *buf, int buf_size)
{
DynBuffer *d = opaque;
int new_size, new_allocated_size;
-
+
/* reallocate buffer if needed */
new_size = d->pos + buf_size;
new_allocated_size = d->allocated_size;
@@ -655,9 +655,9 @@ static int dyn_buf_write(void *opaque, uint8_t *buf, int buf_size)
if (!new_allocated_size)
new_allocated_size = new_size;
else
- new_allocated_size += new_allocated_size / 2 + 1;
+ new_allocated_size += new_allocated_size / 2 + 1;
}
-
+
if (new_allocated_size > d->allocated_size) {
d->buffer = av_realloc(d->buffer, new_allocated_size);
if(d->buffer == NULL)
@@ -707,12 +707,12 @@ static int url_open_dyn_buf_internal(ByteIOContext *s, int max_packet_size)
{
DynBuffer *d;
int io_buffer_size, ret;
-
- if (max_packet_size)
+
+ if (max_packet_size)
io_buffer_size = max_packet_size;
else
io_buffer_size = 1024;
-
+
if(sizeof(DynBuffer) + io_buffer_size < io_buffer_size)
return -1;
d = av_malloc(sizeof(DynBuffer) + io_buffer_size);
@@ -723,9 +723,9 @@ static int url_open_dyn_buf_internal(ByteIOContext *s, int max_packet_size)
d->pos = 0;
d->size = 0;
d->allocated_size = 0;
- ret = init_put_byte(s, d->io_buffer, io_buffer_size,
- 1, d, NULL,
- max_packet_size ? dyn_packet_buf_write : dyn_buf_write,
+ ret = init_put_byte(s, d->io_buffer, io_buffer_size,
+ 1, d, NULL,
+ max_packet_size ? dyn_packet_buf_write : dyn_buf_write,
max_packet_size ? NULL : dyn_buf_seek);
if (ret == 0) {
s->max_packet_size = max_packet_size;
@@ -735,7 +735,7 @@ static int url_open_dyn_buf_internal(ByteIOContext *s, int max_packet_size)
/*
* Open a write only memory stream.
- *
+ *
* @param s new IO context
* @return zero if no error.
*/
@@ -748,9 +748,9 @@ int url_open_dyn_buf(ByteIOContext *s)
* Open a write only packetized memory stream with a maximum packet
* size of 'max_packet_size'. The stream is stored in a memory buffer
* with a big endian 4 byte header giving the packet size in bytes.
- *
+ *
* @param s new IO context
- * @param max_packet_size maximum packet size (must be > 0)
+ * @param max_packet_size maximum packet size (must be > 0)
* @return zero if no error.
*/
int url_open_dyn_packet_buf(ByteIOContext *s, int max_packet_size)
@@ -760,9 +760,9 @@ int url_open_dyn_packet_buf(ByteIOContext *s, int max_packet_size)
return url_open_dyn_buf_internal(s, max_packet_size);
}
-/*
+/*
* Return the written size and a pointer to the buffer. The buffer
- * must be freed with av_free().
+ * must be freed with av_free().
* @param s IO context
* @param pointer to a byte buffer
* @return the length of the byte buffer
diff --git a/libavformat/barpainet.c b/libavformat/barpainet.c
index c1e8877718..8e66098b07 100644
--- a/libavformat/barpainet.c
+++ b/libavformat/barpainet.c
@@ -6,7 +6,7 @@
int inet_aton (const char * str, struct in_addr * add) {
const char * pch = str;
unsigned int add1 = 0, add2 = 0, add3 = 0, add4 = 0;
-
+
add1 = atoi(pch);
pch = strpbrk(pch,".");
if (pch == 0 || ++pch == 0) goto done;
@@ -20,6 +20,6 @@ int inet_aton (const char * str, struct in_addr * add) {
done:
add->s_addr=(add4<<24)+(add3<<16)+(add2<<8)+add1;
-
- return 1;
+
+ return 1;
}
diff --git a/libavformat/crc.c b/libavformat/crc.c
index 991ccf8a27..dfe2d4c870 100644
--- a/libavformat/crc.c
+++ b/libavformat/crc.c
@@ -1,4 +1,4 @@
-/*
+/*
* CRC decoder (for codec/format testing)
* Copyright (c) 2002 Fabrice Bellard.
*
diff --git a/libavformat/cutils.c b/libavformat/cutils.c
index 1415e4e392..e9dcfb8859 100644
--- a/libavformat/cutils.c
+++ b/libavformat/cutils.c
@@ -74,7 +74,7 @@ int stristart(const char *str, const char *val, const char **ptr)
* 1 then it is clamped to buf_size - 1.
* NOTE: this function does what strncpy should have done to be
* useful. NEVER use strncpy.
- *
+ *
* @param buf destination buffer
* @param buf_size size of destination buffer
* @param str source string
@@ -101,7 +101,7 @@ char *pstrcat(char *buf, int buf_size, const char *s)
{
int len;
len = strlen(buf);
- if (len < buf_size)
+ if (len < buf_size)
pstrcpy(buf + len, buf_size - len, s);
return buf;
}
@@ -139,7 +139,7 @@ time_t mktimegm(struct tm *tm)
y--;
}
- t = 86400 *
+ t = 86400 *
(d + (153 * m - 457) / 5 + 365 * y + y / 4 - y / 100 + y / 400 - 719469);
t += 3600 * tm->tm_hour + 60 * tm->tm_min + tm->tm_sec;
@@ -150,10 +150,10 @@ time_t mktimegm(struct tm *tm)
#define ISLEAP(y) (((y) % 4 == 0) && (((y) % 100) != 0 || ((y) % 400) == 0))
#define LEAPS_COUNT(y) ((y)/4 - (y)/100 + (y)/400)
-/* this is our own gmtime_r. it differs from its POSIX counterpart in a
+/* this is our own gmtime_r. it differs from its POSIX counterpart in a
couple of places, though. */
struct tm *brktimegm(time_t secs, struct tm *tm)
-{
+{
int days, y, ny, m;
int md[] = { 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 };
@@ -208,7 +208,7 @@ static int date_get_num(const char **pp,
}
/* small strptime for ffmpeg */
-const char *small_strptime(const char *p, const char *fmt,
+const char *small_strptime(const char *p, const char *fmt,
struct tm *dt)
{
int c, val;
diff --git a/libavformat/dc1394.c b/libavformat/dc1394.c
index bcc9bd9b04..1c8de2a3eb 100644
--- a/libavformat/dc1394.c
+++ b/libavformat/dc1394.c
@@ -70,11 +70,11 @@ static int dc1394_read_header(AVFormatContext *c, AVFormatParameters * ap)
for (fmt = dc1394_frame_formats; fmt->width; fmt++)
if (fmt->pix_fmt == ap->pix_fmt && fmt->width == ap->width && fmt->height == ap->height)
break;
-
+
for (fps = dc1394_frame_rates; fps->frame_rate; fps++)
if (fps->frame_rate == av_rescale(1000, ap->time_base.den, ap->time_base.num))
break;
-
+
/* create a video stream */
vst = av_new_stream(c, 0);
if (!vst)
@@ -93,12 +93,12 @@ static int dc1394_read_header(AVFormatContext *c, AVFormatParameters * ap)
dc1394->packet.size = avpicture_get_size(fmt->pix_fmt, fmt->width, fmt->height);
dc1394->packet.stream_index = vst->index;
dc1394->packet.flags |= PKT_FLAG_KEY;
-
+
dc1394->current_frame = 0;
dc1394->fps = fps->frame_rate;
vst->codec->bit_rate = av_rescale(dc1394->packet.size * 8, fps->frame_rate, 1000);
-
+
/* Now lets prep the hardware */
dc1394->handle = dc1394_create_handle(0); /* FIXME: gotta have ap->port */
if (!dc1394->handle) {
@@ -108,15 +108,15 @@ static int dc1394_read_header(AVFormatContext *c, AVFormatParameters * ap)
camera_nodes = dc1394_get_camera_nodes(dc1394->handle, &res, 1);
if (!camera_nodes || camera_nodes[ap->channel] == DC1394_NO_CAMERA) {
av_log(c, AV_LOG_ERROR, "There's no IIDC camera on the channel %d\n", ap->channel);
- goto out_handle;
+ goto out_handle;
}
- res = dc1394_dma_setup_capture(dc1394->handle, camera_nodes[ap->channel],
+ res = dc1394_dma_setup_capture(dc1394->handle, camera_nodes[ap->channel],
0,
FORMAT_VGA_NONCOMPRESSED,
fmt->frame_size_id,
SPEED_400,
- fps->frame_rate_id, 8, 1,
- ap->device,
+ fps->frame_rate_id, 8, 1,
+ ap->device,
&dc1394->camera);
dc1394_free_camera_nodes(camera_nodes);
if (res != DC1394_SUCCESS) {
@@ -129,7 +129,7 @@ static int dc1394_read_header(AVFormatContext *c, AVFormatParameters * ap)
av_log(c, AV_LOG_ERROR, "Can't start isochronous transmission\n");
goto out_handle_dma;
}
-
+
return 0;
out_handle_dma:
@@ -145,25 +145,25 @@ static int dc1394_read_packet(AVFormatContext *c, AVPacket *pkt)
{
struct dc1394_data *dc1394 = c->priv_data;
int res;
-
+
/* discard stale frame */
if (dc1394->current_frame++) {
if (dc1394_dma_done_with_buffer(&dc1394->camera) != DC1394_SUCCESS)
av_log(c, AV_LOG_ERROR, "failed to release %d frame\n", dc1394->current_frame);
}
-
+
res = dc1394_dma_single_capture(&dc1394->camera);
if (res == DC1394_SUCCESS) {
- dc1394->packet.data = (uint8_t *)(dc1394->camera.capture_buffer);
- dc1394->packet.pts = (dc1394->current_frame * 1000000) / dc1394->fps;
+ dc1394->packet.data = (uint8_t *)(dc1394->camera.capture_buffer);
+ dc1394->packet.pts = (dc1394->current_frame * 1000000) / dc1394->fps;
res = dc1394->packet.size;
} else {
av_log(c, AV_LOG_ERROR, "DMA capture failed\n");
dc1394->packet.data = NULL;
- res = -1;
+ res = -1;
}
-
+
*pkt = dc1394->packet;
return res;
}
@@ -176,7 +176,7 @@ static int dc1394_close(AVFormatContext * context)
dc1394_dma_unlisten(dc1394->handle, &dc1394->camera);
dc1394_dma_release_camera(dc1394->handle, &dc1394->camera);
dc1394_destroy_handle(dc1394->handle);
-
+
return 0;
}
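
A worked example of the rate arithmetic in this grabber, with assumed values: frame rates are kept scaled by 1000 (milli-fps), which is why both lookups and the bit-rate computation above multiply or divide by 1000.

    /* NTSC-style time base 1001/30000 -> av_rescale(1000, 30000, 1001) == 29970,
     * i.e. 29.970 fps stored as milli-fps.  For a hypothetical 720x480 UYVY frame
     * (691200 bytes), bit_rate = av_rescale(691200 * 8, 29970, 1000)
     *               = 165722112 bit/s, roughly 165.7 Mbit/s. */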
diff --git a/libavformat/dv.c b/libavformat/dv.c
index 83019fc9a9..24abc54786 100644
--- a/libavformat/dv.c
+++ b/libavformat/dv.c
@@ -1,5 +1,5 @@
-/*
- * General DV muxer/demuxer
+/*
+ * General DV muxer/demuxer
* Copyright (c) 2003 Roman Shaposhnick
*
 * Many thanks to Dan Dennedy <dan@dennedy.org> for providing a wealth
@@ -30,7 +30,7 @@
struct DVDemuxContext {
AVFormatContext* fctx;
AVStream* vst;
- AVStream* ast[2];
+ AVStream* ast[2];
AVPacket audio_pkt[2];
uint8_t audio_buf[2][8192];
int ach;
@@ -58,7 +58,7 @@ enum dv_section_type {
};
enum dv_pack_type {
- dv_header525 = 0x3f, /* see dv_write_pack for important details on */
+ dv_header525 = 0x3f, /* see dv_write_pack for important details on */
dv_header625 = 0xbf, /* these two packs */
dv_timecode = 0x13,
dv_audio_source = 0x50,
@@ -76,15 +76,15 @@ enum dv_pack_type {
/*
* The reason why the following three big ugly looking tables are
- * here is my lack of DV spec IEC 61834. The tables were basically
- * constructed to make code that places packs in SSYB, VAUX and
+ * here is my lack of DV spec IEC 61834. The tables were basically
+ * constructed to make code that places packs in SSYB, VAUX and
* AAUX blocks very simple and table-driven. They conform to the
* SMPTE 314M and the output of my personal DV camcorder, neither
 * of which is sufficient for producing a reliable DV stream. Thus
* while code is still in development I'll be gathering input from
* people with different DV equipment and modifying the tables to
* accommodate all the quirks. Later on, if possible, some of them
- * will be folded into smaller tables and/or switch-if logic. For
+ * will be folded into smaller tables and/or switch-if logic. For
 * now, my only excuse is -- they don't eat up that much space.
*/
@@ -104,29 +104,29 @@ static const int dv_ssyb_packs_dist[12][6] = {
};
static const int dv_vaux_packs_dist[12][15] = {
- { 0x60, 0x61, 0x62, 0x63, 0xff, 0xff, 0xff, 0xff, 0xff,
+ { 0x60, 0x61, 0x62, 0x63, 0xff, 0xff, 0xff, 0xff, 0xff,
0x60, 0x61, 0x62, 0x63, 0xff, 0xff },
- { 0x60, 0x61, 0x62, 0x63, 0xff, 0xff, 0xff, 0xff, 0xff,
+ { 0x60, 0x61, 0x62, 0x63, 0xff, 0xff, 0xff, 0xff, 0xff,
0x60, 0x61, 0x62, 0x63, 0xff, 0xff },
- { 0x60, 0x61, 0x62, 0x63, 0xff, 0xff, 0xff, 0xff, 0xff,
+ { 0x60, 0x61, 0x62, 0x63, 0xff, 0xff, 0xff, 0xff, 0xff,
0x60, 0x61, 0x62, 0x63, 0xff, 0xff },
- { 0x60, 0x61, 0x62, 0x63, 0xff, 0xff, 0xff, 0xff, 0xff,
+ { 0x60, 0x61, 0x62, 0x63, 0xff, 0xff, 0xff, 0xff, 0xff,
0x60, 0x61, 0x62, 0x63, 0xff, 0xff },
- { 0x60, 0x61, 0x62, 0x63, 0xff, 0xff, 0xff, 0xff, 0xff,
+ { 0x60, 0x61, 0x62, 0x63, 0xff, 0xff, 0xff, 0xff, 0xff,
0x60, 0x61, 0x62, 0x63, 0xff, 0xff },
- { 0x60, 0x61, 0x62, 0x63, 0xff, 0xff, 0xff, 0xff, 0xff,
+ { 0x60, 0x61, 0x62, 0x63, 0xff, 0xff, 0xff, 0xff, 0xff,
0x60, 0x61, 0x62, 0x63, 0xff, 0xff },
- { 0x60, 0x61, 0x62, 0x63, 0xff, 0xff, 0xff, 0xff, 0xff,
+ { 0x60, 0x61, 0x62, 0x63, 0xff, 0xff, 0xff, 0xff, 0xff,
0x60, 0x61, 0x62, 0x63, 0xff, 0xff },
- { 0x60, 0x61, 0x62, 0x63, 0xff, 0xff, 0xff, 0xff, 0xff,
+ { 0x60, 0x61, 0x62, 0x63, 0xff, 0xff, 0xff, 0xff, 0xff,
0x60, 0x61, 0x62, 0x63, 0xff, 0xff },
- { 0x60, 0x61, 0x62, 0x63, 0xff, 0xff, 0xff, 0xff, 0xff,
+ { 0x60, 0x61, 0x62, 0x63, 0xff, 0xff, 0xff, 0xff, 0xff,
0x60, 0x61, 0x62, 0x63, 0xff, 0xff },
- { 0x60, 0x61, 0x62, 0x63, 0xff, 0xff, 0xff, 0xff, 0xff,
+ { 0x60, 0x61, 0x62, 0x63, 0xff, 0xff, 0xff, 0xff, 0xff,
0x60, 0x61, 0x62, 0x63, 0xff, 0xff },
- { 0x60, 0x61, 0x62, 0x63, 0xff, 0xff, 0xff, 0xff, 0xff,
+ { 0x60, 0x61, 0x62, 0x63, 0xff, 0xff, 0xff, 0xff, 0xff,
0x60, 0x61, 0x62, 0x63, 0xff, 0xff },
- { 0x60, 0x61, 0x62, 0x63, 0xff, 0xff, 0xff, 0xff, 0xff,
+ { 0x60, 0x61, 0x62, 0x63, 0xff, 0xff, 0xff, 0xff, 0xff,
0x60, 0x61, 0x62, 0x63, 0xff, 0xff },
};
@@ -148,7 +148,7 @@ static const int dv_aaux_packs_dist[12][9] = {
static inline uint16_t dv_audio_12to16(uint16_t sample)
{
uint16_t shift, result;
-
+
sample = (sample < 0x800) ? sample : sample | 0xf000;
shift = (sample & 0xf00) >> 8;
@@ -186,19 +186,19 @@ static int dv_write_pack(enum dv_pack_type pack_id, DVMuxContext *c, uint8_t* bu
buf[2] = (0 << 7) | /* TF1: audio data is 0 - valid; 1 - invalid */
(0x0f << 3) | /* reserved -- always 1 */
(0 & 0x07); /* AP1: Audio application ID */
- buf[3] = (0 << 7) | /* TF2: video data is 0 - valid; 1 - invalid */
+ buf[3] = (0 << 7) | /* TF2: video data is 0 - valid; 1 - invalid */
(0x0f << 3) | /* reserved -- always 1 */
(0 & 0x07); /* AP2: Video application ID */
- buf[4] = (0 << 7) | /* TF3: subcode(SSYB) is 0 - valid; 1 - invalid */
+ buf[4] = (0 << 7) | /* TF3: subcode(SSYB) is 0 - valid; 1 - invalid */
(0x0f << 3) | /* reserved -- always 1 */
(0 & 0x07); /* AP3: Subcode application ID */
break;
case dv_timecode:
- ct = (time_t)(c->frames / ((float)c->sys->frame_rate /
+ ct = (time_t)(c->frames / ((float)c->sys->frame_rate /
(float)c->sys->frame_rate_base));
brktimegm(ct, &tc);
- /*
- * LTC drop-frame frame counter drops two frames (0 and 1) every
+ /*
+ * LTC drop-frame frame counter drops two frames (0 and 1) every
* minute, unless it is exactly divisible by 10
*/
ltc_frame = (c->frames + 2*ct/60 - 2*ct/600) % c->sys->ltc_divisor;
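
The drop-frame bookkeeping used for ltc_frame above (two frame numbers skipped every minute, except minutes divisible by 10) is easiest to see going the other way; a rough sketch, with frames_to_dropframe_tc being a hypothetical helper and not part of dv.c, of turning a running frame count into 29.97 fps drop-frame timecode fields:

    static void frames_to_dropframe_tc(unsigned frames,
                                       int *hh, int *mm, int *ss, int *ff)
    {
        const unsigned per_10min = 17982;   /* 10*60*30 minus 18 dropped numbers */
        const unsigned per_min   = 1798;    /* a minute not divisible by 10      */
        unsigned d = frames / per_10min;
        unsigned m = frames % per_10min;

        if (m < 2)
            m = 2;                          /* first two numbers of a 10-minute block */
        frames += 18 * d + 2 * ((m - 2) / per_min);

        *ff =  frames % 30;
        *ss = (frames / 30) % 60;
        *mm = (frames / (30 * 60)) % 60;
        *hh =  frames / (30 * 60 * 60);
    }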
@@ -234,7 +234,7 @@ static int dv_write_pack(enum dv_pack_type pack_id, DVMuxContext *c, uint8_t* bu
buf[4] = (1 << 7) | /* emphasis: 1 -- off */
(0 << 6) | /* emphasis time constant: 0 -- reserved */
(0 << 3) | /* frequency: 0 -- 48Khz, 1 -- 44,1Khz, 2 -- 32Khz */
- 0; /* quantization: 0 -- 16bit linear, 1 -- 12bit nonlinear */
+ 0; /* quantization: 0 -- 16bit linear, 1 -- 12bit nonlinear */
break;
case dv_audio_control:
buf[1] = (0 << 6) | /* copy protection: 0 -- unrestricted */
@@ -244,7 +244,7 @@ static int dv_write_pack(enum dv_pack_type pack_id, DVMuxContext *c, uint8_t* bu
buf[2] = (1 << 7) | /* recording start point: 1 -- no */
(1 << 6) | /* recording end point: 1 -- no */
(1 << 3) | /* recording mode: 1 -- original */
- 7;
+ 7;
buf[3] = (1 << 7) | /* direction: 1 -- forward */
0x20; /* speed */
buf[4] = (1 << 7) | /* reserved -- always 1 */
@@ -252,7 +252,7 @@ static int dv_write_pack(enum dv_pack_type pack_id, DVMuxContext *c, uint8_t* bu
break;
case dv_audio_recdate:
case dv_video_recdate: /* VAUX recording date */
- ct = c->start_time + (time_t)(c->frames /
+ ct = c->start_time + (time_t)(c->frames /
((float)c->sys->frame_rate / (float)c->sys->frame_rate_base));
brktimegm(ct, &tc);
buf[1] = 0xff; /* ds, tm, tens of time zone, units of time zone */
@@ -268,18 +268,18 @@ static int dv_write_pack(enum dv_pack_type pack_id, DVMuxContext *c, uint8_t* bu
break;
case dv_audio_rectime: /* AAUX recording time */
case dv_video_rectime: /* VAUX recording time */
- ct = c->start_time + (time_t)(c->frames /
+ ct = c->start_time + (time_t)(c->frames /
((float)c->sys->frame_rate / (float)c->sys->frame_rate_base));
brktimegm(ct, &tc);
buf[1] = (3 << 6) | /* reserved -- always 1 */
0x3f; /* tens of frame, units of frame: 0x3f - "unknown" ? */
- buf[2] = (1 << 7) | /* reserved -- always 1 */
+ buf[2] = (1 << 7) | /* reserved -- always 1 */
((tc.tm_sec / 10) << 4) | /* Tens of seconds */
(tc.tm_sec % 10); /* Units of seconds */
buf[3] = (1 << 7) | /* reserved -- always 1 */
((tc.tm_min / 10) << 4) | /* Tens of minutes */
(tc.tm_min % 10); /* Units of minutes */
- buf[4] = (3 << 6) | /* reserved -- always 1 */
+ buf[4] = (3 << 6) | /* reserved -- always 1 */
((tc.tm_hour / 10) << 4) | /* Tens of hours */
(tc.tm_hour % 10); /* Units of hours */
break;
@@ -312,7 +312,7 @@ static int dv_write_pack(enum dv_pack_type pack_id, DVMuxContext *c, uint8_t* bu
return 5;
}
-static inline int dv_write_dif_id(enum dv_section_type t, uint8_t seq_num,
+static inline int dv_write_dif_id(enum dv_section_type t, uint8_t seq_num,
uint8_t dif_num, uint8_t* buf)
{
buf[0] = (uint8_t)t; /* Section type */
@@ -329,7 +329,7 @@ static inline int dv_write_ssyb_id(uint8_t syb_num, uint8_t fr, uint8_t* buf)
buf[0] = (fr<<7) | /* FR ID 1 - first half of each channel; 0 - second */
(0<<4) | /* AP3 (Subcode application ID) */
0x0f; /* reserved -- always 1 */
- }
+ }
else if (syb_num == 11) {
buf[0] = (fr<<7) | /* FR ID 1 - first half of each channel; 0 - second */
0x7f; /* reserved -- always 1 */
@@ -348,15 +348,15 @@ static inline int dv_write_ssyb_id(uint8_t syb_num, uint8_t fr, uint8_t* buf)
static void dv_format_frame(DVMuxContext *c, uint8_t* buf)
{
int i, j, k;
-
+
for (i = 0; i < c->sys->difseg_size; i++) {
memset(buf, 0xff, 80 * 6); /* First 6 DIF blocks are for control data */
-
+
/* DV header: 1DIF */
buf += dv_write_dif_id(dv_sect_header, i, 0, buf);
buf += dv_write_pack((c->sys->dsf ? dv_header625 : dv_header525), c, buf);
buf += 72; /* unused bytes */
-
+
/* DV subcode: 2DIFs */
for (j = 0; j < 2; j++) {
buf += dv_write_dif_id( dv_sect_subcode, i, j, buf);
@@ -366,15 +366,15 @@ static void dv_format_frame(DVMuxContext *c, uint8_t* buf)
}
buf += 29; /* unused bytes */
}
-
+
/* DV VAUX: 3DIFs */
for (j = 0; j < 3; j++) {
buf += dv_write_dif_id(dv_sect_vaux, i, j, buf);
for (k = 0; k < 15 ; k++)
buf += dv_write_pack(dv_vaux_packs_dist[i][k], c, buf);
buf += 2; /* unused bytes */
- }
-
+ }
+
/* DV Audio/Video: 135 Video DIFs + 9 Audio DIFs */
for (j = 0; j < 135; j++) {
if (j%15 == 0) {
@@ -403,9 +403,9 @@ static void dv_inject_audio(DVMuxContext *c, const uint8_t* pcm, uint8_t* frame_
of = c->sys->audio_shuffle[i][j] + (d - 8)/2 * c->sys->audio_stride;
if (of*2 >= size)
continue;
-
+
            frame_ptr[d] = pcm[of*2+1]; // FIXME: maybe we have to admit
- frame_ptr[d+1] = pcm[of*2]; // that DV is a big endian PCM
+ frame_ptr[d+1] = pcm[of*2]; // that DV is a big endian PCM
}
frame_ptr += 16 * 80; /* 15 Video DIFs + 1 Audio DIF */
}
@@ -429,7 +429,7 @@ static void dv_inject_video(DVMuxContext *c, const uint8_t* video_data, uint8_t*
}
}
-/*
+/*
* This is the dumbest implementation of all -- it simply looks at
 * a fixed offset and, if the pack isn't there, fails. We might want
* to have a fallback mechanism for complete search of missing packs.
@@ -437,7 +437,7 @@ static void dv_inject_video(DVMuxContext *c, const uint8_t* video_data, uint8_t*
static const uint8_t* dv_extract_pack(uint8_t* frame, enum dv_pack_type t)
{
int offs;
-
+
switch (t) {
case dv_audio_source:
offs = (80*6 + 80*16*3 + 3);
@@ -450,12 +450,12 @@ static const uint8_t* dv_extract_pack(uint8_t* frame, enum dv_pack_type t)
break;
default:
return NULL;
- }
+ }
return (frame[offs] == t ? &frame[offs] : NULL);
}
-/*
+/*
* There's a couple of assumptions being made here:
* 1. By default we silence erroneous (0x8000/16bit 0x800/12bit) audio samples.
* We can pass them upwards when ffmpeg will be ready to deal with them.
@@ -469,16 +469,16 @@ static int dv_extract_audio(uint8_t* frame, uint8_t* pcm, uint8_t* pcm2)
uint16_t lc, rc;
const DVprofile* sys;
const uint8_t* as_pack;
-
+
as_pack = dv_extract_pack(frame, dv_audio_source);
if (!as_pack) /* No audio ? */
return 0;
-
+
sys = dv_frame_profile(frame);
smpls = as_pack[1] & 0x3f; /* samples in this frame - min. samples */
freq = (as_pack[4] >> 3) & 0x07; /* 0 - 48KHz, 1 - 44,1kHz, 2 - 32 kHz */
quant = as_pack[4] & 0x07; /* 0 - 16bit linear, 1 - 12bit nonlinear */
-
+
if (quant > 1)
return -1; /* Unsupported quantization */
@@ -501,13 +501,13 @@ static int dv_extract_audio(uint8_t* frame, uint8_t* pcm, uint8_t* pcm2)
of = sys->audio_shuffle[i][j] + (d - 8)/2 * sys->audio_stride;
if (of*2 >= size)
continue;
-
+
                pcm[of*2] = frame[d+1]; // FIXME: maybe we have to admit
pcm[of*2+1] = frame[d]; // that DV is a big endian PCM
if (pcm[of*2+1] == 0x80 && pcm[of*2] == 0x00)
pcm[of*2+1] = 0;
} else { /* 12bit quantization */
- lc = ((uint16_t)frame[d] << 4) |
+ lc = ((uint16_t)frame[d] << 4) |
((uint16_t)frame[d+2] >> 4);
rc = ((uint16_t)frame[d+1] << 4) |
((uint16_t)frame[d+2] & 0x0f);
@@ -520,14 +520,14 @@ static int dv_extract_audio(uint8_t* frame, uint8_t* pcm, uint8_t* pcm2)
               pcm[of*2] = lc & 0xff; // FIXME: maybe we have to admit
pcm[of*2+1] = lc >> 8; // that DV is a big endian PCM
- of = sys->audio_shuffle[i%half_ch+half_ch][j] +
+ of = sys->audio_shuffle[i%half_ch+half_ch][j] +
(d - 8)/3 * sys->audio_stride;
               pcm[of*2] = rc & 0xff; // FIXME: maybe we have to admit
pcm[of*2+1] = rc >> 8; // that DV is a big endian PCM
++d;
}
}
-
+
frame += 16 * 80; /* 15 Video DIFs + 1 Audio DIF */
}
}
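
The 12-bit branch above pulls two samples out of every three bytes before they are widened by dv_audio_12to16(); the packing in isolation, as a hypothetical helper using the same bit layout as the hunk:

    static void unpack_dv12(const uint8_t b[3], uint16_t *lc, uint16_t *rc)
    {
        *lc = ((uint16_t)b[0] << 4) | (b[2] >> 4);    /* left:  byte 0 + high nibble of byte 2 */
        *rc = ((uint16_t)b[1] << 4) | (b[2] & 0x0f);  /* right: byte 1 + low nibble of byte 2  */
    }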
@@ -547,7 +547,7 @@ static int dv_extract_audio_info(DVDemuxContext* c, uint8_t* frame)
c->ach = 0;
return 0;
}
-
+
smpls = as_pack[1] & 0x3f; /* samples in this frame - min. samples */
freq = (as_pack[4] >> 3) & 0x07; /* 0 - 48KHz, 1 - 44,1kHz, 2 - 32 kHz */
quant = as_pack[4] & 0x07; /* 0 - 16bit linear, 1 - 12bit nonlinear */
@@ -575,22 +575,22 @@ static int dv_extract_audio_info(DVDemuxContext* c, uint8_t* frame)
c->ast[i]->start_time = 0;
}
c->ach = i;
-
+
return (sys->audio_min_samples[freq] + smpls) * 4; /* 2ch, 2bytes */;
}
static int dv_extract_video_info(DVDemuxContext *c, uint8_t* frame)
{
- const DVprofile* sys;
+ const DVprofile* sys;
const uint8_t* vsc_pack;
AVCodecContext* avctx;
int apt, is16_9;
int size = 0;
-
+
sys = dv_frame_profile(frame);
if (sys) {
avctx = c->vst->codec;
-
+
av_set_pts_info(c->vst, 64, sys->frame_rate_base, sys->frame_rate);
avctx->time_base= (AVRational){sys->frame_rate_base, sys->frame_rate};
if(!avctx->width){
@@ -598,20 +598,20 @@ static int dv_extract_video_info(DVDemuxContext *c, uint8_t* frame)
avctx->height = sys->height;
}
avctx->pix_fmt = sys->pix_fmt;
-
+
/* finding out SAR is a little bit messy */
vsc_pack = dv_extract_pack(frame, dv_video_control);
apt = frame[4] & 0x07;
is16_9 = (vsc_pack && ((vsc_pack[2] & 0x07) == 0x02 ||
(!apt && (vsc_pack[2] & 0x07) == 0x07)));
avctx->sample_aspect_ratio = sys->sar[is16_9];
-
+
size = sys->frame_size;
}
return size;
}
-/*
+/*
* The following 6 functions constitute our interface to the world
*/
@@ -620,7 +620,7 @@ int dv_assemble_frame(DVMuxContext *c, AVStream* st,
{
uint8_t pcm[8192];
int fsize, reqasize;
-
+
*frame = &c->frame_buf[0];
if (c->has_audio && c->has_video) { /* must be a stale frame */
dv_format_frame(c, *frame);
@@ -629,22 +629,22 @@ int dv_assemble_frame(DVMuxContext *c, AVStream* st,
c->has_audio = 0;
c->has_video = 0;
}
-
+
if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
/* FIXME: we have to have more sensible approach than this one */
if (c->has_video)
av_log(st->codec, AV_LOG_ERROR, "Can't process DV frame #%d. Insufficient audio data or severe sync problem.\n", c->frames);
-
+
dv_inject_video(c, data, *frame);
c->has_video = 1;
data_size = 0;
if (c->has_audio < 0)
goto out;
- }
-
+ }
+
reqasize = 4 * dv_audio_frame_size(c->sys, c->frames);
fsize = fifo_size(&c->audio_data, c->audio_data.rptr);
- if (st->codec->codec_type == CODEC_TYPE_AUDIO || (c->has_video && fsize >= reqasize)) {
+ if (st->codec->codec_type == CODEC_TYPE_AUDIO || (c->has_video && fsize >= reqasize)) {
if (fsize + data_size >= reqasize && !c->has_audio) {
if (fsize >= reqasize) {
fifo_read(&c->audio_data, &pcm[0], reqasize, &c->audio_data.rptr);
@@ -657,7 +657,7 @@ int dv_assemble_frame(DVMuxContext *c, AVStream* st,
dv_inject_audio(c, &pcm[0], *frame);
c->has_audio = 1;
}
-
+
/* FIXME: we have to have more sensible approach than this one */
if (fifo_size(&c->audio_data, c->audio_data.rptr) + data_size >= 100*AVCODEC_MAX_AUDIO_FRAME_SIZE)
av_log(st->codec, AV_LOG_ERROR, "Can't process DV frame #%d. Insufficient video data or severe sync problem.\n", c->frames);
@@ -695,7 +695,7 @@ DVMuxContext* dv_init_mux(AVFormatContext* s)
goto bail_out;
}
}
-
+
/* Some checks -- DV format is very picky about its incoming streams */
if (!vst || vst->codec->codec_id != CODEC_ID_DVVIDEO)
goto bail_out;
@@ -706,14 +706,14 @@ DVMuxContext* dv_init_mux(AVFormatContext* s)
c->sys = dv_codec_profile(vst->codec);
if (!c->sys)
goto bail_out;
-
+
/* Ok, everything seems to be in working order */
c->frames = 0;
c->has_audio = ast ? 0 : -1;
c->has_video = 0;
c->start_time = (time_t)s->timestamp;
c->aspect = 0; /* 4:3 is the default */
- if ((int)(av_q2d(vst->codec->sample_aspect_ratio) * vst->codec->width / vst->codec->height * 10) == 17) /* 16:9 */
+ if ((int)(av_q2d(vst->codec->sample_aspect_ratio) * vst->codec->width / vst->codec->height * 10) == 17) /* 16:9 */
c->aspect = 0x07;
if (ast && fifo_init(&c->audio_data, 100*AVCODEC_MAX_AUDIO_FRAME_SIZE) < 0)
@@ -722,14 +722,14 @@ DVMuxContext* dv_init_mux(AVFormatContext* s)
dv_format_frame(c, &c->frame_buf[0]);
return c;
-
+
bail_out:
av_free(c);
return NULL;
}
void dv_delete_mux(DVMuxContext *c)
-{
+{
fifo_free(&c->audio_data);
}
@@ -743,7 +743,7 @@ DVDemuxContext* dv_init_demux(AVFormatContext *s)
c->vst = av_new_stream(s, 0);
if (!c->vst) {
- av_free(c);
+ av_free(c);
return NULL;
}
@@ -757,7 +757,7 @@ DVDemuxContext* dv_init_demux(AVFormatContext *s)
c->vst->codec->codec_id = CODEC_ID_DVVIDEO;
c->vst->codec->bit_rate = 25000000;
c->vst->start_time = 0;
-
+
return c;
}
@@ -778,12 +778,12 @@ int dv_get_packet(DVDemuxContext *c, AVPacket *pkt)
return size;
}
-int dv_produce_packet(DVDemuxContext *c, AVPacket *pkt,
+int dv_produce_packet(DVDemuxContext *c, AVPacket *pkt,
uint8_t* buf, int buf_size)
{
int size, i;
const DVprofile* sys = dv_frame_profile(buf);
-
+
if (buf_size < 4 || buf_size < sys->frame_size)
return -1; /* Broken frame, or not enough data */
@@ -796,21 +796,21 @@ int dv_produce_packet(DVDemuxContext *c, AVPacket *pkt,
}
dv_extract_audio(buf, c->audio_buf[0], c->audio_buf[1]);
c->abytes += size;
-
+
/* Now it's time to return video packet */
size = dv_extract_video_info(c, buf);
av_init_packet(pkt);
pkt->data = buf;
- pkt->size = size;
+ pkt->size = size;
pkt->flags |= PKT_FLAG_KEY;
pkt->stream_index = c->vst->id;
pkt->pts = c->frames;
-
+
c->frames++;
return size;
}
-
+
static int64_t dv_frame_offset(AVFormatContext *s, DVDemuxContext *c,
int64_t timestamp, int flags)
{
@@ -821,7 +821,7 @@ static int64_t dv_frame_offset(AVFormatContext *s, DVDemuxContext *c,
int64_t max_offset = ((size-1) / sys->frame_size) * sys->frame_size;
offset = sys->frame_size * timestamp;
-
+
if (offset > max_offset) offset = max_offset;
else if (offset < 0) offset = 0;
@@ -836,7 +836,7 @@ void dv_flush_audio_packets(DVDemuxContext *c)
/************************************************************
* Implementation of the easiest DV storage of all -- raw DV.
************************************************************/
-
+
typedef struct RawDVContext {
uint8_t buf[144000];
DVDemuxContext* dv_demux;
@@ -850,13 +850,13 @@ static int dv_read_header(AVFormatContext *s,
c->dv_demux = dv_init_demux(s);
if (!c->dv_demux)
return -1;
-
+
if (get_buffer(&s->pb, c->buf, 4) <= 0 || url_fseek(&s->pb, -4, SEEK_CUR) < 0)
return AVERROR_IO;
sys = dv_frame_profile(c->buf);
s->bit_rate = av_rescale(sys->frame_size * 8, sys->frame_rate, sys->frame_rate_base);
-
+
return 0;
}
@@ -864,25 +864,25 @@ static int dv_read_header(AVFormatContext *s,
static int dv_read_packet(AVFormatContext *s, AVPacket *pkt)
{
int size;
- RawDVContext *c = s->priv_data;
-
+ RawDVContext *c = s->priv_data;
+
size = dv_get_packet(c->dv_demux, pkt);
-
+
if (size < 0) {
- if (get_buffer(&s->pb, c->buf, 4) <= 0)
+ if (get_buffer(&s->pb, c->buf, 4) <= 0)
return AVERROR_IO;
-
+
size = dv_frame_profile(c->buf)->frame_size;
if (get_buffer(&s->pb, c->buf + 4, size - 4) <= 0)
return AVERROR_IO;
size = dv_produce_packet(c->dv_demux, pkt, c->buf, size);
- }
-
+ }
+
return size;
}
-static int dv_read_seek(AVFormatContext *s, int stream_index,
+static int dv_read_seek(AVFormatContext *s, int stream_index,
int64_t timestamp, int flags)
{
RawDVContext *r = s->priv_data;
@@ -895,7 +895,7 @@ static int dv_read_seek(AVFormatContext *s, int stream_index,
c->abytes= av_rescale(c->frames,
c->ast[0]->codec->bit_rate * (int64_t)sys->frame_rate_base,
8*sys->frame_rate);
-
+
dv_flush_audio_packets(c);
return url_fseek(&s->pb, offset, SEEK_SET);
}
@@ -923,20 +923,20 @@ static int dv_write_packet(struct AVFormatContext *s, AVPacket *pkt)
{
uint8_t* frame;
int fsize;
-
+
fsize = dv_assemble_frame((DVMuxContext *)s->priv_data, s->streams[pkt->stream_index],
pkt->data, pkt->size, &frame);
if (fsize > 0) {
- put_buffer(&s->pb, frame, fsize);
+ put_buffer(&s->pb, frame, fsize);
put_flush_packet(&s->pb);
- }
+ }
return 0;
}
-/*
+/*
* We might end up with some extra A/V data without matching counterpart.
* E.g. video data without enough audio to write the complete frame.
- * Currently we simply drop the last frame. I don't know whether this
+ * Currently we simply drop the last frame. I don't know whether this
* is the best strategy of all
*/
static int dv_write_trailer(struct AVFormatContext *s)
diff --git a/libavformat/dv.h b/libavformat/dv.h
index ff1e53ff10..1920682c92 100644
--- a/libavformat/dv.h
+++ b/libavformat/dv.h
@@ -1,5 +1,5 @@
-/*
- * General DV muxer/demuxer
+/*
+ * General DV muxer/demuxer
* Copyright (c) 2003 Roman Shaposhnick
*
 * Many thanks to Dan Dennedy <dan@dennedy.org> for providing a wealth
diff --git a/libavformat/dv1394.c b/libavformat/dv1394.c
index 127a5f77c9..8e61df3df4 100644
--- a/libavformat/dv1394.c
+++ b/libavformat/dv1394.c
@@ -46,9 +46,9 @@ struct dv1394_data {
DVDemuxContext* dv_demux; /* Generic DV muxing/demuxing context */
};
-/*
+/*
 * The trick here is to kludge around a well-known problem with the kernel Oopsing
- * when you try to capture PAL on a device node configured for NTSC. That's 
+ * when you try to capture PAL on a device node configured for NTSC. That's
* why we have to configure the device node for PAL, and then read only NTSC
* amount of data.
*/
@@ -148,9 +148,9 @@ static int dv1394_read_packet(AVFormatContext *context, AVPacket *pkt)
/* This usually means that ring buffer overflowed.
* We have to reset :(.
*/
-
+
        av_log(context, AV_LOG_ERROR, "DV1394: Ring buffer overflow. Resetting ..\n");
-
+
dv1394_reset(dv);
dv1394_start(dv);
}
@@ -200,12 +200,12 @@ restart_poll:
dv->done);
#endif
- size = dv_produce_packet(dv->dv_demux, pkt,
- dv->ring + (dv->index * DV1394_PAL_FRAME_SIZE),
+ size = dv_produce_packet(dv->dv_demux, pkt,
+ dv->ring + (dv->index * DV1394_PAL_FRAME_SIZE),
DV1394_PAL_FRAME_SIZE);
dv->index = (dv->index + 1) % DV1394_RING_FRAMES;
dv->done++; dv->avail--;
-
+
return size;
}
diff --git a/libavformat/dv1394.h b/libavformat/dv1394.h
index 8c0422a5d9..18650cddb6 100644
--- a/libavformat/dv1394.h
+++ b/libavformat/dv1394.h
@@ -57,7 +57,7 @@
To set the DV output parameters (e.g. whether you want NTSC or PAL
video), use the DV1394_INIT ioctl, passing in the parameters you
want in a struct dv1394_init.
-
+
Example 1:
To play a raw .DV file: cat foo.DV > /dev/dv1394
(cat will use write() internally)
@@ -80,9 +80,9 @@
2)
For more control over buffering, and to avoid unnecessary copies
-  of the DV data, you can use the more sophisticated mmap() interface. 
-  First, call the DV1394_INIT ioctl to specify your parameters, 
-  including the number of frames in the ringbuffer. Then, calling mmap() 
+  of the DV data, you can use the more sophisticated mmap() interface.
+  First, call the DV1394_INIT ioctl to specify your parameters,
+  including the number of frames in the ringbuffer. Then, calling mmap()
on the dv1394 device will give you direct access to the ringbuffer
from which the DV card reads your frame data.
@@ -107,7 +107,7 @@
*--------------------------------------*
| CLEAR | DV data | DV data | CLEAR |
*--------------------------------------*
- <ACTIVE>
+ <ACTIVE>
transmission goes in this direction --->>>
@@ -118,10 +118,10 @@
will continue to transmit frame 2, and will increase the dropped_frames
counter each time it repeats the transmission).
-
+
If you called DV1394_GET_STATUS at this instant, you would
receive the following values:
-
+
n_frames = 4
active_frame = 1
first_clear_frame = 3
@@ -152,9 +152,9 @@
(checks of system call return values omitted for brevity; always
check return values in your code!)
-
+
while( frames left ) {
-
+
struct pollfd *pfd = ...;
pfd->fd = dv1394_fd;
@@ -162,12 +162,12 @@
pfd->events = POLLOUT | POLLIN; (OUT for transmit, IN for receive)
(add other sources of I/O here)
-
+
poll(pfd, 1, -1); (or select(); add a timeout if you want)
if(pfd->revents) {
struct dv1394_status status;
-
+
ioctl(dv1394_fd, DV1394_GET_STATUS, &status);
if(status.dropped_frames > 0) {
@@ -191,7 +191,7 @@
should close the dv1394 file descriptor (and munmap() all
ringbuffer mappings, if you are using them), then re-open the
dv1394 device (and re-map the ringbuffer).
-
+
*/
@@ -264,7 +264,7 @@ enum pal_or_ntsc {
struct dv1394_init {
/* DV1394_API_VERSION */
unsigned int api_version;
-
+
/* isochronous transmission channel to use */
unsigned int channel;
@@ -276,7 +276,7 @@ struct dv1394_init {
enum pal_or_ntsc format;
/* the following are used only for transmission */
-
+
/* set these to zero unless you want a
non-default empty packet rate (see below) */
unsigned long cip_n;
@@ -293,7 +293,7 @@ struct dv1394_init {
would imply a different size for the ringbuffer). If you need a
different buffer size, simply close and re-open the device, then
initialize it with your new settings. */
-
+
/* Q: What are cip_n and cip_d? */
/*
@@ -310,13 +310,13 @@ struct dv1394_init {
The default empty packet insertion rate seems to work for many people; if
your DV output is stable, you can simply ignore this discussion. However,
we have exposed the empty packet rate as a parameter to support devices that
- do not work with the default rate.
+ do not work with the default rate.
The decision to insert an empty packet is made with a numerator/denominator
algorithm. Empty packets are produced at an average rate of CIP_N / CIP_D.
You can alter the empty packet rate by passing non-zero values for cip_n
and cip_d to the INIT ioctl.
-
+
*/
diff --git a/libavformat/electronicarts.c b/libavformat/electronicarts.c
index 7101652dc0..7684de9d80 100644
--- a/libavformat/electronicarts.c
+++ b/libavformat/electronicarts.c
@@ -191,7 +191,7 @@ static int ea_read_header(AVFormatContext *s,
st->codec->codec_type = CODEC_TYPE_VIDEO;
st->codec->codec_id = CODEC_ID_EA_MJPEG;
st->codec->codec_tag = 0; /* no fourcc */
-#endif
+#endif
/* initialize the audio decoder stream */
st = av_new_stream(s, 0);
@@ -243,7 +243,7 @@ static int ea_read_packet(AVFormatContext *s,
pkt->pts *= ea->audio_frame_counter;
pkt->pts /= EA_SAMPLE_RATE;
- /* 2 samples/byte, 1 or 2 samples per frame depending
+ /* 2 samples/byte, 1 or 2 samples per frame depending
* on stereo; chunk also has 12-byte header */
ea->audio_frame_counter += ((chunk_size - 12) * 2) /
ea->num_channels;
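
The counter update above is plain arithmetic once the chunk layout is known; a worked example with assumed numbers:

    /* A 1036-byte stereo audio chunk: 12 header bytes leave 1024 payload bytes,
     * each byte carries 2 ADPCM samples, and stereo spends 2 samples per frame,
     * so audio_frame_counter advances by (1036 - 12) * 2 / 2 = 1024 frames. */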
diff --git a/libavformat/ffm.c b/libavformat/ffm.c
index 775a89c0c2..6511639362 100644
--- a/libavformat/ffm.c
+++ b/libavformat/ffm.c
@@ -66,7 +66,7 @@ static void flush_packet(AVFormatContext *s)
fill_size = ffm->packet_end - ffm->packet_ptr;
memset(ffm->packet_ptr, 0, fill_size);
- if (url_ftell(pb) % ffm->packet_size)
+ if (url_ftell(pb) % ffm->packet_size)
av_abort();
/* put header */
@@ -403,7 +403,7 @@ static void adjust_write_index(AVFormatContext *s)
pts = get_pts(s, pos_max);
- if (pts - 100000 > pts_start)
+ if (pts - 100000 > pts_start)
goto end;
ffm->write_index = FFM_PACKET_SIZE;
@@ -481,9 +481,9 @@ static int ffm_read_header(AVFormatContext *s, AVFormatParameters *ap)
if (!fst)
goto fail;
s->streams[i] = st;
-
+
av_set_pts_info(st, 64, 1, 1000000);
-
+
st->priv_data = fst;
codec = st->codec;
@@ -581,7 +581,7 @@ static int ffm_read_packet(AVFormatContext *s, AVPacket *pkt)
printf("pos=%08Lx spos=%Lx, write_index=%Lx size=%Lx\n",
url_ftell(&s->pb), s->pb.pos, ffm->write_index, ffm->file_size);
#endif
- if (ffm_read_data(s, ffm->header, FRAME_HEADER_SIZE, 1) !=
+ if (ffm_read_data(s, ffm->header, FRAME_HEADER_SIZE, 1) !=
FRAME_HEADER_SIZE)
return -EAGAIN;
#if 0
@@ -604,7 +604,7 @@ static int ffm_read_packet(AVFormatContext *s, AVPacket *pkt)
av_new_packet(pkt, size);
pkt->stream_index = ffm->header[0];
- pkt->pos = url_ftell(&s->pb);
+ pkt->pos = url_ftell(&s->pb);
if (ffm->header[1] & FLAG_KEY_FRAME)
pkt->flags |= PKT_FLAG_KEY;
@@ -747,7 +747,7 @@ static int ffm_read_close(AVFormatContext *s)
static int ffm_probe(AVProbeData *p)
{
if (p->buf_size >= 4 &&
- p->buf[0] == 'F' && p->buf[1] == 'F' && p->buf[2] == 'M' &&
+ p->buf[0] == 'F' && p->buf[1] == 'F' && p->buf[2] == 'M' &&
p->buf[3] == '1')
return AVPROBE_SCORE_MAX + 1;
return 0;
diff --git a/libavformat/file.c b/libavformat/file.c
index fc75ee015c..3bc94dfc96 100644
--- a/libavformat/file.c
+++ b/libavformat/file.c
@@ -70,7 +70,7 @@ static int file_write(URLContext *h, unsigned char *buf, int size)
static offset_t file_seek(URLContext *h, offset_t pos, int whence)
{
int fd = (size_t)h->priv_data;
-#if defined(CONFIG_WIN32) && !defined(__CYGWIN__)
+#if defined(CONFIG_WIN32) && !defined(__CYGWIN__)
return _lseeki64(fd, pos, whence);
#else
return lseek(fd, pos, whence);
diff --git a/libavformat/flic.c b/libavformat/flic.c
index 027a4c3933..18addfc4c6 100644
--- a/libavformat/flic.c
+++ b/libavformat/flic.c
@@ -33,7 +33,7 @@
#define FLIC_FILE_MAGIC_1 0xAF11
#define FLIC_FILE_MAGIC_2 0xAF12
-#define FLIC_FILE_MAGIC_3 0xAF44 /* Flic Type for Extended FLX Format which
+#define FLIC_FILE_MAGIC_3 0xAF44 /* Flic Type for Extended FLX Format which
originated in Dave's Targa Animator (DTA) */
#define FLIC_CHUNK_MAGIC_1 0xF1FA
#define FLIC_CHUNK_MAGIC_2 0xF5FA
@@ -182,9 +182,9 @@ static int flic_read_packet(AVFormatContext *s,
}
pkt->stream_index = flic->video_stream_index;
pkt->pts = flic->pts;
- pkt->pos = url_ftell(pb);
+ pkt->pos = url_ftell(pb);
memcpy(pkt->data, preamble, FLIC_PREAMBLE_SIZE);
- ret = get_buffer(pb, pkt->data + FLIC_PREAMBLE_SIZE,
+ ret = get_buffer(pb, pkt->data + FLIC_PREAMBLE_SIZE,
size - FLIC_PREAMBLE_SIZE);
if (ret != size - FLIC_PREAMBLE_SIZE) {
av_free_packet(pkt);
diff --git a/libavformat/flvdec.c b/libavformat/flvdec.c
index 7884a17f72..e908651214 100644
--- a/libavformat/flvdec.c
+++ b/libavformat/flvdec.c
@@ -35,7 +35,7 @@ static int flv_read_header(AVFormatContext *s,
AVFormatParameters *ap)
{
int offset, flags;
-
+
    s->ctx_flags |= AVFMTCTX_NOHEADER; //ok, we have a header but there's no fps, codec type, sample_rate, ...
url_fskip(&s->pb, 4);
@@ -51,7 +51,7 @@ static int flv_read_packet(AVFormatContext *s, AVPacket *pkt)
{
int ret, i, type, size, pts, flags, is_audio, next;
AVStream *st = NULL;
-
+
for(;;){
url_fskip(&s->pb, 4); /* size of previous packet */
type = get_byte(&s->pb);
@@ -62,10 +62,10 @@ static int flv_read_packet(AVFormatContext *s, AVPacket *pkt)
return AVERROR_IO;
url_fskip(&s->pb, 4); /* reserved */
flags = 0;
-
+
if(size == 0)
continue;
-
+
next= size + url_ftell(&s->pb);
if (type == 8) {
@@ -83,13 +83,13 @@ static int flv_read_packet(AVFormatContext *s, AVPacket *pkt)
char tmp[128];
int type, len;
double d= 0;
-
+
len= get_be16(&s->pb);
if(len >= sizeof(tmp) || !len)
break;
get_buffer(&s->pb, tmp, len);
tmp[len]=0;
-
+
type= get_byte(&s->pb);
if(type==0){
d= av_int2dbl(get_be64(&s->pb));
@@ -105,7 +105,7 @@ static int flv_read_packet(AVFormatContext *s, AVPacket *pkt)
d= av_int2dbl(get_be64(&s->pb));
get_be16(&s->pb);
}
-
+
if(!strcmp(tmp, "duration")){
s->duration = d*AV_TIME_BASE;
}else if(!strcmp(tmp, "videodatarate")){
@@ -187,10 +187,10 @@ static int flv_read_packet(AVFormatContext *s, AVPacket *pkt)
pkt->size = ret;
pkt->pts = pts;
pkt->stream_index = st->index;
-
+
if (is_audio || ((flags >> 4)==1))
pkt->flags |= PKT_FLAG_KEY;
-
+
return ret;
}
diff --git a/libavformat/flvenc.c b/libavformat/flvenc.c
index 5d448a1b49..a5ec9833e2 100644
--- a/libavformat/flvenc.c
+++ b/libavformat/flvenc.c
@@ -52,7 +52,7 @@ static int get_audio_flags(AVCodecContext *enc){
if (enc->channels > 1) {
flags |= 0x01;
}
-
+
switch(enc->codec_id){
case CODEC_ID_MP3:
flags |= 0x20 | 0x2;
@@ -75,7 +75,7 @@ static int get_audio_flags(AVCodecContext *enc){
av_log(enc, AV_LOG_ERROR, "codec not compatible with flv\n");
return -1;
}
-
+
return flags;
}
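
The flags byte assembled in get_audio_flags() follows the FLV audio-tag layout; sketched here from the constants visible in the hunk, with the bit meanings stated as an assumption about the FLV spec:

    /* bits 7-4: sound format   (0x20 >> 4 == 2  -> MP3)
     * bits 3-2: sampling rate  (0 = 5.5 kHz, 1 = 11 kHz, 2 = 22 kHz, 3 = 44 kHz)
     * bit  1  : sample size    (0x02 -> 16-bit samples)
     * bit  0  : channel count  (0x01 -> stereo, 0 -> mono) */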
@@ -93,7 +93,7 @@ static int flv_write_header(AVFormatContext *s)
put_byte(pb,0); // delayed write
put_be32(pb,9);
put_be32(pb,0);
-
+
for(i=0; i<s->nb_streams; i++){
AVCodecContext *enc = s->streams[i]->codec;
av_set_pts_info(s->streams[i], 24, 1, 1000); /* 24 bit pts in ms */
@@ -138,7 +138,7 @@ static int flv_write_packet(AVFormatContext *s, AVPacket *pkt)
int flags;
// av_log(s, AV_LOG_DEBUG, "type:%d pts: %lld size:%d\n", enc->codec_type, timestamp, size);
-
+
if (enc->codec_type == CODEC_TYPE_VIDEO) {
put_byte(pb, 9);
flags = 2; // choose h263
@@ -147,7 +147,7 @@ static int flv_write_packet(AVFormatContext *s, AVPacket *pkt)
} else {
assert(enc->codec_type == CODEC_TYPE_AUDIO);
flags = get_audio_flags(enc);
-
+
assert(size);
put_byte(pb, 8);
@@ -162,7 +162,7 @@ static int flv_write_packet(AVFormatContext *s, AVPacket *pkt)
put_byte(pb,flags);
put_buffer(pb, pkt->data, size);
put_be32(pb,size+1+11); // previous tag size
-
+
put_flush_packet(pb);
return 0;
}
diff --git a/libavformat/framehook.h b/libavformat/framehook.h
index ed24c66bae..68e928ca4c 100644
--- a/libavformat/framehook.h
+++ b/libavformat/framehook.h
@@ -2,7 +2,7 @@
#define _FRAMEHOOK_H
/*
- * Prototypes for the interface to a .so that implements a video processing hook
+ * Prototypes for the interface to a .so that implements a video processing hook
*/
#include "avcodec.h"
diff --git a/libavformat/gif.c b/libavformat/gif.c
index be5b7b973d..1784bf9838 100644
--- a/libavformat/gif.c
+++ b/libavformat/gif.c
@@ -56,7 +56,7 @@ typedef struct {
/* we use the standard 216 color palette */
/* this script was used to create the palette:
- * for r in 00 33 66 99 cc ff; do for g in 00 33 66 99 cc ff; do echo -n " "; for b in 00 33 66 99 cc ff; do
+ * for r in 00 33 66 99 cc ff; do for g in 00 33 66 99 cc ff; do echo -n " "; for b in 00 33 66 99 cc ff; do
* echo -n "{ 0x$r, 0x$g, 0x$b }, "; done; echo ""; done; done
*/
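
The 216 entries produced by that shell snippet form the 6x6x6 web-safe cube, so mapping an RGB triple to a palette index only needs per-channel quantisation to six levels. A sketch of such a lookup; the rounding used here is an assumption, not necessarily what gif_clut_index() does:

    static unsigned char clut_index_sketch(uint8_t r, uint8_t g, uint8_t b)
    {
        /* quantise each channel to 0..5 (levels 0x00,0x33,0x66,0x99,0xcc,0xff) */
        unsigned ri = (r + 25) / 51, gi = (g + 25) / 51, bi = (b + 25) / 51;
        return ri * 36 + gi * 6 + bi;   /* red varies slowest, as in the table */
    }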
@@ -129,12 +129,12 @@ static void gif_put_bits_rev(PutBitContext *s, int n, unsigned int value)
bit_cnt+=n;
} else {
bit_buf |= value << (bit_cnt);
-
+
*s->buf_ptr = bit_buf & 0xff;
s->buf_ptr[1] = (bit_buf >> 8) & 0xff;
s->buf_ptr[2] = (bit_buf >> 16) & 0xff;
s->buf_ptr[3] = (bit_buf >> 24) & 0xff;
-
+
//printf("bitbuf = %08x\n", bit_buf);
s->buf_ptr+=4;
if (s->buf_ptr >= s->buf_end)
@@ -169,7 +169,7 @@ static void gif_flush_put_bits_rev(PutBitContext *s)
/* !RevPutBitContext */
/* GIF header */
-static int gif_image_write_header(ByteIOContext *pb,
+static int gif_image_write_header(ByteIOContext *pb,
int width, int height, int loop_count,
uint32_t *palette)
{
@@ -204,16 +204,16 @@ static int gif_image_write_header(ByteIOContext *pb,
byte 1 : 33 (hex 0x21) GIF Extension code
byte 2 : 255 (hex 0xFF) Application Extension Label
- byte 3 : 11 (hex (0x0B) Length of Application Block
+ byte 3 : 11 (hex (0x0B) Length of Application Block
(eleven bytes of data to follow)
bytes 4 to 11 : "NETSCAPE"
bytes 12 to 14 : "2.0"
- byte 15 : 3 (hex 0x03) Length of Data Sub-Block
+ byte 15 : 3 (hex 0x03) Length of Data Sub-Block
(three bytes of data to follow)
byte 16 : 1 (hex 0x01)
- bytes 17 to 18 : 0 to 65535, an unsigned integer in
-       bytes 17 to 18 : lo-hi byte format. This indicates the 
- number of iterations the loop should
+ bytes 17 to 18 : 0 to 65535, an unsigned integer in
+       bytes 17 to 18 : lo-hi byte format. This indicates the
+ number of iterations the loop should
be executed.
bytes 19 : 0 (hex 0x00) a Data Sub-block Terminator
*/
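
The byte-by-byte description above maps directly onto the ByteIOContext helpers already used in this muxer; a sketch, as a hypothetical helper assuming the same put_* calls:

    static void put_netscape_loop(ByteIOContext *pb, uint16_t loop_count)
    {
        put_byte(pb, 0x21);          /* GIF extension introducer            */
        put_byte(pb, 0xff);          /* application extension label         */
        put_byte(pb, 0x0b);          /* 11 bytes of application data follow */
        put_tag(pb, "NETSCAPE2.0");
        put_byte(pb, 0x03);          /* 3-byte data sub-block               */
        put_byte(pb, 0x01);
        put_le16(pb, loop_count);    /* iterations, lo-hi byte order        */
        put_byte(pb, 0x00);          /* data sub-block terminator           */
    }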
@@ -241,7 +241,7 @@ static inline unsigned char gif_clut_index(uint8_t r, uint8_t g, uint8_t b)
}
-static int gif_image_write_image(ByteIOContext *pb,
+static int gif_image_write_image(ByteIOContext *pb,
int x1, int y1, int width, int height,
const uint8_t *buf, int linesize, int pix_fmt)
{
@@ -302,7 +302,7 @@ static int gif_image_write_image(ByteIOContext *pb,
left-=GIF_CHUNKS;
}
put_byte(pb, 0x00); /* end of image block */
-
+
return 0;
}
@@ -351,7 +351,7 @@ static int gif_write_header(AVFormatContext *s)
return 0;
}
-static int gif_write_video(AVFormatContext *s,
+static int gif_write_video(AVFormatContext *s,
AVCodecContext *enc, const uint8_t *buf, int size)
{
ByteIOContext *pb = &s->pb;
@@ -364,7 +364,7 @@ static int gif_write_video(AVFormatContext *s,
put_byte(pb, 0xf9);
put_byte(pb, 0x04); /* block size */
put_byte(pb, 0x04); /* flags */
-
+
/* 1 jiffy is 1/70 s */
/* the delay_time field indicates the number of jiffies - 1 */
delay = gif->file_time - gif->time;
@@ -407,10 +407,10 @@ static int gif_write_trailer(AVFormatContext *s)
/* better than nothing gif image writer */
int gif_write(ByteIOContext *pb, AVImageInfo *info)
{
- gif_image_write_header(pb, info->width, info->height, AVFMT_NOOUTPUTLOOP,
+ gif_image_write_header(pb, info->width, info->height, AVFMT_NOOUTPUTLOOP,
(uint32_t *)info->pict.data[1]);
- gif_image_write_image(pb, 0, 0, info->width, info->height,
- info->pict.data[0], info->pict.linesize[0],
+ gif_image_write_image(pb, 0, 0, info->width, info->height,
+ info->pict.data[0], info->pict.linesize[0],
PIX_FMT_PAL8);
put_byte(pb, 0x3b);
put_flush_packet(pb);
diff --git a/libavformat/gifdec.c b/libavformat/gifdec.c
index 429a30cee5..162da564e7 100644
--- a/libavformat/gifdec.c
+++ b/libavformat/gifdec.c
@@ -46,7 +46,7 @@ typedef struct GifState {
int gce_disposal;
/* delay during which the frame is shown */
int gce_delay;
-
+
/* LZW compatible decoder */
ByteIOContext *f;
int eob_reached;
@@ -312,7 +312,7 @@ static int gif_read_image(GifState *s)
palette = s->global_palette;
bits_per_pixel = s->bits_per_pixel;
}
-
+
/* verify that all the image is inside the screen dimensions */
if (left + width > s->screen_width ||
top + height > s->screen_height)
@@ -327,7 +327,7 @@ static int gif_read_image(GifState *s)
n = (1 << bits_per_pixel);
spal = palette;
for(i = 0; i < n; i++) {
- s->image_palette[i] = (0xff << 24) |
+ s->image_palette[i] = (0xff << 24) |
(spal[0] << 16) | (spal[1] << 8) | (spal[2]);
spal += 3;
}
@@ -376,7 +376,7 @@ static int gif_read_image(GifState *s)
ptr += linesize * 8;
if (y1 >= height) {
y1 = 4;
- if (pass == 0)
+ if (pass == 0)
ptr = ptr1 + linesize * 4;
else
ptr = ptr1 + linesize * 2;
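
The pass/y1 juggling above implements GIF interlacing; the equivalent row schedule stated as data, an assumed restatement rather than the decoder's actual loop:

    /* pass 1: rows 0, 8, 16, ...   pass 2: rows 4, 12, 20, ...
     * pass 3: rows 2, 6, 10, ...   pass 4: rows 1, 3, 5, ... */
    static const int interlace_start[4] = { 0, 4, 2, 1 };
    static const int interlace_step[4]  = { 8, 8, 4, 2 };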
@@ -402,7 +402,7 @@ static int gif_read_image(GifState *s)
}
}
av_free(line);
-
+
/* read the garbage data until end marker is found */
while (!s->eob_reached)
GetCode(s);
@@ -434,14 +434,14 @@ static int gif_read_extension(GifState *s)
s->transparent_color_index = -1;
s->gce_disposal = (gce_flags >> 2) & 0x7;
#ifdef DEBUG
- printf("gif: gce_flags=%x delay=%d tcolor=%d disposal=%d\n",
- gce_flags, s->gce_delay,
+ printf("gif: gce_flags=%x delay=%d tcolor=%d disposal=%d\n",
+ gce_flags, s->gce_delay,
s->transparent_color_index, s->gce_disposal);
#endif
ext_len = get_byte(f);
break;
}
-
+
/* NOTE: many extension blocks can come after */
discard_ext:
while (ext_len != 0) {
@@ -474,11 +474,11 @@ static int gif_read_header1(GifState *s)
s->transparent_color_index = -1;
s->screen_width = get_le16(f);
s->screen_height = get_le16(f);
- if( (unsigned)s->screen_width > 32767
+ if( (unsigned)s->screen_width > 32767
|| (unsigned)s->screen_height > 32767){
av_log(NULL, AV_LOG_ERROR, "picture size too large\n");
return -1;
- }
+ }
v = get_byte(f);
s->color_resolution = ((v & 0x70) >> 4) + 1;
@@ -543,7 +543,7 @@ static int gif_read_header(AVFormatContext * s1,
s->f = f;
if (gif_read_header1(s) < 0)
return -1;
-
+
/* allocate image buffer */
s->image_linesize = s->screen_width * 3;
s->image_buf = av_malloc(s->screen_height * s->image_linesize);
@@ -593,7 +593,7 @@ static int gif_read_close(AVFormatContext *s1)
}
/* read gif as image */
-static int gif_read(ByteIOContext *f,
+static int gif_read(ByteIOContext *f,
int (*alloc_cb)(void *opaque, AVImageInfo *info), void *opaque)
{
GifState s1, *s = &s1;
diff --git a/libavformat/grab.c b/libavformat/grab.c
index 201db86304..c8e2716ca0 100644
--- a/libavformat/grab.c
+++ b/libavformat/grab.c
@@ -70,7 +70,7 @@ static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
if (!ap || ap->width <= 0 || ap->height <= 0 || ap->time_base.den <= 0)
return -1;
-
+
width = ap->width;
height = ap->height;
frame_rate = ap->time_base.den;
@@ -78,7 +78,7 @@ static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
if((unsigned)width > 32767 || (unsigned)height > 32767)
return -1;
-
+
st = av_new_stream(s1, 0);
if (!st)
return -ENOMEM;
@@ -97,7 +97,7 @@ static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
perror(video_device);
goto fail;
}
-
+
if (ioctl(video_fd,VIDIOCGCAP, &s->video_cap) < 0) {
perror("VIDIOCGCAP");
goto fail;
@@ -115,7 +115,7 @@ static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
desired_palette = VIDEO_PALETTE_YUV422;
} else if (st->codec->pix_fmt == PIX_FMT_BGR24) {
desired_palette = VIDEO_PALETTE_RGB24;
- }
+ }
/* set tv standard */
if (ap->standard && !ioctl(video_fd, VIDIOCGTUNER, &tuner)) {
@@ -127,7 +127,7 @@ static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
tuner.mode = VIDEO_MODE_NTSC;
ioctl(video_fd, VIDIOCSTUNER, &tuner);
}
-
+
/* unmute audio */
audio.audio = 0;
ioctl(video_fd, VIDIOCGAUDIO, &audio);
@@ -159,7 +159,7 @@ static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
pict.brightness,
pict.contrast,
pict.whiteness);
-#endif
+#endif
/* try to choose a suitable video format */
pict.palette = desired_palette;
if (desired_palette == -1 || (ret = ioctl(video_fd, VIDIOCSPICT, &pict)) < 0) {
@@ -171,7 +171,7 @@ static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
if (ret < 0) {
pict.palette=VIDEO_PALETTE_RGB24;
ret = ioctl(video_fd, VIDIOCSPICT, &pict);
- if (ret < 0)
+ if (ret < 0)
goto fail1;
}
}
@@ -184,7 +184,7 @@ static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
s->time_frame = av_gettime() * s->frame_rate / s->frame_rate_base;
s->use_mmap = 0;
-
+
/* ATI All In Wonder automatic activation */
if (!strcmp(s->video_cap.name, "Km")) {
if (aiw_init(s) < 0)
@@ -202,7 +202,7 @@ static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
}
s->gb_frame = 0;
s->time_frame = av_gettime() * s->frame_rate / s->frame_rate_base;
-
+
/* start to grab the first frame */
s->gb_buf.frame = s->gb_frame % s->gb_buffers.frames;
s->gb_buf.height = height;
@@ -211,12 +211,12 @@ static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
if (desired_palette == -1 || (ret = ioctl(video_fd, VIDIOCMCAPTURE, &s->gb_buf)) < 0) {
s->gb_buf.format = VIDEO_PALETTE_YUV420P;
-
+
ret = ioctl(video_fd, VIDIOCMCAPTURE, &s->gb_buf);
if (ret < 0 && errno != EAGAIN) {
/* try YUV422 */
s->gb_buf.format = VIDEO_PALETTE_YUV422;
-
+
ret = ioctl(video_fd, VIDIOCMCAPTURE, &s->gb_buf);
if (ret < 0 && errno != EAGAIN) {
/* try RGB24 */
@@ -260,7 +260,7 @@ static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
}
s->fd = video_fd;
s->frame_size = frame_size;
-
+
st->codec->codec_type = CODEC_TYPE_VIDEO;
st->codec->codec_id = CODEC_ID_RAWVIDEO;
st->codec->width = width;
@@ -322,7 +322,7 @@ static int grab_read_packet(AVFormatContext *s1, AVPacket *pkt)
s->time_frame += int64_t_C(1000000);
}
break;
- }
+ }
ts.tv_sec = delay / 1000000;
ts.tv_nsec = (delay % 1000000) * 1000;
nanosleep(&ts, NULL);
@@ -388,7 +388,7 @@ static int aiw_init(VideoData *s)
if ((width == s->video_cap.maxwidth && height == s->video_cap.maxheight) ||
(width == s->video_cap.maxwidth && height == s->video_cap.maxheight*2) ||
(width == s->video_cap.maxwidth/2 && height == s->video_cap.maxheight)) {
-
+
s->deint=0;
s->halfw=0;
if (height == s->video_cap.maxheight*2) s->deint=1;
@@ -609,7 +609,7 @@ static int aiw_init(VideoData *s)
sum=(ptr[24]+ptr[26]+1) >> 1;lum[6]=sum; \
sum=(ptr[28]+ptr[30]+1) >> 1;lum[7]=sum; \
sum=(ptr[25]+ptr[29]+1) >> 1;cb[3]=sum; \
- sum=(ptr[27]+ptr[31]+1) >> 1;cr[3]=sum;
+ sum=(ptr[27]+ptr[31]+1) >> 1;cr[3]=sum;
#define LINE_NOUV_AVG \
sum=(ptr[0]+ptr[2]+1) >> 1;lum[0]=sum; \
@@ -619,7 +619,7 @@ static int aiw_init(VideoData *s)
sum=(ptr[16]+ptr[18]+1) >> 1;lum[4]=sum; \
sum=(ptr[20]+ptr[22]+1) >> 1;lum[5]=sum; \
sum=(ptr[24]+ptr[26]+1) >> 1;lum[6]=sum; \
- sum=(ptr[28]+ptr[30]+1) >> 1;lum[7]=sum;
+ sum=(ptr[28]+ptr[30]+1) >> 1;lum[7]=sum;
#define DEINT_LINE_LUM(ptroff) \
sum=(-lum_m4[(ptroff)]+(lum_m3[(ptroff)]<<2)+(lum_m2[(ptroff)]<<1)+(lum_m1[(ptroff)]<<2)-lum[(ptroff)]); \
diff --git a/libavformat/grab_bktr.c b/libavformat/grab_bktr.c
index cfa75e3408..45027454b9 100644
--- a/libavformat/grab_bktr.c
+++ b/libavformat/grab_bktr.c
@@ -174,7 +174,7 @@ static int bktr_init(const char *video_device, int width, int height,
video_buf_size = width * height * 12 / 8;
- video_buf = (uint8_t *)mmap((caddr_t)0, video_buf_size,
+ video_buf = (uint8_t *)mmap((caddr_t)0, video_buf_size,
PROT_READ, MAP_SHARED, *video_fd, (off_t)0);
if (video_buf == MAP_FAILED) {
perror("mmap");
@@ -182,7 +182,7 @@ static int bktr_init(const char *video_device, int width, int height,
}
if (frequency != 0.0) {
- ioctl_frequency = (unsigned long)(frequency*16);
+ ioctl_frequency = (unsigned long)(frequency*16);
if (ioctl(*tuner_fd, TVTUNER_SETFREQ, &ioctl_frequency) < 0)
perror("TVTUNER_SETFREQ");
}
diff --git a/libavformat/http.c b/libavformat/http.c
index 2198cd548d..bc6954f640 100644
--- a/libavformat/http.c
+++ b/libavformat/http.c
@@ -73,13 +73,13 @@ static int http_open(URLContext *h, const char *uri, int flags)
h->priv_data = s;
proxy_path = getenv("http_proxy");
- use_proxy = (proxy_path != NULL) && !getenv("no_proxy") &&
+ use_proxy = (proxy_path != NULL) && !getenv("no_proxy") &&
strstart(proxy_path, "http://", NULL);
/* fill the dest addr */
redo:
/* needed in any case to build the host string */
- url_split(NULL, 0, auth, sizeof(auth), hostname, sizeof(hostname), &port,
+ url_split(NULL, 0, auth, sizeof(auth), hostname, sizeof(hostname), &port,
path1, sizeof(path1), uri);
if (port > 0) {
snprintf(hoststr, sizeof(hoststr), "%s:%d", hostname, port);
@@ -88,7 +88,7 @@ static int http_open(URLContext *h, const char *uri, int flags)
}
if (use_proxy) {
- url_split(NULL, 0, auth, sizeof(auth), hostname, sizeof(hostname), &port,
+ url_split(NULL, 0, auth, sizeof(auth), hostname, sizeof(hostname), &port,
NULL, 0, proxy_path);
path = uri;
} else {
@@ -142,7 +142,7 @@ static int http_getc(HTTPContext *s)
static int process_line(HTTPContext *s, char *line, int line_count)
{
char *tag, *p;
-
+
/* end of header */
if (line[0] == '\0')
return 0;
@@ -160,9 +160,9 @@ static int process_line(HTTPContext *s, char *line, int line_count)
} else {
while (*p != '\0' && *p != ':')
p++;
- if (*p != ':')
+ if (*p != ':')
return 1;
-
+
*p = '\0';
tag = line;
p++;
@@ -198,10 +198,10 @@ static int http_connect(URLContext *h, const char *path, const char *hoststr,
LIBAVFORMAT_IDENT,
hoststr,
b64_encode(auth));
-
+
if (http_write(h, s->buffer, strlen(s->buffer)) < 0)
return AVERROR_IO;
-
+
/* init input buffer */
s->buf_ptr = s->buffer;
s->buf_end = s->buffer;
@@ -211,7 +211,7 @@ static int http_connect(URLContext *h, const char *path, const char *hoststr,
sleep(1);
return 0;
}
-
+
/* wait for header */
q = line;
for(;;) {
@@ -286,7 +286,7 @@ URLProtocol http_protocol = {
/*****************************************************************************
* b64_encode: stolen from VLC's http.c
*****************************************************************************/
-
+
static char *b64_encode( const unsigned char *src )
{
static const char b64[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
@@ -317,16 +317,16 @@ static char *b64_encode( const unsigned char *src )
*dst++ = '=';
break;
}
-
+
while( i_shift >= 6 )
{
i_shift -= 6;
*dst++ = b64[(i_bits >> i_shift)&0x3f];
}
}
-
+
*dst++ = '\0';
-
+
return ret;
}
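
For reference, the encoder above supplies the Basic credentials assembled in http_connect(); with the RFC 2617 example input the round trip looks like this (assumed usage, same function name as in the hunk):

    /* b64_encode("Aladdin:open sesame") -> "QWxhZGRpbjpvcGVuIHNlc2FtZQ==",
     * which is emitted after "Authorization: Basic " in the request header. */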
diff --git a/libavformat/idcin.c b/libavformat/idcin.c
index ce56d2a8a5..cbbf980233 100644
--- a/libavformat/idcin.c
+++ b/libavformat/idcin.c
@@ -255,7 +255,7 @@ static int idcin_read_packet(AVFormatContext *s,
/* skip the number of decoded bytes (always equal to width * height) */
url_fseek(pb, 4, SEEK_CUR);
chunk_size -= 4;
- ret= av_get_packet(pb, pkt, chunk_size);
+ ret= av_get_packet(pb, pkt, chunk_size);
if (ret != chunk_size)
return AVERROR_IO;
pkt->stream_index = idcin->video_stream_index;
diff --git a/libavformat/idroq.c b/libavformat/idroq.c
index 62536b0b6b..955152b2d4 100644
--- a/libavformat/idroq.c
+++ b/libavformat/idroq.c
@@ -78,19 +78,19 @@ static int roq_read_header(AVFormatContext *s,
unsigned int chunk_type;
/* get the main header */
- if (get_buffer(pb, preamble, RoQ_CHUNK_PREAMBLE_SIZE) !=
+ if (get_buffer(pb, preamble, RoQ_CHUNK_PREAMBLE_SIZE) !=
RoQ_CHUNK_PREAMBLE_SIZE)
return AVERROR_IO;
roq->framerate = LE_16(&preamble[6]);
roq->frame_pts_inc = 90000 / roq->framerate;
/* init private context parameters */
- roq->width = roq->height = roq->audio_channels = roq->video_pts =
+ roq->width = roq->height = roq->audio_channels = roq->video_pts =
roq->audio_frame_count = 0;
/* scan the first n chunks searching for A/V parameters */
for (i = 0; i < RoQ_CHUNKS_TO_SCAN; i++) {
- if (get_buffer(pb, preamble, RoQ_CHUNK_PREAMBLE_SIZE) !=
+ if (get_buffer(pb, preamble, RoQ_CHUNK_PREAMBLE_SIZE) !=
RoQ_CHUNK_PREAMBLE_SIZE)
return AVERROR_IO;
@@ -101,7 +101,7 @@ static int roq_read_header(AVFormatContext *s,
case RoQ_INFO:
/* fetch the width and height; reuse the preamble bytes */
- if (get_buffer(pb, preamble, RoQ_CHUNK_PREAMBLE_SIZE) !=
+ if (get_buffer(pb, preamble, RoQ_CHUNK_PREAMBLE_SIZE) !=
RoQ_CHUNK_PREAMBLE_SIZE)
return AVERROR_IO;
roq->width = LE_16(&preamble[0]);
@@ -190,7 +190,7 @@ static int roq_read_packet(AVFormatContext *s,
return AVERROR_IO;
/* get the next chunk preamble */
- if ((ret = get_buffer(pb, preamble, RoQ_CHUNK_PREAMBLE_SIZE)) !=
+ if ((ret = get_buffer(pb, preamble, RoQ_CHUNK_PREAMBLE_SIZE)) !=
RoQ_CHUNK_PREAMBLE_SIZE)
return AVERROR_IO;
@@ -211,10 +211,10 @@ static int roq_read_packet(AVFormatContext *s,
codebook_offset = url_ftell(pb) - RoQ_CHUNK_PREAMBLE_SIZE;
codebook_size = chunk_size;
url_fseek(pb, codebook_size, SEEK_CUR);
- if (get_buffer(pb, preamble, RoQ_CHUNK_PREAMBLE_SIZE) !=
+ if (get_buffer(pb, preamble, RoQ_CHUNK_PREAMBLE_SIZE) !=
RoQ_CHUNK_PREAMBLE_SIZE)
return AVERROR_IO;
- chunk_size = LE_32(&preamble[2]) + RoQ_CHUNK_PREAMBLE_SIZE * 2 +
+ chunk_size = LE_32(&preamble[2]) + RoQ_CHUNK_PREAMBLE_SIZE * 2 +
codebook_size;
/* rewind */
diff --git a/libavformat/img.c b/libavformat/img.c
index b30e78eb14..d389a521ee 100644
--- a/libavformat/img.c
+++ b/libavformat/img.c
@@ -39,7 +39,7 @@ typedef struct {
/* return -1 if no image found */
-static int find_image_range(int *pfirst_index, int *plast_index,
+static int find_image_range(int *pfirst_index, int *plast_index,
const char *path)
{
char buf[1024];
@@ -54,7 +54,7 @@ static int find_image_range(int *pfirst_index, int *plast_index,
}
if (first_index == 5)
goto fail;
-
+
/* find the last image */
last_index = first_index;
for(;;) {
@@ -64,7 +64,7 @@ static int find_image_range(int *pfirst_index, int *plast_index,
range1 = 1;
else
range1 = 2 * range;
- if (get_frame_filename(buf, sizeof(buf), path,
+ if (get_frame_filename(buf, sizeof(buf), path,
last_index + range1) < 0)
goto fail;
if (!url_exist(buf))
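
find_image_range() probes the filesystem at exponentially growing offsets to locate the last frame of a numbered sequence; a compact sketch of the same idea, assumed equivalent in behaviour, with a caller-supplied exists() probe standing in for get_frame_filename() plus url_exist():

    static int find_last_index(int first_index, int (*exists)(int idx))
    {
        int last = first_index, range = 0;

        for (;;) {
            int step = range ? 2 * range : 1;     /* 1, 2, 4, 8, ... */
            if (exists(last + step)) {
                last += step;                     /* keep galloping forward */
                range = step;
            } else if (step == 1) {
                return last;                      /* last + 1 is missing: done */
            } else {
                range = 0;                        /* overshot: restart with step 1 */
            }
        }
    }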
@@ -126,7 +126,7 @@ static int img_read_header(AVFormatContext *s1, AVFormatParameters *ap)
pstrcpy(s->path, sizeof(s->path), s1->filename);
s->img_number = 0;
s->img_count = 0;
-
+
/* find format */
if (s1->iformat->flags & AVFMT_NOFILE)
s->is_pipe = 0;
@@ -138,7 +138,7 @@ static int img_read_header(AVFormatContext *s1, AVFormatParameters *ap)
} else {
st->codec->time_base= ap->time_base;
}
-
+
if (!s->is_pipe) {
if (find_image_range(&first_index, &last_index, s->path) < 0)
goto fail;
@@ -155,7 +155,7 @@ static int img_read_header(AVFormatContext *s1, AVFormatParameters *ap)
} else {
f = &s1->pb;
}
-
+
ret = av_read_image(f, s1->filename, s->img_fmt, read_header_alloc_cb, s);
if (ret < 0)
goto fail1;
@@ -165,7 +165,7 @@ static int img_read_header(AVFormatContext *s1, AVFormatParameters *ap)
} else {
url_fseek(f, 0, SEEK_SET);
}
-
+
st->codec->codec_type = CODEC_TYPE_VIDEO;
st->codec->codec_id = CODEC_ID_RAWVIDEO;
st->codec->width = s->width;
@@ -265,7 +265,7 @@ static int img_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
if (s->nb_streams != 1)
return -1;
-
+
st = s->streams[0];
/* we select the first matching format */
for(i=0;i<PIX_FMT_NB;i++) {
@@ -292,7 +292,7 @@ static int img_write_header(AVFormatContext *s)
img->is_pipe = 0;
else
img->is_pipe = 1;
-
+
return 0;
}
@@ -308,11 +308,11 @@ static int img_write_packet(AVFormatContext *s, AVPacket *pkt)
width = st->codec->width;
height = st->codec->height;
-
+
picture = (AVPicture *)pkt->data;
if (!img->is_pipe) {
- if (get_frame_filename(filename, sizeof(filename),
+ if (get_frame_filename(filename, sizeof(filename),
img->path, img->img_number) < 0)
return AVERROR_IO;
pb = &pb1;
@@ -406,6 +406,6 @@ int img_init(void)
av_register_input_format(&imagepipe_iformat);
av_register_output_format(&imagepipe_oformat);
-
+
return 0;
}
diff --git a/libavformat/img2.c b/libavformat/img2.c
index c4cc2ba958..4085fb8ed2 100644
--- a/libavformat/img2.c
+++ b/libavformat/img2.c
@@ -99,7 +99,7 @@ static enum CodecID av_str2id(const IdStrMap *tags, const char *str)
}
/* return -1 if no image found */
-static int find_image_range(int *pfirst_index, int *plast_index,
+static int find_image_range(int *pfirst_index, int *plast_index,
const char *path)
{
char buf[1024];
@@ -108,7 +108,7 @@ static int find_image_range(int *pfirst_index, int *plast_index,
/* find the first image */
for(first_index = 0; first_index < 5; first_index++) {
if (get_frame_filename(buf, sizeof(buf), path, first_index) < 0){
- *pfirst_index =
+ *pfirst_index =
*plast_index = 1;
return 0;
}
@@ -117,7 +117,7 @@ static int find_image_range(int *pfirst_index, int *plast_index,
}
if (first_index == 5)
goto fail;
-
+
/* find the last image */
last_index = first_index;
for(;;) {
@@ -127,7 +127,7 @@ static int find_image_range(int *pfirst_index, int *plast_index,
range1 = 1;
else
range1 = 2 * range;
- if (get_frame_filename(buf, sizeof(buf), path,
+ if (get_frame_filename(buf, sizeof(buf), path,
last_index + range1) < 0)
goto fail;
if (!url_exist(buf))
@@ -178,7 +178,7 @@ static int img_read_header(AVFormatContext *s1, AVFormatParameters *ap)
pstrcpy(s->path, sizeof(s->path), s1->filename);
s->img_number = 0;
s->img_count = 0;
-
+
/* find format */
if (s1->iformat->flags & AVFMT_NOFILE)
s->is_pipe = 0;
@@ -186,18 +186,18 @@ static int img_read_header(AVFormatContext *s1, AVFormatParameters *ap)
s->is_pipe = 1;
st->need_parsing= 1;
}
-
+
if (!ap || !ap->time_base.num) {
av_set_pts_info(st, 60, 1, 25);
} else {
av_set_pts_info(st, 60, ap->time_base.num, ap->time_base.den);
}
-
+
if(ap && ap->width && ap->height){
st->codec->width = ap->width;
st->codec->height= ap->height;
}
-
+
if (!s->is_pipe) {
if (find_image_range(&first_index, &last_index, s->path) < 0)
return AVERROR_IO;
@@ -208,7 +208,7 @@ static int img_read_header(AVFormatContext *s1, AVFormatParameters *ap)
st->start_time = 0;
st->duration = last_index - first_index + 1;
}
-
+
if(ap->video_codec_id){
st->codec->codec_type = CODEC_TYPE_VIDEO;
st->codec->codec_id = ap->video_codec_id;
@@ -246,12 +246,12 @@ static int img_read_packet(AVFormatContext *s1, AVPacket *pkt)
if (url_fopen(f[i], filename, URL_RDONLY) < 0)
return AVERROR_IO;
size[i]= url_fsize(f[i]);
-
+
if(codec->codec_id != CODEC_ID_RAWVIDEO)
break;
filename[ strlen(filename) - 1 ]= 'U' + i;
}
-
+
if(codec->codec_id == CODEC_ID_RAWVIDEO && !codec->width)
infer_size(&codec->width, &codec->height, size[0]);
} else {
@@ -306,7 +306,7 @@ static int img_write_header(AVFormatContext *s)
img->is_pipe = 0;
else
img->is_pipe = 1;
-
+
return 0;
}
@@ -319,13 +319,13 @@ static int img_write_packet(AVFormatContext *s, AVPacket *pkt)
int i;
if (!img->is_pipe) {
- if (get_frame_filename(filename, sizeof(filename),
+ if (get_frame_filename(filename, sizeof(filename),
img->path, img->img_number) < 0 && img->img_number>1)
return AVERROR_IO;
for(i=0; i<3; i++){
if (url_fopen(pb[i], filename, URL_WRONLY) < 0)
return AVERROR_IO;
-
+
if(codec->codec_id != CODEC_ID_RAWVIDEO)
break;
filename[ strlen(filename) - 1 ]= 'U' + i;
@@ -333,7 +333,7 @@ static int img_write_packet(AVFormatContext *s, AVPacket *pkt)
} else {
pb[0] = &s->pb;
}
-
+
if(codec->codec_id == CODEC_ID_RAWVIDEO){
int ysize = codec->width * codec->height;
put_buffer(pb[0], pkt->data , ysize);
@@ -423,6 +423,6 @@ int img2_init(void)
av_register_input_format(&image2pipe_iformat);
av_register_output_format(&image2pipe_oformat);
-
+
return 0;
}
diff --git a/libavformat/ipmovie.c b/libavformat/ipmovie.c
index c62a007473..aaef7d3e51 100644
--- a/libavformat/ipmovie.c
+++ b/libavformat/ipmovie.c
@@ -118,7 +118,7 @@ typedef struct IPMVEContext {
} IPMVEContext;
-static int load_ipmovie_packet(IPMVEContext *s, ByteIOContext *pb,
+static int load_ipmovie_packet(IPMVEContext *s, ByteIOContext *pb,
AVPacket *pkt) {
int chunk_type;
@@ -170,7 +170,7 @@ static int load_ipmovie_packet(IPMVEContext *s, ByteIOContext *pb,
url_fseek(pb, s->decode_map_chunk_offset, SEEK_SET);
s->decode_map_chunk_offset = 0;
- if (get_buffer(pb, pkt->data, s->decode_map_chunk_size) !=
+ if (get_buffer(pb, pkt->data, s->decode_map_chunk_size) !=
s->decode_map_chunk_size) {
av_free_packet(pkt);
return CHUNK_EOF;
@@ -207,7 +207,7 @@ static int load_ipmovie_packet(IPMVEContext *s, ByteIOContext *pb,
/* This function loads and processes a single chunk in an IP movie file.
* It returns the type of chunk that was processed. */
-static int process_ipmovie_chunk(IPMVEContext *s, ByteIOContext *pb,
+static int process_ipmovie_chunk(IPMVEContext *s, ByteIOContext *pb,
AVPacket *pkt)
{
unsigned char chunk_preamble[CHUNK_PREAMBLE_SIZE];
@@ -358,7 +358,7 @@ static int process_ipmovie_chunk(IPMVEContext *s, ByteIOContext *pb,
s->audio_bits,
s->audio_sample_rate,
(s->audio_channels == 2) ? "stereo" : "mono",
- (s->audio_type == CODEC_ID_INTERPLAY_DPCM) ?
+ (s->audio_type == CODEC_ID_INTERPLAY_DPCM) ?
"Interplay audio" : "PCM");
break;
diff --git a/libavformat/jpeg.c b/libavformat/jpeg.c
index bf640d87a6..f957aa49dc 100644
--- a/libavformat/jpeg.c
+++ b/libavformat/jpeg.c
@@ -68,7 +68,7 @@ static int jpeg_get_buffer(AVCodecContext *c, AVFrame *picture)
}
}
-static void jpeg_img_copy(uint8_t *dst, int dst_wrap,
+static void jpeg_img_copy(uint8_t *dst, int dst_wrap,
uint8_t *src, int src_wrap,
int width, int height)
{
@@ -82,7 +82,7 @@ static void jpeg_img_copy(uint8_t *dst, int dst_wrap,
/* XXX: libavcodec is broken for truncated jpegs! */
#define IO_BUF_SIZE (1024*1024)
-static int jpeg_read(ByteIOContext *f,
+static int jpeg_read(ByteIOContext *f,
int (*alloc_cb)(void *opaque, AVImageInfo *info), void *opaque)
{
AVCodecContext *c;
@@ -94,7 +94,7 @@ static int jpeg_read(ByteIOContext *f,
jctx.alloc_cb = alloc_cb;
jctx.opaque = opaque;
jctx.ret_code = -1; /* default return code is error */
-
+
c = avcodec_alloc_context();
if (!c)
return -1;
@@ -114,7 +114,7 @@ static int jpeg_read(ByteIOContext *f,
break;
inbuf_ptr = inbuf;
while (size > 0) {
- len = avcodec_decode_video(c, &picture1, &got_picture,
+ len = avcodec_decode_video(c, &picture1, &got_picture,
inbuf_ptr, size);
if (len < 0)
goto fail;
@@ -198,10 +198,10 @@ static int jpeg_write(ByteIOContext *pb, AVImageInfo *info)
/* set the quality */
picture->quality = 3; /* XXX: a parameter should be used */
c->flags |= CODEC_FLAG_QSCALE;
-
+
if (avcodec_open(c, &mjpeg_encoder) < 0)
goto fail1;
-
+
/* XXX: needs to sort out that size problem */
outbuf_size = 1000000;
outbuf = av_malloc(outbuf_size);
diff --git a/libavformat/matroska.c b/libavformat/matroska.c
index 8279915f54..3a5e2f528d 100644
--- a/libavformat/matroska.c
+++ b/libavformat/matroska.c
@@ -1661,7 +1661,7 @@ matroska_parse_index (MatroskaDemuxContext *matroska)
break;
}
- /* position in the file + track to which it
+ /* position in the file + track to which it
* belongs */
case MATROSKA_ID_CUETRACKPOSITION:
if ((res = ebml_read_master(matroska, &id)) < 0)
@@ -1897,7 +1897,7 @@ matroska_parse_seekhead (MatroskaDemuxContext *matroska)
"cannot parse further.\n", EBML_MAX_DEPTH);
return AVERROR_UNKNOWN;
}
-
+
level.start = 0;
level.length = (uint64_t)-1;
matroska->levels[matroska->num_levels] = level;
@@ -2179,7 +2179,7 @@ matroska_read_header (AVFormatContext *s,
/* This is the MS compatibility mode which stores a
* WAVEFORMATEX in the CodecPrivate. */
- else if (!strcmp(track->codec_id,
+ else if (!strcmp(track->codec_id,
MATROSKA_CODEC_ID_AUDIO_ACM) &&
(track->codec_priv_size >= 18) &&
(track->codec_priv != NULL)) {
@@ -2368,7 +2368,7 @@ matroska_parse_blockgroup (MatroskaDemuxContext *matroska,
}
if(matroska->ctx->streams[ matroska->tracks[track]->stream_index ]->discard >= AVDISCARD_ALL){
av_free(origdata);
- break;
+ break;
}
/* time (relative to cluster time) */
diff --git a/libavformat/mmf.c b/libavformat/mmf.c
index 5cdaefa973..9700884a20 100644
--- a/libavformat/mmf.c
+++ b/libavformat/mmf.c
@@ -1,4 +1,4 @@
-/*
+/*
* Yamaha SMAF format
* Copyright (c) 2005 Vidar Madsen
*
@@ -66,7 +66,7 @@ static int mmf_write_header(AVFormatContext *s)
av_log(s, AV_LOG_ERROR, "Unsupported sample rate %d\n", s->streams[0]->codec->sample_rate);
return -1;
}
-
+
put_tag(pb, "MMMD");
put_be32(pb, 0);
pos = start_tag(pb, "CNTI");
@@ -270,7 +270,7 @@ static int mmf_read_packet(AVFormatContext *s,
if(!size)
return AVERROR_IO;
-
+
if (av_new_packet(pkt, size))
return AVERROR_IO;
pkt->stream_index = 0;
@@ -290,7 +290,7 @@ static int mmf_read_close(AVFormatContext *s)
return 0;
}
-static int mmf_read_seek(AVFormatContext *s,
+static int mmf_read_seek(AVFormatContext *s,
int stream_index, int64_t timestamp, int flags)
{
return pcm_read_seek(s, stream_index, timestamp, flags);
diff --git a/libavformat/mov.c b/libavformat/mov.c
index 3df408457b..a2de0d3c75 100644
--- a/libavformat/mov.c
+++ b/libavformat/mov.c
@@ -18,7 +18,7 @@
*/
#include <limits.h>
-
+
#include "avformat.h"
#include "avi.h"
@@ -28,7 +28,7 @@
/*
* First version by Francois Revol revol@free.fr
- * Seek function by Gael Chardon gael.dev@4now.net
+ * Seek function by Gael Chardon gael.dev@4now.net
*
* Features and limitations:
* - reads most of the QT files I have (at least the structure),
@@ -242,9 +242,9 @@ typedef struct MOVStreamContext {
long sample_to_chunk_sz;
MOV_sample_to_chunk_tbl *sample_to_chunk;
long sample_to_chunk_index;
- int sample_to_time_index;
- long sample_to_time_sample;
- uint64_t sample_to_time_time;
+ int sample_to_time_index;
+ long sample_to_time_sample;
+ uint64_t sample_to_time_time;
int sample_to_ctime_index;
int sample_to_ctime_sample;
long sample_size;
@@ -381,10 +381,10 @@ static int mov_read_default(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
/* empty */;
a.size -= 8;
-
+
if(a.size < 0)
break;
-
+
// av_log(NULL, AV_LOG_DEBUG, " i=%ld\n", i);
if (c->parse_table[i].type == 0) { /* skip leaf atoms data */
// url_seek(pb, atom.offset+atom.size, SEEK_SET);
@@ -693,7 +693,7 @@ static int mov_read_smi(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
if((uint64_t)atom.size > (1<<30))
return -1;
-
+
// currently SVQ3 decoder expect full STSD header - so let's fake it
// this should be fixed and just SMI header should be passed
av_free(st->codec->extradata);
@@ -716,7 +716,7 @@ static int mov_read_wave(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
if((uint64_t)atom.size > (1<<30))
return -1;
-
+
// pass all frma atom to codec, needed at least for QDM2
av_free(st->codec->extradata);
st->codec->extradata_size = atom.size;
@@ -763,10 +763,10 @@ static int mov_read_stco(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
get_byte(pb); get_byte(pb); get_byte(pb); /* flags */
entries = get_be32(pb);
-
+
if(entries >= UINT_MAX/sizeof(int64_t))
return -1;
-
+
sc->chunk_count = entries;
sc->chunk_offsets = (int64_t*) av_malloc(entries * sizeof(int64_t));
if (!sc->chunk_offsets)
@@ -781,7 +781,7 @@ static int mov_read_stco(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
}
} else
return -1;
-
+
for(i=0; i<c->fc->nb_streams; i++){
MOVStreamContext *sc2 = (MOVStreamContext *)c->fc->streams[i]->priv_data;
if(sc2 && sc2->chunk_offsets){
@@ -966,7 +966,7 @@ static int mov_read_stsd(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
color_greyscale = st->codec->bits_per_sample & 0x20;
/* if the depth is 2, 4, or 8 bpp, file is palettized */
- if ((color_depth == 2) || (color_depth == 4) ||
+ if ((color_depth == 2) || (color_depth == 4) ||
(color_depth == 8)) {
if (color_greyscale) {
@@ -1049,7 +1049,7 @@ static int mov_read_stsd(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
get_be16(pb);//Reserved_2
//AMRSpecificBox.(10 bytes)
-
+
get_be32(pb); //size
get_be32(pb); //type=='damr'
get_be32(pb); //vendor
@@ -1070,7 +1070,7 @@ static int mov_read_stsd(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
st->codec->channels=1;
}
st->codec->bits_per_sample=16;
- st->codec->bit_rate=0; /*It is not possible to tell this before we have
+ st->codec->bit_rate=0; /*It is not possible to tell this before we have
an audio frame and even then every frame can be different*/
}
else if( st->codec->codec_tag == MKTAG( 'm', 'p', '4', 's' ))
@@ -1099,7 +1099,7 @@ static int mov_read_stsd(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
get_be16(pb);
c->mp4=1;
-
+
if(mp4_version==1)
{
url_fskip(pb,16);
@@ -1109,13 +1109,13 @@ static int mov_read_stsd(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
a.size=size-(16+20);
a.offset=url_ftell(pb);
-
+
mov_read_default(c, pb, a);
/* Get correct sample rate from extradata */
if(st->codec->extradata_size) {
const int samplerate_table[] = {
- 96000, 88200, 64000, 48000, 44100, 32000,
+ 96000, 88200, 64000, 48000, 44100, 32000,
24000, 22050, 16000, 12000, 11025, 8000,
7350, 0, 0, 0
};
@@ -1139,7 +1139,7 @@ static int mov_read_stsd(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
/* fetch the 36-byte extradata needed for alac decoding */
st->codec->extradata_size = 36;
- st->codec->extradata = (uint8_t*)
+ st->codec->extradata = (uint8_t*)
av_mallocz(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
get_buffer(pb, st->codec->extradata, st->codec->extradata_size);
}
@@ -1222,7 +1222,7 @@ static int mov_read_stsd(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
}
}
}
-
+
if(st->codec->codec_type==CODEC_TYPE_AUDIO && st->codec->sample_rate==0 && sc->time_scale>1) {
st->codec->sample_rate= sc->time_scale;
}
@@ -1242,10 +1242,10 @@ static int mov_read_stsc(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
get_byte(pb); get_byte(pb); get_byte(pb); /* flags */
entries = get_be32(pb);
-
+
if(entries >= UINT_MAX / sizeof(MOV_sample_to_chunk_tbl))
return -1;
-
+
#ifdef DEBUG
av_log(NULL, AV_LOG_DEBUG, "track[%i].stsc.entries = %i\n", c->fc->nb_streams-1, entries);
#endif
@@ -1276,10 +1276,10 @@ static int mov_read_stss(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
get_byte(pb); get_byte(pb); get_byte(pb); /* flags */
entries = get_be32(pb);
-
+
if(entries >= UINT_MAX / sizeof(long))
return -1;
-
+
sc->keyframe_count = entries;
#ifdef DEBUG
av_log(NULL, AV_LOG_DEBUG, "keyframe_count = %ld\n", sc->keyframe_count);
@@ -1565,7 +1565,7 @@ static int mov_read_elst(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
get_byte(pb); /* version */
get_byte(pb); get_byte(pb); get_byte(pb); /* flags */
edit_count= c->streams[c->fc->nb_streams-1]->edit_count = get_be32(pb); /* entries */
-
+
for(i=0; i<edit_count; i++){
get_be32(pb); /* Track duration */
get_be32(pb); /* Media time */
@@ -1609,7 +1609,7 @@ static const MOVParseTableEntry mov_default_parse_table[] = {
{ MKTAG( 's', 'k', 'i', 'p' ), mov_read_leaf },
{ MKTAG( 's', 'm', 'h', 'd' ), mov_read_leaf }, /* sound media info header */
{ MKTAG( 'S', 'M', 'I', ' ' ), mov_read_smi }, /* Sorenson extension ??? */
-{ MKTAG( 'a', 'v', 'c', 'C' ), mov_read_avcC },
+{ MKTAG( 'a', 'v', 'c', 'C' ), mov_read_avcC },
{ MKTAG( 's', 't', 'b', 'l' ), mov_read_default },
{ MKTAG( 's', 't', 'c', 'o' ), mov_read_stco },
{ MKTAG( 's', 't', 'd', 'p' ), mov_read_default },
@@ -1663,8 +1663,8 @@ static void mov_free_stream_context(MOVStreamContext *sc)
av_freep(&sc->sample_to_chunk);
av_freep(&sc->sample_sizes);
av_freep(&sc->keyframes);
- av_freep(&sc->stts_data);
- av_freep(&sc->ctts_data);
+ av_freep(&sc->stts_data);
+ av_freep(&sc->ctts_data);
av_freep(&sc);
}
}
@@ -1901,11 +1901,11 @@ again:
}
#ifdef MOV_MINOLTA_FIX
- //Make sure that size is according to sample_size (Needed by .mov files
+ //Make sure that size is according to sample_size (Needed by .mov files
//created on a Minolta Dimage Xi where audio chunks contains waste data in the end)
//Maybe we should really not only check sc->sample_size, but also sc->sample_sizes
//but I have no such movies
- if (sc->sample_size > 0) {
+ if (sc->sample_size > 0) {
int foundsize=0;
for(i=0; i<(sc->sample_to_chunk_sz); i++) {
if( (sc->sample_to_chunk[i].first)<=(sc->next_chunk) )
@@ -1966,22 +1966,22 @@ readchunk:
av_get_packet(&s->pb, pkt, size);
pkt->stream_index = sc->ffindex;
-
+
// If the keyframes table exists, mark any samples that are in the table as key frames.
// If no table exists, treat very sample as a key frame.
- if (sc->keyframes) {
+ if (sc->keyframes) {
a = 0;
b = sc->keyframe_count - 1;
-
+
while (a < b) {
m = (a + b + 1) >> 1;
if (sc->keyframes[m] > sc->current_sample) {
b = m - 1;
} else {
a = m;
- }
+ }
}
-
+
if (sc->keyframes[a] == sc->current_sample)
pkt->flags |= PKT_FLAG_KEY;
}
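The block above binary-searches the stss keyframe table, a sorted list of sample numbers that are sync samples, and marks the packet as a key frame on an exact hit. The same search as a standalone helper (is_keyframe() is an illustrative name) might look like:

    /* Return 1 if 'sample' appears in the sorted keyframe table, 0 otherwise.
     * Mirrors the search above: find the largest entry <= sample, then compare. */
    static int is_keyframe(const long *keyframes, int keyframe_count, long sample)
    {
        int a = 0, b = keyframe_count - 1;

        if (keyframe_count <= 0)
            return 1;           /* no table: every sample counts as a key frame */

        while (a < b) {
            int m = (a + b + 1) >> 1;
            if (keyframes[m] > sample)
                b = m - 1;
            else
                a = m;
        }
        return keyframes[a] == sample;
    }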
@@ -2000,19 +2000,19 @@ readchunk:
#endif
mov->next_chunk_offset = offset + size;
-
- /* find the corresponding dts */
- if (sc && sc->sample_to_time_index < sc->stts_count && pkt) {
+
+ /* find the corresponding dts */
+ if (sc && sc->sample_to_time_index < sc->stts_count && pkt) {
unsigned int count;
uint64_t dts, pts;
unsigned int duration = sc->stts_data[sc->sample_to_time_index].duration;
count = sc->stts_data[sc->sample_to_time_index].count;
- if ((sc->sample_to_time_sample + count) < sc->current_sample) {
- sc->sample_to_time_sample += count;
- sc->sample_to_time_time += count*duration;
- sc->sample_to_time_index ++;
+ if ((sc->sample_to_time_sample + count) < sc->current_sample) {
+ sc->sample_to_time_sample += count;
+ sc->sample_to_time_time += count*duration;
+ sc->sample_to_time_index ++;
duration = sc->stts_data[sc->sample_to_time_index].duration;
- }
+ }
dts = sc->sample_to_time_time + (sc->current_sample-1 - sc->sample_to_time_sample) * (int64_t)duration;
/* find the corresponding pts */
if (sc->sample_to_ctime_index < sc->ctts_count) {
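The dts computation above walks the stts (time-to-sample) table, where each entry describes a run of 'count' samples sharing one 'duration'; the dts of a sample is the sum of the durations of all earlier samples. A self-contained sketch with a hypothetical SttsEntry struct:

    #include <stdint.h>

    typedef struct {
        unsigned int count;     /* number of consecutive samples ...            */
        unsigned int duration;  /* ... that all last this long (timescale units) */
    } SttsEntry;

    /* Compute the dts of a zero-based sample index from an stts table. */
    static int64_t stts_sample_to_dts(const SttsEntry *stts, int stts_count,
                                      int64_t sample)
    {
        int64_t dts = 0;
        int i;

        for (i = 0; i < stts_count; i++) {
            if (sample < stts[i].count)
                return dts + sample * (int64_t)stts[i].duration;
            dts    += (int64_t)stts[i].count * stts[i].duration;
            sample -= stts[i].count;
        }
        return dts;             /* index past the table: clamp to the end */
    }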
@@ -2063,9 +2063,9 @@ static int mov_read_seek(AVFormatContext *s, int stream_index, int64_t sample_ti
int64_t sample_file_offset;
int32_t first_chunk_sample;
int32_t sample_to_chunk_idx;
- int sample_to_time_index;
- long sample_to_time_sample = 0;
- uint64_t sample_to_time_time = 0;
+ int sample_to_time_index;
+ long sample_to_time_sample = 0;
+ uint64_t sample_to_time_time = 0;
int mov_idx;
// Find the corresponding mov stream
@@ -2097,20 +2097,20 @@ static int mov_read_seek(AVFormatContext *s, int stream_index, int64_t sample_ti
for (i = 0; i < sc->stts_count; i++) {
count = sc->stts_data[i].count;
duration = sc->stts_data[i].duration;
-//av_log(s, AV_LOG_DEBUG, "> sample_time %lli \n", (long)sample_time);
-//av_log(s, AV_LOG_DEBUG, "> count=%i duration=%i\n", count, duration);
+//av_log(s, AV_LOG_DEBUG, "> sample_time %lli \n", (long)sample_time);
+//av_log(s, AV_LOG_DEBUG, "> count=%i duration=%i\n", count, duration);
if ((start_time + count*duration) > sample_time) {
- sample_to_time_time = start_time;
- sample_to_time_index = i;
- sample_to_time_sample = sample;
+ sample_to_time_time = start_time;
+ sample_to_time_index = i;
+ sample_to_time_sample = sample;
sample += (sample_time - start_time) / duration;
break;
}
sample += count;
start_time += count * duration;
- }
- sample_to_time_time = start_time;
- sample_to_time_index = i;
+ }
+ sample_to_time_time = start_time;
+ sample_to_time_index = i;
/* NOTE: despite what qt doc say, the dt value (Display Time in qt vocabulary) computed with the stts atom
is a decoding time stamp (dts) not a presentation time stamp. And as usual dts != pts for stream with b frames */
@@ -2198,7 +2198,7 @@ static int mov_read_seek(AVFormatContext *s, int stream_index, int64_t sample_ti
sc->next_chunk = chunk; // +1 -1 (zero based)
sc->sample_to_chunk_index = sample_to_chunk_idx;
- // Update other streams
+ // Update other streams
for (i = 0; i<mov->total_streams; i++) {
MOVStreamContext *msc;
if (i == mov_idx) continue;
@@ -2213,17 +2213,17 @@ static int mov_read_seek(AVFormatContext *s, int stream_index, int64_t sample_ti
} else {
a = m;
}
-#ifdef DEBUG
+#ifdef DEBUG
/* av_log(s, AV_LOG_DEBUG, "a=%i (%li) b=%i (%li) m=%i (%li) stream #%i\n"
, a, (long)msc->chunk_offsets[a], b, (long)msc->chunk_offsets[b], m, (long)msc->chunk_offsets[m], i); */
-#endif
+#endif
}
msc->next_chunk = a;
if (msc->chunk_offsets[a] < chunk_file_offset && a < (msc->chunk_count-1))
msc->next_chunk ++;
-#ifdef DEBUG
+#ifdef DEBUG
av_log(s, AV_LOG_DEBUG, "Nearest next chunk for stream #%i is #%i @%lli\n", i, msc->next_chunk+1, msc->chunk_offsets[msc->next_chunk]);
-#endif
+#endif
// Compute sample count and index in the sample_to_chunk table (what a pity)
msc->sample_to_chunk_index = 0;
msc->current_sample = 0;
@@ -2234,19 +2234,19 @@ static int mov_read_seek(AVFormatContext *s, int stream_index, int64_t sample_ti
}
msc->current_sample += (msc->next_chunk - (msc->sample_to_chunk[msc->sample_to_chunk_index].first - 1)) * sc->sample_to_chunk[msc->sample_to_chunk_index].count;
msc->left_in_chunk = msc->sample_to_chunk[msc->sample_to_chunk_index].count - 1;
- // Find corresponding position in stts (used later to compute dts)
- sample = 0;
- start_time = 0;
- for (msc->sample_to_time_index = 0; msc->sample_to_time_index < msc->stts_count; msc->sample_to_time_index++) {
+ // Find corresponding position in stts (used later to compute dts)
+ sample = 0;
+ start_time = 0;
+ for (msc->sample_to_time_index = 0; msc->sample_to_time_index < msc->stts_count; msc->sample_to_time_index++) {
count = msc->stts_data[msc->sample_to_time_index].count;
duration = msc->stts_data[msc->sample_to_time_index].duration;
- if ((sample + count - 1) > msc->current_sample) {
- msc->sample_to_time_time = start_time;
- msc->sample_to_time_sample = sample;
- break;
- }
- sample += count;
- start_time += count * duration;
+ if ((sample + count - 1) > msc->current_sample) {
+ msc->sample_to_time_time = start_time;
+ msc->sample_to_time_sample = sample;
+ break;
+ }
+ sample += count;
+ start_time += count * duration;
}
sample = 0;
for (msc->sample_to_ctime_index = 0; msc->sample_to_ctime_index < msc->ctts_count; msc->sample_to_ctime_index++) {
@@ -2257,11 +2257,11 @@ static int mov_read_seek(AVFormatContext *s, int stream_index, int64_t sample_ti
break;
}
sample += count;
- }
-#ifdef DEBUG
+ }
+#ifdef DEBUG
av_log(s, AV_LOG_DEBUG, "Next Sample for stream #%i is #%i @%i\n", i, msc->current_sample + 1, msc->sample_to_chunk_index + 1);
-#endif
- }
+#endif
+ }
return 0;
}
#endif
@@ -2289,7 +2289,7 @@ static AVInputFormat mov_iformat = {
mov_read_close,
#if defined(MOV_SPLIT_CHUNKS) && defined(MOV_SEEK)
mov_read_seek,
-#endif
+#endif
};
int mov_init(void)
diff --git a/libavformat/movenc.c b/libavformat/movenc.c
index 38bb687b09..26bc0ee03e 100644
--- a/libavformat/movenc.c
+++ b/libavformat/movenc.c
@@ -30,7 +30,7 @@
#define MODE_MP4 0
#define MODE_MOV 1
#define MODE_3GP 2
-#define MODE_PSP 3 // example working PSP command line:
+#define MODE_PSP 3 // example working PSP command line:
// ffmpeg -i testinput.avi -f psp -r 14.985 -s 320x240 -b 768 -ar 24000 -ab 32 M4V00001.MP4
#define MODE_3G2 4
@@ -143,12 +143,12 @@ static int mov_write_stsz_tag(ByteIOContext *pb, MOVTrack* track)
}
if (equalChunks) {
int sSize = track->cluster[0][0].size/track->cluster[0][0].entries;
- put_be32(pb, sSize); // sample size
+ put_be32(pb, sSize); // sample size
put_be32(pb, entries); // sample count
}
else {
- put_be32(pb, 0); // sample size
- put_be32(pb, entries); // sample count
+ put_be32(pb, 0); // sample size
+ put_be32(pb, entries); // sample count
for (i=0; i<track->entry; i++) {
int cl = i / MOV_INDEX_CLUSTER_SIZE;
int id = i % MOV_INDEX_CLUSTER_SIZE;
@@ -169,24 +169,24 @@ static int mov_write_stsc_tag(ByteIOContext *pb, MOVTrack* track)
int pos = url_ftell(pb);
put_be32(pb, 0); /* size */
put_tag(pb, "stsc");
- put_be32(pb, 0); // version & flags
+ put_be32(pb, 0); // version & flags
entryPos = url_ftell(pb);
- put_be32(pb, track->entry); // entry count
+ put_be32(pb, track->entry); // entry count
for (i=0; i<track->entry; i++) {
int cl = i / MOV_INDEX_CLUSTER_SIZE;
int id = i % MOV_INDEX_CLUSTER_SIZE;
if(oldval != track->cluster[cl][id].samplesInChunk)
{
- put_be32(pb, i+1); // first chunk
+ put_be32(pb, i+1); // first chunk
put_be32(pb, track->cluster[cl][id].samplesInChunk); // samples per chunk
- put_be32(pb, 0x1); // sample description index
+ put_be32(pb, 0x1); // sample description index
oldval = track->cluster[cl][id].samplesInChunk;
index++;
}
}
curpos = url_ftell(pb);
url_fseek(pb, entryPos, SEEK_SET);
- put_be32(pb, index); // rewrite size
+ put_be32(pb, index); // rewrite size
url_fseek(pb, curpos, SEEK_SET);
return updateSize (pb, pos);
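mov_write_stsc_tag() above run-length encodes the samples-per-chunk values: a new stsc entry is written only when the value changes, and the real entry count is patched in afterwards by seeking back. Ignoring the seek-and-patch part, the counting logic alone could be sketched as (count_stsc_entries() is illustrative):

    /* Count how many stsc entries a run-length encoding of samples-per-chunk
     * values needs: one entry per run of equal values. */
    static int count_stsc_entries(const int *samples_in_chunk, int nb_chunks)
    {
        int i, entries = 0, prev = -1;

        for (i = 0; i < nb_chunks; i++) {
            if (samples_in_chunk[i] != prev) {
                /* here the muxer writes: first_chunk = i + 1,
                 * samples_per_chunk = samples_in_chunk[i], sample_desc_index = 1 */
                prev = samples_in_chunk[i];
                entries++;
            }
        }
        return entries;
    }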
@@ -198,11 +198,11 @@ static int mov_write_stss_tag(ByteIOContext *pb, MOVTrack* track)
long curpos;
int i, index = 0, entryPos;
int pos = url_ftell(pb);
- put_be32(pb, 0); // size
+ put_be32(pb, 0); // size
put_tag(pb, "stss");
- put_be32(pb, 0); // version & flags
+ put_be32(pb, 0); // version & flags
entryPos = url_ftell(pb);
- put_be32(pb, track->entry); // entry count
+ put_be32(pb, track->entry); // entry count
for (i=0; i<track->entry; i++) {
int cl = i / MOV_INDEX_CLUSTER_SIZE;
int id = i % MOV_INDEX_CLUSTER_SIZE;
@@ -213,7 +213,7 @@ static int mov_write_stss_tag(ByteIOContext *pb, MOVTrack* track)
}
curpos = url_ftell(pb);
url_fseek(pb, entryPos, SEEK_SET);
- put_be32(pb, index); // rewrite size
+ put_be32(pb, index); // rewrite size
url_fseek(pb, curpos, SEEK_SET);
return updateSize (pb, pos);
}
@@ -278,7 +278,7 @@ static int mov_write_audio_tag(ByteIOContext *pb, MOVTrack* track)
{
int pos = url_ftell(pb);
int tag;
-
+
put_be32(pb, 0); /* size */
tag = track->enc->codec_tag;
@@ -359,7 +359,7 @@ static int mov_write_svq3_tag(ByteIOContext *pb)
put_be32(pb, 0x5);
put_be32(pb, 0xe2c0211d);
put_be32(pb, 0xc0000000);
- put_byte(pb, 0);
+ put_byte(pb, 0);
return 0x15;
}
@@ -383,9 +383,9 @@ static void putDescr(ByteIOContext *pb, int tag, int size)
len = size;
vals[3] = (uint8_t)(len & 0x7f);
len >>= 7;
- vals[2] = (uint8_t)((len & 0x7f) | 0x80);
+ vals[2] = (uint8_t)((len & 0x7f) | 0x80);
len >>= 7;
- vals[1] = (uint8_t)((len & 0x7f) | 0x80);
+ vals[1] = (uint8_t)((len & 0x7f) | 0x80);
len >>= 7;
vals[0] = (uint8_t)((len & 0x7f) | 0x80);
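putDescr() above writes an MPEG-4 descriptor header: a one-byte tag followed by a length coded 7 bits per byte, the top bit of each byte marking "more bytes follow", always expanded to the fixed 4-byte form here. A sketch of that encoding into a plain buffer (put_descr_header() is an illustrative name):

    #include <stdint.h>

    /* Encode an MPEG-4 descriptor header (tag + expandable length) into buf.
     * Like the muxer above, the length always uses the 4-byte form
     * (valid for sizes below 2^28). Returns the number of bytes written. */
    static int put_descr_header(uint8_t *buf, int tag, unsigned int size)
    {
        buf[0] = tag;
        buf[1] = 0x80 | ((size >> 21) & 0x7f);
        buf[2] = 0x80 | ((size >> 14) & 0x7f);
        buf[3] = 0x80 | ((size >>  7) & 0x7f);
        buf[4] =         size         & 0x7f;   /* last byte: continuation bit clear */
        return 5;
    }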
@@ -421,12 +421,12 @@ static int mov_write_esds_tag(ByteIOContext *pb, MOVTrack* track) // Basic
int pos = url_ftell(pb);
void *vosDataBackup=track->vosData;
int vosLenBackup=track->vosLen;
-
+
// we should be able to have these passed in, via vosData, then we wouldn't need to attack this routine at all
static const char PSPAACData[]={0x13,0x10};
static const char PSPMP4Data[]={0x00,0x00,0x01,0xB0,0x03,0x00,0x00,0x01,0xB5,0x09,0x00,0x00,0x01,0x00,0x00,0x00,0x01,0x20,0x00,0x84,0x5D,0x4C,0x28,0x50,0x20,0xF0,0xA3,0x1F };
-
-
+
+
if (track->mode == MODE_PSP) // fails on psp if this is not here
{
if (track->enc->codec_id == CODEC_ID_AAC)
@@ -538,13 +538,13 @@ static int mov_write_video_tag(ByteIOContext *pb, MOVTrack* track)
put_be32(pb, 0x00480000); /* Vertical resolution 72dpi */
put_be32(pb, 0); /* Data size (= 0) */
put_be16(pb, 1); /* Frame count (= 1) */
-
+
memset(compressor_name,0,32);
if (track->enc->codec && track->enc->codec->name)
strncpy(compressor_name,track->enc->codec->name,31);
put_byte(pb, strlen(compressor_name));
put_buffer(pb, compressor_name, 31);
-
+
put_be16(pb, 0x18); /* Reserved */
put_be16(pb, 0xffff); /* Reserved */
if(track->enc->codec_id == CODEC_ID_MPEG4)
@@ -552,7 +552,7 @@ static int mov_write_video_tag(ByteIOContext *pb, MOVTrack* track)
else if(track->enc->codec_id == CODEC_ID_H263)
mov_write_d263_tag(pb);
else if(track->enc->codec_id == CODEC_ID_SVQ3)
- mov_write_svq3_tag(pb);
+ mov_write_svq3_tag(pb);
return updateSize (pb, pos);
}
@@ -647,7 +647,7 @@ static int mov_write_hdlr_tag(ByteIOContext *pb, MOVTrack* track)
{
char *descr, *hdlr, *hdlr_type;
int pos = url_ftell(pb);
-
+
if (!track) { /* no media --> data handler */
hdlr = "dhlr";
hdlr_type = "url ";
@@ -662,7 +662,7 @@ static int mov_write_hdlr_tag(ByteIOContext *pb, MOVTrack* track)
descr = "SoundHandler";
}
}
-
+
put_be32(pb, 0); /* size */
put_tag(pb, "hdlr");
put_be32(pb, 0); /* Version & flags */
@@ -699,7 +699,7 @@ static int mov_write_mdhd_tag(ByteIOContext *pb, MOVTrack* track)
put_be32(pb, 0); /* Version & flags */
put_be32(pb, track->time); /* creation time */
put_be32(pb, track->time); /* modification time */
- put_be32(pb, track->timescale); /* time scale (sample rate for audio) */
+ put_be32(pb, track->timescale); /* time scale (sample rate for audio) */
put_be32(pb, track->trackDuration); /* duration */
put_be16(pb, 0); /* language, 0 = english */
put_be16(pb, 0); /* reserved (quality) */
@@ -805,10 +805,10 @@ static int mov_write_trak_tag(ByteIOContext *pb, MOVTrack* track)
put_be32(pb, 0); /* size */
put_tag(pb, "trak");
mov_write_tkhd_tag(pb, track);
- if (track->mode == MODE_PSP)
+ if (track->mode == MODE_PSP)
mov_write_edts_tag(pb, track); // PSP Movies require edts box
mov_write_mdia_tag(pb, track);
- if (track->mode == MODE_PSP)
+ if (track->mode == MODE_PSP)
mov_write_uuid_tag_psp(pb,track); // PSP Movies require this uuid box
return updateSize(pb, pos);
}
@@ -1078,7 +1078,7 @@ static int mov_write_meta_tag(ByteIOContext *pb, MOVContext* mov,
int size = 0;
// only save meta tag if required
- if ( s->title[0] || s->author[0] || s->album[0] || s->year ||
+ if ( s->title[0] || s->author[0] || s->album[0] || s->year ||
s->comment[0] || s->genre[0] || s->track ) {
int pos = url_ftell(pb);
put_be32(pb, 0); /* size */
@@ -1090,7 +1090,7 @@ static int mov_write_meta_tag(ByteIOContext *pb, MOVContext* mov,
}
return size;
}
-
+
static int mov_write_udta_tag(ByteIOContext *pb, MOVContext* mov,
AVFormatContext *s)
{
@@ -1196,7 +1196,7 @@ static int mov_write_moov_tag(ByteIOContext *pb, MOVContext *mov,
}
}
- mov->tracks[i].trackDuration =
+ mov->tracks[i].trackDuration =
mov->tracks[i].sampleCount * mov->tracks[i].sampleDuration;
mov->tracks[i].time = mov->time;
mov->tracks[i].trackID = i+1;
@@ -1217,7 +1217,7 @@ static int mov_write_moov_tag(ByteIOContext *pb, MOVContext *mov,
int mov_write_mdat_tag(ByteIOContext *pb, MOVContext* mov)
{
- mov->mdat_pos = url_ftell(pb);
+ mov->mdat_pos = url_ftell(pb);
put_be32(pb, 0); /* size placeholder*/
put_tag(pb, "mdat");
return 0;
@@ -1258,7 +1258,7 @@ static void mov_write_uuidprof_tag(ByteIOContext *pb, AVFormatContext *s)
{
int AudioRate = s->streams[1]->codec->sample_rate;
int FrameRate = ((s->streams[0]->codec->time_base.den) * (0x10000))/ (s->streams[0]->codec->time_base.num);
-
+
//printf("audiorate = %d\n",AudioRate);
//printf("framerate = %d / %d = 0x%x\n",s->streams[0]->codec->time_base.den,s->streams[0]->codec->time_base.num,FrameRate);
@@ -1300,7 +1300,7 @@ static void mov_write_uuidprof_tag(ByteIOContext *pb, AVFormatContext *s)
put_be32(pb, 0x0 );
put_be32(pb, 0xc0 );
put_be32(pb, 0xc0 );
- put_be32(pb, FrameRate); // was 0xefc29
+ put_be32(pb, FrameRate); // was 0xefc29
put_be32(pb, FrameRate ); // was 0xefc29
put_be16(pb, s->streams[0]->codec->width);
put_be16(pb, s->streams[0]->codec->height);
@@ -1396,7 +1396,7 @@ static int mov_write_packet(AVFormatContext *s, AVPacket *pkt)
}
else if(enc->codec_id == CODEC_ID_PCM_S16BE || enc->codec_id == CODEC_ID_PCM_S16LE) {
samplesInChunk = size/(2*enc->channels);
- }
+ }
else {
samplesInChunk = 1;
}
@@ -1415,7 +1415,7 @@ static int mov_write_packet(AVFormatContext *s, AVPacket *pkt)
id = trk->entry % MOV_INDEX_CLUSTER_SIZE;
if (trk->ents_allocated <= trk->entry) {
- trk->cluster = av_realloc(trk->cluster, (cl+1)*sizeof(void*));
+ trk->cluster = av_realloc(trk->cluster, (cl+1)*sizeof(void*));
if (!trk->cluster)
return -1;
trk->cluster[cl] = av_malloc(MOV_INDEX_CLUSTER_SIZE*sizeof(MOVIentry));
diff --git a/libavformat/mp3.c b/libavformat/mp3.c
index 8eb98fcb38..4fb22ae836 100644
--- a/libavformat/mp3.c
+++ b/libavformat/mp3.c
@@ -1,4 +1,4 @@
-/*
+/*
* MP3 encoder and decoder
* Copyright (c) 2003 Fabrice Bellard.
*
@@ -166,7 +166,7 @@ static int id3_match(const uint8_t *buf)
(buf[9] & 0x80) == 0);
}
-static void id3_get_string(char *str, int str_size,
+static void id3_get_string(char *str, int str_size,
const uint8_t *buf, int buf_size)
{
int i, c;
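id3_match() above recognizes an ID3v2 header: the magic "ID3", two version bytes, a flags byte and a 4-byte "syncsafe" size whose bytes all keep their top bit clear (the & 0x80 tests), so the size decodes 7 bits per byte. A small sketch assuming a 10-byte header has already been read (id3v2_tag_size() is illustrative):

    #include <stdint.h>

    /* Return the size of the ID3v2 tag body (excluding the 10-byte header),
     * or -1 if the buffer does not start with a valid ID3v2 header. */
    static int id3v2_tag_size(const uint8_t *buf)
    {
        if (buf[0] != 'I' || buf[1] != 'D' || buf[2] != '3')
            return -1;
        /* the four size bytes are "syncsafe": the top bit of each must be 0 */
        if ((buf[6] | buf[7] | buf[8] | buf[9]) & 0x80)
            return -1;
        return (buf[6] << 21) | (buf[7] << 14) | (buf[8] << 7) | buf[9];
    }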
@@ -189,7 +189,7 @@ static int id3_parse_tag(AVFormatContext *s, const uint8_t *buf)
{
char str[5];
int genre;
-
+
if (!(buf[0] == 'T' &&
buf[1] == 'A' &&
buf[2] == 'G'))
@@ -254,7 +254,7 @@ static int mp3_read_header(AVFormatContext *s,
st->codec->codec_type = CODEC_TYPE_AUDIO;
st->codec->codec_id = CODEC_ID_MP3;
st->need_parsing = 1;
-
+
/* try to get the TAG */
if (!url_is_streamed(&s->pb)) {
/* XXX: change that */
@@ -294,7 +294,7 @@ static int mp3_read_packet(AVFormatContext *s, AVPacket *pkt)
{
int ret, size;
// AVStream *st = s->streams[0];
-
+
size= MP3_PACKET_SIZE;
ret= av_get_packet(&s->pb, pkt, size);
@@ -394,7 +394,7 @@ int mp3_init(void)
av_register_output_format(&mp2_oformat);
#ifdef CONFIG_MP3LAME
av_register_output_format(&mp3_oformat);
-#endif
+#endif
#endif //CONFIG_MUXERS
return 0;
}
diff --git a/libavformat/mpeg.c b/libavformat/mpeg.c
index 1e4d2a79f1..4bebdba6fa 100644
--- a/libavformat/mpeg.c
+++ b/libavformat/mpeg.c
@@ -77,7 +77,7 @@ typedef struct {
#define PACKET_START_CODE_MASK ((unsigned int)0xffffff00)
#define PACKET_START_CODE_PREFIX ((unsigned int)0x00000100)
#define ISO_11172_END_CODE ((unsigned int)0x000001b9)
-
+
/* mpeg2 */
#define PROGRAM_STREAM_MAP 0x1bc
#define PRIVATE_STREAM_1 0x1bd
@@ -114,12 +114,12 @@ static AVOutputFormat mpeg2vob_mux;
static AVOutputFormat mpeg2svcd_mux;
static AVOutputFormat mpeg2dvd_mux;
-static int put_pack_header(AVFormatContext *ctx,
+static int put_pack_header(AVFormatContext *ctx,
uint8_t *buf, int64_t timestamp)
{
MpegMuxContext *s = ctx->priv_data;
PutBitContext pb;
-
+
init_put_bits(&pb, buf, 128);
put_bits(&pb, 32, PACK_START_CODE);
@@ -161,7 +161,7 @@ static int put_system_header(AVFormatContext *ctx, uint8_t *buf,int only_for_str
put_bits(&pb, 32, SYSTEM_HEADER_START_CODE);
put_bits(&pb, 16, 0);
put_bits(&pb, 1, 1);
-
+
put_bits(&pb, 22, s->mux_rate); /* maximum bit rate of the multiplexed stream */
put_bits(&pb, 1, 1); /* marker */
if (s->is_vcd && only_for_stream_id==VIDEO_ID) {
@@ -172,13 +172,13 @@ static int put_system_header(AVFormatContext *ctx, uint8_t *buf,int only_for_str
if (s->is_vcd) {
/* see VCD standard, p. IV-7*/
- put_bits(&pb, 1, 0);
+ put_bits(&pb, 1, 0);
put_bits(&pb, 1, 1);
} else {
put_bits(&pb, 1, 0); /* variable bitrate*/
put_bits(&pb, 1, 0); /* non constrainted bit stream */
}
-
+
if (s->is_vcd || s->is_dvd) {
/* see VCD standard p IV-7 */
put_bits(&pb, 1, 1); /* audio locked */
@@ -195,24 +195,24 @@ static int put_system_header(AVFormatContext *ctx, uint8_t *buf,int only_for_str
put_bits(&pb, 5, 0);
} else
put_bits(&pb, 5, s->video_bound);
-
+
if (s->is_dvd) {
put_bits(&pb, 1, 0); /* packet_rate_restriction_flag */
put_bits(&pb, 7, 0x7f); /* reserved byte */
} else
put_bits(&pb, 8, 0xff); /* reserved byte */
-
+
/* DVD-Video Stream_bound entries
- id (0xB9) video, maximum P-STD for stream 0xE0. (P-STD_buffer_bound_scale = 1)
- id (0xB8) audio, maximum P-STD for any MPEG audio (0xC0 to 0xC7) streams. If there are none set to 4096 (32x128). (P-STD_buffer_bound_scale = 0)
- id (0xBD) private stream 1 (audio other than MPEG and subpictures). (P-STD_buffer_bound_scale = 1)
+ id (0xB9) video, maximum P-STD for stream 0xE0. (P-STD_buffer_bound_scale = 1)
+ id (0xB8) audio, maximum P-STD for any MPEG audio (0xC0 to 0xC7) streams. If there are none set to 4096 (32x128). (P-STD_buffer_bound_scale = 0)
+ id (0xBD) private stream 1 (audio other than MPEG and subpictures). (P-STD_buffer_bound_scale = 1)
id (0xBF) private stream 2, NAV packs, set to 2x1024. */
if (s->is_dvd) {
-
+
int P_STD_max_video = 0;
int P_STD_max_mpeg_audio = 0;
int P_STD_max_mpeg_PS1 = 0;
-
+
for(i=0;i<ctx->nb_streams;i++) {
StreamInfo *stream = ctx->streams[i]->priv_data;
@@ -257,7 +257,7 @@ static int put_system_header(AVFormatContext *ctx, uint8_t *buf,int only_for_str
private_stream_coded = 0;
for(i=0;i<ctx->nb_streams;i++) {
StreamInfo *stream = ctx->streams[i]->priv_data;
-
+
/* For VCDs, only include the stream info for the stream
that the pack which contains this system belongs to.
@@ -334,15 +334,15 @@ static int mpeg_mux_init(AVFormatContext *ctx)
s->is_svcd = (ctx->oformat == &mpeg2svcd_mux);
s->is_mpeg2 = (ctx->oformat == &mpeg2vob_mux || ctx->oformat == &mpeg2svcd_mux || ctx->oformat == &mpeg2dvd_mux);
s->is_dvd = (ctx->oformat == &mpeg2dvd_mux);
-
+
if(ctx->packet_size)
s->packet_size = ctx->packet_size;
else
s->packet_size = 2048;
-
+
s->vcd_padding_bytes_written = 0;
s->vcd_padding_bitrate=0;
-
+
s->audio_bound = 0;
s->video_bound = 0;
mpa_id = AUDIO_ID;
@@ -386,7 +386,7 @@ static int mpeg_mux_init(AVFormatContext *ctx)
/* This value HAS to be used for VCD (see VCD standard, p. IV-7).
Right now it is also used for everything else.*/
- stream->max_buffer_size = 4 * 1024;
+ stream->max_buffer_size = 4 * 1024;
s->audio_bound++;
break;
case CODEC_TYPE_VIDEO:
@@ -397,11 +397,11 @@ static int mpeg_mux_init(AVFormatContext *ctx)
stream->max_buffer_size = 230*1024; //FIXME this is probably too small as default
#if 0
/* see VCD standard, p. IV-7*/
- stream->max_buffer_size = 46 * 1024;
+ stream->max_buffer_size = 46 * 1024;
else
/* This value HAS to be used for SVCD (see SVCD standard, p. 26 V.2.3.2).
Right now it is also used for everything else.*/
- stream->max_buffer_size = 230 * 1024;
+ stream->max_buffer_size = 230 * 1024;
#endif
s->video_bound++;
break;
@@ -426,10 +426,10 @@ static int mpeg_mux_init(AVFormatContext *ctx)
codec_rate= st->codec->rc_max_rate;
else
codec_rate= st->codec->bit_rate;
-
+
if(!codec_rate)
codec_rate= (1<<21)*8*50/ctx->nb_streams;
-
+
bitrate += codec_rate;
if (stream->id==AUDIO_ID)
@@ -437,7 +437,7 @@ static int mpeg_mux_init(AVFormatContext *ctx)
else if (stream->id==VIDEO_ID)
video_bitrate += codec_rate;
}
-
+
if(ctx->mux_rate){
s->mux_rate= (ctx->mux_rate + (8 * 50) - 1) / (8 * 50);
} else {
@@ -472,11 +472,11 @@ static int mpeg_mux_init(AVFormatContext *ctx)
overhead_rate = ((audio_bitrate / 8.0) / 2279) * (2324 - 2279);
overhead_rate += ((video_bitrate / 8.0) / 2294) * (2324 - 2294);
overhead_rate *= 8;
-
+
/* Add padding so that the full bitrate is 2324*75 bytes/sec */
s->vcd_padding_bitrate = 2324 * 75 * 8 - (bitrate + overhead_rate);
}
-
+
if (s->is_vcd || s->is_mpeg2)
/* every packet */
s->pack_header_freq = 1;
@@ -487,7 +487,7 @@ static int mpeg_mux_init(AVFormatContext *ctx)
/* the above seems to make pack_header_freq zero sometimes */
if (s->pack_header_freq == 0)
s->pack_header_freq = 1;
-
+
if (s->is_mpeg2)
/* every 200 packets. Need to look at the spec. */
s->system_header_freq = s->pack_header_freq * 40;
@@ -498,7 +498,7 @@ static int mpeg_mux_init(AVFormatContext *ctx)
s->system_header_freq = 0x7fffffff;
else
s->system_header_freq = s->pack_header_freq * 5;
-
+
for(i=0;i<ctx->nb_streams;i++) {
stream = ctx->streams[i]->priv_data;
stream->packet_number = 0;
@@ -515,9 +515,9 @@ static int mpeg_mux_init(AVFormatContext *ctx)
static inline void put_timestamp(ByteIOContext *pb, int id, int64_t timestamp)
{
- put_byte(pb,
- (id << 4) |
- (((timestamp >> 30) & 0x07) << 1) |
+ put_byte(pb,
+ (id << 4) |
+ (((timestamp >> 30) & 0x07) << 1) |
1);
put_be16(pb, (uint16_t)((((timestamp >> 15) & 0x7fff) << 1) | 1));
put_be16(pb, (uint16_t)((((timestamp) & 0x7fff) << 1) | 1));
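put_timestamp() above packs a 33-bit PTS/DTS into the 5-byte PES field: a 4-bit prefix (0x2 for a lone PTS, 0x3 plus 0x1 for a PTS/DTS pair, as used further down), then the timestamp split into 3 + 15 + 15 bits with a marker '1' bit after each group. The same layout as a sketch writing into a byte buffer:

    #include <stdint.h>

    /* Write the 5-byte MPEG PES timestamp field for prefix 'id' and
     * 33-bit timestamp 'ts' (90 kHz units). */
    static void write_pes_timestamp(uint8_t *buf, int id, int64_t ts)
    {
        buf[0] = (id << 4) | (((ts >> 30) & 0x07) << 1) | 1;  /* prefix, top 3 bits, marker */
        buf[1] = (ts >> 22) & 0xff;                           /* middle 15 bits ...          */
        buf[2] = ((ts >> 15) << 1 | 1) & 0xff;                /* ... followed by a marker    */
        buf[3] = (ts >> 7) & 0xff;                            /* low 15 bits ...             */
        buf[4] = ((ts << 1) | 1) & 0xff;                      /* ... followed by a marker    */
    }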
@@ -534,7 +534,7 @@ static int get_vcd_padding_size(AVFormatContext *ctx, int64_t pts)
if (s->vcd_padding_bitrate > 0 && pts!=AV_NOPTS_VALUE)
{
int64_t full_pad_bytes;
-
+
full_pad_bytes = (int64_t)((s->vcd_padding_bitrate * (pts / 90000.0)) / 8.0); //FIXME this is wrong
pad_bytes = (int) (full_pad_bytes - s->vcd_padding_bytes_written);
@@ -564,22 +564,22 @@ static int get_packet_payload_size(AVFormatContext *ctx, int stream_index,
buf_index = 0;
if (((s->packet_number % s->pack_header_freq) == 0)) {
/* pack header size */
- if (s->is_mpeg2)
+ if (s->is_mpeg2)
buf_index += 14;
else
buf_index += 12;
-
+
if (s->is_vcd) {
/* there is exactly one system header for each stream in a VCD MPEG,
One in the very first video packet and one in the very first
audio packet (see VCD standard p. IV-7 and IV-8).*/
-
+
if (stream->packet_number==0)
/* The system headers refer only to the stream they occur in,
so they have a constant size.*/
buf_index += 15;
- } else {
+ } else {
if ((s->packet_number % s->system_header_freq) == 0)
buf_index += s->system_header_size;
}
@@ -588,7 +588,7 @@ static int get_packet_payload_size(AVFormatContext *ctx, int stream_index,
if ((s->is_vcd && stream->packet_number==0)
|| (s->is_svcd && s->packet_number==0))
/* the first pack of each stream contains only the pack header,
- the system header and some padding (see VCD standard p. IV-6)
+ the system header and some padding (see VCD standard p. IV-6)
Add the padding size, so that the actual payload becomes 0.*/
buf_index += s->packet_size - buf_index;
else {
@@ -610,7 +610,7 @@ static int get_packet_payload_size(AVFormatContext *ctx, int stream_index,
if (!s->is_mpeg2)
buf_index++;
}
-
+
if (stream->id < 0xc0) {
/* AC3/LPCM private data header */
buf_index += 4;
@@ -630,7 +630,7 @@ static int get_packet_payload_size(AVFormatContext *ctx, int stream_index,
each audio packet (see standard p. IV-8).*/
buf_index+=20;
}
- return s->packet_size - buf_index;
+ return s->packet_size - buf_index;
}
#endif
@@ -639,7 +639,7 @@ static void put_padding_packet(AVFormatContext *ctx, ByteIOContext *pb,int packe
{
MpegMuxContext *s = ctx->priv_data;
int i;
-
+
put_be32(pb, PADDING_STREAM);
put_be16(pb, packet_bytes - 6);
if (!s->is_mpeg2) {
@@ -656,7 +656,7 @@ static int get_nb_frames(AVFormatContext *ctx, StreamInfo *stream, int len){
int nb_frames=0;
PacketDesc *pkt_desc= stream->premux_packet;
- while(len>0){
+ while(len>0){
if(pkt_desc->size == pkt_desc->unwritten_size)
nb_frames++;
len -= pkt_desc->unwritten_size;
@@ -667,7 +667,7 @@ static int get_nb_frames(AVFormatContext *ctx, StreamInfo *stream, int len){
}
/* flush the packet on stream stream_index */
-static int flush_packet(AVFormatContext *ctx, int stream_index,
+static int flush_packet(AVFormatContext *ctx, int stream_index,
int64_t pts, int64_t dts, int64_t scr, int trailer_size)
{
MpegMuxContext *s = ctx->priv_data;
@@ -681,11 +681,11 @@ static int flush_packet(AVFormatContext *ctx, int stream_index,
int pes_flags;
int general_pack = 0; /*"general" pack without data specific to one stream?*/
int nb_frames;
-
+
id = stream->id;
-
+
#if 0
- printf("packet ID=%2x PTS=%0.3f\n",
+ printf("packet ID=%2x PTS=%0.3f\n",
id, pts / 90000.0);
#endif
@@ -701,7 +701,7 @@ static int flush_packet(AVFormatContext *ctx, int stream_index,
/* there is exactly one system header for each stream in a VCD MPEG,
One in the very first video packet and one in the very first
audio packet (see VCD standard p. IV-7 and IV-8).*/
-
+
if (stream->packet_number==0) {
size = put_system_header(ctx, buf_ptr, id);
buf_ptr += size;
@@ -771,7 +771,7 @@ static int flush_packet(AVFormatContext *ctx, int stream_index,
/* The VCD standard demands that 20 zero bytes follow
each audio pack (see standard p. IV-8).*/
zero_trail_bytes += 20;
-
+
if ((s->is_vcd && stream->packet_number==0)
|| (s->is_svcd && s->packet_number==0)) {
/* for VCD the first pack of each stream contains only the pack header,
@@ -791,7 +791,7 @@ static int flush_packet(AVFormatContext *ctx, int stream_index,
/* packet header size */
packet_size -= 6;
-
+
/* packet header */
if (s->is_mpeg2) {
header_len = 3;
@@ -829,7 +829,7 @@ static int flush_packet(AVFormatContext *ctx, int stream_index,
// first byte doesnt fit -> reset pts/dts + stuffing
if(payload_size <= trailer_size && pts != AV_NOPTS_VALUE){
int timestamp_len=0;
- if(dts != pts)
+ if(dts != pts)
timestamp_len += 5;
if(pts != AV_NOPTS_VALUE)
timestamp_len += s->is_mpeg2 ? 5 : 4;
@@ -865,13 +865,13 @@ static int flush_packet(AVFormatContext *ctx, int stream_index,
payload_size -= stuffing_size;
stuffing_size = 0;
}
-
+
nb_frames= get_nb_frames(ctx, stream, payload_size - stuffing_size);
put_be32(&ctx->pb, startcode);
put_be16(&ctx->pb, packet_size);
-
+
if (!s->is_mpeg2)
for(i=0;i<stuffing_size;i++)
put_byte(&ctx->pb, 0xff);
@@ -901,11 +901,11 @@ static int flush_packet(AVFormatContext *ctx, int stream_index,
put_timestamp(&ctx->pb, (pes_flags & 0x40) ? 0x03 : 0x02, pts);
if (pes_flags & 0x40) /*write dts*/
put_timestamp(&ctx->pb, 0x01, dts);
-
+
if (pes_flags & 0x01) { /*write pes extension*/
put_byte(&ctx->pb, 0x10); /* flags */
- /* P-STD buffer info */
+ /* P-STD buffer info */
if (id == AUDIO_ID)
put_be16(&ctx->pb, 0x4000 | stream->max_buffer_size/128);
else
@@ -959,13 +959,13 @@ static int flush_packet(AVFormatContext *ctx, int stream_index,
}
if (pad_packet_bytes > 0)
- put_padding_packet(ctx,&ctx->pb, pad_packet_bytes);
+ put_padding_packet(ctx,&ctx->pb, pad_packet_bytes);
for(i=0;i<zero_trail_bytes;i++)
put_byte(&ctx->pb, 0x00);
-
+
put_flush_packet(&ctx->pb);
-
+
s->packet_number++;
/* only increase the stream packet number if this pack actually contains
@@ -973,7 +973,7 @@ static int flush_packet(AVFormatContext *ctx, int stream_index,
or some data.*/
if (!general_pack)
stream->packet_number++;
-
+
return payload_size - stuffing_size;
}
@@ -992,9 +992,9 @@ static void put_vcd_padding_sector(AVFormatContext *ctx)
put_byte(&ctx->pb, 0);
s->vcd_padding_bytes_written += s->packet_size;
-
+
put_flush_packet(&ctx->pb);
-
+
/* increasing the packet number is correct. The SCR of the following packs
is calculated from the packet_number and it has to include the padding
sector (it represents the sector index, not the MPEG pack index)
@@ -1021,7 +1021,7 @@ static int64_t get_vcd_scr(AVFormatContext *ctx,int stream_index,int64_t pts)
scr = 36000 + s->packet_number * 1200;
return scr;
-}
+}
#endif
static int remove_decoded_packets(AVFormatContext *ctx, int64_t scr){
@@ -1032,9 +1032,9 @@ static int remove_decoded_packets(AVFormatContext *ctx, int64_t scr){
AVStream *st = ctx->streams[i];
StreamInfo *stream = st->priv_data;
PacketDesc *pkt_desc= stream->predecode_packet;
-
+
while(pkt_desc && scr > pkt_desc->dts){ //FIXME > vs >=
- if(stream->buffer_index < pkt_desc->size ||
+ if(stream->buffer_index < pkt_desc->size ||
stream->predecode_packet == stream->premux_packet){
av_log(ctx, AV_LOG_ERROR, "buffer underflow\n");
break;
@@ -1045,7 +1045,7 @@ static int remove_decoded_packets(AVFormatContext *ctx, int64_t scr){
av_freep(&pkt_desc);
}
}
-
+
return 0;
}
@@ -1081,17 +1081,17 @@ retry:
if(space < s->packet_size && !ignore_constraints)
continue;
-
+
if(next_pkt && next_pkt->dts - scr > max_delay)
continue;
-
+
if(rel_space > best_score){
best_score= rel_space;
best_i = i;
avail_space= space;
}
}
-
+
if(best_i < 0){
int64_t best_dts= INT64_MAX;
@@ -1104,7 +1104,7 @@ retry:
}
#if 0
- av_log(ctx, AV_LOG_DEBUG, "bumping scr, scr:%f, dts:%f\n",
+ av_log(ctx, AV_LOG_DEBUG, "bumping scr, scr:%f, dts:%f\n",
scr/90000.0, best_dts/90000.0);
#endif
if(best_dts == INT64_MAX)
@@ -1121,14 +1121,14 @@ retry:
}
assert(best_i >= 0);
-
+
st = ctx->streams[best_i];
stream = st->priv_data;
-
+
assert(fifo_size(&stream->fifo, stream->fifo.rptr) > 0);
assert(avail_space >= s->packet_size || ignore_constraints);
-
+
timestamp_packet= stream->premux_packet;
if(timestamp_packet->unwritten_size == timestamp_packet->size){
trailer_size= 0;
@@ -1155,17 +1155,17 @@ retry:
s->last_scr += s->packet_size*90000LL / (s->mux_rate*50LL); //FIXME rounding and first few bytes of each packet
}
}
-
+
stream->buffer_index += es_size;
s->last_scr += s->packet_size*90000LL / (s->mux_rate*50LL); //FIXME rounding and first few bytes of each packet
-
+
while(stream->premux_packet && stream->premux_packet->unwritten_size <= es_size){
es_size -= stream->premux_packet->unwritten_size;
stream->premux_packet= stream->premux_packet->next;
}
if(es_size)
stream->premux_packet->unwritten_size -= es_size;
-
+
if(remove_decoded_packets(ctx, s->last_scr) < 0)
return -1;
@@ -1184,7 +1184,7 @@ static int mpeg_mux_write_packet(AVFormatContext *ctx, AVPacket *pkt)
PacketDesc *pkt_desc;
const int preload= av_rescale(ctx->preload, 90000, AV_TIME_BASE);
const int is_iframe = st->codec->codec_type == CODEC_TYPE_VIDEO && (pkt->flags & PKT_FLAG_KEY);
-
+
pts= pkt->pts;
dts= pkt->dts;
@@ -1220,7 +1220,7 @@ static int mpeg_mux_write_packet(AVFormatContext *ctx, AVPacket *pkt)
for(;;){
int ret= output_packet(ctx, 0);
- if(ret<=0)
+ if(ret<=0)
return ret;
}
}
@@ -1230,10 +1230,10 @@ static int mpeg_mux_end(AVFormatContext *ctx)
// MpegMuxContext *s = ctx->priv_data;
StreamInfo *stream;
int i;
-
+
for(;;){
int ret= output_packet(ctx, 1);
- if(ret<0)
+ if(ret<0)
return ret;
else if(ret==0)
break;
@@ -1323,7 +1323,7 @@ static int64_t get_pts(ByteIOContext *pb, int c)
return pts;
}
-static int find_next_start_code(ByteIOContext *pb, int *size_ptr,
+static int find_next_start_code(ByteIOContext *pb, int *size_ptr,
uint32_t *header_state)
{
unsigned int state, v;
@@ -1388,7 +1388,7 @@ static int find_prev_start_code(ByteIOContext *pb, int *size_ptr)
/**
* Extracts stream types from a program stream map
* According to ISO/IEC 13818-1 ('MPEG-2 Systems') table 2-35
- *
+ *
* @return number of bytes occupied by PSM in the bitstream
*/
static long mpegps_psm_parse(MpegDemuxContext *m, ByteIOContext *pb)
@@ -1419,11 +1419,11 @@ static long mpegps_psm_parse(MpegDemuxContext *m, ByteIOContext *pb)
return 2 + psm_length;
}
-/* read the next PES header. Return its position in ppos
+/* read the next PES header. Return its position in ppos
(if not NULL), and its start code, pts and dts.
*/
static int mpegps_read_pes_header(AVFormatContext *s,
- int64_t *ppos, int *pstart_code,
+ int64_t *ppos, int *pstart_code,
int64_t *ppts, int64_t *pdts)
{
MpegDemuxContext *m = s->priv_data;
@@ -1454,7 +1454,7 @@ static int mpegps_read_pes_header(AVFormatContext *s,
mpegps_psm_parse(m, &s->pb);
goto redo;
}
-
+
/* find matching stream */
if (!((startcode >= 0x1c0 && startcode <= 0x1df) ||
(startcode >= 0x1e0 && startcode <= 0x1ef) ||
@@ -1473,7 +1473,7 @@ static int mpegps_read_pes_header(AVFormatContext *s,
c = get_byte(&s->pb);
len--;
/* XXX: for mpeg1, should test only bit 7 */
- if (c != 0xff)
+ if (c != 0xff)
break;
}
if ((c & 0xc0) == 0x40) {
@@ -1552,7 +1552,7 @@ static int mpegps_read_pes_header(AVFormatContext *s,
}
}
}
-
+
*pstart_code = startcode;
*ppts = pts;
*pdts = dts;
@@ -1571,7 +1571,7 @@ static int mpegps_read_packet(AVFormatContext *s,
len = mpegps_read_pes_header(s, &dummy_pos, &startcode, &pts, &dts);
if (len < 0)
return len;
-
+
/* now find stream */
for(i=0;i<s->nb_streams;i++) {
st = s->streams[i];
@@ -1632,7 +1632,7 @@ static int mpegps_read_packet(AVFormatContext *s,
}
/* no stream found: add a new stream */
st = av_new_stream(s, startcode);
- if (!st)
+ if (!st)
goto skip;
st->codec->codec_type = type;
st->codec->codec_id = codec_id;
@@ -1675,7 +1675,7 @@ static int mpegps_read_close(AVFormatContext *s)
return 0;
}
-static int64_t mpegps_read_dts(AVFormatContext *s, int stream_index,
+static int64_t mpegps_read_dts(AVFormatContext *s, int stream_index,
int64_t *ppos, int64_t pos_limit)
{
int len, startcode;
@@ -1694,7 +1694,7 @@ static int64_t mpegps_read_dts(AVFormatContext *s, int stream_index,
#endif
return AV_NOPTS_VALUE;
}
- if (startcode == s->streams[stream_index]->id &&
+ if (startcode == s->streams[stream_index]->id &&
dts != AV_NOPTS_VALUE) {
break;
}
diff --git a/libavformat/mpegts.c b/libavformat/mpegts.c
index 83c6002c1f..321cad8510 100644
--- a/libavformat/mpegts.c
+++ b/libavformat/mpegts.c
@@ -92,7 +92,7 @@ struct MpegTSContext {
int64_t cur_pcr;
int pcr_incr;
int pcr_pid;
-
+
/* data needed to handle file based ts */
int stop_parse; /* stop parsing loop */
AVPacket *pkt; /* packet containing av data */
@@ -103,7 +103,7 @@ struct MpegTSContext {
MpegTSFilter *sdt_filter;
int nb_services;
MpegTSService **services;
-
+
/* set service context (XXX: allocated it ?) */
SetServiceCallback *set_service_cb;
void *set_service_opaque;
@@ -119,7 +119,7 @@ static void write_section_data(AVFormatContext *s, MpegTSFilter *tss1,
{
MpegTSSectionFilter *tss = &tss1->u.section_filter;
int len;
-
+
if (is_start) {
memcpy(tss->section_buf, buf, buf_size);
tss->section_index = buf_size;
@@ -151,21 +151,21 @@ static void write_section_data(AVFormatContext *s, MpegTSFilter *tss1,
}
}
-MpegTSFilter *mpegts_open_section_filter(MpegTSContext *ts, unsigned int pid,
+MpegTSFilter *mpegts_open_section_filter(MpegTSContext *ts, unsigned int pid,
SectionCallback *section_cb, void *opaque,
int check_crc)
{
MpegTSFilter *filter;
MpegTSSectionFilter *sec;
-
+
#ifdef DEBUG_SI
printf("Filter: pid=0x%x\n", pid);
#endif
if (pid >= NB_PID_MAX || ts->pids[pid])
return NULL;
filter = av_mallocz(sizeof(MpegTSFilter));
- if (!filter)
+ if (!filter)
return NULL;
ts->pids[pid] = filter;
filter->type = MPEGTS_SECTION;
@@ -183,7 +183,7 @@ MpegTSFilter *mpegts_open_section_filter(MpegTSContext *ts, unsigned int pid,
return filter;
}
-MpegTSFilter *mpegts_open_pes_filter(MpegTSContext *ts, unsigned int pid,
+MpegTSFilter *mpegts_open_pes_filter(MpegTSContext *ts, unsigned int pid,
PESCallback *pes_cb,
void *opaque)
{
@@ -193,7 +193,7 @@ MpegTSFilter *mpegts_open_pes_filter(MpegTSContext *ts, unsigned int pid,
if (pid >= NB_PID_MAX || ts->pids[pid])
return NULL;
filter = av_mallocz(sizeof(MpegTSFilter));
- if (!filter)
+ if (!filter)
return NULL;
ts->pids[pid] = filter;
filter->type = MPEGTS_PES;
@@ -250,11 +250,11 @@ static int get_packet_size(const uint8_t *buf, int size)
if (size < (TS_FEC_PACKET_SIZE * 5 + 1))
return -1;
-
+
score = analyze(buf, size, TS_PACKET_SIZE, NULL);
fec_score= analyze(buf, size, TS_FEC_PACKET_SIZE, NULL);
// av_log(NULL, AV_LOG_DEBUG, "score: %d, fec_score: %d \n", score, fec_score);
-
+
if (score > fec_score) return TS_PACKET_SIZE;
else if(score < fec_score) return TS_FEC_PACKET_SIZE;
else return -1;
@@ -318,7 +318,7 @@ static char *getstr8(const uint8_t **pp, const uint8_t *p_end)
return str;
}
-static int parse_section_header(SectionHeader *h,
+static int parse_section_header(SectionHeader *h,
const uint8_t **pp, const uint8_t *p_end)
{
int val;
@@ -347,13 +347,13 @@ static int parse_section_header(SectionHeader *h,
return 0;
}
-static MpegTSService *new_service(MpegTSContext *ts, int sid,
+static MpegTSService *new_service(MpegTSContext *ts, int sid,
char *provider_name, char *name)
{
MpegTSService *service;
#ifdef DEBUG_SI
- printf("new_service: sid=0x%04x provider='%s' name='%s'\n",
+ printf("new_service: sid=0x%04x provider='%s' name='%s'\n",
sid, provider_name, name);
#endif
@@ -378,7 +378,7 @@ static void pmt_cb(void *opaque, const uint8_t *section, int section_len)
int desc_list_len, desc_len, desc_tag;
int comp_page = 0, anc_page = 0; /* initialize to kill warnings */
char language[4];
-
+
#ifdef DEBUG_SI
printf("PMT:\n");
av_hex_dump(stdout, (uint8_t *)section, section_len);
@@ -535,7 +535,7 @@ static void pat_cb(void *opaque, const uint8_t *section, int section_len)
/* NIT info */
} else {
if (ts->req_sid == sid) {
- ts->pmt_filter = mpegts_open_section_filter(ts, pmt_pid,
+ ts->pmt_filter = mpegts_open_section_filter(ts, pmt_pid,
pmt_cb, ts, 1);
goto found;
}
@@ -608,7 +608,7 @@ void mpegts_set_service(MpegTSContext *ts, int sid,
ts->set_service_cb = set_service_cb;
ts->set_service_opaque = opaque;
ts->req_sid = sid;
- ts->pat_filter = mpegts_open_section_filter(ts, PAT_PID,
+ ts->pat_filter = mpegts_open_section_filter(ts, PAT_PID,
pat_cb, ts, 1);
}
@@ -691,7 +691,7 @@ static void sdt_cb(void *opaque, const uint8_t *section, int section_len)
/* scan services in a transport stream by looking at the SDT */
void mpegts_scan_sdt(MpegTSContext *ts)
{
- ts->sdt_filter = mpegts_open_section_filter(ts, SDT_PID,
+ ts->sdt_filter = mpegts_open_section_filter(ts, SDT_PID,
sdt_cb, ts, 1);
}
@@ -699,7 +699,7 @@ void mpegts_scan_sdt(MpegTSContext *ts)
than nothing !) */
void mpegts_scan_pat(MpegTSContext *ts)
{
- ts->pat_filter = mpegts_open_section_filter(ts, PAT_PID,
+ ts->pat_filter = mpegts_open_section_filter(ts, PAT_PID,
pat_scan_cb, ts, 1);
}
@@ -752,7 +752,7 @@ static void mpegts_push_data(void *opaque,
MpegTSContext *ts = pes->ts;
const uint8_t *p;
int len, code;
-
+
if (is_start) {
pes->state = MPEGTS_HEADER;
pes->data_index = 0;
@@ -973,7 +973,7 @@ static void handle_packet(MpegTSContext *ts, const uint8_t *packet)
cc = (packet[3] & 0xf);
cc_ok = (tss->last_cc < 0) || ((((tss->last_cc + 1) & 0x0f) == cc));
tss->last_cc = cc;
-
+
/* skip adaptation field */
afc = (packet[3] >> 4) & 3;
p = packet + 4;
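handle_packet() above picks the continuity counter and adaptation_field_control out of the fixed 4-byte TS packet header (the seek code further down reads the PID and payload_unit_start_indicator the same way). A standalone parse of that header, using a hypothetical TSHeader struct:

    #include <stdint.h>

    typedef struct TSHeader {   /* illustrative helper struct */
        int pusi;               /* payload_unit_start_indicator */
        int pid;                /* 13-bit packet identifier */
        int afc;                /* adaptation_field_control: 1 = payload only,
                                   2 = adaptation only, 3 = adaptation + payload */
        int cc;                 /* 4-bit continuity counter */
    } TSHeader;

    /* Parse the fixed 4-byte header of a 188-byte TS packet.
     * Returns 0 on success, -1 if the sync byte is missing. */
    static int parse_ts_header(const uint8_t *packet, TSHeader *h)
    {
        if (packet[0] != 0x47)
            return -1;
        h->pusi = (packet[1] >> 6) & 1;
        h->pid  = ((packet[1] & 0x1f) << 8) | packet[2];
        h->afc  = (packet[3] >> 4) & 3;
        h->cc   =  packet[3] & 0x0f;
        return 0;
    }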
@@ -989,7 +989,7 @@ static void handle_packet(MpegTSContext *ts, const uint8_t *packet)
p_end = packet + TS_PACKET_SIZE;
if (p >= p_end)
return;
-
+
if (tss->type == MPEGTS_SECTION) {
if (is_start) {
/* pointer field present */
@@ -998,7 +998,7 @@ static void handle_packet(MpegTSContext *ts, const uint8_t *packet)
return;
if (len && cc_ok) {
/* write remaining section bytes */
- write_section_data(s, tss,
+ write_section_data(s, tss,
p, len, 0);
/* check whether filter has been closed */
if (!ts->pids[pid])
@@ -1006,17 +1006,17 @@ static void handle_packet(MpegTSContext *ts, const uint8_t *packet)
}
p += len;
if (p < p_end) {
- write_section_data(s, tss,
+ write_section_data(s, tss,
p, p_end - p, 1);
}
} else {
if (cc_ok) {
- write_section_data(s, tss,
+ write_section_data(s, tss,
p, p_end - p, 0);
}
}
} else {
- tss->u.pes_filter.pes_cb(tss->u.pes_filter.opaque,
+ tss->u.pes_filter.pes_cb(tss->u.pes_filter.opaque,
p, p_end - p, is_start);
}
}
@@ -1096,14 +1096,14 @@ static int mpegts_probe(AVProbeData *p)
const int size= p->buf_size;
int score, fec_score;
#define CHECK_COUNT 10
-
+
if (size < (TS_FEC_PACKET_SIZE * CHECK_COUNT))
return -1;
-
+
score = analyze(p->buf, TS_PACKET_SIZE *CHECK_COUNT, TS_PACKET_SIZE, NULL);
fec_score= analyze(p->buf, TS_FEC_PACKET_SIZE*CHECK_COUNT, TS_FEC_PACKET_SIZE, NULL);
// av_log(NULL, AV_LOG_DEBUG, "score: %d, fec_score: %d \n", score, fec_score);
-
+
// we need a clear definition for the returned score otherwise things will become messy sooner or later
if (score > fec_score && score > 6) return AVPROBE_SCORE_MAX + score - CHECK_COUNT;
else if( fec_score > 6) return AVPROBE_SCORE_MAX + fec_score - CHECK_COUNT;
@@ -1126,7 +1126,7 @@ void set_service_cb(void *opaque, int ret)
/* return the 90 kHz PCR and the extension for the 27 MHz PCR. return
(-1) if not available */
-static int parse_pcr(int64_t *ppcr_high, int *ppcr_low,
+static int parse_pcr(int64_t *ppcr_high, int *ppcr_low,
const uint8_t *packet)
{
int afc, len, flags;
@@ -1188,12 +1188,12 @@ static int mpegts_read_header(AVFormatContext *s,
/* first do a scaning to get all the services */
url_fseek(pb, pos, SEEK_SET);
mpegts_scan_sdt(ts);
-
+
handle_packets(ts, MAX_SCAN_PACKETS);
-
+
if (ts->nb_services <= 0) {
/* no SDT found, we try to look at the PAT */
-
+
/* First remove the SDT filters from each PID */
int i;
for (i=0; i < NB_PID_MAX; i++) {
@@ -1202,17 +1202,17 @@ static int mpegts_read_header(AVFormatContext *s,
}
url_fseek(pb, pos, SEEK_SET);
mpegts_scan_pat(ts);
-
+
handle_packets(ts, MAX_SCAN_PACKETS);
}
-
+
if (ts->nb_services <= 0) {
/* raw transport stream */
ts->auto_guess = 1;
s->ctx_flags |= AVFMTCTX_NOHEADER;
goto do_pcr;
}
-
+
/* tune to first service found */
for(i=0; i<ts->nb_services && ts->set_service_ret; i++){
service = ts->services[i];
@@ -1220,20 +1220,20 @@ static int mpegts_read_header(AVFormatContext *s,
#ifdef DEBUG_SI
printf("tuning to '%s'\n", service->name);
#endif
-
+
/* now find the info for the first service if we found any,
otherwise try to filter all PATs */
-
+
url_fseek(pb, pos, SEEK_SET);
mpegts_set_service(ts, sid, set_service_cb, ts);
-
+
handle_packets(ts, MAX_SCAN_PACKETS);
}
/* if could not find service, exit */
-
+
if (ts->set_service_ret != 0)
return -1;
-
+
#ifdef DEBUG_SI
printf("tuning done\n");
#endif
@@ -1245,9 +1245,9 @@ static int mpegts_read_header(AVFormatContext *s,
int64_t pcrs[2], pcr_h;
int packet_count[2];
uint8_t packet[TS_PACKET_SIZE];
-
+
/* only read packets */
-
+
do_pcr:
st = av_new_stream(s, 0);
if (!st)
@@ -1255,7 +1255,7 @@ static int mpegts_read_header(AVFormatContext *s,
av_set_pts_info(st, 60, 1, 27000000);
st->codec->codec_type = CODEC_TYPE_DATA;
st->codec->codec_id = CODEC_ID_MPEG2TS;
-
+
/* we iterate until we find two PCRs to estimate the bitrate */
pcr_pid = -1;
nb_pcrs = 0;
@@ -1326,7 +1326,7 @@ static int mpegts_raw_read_packet(AVFormatContext *s,
get_buffer(&s->pb, pcr_buf, 12);
if (parse_pcr(&next_pcr_h, &next_pcr_l, pcr_buf) == 0) {
/* XXX: not precise enough */
- ts->pcr_incr = ((next_pcr_h - pcr_h) * 300 + (next_pcr_l - pcr_l)) /
+ ts->pcr_incr = ((next_pcr_h - pcr_h) * 300 + (next_pcr_l - pcr_l)) /
(i + 1);
break;
}
@@ -1365,7 +1365,7 @@ static int mpegts_read_close(AVFormatContext *s)
return 0;
}
-static int64_t mpegts_get_pcr(AVFormatContext *s, int stream_index,
+static int64_t mpegts_get_pcr(AVFormatContext *s, int stream_index,
int64_t *ppos, int64_t pos_limit)
{
MpegTSContext *ts = s->priv_data;
@@ -1423,7 +1423,7 @@ static int read_seek(AVFormatContext *s, int stream_index, int64_t target_ts, in
// pid = ((buf[1] & 0x1f) << 8) | buf[2];
if(buf[1] & 0x40) break;
pos += ts->raw_packet_size;
- }
+ }
url_fseek(&s->pb, pos, SEEK_SET);
return 0;
@@ -1435,7 +1435,7 @@ static int read_seek(AVFormatContext *s, int stream_index, int64_t target_ts, in
MpegTSContext *mpegts_parse_open(AVFormatContext *s)
{
MpegTSContext *ts;
-
+
ts = av_mallocz(sizeof(MpegTSContext));
if (!ts)
return NULL;
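
The parse_pcr() and pcr_incr hunks above only reflow whitespace, but the arithmetic they touch is worth spelling out: the TS adaptation field carries a 33-bit PCR base clocked at 90 kHz plus a 9-bit extension clocked at 27 MHz, and the demuxer recombines them as base * 300 + extension before estimating the mux rate from the byte distance between two PCRs. A minimal sketch under those assumptions (the function names are illustrative, not the ones in mpegts.c):

    /* Extract the 27 MHz PCR from one 188-byte TS packet, if present. */
    static int pcr_27mhz_sketch(const uint8_t *pkt, int64_t *pcr)
    {
        int afc = (pkt[3] >> 4) & 3;           /* adaptation_field_control */
        const uint8_t *p = pkt + 4;
        int len, flags, ext;
        int64_t base;

        if (afc != 2 && afc != 3)
            return -1;                         /* no adaptation field */
        len = *p++;
        if (len < 7)
            return -1;
        flags = *p++;
        if (!(flags & 0x10))
            return -1;                         /* PCR_flag not set */
        /* 33-bit base (90 kHz), 6 reserved bits, 9-bit extension (27 MHz) */
        base = ((int64_t)p[0] << 25) | (p[1] << 17) | (p[2] << 9) |
               (p[3] << 1) | (p[4] >> 7);
        ext  = ((p[4] & 1) << 8) | p[5];
        *pcr = base * 300 + ext;
        return 0;
    }

    /* Bitrate estimate from two PCRs, same idea as the pcr_incr hunk above:
       bits = bytes * 8, elapsed seconds = (pcr1 - pcr0) / 27e6. */
    static int64_t ts_bitrate_sketch(int64_t pcr0, int64_t pcr1, int64_t bytes)
    {
        return bytes * 8 * 27000000 / (pcr1 - pcr0);
    }
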
diff --git a/libavformat/mpegts.h b/libavformat/mpegts.h
index 848ecdb27a..5e96d740ca 100644
--- a/libavformat/mpegts.h
+++ b/libavformat/mpegts.h
@@ -28,7 +28,7 @@
/* table ids */
#define PAT_TID 0x00
-#define PMT_TID 0x02
+#define PMT_TID 0x02
#define SDT_TID 0x42
/* descriptor ids */
diff --git a/libavformat/mpegtsenc.c b/libavformat/mpegtsenc.c
index 4fe8f3e15c..9a294aa599 100644
--- a/libavformat/mpegtsenc.c
+++ b/libavformat/mpegtsenc.c
@@ -72,10 +72,10 @@ unsigned int mpegts_crc32(const uint8_t *data, int len)
{
register int i;
unsigned int crc = 0xffffffff;
-
+
for (i=0; i<len; i++)
crc = (crc << 8) ^ crc_table[((crc >> 24) ^ *data++) & 0xff];
-
+
return crc;
}
@@ -103,7 +103,7 @@ void mpegts_write_section(MpegTSSection *s, uint8_t *buf, int len)
buf[len - 3] = (crc >> 16) & 0xff;
buf[len - 2] = (crc >> 8) & 0xff;
buf[len - 1] = (crc) & 0xff;
-
+
/* send each packet */
buf_ptr = buf;
while (len > 0) {
@@ -120,7 +120,7 @@ void mpegts_write_section(MpegTSSection *s, uint8_t *buf, int len)
if (first)
*q++ = 0; /* 0 offset */
len1 = TS_PACKET_SIZE - (q - packet);
- if (len1 > len)
+ if (len1 > len)
len1 = len;
memcpy(q, buf_ptr, len1);
q += len1;
@@ -145,13 +145,13 @@ static inline void put16(uint8_t **q_ptr, int val)
*q_ptr = q;
}
-int mpegts_write_section1(MpegTSSection *s, int tid, int id,
+int mpegts_write_section1(MpegTSSection *s, int tid, int id,
int version, int sec_num, int last_sec_num,
uint8_t *buf, int len)
{
uint8_t section[1024], *q;
unsigned int tot_len;
-
+
tot_len = 3 + 5 + len + 4;
/* check if not too big */
if (tot_len > 1024)
@@ -165,7 +165,7 @@ int mpegts_write_section1(MpegTSSection *s, int tid, int id,
*q++ = sec_num;
*q++ = last_sec_num;
memcpy(q, buf, len);
-
+
mpegts_write_section(s, section, tot_len);
return 0;
}
@@ -230,7 +230,7 @@ static void mpegts_write_pat(AVFormatContext *s)
MpegTSService *service;
uint8_t data[1012], *q;
int i;
-
+
q = data;
for(i = 0; i < ts->nb_services; i++) {
service = ts->services[i];
@@ -258,7 +258,7 @@ static void mpegts_write_pmt(AVFormatContext *s, MpegTSService *service)
val = 0xf000 | (q - program_info_length_ptr - 2);
program_info_length_ptr[0] = val >> 8;
program_info_length_ptr[1] = val;
-
+
for(i = 0; i < s->nb_streams; i++) {
AVStream *st = s->streams[i];
MpegTSWriteStream *ts_st = st->priv_data;
@@ -328,7 +328,7 @@ static void mpegts_write_pmt(AVFormatContext *s, MpegTSService *service)
}
mpegts_write_section1(&service->pmt, PMT_TID, service->sid, 0, 0, 0,
data, q - data);
-}
+}
/* NOTE: str == NULL is accepted for an empty string */
static void putstr8(uint8_t **q_ptr, const char *str)
@@ -353,7 +353,7 @@ static void mpegts_write_sdt(AVFormatContext *s)
MpegTSService *service;
uint8_t data[1012], *q, *desc_list_len_ptr, *desc_len_ptr;
int i, running_status, free_ca_mode, val;
-
+
q = data;
put16(&q, ts->onid);
*q++ = 0xff;
@@ -376,7 +376,7 @@ static void mpegts_write_sdt(AVFormatContext *s)
desc_len_ptr[0] = q - desc_len_ptr - 1;
/* fill descriptor length */
- val = (running_status << 13) | (free_ca_mode << 12) |
+ val = (running_status << 13) | (free_ca_mode << 12) |
(q - desc_list_len_ptr - 2);
desc_list_len_ptr[0] = val >> 8;
desc_list_len_ptr[1] = val;
@@ -385,9 +385,9 @@ static void mpegts_write_sdt(AVFormatContext *s)
data, q - data);
}
-static MpegTSService *mpegts_add_service(MpegTSWrite *ts,
- int sid,
- const char *provider_name,
+static MpegTSService *mpegts_add_service(MpegTSWrite *ts,
+ int sid,
+ const char *provider_name,
const char *name)
{
MpegTSService *service;
@@ -418,14 +418,14 @@ static int mpegts_write_header(AVFormatContext *s)
AVStream *st;
int i, total_bit_rate;
const char *service_name;
-
+
ts->tsid = DEFAULT_TSID;
ts->onid = DEFAULT_ONID;
/* allocate a single DVB service */
service_name = s->title;
if (service_name[0] == '\0')
service_name = DEFAULT_SERVICE_NAME;
- service = mpegts_add_service(ts, DEFAULT_SID,
+ service = mpegts_add_service(ts, DEFAULT_SID,
DEFAULT_PROVIDER_NAME, service_name);
service->pmt.write_packet = section_write_packet;
service->pmt.opaque = s;
@@ -452,12 +452,12 @@ static int mpegts_write_header(AVFormatContext *s)
ts_st->pid = DEFAULT_START_PID + i;
ts_st->payload_pts = AV_NOPTS_VALUE;
/* update PCR pid by using the first video stream */
- if (st->codec->codec_type == CODEC_TYPE_VIDEO &&
+ if (st->codec->codec_type == CODEC_TYPE_VIDEO &&
service->pcr_pid == 0x1fff)
service->pcr_pid = ts_st->pid;
total_bit_rate += st->codec->bit_rate;
}
-
+
/* if no video stream, use the first stream as PCR */
if (service->pcr_pid == 0x1fff && s->nb_streams > 0) {
ts_st = s->streams[0]->priv_data;
@@ -466,14 +466,14 @@ static int mpegts_write_header(AVFormatContext *s)
if (total_bit_rate <= 8 * 1024)
total_bit_rate = 8 * 1024;
- service->pcr_packet_freq = (total_bit_rate * PCR_RETRANS_TIME) /
+ service->pcr_packet_freq = (total_bit_rate * PCR_RETRANS_TIME) /
(TS_PACKET_SIZE * 8 * 1000);
- ts->sdt_packet_freq = (total_bit_rate * SDT_RETRANS_TIME) /
+ ts->sdt_packet_freq = (total_bit_rate * SDT_RETRANS_TIME) /
(TS_PACKET_SIZE * 8 * 1000);
- ts->pat_packet_freq = (total_bit_rate * PAT_RETRANS_TIME) /
+ ts->pat_packet_freq = (total_bit_rate * PAT_RETRANS_TIME) /
(TS_PACKET_SIZE * 8 * 1000);
#if 0
- printf("%d %d %d\n",
+ printf("%d %d %d\n",
total_bit_rate, ts->sdt_packet_freq, ts->pat_packet_freq);
#endif
@@ -534,7 +534,7 @@ static void mpegts_write_pes(AVFormatContext *s, AVStream *st,
write_pcr = 0;
if (ts_st->pid == ts_st->service->pcr_pid) {
ts_st->service->pcr_packet_count++;
- if (ts_st->service->pcr_packet_count >=
+ if (ts_st->service->pcr_packet_count >=
ts_st->service->pcr_packet_freq) {
ts_st->service->pcr_packet_count = 0;
write_pcr = 1;
@@ -575,7 +575,7 @@ static void mpegts_write_pes(AVFormatContext *s, AVStream *st,
} else if (st->codec->codec_type == CODEC_TYPE_AUDIO &&
(st->codec->codec_id == CODEC_ID_MP2 ||
st->codec->codec_id == CODEC_ID_MP3)) {
- *q++ = 0xc0;
+ *q++ = 0xc0;
} else {
*q++ = 0xbd;
if (st->codec->codec_type == CODEC_TYPE_SUBTITLE) {
@@ -599,7 +599,7 @@ static void mpegts_write_pes(AVFormatContext *s, AVStream *st,
if (pts != AV_NOPTS_VALUE) {
*q++ = 0x80; /* PTS only */
*q++ = 0x05; /* header len */
- val = (0x02 << 4) |
+ val = (0x02 << 4) |
(((pts >> 30) & 0x07) << 1) | 1;
*q++ = val;
val = (((pts >> 15) & 0x7fff) << 1) | 1;
@@ -629,7 +629,7 @@ static void mpegts_write_pes(AVFormatContext *s, AVStream *st,
/* stuffing already present: increase its size */
afc_len = buf[4] + 1;
memmove(buf + 4 + afc_len + stuffing_len,
- buf + 4 + afc_len,
+ buf + 4 + afc_len,
header_len - (4 + afc_len));
buf[4] += stuffing_len;
memset(buf + 4 + afc_len, 0xff, stuffing_len);
@@ -665,7 +665,7 @@ static int mpegts_write_packet(AVFormatContext *s, AVPacket *pkt)
mpegts_write_pes(s, st, buf, size, pkt->pts);
return 0;
}
-
+
max_payload_size = DEFAULT_PES_PAYLOAD_SIZE;
while (size > 0) {
len = max_payload_size - ts_st->payload_index;
@@ -705,7 +705,7 @@ static int mpegts_write_end(AVFormatContext *s)
}
}
put_flush_packet(&s->pb);
-
+
for(i = 0; i < ts->nb_services; i++) {
service = ts->services[i];
av_freep(&service->provider_name);
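
mpegts_crc32() above is the table-driven form of the checksum that mpegts_write_section() appends, big-endian, as the last four bytes of every PSI section. For reference, a bitwise sketch of the same CRC-32/MPEG-2 (polynomial 0x04C11DB7, initial value 0xFFFFFFFF, no bit reflection, no final XOR); the function name is illustrative:

    static uint32_t crc32_mpeg2_bitwise(const uint8_t *data, int len)
    {
        uint32_t crc = 0xffffffff;
        int i, bit;

        for (i = 0; i < len; i++) {
            crc ^= (uint32_t)data[i] << 24;
            for (bit = 0; bit < 8; bit++)
                crc = (crc & 0x80000000) ? (crc << 1) ^ 0x04c11db7
                                         : (crc << 1);
        }
        return crc;   /* equal to the table-driven result for any input */
    }
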
diff --git a/libavformat/nsvdec.c b/libavformat/nsvdec.c
index 61abb6dde2..a2871ded04 100644
--- a/libavformat/nsvdec.c
+++ b/libavformat/nsvdec.c
@@ -46,8 +46,8 @@
/*
* notes on the header (Francois Revol):
- *
- * It is followed by strings, then a table, but nothing tells
+ *
+ * It is followed by strings, then a table, but nothing tells
* where the table begins according to (1). After checking faster.nsv,
* I believe NVSf[16-19] gives the size of the strings data
* (that is the offset of the data table after the header).
@@ -56,15 +56,15 @@
* Then, about NSVf[12-15], faster.nsf has 179700. When veiwing it in VLC,
* I noticed there was about 1 NVSs chunk/s, so I ran
* strings faster.nsv | grep NSVs | wc -l
- * which gave me 180. That leads me to think that NSVf[12-15] might be the
+ * which gave me 180. That leads me to think that NSVf[12-15] might be the
* file length in milliseconds.
* Let's try that:
* for f in *.nsv; do HTIME="$(od -t x4 "$f" | head -1 | sed 's/.* //')"; echo "'$f' $((0x$HTIME))s = $((0x$HTIME/1000/60)):$((0x$HTIME/1000%60))"; done
* except for nstrailer (which doesn't have an NSVf header), it repports correct time.
*
- * nsvtrailer.nsv (S1) does not have any NSVf header, only NSVs chunks,
+ * nsvtrailer.nsv (S1) does not have any NSVf header, only NSVs chunks,
* so the header seems to not be mandatory. (for streaming).
- *
+ *
* index slice duration check (excepts nsvtrailer.nsv):
* for f in [^n]*.nsv; do DUR="$(ffmpeg -i "$f" 2>/dev/null | grep 'NSVf duration' | cut -d ' ' -f 4)"; IC="$(ffmpeg -i "$f" 2>/dev/null | grep 'INDEX ENTRIES' | cut -d ' ' -f 2)"; echo "duration $DUR, slite time $(($DUR/$IC))"; done
*/
@@ -80,7 +80,7 @@
#ifdef DEBUG
#define PRINT(_v) printf _v
#else
-#define PRINT(_v)
+#define PRINT(_v)
#endif
#if 0
@@ -156,10 +156,10 @@ typedef struct NSVStream {
int frame_offset; /* current frame (video) or byte (audio) counter
(used to compute the pts) */
int scale;
- int rate;
+ int rate;
int sample_size; /* audio only data */
int start;
-
+
int new_frame_offset; /* temporary storage (used during seek) */
int cum_len; /* temporary storage (used during seek) */
} NSVStream;
@@ -235,11 +235,11 @@ static int nsv_resync(AVFormatContext *s)
ByteIOContext *pb = &s->pb;
uint32_t v = 0;
int i;
-
+
PRINT(("%s(), offset = %"PRId64", state = %d\n", __FUNCTION__, url_ftell(pb), nsv->state));
-
+
//nsv->state = NSV_UNSYNC;
-
+
for (i = 0; i < NSV_MAX_RESYNC; i++) {
if (url_feof(pb)) {
PRINT(("NSV EOF\n"));
@@ -253,7 +253,7 @@ static int nsv_resync(AVFormatContext *s)
PRINT(("NSV resync: [%d] = %02x\n", i, v & 0x0FF));
}
*/
-
+
if ((v & 0x0000ffff) == 0xefbe) { /* BEEF */
PRINT(("NSV resynced on BEEF after %d bytes\n", i+1));
nsv->state = NSV_FOUND_BEEF;
@@ -270,7 +270,7 @@ static int nsv_resync(AVFormatContext *s)
nsv->state = NSV_FOUND_NSVS;
return 0;
}
-
+
}
PRINT(("NSV sync lost\n"));
return -1;
@@ -289,7 +289,7 @@ static int nsv_parse_NSVf_header(AVFormatContext *s, AVFormatParameters *ap)
PRINT(("%s()\n", __FUNCTION__));
nsv->state = NSV_UNSYNC; /* in case we fail */
-
+
size = get_le32(pb);
if (size < 28)
return -1;
@@ -307,11 +307,11 @@ static int nsv_parse_NSVf_header(AVFormatContext *s, AVFormatParameters *ap)
strings_size = get_le32(pb);
table_entries = get_le32(pb);
table_entries_used = get_le32(pb);
- PRINT(("NSV NSVf info-strings size: %d, table entries: %d, bis %d\n",
+ PRINT(("NSV NSVf info-strings size: %d, table entries: %d, bis %d\n",
strings_size, table_entries, table_entries_used));
if (url_feof(pb))
return -1;
-
+
PRINT(("NSV got header; filepos %"PRId64"\n", url_ftell(pb)));
if (strings_size > 0) {
@@ -354,7 +354,7 @@ static int nsv_parse_NSVf_header(AVFormatContext *s, AVFormatParameters *ap)
}
if (url_feof(pb))
return -1;
-
+
PRINT(("NSV got infos; filepos %"PRId64"\n", url_ftell(pb)));
if (table_entries_used > 0) {
@@ -367,7 +367,7 @@ static int nsv_parse_NSVf_header(AVFormatContext *s, AVFormatParameters *ap)
}
PRINT(("NSV got index; filepos %"PRId64"\n", url_ftell(pb)));
-
+
#ifdef DEBUG_DUMP_INDEX
#define V(v) ((v<0x20 || v > 127)?'.':v)
/* dump index */
@@ -380,15 +380,15 @@ static int nsv_parse_NSVf_header(AVFormatContext *s, AVFormatParameters *ap)
PRINT(("NSV [0x%08lx][0x%08lx]: %02x %02x %02x %02x %02x %02x %02x %02x"
"%c%c%c%c%c%c%c%c\n",
nsv->nsvf_index_data[i], size + nsv->nsvf_index_data[i],
- b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7],
+ b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7],
V(b[0]), V(b[1]), V(b[2]), V(b[3]), V(b[4]), V(b[5]), V(b[6]), V(b[7]) ));
}
//url_fseek(pb, size, SEEK_SET); /* go back to end of header */
#undef V
#endif
-
+
url_fseek(pb, nsv->base_offset + size, SEEK_SET); /* required for dumbdriving-271.nsv (2 extra bytes) */
-
+
if (url_feof(pb))
return -1;
nsv->state = NSV_HAS_READ_NSVF;
@@ -424,7 +424,7 @@ static int nsv_parse_NSVs_header(AVFormatContext *s, AVFormatParameters *ap)
print_tag("NSV NSVs atag", atag, 0);
PRINT(("NSV NSVs vsize %dx%d\n", vwidth, vheight));
#endif
-
+
/* XXX change to ap != NULL ? */
if (s->nb_streams == 0) { /* streams not yet published, let's do that */
nsv->vtag = vtag;
@@ -466,7 +466,7 @@ static int nsv_parse_NSVs_header(AVFormatContext *s, AVFormatParameters *ap)
st->codec->codec_id = codec_get_id(nsv_codec_audio_tags, atag);
st->start_time = 0;
// st->duration = nsv->duration; //FIXME
-
+
st->need_parsing = 1; /* for PCM we will read a chunk later and put correct info */
/* XXX:FIXME */
//st->codec->channels = 2; //XXX:channels;
@@ -502,7 +502,7 @@ static int nsv_read_header(AVFormatContext *s, AVFormatParameters *ap)
nsv->state = NSV_UNSYNC;
nsv->ahead[0].data = nsv->ahead[1].data = NULL;
-
+
for (i = 0; i < NSV_MAX_RESYNC_TRIES; i++) {
if (nsv_resync(s) < 0)
return -1;
@@ -518,7 +518,7 @@ static int nsv_read_header(AVFormatContext *s, AVFormatParameters *ap)
return -1;
/* now read the first chunk, so we can attempt to decode more info */
err = nsv_read_chunk(s, 1);
-
+
PRINT(("parsed header\n"));
return 0;
}
@@ -536,16 +536,16 @@ static int nsv_read_chunk(AVFormatContext *s, int fill_header)
uint16_t asize;
uint16_t auxsize;
uint32_t auxtag;
-
+
PRINT(("%s(%d)\n", __FUNCTION__, fill_header));
-
+
if (nsv->ahead[0].data || nsv->ahead[1].data)
return 0; //-1; /* hey! eat what you've in your plate first! */
null_chunk_retry:
if (url_feof(pb))
return -1;
-
+
for (i = 0; i < NSV_MAX_RESYNC_TRIES && nsv->state < NSV_FOUND_NSVS && !err; i++)
err = nsv_resync(s);
if (err < 0)
@@ -556,7 +556,7 @@ null_chunk_retry:
return err;
if (nsv->state != NSV_HAS_READ_NSVS && nsv->state != NSV_FOUND_BEEF)
return -1;
-
+
auxcount = get_byte(pb);
vsize = get_le16(pb);
asize = get_le16(pb);
@@ -567,29 +567,29 @@ null_chunk_retry:
for (i = 0; i < auxcount; i++) {
auxsize = get_le16(pb);
auxtag = get_le32(pb);
- PRINT(("NSV aux data: '%c%c%c%c', %d bytes\n",
- (auxtag & 0x0ff),
- ((auxtag >> 8) & 0x0ff),
+ PRINT(("NSV aux data: '%c%c%c%c', %d bytes\n",
+ (auxtag & 0x0ff),
+ ((auxtag >> 8) & 0x0ff),
((auxtag >> 16) & 0x0ff),
((auxtag >> 24) & 0x0ff),
auxsize));
url_fskip(pb, auxsize);
vsize -= auxsize + sizeof(uint16_t) + sizeof(uint32_t); /* that's becoming braindead */
}
-
+
if (url_feof(pb))
return -1;
if (!vsize && !asize) {
nsv->state = NSV_UNSYNC;
goto null_chunk_retry;
}
-
+
/* map back streams to v,a */
if (s->streams[0])
st[s->streams[0]->id] = s->streams[0];
if (s->streams[1])
st[s->streams[1]->id] = s->streams[1];
-
+
if (vsize/* && st[NSV_ST_VIDEO]*/) {
nst = st[NSV_ST_VIDEO]->priv_data;
pkt = &nsv->ahead[NSV_ST_VIDEO];
@@ -628,7 +628,7 @@ null_chunk_retry:
channels = 1;
st[NSV_ST_AUDIO]->codec->channels = channels;
st[NSV_ST_AUDIO]->codec->sample_rate = samplerate;
- av_set_pts_info(st[NSV_ST_AUDIO], 64, 1,
+ av_set_pts_info(st[NSV_ST_AUDIO], 64, 1,
st[NSV_ST_AUDIO]->codec->sample_rate);
PRINT(("NSV RAWAUDIO: bps %d, nchan %d, srate %d\n", bps, channels, samplerate));
}
@@ -640,7 +640,7 @@ null_chunk_retry:
// pkt->dts /= nst->sample_size;
nst->frame_offset += asize; // XXX: that's valid only for PCM !?
}
-
+
//pkt->flags |= PKT_FLAG_KEY;
nsv->state = NSV_UNSYNC;
return 0;
@@ -653,13 +653,13 @@ static int nsv_read_packet(AVFormatContext *s, AVPacket *pkt)
int i, err = 0;
PRINT(("%s()\n", __FUNCTION__));
-
+
/* in case we don't already have something to eat ... */
if (nsv->ahead[0].data == NULL && nsv->ahead[1].data == NULL)
err = nsv_read_chunk(s, 0);
if (err < 0)
return err;
-
+
/* now pick one of the plates */
for (i = 0; i < 2; i++) {
if (nsv->ahead[i].data) {
@@ -670,7 +670,7 @@ static int nsv_read_packet(AVFormatContext *s, AVPacket *pkt)
return pkt->size;
}
}
-
+
/* this restaurant is not approvisionned :^] */
return -1;
}
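
nsv_resync() above recovers sync by sliding a 32-bit window over the stream one byte at a time and comparing it against the chunk signatures; the low 16 bits are checked against 0xefbe because that is how the 0xBEEF frame marker looks once its two stream bytes have been shifted in. A reduced sketch of the same scanning idiom over a memory buffer (the buffer handling and return values are illustrative):

    /* Returns the number of bytes consumed up to and including a frame
       marker, or -1 if none was found. */
    static int resync_sketch(const uint8_t *buf, int size)
    {
        uint32_t v = 0;
        int i;

        for (i = 0; i < size; i++) {
            v = (v << 8) | buf[i];             /* keep the last 4 bytes */
            if ((v & 0x0000ffff) == 0xefbe)    /* 0xBEEF frame marker */
                return i + 1;
            /* the full 32-bit 'NSVf' / 'NSVs' chunk tags are matched
               against v in exactly the same way in nsv_resync() */
        }
        return -1;                             /* sync lost */
    }
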
diff --git a/libavformat/nut.c b/libavformat/nut.c
index 4bcfbb9fae..ac696cb628 100644
--- a/libavformat/nut.c
+++ b/libavformat/nut.c
@@ -42,11 +42,11 @@
//from /dev/random
-#define MAIN_STARTCODE (0x7A561F5F04ADULL + (((uint64_t)('N'<<8) + 'M')<<48))
-#define STREAM_STARTCODE (0x11405BF2F9DBULL + (((uint64_t)('N'<<8) + 'S')<<48))
-#define KEYFRAME_STARTCODE (0xE4ADEECA4569ULL + (((uint64_t)('N'<<8) + 'K')<<48))
-#define INDEX_STARTCODE (0xDD672F23E64EULL + (((uint64_t)('N'<<8) + 'X')<<48))
-#define INFO_STARTCODE (0xAB68B596BA78ULL + (((uint64_t)('N'<<8) + 'I')<<48))
+#define MAIN_STARTCODE (0x7A561F5F04ADULL + (((uint64_t)('N'<<8) + 'M')<<48))
+#define STREAM_STARTCODE (0x11405BF2F9DBULL + (((uint64_t)('N'<<8) + 'S')<<48))
+#define KEYFRAME_STARTCODE (0xE4ADEECA4569ULL + (((uint64_t)('N'<<8) + 'K')<<48))
+#define INDEX_STARTCODE (0xDD672F23E64EULL + (((uint64_t)('N'<<8) + 'X')<<48))
+#define INFO_STARTCODE (0xAB68B596BA78ULL + (((uint64_t)('N'<<8) + 'I')<<48))
#define ID_STRING "nut/multimedia container\0"
@@ -115,7 +115,7 @@ void ff_parse_specific_params(AVCodecContext *stream, int *au_rate, int *au_ssiz
static void update(NUTContext *nut, int stream_index, int64_t frame_start, int frame_type, int frame_code, int key_frame, int size, int64_t pts){
StreamContext *stream= &nut->stream[stream_index];
-
+
stream->last_key_frame= key_frame;
nut->packet_start[ frame_type ]= frame_start;
stream->last_pts= pts;
@@ -124,10 +124,10 @@ static void update(NUTContext *nut, int stream_index, int64_t frame_start, int f
static void reset(AVFormatContext *s, int64_t global_ts){
NUTContext *nut = s->priv_data;
int i;
-
+
for(i=0; i<s->nb_streams; i++){
StreamContext *stream= &nut->stream[i];
-
+
stream->last_key_frame= 1;
stream->last_pts= av_rescale(global_ts, stream->rate_num*(int64_t)nut->rate_den, stream->rate_den*(int64_t)nut->rate_num);
@@ -163,7 +163,7 @@ static void build_frame_code(AVFormatContext *s){
for(key_frame=0; key_frame<2; key_frame++){
if(intra_only && keyframe_0_esc && key_frame==0)
continue;
-
+
{
FrameCode *ft= &nut->frame_code[start2];
ft->flags= FLAG_KEY_FRAME*key_frame;
@@ -258,7 +258,7 @@ static uint64_t get_v(ByteIOContext *bc)
static int get_str(ByteIOContext *bc, char *string, unsigned int maxlen){
unsigned int len= get_v(bc);
-
+
if(len && maxlen)
get_buffer(bc, string, FFMIN(len, maxlen));
while(len > maxlen){
@@ -268,7 +268,7 @@ static int get_str(ByteIOContext *bc, char *string, unsigned int maxlen){
if(maxlen)
string[FFMIN(len, maxlen-1)]= 0;
-
+
if(maxlen == len)
return -1;
else
@@ -285,13 +285,13 @@ static int64_t get_s(ByteIOContext *bc){
static uint64_t get_vb(ByteIOContext *bc){
uint64_t val=0;
unsigned int i= get_v(bc);
-
+
if(i>8)
return UINT64_MAX;
-
+
while(i--)
val = (val<<8) + get_byte(bc);
-
+
//av_log(NULL, AV_LOG_DEBUG, "get_vb()= %lld\n", val);
return val;
}
@@ -344,7 +344,7 @@ static int check_checksum(ByteIOContext *bc){
}
/**
- *
+ *
*/
static int get_length(uint64_t val){
int i;
@@ -356,7 +356,7 @@ static int get_length(uint64_t val){
static uint64_t find_any_startcode(ByteIOContext *bc, int64_t pos){
uint64_t state=0;
-
+
if(pos >= 0)
url_fseek(bc, pos, SEEK_SET); //note, this may fail if the stream isnt seekable, but that shouldnt matter, as in this case we simply start where we are currently
@@ -416,7 +416,7 @@ static void put_v(ByteIOContext *bc, uint64_t val)
*/
static void put_str(ByteIOContext *bc, const char *string){
int len= strlen(string);
-
+
put_v(bc, len);
put_buffer(bc, string, len);
}
@@ -428,7 +428,7 @@ static void put_s(ByteIOContext *bc, int64_t val){
static void put_vb(ByteIOContext *bc, uint64_t val){
int i;
-
+
for (i=8; val>>i; i+=8);
put_v(bc, i>>3);
@@ -439,19 +439,19 @@ static void put_vb(ByteIOContext *bc, uint64_t val){
#ifdef TRACE
static inline void put_v_trace(ByteIOContext *bc, uint64_t v, char *file, char *func, int line){
printf("get_v %5lld / %llX in %s %s:%d\n", v, v, file, func, line);
-
+
put_v(bc, v);
}
static inline void put_s_trace(ByteIOContext *bc, int64_t v, char *file, char *func, int line){
printf("get_s %5lld / %llX in %s %s:%d\n", v, v, file, func, line);
-
+
put_s(bc, v);
}
static inline void put_vb_trace(ByteIOContext *bc, uint64_t v, char *file, char *func, int line){
printf("get_vb %5lld / %llX in %s %s:%d\n", v, v, file, func, line);
-
+
put_vb(bc, v);
}
#define put_v(bc, v) put_v_trace(bc, v, __FILE__, __PRETTY_FUNCTION__, __LINE__)
@@ -464,7 +464,7 @@ static int put_packetheader(NUTContext *nut, ByteIOContext *bc, int max_size, in
put_flush_packet(bc);
nut->packet_start[2]= url_ftell(bc) - 8;
nut->written_packet_size = max_size;
-
+
/* packet header */
put_v(bc, nut->written_packet_size); /* forward ptr */
@@ -482,15 +482,15 @@ static int update_packetheader(NUTContext *nut, ByteIOContext *bc, int additiona
int64_t start= nut->packet_start[2];
int64_t cur= url_ftell(bc);
int size= cur - start - get_length(nut->written_packet_size)/7 - 8;
-
+
if(calculate_checksum)
size += 4;
-
+
if(size != nut->written_packet_size){
int i;
assert( size <= nut->written_packet_size );
-
+
url_fseek(bc, start + 8, SEEK_SET);
for(i=get_length(size); i < get_length(nut->written_packet_size); i+=7)
put_byte(bc, 0x80);
@@ -498,11 +498,11 @@ static int update_packetheader(NUTContext *nut, ByteIOContext *bc, int additiona
url_fseek(bc, cur, SEEK_SET);
nut->written_packet_size= size; //FIXME may fail if multiple updates with differing sizes, as get_length may differ
-
+
if(calculate_checksum)
put_be32(bc, get_checksum(bc));
}
-
+
return 0;
}
@@ -514,15 +514,15 @@ static int nut_write_header(AVFormatContext *s)
int i, j, tmp_time, tmp_flags,tmp_stream, tmp_mul, tmp_size, tmp_fields;
nut->avf= s;
-
- nut->stream =
+
+ nut->stream =
av_mallocz(sizeof(StreamContext)*s->nb_streams);
-
+
put_buffer(bc, ID_STRING, strlen(ID_STRING));
put_byte(bc, 0);
nut->packet_start[2]= url_ftell(bc);
-
+
/* main header */
put_be64(bc, MAIN_STARTCODE);
put_packetheader(nut, bc, 120+5*256, 1);
@@ -530,14 +530,14 @@ static int nut_write_header(AVFormatContext *s)
put_v(bc, s->nb_streams);
put_v(bc, MAX_DISTANCE);
put_v(bc, MAX_SHORT_DISTANCE);
-
+
put_v(bc, nut->rate_num=1);
put_v(bc, nut->rate_den=2);
put_v(bc, nut->short_startcode=0x4EFE79);
-
+
build_frame_code(s);
assert(nut->frame_code['N'].flags == FLAG_INVALID);
-
+
tmp_time= tmp_flags= tmp_stream= tmp_mul= tmp_size= /*tmp_res=*/ INT_MAX;
for(i=0; i<256;){
tmp_fields=0;
@@ -554,7 +554,7 @@ static int nut_write_header(AVFormatContext *s)
tmp_mul = nut->frame_code[i].size_mul;
tmp_size = nut->frame_code[i].size_lsb;
// tmp_res = nut->frame_code[i].res;
-
+
for(j=0; i<256; j++,i++){
if(nut->frame_code[i].timestamp_delta != tmp_time ) break;
if(nut->frame_code[i].flags != tmp_flags ) break;
@@ -576,14 +576,14 @@ static int nut_write_header(AVFormatContext *s)
}
update_packetheader(nut, bc, 0, 1);
-
+
/* stream headers */
for (i = 0; i < s->nb_streams; i++)
{
int nom, denom, ssize;
codec = s->streams[i]->codec;
-
+
put_be64(bc, STREAM_STARTCODE);
put_packetheader(nut, bc, 120 + codec->extradata_size, 1);
put_v(bc, i /*s->streams[i]->index*/);
@@ -606,7 +606,7 @@ static int nut_write_header(AVFormatContext *s)
}
else
put_vb(bc, 0);
-
+
ff_parse_specific_params(codec, &nom, &ssize, &denom);
nut->stream[i].rate_num= nom;
@@ -624,14 +624,14 @@ static int nut_write_header(AVFormatContext *s)
put_v(bc, nut->stream[i].msb_timestamp_shift);
put_v(bc, codec->has_b_frames);
put_byte(bc, 0); /* flags: 0x1 - fixed_fps, 0x2 - index_present */
-
+
if(codec->extradata_size){
put_v(bc, 1);
put_v(bc, codec->extradata_size);
- put_buffer(bc, codec->extradata, codec->extradata_size);
+ put_buffer(bc, codec->extradata, codec->extradata_size);
}
put_v(bc, 0); /* end of codec specific headers */
-
+
switch(codec->codec_type)
{
case CODEC_TYPE_AUDIO:
@@ -655,7 +655,7 @@ static int nut_write_header(AVFormatContext *s)
/* info header */
put_be64(bc, INFO_STARTCODE);
put_packetheader(nut, bc, 30+strlen(s->author)+strlen(s->title)+
- strlen(s->comment)+strlen(s->copyright)+strlen(LIBAVFORMAT_IDENT), 1);
+ strlen(s->comment)+strlen(s->copyright)+strlen(LIBAVFORMAT_IDENT), 1);
if (s->author[0])
{
put_v(bc, 9); /* type */
@@ -681,12 +681,12 @@ static int nut_write_header(AVFormatContext *s)
put_v(bc, 13); /* type */
put_str(bc, LIBAVFORMAT_IDENT);
}
-
+
put_v(bc, 0); /* eof info */
update_packetheader(nut, bc, 0, 1);
-
+
put_flush_packet(bc);
-
+
return 0;
}
@@ -712,7 +712,7 @@ static int nut_write_packet(AVFormatContext *s, AVPacket *pkt)
enc = s->streams[stream_index]->codec;
key_frame = !!(pkt->flags & PKT_FLAG_KEY);
-
+
frame_type=0;
if(frame_start + size + 20 - FFMAX(nut->packet_start[1], nut->packet_start[2]) > MAX_DISTANCE)
frame_type=2;
@@ -745,7 +745,7 @@ static int nut_write_packet(AVFormatContext *s, AVPacket *pkt)
flags= nut->frame_code[i].flags;
assert(size_mul > size_lsb);
-
+
if(stream_id_plus1 == 0) length+= get_length(stream_index);
else if(stream_id_plus1 - 1 != stream_index)
continue;
@@ -764,7 +764,7 @@ static int nut_write_packet(AVFormatContext *s, AVPacket *pkt)
if(full_pts && time_delta)
continue;
-
+
if(!time_delta){
length += get_length(coded_pts);
}else{
@@ -809,11 +809,11 @@ static int nut_write_packet(AVFormatContext *s, AVPacket *pkt)
if(size > MAX_DISTANCE){
assert(frame_type > 1);
}
-
+
put_buffer(bc, pkt->data, size);
update(nut, stream_index, frame_start, frame_type, frame_code, key_frame, size, pts);
-
+
return 0;
}
@@ -838,7 +838,7 @@ static int nut_write_trailer(AVFormatContext *s)
#endif
put_flush_packet(bc);
-
+
av_freep(&nut->stream);
return 0;
@@ -863,7 +863,7 @@ static int decode_main_header(NUTContext *nut){
ByteIOContext *bc = &s->pb;
uint64_t tmp;
int i, j, tmp_stream, tmp_mul, tmp_time, tmp_size, count, tmp_res;
-
+
get_packetheader(nut, bc, 1);
tmp = get_v(bc);
@@ -871,7 +871,7 @@ static int decode_main_header(NUTContext *nut){
av_log(s, AV_LOG_ERROR, "bad version (%"PRId64")\n", tmp);
return -1;
}
-
+
nut->stream_count = get_v(bc);
if(nut->stream_count > MAX_STREAMS){
av_log(s, AV_LOG_ERROR, "too many streams\n");
@@ -886,7 +886,7 @@ static int decode_main_header(NUTContext *nut){
av_log(s, AV_LOG_ERROR, "invalid short startcode %X\n", nut->short_startcode);
return -1;
}
-
+
for(i=0; i<256;){
int tmp_flags = get_v(bc);
int tmp_fields= get_v(bc);
@@ -899,10 +899,10 @@ static int decode_main_header(NUTContext *nut){
else tmp_res = 0;
if(tmp_fields>5) count = get_v(bc);
else count = tmp_mul - tmp_size;
-
- while(tmp_fields-- > 6)
+
+ while(tmp_fields-- > 6)
get_v(bc);
-
+
if(count == 0 || i+count > 256){
av_log(s, AV_LOG_ERROR, "illegal count %d at %d\n", count, i);
return -1;
@@ -940,12 +940,12 @@ static int decode_stream_header(NUTContext *nut){
int class, nom, denom, stream_id;
uint64_t tmp;
AVStream *st;
-
+
get_packetheader(nut, bc, 1);
stream_id= get_v(bc);
if(stream_id >= nut->stream_count || s->streams[stream_id])
return -1;
-
+
st = av_new_stream(s, stream_id);
if (!st)
return AVERROR_NOMEM;
@@ -993,10 +993,10 @@ static int decode_stream_header(NUTContext *nut){
if((unsigned)st->codec->extradata_size > (1<<30))
return -1;
st->codec->extradata= av_mallocz(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
- get_buffer(bc, st->codec->extradata, st->codec->extradata_size);
+ get_buffer(bc, st->codec->extradata, st->codec->extradata_size);
// url_fskip(bc, get_v(bc));
}
-
+
if (st->codec->codec_type == CODEC_TYPE_VIDEO) /* VIDEO */
{
st->codec->width = get_v(bc);
@@ -1024,7 +1024,7 @@ static int decode_stream_header(NUTContext *nut){
static int decode_info_header(NUTContext *nut){
AVFormatContext *s= nut->avf;
ByteIOContext *bc = &s->pb;
-
+
get_packetheader(nut, bc, 1);
for(;;){
@@ -1050,7 +1050,7 @@ static int decode_info_header(NUTContext *nut){
get_str(bc, custom_name, sizeof(custom_name));
name= custom_name;
}
-
+
if(!strcmp(type, "v")){
get_v(bc);
}else{
@@ -1081,7 +1081,7 @@ static int nut_read_header(AVFormatContext *s, AVFormatParameters *ap)
int inited_stream_count;
nut->avf= s;
-
+
/* main header */
pos=0;
for(;;){
@@ -1093,8 +1093,8 @@ static int nut_read_header(AVFormatContext *s, AVFormatParameters *ap)
if(decode_main_header(nut) >= 0)
break;
}
-
-
+
+
s->bit_rate = 0;
nut->stream = av_malloc(sizeof(StreamContext)*nut->stream_count);
@@ -1147,13 +1147,13 @@ static int decode_frame_header(NUTContext *nut, int *key_frame_ret, int64_t *pts
if(frame_type)
nut->packet_start[ frame_type ]= frame_start; //otherwise 1 goto 1 may happen
-
+
flags= nut->frame_code[frame_code].flags;
size_mul= nut->frame_code[frame_code].size_mul;
size_lsb= nut->frame_code[frame_code].size_lsb;
stream_id= nut->frame_code[frame_code].stream_id_plus1 - 1;
time_delta= nut->frame_code[frame_code].timestamp_delta;
-
+
if(stream_id==-1)
stream_id= get_v(bc);
if(stream_id >= s->nb_streams){
@@ -1189,9 +1189,9 @@ static int decode_frame_header(NUTContext *nut, int *key_frame_ret, int64_t *pts
if(*key_frame_ret){
// av_log(s, AV_LOG_DEBUG, "stream:%d start:%lld pts:%lld length:%lld\n",stream_id, frame_start, av_pts, frame_start - nut->stream[stream_id].last_sync_pos);
av_add_index_entry(
- s->streams[stream_id],
- frame_start,
- pts,
+ s->streams[stream_id],
+ frame_start,
+ pts,
frame_start - nut->stream[stream_id].last_sync_pos,
AVINDEX_KEYFRAME);
nut->stream[stream_id].last_sync_pos= frame_start;
@@ -1202,7 +1202,7 @@ static int decode_frame_header(NUTContext *nut, int *key_frame_ret, int64_t *pts
size= size_lsb;
if(flags & FLAG_DATA_SIZE)
size+= size_mul*get_v(bc);
-
+
#ifdef TRACE
av_log(s, AV_LOG_DEBUG, "fs:%lld fc:%d ft:%d kf:%d pts:%lld size:%d mul:%d lsb:%d flags:%d delta:%d\n", frame_start, frame_code, frame_type, *key_frame_ret, pts, size, size_mul, size_lsb, flags, time_delta);
#endif
@@ -1211,7 +1211,7 @@ av_log(s, AV_LOG_DEBUG, "fs:%lld fc:%d ft:%d kf:%d pts:%lld size:%d mul:%d lsb:%
av_log(s, AV_LOG_ERROR, "frame size too large\n");
return -1;
}
-
+
*stream_id_ret = stream_id;
*pts_ret = pts;
@@ -1225,7 +1225,7 @@ static int decode_frame(NUTContext *nut, AVPacket *pkt, int frame_code, int fram
ByteIOContext *bc = &s->pb;
int size, stream_id, key_frame, discard;
int64_t pts, last_IP_pts;
-
+
size= decode_frame_header(nut, &key_frame, &pts, &stream_id, frame_code, frame_type, frame_start);
if(size < 0)
return -1;
@@ -1336,12 +1336,12 @@ av_log(s, AV_LOG_DEBUG, "read_timestamp(X,%d,%"PRId64",%"PRId64")\n", stream_ind
url_fseek(bc, -8, SEEK_CUR);
for(i=0; i<s->nb_streams; i++)
nut->stream[i].last_sync_pos= url_ftell(bc);
-
+
for(;;){
int frame_type=0;
int64_t pos= url_ftell(bc);
uint64_t tmp=0;
-
+
if(pos > pos_limit || url_feof(bc))
return AV_NOPTS_VALUE;
@@ -1370,13 +1370,13 @@ av_log(s, AV_LOG_DEBUG, "read_timestamp(X,%d,%"PRId64",%"PRId64")\n", stream_ind
size= decode_frame_header(nut, &key_frame, &pts, &stream_id, frame_code, frame_type, pos);
if(size < 0)
goto resync;
-
+
stream= &nut->stream[stream_id];
if(stream_id != stream_index || !key_frame || pos < *pos_arg){
url_fseek(bc, size, SEEK_CUR);
break;
}
-
+
*pos_arg= pos;
return pts;
default:
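
Nearly every field written and read in the nut.c hunks above goes through put_v()/get_v(): NUT's variable-length unsigned integers store seven payload bits per byte, most significant group first, with the top bit set on every byte except the last. A self-contained sketch over a plain byte buffer (the ByteIOContext plumbing is omitted and the names are illustrative):

    /* Encode; returns the number of bytes written. Only 63 bits are kept
       here so the shifts below stay well-defined. */
    static int put_v_sketch(uint8_t *out, uint64_t val)
    {
        int i = 7, n = 0;

        val &= 0x7fffffffffffffffULL;
        while (val >> i)                        /* find the highest group */
            i += 7;
        while (i > 7) {
            i -= 7;
            out[n++] = 0x80 | ((val >> i) & 0x7f);   /* continuation bytes */
        }
        out[n++] = val & 0x7f;                  /* final byte, top bit clear */
        return n;
    }

    /* Decode; mirrors get_v() in the diff above. */
    static uint64_t get_v_sketch(const uint8_t **p)
    {
        uint64_t val = 0;
        int tmp;

        do {
            tmp = *(*p)++;
            val = (val << 7) | (tmp & 0x7f);
        } while (tmp & 0x80);
        return val;
    }
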
diff --git a/libavformat/ogg.c b/libavformat/ogg.c
index 53c8b05826..68d678ba5f 100644
--- a/libavformat/ogg.c
+++ b/libavformat/ogg.c
@@ -30,14 +30,14 @@ typedef struct OggContext {
#ifdef CONFIG_MUXERS
-static int ogg_write_header(AVFormatContext *avfcontext)
+static int ogg_write_header(AVFormatContext *avfcontext)
{
OggContext *context = avfcontext->priv_data;
- ogg_packet *op= &context->op;
+ ogg_packet *op= &context->op;
int n;
ogg_stream_init(&context->os, 31415);
-
+
for(n = 0 ; n < avfcontext->nb_streams ; n++) {
AVCodecContext *codec = avfcontext->streams[n]->codec;
uint8_t *headers = codec->extradata;
@@ -45,7 +45,7 @@ static int ogg_write_header(AVFormatContext *avfcontext)
uint8_t *header_start[3];
int header_len[3];
int i, j;
-
+
av_set_pts_info(avfcontext->streams[n], 60, 1, AV_TIME_BASE);
for(j=1,i=0;i<2;++i, ++j) {
@@ -75,7 +75,7 @@ static int ogg_write_header(AVFormatContext *avfcontext)
context->header_handled = 0 ;
}
-
+
return 0 ;
}
@@ -108,14 +108,14 @@ static int ogg_write_packet(AVFormatContext *avfcontext, AVPacket *pkt)
op->granulepos= pts;
/* correct the fields in the packet -- essential for streaming */
-
- ogg_stream_packetin(&context->os, op);
-
+
+ ogg_stream_packetin(&context->os, op);
+
while(ogg_stream_pageout(&context->os, &og)) {
put_buffer(&avfcontext->pb, og.header, og.header_len);
- put_buffer(&avfcontext->pb, og.body, og.body_len);
+ put_buffer(&avfcontext->pb, og.body, og.body_len);
put_flush_packet(&avfcontext->pb);
- }
+ }
op->packetno++;
return 0;
@@ -164,9 +164,9 @@ static int next_packet(AVFormatContext *avfcontext, ogg_packet *op) {
buf = ogg_sync_buffer(&context->oy, DECODER_BUFFER_SIZE) ;
if(get_buffer(&avfcontext->pb, buf, DECODER_BUFFER_SIZE) <= 0)
return 1 ;
- ogg_sync_wrote(&context->oy, DECODER_BUFFER_SIZE) ;
- }
-
+ ogg_sync_wrote(&context->oy, DECODER_BUFFER_SIZE) ;
+ }
+
/* got a page. Feed it into the stream and get the packet */
if(ogg_stream_pagein(&context->os, &og) != 0)
return 1 ;
@@ -179,25 +179,25 @@ static int next_packet(AVFormatContext *avfcontext, ogg_packet *op) {
static int ogg_read_header(AVFormatContext *avfcontext, AVFormatParameters *ap)
{
OggContext *context = avfcontext->priv_data;
- ogg_packet op ;
+ ogg_packet op ;
char *buf ;
ogg_page og ;
AVStream *ast ;
AVCodecContext *codec;
uint8_t *p;
int i;
-
+
ogg_sync_init(&context->oy) ;
buf = ogg_sync_buffer(&context->oy, DECODER_BUFFER_SIZE) ;
if(get_buffer(&avfcontext->pb, buf, DECODER_BUFFER_SIZE) <= 0)
return AVERROR_IO ;
-
- ogg_sync_wrote(&context->oy, DECODER_BUFFER_SIZE) ;
+
+ ogg_sync_wrote(&context->oy, DECODER_BUFFER_SIZE) ;
ogg_sync_pageout(&context->oy, &og) ;
ogg_stream_init(&context->os, ogg_page_serialno(&og)) ;
ogg_stream_pagein(&context->os, &og) ;
-
+
/* currently only one vorbis stream supported */
ast = av_new_stream(avfcontext, 0) ;
@@ -230,7 +230,7 @@ static int ogg_read_header(AVFormatContext *avfcontext, AVFormatParameters *ap)
static int ogg_read_packet(AVFormatContext *avfcontext, AVPacket *pkt) {
ogg_packet op ;
- if(next_packet(avfcontext, &op))
+ if(next_packet(avfcontext, &op))
return AVERROR_IO ;
if(av_new_packet(pkt, op.bytes) < 0)
return AVERROR_IO ;
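
The ogg_write_packet() hunk above shows the standard libogg muxing pattern: each compressed packet is handed to ogg_stream_packetin(), and any pages libogg has completed are drained with ogg_stream_pageout() and written out header-then-body. The same loop in a reduced sketch that writes to a stdio file instead of a ByteIOContext (only the libogg calls are real; the rest is illustrative):

    #include <stdio.h>
    #include <ogg/ogg.h>

    static void ogg_mux_packet_sketch(ogg_stream_state *os, ogg_packet *op,
                                      FILE *out)
    {
        ogg_page og;

        ogg_stream_packetin(os, op);            /* queue the packet */
        while (ogg_stream_pageout(os, &og)) {   /* drain finished pages */
            fwrite(og.header, 1, og.header_len, out);
            fwrite(og.body,   1, og.body_len,   out);
        }
    }
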
diff --git a/libavformat/ogg2.c b/libavformat/ogg2.c
index 8e00946826..c3783c8412 100644
--- a/libavformat/ogg2.c
+++ b/libavformat/ogg2.c
@@ -2,7 +2,7 @@
* Ogg bitstream support
* Luca Barbato <lu_zero@gentoo.org>
* Based on tcvp implementation
- *
+ *
*/
/**
@@ -503,7 +503,7 @@ ogg_read_packet (AVFormatContext * s, AVPacket * pkt)
ogg_stream_t *os;
int idx = -1;
- //Get an ogg packet
+ //Get an ogg packet
do{
if (ogg_packet (s, &idx) < 0)
return AVERROR_IO;
@@ -643,7 +643,7 @@ static AVInputFormat ogg_iformat = {
ogg_read_packet,
ogg_read_close,
ogg_read_seek,
-// ogg_read_timestamp,
+// ogg_read_timestamp,
.extensions = "ogg",
};
diff --git a/libavformat/oggparseflac.c b/libavformat/oggparseflac.c
index dc80065cfd..efebb51891 100644
--- a/libavformat/oggparseflac.c
+++ b/libavformat/oggparseflac.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2005 Matthieu CASTET
- *
+ *
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
@@ -45,7 +45,7 @@ flac_header (AVFormatContext * s, int idx)
return -1;
skip_bits(&gb, 8 + 16); /* minor version + header count */
skip_bits(&gb, 4*8); /* "fLaC" */
-
+
/* METADATA_BLOCK_HEADER */
if (get_bits(&gb, 32) != FLAC_STREAMINFO_SIZE)
return -1;
@@ -54,7 +54,7 @@ flac_header (AVFormatContext * s, int idx)
st->codec->sample_rate = get_bits_long(&gb, 20);
st->codec->channels = get_bits(&gb, 3) + 1;
-
+
st->codec->codec_type = CODEC_TYPE_AUDIO;
st->codec->codec_id = CODEC_ID_FLAC;
diff --git a/libavformat/oggparsetheora.c b/libavformat/oggparsetheora.c
index 299eb4e506..56e7a1c06c 100644
--- a/libavformat/oggparsetheora.c
+++ b/libavformat/oggparsetheora.c
@@ -68,7 +68,7 @@ theora_header (AVFormatContext * s, int idx)
skip_bits(&gb, 64);
st->codec->time_base.den = get_bits(&gb, 32);
st->codec->time_base.num = get_bits(&gb, 32);
-
+
st->codec->sample_aspect_ratio.num = get_bits(&gb, 24);
st->codec->sample_aspect_ratio.den = get_bits(&gb, 24);
diff --git a/libavformat/oggparsevorbis.c b/libavformat/oggparsevorbis.c
index f9f53fd552..202601fa9e 100644
--- a/libavformat/oggparsevorbis.c
+++ b/libavformat/oggparsevorbis.c
@@ -86,7 +86,7 @@ vorbis_comment (AVFormatContext * as, char *buf, int size)
memcpy (ct, v, vl);
ct[vl] = 0;
- // took from Vorbis_I_spec
+ // took from Vorbis_I_spec
if (!strcmp (tt, "AUTHOR"))
strncpy (as->author, ct, FFMIN(sizeof (as->author), vl));
else if (!strcmp (tt, "TITLE"))
@@ -117,7 +117,7 @@ vorbis_comment (AVFormatContext * as, char *buf, int size)
* Vorbis Identification header from Vorbis_I_spec.html#vorbis-spec-codec
* [vorbis_version] = read 32 bits as unsigned integer | Not used
* [audio_channels] = read 8 bit integer as unsigned | Used
- * [audio_sample_rate] = read 32 bits as unsigned integer | Used
+ * [audio_sample_rate] = read 32 bits as unsigned integer | Used
* [bitrate_maximum] = read 32 bits as signed integer | Not used yet
* [bitrate_nominal] = read 32 bits as signed integer | Not used yet
* [bitrate_minimum] = read 32 bits as signed integer | Used as bitrate
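
The field list above gives the identification header in packet order, so the values the demuxer needs sit at fixed offsets once the 7-byte "\x01vorbis" prefix has been checked. A minimal sketch under that assumption (the helper names and the little-endian reader are illustrative; per the comment above it is bitrate_minimum that ends up used as the bitrate):

    static uint32_t rl32(const uint8_t *p)      /* little-endian 32-bit read */
    {
        return p[0] | (p[1] << 8) | (p[2] << 16) | ((uint32_t)p[3] << 24);
    }

    /* p points just past the "\x01vorbis" prefix of the first header packet */
    static void vorbis_id_sketch(const uint8_t *p, int *channels,
                                 int *sample_rate, int *bit_rate)
    {
        /* [vorbis_version]: 4 bytes at p + 0, ignored             */
        *channels    = p[4];                    /* [audio_channels]    */
        *sample_rate = rl32(p + 5);             /* [audio_sample_rate] */
        /* [bitrate_maximum] at p + 9, [bitrate_nominal] at p + 13  */
        *bit_rate    = rl32(p + 17);            /* [bitrate_minimum]   */
    }
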
diff --git a/libavformat/os_support.c b/libavformat/os_support.c
index 16b7d5dce2..2d387d072d 100644
--- a/libavformat/os_support.c
+++ b/libavformat/os_support.c
@@ -51,7 +51,7 @@ int64_t av_gettime(void)
struct tm *localtime_r(const time_t *t, struct tm *tp)
{
struct tm *l;
-
+
l = localtime(t);
if (!l)
return 0;
diff --git a/libavformat/png.c b/libavformat/png.c
index 179f9c7f4f..e01a103644 100644
--- a/libavformat/png.c
+++ b/libavformat/png.c
@@ -64,7 +64,7 @@ typedef struct PNGDecodeState {
int channels;
int bits_per_pixel;
int bpp;
-
+
uint8_t *image_buf;
int image_linesize;
uint32_t palette[256];
@@ -107,7 +107,7 @@ static const uint8_t png_pass_mask[NB_PASSES] = {
};
/* Mask to determine which pixels to overwrite while displaying */
-static const uint8_t png_pass_dsp_mask[NB_PASSES] = {
+static const uint8_t png_pass_dsp_mask[NB_PASSES] = {
0xff, 0x0f, 0xff, 0x33, 0xff, 0x55, 0xff
};
@@ -158,14 +158,14 @@ static int png_pass_row_size(int pass, int bits_per_pixel, int width)
/* NOTE: we try to construct a good looking image at each pass. width
is the original image width. We also do pixel format convertion at
this stage */
-static void png_put_interlaced_row(uint8_t *dst, int width,
- int bits_per_pixel, int pass,
+static void png_put_interlaced_row(uint8_t *dst, int width,
+ int bits_per_pixel, int pass,
int color_type, const uint8_t *src)
{
int x, mask, dsp_mask, j, src_x, b, bpp;
uint8_t *d;
const uint8_t *s;
-
+
mask = png_pass_mask[pass];
dsp_mask = png_pass_dsp_mask[pass];
switch(bits_per_pixel) {
@@ -213,8 +213,8 @@ static void png_put_interlaced_row(uint8_t *dst, int width,
}
}
-static void png_get_interlaced_row(uint8_t *dst, int row_size,
- int bits_per_pixel, int pass,
+static void png_get_interlaced_row(uint8_t *dst, int row_size,
+ int bits_per_pixel, int pass,
const uint8_t *src, int width)
{
int x, mask, dst_x, j, b, bpp;
@@ -253,7 +253,7 @@ static void png_get_interlaced_row(uint8_t *dst, int row_size,
/* XXX: optimize */
/* NOTE: 'dst' can be equal to 'last' */
-static void png_filter_row(uint8_t *dst, int filter_type,
+static void png_filter_row(uint8_t *dst, int filter_type,
uint8_t *src, uint8_t *last, int size, int bpp)
{
int i, p;
@@ -323,7 +323,7 @@ static void convert_from_rgba32(uint8_t *dst, const uint8_t *src, int width)
uint8_t *d;
int j;
unsigned int v;
-
+
d = dst;
for(j = 0; j < width; j++) {
v = ((uint32_t *)src)[j];
@@ -356,12 +356,12 @@ static void png_handle_row(PNGDecodeState *s)
{
uint8_t *ptr, *last_row;
int got_line;
-
+
if (!s->interlace_type) {
ptr = s->image_buf + s->image_linesize * s->y;
/* need to swap bytes correctly for RGB_ALPHA */
if (s->color_type == PNG_COLOR_TYPE_RGB_ALPHA) {
- png_filter_row(s->tmp_row, s->crow_buf[0], s->crow_buf + 1,
+ png_filter_row(s->tmp_row, s->crow_buf[0], s->crow_buf + 1,
s->last_row, s->row_size, s->bpp);
memcpy(s->last_row, s->tmp_row, s->row_size);
convert_to_rgba32(ptr, s->tmp_row, s->width);
@@ -371,8 +371,8 @@ static void png_handle_row(PNGDecodeState *s)
last_row = s->last_row;
else
last_row = ptr - s->image_linesize;
-
- png_filter_row(ptr, s->crow_buf[0], s->crow_buf + 1,
+
+ png_filter_row(ptr, s->crow_buf[0], s->crow_buf + 1,
last_row, s->row_size, s->bpp);
}
s->y++;
@@ -388,14 +388,14 @@ static void png_handle_row(PNGDecodeState *s)
wait for the next one */
if (got_line)
break;
- png_filter_row(s->tmp_row, s->crow_buf[0], s->crow_buf + 1,
+ png_filter_row(s->tmp_row, s->crow_buf[0], s->crow_buf + 1,
s->last_row, s->pass_row_size, s->bpp);
memcpy(s->last_row, s->tmp_row, s->pass_row_size);
got_line = 1;
}
if ((png_pass_dsp_ymask[s->pass] << (s->y & 7)) & 0x80) {
/* NOTE: rgba32 is handled directly in png_put_interlaced_row */
- png_put_interlaced_row(ptr, s->width, s->bits_per_pixel, s->pass,
+ png_put_interlaced_row(ptr, s->width, s->bits_per_pixel, s->pass,
s->color_type, s->last_row);
}
s->y++;
@@ -407,8 +407,8 @@ static void png_handle_row(PNGDecodeState *s)
} else {
s->pass++;
s->y = 0;
- s->pass_row_size = png_pass_row_size(s->pass,
- s->bits_per_pixel,
+ s->pass_row_size = png_pass_row_size(s->pass,
+ s->bits_per_pixel,
s->width);
s->crow_size = s->pass_row_size + 1;
if (s->pass_row_size != 0)
@@ -456,7 +456,7 @@ static int png_decode_idat(PNGDecodeState *s, ByteIOContext *f, int length)
return 0;
}
-static int png_read(ByteIOContext *f,
+static int png_read(ByteIOContext *f,
int (*alloc_cb)(void *opaque, AVImageInfo *info), void *opaque)
{
AVImageInfo info1, *info = &info1;
@@ -487,7 +487,7 @@ static int png_read(ByteIOContext *f,
goto fail;
tag = get_le32(f);
#ifdef DEBUG
- printf("png: tag=%c%c%c%c length=%u\n",
+ printf("png: tag=%c%c%c%c length=%u\n",
(tag & 0xff),
((tag >> 8) & 0xff),
((tag >> 16) & 0xff),
@@ -507,8 +507,8 @@ static int png_read(ByteIOContext *f,
crc = get_be32(f);
s->state |= PNG_IHDR;
#ifdef DEBUG
- printf("width=%d height=%d depth=%d color_type=%d compression_type=%d filter_type=%d interlace_type=%d\n",
- s->width, s->height, s->bit_depth, s->color_type,
+ printf("width=%d height=%d depth=%d color_type=%d compression_type=%d filter_type=%d interlace_type=%d\n",
+ s->width, s->height, s->bit_depth, s->color_type,
s->compression_type, s->filter_type, s->interlace_type);
#endif
break;
@@ -526,16 +526,16 @@ static int png_read(ByteIOContext *f,
s->bpp = (s->bits_per_pixel + 7) >> 3;
s->row_size = (info->width * s->bits_per_pixel + 7) >> 3;
- if (s->bit_depth == 8 &&
+ if (s->bit_depth == 8 &&
s->color_type == PNG_COLOR_TYPE_RGB) {
info->pix_fmt = PIX_FMT_RGB24;
- } else if (s->bit_depth == 8 &&
+ } else if (s->bit_depth == 8 &&
s->color_type == PNG_COLOR_TYPE_RGB_ALPHA) {
info->pix_fmt = PIX_FMT_RGBA32;
- } else if (s->bit_depth == 8 &&
+ } else if (s->bit_depth == 8 &&
s->color_type == PNG_COLOR_TYPE_GRAY) {
info->pix_fmt = PIX_FMT_GRAY8;
- } else if (s->bit_depth == 1 &&
+ } else if (s->bit_depth == 1 &&
s->color_type == PNG_COLOR_TYPE_GRAY) {
info->pix_fmt = PIX_FMT_MONOBLACK;
} else if (s->color_type == PNG_COLOR_TYPE_PALETTE) {
@@ -544,7 +544,7 @@ static int png_read(ByteIOContext *f,
goto fail;
}
ret = alloc_cb(opaque, info);
- if (ret)
+ if (ret)
goto the_end;
/* compute the compressed row size */
@@ -552,13 +552,13 @@ static int png_read(ByteIOContext *f,
s->crow_size = s->row_size + 1;
} else {
s->pass = 0;
- s->pass_row_size = png_pass_row_size(s->pass,
- s->bits_per_pixel,
+ s->pass_row_size = png_pass_row_size(s->pass,
+ s->bits_per_pixel,
s->width);
s->crow_size = s->pass_row_size + 1;
}
#ifdef DEBUG
- printf("row_size=%d crow_size =%d\n",
+ printf("row_size=%d crow_size =%d\n",
s->row_size, s->crow_size);
#endif
s->image_buf = info->pict.data[0];
@@ -592,7 +592,7 @@ static int png_read(ByteIOContext *f,
case MKTAG('P', 'L', 'T', 'E'):
{
int n, i, r, g, b;
-
+
if ((length % 3) != 0 || length > 256 * 3)
goto skip_tag;
/* read the palette */
@@ -716,7 +716,7 @@ static int png_write(ByteIOContext *f, AVImageInfo *info)
uint8_t *ptr;
uint8_t *crow_buf = NULL;
uint8_t *tmp_buf = NULL;
-
+
s->f = f;
is_progressive = info->interleaved;
switch(info->pix_fmt) {
@@ -764,7 +764,7 @@ static int png_write(ByteIOContext *f, AVImageInfo *info)
/* write png header */
put_buffer(f, pngsig, 8);
-
+
to_be32(s->buf, info->width);
to_be32(s->buf + 4, info->height);
s->buf[8] = bit_depth;
@@ -772,7 +772,7 @@ static int png_write(ByteIOContext *f, AVImageInfo *info)
s->buf[10] = 0; /* compression type */
s->buf[11] = 0; /* filter type */
s->buf[12] = is_progressive; /* interlace type */
-
+
png_write_chunk(f, MKTAG('I', 'H', 'D', 'R'), s->buf, 13);
/* put the palette if needed */
@@ -781,7 +781,7 @@ static int png_write(ByteIOContext *f, AVImageInfo *info)
unsigned int v;
uint32_t *palette;
uint8_t *alpha_ptr;
-
+
palette = (uint32_t *)info->pict.data[1];
ptr = s->buf;
alpha_ptr = s->buf + 256 * 3;
@@ -824,8 +824,8 @@ static int png_write(ByteIOContext *f, AVImageInfo *info)
} else {
ptr1 = ptr;
}
- png_get_interlaced_row(crow_buf + 1, pass_row_size,
- bits_per_pixel, pass,
+ png_get_interlaced_row(crow_buf + 1, pass_row_size,
+ bits_per_pixel, pass,
ptr1, info->width);
crow_buf[0] = PNG_FILTER_VALUE_NONE;
png_write_row(s, crow_buf, pass_row_size + 1);
@@ -879,7 +879,7 @@ AVImageFormat png_image_format = {
"png",
png_probe,
png_read,
- (1 << PIX_FMT_RGBA32) | (1 << PIX_FMT_RGB24) | (1 << PIX_FMT_GRAY8) |
+ (1 << PIX_FMT_RGBA32) | (1 << PIX_FMT_RGB24) | (1 << PIX_FMT_GRAY8) |
(1 << PIX_FMT_MONOBLACK) | (1 << PIX_FMT_PAL8),
png_write,
AVIMAGE_INTERLEAVED,
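
png_filter_row(), called from png_handle_row() in the hunks above, undoes the per-row predictor before the row is converted for display: filter byte 0 copies the row, 1 ("Sub") adds the byte bpp positions to the left, 2 ("Up") adds the byte from the previous, already unfiltered row. A reduced sketch covering only those three cases (the "Average" and "Paeth" filters are omitted, and the name is illustrative):

    #include <stdint.h>
    #include <string.h>

    static void unfilter_row_sketch(uint8_t *dst, int filter_type,
                                    const uint8_t *src, const uint8_t *last,
                                    int size, int bpp)
    {
        int i;

        switch (filter_type) {
        case 0:                              /* None */
            memcpy(dst, src, size);
            break;
        case 1:                              /* Sub  */
            for (i = 0; i < bpp; i++)
                dst[i] = src[i];
            for (; i < size; i++)
                dst[i] = src[i] + dst[i - bpp];
            break;
        case 2:                              /* Up   */
            for (i = 0; i < size; i++)
                dst[i] = src[i] + last[i];
            break;
        }
    }
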
diff --git a/libavformat/pnm.c b/libavformat/pnm.c
index fb4d3d2ad3..2331bcdc42 100644
--- a/libavformat/pnm.c
+++ b/libavformat/pnm.c
@@ -18,16 +18,16 @@
*/
#include "avformat.h"
-static inline int pnm_space(int c)
+static inline int pnm_space(int c)
{
return (c == ' ' || c == '\n' || c == '\r' || c == '\t');
}
-static void pnm_get(ByteIOContext *f, char *str, int buf_size)
+static void pnm_get(ByteIOContext *f, char *str, int buf_size)
{
char *s;
int c;
-
+
/* skip spaces and comments */
for(;;) {
c = url_fgetc(f);
@@ -39,7 +39,7 @@ static void pnm_get(ByteIOContext *f, char *str, int buf_size)
break;
}
}
-
+
s = str;
while (c != URL_EOF && !pnm_space(c)) {
if ((s - str) < buf_size - 1)
@@ -49,7 +49,7 @@ static void pnm_get(ByteIOContext *f, char *str, int buf_size)
*s = '\0';
}
-static int pnm_read1(ByteIOContext *f,
+static int pnm_read1(ByteIOContext *f,
int (*alloc_cb)(void *opaque, AVImageInfo *info), void *opaque,
int allow_yuv)
{
@@ -63,7 +63,7 @@ static int pnm_read1(ByteIOContext *f,
if (!strcmp(buf1, "P4")) {
info->pix_fmt = PIX_FMT_MONOWHITE;
} else if (!strcmp(buf1, "P5")) {
- if (allow_yuv)
+ if (allow_yuv)
info->pix_fmt = PIX_FMT_YUV420P;
else
info->pix_fmt = PIX_FMT_GRAY8;
@@ -94,11 +94,11 @@ static int pnm_read1(ByteIOContext *f,
h /= 3;
info->height = h;
}
-
+
ret = alloc_cb(opaque, info);
if (ret)
return ret;
-
+
switch(info->pix_fmt) {
default:
return AVERROR_INVALIDDATA;
@@ -145,13 +145,13 @@ static int pnm_read1(ByteIOContext *f,
return 0;
}
-static int pnm_read(ByteIOContext *f,
+static int pnm_read(ByteIOContext *f,
int (*alloc_cb)(void *opaque, AVImageInfo *info), void *opaque)
{
return pnm_read1(f, alloc_cb, opaque, 0);
}
-static int pgmyuv_read(ByteIOContext *f,
+static int pgmyuv_read(ByteIOContext *f,
int (*alloc_cb)(void *opaque, AVImageInfo *info), void *opaque)
{
return pnm_read1(f, alloc_cb, opaque, 1);
@@ -186,23 +186,23 @@ static int pnm_write(ByteIOContext *pb, AVImageInfo *info)
default:
return AVERROR_INVALIDDATA;
}
- snprintf(buf, sizeof(buf),
+ snprintf(buf, sizeof(buf),
"P%c\n%d %d\n",
c, info->width, h1);
put_buffer(pb, buf, strlen(buf));
if (info->pix_fmt != PIX_FMT_MONOWHITE) {
- snprintf(buf, sizeof(buf),
+ snprintf(buf, sizeof(buf),
"%d\n", 255);
put_buffer(pb, buf, strlen(buf));
}
-
+
ptr = info->pict.data[0];
linesize = info->pict.linesize[0];
for(i=0;i<h;i++) {
put_buffer(pb, ptr, n);
ptr += linesize;
}
-
+
if (info->pix_fmt == PIX_FMT_YUV420P) {
h >>= 1;
n >>= 1;
@@ -219,7 +219,7 @@ static int pnm_write(ByteIOContext *pb, AVImageInfo *info)
return 0;
}
-static int pam_read(ByteIOContext *f,
+static int pam_read(ByteIOContext *f,
int (*alloc_cb)(void *opaque, AVImageInfo *info), void *opaque)
{
int i, n, linesize, h, w, depth, maxval;
@@ -267,7 +267,7 @@ static int pam_read(ByteIOContext *f,
if (depth == 1) {
if (maxval == 1)
info->pix_fmt = PIX_FMT_MONOWHITE;
- else
+ else
info->pix_fmt = PIX_FMT_GRAY8;
} else if (depth == 3) {
info->pix_fmt = PIX_FMT_RGB24;
@@ -279,7 +279,7 @@ static int pam_read(ByteIOContext *f,
ret = alloc_cb(opaque, info);
if (ret)
return ret;
-
+
switch(info->pix_fmt) {
default:
return AVERROR_INVALIDDATA;
@@ -356,14 +356,14 @@ static int pam_write(ByteIOContext *pb, AVImageInfo *info)
default:
return AVERROR_INVALIDDATA;
}
- snprintf(buf, sizeof(buf),
+ snprintf(buf, sizeof(buf),
"P7\nWIDTH %d\nHEIGHT %d\nDEPTH %d\nMAXVAL %d\nTUPLETYPE %s\nENDHDR\n",
w, h, depth, maxval, tuple_type);
put_buffer(pb, buf, strlen(buf));
-
+
ptr = info->pict.data[0];
linesize = info->pict.linesize[0];
-
+
if (info->pix_fmt == PIX_FMT_RGBA32) {
int j;
unsigned int v;
@@ -461,7 +461,7 @@ AVImageFormat pam_image_format = {
"pam",
pam_probe,
pam_read,
- (1 << PIX_FMT_MONOWHITE) | (1 << PIX_FMT_GRAY8) | (1 << PIX_FMT_RGB24) |
+ (1 << PIX_FMT_MONOWHITE) | (1 << PIX_FMT_GRAY8) | (1 << PIX_FMT_RGB24) |
(1 << PIX_FMT_RGBA32),
pam_write,
};
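
pnm_get() in the hunks above reads one whitespace-delimited token while skipping '#' comment lines; a P5/P6 header is simply four such tokens (magic, width, height, maxval) followed by a single whitespace byte and the raw samples, and P4 omits the maxval. A stdio-based sketch of the same tokenizer (the name is illustrative):

    #include <ctype.h>
    #include <stdio.h>

    /* Read one PNM header token, skipping spaces and '#' comments.
       Returns 0 on success, -1 at end of file. */
    static int pnm_token_sketch(FILE *f, char *str, int size)
    {
        int c, n = 0;

        do {                                  /* skip spaces and comments */
            c = fgetc(f);
            if (c == '#')
                while (c != '\n' && c != EOF)
                    c = fgetc(f);
        } while (c != EOF && isspace(c));

        while (c != EOF && !isspace(c)) {     /* collect the token */
            if (n < size - 1)
                str[n++] = c;
            c = fgetc(f);
        }
        str[n] = '\0';
        return n ? 0 : -1;
    }
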
diff --git a/libavformat/psxstr.c b/libavformat/psxstr.c
index 74d4646494..952a6a35e1 100644
--- a/libavformat/psxstr.c
+++ b/libavformat/psxstr.c
@@ -182,7 +182,7 @@ static int str_read_header(AVFormatContext *s,
str->channels[channel].video_stream_index = st->index;
st->codec->codec_type = CODEC_TYPE_VIDEO;
- st->codec->codec_id = CODEC_ID_MDEC;
+ st->codec->codec_id = CODEC_ID_MDEC;
st->codec->codec_tag = 0; /* no fourcc */
st->codec->width = str->channels[channel].width;
st->codec->height = str->channels[channel].height;
@@ -195,11 +195,11 @@ static int str_read_header(AVFormatContext *s,
int fmt;
str->audio_channel = channel;
str->channels[channel].type = STR_AUDIO;
- str->channels[channel].channels =
+ str->channels[channel].channels =
(sector[0x13] & 0x01) ? 2 : 1;
- str->channels[channel].sample_rate =
+ str->channels[channel].sample_rate =
(sector[0x13] & 0x04) ? 18900 : 37800;
- str->channels[channel].bits =
+ str->channels[channel].bits =
(sector[0x13] & 0x10) ? 8 : 4;
/* allocate a new AVStream */
@@ -212,7 +212,7 @@ static int str_read_header(AVFormatContext *s,
fmt = sector[0x13];
st->codec->codec_type = CODEC_TYPE_AUDIO;
- st->codec->codec_id = CODEC_ID_ADPCM_XA;
+ st->codec->codec_id = CODEC_ID_ADPCM_XA;
st->codec->codec_tag = 0; /* no fourcc */
st->codec->channels = (fmt&1)?2:1;
st->codec->sample_rate = (fmt&4)?18900:37800;
@@ -232,7 +232,7 @@ if (str->video_channel != -1)
str->channels[str->video_channel].width,
str->channels[str->video_channel].height,str->channels[str->video_channel].video_stream_index);
if (str->audio_channel != -1)
- av_log (s, AV_LOG_DEBUG, " audio channel = %d, %d Hz, %d channels, %d bits/sample %d\n",
+ av_log (s, AV_LOG_DEBUG, " audio channel = %d, %d Hz, %d channels, %d bits/sample %d\n",
str->audio_channel,
str->channels[str->audio_channel].sample_rate,
str->channels[str->audio_channel].channels,
@@ -283,7 +283,7 @@ static int str_read_packet(AVFormatContext *s,
return AVERROR_IO;
pkt->pos= url_ftell(pb) - RAW_CD_SECTOR_SIZE;
- pkt->stream_index =
+ pkt->stream_index =
str->channels[channel].video_stream_index;
// pkt->pts = str->pts;
@@ -320,7 +320,7 @@ printf (" dropping audio sector\n");
return AVERROR_IO;
memcpy(pkt->data,sector+24,2304);
- pkt->stream_index =
+ pkt->stream_index =
str->channels[channel].audio_stream_index;
//pkt->pts = str->pts;
return 0;
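
The header hunks above decode the CD-XA "coding information" byte at sector offset 0x13 three times, once per field; collected in one place, the bit layout they rely on is: bit 0 selects stereo, bit 2 selects the 18900 Hz rate, bit 4 selects 8-bit samples. A small sketch of the same decode (the struct and function names are illustrative):

    struct xa_audio_fmt {
        int channels;
        int sample_rate;
        int bits;
    };

    static struct xa_audio_fmt parse_xa_coding_sketch(unsigned char fmt)
    {
        struct xa_audio_fmt a;

        a.channels    = (fmt & 0x01) ? 2 : 1;         /* stereo flag  */
        a.sample_rate = (fmt & 0x04) ? 18900 : 37800; /* rate flag    */
        a.bits        = (fmt & 0x10) ? 8 : 4;         /* sample depth */
        return a;
    }
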
diff --git a/libavformat/qtpalette.h b/libavformat/qtpalette.h
index d963a82add..ef4ccfa91e 100644
--- a/libavformat/qtpalette.h
+++ b/libavformat/qtpalette.h
@@ -11,7 +11,7 @@ unsigned char ff_qt_default_palette_4[4 * 4] = {
0x93, 0x65, 0x5E, 0x00,
0xFF, 0xFF, 0xFF, 0x00,
0xDF, 0xD0, 0xAB, 0x00,
- 0x00, 0x00, 0x00, 0x00
+ 0x00, 0x00, 0x00, 0x00
};
unsigned char ff_qt_default_palette_16[16 * 4] = {
@@ -30,7 +30,7 @@ unsigned char ff_qt_default_palette_16[16 * 4] = {
0xFF, 0xFB, 0xF9, 0x00,
0xE8, 0xCA, 0xC5, 0x00,
0x8A, 0x7C, 0x77, 0x00,
- 0x00, 0x00, 0x00, 0x00
+ 0x00, 0x00, 0x00, 0x00
};
unsigned char ff_qt_default_palette_256[256 * 4] = {
diff --git a/libavformat/raw.c b/libavformat/raw.c
index 41a6546915..f48bbf7fff 100644
--- a/libavformat/raw.c
+++ b/libavformat/raw.c
@@ -1,4 +1,4 @@
-/*
+/*
* RAW encoder and decoder
* Copyright (c) 2001 Fabrice Bellard.
* Copyright (c) 2005 Alex Beregszaszi
@@ -86,7 +86,7 @@ static int raw_read_packet(AVFormatContext *s, AVPacket *pkt)
{
int ret, size;
// AVStream *st = s->streams[0];
-
+
size= RAW_PACKET_SIZE;
ret= av_get_packet(&s->pb, pkt, size);
@@ -109,7 +109,7 @@ static int raw_read_partial_packet(AVFormatContext *s, AVPacket *pkt)
if (av_new_packet(pkt, size) < 0)
return AVERROR_IO;
-
+
pkt->pos= url_ftell(&s->pb);
pkt->stream_index = 0;
ret = get_partial_buffer(&s->pb, pkt->data, size);
@@ -125,21 +125,21 @@ static int raw_read_partial_packet(AVFormatContext *s, AVPacket *pkt)
static int ingenient_read_packet(AVFormatContext *s, AVPacket *pkt)
{
int ret, size, w, h, unk1, unk2;
-
+
if (get_le32(&s->pb) != MKTAG('M', 'J', 'P', 'G'))
return AVERROR_IO; // FIXME
size = get_le32(&s->pb);
-
+
w = get_le16(&s->pb);
h = get_le16(&s->pb);
-
+
url_fskip(&s->pb, 8); // zero + size (padded?)
url_fskip(&s->pb, 2);
unk1 = get_le16(&s->pb);
unk2 = get_le16(&s->pb);
url_fskip(&s->pb, 22); // ascii timestamp
-
+
av_log(NULL, AV_LOG_DEBUG, "Ingenient packet: size=%d, width=%d, height=%d, unk1=%d unk2=%d\n",
size, w, h, unk1, unk2);
@@ -162,7 +162,7 @@ static int raw_read_close(AVFormatContext *s)
return 0;
}
-int pcm_read_seek(AVFormatContext *s,
+int pcm_read_seek(AVFormatContext *s,
int stream_index, int64_t timestamp, int flags)
{
AVStream *st;
@@ -190,13 +190,13 @@ int pcm_read_seek(AVFormatContext *s,
byte_rate = st->codec->bit_rate / 8;
break;
}
-
+
if (block_align <= 0 || byte_rate <= 0)
return -1;
/* compute the position by aligning it to block_align */
- pos = av_rescale_rnd(timestamp * byte_rate,
- st->time_base.num,
+ pos = av_rescale_rnd(timestamp * byte_rate,
+ st->time_base.num,
st->time_base.den * (int64_t)block_align,
(flags & AVSEEK_FLAG_BACKWARD) ? AV_ROUND_DOWN : AV_ROUND_UP);
pos *= block_align;
@@ -274,7 +274,7 @@ static int video_read_header(AVFormatContext *s,
/* for mpeg4 specify it too (most mpeg4 streams dont have the fixed_vop_rate set ...)*/
if (ap && ap->time_base.num) {
av_set_pts_info(st, 64, ap->time_base.num, ap->time_base.den);
- } else if ( st->codec->codec_id == CODEC_ID_MJPEG ||
+ } else if ( st->codec->codec_id == CODEC_ID_MJPEG ||
st->codec->codec_id == CODEC_ID_MPEG4 ||
st->codec->codec_id == CODEC_ID_H264) {
av_set_pts_info(st, 64, 1, 25);
@@ -370,7 +370,7 @@ AVInputFormat ac3_iformat = {
AVOutputFormat ac3_oformat = {
"ac3",
"raw ac3",
- "audio/x-ac3",
+ "audio/x-ac3",
"ac3",
0,
CODEC_ID_AC3,
@@ -629,28 +629,28 @@ AVOutputFormat pcm_ ## name ## _oformat = {\
#endif
-PCMDEF(s16le, "pcm signed 16 bit little endian format",
+PCMDEF(s16le, "pcm signed 16 bit little endian format",
LE_DEF("sw"), CODEC_ID_PCM_S16LE)
-PCMDEF(s16be, "pcm signed 16 bit big endian format",
+PCMDEF(s16be, "pcm signed 16 bit big endian format",
BE_DEF("sw"), CODEC_ID_PCM_S16BE)
-PCMDEF(u16le, "pcm unsigned 16 bit little endian format",
+PCMDEF(u16le, "pcm unsigned 16 bit little endian format",
LE_DEF("uw"), CODEC_ID_PCM_U16LE)
-PCMDEF(u16be, "pcm unsigned 16 bit big endian format",
+PCMDEF(u16be, "pcm unsigned 16 bit big endian format",
BE_DEF("uw"), CODEC_ID_PCM_U16BE)
-PCMDEF(s8, "pcm signed 8 bit format",
+PCMDEF(s8, "pcm signed 8 bit format",
"sb", CODEC_ID_PCM_S8)
-PCMDEF(u8, "pcm unsigned 8 bit format",
+PCMDEF(u8, "pcm unsigned 8 bit format",
"ub", CODEC_ID_PCM_U8)
-PCMDEF(mulaw, "pcm mu law format",
+PCMDEF(mulaw, "pcm mu law format",
"ul", CODEC_ID_PCM_MULAW)
-PCMDEF(alaw, "pcm A law format",
+PCMDEF(alaw, "pcm A law format",
"al", CODEC_ID_PCM_ALAW)
static int rawvideo_read_packet(AVFormatContext *s, AVPacket *pkt)
@@ -749,10 +749,10 @@ int raw_init(void)
av_register_input_format(&h263_iformat);
av_register_output_format(&h263_oformat);
-
+
av_register_input_format(&m4v_iformat);
av_register_output_format(&m4v_oformat);
-
+
av_register_input_format(&h264_iformat);
av_register_output_format(&h264_oformat);
@@ -763,7 +763,7 @@ int raw_init(void)
av_register_input_format(&mjpeg_iformat);
av_register_output_format(&mjpeg_oformat);
-
+
av_register_input_format(&ingenient_iformat);
av_register_input_format(&pcm_s16le_iformat);
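
The pcm_read_seek() hunk above only reflows an av_rescale_rnd() call, but the computation it wraps is worth spelling out: the timestamp is converted to a byte count and then snapped to the codec's block_align, rounding down or up depending on the seek direction. A minimal standalone sketch of that arithmetic follows; the function name and the simplified rounding are illustrative, and the real code relies on av_rescale_rnd() to stay overflow-safe.

#include <stdint.h>

/* Sketch only: block-aligned byte position for a PCM seek.
 * timestamp is in tb_num/tb_den units; assumes non-negative inputs. */
static int64_t pcm_seek_pos(int64_t timestamp, int64_t byte_rate,
                            int64_t tb_num, int64_t tb_den,
                            int64_t block_align, int backward)
{
    int64_t num    = timestamp * byte_rate * tb_num;
    int64_t den    = tb_den * block_align;
    int64_t blocks = backward ? num / den              /* round down */
                              : (num + den - 1) / den; /* round up   */

    return blocks * block_align;   /* byte offset, aligned to block_align */
}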
diff --git a/libavformat/rm.c b/libavformat/rm.c
index 5433da73a1..109201154b 100644
--- a/libavformat/rm.c
+++ b/libavformat/rm.c
@@ -19,7 +19,7 @@
#include "avformat.h"
/* in ms */
-#define BUFFER_DURATION 0
+#define BUFFER_DURATION 0
typedef struct {
int nb_packets;
@@ -69,7 +69,7 @@ static void put_str8(ByteIOContext *s, const char *tag)
}
}
-static void rv10_write_header(AVFormatContext *ctx,
+static void rv10_write_header(AVFormatContext *ctx,
int data_size, int index_pos)
{
RMContext *rm = ctx->priv_data;
@@ -128,11 +128,11 @@ static void rv10_write_header(AVFormatContext *ctx,
if (url_is_streamed(s))
flags |= 4; /* live broadcast */
put_be16(s, flags);
-
+
/* comments */
put_tag(s,"CONT");
- size = strlen(ctx->title) + strlen(ctx->author) + strlen(ctx->copyright) +
+ size = strlen(ctx->title) + strlen(ctx->author) + strlen(ctx->copyright) +
strlen(ctx->comment) + 4 * 2 + 10;
put_be32(s,size);
put_be16(s,0);
@@ -140,12 +140,12 @@ static void rv10_write_header(AVFormatContext *ctx,
put_str(s, ctx->author);
put_str(s, ctx->copyright);
put_str(s, ctx->comment);
-
+
for(i=0;i<ctx->nb_streams;i++) {
int codec_data_size;
stream = &rm->streams[i];
-
+
if (stream->enc->codec_type == CODEC_TYPE_AUDIO) {
desc = "The Audio Stream";
mimetype = "audio/x-pn-realaudio";
@@ -166,7 +166,7 @@ static void rv10_write_header(AVFormatContext *ctx,
put_be32(s, stream->bit_rate); /* avg bit rate */
put_be32(s, stream->packet_max_size); /* max packet size */
if (stream->nb_packets > 0)
- packet_avg_size = stream->packet_total_size /
+ packet_avg_size = stream->packet_total_size /
stream->nb_packets;
else
packet_avg_size = 0;
@@ -181,11 +181,11 @@ static void rv10_write_header(AVFormatContext *ctx,
put_str8(s, desc);
put_str8(s, mimetype);
put_be32(s, codec_data_size);
-
+
if (stream->enc->codec_type == CODEC_TYPE_AUDIO) {
int coded_frame_size, fscode, sample_rate;
sample_rate = stream->enc->sample_rate;
- coded_frame_size = (stream->enc->bit_rate *
+ coded_frame_size = (stream->enc->bit_rate *
stream->enc->frame_size) / (8 * sample_rate);
/* audio codec info */
put_tag(s, ".ra");
@@ -224,7 +224,7 @@ static void rv10_write_header(AVFormatContext *ctx,
put_be32(s, 0x249f0); /* unknown */
put_be16(s, 0x01);
/* frame length : seems to be very important */
- put_be16(s, coded_frame_size);
+ put_be16(s, coded_frame_size);
put_be32(s, 0); /* unknown */
put_be16(s, stream->enc->sample_rate); /* sample rate */
put_be32(s, 0x10); /* unknown */
@@ -253,10 +253,10 @@ static void rv10_write_header(AVFormatContext *ctx,
versions seems to add a diffential DC coding as in
MPEG... nothing new under the sun */
if(stream->enc->codec_id == CODEC_ID_RV10)
- put_be32(s,0x10000000);
+ put_be32(s,0x10000000);
else
- put_be32(s,0x20103001);
- //put_be32(s,0x10003000);
+ put_be32(s,0x20103001);
+ //put_be32(s,0x10003000);
}
}
@@ -267,7 +267,7 @@ static void rv10_write_header(AVFormatContext *ctx,
data_offset_ptr[1] = data_pos >> 16;
data_offset_ptr[2] = data_pos >> 8;
data_offset_ptr[3] = data_pos;
-
+
/* data stream */
put_tag(s,"DATA");
put_be32(s,data_size + 10 + 8);
@@ -277,7 +277,7 @@ static void rv10_write_header(AVFormatContext *ctx,
put_be32(s,0); /* next data header */
}
-static void write_packet_header(AVFormatContext *ctx, StreamInfo *stream,
+static void write_packet_header(AVFormatContext *ctx, StreamInfo *stream,
int length, int key_frame)
{
int timestamp;
@@ -350,9 +350,9 @@ static int rm_write_audio(AVFormatContext *s, const uint8_t *buf, int size, int
/* XXX: suppress this malloc */
buf1= (uint8_t*) av_malloc( size * sizeof(uint8_t) );
-
+
write_packet_header(s, stream, size, !!(flags & PKT_FLAG_KEY));
-
+
/* for AC3, the words seems to be reversed */
for(i=0;i<size;i+=2) {
buf1[i] = buf[i+1];
@@ -379,25 +379,25 @@ static int rm_write_video(AVFormatContext *s, const uint8_t *buf, int size, int
#if 1
write_packet_header(s, stream, size + 7, key_frame);
/* bit 7: '1' if final packet of a frame converted in several packets */
- put_byte(pb, 0x81);
+ put_byte(pb, 0x81);
/* bit 7: '1' if I frame. bits 6..0 : sequence number in current
frame starting from 1 */
if (key_frame) {
- put_byte(pb, 0x81);
+ put_byte(pb, 0x81);
} else {
- put_byte(pb, 0x01);
+ put_byte(pb, 0x01);
}
put_be16(pb, 0x4000 + (size)); /* total frame size */
put_be16(pb, 0x4000 + (size)); /* offset from the start or the end */
#else
/* full frame */
write_packet_header(s, size + 6);
- put_byte(pb, 0xc0);
+ put_byte(pb, 0xc0);
put_be16(pb, 0x4000 + size); /* total frame size */
put_be16(pb, 0x4000 + packet_number * 126); /* position in stream */
#endif
- put_byte(pb, stream->nb_frames & 0xff);
-
+ put_byte(pb, stream->nb_frames & 0xff);
+
put_buffer(pb, buf, size);
put_flush_packet(pb);
@@ -407,13 +407,13 @@ static int rm_write_video(AVFormatContext *s, const uint8_t *buf, int size, int
static int rm_write_packet(AVFormatContext *s, AVPacket *pkt)
{
- if (s->streams[pkt->stream_index]->codec->codec_type ==
+ if (s->streams[pkt->stream_index]->codec->codec_type ==
CODEC_TYPE_AUDIO)
return rm_write_audio(s, pkt->data, pkt->size, pkt->flags);
else
return rm_write_video(s, pkt->data, pkt->size, pkt->flags);
}
-
+
static int rm_write_trailer(AVFormatContext *s)
{
RMContext *rm = s->priv_data;
@@ -429,7 +429,7 @@ static int rm_write_trailer(AVFormatContext *s)
put_tag(pb, "INDX");
put_be32(pb, 10 + 10 * s->nb_streams);
put_be16(pb, 0);
-
+
for(i=0;i<s->nb_streams;i++) {
put_be32(pb, 0); /* zero indices */
put_be16(pb, i); /* stream number */
@@ -438,7 +438,7 @@ static int rm_write_trailer(AVFormatContext *s)
/* undocumented end header */
put_be32(pb, 0);
put_be32(pb, 0);
-
+
url_fseek(pb, 0, SEEK_SET);
for(i=0;i<s->nb_streams;i++)
rm->streams[i].total_frames = rm->streams[i].nb_frames;
@@ -483,7 +483,7 @@ static void get_str8(ByteIOContext *pb, char *buf, int buf_size)
*q = '\0';
}
-static void rm_read_audio_stream_info(AVFormatContext *s, AVStream *st,
+static void rm_read_audio_stream_info(AVFormatContext *s, AVStream *st,
int read_all)
{
RMContext *rm = s->priv_data;
@@ -520,7 +520,7 @@ static void rm_read_audio_stream_info(AVFormatContext *s, AVStream *st,
get_be32(pb); /* ??? */
get_be32(pb); /* ??? */
get_be32(pb); /* ??? */
- rm->sub_packet_h = sub_packet_h = get_be16(pb); /* 1 */
+ rm->sub_packet_h = sub_packet_h = get_be16(pb); /* 1 */
st->codec->block_align= get_be16(pb); /* frame size */
rm->sub_packet_size = sub_packet_size = get_be16(pb); /* sub packet size */
get_be16(pb); /* ??? */
@@ -572,7 +572,7 @@ static void rm_read_audio_stream_info(AVFormatContext *s, AVStream *st,
get_byte(pb);
get_byte(pb);
get_byte(pb);
-
+
get_str8(pb, s->title, sizeof(s->title));
get_str8(pb, s->author, sizeof(s->author));
get_str8(pb, s->copyright, sizeof(s->copyright));
@@ -620,7 +620,7 @@ static int rm_read_header(AVFormatContext *s, AVFormatParameters *ap)
get_be16(pb);
get_be32(pb);
get_be32(pb); /* number of headers */
-
+
for(;;) {
if (url_feof(pb))
goto fail;
@@ -628,7 +628,7 @@ static int rm_read_header(AVFormatContext *s, AVFormatParameters *ap)
tag_size = get_be32(pb);
get_be16(pb);
#if 0
- printf("tag=%c%c%c%c (%08x) size=%d\n",
+ printf("tag=%c%c%c%c (%08x) size=%d\n",
(tag) & 0xff,
(tag >> 8) & 0xff,
(tag >> 16) & 0xff,
@@ -706,11 +706,11 @@ static int rm_read_header(AVFormatContext *s, AVFormatParameters *ap)
get_be32(pb);
fps2= get_be16(pb);
get_be16(pb);
-
+
st->codec->extradata_size= codec_data_size - (url_ftell(pb) - codec_pos);
st->codec->extradata= av_mallocz(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
get_buffer(pb, st->codec->extradata, st->codec->extradata_size);
-
+
// av_log(NULL, AV_LOG_DEBUG, "fps= %d fps2= %d\n", fps, fps2);
st->codec->time_base.den = fps * st->codec->time_base.num;
/* modification of h263 codec version (!) */
@@ -789,14 +789,14 @@ static int sync(AVFormatContext *s, int64_t *timestamp, int *flags, int *stream_
*flags= 0;
}else{
state= (state<<8) + get_byte(pb);
-
+
if(state == MKBETAG('I', 'N', 'D', 'X')){
len = get_be16(pb) - 6;
if(len<0)
continue;
goto skip;
}
-
+
if(state > (unsigned)0xFFFF || state < 12)
continue;
len=state;
@@ -807,7 +807,7 @@ static int sync(AVFormatContext *s, int64_t *timestamp, int *flags, int *stream_
res= get_byte(pb); /* reserved */
*flags = get_byte(pb); /* flags */
-
+
len -= 12;
}
for(i=0;i<s->nb_streams;i++) {
@@ -823,7 +823,7 @@ skip:
continue;
}
*stream_index= i;
-
+
return len;
}
return -1;
@@ -940,7 +940,7 @@ resync:
av_free_packet(pkt);
goto resync;
}
-
+
pkt->stream_index = i;
#if 0
@@ -999,7 +999,7 @@ static int rm_probe(AVProbeData *p)
return 0;
}
-static int64_t rm_read_dts(AVFormatContext *s, int stream_index,
+static int64_t rm_read_dts(AVFormatContext *s, int stream_index,
int64_t *ppos, int64_t pos_limit)
{
RMContext *rm = s->priv_data;
@@ -1007,7 +1007,7 @@ static int64_t rm_read_dts(AVFormatContext *s, int stream_index,
int stream_index2, flags, len, h;
pos = *ppos;
-
+
if(rm->old_format)
return AV_NOPTS_VALUE;
@@ -1028,7 +1028,7 @@ static int64_t rm_read_dts(AVFormatContext *s, int stream_index,
seq = get_byte(&s->pb); len--;
}
}
-
+
if((flags&2) && (seq&0x7F) == 1){
// av_log(s, AV_LOG_DEBUG, "%d %d-%d %Ld %d\n", flags, stream_index2, stream_index, dts, seq);
av_add_index_entry(st, pos, dts, 0, AVINDEX_KEYFRAME);
diff --git a/libavformat/rtp.c b/libavformat/rtp.c
index 2bd61ca9e9..b9758c917a 100644
--- a/libavformat/rtp.c
+++ b/libavformat/rtp.c
@@ -40,7 +40,7 @@
buffer to 'rtp_write_packet' contains all the packets for ONE
frame. Each packet should have a four byte header containing
the length in big endian format (same trick as
- 'url_open_dyn_packet_buf')
+ 'url_open_dyn_packet_buf')
*/
/* from http://www.iana.org/assignments/rtp-parameters last updated 05 January 2005 */
@@ -197,7 +197,7 @@ struct RTPDemuxContext {
MpegTSContext *ts; /* only used for MP2T payloads */
int read_buf_index;
int read_buf_size;
-
+
/* rtcp sender statistics receive */
int64_t last_rtcp_ntp_time;
int64_t first_rtcp_ntp_time;
@@ -268,7 +268,7 @@ static int rtcp_parse_packet(RTPDemuxContext *s, const unsigned char *buf, int l
/**
* open a new RTP parse context for stream 'st'. 'st' can be NULL for
* MPEG2TS streams to indicate that they should be demuxed inside the
- * rtp demux (otherwise CODEC_ID_MPEG2TS packets are returned)
+ * rtp demux (otherwise CODEC_ID_MPEG2TS packets are returned)
*/
RTPDemuxContext *rtp_parse_open(AVFormatContext *s1, AVStream *st, int payload_type, rtp_payload_data_t *rtp_payload_data)
{
@@ -354,27 +354,27 @@ static int rtp_parse_mp4_au(RTPDemuxContext *s, const uint8_t *buf)
}
/**
- * Parse an RTP or RTCP packet directly sent as a buffer.
+ * Parse an RTP or RTCP packet directly sent as a buffer.
* @param s RTP parse context.
* @param pkt returned packet
* @param buf input buffer or NULL to read the next packets
* @param len buffer len
- * @return 0 if a packet is returned, 1 if a packet is returned and more can follow
+ * @return 0 if a packet is returned, 1 if a packet is returned and more can follow
* (use buf as NULL to read the next). -1 if no packet (error or no more packet).
*/
-int rtp_parse_packet(RTPDemuxContext *s, AVPacket *pkt,
+int rtp_parse_packet(RTPDemuxContext *s, AVPacket *pkt,
const uint8_t *buf, int len)
{
unsigned int ssrc, h;
int payload_type, seq, delta_timestamp, ret;
AVStream *st;
uint32_t timestamp;
-
+
if (!buf) {
/* return the next packets, if any */
if (s->read_buf_index >= s->read_buf_size)
return -1;
- ret = mpegts_parse_packet(s->ts, pkt, s->buf + s->read_buf_index,
+ ret = mpegts_parse_packet(s->ts, pkt, s->buf + s->read_buf_index,
s->read_buf_size - s->read_buf_index);
if (ret < 0)
return -1;
@@ -398,13 +398,13 @@ int rtp_parse_packet(RTPDemuxContext *s, AVPacket *pkt,
seq = (buf[2] << 8) | buf[3];
timestamp = decode_be32(buf + 4);
ssrc = decode_be32(buf + 8);
-
+
/* NOTE: we can handle only one payload type */
if (s->payload_type != payload_type)
return -1;
#if defined(DEBUG) || 1
if (seq != ((s->seq + 1) & 0xffff)) {
- av_log(s->st->codec, AV_LOG_ERROR, "RTP: PT=%02x: bad cseq %04x expected=%04x\n",
+ av_log(s->st->codec, AV_LOG_ERROR, "RTP: PT=%02x: bad cseq %04x expected=%04x\n",
payload_type, seq, ((s->seq + 1) & 0xffff));
}
#endif
@@ -458,7 +458,7 @@ int rtp_parse_packet(RTPDemuxContext *s, AVPacket *pkt,
memcpy(pkt->data, buf, len);
break;
}
-
+
switch(st->codec->codec_id) {
case CODEC_ID_MP2:
case CODEC_ID_MPEG1VIDEO:
@@ -599,10 +599,10 @@ static void rtp_send_data(AVFormatContext *s1, const uint8_t *buf1, int len, int
put_be16(&s1->pb, s->seq);
put_be32(&s1->pb, s->timestamp);
put_be32(&s1->pb, s->ssrc);
-
+
put_buffer(&s1->pb, buf1, len);
put_flush_packet(&s1->pb);
-
+
s->seq++;
s->octet_count += len;
s->packet_count++;
@@ -639,7 +639,7 @@ static void rtp_send_samples(AVFormatContext *s1,
s->timestamp += n / sample_size;
}
}
-}
+}
/* NOTE: we suppose that exactly one frame is given as argument here */
/* XXX: test it */
@@ -659,7 +659,7 @@ static void rtp_send_mpegaudio(AVFormatContext *s1,
rtp_send_data(s1, s->buf, s->buf_ptr - s->buf, 0);
s->buf_ptr = s->buf + 4;
/* 90 KHz time stamp */
- s->timestamp = s->base_timestamp +
+ s->timestamp = s->base_timestamp +
(s->cur_timestamp * 90000LL) / st->codec->sample_rate;
}
}
@@ -727,7 +727,7 @@ static void rtp_send_mpegvideo(AVFormatContext *s1,
*q++ = h >> 8;
*q++ = h;
}
-
+
len = max_packet_size - (q - s->buf);
if (len > size)
len = size;
@@ -736,7 +736,7 @@ static void rtp_send_mpegvideo(AVFormatContext *s1,
q += len;
/* 90 KHz time stamp */
- s->timestamp = s->base_timestamp +
+ s->timestamp = s->base_timestamp +
av_rescale((int64_t)s->cur_timestamp * st->codec->time_base.num, 90000, st->codec->time_base.den); //FIXME pass timestamps
rtp_send_data(s1, s->buf, q - s->buf, (len == size));
@@ -761,7 +761,7 @@ static void rtp_send_raw(AVFormatContext *s1,
len = size;
/* 90 KHz time stamp */
- s->timestamp = s->base_timestamp +
+ s->timestamp = s->base_timestamp +
av_rescale((int64_t)s->cur_timestamp * st->codec->time_base.num, 90000, st->codec->time_base.den); //FIXME pass timestamps
rtp_send_data(s1, buf1, len, (len == size));
@@ -786,7 +786,7 @@ static void rtp_send_mpegts_raw(AVFormatContext *s1,
buf1 += len;
size -= len;
s->buf_ptr += len;
-
+
out_len = s->buf_ptr - s->buf;
if (out_len >= s->max_payload_size) {
rtp_send_data(s1, s->buf, out_len, 0);
@@ -804,19 +804,19 @@ static int rtp_write_packet(AVFormatContext *s1, AVPacket *pkt)
int64_t ntp_time;
int size= pkt->size;
uint8_t *buf1= pkt->data;
-
+
#ifdef DEBUG
printf("%d: write len=%d\n", pkt->stream_index, size);
#endif
/* XXX: mpeg pts hardcoded. RTCP send every 0.5 seconds */
- rtcp_bytes = ((s->octet_count - s->last_octet_count) * RTCP_TX_RATIO_NUM) /
+ rtcp_bytes = ((s->octet_count - s->last_octet_count) * RTCP_TX_RATIO_NUM) /
RTCP_TX_RATIO_DEN;
if (s->first_packet || rtcp_bytes >= 28) {
/* compute NTP time */
/* XXX: 90 kHz timestamp hardcoded */
ntp_time = (pkt->pts << 28) / 5625;
- rtcp_send_sr(s1, ntp_time);
+ rtcp_send_sr(s1, ntp_time);
s->last_octet_count = s->octet_count;
s->first_packet = 0;
}
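
The rtp_parse_packet() hunks above read the fixed part of the RTP header field by field (payload type, sequence number, timestamp, SSRC). As a self-contained illustration of that 12-byte layout, here is a small parser sketch; the helper names are mine, not libavformat's.

#include <stdint.h>

static uint32_t rd_be32(const uint8_t *p)
{
    return ((uint32_t)p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
}

struct rtp_fixed_header {
    int      version;       /* 2 bits, should be 2 */
    int      payload_type;  /* 7 bits              */
    int      seq;           /* 16 bits             */
    uint32_t timestamp;
    uint32_t ssrc;
};

/* Sketch only: extract the fixed RTP header fields from a raw packet. */
static int parse_rtp_header(const uint8_t *buf, int len,
                            struct rtp_fixed_header *h)
{
    if (len < 12)
        return -1;
    h->version      = buf[0] >> 6;
    h->payload_type = buf[1] & 0x7f;
    h->seq          = (buf[2] << 8) | buf[3];
    h->timestamp    = rd_be32(buf + 4);
    h->ssrc         = rd_be32(buf + 8);
    return h->version == 2 ? 0 : -1;
}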
diff --git a/libavformat/rtp.h b/libavformat/rtp.h
index 8bdbe2af90..93f50d66de 100644
--- a/libavformat/rtp.h
+++ b/libavformat/rtp.h
@@ -29,7 +29,7 @@ int rtp_get_payload_type(AVCodecContext *codec);
typedef struct RTPDemuxContext RTPDemuxContext;
typedef struct rtp_payload_data_s rtp_payload_data_s;
RTPDemuxContext *rtp_parse_open(AVFormatContext *s1, AVStream *st, int payload_type, rtp_payload_data_s *rtp_payload_data);
-int rtp_parse_packet(RTPDemuxContext *s, AVPacket *pkt,
+int rtp_parse_packet(RTPDemuxContext *s, AVPacket *pkt,
const uint8_t *buf, int len);
void rtp_parse_close(RTPDemuxContext *s);
diff --git a/libavformat/rtpproto.c b/libavformat/rtpproto.c
index 26565ed10a..8b8c73f6f3 100644
--- a/libavformat/rtpproto.c
+++ b/libavformat/rtpproto.c
@@ -56,8 +56,8 @@ int rtp_set_remote_url(URLContext *h, const char *uri)
char buf[1024];
char path[1024];
-
- url_split(NULL, 0, NULL, 0, hostname, sizeof(hostname), &port,
+
+ url_split(NULL, 0, NULL, 0, hostname, sizeof(hostname), &port,
path, sizeof(path), uri);
snprintf(buf, sizeof(buf), "udp://%s:%d%s", hostname, port, path);
@@ -101,7 +101,7 @@ static void build_udp_url(char *buf, int buf_size,
/*
* url syntax: rtp://host:port[?option=val...]
- * option: 'multicast=1' : enable multicast
+ * option: 'multicast=1' : enable multicast
* 'ttl=n' : set the ttl value (for multicast only)
* 'localport=n' : set the local port to n
*
@@ -114,15 +114,15 @@ static int rtp_open(URLContext *h, const char *uri, int flags)
char buf[1024];
char path[1024];
const char *p;
-
+
is_output = (flags & URL_WRONLY);
s = av_mallocz(sizeof(RTPContext));
if (!s)
return -ENOMEM;
h->priv_data = s;
-
- url_split(NULL, 0, NULL, 0, hostname, sizeof(hostname), &port,
+
+ url_split(NULL, 0, NULL, 0, hostname, sizeof(hostname), &port,
path, sizeof(path), uri);
/* extract parameters */
is_multicast = 0;
@@ -147,18 +147,18 @@ static int rtp_open(URLContext *h, const char *uri, int flags)
/* XXX: need to open another connexion if the port is not even */
/* well, should suppress localport in path */
-
+
build_udp_url(buf, sizeof(buf),
hostname, port + 1, local_port + 1, is_multicast, ttl);
if (url_open(&s->rtcp_hd, buf, flags) < 0)
goto fail;
-
+
/* just to ease handle access. XXX: need to suppress direct handle
access */
s->rtp_fd = udp_get_file_handle(s->rtp_hd);
s->rtcp_fd = udp_get_file_handle(s->rtcp_hd);
- h->max_packet_size = url_get_max_packet_size(s->rtp_hd);
+ h->max_packet_size = url_get_max_packet_size(s->rtp_hd);
h->is_streamed = 1;
return 0;
@@ -235,7 +235,7 @@ static int rtp_write(URLContext *h, uint8_t *buf, int size)
RTPContext *s = h->priv_data;
int ret;
URLContext *hd;
-
+
if (buf[1] >= 200 && buf[1] <= 204) {
/* RTCP payload type */
hd = s->rtcp_hd;
diff --git a/libavformat/rtsp.c b/libavformat/rtsp.c
index 789266e9c0..df790009f4 100644
--- a/libavformat/rtsp.c
+++ b/libavformat/rtsp.c
@@ -41,10 +41,10 @@ typedef struct RTSPState {
URLContext *rtsp_hd; /* RTSP TCP connexion handle */
int nb_rtsp_streams;
struct RTSPStream **rtsp_streams;
-
+
enum RTSPClientState state;
int64_t seek_timestamp;
-
+
/* XXX: currently we use unbuffered input */
// ByteIOContext rtsp_gb;
int seq; /* RTSP command sequence number */
@@ -57,7 +57,7 @@ typedef struct RTSPState {
typedef struct RTSPStream {
URLContext *rtp_handle; /* RTP stream handle */
RTPDemuxContext *rtp_ctx; /* RTP parse context */
-
+
int stream_index; /* corresponding stream index, if any. -1 if none (MPEG2TS case) */
int interleaved_min, interleaved_max; /* interleave ids, if TCP transport */
char control_url[1024]; /* url for this stream (from SDP) */
@@ -99,7 +99,7 @@ static void skip_spaces(const char **pp)
*pp = p;
}
-static void get_word_sep(char *buf, int buf_size, const char *sep,
+static void get_word_sep(char *buf, int buf_size, const char *sep,
const char **pp)
{
const char *p;
@@ -291,7 +291,7 @@ static void sdp_parse_fmtp(AVStream *st, const char *p)
if (*p == '\0')
break;
get_word_sep(attr, sizeof(attr), "=", &p);
- if (*p == '=')
+ if (*p == '=')
p++;
get_word_sep(value, sizeof(value), ";", &p);
if (*p == ';')
@@ -392,7 +392,7 @@ static void sdp_parse_line(AVFormatContext *s, SDPParseState *s1,
rtsp_st->sdp_port = atoi(buf1);
get_word(buf1, sizeof(buf1), &p); /* protocol (ignored) */
-
+
/* XXX: handle list of formats */
get_word(buf1, sizeof(buf1), &p); /* format list */
rtsp_st->sdp_payload_type = atoi(buf1);
@@ -420,7 +420,7 @@ static void sdp_parse_line(AVFormatContext *s, SDPParseState *s1,
/* get the control url */
st = s->streams[s->nb_streams - 1];
rtsp_st = st->priv_data;
-
+
/* XXX: may need to add full url resolution */
url_split(proto, sizeof(proto), NULL, 0, NULL, 0, NULL, NULL, 0, p);
if (proto[0] == '\0') {
@@ -432,7 +432,7 @@ static void sdp_parse_line(AVFormatContext *s, SDPParseState *s1,
}
} else if (strstart(p, "rtpmap:", &p)) {
/* NOTE: rtpmap is only supported AFTER the 'm=' tag */
- get_word(buf1, sizeof(buf1), &p);
+ get_word(buf1, sizeof(buf1), &p);
payload_type = atoi(buf1);
for(i = 0; i < s->nb_streams;i++) {
st = s->streams[i];
@@ -443,7 +443,7 @@ static void sdp_parse_line(AVFormatContext *s, SDPParseState *s1,
}
} else if (strstart(p, "fmtp:", &p)) {
/* NOTE: fmtp is only supported AFTER the 'a=rtpmap:xxx' tag */
- get_word(buf1, sizeof(buf1), &p);
+ get_word(buf1, sizeof(buf1), &p);
payload_type = atoi(buf1);
for(i = 0; i < s->nb_streams;i++) {
st = s->streams[i];
@@ -463,7 +463,7 @@ static int sdp_parse(AVFormatContext *s, const char *content)
int letter;
char buf[1024], *q;
SDPParseState sdp_parse_state, *s1 = &sdp_parse_state;
-
+
memset(s1, 0, sizeof(SDPParseState));
p = content;
for(;;) {
@@ -522,9 +522,9 @@ static void rtsp_parse_transport(RTSPHeader *reply, const char *p)
char parameter[16];
RTSPTransportField *th;
char buf[256];
-
+
reply->nb_transports = 0;
-
+
for(;;) {
skip_spaces(&p);
if (*p == '\0')
@@ -532,7 +532,7 @@ static void rtsp_parse_transport(RTSPHeader *reply, const char *p)
th = &reply->transports[reply->nb_transports];
- get_word_sep(transport_protocol, sizeof(transport_protocol),
+ get_word_sep(transport_protocol, sizeof(transport_protocol),
"/", &p);
if (*p == '/')
p++;
@@ -540,14 +540,14 @@ static void rtsp_parse_transport(RTSPHeader *reply, const char *p)
lower_transport[0] = '\0';
if (*p == '/') {
p++;
- get_word_sep(lower_transport, sizeof(lower_transport),
+ get_word_sep(lower_transport, sizeof(lower_transport),
";,", &p);
}
if (!strcasecmp(lower_transport, "TCP"))
th->protocol = RTSP_PROTOCOL_RTP_TCP;
else
th->protocol = RTSP_PROTOCOL_RTP_UDP;
-
+
if (*p == ';')
p++;
/* get each parameter */
@@ -561,19 +561,19 @@ static void rtsp_parse_transport(RTSPHeader *reply, const char *p)
} else if (!strcmp(parameter, "client_port")) {
if (*p == '=') {
p++;
- rtsp_parse_range(&th->client_port_min,
+ rtsp_parse_range(&th->client_port_min,
&th->client_port_max, &p);
}
} else if (!strcmp(parameter, "server_port")) {
if (*p == '=') {
p++;
- rtsp_parse_range(&th->server_port_min,
+ rtsp_parse_range(&th->server_port_min,
&th->server_port_max, &p);
}
} else if (!strcmp(parameter, "interleaved")) {
if (*p == '=') {
p++;
- rtsp_parse_range(&th->interleaved_min,
+ rtsp_parse_range(&th->interleaved_min,
&th->interleaved_max, &p);
}
} else if (!strcmp(parameter, "multicast")) {
@@ -590,7 +590,7 @@ static void rtsp_parse_transport(RTSPHeader *reply, const char *p)
if (*p == '=') {
p++;
get_word_sep(buf, sizeof(buf), ";,", &p);
- if (inet_aton(buf, &ipaddr))
+ if (inet_aton(buf, &ipaddr))
th->destination = ntohl(ipaddr.s_addr);
}
}
@@ -616,7 +616,7 @@ static void rtsp_parse_range_npt(RTSPHeader *reply, const char *p)
reply->range_start = AV_NOPTS_VALUE;
reply->range_end = AV_NOPTS_VALUE;
-
+
get_word_sep(buf, sizeof(buf), "-", &p);
reply->range_start = parse_date(buf, 1);
if (*p == '-') {
@@ -685,8 +685,8 @@ static void rtsp_skip_packet(AVFormatContext *s)
}
}
-static void rtsp_send_cmd(AVFormatContext *s,
- const char *cmd, RTSPHeader *reply,
+static void rtsp_send_cmd(AVFormatContext *s,
+ const char *cmd, RTSPHeader *reply,
unsigned char **content_ptr)
{
RTSPState *rt = s->priv_data;
@@ -750,10 +750,10 @@ static void rtsp_send_cmd(AVFormatContext *s,
}
line_count++;
}
-
+
if (rt->session_id[0] == '\0' && reply->session_id[0] != '\0')
pstrcpy(rt->session_id, sizeof(rt->session_id), reply->session_id);
-
+
content_length = reply->content_length;
if (content_length > 0) {
/* leave some room for a trailing '\0' (useful for simple parsing) */
@@ -817,9 +817,9 @@ static int rtsp_read_header(AVFormatContext *s,
return AVERROR_IO;
rt->rtsp_hd = rtsp_hd;
rt->seq = 0;
-
+
/* describe the stream */
- snprintf(cmd, sizeof(cmd),
+ snprintf(cmd, sizeof(cmd),
"DESCRIBE %s RTSP/1.0\r\n"
"Accept: application/sdp\r\n",
s->filename);
@@ -832,7 +832,7 @@ static int rtsp_read_header(AVFormatContext *s,
err = AVERROR_INVALIDDATA;
goto fail;
}
-
+
/* now we got the SDP description, we parse it */
ret = sdp_parse(s, (const char *)content);
av_freep(&content);
@@ -840,7 +840,7 @@ static int rtsp_read_header(AVFormatContext *s,
err = AVERROR_INVALIDDATA;
goto fail;
}
-
+
protocol_mask = rtsp_default_protocols;
/* for each stream, make the setup request */
@@ -897,11 +897,11 @@ static int rtsp_read_header(AVFormatContext *s,
else if (protocol_mask & (1 << RTSP_PROTOCOL_RTP_UDP_MULTICAST)) {
if (transport[0] != '\0')
pstrcat(transport, sizeof(transport), ",");
- snprintf(transport + strlen(transport),
+ snprintf(transport + strlen(transport),
sizeof(transport) - strlen(transport) - 1,
"RTP/AVP/UDP;multicast");
}
- snprintf(cmd, sizeof(cmd),
+ snprintf(cmd, sizeof(cmd),
"SETUP %s RTSP/1.0\r\n"
"Transport: %s\r\n",
rtsp_st->control_url, transport);
@@ -934,13 +934,13 @@ static int rtsp_read_header(AVFormatContext *s,
rtsp_st->interleaved_min = reply->transports[0].interleaved_min;
rtsp_st->interleaved_max = reply->transports[0].interleaved_max;
break;
-
+
case RTSP_PROTOCOL_RTP_UDP:
{
char url[1024];
-
+
/* XXX: also use address if specified */
- snprintf(url, sizeof(url), "rtp://%s:%d",
+ snprintf(url, sizeof(url), "rtp://%s:%d",
host, reply->transports[0].server_port_min);
if (rtp_set_remote_url(rtsp_st->rtp_handle, url) < 0) {
err = AVERROR_INVALIDDATA;
@@ -956,8 +956,8 @@ static int rtsp_read_header(AVFormatContext *s,
ttl = reply->transports[0].ttl;
if (!ttl)
ttl = 16;
- snprintf(url, sizeof(url), "rtp://%s:%d?multicast=1&ttl=%d",
- host,
+ snprintf(url, sizeof(url), "rtp://%s:%d?multicast=1&ttl=%d",
+ host,
reply->transports[0].server_port_min,
ttl);
if (url_open(&rtsp_st->rtp_handle, url, URL_RDONLY) < 0) {
@@ -983,13 +983,13 @@ static int rtsp_read_header(AVFormatContext *s,
/* use callback if available to extend setup */
if (ff_rtsp_callback) {
- if (ff_rtsp_callback(RTSP_ACTION_CLIENT_SETUP, rt->session_id,
+ if (ff_rtsp_callback(RTSP_ACTION_CLIENT_SETUP, rt->session_id,
NULL, 0, rt->last_reply) < 0) {
err = AVERROR_INVALIDDATA;
goto fail;
}
}
-
+
rt->state = RTSP_STATE_IDLE;
rt->seek_timestamp = 0; /* default is to start stream at position
@@ -1045,12 +1045,12 @@ static int tcp_read_packet(AVFormatContext *s, RTSPStream **prtsp_st,
ret = url_readbuf(rt->rtsp_hd, buf, len);
if (ret != len)
return -1;
-
+
/* find the matching stream */
for(i = 0; i < rt->nb_rtsp_streams; i++) {
rtsp_st = rt->rtsp_streams[i];
- if (id >= rtsp_st->interleaved_min &&
- id <= rtsp_st->interleaved_max)
+ if (id >= rtsp_st->interleaved_min &&
+ id <= rtsp_st->interleaved_max)
goto found;
}
goto redo;
@@ -1059,7 +1059,7 @@ static int tcp_read_packet(AVFormatContext *s, RTSPStream **prtsp_st,
return len;
}
-static int udp_read_packet(AVFormatContext *s, RTSPStream **prtsp_st,
+static int udp_read_packet(AVFormatContext *s, RTSPStream **prtsp_st,
uint8_t *buf, int buf_size)
{
RTSPState *rt = s->priv_data;
@@ -1154,11 +1154,11 @@ static int rtsp_read_play(AVFormatContext *s)
av_log(s, AV_LOG_DEBUG, "hello state=%d\n", rt->state);
if (rt->state == RTSP_STATE_PAUSED) {
- snprintf(cmd, sizeof(cmd),
+ snprintf(cmd, sizeof(cmd),
"PLAY %s RTSP/1.0\r\n",
s->filename);
} else {
- snprintf(cmd, sizeof(cmd),
+ snprintf(cmd, sizeof(cmd),
"PLAY %s RTSP/1.0\r\n"
"Range: npt=%0.3f-\r\n",
s->filename,
@@ -1181,11 +1181,11 @@ static int rtsp_read_pause(AVFormatContext *s)
char cmd[1024];
rt = s->priv_data;
-
+
if (rt->state != RTSP_STATE_PLAYING)
return 0;
- snprintf(cmd, sizeof(cmd),
+ snprintf(cmd, sizeof(cmd),
"PAUSE %s RTSP/1.0\r\n",
s->filename);
rtsp_send_cmd(s, cmd, reply, NULL);
@@ -1197,11 +1197,11 @@ static int rtsp_read_pause(AVFormatContext *s)
}
}
-static int rtsp_read_seek(AVFormatContext *s, int stream_index,
+static int rtsp_read_seek(AVFormatContext *s, int stream_index,
int64_t timestamp, int flags)
{
RTSPState *rt = s->priv_data;
-
+
rt->seek_timestamp = timestamp;
switch(rt->state) {
default:
@@ -1230,13 +1230,13 @@ static int rtsp_read_close(AVFormatContext *s)
url_fclose(&rt->rtsp_gb);
}
#endif
- snprintf(cmd, sizeof(cmd),
+ snprintf(cmd, sizeof(cmd),
"TEARDOWN %s RTSP/1.0\r\n",
s->filename);
rtsp_send_cmd(s, cmd, reply, NULL);
if (ff_rtsp_callback) {
- ff_rtsp_callback(RTSP_ACTION_CLIENT_TEARDOWN, rt->session_id,
+ ff_rtsp_callback(RTSP_ACTION_CLIENT_TEARDOWN, rt->session_id,
NULL, 0, NULL);
}
@@ -1305,9 +1305,9 @@ static int sdp_read_header(AVFormatContext *s,
/* open each RTP stream */
for(i=0;i<rt->nb_rtsp_streams;i++) {
rtsp_st = rt->rtsp_streams[i];
-
- snprintf(url, sizeof(url), "rtp://%s:%d?multicast=1&ttl=%d",
- inet_ntoa(rtsp_st->sdp_ip),
+
+ snprintf(url, sizeof(url), "rtp://%s:%d?multicast=1&ttl=%d",
+ inet_ntoa(rtsp_st->sdp_ip),
rtsp_st->sdp_port,
rtsp_st->sdp_ttl);
if (url_open(&rtsp_st->rtp_handle, url, URL_RDONLY) < 0) {
diff --git a/libavformat/rtsp.h b/libavformat/rtsp.h
index 6c2c5efd52..3687748e8b 100644
--- a/libavformat/rtsp.h
+++ b/libavformat/rtsp.h
@@ -55,7 +55,7 @@ typedef struct RTSPHeader {
enum RTSPStatusCode status_code; /* response code from server */
int nb_transports;
/* in AV_TIME_BASE unit, AV_NOPTS_VALUE if not used */
- int64_t range_start, range_end;
+ int64_t range_start, range_end;
RTSPTransportField transports[RTSP_MAX_TRANSPORTS];
int seq; /* sequence number */
char session_id[512];
@@ -74,7 +74,7 @@ typedef struct RTSPActionServerSetup {
char transport_option[512];
} RTSPActionServerSetup;
-typedef int FFRTSPCallback(enum RTSPCallbackAction action,
+typedef int FFRTSPCallback(enum RTSPCallbackAction action,
const char *session_id,
char *buf, int buf_size,
void *arg);
diff --git a/libavformat/segafilm.c b/libavformat/segafilm.c
index 7727462e21..562f5522a8 100644
--- a/libavformat/segafilm.c
+++ b/libavformat/segafilm.c
@@ -160,7 +160,7 @@ static int film_read_header(AVFormatContext *s,
st->codec->sample_rate = film->audio_samplerate;
st->codec->bit_rate = st->codec->channels * st->codec->sample_rate *
st->codec->bits_per_sample;
- st->codec->block_align = st->codec->channels *
+ st->codec->block_align = st->codec->channels *
st->codec->bits_per_sample / 8;
}
@@ -174,10 +174,10 @@ static int film_read_header(AVFormatContext *s,
if(film->sample_count >= UINT_MAX / sizeof(film_sample_t))
return -1;
film->sample_table = av_malloc(film->sample_count * sizeof(film_sample_t));
-
+
for(i=0; i<s->nb_streams; i++)
av_set_pts_info(s->streams[i], 33, 1, film->base_clock);
-
+
audio_frame_counter = 0;
for (i = 0; i < film->sample_count; i++) {
/* load the next sample record and transfer it to an internal struct */
@@ -185,7 +185,7 @@ static int film_read_header(AVFormatContext *s,
av_free(film->sample_table);
return AVERROR_IO;
}
- film->sample_table[i].sample_offset =
+ film->sample_table[i].sample_offset =
data_offset + BE_32(&scratch[0]);
film->sample_table[i].sample_size = BE_32(&scratch[4]);
if (BE_32(&scratch[8]) == 0xFFFFFFFF) {
@@ -227,7 +227,7 @@ static int film_read_packet(AVFormatContext *s,
url_fseek(pb, sample->sample_offset, SEEK_SET);
/* do a special song and dance when loading FILM Cinepak chunks */
- if ((sample->stream == film->video_stream_index) &&
+ if ((sample->stream == film->video_stream_index) &&
(film->video_type == CODEC_ID_CINEPAK)) {
if (av_new_packet(pkt, sample->sample_size - film->cvid_extra_bytes))
return AVERROR_NOMEM;
@@ -237,7 +237,7 @@ static int film_read_packet(AVFormatContext *s,
ret = get_buffer(pb, pkt->data, 10);
/* skip the non-spec CVID bytes */
url_fseek(pb, film->cvid_extra_bytes, SEEK_CUR);
- ret += get_buffer(pb, pkt->data + 10,
+ ret += get_buffer(pb, pkt->data + 10,
sample->sample_size - 10 - film->cvid_extra_bytes);
if (ret != sample->sample_size - film->cvid_extra_bytes)
ret = AVERROR_IO;
diff --git a/libavformat/sgi.c b/libavformat/sgi.c
index a6de0fffbd..9c8f07f191 100644
--- a/libavformat/sgi.c
+++ b/libavformat/sgi.c
@@ -65,8 +65,8 @@ static void read_sgi_header(ByteIOContext *f, SGIInfo *info)
info->xsize = (unsigned short) get_be16(f);
info->ysize = (unsigned short) get_be16(f);
info->zsize = (unsigned short) get_be16(f);
-
- if(info->zsize > 4096)
+
+ if(info->zsize > 4096)
info->zsize= 0;
#ifdef DEBUG
@@ -85,13 +85,13 @@ static void read_sgi_header(ByteIOContext *f, SGIInfo *info)
/* read an uncompressed sgi image */
-static int read_uncompressed_sgi(const SGIInfo *si,
+static int read_uncompressed_sgi(const SGIInfo *si,
AVPicture *pict, ByteIOContext *f)
{
int x, y, z, chan_offset, ret = 0;
uint8_t *dest_row;
- /* skip header */
+ /* skip header */
url_fseek(f, SGI_HEADER_SIZE, SEEK_SET);
pict->linesize[0] = si->xsize;
@@ -100,17 +100,17 @@ static int read_uncompressed_sgi(const SGIInfo *si,
#ifndef WORDS_BIGENDIAN
/* rgba -> bgra for rgba32 on little endian cpus */
- if (si->zsize == 4 && z != 3)
+ if (si->zsize == 4 && z != 3)
chan_offset = 2 - z;
else
#endif
chan_offset = z;
-
+
for (y = si->ysize - 1; y >= 0; y--) {
dest_row = pict->data[0] + (y * si->xsize * si->zsize);
for (x = 0; x < si->xsize; x++) {
- dest_row[chan_offset] = get_byte(f);
+ dest_row[chan_offset] = get_byte(f);
dest_row += si->zsize;
}
}
@@ -126,14 +126,14 @@ static int expand_rle_row(ByteIOContext *f, unsigned char *optr,
{
unsigned char pixel, count;
int length = 0;
-
+
#ifndef WORDS_BIGENDIAN
/* rgba -> bgra for rgba32 on little endian cpus */
if (pixelstride == 4 && chan_offset != 3) {
chan_offset = 2 - chan_offset;
}
#endif
-
+
optr += chan_offset;
while (1) {
@@ -162,12 +162,12 @@ static int expand_rle_row(ByteIOContext *f, unsigned char *optr,
/* read a run length encoded sgi image */
-static int read_rle_sgi(const SGIInfo *sgi_info,
+static int read_rle_sgi(const SGIInfo *sgi_info,
AVPicture *pict, ByteIOContext *f)
{
uint8_t *dest_row;
unsigned long *start_table;
- int y, z, xsize, ysize, zsize, tablen;
+ int y, z, xsize, ysize, zsize, tablen;
long start_offset;
int ret = 0;
@@ -175,7 +175,7 @@ static int read_rle_sgi(const SGIInfo *sgi_info,
ysize = sgi_info->ysize;
zsize = sgi_info->zsize;
- /* skip header */
+ /* skip header */
url_fseek(f, SGI_HEADER_SIZE, SEEK_SET);
/* size of rle offset and length tables */
@@ -188,7 +188,7 @@ static int read_rle_sgi(const SGIInfo *sgi_info,
goto fail;
}
- /* skip run length table */
+ /* skip run length table */
url_fseek(f, tablen, SEEK_CUR);
for (z = 0; z < zsize; z++) {
@@ -216,7 +216,7 @@ fail:
}
-static int sgi_read(ByteIOContext *f,
+static int sgi_read(ByteIOContext *f,
int (*alloc_cb)(void *opaque, AVImageInfo *info), void *opaque)
{
SGIInfo sgi_info, *s = &sgi_info;
@@ -267,23 +267,23 @@ static void write_sgi_header(ByteIOContext *f, const SGIInfo *info)
put_be16(f, SGI_MAGIC);
put_byte(f, info->rle);
- put_byte(f, info->bytes_per_channel);
+ put_byte(f, info->bytes_per_channel);
put_be16(f, info->dimension);
put_be16(f, info->xsize);
put_be16(f, info->ysize);
put_be16(f, info->zsize);
/* The rest are constant in this implementation */
- put_be32(f, 0L); /* pixmin */
- put_be32(f, 255L); /* pixmax */
- put_be32(f, 0L); /* dummy */
+ put_be32(f, 0L); /* pixmin */
+ put_be32(f, 255L); /* pixmax */
+ put_be32(f, 0L); /* dummy */
/* name */
for (i = 0; i < 80; i++) {
put_byte(f, 0);
}
- put_be32(f, 0L); /* colormap */
+ put_be32(f, 0L); /* colormap */
/* The rest of the 512 byte header is unused. */
for (i = 0; i < 404; i++) {
@@ -302,7 +302,7 @@ static int rle_row(ByteIOContext *f, char *row, int stride, int rowsize)
row += (2 * stride);
x -= 2;
- while (x > 0 && (row[-2 * stride] != row[-1 * stride] ||
+ while (x > 0 && (row[-2 * stride] != row[-1 * stride] ||
row[-1 * stride] != row[0])) {
row += stride;
x--;
@@ -316,7 +316,7 @@ static int rle_row(ByteIOContext *f, char *row, int stride, int rowsize)
i = count > 126 ? 126 : count;
count -= i;
- put_byte(f, 0x80 | i);
+ put_byte(f, 0x80 | i);
length++;
while (i > 0) {
@@ -350,14 +350,14 @@ static int rle_row(ByteIOContext *f, char *row, int stride, int rowsize)
put_byte(f, i);
length++;
- put_byte(f, repeat);
+ put_byte(f, repeat);
length++;
};
};
length++;
- put_byte(f, 0);
+ put_byte(f, 0);
return (length);
}
@@ -374,7 +374,7 @@ static int sgi_write(ByteIOContext *pb, AVImageInfo *info)
si->ysize = info->height;
si->rle = 1;
si->bytes_per_channel = 1;
-
+
switch(info->pix_fmt) {
case PIX_FMT_GRAY8:
si->dimension = SGI_SINGLE_CHAN;
@@ -392,14 +392,14 @@ static int sgi_write(ByteIOContext *pb, AVImageInfo *info)
return AVERROR_INVALIDDATA;
}
- write_sgi_header(pb, si);
+ write_sgi_header(pb, si);
tablesize = si->zsize * si->ysize * sizeof(long);
-
+
/* skip rle offset and length tables, write them at the end. */
url_fseek(pb, tablesize * 2, SEEK_CUR);
put_flush_packet(pb);
-
+
lengthtab = av_malloc(tablesize);
offsettab = av_malloc(tablesize);
@@ -407,36 +407,36 @@ static int sgi_write(ByteIOContext *pb, AVImageInfo *info)
#ifndef WORDS_BIGENDIAN
/* rgba -> bgra for rgba32 on little endian cpus */
- if (si->zsize == 4 && z != 3)
+ if (si->zsize == 4 && z != 3)
chan_offset = 2 - z;
else
#endif
chan_offset = z;
-
+
srcrow = info->pict.data[0] + chan_offset;
-
+
for (y = si->ysize -1; y >= 0; y--) {
offsettab[(z * si->ysize) + y] = url_ftell(pb);
lengthtab[(z * si->ysize) + y] = rle_row(pb, srcrow,
si->zsize, si->xsize);
- srcrow += info->pict.linesize[0];
+ srcrow += info->pict.linesize[0];
}
}
url_fseek(pb, 512, SEEK_SET);
-
+
/* write offset table */
for (i = 0; i < (si->ysize * si->zsize); i++) {
put_be32(pb, offsettab[i]);
}
-
+
/* write length table */
for (i = 0; i < (si->ysize * si->zsize); i++) {
put_be32(pb, lengthtab[i]);
}
put_flush_packet(pb);
-
+
av_free(lengthtab);
av_free(offsettab);
@@ -449,7 +449,7 @@ AVImageFormat sgi_image_format = {
"sgi,rgb,rgba,bw",
sgi_probe,
sgi_read,
- (1 << PIX_FMT_GRAY8) | (1 << PIX_FMT_RGB24) | (1 << PIX_FMT_RGBA32),
+ (1 << PIX_FMT_GRAY8) | (1 << PIX_FMT_RGB24) | (1 << PIX_FMT_RGBA32),
#ifdef CONFIG_MUXERS
sgi_write,
#else
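
The expand_rle_row() hunk above mostly shows the channel-offset handling, so for context here is a simplified sketch of the SGI RLE row scheme it operates on: each count byte either introduces a literal run (high bit set, low 7 bits give the length) or a single pixel to repeat (high bit clear), and a zero count terminates the row. This is an assumed, stripped-down reader, not the library function.

#include <stddef.h>
#include <stdint.h>

/* Sketch only: expand one SGI RLE row into dst with the given pixel stride.
 * Returns the number of pixels written, or -1 on malformed input. */
static int rle_expand_row(const uint8_t *src, size_t srclen,
                          uint8_t *dst, size_t dstlen, size_t pixelstride)
{
    size_t si = 0, written = 0;

    while (si < srclen) {
        uint8_t pixel = src[si++];
        size_t  count = pixel & 0x7f;

        if (count == 0)
            return (int)written;              /* end-of-row marker */

        if (pixel & 0x80) {                   /* literal run of 'count' bytes */
            while (count-- && si < srclen && written < dstlen) {
                *dst = src[si++];
                dst += pixelstride;
                written++;
            }
        } else {                              /* repeat the next byte 'count' times */
            if (si >= srclen)
                return -1;
            pixel = src[si++];
            while (count-- && written < dstlen) {
                *dst = pixel;
                dst += pixelstride;
                written++;
            }
        }
    }
    return -1;                                /* missing terminator */
}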
diff --git a/libavformat/sierravmd.c b/libavformat/sierravmd.c
index 4037614b8a..8332758242 100644
--- a/libavformat/sierravmd.c
+++ b/libavformat/sierravmd.c
@@ -153,7 +153,7 @@ static int vmd_read_header(AVFormatContext *s,
st->codec->codec_tag = 0; /* no codec tag */
st->codec->channels = (vmd->vmd_header[811] & 0x80) ? 2 : 1;
st->codec->sample_rate = vmd->sample_rate;
- st->codec->block_align = vmd->audio_block_align =
+ st->codec->block_align = vmd->audio_block_align =
LE_16(&vmd->vmd_header[806]);
if (st->codec->block_align & 0x8000) {
st->codec->bits_per_sample = 16;
@@ -161,11 +161,11 @@ static int vmd_read_header(AVFormatContext *s,
} else
st->codec->bits_per_sample = 16;
// st->codec->bits_per_sample = 8;
- st->codec->bit_rate = st->codec->sample_rate *
+ st->codec->bit_rate = st->codec->sample_rate *
st->codec->bits_per_sample * st->codec->channels;
/* for calculating pts */
- vmd->audio_frame_divisor = st->codec->bits_per_sample / 8 /
+ vmd->audio_frame_divisor = st->codec->bits_per_sample / 8 /
st->codec->channels;
video_pts_inc = 90000;
@@ -176,8 +176,8 @@ static int vmd_read_header(AVFormatContext *s,
video_pts_inc = 90000 / 10;
}
- /* skip over the offset table and load the table of contents; don't
- * care about the offset table since demuxer will calculate those
+ /* skip over the offset table and load the table of contents; don't
+ * care about the offset table since demuxer will calculate those
* independently */
toc_offset = LE_32(&vmd->vmd_header[812]);
vmd->frame_count = LE_16(&vmd->vmd_header[6]);
@@ -197,7 +197,7 @@ static int vmd_read_header(AVFormatContext *s,
av_free(vmd->frame_table);
return AVERROR_NOMEM;
}
- if (get_buffer(pb, raw_frame_table, raw_frame_table_size) !=
+ if (get_buffer(pb, raw_frame_table, raw_frame_table_size) !=
raw_frame_table_size) {
av_free(raw_frame_table);
av_free(vmd->frame_table);
@@ -213,7 +213,7 @@ static int vmd_read_header(AVFormatContext *s,
/* if the frame size is 0, do not count the frame and bring the
* total frame count down */
// note, we limit the size to 1Gb to ensure that we dont end up overflowing the size integer used to allocate the memory
- vmd->frame_table[i].frame_size = LE_32(&current_frame_record[2]) & 0x3FFFFFFF;
+ vmd->frame_table[i].frame_size = LE_32(&current_frame_record[2]) & 0x3FFFFFFF;
/* this logic is present so that 0-length audio chunks are not
* accounted */
@@ -271,7 +271,7 @@ static int vmd_read_packet(AVFormatContext *s,
return AVERROR_NOMEM;
pkt->pos= url_ftell(pb);
memcpy(pkt->data, frame->frame_record, BYTES_PER_FRAME_RECORD);
- ret = get_buffer(pb, pkt->data + BYTES_PER_FRAME_RECORD,
+ ret = get_buffer(pb, pkt->data + BYTES_PER_FRAME_RECORD,
frame->frame_size);
if (ret != frame->frame_size) {
diff --git a/libavformat/sol.c b/libavformat/sol.c
index 70dab2ac12..4f6a8934c0 100644
--- a/libavformat/sol.c
+++ b/libavformat/sol.c
@@ -1,4 +1,4 @@
-/*
+/*
* Sierra SOL decoder
* Copyright Konstantin Shishkov.
*
@@ -17,7 +17,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-/*
+/*
* Based on documents from Game Audio Player and own research
*/
@@ -81,7 +81,7 @@ static int sol_channels(int magic, int type)
if (magic == 0x0B8D || !(type & SOL_STEREO)) return 1;
return 2;
}
-
+
static int sol_read_header(AVFormatContext *s,
AVFormatParameters *ap)
{
@@ -101,14 +101,14 @@ static int sol_read_header(AVFormatContext *s,
size = get_le32(pb);
if (magic != 0x0B8D)
get_byte(pb); /* newer SOLs contain padding byte */
-
+
codec = sol_codec_id(magic, type);
channels = sol_channels(magic, type);
-
+
if (codec == CODEC_ID_SOL_DPCM)
id = sol_codec_type(magic, type);
else id = 0;
-
+
/* now we are ready: build format streams */
st = av_new_stream(s, 0);
if (!st)
@@ -117,7 +117,7 @@ static int sol_read_header(AVFormatContext *s,
st->codec->codec_tag = id;
st->codec->codec_id = codec;
st->codec->channels = channels;
- st->codec->sample_rate = rate;
+ st->codec->sample_rate = rate;
av_set_pts_info(st, 64, 1, rate);
return 0;
}
diff --git a/libavformat/swf.c b/libavformat/swf.c
index 258bb2fdb7..e85ae2f31d 100644
--- a/libavformat/swf.c
+++ b/libavformat/swf.c
@@ -61,7 +61,7 @@ typedef struct {
offset_t duration_pos;
offset_t tag_pos;
-
+
int samples_per_frame;
int sound_samples;
int video_samples;
@@ -120,7 +120,7 @@ static int swf_mp3_info(void *data, int *byteSize, int *samplesPerFrame, int *sa
int bitRate = 0;
int bitsPerSlot = sBitsPerSlot[layerID];
int isPadded = ((header >> 9) & 0x01);
-
+
if ( (( header >> 21 ) & 0x7ff) != 0x7ff ) {
return 0;
}
@@ -203,7 +203,7 @@ static inline void max_nbits(int *nbits_ptr, int val)
*nbits_ptr = n;
}
-static void put_swf_rect(ByteIOContext *pb,
+static void put_swf_rect(ByteIOContext *pb,
int xmin, int xmax, int ymin, int ymax)
{
PutBitContext p;
@@ -211,7 +211,7 @@ static void put_swf_rect(ByteIOContext *pb,
int nbits, mask;
init_put_bits(&p, buf, sizeof(buf));
-
+
nbits = 0;
max_nbits(&nbits, xmin);
max_nbits(&nbits, xmax);
@@ -225,7 +225,7 @@ static void put_swf_rect(ByteIOContext *pb,
put_bits(&p, nbits, xmax & mask);
put_bits(&p, nbits, ymin & mask);
put_bits(&p, nbits, ymax & mask);
-
+
flush_put_bits(&p);
put_buffer(pb, buf, pbBufPtr(&p) - p.buf);
}
@@ -243,15 +243,15 @@ static void put_swf_line_edge(PutBitContext *pb, int dx, int dy)
mask = (1 << nbits) - 1;
put_bits(pb, 4, nbits - 2); /* 16 bits precision */
if (dx == 0) {
- put_bits(pb, 1, 0);
- put_bits(pb, 1, 1);
+ put_bits(pb, 1, 0);
+ put_bits(pb, 1, 1);
put_bits(pb, nbits, dy & mask);
} else if (dy == 0) {
- put_bits(pb, 1, 0);
- put_bits(pb, 1, 0);
+ put_bits(pb, 1, 0);
+ put_bits(pb, 1, 0);
put_bits(pb, nbits, dx & mask);
} else {
- put_bits(pb, 1, 1);
+ put_bits(pb, 1, 1);
put_bits(pb, nbits, dx & mask);
put_bits(pb, nbits, dy & mask);
}
@@ -268,7 +268,7 @@ static void put_swf_matrix(ByteIOContext *pb,
int nbits;
init_put_bits(&p, buf, sizeof(buf));
-
+
put_bits(&p, 1, 1); /* a, d present */
nbits = 1;
max_nbits(&nbits, a);
@@ -276,7 +276,7 @@ static void put_swf_matrix(ByteIOContext *pb,
put_bits(&p, 5, nbits); /* nb bits */
put_bits(&p, nbits, a);
put_bits(&p, nbits, d);
-
+
put_bits(&p, 1, 1); /* b, c present */
nbits = 1;
max_nbits(&nbits, c);
@@ -366,14 +366,14 @@ static int swf_write_header(AVFormatContext *s)
} else {
put_byte(pb, 4); /* version (should use 4 for mpeg audio support) */
}
- put_le32(pb, DUMMY_FILE_SIZE); /* dummy size
- (will be patched if not streamed) */
+ put_le32(pb, DUMMY_FILE_SIZE); /* dummy size
+ (will be patched if not streamed) */
put_swf_rect(pb, 0, width * 20, 0, height * 20);
put_le16(pb, (rate * 256) / rate_base); /* frame rate */
swf->duration_pos = url_ftell(pb);
put_le16(pb, (uint16_t)(DUMMY_DURATION * (int64_t)rate / rate_base)); /* frame count */
-
+
/* define a shape with the jpeg inside */
if ( video_enc && video_enc->codec_id == CODEC_ID_FLV1 ) {
} else if ( video_enc && video_enc->codec_id == CODEC_ID_MJPEG ) {
@@ -387,28 +387,28 @@ static int swf_write_header(AVFormatContext *s)
put_byte(pb, 0x41); /* clipped bitmap fill */
put_le16(pb, BITMAP_ID); /* bitmap ID */
/* position of the bitmap */
- put_swf_matrix(pb, (int)(1.0 * (1 << FRAC_BITS)), 0,
+ put_swf_matrix(pb, (int)(1.0 * (1 << FRAC_BITS)), 0,
0, (int)(1.0 * (1 << FRAC_BITS)), 0, 0);
put_byte(pb, 0); /* no line style */
-
+
/* shape drawing */
init_put_bits(&p, buf1, sizeof(buf1));
put_bits(&p, 4, 1); /* one fill bit */
put_bits(&p, 4, 0); /* zero line bit */
-
+
put_bits(&p, 1, 0); /* not an edge */
put_bits(&p, 5, FLAG_MOVETO | FLAG_SETFILL0);
put_bits(&p, 5, 1); /* nbits */
put_bits(&p, 1, 0); /* X */
put_bits(&p, 1, 0); /* Y */
put_bits(&p, 1, 1); /* set fill style 1 */
-
+
/* draw the rectangle ! */
put_swf_line_edge(&p, width, 0);
put_swf_line_edge(&p, 0, height);
put_swf_line_edge(&p, -width, 0);
put_swf_line_edge(&p, 0, -height);
-
+
/* end of shape */
put_bits(&p, 1, 0); /* not an edge */
put_bits(&p, 5, 0);
@@ -418,7 +418,7 @@ static int swf_write_header(AVFormatContext *s)
put_swf_end_tag(s);
}
-
+
if (audio_enc && audio_enc->codec_id == CODEC_ID_MP3 ) {
int v;
@@ -450,7 +450,7 @@ static int swf_write_header(AVFormatContext *s)
put_byte(&s->pb, v);
put_le16(&s->pb, swf->samples_per_frame); /* avg samples per frame */
put_le16(&s->pb, 0);
-
+
put_swf_end_tag(s);
}
@@ -458,7 +458,7 @@ static int swf_write_header(AVFormatContext *s)
return 0;
}
-static int swf_write_video(AVFormatContext *s,
+static int swf_write_video(AVFormatContext *s,
AVCodecContext *enc, const uint8_t *buf, int size)
{
SWFContext *swf = s->priv_data;
@@ -466,7 +466,7 @@ static int swf_write_video(AVFormatContext *s,
int c = 0;
int outSize = 0;
int outSamples = 0;
-
+
/* Flash Player limit */
if ( swf->swf_frame_number == 16000 ) {
av_log(enc, AV_LOG_INFO, "warning: Flash Player limit of 16000 frames reached\n");
@@ -480,13 +480,13 @@ retry_swf_audio_packet:
int mp3SampleRate = 0;
int mp3IsMono = 0;
int mp3SamplesPerFrame = 0;
-
+
/* copy out mp3 header from ring buffer */
uint8_t header[4];
for (c=0; c<4; c++) {
header[c] = swf->audio_fifo[(swf->audio_in_pos+outSize+c) % AUDIO_FIFO_SIZE];
}
-
+
if ( swf_mp3_info(header,&mp3FrameSize,&mp3SamplesPerFrame,&mp3SampleRate,&mp3IsMono) ) {
if ( ( swf->audio_size-outSize ) >= mp3FrameSize ) {
outSize += mp3FrameSize;
@@ -497,7 +497,7 @@ retry_swf_audio_packet:
}
} else {
/* invalid mp3 data, skip forward
- we need to do this since the Flash Player
+ we need to do this since the Flash Player
does not like custom headers */
swf->audio_in_pos ++;
swf->audio_size --;
@@ -505,7 +505,7 @@ retry_swf_audio_packet:
goto retry_swf_audio_packet;
}
}
-
+
/* audio stream is behind video stream, bail */
if ( ( swf->sound_samples + outSamples + swf->samples_per_frame ) < swf->video_samples ) {
return 0;
@@ -523,7 +523,7 @@ retry_swf_audio_packet:
put_byte(pb, 0);
put_byte(pb, SWF_VIDEO_CODEC_FLV1);
put_swf_end_tag(s);
-
+
/* place the video object for the first time */
put_swf_tag(s, TAG_PLACEOBJECT2);
put_byte(pb, 0x36);
@@ -546,10 +546,10 @@ retry_swf_audio_packet:
put_le16(pb, swf->video_frame_number );
put_swf_end_tag(s);
}
-
+
/* set video frame data */
put_swf_tag(s, TAG_VIDEOFRAME | TAG_LONG);
- put_le16(pb, VIDEO_ID);
+ put_le16(pb, VIDEO_ID);
put_le16(pb, swf->video_frame_number++ );
put_buffer(pb, buf, size);
put_swf_end_tag(s);
@@ -560,29 +560,29 @@ retry_swf_audio_packet:
put_le16(pb, SHAPE_ID); /* shape ID */
put_le16(pb, 1); /* depth */
put_swf_end_tag(s);
-
+
/* free the bitmap */
put_swf_tag(s, TAG_FREECHARACTER);
put_le16(pb, BITMAP_ID);
put_swf_end_tag(s);
}
-
+
put_swf_tag(s, TAG_JPEG2 | TAG_LONG);
-
+
put_le16(pb, BITMAP_ID); /* ID of the image */
-
+
/* a dummy jpeg header seems to be required */
- put_byte(pb, 0xff);
+ put_byte(pb, 0xff);
put_byte(pb, 0xd8);
put_byte(pb, 0xff);
put_byte(pb, 0xd9);
/* write the jpeg image */
put_buffer(pb, buf, size);
-
+
put_swf_end_tag(s);
-
+
/* draw the shape */
-
+
put_swf_tag(s, TAG_PLACEOBJECT);
put_le16(pb, SHAPE_ID); /* shape ID */
put_le16(pb, 1); /* depth */
@@ -591,7 +591,7 @@ retry_swf_audio_packet:
} else {
/* invalid codec */
}
-
+
swf->swf_frame_number ++;
swf->video_samples += swf->samples_per_frame;
@@ -605,7 +605,7 @@ retry_swf_audio_packet:
put_byte(pb,swf->audio_fifo[(swf->audio_in_pos+c) % AUDIO_FIFO_SIZE]);
}
put_swf_end_tag(s);
-
+
/* update FIFO */
swf->sound_samples += outSamples;
swf->audio_in_pos += outSize;
@@ -616,13 +616,13 @@ retry_swf_audio_packet:
/* output the frame */
put_swf_tag(s, TAG_SHOWFRAME);
put_swf_end_tag(s);
-
+
put_flush_packet(&s->pb);
-
+
return 0;
}
-static int swf_write_audio(AVFormatContext *s,
+static int swf_write_audio(AVFormatContext *s,
AVCodecContext *enc, const uint8_t *buf, int size)
{
SWFContext *swf = s->priv_data;
@@ -675,7 +675,7 @@ static int swf_write_trailer(AVFormatContext *s)
put_swf_tag(s, TAG_END);
put_swf_end_tag(s);
-
+
put_flush_packet(&s->pb);
/* patch file size and number of frames if not streamed */
@@ -686,7 +686,7 @@ static int swf_write_trailer(AVFormatContext *s)
url_fseek(pb, swf->duration_pos, SEEK_SET);
put_le16(pb, video_enc->frame_number);
}
-
+
av_free(swf->audio_fifo);
return 0;
@@ -697,13 +697,13 @@ static int swf_write_trailer(AVFormatContext *s)
/* Extract FLV encoded frame and MP3 from swf
Note that the detection of the real frame
is inaccurate at this point as it can be
- quite tricky to determine, you almost certainly
+ quite tricky to determine, you almost certainly
will get a bad audio/video sync */
static int get_swf_tag(ByteIOContext *pb, int *len_ptr)
{
int tag, len;
-
+
if (url_feof(pb))
return -1;
@@ -761,9 +761,9 @@ static int swf_read_header(AVFormatContext *s, AVFormatParameters *ap)
url_fskip(pb, len);
frame_rate = get_le16(pb);
get_le16(pb); /* frame count */
-
- /* The Flash Player converts 8.8 frame rates
- to milliseconds internally. Do the same to get
+
+ /* The Flash Player converts 8.8 frame rates
+ to milliseconds internally. Do the same to get
a correct framerate */
swf->ms_per_frame = ( 1000 * 256 ) / frame_rate;
swf->samples_per_frame = 0;
@@ -793,7 +793,7 @@ static int swf_read_header(AVFormatContext *s, AVFormatParameters *ap)
if ( get_byte(pb) == SWF_VIDEO_CODEC_FLV1 ) {
vst = av_new_stream(s, 0);
av_set_pts_info(vst, 24, 1, 1000); /* 24 bit pts in ms */
-
+
vst->codec->codec_type = CODEC_TYPE_VIDEO;
vst->codec->codec_id = CODEC_ID_FLV1;
if ( swf->samples_per_frame ) {
@@ -845,7 +845,7 @@ static int swf_read_header(AVFormatContext *s, AVFormatParameters *ap)
}
}
url_fseek(pb, firstTagOff, SEEK_SET);
-
+
return 0;
}
@@ -855,10 +855,10 @@ static int swf_read_packet(AVFormatContext *s, AVPacket *pkt)
ByteIOContext *pb = &s->pb;
AVStream *st = 0;
int tag, len, i, frame;
-
+
for(;;) {
tag = get_swf_tag(pb, &len);
- if (tag < 0)
+ if (tag < 0)
return AVERROR_IO;
if (tag == TAG_VIDEOFRAME) {
for( i=0; i<s->nb_streams; i++ ) {
@@ -875,7 +875,7 @@ static int swf_read_packet(AVFormatContext *s, AVPacket *pkt)
continue;
}
}
- }
+ }
url_fskip(pb, len);
} else if (tag == TAG_STREAMBLOCK) {
for( i=0; i<s->nb_streams; i++ ) {
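
swf_mp3_info(), touched in the hunks above, validates MPEG audio frame headers before copying them out of the audio FIFO; the visible lines show the 11-bit sync check and the padding bit. The sketch below illustrates those fixed-offset field extractions; the function name is mine, and the bitrate/sample-rate table lookups needed for the actual frame size are omitted.

#include <stdint.h>

/* Sketch only: sanity-check an MPEG audio frame header (32 bits, big endian)
 * and pull out the fields at fixed offsets.  Returns 1 if plausible. */
static int mpa_header_fields(uint32_t header,
                             int *layer, int *bitrate_idx,
                             int *samplerate_idx, int *padding)
{
    if (((header >> 21) & 0x7ff) != 0x7ff)      /* 11-bit sync word */
        return 0;

    *layer          = 4 - ((header >> 17) & 0x03); /* 1 = Layer I, 3 = Layer III, 4 = reserved */
    *bitrate_idx    = (header >> 12) & 0x0f;       /* 0x0f is invalid */
    *samplerate_idx = (header >> 10) & 0x03;       /* 3 is reserved   */
    *padding        = (header >>  9) & 0x01;       /* padded frame flag */

    return *layer <= 3 && *bitrate_idx != 0x0f && *samplerate_idx != 3;
}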
diff --git a/libavformat/tcp.c b/libavformat/tcp.c
index 86a3cfb6c4..3928200a88 100644
--- a/libavformat/tcp.c
+++ b/libavformat/tcp.c
@@ -68,15 +68,15 @@ static int tcp_open(URLContext *h, const char *uri, int flags)
&port, path, sizeof(path), uri); // PETR: use url_split
if (strcmp(proto,"tcp")) goto fail; // PETR: check protocol
if ((q = strchr(hostname,'@'))) { strcpy(tmp,q+1); strcpy(hostname,tmp); } // PETR: take only the part after '@' for tcp protocol
-
+
s = av_malloc(sizeof(TCPContext));
if (!s)
return -ENOMEM;
h->priv_data = s;
-
+
if (port <= 0 || port >= 65536)
goto fail;
-
+
dest_addr.sin_family = AF_INET;
dest_addr.sin_port = htons(port);
if (resolve_host(&dest_addr.sin_addr, hostname) < 0)
@@ -86,9 +86,9 @@ static int tcp_open(URLContext *h, const char *uri, int flags)
if (fd < 0)
goto fail;
fcntl(fd, F_SETFL, O_NONBLOCK);
-
+
redo:
- ret = connect(fd, (struct sockaddr *)&dest_addr,
+ ret = connect(fd, (struct sockaddr *)&dest_addr,
sizeof(dest_addr));
if (ret < 0) {
if (errno == EINTR)
@@ -111,7 +111,7 @@ static int tcp_open(URLContext *h, const char *uri, int flags)
if (ret > 0 && FD_ISSET(fd, &wfds))
break;
}
-
+
/* test error */
optlen = sizeof(ret);
getsockopt (fd, SOL_SOCKET, SO_ERROR, &ret, &optlen);
diff --git a/libavformat/udp.c b/libavformat/udp.c
index ff2885b19b..8e8db698e0 100644
--- a/libavformat/udp.c
+++ b/libavformat/udp.c
@@ -53,9 +53,9 @@ typedef struct {
int udp_ipv6_is_multicast_address(const struct sockaddr *addr) {
if (addr->sa_family == AF_INET)
- return IN_MULTICAST(ntohl(((struct sockaddr_in *)addr)->sin_addr.s_addr));
+ return IN_MULTICAST(ntohl(((struct sockaddr_in *)addr)->sin_addr.s_addr));
if (addr->sa_family == AF_INET6)
- return IN6_IS_ADDR_MULTICAST(&((struct sockaddr_in6 *)addr)->sin6_addr);
+ return IN6_IS_ADDR_MULTICAST(&((struct sockaddr_in6 *)addr)->sin6_addr);
return -1;
}
@@ -77,7 +77,7 @@ int udp_ipv6_set_multicast_ttl(int sockfd, int mcastTTL, struct sockaddr *addr)
int udp_ipv6_join_multicast_group(int sockfd, struct sockaddr *addr) {
struct ip_mreq mreq;
- struct ipv6_mreq mreq6;
+ struct ipv6_mreq mreq6;
if (addr->sa_family == AF_INET) {
mreq.imr_multiaddr.s_addr = ((struct sockaddr_in *)addr)->sin_addr.s_addr;
mreq.imr_interface.s_addr= INADDR_ANY;
@@ -99,7 +99,7 @@ int udp_ipv6_join_multicast_group(int sockfd, struct sockaddr *addr) {
int udp_ipv6_leave_multicast_group(int sockfd, struct sockaddr *addr) {
struct ip_mreq mreq;
- struct ipv6_mreq mreq6;
+ struct ipv6_mreq mreq6;
if (addr->sa_family == AF_INET) {
mreq.imr_multiaddr.s_addr = ((struct sockaddr_in *)addr)->sin_addr.s_addr;
mreq.imr_interface.s_addr= INADDR_ANY;
@@ -136,7 +136,7 @@ struct addrinfo* udp_ipv6_resolve_host(const char *hostname, int port, int type,
memset(&hints, 0, sizeof(hints));
hints.ai_socktype = type;
hints.ai_family = family;
- hints.ai_flags = flags;
+ hints.ai_flags = flags;
if ((error = getaddrinfo(node, service, &hints, &res))) {
av_log(NULL, AV_LOG_ERROR, "udp_ipv6_resolve_host: %s\n", gai_strerror(error));
}
@@ -166,25 +166,25 @@ int udp_ipv6_set_local(URLContext *h) {
char sbuf[NI_MAXSERV];
char hbuf[NI_MAXHOST];
struct addrinfo *res0 = NULL, *res = NULL;
-
- if (s->local_port != 0) {
+
+ if (s->local_port != 0) {
res0 = udp_ipv6_resolve_host(0, s->local_port, SOCK_DGRAM, AF_UNSPEC, AI_PASSIVE);
if (res0 == 0)
goto fail;
- for (res = res0; res; res=res->ai_next) {
+ for (res = res0; res; res=res->ai_next) {
udp_fd = socket(res->ai_family, SOCK_DGRAM, 0);
if (udp_fd > 0) break;
perror("socket");
}
} else {
udp_fd = socket(s->dest_addr.ss_family, SOCK_DGRAM, 0);
- if (udp_fd < 0)
+ if (udp_fd < 0)
perror("socket");
}
if (udp_fd < 0)
goto fail;
-
+
if (s->local_port != 0) {
if (bind(udp_fd, res0->ai_addr, res0->ai_addrlen) < 0) {
perror("bind");
@@ -192,7 +192,7 @@ int udp_ipv6_set_local(URLContext *h) {
}
freeaddrinfo(res0);
res0 = NULL;
- }
+ }
addrlen = sizeof(clientaddr);
if (getsockname(udp_fd, (struct sockaddr *)&clientaddr, &addrlen) < 0) {
@@ -206,9 +206,9 @@ int udp_ipv6_set_local(URLContext *h) {
}
s->local_port = strtol(sbuf, NULL, 10);
-
+
return udp_fd;
-
+
fail:
if (udp_fd >= 0)
#ifdef CONFIG_BEOS_NETSERVER
@@ -230,7 +230,7 @@ int udp_ipv6_set_local(URLContext *h) {
* the remote server address.
*
* url syntax: udp://host:port[?option=val...]
- * option: 'multicast=1' : enable multicast
+ * option: 'multicast=1' : enable multicast
* 'ttl=n' : set the ttl value (for multicast only)
* 'localport=n' : set the local port
* 'pkt_size=n' : set max packet size
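 * As a concrete illustration of that syntax (address and values made up), a
 * multicast sender bound to local port 6000, with a TTL of 16 and 1316-byte
 * packets, would be opened with a URL like:
 *
 *     udp://224.1.1.1:5000?multicast=1&ttl=16&localport=6000&pkt_size=1316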
@@ -247,7 +247,7 @@ int udp_set_remote_url(URLContext *h, const char *uri)
UDPContext *s = h->priv_data;
char hostname[256];
int port;
-
+
url_split(NULL, 0, NULL, 0, hostname, sizeof(hostname), &port, NULL, 0, uri);
/* set the destination address */
@@ -300,7 +300,7 @@ static int udp_open(URLContext *h, const char *uri, int flags)
h->max_packet_size = 1472;
is_output = (flags & URL_WRONLY);
-
+
s = av_malloc(sizeof(UDPContext));
if (!s)
return -ENOMEM;
@@ -325,7 +325,7 @@ static int udp_open(URLContext *h, const char *uri, int flags)
/* fill the dest addr */
url_split(NULL, 0, NULL, 0, hostname, sizeof(hostname), &port, NULL, 0, uri);
-
+
/* XXX: fix url_split */
if (hostname[0] == '\0' || hostname[0] == '?') {
/* only accepts null hostname if input */
@@ -350,7 +350,7 @@ static int udp_open(URLContext *h, const char *uri, int flags)
}
/* the bind is needed to give a port to the socket now */
- if (bind(udp_fd,(struct sockaddr *)&my_addr, sizeof(my_addr)) < 0)
+ if (bind(udp_fd,(struct sockaddr *)&my_addr, sizeof(my_addr)) < 0)
goto fail;
len = sizeof(my_addr1);
@@ -361,7 +361,7 @@ static int udp_open(URLContext *h, const char *uri, int flags)
if (s->is_multicast) {
if (h->flags & URL_WRONLY) {
/* output */
- if (setsockopt(udp_fd, IPPROTO_IP, IP_MULTICAST_TTL,
+ if (setsockopt(udp_fd, IPPROTO_IP, IP_MULTICAST_TTL,
&s->ttl, sizeof(s->ttl)) < 0) {
perror("IP_MULTICAST_TTL");
goto fail;
@@ -371,7 +371,7 @@ static int udp_open(URLContext *h, const char *uri, int flags)
memset(&s->mreq, 0, sizeof(s->mreq));
s->mreq.imr_multiaddr = s->dest_addr.sin_addr;
s->mreq.imr_interface.s_addr = htonl (INADDR_ANY);
- if (setsockopt(udp_fd, IPPROTO_IP, IP_ADD_MEMBERSHIP,
+ if (setsockopt(udp_fd, IPPROTO_IP, IP_ADD_MEMBERSHIP,
&s->mreq, sizeof(s->mreq)) < 0) {
perror("rtp: IP_ADD_MEMBERSHIP");
goto fail;
@@ -395,7 +395,7 @@ static int udp_open(URLContext *h, const char *uri, int flags)
goto fail;
}
}
-#endif
+#endif
#endif
if (is_output) {
@@ -450,7 +450,7 @@ static int udp_write(URLContext *h, uint8_t *buf, int size)
int ret;
for(;;) {
- ret = sendto (s->udp_fd, buf, size, 0,
+ ret = sendto (s->udp_fd, buf, size, 0,
(struct sockaddr *) &s->dest_addr,
#ifndef CONFIG_IPV6
sizeof (s->dest_addr));
@@ -474,7 +474,7 @@ static int udp_close(URLContext *h)
#ifndef CONFIG_BEOS_NETSERVER
#ifndef CONFIG_IPV6
if (s->is_multicast && !(h->flags & URL_WRONLY)) {
- if (setsockopt(s->udp_fd, IPPROTO_IP, IP_DROP_MEMBERSHIP,
+ if (setsockopt(s->udp_fd, IPPROTO_IP, IP_DROP_MEMBERSHIP,
&s->mreq, sizeof(s->mreq)) < 0) {
perror("IP_DROP_MEMBERSHIP");
}
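The multicast handling touched in the hunks above reduces, for IPv4, to a pair of setsockopt() calls on join and on close. A small standalone sketch, assuming an already created UDP socket (the mcast_join()/mcast_leave() helpers are illustrative and not part of libavformat):

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <string.h>
    #include <sys/socket.h>

    /* join the IPv4 group "group" (e.g. "224.1.1.1") on the default interface */
    static int mcast_join(int fd, const char *group)
    {
        struct ip_mreq mreq;
        memset(&mreq, 0, sizeof(mreq));
        mreq.imr_multiaddr.s_addr = inet_addr(group);
        mreq.imr_interface.s_addr = htonl(INADDR_ANY);
        return setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq));
    }

    /* mirror image of mcast_join(), as done in udp_close() */
    static int mcast_leave(int fd, const char *group)
    {
        struct ip_mreq mreq;
        memset(&mreq, 0, sizeof(mreq));
        mreq.imr_multiaddr.s_addr = inet_addr(group);
        mreq.imr_interface.s_addr = htonl(INADDR_ANY);
        return setsockopt(fd, IPPROTO_IP, IP_DROP_MEMBERSHIP, &mreq, sizeof(mreq));
    }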
diff --git a/libavformat/utils.c b/libavformat/utils.c
index 83887a5996..37995e1fc6 100644
--- a/libavformat/utils.c
+++ b/libavformat/utils.c
@@ -58,19 +58,19 @@ int match_ext(const char *filename, const char *extensions)
if(!filename)
return 0;
-
+
ext = strrchr(filename, '.');
if (ext) {
ext++;
p = extensions;
for(;;) {
q = ext1;
- while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
+ while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
*q++ = *p++;
*q = '\0';
- if (!strcasecmp(ext1, ext))
+ if (!strcasecmp(ext1, ext))
return 1;
- if (*p == '\0')
+ if (*p == '\0')
break;
p++;
}
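For example, match_ext("clip.MOV", "mov,mp4,m4a") returns 1 — the comparison goes through strcasecmp(), so case does not matter — while match_ext("clip.mpg", "mov,mp4") returns 0 because no comma-separated entry matches.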
@@ -78,19 +78,19 @@ int match_ext(const char *filename, const char *extensions)
return 0;
}
-AVOutputFormat *guess_format(const char *short_name, const char *filename,
+AVOutputFormat *guess_format(const char *short_name, const char *filename,
const char *mime_type)
{
AVOutputFormat *fmt, *fmt_found;
int score_max, score;
/* specific test for image sequences */
- if (!short_name && filename &&
+ if (!short_name && filename &&
filename_number_test(filename) >= 0 &&
av_guess_image2_codec(filename) != CODEC_ID_NONE) {
return guess_format("image2", NULL, NULL);
}
- if (!short_name && filename &&
+ if (!short_name && filename &&
filename_number_test(filename) >= 0 &&
guess_image_format(filename)) {
return guess_format("image", NULL, NULL);
@@ -106,7 +106,7 @@ AVOutputFormat *guess_format(const char *short_name, const char *filename,
score += 100;
if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
score += 10;
- if (filename && fmt->extensions &&
+ if (filename && fmt->extensions &&
match_ext(filename, fmt->extensions)) {
score += 5;
}
@@ -117,9 +117,9 @@ AVOutputFormat *guess_format(const char *short_name, const char *filename,
fmt = fmt->next;
}
return fmt_found;
-}
+}
-AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
+AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
const char *mime_type)
{
AVOutputFormat *fmt = guess_format(short_name, filename, mime_type);
@@ -141,7 +141,7 @@ AVOutputFormat *guess_stream_format(const char *short_name, const char *filename
/**
* Guesses the codec id based upon muxer and filename.
*/
-enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
+enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
const char *filename, const char *mime_type, enum CodecType type){
if(type == CODEC_TYPE_VIDEO){
enum CodecID codec_id= CODEC_ID_NONE;
@@ -193,14 +193,14 @@ int av_new_packet(AVPacket *pkt, int size)
{
void *data;
if((unsigned)size > (unsigned)size + FF_INPUT_BUFFER_PADDING_SIZE)
- return AVERROR_NOMEM;
+ return AVERROR_NOMEM;
data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
if (!data)
return AVERROR_NOMEM;
memset(data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
av_init_packet(pkt);
- pkt->data = data;
+ pkt->data = data;
pkt->size = size;
pkt->destruct = av_destruct_packet;
return 0;
@@ -240,7 +240,7 @@ int av_dup_packet(AVPacket *pkt)
/* we duplicate the packet and don't forget to put the padding
again */
if((unsigned)pkt->size > (unsigned)pkt->size + FF_INPUT_BUFFER_PADDING_SIZE)
- return AVERROR_NOMEM;
+ return AVERROR_NOMEM;
data = av_malloc(pkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
if (!data) {
return AVERROR_NOMEM;
@@ -273,7 +273,7 @@ void fifo_free(FifoBuffer *f)
int fifo_size(FifoBuffer *f, uint8_t *rptr)
{
int size;
-
+
if(!rptr)
rptr= f->rptr;
@@ -302,7 +302,7 @@ int fifo_read(FifoBuffer *f, uint8_t *buf, int buf_size, uint8_t **rptr_ptr)
} else {
size = (f->end - rptr) + (f->wptr - f->buffer);
}
-
+
if (size < buf_size)
return -1;
while (buf_size > 0) {
@@ -325,7 +325,7 @@ int fifo_read(FifoBuffer *f, uint8_t *buf, int buf_size, uint8_t **rptr_ptr)
*/
void fifo_realloc(FifoBuffer *f, unsigned int new_size){
unsigned int old_size= f->end - f->buffer;
-
+
if(old_size < new_size){
uint8_t *old= f->buffer;
@@ -376,7 +376,7 @@ int put_fifo(ByteIOContext *pb, FifoBuffer *f, int buf_size, uint8_t **rptr_ptr)
} else {
size = (f->end - rptr) + (f->wptr - f->buffer);
}
-
+
if (size < buf_size)
return -1;
while (buf_size > 0) {
@@ -421,7 +421,7 @@ AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened)
if (match_ext(pd->filename, fmt1->extensions)) {
score = 50;
}
- }
+ }
if (score > score_max) {
score_max = score;
fmt = fmt1;
@@ -459,8 +459,8 @@ AVFormatContext *av_alloc_format_context(void)
* Allocates all the structures needed to read an input stream.
* This does not open the needed codecs for decoding the stream[s].
*/
-int av_open_input_stream(AVFormatContext **ic_ptr,
- ByteIOContext *pb, const char *filename,
+int av_open_input_stream(AVFormatContext **ic_ptr,
+ ByteIOContext *pb, const char *filename,
AVInputFormat *fmt, AVFormatParameters *ap)
{
int err;
@@ -521,7 +521,7 @@ int av_open_input_stream(AVFormatContext **ic_ptr,
* @param ap additionnal parameters needed when opening the file (NULL if default)
* @return 0 if OK. AVERROR_xxx otherwise.
*/
-int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
+int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
AVInputFormat *fmt,
int buf_size,
AVFormatParameters *ap)
@@ -530,7 +530,7 @@ int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
uint8_t buf[PROBE_BUF_SIZE];
AVProbeData probe_data, *pd = &probe_data;
ByteIOContext pb1, *pb = &pb1;
-
+
file_opened = 0;
pd->filename = "";
if (filename)
@@ -573,7 +573,7 @@ int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
}
}
}
-
+
/* guess file format */
if (!fmt) {
fmt = av_probe_input_format(pd, 1);
@@ -584,7 +584,7 @@ int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
err = AVERROR_NOFMT;
goto fail;
}
-
+
/* XXX: suppress this hack for redirectors */
#ifdef CONFIG_NETWORK
if (fmt == &redir_demux) {
@@ -596,7 +596,7 @@ int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
/* check filename in case of an image number is expected */
if (fmt->flags & AVFMT_NEEDNUMBER) {
- if (filename_number_test(filename) < 0) {
+ if (filename_number_test(filename) < 0) {
err = AVERROR_NUMEXPECTED;
goto fail;
}
@@ -610,7 +610,7 @@ int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
url_fclose(pb);
*ic_ptr = NULL;
return err;
-
+
}
/*******************************************************/
@@ -620,10 +620,10 @@ int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
*
* This function is absolete and should never be used.
* Use av_read_frame() instead.
- *
+ *
* @param s media file handle
- * @param pkt is filled
- * @return 0 if OK. AVERROR_xxx if error.
+ * @param pkt is filled
+ * @return 0 if OK. AVERROR_xxx if error.
*/
int av_read_packet(AVFormatContext *s, AVPacket *pkt)
{
@@ -693,7 +693,7 @@ static int get_audio_frame_size(AVCodecContext *enc, int size)
/**
* Return the frame duration in seconds, return 0 if not available.
*/
-static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
+static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
AVCodecParserContext *pc, AVPacket *pkt)
{
int frame_size;
@@ -754,7 +754,7 @@ static int64_t lsb2full(int64_t lsb, int64_t last_ts, int lsb_bits){
return ((lsb - delta)&mask) + delta;
}
-static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
+static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
AVCodecParserContext *pc, AVPacket *pkt)
{
int num, den, presentation_delayed;
@@ -765,7 +765,7 @@ static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
if(pkt->dts != AV_NOPTS_VALUE)
pkt->dts= lsb2full(pkt->dts, st->cur_dts, st->pts_wrap_bits);
}
-
+
if (pkt->duration == 0) {
compute_frame_duration(&num, &den, st, pc, pkt);
if (den && num) {
@@ -781,15 +781,15 @@ static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
/* XXX: need has_b_frame, but cannot get it if the codec is
not initialized */
- if (( st->codec->codec_id == CODEC_ID_H264
- || st->codec->has_b_frames) &&
+ if (( st->codec->codec_id == CODEC_ID_H264
+ || st->codec->has_b_frames) &&
pc && pc->pict_type != FF_B_TYPE)
presentation_delayed = 1;
/* this may be redundant, but it shouldnt hurt */
if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
presentation_delayed = 1;
}
-
+
if(st->cur_dts == AV_NOPTS_VALUE){
if(presentation_delayed) st->cur_dts = -pkt->duration;
else st->cur_dts = 0;
@@ -828,7 +828,7 @@ static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
// av_log(NULL, AV_LOG_DEBUG, "id:%d old:%Ld new:%Ld dur:%d cur:%Ld size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
}
}
-
+
/* presentation is not delayed : PTS and DTS are the same */
if (pkt->pts == AV_NOPTS_VALUE) {
if (pkt->dts == AV_NOPTS_VALUE) {
@@ -846,7 +846,7 @@ static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
st->cur_dts += pkt->duration;
}
// av_log(NULL, AV_LOG_DEBUG, "OUTdelayed:%d pts:%lld, dts:%lld cur_dts:%lld\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts);
-
+
/* update flags */
if (pc) {
pkt->flags = 0;
@@ -887,7 +887,7 @@ static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
s->cur_st = NULL;
return 0;
} else if (s->cur_len > 0 && st->discard < AVDISCARD_ALL) {
- len = av_parser_parse(st->parser, st->codec, &pkt->data, &pkt->size,
+ len = av_parser_parse(st->parser, st->codec, &pkt->data, &pkt->size,
s->cur_ptr, s->cur_len,
s->cur_pkt.pts, s->cur_pkt.dts);
s->cur_pkt.pts = AV_NOPTS_VALUE;
@@ -895,7 +895,7 @@ static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
/* increment read pointer */
s->cur_ptr += len;
s->cur_len -= len;
-
+
/* return packet if any */
if (pkt->size) {
got_packet:
@@ -909,7 +909,7 @@ static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
}
} else {
/* free packet */
- av_free_packet(&s->cur_pkt);
+ av_free_packet(&s->cur_pkt);
s->cur_st = NULL;
}
} else {
@@ -922,9 +922,9 @@ static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
for(i = 0; i < s->nb_streams; i++) {
st = s->streams[i];
if (st->parser && st->need_parsing) {
- av_parser_parse(st->parser, st->codec,
- &pkt->data, &pkt->size,
- NULL, 0,
+ av_parser_parse(st->parser, st->codec,
+ &pkt->data, &pkt->size,
+ NULL, 0,
AV_NOPTS_VALUE, AV_NOPTS_VALUE);
if (pkt->size)
goto got_packet;
@@ -933,7 +933,7 @@ static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
/* no more packets: really terminates parsing */
return ret;
}
-
+
st = s->streams[s->cur_pkt.stream_index];
s->cur_st = st;
@@ -962,13 +962,13 @@ static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
* frames if each frame has a known fixed size (e.g. PCM or ADPCM
* data). If the audio frames have a variable size (e.g. MPEG audio),
* then it contains one frame.
- *
+ *
* pkt->pts, pkt->dts and pkt->duration are always set to correct
* values in AV_TIME_BASE unit (and guessed if the format cannot
* provided them). pkt->pts can be AV_NOPTS_VALUE if the video format
* has B frames, so it is better to rely on pkt->dts if you do not
* decompress the payload.
- *
+ *
* @return 0 if OK, < 0 if error or end of file.
*/
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
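The comment above describes the contract of av_read_frame(); a typical demuxing loop built on it looks like the sketch below, where ic is assumed to be an AVFormatContext already opened with av_open_input_file() and av_find_stream_info(), and the printf() is a stand-in for real packet handling:

    #include <stdio.h>
    #include "avformat.h"

    static void demux_all(AVFormatContext *ic)
    {
        AVPacket pkt;

        while (av_read_frame(ic, &pkt) >= 0) {
            /* with B frames pkt.pts may be AV_NOPTS_VALUE, so prefer pkt.dts */
            printf("stream %d dts %lld size %d\n",
                   pkt.stream_index, (long long)pkt.dts, pkt.size);
            av_free_packet(&pkt);
        }
    }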
@@ -984,7 +984,7 @@ int av_read_frame(AVFormatContext *s, AVPacket *pkt)
if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
- if( pktl->pkt.stream_index == next_pkt->stream_index
+ if( pktl->pkt.stream_index == next_pkt->stream_index
&& next_pkt->dts < pktl->pkt.dts
&& pktl->pkt.pts != pktl->pkt.dts //not b frame
/*&& pktl->pkt.dts != AV_NOPTS_VALUE*/){
@@ -994,9 +994,9 @@ int av_read_frame(AVFormatContext *s, AVPacket *pkt)
}
pktl = s->packet_buffer;
}
-
- if( next_pkt->pts != AV_NOPTS_VALUE
- || next_pkt->dts == AV_NOPTS_VALUE
+
+ if( next_pkt->pts != AV_NOPTS_VALUE
+ || next_pkt->dts == AV_NOPTS_VALUE
|| !genpts || eof){
/* read packet from packet buffer, if there is data */
*pkt = *next_pkt;
@@ -1015,20 +1015,20 @@ int av_read_frame(AVFormatContext *s, AVPacket *pkt)
}else
return ret;
}
-
+
/* duplicate the packet */
if (av_dup_packet(pkt) < 0)
return AVERROR_NOMEM;
while(*plast_pktl) plast_pktl= &(*plast_pktl)->next; //FIXME maybe maintain pointer to the last?
-
+
pktl = av_mallocz(sizeof(AVPacketList));
if (!pktl)
return AVERROR_NOMEM;
-
+
/* add the packet in the buffered packet list */
*plast_pktl = pktl;
- pktl->pkt= *pkt;
+ pktl->pkt= *pkt;
}else{
assert(!s->packet_buffer);
return av_read_frame_internal(s, pkt);
@@ -1043,7 +1043,7 @@ static void flush_packet_queue(AVFormatContext *s)
for(;;) {
pktl = s->packet_buffer;
- if (!pktl)
+ if (!pktl)
break;
s->packet_buffer = pktl->next;
av_free_packet(&pktl->pkt);
@@ -1089,11 +1089,11 @@ static void av_read_frame_flush(AVFormatContext *s)
/* fail safe */
s->cur_ptr = NULL;
s->cur_len = 0;
-
+
/* for each stream, reset read state */
for(i = 0; i < s->nb_streams; i++) {
st = s->streams[i];
-
+
if (st->parser) {
av_parser_close(st->parser);
st->parser = NULL;
@@ -1117,7 +1117,7 @@ static void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t time
for(i = 0; i < s->nb_streams; i++) {
AVStream *st = s->streams[i];
- st->cur_dts = av_rescale(timestamp,
+ st->cur_dts = av_rescale(timestamp,
st->time_base.den * (int64_t)ref_st->time_base.num,
st->time_base.num * (int64_t)ref_st->time_base.den);
}
@@ -1133,13 +1133,13 @@ int av_add_index_entry(AVStream *st,
{
AVIndexEntry *entries, *ie;
int index;
-
+
if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
return -1;
-
+
entries = av_fast_realloc(st->index_entries,
&st->index_entries_allocated_size,
- (st->nb_index_entries + 1) *
+ (st->nb_index_entries + 1) *
sizeof(AVIndexEntry));
if(!entries)
return -1;
@@ -1167,7 +1167,7 @@ int av_add_index_entry(AVStream *st,
ie->timestamp = timestamp;
ie->min_distance= distance;
ie->flags = flags;
-
+
return index;
}
@@ -1190,7 +1190,7 @@ static void av_build_index_raw(AVFormatContext *s)
break;
if (pkt->stream_index == 0 && st->parser &&
(pkt->flags & PKT_FLAG_KEY)) {
- av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
+ av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
0, AVINDEX_KEYFRAME);
}
av_free_packet(pkt);
@@ -1216,8 +1216,8 @@ static int is_raw_stream(AVFormatContext *s)
/**
* Gets the index for a specific timestamp.
- * @param flags if AVSEEK_FLAG_BACKWARD then the returned index will correspond to
- * the timestamp which is <= the requested one, if backward is 0
+ * @param flags if AVSEEK_FLAG_BACKWARD then the returned index will correspond to
+ * the timestamp which is <= the requested one, if backward is 0
* then it will be >=
* if AVSEEK_FLAG_ANY seek to any frame, only keyframes otherwise
* @return < 0 if no such timestamp could be found
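In other words, given a target timestamp ts in st->time_base units, a caller that wants the last keyframe at or before ts would do roughly the following (hypothetical fragment):

    int idx = av_index_search_timestamp(st, ts, AVSEEK_FLAG_BACKWARD);
    if (idx >= 0) {
        int64_t pos    = st->index_entries[idx].pos;       /* byte offset of the keyframe */
        int64_t key_ts = st->index_entries[idx].timestamp;
        /* ... seek to pos and decode forward from key_ts up to ts ... */
    }

Passing 0 instead of AVSEEK_FLAG_BACKWARD selects the first keyframe at or after ts, and adding AVSEEK_FLAG_ANY drops the keyframe restriction.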
@@ -1242,14 +1242,14 @@ int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
a = m;
}
m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
-
+
if(!(flags & AVSEEK_FLAG_ANY)){
while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
}
}
- if(m == nb_entries)
+ if(m == nb_entries)
return -1;
return m;
}
@@ -1272,7 +1272,7 @@ int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts
if (stream_index < 0)
return -1;
-
+
#ifdef DEBUG_SEEK
av_log(s, AV_LOG_DEBUG, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
#endif
@@ -1293,14 +1293,14 @@ int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts
pos_min= e->pos;
ts_min= e->timestamp;
#ifdef DEBUG_SEEK
- av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
+ av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
pos_min,ts_min);
#endif
}else{
assert(index==0);
}
-
- index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
+
+ index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
assert(index < st->nb_index_entries);
if(index >= 0){
e= &st->index_entries[index];
@@ -1309,7 +1309,7 @@ int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts
ts_max= e->timestamp;
pos_limit= pos_max - e->min_distance;
#ifdef DEBUG_SEEK
- av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
+ av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
pos_max,pos_limit, ts_max);
#endif
}
@@ -1333,7 +1333,7 @@ int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts
}while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
if (ts_max == AV_NOPTS_VALUE)
return -1;
-
+
for(;;){
int64_t tmp_pos= pos_max + 1;
int64_t tmp_ts= avif->read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
@@ -1350,7 +1350,7 @@ int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts
no_change=0;
while (pos_min < pos_limit) {
#ifdef DEBUG_SEEK
- av_log(s, AV_LOG_DEBUG, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
+ av_log(s, AV_LOG_DEBUG, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
pos_min, pos_max,
ts_min, ts_max);
#endif
@@ -1393,7 +1393,7 @@ av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"P
ts_min = ts;
}
}
-
+
pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
#ifdef DEBUG_SEEK
@@ -1401,7 +1401,7 @@ av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"P
ts_min = avif->read_timestamp(s, stream_index, &pos_min, INT64_MAX);
pos_min++;
ts_max = avif->read_timestamp(s, stream_index, &pos_min, INT64_MAX);
- av_log(s, AV_LOG_DEBUG, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
+ av_log(s, AV_LOG_DEBUG, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
pos, ts_min, target_ts, ts_max);
#endif
/* do the seek */
@@ -1437,7 +1437,7 @@ static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos,
return 0;
}
-static int av_seek_frame_generic(AVFormatContext *s,
+static int av_seek_frame_generic(AVFormatContext *s,
int stream_index, int64_t timestamp, int flags)
{
int index;
@@ -1472,7 +1472,7 @@ static int av_seek_frame_generic(AVFormatContext *s,
* Seek to the key frame at timestamp.
* 'timestamp' in 'stream_index'.
* @param stream_index If stream_index is (-1), a default
- * stream is selected, and timestamp is automatically converted
+ * stream is selected, and timestamp is automatically converted
* from AV_TIME_BASE units to the stream specific time_base.
* @param timestamp timestamp in AVStream.time_base units
* or if there is no stream specified then in AV_TIME_BASE units
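Following that description, seeking to the 30 second mark while letting the library pick the default stream (so the timestamp is given in AV_TIME_BASE units) would look like this hypothetical call:

    if (av_seek_frame(ic, -1, 30 * (int64_t)AV_TIME_BASE, AVSEEK_FLAG_BACKWARD) < 0)
        av_log(ic, AV_LOG_ERROR, "could not seek to 30s\n");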
@@ -1483,17 +1483,17 @@ int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int f
{
int ret;
AVStream *st;
-
+
av_read_frame_flush(s);
-
+
if(flags & AVSEEK_FLAG_BYTE)
return av_seek_frame_byte(s, stream_index, timestamp, flags);
-
+
if(stream_index < 0){
stream_index= av_find_default_stream_index(s);
if(stream_index < 0)
return -1;
-
+
st= s->streams[stream_index];
/* timestamp for default must be expressed in AV_TIME_BASE units */
timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
@@ -1569,7 +1569,7 @@ static void av_update_stream_timings(AVFormatContext *ic)
ic->duration = end_time - start_time;
if (ic->file_size > 0) {
/* compute the bit rate */
- ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
+ ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
(double)ic->duration;
}
}
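As a worked example of that formula: a 4500000 byte file with a 30 second duration (duration == 30 * AV_TIME_BASE) yields 4500000 * 8 / 30 = 1200000 bit/s, i.e. a bit_rate of 1200 kbit/s.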
@@ -1611,8 +1611,8 @@ static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
}
/* if duration is already set, we believe it */
- if (ic->duration == AV_NOPTS_VALUE &&
- ic->bit_rate != 0 &&
+ if (ic->duration == AV_NOPTS_VALUE &&
+ ic->bit_rate != 0 &&
ic->file_size != 0) {
filesize = ic->file_size;
if (filesize > 0) {
@@ -1639,10 +1639,10 @@ static void av_estimate_timings_from_pts(AVFormatContext *ic)
int read_size, i, ret;
int64_t end_time;
int64_t filesize, offset, duration;
-
+
/* free previous packet */
if (ic->cur_st && ic->cur_st->parser)
- av_free_packet(&ic->cur_pkt);
+ av_free_packet(&ic->cur_pkt);
ic->cur_st = NULL;
/* flush packet queue */
@@ -1655,7 +1655,7 @@ static void av_estimate_timings_from_pts(AVFormatContext *ic)
st->parser= NULL;
}
}
-
+
/* we read the first packets to get the first PTS (not fully
accurate, but it is enough now) */
url_fseek(&ic->pb, 0, SEEK_SET);
@@ -1704,7 +1704,7 @@ static void av_estimate_timings_from_pts(AVFormatContext *ic)
}
if (i == ic->nb_streams)
break;
-
+
ret = av_read_packet(ic, pkt);
if (ret != 0)
break;
@@ -1721,7 +1721,7 @@ static void av_estimate_timings_from_pts(AVFormatContext *ic)
}
av_free_packet(pkt);
}
-
+
fill_all_stream_timings(ic);
url_fseek(&ic->pb, 0, SEEK_SET);
@@ -1760,12 +1760,12 @@ static void av_estimate_timings(AVFormatContext *ic)
AVStream *st;
for(i = 0;i < ic->nb_streams; i++) {
st = ic->streams[i];
- printf("%d: start_time: %0.3f duration: %0.3f\n",
- i, (double)st->start_time / AV_TIME_BASE,
+ printf("%d: start_time: %0.3f duration: %0.3f\n",
+ i, (double)st->start_time / AV_TIME_BASE,
(double)st->duration / AV_TIME_BASE);
}
- printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
- (double)ic->start_time / AV_TIME_BASE,
+ printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
+ (double)ic->start_time / AV_TIME_BASE,
(double)ic->duration / AV_TIME_BASE,
ic->bit_rate / 1000);
}
@@ -1795,7 +1795,7 @@ static int try_decode_frame(AVStream *st, const uint8_t *data, int size)
AVCodec *codec;
int got_picture, ret=0;
AVFrame picture;
-
+
if(!st->codec->codec){
codec = avcodec_find_decoder(st->codec->codec_id);
if (!codec)
@@ -1808,14 +1808,14 @@ static int try_decode_frame(AVStream *st, const uint8_t *data, int size)
if(!has_codec_parameters(st->codec)){
switch(st->codec->codec_type) {
case CODEC_TYPE_VIDEO:
- ret = avcodec_decode_video(st->codec, &picture,
+ ret = avcodec_decode_video(st->codec, &picture,
&got_picture, (uint8_t *)data, size);
break;
case CODEC_TYPE_AUDIO:
samples = av_malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE);
if (!samples)
goto fail;
- ret = avcodec_decode_audio(st->codec, samples,
+ ret = avcodec_decode_audio(st->codec, samples,
&got_picture, (uint8_t *)data, size);
av_free(samples);
break;
@@ -1840,7 +1840,7 @@ static int try_decode_frame(AVStream *st, const uint8_t *data, int size)
* frame mode.
*
* @param ic media file handle
- * @return >=0 if OK. AVERROR_xxx if error.
+ * @return >=0 if OK. AVERROR_xxx if error.
* @todo let user decide somehow what information is needed so we dont waste time geting stuff the user doesnt need
*/
int av_find_stream_info(AVFormatContext *ic)
@@ -1874,7 +1874,7 @@ int av_find_stream_info(AVFormatContext *ic)
last_dts[i]= AV_NOPTS_VALUE;
duration_sum[i]= INT64_MAX;
}
-
+
count = 0;
read_size = 0;
ppktl = &ic->packet_buffer;
@@ -1936,7 +1936,7 @@ int av_find_stream_info(AVFormatContext *ic)
pkt = &pktl->pkt;
*pkt = pkt1;
-
+
/* duplicate the packet */
if (av_dup_packet(pkt) < 0) {
ret = AVERROR_NOMEM;
@@ -1977,7 +1977,7 @@ int av_find_stream_info(AVFormatContext *ic)
memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
}
}
-
+
/* if still no information, we try to open the codec and to
decompress the frame. We try to avoid that in most cases as
it takes longer and uses more memory. For MPEG4, we need to
@@ -1998,7 +1998,7 @@ int av_find_stream_info(AVFormatContext *ic)
st->codec->codec_id == CODEC_ID_SHORTEN ||
(st->codec->codec_id == CODEC_ID_MPEG4 && !st->need_parsing))*/)
try_decode_frame(st, pkt->data, pkt->size);
-
+
if (av_rescale_q(st->codec_info_duration, st->time_base, AV_TIME_BASE_Q) >= MAX_STREAM_DURATION) {
break;
}
@@ -2024,7 +2024,7 @@ int av_find_stream_info(AVFormatContext *ic)
num= st->time_base.den*duration_count[i];
den= st->time_base.num*duration_sum[i];
-
+
av_reduce(&fps1.num, &fps1.den, num*1001, den*1000, FFMAX(st->time_base.den, st->time_base.num)/4);
av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, den, FFMAX(st->time_base.den, st->time_base.num)/4);
if(fps1.num < st->r_frame_rate.num && fps1.den == 1 && (fps1.num==24 || fps1.num==30)){ //FIXME better decission
@@ -2040,17 +2040,17 @@ int av_find_stream_info(AVFormatContext *ic)
st->codec->sub_id == 2) {
if (st->codec_info_nb_frames >= 20) {
float coded_frame_rate, est_frame_rate;
- est_frame_rate = ((double)st->codec_info_nb_frames * AV_TIME_BASE) /
+ est_frame_rate = ((double)st->codec_info_nb_frames * AV_TIME_BASE) /
(double)st->codec_info_duration ;
coded_frame_rate = 1.0/av_q2d(st->codec->time_base);
#if 0
- printf("telecine: coded_frame_rate=%0.3f est_frame_rate=%0.3f\n",
+ printf("telecine: coded_frame_rate=%0.3f est_frame_rate=%0.3f\n",
coded_frame_rate, est_frame_rate);
#endif
/* if we detect that it could be a telecine, we
signal it. It would be better to do it at a
higher level as it can change in a film */
- if (coded_frame_rate >= 24.97 &&
+ if (coded_frame_rate >= 24.97 &&
(est_frame_rate >= 23.5 && est_frame_rate < 24.5)) {
st->r_frame_rate = (AVRational){24000, 1001};
}
@@ -2096,7 +2096,7 @@ int av_find_stream_info(AVFormatContext *ic)
/**
* start playing a network based stream (e.g. RTSP stream) at the
- * current position
+ * current position
*/
int av_read_play(AVFormatContext *s)
{
@@ -2129,7 +2129,7 @@ void av_close_input_file(AVFormatContext *s)
/* free previous packet */
if (s->cur_st && s->cur_st->parser)
- av_free_packet(&s->cur_pkt);
+ av_free_packet(&s->cur_pkt);
if (s->iformat->read_close)
s->iformat->read_close(s);
@@ -2163,7 +2163,7 @@ void av_close_input_file(AVFormatContext *s)
* can be added in read_packet too.
*
* @param s media file handle
- * @param id file format dependent stream id
+ * @param id file format dependent stream id
*/
AVStream *av_new_stream(AVFormatContext *s, int id)
{
@@ -2175,7 +2175,7 @@ AVStream *av_new_stream(AVFormatContext *s, int id)
st = av_mallocz(sizeof(AVStream));
if (!st)
return NULL;
-
+
st->codec= avcodec_alloc_context();
if (s->iformat) {
/* no default bitrate if decoding */
@@ -2201,14 +2201,14 @@ AVStream *av_new_stream(AVFormatContext *s, int id)
int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
{
int ret;
-
+
if (s->oformat->priv_data_size > 0) {
s->priv_data = av_mallocz(s->oformat->priv_data_size);
if (!s->priv_data)
return AVERROR_NOMEM;
} else
s->priv_data = NULL;
-
+
if (s->oformat->set_parameters) {
ret = s->oformat->set_parameters(s, ap);
if (ret < 0)
@@ -2222,7 +2222,7 @@ int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
* output media file
*
* @param s media file handle
- * @return 0 if OK. AVERROR_xxx if error.
+ * @return 0 if OK. AVERROR_xxx if error.
*/
int av_write_header(AVFormatContext *s)
{
@@ -2289,10 +2289,10 @@ static int compute_pkt_fields2(AVStream *st, AVPacket *pkt){
int num, den, frame_size;
// av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts:%lld dts:%lld cur_dts:%lld b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, b_frames, pkt->size, pkt->stream_index);
-
+
/* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
return -1;*/
-
+
/* duration field */
if (pkt->duration == 0) {
compute_frame_duration(&num, &den, st, NULL, pkt);
@@ -2308,7 +2308,7 @@ static int compute_pkt_fields2(AVStream *st, AVPacket *pkt){
pkt->pts= st->pts.val;
}
- //calculate dts from pts
+ //calculate dts from pts
if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE){
if(b_frames){
if(st->last_IP_pts == AV_NOPTS_VALUE){
@@ -2322,7 +2322,7 @@ static int compute_pkt_fields2(AVStream *st, AVPacket *pkt){
}else
pkt->dts= pkt->pts;
}
-
+
if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
av_log(NULL, AV_LOG_ERROR, "error, non monotone timestamps %"PRId64" >= %"PRId64"\n", st->cur_dts, pkt->dts);
return -1;
@@ -2358,10 +2358,10 @@ static int compute_pkt_fields2(AVStream *st, AVPacket *pkt){
static void truncate_ts(AVStream *st, AVPacket *pkt){
int64_t pts_mask = (2LL << (st->pts_wrap_bits-1)) - 1;
-
+
// if(pkt->dts < 0)
// pkt->dts= 0; //this happens for low_delay=0 and b frames, FIXME, needs further invstigation about what we should do here
-
+
pkt->pts &= pts_mask;
pkt->dts &= pts_mask;
}
@@ -2382,7 +2382,7 @@ int av_write_frame(AVFormatContext *s, AVPacket *pkt)
ret=compute_pkt_fields2(s->streams[pkt->stream_index], pkt);
if(ret<0)
return ret;
-
+
truncate_ts(s->streams[pkt->stream_index], pkt);
ret= s->oformat->write_packet(s, pkt);
@@ -2393,7 +2393,7 @@ int av_write_frame(AVFormatContext *s, AVPacket *pkt)
/**
* interleave_packet implementation which will interleave per DTS.
- * packets with pkt->destruct == av_destruct_packet will be freed inside this function.
+ * packets with pkt->destruct == av_destruct_packet will be freed inside this function.
* so they cannot be used after it, note calling av_free_packet() on them is still safe
*/
static int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
@@ -2425,7 +2425,7 @@ static int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPac
this_pktl->next= *next_point;
*next_point= this_pktl;
}
-
+
memset(streams, 0, sizeof(streams));
pktl= s->packet_buffer;
while(pktl){
@@ -2435,12 +2435,12 @@ static int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPac
streams[ pktl->pkt.stream_index ]++;
pktl= pktl->next;
}
-
+
if(s->nb_streams == stream_count || (flush && stream_count)){
pktl= s->packet_buffer;
*out= pktl->pkt;
-
- s->packet_buffer= pktl->next;
+
+ s->packet_buffer= pktl->next;
av_freep(&pktl);
return 1;
}else{
@@ -2455,7 +2455,7 @@ static int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPac
* @param in the input packet
* @param flush 1 if no further packets are available as input and all
* remaining packets should be output
- * @return 1 if a packet was output, 0 if no packet could be output,
+ * @return 1 if a packet was output, 0 if no packet could be output,
* < 0 if an error occured
*/
static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
@@ -2489,7 +2489,7 @@ int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
//av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %Ld %Ld\n", pkt->size, pkt->dts, pkt->pts);
if(compute_pkt_fields2(st, pkt) < 0)
return -1;
-
+
if(pkt->dts == AV_NOPTS_VALUE)
return -1;
@@ -2498,13 +2498,13 @@ int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
int ret= av_interleave_packet(s, &opkt, pkt, 0);
if(ret<=0) //FIXME cleanup needed for ret<0 ?
return ret;
-
+
truncate_ts(s->streams[opkt.stream_index], &opkt);
ret= s->oformat->write_packet(s, &opkt);
-
+
av_free_packet(&opkt);
pkt= NULL;
-
+
if(ret<0)
return ret;
if(url_ferror(&s->pb))
@@ -2522,7 +2522,7 @@ int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
int av_write_trailer(AVFormatContext *s)
{
int ret, i;
-
+
for(;;){
AVPacket pkt;
ret= av_interleave_packet(s, &pkt, NULL, 1);
@@ -2530,12 +2530,12 @@ int av_write_trailer(AVFormatContext *s)
goto fail;
if(!ret)
break;
-
+
truncate_ts(s->streams[pkt.stream_index], &pkt);
ret= s->oformat->write_packet(s, &pkt);
-
+
av_free_packet(&pkt);
-
+
if(ret<0)
goto fail;
if(url_ferror(&s->pb))
@@ -2556,17 +2556,17 @@ fail:
/* "user interface" functions */
void dump_format(AVFormatContext *ic,
- int index,
+ int index,
const char *url,
int is_output)
{
int i, flags;
char buf[256];
- av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
+ av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
is_output ? "Output" : "Input",
- index,
- is_output ? ic->oformat->name : ic->iformat->name,
+ index,
+ is_output ? ic->oformat->name : ic->iformat->name,
is_output ? "to" : "from", url);
if (!is_output) {
av_log(NULL, AV_LOG_INFO, " Duration: ");
@@ -2578,7 +2578,7 @@ void dump_format(AVFormatContext *ic,
secs %= 60;
hours = mins / 60;
mins %= 60;
- av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%01d", hours, mins, secs,
+ av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%01d", hours, mins, secs,
(10 * us) / AV_TIME_BASE);
} else {
av_log(NULL, AV_LOG_INFO, "N/A");
@@ -2692,7 +2692,7 @@ int parse_frame_rate(int *frame_rate, int *frame_rate_base, const char *arg)
{
int i;
char* cp;
-
+
/* First, we check our abbreviation table */
for (i = 0; i < sizeof(frame_abvs)/sizeof(*frame_abvs); ++i)
if (!strcmp(frame_abvs[i].abv, arg)) {
@@ -2708,11 +2708,11 @@ int parse_frame_rate(int *frame_rate, int *frame_rate_base, const char *arg)
if (cp) {
char* cpp;
*frame_rate = strtol(arg, &cpp, 10);
- if (cpp != arg || cpp == cp)
+ if (cpp != arg || cpp == cp)
*frame_rate_base = strtol(cp+1, &cpp, 10);
else
*frame_rate = 0;
- }
+ }
else {
/* Finally we give up and parse it as double */
*frame_rate_base = DEFAULT_FRAME_RATE_BASE; //FIXME use av_d2q()
@@ -2732,7 +2732,7 @@ int parse_frame_rate(int *frame_rate, int *frame_rate_base, const char *arg)
* - If not a duration:
* [{YYYY-MM-DD|YYYYMMDD}]{T| }{HH[:MM[:SS[.m...]]][Z]|HH[MM[SS[.m...]]][Z]}
* Time is localtime unless Z is suffixed to the end. In this case GMT
- * Return the date in micro seconds since 1970
+ * Return the date in micro seconds since 1970
*
* - If a duration:
* HH[:MM[:SS[.m...]]]
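 * A few concrete inputs accepted by that grammar (illustrative values):
 * "2006-03-01 14:30:05.5" and "20060301T143005Z" as absolute dates (the
 * trailing Z selecting GMT instead of localtime), and "01:30:00.25" as a
 * duration of one hour, thirty minutes and a quarter of a second; the return
 * value is in microseconds (counted since 1970 for absolute dates).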
@@ -2838,7 +2838,7 @@ int64_t parse_date(const char *datestr, int duration)
int val, n;
q++;
for (val = 0, n = 100000; n >= 1; n /= 10, q++) {
- if (!isdigit(*q))
+ if (!isdigit(*q))
break;
val += n * (*q - '0');
}
@@ -2883,7 +2883,7 @@ int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
}
*q = '\0';
}
- if (!strcmp(tag, tag1))
+ if (!strcmp(tag, tag1))
return 1;
if (*p != '&')
break;
@@ -3089,10 +3089,10 @@ void url_split(char *proto, int proto_size,
/**
* Set the pts for a given stream.
*
- * @param s stream
+ * @param s stream
* @param pts_wrap_bits number of bits effectively used by the pts
- * (used for wrap control, 33 is the value for MPEG)
- * @param pts_num numerator to convert to seconds (MPEG: 1)
+ * (used for wrap control, 33 is the value for MPEG)
+ * @param pts_num numerator to convert to seconds (MPEG: 1)
* @param pts_den denominator to convert to seconds (MPEG: 90000)
*/
void av_set_pts_info(AVStream *s, int pts_wrap_bits,
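The MPEG values quoted in the comment above translate into the following hypothetical call for a program/transport-stream style (de)muxer, giving the stream a 90 kHz time base with 33-bit timestamp wraparound:

    av_set_pts_info(st, 33, 1, 90000);   /* pts_wrap_bits, pts_num, pts_den */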
@@ -3113,7 +3113,7 @@ void av_set_pts_info(AVStream *s, int pts_wrap_bits,
* @param f fractional number
* @param val integer value
* @param num must be >= 0
- * @param den must be >= 1
+ * @param den must be >= 1
*/
void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
{
@@ -3213,7 +3213,7 @@ AVImageFormat *guess_image_format(const char *filename)
}
/**
- * Read an image from a stream.
+ * Read an image from a stream.
* @param gb byte stream containing the image
* @param fmt image format, NULL if probing is required
*/
diff --git a/libavformat/wav.c b/libavformat/wav.c
index 0dc4bd39b9..38405fe3ef 100644
--- a/libavformat/wav.c
+++ b/libavformat/wav.c
@@ -1,4 +1,4 @@
-/*
+/*
* WAV encoder and decoder
* Copyright (c) 2001, 2002 Fabrice Bellard.
*
@@ -78,7 +78,7 @@ int put_wav_header(ByteIOContext *pb, AVCodecContext *enc)
} else {
bps = 16;
}
-
+
if (enc->codec_id == CODEC_ID_MP2 || enc->codec_id == CODEC_ID_MP3) {
blkalign = enc->frame_size; //this is wrong, but seems many demuxers dont work if this is set correctly
//blkalign = 144 * enc->bit_rate/enc->sample_rate;
@@ -145,7 +145,7 @@ int put_wav_header(ByteIOContext *pb, AVCodecContext *enc)
* WAVEFORMATEX adds 'WORD cbSize' and basically makes itself
* an openended structure.
*/
-void get_wav_header(ByteIOContext *pb, AVCodecContext *codec, int size)
+void get_wav_header(ByteIOContext *pb, AVCodecContext *codec, int size)
{
int id;
@@ -161,7 +161,7 @@ void get_wav_header(ByteIOContext *pb, AVCodecContext *codec, int size)
}else
codec->bits_per_sample = get_le16(pb);
codec->codec_id = wav_codec_get_id(id, codec->bits_per_sample);
-
+
if (size > 16) { /* We're obviously dealing with WAVEFORMATEX */
codec->extradata_size = get_le16(pb);
if (codec->extradata_size > 0) {
@@ -171,7 +171,7 @@ void get_wav_header(ByteIOContext *pb, AVCodecContext *codec, int size)
get_buffer(pb, codec->extradata, codec->extradata_size);
} else
codec->extradata_size = 0;
-
+
/* It is possible for the chunk to contain garbage at the end */
if (size - codec->extradata_size - 18 > 0)
url_fskip(pb, size - codec->extradata_size - 18);
@@ -222,7 +222,7 @@ static int wav_write_header(AVFormatContext *s)
/* data header */
wav->data = start_tag(pb, "data");
-
+
put_flush_packet(pb);
return 0;
@@ -309,7 +309,7 @@ static int wav_read_header(AVFormatContext *s,
tag = get_le32(pb);
if (tag != MKTAG('W', 'A', 'V', 'E'))
return -1;
-
+
/* parse fmt header */
size = find_tag(pb, MKTAG('f', 'm', 't', ' '));
if (size < 0)
@@ -365,7 +365,7 @@ static int wav_read_close(AVFormatContext *s)
return 0;
}
-static int wav_read_seek(AVFormatContext *s,
+static int wav_read_seek(AVFormatContext *s,
int stream_index, int64_t timestamp, int flags)
{
AVStream *st;
diff --git a/libavformat/wc3movie.c b/libavformat/wc3movie.c
index 23c13324b4..693396cbf6 100644
--- a/libavformat/wc3movie.c
+++ b/libavformat/wc3movie.c
@@ -73,37 +73,37 @@ typedef struct Wc3DemuxContext {
/* bizarre palette lookup table */
static const unsigned char wc3_pal_lookup[] = {
- 0x00, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0E,
+ 0x00, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0E,
0x10, 0x12, 0x13, 0x15, 0x16, 0x18, 0x19, 0x1A,
- 0x1C, 0x1D, 0x1F, 0x20, 0x21, 0x23, 0x24, 0x25,
+ 0x1C, 0x1D, 0x1F, 0x20, 0x21, 0x23, 0x24, 0x25,
0x27, 0x28, 0x29, 0x2A, 0x2C, 0x2D, 0x2E, 0x2F,
- 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x38, 0x39,
+ 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x38, 0x39,
0x3A, 0x3B, 0x3C, 0x3D, 0x3F, 0x40, 0x41, 0x42,
- 0x43, 0x44, 0x45, 0x46, 0x48, 0x49, 0x4A, 0x4B,
+ 0x43, 0x44, 0x45, 0x46, 0x48, 0x49, 0x4A, 0x4B,
0x4C, 0x4D, 0x4E, 0x4F, 0x50, 0x51, 0x52, 0x53,
- 0x54, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B, 0x5C,
+ 0x54, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B, 0x5C,
0x5D, 0x5E, 0x5F, 0x60, 0x61, 0x62, 0x63, 0x64,
- 0x65, 0x66, 0x67, 0x68, 0x69, 0x6A, 0x6B, 0x6C,
+ 0x65, 0x66, 0x67, 0x68, 0x69, 0x6A, 0x6B, 0x6C,
0x6D, 0x6E, 0x6F, 0x70, 0x71, 0x72, 0x73, 0x74,
- 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x7B, 0x7C,
+ 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x7B, 0x7C,
0x7D, 0x7D, 0x7E, 0x7F, 0x80, 0x81, 0x82, 0x83,
- 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8A, 0x8B,
+ 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8A, 0x8B,
0x8C, 0x8D, 0x8D, 0x8E, 0x8F, 0x90, 0x91, 0x92,
- 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x99,
+ 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x99,
0x9A, 0x9B, 0x9C, 0x9D, 0x9E, 0x9F, 0xA0, 0xA1,
- 0xA2, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8,
+ 0xA2, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8,
0xA9, 0xAA, 0xAA, 0xAB, 0xAC, 0xAD, 0xAE, 0xAF,
- 0xB0, 0xB1, 0xB2, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6,
+ 0xB0, 0xB1, 0xB2, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6,
0xB7, 0xB8, 0xB9, 0xB9, 0xBA, 0xBB, 0xBC, 0xBD,
- 0xBE, 0xBF, 0xBF, 0xC0, 0xC1, 0xC2, 0xC3, 0xC4,
+ 0xBE, 0xBF, 0xBF, 0xC0, 0xC1, 0xC2, 0xC3, 0xC4,
0xC5, 0xC5, 0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xCB,
- 0xCB, 0xCC, 0xCD, 0xCE, 0xCF, 0xD0, 0xD0, 0xD1,
+ 0xCB, 0xCC, 0xCD, 0xCE, 0xCF, 0xD0, 0xD0, 0xD1,
0xD2, 0xD3, 0xD4, 0xD5, 0xD5, 0xD6, 0xD7, 0xD8,
0xD9, 0xDA, 0xDA, 0xDB, 0xDC, 0xDD, 0xDE, 0xDF,
0xDF, 0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE4, 0xE5,
- 0xE6, 0xE7, 0xE8, 0xE9, 0xE9, 0xEA, 0xEB, 0xEC,
+ 0xE6, 0xE7, 0xE8, 0xE9, 0xE9, 0xEA, 0xEB, 0xEC,
0xED, 0xED, 0xEE, 0xEF, 0xF0, 0xF1, 0xF1, 0xF2,
- 0xF3, 0xF4, 0xF5, 0xF6, 0xF6, 0xF7, 0xF8, 0xF9,
+ 0xF3, 0xF4, 0xF5, 0xF6, 0xF6, 0xF7, 0xF8, 0xF9,
0xFA, 0xFA, 0xFB, 0xFC, 0xFD, 0xFD, 0xFD, 0xFD
};
@@ -148,7 +148,7 @@ static int wc3_read_header(AVFormatContext *s,
/* traverse through the chunks and load the header information before
* the first BRCH tag */
- if ((ret = get_buffer(pb, preamble, WC3_PREAMBLE_SIZE)) !=
+ if ((ret = get_buffer(pb, preamble, WC3_PREAMBLE_SIZE)) !=
WC3_PREAMBLE_SIZE)
return AVERROR_IO;
fourcc_tag = LE_32(&preamble[0]);
@@ -188,7 +188,7 @@ static int wc3_read_header(AVFormatContext *s,
case SIZE_TAG:
/* video resolution override */
- if ((ret = get_buffer(pb, preamble, WC3_PREAMBLE_SIZE)) !=
+ if ((ret = get_buffer(pb, preamble, WC3_PREAMBLE_SIZE)) !=
WC3_PREAMBLE_SIZE)
return AVERROR_IO;
wc3->width = LE_32(&preamble[0]);
@@ -199,8 +199,8 @@ static int wc3_read_header(AVFormatContext *s,
/* one of several palettes */
if ((unsigned)current_palette >= wc3->palette_count)
return AVERROR_INVALIDDATA;
- if ((ret = get_buffer(pb,
- &wc3->palettes[current_palette * PALETTE_SIZE],
+ if ((ret = get_buffer(pb,
+ &wc3->palettes[current_palette * PALETTE_SIZE],
PALETTE_SIZE)) != PALETTE_SIZE)
return AVERROR_IO;
@@ -209,7 +209,7 @@ static int wc3_read_header(AVFormatContext *s,
i < (current_palette + 1) * PALETTE_SIZE; i++) {
/* rotate each palette component left by 2 and use the result
* as an index into the color component table */
- rotate = ((wc3->palettes[i] << 2) & 0xFF) |
+ rotate = ((wc3->palettes[i] << 2) & 0xFF) |
((wc3->palettes[i] >> 6) & 0xFF);
wc3->palettes[i] = wc3_pal_lookup[rotate];
}
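As a worked example of that rotation: a stored palette byte of 0x93 (binary 10010011) becomes ((0x93 << 2) & 0xFF) | ((0x93 >> 6) & 0xFF) = 0x4C | 0x02 = 0x4E, and wc3_pal_lookup[0x4E] = 0x63 is what ends up in the palette.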
@@ -224,7 +224,7 @@ static int wc3_read_header(AVFormatContext *s,
break;
}
- if ((ret = get_buffer(pb, preamble, WC3_PREAMBLE_SIZE)) !=
+ if ((ret = get_buffer(pb, preamble, WC3_PREAMBLE_SIZE)) !=
WC3_PREAMBLE_SIZE)
return AVERROR_IO;
fourcc_tag = LE_32(&preamble[0]);
diff --git a/libavformat/westwood.c b/libavformat/westwood.c
index d9f61c717a..ac85579ad5 100644
--- a/libavformat/westwood.c
+++ b/libavformat/westwood.c
@@ -101,7 +101,7 @@ static int wsaud_probe(AVProbeData *p)
if ((field < 8000) || (field > 48000))
return 0;
- /* note: only check for WS IMA (type 99) right now since there is no
+ /* note: only check for WS IMA (type 99) right now since there is no
* support for type 1 */
if (p->buf[11] != 99)
return 0;
@@ -332,7 +332,7 @@ static int wsvqa_read_packet(AVFormatContext *s,
pkt->pts /= wsvqa->audio_samplerate;
/* 2 samples/byte, 1 or 2 samples per frame depending on stereo */
- wsvqa->audio_frame_counter += (chunk_size * 2) /
+ wsvqa->audio_frame_counter += (chunk_size * 2) /
wsvqa->audio_channels;
} else {
pkt->stream_index = wsvqa->video_stream_index;
diff --git a/libavformat/yuv.c b/libavformat/yuv.c
index c4bce68b29..04f6f0c679 100644
--- a/libavformat/yuv.c
+++ b/libavformat/yuv.c
@@ -53,7 +53,7 @@ static int yuv_read(ByteIOContext *f,
int size;
URLContext *h;
AVImageInfo info1, *info = &info1;
-
+
img_size = url_fsize(f);
/* XXX: hack hack */
@@ -64,26 +64,26 @@ static int yuv_read(ByteIOContext *f,
return AVERROR_IO;
}
info->pix_fmt = PIX_FMT_YUV420P;
-
+
ret = alloc_cb(opaque, info);
if (ret)
return ret;
-
+
size = info->width * info->height;
-
+
p = strrchr(fname, '.');
if (!p || p[1] != 'Y')
return AVERROR_IO;
get_buffer(f, info->pict.data[0], size);
-
+
p[1] = 'U';
if (url_fopen(pb, fname, URL_RDONLY) < 0)
return AVERROR_IO;
get_buffer(pb, info->pict.data[1], size / 4);
url_fclose(pb);
-
+
p[1] = 'V';
if (url_fopen(pb, fname, URL_RDONLY) < 0)
return AVERROR_IO;
@@ -101,7 +101,7 @@ static int yuv_write(ByteIOContext *pb2, AVImageInfo *info)
uint8_t *ptr;
URLContext *h;
static const char *ext = "YUV";
-
+
/* XXX: hack hack */
h = url_fileno(pb2);
url_get_filename(h, fname, sizeof(fname));
@@ -127,7 +127,7 @@ static int yuv_write(ByteIOContext *pb2, AVImageInfo *info)
} else {
pb = pb2;
}
-
+
ptr = info->pict.data[i];
for(j=0;j<height;j++) {
put_buffer(pb, ptr, width);
@@ -140,7 +140,7 @@ static int yuv_write(ByteIOContext *pb2, AVImageInfo *info)
}
return 0;
}
-
+
static int yuv_probe(AVProbeData *pd)
{
if (match_ext(pd->filename, "Y"))
diff --git a/libavformat/yuv4mpeg.c b/libavformat/yuv4mpeg.c
index 2078b63199..f0f15cddb6 100644
--- a/libavformat/yuv4mpeg.c
+++ b/libavformat/yuv4mpeg.c
@@ -37,10 +37,10 @@ static int yuv4_generate_header(AVFormatContext *s, char* buf)
height = st->codec->height;
av_reduce(&raten, &rated, st->codec->time_base.den, st->codec->time_base.num, (1UL<<31)-1);
-
+
aspectn = st->codec->sample_aspect_ratio.num;
aspectd = st->codec->sample_aspect_ratio.den;
-
+
if ( aspectn == 0 && aspectd == 1 ) aspectd = 0; // 0:0 means unknown
inter = 'p'; /* progressive is the default */
@@ -75,7 +75,7 @@ static int yuv4_generate_header(AVFormatContext *s, char* buf)
inter,
aspectn, aspectd,
colorspace);
-
+
return n;
}
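For orientation (values made up): for 720x576, 25 fps progressive material with an unset sample aspect ratio, the stream header built here comes out roughly as "YUV4MPEG2 W720 H576 F25:1 Ip A0:0" plus an optional colorspace tag and a newline, and yuv4_write_packet() then prefixes every frame with the Y4M_FRAME_MAGIC line.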
@@ -100,18 +100,18 @@ static int yuv4_write_packet(AVFormatContext *s, AVPacket *pkt)
av_log(s, AV_LOG_ERROR, "Error. YUV4MPEG stream header write failed.\n");
return AVERROR_IO;
} else {
- put_buffer(pb, buf2, strlen(buf2));
+ put_buffer(pb, buf2, strlen(buf2));
}
}
/* construct frame header */
-
+
m = snprintf(buf1, sizeof(buf1), "%s\n", Y4M_FRAME_MAGIC);
put_buffer(pb, buf1, strlen(buf1));
width = st->codec->width;
height = st->codec->height;
-
+
ptr = picture->data[0];
for(i=0;i<height;i++) {
put_buffer(pb, ptr, width);
@@ -142,21 +142,21 @@ static int yuv4_write_packet(AVFormatContext *s, AVPacket *pkt)
static int yuv4_write_header(AVFormatContext *s)
{
int* first_pkt = s->priv_data;
-
+
if (s->nb_streams != 1)
return AVERROR_IO;
-
+
if (s->streams[0]->codec->pix_fmt == PIX_FMT_YUV411P) {
av_log(s, AV_LOG_ERROR, "Warning: generating rarely used 4:1:1 YUV stream, some mjpegtools might not work.\n");
- }
- else if ((s->streams[0]->codec->pix_fmt != PIX_FMT_YUV420P) &&
- (s->streams[0]->codec->pix_fmt != PIX_FMT_YUV422P) &&
- (s->streams[0]->codec->pix_fmt != PIX_FMT_GRAY8) &&
+ }
+ else if ((s->streams[0]->codec->pix_fmt != PIX_FMT_YUV420P) &&
+ (s->streams[0]->codec->pix_fmt != PIX_FMT_YUV422P) &&
+ (s->streams[0]->codec->pix_fmt != PIX_FMT_GRAY8) &&
(s->streams[0]->codec->pix_fmt != PIX_FMT_YUV444P)) {
av_log(s, AV_LOG_ERROR, "ERROR: yuv4mpeg only handles yuv444p, yuv422p, yuv420p, yuv411p and gray pixel formats. Use -pix_fmt to select one.\n");
return AVERROR_IO;
}
-
+
*first_pkt = 1;
return 0;
}
@@ -194,7 +194,7 @@ static int yuv4_read_header(AVFormatContext *s, AVFormatParameters *ap)
int width=-1, height=-1, raten=0, rated=0, aspectn=0, aspectd=0,interlaced_frame=0,top_field_first=0;
enum PixelFormat pix_fmt=PIX_FMT_NONE,alt_pix_fmt=PIX_FMT_NONE;
AVStream *st;
-
+
for (i=0; i<MAX_YUV4_HEADER; i++) {
header[i] = get_byte(pb);
if (header[i] == '\n') {
@@ -293,13 +293,13 @@ static int yuv4_read_header(AVFormatContext *s, AVFormatParameters *ap)
while(tokstart<header_end&&*tokstart!=0x20) tokstart++;
break;
}
- }
+ }
if ((width == -1) || (height == -1)) {
av_log(s, AV_LOG_ERROR, "YUV4MPEG has invalid header.\n");
- return -1;
+ return -1;
}
-
+
if (pix_fmt == PIX_FMT_NONE) {
if (alt_pix_fmt == PIX_FMT_NONE)
pix_fmt = PIX_FMT_YUV420P;
@@ -317,7 +317,7 @@ static int yuv4_read_header(AVFormatContext *s, AVFormatParameters *ap)
// Pixel aspect unknown
aspectd = 1;
}
-
+
st = av_new_stream(s, 0);
st = s->streams[0];
st->codec->width = width;
@@ -348,7 +348,7 @@ static int yuv4_read_packet(AVFormatContext *s, AVPacket *pkt)
}
if (i == MAX_FRAME_HEADER) return -1;
if (strncmp(header, Y4M_FRAME_MAGIC, strlen(Y4M_FRAME_MAGIC))) return -1;
-
+
width = st->codec->width;
height = st->codec->height;