 libavformat/rtp.c          | 76
 libavformat/rtp_internal.h |  2
 libavformat/rtsp.c         | 35
 3 files changed, 82 insertions(+), 31 deletions(-)
diff --git a/libavformat/rtp.c b/libavformat/rtp.c
index ad6931d15d..3b3e60ebc5 100644
--- a/libavformat/rtp.c
+++ b/libavformat/rtp.c
@@ -328,6 +328,7 @@ int rtp_check_and_send_back_rr(RTPDemuxContext *s, int count)
* open a new RTP parse context for stream 'st'. 'st' can be NULL for
* MPEG2TS streams to indicate that they should be demuxed inside the
* rtp demux (otherwise CODEC_ID_MPEG2TS packets are returned)
+ * TODO: change this to not take rtp_payload_data, and use the new dynamic payload system.
*/
RTPDemuxContext *rtp_parse_open(AVFormatContext *s1, AVStream *st, URLContext *rtpc, int payload_type, rtp_payload_data_t *rtp_payload_data)
{
@@ -419,6 +420,39 @@ static int rtp_parse_mp4_au(RTPDemuxContext *s, const uint8_t *buf)
}
/**
+ * This was the second switch in rtp_parse_packet(): it normalizes the timestamp, if required, and sets stream_index.
+ */
+static void finalize_packet(RTPDemuxContext *s, AVPacket *pkt, uint32_t timestamp)
+{
+ switch(s->st->codec->codec_id) {
+ case CODEC_ID_MP2:
+ case CODEC_ID_MPEG1VIDEO:
+ if (s->last_rtcp_ntp_time != AV_NOPTS_VALUE) {
+ int64_t addend;
+
+ int delta_timestamp;
+ /* XXX: is it really necessary to unify the timestamp base ? */
+ /* compute pts from timestamp with received ntp_time */
+ delta_timestamp = timestamp - s->last_rtcp_timestamp;
+ /* convert to 90 kHz without overflow */
+ addend = (s->last_rtcp_ntp_time - s->first_rtcp_ntp_time) >> 14;
+ addend = (addend * 5625) >> 14;
+ pkt->pts = addend + delta_timestamp;
+ }
+ break;
+ case CODEC_ID_MPEG4AAC:
+ case CODEC_ID_H264:
+ case CODEC_ID_MPEG4:
+ pkt->pts = timestamp;
+ break;
+ default:
+ /* no timestamp info yet */
+ break;
+ }
+ pkt->stream_index = s->st->index;
+}
+
+/**
* Parse an RTP or RTCP packet directly sent as a buffer.
* @param s RTP parse context.
* @param pkt returned packet
@@ -431,15 +465,20 @@ int rtp_parse_packet(RTPDemuxContext *s, AVPacket *pkt,
const uint8_t *buf, int len)
{
unsigned int ssrc, h;
- int payload_type, seq, delta_timestamp, ret;
+ int payload_type, seq, ret;
AVStream *st;
uint32_t timestamp;
+ int rv= 0;
if (!buf) {
/* return the next packets, if any */
if(s->st && s->parse_packet) {
- return s->parse_packet(s, pkt, 0, NULL, 0);
+ timestamp= 0; ///< unused when buf is NULL; the handler sets it to the timestamp of the returned packet
+ rv= s->parse_packet(s, pkt, &timestamp, NULL, 0);
+ finalize_packet(s, pkt, timestamp);
+ return rv;
} else {
+ // TODO: Move to a dynamic packet handler (like above)
if (s->read_buf_index >= s->read_buf_size)
return -1;
ret = mpegts_parse_packet(s->ts, pkt, s->buf + s->read_buf_index,
@@ -548,12 +587,11 @@ int rtp_parse_packet(RTPDemuxContext *s, AVPacket *pkt,
}
s->read_buf_size = len;
s->buf_ptr = buf;
- pkt->stream_index = s->st->index;
- return 0; ///< Temporary return.
+ rv= 0;
break;
default:
if(s->parse_packet) {
- return s->parse_packet(s, pkt, timestamp, buf, len);
+ rv= s->parse_packet(s, pkt, &timestamp, buf, len);
} else {
av_new_packet(pkt, len);
memcpy(pkt->data, buf, len);
@@ -561,32 +599,10 @@ int rtp_parse_packet(RTPDemuxContext *s, AVPacket *pkt,
break;
}
- switch(st->codec->codec_id) {
- case CODEC_ID_MP2:
- case CODEC_ID_MPEG1VIDEO:
- if (s->last_rtcp_ntp_time != AV_NOPTS_VALUE) {
- int64_t addend;
- /* XXX: is it really necessary to unify the timestamp base ? */
- /* compute pts from timestamp with received ntp_time */
- delta_timestamp = timestamp - s->last_rtcp_timestamp;
- /* convert to 90 kHz without overflow */
- addend = (s->last_rtcp_ntp_time - s->first_rtcp_ntp_time) >> 14;
- addend = (addend * 5625) >> 14;
- pkt->pts = addend + delta_timestamp;
- }
- break;
- case CODEC_ID_MPEG4AAC:
- case CODEC_ID_H264:
- case CODEC_ID_MPEG4:
- pkt->pts = timestamp;
- break;
- default:
- /* no timestamp info yet */
- break;
- }
- pkt->stream_index = s->st->index;
+ // normalize the timestamp and set the stream index
+ finalize_packet(s, pkt, timestamp);
}
- return 0;
+ return rv;
}
void rtp_parse_close(RTPDemuxContext *s)
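
As a side note on the arithmetic kept in finalize_packet(): an NTP timestamp is a 32.32 fixed-point value, so scaling the difference of two NTP times to the 90 kHz RTP clock means multiplying by 90000 / 2^32 = 5625 / 2^28, and splitting the shift into two >> 14 steps keeps the 64-bit intermediate product from overflowing. A minimal standalone sketch (function name hypothetical, not part of the patch):

    #include <stdint.h>

    /* sketch only: scale a difference of two 32.32 fixed-point NTP times
     * to the 90 kHz RTP clock, as finalize_packet() does for MP2 and
     * MPEG-1 video; 90000 / 2^32 == 5625 / 2^28 */
    static int64_t ntp_diff_to_90khz(int64_t ntp_diff)
    {
        int64_t addend = ntp_diff >> 14;   /* drop 14 fraction bits first */
        return (addend * 5625) >> 14;      /* multiply, then drop the rest */
    }
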
diff --git a/libavformat/rtp_internal.h b/libavformat/rtp_internal.h
index 3930966ad8..953051156b 100644
--- a/libavformat/rtp_internal.h
+++ b/libavformat/rtp_internal.h
@@ -25,7 +25,7 @@
typedef int (*DynamicPayloadPacketHandlerProc) (struct RTPDemuxContext * s,
AVPacket * pkt,
- uint32_t timestamp,
+ uint32_t *timestamp,
const uint8_t * buf,
int len);
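
With the signature change above, a dynamic payload handler receives a pointer to the timestamp rather than the value itself, so it can report the timestamp of a packet it returns from its own queue when buf is NULL. A minimal sketch of a handler following the new contract (the helper names are hypothetical, nothing below is taken from the patch):

    #include <string.h>        /* memcpy */
    #include "rtp_internal.h"  /* RTPDemuxContext, AVPacket */

    static int example_parse_packet(struct RTPDemuxContext *s, AVPacket *pkt,
                                    uint32_t *timestamp, const uint8_t *buf,
                                    int len)
    {
        if (!buf) {
            /* hand back a packet queued by an earlier call, if any;
             * have_queued_packet()/dequeue_packet() are hypothetical helpers */
            if (!have_queued_packet(s))
                return -1;
            dequeue_packet(s, pkt, timestamp); /* must also set *timestamp */
            return 0;                          /* >0 would mean more pending */
        }
        if (av_new_packet(pkt, len) < 0)
            return -1;
        memcpy(pkt->data, buf, len);
        return 0;  /* *timestamp already holds the value from the RTP header */
    }
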
diff --git a/libavformat/rtsp.c b/libavformat/rtsp.c
index 08c71c534b..0c5038e0d4 100644
--- a/libavformat/rtsp.c
+++ b/libavformat/rtsp.c
@@ -200,6 +200,8 @@ static int sdp_parse_rtpmap(AVCodecContext *codec, RTSPStream *rtsp_st, int payl
i = atoi(buf);
if (i > 0)
codec->channels = i;
+ // TODO: there is a bug here; if the stream is mono and below 22000 Hz, faad upconverts it to stereo and to twice the
+ // sample rate, yet the sample rate is still set here from the SDP line. A follow-up patch will address this. (rdm)
}
av_log(codec, AV_LOG_DEBUG, " audio samplerate set to : %i\n", codec->sample_rate);
av_log(codec, AV_LOG_DEBUG, " audio channels set to : %i\n", codec->channels);
@@ -287,6 +289,25 @@ static attrname_map_t attr_names[]=
{NULL, -1, -1},
};
+/** Parse one attribute=value pair from the fmtp a= line of an SDP response. This is broken out as a function
+ * because it is also used by rtp_h264.c, which is forthcoming.
+ */
+int rtsp_next_attr_and_value(const char **p, char *attr, int attr_size, char *value, int value_size)
+{
+ skip_spaces(p);
+ if(**p)
+ {
+ get_word_sep(attr, attr_size, "=", p);
+ if (**p == '=')
+ (*p)++;
+ get_word_sep(value, value_size, ";", p);
+ if (**p == ';')
+ (*p)++;
+ return 1;
+ }
+ return 0;
+}
+
/* parse a SDP line and save stream attributes */
static void sdp_parse_fmtp(AVStream *st, const char *p)
{
@@ -298,6 +319,7 @@ static void sdp_parse_fmtp(AVStream *st, const char *p)
AVCodecContext *codec = st->codec;
rtp_payload_data_t *rtp_payload_data = &rtsp_st->rtp_payload_data;
+ // TODO: replace this loop with rtsp_next_attr_and_value()
/* loop on each attribute */
for(;;) {
skip_spaces(&p);
@@ -471,6 +493,19 @@ static void sdp_parse_line(AVFormatContext *s, SDPParseState *s1,
}
}
}
+ } else if(strstart(p, "framesize:", &p)) {
+ // let dynamic protocol handlers have a stab at the line.
+ get_word(buf1, sizeof(buf1), &p);
+ payload_type = atoi(buf1);
+ for(i = 0; i < s->nb_streams;i++) {
+ st = s->streams[i];
+ rtsp_st = st->priv_data;
+ if (rtsp_st->sdp_payload_type == payload_type) {
+ if(rtsp_st->dynamic_handler && rtsp_st->dynamic_handler->parse_sdp_a_line) {
+ rtsp_st->dynamic_handler->parse_sdp_a_line(st, rtsp_st->dynamic_protocol_context, buf);
+ }
+ }
+ }
}
break;
}