From bd5c860fdbc33d19d2ff0f6d1f06de07c17560dd Mon Sep 17 00:00:00 2001 From: Clément Bœsch Date: Tue, 1 Dec 2015 15:54:31 +0100 Subject: avutil/threadmessage: split the pthread condition in two Fix a dead lock under certain conditions. Let's assume we have a queue of 1 message max, 2 senders, and 1 receiver. Scenario (real record obtained with debug added): [...] SENDER #0: acquired lock SENDER #0: queue is full, wait SENDER #1: acquired lock SENDER #1: queue is full, wait RECEIVER: acquired lock RECEIVER: reading a msg from the queue RECEIVER: signal the cond RECEIVER: acquired lock RECEIVER: queue is empty, wait SENDER #0: writing a msg the queue SENDER #0: signal the cond SENDER #0: acquired lock SENDER #0: queue is full, wait SENDER #1: queue is full, wait Translated: - initially the queue contains 1/1 message with 2 senders blocking on it, waiting to push another message. - Meanwhile the receiver is obtaining the lock, read the message, signal & release the lock. For some reason it is able to acquire the lock again before the signal wakes up one of the sender. Since it just emptied the queue, the reader waits for the queue to fill up again. - The signal finally reaches one of the sender, which writes a message and then signal the condition. Unfortunately, instead of waking up the reader, it actually wakes up the other worker (signal = notify the condition just for 1 waiter), who can't push another message in the queue because it's full. - Meanwhile, the receiver is still waiting. Deadlock. This scenario can be triggered with for example: tests/api/api-threadmessage-test 1 2 100 100 1 1000 1000 One working solution is to make av_thread_message_queue_{send,recv}() call pthread_cond_broadcast() instead of pthread_cond_signal() so both senders and receivers are unlocked when work is done (be it reading or writing). This second solution replaces the condition with two: one to notify the senders, and one to notify the receivers. 
This prevents senders from notifying other senders instead of a reader, and the other way around. It also avoids broadcasting to everyone like the first solution, and is, as a result, in theory more optimized. --- libavutil/threadmessage.c | 35 ++++++++++++++++++++++++----------- 1 file changed, 24 insertions(+), 11 deletions(-) (limited to 'libavutil') diff --git a/libavutil/threadmessage.c b/libavutil/threadmessage.c index b7d7dadb5a..1f2cca8668 100644 --- a/libavutil/threadmessage.c +++ b/libavutil/threadmessage.c @@ -36,7 +36,8 @@ struct AVThreadMessageQueue { #if HAVE_THREADS AVFifoBuffer *fifo; pthread_mutex_t lock; - pthread_cond_t cond; + pthread_cond_t cond_recv; + pthread_cond_t cond_send; int err_send; int err_recv; unsigned elsize; @@ -62,13 +63,20 @@ int av_thread_message_queue_alloc(AVThreadMessageQueue **mq, av_free(rmq); return AVERROR(ret); } - if ((ret = pthread_cond_init(&rmq->cond, NULL))) { + if ((ret = pthread_cond_init(&rmq->cond_recv, NULL))) { + pthread_mutex_destroy(&rmq->lock); + av_free(rmq); + return AVERROR(ret); + } + if ((ret = pthread_cond_init(&rmq->cond_send, NULL))) { + pthread_cond_destroy(&rmq->cond_recv); pthread_mutex_destroy(&rmq->lock); av_free(rmq); return AVERROR(ret); } if (!(rmq->fifo = av_fifo_alloc(elsize * nelem))) { - pthread_cond_destroy(&rmq->cond); + pthread_cond_destroy(&rmq->cond_send); + pthread_cond_destroy(&rmq->cond_recv); pthread_mutex_destroy(&rmq->lock); av_free(rmq); return AVERROR(ret); @@ -94,7 +102,8 @@ void av_thread_message_queue_free(AVThreadMessageQueue **mq) if (*mq) { av_thread_message_flush(*mq); av_fifo_freep(&(*mq)->fifo); - pthread_cond_destroy(&(*mq)->cond); + pthread_cond_destroy(&(*mq)->cond_send); + pthread_cond_destroy(&(*mq)->cond_recv); pthread_mutex_destroy(&(*mq)->lock); av_freep(mq); } @@ -110,12 +119,13 @@ static int av_thread_message_queue_send_locked(AVThreadMessageQueue *mq, while (!mq->err_send && av_fifo_space(mq->fifo) < mq->elsize) { if ((flags & AV_THREAD_MESSAGE_NONBLOCK)) 
return AVERROR(EAGAIN); - pthread_cond_wait(&mq->cond, &mq->lock); + pthread_cond_wait(&mq->cond_send, &mq->lock); } if (mq->err_send) return mq->err_send; av_fifo_generic_write(mq->fifo, msg, mq->elsize, NULL); - pthread_cond_signal(&mq->cond); + /* one message is sent, signal one receiver */ + pthread_cond_signal(&mq->cond_recv); return 0; } @@ -126,12 +136,13 @@ static int av_thread_message_queue_recv_locked(AVThreadMessageQueue *mq, while (!mq->err_recv && av_fifo_size(mq->fifo) < mq->elsize) { if ((flags & AV_THREAD_MESSAGE_NONBLOCK)) return AVERROR(EAGAIN); - pthread_cond_wait(&mq->cond, &mq->lock); + pthread_cond_wait(&mq->cond_recv, &mq->lock); } if (av_fifo_size(mq->fifo) < mq->elsize) return mq->err_recv; av_fifo_generic_read(mq->fifo, msg, mq->elsize, NULL); - pthread_cond_signal(&mq->cond); + /* one message space appeared, signal one sender */ + pthread_cond_signal(&mq->cond_send); return 0; } @@ -175,7 +186,7 @@ void av_thread_message_queue_set_err_send(AVThreadMessageQueue *mq, #if HAVE_THREADS pthread_mutex_lock(&mq->lock); mq->err_send = err; - pthread_cond_broadcast(&mq->cond); + pthread_cond_broadcast(&mq->cond_send); pthread_mutex_unlock(&mq->lock); #endif /* HAVE_THREADS */ } @@ -186,7 +197,7 @@ void av_thread_message_queue_set_err_recv(AVThreadMessageQueue *mq, #if HAVE_THREADS pthread_mutex_lock(&mq->lock); mq->err_recv = err; - pthread_cond_broadcast(&mq->cond); + pthread_cond_broadcast(&mq->cond_recv); pthread_mutex_unlock(&mq->lock); #endif /* HAVE_THREADS */ } @@ -209,7 +220,9 @@ void av_thread_message_flush(AVThreadMessageQueue *mq) for (off = 0; off < used; off += mq->elsize) av_fifo_generic_peek_at(mq->fifo, mq, off, mq->elsize, free_func_wrap); av_fifo_drain(mq->fifo, used); - pthread_cond_broadcast(&mq->cond); + /* only the senders need to be notified since the queue is empty and there + * is nothing to read */ + pthread_cond_broadcast(&mq->cond_send); pthread_mutex_unlock(&mq->lock); #endif /* HAVE_THREADS */ } -- cgit v1.2.3