From: Jaedon Shin <jaedon.shin@gmail.com>
To: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
Cc: linux-media@vger.kernel.org, linux-kernel@vger.kernel.org,
Jaedon Shin <jaedon.shin@gmail.com>
Subject: [PATCH RESEND] media: dmxdev: fix possible race condition
Date: Wed, 25 Mar 2015 14:37:06 +0900
Message-ID: <1427261826-20208-1-git-send-email-jaedon.shin@gmail.com>
This patch fixes a race condition between dvb_dmxdev_buffer_read and
dvb_demux_ioctl.

Executing DMX_ADD_PID or DMX_REMOVE_PID in dvb_demux_ioctl races with
dvb_demux_read while the latter is waiting for data in
wait_event_interruptible with the mutex held. Fix this by releasing the
mutex before sleeping and re-acquiring it after waking up, and by
splitting the DVR read path out of dvb_dmxdev_buffer_read into
dvb_dvr_read.
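In outline, the wait in the read loop becomes the following pattern
(a simplified sketch of the hunks below, shown for the filter case;
dvb_dvr_read does the same with dmxdev->mutex):

	if (dvb_ringbuffer_empty(src)) {
		/* drop the lock so ioctls such as DMX_ADD_PID can run */
		mutex_unlock(&dmxdevfilter->mutex);

		if (non_blocking)
			return -EWOULDBLOCK;

		ret = wait_event_interruptible(src->queue,
					       !dvb_ringbuffer_empty(src) ||
					       (src->error != 0));
		if (ret < 0)
			return ret;

		/* retake the lock before touching the buffer again */
		if (mutex_lock_interruptible(&dmxdevfilter->mutex))
			return -ERESTARTSYS;
	}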
Signed-off-by: Jaedon Shin <jaedon.shin@gmail.com>
---
drivers/media/dvb-core/dmxdev.c | 94 ++++++++++++++++++++++++++++++++---------
1 file changed, 75 insertions(+), 19 deletions(-)
diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c
index abff803ad69a..c2564b0e389b 100644
--- a/drivers/media/dvb-core/dmxdev.c
+++ b/drivers/media/dvb-core/dmxdev.c
@@ -57,10 +57,11 @@ static int dvb_dmxdev_buffer_write(struct dvb_ringbuffer *buf,
 	return dvb_ringbuffer_write(buf, src, len);
 }
 
-static ssize_t dvb_dmxdev_buffer_read(struct dvb_ringbuffer *src,
+static ssize_t dvb_dmxdev_buffer_read(struct dmxdev_filter *dmxdevfilter,
 				      int non_blocking, char __user *buf,
 				      size_t count, loff_t *ppos)
 {
+	struct dvb_ringbuffer *src = &dmxdevfilter->buffer;
 	size_t todo;
 	ssize_t avail;
 	ssize_t ret = 0;
@@ -75,16 +76,21 @@ static ssize_t dvb_dmxdev_buffer_read(struct dvb_ringbuffer *src,
 	}
 
 	for (todo = count; todo > 0; todo -= ret) {
-		if (non_blocking && dvb_ringbuffer_empty(src)) {
-			ret = -EWOULDBLOCK;
-			break;
-		}
+		if (dvb_ringbuffer_empty(src)) {
+			mutex_unlock(&dmxdevfilter->mutex);
 
-		ret = wait_event_interruptible(src->queue,
-					       !dvb_ringbuffer_empty(src) ||
-					       (src->error != 0));
-		if (ret < 0)
-			break;
+			if (non_blocking)
+				return -EWOULDBLOCK;
+
+			ret = wait_event_interruptible(src->queue,
+						       !dvb_ringbuffer_empty(src) ||
+						       (src->error != 0));
+			if (ret < 0)
+				return ret;
+
+			if (mutex_lock_interruptible(&dmxdevfilter->mutex))
+				return -ERESTARTSYS;
+		}
 
 		if (src->error) {
 			ret = src->error;
@@ -242,13 +248,63 @@ static ssize_t dvb_dvr_read(struct file *file, char __user *buf, size_t count,
 {
 	struct dvb_device *dvbdev = file->private_data;
 	struct dmxdev *dmxdev = dvbdev->priv;
+	struct dvb_ringbuffer *src = &dmxdev->dvr_buffer;
+	size_t todo;
+	ssize_t avail;
+	ssize_t ret = 0;
 
-	if (dmxdev->exit)
+	if (mutex_lock_interruptible(&dmxdev->mutex))
+		return -ERESTARTSYS;
+
+	if (dmxdev->exit) {
+		mutex_unlock(&dmxdev->mutex);
 		return -ENODEV;
+	}
+
+	if (src->error) {
+		ret = src->error;
+		dvb_ringbuffer_flush(src);
+		mutex_unlock(&dmxdev->mutex);
+		return ret;
+	}
+
+	for (todo = count; todo > 0; todo -= ret) {
+		if (dvb_ringbuffer_empty(src)) {
+			mutex_unlock(&dmxdev->mutex);
 
-	return dvb_dmxdev_buffer_read(&dmxdev->dvr_buffer,
-				      file->f_flags & O_NONBLOCK,
-				      buf, count, ppos);
+			if (file->f_flags & O_NONBLOCK)
+				return -EWOULDBLOCK;
+
+			ret = wait_event_interruptible(src->queue,
+						       !dvb_ringbuffer_empty(src) ||
+						       (src->error != 0));
+			if (ret < 0)
+				return ret;
+
+			if (mutex_lock_interruptible(&dmxdev->mutex))
+				return -ERESTARTSYS;
+		}
+
+		if (src->error) {
+			ret = src->error;
+			dvb_ringbuffer_flush(src);
+			break;
+		}
+
+		avail = dvb_ringbuffer_avail(src);
+		if (avail > todo)
+			avail = todo;
+
+		ret = dvb_ringbuffer_read_user(src, buf, avail);
+		if (ret < 0)
+			break;
+
+		buf += ret;
+	}
+
+	mutex_unlock(&dmxdev->mutex);
+
+	return (count - todo) ? (count - todo) : ret;
 }
 
 static int dvb_dvr_set_buffer_size(struct dmxdev *dmxdev,
@@ -283,7 +339,6 @@ static int dvb_dvr_set_buffer_size(struct dmxdev *dmxdev,
 	return 0;
 }
 
-
 static inline void dvb_dmxdev_filter_state_set(struct dmxdev_filter
 					       *dmxdevfilter, int state)
 {
@@ -904,7 +959,7 @@ static ssize_t dvb_dmxdev_read_sec(struct dmxdev_filter *dfil,
 		hcount = 3 + dfil->todo;
 		if (hcount > count)
 			hcount = count;
-		result = dvb_dmxdev_buffer_read(&dfil->buffer,
+		result = dvb_dmxdev_buffer_read(dfil,
 						file->f_flags & O_NONBLOCK,
 						buf, hcount, ppos);
 		if (result < 0) {
@@ -925,7 +980,7 @@ static ssize_t dvb_dmxdev_read_sec(struct dmxdev_filter *dfil,
 	}
 	if (count > dfil->todo)
 		count = dfil->todo;
-	result = dvb_dmxdev_buffer_read(&dfil->buffer,
+	result = dvb_dmxdev_buffer_read(dfil,
 					file->f_flags & O_NONBLOCK,
 					buf, count, ppos);
 	if (result < 0)
@@ -947,11 +1002,12 @@ dvb_demux_read(struct file *file, char __user *buf, size_t count,
 	if (dmxdevfilter->type == DMXDEV_TYPE_SEC)
 		ret = dvb_dmxdev_read_sec(dmxdevfilter, file, buf, count, ppos);
 	else
-		ret = dvb_dmxdev_buffer_read(&dmxdevfilter->buffer,
+		ret = dvb_dmxdev_buffer_read(dmxdevfilter,
 					     file->f_flags & O_NONBLOCK,
 					     buf, count, ppos);
 
-	mutex_unlock(&dmxdevfilter->mutex);
+	if (mutex_is_locked(&dmxdevfilter->mutex))
+		mutex_unlock(&dmxdevfilter->mutex);
 
 	return ret;
 }
--
2.3.4