author		Lars-Peter Clausen <lars@metafoo.de>	2013-10-15 09:30:00 +0100
committer	Jonathan Cameron <jic23@kernel.org>	2013-10-15 19:19:22 +0100
commit		0894d80dfddaeb9f95904ceab623460c1bfdab06 (patch)
tree		7b482dcaa4b4db142fcabfe866ebe9a7829e7148 /drivers/iio
parent		f6c23f483937b8be53f313ec31068acdca91a25d (diff)
iio:kfifo: Protect against concurrent access from userspace
It is possible for userspace to concurrently access the buffer from multiple threads or processes. To avoid corruption of the internal state of the buffer we need to add proper locking. Multiple processes may try to read from the buffer concurrently, and one process may cause a buffer re-allocation while a different process is still accessing the buffer. Both problems can be fixed by protecting the calls to kfifo_to_user() and kfifo_alloc() with the same mutex.

In iio_read_first_n_kfifo() we also use kfifo_esize() instead of the buffer's bytes_per_datum to avoid a race that can happen if bytes_per_datum has been changed but the buffer has not been reallocated yet.

Note that all access to the buffer from within the kernel is already properly synchronized, so there is no need for extra locking in iio_store_to_kfifo().

Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
Signed-off-by: Jonathan Cameron <jic23@kernel.org>
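To illustrate the race the message describes, here is a minimal userspace sketch of the same pattern, assuming a simplified buffer guarded by a pthread mutex. The names (sample_fifo, fifo_read, fifo_resize) are hypothetical and only mirror the roles of iio_read_first_n_kfifo() and iio_request_update_kfifo(); this is not the kernel code itself.

/*
 * Hypothetical userspace analogue of the fix: reads that copy data out
 * of the buffer and the resize path that frees and reallocates it are
 * serialized by the same mutex, so a reader can never observe a
 * half-reallocated buffer or a stale element size.
 */
#include <errno.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>

struct sample_fifo {
	pthread_mutex_t user_lock;	/* serializes readers vs. reallocation */
	unsigned char *data;		/* backing storage */
	size_t bytes_per_datum;		/* current element size */
	size_t len;			/* valid bytes currently stored */
};

/* Mirrors the role of iio_read_first_n_kfifo(): copy out up to n bytes. */
ssize_t fifo_read(struct sample_fifo *f, void *dst, size_t n)
{
	ssize_t ret;

	pthread_mutex_lock(&f->user_lock);
	/* Check the element size under the lock: it may just have changed. */
	if (!f->data || n < f->bytes_per_datum) {
		ret = -EINVAL;
	} else {
		size_t copy = n < f->len ? n : f->len;

		memcpy(dst, f->data, copy);
		f->len -= copy;
		memmove(f->data, f->data + copy, f->len);
		ret = (ssize_t)copy;
	}
	pthread_mutex_unlock(&f->user_lock);

	return ret;
}

/* Mirrors the role of iio_request_update_kfifo(): reallocate the buffer. */
int fifo_resize(struct sample_fifo *f, size_t bytes_per_datum, size_t length)
{
	int ret = 0;

	pthread_mutex_lock(&f->user_lock);
	free(f->data);
	f->data = malloc(bytes_per_datum * length);
	if (!f->data)
		ret = -ENOMEM;
	f->bytes_per_datum = bytes_per_datum;
	f->len = 0;
	pthread_mutex_unlock(&f->user_lock);

	return ret;
}

Because fifo_read() and fifo_resize() take the same lock, a concurrent read either completes against the old buffer or sees the new one in a consistent state; that is what taking user_lock around kfifo_to_user() and around kfifo_free()/__iio_allocate_kfifo() achieves in the patch below.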
Diffstat (limited to 'drivers/iio')
-rw-r--r--	drivers/iio/kfifo_buf.c	20
1 file changed, 15 insertions, 5 deletions
diff --git a/drivers/iio/kfifo_buf.c b/drivers/iio/kfifo_buf.c
index ce51092695a..c95b61f6091 100644
--- a/drivers/iio/kfifo_buf.c
+++ b/drivers/iio/kfifo_buf.c
@@ -12,6 +12,7 @@
 struct iio_kfifo {
 	struct iio_buffer buffer;
 	struct kfifo kf;
+	struct mutex user_lock;
 	int update_needed;
 };
@@ -34,10 +35,12 @@ static int iio_request_update_kfifo(struct iio_buffer *r)
 	if (!buf->update_needed)
 		goto error_ret;
+	mutex_lock(&buf->user_lock);
 	kfifo_free(&buf->kf);
 	ret = __iio_allocate_kfifo(buf, buf->buffer.bytes_per_datum,
 				   buf->buffer.length);
 	r->stufftoread = false;
+	mutex_unlock(&buf->user_lock);
 error_ret:
 	return ret;
 }
@@ -114,12 +117,13 @@ static int iio_read_first_n_kfifo(struct iio_buffer *r,
 	int ret, copied;
 	struct iio_kfifo *kf = iio_to_kfifo(r);
-	if (n < r->bytes_per_datum || r->bytes_per_datum == 0)
-		return -EINVAL;
+	if (mutex_lock_interruptible(&kf->user_lock))
+		return -ERESTARTSYS;
-	ret = kfifo_to_user(&kf->kf, buf, n, &copied);
-	if (ret < 0)
-		return ret;
+	if (!kfifo_initialized(&kf->kf) || n < kfifo_esize(&kf->kf))
+		ret = -EINVAL;
+	else
+		ret = kfifo_to_user(&kf->kf, buf, n, &copied);
 	if (kfifo_is_empty(&kf->kf))
 		r->stufftoread = false;
@@ -127,6 +131,10 @@ static int iio_read_first_n_kfifo(struct iio_buffer *r,
 	if (!kfifo_is_empty(&kf->kf))
 		r->stufftoread = true;
+	mutex_unlock(&kf->user_lock);
+	if (ret < 0)
+		return ret;
+
 	return copied;
 }
@@ -134,6 +142,7 @@ static void iio_kfifo_buffer_release(struct iio_buffer *buffer)
 {
 	struct iio_kfifo *kf = iio_to_kfifo(buffer);
+	mutex_destroy(&kf->user_lock);
 	kfifo_free(&kf->kf);
 	kfree(kf);
 }
@@ -161,6 +170,7 @@ struct iio_buffer *iio_kfifo_allocate(struct iio_dev *indio_dev)
 	kf->buffer.attrs = &iio_kfifo_attribute_group;
 	kf->buffer.access = &kfifo_access_funcs;
 	kf->buffer.length = 2;
+	mutex_init(&kf->user_lock);
 	return &kf->buffer;
 }
 EXPORT_SYMBOL(iio_kfifo_allocate);