2
0
mirror of https://github.com/edk2-porting/linux-next.git synced 2024-12-12 07:14:03 +08:00

iio:kfifo: Protect against concurrent access from userspace

It is possible for userspace to concurrently access the buffer from multiple
threads or processes. To avoid corruption of the internal state of the buffer we
need to add proper locking. It is possible for multiple processes to try to read
from the buffer concurrently and it is also possible that one process causes a
buffer re-allocation while a different process still accesses the buffer. Both can
be fixed by protecting the calls to kfifo_to_user() and kfifo_alloc() by the
same mutex. In iio_read_first_n_kfifo() we also use kfifo_recsize() instead of
the buffers bytes_per_datum to avoid a race that can happen if bytes_per_datum
has been changed, but the buffer has not been reallocated yet.

Note that all access to the buffer from within the kernel is already properly
synchronized, so there is no need for extra locking in iio_store_to_kfifo().

Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
Signed-off-by: Jonathan Cameron <jic23@kernel.org>
This commit is contained in:
Lars-Peter Clausen 2013-10-15 09:30:00 +01:00 committed by Jonathan Cameron
parent f6c23f4839
commit 0894d80dfd

View File

@@ -12,6 +12,7 @@
 struct iio_kfifo {
 	struct iio_buffer buffer;
 	struct kfifo kf;
+	struct mutex user_lock;
 	int update_needed;
 };
@@ -34,10 +35,12 @@ static int iio_request_update_kfifo(struct iio_buffer *r)
 	if (!buf->update_needed)
 		goto error_ret;
+	mutex_lock(&buf->user_lock);
 	kfifo_free(&buf->kf);
 	ret = __iio_allocate_kfifo(buf, buf->buffer.bytes_per_datum,
		buf->buffer.length);
 	r->stufftoread = false;
+	mutex_unlock(&buf->user_lock);
 error_ret:
 	return ret;
 }
@@ -114,12 +117,13 @@ static int iio_read_first_n_kfifo(struct iio_buffer *r,
 	int ret, copied;
 	struct iio_kfifo *kf = iio_to_kfifo(r);

-	if (n < r->bytes_per_datum || r->bytes_per_datum == 0)
-		return -EINVAL;
+	if (mutex_lock_interruptible(&kf->user_lock))
+		return -ERESTARTSYS;

-	ret = kfifo_to_user(&kf->kf, buf, n, &copied);
-	if (ret < 0)
-		return ret;
+	if (!kfifo_initialized(&kf->kf) || n < kfifo_esize(&kf->kf))
+		ret = -EINVAL;
+	else
+		ret = kfifo_to_user(&kf->kf, buf, n, &copied);

 	if (kfifo_is_empty(&kf->kf))
 		r->stufftoread = false;
@@ -127,6 +131,10 @@ static int iio_read_first_n_kfifo(struct iio_buffer *r,
 	if (!kfifo_is_empty(&kf->kf))
 		r->stufftoread = true;
+	mutex_unlock(&kf->user_lock);
+
+	if (ret < 0)
+		return ret;

 	return copied;
 }
@ -134,6 +142,7 @@ static void iio_kfifo_buffer_release(struct iio_buffer *buffer)
{ {
struct iio_kfifo *kf = iio_to_kfifo(buffer); struct iio_kfifo *kf = iio_to_kfifo(buffer);
mutex_destroy(&kf->user_lock);
kfifo_free(&kf->kf); kfifo_free(&kf->kf);
kfree(kf); kfree(kf);
} }
@@ -161,6 +170,7 @@ struct iio_buffer *iio_kfifo_allocate(struct iio_dev *indio_dev)
 	kf->buffer.attrs = &iio_kfifo_attribute_group;
 	kf->buffer.access = &kfifo_access_funcs;
 	kf->buffer.length = 2;
+	mutex_init(&kf->user_lock);
 	return &kf->buffer;
 }
EXPORT_SYMBOL(iio_kfifo_allocate);