path: root/drivers/iio/industrialio-buffer.c
Diffstat (limited to 'drivers/iio/industrialio-buffer.c')
-rw-r--r-- drivers/iio/industrialio-buffer.c | 335
1 file changed, 242 insertions(+), 93 deletions(-)
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index 2710f7245c3..9f1a1400990 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -20,6 +20,7 @@
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>
+#include <linux/sched.h>
#include <linux/iio/iio.h>
#include "iio_core.h"
@@ -31,16 +32,17 @@ static const char * const iio_endian_prefix[] = {
[IIO_LE] = "le",
};
-static bool iio_buffer_is_active(struct iio_dev *indio_dev,
- struct iio_buffer *buf)
+static bool iio_buffer_is_active(struct iio_buffer *buf)
{
- struct list_head *p;
+ return !list_empty(&buf->buffer_list);
+}
- list_for_each(p, &indio_dev->buffer_list)
- if (p == &buf->buffer_list)
- return true;
+static bool iio_buffer_data_available(struct iio_buffer *buf)
+{
+ if (buf->access->data_available)
+ return buf->access->data_available(buf);
- return false;
+ return buf->stufftoread;
}
/**
@@ -54,10 +56,34 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
{
struct iio_dev *indio_dev = filp->private_data;
struct iio_buffer *rb = indio_dev->buffer;
+ int ret;
+
+ if (!indio_dev->info)
+ return -ENODEV;
if (!rb || !rb->access->read_first_n)
return -EINVAL;
- return rb->access->read_first_n(rb, n, buf);
+
+ do {
+ if (!iio_buffer_data_available(rb)) {
+ if (filp->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ ret = wait_event_interruptible(rb->pollq,
+ iio_buffer_data_available(rb) ||
+ indio_dev->info == NULL);
+ if (ret)
+ return ret;
+ if (indio_dev->info == NULL)
+ return -ENODEV;
+ }
+
+ ret = rb->access->read_first_n(rb, n, buf);
+ if (ret == 0 && (filp->f_flags & O_NONBLOCK))
+ ret = -EAGAIN;
+ } while (ret == 0);
+
+ return ret;
}
/**
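The rewritten read path above turns read() into a blocking call: it sleeps on rb->pollq until iio_buffer_data_available() reports data or the device goes away, and only returns -EAGAIN for O_NONBLOCK descriptors. A minimal userspace sketch of the resulting behaviour follows; the /dev/iio:device0 node and the 64-byte read size are assumptions for illustration only, a real consumer would derive the scan size from the enabled channels in sysfs.

/* Illustrative userspace sketch, not part of the patch. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char data[64];                                 /* assumed scan size */
	int fd = open("/dev/iio:device0", O_RDONLY);   /* assumed device node */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/*
	 * Without O_NONBLOCK this read() now sleeps in
	 * wait_event_interruptible() until data is available or the
	 * device is unregistered (-ENODEV).
	 */
	ssize_t n = read(fd, data, sizeof(data));
	if (n < 0)
		perror("read");
	else
		printf("read %zd bytes\n", n);

	close(fd);
	return 0;
}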
@@ -69,17 +95,37 @@ unsigned int iio_buffer_poll(struct file *filp,
struct iio_dev *indio_dev = filp->private_data;
struct iio_buffer *rb = indio_dev->buffer;
+ if (!indio_dev->info)
+ return -ENODEV;
+
poll_wait(filp, &rb->pollq, wait);
- if (rb->stufftoread)
+ if (iio_buffer_data_available(rb))
return POLLIN | POLLRDNORM;
/* need a way of knowing if there may be enough data... */
return 0;
}
+/**
+ * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
+ * @indio_dev: The IIO device
+ *
+ * Wakes up the event waitqueue used for poll(). Should usually
+ * be called when the device is unregistered.
+ */
+void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
+{
+ if (!indio_dev->buffer)
+ return;
+
+ wake_up(&indio_dev->buffer->pollq);
+}
+
void iio_buffer_init(struct iio_buffer *buffer)
{
INIT_LIST_HEAD(&buffer->demux_list);
+ INIT_LIST_HEAD(&buffer->buffer_list);
init_waitqueue_head(&buffer->pollq);
+ kref_init(&buffer->ref);
}
EXPORT_SYMBOL(iio_buffer_init);
@@ -104,7 +150,16 @@ static ssize_t iio_show_fixed_type(struct device *dev,
type = IIO_BE;
#endif
}
- return sprintf(buf, "%s:%c%d/%d>>%u\n",
+ if (this_attr->c->scan_type.repeat > 1)
+ return sprintf(buf, "%s:%c%d/%dX%d>>%u\n",
+ iio_endian_prefix[type],
+ this_attr->c->scan_type.sign,
+ this_attr->c->scan_type.realbits,
+ this_attr->c->scan_type.storagebits,
+ this_attr->c->scan_type.repeat,
+ this_attr->c->scan_type.shift);
+ else
+ return sprintf(buf, "%s:%c%d/%d>>%u\n",
iio_endian_prefix[type],
this_attr->c->scan_type.sign,
this_attr->c->scan_type.realbits,
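With the new repeat field, the scan element type attribute gains an "X<repeat>" component between the storage size and the shift. A tiny illustrative sketch of the two output forms; the channel parameters (little-endian, signed, 12 valid bits in 16, shift 4, repeat 2) are assumed, not taken from the patch.

#include <stdio.h>

int main(void)
{
	printf("le:s12/16>>4\n");      /* repeat <= 1: unchanged format */
	printf("le:s12/16X2>>4\n");    /* repeat == 2: new "X<repeat>" form */
	return 0;
}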
@@ -119,7 +174,8 @@ static ssize_t iio_scan_el_show(struct device *dev,
int ret;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- ret = test_bit(to_iio_dev_attr(attr)->address,
+ /* Ensure ret is 0 or 1. */
+ ret = !!test_bit(to_iio_dev_attr(attr)->address,
indio_dev->buffer->scan_mask);
return sprintf(buf, "%d\n", ret);
@@ -146,7 +202,7 @@ static ssize_t iio_scan_el_store(struct device *dev,
if (ret < 0)
return ret;
mutex_lock(&indio_dev->mlock);
- if (iio_buffer_is_active(indio_dev, indio_dev->buffer)) {
+ if (iio_buffer_is_active(indio_dev->buffer)) {
ret = -EBUSY;
goto error_ret;
}
@@ -192,7 +248,7 @@ static ssize_t iio_scan_el_ts_store(struct device *dev,
return ret;
mutex_lock(&indio_dev->mlock);
- if (iio_buffer_is_active(indio_dev, indio_dev->buffer)) {
+ if (iio_buffer_is_active(indio_dev->buffer)) {
ret = -EBUSY;
goto error_ret;
}
@@ -214,11 +270,11 @@ static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
&iio_show_scan_index,
NULL,
0,
- 0,
+ IIO_SEPARATE,
&indio_dev->dev,
&buffer->scan_el_dev_attr_list);
if (ret)
- goto error_ret;
+ return ret;
attrcount++;
ret = __iio_add_chan_devattr("type",
chan,
@@ -229,7 +285,7 @@ static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
&indio_dev->dev,
&buffer->scan_el_dev_attr_list);
if (ret)
- goto error_ret;
+ return ret;
attrcount++;
if (chan->type != IIO_TIMESTAMP)
ret = __iio_add_chan_devattr("en",
@@ -249,29 +305,13 @@ static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
0,
&indio_dev->dev,
&buffer->scan_el_dev_attr_list);
+ if (ret)
+ return ret;
attrcount++;
ret = attrcount;
-error_ret:
return ret;
}
-static void iio_buffer_remove_and_free_scan_dev_attr(struct iio_dev *indio_dev,
- struct iio_dev_attr *p)
-{
- kfree(p->dev_attr.attr.name);
- kfree(p);
-}
-
-static void __iio_buffer_attr_cleanup(struct iio_dev *indio_dev)
-{
- struct iio_dev_attr *p, *n;
- struct iio_buffer *buffer = indio_dev->buffer;
-
- list_for_each_entry_safe(p, n,
- &buffer->scan_el_dev_attr_list, l)
- iio_buffer_remove_and_free_scan_dev_attr(indio_dev, p);
-}
-
static const char * const iio_scan_elements_group_name = "scan_elements";
int iio_buffer_register(struct iio_dev *indio_dev,
@@ -348,7 +388,7 @@ int iio_buffer_register(struct iio_dev *indio_dev,
error_free_scan_mask:
kfree(buffer->scan_mask);
error_cleanup_dynamic:
- __iio_buffer_attr_cleanup(indio_dev);
+ iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);
return ret;
}
@@ -358,7 +398,7 @@ void iio_buffer_unregister(struct iio_dev *indio_dev)
{
kfree(indio_dev->buffer->scan_mask);
kfree(indio_dev->buffer->scan_el_group.attrs);
- __iio_buffer_attr_cleanup(indio_dev);
+ iio_free_chan_devattr_list(&indio_dev->buffer->scan_el_dev_attr_list);
}
EXPORT_SYMBOL(iio_buffer_unregister);
@@ -396,7 +436,7 @@ ssize_t iio_buffer_write_length(struct device *dev,
return len;
mutex_lock(&indio_dev->mlock);
- if (iio_buffer_is_active(indio_dev, indio_dev->buffer)) {
+ if (iio_buffer_is_active(indio_dev->buffer)) {
ret = -EBUSY;
} else {
if (buffer->access->set_length)
@@ -414,13 +454,11 @@ ssize_t iio_buffer_show_enable(struct device *dev,
char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- return sprintf(buf, "%d\n",
- iio_buffer_is_active(indio_dev,
- indio_dev->buffer));
+ return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev->buffer));
}
EXPORT_SYMBOL(iio_buffer_show_enable);
-/* note NULL used as error indicator as it doesn't make sense. */
+/* Note NULL used as error indicator as it doesn't make sense. */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
unsigned int masklength,
const unsigned long *mask)
@@ -435,8 +473,8 @@ static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
return NULL;
}
-static int iio_compute_scan_bytes(struct iio_dev *indio_dev, const long *mask,
- bool timestamp)
+static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
+ const unsigned long *mask, bool timestamp)
{
const struct iio_chan_spec *ch;
unsigned bytes = 0;
@@ -446,20 +484,41 @@ static int iio_compute_scan_bytes(struct iio_dev *indio_dev, const long *mask,
for_each_set_bit(i, mask,
indio_dev->masklength) {
ch = iio_find_channel_from_si(indio_dev, i);
- length = ch->scan_type.storagebits / 8;
+ if (ch->scan_type.repeat > 1)
+ length = ch->scan_type.storagebits / 8 *
+ ch->scan_type.repeat;
+ else
+ length = ch->scan_type.storagebits / 8;
bytes = ALIGN(bytes, length);
bytes += length;
}
if (timestamp) {
ch = iio_find_channel_from_si(indio_dev,
indio_dev->scan_index_timestamp);
- length = ch->scan_type.storagebits / 8;
+ if (ch->scan_type.repeat > 1)
+ length = ch->scan_type.storagebits / 8 *
+ ch->scan_type.repeat;
+ else
+ length = ch->scan_type.storagebits / 8;
bytes = ALIGN(bytes, length);
bytes += length;
}
return bytes;
}
+static void iio_buffer_activate(struct iio_dev *indio_dev,
+ struct iio_buffer *buffer)
+{
+ iio_buffer_get(buffer);
+ list_add(&buffer->buffer_list, &indio_dev->buffer_list);
+}
+
+static void iio_buffer_deactivate(struct iio_buffer *buffer)
+{
+ list_del_init(&buffer->buffer_list);
+ iio_buffer_put(buffer);
+}
+
void iio_disable_all_buffers(struct iio_dev *indio_dev)
{
struct iio_buffer *buffer, *_buffer;
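iio_compute_scan_bytes() above places each enabled channel at an offset aligned to its own repeat-scaled length and then appends the timestamp the same way. A worked sketch under assumed channel parameters, one u16 channel with repeat=4 followed by a 64-bit timestamp:

#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a))

int main(void)
{
	unsigned int bytes = 0, length;

	length = 16 / 8 * 4;                      /* storagebits / 8 * repeat = 8 */
	bytes = ALIGN(bytes, length) + length;    /* 0 -> 8 */

	length = 64 / 8;                          /* timestamp: 8 bytes */
	bytes = ALIGN(bytes, length) + length;    /* 8 -> 16 */

	printf("bytes per scan: %u\n", bytes);    /* prints 16 */
	return 0;
}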
@@ -472,14 +531,31 @@ void iio_disable_all_buffers(struct iio_dev *indio_dev)
list_for_each_entry_safe(buffer, _buffer,
&indio_dev->buffer_list, buffer_list)
- list_del_init(&buffer->buffer_list);
+ iio_buffer_deactivate(buffer);
indio_dev->currentmode = INDIO_DIRECT_MODE;
if (indio_dev->setup_ops->postdisable)
indio_dev->setup_ops->postdisable(indio_dev);
+
+ if (indio_dev->available_scan_masks == NULL)
+ kfree(indio_dev->active_scan_mask);
}
-int iio_update_buffers(struct iio_dev *indio_dev,
+static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
+ struct iio_buffer *buffer)
+{
+ unsigned int bytes;
+
+ if (!buffer->access->set_bytes_per_datum)
+ return;
+
+ bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
+ buffer->scan_timestamp);
+
+ buffer->access->set_bytes_per_datum(buffer, bytes);
+}
+
+static int __iio_update_buffers(struct iio_dev *indio_dev,
struct iio_buffer *insert_buffer,
struct iio_buffer *remove_buffer)
{
@@ -494,13 +570,13 @@ int iio_update_buffers(struct iio_dev *indio_dev,
if (indio_dev->setup_ops->predisable) {
ret = indio_dev->setup_ops->predisable(indio_dev);
if (ret)
- goto error_ret;
+ return ret;
}
indio_dev->currentmode = INDIO_DIRECT_MODE;
if (indio_dev->setup_ops->postdisable) {
ret = indio_dev->setup_ops->postdisable(indio_dev);
if (ret)
- goto error_ret;
+ return ret;
}
}
/* Keep a copy of current setup to allow roll back */
@@ -509,9 +585,9 @@ int iio_update_buffers(struct iio_dev *indio_dev,
indio_dev->active_scan_mask = NULL;
if (remove_buffer)
- list_del(&remove_buffer->buffer_list);
+ iio_buffer_deactivate(remove_buffer);
if (insert_buffer)
- list_add(&insert_buffer->buffer_list, &indio_dev->buffer_list);
+ iio_buffer_activate(indio_dev, insert_buffer);
/* If no buffers in list, we are done */
if (list_empty(&indio_dev->buffer_list)) {
@@ -521,7 +597,7 @@ int iio_update_buffers(struct iio_dev *indio_dev,
return 0;
}
- /* What scan mask do we actually have ?*/
+ /* What scan mask do we actually have? */
compound_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
sizeof(long), GFP_KERNEL);
if (compound_mask == NULL) {
@@ -546,7 +622,7 @@ int iio_update_buffers(struct iio_dev *indio_dev,
* Roll back.
* Note can only occur when adding a buffer.
*/
- list_del(&insert_buffer->buffer_list);
+ iio_buffer_deactivate(insert_buffer);
if (old_mask) {
indio_dev->active_scan_mask = old_mask;
success = -EINVAL;
@@ -554,7 +630,7 @@ int iio_update_buffers(struct iio_dev *indio_dev,
else {
kfree(compound_mask);
ret = -EINVAL;
- goto error_ret;
+ return ret;
}
}
} else {
@@ -576,7 +652,8 @@ int iio_update_buffers(struct iio_dev *indio_dev,
iio_compute_scan_bytes(indio_dev,
indio_dev->active_scan_mask,
indio_dev->scan_timestamp);
- list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
+ list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
+ iio_buffer_update_bytes_per_datum(indio_dev, buffer);
if (buffer->access->request_update) {
ret = buffer->access->request_update(buffer);
if (ret) {
@@ -585,6 +662,7 @@ int iio_update_buffers(struct iio_dev *indio_dev,
goto error_run_postdisable;
}
}
+ }
if (indio_dev->info->update_scan_mode) {
ret = indio_dev->info
->update_scan_mode(indio_dev,
@@ -594,7 +672,7 @@ int iio_update_buffers(struct iio_dev *indio_dev,
goto error_run_postdisable;
}
}
- /* Definitely possible for devices to support both of these.*/
+ /* Definitely possible for devices to support both of these. */
if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
if (!indio_dev->trig) {
printk(KERN_INFO "Buffer not started: no trigger\n");
@@ -605,7 +683,7 @@ int iio_update_buffers(struct iio_dev *indio_dev,
indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
} else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
- } else { /* should never be reached */
+ } else { /* Should never be reached */
ret = -EINVAL;
goto error_run_postdisable;
}
@@ -635,12 +713,46 @@ error_run_postdisable:
if (indio_dev->setup_ops->postdisable)
indio_dev->setup_ops->postdisable(indio_dev);
error_remove_inserted:
-
if (insert_buffer)
- list_del(&insert_buffer->buffer_list);
+ iio_buffer_deactivate(insert_buffer);
indio_dev->active_scan_mask = old_mask;
kfree(compound_mask);
-error_ret:
+ return ret;
+}
+
+int iio_update_buffers(struct iio_dev *indio_dev,
+ struct iio_buffer *insert_buffer,
+ struct iio_buffer *remove_buffer)
+{
+ int ret;
+
+ if (insert_buffer == remove_buffer)
+ return 0;
+
+ mutex_lock(&indio_dev->info_exist_lock);
+ mutex_lock(&indio_dev->mlock);
+
+ if (insert_buffer && iio_buffer_is_active(insert_buffer))
+ insert_buffer = NULL;
+
+ if (remove_buffer && !iio_buffer_is_active(remove_buffer))
+ remove_buffer = NULL;
+
+ if (!insert_buffer && !remove_buffer) {
+ ret = 0;
+ goto out_unlock;
+ }
+
+ if (indio_dev->info == NULL) {
+ ret = -ENODEV;
+ goto out_unlock;
+ }
+
+ ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);
+
+out_unlock:
+ mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&indio_dev->info_exist_lock);
return ret;
}
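Splitting the function into an unlocked __iio_update_buffers() and a locked iio_update_buffers() wrapper gives in-kernel callers a single entry point that takes info_exist_lock and mlock in a fixed order, ignores no-op requests, and fails with -ENODEV once the device has been unregistered. A hedged sketch of how a hypothetical in-kernel consumer might use the wrapper; only iio_update_buffers() itself comes from this file.

#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>

static int my_consumer_attach(struct iio_dev *indio_dev,
			      struct iio_buffer *buffer)
{
	/* Locks internally, skips an already-active buffer, and returns
	 * -ENODEV if the device was unregistered in the meantime. */
	return iio_update_buffers(indio_dev, buffer, NULL);
}

static int my_consumer_detach(struct iio_dev *indio_dev,
			      struct iio_buffer *buffer)
{
	return iio_update_buffers(indio_dev, NULL, buffer);
}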
@@ -654,7 +766,6 @@ ssize_t iio_buffer_store_enable(struct device *dev,
int ret;
bool requested_state;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct iio_buffer *pbuf = indio_dev->buffer;
bool inlist;
ret = strtobool(buf, &requested_state);
@@ -664,16 +775,16 @@ ssize_t iio_buffer_store_enable(struct device *dev,
mutex_lock(&indio_dev->mlock);
/* Find out if it is in the list */
- inlist = iio_buffer_is_active(indio_dev, pbuf);
+ inlist = iio_buffer_is_active(indio_dev->buffer);
/* Already in desired state */
if (inlist == requested_state)
goto done;
if (requested_state)
- ret = iio_update_buffers(indio_dev,
+ ret = __iio_update_buffers(indio_dev,
indio_dev->buffer, NULL);
else
- ret = iio_update_buffers(indio_dev,
+ ret = __iio_update_buffers(indio_dev,
NULL, indio_dev->buffer);
if (ret < 0)
@@ -684,24 +795,6 @@ done:
}
EXPORT_SYMBOL(iio_buffer_store_enable);
-int iio_sw_buffer_preenable(struct iio_dev *indio_dev)
-{
- struct iio_buffer *buffer;
- unsigned bytes;
- dev_dbg(&indio_dev->dev, "%s\n", __func__);
-
- list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
- if (buffer->access->set_bytes_per_datum) {
- bytes = iio_compute_scan_bytes(indio_dev,
- buffer->scan_mask,
- buffer->scan_timestamp);
-
- buffer->access->set_bytes_per_datum(buffer, bytes);
- }
- return 0;
-}
-EXPORT_SYMBOL(iio_sw_buffer_preenable);
-
/**
* iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
* @indio_dev: the iio device
@@ -729,6 +822,7 @@ static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
/**
* iio_scan_mask_set() - set particular bit in the scan mask
+ * @indio_dev: the iio device
* @buffer: the buffer whose scan mask we are interested in
* @bit: the bit to be set.
*
@@ -749,7 +843,7 @@ int iio_scan_mask_set(struct iio_dev *indio_dev,
if (trialmask == NULL)
return -ENOMEM;
if (!indio_dev->masklength) {
- WARN_ON("trying to set scanmask prior to registering buffer\n");
+ WARN_ON("Trying to set scanmask prior to registering buffer\n");
goto err_invalid_mask;
}
bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
@@ -786,7 +880,8 @@ int iio_scan_mask_query(struct iio_dev *indio_dev,
if (!buffer->scan_mask)
return 0;
- return test_bit(bit, buffer->scan_mask);
+ /* Ensure return value is 0 or 1. */
+ return !!test_bit(bit, buffer->scan_mask);
};
EXPORT_SYMBOL_GPL(iio_scan_mask_query);
@@ -804,8 +899,8 @@ struct iio_demux_table {
struct list_head l;
};
-static unsigned char *iio_demux(struct iio_buffer *buffer,
- unsigned char *datain)
+static const void *iio_demux(struct iio_buffer *buffer,
+ const void *datain)
{
struct iio_demux_table *t;
@@ -818,9 +913,9 @@ static unsigned char *iio_demux(struct iio_buffer *buffer,
return buffer->demux_bounce;
}
-static int iio_push_to_buffer(struct iio_buffer *buffer, unsigned char *data)
+static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
{
- unsigned char *dataout = iio_demux(buffer, data);
+ const void *dataout = iio_demux(buffer, data);
return buffer->access->store_to(buffer, dataout);
}
@@ -835,7 +930,7 @@ static void iio_buffer_demux_free(struct iio_buffer *buffer)
}
-int iio_push_to_buffers(struct iio_dev *indio_dev, unsigned char *data)
+int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
{
int ret;
struct iio_buffer *buf;
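Changing iio_push_to_buffers() to take a const void * lets drivers hand their scan storage to the core without casts. A hedged driver-side sketch of a trigger handler doing so; the driver structure, field layout, and handler name are assumptions, only the IIO and IRQ calls are real.

#include <linux/interrupt.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
#include <linux/iio/trigger_consumer.h>

struct my_adc_state {                    /* hypothetical driver state */
	/* 2 x 16-bit samples + padding + 64-bit timestamp */
	__le16 scan[8] __aligned(8);
};

static irqreturn_t my_adc_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	struct my_adc_state *st = iio_priv(indio_dev);

	/* ... read the hardware and fill st->scan here ... */

	iio_push_to_buffers(indio_dev, st->scan);

	iio_trigger_notify_done(indio_dev->trig);
	return IRQ_HANDLED;
}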
@@ -871,7 +966,7 @@ static int iio_buffer_update_demux(struct iio_dev *indio_dev,
/* Now we have the two masks, work from least sig and build up sizes */
for_each_set_bit(out_ind,
- indio_dev->active_scan_mask,
+ buffer->scan_mask,
indio_dev->masklength) {
in_ind = find_next_bit(indio_dev->active_scan_mask,
indio_dev->masklength,
@@ -881,7 +976,11 @@ static int iio_buffer_update_demux(struct iio_dev *indio_dev,
indio_dev->masklength,
in_ind + 1);
ch = iio_find_channel_from_si(indio_dev, in_ind);
- length = ch->scan_type.storagebits/8;
+ if (ch->scan_type.repeat > 1)
+ length = ch->scan_type.storagebits / 8 *
+ ch->scan_type.repeat;
+ else
+ length = ch->scan_type.storagebits / 8;
/* Make sure we are aligned */
in_loc += length;
if (in_loc % length)
@@ -893,7 +992,11 @@ static int iio_buffer_update_demux(struct iio_dev *indio_dev,
goto error_clear_mux_table;
}
ch = iio_find_channel_from_si(indio_dev, in_ind);
- length = ch->scan_type.storagebits/8;
+ if (ch->scan_type.repeat > 1)
+ length = ch->scan_type.storagebits / 8 *
+ ch->scan_type.repeat;
+ else
+ length = ch->scan_type.storagebits / 8;
if (out_loc % length)
out_loc += length - out_loc % length;
if (in_loc % length)
@@ -914,7 +1017,11 @@ static int iio_buffer_update_demux(struct iio_dev *indio_dev,
}
ch = iio_find_channel_from_si(indio_dev,
indio_dev->scan_index_timestamp);
- length = ch->scan_type.storagebits/8;
+ if (ch->scan_type.repeat > 1)
+ length = ch->scan_type.storagebits / 8 *
+ ch->scan_type.repeat;
+ else
+ length = ch->scan_type.storagebits / 8;
if (out_loc % length)
out_loc += length - out_loc % length;
if (in_loc % length)
@@ -958,3 +1065,45 @@ error_clear_mux_table:
return ret;
}
EXPORT_SYMBOL_GPL(iio_update_demux);
+
+/**
+ * iio_buffer_release() - Free a buffer's resources
+ * @ref: Pointer to the kref embedded in the iio_buffer struct
+ *
+ * This function is called when the last reference to the buffer has been
+ * dropped. It will typically free all resources allocated by the buffer. Do not
+ * call this function manually; always use iio_buffer_put() when done using a
+ * buffer.
+ */
+static void iio_buffer_release(struct kref *ref)
+{
+ struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);
+
+ buffer->access->release(buffer);
+}
+
+/**
+ * iio_buffer_get() - Grab a reference to the buffer
+ * @buffer: The buffer to grab a reference for, may be NULL
+ *
+ * Returns the pointer to the buffer that was passed into the function.
+ */
+struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
+{
+ if (buffer)
+ kref_get(&buffer->ref);
+
+ return buffer;
+}
+EXPORT_SYMBOL_GPL(iio_buffer_get);
+
+/**
+ * iio_buffer_put() - Release the reference to the buffer
+ * @buffer: The buffer to release the reference for, may be NULL
+ */
+void iio_buffer_put(struct iio_buffer *buffer)
+{
+ if (buffer)
+ kref_put(&buffer->ref, iio_buffer_release);
+}
+EXPORT_SYMBOL_GPL(iio_buffer_put);
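The kref-based helpers above make buffer lifetime explicit: iio_buffer_get() and iio_buffer_put() are NULL-safe, and iio_buffer_release() runs only when the final reference is dropped. A minimal usage sketch, assuming the declarations live in <linux/iio/buffer.h> and a caller that just wants to pin indio_dev->buffer for a while:

#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>

static struct iio_buffer *pin_buffer(struct iio_dev *indio_dev)
{
	/* Takes a reference; safe even if indio_dev->buffer is NULL. */
	return iio_buffer_get(indio_dev->buffer);
}

static void unpin_buffer(struct iio_buffer *buffer)
{
	/* Drops the reference; iio_buffer_release() runs on the last put. */
	iio_buffer_put(buffer);
}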