linux-iio.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH v2] iio: make blocking read wait for data
@ 2014-06-10 16:26 Josselin Costanzi
  2014-06-12 12:44 ` Lars-Peter Clausen
  2014-06-14 17:08 ` Jonathan Cameron
  0 siblings, 2 replies; 5+ messages in thread
From: Josselin Costanzi @ 2014-06-10 16:26 UTC (permalink / raw)
  To: linux-iio; +Cc: lars, jic23, Josselin Costanzi

Currently the IIO buffer blocking read only waits until at least one
data element is available.
This patch adds the possibility for the userspace to do blocking calls
for multiple elements. This should limit the read() call count when
trying to get data in batches.
This commit also fixes a bug where data is lost if an error happens
after some data is already read.

Signed-off-by: Josselin Costanzi <josselin.costanzi@mobile-devices.fr>
---
 drivers/iio/industrialio-buffer.c | 138 ++++++++++++++++++++++++++++++++++----
 drivers/iio/kfifo_buf.c           |   4 ++
 include/linux/iio/buffer.h        |  40 +++++++++++
 3 files changed, 169 insertions(+), 13 deletions(-)

Changelog:
v2: thanks to Lars-Peter Clausen and Jonathan Cameron
- Avoid breaking default ABI
- Add watermark and timeout properties to buffers

diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index 36b1ae9..1fe0116 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -56,6 +56,10 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
 {
 	struct iio_dev *indio_dev = filp->private_data;
 	struct iio_buffer *rb = indio_dev->buffer;
+	size_t datum_size = rb->access->get_bytes_per_datum(rb);
+	size_t watermark_bytes = rb->low_watermark * datum_size;
+	size_t count = 0;
+	long timeout = rb->timeout;
 	int ret;
 
 	if (!indio_dev->info)
@@ -66,24 +70,35 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
 
 	do {
 		if (!iio_buffer_data_available(rb)) {
-			if (filp->f_flags & O_NONBLOCK)
-				return -EAGAIN;
+			if (filp->f_flags & O_NONBLOCK) {
+				ret = -EAGAIN;
+				break;
+			}
 
-			ret = wait_event_interruptible(rb->pollq,
+			timeout = wait_event_interruptible_timeout(rb->pollq,
 					iio_buffer_data_available(rb) ||
-					indio_dev->info == NULL);
-			if (ret)
-				return ret;
-			if (indio_dev->info == NULL)
-				return -ENODEV;
+					indio_dev->info == NULL,
+					timeout);
+			if (timeout <= 0) {
+				ret = timeout;
+				break;
+			}
+
+			if (indio_dev->info == NULL) {
+				ret = -ENODEV;
+				break;
+			}
 		}
 
-		ret = rb->access->read_first_n(rb, n, buf);
-		if (ret == 0 && (filp->f_flags & O_NONBLOCK))
-			ret = -EAGAIN;
-	 } while (ret == 0);
+		ret = rb->access->read_first_n(rb, n, buf + count);
+		if (ret < 0)
+			break;
 
-	return ret;
+		n -= ret;
+		count += ret;
+	 } while (n > 0 && count < watermark_bytes);
+
+	return count ? count : ret;
 }
 
 /**
@@ -126,6 +141,8 @@ void iio_buffer_init(struct iio_buffer *buffer)
 	INIT_LIST_HEAD(&buffer->buffer_list);
 	init_waitqueue_head(&buffer->pollq);
 	kref_init(&buffer->ref);
+	buffer->low_watermark = 1;
+	buffer->timeout = MAX_SCHEDULE_TIMEOUT;
 }
 EXPORT_SYMBOL(iio_buffer_init);
 
@@ -795,6 +812,101 @@ done:
 }
 EXPORT_SYMBOL(iio_buffer_store_enable);
 
+ssize_t iio_buffer_show_watermark(struct device *dev,
+				   struct device_attribute *attr,
+				   char *buf)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct iio_buffer *buffer = indio_dev->buffer;
+
+	return sprintf(buf, "%u\n", buffer->low_watermark);
+}
+EXPORT_SYMBOL(iio_buffer_show_watermark);
+
+ssize_t iio_buffer_store_watermark(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf,
+				size_t len)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct iio_buffer *buffer = indio_dev->buffer;
+	unsigned int val;
+	int ret;
+
+	ret = kstrtouint(buf, 10, &val);
+	if (ret)
+		return ret;
+
+	mutex_lock(&indio_dev->mlock);
+	if (iio_buffer_is_active(indio_dev->buffer)) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	buffer->low_watermark = val;
+	ret = 0;
+out:
+	mutex_unlock(&indio_dev->mlock);
+	return ret ? ret : len;
+}
+EXPORT_SYMBOL(iio_buffer_store_watermark);
+
+static int iio_buffer_timeout_set(struct iio_buffer *buffer, long timeout_us)
+{
+	if (timeout_us >= 0) {
+		buffer->timeout = usecs_to_jiffies(timeout_us);
+	} else if (timeout_us == -1){
+		buffer->timeout = MAX_SCHEDULE_TIMEOUT;
+	} else {
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static long iio_buffer_timeout_get(struct iio_buffer *buffer)
+{
+	if (buffer->timeout != MAX_SCHEDULE_TIMEOUT)
+		return jiffies_to_usecs(buffer->timeout);
+	return -1;
+}
+
+ssize_t iio_buffer_show_timeout(struct device *dev,
+				   struct device_attribute *attr,
+				   char *buf)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct iio_buffer *buffer = indio_dev->buffer;
+
+	return sprintf(buf, "%ld\n", iio_buffer_timeout_get(buffer));
+}
+EXPORT_SYMBOL(iio_buffer_show_timeout);
+
+ssize_t iio_buffer_store_timeout(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf,
+				size_t len)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct iio_buffer *buffer = indio_dev->buffer;
+	long val;
+	int ret;
+
+	ret = kstrtol(buf, 10, &val);
+	if (ret)
+		return ret;
+
+	mutex_lock(&indio_dev->mlock);
+	if (iio_buffer_is_active(indio_dev->buffer)) {
+		ret = -EBUSY;
+		goto out;
+	}
+	ret = iio_buffer_timeout_set(buffer, val);
+out:
+	mutex_unlock(&indio_dev->mlock);
+	return ret ? ret : len;
+}
+EXPORT_SYMBOL(iio_buffer_store_timeout);
+
 /**
  * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
  * @indio_dev: the iio device
diff --git a/drivers/iio/kfifo_buf.c b/drivers/iio/kfifo_buf.c
index 7134e8a..ab7ea54 100644
--- a/drivers/iio/kfifo_buf.c
+++ b/drivers/iio/kfifo_buf.c
@@ -54,10 +54,14 @@ static int iio_get_length_kfifo(struct iio_buffer *r)
 
 static IIO_BUFFER_ENABLE_ATTR;
 static IIO_BUFFER_LENGTH_ATTR;
+static IIO_BUFFER_WATERMARK_ATTR;
+static IIO_BUFFER_TIMEOUT_ATTR;
 
 static struct attribute *iio_kfifo_attributes[] = {
 	&dev_attr_length.attr,
 	&dev_attr_enable.attr,
+	&dev_attr_low_watermark.attr,
+	&dev_attr_timeout.attr,
 	NULL,
 };
 
diff --git a/include/linux/iio/buffer.h b/include/linux/iio/buffer.h
index 5193927..41c8f11 100644
--- a/include/linux/iio/buffer.h
+++ b/include/linux/iio/buffer.h
@@ -61,6 +61,8 @@ struct iio_buffer_access_funcs {
  * struct iio_buffer - general buffer structure
  * @length:		[DEVICE] number of datums in buffer
  * @bytes_per_datum:	[DEVICE] size of individual datum including timestamp
+ * @low_watermark:	[DEVICE] blocking read granularity in datums
+ * @timeout:		[DEVICE] blocking read timeout in jiffies
  * @scan_el_attrs:	[DRIVER] control of scan elements if that scan mode
  *			control method is used
  * @scan_mask:		[INTERN] bitmask used in masking scan mode elements
@@ -80,6 +82,8 @@ struct iio_buffer_access_funcs {
 struct iio_buffer {
 	int					length;
 	int					bytes_per_datum;
+	unsigned int				low_watermark;
+	long					timeout;
 	struct attribute_group			*scan_el_attrs;
 	long					*scan_mask;
 	bool					scan_timestamp;
@@ -201,6 +205,34 @@ ssize_t iio_buffer_store_enable(struct device *dev,
 ssize_t iio_buffer_show_enable(struct device *dev,
 			       struct device_attribute *attr,
 			       char *buf);
+/**
+ * iio_buffer_show_watermark() - attr to see the read watermark
+ **/
+ssize_t iio_buffer_show_watermark(struct device *dev,
+				struct device_attribute *attr,
+				char *buf);
+/**
+ * iio_buffer_store_watermark() - attr to configure the read watermark
+ **/
+ssize_t iio_buffer_store_watermark(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf,
+				size_t len);
+/**
+ * iio_buffer_show_timeout() - attr to see the read timeout (microseconds)
+ * Note: if no timeout is set, returns -1
+ **/
+ssize_t iio_buffer_show_timeout(struct device *dev,
+				struct device_attribute *attr,
+				char *buf);
+/**
+ * iio_buffer_store_timeout() - attr to configure read timeout (microseconds)
+ * Note: to disable the timeout, write -1
+ **/
+ssize_t iio_buffer_store_timeout(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf,
+				size_t len);
 #define IIO_BUFFER_LENGTH_ATTR DEVICE_ATTR(length, S_IRUGO | S_IWUSR,	\
 					   iio_buffer_read_length,	\
 					   iio_buffer_write_length)
@@ -209,6 +241,14 @@ ssize_t iio_buffer_show_enable(struct device *dev,
 					   iio_buffer_show_enable,	\
 					   iio_buffer_store_enable)
 
+#define IIO_BUFFER_WATERMARK_ATTR DEVICE_ATTR(low_watermark, S_IRUGO | S_IWUSR,	\
+					   iio_buffer_show_watermark,	\
+					   iio_buffer_store_watermark)
+
+#define IIO_BUFFER_TIMEOUT_ATTR DEVICE_ATTR(timeout, S_IRUGO | S_IWUSR,	\
+					   iio_buffer_show_timeout,	\
+					   iio_buffer_store_timeout)
+
 bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
 	const unsigned long *mask);
 
-- 
1.9.1


^ permalink raw reply related	[flat|nested] 5+ messages in thread

* Re: [PATCH v2] iio: make blocking read wait for data
  2014-06-10 16:26 [PATCH v2] iio: make blocking read wait for data Josselin Costanzi
@ 2014-06-12 12:44 ` Lars-Peter Clausen
  2014-06-13 13:09   ` Josselin Costanzi
  2014-06-14 17:08 ` Jonathan Cameron
  1 sibling, 1 reply; 5+ messages in thread
From: Lars-Peter Clausen @ 2014-06-12 12:44 UTC (permalink / raw)
  To: Josselin Costanzi; +Cc: linux-iio, jic23

On 06/10/2014 06:26 PM, Josselin Costanzi wrote:
> Currently the IIO buffer blocking read only wait until at least one
> data element is available.
> This patch adds the possibility for the userspace to to blocking calls
> for multiple elements. This should limit the read() calls count when
> trying to get data in batches.
> This commit also fix a bug where data is lost if an error happens
> after some data is already read.
>
> Signed-off-by: Josselin Costanzi <josselin.costanzi@mobile-devices.fr>

This is going into the right direction. But where did that timeout come 
from? If a user wants a timeout on the read() call they should open the 
device in non-blocking mode and use poll() with a timeout. I don't think we 
should add a different way of doing this since the poll() method already 
works fine.

Also read() should still return once it got data and not wait until the full 
buffer has been read. Also poll() should not return until there is more data 
than the watermark in the buffer. So this means the pollq of the buffer 
should not be woken up until more data is in the buffer than the watermark. 
E.g. in iio_store_to_kfifo

if (kfifo_len(&kf->kf) >= kf->buffer.watermark)
	wake_up_interruptible_poll(&r->pollq, POLLIN | POLLRDNORM);

iio_kfifo_buf_data_available() can be re-factored to return the amount of 
data that is available rather than just if data is available or not.

In iio_buffer_data_available() you can then compare the result of the 
data_available() callback with the watermark and return true or false 
depending on that.

There is one more case that needs to be handled which is the buffer being 
disabled. When the buffer is disabled iio_buffer_data_available() should 
return true if there is data in the buffer regardless of whether it is above 
or below the watermark. This also means that the pollq needs to be woken up 
when the buffer is disabled. This should be done in iio_buffer_deactivate() 
before the iio_buffer_put().

- Lars

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH v2] iio: make blocking read wait for data
  2014-06-12 12:44 ` Lars-Peter Clausen
@ 2014-06-13 13:09   ` Josselin Costanzi
  0 siblings, 0 replies; 5+ messages in thread
From: Josselin Costanzi @ 2014-06-13 13:09 UTC (permalink / raw)
  To: linux-iio

Lars-Peter Clausen wrote:

> On 06/10/2014 06:26 PM, Josselin Costanzi wrote:
>> Currently the IIO buffer blocking read only wait until at least one
>> data element is available.
>> This patch adds the possibility for the userspace to to blocking calls
>> for multiple elements. This should limit the read() calls count when
>> trying to get data in batches.
>> This commit also fix a bug where data is lost if an error happens
>> after some data is already read.
>>
>> Signed-off-by: Josselin Costanzi <josselin.costanzi@mobile-devices.fr>
> 
> This is going into the right direction. But where did that timeout come 
> from? If a user wants a timeout on the read() call they should open the 
> device in non-blocking mode and use poll() with a timeout. I dont think we 
> should add a different way of doing this since the poll() method already 
> works fine.
> Also read() should still return once it got data and not wait until the full 
> buffer has been read. 

The idea was to implement something like SO_RCVLOWAT and SO_RCVTIMEO (or VMIN
and VTIME for ttys) hence the two sysfs entries for watermark and timeout.

As far as I understand the problem, there are at least two ABI functions that
can be impacted by adding a watermark: poll and read.

We should change poll() to return POLLIN only when there is enough data to 
reach the watermark. Non blocking read returns whatever data is available.

However, for blocking read, maybe the watermark could still be respected if
it's set? It's what is done in tcp_recvmsg but then we need the timeout so it's
maybe not worth the effort and complexity. OTOH, if we don't do this, then 
the blocking read isn't really useful anymore because the userspace needs 
to call poll() to check the watermark. What do you think?

> Also poll() should not return until there is more data 
> than the watermark in the buffer. So this means the pollq of the buffer 
> should not be woken up until more data is in the buffer then the watermark. 
> E.g. in iio_store_to_kfifo
> 
> if (kfifo_len(&kf->kf) >= kf->buffer.watermark)
> 	wake_up_interruptible_poll(&r->pollq, POLLIN | POLLRDNORM);

Indeed, this needs to be implemented.

> iio_kfifo_buf_data_available() can be re-factored to return the amount of 
> data that is available rather than just if data is available or not.
> 
> In iio_buffer_data_available() you can then compare the result of the 
> data_available() callback with the watermark and return true or false 
> depending on that.

Ok, I'll have a look.

> 
> There is one more case that needs to be handled which is the buffer being 
> disabled. When the buffer is disabled iio_buffer_data_available() should 
> return true if there is data in the buffer regardless of whether it is above 
> or below the watermark. This also means that the pollq needs to be woken up 
> when the buffer is disabled. This should be done in iio_buffer_deactivate() 
> before the iio_buffer_put().

Yes, will check


^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH v2] iio: make blocking read wait for data
  2014-06-10 16:26 [PATCH v2] iio: make blocking read wait for data Josselin Costanzi
  2014-06-12 12:44 ` Lars-Peter Clausen
@ 2014-06-14 17:08 ` Jonathan Cameron
  2014-06-16 10:10   ` Josselin Costanzi
  1 sibling, 1 reply; 5+ messages in thread
From: Jonathan Cameron @ 2014-06-14 17:08 UTC (permalink / raw)
  To: Josselin Costanzi, linux-iio; +Cc: lars

On 10/06/14 17:26, Josselin Costanzi wrote:
> Currently the IIO buffer blocking read only wait until at least one
> data element is available.
> This patch adds the possibility for the userspace to to blocking calls
> for multiple elements. This should limit the read() calls count when
> trying to get data in batches.
> This commit also fix a bug where data is lost if an error happens
> after some data is already read.
>
> Signed-off-by: Josselin Costanzi <josselin.costanzi@mobile-devices.fr>
Hi Josselin,

Thanks for taking this on. Definitely a useful little bit of functionality.
I'll take a close look once all the bits Lars picked up on are sorted.

Right now, why watermark_low?  (rather than simply watermark?).

And you know what comes with adding new ABI.  Documentation please :)
> ---
>   drivers/iio/industrialio-buffer.c | 138 ++++++++++++++++++++++++++++++++++----
>   drivers/iio/kfifo_buf.c           |   4 ++
>   include/linux/iio/buffer.h        |  40 +++++++++++
>   3 files changed, 169 insertions(+), 13 deletions(-)
>
> Changelog:
> v2: thanks to Lars-Peter Clausen and Jonathan Cameron
> - Avoid breaking default ABI
> - Add watermark and timeout properties to buffers
>
> diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
> index 36b1ae9..1fe0116 100644
> --- a/drivers/iio/industrialio-buffer.c
> +++ b/drivers/iio/industrialio-buffer.c
> @@ -56,6 +56,10 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
>   {
>   	struct iio_dev *indio_dev = filp->private_data;
>   	struct iio_buffer *rb = indio_dev->buffer;
> +	size_t datum_size = rb->access->get_bytes_per_datum(rb);
> +	size_t watermark_bytes = rb->low_watermark * datum_size;
> +	size_t count = 0;
> +	long timeout = rb->timeout;
>   	int ret;
>
>   	if (!indio_dev->info)
> @@ -66,24 +70,35 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
>
>   	do {
>   		if (!iio_buffer_data_available(rb)) {
> -			if (filp->f_flags & O_NONBLOCK)
> -				return -EAGAIN;
> +			if (filp->f_flags & O_NONBLOCK) {
> +				ret = -EAGAIN;
> +				break;
> +			}
>
> -			ret = wait_event_interruptible(rb->pollq,
> +			timeout = wait_event_interruptible_timeout(rb->pollq,
>   					iio_buffer_data_available(rb) ||
> -					indio_dev->info == NULL);
> -			if (ret)
> -				return ret;
> -			if (indio_dev->info == NULL)
> -				return -ENODEV;
> +					indio_dev->info == NULL,
> +					timeout);
> +			if (timeout <= 0) {
> +				ret = timeout;
> +				break;
> +			}
> +
> +			if (indio_dev->info == NULL) {
> +				ret = -ENODEV;
> +				break;
> +			}
>   		}
>
> -		ret = rb->access->read_first_n(rb, n, buf);
> -		if (ret == 0 && (filp->f_flags & O_NONBLOCK))
> -			ret = -EAGAIN;
> -	 } while (ret == 0);
> +		ret = rb->access->read_first_n(rb, n, buf + count);
> +		if (ret < 0)
> +			break;
>
> -	return ret;
> +		n -= ret;
> +		count += ret;
> +	 } while (n > 0 && count < watermark_bytes);
> +
> +	return count ? count : ret;
>   }
>
>   /**
> @@ -126,6 +141,8 @@ void iio_buffer_init(struct iio_buffer *buffer)
>   	INIT_LIST_HEAD(&buffer->buffer_list);
>   	init_waitqueue_head(&buffer->pollq);
>   	kref_init(&buffer->ref);
> +	buffer->low_watermark = 1;
> +	buffer->timeout = MAX_SCHEDULE_TIMEOUT;
>   }
>   EXPORT_SYMBOL(iio_buffer_init);
>
> @@ -795,6 +812,101 @@ done:
>   }
>   EXPORT_SYMBOL(iio_buffer_store_enable);
>
> +ssize_t iio_buffer_show_watermark(struct device *dev,
> +				   struct device_attribute *attr,
> +				   char *buf)
> +{
> +	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
> +	struct iio_buffer *buffer = indio_dev->buffer;
> +
> +	return sprintf(buf, "%u\n", buffer->low_watermark);
> +}
> +EXPORT_SYMBOL(iio_buffer_show_watermark);
> +
> +ssize_t iio_buffer_store_watermark(struct device *dev,
> +				struct device_attribute *attr,
> +				const char *buf,
> +				size_t len)
> +{
> +	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
> +	struct iio_buffer *buffer = indio_dev->buffer;
> +	unsigned int val;
> +	int ret;
> +
> +	ret = kstrtouint(buf, 10, &val);
> +	if (ret)
> +		return ret;
> +
> +	mutex_lock(&indio_dev->mlock);
> +	if (iio_buffer_is_active(indio_dev->buffer)) {
> +		ret = -EBUSY;
> +		goto out;
> +	}
> +
> +	buffer->low_watermark = val;
> +	ret = 0;
> +out:
> +	mutex_unlock(&indio_dev->mlock);
> +	return ret ? ret : len;
> +}
> +EXPORT_SYMBOL(iio_buffer_store_watermark);
> +
> +static int iio_buffer_timeout_set(struct iio_buffer *buffer, long timeout_us)
> +{
> +	if (timeout_us >= 0) {
> +		buffer->timeout = usecs_to_jiffies(timeout_us);
> +	} else if (timeout_us == -1){
> +		buffer->timeout = MAX_SCHEDULE_TIMEOUT;
> +	} else {
> +		return -EINVAL;
> +	}
> +	return 0;
> +}
> +
> +static long iio_buffer_timeout_get(struct iio_buffer *buffer)
> +{
> +	if (buffer->timeout != MAX_SCHEDULE_TIMEOUT)
> +		return jiffies_to_usecs(buffer->timeout);
> +	return -1;
> +}
> +
> +ssize_t iio_buffer_show_timeout(struct device *dev,
> +				   struct device_attribute *attr,
> +				   char *buf)
> +{
> +	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
> +	struct iio_buffer *buffer = indio_dev->buffer;
> +
> +	return sprintf(buf, "%ld\n", iio_buffer_timeout_get(buffer));
> +}
> +EXPORT_SYMBOL(iio_buffer_show_timeout);
> +
> +ssize_t iio_buffer_store_timeout(struct device *dev,
> +				struct device_attribute *attr,
> +				const char *buf,
> +				size_t len)
> +{
> +	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
> +	struct iio_buffer *buffer = indio_dev->buffer;
> +	long val;
> +	int ret;
> +
> +	ret = kstrtol(buf, 10, &val);
> +	if (ret)
> +		return ret;
> +
> +	mutex_lock(&indio_dev->mlock);
> +	if (iio_buffer_is_active(indio_dev->buffer)) {
> +		ret = -EBUSY;
> +		goto out;
> +	}
> +	ret = iio_buffer_timeout_set(buffer, val);
> +out:
> +	mutex_unlock(&indio_dev->mlock);
> +	return ret ? ret : len;
> +}
> +EXPORT_SYMBOL(iio_buffer_store_timeout);
> +
>   /**
>    * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
>    * @indio_dev: the iio device
> diff --git a/drivers/iio/kfifo_buf.c b/drivers/iio/kfifo_buf.c
> index 7134e8a..ab7ea54 100644
> --- a/drivers/iio/kfifo_buf.c
> +++ b/drivers/iio/kfifo_buf.c
> @@ -54,10 +54,14 @@ static int iio_get_length_kfifo(struct iio_buffer *r)
>
>   static IIO_BUFFER_ENABLE_ATTR;
>   static IIO_BUFFER_LENGTH_ATTR;
> +static IIO_BUFFER_WATERMARK_ATTR;
> +static IIO_BUFFER_TIMEOUT_ATTR;
>
>   static struct attribute *iio_kfifo_attributes[] = {
>   	&dev_attr_length.attr,
>   	&dev_attr_enable.attr,
> +	&dev_attr_low_watermark.attr,
> +	&dev_attr_timeout.attr,
>   	NULL,
>   };
>
> diff --git a/include/linux/iio/buffer.h b/include/linux/iio/buffer.h
> index 5193927..41c8f11 100644
> --- a/include/linux/iio/buffer.h
> +++ b/include/linux/iio/buffer.h
> @@ -61,6 +61,8 @@ struct iio_buffer_access_funcs {
>    * struct iio_buffer - general buffer structure
>    * @length:		[DEVICE] number of datums in buffer
>    * @bytes_per_datum:	[DEVICE] size of individual datum including timestamp
> + * @low_watermark:	[DEVICE] blocking read granularity in datums
> + * @timeout:		[DEVICE] blocking read timeout in jiffies
>    * @scan_el_attrs:	[DRIVER] control of scan elements if that scan mode
>    *			control method is used
>    * @scan_mask:		[INTERN] bitmask used in masking scan mode elements
> @@ -80,6 +82,8 @@ struct iio_buffer_access_funcs {
>   struct iio_buffer {
>   	int					length;
>   	int					bytes_per_datum;
> +	unsigned int				low_watermark;
> +	long					timeout;
>   	struct attribute_group			*scan_el_attrs;
>   	long					*scan_mask;
>   	bool					scan_timestamp;
> @@ -201,6 +205,34 @@ ssize_t iio_buffer_store_enable(struct device *dev,
>   ssize_t iio_buffer_show_enable(struct device *dev,
>   			       struct device_attribute *attr,
>   			       char *buf);
> +/**
> + * iio_buffer_show_watermark() - attr to see the read watermark
> + **/
> +ssize_t iio_buffer_show_watermark(struct device *dev,
> +				struct device_attribute *attr,
> +				char *buf);
> +/**
> + * iio_buffer_store_watermark() - attr to configure the read watermark
> + **/
> +ssize_t iio_buffer_store_watermark(struct device *dev,
> +				struct device_attribute *attr,
> +				const char *buf,
> +				size_t len);
> +/**
> + * iio_buffer_show_timeout() - attr to see the read timeout (microseconds)
> + * Note: if no timeout is set, returns -1
> + **/
> +ssize_t iio_buffer_show_timeout(struct device *dev,
> +				struct device_attribute *attr,
> +				char *buf);
> +/**
> + * iio_buffer_store_timeout() - attr to configure read timeout (microseconds)
> + * Note: to disable the timeout, write -1
> + **/
> +ssize_t iio_buffer_store_timeout(struct device *dev,
> +				struct device_attribute *attr,
> +				const char *buf,
> +				size_t len);
>   #define IIO_BUFFER_LENGTH_ATTR DEVICE_ATTR(length, S_IRUGO | S_IWUSR,	\
>   					   iio_buffer_read_length,	\
>   					   iio_buffer_write_length)
> @@ -209,6 +241,14 @@ ssize_t iio_buffer_show_enable(struct device *dev,
>   					   iio_buffer_show_enable,	\
>   					   iio_buffer_store_enable)
>
> +#define IIO_BUFFER_WATERMARK_ATTR DEVICE_ATTR(low_watermark, S_IRUGO | S_IWUSR,	\
> +					   iio_buffer_show_watermark,	\
> +					   iio_buffer_store_watermark)
> +
> +#define IIO_BUFFER_TIMEOUT_ATTR DEVICE_ATTR(timeout, S_IRUGO | S_IWUSR,	\
> +					   iio_buffer_show_timeout,	\
> +					   iio_buffer_store_timeout)
> +
>   bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
>   	const unsigned long *mask);
>
>


^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH v2] iio: make blocking read wait for data
  2014-06-14 17:08 ` Jonathan Cameron
@ 2014-06-16 10:10   ` Josselin Costanzi
  0 siblings, 0 replies; 5+ messages in thread
From: Josselin Costanzi @ 2014-06-16 10:10 UTC (permalink / raw)
  To: linux-iio

Jonathan Cameron wrote:

> On 10/06/14 17:26, Josselin Costanzi wrote:
>> Currently the IIO buffer blocking read only wait until at least one
>> data element is available.
>> This patch adds the possibility for the userspace to to blocking calls
>> for multiple elements. This should limit the read() calls count when
>> trying to get data in batches.
>> This commit also fix a bug where data is lost if an error happens
>> after some data is already read.
>>
>> Signed-off-by: Josselin Costanzi <josselin.costanzi@mobile-devices.fr>
> Hi Josselin,
> 
> Thanks for taking this on. Definitely a useful little bit of functionality.
> I'll take a close look once all the bits Lars picked up on are sorted.
> 
> Right now, why watermark_low?  (rather than simply watermark?).
It was to match the SO_RCVLOWAT socket option which I understand as
"socket_receive_low_watermark".

> And you know what comes with adding new ABI.  Documentation please :)
Yes, I'll have to work on that! Once we all agree on the interface we want 
to provide.

>> ---
>>   drivers/iio/industrialio-buffer.c | 138 ++++++++++++++++++++++++++++++++++----
>>   drivers/iio/kfifo_buf.c           |   4 ++
>>   include/linux/iio/buffer.h        |  40 +++++++++++
>>   3 files changed, 169 insertions(+), 13 deletions(-)
>>
>> Changelog:
>> v2: thanks to Lars-Peter Clausen and Jonathan Cameron
>> - Avoid breaking default ABI
>> - Add watermark and timeout properties to buffers
>>
>> diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
>> index 36b1ae9..1fe0116 100644
>> --- a/drivers/iio/industrialio-buffer.c
>> +++ b/drivers/iio/industrialio-buffer.c
>> @@ -56,6 +56,10 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
>>   {
>>   	struct iio_dev *indio_dev = filp->private_data;
>>   	struct iio_buffer *rb = indio_dev->buffer;
>> +	size_t datum_size = rb->access->get_bytes_per_datum(rb);
>> +	size_t watermark_bytes = rb->low_watermark * datum_size;
>> +	size_t count = 0;
>> +	long timeout = rb->timeout;
>>   	int ret;
>>
>>   	if (!indio_dev->info)
>> @@ -66,24 +70,35 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
>>
>>   	do {
>>   		if (!iio_buffer_data_available(rb)) {
>> -			if (filp->f_flags & O_NONBLOCK)
>> -				return -EAGAIN;
>> +			if (filp->f_flags & O_NONBLOCK) {
>> +				ret = -EAGAIN;
>> +				break;
>> +			}
>>
>> -			ret = wait_event_interruptible(rb->pollq,
>> +			timeout = wait_event_interruptible_timeout(rb->pollq,
>>   					iio_buffer_data_available(rb) ||
>> -					indio_dev->info == NULL);
>> -			if (ret)
>> -				return ret;
>> -			if (indio_dev->info == NULL)
>> -				return -ENODEV;
>> +					indio_dev->info == NULL,
>> +					timeout);
>> +			if (timeout <= 0) {
>> +				ret = timeout;
>> +				break;
>> +			}
>> +
>> +			if (indio_dev->info == NULL) {
>> +				ret = -ENODEV;
>> +				break;
>> +			}
>>   		}
>>
>> -		ret = rb->access->read_first_n(rb, n, buf);
>> -		if (ret == 0 && (filp->f_flags & O_NONBLOCK))
>> -			ret = -EAGAIN;
>> -	 } while (ret == 0);
>> +		ret = rb->access->read_first_n(rb, n, buf + count);
>> +		if (ret < 0)
>> +			break;
>>
>> -	return ret;
>> +		n -= ret;
>> +		count += ret;
>> +	 } while (n > 0 && count < watermark_bytes);
>> +
>> +	return count ? count : ret;
>>   }
>>
>>   /**
>> @@ -126,6 +141,8 @@ void iio_buffer_init(struct iio_buffer *buffer)
>>   	INIT_LIST_HEAD(&buffer->buffer_list);
>>   	init_waitqueue_head(&buffer->pollq);
>>   	kref_init(&buffer->ref);
>> +	buffer->low_watermark = 1;
>> +	buffer->timeout = MAX_SCHEDULE_TIMEOUT;
>>   }
>>   EXPORT_SYMBOL(iio_buffer_init);
>>
>> @@ -795,6 +812,101 @@ done:
>>   }
>>   EXPORT_SYMBOL(iio_buffer_store_enable);
>>
>> +ssize_t iio_buffer_show_watermark(struct device *dev,
>> +				   struct device_attribute *attr,
>> +				   char *buf)
>> +{
>> +	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
>> +	struct iio_buffer *buffer = indio_dev->buffer;
>> +
>> +	return sprintf(buf, "%u\n", buffer->low_watermark);
>> +}
>> +EXPORT_SYMBOL(iio_buffer_show_watermark);
>> +
>> +ssize_t iio_buffer_store_watermark(struct device *dev,
>> +				struct device_attribute *attr,
>> +				const char *buf,
>> +				size_t len)
>> +{
>> +	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
>> +	struct iio_buffer *buffer = indio_dev->buffer;
>> +	unsigned int val;
>> +	int ret;
>> +
>> +	ret = kstrtouint(buf, 10, &val);
>> +	if (ret)
>> +		return ret;
>> +
>> +	mutex_lock(&indio_dev->mlock);
>> +	if (iio_buffer_is_active(indio_dev->buffer)) {
>> +		ret = -EBUSY;
>> +		goto out;
>> +	}
>> +
>> +	buffer->low_watermark = val;
>> +	ret = 0;
>> +out:
>> +	mutex_unlock(&indio_dev->mlock);
>> +	return ret ? ret : len;
>> +}
>> +EXPORT_SYMBOL(iio_buffer_store_watermark);
>> +
>> +static int iio_buffer_timeout_set(struct iio_buffer *buffer, long timeout_us)
>> +{
>> +	if (timeout_us >= 0) {
>> +		buffer->timeout = usecs_to_jiffies(timeout_us);
>> +	} else if (timeout_us == -1){
>> +		buffer->timeout = MAX_SCHEDULE_TIMEOUT;
>> +	} else {
>> +		return -EINVAL;
>> +	}
>> +	return 0;
>> +}
>> +
>> +static long iio_buffer_timeout_get(struct iio_buffer *buffer)
>> +{
>> +	if (buffer->timeout != MAX_SCHEDULE_TIMEOUT)
>> +		return jiffies_to_usecs(buffer->timeout);
>> +	return -1;
>> +}
>> +
>> +ssize_t iio_buffer_show_timeout(struct device *dev,
>> +				   struct device_attribute *attr,
>> +				   char *buf)
>> +{
>> +	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
>> +	struct iio_buffer *buffer = indio_dev->buffer;
>> +
>> +	return sprintf(buf, "%ld\n", iio_buffer_timeout_get(buffer));
>> +}
>> +EXPORT_SYMBOL(iio_buffer_show_timeout);
>> +
>> +ssize_t iio_buffer_store_timeout(struct device *dev,
>> +				struct device_attribute *attr,
>> +				const char *buf,
>> +				size_t len)
>> +{
>> +	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
>> +	struct iio_buffer *buffer = indio_dev->buffer;
>> +	long val;
>> +	int ret;
>> +
>> +	ret = kstrtol(buf, 10, &val);
>> +	if (ret)
>> +		return ret;
>> +
>> +	mutex_lock(&indio_dev->mlock);
>> +	if (iio_buffer_is_active(indio_dev->buffer)) {
>> +		ret = -EBUSY;
>> +		goto out;
>> +	}
>> +	ret = iio_buffer_timeout_set(buffer, val);
>> +out:
>> +	mutex_unlock(&indio_dev->mlock);
>> +	return ret ? ret : len;
>> +}
>> +EXPORT_SYMBOL(iio_buffer_store_timeout);
>> +
>>   /**
>>    * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
>>    * @indio_dev: the iio device
>> diff --git a/drivers/iio/kfifo_buf.c b/drivers/iio/kfifo_buf.c
>> index 7134e8a..ab7ea54 100644
>> --- a/drivers/iio/kfifo_buf.c
>> +++ b/drivers/iio/kfifo_buf.c
>> @@ -54,10 +54,14 @@ static int iio_get_length_kfifo(struct iio_buffer *r)
>>
>>   static IIO_BUFFER_ENABLE_ATTR;
>>   static IIO_BUFFER_LENGTH_ATTR;
>> +static IIO_BUFFER_WATERMARK_ATTR;
>> +static IIO_BUFFER_TIMEOUT_ATTR;
>>
>>   static struct attribute *iio_kfifo_attributes[] = {
>>   	&dev_attr_length.attr,
>>   	&dev_attr_enable.attr,
>> +	&dev_attr_low_watermark.attr,
>> +	&dev_attr_timeout.attr,
>>   	NULL,
>>   };
>>
>> diff --git a/include/linux/iio/buffer.h b/include/linux/iio/buffer.h
>> index 5193927..41c8f11 100644
>> --- a/include/linux/iio/buffer.h
>> +++ b/include/linux/iio/buffer.h
>> @@ -61,6 +61,8 @@ struct iio_buffer_access_funcs {
>>    * struct iio_buffer - general buffer structure
>>    * @length:		[DEVICE] number of datums in buffer
>>    * @bytes_per_datum:	[DEVICE] size of individual datum including timestamp
>> + * @low_watermark:	[DEVICE] blocking read granularity in datums
>> + * @timeout:		[DEVICE] blocking read timeout in jiffies
>>    * @scan_el_attrs:	[DRIVER] control of scan elements if that scan mode
>>    *			control method is used
>>    * @scan_mask:		[INTERN] bitmask used in masking scan mode elements
>> @@ -80,6 +82,8 @@ struct iio_buffer_access_funcs {
>>   struct iio_buffer {
>>   	int					length;
>>   	int					bytes_per_datum;
>> +	unsigned int				low_watermark;
>> +	long					timeout;
>>   	struct attribute_group			*scan_el_attrs;
>>   	long					*scan_mask;
>>   	bool					scan_timestamp;
>> @@ -201,6 +205,34 @@ ssize_t iio_buffer_store_enable(struct device *dev,
>>   ssize_t iio_buffer_show_enable(struct device *dev,
>>   			       struct device_attribute *attr,
>>   			       char *buf);
>> +/**
>> + * iio_buffer_show_watermark() - attr to see the read watermark
>> + **/
>> +ssize_t iio_buffer_show_watermark(struct device *dev,
>> +				struct device_attribute *attr,
>> +				char *buf);
>> +/**
>> + * iio_buffer_store_watermark() - attr to configure the read watermark
>> + **/
>> +ssize_t iio_buffer_store_watermark(struct device *dev,
>> +				struct device_attribute *attr,
>> +				const char *buf,
>> +				size_t len);
>> +/**
>> + * iio_buffer_show_timeout() - attr to see the read timeout (microseconds)
>> + * Note: if no timeout is set, returns -1
>> + **/
>> +ssize_t iio_buffer_show_timeout(struct device *dev,
>> +				struct device_attribute *attr,
>> +				char *buf);
>> +/**
>> + * iio_buffer_store_timeout() - attr to configure read timeout (microseconds)
>> + * Note: to disable the timeout, write -1
>> + **/
>> +ssize_t iio_buffer_store_timeout(struct device *dev,
>> +				struct device_attribute *attr,
>> +				const char *buf,
>> +				size_t len);
>>   #define IIO_BUFFER_LENGTH_ATTR DEVICE_ATTR(length, S_IRUGO | S_IWUSR,	\
>>   					   iio_buffer_read_length,	\
>>   					   iio_buffer_write_length)
>> @@ -209,6 +241,14 @@ ssize_t iio_buffer_show_enable(struct device *dev,
>>   					   iio_buffer_show_enable,	\
>>   					   iio_buffer_store_enable)
>>
>> +#define IIO_BUFFER_WATERMARK_ATTR DEVICE_ATTR(low_watermark, S_IRUGO | S_IWUSR,	\
>> +					   iio_buffer_show_watermark,	\
>> +					   iio_buffer_store_watermark)
>> +
>> +#define IIO_BUFFER_TIMEOUT_ATTR DEVICE_ATTR(timeout, S_IRUGO | S_IWUSR,	\
>> +					   iio_buffer_show_timeout,	\
>> +					   iio_buffer_store_timeout)
>> +
>>   bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
>>   	const unsigned long *mask);
>>
>>



^ permalink raw reply	[flat|nested] 5+ messages in thread

end of thread, other threads:[~2014-06-16 10:10 UTC | newest]

Thread overview: 5+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2014-06-10 16:26 [PATCH v2] iio: make blocking read wait for data Josselin Costanzi
2014-06-12 12:44 ` Lars-Peter Clausen
2014-06-13 13:09   ` Josselin Costanzi
2014-06-14 17:08 ` Jonathan Cameron
2014-06-16 10:10   ` Josselin Costanzi

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).