From: Maarten Lankhorst <maarten.lankhorst@canonical.com>
To: linux-kernel@vger.kernel.org
Cc: linux-arch@vger.kernel.org, ccross@google.com,
	linaro-mm-sig@lists.linaro.org, robdclark@gmail.com,
	dri-devel@lists.freedesktop.org, daniel@ffwll.ch,
	sumit.semwal@linaro.org, linux-media@vger.kernel.org
Subject: [PATCH 6/6] dma-buf: add poll support, v2
Date: Mon, 17 Feb 2014 16:58:37 +0100
Message-ID: <20140217155832.20337.83163.stgit@patser>
In-Reply-To: <20140217155056.20337.25254.stgit@patser>

Add poll() support to the dma-buf file descriptor, implemented on top of
the fences in the buffer's reservation object: POLLIN becomes ready once
the exclusive fence has signaled, POLLOUT once the shared fences (or the
exclusive fence, when no shared fences are present) have signaled.

Thanks to Fengguang Wu for spotting a missing static cast.

v2:
- Kill unused variable need_shared.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
---
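A minimal userspace sketch (not part of the patch) of what this enables:
waiting for the implicit fences on a dma-buf fd with a plain poll(2). The
helper name, fd origin and timeout below are illustrative assumptions only.

	/* Hypothetical helper: wait for the fences backing a dma-buf fd,
	 * e.g. one obtained from an exporting driver via PRIME. */
	#include <poll.h>

	/* Returns ready POLLIN/POLLOUT bits, 0 on timeout, -1 on error. */
	static int wait_for_dmabuf(int dmabuf_fd, int timeout_ms)
	{
		struct pollfd pfd = {
			.fd = dmabuf_fd,
			/* POLLIN: exclusive fence; POLLOUT: shared fences as well */
			.events = POLLIN | POLLOUT,
		};
		int ret;

		ret = poll(&pfd, 1, timeout_ms);
		if (ret <= 0)
			return ret;	/* 0 = timed out, negative = error (see errno) */
		return pfd.revents;	/* event bits that are now ready */
	}
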
 drivers/base/dma-buf.c  |  101 +++++++++++++++++++++++++++++++++++++++++++++++
 include/linux/dma-buf.h |   12 ++++++
 2 files changed, 113 insertions(+)

diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c
index 85e792c2c909..77ea621ab59d 100644
--- a/drivers/base/dma-buf.c
+++ b/drivers/base/dma-buf.c
@@ -30,6 +30,7 @@
 #include <linux/export.h>
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
+#include <linux/poll.h>
 #include <linux/reservation.h>
 
 static inline int is_dma_buf_file(struct file *);
@@ -52,6 +53,13 @@ static int dma_buf_release(struct inode *inode, struct file *file)
 
 	BUG_ON(dmabuf->vmapping_counter);
 
+	/*
+	 * Any fences that a dma-buf poll can wait on should be signaled
+	 * before releasing the dma-buf. This is the responsibility of each
+	 * driver that uses the reservation objects.
+	 */
+	BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);
+
 	dmabuf->ops->release(dmabuf);
 
 	mutex_lock(&db_list.lock);
@@ -108,10 +116,99 @@ static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
 	return base + offset;
 }
 
+static void dma_buf_poll_cb(struct fence *fence, struct fence_cb *cb)
+{
+	struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dcb->poll->lock, flags);
+	wake_up_locked_poll(dcb->poll, dcb->active);
+	dcb->active = 0;
+	spin_unlock_irqrestore(&dcb->poll->lock, flags);
+}
+
+static unsigned int dma_buf_poll(struct file *file, poll_table *poll)
+{
+	struct dma_buf *dmabuf;
+	struct reservation_object *resv;
+	unsigned long events;
+
+	dmabuf = file->private_data;
+	if (!dmabuf || !dmabuf->resv)
+		return POLLERR;
+
+	resv = dmabuf->resv;
+
+	poll_wait(file, &dmabuf->poll, poll);
+
+	events = poll_requested_events(poll) & (POLLIN | POLLOUT);
+	if (!events)
+		return 0;
+
+	ww_mutex_lock(&resv->lock, NULL);
+
+	if (resv->fence_excl && (!(events & POLLOUT) || resv->fence_shared_count == 0)) {
+		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
+		unsigned long pevents = POLLIN;
+
+		if (resv->fence_shared_count == 0)
+			pevents |= POLLOUT;
+
+		spin_lock_irq(&dmabuf->poll.lock);
+		if (dcb->active) {
+			dcb->active |= pevents;
+			events &= ~pevents;
+		} else
+			dcb->active = pevents;
+		spin_unlock_irq(&dmabuf->poll.lock);
+
+		if (events & pevents) {
+			if (!fence_add_callback(resv->fence_excl,
+						&dcb->cb, dma_buf_poll_cb))
+				events &= ~pevents;
+			else
+				/* No callback queued, wake up any additional waiters. */
+				dma_buf_poll_cb(NULL, &dcb->cb);
+		}
+	}
+
+	if ((events & POLLOUT) && resv->fence_shared_count > 0) {
+		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
+		int i;
+
+		/* Only queue a new callback if no event has fired yet */
+		spin_lock_irq(&dmabuf->poll.lock);
+		if (dcb->active)
+			events &= ~POLLOUT;
+		else
+			dcb->active = POLLOUT;
+		spin_unlock_irq(&dmabuf->poll.lock);
+
+		if (!(events & POLLOUT))
+			goto out;
+
+		for (i = 0; i < resv->fence_shared_count; ++i)
+			if (!fence_add_callback(resv->fence_shared[i],
+						&dcb->cb, dma_buf_poll_cb)) {
+				events &= ~POLLOUT;
+				break;
+			}
+
+		/* No callback queued, wake up any additional waiters. */
+		if (i == resv->fence_shared_count)
+			dma_buf_poll_cb(NULL, &dcb->cb);
+	}
+
+out:
+	ww_mutex_unlock(&resv->lock);
+	return events;
+}
+
 static const struct file_operations dma_buf_fops = {
 	.release	= dma_buf_release,
 	.mmap		= dma_buf_mmap_internal,
 	.llseek		= dma_buf_llseek,
+	.poll		= dma_buf_poll,
 };
 
 /*
@@ -171,6 +268,10 @@ struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops,
 	dmabuf->ops = ops;
 	dmabuf->size = size;
 	dmabuf->exp_name = exp_name;
+	init_waitqueue_head(&dmabuf->poll);
+	dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
+	dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;
+
 	if (!resv) {
 		resv = (struct reservation_object*)&dmabuf[1];
 		reservation_object_init(resv);
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 34cfbac52c03..e1df18f584ef 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -30,6 +30,8 @@
 #include <linux/list.h>
 #include <linux/dma-mapping.h>
 #include <linux/fs.h>
+#include <linux/fence.h>
+#include <linux/wait.h>
 
 struct device;
 struct dma_buf;
@@ -130,6 +132,16 @@ struct dma_buf {
 	struct list_head list_node;
 	void *priv;
 	struct reservation_object *resv;
+
+	/* poll support */
+	wait_queue_head_t poll;
+
+	struct dma_buf_poll_cb_t {
+		struct fence_cb cb;
+		wait_queue_head_t *poll;
+
+		unsigned long active;
+	} cb_excl, cb_shared;
 };
 
 /**
