From: Maarten Lankhorst <maarten.lankhorst@canonical.com> To: gregkh@linuxfoundation.org Cc: linux-arch@vger.kernel.org, thellstrom@vmware.com, linux-kernel@vger.kernel.org, dri-devel@lists.freedesktop.org, linaro-mm-sig@lists.linaro.org, ccross@google.com, linux-media@vger.kernel.org Subject: [REPOST PATCH 6/8] dma-buf: add poll support, v3 Date: Wed, 18 Jun 2014 12:37:23 +0200 [thread overview] Message-ID: <20140618103723.15728.65924.stgit@patser> (raw) In-Reply-To: <20140618102957.15728.43525.stgit@patser> Thanks to Fengguang Wu for spotting a missing static cast. v2: - Kill unused variable need_shared. v3: - Clarify the BUG() in dma_buf_release some more. (Rob Clark) Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com> --- drivers/base/dma-buf.c | 108 +++++++++++++++++++++++++++++++++++++++++++++++ include/linux/dma-buf.h | 12 +++++ 2 files changed, 120 insertions(+) diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c index cd40ca22911f..25e8c4165936 100644 --- a/drivers/base/dma-buf.c +++ b/drivers/base/dma-buf.c @@ -30,6 +30,7 @@ #include <linux/export.h> #include <linux/debugfs.h> #include <linux/seq_file.h> +#include <linux/poll.h> #include <linux/reservation.h> static inline int is_dma_buf_file(struct file *); @@ -52,6 +53,16 @@ static int dma_buf_release(struct inode *inode, struct file *file) BUG_ON(dmabuf->vmapping_counter); + /* + * Any fences that a dma-buf poll can wait on should be signaled + * before releasing dma-buf. This is the responsibility of each + * driver that uses the reservation objects. + * + * If you hit this BUG() it means someone dropped their ref to the + * dma-buf while still having pending operation to the buffer. 
+ */ + BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active); + dmabuf->ops->release(dmabuf); mutex_lock(&db_list.lock); @@ -108,10 +119,103 @@ static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence) return base + offset; } +static void dma_buf_poll_cb(struct fence *fence, struct fence_cb *cb) +{ + struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb; + unsigned long flags; + + spin_lock_irqsave(&dcb->poll->lock, flags); + wake_up_locked_poll(dcb->poll, dcb->active); + dcb->active = 0; + spin_unlock_irqrestore(&dcb->poll->lock, flags); +} + +static unsigned int dma_buf_poll(struct file *file, poll_table *poll) +{ + struct dma_buf *dmabuf; + struct reservation_object *resv; + unsigned long events; + + dmabuf = file->private_data; + if (!dmabuf || !dmabuf->resv) + return POLLERR; + + resv = dmabuf->resv; + + poll_wait(file, &dmabuf->poll, poll); + + events = poll_requested_events(poll) & (POLLIN | POLLOUT); + if (!events) + return 0; + + ww_mutex_lock(&resv->lock, NULL); + + if (resv->fence_excl && (!(events & POLLOUT) || + resv->fence_shared_count == 0)) { + struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl; + unsigned long pevents = POLLIN; + + if (resv->fence_shared_count == 0) + pevents |= POLLOUT; + + spin_lock_irq(&dmabuf->poll.lock); + if (dcb->active) { + dcb->active |= pevents; + events &= ~pevents; + } else + dcb->active = pevents; + spin_unlock_irq(&dmabuf->poll.lock); + + if (events & pevents) { + if (!fence_add_callback(resv->fence_excl, + &dcb->cb, dma_buf_poll_cb)) + events &= ~pevents; + else + /* + * No callback queued, wake up any additional + * waiters. 
+ */ + dma_buf_poll_cb(NULL, &dcb->cb); + } + } + + if ((events & POLLOUT) && resv->fence_shared_count > 0) { + struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared; + int i; + + /* Only queue a new callback if no event has fired yet */ + spin_lock_irq(&dmabuf->poll.lock); + if (dcb->active) + events &= ~POLLOUT; + else + dcb->active = POLLOUT; + spin_unlock_irq(&dmabuf->poll.lock); + + if (!(events & POLLOUT)) + goto out; + + for (i = 0; i < resv->fence_shared_count; ++i) + if (!fence_add_callback(resv->fence_shared[i], + &dcb->cb, dma_buf_poll_cb)) { + events &= ~POLLOUT; + break; + } + + /* No callback queued, wake up any additional waiters. */ + if (i == resv->fence_shared_count) + dma_buf_poll_cb(NULL, &dcb->cb); + } + +out: + ww_mutex_unlock(&resv->lock); + return events; +} + static const struct file_operations dma_buf_fops = { .release = dma_buf_release, .mmap = dma_buf_mmap_internal, .llseek = dma_buf_llseek, + .poll = dma_buf_poll, }; /* @@ -171,6 +275,10 @@ struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops, dmabuf->ops = ops; dmabuf->size = size; dmabuf->exp_name = exp_name; + init_waitqueue_head(&dmabuf->poll); + dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll; + dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0; + if (!resv) { resv = (struct reservation_object *)&dmabuf[1]; reservation_object_init(resv); diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index fd7def2e0ae2..694e1fe1c4b4 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -30,6 +30,8 @@ #include <linux/list.h> #include <linux/dma-mapping.h> #include <linux/fs.h> +#include <linux/fence.h> +#include <linux/wait.h> struct device; struct dma_buf; @@ -130,6 +132,16 @@ struct dma_buf { struct list_head list_node; void *priv; struct reservation_object *resv; + + /* poll support */ + wait_queue_head_t poll; + + struct dma_buf_poll_cb_t { + struct fence_cb cb; + wait_queue_head_t *poll; + + unsigned long active; + } 
cb_excl, cb_shared; }; /**
WARNING: multiple messages have this Message-ID (diff)
From: Maarten Lankhorst <maarten.lankhorst@canonical.com> To: gregkh@linuxfoundation.org Cc: linux-arch@vger.kernel.org, thellstrom@vmware.com, linux-kernel@vger.kernel.org, dri-devel@lists.freedesktop.org, linaro-mm-sig@lists.linaro.org, robdclark@gmail.com, thierry.reding@gmail.com, ccross@google.com, daniel@ffwll.ch, sumit.semwal@linaro.org, linux-media@vger.kernel.org Subject: [REPOST PATCH 6/8] dma-buf: add poll support, v3 Date: Wed, 18 Jun 2014 12:37:23 +0200 [thread overview] Message-ID: <20140618103723.15728.65924.stgit@patser> (raw) Message-ID: <20140618103723.9Pww1aiPDY_pqtYPWEXL09t_-cwrZtrzLl6dwTFxZKY@z> (raw) In-Reply-To: <20140618102957.15728.43525.stgit@patser> Thanks to Fengguang Wu for spotting a missing static cast. v2: - Kill unused variable need_shared. v3: - Clarify the BUG() in dma_buf_release some more. (Rob Clark) Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com> --- drivers/base/dma-buf.c | 108 +++++++++++++++++++++++++++++++++++++++++++++++ include/linux/dma-buf.h | 12 +++++ 2 files changed, 120 insertions(+) diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c index cd40ca22911f..25e8c4165936 100644 --- a/drivers/base/dma-buf.c +++ b/drivers/base/dma-buf.c @@ -30,6 +30,7 @@ #include <linux/export.h> #include <linux/debugfs.h> #include <linux/seq_file.h> +#include <linux/poll.h> #include <linux/reservation.h> static inline int is_dma_buf_file(struct file *); @@ -52,6 +53,16 @@ static int dma_buf_release(struct inode *inode, struct file *file) BUG_ON(dmabuf->vmapping_counter); + /* + * Any fences that a dma-buf poll can wait on should be signaled + * before releasing dma-buf. This is the responsibility of each + * driver that uses the reservation objects. + * + * If you hit this BUG() it means someone dropped their ref to the + * dma-buf while still having pending operation to the buffer. 
+ */ + BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active); + dmabuf->ops->release(dmabuf); mutex_lock(&db_list.lock); @@ -108,10 +119,103 @@ static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence) return base + offset; } +static void dma_buf_poll_cb(struct fence *fence, struct fence_cb *cb) +{ + struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb; + unsigned long flags; + + spin_lock_irqsave(&dcb->poll->lock, flags); + wake_up_locked_poll(dcb->poll, dcb->active); + dcb->active = 0; + spin_unlock_irqrestore(&dcb->poll->lock, flags); +} + +static unsigned int dma_buf_poll(struct file *file, poll_table *poll) +{ + struct dma_buf *dmabuf; + struct reservation_object *resv; + unsigned long events; + + dmabuf = file->private_data; + if (!dmabuf || !dmabuf->resv) + return POLLERR; + + resv = dmabuf->resv; + + poll_wait(file, &dmabuf->poll, poll); + + events = poll_requested_events(poll) & (POLLIN | POLLOUT); + if (!events) + return 0; + + ww_mutex_lock(&resv->lock, NULL); + + if (resv->fence_excl && (!(events & POLLOUT) || + resv->fence_shared_count == 0)) { + struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl; + unsigned long pevents = POLLIN; + + if (resv->fence_shared_count == 0) + pevents |= POLLOUT; + + spin_lock_irq(&dmabuf->poll.lock); + if (dcb->active) { + dcb->active |= pevents; + events &= ~pevents; + } else + dcb->active = pevents; + spin_unlock_irq(&dmabuf->poll.lock); + + if (events & pevents) { + if (!fence_add_callback(resv->fence_excl, + &dcb->cb, dma_buf_poll_cb)) + events &= ~pevents; + else + /* + * No callback queued, wake up any additional + * waiters. 
+ */ + dma_buf_poll_cb(NULL, &dcb->cb); + } + } + + if ((events & POLLOUT) && resv->fence_shared_count > 0) { + struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared; + int i; + + /* Only queue a new callback if no event has fired yet */ + spin_lock_irq(&dmabuf->poll.lock); + if (dcb->active) + events &= ~POLLOUT; + else + dcb->active = POLLOUT; + spin_unlock_irq(&dmabuf->poll.lock); + + if (!(events & POLLOUT)) + goto out; + + for (i = 0; i < resv->fence_shared_count; ++i) + if (!fence_add_callback(resv->fence_shared[i], + &dcb->cb, dma_buf_poll_cb)) { + events &= ~POLLOUT; + break; + } + + /* No callback queued, wake up any additional waiters. */ + if (i == resv->fence_shared_count) + dma_buf_poll_cb(NULL, &dcb->cb); + } + +out: + ww_mutex_unlock(&resv->lock); + return events; +} + static const struct file_operations dma_buf_fops = { .release = dma_buf_release, .mmap = dma_buf_mmap_internal, .llseek = dma_buf_llseek, + .poll = dma_buf_poll, }; /* @@ -171,6 +275,10 @@ struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops, dmabuf->ops = ops; dmabuf->size = size; dmabuf->exp_name = exp_name; + init_waitqueue_head(&dmabuf->poll); + dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll; + dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0; + if (!resv) { resv = (struct reservation_object *)&dmabuf[1]; reservation_object_init(resv); diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index fd7def2e0ae2..694e1fe1c4b4 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -30,6 +30,8 @@ #include <linux/list.h> #include <linux/dma-mapping.h> #include <linux/fs.h> +#include <linux/fence.h> +#include <linux/wait.h> struct device; struct dma_buf; @@ -130,6 +132,16 @@ struct dma_buf { struct list_head list_node; void *priv; struct reservation_object *resv; + + /* poll support */ + wait_queue_head_t poll; + + struct dma_buf_poll_cb_t { + struct fence_cb cb; + wait_queue_head_t *poll; + + unsigned long active; + } 
cb_excl, cb_shared; }; /**
next prev parent reply other threads:[~2014-06-18 10:37 UTC|newest] Thread overview: 78+ messages / expand[flat|nested] mbox.gz Atom feed top 2014-06-18 10:36 [REPOST PATCH 0/8] fence synchronization patches Maarten Lankhorst 2014-06-18 10:36 ` Maarten Lankhorst 2014-06-18 10:36 ` [REPOST PATCH 1/8] fence: dma-buf cross-device synchronization (v17) Maarten Lankhorst 2014-06-18 10:36 ` Maarten Lankhorst 2014-06-19 1:13 ` Greg KH 2014-06-19 1:13 ` Greg KH 2014-06-19 1:23 ` Rob Clark 2014-06-19 1:23 ` Rob Clark 2014-06-19 1:44 ` Greg KH 2014-06-19 1:44 ` Greg KH 2014-06-19 14:00 ` Rob Clark 2014-06-19 17:00 ` Greg KH 2014-06-19 17:00 ` Greg KH 2014-06-19 17:45 ` Rob Clark 2014-06-19 17:45 ` Rob Clark 2014-06-19 18:19 ` Greg KH 2014-06-19 18:37 ` James Bottomley 2014-06-19 18:37 ` James Bottomley 2014-06-19 18:52 ` Rob Clark 2014-06-19 18:52 ` Rob Clark 2014-06-19 19:20 ` Daniel Vetter 2014-06-19 19:20 ` Daniel Vetter 2014-06-19 21:50 ` Dave Airlie 2014-06-19 21:50 ` Dave Airlie 2014-06-19 23:21 ` Rob Clark 2014-06-19 23:21 ` Rob Clark 2014-06-19 19:15 ` Daniel Vetter 2014-06-19 19:15 ` Daniel Vetter 2014-06-19 20:01 ` Greg KH 2014-06-19 20:01 ` Greg KH 2014-06-19 22:39 ` H. Peter Anvin 2014-06-19 22:39 ` H. 
Peter Anvin 2014-06-19 23:08 ` James Bottomley 2014-06-19 23:08 ` James Bottomley 2014-06-19 23:42 ` Greg KH 2014-06-20 8:30 ` Daniel Vetter 2014-06-20 8:24 ` Daniel Vetter 2014-06-20 8:24 ` Daniel Vetter 2014-06-19 1:15 ` Greg KH 2014-06-19 1:16 ` Greg KH 2014-06-19 1:25 ` Rob Clark 2014-06-19 1:25 ` Rob Clark 2014-06-19 4:27 ` Sumit Semwal 2014-06-19 4:54 ` Greg KH 2014-06-19 4:54 ` Greg KH 2014-06-19 5:26 ` Sumit Semwal 2014-06-19 5:26 ` Sumit Semwal 2014-06-18 10:37 ` [REPOST PATCH 2/8] seqno-fence: Hardware dma-buf implementation of fencing (v5) Maarten Lankhorst 2014-06-18 10:37 ` Maarten Lankhorst 2014-06-18 10:37 ` [REPOST PATCH 3/8] dma-buf: use reservation objects Maarten Lankhorst 2014-06-18 10:37 ` Maarten Lankhorst 2014-06-18 10:37 ` [REPOST PATCH 4/8] android: convert sync to fence api, v5 Maarten Lankhorst 2014-06-18 10:37 ` Maarten Lankhorst 2014-06-19 1:15 ` Greg KH 2014-06-19 6:37 ` Daniel Vetter 2014-06-19 6:37 ` Daniel Vetter 2014-06-19 11:48 ` Thierry Reding 2014-06-19 11:48 ` Thierry Reding 2014-06-19 12:28 ` Daniel Vetter 2014-06-19 15:35 ` Colin Cross 2014-06-19 16:34 ` Daniel Vetter 2014-06-19 16:34 ` Daniel Vetter 2014-06-20 20:52 ` Thierry Reding 2014-06-20 20:52 ` Thierry Reding 2014-06-23 8:45 ` Maarten Lankhorst 2014-07-07 13:28 ` Daniel Vetter 2014-07-07 13:28 ` Daniel Vetter 2014-06-19 15:22 ` Colin Cross 2014-06-19 15:22 ` Colin Cross 2014-06-19 16:12 ` Maarten Lankhorst 2014-06-18 10:37 ` [REPOST PATCH 5/8] reservation: add support for fences to enable cross-device synchronisation Maarten Lankhorst 2014-06-18 10:37 ` Maarten Lankhorst 2014-06-18 10:37 ` Maarten Lankhorst [this message] 2014-06-18 10:37 ` [REPOST PATCH 6/8] dma-buf: add poll support, v3 Maarten Lankhorst 2014-06-18 10:37 ` [REPOST PATCH 7/8] reservation: update api and add some helpers Maarten Lankhorst 2014-06-18 10:37 ` Maarten Lankhorst 2014-06-18 10:37 ` [REPOST PATCH 8/8] reservation: add suppport for read-only access using rcu Maarten Lankhorst 2014-06-18 
10:37 ` Maarten Lankhorst
Reply instructions: You may reply publicly to this message via plain-text email using any one of the following methods: * Save the following mbox file, import it into your mail client, and reply-to-all from there: mbox Avoid top-posting and favor interleaved quoting: https://en.wikipedia.org/wiki/Posting_style#Interleaved_style * Reply using the --to, --cc, and --in-reply-to switches of git-send-email(1): git send-email \ --in-reply-to=20140618103723.15728.65924.stgit@patser \ --to=maarten.lankhorst@canonical.com \ --cc=ccross@google.com \ --cc=dri-devel@lists.freedesktop.org \ --cc=gregkh@linuxfoundation.org \ --cc=linaro-mm-sig@lists.linaro.org \ --cc=linux-arch@vger.kernel.org \ --cc=linux-kernel@vger.kernel.org \ --cc=linux-media@vger.kernel.org \ --cc=thellstrom@vmware.com \ /path/to/YOUR_REPLY https://kernel.org/pub/software/scm/git/docs/git-send-email.html * If your mail client supports setting the In-Reply-To header via mailto: links, try the mailto: link. Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions for how to clone and mirror all data and code used for this inbox; as well as URLs for NNTP newsgroup(s).