From: Rob Clark <robdclark@gmail.com>
To: dri-devel@lists.freedesktop.org
Cc: linux-arm-msm@vger.kernel.org, freedreno@lists.freedesktop.org,
Chris Wilson <chris@chris-wilson.co.uk>,
Sushmita Susheelendra <ssusheel@codeaurora.org>,
Rob Clark <robdclark@gmail.com>
Subject: [PATCH] fixup! drm/msm: Separate locking of buffer resources from struct_mutex
Date: Thu, 15 Jun 2017 09:20:50 -0400
Message-ID: <20170615132050.1196-1-robdclark@gmail.com>
In-Reply-To: <1497394374-19982-1-git-send-email-ssusheel@codeaurora.org>
---
This is roughly based on Chris's suggestion, in particular the part
about using mutex_lock_nested().  It's not *exactly* the same:
msm_obj->lock protects a bit more than just the backing store, and we
don't currently track a pin_count.  (Instead, pages stay pinned until
the object is purged or freed.)

Rather than narrowing msm_obj->lock to cover only the backing store,
it is easier to split out madv, which remains protected by
struct_mutex.  The shrinker still holds struct_mutex, so it does not
need to grab msm_obj->lock until it actually purges an object.  We
avoid going down any path that could trigger the shrinker by first
checking that msm_obj->madv == MSM_MADV_WILLNEED.  Access to
msm_obj->madv itself is synchronized by taking msm_obj->lock inside
struct_mutex.
This seems to keep lockdep happy in my testing so far.
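
For reference, below is a minimal sketch of the lockdep-subclass idea
(illustrative only, not part of the patch; struct obj and the two
helpers are hypothetical stand-ins for msm_gem_object and its normal
vs. purge locking paths):

#include <linux/mutex.h>

/* Lockdep treats every obj->lock as one lock class, so taking
 * objB->lock while already holding objA->lock looks like recursive
 * locking.  Annotating the shrinker-side acquisition with a distinct
 * subclass via mutex_lock_nested() teaches lockdep that the two
 * acquisitions are different nesting levels of the same class.
 */
enum {
	OBJ_LOCK_NORMAL,   /* implicit subclass 0 of a plain mutex_lock() */
	OBJ_LOCK_SHRINKER, /* used only from the purge path */
};

struct obj {
	struct mutex lock;
};

static void obj_use(struct obj *obj)
{
	mutex_lock(&obj->lock);  /* subclass OBJ_LOCK_NORMAL */
	/* ... touch backing store; allocations here may invoke the
	 * shrinker, which purges *other* objects ... */
	mutex_unlock(&obj->lock);
}

static void obj_purge(struct obj *obj)
{
	mutex_lock_nested(&obj->lock, OBJ_LOCK_SHRINKER);
	/* ... release backing store ... */
	mutex_unlock(&obj->lock);
}

The subclass annotation only silences lockdep; the actual deadlock
avoidance comes from the WILLNEED checks above, which guarantee that
no path holding an object's lock can recurse into the shrinker and
purge that same object.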
drivers/gpu/drm/msm/msm_gem.c | 54 ++++++++++++++++++++++++++++++++--
drivers/gpu/drm/msm/msm_gem.h | 1 +
drivers/gpu/drm/msm/msm_gem_shrinker.c | 12 ++++++++
3 files changed, 65 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index e132548..f5d1f84 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -26,6 +26,22 @@
#include "msm_gpu.h"
#include "msm_mmu.h"
+/* The shrinker can be triggered while we hold objA->lock, and we need
+ * to grab objB->lock to purge it. Lockdep just sees these as a single
+ * class of lock, so we use subclasses to teach it the difference.
+ *
+ * OBJ_LOCK_NORMAL is implicit (ie. normal mutex_lock() call), and
+ * OBJ_LOCK_SHRINKER is used in msm_gem_purge().
+ *
+ * It is *essential* that we never go down paths that could trigger the
+ * shrinker for a purgeable object. This is ensured by checking that
+ * msm_obj->madv == MSM_MADV_WILLNEED.
+ */
+enum {
+ OBJ_LOCK_NORMAL,
+ OBJ_LOCK_SHRINKER,
+};
+
static dma_addr_t physaddr(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -150,6 +166,12 @@ struct page **msm_gem_get_pages(struct drm_gem_object *obj)
struct page **p;
mutex_lock(&msm_obj->lock);
+
+ if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
+ mutex_unlock(&msm_obj->lock);
+ return ERR_PTR(-EBUSY);
+ }
+
p = get_pages(obj);
mutex_unlock(&msm_obj->lock);
return p;
@@ -220,6 +242,11 @@ int msm_gem_fault(struct vm_fault *vmf)
if (ret)
goto out;
+ if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
+ mutex_unlock(&msm_obj->lock);
+ return VM_FAULT_SIGBUS;
+ }
+
/* make sure we have pages attached now */
pages = get_pages(obj);
if (IS_ERR(pages)) {
@@ -358,6 +385,11 @@ int msm_gem_get_iova(struct drm_gem_object *obj,
mutex_lock(&msm_obj->lock);
+ if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
+ mutex_unlock(&msm_obj->lock);
+ return -EBUSY;
+ }
+
vma = lookup_vma(obj, aspace);
if (!vma) {
@@ -454,6 +486,12 @@ void *msm_gem_get_vaddr(struct drm_gem_object *obj)
struct msm_gem_object *msm_obj = to_msm_bo(obj);
mutex_lock(&msm_obj->lock);
+
+ if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
+ mutex_unlock(&msm_obj->lock);
+ return ERR_PTR(-EBUSY);
+ }
+
if (!msm_obj->vaddr) {
struct page **pages = get_pages(obj);
if (IS_ERR(pages)) {
@@ -489,12 +527,18 @@ int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ mutex_lock(&msm_obj->lock);
+
WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
if (msm_obj->madv != __MSM_MADV_PURGED)
msm_obj->madv = madv;
- return (msm_obj->madv != __MSM_MADV_PURGED);
+ madv = msm_obj->madv;
+
+ mutex_unlock(&msm_obj->lock);
+
+ return (madv != __MSM_MADV_PURGED);
}
void msm_gem_purge(struct drm_gem_object *obj)
@@ -506,6 +550,8 @@ void msm_gem_purge(struct drm_gem_object *obj)
WARN_ON(!is_purgeable(msm_obj));
WARN_ON(obj->import_attach);
+ mutex_lock_nested(&msm_obj->lock, OBJ_LOCK_SHRINKER);
+
put_iova(obj);
msm_gem_vunmap(obj);
@@ -526,6 +572,8 @@ void msm_gem_purge(struct drm_gem_object *obj)
invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
0, (loff_t)-1);
+
+ mutex_unlock(&msm_obj->lock);
}
void msm_gem_vunmap(struct drm_gem_object *obj)
@@ -660,7 +708,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
uint64_t off = drm_vma_node_start(&obj->vma_node);
const char *madv;
- WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+ mutex_lock(&msm_obj->lock);
switch (msm_obj->madv) {
case __MSM_MADV_PURGED:
@@ -701,6 +749,8 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
if (fence)
describe_fence(fence, "Exclusive", m);
rcu_read_unlock();
+
+ mutex_unlock(&msm_obj->lock);
}
void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 9ad5ba4c..2b9b8e9 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -101,6 +101,7 @@ static inline bool is_active(struct msm_gem_object *msm_obj)
static inline bool is_purgeable(struct msm_gem_object *msm_obj)
{
+ WARN_ON(!mutex_is_locked(&msm_obj->base.dev->struct_mutex));
return (msm_obj->madv == MSM_MADV_DONTNEED) && msm_obj->sgt &&
!msm_obj->base.dma_buf && !msm_obj->base.import_attach;
}
diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c
index ab1dd02..e1db4ad 100644
--- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
+++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
@@ -20,6 +20,18 @@
static bool msm_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
{
+ /* NOTE: we are *closer* to being able to get rid of
+ * mutex_trylock_recursive().. the msm_gem code itself does
+ * not need struct_mutex, although code-paths that can trigger
+ * the shrinker are still called while holding struct_mutex.
+ *
+ * Also, msm_obj->madv is protected by struct_mutex.
+ *
+ * The next step is probably to split out a separate lock for
+ * protecting inactive_list, so that the shrinker does not need
+ * struct_mutex.
+ */
switch (mutex_trylock_recursive(&dev->struct_mutex)) {
case MUTEX_TRYLOCK_FAILED:
return false;
--
2.9.4