From: ira.weiny-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org
To: dledford-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org
Cc: linux-rdma-u79uwXL29TY76Z2rM5mHXA@public.gmane.org,
	Dean Luick <dean.luick-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
Subject: [PATCH 16/16] IB/hfi1: Add cache evict LRU list
Date: Thu, 28 Jul 2016 15:21:27 -0400	[thread overview]
Message-ID: <1469733687-31738-17-git-send-email-ira.weiny@intel.com> (raw)
In-Reply-To: <1469733687-31738-1-git-send-email-ira.weiny-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>

From: Dean Luick <dean.luick-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>

The original code used an LRU list to evict the least recently used
nodes.  For correctness the evict code was moved under the
handler->lock; now add the LRU list back.
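
For illustration only (not part of the patch itself), below is a minimal,
self-contained sketch of the LRU-under-lock pattern being restored, with
hypothetical names (my_cache, my_node, my_cache_evict): new nodes are added
at the head of the list, eviction walks from the tail under the spinlock,
and victims are moved to a private delete list so teardown can run outside
the lock, mirroring how hfi1_mmu_rb_insert() and hfi1_mmu_rb_evict() use
handler->lru_list.

  /* Sketch only; my_cache/my_node are hypothetical, not hfi1 structures. */
  #include <linux/list.h>
  #include <linux/spinlock.h>
  #include <linux/slab.h>

  struct my_node {
  	struct list_head list;		/* position on the LRU list */
  	unsigned long addr;
  	unsigned long len;
  };

  struct my_cache {
  	spinlock_t lock;		/* protects lru_list */
  	struct list_head lru_list;	/* head = newest, tail = oldest */
  };

  static void my_cache_init(struct my_cache *cache)
  {
  	spin_lock_init(&cache->lock);
  	INIT_LIST_HEAD(&cache->lru_list);
  }

  /* Insert: newest entries go to the head of the LRU list. */
  static void my_cache_insert(struct my_cache *cache, struct my_node *node)
  {
  	unsigned long flags;

  	spin_lock_irqsave(&cache->lock, flags);
  	list_add(&node->list, &cache->lru_list);
  	spin_unlock_irqrestore(&cache->lock, flags);
  }

  /*
  * Evict: walk from the tail (oldest first) under the lock, moving victims
  * to a private delete list so the actual free runs outside the spinlock.
  */
  static void my_cache_evict(struct my_cache *cache, unsigned long target)
  {
  	struct my_node *node, *tmp;
  	struct list_head del_list;
  	unsigned long flags, freed = 0;

  	INIT_LIST_HEAD(&del_list);

  	spin_lock_irqsave(&cache->lock, flags);
  	list_for_each_entry_safe_reverse(node, tmp, &cache->lru_list, list) {
  		list_move(&node->list, &del_list);
  		freed += node->len;
  		if (freed >= target)
  			break;
  	}
  	spin_unlock_irqrestore(&cache->lock, flags);

  	/* Free outside the spinlock. */
  	list_for_each_entry_safe(node, tmp, &del_list, list) {
  		list_del(&node->list);
  		kfree(node);
  	}
  }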

Reviewed-by: Ira Weiny <ira.weiny-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
Signed-off-by: Dean Luick <dean.luick-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
---
 drivers/infiniband/hw/hfi1/mmu_rb.c | 29 +++++++++++++++++++----------
 1 file changed, 19 insertions(+), 10 deletions(-)

diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c
index a02344a9d746..7ad30898fc19 100644
--- a/drivers/infiniband/hw/hfi1/mmu_rb.c
+++ b/drivers/infiniband/hw/hfi1/mmu_rb.c
@@ -59,6 +59,7 @@ struct mmu_rb_handler {
 	spinlock_t lock;        /* protect the RB tree */
 	struct mmu_rb_ops *ops;
 	struct mm_struct *mm;
+	struct list_head lru_list;
 	struct work_struct del_work;
 	struct list_head del_list;
 	struct workqueue_struct *wq;
@@ -119,6 +120,7 @@ int hfi1_mmu_rb_register(void *ops_arg, struct mm_struct *mm,
 	handlr->mm = mm;
 	INIT_WORK(&handlr->del_work, handle_remove);
 	INIT_LIST_HEAD(&handlr->del_list);
+	INIT_LIST_HEAD(&handlr->lru_list);
 	handlr->wq = wq;
 
 	ret = mmu_notifier_register(&handlr->mn, handlr->mm);
@@ -153,7 +155,8 @@ void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler)
 	while ((node = rb_first(&handler->root))) {
 		rbnode = rb_entry(node, struct mmu_rb_node, node);
 		rb_erase(node, &handler->root);
-		list_add(&rbnode->list, &del_list);
+		/* move from LRU list to delete list */
+		list_move(&rbnode->list, &del_list);
 	}
 	spin_unlock_irqrestore(&handler->lock, flags);
 
@@ -178,10 +181,13 @@ int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
 		goto unlock;
 	}
 	__mmu_int_rb_insert(mnode, &handler->root);
+	list_add(&mnode->list, &handler->lru_list);
 
 	ret = handler->ops->insert(handler->ops_arg, mnode);
-	if (ret)
+	if (ret) {
 		__mmu_int_rb_remove(mnode, &handler->root);
+		list_del(&mnode->list); /* remove from LRU list */
+	}
 unlock:
 	spin_unlock_irqrestore(&handler->lock, flags);
 	return ret;
@@ -219,8 +225,10 @@ struct mmu_rb_node *hfi1_mmu_rb_extract(struct mmu_rb_handler *handler,
 
 	spin_lock_irqsave(&handler->lock, flags);
 	node = __mmu_rb_search(handler, addr, len);
-	if (node)
+	if (node) {
 		__mmu_int_rb_remove(node, &handler->root);
+		list_del(&node->list); /* remove from LRU list */
+	}
 	spin_unlock_irqrestore(&handler->lock, flags);
 
 	return node;
@@ -228,8 +236,7 @@ struct mmu_rb_node *hfi1_mmu_rb_extract(struct mmu_rb_handler *handler,
 
 void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
 {
-	struct mmu_rb_node *rbnode;
-	struct rb_node *node, *next;
+	struct mmu_rb_node *rbnode, *ptr;
 	struct list_head del_list;
 	unsigned long flags;
 	bool stop = false;
@@ -237,13 +244,13 @@ void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
 	INIT_LIST_HEAD(&del_list);
 
 	spin_lock_irqsave(&handler->lock, flags);
-	for (node = rb_first(&handler->root); node; node = next) {
-		next = rb_next(node);
-		rbnode = rb_entry(node, struct mmu_rb_node, node);
+	list_for_each_entry_safe_reverse(rbnode, ptr, &handler->lru_list,
+					 list) {
 		if (handler->ops->evict(handler->ops_arg, rbnode, evict_arg,
 					&stop)) {
 			__mmu_int_rb_remove(rbnode, &handler->root);
-			list_add(&rbnode->list, &del_list);
+			/* move from LRU list to delete list */
+			list_move(&rbnode->list, &del_list);
 		}
 		if (stop)
 			break;
@@ -272,6 +279,7 @@ void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler,
 		  node->len);
 	spin_lock_irqsave(&handler->lock, flags);
 	__mmu_int_rb_remove(node, &handler->root);
+	list_del(&node->list); /* remove from LRU list */
 	spin_unlock_irqrestore(&handler->lock, flags);
 
 	handler->ops->remove(handler->ops_arg, node);
@@ -311,7 +319,8 @@ static void mmu_notifier_mem_invalidate(struct mmu_notifier *mn,
 			  node->addr, node->len);
 		if (handler->ops->invalidate(handler->ops_arg, node)) {
 			__mmu_int_rb_remove(node, root);
-			list_add(&node->list, &handler->del_list);
+			/* move from LRU list to delete list */
+			list_move(&node->list, &handler->del_list);
 			added = true;
 		}
 	}
-- 
1.8.2
