From: Matthew Wilcox <willy@infradead.org>
Subject: [PATCH v11 42/63] memfd: Convert shmem_wait_for_pins to XArray
Date: Sat, 14 Apr 2018 07:12:55 -0700
Message-ID: <20180414141316.7167-43-willy@infradead.org>
References: <20180414141316.7167-1-willy@infradead.org>
In-Reply-To: <20180414141316.7167-1-willy@infradead.org>
To: linux-mm@kvack.org, linux-fsdevel@vger.kernel.org
Cc: linux-nilfs@vger.kernel.org, Jan Kara, Jeff Layton, Matthew Wilcox,
 James Simmons, Jaegeuk Kim, Andreas Dilger, Nicholas Piggin,
 linux-f2fs-devel@lists.sourceforge.net, Oleg Drokin, Ryusuke Konishi,
 Lukas Czerner, Ross Zwisler, Christoph Hellwig, Goldwyn Rodrigues,
 Mike Kravetz

Simplify the locking by taking the spinlock while we walk the tree, on
the assumption that many acquires and releases of the lock will be
worse than holding the lock while we process an entire batch of pages.
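For reviewers new to the XArray API, here is a minimal, self-contained
sketch of the batching idiom this patch adopts: hold the lock across a
batch of tagged entries, then pause the iteration to drop the lock and
reschedule.  It reuses the tag-based names from this series
(xas_for_each_tag(), xas_pause(), and friends, as used in the diff
below); example_walk(), EXAMPLE_BATCH and the xa_tag_t parameter type
are assumptions made up for illustration, not part of the patch.

	/*
	 * Illustrative sketch only.  Walk all entries with a given tag,
	 * holding the xa_lock for batches of entries rather than taking
	 * and releasing it once per entry.
	 */
	#include <linux/xarray.h>
	#include <linux/sched.h>

	#define EXAMPLE_BATCH	1024	/* hypothetical batch size */

	static void example_walk(struct xarray *xa, xa_tag_t tag)
	{
		XA_STATE(xas, xa, 0);
		void *entry;
		unsigned int batch = 0;

		xas_lock_irq(&xas);
		xas_for_each_tag(&xas, entry, ULONG_MAX, tag) {
			/* ... process entry under the lock ... */

			if (++batch % EXAMPLE_BATCH)
				continue;
			/*
			 * Batch boundary: pause the iteration so we can
			 * drop the lock, give other lock acquirers and
			 * the scheduler a chance, then carry on from the
			 * next index.
			 */
			xas_pause(&xas);
			xas_unlock_irq(&xas);
			cond_resched();
			xas_lock_irq(&xas);
		}
		xas_unlock_irq(&xas);
	}

xas_pause() records the current position so the next loop iteration
restarts from the following index, which is what makes it safe to drop
the lock in the middle of the walk.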
Signed-off-by: Matthew Wilcox
Reviewed-by: Mike Kravetz
---
 mm/shmem.c | 59 ++++++++++++++++++++++-----------------------------------
 1 file changed, 24 insertions(+), 35 deletions(-)

diff --git a/mm/shmem.c b/mm/shmem.c
index e1a0d1c7513e..017340fe933d 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2627,9 +2627,7 @@ static void shmem_tag_pins(struct address_space *mapping)
  */
 static int shmem_wait_for_pins(struct address_space *mapping)
 {
-	struct radix_tree_iter iter;
-	void **slot;
-	pgoff_t start;
+	XA_STATE(xas, &mapping->i_pages, 0);
 	struct page *page;
 	int error, scan;
 
@@ -2637,7 +2635,9 @@ static int shmem_wait_for_pins(struct address_space *mapping)
 
 	error = 0;
 	for (scan = 0; scan <= LAST_SCAN; scan++) {
-		if (!radix_tree_tagged(&mapping->i_pages, SHMEM_TAG_PINNED))
+		unsigned int tagged = 0;
+
+		if (!xas_tagged(&xas, SHMEM_TAG_PINNED))
 			break;
 
 		if (!scan)
@@ -2645,45 +2645,34 @@ static int shmem_wait_for_pins(struct address_space *mapping)
 		else if (schedule_timeout_killable((HZ << scan) / 200))
 			scan = LAST_SCAN;
 
-		start = 0;
-		rcu_read_lock();
-		radix_tree_for_each_tagged(slot, &mapping->i_pages, &iter,
-					   start, SHMEM_TAG_PINNED) {
-
-			page = radix_tree_deref_slot(slot);
-			if (radix_tree_exception(page)) {
-				if (radix_tree_deref_retry(page)) {
-					slot = radix_tree_iter_retry(&iter);
-					continue;
-				}
-
-				page = NULL;
-			}
-
-			if (page &&
-			    page_count(page) - page_mapcount(page) != 1) {
-				if (scan < LAST_SCAN)
-					goto continue_resched;
-
+		xas_set(&xas, 0);
+		xas_lock_irq(&xas);
+		xas_for_each_tag(&xas, page, ULONG_MAX, SHMEM_TAG_PINNED) {
+			bool clear = true;
+			if (xa_is_value(page))
+				continue;
+			if (page_count(page) - page_mapcount(page) != 1) {
 				/*
 				 * On the last scan, we clean up all those tags
 				 * we inserted; but make a note that we still
 				 * found pages pinned.
 				 */
-				error = -EBUSY;
+				if (scan == LAST_SCAN)
+					error = -EBUSY;
+				else
+					clear = false;
 			}
+			if (clear)
+				xas_clear_tag(&xas, SHMEM_TAG_PINNED);
+			if (++tagged % XA_CHECK_SCHED)
+				continue;
 
-			xa_lock_irq(&mapping->i_pages);
-			radix_tree_tag_clear(&mapping->i_pages,
-					     iter.index, SHMEM_TAG_PINNED);
-			xa_unlock_irq(&mapping->i_pages);
-continue_resched:
-			if (need_resched()) {
-				slot = radix_tree_iter_resume(slot, &iter);
-				cond_resched_rcu();
-			}
+			xas_pause(&xas);
+			xas_unlock_irq(&xas);
+			cond_resched();
+			xas_lock_irq(&xas);
 		}
-		rcu_read_unlock();
+		xas_unlock_irq(&xas);
 	}
 
 	return error;
-- 
2.17.0