From: Matthew Wilcox <willy@infradead.org>
To: linux-mm@kvack.org, linux-fsdevel@vger.kernel.org
Cc: Matthew Wilcox <mawilcox@microsoft.com>, Jan Kara <jack@suse.cz>,
	Jeff Layton <jlayton@redhat.com>,
	Lukas Czerner <lczerner@redhat.com>,
	Ross Zwisler <ross.zwisler@linux.intel.com>,
	Christoph Hellwig <hch@lst.de>,
	Goldwyn Rodrigues <rgoldwyn@suse.com>,
	Nicholas Piggin <npiggin@gmail.com>,
	Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>,
	linux-nilfs@vger.kernel.org, Jaegeuk Kim <jaegeuk@kernel.org>,
	Chao Yu <yuchao0@huawei.com>,
	linux-f2fs-devel@lists.sourceforge.net,
	Oleg Drokin <oleg.drokin@intel.com>,
	Andreas Dilger <andreas.dilger@intel.com>,
	James Simmons <jsimmons@infradead.org>,
	Mike Kravetz <mike.kravetz@oracle.com>
Subject: [PATCH v11 55/63] dax: Convert dax_insert_pfn_mkwrite to XArray
Date: Sat, 14 Apr 2018 07:13:08 -0700
Message-ID: <20180414141316.7167-56-willy@infradead.org>
In-Reply-To: <20180414141316.7167-1-willy@infradead.org>

From: Matthew Wilcox <mawilcox@microsoft.com>

Add some XArray-based helper functions to replace the radix-tree-based
metaphors currently in use.  The biggest change is that converted code
doesn't see its own lock bit; get_unlocked_entry() always returns an
entry with the lock bit clear, and locking an entry now returns void.
So we don't have to mess around with loading the current entry and
clearing the lock bit; we can simply store the entry we were using.
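
As a minimal sketch of what this buys us, compare the old radix tree
calling pattern with the new one (schematic code modelled on the
dax_insert_pfn_mkwrite() conversion below, not copied verbatim from
fs/dax.c):

	/* Before: the caller sees and manipulates the lock bit */
	xa_lock_irq(&mapping->i_pages);
	entry = get_unlocked_mapping_entry(mapping, index, &slot);
	entry = lock_slot(mapping, slot);	/* reload entry, set lock bit */
	xa_unlock_irq(&mapping->i_pages);
	/* ... */
	put_locked_mapping_entry(mapping, index);

	/* After: the lock bit never leaks out of the helpers */
	xas_lock_irq(&xas);
	entry = get_unlocked_entry(&xas);	/* lock bit always clear */
	dax_lock_entry(&xas, entry);		/* returns void */
	xas_unlock_irq(&xas);
	/* ... */
	put_locked_entry(&xas, entry);		/* store entry back, wake waiters */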

Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
---
 fs/dax.c | 146 +++++++++++++++++++++++++++++++++++++++++++------------
 1 file changed, 115 insertions(+), 31 deletions(-)

diff --git a/fs/dax.c b/fs/dax.c
index af669ca5020a..19ac013204a1 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -38,6 +38,17 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/fs_dax.h>
 
+static inline unsigned int pe_order(enum page_entry_size pe_size)
+{
+	if (pe_size == PE_SIZE_PTE)
+		return PAGE_SHIFT - PAGE_SHIFT;
+	if (pe_size == PE_SIZE_PMD)
+		return PMD_SHIFT - PAGE_SHIFT;
+	if (pe_size == PE_SIZE_PUD)
+		return PUD_SHIFT - PAGE_SHIFT;
+	return ~0;
+}
+
 /* We choose 4096 entries - same as per-zone page wait tables */
 #define DAX_WAIT_TABLE_BITS 12
 #define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)
@@ -46,6 +57,9 @@
 #define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
 #define PG_PMD_NR	(PMD_SIZE >> PAGE_SHIFT)
 
+/* The order of a PMD entry */
+#define PMD_ORDER	(PMD_SHIFT - PAGE_SHIFT)
+
 static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];
 
 static int __init init_dax_wait_table(void)
@@ -85,10 +99,15 @@ static void *dax_mk_locked(unsigned long pfn, unsigned long flags)
 			DAX_ENTRY_LOCK);
 }
 
+static bool dax_is_locked(void *entry)
+{
+	return xa_to_value(entry) & DAX_ENTRY_LOCK;
+}
+
 static unsigned int dax_entry_order(void *entry)
 {
 	if (xa_to_value(entry) & DAX_PMD)
-		return PMD_SHIFT - PAGE_SHIFT;
+		return PMD_ORDER;
 	return 0;
 }
 
@@ -181,6 +200,79 @@ static void dax_wake_mapping_entry_waiter(struct xarray *xa,
 		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
 }
 
+static void dax_wake_entry(struct xa_state *xas, bool wake_all)
+{
+	return dax_wake_mapping_entry_waiter(xas->xa, xas->xa_index, NULL,
+								wake_all);
+}
+
+/*
+ * Look up entry in page cache, wait for it to become unlocked if it
+ * is a DAX entry and return it.  The caller must subsequently call
+ * put_unlocked_entry() if it did not lock the entry or put_locked_entry()
+ * if it did.
+ *
+ * Must be called with the i_pages lock held.
+ */
+static void *get_unlocked_entry(struct xa_state *xas)
+{
+	void *entry;
+	struct wait_exceptional_entry_queue ewait;
+	wait_queue_head_t *wq;
+
+	init_wait(&ewait.wait);
+	ewait.wait.func = wake_exceptional_entry_func;
+
+	for (;;) {
+		entry = xas_load(xas);
+		if (!entry || xa_is_internal(entry) ||
+				WARN_ON_ONCE(!xa_is_value(entry)) ||
+				!dax_is_locked(entry))
+			return entry;
+
+		wq = dax_entry_waitqueue(xas->xa, xas->xa_index, entry,
+				&ewait.key);
+		prepare_to_wait_exclusive(wq, &ewait.wait,
+					  TASK_UNINTERRUPTIBLE);
+		xas_unlock_irq(xas);
+		xas_reset(xas);
+		schedule();
+		finish_wait(wq, &ewait.wait);
+		xas_lock_irq(xas);
+	}
+}
+
+static void put_unlocked_entry(struct xa_state *xas, void *entry)
+{
+	/* We wake all waiters whenever we store a NULL entry */
+	if (!entry)
+		return;
+	dax_wake_entry(xas, false);
+}
+
+/*
+ * We must have used the xa_state to get the entry, but then we locked the
+ * entry and dropped the xa_lock, so we know the xa_state is stale and must
+ * be reset before use.
+ */
+static void put_locked_entry(struct xa_state *xas, void *entry)
+{
+	void *old;
+
+	xas_reset(xas);
+	xas_lock_irq(xas);
+	old = xas_store(xas, entry);
+	xas_unlock_irq(xas);
+	BUG_ON(!dax_is_locked(old));
+	dax_wake_entry(xas, false);
+}
+
+static void dax_lock_entry(struct xa_state *xas, void *entry)
+{
+	unsigned long v = xa_to_value(entry);
+	xas_store(xas, xa_mk_value(v | DAX_ENTRY_LOCK));
+}
+
 /*
  * Check whether the given slot is locked.  Must be called with the i_pages
  * lock held.
@@ -1521,51 +1613,48 @@ EXPORT_SYMBOL_GPL(dax_iomap_fault);
 /*
  * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables
  * @vmf: The description of the fault
- * @pe_size: Size of entry to be inserted
  * @pfn: PFN to insert
+ * @order: Order of entry to insert.
  *
  * This function inserts a writeable PTE or PMD entry into the page tables
  * for an mmaped DAX file.  It also marks the page cache entry as dirty.
  */
-static int dax_insert_pfn_mkwrite(struct vm_fault *vmf,
-				  enum page_entry_size pe_size,
-				  pfn_t pfn)
+static
+int dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
 {
 	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
-	void *entry, **slot;
-	pgoff_t index = vmf->pgoff;
+	XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order);
+	void *entry;
 	int vmf_ret, error;
 
-	xa_lock_irq(&mapping->i_pages);
-	entry = get_unlocked_mapping_entry(mapping, index, &slot);
+	xas_lock_irq(&xas);
+	entry = get_unlocked_entry(&xas);
 	/* Did we race with someone splitting entry or so? */
 	if (!entry ||
-	    (pe_size == PE_SIZE_PTE && !dax_is_pte_entry(entry)) ||
-	    (pe_size == PE_SIZE_PMD && !dax_is_pmd_entry(entry))) {
-		put_unlocked_mapping_entry(mapping, index, entry);
-		xa_unlock_irq(&mapping->i_pages);
+	    (order == 0 && !dax_is_pte_entry(entry)) ||
+	    (order == PMD_ORDER && (xa_is_internal(entry) ||
+				    !dax_is_pmd_entry(entry)))) {
+		put_unlocked_entry(&xas, entry);
+		xas_unlock_irq(&xas);
 		trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
 						      VM_FAULT_NOPAGE);
 		return VM_FAULT_NOPAGE;
 	}
-	radix_tree_tag_set(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY);
-	entry = lock_slot(mapping, slot);
-	xa_unlock_irq(&mapping->i_pages);
-	switch (pe_size) {
-	case PE_SIZE_PTE:
+	xas_set_tag(&xas, PAGECACHE_TAG_DIRTY);
+	dax_lock_entry(&xas, entry);
+	xas_unlock_irq(&xas);
+	if (order == 0) {
 		error = vm_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
 		vmf_ret = dax_fault_return(error);
-		break;
 #ifdef CONFIG_FS_DAX_PMD
-	case PE_SIZE_PMD:
+	} else if (order == PMD_ORDER) {
 		vmf_ret = vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
 			pfn, true);
-		break;
 #endif
-	default:
+	} else {
 		vmf_ret = VM_FAULT_FALLBACK;
 	}
-	put_locked_mapping_entry(mapping, index);
+	put_locked_entry(&xas, entry);
 	trace_dax_insert_pfn_mkwrite(mapping->host, vmf, vmf_ret);
 	return vmf_ret;
 }
@@ -1585,17 +1674,12 @@ int dax_finish_sync_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
 {
 	int err;
 	loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
-	size_t len = 0;
+	unsigned int order = pe_order(pe_size);
+	size_t len = PAGE_SIZE << order;
 
-	if (pe_size == PE_SIZE_PTE)
-		len = PAGE_SIZE;
-	else if (pe_size == PE_SIZE_PMD)
-		len = PMD_SIZE;
-	else
-		WARN_ON_ONCE(1);
 	err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1);
 	if (err)
 		return VM_FAULT_SIGBUS;
-	return dax_insert_pfn_mkwrite(vmf, pe_size, pfn);
+	return dax_insert_pfn_mkwrite(vmf, pfn, order);
 }
 EXPORT_SYMBOL_GPL(dax_finish_sync_fault);
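
(A quick sanity check of the new length computation in
dax_finish_sync_fault(), assuming the usual x86-64 constants of
PAGE_SHIFT = 12, PMD_SHIFT = 21 and PUD_SHIFT = 30:

	pe_order(PE_SIZE_PTE) = 12 - 12 = 0   ->  len = 4KiB << 0  = 4KiB
	pe_order(PE_SIZE_PMD) = 21 - 12 = 9   ->  len = 4KiB << 9  = 2MiB
	pe_order(PE_SIZE_PUD) = 30 - 12 = 18  ->  len = 4KiB << 18 = 1GiB

The PTE and PMD cases match the old hand-rolled PAGE_SIZE/PMD_SIZE
values, and the PUD case, which previously hit the WARN_ON_ONCE, now
falls out naturally.)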
-- 
2.17.0
