From: Ross Zwisler <ross.zwisler@linux.intel.com>
To: linux-kernel@vger.kernel.org
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>,
Theodore Ts'o <tytso@mit.edu>,
Alexander Viro <viro@zeniv.linux.org.uk>,
Andreas Dilger <adilger.kernel@dilger.ca>,
Andrew Morton <akpm@linux-foundation.org>,
Christoph Hellwig <hch@lst.de>,
Dan Williams <dan.j.williams@intel.com>,
Dave Chinner <david@fromorbit.com>, Jan Kara <jack@suse.cz>,
Matthew Wilcox <mawilcox@microsoft.com>,
linux-ext4@vger.kernel.org, linux-fsdevel@vger.kernel.org,
linux-mm@kvack.org, linux-nvdimm@lists.01.org,
linux-xfs@vger.kernel.org
Subject: [PATCH v9 07/16] dax: coordinate locking for offsets in PMD range
Date: Tue, 1 Nov 2016 13:54:09 -0600
Message-ID: <1478030058-1422-8-git-send-email-ross.zwisler@linux.intel.com>
In-Reply-To: <1478030058-1422-1-git-send-email-ross.zwisler@linux.intel.com>

DAX radix tree locking currently locks entries based on the unique
combination of the 'mapping' pointer and the pgoff_t 'index' for the entry.
This works for PTEs, but as we move to PMDs we will need all the offsets
within the range covered by the PMD to map to the same bit lock.  To
accomplish this, for ranges covered by a PMD entry we will instead lock
based on the page offset of the beginning of the PMD entry.  The 'mapping'
pointer is still used in the same way.
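
As an illustration only (a minimal userspace sketch, not code from this
patch), the index alignment can be exercised like so, assuming the common
x86-64 values PAGE_SHIFT = 12 and PMD_SHIFT = 21:

#include <stdio.h>

#define PAGE_SHIFT	12	/* 4 KiB pages (assumed) */
#define PMD_SHIFT	21	/* 2 MiB PMD entries (assumed) */
#define PG_PMD_NR	(1UL << (PMD_SHIFT - PAGE_SHIFT))	/* 512 */

/* Clear the low bits of 'index' so every page offset within a PMD
 * range yields the same value, and thus the same bit lock. */
static unsigned long pmd_align_index(unsigned long index)
{
	return index & ~(PG_PMD_NR - 1);
}

int main(void)
{
	/* Prints "0 0 512": indices 0..511 collapse to 0,
	 * 512..1023 collapse to 512, and so on. */
	printf("%lu %lu %lu\n", pmd_align_index(0UL),
	       pmd_align_index(511UL), pmd_align_index(512UL));
	return 0;
}

With those assumed values every one of the 512 page offsets in a 2 MiB
PMD range hashes to the same wait queue bucket, which is exactly the
property the locking below relies on.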
Signed-off-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jan Kara <jack@suse.cz>
---
fs/dax.c | 65 +++++++++++++++++++++++++++++++++--------------------
include/linux/dax.h | 2 +-
mm/filemap.c | 2 +-
3 files changed, 43 insertions(+), 26 deletions(-)
diff --git a/fs/dax.c b/fs/dax.c
index 835e7f0..7238702 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -64,14 +64,6 @@ static int __init init_dax_wait_table(void)
}
fs_initcall(init_dax_wait_table);
-static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
- pgoff_t index)
-{
- unsigned long hash = hash_long((unsigned long)mapping ^ index,
- DAX_WAIT_TABLE_BITS);
- return wait_table + hash;
-}
-
static long dax_map_atomic(struct block_device *bdev, struct blk_dax_ctl *dax)
{
struct request_queue *q = bdev->bd_queue;
@@ -285,7 +277,7 @@ EXPORT_SYMBOL_GPL(dax_do_io);
*/
struct exceptional_entry_key {
struct address_space *mapping;
- unsigned long index;
+ pgoff_t entry_start;
};
struct wait_exceptional_entry_queue {
@@ -293,6 +285,26 @@ struct wait_exceptional_entry_queue {
struct exceptional_entry_key key;
};
+static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
+ pgoff_t index, void *entry, struct exceptional_entry_key *key)
+{
+ unsigned long hash;
+
+ /*
+ * If 'entry' is a PMD, align the 'index' that we use for the wait
+ * queue to the start of that PMD. This ensures that all offsets in
+ * the range covered by the PMD map to the same bit lock.
+ */
+ if (RADIX_DAX_TYPE(entry) == RADIX_DAX_PMD)
+ index &= ~((1UL << (PMD_SHIFT - PAGE_SHIFT)) - 1);
+
+ key->mapping = mapping;
+ key->entry_start = index;
+
+ hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS);
+ return wait_table + hash;
+}
+
static int wake_exceptional_entry_func(wait_queue_t *wait, unsigned int mode,
int sync, void *keyp)
{
@@ -301,7 +313,7 @@ static int wake_exceptional_entry_func(wait_queue_t *wait, unsigned int mode,
container_of(wait, struct wait_exceptional_entry_queue, wait);
if (key->mapping != ewait->key.mapping ||
- key->index != ewait->key.index)
+ key->entry_start != ewait->key.entry_start)
return 0;
return autoremove_wake_function(wait, mode, sync, NULL);
}
@@ -359,12 +371,10 @@ static void *get_unlocked_mapping_entry(struct address_space *mapping,
{
void *entry, **slot;
struct wait_exceptional_entry_queue ewait;
- wait_queue_head_t *wq = dax_entry_waitqueue(mapping, index);
+ wait_queue_head_t *wq;
init_wait(&ewait.wait);
ewait.wait.func = wake_exceptional_entry_func;
- ewait.key.mapping = mapping;
- ewait.key.index = index;
for (;;) {
entry = __radix_tree_lookup(&mapping->page_tree, index, NULL,
@@ -375,6 +385,8 @@ static void *get_unlocked_mapping_entry(struct address_space *mapping,
*slotp = slot;
return entry;
}
+
+ wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
prepare_to_wait_exclusive(wq, &ewait.wait,
TASK_UNINTERRUPTIBLE);
spin_unlock_irq(&mapping->tree_lock);
@@ -447,10 +459,20 @@ static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index)
return entry;
}
+/*
+ * We do not necessarily hold the mapping->tree_lock when we call this
+ * function so it is possible that 'entry' is no longer a valid item in the
+ * radix tree. This is okay, though, because all we really need to do is to
+ * find the correct waitqueue where tasks might be sleeping waiting for that
+ * old 'entry' and wake them.
+ */
void dax_wake_mapping_entry_waiter(struct address_space *mapping,
- pgoff_t index, bool wake_all)
+ pgoff_t index, void *entry, bool wake_all)
{
- wait_queue_head_t *wq = dax_entry_waitqueue(mapping, index);
+ struct exceptional_entry_key key;
+ wait_queue_head_t *wq;
+
+ wq = dax_entry_waitqueue(mapping, index, entry, &key);
/*
* Checking for locked entry and prepare_to_wait_exclusive() happens
@@ -458,13 +480,8 @@ void dax_wake_mapping_entry_waiter(struct address_space *mapping,
* So at this point all tasks that could have seen our entry locked
* must be in the waitqueue and the following check will see them.
*/
- if (waitqueue_active(wq)) {
- struct exceptional_entry_key key;
-
- key.mapping = mapping;
- key.index = index;
+ if (waitqueue_active(wq))
__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
- }
}
void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index)
@@ -480,7 +497,7 @@ void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index)
}
unlock_slot(mapping, slot);
spin_unlock_irq(&mapping->tree_lock);
- dax_wake_mapping_entry_waiter(mapping, index, false);
+ dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}
static void put_locked_mapping_entry(struct address_space *mapping,
@@ -505,7 +522,7 @@ static void put_unlocked_mapping_entry(struct address_space *mapping,
return;
/* We have to wake up next waiter for the radix tree entry lock */
- dax_wake_mapping_entry_waiter(mapping, index, false);
+ dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}
/*
@@ -532,7 +549,7 @@ int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
radix_tree_delete(&mapping->page_tree, index);
mapping->nrexceptional--;
spin_unlock_irq(&mapping->tree_lock);
- dax_wake_mapping_entry_waiter(mapping, index, true);
+ dax_wake_mapping_entry_waiter(mapping, index, entry, true);
return 1;
}
diff --git a/include/linux/dax.h b/include/linux/dax.h
index add6c4b..a41a747 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -22,7 +22,7 @@ int iomap_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t);
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
void dax_wake_mapping_entry_waiter(struct address_space *mapping,
- pgoff_t index, bool wake_all);
+ pgoff_t index, void *entry, bool wake_all);
#ifdef CONFIG_FS_DAX
struct page *read_dax_sector(struct block_device *bdev, sector_t n);
diff --git a/mm/filemap.c b/mm/filemap.c
index c7fe2f1..8709f1e9 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -143,7 +143,7 @@ static int page_cache_tree_insert(struct address_space *mapping,
if (node)
workingset_node_pages_dec(node);
/* Wakeup waiters for exceptional entry lock */
- dax_wake_mapping_entry_waiter(mapping, page->index,
+ dax_wake_mapping_entry_waiter(mapping, page->index, p,
false);
}
}
--
2.7.4