From: Matthew Wilcox
Subject: [PATCH v10 55/62] dax: Hash on XArray instead of mapping
Date: Thu, 29 Mar 2018 20:42:38 -0700
Message-ID: <20180330034245.10462-56-willy@infradead.org>
References: <20180330034245.10462-1-willy@infradead.org>
In-Reply-To: <20180330034245.10462-1-willy@infradead.org>
To: linux-mm@kvack.org, linux-fsdevel@vger.kernel.org
Cc: linux-nilfs@vger.kernel.org, Jan Kara, Jeff Layton, Matthew Wilcox,
	James Simmons, Jaegeuk Kim, Andreas Dilger, Nicholas Piggin,
	linux-f2fs-devel@lists.sourceforge.net, Oleg Drokin, Ryusuke Konishi,
	Lukas Czerner, Ross Zwisler, Christoph Hellwig, Goldwyn Rodrigues,
	Mike Kravetz

Since the XArray is embedded in the struct address_space, its address
contains exactly as much entropy as the address of the mapping.
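[Not part of the patch: a minimal user-space sketch, using stand-in struct
definitions rather than the real kernel types, of why the two hash keys are
equivalent. Because the XArray is embedded in struct address_space as
->i_pages, its address is the mapping's address plus a compile-time constant
offset, so hashing on either pointer spreads the waitqueues the same way.]

#include <stdio.h>
#include <stddef.h>

/* Stand-ins for the kernel types; the real structs have many more members. */
struct xarray {
	void *xa_head;
};

struct address_space {
	void *host;
	struct xarray i_pages;		/* embedded, as in the kernel */
};

int main(void)
{
	struct address_space mapping;

	/* The two candidate hash keys differ only by a constant offset. */
	printf("mapping  = %p\n", (void *)&mapping);
	printf("&i_pages = %p (offset %zu)\n",
	       (void *)&mapping.i_pages,
	       offsetof(struct address_space, i_pages));
	return 0;
}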
Signed-off-by: Matthew Wilcox
---
 fs/dax.c | 29 +++++++++++++++--------------
 1 file changed, 15 insertions(+), 14 deletions(-)

diff --git a/fs/dax.c b/fs/dax.c
index ce4ad6a99e78..2d6e21e41567 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -116,7 +116,7 @@ static int dax_is_empty_entry(void *entry)
  * DAX page cache entry locking
  */
 struct exceptional_entry_key {
-	struct address_space *mapping;
+	struct xarray *xa;
 	pgoff_t entry_start;
 };
 
@@ -125,7 +125,7 @@ struct wait_exceptional_entry_queue {
 	struct exceptional_entry_key key;
 };
 
-static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
+static wait_queue_head_t *dax_entry_waitqueue(struct xarray *xa,
 		pgoff_t index, void *entry, struct exceptional_entry_key *key)
 {
 	unsigned long hash;
@@ -138,21 +138,21 @@ static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
 	if (dax_is_pmd_entry(entry))
 		index &= ~PG_PMD_COLOUR;
 
-	key->mapping = mapping;
+	key->xa = xa;
 	key->entry_start = index;
 
-	hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS);
+	hash = hash_long((unsigned long)xa ^ index, DAX_WAIT_TABLE_BITS);
 	return wait_table + hash;
 }
 
-static int wake_exceptional_entry_func(wait_queue_entry_t *wait, unsigned int mode,
-	int sync, void *keyp)
+static int wake_exceptional_entry_func(wait_queue_entry_t *wait,
+		unsigned int mode, int sync, void *keyp)
 {
 	struct exceptional_entry_key *key = keyp;
 	struct wait_exceptional_entry_queue *ewait =
 		container_of(wait, struct wait_exceptional_entry_queue, wait);
 
-	if (key->mapping != ewait->key.mapping ||
+	if (key->xa != ewait->key.xa ||
 	    key->entry_start != ewait->key.entry_start)
 		return 0;
 	return autoremove_wake_function(wait, mode, sync, NULL);
@@ -163,13 +163,13 @@ static int wake_exceptional_entry_func(wait_queue_entry_t *wait, unsigned int mo
  * The important information it's conveying is whether the entry at
  * this index used to be a PMD entry.
  */
-static void dax_wake_mapping_entry_waiter(struct address_space *mapping,
+static void dax_wake_mapping_entry_waiter(struct xarray *xa,
 		pgoff_t index, void *entry, bool wake_all)
 {
 	struct exceptional_entry_key key;
 	wait_queue_head_t *wq;
 
-	wq = dax_entry_waitqueue(mapping, index, entry, &key);
+	wq = dax_entry_waitqueue(xa, index, entry, &key);
 
 	/*
 	 * Checking for locked entry and prepare_to_wait_exclusive() happens
@@ -246,7 +246,8 @@ static void *get_unlocked_mapping_entry(struct address_space *mapping,
 			return entry;
 		}
 
-		wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
+		wq = dax_entry_waitqueue(&mapping->i_pages, index, entry,
+				&ewait.key);
 		prepare_to_wait_exclusive(wq, &ewait.wait,
 					  TASK_UNINTERRUPTIBLE);
 		xa_unlock_irq(&mapping->i_pages);
@@ -270,7 +271,7 @@ static void dax_unlock_mapping_entry(struct address_space *mapping,
 	}
 	unlock_slot(mapping, slot);
 	xa_unlock_irq(&mapping->i_pages);
-	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
+	dax_wake_mapping_entry_waiter(&mapping->i_pages, index, entry, false);
 }
 
 static void put_locked_mapping_entry(struct address_space *mapping,
@@ -290,7 +291,7 @@ static void put_unlocked_mapping_entry(struct address_space *mapping,
 		return;
 
 	/* We have to wake up next waiter for the page cache entry lock */
-	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
+	dax_wake_mapping_entry_waiter(&mapping->i_pages, index, entry, false);
 }
 
 static unsigned long dax_entry_size(void *entry)
@@ -464,8 +465,8 @@ static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
 			dax_disassociate_entry(entry, mapping, false);
 			radix_tree_delete(&mapping->i_pages, index);
 			mapping->nrexceptional--;
-			dax_wake_mapping_entry_waiter(mapping, index, entry,
-								true);
+			dax_wake_mapping_entry_waiter(&mapping->i_pages,
+					index, entry, true);
 		}
 
 		entry = dax_mk_locked(0, size_flag | DAX_EMPTY);
-- 
2.16.2