From: Matthew Wilcox <willy@infradead.org>
To: linux-kernel@vger.kernel.org
Cc: linux-s390@vger.kernel.org, David Howells <dhowells@redhat.com>,
linux-nilfs@vger.kernel.org,
Matthew Wilcox <mawilcox@microsoft.com>,
linux-sh@vger.kernel.org, intel-gfx@lists.freedesktop.org,
linux-usb@vger.kernel.org, linux-remoteproc@vger.kernel.org,
linux-f2fs-devel@lists.sourceforge.net,
linux-xfs@vger.kernel.org, linux-mm@kvack.org,
iommu@lists.linux-foundation.org,
Stefano Stabellini <sstabellini@kernel.org>,
linux-fsdevel@vger.kernel.org, cgroups@vger.kernel.org,
Bjorn Andersson <bjorn.andersson@linaro.org>,
linux-btrfs@vger.kernel.org
Subject: [PATCH v6 26/99] page cache: Convert page cache lookups to XArray
Date: Wed, 17 Jan 2018 12:20:50 -0800
Message-ID: <20180117202203.19756-27-willy@infradead.org>
In-Reply-To: <20180117202203.19756-1-willy@infradead.org>
From: Matthew Wilcox <mawilcox@microsoft.com>
Introduce page_cache_pin() to factor out the common logic between the
various lookup routines:
find_get_entry
find_get_entries
find_get_pages_range
find_get_pages_contig
find_get_pages_range_tag
find_get_entries_tag
filemap_map_pages
By using the xa_state to control the iteration, we can remove most of
the gotos and just use the normal break/continue loop control flow.
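The resulting loop shape is common to all of these functions. As an
illustrative sketch only (each function differs in how it treats value
entries and when it stops; see the conversions below for the real code):

	XA_STATE(xas, &mapping->pages, start);
	struct page *page;
	unsigned ret = 0;

	rcu_read_lock();
	xas_for_each(&xas, page, end) {
		if (xas_retry(&xas, page))
			continue;	/* retry entry; state was reset */
		if (xa_is_value(page))
			continue;	/* shadow/swap entry; skip it */
		if (!page_cache_pin(&xas, page))
			continue;	/* raced with removal or split */
		pages[ret] = page;
		if (++ret == nr_pages)
			break;
	}
	rcu_read_unlock();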
Also convert the read side of the regression1 test to the XArray,
since it simulates the functions being modified here.
Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
---
include/linux/pagemap.h | 6 +-
mm/filemap.c | 380 +++++++++------------------------
tools/testing/radix-tree/regression1.c | 68 +++---
3 files changed, 129 insertions(+), 325 deletions(-)
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 34d4fa3ad1c5..1a59f4a5424a 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -365,17 +365,17 @@ static inline unsigned find_get_pages(struct address_space *mapping,
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
- pgoff_t end, int tag, unsigned int nr_pages,
+ pgoff_t end, xa_tag_t tag, unsigned int nr_pages,
struct page **pages);
static inline unsigned find_get_pages_tag(struct address_space *mapping,
- pgoff_t *index, int tag, unsigned int nr_pages,
+ pgoff_t *index, xa_tag_t tag, unsigned int nr_pages,
struct page **pages)
{
return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag,
nr_pages, pages);
}
unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
- int tag, unsigned int nr_entries,
+ xa_tag_t tag, unsigned int nr_entries,
struct page **entries, pgoff_t *indices);
struct page *grab_cache_page_write_begin(struct address_space *mapping,
diff --git a/mm/filemap.c b/mm/filemap.c
index ed30d5310e50..317a89df1945 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1401,6 +1401,32 @@ bool page_cache_range_empty(struct address_space *mapping, pgoff_t index,
}
EXPORT_SYMBOL_GPL(page_cache_range_empty);
+/*
+ * page_cache_pin() - Try to pin a page in the page cache.
+ * @xas: The XArray operation state.
+ * @page: The page previously found at this location.
+ *
+ * On success, the page has an elevated refcount, but is not locked.
+ * This implements the lockless pagecache protocol as described in
+ * include/linux/pagemap.h; see page_cache_get_speculative().
+ *
+ * Return: True if the page is still in the cache.
+ */
+static bool page_cache_pin(struct xa_state *xas, struct page *page)
+{
+ struct page *head = compound_head(page);
+ bool got = page_cache_get_speculative(head);
+
+ if (likely(got && (xas_reload(xas) == page) &&
+ (compound_head(page) == head)))
+ return true;
+
+ if (got)
+ put_page(head);
+ xas_retry(xas, XA_RETRY_ENTRY);
+ return false;
+}
+
/**
* find_get_entry - find and get a page cache entry
* @mapping: the address_space to search
@@ -1416,51 +1442,21 @@ EXPORT_SYMBOL_GPL(page_cache_range_empty);
*/
struct page *find_get_entry(struct address_space *mapping, pgoff_t offset)
{
- void **pagep;
- struct page *head, *page;
+ XA_STATE(xas, &mapping->pages, offset);
+ struct page *page;
rcu_read_lock();
-repeat:
- page = NULL;
- pagep = radix_tree_lookup_slot(&mapping->pages, offset);
- if (pagep) {
- page = radix_tree_deref_slot(pagep);
- if (unlikely(!page))
- goto out;
- if (radix_tree_exception(page)) {
- if (radix_tree_deref_retry(page))
- goto repeat;
- /*
- * A shadow entry of a recently evicted page,
- * or a swap entry from shmem/tmpfs. Return
- * it without attempting to raise page count.
- */
- goto out;
- }
-
- head = compound_head(page);
- if (!page_cache_get_speculative(head))
- goto repeat;
-
- /* The page was split under us? */
- if (compound_head(page) != head) {
- put_page(head);
- goto repeat;
- }
+ for (;;) {
+ page = xas_load(&xas);
+ if (xas_retry(&xas, page))
+ continue;
+ if (!page || xa_is_value(page))
+ break;
+ if (page_cache_pin(&xas, page))
+ break;
+ }
- /*
- * Has the page moved?
- * This is part of the lockless pagecache protocol. See
- * include/linux/pagemap.h for details.
- */
- if (unlikely(page != *pagep)) {
- put_page(head);
- goto repeat;
- }
- }
-out:
rcu_read_unlock();
-
return page;
}
EXPORT_SYMBOL(find_get_entry);
@@ -1487,7 +1483,7 @@ struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset)
repeat:
page = find_get_entry(mapping, offset);
- if (page && !radix_tree_exception(page)) {
+ if (page && !xa_is_value(page)) {
lock_page(page);
/* Has the page been truncated? */
if (unlikely(page_mapping(page) != mapping)) {
@@ -1620,50 +1616,21 @@ unsigned find_get_entries(struct address_space *mapping,
pgoff_t start, unsigned int nr_entries,
struct page **entries, pgoff_t *indices)
{
- void **slot;
+ XA_STATE(xas, &mapping->pages, start);
+ struct page *page;
unsigned int ret = 0;
- struct radix_tree_iter iter;
if (!nr_entries)
return 0;
rcu_read_lock();
- radix_tree_for_each_slot(slot, &mapping->pages, &iter, start) {
- struct page *head, *page;
-repeat:
- page = radix_tree_deref_slot(slot);
- if (unlikely(!page))
+ xas_for_each(&xas, page, ULONG_MAX) {
+ if (xas_retry(&xas, page))
+ continue;
+ if (!xa_is_value(page) && !page_cache_pin(&xas, page))
continue;
- if (radix_tree_exception(page)) {
- if (radix_tree_deref_retry(page)) {
- slot = radix_tree_iter_retry(&iter);
- continue;
- }
- /*
- * A shadow entry of a recently evicted page, a swap
- * entry from shmem/tmpfs or a DAX entry. Return it
- * without attempting to raise page count.
- */
- goto export;
- }
-
- head = compound_head(page);
- if (!page_cache_get_speculative(head))
- goto repeat;
-
- /* The page was split under us? */
- if (compound_head(page) != head) {
- put_page(head);
- goto repeat;
- }
- /* Has the page moved? */
- if (unlikely(page != *slot)) {
- put_page(head);
- goto repeat;
- }
-export:
- indices[ret] = iter.index;
+ indices[ret] = xas.xa_index;
entries[ret] = page;
if (++ret == nr_entries)
break;
@@ -1697,56 +1664,26 @@ unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
pgoff_t end, unsigned int nr_pages,
struct page **pages)
{
- struct radix_tree_iter iter;
- void **slot;
+ XA_STATE(xas, &mapping->pages, *start);
+ struct page *page;
unsigned ret = 0;
if (unlikely(!nr_pages))
return 0;
rcu_read_lock();
- radix_tree_for_each_slot(slot, &mapping->pages, &iter, *start) {
- struct page *head, *page;
-
- if (iter.index > end)
- break;
-repeat:
- page = radix_tree_deref_slot(slot);
- if (unlikely(!page))
+ xas_for_each(&xas, page, end) {
+ if (xas_retry(&xas, page))
continue;
-
- if (radix_tree_exception(page)) {
- if (radix_tree_deref_retry(page)) {
- slot = radix_tree_iter_retry(&iter);
- continue;
- }
- /*
- * A shadow entry of a recently evicted page,
- * or a swap entry from shmem/tmpfs. Skip
- * over it.
- */
+ /* Skip over shadow or swap entries */
+ if (xa_is_value(page))
+ continue;
+ if (!page_cache_pin(&xas, page))
continue;
- }
-
- head = compound_head(page);
- if (!page_cache_get_speculative(head))
- goto repeat;
-
- /* The page was split under us? */
- if (compound_head(page) != head) {
- put_page(head);
- goto repeat;
- }
-
- /* Has the page moved? */
- if (unlikely(page != *slot)) {
- put_page(head);
- goto repeat;
- }
pages[ret] = page;
if (++ret == nr_pages) {
- *start = pages[ret - 1]->index + 1;
+ *start = page->index + 1;
goto out;
}
}
@@ -1754,7 +1691,7 @@ unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
/*
* We come here when there is no page beyond @end. We take care to not
* overflow the index @start as it confuses some of the callers. This
- * breaks the iteration when there is page at index -1 but that is
+ * breaks the iteration when there is a page at index -1 but that is
* already broken anyway.
*/
if (end == (pgoff_t)-1)
@@ -1782,57 +1719,28 @@ unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
unsigned int nr_pages, struct page **pages)
{
- struct radix_tree_iter iter;
- void **slot;
+ XA_STATE(xas, &mapping->pages, index);
+ struct page *page;
unsigned int ret = 0;
if (unlikely(!nr_pages))
return 0;
rcu_read_lock();
- radix_tree_for_each_contig(slot, &mapping->pages, &iter, index) {
- struct page *head, *page;
-repeat:
- page = radix_tree_deref_slot(slot);
- /* The hole, there no reason to continue */
- if (unlikely(!page))
- break;
-
- if (radix_tree_exception(page)) {
- if (radix_tree_deref_retry(page)) {
- slot = radix_tree_iter_retry(&iter);
- continue;
- }
- /*
- * A shadow entry of a recently evicted page,
- * or a swap entry from shmem/tmpfs. Stop
- * looking for contiguous pages.
- */
+ for (page = xas_load(&xas); page; page = xas_next(&xas)) {
+ if (xas_retry(&xas, page))
+ continue;
+ if (xa_is_value(page))
break;
- }
-
- head = compound_head(page);
- if (!page_cache_get_speculative(head))
- goto repeat;
-
- /* The page was split under us? */
- if (compound_head(page) != head) {
- put_page(head);
- goto repeat;
- }
-
- /* Has the page moved? */
- if (unlikely(page != *slot)) {
- put_page(head);
- goto repeat;
- }
+ if (!page_cache_pin(&xas, page))
+ continue;
/*
* must check mapping and index after taking the ref.
* otherwise we can get both false positives and false
* negatives, which is just confusing to the caller.
*/
- if (page->mapping == NULL || page_to_pgoff(page) != iter.index) {
+ if (!page->mapping || page_to_pgoff(page) != xas.xa_index) {
put_page(page);
break;
}
@@ -1859,74 +1767,42 @@ EXPORT_SYMBOL(find_get_pages_contig);
* @tag. We update @index to index the next page for the traversal.
*/
unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
- pgoff_t end, int tag, unsigned int nr_pages,
+ pgoff_t end, xa_tag_t tag, unsigned int nr_pages,
struct page **pages)
{
- struct radix_tree_iter iter;
- void **slot;
+ XA_STATE(xas, &mapping->pages, *index);
+ struct page *page;
unsigned ret = 0;
if (unlikely(!nr_pages))
return 0;
rcu_read_lock();
- radix_tree_for_each_tagged(slot, &mapping->pages, &iter, *index, tag) {
- struct page *head, *page;
-
- if (iter.index > end)
- break;
-repeat:
- page = radix_tree_deref_slot(slot);
- if (unlikely(!page))
+ xas_for_each_tag(&xas, page, end, tag) {
+ if (xas_retry(&xas, page))
continue;
-
- if (radix_tree_exception(page)) {
- if (radix_tree_deref_retry(page)) {
- slot = radix_tree_iter_retry(&iter);
- continue;
- }
- /*
- * A shadow entry of a recently evicted page.
- *
- * Those entries should never be tagged, but
- * this tree walk is lockless and the tags are
- * looked up in bulk, one radix tree node at a
- * time, so there is a sizable window for page
- * reclaim to evict a page we saw tagged.
- *
- * Skip over it.
- */
+ /*
+ * Shadow entries should never be tagged, but this iteration
+ * is lockless so there is a window for page reclaim to evict
+ * a page we saw tagged. Skip over it.
+ */
+ if (xa_is_value(page))
+ continue;
+ if (!page_cache_pin(&xas, page))
continue;
- }
-
- head = compound_head(page);
- if (!page_cache_get_speculative(head))
- goto repeat;
-
- /* The page was split under us? */
- if (compound_head(page) != head) {
- put_page(head);
- goto repeat;
- }
-
- /* Has the page moved? */
- if (unlikely(page != *slot)) {
- put_page(head);
- goto repeat;
- }
pages[ret] = page;
if (++ret == nr_pages) {
- *index = pages[ret - 1]->index + 1;
+ *index = page->index + 1;
goto out;
}
}
/*
- * We come here when we got at @end. We take care to not overflow the
+ * We come here when we got to @end. We take care to not overflow the
* index @index as it confuses some of the callers. This breaks the
- * iteration when there is page at index -1 but that is already broken
- * anyway.
+ * iteration when there is a page at index -1 but that is already
+ * broken anyway.
*/
if (end == (pgoff_t)-1)
*index = (pgoff_t)-1;
@@ -1952,54 +1828,24 @@ EXPORT_SYMBOL(find_get_pages_range_tag);
* @tag.
*/
unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
- int tag, unsigned int nr_entries,
+ xa_tag_t tag, unsigned int nr_entries,
struct page **entries, pgoff_t *indices)
{
- void **slot;
+ XA_STATE(xas, &mapping->pages, start);
+ struct page *page;
unsigned int ret = 0;
- struct radix_tree_iter iter;
if (!nr_entries)
return 0;
rcu_read_lock();
- radix_tree_for_each_tagged(slot, &mapping->pages, &iter, start, tag) {
- struct page *head, *page;
-repeat:
- page = radix_tree_deref_slot(slot);
- if (unlikely(!page))
+ xas_for_each_tag(&xas, page, ULONG_MAX, tag) {
+ if (xas_retry(&xas, page))
+ continue;
+ if (!xa_is_value(page) && !page_cache_pin(&xas, page))
continue;
- if (radix_tree_exception(page)) {
- if (radix_tree_deref_retry(page)) {
- slot = radix_tree_iter_retry(&iter);
- continue;
- }
-
- /*
- * A shadow entry of a recently evicted page, a swap
- * entry from shmem/tmpfs or a DAX entry. Return it
- * without attempting to raise page count.
- */
- goto export;
- }
-
- head = compound_head(page);
- if (!page_cache_get_speculative(head))
- goto repeat;
-
- /* The page was split under us? */
- if (compound_head(page) != head) {
- put_page(head);
- goto repeat;
- }
- /* Has the page moved? */
- if (unlikely(page != *slot)) {
- put_page(head);
- goto repeat;
- }
-export:
- indices[ret] = iter.index;
+ indices[ret] = xas.xa_index;
entries[ret] = page;
if (++ret == nr_entries)
break;
@@ -2608,45 +2454,21 @@ EXPORT_SYMBOL(filemap_fault);
void filemap_map_pages(struct vm_fault *vmf,
pgoff_t start_pgoff, pgoff_t end_pgoff)
{
- struct radix_tree_iter iter;
- void **slot;
struct file *file = vmf->vma->vm_file;
struct address_space *mapping = file->f_mapping;
pgoff_t last_pgoff = start_pgoff;
unsigned long max_idx;
- struct page *head, *page;
+ XA_STATE(xas, &mapping->pages, start_pgoff);
+ struct page *page;
rcu_read_lock();
- radix_tree_for_each_slot(slot, &mapping->pages, &iter, start_pgoff) {
- if (iter.index > end_pgoff)
- break;
-repeat:
- page = radix_tree_deref_slot(slot);
- if (unlikely(!page))
- goto next;
- if (radix_tree_exception(page)) {
- if (radix_tree_deref_retry(page)) {
- slot = radix_tree_iter_retry(&iter);
- continue;
- }
+ xas_for_each(&xas, page, end_pgoff) {
+ if (xas_retry(&xas, page))
+ continue;
+ if (xa_is_value(page))
goto next;
- }
-
- head = compound_head(page);
- if (!page_cache_get_speculative(head))
- goto repeat;
-
- /* The page was split under us? */
- if (compound_head(page) != head) {
- put_page(head);
- goto repeat;
- }
-
- /* Has the page moved? */
- if (unlikely(page != *slot)) {
- put_page(head);
- goto repeat;
- }
+ if (!page_cache_pin(&xas, page))
+ continue;
if (!PageUptodate(page) ||
PageReadahead(page) ||
@@ -2665,10 +2487,10 @@ void filemap_map_pages(struct vm_fault *vmf,
if (file->f_ra.mmap_miss > 0)
file->f_ra.mmap_miss--;
- vmf->address += (iter.index - last_pgoff) << PAGE_SHIFT;
+ vmf->address += (xas.xa_index - last_pgoff) << PAGE_SHIFT;
if (vmf->pte)
- vmf->pte += iter.index - last_pgoff;
- last_pgoff = iter.index;
+ vmf->pte += xas.xa_index - last_pgoff;
+ last_pgoff = xas.xa_index;
if (alloc_set_pte(vmf, NULL, page))
goto unlock;
unlock_page(page);
@@ -2681,8 +2503,6 @@ void filemap_map_pages(struct vm_fault *vmf,
/* Huge page is mapped? No need to proceed. */
if (pmd_trans_huge(*vmf->pmd))
break;
- if (iter.index == end_pgoff)
- break;
}
rcu_read_unlock();
}
diff --git a/tools/testing/radix-tree/regression1.c b/tools/testing/radix-tree/regression1.c
index 0aece092f40e..008393906be5 100644
--- a/tools/testing/radix-tree/regression1.c
+++ b/tools/testing/radix-tree/regression1.c
@@ -58,7 +58,7 @@ static struct page *page_alloc(void)
struct page *p;
p = malloc(sizeof(struct page));
p->count = 1;
- p->index = 1;
+ p->index = (unsigned long)p;
pthread_mutex_init(&p->lock, NULL);
return p;
@@ -77,53 +77,37 @@ static void page_free(struct page *p)
call_rcu(&p->rcu, page_rcu_free);
}
+static bool page_cache_pin(struct xa_state *xas, struct page *page)
+{
+ pthread_mutex_lock(&page->lock);
+ if (!page->count) {
+ pthread_mutex_unlock(&page->lock);
+ goto fail;
+ }
+ /* don't actually update page refcount */
+ pthread_mutex_unlock(&page->lock);
+
+ /* Has the page moved? */
+ if (xas_reload(xas) == page)
+ return true;
+fail:
+ xas_retry(xas, XA_RETRY_ENTRY);
+ return false;
+}
+
static unsigned find_get_pages(unsigned long start,
unsigned int nr_pages, struct page **pages)
{
- unsigned int i;
- unsigned int ret;
- unsigned int nr_found;
+ XA_STATE(xas, &mt_tree, start);
+ struct page *page;
+ unsigned int ret = 0;
rcu_read_lock();
-restart:
- nr_found = radix_tree_gang_lookup_slot(&mt_tree,
- (void ***)pages, NULL, start, nr_pages);
- ret = 0;
- for (i = 0; i < nr_found; i++) {
- struct page *page;
-repeat:
- page = radix_tree_deref_slot((void **)pages[i]);
- if (unlikely(!page))
+ xas_for_each(&xas, page, ULONG_MAX) {
+ if (xas_retry(&xas, page))
+ continue;
+ if (!page_cache_pin(&xas, page))
continue;
-
- if (radix_tree_exception(page)) {
- if (radix_tree_deref_retry(page)) {
- /*
- * Transient condition which can only trigger
- * when entry at index 0 moves out of or back
- * to root: none yet gotten, safe to restart.
- */
- assert((start | i) == 0);
- goto restart;
- }
- /*
- * No exceptional entries are inserted in this test.
- */
- assert(0);
- }
-
- pthread_mutex_lock(&page->lock);
- if (!page->count) {
- pthread_mutex_unlock(&page->lock);
- goto repeat;
- }
- /* don't actually update page refcount */
- pthread_mutex_unlock(&page->lock);
-
- /* Has the page moved? */
- if (unlikely(page != *((void **)pages[i]))) {
- goto repeat;
- }
pages[ret] = page;
ret++;
--
2.15.1