From: Matthew Wilcox <willy@infradead.org>
To: linux-kernel@vger.kernel.org
Cc: Matthew Wilcox <mawilcox@microsoft.com>,
linux-mm@kvack.org, linux-fsdevel@vger.kernel.org,
linux-f2fs-devel@lists.sourceforge.net,
linux-nilfs@vger.kernel.org, linux-btrfs@vger.kernel.org,
linux-xfs@vger.kernel.org, linux-usb@vger.kernel.org,
Bjorn Andersson <bjorn.andersson@linaro.org>,
Stefano Stabellini <sstabellini@kernel.org>,
iommu@lists.linux-foundation.org,
linux-remoteproc@vger.kernel.org, linux-s390@vger.kernel.org,
intel-gfx@lists.freedesktop.org, cgroups@vger.kernel.org,
linux-sh@vger.kernel.org, David Howells <dhowells@redhat.com>
Subject: [PATCH v6 82/99] s390: Convert gmap to XArray
Date: Wed, 17 Jan 2018 12:21:46 -0800
Message-ID: <20180117202203.19756-83-willy@infradead.org>
In-Reply-To: <20180117202203.19756-1-willy@infradead.org>
From: Matthew Wilcox <mawilcox@microsoft.com>
The three radix trees in gmap are all converted to the XArray.
This is another case where holding multiple locks mandates the use
of the xa_reserve() API. The gmap_insert_rmap() function is
considerably simplified by using the advanced API;
gmap_radix_tree_free() turns out to just be xa_destroy(), and
gmap_rmap_radix_tree_free() is a nice little iteration followed
by xa_destroy().
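For reference, the locking-sensitive insertions follow a reserve-then-store
pattern: reserve the slot with GFP_KERNEL while sleeping is still allowed,
store into the reserved slot under the spinlock without allocating, and give
the reservation back if the store is abandoned. The sketch below is purely
illustrative and not part of this patch; the xarray, lock and function names
are invented, but the xa_reserve()/xa_store()/xa_release() calls mirror how
the diff below uses them.

	#include <linux/spinlock.h>
	#include <linux/xarray.h>

	static struct xarray example_xa;	/* assume xa_init(&example_xa) ran earlier */
	static DEFINE_SPINLOCK(example_lock);

	static int example_insert(unsigned long index, void *entry)
	{
		int rc;

		/* May sleep: preallocate the slot for @index. */
		rc = xa_reserve(&example_xa, index, GFP_KERNEL);
		if (rc)
			return rc;

		spin_lock(&example_lock);
		/* The reserved slot means no allocation is needed here. */
		rc = xa_err(xa_store(&example_xa, index, entry,
				     GFP_NOWAIT | __GFP_NOFAIL));
		spin_unlock(&example_lock);

		if (rc)
			/* The store never happened; drop the reservation. */
			xa_release(&example_xa, index);
		return rc;
	}

gmap_rmap_free() needs no such dance: it simply walks the array with
xa_for_each() and then calls xa_destroy(), as the corresponding hunk shows.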
Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
---
arch/s390/include/asm/gmap.h | 12 ++--
arch/s390/mm/gmap.c | 133 +++++++++++++++----------------------------
2 files changed, 51 insertions(+), 94 deletions(-)
diff --git a/arch/s390/include/asm/gmap.h b/arch/s390/include/asm/gmap.h
index e07cce88dfb0..7695a01d19d7 100644
--- a/arch/s390/include/asm/gmap.h
+++ b/arch/s390/include/asm/gmap.h
@@ -14,14 +14,14 @@
* @list: list head for the mm->context gmap list
* @crst_list: list of all crst tables used in the guest address space
* @mm: pointer to the parent mm_struct
- * @guest_to_host: radix tree with guest to host address translation
- * @host_to_guest: radix tree with pointer to segment table entries
+ * @guest_to_host: guest to host address translation
+ * @host_to_guest: pointers to segment table entries
* @guest_table_lock: spinlock to protect all entries in the guest page table
* @ref_count: reference counter for the gmap structure
* @table: pointer to the page directory
* @asce: address space control element for gmap page table
* @pfault_enabled: defines if pfaults are applicable for the guest
- * @host_to_rmap: radix tree with gmap_rmap lists
+ * @host_to_rmap: gmap_rmap lists
* @children: list of shadow gmap structures
* @pt_list: list of all page tables used in the shadow guest address space
* @shadow_lock: spinlock to protect the shadow gmap list
@@ -35,8 +35,8 @@ struct gmap {
struct list_head list;
struct list_head crst_list;
struct mm_struct *mm;
- struct radix_tree_root guest_to_host;
- struct radix_tree_root host_to_guest;
+ struct xarray guest_to_host;
+ struct xarray host_to_guest;
spinlock_t guest_table_lock;
atomic_t ref_count;
unsigned long *table;
@@ -45,7 +45,7 @@ struct gmap {
void *private;
bool pfault_enabled;
/* Additional data for shadow guest address spaces */
- struct radix_tree_root host_to_rmap;
+ struct xarray host_to_rmap;
struct list_head children;
struct list_head pt_list;
spinlock_t shadow_lock;
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 05d459b638f5..818a5e80914d 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -60,9 +60,9 @@ static struct gmap *gmap_alloc(unsigned long limit)
INIT_LIST_HEAD(&gmap->crst_list);
INIT_LIST_HEAD(&gmap->children);
INIT_LIST_HEAD(&gmap->pt_list);
- INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL);
- INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC);
- INIT_RADIX_TREE(&gmap->host_to_rmap, GFP_ATOMIC);
+ xa_init(&gmap->guest_to_host);
+ xa_init(&gmap->host_to_guest);
+ xa_init(&gmap->host_to_rmap);
spin_lock_init(&gmap->guest_table_lock);
spin_lock_init(&gmap->shadow_lock);
atomic_set(&gmap->ref_count, 1);
@@ -121,55 +121,16 @@ static void gmap_flush_tlb(struct gmap *gmap)
__tlb_flush_global();
}
-static void gmap_radix_tree_free(struct radix_tree_root *root)
-{
- struct radix_tree_iter iter;
- unsigned long indices[16];
- unsigned long index;
- void __rcu **slot;
- int i, nr;
-
- /* A radix tree is freed by deleting all of its entries */
- index = 0;
- do {
- nr = 0;
- radix_tree_for_each_slot(slot, root, &iter, index) {
- indices[nr] = iter.index;
- if (++nr == 16)
- break;
- }
- for (i = 0; i < nr; i++) {
- index = indices[i];
- radix_tree_delete(root, index);
- }
- } while (nr > 0);
-}
-
-static void gmap_rmap_radix_tree_free(struct radix_tree_root *root)
+static void gmap_rmap_free(struct xarray *xa)
{
struct gmap_rmap *rmap, *rnext, *head;
- struct radix_tree_iter iter;
- unsigned long indices[16];
- unsigned long index;
- void __rcu **slot;
- int i, nr;
-
- /* A radix tree is freed by deleting all of its entries */
- index = 0;
- do {
- nr = 0;
- radix_tree_for_each_slot(slot, root, &iter, index) {
- indices[nr] = iter.index;
- if (++nr == 16)
- break;
- }
- for (i = 0; i < nr; i++) {
- index = indices[i];
- head = radix_tree_delete(root, index);
- gmap_for_each_rmap_safe(rmap, rnext, head)
- kfree(rmap);
- }
- } while (nr > 0);
+ unsigned long index = 0;
+
+ xa_for_each(xa, head, index, ULONG_MAX, XA_PRESENT) {
+ gmap_for_each_rmap_safe(rmap, rnext, head)
+ kfree(rmap);
+ }
+ xa_destroy(xa);
}
/**
@@ -188,15 +149,15 @@ static void gmap_free(struct gmap *gmap)
/* Free all segment & region tables. */
list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
__free_pages(page, CRST_ALLOC_ORDER);
- gmap_radix_tree_free(&gmap->guest_to_host);
- gmap_radix_tree_free(&gmap->host_to_guest);
+ xa_destroy(&gmap->guest_to_host);
+ xa_destroy(&gmap->host_to_guest);
/* Free additional data for a shadow gmap */
if (gmap_is_shadow(gmap)) {
/* Free all page tables. */
list_for_each_entry_safe(page, next, &gmap->pt_list, lru)
page_table_free_pgste(page);
- gmap_rmap_radix_tree_free(&gmap->host_to_rmap);
+ gmap_rmap_free(&gmap->host_to_rmap);
/* Release reference to the parent */
gmap_put(gmap->parent);
}
@@ -358,7 +319,7 @@ static int __gmap_unlink_by_vmaddr(struct gmap *gmap, unsigned long vmaddr)
BUG_ON(gmap_is_shadow(gmap));
spin_lock(&gmap->guest_table_lock);
- entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
+ entry = xa_erase(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
if (entry) {
flush = (*entry != _SEGMENT_ENTRY_EMPTY);
*entry = _SEGMENT_ENTRY_EMPTY;
@@ -378,7 +339,7 @@ static int __gmap_unmap_by_gaddr(struct gmap *gmap, unsigned long gaddr)
{
unsigned long vmaddr;
- vmaddr = (unsigned long) radix_tree_delete(&gmap->guest_to_host,
+ vmaddr = (unsigned long) xa_erase(&gmap->guest_to_host,
gaddr >> PMD_SHIFT);
return vmaddr ? __gmap_unlink_by_vmaddr(gmap, vmaddr) : 0;
}
@@ -441,9 +402,9 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
/* Remove old translation */
flush |= __gmap_unmap_by_gaddr(gmap, to + off);
/* Store new translation */
- if (radix_tree_insert(&gmap->guest_to_host,
+ if (xa_is_err(xa_store(&gmap->guest_to_host,
(to + off) >> PMD_SHIFT,
- (void *) from + off))
+ (void *) from + off, GFP_KERNEL)))
break;
}
up_write(&gmap->mm->mmap_sem);
@@ -474,7 +435,7 @@ unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
unsigned long vmaddr;
vmaddr = (unsigned long)
- radix_tree_lookup(&gmap->guest_to_host, gaddr >> PMD_SHIFT);
+ xa_load(&gmap->guest_to_host, gaddr >> PMD_SHIFT);
/* Note: guest_to_host is empty for a shadow gmap */
return vmaddr ? (vmaddr | (gaddr & ~PMD_MASK)) : -EFAULT;
}
@@ -588,21 +549,19 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
if (pmd_large(*pmd))
return -EFAULT;
/* Link gmap segment table entry location to page table. */
- rc = radix_tree_preload(GFP_KERNEL);
+ rc = xa_reserve(&gmap->host_to_guest, vmaddr >> PMD_SHIFT, GFP_KERNEL);
if (rc)
return rc;
ptl = pmd_lock(mm, pmd);
spin_lock(&gmap->guest_table_lock);
if (*table == _SEGMENT_ENTRY_EMPTY) {
- rc = radix_tree_insert(&gmap->host_to_guest,
- vmaddr >> PMD_SHIFT, table);
+ rc = xa_err(xa_store(&gmap->host_to_guest, vmaddr >> PMD_SHIFT,
+ table, GFP_NOWAIT | __GFP_NOFAIL));
if (!rc)
*table = pmd_val(*pmd);
- } else
- rc = 0;
+ }
spin_unlock(&gmap->guest_table_lock);
spin_unlock(ptl);
- radix_tree_preload_end();
return rc;
}
@@ -660,7 +619,7 @@ void __gmap_zap(struct gmap *gmap, unsigned long gaddr)
pte_t *ptep;
/* Find the vm address for the guest address */
- vmaddr = (unsigned long) radix_tree_lookup(&gmap->guest_to_host,
+ vmaddr = (unsigned long) xa_load(&gmap->guest_to_host,
gaddr >> PMD_SHIFT);
if (vmaddr) {
vmaddr |= gaddr & ~PMD_MASK;
@@ -682,8 +641,7 @@ void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
for (gaddr = from; gaddr < to;
gaddr = (gaddr + PMD_SIZE) & PMD_MASK) {
/* Find the vm address for the guest address */
- vmaddr = (unsigned long)
- radix_tree_lookup(&gmap->guest_to_host,
+ vmaddr = (unsigned long) xa_load(&gmap->guest_to_host,
gaddr >> PMD_SHIFT);
if (!vmaddr)
continue;
@@ -1002,29 +960,24 @@ int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val)
EXPORT_SYMBOL_GPL(gmap_read_table);
/**
- * gmap_insert_rmap - add a rmap to the host_to_rmap radix tree
+ * gmap_insert_rmap - add a rmap to the host_to_rmap
* @sg: pointer to the shadow guest address space structure
* @vmaddr: vm address associated with the rmap
* @rmap: pointer to the rmap structure
*
- * Called with the sg->guest_table_lock
+ * Called with the sg->guest_table_lock and page table lock held
*/
static inline void gmap_insert_rmap(struct gmap *sg, unsigned long vmaddr,
struct gmap_rmap *rmap)
{
- void __rcu **slot;
+ XA_STATE(xas, &sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
BUG_ON(!gmap_is_shadow(sg));
- slot = radix_tree_lookup_slot(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
- if (slot) {
- rmap->next = radix_tree_deref_slot_protected(slot,
- &sg->guest_table_lock);
- radix_tree_replace_slot(&sg->host_to_rmap, slot, rmap);
- } else {
- rmap->next = NULL;
- radix_tree_insert(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT,
- rmap);
- }
+
+ xas_lock(&xas);
+ rmap->next = xas_load(&xas);
+ xas_store(&xas, rmap);
+ xas_unlock(&xas);
}
/**
@@ -1058,7 +1011,8 @@ static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
if (!rmap)
return -ENOMEM;
rmap->raddr = raddr;
- rc = radix_tree_preload(GFP_KERNEL);
+ rc = xa_reserve(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT,
+ GFP_KERNEL);
if (rc) {
kfree(rmap);
return rc;
@@ -1074,7 +1028,7 @@ static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
spin_unlock(&sg->guest_table_lock);
gmap_pte_op_end(ptl);
}
- radix_tree_preload_end();
+ xa_release(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
if (rc) {
kfree(rmap);
rc = gmap_pte_op_fixup(parent, paddr, vmaddr, prot);
@@ -1962,7 +1916,8 @@ int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)
rc = vmaddr;
break;
}
- rc = radix_tree_preload(GFP_KERNEL);
+ rc = xa_reserve(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT,
+ GFP_KERNEL);
if (rc)
break;
rc = -EAGAIN;
@@ -1974,7 +1929,8 @@ int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)
if (!tptep) {
spin_unlock(&sg->guest_table_lock);
gmap_pte_op_end(ptl);
- radix_tree_preload_end();
+ xa_release(&sg->host_to_rmap,
+ vmaddr >> PAGE_SHIFT);
break;
}
rc = ptep_shadow_pte(sg->mm, saddr, sptep, tptep, pte);
@@ -1983,11 +1939,13 @@ int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)
gmap_insert_rmap(sg, vmaddr, rmap);
rmap = NULL;
rc = 0;
+ } else {
+ xa_release(&sg->host_to_rmap,
+ vmaddr >> PAGE_SHIFT);
}
gmap_pte_op_end(ptl);
spin_unlock(&sg->guest_table_lock);
}
- radix_tree_preload_end();
if (!rc)
break;
rc = gmap_pte_op_fixup(parent, paddr, vmaddr, prot);
@@ -2030,7 +1988,7 @@ static void gmap_shadow_notify(struct gmap *sg, unsigned long vmaddr,
return;
}
/* Remove the page table tree from on specific entry */
- head = radix_tree_delete(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
+ head = xa_erase(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
gmap_for_each_rmap_safe(rmap, rnext, head) {
bits = rmap->raddr & _SHADOW_RMAP_MASK;
raddr = rmap->raddr ^ bits;
@@ -2078,8 +2036,7 @@ void ptep_notify(struct mm_struct *mm, unsigned long vmaddr,
rcu_read_lock();
list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
spin_lock(&gmap->guest_table_lock);
- table = radix_tree_lookup(&gmap->host_to_guest,
- vmaddr >> PMD_SHIFT);
+ table = xa_load(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
if (table)
gaddr = __gmap_segment_gaddr(table) + offset;
spin_unlock(&gmap->guest_table_lock);
--
2.15.1