From: "Vishal Moola (Oracle)" <vishal.moola@gmail.com>
To: Andrew Morton <akpm@linux-foundation.org>,
Matthew Wilcox <willy@infradead.org>
Cc: linux-mm@kvack.org, linux-arch@vger.kernel.org,
linux-arm-kernel@lists.infradead.org, linux-csky@vger.kernel.org,
linux-hexagon@vger.kernel.org, loongarch@lists.linux.dev,
linux-m68k@lists.linux-m68k.org, linux-mips@vger.kernel.org,
linux-openrisc@vger.kernel.org, linuxppc-dev@lists.ozlabs.org,
linux-riscv@lists.infradead.org, linux-s390@vger.kernel.org,
linux-sh@vger.kernel.org, sparclinux@vger.kernel.org,
linux-um@lists.infradead.org, xen-devel@lists.xenproject.org,
kvm@vger.kernel.org, Hugh Dickins <hughd@google.com>,
"Vishal Moola (Oracle)" <vishal.moola@gmail.com>,
David Hildenbrand <david@redhat.com>,
Claudio Imbrenda <imbrenda@linux.ibm.com>,
Mike Rapoport <rppt@kernel.org>
Subject: [PATCH v6 02/33] s390: Use _pt_s390_gaddr for gmap address tracking
Date: Mon, 26 Jun 2023 20:14:00 -0700 [thread overview]
Message-ID: <20230627031431.29653-3-vishal.moola@gmail.com> (raw)
In-Reply-To: <20230627031431.29653-1-vishal.moola@gmail.com>
s390 uses page->index to keep track of page tables for the guest address
space. In an attempt to consolidate the usage of page fields in s390,
replace _pt_pad_2 with _pt_s390_gaddr and use it in place of page->index
in gmap.
Since page->_pt_s390_gaddr aliases with page->mapping, ensure it is
cleared before the pages are freed.
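As a rough illustration (not part of this patch), every free path follows
the pattern sketched below. The helper name gmap_free_crst() is
hypothetical; __free_pages() and CRST_ALLOC_ORDER are the existing symbols
used throughout gmap.c. Leaving the aliased field set would look like a
non-NULL page->mapping to the page allocator's sanity checks at free time:

	/* Hypothetical helper, for illustration only */
	static void gmap_free_crst(struct page *page)
	{
		/* _pt_s390_gaddr shares storage with page->mapping */
		page->_pt_s390_gaddr = 0;
		__free_pages(page, CRST_ALLOC_ORDER);
	}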
This also reverts commit 7e25de77bc5ea ("s390/mm: use pmd_pgtable_page()
helper in __gmap_segment_gaddr()"), which had s390 use pmd_pgtable_page()
to look up a gmap page table; pmd_pgtable_page() should be reserved for
generic process page tables.
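For reference, a minimal sketch of the address math __gmap_segment_gaddr()
returns to after the revert (illustrative only; the function name below is
hypothetical, and it assumes the standard s390 PTRS_PER_PMD and PMD_SIZE
definitions). The entry pointer is aligned down to the start of its segment
table page, whose _pt_s390_gaddr holds the guest address that table maps,
and the entry's slot index scaled by PMD_SIZE is added as the offset:

	/* Sketch only; mirrors __gmap_segment_gaddr() after the revert */
	static unsigned long segment_gaddr_sketch(unsigned long *entry)
	{
		unsigned long offset, mask;

		offset = (unsigned long) entry / sizeof(unsigned long);
		offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
		mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
		return virt_to_page((void *)((unsigned long) entry & mask))
				->_pt_s390_gaddr + offset;
	}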
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Acked-by: Mike Rapoport (IBM) <rppt@kernel.org>
---
arch/s390/mm/gmap.c | 56 +++++++++++++++++++++++++++-------------
include/linux/mm_types.h | 2 +-
2 files changed, 39 insertions(+), 19 deletions(-)
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index f4b6fc746fce..beb4804d9ca8 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -70,7 +70,7 @@ static struct gmap *gmap_alloc(unsigned long limit)
page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
if (!page)
goto out_free;
- page->index = 0;
+ page->_pt_s390_gaddr = 0;
list_add(&page->lru, &gmap->crst_list);
table = page_to_virt(page);
crst_table_init(table, etype);
@@ -187,16 +187,20 @@ static void gmap_free(struct gmap *gmap)
if (!(gmap_is_shadow(gmap) && gmap->removed))
gmap_flush_tlb(gmap);
/* Free all segment & region tables. */
- list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
+ list_for_each_entry_safe(page, next, &gmap->crst_list, lru) {
+ page->_pt_s390_gaddr = 0;
__free_pages(page, CRST_ALLOC_ORDER);
+ }
gmap_radix_tree_free(&gmap->guest_to_host);
gmap_radix_tree_free(&gmap->host_to_guest);
/* Free additional data for a shadow gmap */
if (gmap_is_shadow(gmap)) {
/* Free all page tables. */
- list_for_each_entry_safe(page, next, &gmap->pt_list, lru)
+ list_for_each_entry_safe(page, next, &gmap->pt_list, lru) {
+ page->_pt_s390_gaddr = 0;
page_table_free_pgste(page);
+ }
gmap_rmap_radix_tree_free(&gmap->host_to_rmap);
/* Release reference to the parent */
gmap_put(gmap->parent);
@@ -318,12 +322,14 @@ static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
list_add(&page->lru, &gmap->crst_list);
*table = __pa(new) | _REGION_ENTRY_LENGTH |
(*table & _REGION_ENTRY_TYPE_MASK);
- page->index = gaddr;
+ page->_pt_s390_gaddr = gaddr;
page = NULL;
}
spin_unlock(&gmap->guest_table_lock);
- if (page)
+ if (page) {
+ page->_pt_s390_gaddr = 0;
__free_pages(page, CRST_ALLOC_ORDER);
+ }
return 0;
}
@@ -336,12 +342,14 @@ static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
static unsigned long __gmap_segment_gaddr(unsigned long *entry)
{
struct page *page;
- unsigned long offset;
+ unsigned long offset, mask;
offset = (unsigned long) entry / sizeof(unsigned long);
offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
- page = pmd_pgtable_page((pmd_t *) entry);
- return page->index + offset;
+ mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
+ page = virt_to_page((void *)((unsigned long) entry & mask));
+
+ return page->_pt_s390_gaddr + offset;
}
/**
@@ -1351,6 +1359,7 @@ static void gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr)
/* Free page table */
page = phys_to_page(pgt);
list_del(&page->lru);
+ page->_pt_s390_gaddr = 0;
page_table_free_pgste(page);
}
@@ -1379,6 +1388,7 @@ static void __gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr,
/* Free page table */
page = phys_to_page(pgt);
list_del(&page->lru);
+ page->_pt_s390_gaddr = 0;
page_table_free_pgste(page);
}
}
@@ -1409,6 +1419,7 @@ static void gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr)
/* Free segment table */
page = phys_to_page(sgt);
list_del(&page->lru);
+ page->_pt_s390_gaddr = 0;
__free_pages(page, CRST_ALLOC_ORDER);
}
@@ -1437,6 +1448,7 @@ static void __gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr,
/* Free segment table */
page = phys_to_page(sgt);
list_del(&page->lru);
+ page->_pt_s390_gaddr = 0;
__free_pages(page, CRST_ALLOC_ORDER);
}
}
@@ -1467,6 +1479,7 @@ static void gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr)
/* Free region 3 table */
page = phys_to_page(r3t);
list_del(&page->lru);
+ page->_pt_s390_gaddr = 0;
__free_pages(page, CRST_ALLOC_ORDER);
}
@@ -1495,6 +1508,7 @@ static void __gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr,
/* Free region 3 table */
page = phys_to_page(r3t);
list_del(&page->lru);
+ page->_pt_s390_gaddr = 0;
__free_pages(page, CRST_ALLOC_ORDER);
}
}
@@ -1525,6 +1539,7 @@ static void gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr)
/* Free region 2 table */
page = phys_to_page(r2t);
list_del(&page->lru);
+ page->_pt_s390_gaddr = 0;
__free_pages(page, CRST_ALLOC_ORDER);
}
@@ -1557,6 +1572,7 @@ static void __gmap_unshadow_r1t(struct gmap *sg, unsigned long raddr,
/* Free region 2 table */
page = phys_to_page(r2t);
list_del(&page->lru);
+ page->_pt_s390_gaddr = 0;
__free_pages(page, CRST_ALLOC_ORDER);
}
}
@@ -1762,9 +1778,9 @@ int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
if (!page)
return -ENOMEM;
- page->index = r2t & _REGION_ENTRY_ORIGIN;
+ page->_pt_s390_gaddr = r2t & _REGION_ENTRY_ORIGIN;
if (fake)
- page->index |= GMAP_SHADOW_FAKE_TABLE;
+ page->_pt_s390_gaddr |= GMAP_SHADOW_FAKE_TABLE;
s_r2t = page_to_phys(page);
/* Install shadow region second table */
spin_lock(&sg->guest_table_lock);
@@ -1814,6 +1830,7 @@ int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
return rc;
out_free:
spin_unlock(&sg->guest_table_lock);
+ page->_pt_s390_gaddr = 0;
__free_pages(page, CRST_ALLOC_ORDER);
return rc;
}
@@ -1846,9 +1863,9 @@ int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
if (!page)
return -ENOMEM;
- page->index = r3t & _REGION_ENTRY_ORIGIN;
+ page->_pt_s390_gaddr = r3t & _REGION_ENTRY_ORIGIN;
if (fake)
- page->index |= GMAP_SHADOW_FAKE_TABLE;
+ page->_pt_s390_gaddr |= GMAP_SHADOW_FAKE_TABLE;
s_r3t = page_to_phys(page);
/* Install shadow region second table */
spin_lock(&sg->guest_table_lock);
@@ -1898,6 +1915,7 @@ int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
return rc;
out_free:
spin_unlock(&sg->guest_table_lock);
+ page->_pt_s390_gaddr = 0;
__free_pages(page, CRST_ALLOC_ORDER);
return rc;
}
@@ -1930,9 +1948,9 @@ int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
if (!page)
return -ENOMEM;
- page->index = sgt & _REGION_ENTRY_ORIGIN;
+ page->_pt_s390_gaddr = sgt & _REGION_ENTRY_ORIGIN;
if (fake)
- page->index |= GMAP_SHADOW_FAKE_TABLE;
+ page->_pt_s390_gaddr |= GMAP_SHADOW_FAKE_TABLE;
s_sgt = page_to_phys(page);
/* Install shadow region second table */
spin_lock(&sg->guest_table_lock);
@@ -1982,6 +2000,7 @@ int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
return rc;
out_free:
spin_unlock(&sg->guest_table_lock);
+ page->_pt_s390_gaddr = 0;
__free_pages(page, CRST_ALLOC_ORDER);
return rc;
}
@@ -2014,9 +2033,9 @@ int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr,
if (table && !(*table & _SEGMENT_ENTRY_INVALID)) {
/* Shadow page tables are full pages (pte+pgste) */
page = pfn_to_page(*table >> PAGE_SHIFT);
- *pgt = page->index & ~GMAP_SHADOW_FAKE_TABLE;
+ *pgt = page->_pt_s390_gaddr & ~GMAP_SHADOW_FAKE_TABLE;
*dat_protection = !!(*table & _SEGMENT_ENTRY_PROTECT);
- *fake = !!(page->index & GMAP_SHADOW_FAKE_TABLE);
+ *fake = !!(page->_pt_s390_gaddr & GMAP_SHADOW_FAKE_TABLE);
rc = 0;
} else {
rc = -EAGAIN;
@@ -2054,9 +2073,9 @@ int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
page = page_table_alloc_pgste(sg->mm);
if (!page)
return -ENOMEM;
- page->index = pgt & _SEGMENT_ENTRY_ORIGIN;
+ page->_pt_s390_gaddr = pgt & _SEGMENT_ENTRY_ORIGIN;
if (fake)
- page->index |= GMAP_SHADOW_FAKE_TABLE;
+ page->_pt_s390_gaddr |= GMAP_SHADOW_FAKE_TABLE;
s_pgt = page_to_phys(page);
/* Install shadow page table */
spin_lock(&sg->guest_table_lock);
@@ -2101,6 +2120,7 @@ int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
return rc;
out_free:
spin_unlock(&sg->guest_table_lock);
+ page->_pt_s390_gaddr = 0;
page_table_free_pgste(page);
return rc;
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index de10fc797c8e..fbbe4e93a9ba 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -144,7 +144,7 @@ struct page {
struct { /* Page table pages */
unsigned long _pt_pad_1; /* compound_head */
pgtable_t pmd_huge_pte; /* protected by page->ptl */
- unsigned long _pt_pad_2; /* mapping */
+ unsigned long _pt_s390_gaddr; /* mapping */
union {
struct mm_struct *pt_mm; /* x86 pgds only */
atomic_t pt_frag_refcount; /* powerpc */
--
2.40.1
Thread overview: 43+ messages
2023-06-27 3:13 [PATCH v6 00/33] Split ptdesc from struct page Vishal Moola (Oracle)
2023-06-27 3:13 ` [PATCH v6 01/33] mm: Add PAGE_TYPE_OP folio functions Vishal Moola (Oracle)
2023-06-27 3:14 ` Vishal Moola (Oracle) [this message]
2023-06-27 3:14 ` [PATCH v6 03/33] pgtable: Create struct ptdesc Vishal Moola (Oracle)
2023-06-27 16:07 ` Peter Xu
2023-06-27 17:51 ` Vishal Moola
2023-06-27 3:14 ` [PATCH v6 04/33] mm: add utility functions for ptdesc Vishal Moola (Oracle)
2023-06-27 3:14 ` [PATCH v6 05/33] mm: Convert pmd_pgtable_page() to pmd_ptdesc() Vishal Moola (Oracle)
2023-06-27 3:14 ` [PATCH v6 06/33] mm: Convert ptlock_alloc() to use ptdescs Vishal Moola (Oracle)
2023-06-27 3:14 ` [PATCH v6 07/33] mm: Convert ptlock_ptr() " Vishal Moola (Oracle)
2023-06-27 3:14 ` [PATCH v6 08/33] mm: Convert pmd_ptlock_init() " Vishal Moola (Oracle)
2023-06-27 3:14 ` [PATCH v6 09/33] mm: Convert ptlock_init() " Vishal Moola (Oracle)
2023-06-27 3:14 ` [PATCH v6 10/33] mm: Convert pmd_ptlock_free() " Vishal Moola (Oracle)
2023-06-27 3:14 ` [PATCH v6 11/33] mm: Convert ptlock_free() " Vishal Moola (Oracle)
2023-06-27 3:14 ` [PATCH v6 12/33] mm: Create ptdesc equivalents for pgtable_{pte,pmd}_page_{ctor,dtor} Vishal Moola (Oracle)
2023-06-27 3:14 ` [PATCH v6 13/33] powerpc: Convert various functions to use ptdescs Vishal Moola (Oracle)
2023-06-27 3:14 ` [PATCH v6 14/33] x86: " Vishal Moola (Oracle)
2023-06-27 3:14 ` [PATCH v6 15/33] s390: Convert various gmap " Vishal Moola (Oracle)
2023-06-27 3:14 ` [PATCH v6 16/33] s390: Convert various pgalloc " Vishal Moola (Oracle)
2023-06-27 3:14 ` [PATCH v6 17/33] mm: Remove page table members from struct page Vishal Moola (Oracle)
2023-06-27 3:14 ` [PATCH v6 18/33] pgalloc: Convert various functions to use ptdescs Vishal Moola (Oracle)
2023-06-27 3:14 ` [PATCH v6 19/33] arm: " Vishal Moola (Oracle)
2023-06-27 3:14 ` [PATCH v6 20/33] arm64: " Vishal Moola (Oracle)
2023-06-27 3:14 ` [PATCH v6 21/33] csky: Convert __pte_free_tlb() " Vishal Moola (Oracle)
2023-06-27 3:14 ` [PATCH v6 22/33] hexagon: " Vishal Moola (Oracle)
2023-06-27 3:14 ` [PATCH v6 23/33] loongarch: Convert various functions " Vishal Moola (Oracle)
2023-06-27 3:14 ` [PATCH v6 24/33] m68k: " Vishal Moola (Oracle)
2023-06-27 3:14 ` [PATCH v6 25/33] mips: " Vishal Moola (Oracle)
2023-06-27 3:14 ` [PATCH v6 26/33] nios2: Convert __pte_free_tlb() " Vishal Moola (Oracle)
2023-06-27 3:14 ` [PATCH v6 27/33] openrisc: " Vishal Moola (Oracle)
2023-06-27 3:14 ` [PATCH v6 28/33] riscv: Convert alloc_{pmd, pte}_late() " Vishal Moola (Oracle)
2023-06-27 3:14 ` [PATCH v6 29/33] sh: Convert pte_free_tlb() " Vishal Moola (Oracle)
2023-06-27 3:14 ` [PATCH v6 30/33] sparc64: Convert various functions " Vishal Moola (Oracle)
2023-06-27 3:14 ` [PATCH v6 31/33] sparc: Convert pgtable_pte_page_{ctor, dtor}() to ptdesc equivalents Vishal Moola (Oracle)
2023-06-27 3:14 ` [PATCH v6 32/33] um: Convert {pmd, pte}_free_tlb() to use ptdescs Vishal Moola (Oracle)
2023-06-27 3:14 ` [PATCH v6 33/33] mm: Remove pgtable_{pmd, pte}_page_{ctor, dtor}() wrappers Vishal Moola (Oracle)
2023-06-27 4:44 ` [PATCH v6 00/33] Split ptdesc from struct page Hugh Dickins
2023-06-27 7:14 ` David Hildenbrand
2023-06-27 20:13 ` Hugh Dickins
2023-06-28 7:41 ` David Hildenbrand
2023-06-28 18:51 ` Matthew Wilcox
2023-06-27 15:57 ` Matthew Wilcox
2023-06-27 20:25 ` Hugh Dickins