From: Mike Rapoport <rppt@kernel.org>
To: "Vishal Moola (Oracle)" <vishal.moola@gmail.com>
Cc: Andrew Morton <akpm@linux-foundation.org>,
Matthew Wilcox <willy@infradead.org>,
linux-mm@kvack.org, linux-arch@vger.kernel.org,
linux-arm-kernel@lists.infradead.org, linux-csky@vger.kernel.org,
linux-hexagon@vger.kernel.org, loongarch@lists.linux.dev,
linux-m68k@lists.linux-m68k.org, linux-mips@vger.kernel.org,
linux-openrisc@vger.kernel.org, linuxppc-dev@lists.ozlabs.org,
linux-riscv@lists.infradead.org, linux-s390@vger.kernel.org,
linux-sh@vger.kernel.org, sparclinux@vger.kernel.org,
linux-um@lists.infradead.org, xen-devel@lists.xenproject.org,
kvm@vger.kernel.org, Hugh Dickins <hughd@google.com>,
David Hildenbrand <david@redhat.com>,
Claudio Imbrenda <imbrenda@linux.ibm.com>
Subject: Re: [PATCH v4 16/34] s390: Convert various gmap functions to use ptdescs
Date: Wed, 14 Jun 2023 17:28:54 +0300 [thread overview]
Message-ID: <20230614142854.GO52412@kernel.org> (raw)
In-Reply-To: <20230612210423.18611-17-vishal.moola@gmail.com>
On Mon, Jun 12, 2023 at 02:04:05PM -0700, Vishal Moola (Oracle) wrote:
> In order to split struct ptdesc from struct page, convert various
> functions to use ptdescs.
>
> Some of the functions use the *get*page*() helper functions. Convert
> these to use pagetable_alloc() and ptdesc_address() instead to help
> standardize page tables further.
>
> Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
With
	ptdesc->_pt_s390_gaddr = 0;
folded into pagetable_free():
Acked-by: Mike Rapoport (IBM) <rppt@kernel.org>
> ---
> arch/s390/mm/gmap.c | 230 ++++++++++++++++++++++++--------------------
> 1 file changed, 128 insertions(+), 102 deletions(-)
>
> diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
> index 81c683426b49..010e87df7299 100644
> --- a/arch/s390/mm/gmap.c
> +++ b/arch/s390/mm/gmap.c
> @@ -34,7 +34,7 @@
> static struct gmap *gmap_alloc(unsigned long limit)
> {
> struct gmap *gmap;
> - struct page *page;
> + struct ptdesc *ptdesc;
> unsigned long *table;
> unsigned long etype, atype;
>
> @@ -67,12 +67,12 @@ static struct gmap *gmap_alloc(unsigned long limit)
> spin_lock_init(&gmap->guest_table_lock);
> spin_lock_init(&gmap->shadow_lock);
> refcount_set(&gmap->ref_count, 1);
> - page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
> - if (!page)
> + ptdesc = pagetable_alloc(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
> + if (!ptdesc)
> goto out_free;
> - page->_pt_s390_gaddr = 0;
> - list_add(&page->lru, &gmap->crst_list);
> - table = page_to_virt(page);
> + ptdesc->_pt_s390_gaddr = 0;
> + list_add(&ptdesc->pt_list, &gmap->crst_list);
> + table = ptdesc_to_virt(ptdesc);
> crst_table_init(table, etype);
> gmap->table = table;
> gmap->asce = atype | _ASCE_TABLE_LENGTH |
> @@ -181,25 +181,25 @@ static void gmap_rmap_radix_tree_free(struct radix_tree_root *root)
> */
> static void gmap_free(struct gmap *gmap)
> {
> - struct page *page, *next;
> + struct ptdesc *ptdesc, *next;
>
> /* Flush tlb of all gmaps (if not already done for shadows) */
> if (!(gmap_is_shadow(gmap) && gmap->removed))
> gmap_flush_tlb(gmap);
> /* Free all segment & region tables. */
> - list_for_each_entry_safe(page, next, &gmap->crst_list, lru) {
> - page->_pt_s390_gaddr = 0;
> - __free_pages(page, CRST_ALLOC_ORDER);
> + list_for_each_entry_safe(ptdesc, next, &gmap->crst_list, pt_list) {
> + ptdesc->_pt_s390_gaddr = 0;
> + pagetable_free(ptdesc);
> }
> gmap_radix_tree_free(&gmap->guest_to_host);
> gmap_radix_tree_free(&gmap->host_to_guest);
>
> /* Free additional data for a shadow gmap */
> if (gmap_is_shadow(gmap)) {
> - /* Free all page tables. */
> - list_for_each_entry_safe(page, next, &gmap->pt_list, lru) {
> - page->_pt_s390_gaddr = 0;
> - page_table_free_pgste(page);
> + /* Free all ptdesc tables. */
> + list_for_each_entry_safe(ptdesc, next, &gmap->pt_list, pt_list) {
> + ptdesc->_pt_s390_gaddr = 0;
> + page_table_free_pgste(ptdesc_page(ptdesc));
> }
> gmap_rmap_radix_tree_free(&gmap->host_to_rmap);
> /* Release reference to the parent */
> @@ -308,27 +308,27 @@ EXPORT_SYMBOL_GPL(gmap_get_enabled);
> static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
> unsigned long init, unsigned long gaddr)
> {
> - struct page *page;
> + struct ptdesc *ptdesc;
> unsigned long *new;
>
> /* since we dont free the gmap table until gmap_free we can unlock */
> - page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
> - if (!page)
> + ptdesc = pagetable_alloc(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
> + if (!ptdesc)
> return -ENOMEM;
> - new = page_to_virt(page);
> + new = ptdesc_to_virt(ptdesc);
> crst_table_init(new, init);
> spin_lock(&gmap->guest_table_lock);
> if (*table & _REGION_ENTRY_INVALID) {
> - list_add(&page->lru, &gmap->crst_list);
> + list_add(&ptdesc->pt_list, &gmap->crst_list);
> *table = __pa(new) | _REGION_ENTRY_LENGTH |
> (*table & _REGION_ENTRY_TYPE_MASK);
> - page->_pt_s390_gaddr = gaddr;
> - page = NULL;
> + ptdesc->_pt_s390_gaddr = gaddr;
> + ptdesc = NULL;
> }
> spin_unlock(&gmap->guest_table_lock);
> - if (page) {
> - page->_pt_s390_gaddr = 0;
> - __free_pages(page, CRST_ALLOC_ORDER);
> + if (ptdesc) {
> + ptdesc->_pt_s390_gaddr = 0;
> + pagetable_free(ptdesc);
> }
> return 0;
> }
> @@ -341,15 +341,15 @@ static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
> */
> static unsigned long __gmap_segment_gaddr(unsigned long *entry)
> {
> - struct page *page;
> + struct ptdesc *ptdesc;
> unsigned long offset, mask;
>
> offset = (unsigned long) entry / sizeof(unsigned long);
> offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
> mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
> - page = virt_to_page((void *)((unsigned long) entry & mask));
> + ptdesc = virt_to_ptdesc((void *)((unsigned long) entry & mask));
>
> - return page->_pt_s390_gaddr + offset;
> + return ptdesc->_pt_s390_gaddr + offset;
> }
>
> /**
> @@ -1345,6 +1345,7 @@ static void gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr)
> unsigned long *ste;
> phys_addr_t sto, pgt;
> struct page *page;
> + struct ptdesc *ptdesc;
>
> BUG_ON(!gmap_is_shadow(sg));
> ste = gmap_table_walk(sg, raddr, 1); /* get segment pointer */
> @@ -1358,9 +1359,11 @@ static void gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr)
> __gmap_unshadow_pgt(sg, raddr, __va(pgt));
> /* Free page table */
> page = phys_to_page(pgt);
> - list_del(&page->lru);
> - page->_pt_s390_gaddr = 0;
> - page_table_free_pgste(page);
> +
> + ptdesc = page_ptdesc(page);
> + list_del(&ptdesc->pt_list);
> + ptdesc->_pt_s390_gaddr = 0;
> + page_table_free_pgste(ptdesc_page(ptdesc));
> }
>
> /**
> @@ -1374,9 +1377,10 @@ static void gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr)
> static void __gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr,
> unsigned long *sgt)
> {
> - struct page *page;
> phys_addr_t pgt;
> int i;
> + struct page *page;
> + struct ptdesc *ptdesc;
>
> BUG_ON(!gmap_is_shadow(sg));
> for (i = 0; i < _CRST_ENTRIES; i++, raddr += _SEGMENT_SIZE) {
> @@ -1387,9 +1391,11 @@ static void __gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr,
> __gmap_unshadow_pgt(sg, raddr, __va(pgt));
> /* Free page table */
> page = phys_to_page(pgt);
> - list_del(&page->lru);
> - page->_pt_s390_gaddr = 0;
> - page_table_free_pgste(page);
> +
> + ptdesc = page_ptdesc(page);
> + list_del(&ptdesc->pt_list);
> + ptdesc->_pt_s390_gaddr = 0;
> + page_table_free_pgste(ptdesc_page(ptdesc));
> }
> }
>
> @@ -1405,6 +1411,7 @@ static void gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr)
> unsigned long r3o, *r3e;
> phys_addr_t sgt;
> struct page *page;
> + struct ptdesc *ptdesc;
>
> BUG_ON(!gmap_is_shadow(sg));
> r3e = gmap_table_walk(sg, raddr, 2); /* get region-3 pointer */
> @@ -1418,9 +1425,11 @@ static void gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr)
> __gmap_unshadow_sgt(sg, raddr, __va(sgt));
> /* Free segment table */
> page = phys_to_page(sgt);
> - list_del(&page->lru);
> - page->_pt_s390_gaddr = 0;
> - __free_pages(page, CRST_ALLOC_ORDER);
> +
> + ptdesc = page_ptdesc(page);
> + list_del(&ptdesc->pt_list);
> + ptdesc->_pt_s390_gaddr = 0;
> + pagetable_free(ptdesc);
> }
>
> /**
> @@ -1434,9 +1443,10 @@ static void gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr)
> static void __gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr,
> unsigned long *r3t)
> {
> - struct page *page;
> phys_addr_t sgt;
> int i;
> + struct page *page;
> + struct ptdesc *ptdesc;
>
> BUG_ON(!gmap_is_shadow(sg));
> for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION3_SIZE) {
> @@ -1447,9 +1457,11 @@ static void __gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr,
> __gmap_unshadow_sgt(sg, raddr, __va(sgt));
> /* Free segment table */
> page = phys_to_page(sgt);
> - list_del(&page->lru);
> - page->_pt_s390_gaddr = 0;
> - __free_pages(page, CRST_ALLOC_ORDER);
> +
> + ptdesc = page_ptdesc(page);
> + list_del(&ptdesc->pt_list);
> + ptdesc->_pt_s390_gaddr = 0;
> + pagetable_free(ptdesc);
> }
> }
>
> @@ -1465,6 +1477,7 @@ static void gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr)
> unsigned long r2o, *r2e;
> phys_addr_t r3t;
> struct page *page;
> + struct ptdesc *ptdesc;
>
> BUG_ON(!gmap_is_shadow(sg));
> r2e = gmap_table_walk(sg, raddr, 3); /* get region-2 pointer */
> @@ -1478,9 +1491,11 @@ static void gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr)
> __gmap_unshadow_r3t(sg, raddr, __va(r3t));
> /* Free region 3 table */
> page = phys_to_page(r3t);
> - list_del(&page->lru);
> - page->_pt_s390_gaddr = 0;
> - __free_pages(page, CRST_ALLOC_ORDER);
> +
> + ptdesc = page_ptdesc(page);
> + list_del(&ptdesc->pt_list);
> + ptdesc->_pt_s390_gaddr = 0;
> + pagetable_free(ptdesc);
> }
>
> /**
> @@ -1495,8 +1510,9 @@ static void __gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr,
> unsigned long *r2t)
> {
> phys_addr_t r3t;
> - struct page *page;
> int i;
> + struct page *page;
> + struct ptdesc *ptdesc;
>
> BUG_ON(!gmap_is_shadow(sg));
> for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION2_SIZE) {
> @@ -1507,9 +1523,11 @@ static void __gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr,
> __gmap_unshadow_r3t(sg, raddr, __va(r3t));
> /* Free region 3 table */
> page = phys_to_page(r3t);
> - list_del(&page->lru);
> - page->_pt_s390_gaddr = 0;
> - __free_pages(page, CRST_ALLOC_ORDER);
> +
> + ptdesc = page_ptdesc(page);
> + list_del(&ptdesc->pt_list);
> + ptdesc->_pt_s390_gaddr = 0;
> + pagetable_free(ptdesc);
> }
> }
>
> @@ -1525,6 +1543,7 @@ static void gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr)
> unsigned long r1o, *r1e;
> struct page *page;
> phys_addr_t r2t;
> + struct ptdesc *ptdesc;
>
> BUG_ON(!gmap_is_shadow(sg));
> r1e = gmap_table_walk(sg, raddr, 4); /* get region-1 pointer */
> @@ -1538,9 +1557,11 @@ static void gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr)
> __gmap_unshadow_r2t(sg, raddr, __va(r2t));
> /* Free region 2 table */
> page = phys_to_page(r2t);
> - list_del(&page->lru);
> - page->_pt_s390_gaddr = 0;
> - __free_pages(page, CRST_ALLOC_ORDER);
> +
> + ptdesc = page_ptdesc(page);
> + list_del(&ptdesc->pt_list);
> + ptdesc->_pt_s390_gaddr = 0;
> + pagetable_free(ptdesc);
> }
>
> /**
> @@ -1558,6 +1579,7 @@ static void __gmap_unshadow_r1t(struct gmap *sg, unsigned long raddr,
> struct page *page;
> phys_addr_t r2t;
> int i;
> + struct ptdesc *ptdesc;
>
> BUG_ON(!gmap_is_shadow(sg));
> asce = __pa(r1t) | _ASCE_TYPE_REGION1;
> @@ -1571,9 +1593,11 @@ static void __gmap_unshadow_r1t(struct gmap *sg, unsigned long raddr,
> r1t[i] = _REGION1_ENTRY_EMPTY;
> /* Free region 2 table */
> page = phys_to_page(r2t);
> - list_del(&page->lru);
> - page->_pt_s390_gaddr = 0;
> - __free_pages(page, CRST_ALLOC_ORDER);
> +
> + ptdesc = page_ptdesc(page);
> + list_del(&ptdesc->pt_list);
> + ptdesc->_pt_s390_gaddr = 0;
> + pagetable_free(ptdesc);
> }
> }
>
> @@ -1770,18 +1794,18 @@ int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
> unsigned long raddr, origin, offset, len;
> unsigned long *table;
> phys_addr_t s_r2t;
> - struct page *page;
> + struct ptdesc *ptdesc;
> int rc;
>
> BUG_ON(!gmap_is_shadow(sg));
> /* Allocate a shadow region second table */
> - page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
> - if (!page)
> + ptdesc = pagetable_alloc(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
> + if (!ptdesc)
> return -ENOMEM;
> - page->_pt_s390_gaddr = r2t & _REGION_ENTRY_ORIGIN;
> + ptdesc->_pt_s390_gaddr = r2t & _REGION_ENTRY_ORIGIN;
> if (fake)
> - page->_pt_s390_gaddr |= GMAP_SHADOW_FAKE_TABLE;
> - s_r2t = page_to_phys(page);
> + ptdesc->_pt_s390_gaddr |= GMAP_SHADOW_FAKE_TABLE;
> + s_r2t = page_to_phys(ptdesc_page(ptdesc));
> /* Install shadow region second table */
> spin_lock(&sg->guest_table_lock);
> table = gmap_table_walk(sg, saddr, 4); /* get region-1 pointer */
> @@ -1802,7 +1826,7 @@ int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
> _REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID;
> if (sg->edat_level >= 1)
> *table |= (r2t & _REGION_ENTRY_PROTECT);
> - list_add(&page->lru, &sg->crst_list);
> + list_add(&ptdesc->pt_list, &sg->crst_list);
> if (fake) {
> /* nothing to protect for fake tables */
> *table &= ~_REGION_ENTRY_INVALID;
> @@ -1830,8 +1854,8 @@ int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
> return rc;
> out_free:
> spin_unlock(&sg->guest_table_lock);
> - page->_pt_s390_gaddr = 0;
> - __free_pages(page, CRST_ALLOC_ORDER);
> + ptdesc->_pt_s390_gaddr = 0;
> + pagetable_free(ptdesc);
> return rc;
> }
> EXPORT_SYMBOL_GPL(gmap_shadow_r2t);
> @@ -1855,18 +1879,18 @@ int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
> unsigned long raddr, origin, offset, len;
> unsigned long *table;
> phys_addr_t s_r3t;
> - struct page *page;
> + struct ptdesc *ptdesc;
> int rc;
>
> BUG_ON(!gmap_is_shadow(sg));
> /* Allocate a shadow region second table */
> - page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
> - if (!page)
> + ptdesc = pagetable_alloc(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
> + if (!ptdesc)
> return -ENOMEM;
> - page->_pt_s390_gaddr = r3t & _REGION_ENTRY_ORIGIN;
> + ptdesc->_pt_s390_gaddr = r3t & _REGION_ENTRY_ORIGIN;
> if (fake)
> - page->_pt_s390_gaddr |= GMAP_SHADOW_FAKE_TABLE;
> - s_r3t = page_to_phys(page);
> + ptdesc->_pt_s390_gaddr |= GMAP_SHADOW_FAKE_TABLE;
> + s_r3t = page_to_phys(ptdesc_page(ptdesc));
> /* Install shadow region second table */
> spin_lock(&sg->guest_table_lock);
> table = gmap_table_walk(sg, saddr, 3); /* get region-2 pointer */
> @@ -1887,7 +1911,7 @@ int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
> _REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID;
> if (sg->edat_level >= 1)
> *table |= (r3t & _REGION_ENTRY_PROTECT);
> - list_add(&page->lru, &sg->crst_list);
> + list_add(&ptdesc->pt_list, &sg->crst_list);
> if (fake) {
> /* nothing to protect for fake tables */
> *table &= ~_REGION_ENTRY_INVALID;
> @@ -1915,8 +1939,8 @@ int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
> return rc;
> out_free:
> spin_unlock(&sg->guest_table_lock);
> - page->_pt_s390_gaddr = 0;
> - __free_pages(page, CRST_ALLOC_ORDER);
> + ptdesc->_pt_s390_gaddr = 0;
> + pagetable_free(ptdesc);
> return rc;
> }
> EXPORT_SYMBOL_GPL(gmap_shadow_r3t);
> @@ -1940,18 +1964,18 @@ int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
> unsigned long raddr, origin, offset, len;
> unsigned long *table;
> phys_addr_t s_sgt;
> - struct page *page;
> + struct ptdesc *ptdesc;
> int rc;
>
> BUG_ON(!gmap_is_shadow(sg) || (sgt & _REGION3_ENTRY_LARGE));
> /* Allocate a shadow segment table */
> - page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
> - if (!page)
> + ptdesc = pagetable_alloc(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
> + if (!ptdesc)
> return -ENOMEM;
> - page->_pt_s390_gaddr = sgt & _REGION_ENTRY_ORIGIN;
> + ptdesc->_pt_s390_gaddr = sgt & _REGION_ENTRY_ORIGIN;
> if (fake)
> - page->_pt_s390_gaddr |= GMAP_SHADOW_FAKE_TABLE;
> - s_sgt = page_to_phys(page);
> + ptdesc->_pt_s390_gaddr |= GMAP_SHADOW_FAKE_TABLE;
> + s_sgt = page_to_phys(ptdesc_page(ptdesc));
> /* Install shadow region second table */
> spin_lock(&sg->guest_table_lock);
> table = gmap_table_walk(sg, saddr, 2); /* get region-3 pointer */
> @@ -1972,7 +1996,7 @@ int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
> _REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID;
> if (sg->edat_level >= 1)
> *table |= sgt & _REGION_ENTRY_PROTECT;
> - list_add(&page->lru, &sg->crst_list);
> + list_add(&ptdesc->pt_list, &sg->crst_list);
> if (fake) {
> /* nothing to protect for fake tables */
> *table &= ~_REGION_ENTRY_INVALID;
> @@ -2000,8 +2024,8 @@ int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
> return rc;
> out_free:
> spin_unlock(&sg->guest_table_lock);
> - page->_pt_s390_gaddr = 0;
> - __free_pages(page, CRST_ALLOC_ORDER);
> + ptdesc->_pt_s390_gaddr = 0;
> + pagetable_free(ptdesc);
> return rc;
> }
> EXPORT_SYMBOL_GPL(gmap_shadow_sgt);
> @@ -2024,8 +2048,9 @@ int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr,
> int *fake)
> {
> unsigned long *table;
> - struct page *page;
> int rc;
> + struct page *page;
> + struct ptdesc *ptdesc;
>
> BUG_ON(!gmap_is_shadow(sg));
> spin_lock(&sg->guest_table_lock);
> @@ -2033,9 +2058,10 @@ int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr,
> if (table && !(*table & _SEGMENT_ENTRY_INVALID)) {
> /* Shadow page tables are full pages (pte+pgste) */
> page = pfn_to_page(*table >> PAGE_SHIFT);
> - *pgt = page->_pt_s390_gaddr & ~GMAP_SHADOW_FAKE_TABLE;
> + ptdesc = page_ptdesc(page);
> + *pgt = ptdesc->_pt_s390_gaddr & ~GMAP_SHADOW_FAKE_TABLE;
> *dat_protection = !!(*table & _SEGMENT_ENTRY_PROTECT);
> - *fake = !!(page->_pt_s390_gaddr & GMAP_SHADOW_FAKE_TABLE);
> + *fake = !!(ptdesc->_pt_s390_gaddr & GMAP_SHADOW_FAKE_TABLE);
> rc = 0;
> } else {
> rc = -EAGAIN;
> @@ -2064,19 +2090,19 @@ int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
> {
> unsigned long raddr, origin;
> unsigned long *table;
> - struct page *page;
> + struct ptdesc *ptdesc;
> phys_addr_t s_pgt;
> int rc;
>
> BUG_ON(!gmap_is_shadow(sg) || (pgt & _SEGMENT_ENTRY_LARGE));
> /* Allocate a shadow page table */
> - page = page_table_alloc_pgste(sg->mm);
> - if (!page)
> + ptdesc = page_ptdesc(page_table_alloc_pgste(sg->mm));
> + if (!ptdesc)
> return -ENOMEM;
> - page->_pt_s390_gaddr = pgt & _SEGMENT_ENTRY_ORIGIN;
> + ptdesc->_pt_s390_gaddr = pgt & _SEGMENT_ENTRY_ORIGIN;
> if (fake)
> - page->_pt_s390_gaddr |= GMAP_SHADOW_FAKE_TABLE;
> - s_pgt = page_to_phys(page);
> + ptdesc->_pt_s390_gaddr |= GMAP_SHADOW_FAKE_TABLE;
> + s_pgt = page_to_phys(ptdesc_page(ptdesc));
> /* Install shadow page table */
> spin_lock(&sg->guest_table_lock);
> table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
> @@ -2094,7 +2120,7 @@ int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
> /* mark as invalid as long as the parent table is not protected */
> *table = (unsigned long) s_pgt | _SEGMENT_ENTRY |
> (pgt & _SEGMENT_ENTRY_PROTECT) | _SEGMENT_ENTRY_INVALID;
> - list_add(&page->lru, &sg->pt_list);
> + list_add(&ptdesc->pt_list, &sg->pt_list);
> if (fake) {
> /* nothing to protect for fake tables */
> *table &= ~_SEGMENT_ENTRY_INVALID;
> @@ -2120,8 +2146,8 @@ int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
> return rc;
> out_free:
> spin_unlock(&sg->guest_table_lock);
> - page->_pt_s390_gaddr = 0;
> - page_table_free_pgste(page);
> + ptdesc->_pt_s390_gaddr = 0;
> + page_table_free_pgste(ptdesc_page(ptdesc));
> return rc;
>
> }
> @@ -2814,11 +2840,11 @@ EXPORT_SYMBOL_GPL(__s390_uv_destroy_range);
> */
> void s390_unlist_old_asce(struct gmap *gmap)
> {
> - struct page *old;
> + struct ptdesc *old;
>
> - old = virt_to_page(gmap->table);
> + old = virt_to_ptdesc(gmap->table);
> spin_lock(&gmap->guest_table_lock);
> - list_del(&old->lru);
> + list_del(&old->pt_list);
> /*
> * Sometimes the topmost page might need to be "removed" multiple
> * times, for example if the VM is rebooted into secure mode several
> @@ -2833,7 +2859,7 @@ void s390_unlist_old_asce(struct gmap *gmap)
> * pointers, so list_del can work (and do nothing) without
> * dereferencing stale or invalid pointers.
> */
> - INIT_LIST_HEAD(&old->lru);
> + INIT_LIST_HEAD(&old->pt_list);
> spin_unlock(&gmap->guest_table_lock);
> }
> EXPORT_SYMBOL_GPL(s390_unlist_old_asce);
> @@ -2854,7 +2880,7 @@ EXPORT_SYMBOL_GPL(s390_unlist_old_asce);
> int s390_replace_asce(struct gmap *gmap)
> {
> unsigned long asce;
> - struct page *page;
> + struct ptdesc *ptdesc;
> void *table;
>
> s390_unlist_old_asce(gmap);
> @@ -2863,10 +2889,10 @@ int s390_replace_asce(struct gmap *gmap)
> if ((gmap->asce & _ASCE_TYPE_MASK) == _ASCE_TYPE_SEGMENT)
> return -EINVAL;
>
> - page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
> - if (!page)
> + ptdesc = pagetable_alloc(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
> + if (!ptdesc)
> return -ENOMEM;
> - table = page_to_virt(page);
> + table = ptdesc_to_virt(ptdesc);
> memcpy(table, gmap->table, 1UL << (CRST_ALLOC_ORDER + PAGE_SHIFT));
>
> /*
> @@ -2875,7 +2901,7 @@ int s390_replace_asce(struct gmap *gmap)
> * it will be freed when the VM is torn down.
> */
> spin_lock(&gmap->guest_table_lock);
> - list_add(&page->lru, &gmap->crst_list);
> + list_add(&ptdesc->pt_list, &gmap->crst_list);
> spin_unlock(&gmap->guest_table_lock);
>
> /* Set new table origin while preserving existing ASCE control bits */
> --
> 2.40.1
>
>
--
Sincerely yours,
Mike.
next prev parent reply other threads:[~2023-06-14 14:28 UTC|newest]
Thread overview: 81+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-06-12 21:03 [PATCH v4 00/34] Split ptdesc from struct page Vishal Moola (Oracle)
2023-06-12 21:03 ` [PATCH v4 01/34] mm: Add PAGE_TYPE_OP folio functions Vishal Moola (Oracle)
2023-06-14 13:02 ` Mike Rapoport
2023-06-12 21:03 ` [PATCH v4 02/34] s390: Use _pt_s390_gaddr for gmap address tracking Vishal Moola (Oracle)
2023-06-14 13:14 ` Mike Rapoport
2023-06-12 21:03 ` [PATCH v4 03/34] s390: Use pt_frag_refcount for pagetables Vishal Moola (Oracle)
2023-06-14 13:21 ` Mike Rapoport
2023-06-12 21:03 ` [PATCH v4 04/34] pgtable: Create struct ptdesc Vishal Moola (Oracle)
2023-06-14 13:34 ` Mike Rapoport
2023-06-15 7:57 ` Hugh Dickins
2023-06-16 20:38 ` Matthew Wilcox
2023-06-16 21:28 ` Vishal Moola
2023-06-16 12:38 ` Jason Gunthorpe
2023-06-20 20:01 ` Vishal Moola
2023-06-20 23:05 ` Jason Gunthorpe
2023-06-20 23:10 ` Vishal Moola
2023-06-12 21:03 ` [PATCH v4 05/34] mm: add utility functions for ptdesc Vishal Moola (Oracle)
2023-06-14 13:48 ` Mike Rapoport
2023-06-12 21:03 ` [PATCH v4 06/34] mm: Convert pmd_pgtable_page() to pmd_ptdesc() Vishal Moola (Oracle)
2023-06-14 13:51 ` Mike Rapoport
2023-06-12 21:03 ` [PATCH v4 07/34] mm: Convert ptlock_alloc() to use ptdescs Vishal Moola (Oracle)
2023-06-14 13:51 ` Mike Rapoport
2023-06-12 21:03 ` [PATCH v4 08/34] mm: Convert ptlock_ptr() " Vishal Moola (Oracle)
2023-06-14 13:52 ` Mike Rapoport
2023-06-12 21:03 ` [PATCH v4 09/34] mm: Convert pmd_ptlock_init() " Vishal Moola (Oracle)
2023-06-14 13:52 ` Mike Rapoport
2023-06-12 21:03 ` [PATCH v4 10/34] mm: Convert ptlock_init() " Vishal Moola (Oracle)
2023-06-14 13:57 ` Mike Rapoport
2023-06-12 21:04 ` [PATCH v4 11/34] mm: Convert pmd_ptlock_free() " Vishal Moola (Oracle)
2023-06-14 13:59 ` Mike Rapoport
2023-06-12 21:04 ` [PATCH v4 12/34] mm: Convert ptlock_free() " Vishal Moola (Oracle)
2023-06-14 13:59 ` Mike Rapoport
2023-06-12 21:04 ` [PATCH v4 13/34] mm: Create ptdesc equivalents for pgtable_{pte,pmd}_page_{ctor,dtor} Vishal Moola (Oracle)
2023-06-14 14:10 ` Mike Rapoport
2023-06-12 21:04 ` [PATCH v4 14/34] powerpc: Convert various functions to use ptdescs Vishal Moola (Oracle)
2023-06-14 14:19 ` Mike Rapoport
2023-06-12 21:04 ` [PATCH v4 15/34] x86: " Vishal Moola (Oracle)
2023-06-14 14:27 ` Mike Rapoport
2023-06-12 21:04 ` [PATCH v4 16/34] s390: Convert various gmap " Vishal Moola (Oracle)
2023-06-14 14:28 ` Mike Rapoport [this message]
2023-06-12 21:04 ` [PATCH v4 17/34] s390: Convert various pgalloc " Vishal Moola (Oracle)
2023-06-14 14:46 ` Mike Rapoport
2023-06-12 21:04 ` [PATCH v4 18/34] mm: Remove page table members from struct page Vishal Moola (Oracle)
2023-06-14 14:53 ` Mike Rapoport
2023-06-12 21:04 ` [PATCH v4 19/34] pgalloc: Convert various functions to use ptdescs Vishal Moola (Oracle)
2023-06-14 14:59 ` Mike Rapoport
2023-06-12 21:04 ` [PATCH v4 20/34] arm: " Vishal Moola (Oracle)
2023-06-14 15:03 ` Mike Rapoport
2023-06-12 21:04 ` [PATCH v4 21/34] arm64: " Vishal Moola (Oracle)
2023-06-14 15:05 ` Mike Rapoport
2023-06-14 16:41 ` Catalin Marinas
2023-06-12 21:04 ` [PATCH v4 22/34] csky: Convert __pte_free_tlb() " Vishal Moola (Oracle)
2023-06-14 15:07 ` Mike Rapoport
2023-06-12 21:04 ` [PATCH v4 23/34] hexagon: " Vishal Moola (Oracle)
2023-06-14 15:07 ` Mike Rapoport
2023-06-12 21:04 ` [PATCH v4 24/34] loongarch: Convert various functions " Vishal Moola (Oracle)
2023-06-14 15:09 ` Mike Rapoport
2023-06-12 21:04 ` [PATCH v4 25/34] m68k: " Vishal Moola (Oracle)
2023-06-13 7:28 ` Geert Uytterhoeven
2023-06-14 15:12 ` Mike Rapoport
2023-06-12 21:04 ` [PATCH v4 26/34] mips: " Vishal Moola (Oracle)
2023-06-14 15:16 ` Mike Rapoport
2023-06-12 21:04 ` [PATCH v4 27/34] nios2: Convert __pte_free_tlb() " Vishal Moola (Oracle)
2023-06-13 22:16 ` Dinh Nguyen
2023-06-14 9:30 ` Geert Uytterhoeven
2023-06-14 21:23 ` Dinh Nguyen
2023-06-14 15:16 ` Mike Rapoport
2023-06-12 21:04 ` [PATCH v4 28/34] openrisc: " Vishal Moola (Oracle)
2023-06-14 15:17 ` Mike Rapoport
2023-06-12 21:04 ` [PATCH v4 29/34] riscv: Convert alloc_{pmd, pte}_late() " Vishal Moola (Oracle)
2023-06-14 15:18 ` Mike Rapoport
2023-06-12 21:04 ` [PATCH v4 30/34] sh: Convert pte_free_tlb() " Vishal Moola (Oracle)
2023-06-14 15:19 ` Mike Rapoport
2023-06-12 21:04 ` [PATCH v4 31/34] sparc64: Convert various functions " Vishal Moola (Oracle)
2023-06-14 15:20 ` Mike Rapoport
2023-06-12 21:04 ` [PATCH v4 32/34] sparc: Convert pgtable_pte_page_{ctor, dtor}() to ptdesc equivalents Vishal Moola (Oracle)
2023-06-14 15:20 ` Mike Rapoport
2023-06-12 21:04 ` [PATCH v4 33/34] um: Convert {pmd, pte}_free_tlb() to use ptdescs Vishal Moola (Oracle)
2023-06-14 15:21 ` Mike Rapoport
2023-06-12 21:04 ` [PATCH v4 34/34] mm: Remove pgtable_{pmd, pte}_page_{ctor, dtor}() wrappers Vishal Moola (Oracle)
2023-06-14 15:23 ` Mike Rapoport
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230614142854.GO52412@kernel.org \
--to=rppt@kernel.org \
--cc=akpm@linux-foundation.org \
--cc=david@redhat.com \
--cc=hughd@google.com \
--cc=imbrenda@linux.ibm.com \
--cc=kvm@vger.kernel.org \
--cc=linux-arch@vger.kernel.org \
--cc=linux-arm-kernel@lists.infradead.org \
--cc=linux-csky@vger.kernel.org \
--cc=linux-hexagon@vger.kernel.org \
--cc=linux-m68k@lists.linux-m68k.org \
--cc=linux-mips@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=linux-openrisc@vger.kernel.org \
--cc=linux-riscv@lists.infradead.org \
--cc=linux-s390@vger.kernel.org \
--cc=linux-sh@vger.kernel.org \
--cc=linux-um@lists.infradead.org \
--cc=linuxppc-dev@lists.ozlabs.org \
--cc=loongarch@lists.linux.dev \
--cc=sparclinux@vger.kernel.org \
--cc=vishal.moola@gmail.com \
--cc=willy@infradead.org \
--cc=xen-devel@lists.xenproject.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).