From: Peter Xu <peterx@redhat.com>
To: linux-kernel@vger.kernel.org, linux-mm@kvack.org
Cc: Mike Kravetz <mike.kravetz@oracle.com>,
"Kirill A . Shutemov" <kirill@shutemov.name>,
Lorenzo Stoakes <lstoakes@gmail.com>,
Axel Rasmussen <axelrasmussen@google.com>,
Matthew Wilcox <willy@infradead.org>,
John Hubbard <jhubbard@nvidia.com>,
Mike Rapoport <rppt@kernel.org>,
peterx@redhat.com, Hugh Dickins <hughd@google.com>,
David Hildenbrand <david@redhat.com>,
Andrea Arcangeli <aarcange@redhat.com>,
Rik van Riel <riel@surriel.com>,
James Houghton <jthoughton@google.com>,
Yang Shi <shy828301@gmail.com>, Jason Gunthorpe <jgg@nvidia.com>,
Vlastimil Babka <vbabka@suse.cz>,
Andrew Morton <akpm@linux-foundation.org>
Subject: [PATCH RFC 11/12] mm/gup: Handle hugepd for follow_page()
Date: Wed, 15 Nov 2023 20:29:07 -0500
Message-ID: <20231116012908.392077-12-peterx@redhat.com>
In-Reply-To: <20231116012908.392077-1-peterx@redhat.com>
Hugepd is only used in PowerPC's hugetlbfs. follow_page_mask() used to
leverage hugetlb APIs to access hugepd entries; teach follow_page_mask()
to handle hugepd entries itself.

With the previous refactoring of the fast-gup helper gup_huge_pd(), most
of that code can be reused directly. Since follow_page() only ever
fetches one page, setting the end to "address + PAGE_SIZE" suffices.
Signed-off-by: Peter Xu <peterx@redhat.com>
---
mm/gup.c | 77 ++++++++++++++++++++++++++++++++++++++++++++++++++------
1 file changed, 69 insertions(+), 8 deletions(-)
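Note for reviewers: the call flow after this patch is sketched below.
This is an illustrative note only, not part of the diff; the names match
the patch:

/*
 * follow_page(vma, address, flags)
 *   follow_page_mask(vma, address, flags, &ctx)
 *     pgd/p4d/pud/pmd level walkers, each now checking:
 *       is_hugepd(entry)?
 *         follow_hugepd(vma, hugepd, address, pdshift, flags, ctx)
 *           huge_pte_lock()                          <- hstate-aware lock
 *           gup_huge_pd(..., addr + PAGE_SIZE, ...)  <- end covers 1 page
 *           spin_unlock()
 *           ctx->page_mask = (1U << huge_page_order(h)) - 1
 *       else: descend to the next level as before
 */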
diff --git a/mm/gup.c b/mm/gup.c
index 7c210206470f..e635278f65f9 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -25,6 +25,11 @@
#include "internal.h"
+static struct page *follow_hugepd(struct vm_area_struct *vma, hugepd_t hugepd,
+ unsigned long addr, unsigned int pdshift,
+ unsigned int flags,
+ struct follow_page_context *ctx);
+
static inline void sanity_check_pinned_pages(struct page **pages,
unsigned long npages)
{
@@ -713,6 +718,9 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
spin_unlock(ptl);
return page;
}
+ if (unlikely(is_hugepd(__hugepd(pmd_val(pmdval)))))
+ return follow_hugepd(vma, __hugepd(pmd_val(pmdval)),
+ address, PMD_SHIFT, flags, ctx);
if (likely(!pmd_thp_or_huge(pmdval)))
return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
@@ -764,6 +772,10 @@ static struct page *follow_pud_mask(struct vm_area_struct *vma,
if (unlikely(pud_bad(pud)))
return no_page_table(vma, flags, address);
+ if (unlikely(is_hugepd(__hugepd(pud_val(pud)))))
+ return follow_hugepd(vma, __hugepd(pud_val(pud)),
+ address, PUD_SHIFT, flags, ctx);
+
return follow_pmd_mask(vma, address, pudp, flags, ctx);
}
@@ -772,15 +784,19 @@ static struct page *follow_p4d_mask(struct vm_area_struct *vma,
unsigned int flags,
struct follow_page_context *ctx)
{
- p4d_t *p4d;
+ p4d_t *p4d, p4dval;
p4d = p4d_offset(pgdp, address);
- if (p4d_none(*p4d))
- return no_page_table(vma, flags, address);
- BUILD_BUG_ON(p4d_huge(*p4d));
- if (unlikely(p4d_bad(*p4d)))
+ p4dval = *p4d;
+ BUILD_BUG_ON(p4d_huge(p4dval));
+
+ if (p4d_none(p4dval) || unlikely(p4d_bad(p4dval)))
return no_page_table(vma, flags, address);
+ if (unlikely(is_hugepd(__hugepd(p4d_val(p4dval)))))
+ return follow_hugepd(vma, __hugepd(p4d_val(p4dval)),
+ address, P4D_SHIFT, flags, ctx);
+
return follow_pud_mask(vma, address, p4d, flags, ctx);
}
@@ -812,7 +828,7 @@ static struct page *follow_page_mask(struct vm_area_struct *vma,
unsigned long address, unsigned int flags,
struct follow_page_context *ctx)
{
- pgd_t *pgd;
+ pgd_t *pgd, pgdval;
struct mm_struct *mm = vma->vm_mm;
ctx->page_mask = 0;
@@ -827,11 +843,17 @@ static struct page *follow_page_mask(struct vm_area_struct *vma,
&ctx->page_mask);
pgd = pgd_offset(mm, address);
+ pgdval = *pgd;
if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
- return no_page_table(vma, flags, address);
+ page = no_page_table(vma, flags, address);
+ else if (unlikely(is_hugepd(__hugepd(pgd_val(pgdval)))))
+ page = follow_hugepd(vma, __hugepd(pgd_val(pgdval)),
+ address, PGDIR_SHIFT, flags, ctx);
+ else
+ page = follow_p4d_mask(vma, address, pgd, flags, ctx);
- return follow_p4d_mask(vma, address, pgd, flags, ctx);
+ return page;
}
struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
@@ -2850,6 +2872,37 @@ static int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
return 1;
}
+
+static struct page *follow_hugepd(struct vm_area_struct *vma, hugepd_t hugepd,
+ unsigned long addr, unsigned int pdshift,
+ unsigned int flags,
+ struct follow_page_context *ctx)
+{
+ struct page *page;
+ struct hstate *h;
+ spinlock_t *ptl;
+ int nr = 0, ret;
+ pte_t *ptep;
+
+ /* Only hugetlb supports hugepd */
+ if (WARN_ON_ONCE(!is_vm_hugetlb_page(vma)))
+ return ERR_PTR(-EFAULT);
+
+ h = hstate_vma(vma);
+ ptep = hugepte_offset(hugepd, addr, pdshift);
+ ptl = huge_pte_lock(h, vma->vm_mm, ptep);
+ ret = gup_huge_pd(hugepd, addr, pdshift, addr + PAGE_SIZE,
+ flags, &page, &nr);
+ spin_unlock(ptl);
+
+ if (ret) {
+ WARN_ON_ONCE(nr != 1);
+ ctx->page_mask = (1U << huge_page_order(h)) - 1;
+ return page;
+ }
+
+ return NULL;
+}
#else
static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
unsigned int pdshift, unsigned long end, unsigned int flags,
@@ -2857,6 +2910,14 @@ static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
{
return 0;
}
+
+static struct page *follow_hugepd(struct vm_area_struct *vma, hugepd_t hugepd,
+ unsigned long addr, unsigned int pdshift,
+ unsigned int flags,
+ struct follow_page_context *ctx)
+{
+ return NULL;
+}
#endif /* CONFIG_ARCH_HAS_HUGEPD */
static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
--
2.41.0
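One note on ctx->page_mask: callers of follow_page_mask() use it to skip
the remaining subpages of a huge page in one step instead of re-walking
the page table per subpage, using the same arithmetic as the existing
__get_user_pages() loop. Below is a standalone userspace demo of that
arithmetic (hypothetical values; the 16MB size stands in for a power
hugepd-backed huge page):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	/* page_mask as follow_hugepd() computes it for a huge page of
	 * order 12, e.g. a 16MB page built from 4KB subpages */
	unsigned int page_mask = (1U << 12) - 1;

	/* an address 5 subpages into a 16MB-aligned huge page */
	unsigned long start = (1UL << 24) + 5 * PAGE_SIZE;

	/* same arithmetic as the __get_user_pages() loop: how many
	 * subpages, starting at 'start', this single walk covers */
	unsigned long page_increm =
		1 + (~(start >> PAGE_SHIFT) & page_mask);

	printf("subpages covered: %lu\n", page_increm); /* prints 4091 */
	return 0;
}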