From: Peter Zijlstra <peterz@infradead.org>
To: x86@kernel.org, willy@infradead.org
Cc: linux-mm@kvack.org, linux-kernel@vger.kernel.org,
aarcange@redhat.com, kirill.shutemov@linux.intel.com,
jroedel@suse.de, peterz@infradead.org
Subject: [RFC][PATCH 2/9] x86/mm/pae: Make pmd_t similar to pte_t
Date: Mon, 30 Nov 2020 12:27:07 +0100
Message-ID: <20201130113602.900215647@infradead.org>
In-Reply-To: <20201130112705.900705277@infradead.org>
Instead of mucking about with at least two different ways of fudging split
access to a pmd (casting to a u32 pointer in native_pmd_clear() and the
union split_pmd helper elsewhere), do the same thing we do for pte_t: make
pmd_t itself a union with explicit pmd_low/pmd_high halves.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
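[Note below the cut: a minimal userspace sketch of the pattern the patch
adopts, for readers who want to see the split-entry trick outside of kernel
context. This is not kernel code: the kernel's xchg()/READ_ONCE()/
WRITE_ONCE() are approximated with compiler __atomic builtins, uint32_t and
uint64_t stand in for the PAE pmdval_t and its halves, and
pmdp_get_and_clear_sketch() is an illustrative name rather than an existing
helper.]

        /* sketch.c: illustration only, mirrors the shape of the patched code */
        #include <stdint.h>
        #include <stdio.h>

        typedef uint64_t pmdval_t;

        /* pmd_t as the same kind of union that pte_t already is on PAE */
        typedef union {
                struct {
                        uint32_t pmd_low;
                        uint32_t pmd_high;
                };
                pmdval_t pmd;
        } pmd_t;

        /*
         * Shape of native_pmdp_get_and_clear(): the atomic exchange on the
         * low word acts as a barrier before the high bits are read and
         * cleared.
         */
        static pmd_t pmdp_get_and_clear_sketch(pmd_t *pmdp)
        {
                pmd_t res;

                res.pmd_low  = __atomic_exchange_n(&pmdp->pmd_low, 0, __ATOMIC_SEQ_CST);
                res.pmd_high = __atomic_load_n(&pmdp->pmd_high, __ATOMIC_RELAXED);
                __atomic_store_n(&pmdp->pmd_high, 0, __ATOMIC_RELAXED);

                return res;
        }

        int main(void)
        {
                pmd_t entry = { .pmd = 0x1234567800000067ULL };
                pmd_t old = pmdp_get_and_clear_sketch(&entry);

                printf("old=%#llx now=%#llx\n",
                       (unsigned long long)old.pmd,
                       (unsigned long long)entry.pmd);
                return 0;
        }

[The comment carried over from the original code captures the ordering
requirement this models: the xchg on the low word acts as a barrier before
the high bits are touched.]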
arch/x86/include/asm/pgtable-3level.h | 42 +++++++++-------------------
arch/x86/include/asm/pgtable-3level_types.h | 7 ++++
arch/x86/include/asm/pgtable_64_types.h     |    1 +
arch/x86/include/asm/pgtable_types.h | 4 --
4 files changed, 23 insertions(+), 31 deletions(-)
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -87,7 +87,7 @@ static inline pmd_t pmd_read_atomic(pmd_
ret |= ((pmdval_t)*(tmp + 1)) << 32;
}
- return (pmd_t) { ret };
+ return (pmd_t) { .pmd = ret };
}
static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
@@ -121,12 +121,11 @@ static inline void native_pte_clear(stru
ptep->pte_high = 0;
}
-static inline void native_pmd_clear(pmd_t *pmd)
+static inline void native_pmd_clear(pmd_t *pmdp)
{
- u32 *tmp = (u32 *)pmd;
- *tmp = 0;
+ pmdp->pmd_low = 0;
smp_wmb();
- *(tmp + 1) = 0;
+ pmdp->pmd_high = 0;
}
static inline void native_pud_clear(pud_t *pudp)
@@ -162,25 +161,17 @@ static inline pte_t native_ptep_get_and_
#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
#endif
-union split_pmd {
- struct {
- u32 pmd_low;
- u32 pmd_high;
- };
- pmd_t pmd;
-};
-
#ifdef CONFIG_SMP
static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp)
{
- union split_pmd res, *orig = (union split_pmd *)pmdp;
+ pmd_t res;
/* xchg acts as a barrier before setting of the high bits */
- res.pmd_low = xchg(&orig->pmd_low, 0);
- res.pmd_high = orig->pmd_high;
- orig->pmd_high = 0;
+ res.pmd_low = xchg(&pmdp->pmd_low, 0);
+ res.pmd_high = READ_ONCE(pmdp->pmd_high);
+ WRITE_ONCE(pmdp->pmd_high, 0);
- return res.pmd;
+ return res;
}
#else
#define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
@@ -199,17 +190,12 @@ static inline pmd_t pmdp_establish(struc
* anybody.
*/
if (!(pmd_val(pmd) & _PAGE_PRESENT)) {
- union split_pmd old, new, *ptr;
-
- ptr = (union split_pmd *)pmdp;
-
- new.pmd = pmd;
-
/* xchg acts as a barrier before setting of the high bits */
- old.pmd_low = xchg(&ptr->pmd_low, new.pmd_low);
- old.pmd_high = ptr->pmd_high;
- ptr->pmd_high = new.pmd_high;
- return old.pmd;
+ old.pmd_low = xchg(&pmdp->pmd_low, pmd.pmd_low);
+ old.pmd_high = READ_ONCE(pmdp->pmd_high);
+ WRITE_ONCE(pmdp->pmd_high, pmd.pmd_high);
+
+ return old;
}
do {
--- a/arch/x86/include/asm/pgtable-3level_types.h
+++ b/arch/x86/include/asm/pgtable-3level_types.h
@@ -18,6 +18,13 @@ typedef union {
};
pteval_t pte;
} pte_t;
+
+typedef union {
+ struct {
+ unsigned long pmd_low, pmd_high;
+ };
+ pmdval_t pmd;
+} pmd_t;
#endif /* !__ASSEMBLY__ */
#define SHARED_KERNEL_PMD (!static_cpu_has(X86_FEATURE_PTI))
--- a/arch/x86/include/asm/pgtable_64_types.h
+++ b/arch/x86/include/asm/pgtable_64_types.h
@@ -19,6 +19,7 @@ typedef unsigned long pgdval_t;
typedef unsigned long pgprotval_t;
typedef struct { pteval_t pte; } pte_t;
+typedef struct { pmdval_t pmd; } pmd_t;
#ifdef CONFIG_X86_5LEVEL
extern unsigned int __pgtable_l5_enabled;
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -381,11 +381,9 @@ static inline pudval_t native_pud_val(pu
#endif
#if CONFIG_PGTABLE_LEVELS > 2
-typedef struct { pmdval_t pmd; } pmd_t;
-
static inline pmd_t native_make_pmd(pmdval_t val)
{
- return (pmd_t) { val };
+ return (pmd_t) { .pmd = val };
}
static inline pmdval_t native_pmd_val(pmd_t pmd)
Thread overview: 14+ messages
2020-11-30 11:27 [RFC][PATCH 0/9] Clean up i386-PAE Peter Zijlstra
2020-11-30 11:27 ` [RFC][PATCH 1/9] mm: Update ptep_get_lockless()s comment Peter Zijlstra
2020-11-30 11:27 ` Peter Zijlstra [this message]
2020-11-30 11:27 ` [RFC][PATCH 3/9] sh/mm: Make pmd_t similar to pte_t Peter Zijlstra
2020-11-30 14:10 ` David Laight
2020-11-30 14:21 ` Peter Zijlstra
2020-11-30 11:27 ` [RFC][PATCH 4/9] mm: Fix pmd_read_atomic() Peter Zijlstra
2020-11-30 11:27 ` [RFC][PATCH 5/9] mm: Rename pmd_read_atomic() Peter Zijlstra
2020-11-30 15:31 ` Jason Gunthorpe
2020-12-01 8:57 ` Peter Zijlstra
2020-11-30 11:27 ` [RFC][PATCH 6/9] mm/gup: Fix the lockless walkers Peter Zijlstra
2020-11-30 11:27 ` [RFC][PATCH 7/9] x86/mm/pae: Dont (ab)use atomic64 Peter Zijlstra
2020-11-30 11:27 ` [RFC][PATCH 8/9] x86/mm/pae: Use WRITE_ONCE() Peter Zijlstra
2020-11-30 11:27 ` [RFC][PATCH 9/9] x86/mm/pae: Be consistent with pXXp_get_and_clear() Peter Zijlstra