From: Santosh Sivaraj <santosh@fossix.org>
To: <stable@vger.kernel.org>, linuxppc-dev <linuxppc-dev@lists.ozlabs.org>
Cc: Sasha Levin <sashal@kernel.org>,
Will Deacon <will.deacon@arm.com>, Greg KH <greg@kroah.com>
Subject: [PATCH v3 2/6] asm-generic/tlb: Track which levels of the page tables have been cleared
Date: Thu, 12 Mar 2020 18:57:36 +0530 [thread overview]
Message-ID: <20200312132740.225241-3-santosh@fossix.org> (raw)
In-Reply-To: <20200312132740.225241-1-santosh@fossix.org>
From: Will Deacon <will.deacon@arm.com>
commit a6d60245d6d9b1caf66b0d94419988c4836980af upstream
It is common for architectures with hugepage support to require only a
single TLB invalidation operation per hugepage during unmap(), rather than
iterating through the mapping in PAGE_SIZE increments. Currently,
however, the level in the page table at which the unmap() operation occurs
is not stored in the mmu_gather structure, therefore forcing
architectures to issue additional TLB invalidation operations or to give
up and over-invalidate by e.g. invalidating the entire TLB.
Ideally, we could add an interval rbtree to the mmu_gather structure,
which would allow us to associate the correct mapping granule with the
various sub-mappings within the range being invalidated. However, this
is costly in terms of book-keeping and memory management, so instead we
approximate by keeping track of the page table levels that are cleared
and provide a means to query the smallest granule required for invalidation.
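
For context (not part of the patch): below is a minimal, hedged sketch of
how an architecture's tlb_flush() hook could consume the new query to pick
its invalidation stride. The per-address primitive __flush_one_entry() is
hypothetical; real implementations (e.g. arm64) use their own invalidation
instructions and completion barriers.

/*
 * Illustrative sketch only, not part of this patch. It assumes the
 * struct mmu_gather fields from asm-generic/tlb.h (mm, fullmm, start,
 * end) and the tlb_get_unmap_size() helper added by this patch;
 * __flush_one_entry() is a made-up per-address primitive.
 */
static void example_tlb_flush(struct mmu_gather *tlb)
{
        /* Smallest granule that was cleared, e.g. PMD_SIZE for a 2MiB hugepage */
        unsigned long stride = tlb_get_unmap_size(tlb);
        unsigned long addr;

        if (tlb->fullmm) {
                flush_tlb_mm(tlb->mm); /* whole address space, stride is irrelevant */
                return;
        }

        /* One invalidation per mapping entry rather than one per PAGE_SIZE */
        for (addr = tlb->start; addr < tlb->end; addr += stride)
                __flush_one_entry(tlb->mm, addr);
}

With cleared_pmds set during a 2MiB hugepage unmap, stride becomes PMD_SIZE,
so the loop above would issue one invalidation instead of 512.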
Signed-off-by: Will Deacon <will.deacon@arm.com>
Cc: <stable@vger.kernel.org> # 4.19
Signed-off-by: Santosh Sivaraj <santosh@fossix.org>
[santosh: prerequisite for upcoming tlbflush backports]
---
include/asm-generic/tlb.h | 58 +++++++++++++++++++++++++++++++++------
mm/memory.c               |  4 ++-
2 files changed, 53 insertions(+), 9 deletions(-)
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 97306b32d8d2..f2b9dc9cbaf8 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -114,6 +114,14 @@ struct mmu_gather {
*/
unsigned int freed_tables : 1;
+ /*
+ * at which levels have we cleared entries?
+ */
+ unsigned int cleared_ptes : 1;
+ unsigned int cleared_pmds : 1;
+ unsigned int cleared_puds : 1;
+ unsigned int cleared_p4ds : 1;
+
struct mmu_gather_batch *active;
struct mmu_gather_batch local;
struct page *__pages[MMU_GATHER_BUNDLE];
@@ -148,6 +156,10 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
tlb->end = 0;
}
tlb->freed_tables = 0;
+ tlb->cleared_ptes = 0;
+ tlb->cleared_pmds = 0;
+ tlb->cleared_puds = 0;
+ tlb->cleared_p4ds = 0;
}
static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
@@ -197,6 +209,25 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
}
#endif
+static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
+{
+ if (tlb->cleared_ptes)
+ return PAGE_SHIFT;
+ if (tlb->cleared_pmds)
+ return PMD_SHIFT;
+ if (tlb->cleared_puds)
+ return PUD_SHIFT;
+ if (tlb->cleared_p4ds)
+ return P4D_SHIFT;
+
+ return PAGE_SHIFT;
+}
+
+static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
+{
+ return 1UL << tlb_get_unmap_shift(tlb);
+}
+
/*
* In the case of tlb vma handling, we can optimise these away in the
* case where we're doing a full MM flush. When we're doing a munmap,
@@ -230,13 +261,19 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
#define tlb_remove_tlb_entry(tlb, ptep, address) \
do { \
__tlb_adjust_range(tlb, address, PAGE_SIZE); \
+ tlb->cleared_ptes = 1; \
__tlb_remove_tlb_entry(tlb, ptep, address); \
} while (0)
-#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
- do { \
- __tlb_adjust_range(tlb, address, huge_page_size(h)); \
- __tlb_remove_tlb_entry(tlb, ptep, address); \
+#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
+ do { \
+ unsigned long _sz = huge_page_size(h); \
+ __tlb_adjust_range(tlb, address, _sz); \
+ if (_sz == PMD_SIZE) \
+ tlb->cleared_pmds = 1; \
+ else if (_sz == PUD_SIZE) \
+ tlb->cleared_puds = 1; \
+ __tlb_remove_tlb_entry(tlb, ptep, address); \
} while (0)
/**
@@ -250,6 +287,7 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address) \
do { \
__tlb_adjust_range(tlb, address, HPAGE_PMD_SIZE); \
+ tlb->cleared_pmds = 1; \
__tlb_remove_pmd_tlb_entry(tlb, pmdp, address); \
} while (0)
@@ -264,6 +302,7 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
#define tlb_remove_pud_tlb_entry(tlb, pudp, address) \
do { \
__tlb_adjust_range(tlb, address, HPAGE_PUD_SIZE); \
+ tlb->cleared_puds = 1; \
__tlb_remove_pud_tlb_entry(tlb, pudp, address); \
} while (0)
@@ -289,7 +328,8 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
#define pte_free_tlb(tlb, ptep, address) \
do { \
__tlb_adjust_range(tlb, address, PAGE_SIZE); \
- tlb->freed_tables = 1; \
+ tlb->freed_tables = 1; \
+ tlb->cleared_pmds = 1; \
__pte_free_tlb(tlb, ptep, address); \
} while (0)
#endif
@@ -298,7 +338,8 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
#define pmd_free_tlb(tlb, pmdp, address) \
do { \
__tlb_adjust_range(tlb, address, PAGE_SIZE); \
- tlb->freed_tables = 1; \
+ tlb->freed_tables = 1; \
+ tlb->cleared_puds = 1; \
__pmd_free_tlb(tlb, pmdp, address); \
} while (0)
#endif
@@ -308,7 +349,8 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
#define pud_free_tlb(tlb, pudp, address) \
do { \
__tlb_adjust_range(tlb, address, PAGE_SIZE); \
- tlb->freed_tables = 1; \
+ tlb->freed_tables = 1; \
+ tlb->cleared_p4ds = 1; \
__pud_free_tlb(tlb, pudp, address); \
} while (0)
#endif
@@ -319,7 +361,7 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
#define p4d_free_tlb(tlb, pudp, address) \
do { \
__tlb_adjust_range(tlb, address, PAGE_SIZE); \
- tlb->freed_tables = 1; \
+ tlb->freed_tables = 1; \
__p4d_free_tlb(tlb, pudp, address); \
} while (0)
#endif
diff --git a/mm/memory.c b/mm/memory.c
index bbf0cc4066c8..1832c5ed6ac0 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -267,8 +267,10 @@ void arch_tlb_finish_mmu(struct mmu_gather *tlb,
{
struct mmu_gather_batch *batch, *next;
- if (force)
+ if (force) {
+ __tlb_reset_range(tlb);
__tlb_adjust_range(tlb, start, end - start);
+ }
tlb_flush_mmu(tlb);
--
2.24.1