From: Andy Lutomirski <luto@kernel.org>
To: X86 ML <x86@kernel.org>
Cc: "linux-kernel@vger.kernel.org" <linux-kernel@vger.kernel.org>,
Borislav Petkov <bpetkov@suse.de>,
Linus Torvalds <torvalds@linux-foundation.org>,
Andrew Morton <akpm@linux-foundation.org>,
Mel Gorman <mgorman@suse.de>,
"linux-mm@kvack.org" <linux-mm@kvack.org>,
Nadav Amit <nadav.amit@gmail.com>,
Andy Lutomirski <luto@kernel.org>, Rik van Riel <riel@redhat.com>,
Dave Hansen <dave.hansen@intel.com>,
Nadav Amit <namit@vmware.com>, Michal Hocko <mhocko@suse.com>,
Sasha Levin <sasha.levin@oracle.com>
Subject: [PATCH v2 03/11] x86/mm: Make the batched unmap TLB flush API more generic
Date: Mon, 22 May 2017 15:30:03 -0700 [thread overview]
Message-ID: <19f25a8581f9fb77876b7ff3b001f89835e34ea3.1495492063.git.luto@kernel.org> (raw)
In-Reply-To: <cover.1495492063.git.luto@kernel.org>
try_to_unmap_flush() used to open-code a rather x86-centric flush
sequence: local_flush_tlb() + flush_tlb_others(). Rearrange the
code so that the arch (only x86 for now) provides
arch_tlbbatch_add_mm() and arch_tlbbatch_flush() and the core code
calls those functions instead.
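Concretely, the contract (spelled out in the updated mm_types_task.h
comment below) is: modify the PTE, call arch_tlbbatch_add_mm() (which
provides all needed barriers internally), and later call
arch_tlbbatch_flush(); by the time the flush returns, no CPU can hold
a stale entry. Here is a minimal sketch of a caller honoring that
sequence; the two function names are illustrative, not part of this
patch:

/*
 * Illustrative only, not code from this patch.  Shows the ordering
 * contract the arch hooks honor: PTE update first, then
 * arch_tlbbatch_add_mm() (which supplies any barriers the arch
 * needs), then one arch_tlbbatch_flush() for the whole batch.
 */
static void unmap_one_page_batched(struct mm_struct *mm,
				   unsigned long addr, pte_t *ptep)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	ptep_get_and_clear(mm, addr, ptep);		/* 1: modify the PTE */
	arch_tlbbatch_add_mm(&tlb_ubc->arch, mm);	/* 2: record the mm */
	tlb_ubc->flush_required = true;
}

static void finish_batched_unmap(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	if (!tlb_ubc->flush_required)
		return;

	arch_tlbbatch_flush(&tlb_ubc->arch);	/* 3: one flush for all */
	tlb_ubc->flush_required = false;
}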
I'll want this for x86 because, to enable address space IDs, I can't
support the flush_tlb_others() mode used by the existing
try_to_unmap_flush() implementation with good performance. I can
support the new API fairly easily, though.
I imagine that other architectures may be in a similar position.
Architectures with strong remote flush primitives (arm64?) may have
even worse performance problems with flush_tlb_others() the way that
try_to_unmap_flush() uses it.
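To illustrate, an architecture whose hardware broadcasts TLB
invalidations could satisfy the two hooks almost trivially. This is a
purely hypothetical sketch (no such port is in this series, and it is
not real arm64 code):

/*
 * Hypothetical arch implementation, illustration only.  With
 * hardware-broadcast invalidation there is no cpumask to track;
 * a real port would still need whatever barrier the contract
 * requires in arch_tlbbatch_add_mm().
 */
struct arch_tlbflush_unmap_batch {
	bool pending;
};

static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
					struct mm_struct *mm)
{
	batch->pending = true;
}

void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
{
	if (batch->pending)
		flush_tlb_all();	/* single broadcast invalidate */
	batch->pending = false;
}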
Cc: Mel Gorman <mgorman@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Nadav Amit <namit@vmware.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Sasha Levin <sasha.levin@oracle.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Andy Lutomirski <luto@kernel.org>
---
arch/x86/include/asm/tlbbatch.h | 16 ++++++++++++++++
arch/x86/include/asm/tlbflush.h | 8 ++++++++
arch/x86/mm/tlb.c | 17 +++++++++++++++++
include/linux/mm_types_task.h | 15 +++++++++++----
mm/rmap.c | 16 ++--------------
5 files changed, 54 insertions(+), 18 deletions(-)
create mode 100644 arch/x86/include/asm/tlbbatch.h
diff --git a/arch/x86/include/asm/tlbbatch.h b/arch/x86/include/asm/tlbbatch.h
new file mode 100644
index 000000000000..01a6de16fb96
--- /dev/null
+++ b/arch/x86/include/asm/tlbbatch.h
@@ -0,0 +1,16 @@
+#ifndef _ARCH_X86_TLBBATCH_H
+#define _ARCH_X86_TLBBATCH_H
+
+#include <linux/cpumask.h>
+
+#ifdef CONFIG_SMP
+struct arch_tlbflush_unmap_batch {
+ /*
+ * Each bit set is a CPU that potentially has a TLB entry for one of
+ * the PFNs being flushed.
+ */
+ struct cpumask cpumask;
+};
+#endif
+
+#endif /* _ARCH_X86_TLBBATCH_H */
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index b9db0f8fef55..8f6e2f87511b 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -329,6 +329,14 @@ static inline void reset_lazy_tlbstate(void)
this_cpu_write(cpu_tlbstate.active_mm, &init_mm);
}
+static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
+ struct mm_struct *mm)
+{
+ cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
+}
+
+extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
+
#endif /* SMP */
#ifndef CONFIG_PARAVIRT
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 4d303864b310..743e4c6b4529 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -395,6 +395,23 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
}
}
+void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
+{
+ int cpu = get_cpu();
+
+ if (cpumask_test_cpu(cpu, &batch->cpumask)) {
+ count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
+ local_flush_tlb();
+ trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
+ }
+
+ if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids)
+ flush_tlb_others(&batch->cpumask, NULL, 0, TLB_FLUSH_ALL);
+ cpumask_clear(&batch->cpumask);
+
+ put_cpu();
+}
+
static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h
index 136dfdf63ba1..fc412fbd80bd 100644
--- a/include/linux/mm_types_task.h
+++ b/include/linux/mm_types_task.h
@@ -14,6 +14,10 @@
#include <asm/page.h>
+#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
+#include <asm/tlbbatch.h>
+#endif
+
#define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
#define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \
IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK))
@@ -67,12 +71,15 @@ struct page_frag {
struct tlbflush_unmap_batch {
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
/*
- * Each bit set is a CPU that potentially has a TLB entry for one of
- * the PFNs being flushed. See set_tlb_ubc_flush_pending().
+ * The arch code makes the following promise: generic code can modify a
+ * PTE, then call arch_tlbbatch_add_mm() (which internally provides all
+ * needed barriers), then call arch_tlbbatch_flush(), and the entries
+ * will be flushed on all CPUs by the time that arch_tlbbatch_flush()
+ * returns.
*/
- struct cpumask cpumask;
+ struct arch_tlbflush_unmap_batch arch;
- /* True if any bit in cpumask is set */
+ /* True if a flush is needed. */
bool flush_required;
/*
diff --git a/mm/rmap.c b/mm/rmap.c
index f6838015810f..f29d725528d2 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -579,25 +579,13 @@ void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
void try_to_unmap_flush(void)
{
struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
- int cpu;
if (!tlb_ubc->flush_required)
return;
- cpu = get_cpu();
-
- if (cpumask_test_cpu(cpu, &tlb_ubc->cpumask)) {
- count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
- local_flush_tlb();
- trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
- }
-
- if (cpumask_any_but(&tlb_ubc->cpumask, cpu) < nr_cpu_ids)
- flush_tlb_others(&tlb_ubc->cpumask, NULL, 0, TLB_FLUSH_ALL);
- cpumask_clear(&tlb_ubc->cpumask);
+ arch_tlbbatch_flush(&tlb_ubc->arch);
tlb_ubc->flush_required = false;
tlb_ubc->writable = false;
- put_cpu();
}
/* Flush iff there are potentially writable TLB entries that can race with IO */
@@ -613,7 +601,7 @@ static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
{
struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
- cpumask_or(&tlb_ubc->cpumask, &tlb_ubc->cpumask, mm_cpumask(mm));
+ arch_tlbbatch_add_mm(&tlb_ubc->arch, mm);
tlb_ubc->flush_required = true;
/*
--
2.9.3
Thread overview: 15+ messages
2017-05-22 22:30 [PATCH v2 00/11] x86 TLB flush cleanups, moving toward PCID support Andy Lutomirski
2017-05-22 22:30 ` [PATCH v2 01/11] x86/mm: Reimplement flush_tlb_page() using flush_tlb_mm_range() Andy Lutomirski
2017-05-22 22:30 ` [PATCH v2 02/11] x86/mm: Reduce indentation in flush_tlb_func() Andy Lutomirski
2017-05-22 22:30 ` Andy Lutomirski [this message]
2017-05-22 22:30 ` [PATCH v2 04/11] x86/mm: Pass flush_tlb_info to flush_tlb_others() etc Andy Lutomirski
2017-05-24 8:18 ` Ingo Molnar
2017-05-22 22:30 ` [PATCH v2 05/11] x86/mm: Change the leave_mm() condition for local TLB flushes Andy Lutomirski
2017-05-22 22:30 ` [PATCH v2 06/11] x86/mm: Refactor flush_tlb_mm_range() to merge local and remote cases Andy Lutomirski
2017-05-22 22:30 ` [PATCH v2 07/11] x86/mm: Use new merged flush logic in arch_tlbbatch_flush() Andy Lutomirski
2017-05-22 22:30 ` [PATCH v2 08/11] x86/mm: Remove the UP tlbflush code; always use the formerly SMP code Andy Lutomirski
2017-05-22 22:30 ` [PATCH v2 09/11] x86/mm: Rework lazy TLB to track the actual loaded mm Andy Lutomirski
2017-05-22 22:30 ` [PATCH v2 10/11] x86/mm: Be more consistent wrt PAGE_SHIFT vs PAGE_SIZE in tlb flush code Andy Lutomirski
2017-05-22 23:24 ` Nadav Amit
2017-05-22 23:41 ` Andy Lutomirski
2017-05-22 22:30 ` [PATCH v2 11/11] x86,kvm: Teach KVM's VMX code that CR3 isn't a constant Andy Lutomirski