---

 b/arch/x86/hyperv/mmu.c           |    4 ++--
 b/arch/x86/include/asm/tlb.h      |   11 ++++++++++-
 b/arch/x86/include/asm/tlbflush.h |    4 ++--
 b/arch/x86/mm/tlb.c               |   29 +++++++++++++----------------
 4 files changed, 27 insertions(+), 21 deletions(-)

diff -puN arch/x86/mm/tlb.c~flush_tlb_mm_range-lazy arch/x86/mm/tlb.c
--- a/arch/x86/mm/tlb.c~flush_tlb_mm_range-lazy	2026-04-23 10:37:49.745839224 -0700
+++ b/arch/x86/mm/tlb.c	2026-04-23 10:45:25.670880226 -0700
@@ -1339,16 +1339,12 @@ STATIC_NOPV void native_flush_tlb_multi(
 				(info->end - info->start) >> PAGE_SHIFT);
 
 	/*
-	 * If no page tables were freed, we can skip sending IPIs to
-	 * CPUs in lazy TLB mode. They will flush the CPU themselves
-	 * at the next context switch.
-	 *
-	 * However, if page tables are getting freed, we need to send the
-	 * IPI everywhere, to prevent CPUs in lazy TLB mode from tripping
-	 * up on the new contents of what used to be page tables, while
-	 * doing a speculative memory access.
+	 * Simple TLB flushes can avoid sending IPIs to CPUs in lazy
+	 * TLB mode. But some operations like freeing page tables
+	 * could leave dangerous state in paging structure caches.
+	 * Send IPIs even to lazy CPUs when necessary.
 	 */
-	if (info->freed_tables || mm_in_asid_transition(info->mm))
+	if (info->wake_lazy_cpus || mm_in_asid_transition(info->mm))
 		on_each_cpu_mask(cpumask, flush_tlb_func, (void *)info, true);
 	else
 		on_each_cpu_cond_mask(should_flush_tlb, flush_tlb_func,
@@ -1381,7 +1377,7 @@ static DEFINE_PER_CPU(unsigned int, flus
 
 static struct flush_tlb_info *get_flush_tlb_info(struct mm_struct *mm,
 			unsigned long start, unsigned long end,
-			unsigned int stride_shift, bool freed_tables,
+			unsigned int stride_shift, bool wake_lazy_cpus,
 			u64 new_tlb_gen)
 {
 	struct flush_tlb_info *info = this_cpu_ptr(&flush_tlb_info);
@@ -1408,7 +1404,7 @@ static struct flush_tlb_info *get_flush_
 	info->end		= end;
 	info->mm		= mm;
 	info->stride_shift	= stride_shift;
-	info->freed_tables	= freed_tables;
+	info->wake_lazy_cpus	= wake_lazy_cpus;
 	info->new_tlb_gen	= new_tlb_gen;
 	info->initiating_cpu	= smp_processor_id();
 	info->trim_cpumask	= 0;
@@ -1427,7 +1423,7 @@ static void put_flush_tlb_info(void)
 
 void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 				unsigned long end, unsigned int stride_shift,
-				bool freed_tables)
+				bool wake_lazy_cpus)
 {
 	struct flush_tlb_info *info;
 	int cpu = get_cpu();
@@ -1436,7 +1432,7 @@ void flush_tlb_mm_range(struct mm_struct
 	/* This is also a barrier that synchronizes with switch_mm(). */
 	new_tlb_gen = inc_mm_tlb_gen(mm);
 
-	info = get_flush_tlb_info(mm, start, end, stride_shift, freed_tables,
+	info = get_flush_tlb_info(mm, start, end, stride_shift, wake_lazy_cpus,
 				  new_tlb_gen);
 
 	/*
@@ -1528,10 +1524,11 @@ static void kernel_tlb_flush_range(struc
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
 	struct flush_tlb_info *info;
+	bool wake_lazy_cpus = false;
 
 	guard(preempt)();
 
-	info = get_flush_tlb_info(NULL, start, end, PAGE_SHIFT, false,
+	info = get_flush_tlb_info(NULL, start, end, PAGE_SHIFT, wake_lazy_cpus,
 				  TLB_GENERATION_INVALID);
 
 	if (info->end == TLB_FLUSH_ALL)
@@ -1708,10 +1705,10 @@ EXPORT_SYMBOL_FOR_KVM(__flush_tlb_all);
 void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
 {
 	struct flush_tlb_info *info;
-
+	bool wake_lazy_cpus = false;
 	int cpu = get_cpu();
 
-	info = get_flush_tlb_info(NULL, 0, TLB_FLUSH_ALL, 0, false,
+	info = get_flush_tlb_info(NULL, 0, TLB_FLUSH_ALL, 0, wake_lazy_cpus,
 				  TLB_GENERATION_INVALID);
 	/*
 	 * flush_tlb_multi() is not optimized for the common case in which only
diff -puN arch/x86/include/asm/tlbflush.h~flush_tlb_mm_range-lazy arch/x86/include/asm/tlbflush.h
--- a/arch/x86/include/asm/tlbflush.h~flush_tlb_mm_range-lazy	2026-04-23 10:38:02.088295820 -0700
+++ b/arch/x86/include/asm/tlbflush.h	2026-04-23 10:39:40.979965863 -0700
@@ -247,7 +247,7 @@ struct flush_tlb_info {
 	u64			new_tlb_gen;
 	unsigned int		initiating_cpu;
 	u8			stride_shift;
-	u8			freed_tables;
+	u8			wake_lazy_cpus;
 	u8			trim_cpumask;
 };
 
@@ -337,7 +337,7 @@ static inline bool mm_in_asid_transition
 extern void flush_tlb_all(void);
 extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 				unsigned long end, unsigned int stride_shift,
-				bool freed_tables);
+				bool wake_lazy_cpus);
 extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 
 static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
diff -puN arch/x86/include/asm/tlb.h~flush_tlb_mm_range-lazy arch/x86/include/asm/tlb.h
--- a/arch/x86/include/asm/tlb.h~flush_tlb_mm_range-lazy	2026-04-23 10:47:01.221483878 -0700
+++ b/arch/x86/include/asm/tlb.h	2026-04-23 10:49:26.746985616 -0700
@@ -14,13 +14,22 @@ static inline void tlb_flush(struct mmu_
 {
 	unsigned long start = 0UL, end = TLB_FLUSH_ALL;
 	unsigned int stride_shift = tlb_get_unmap_shift(tlb);
+	bool wake_lazy_cpus;
 
 	if (!tlb->fullmm && !tlb->need_flush_all) {
 		start = tlb->start;
 		end = tlb->end;
 	}
 
-	flush_tlb_mm_range(tlb->mm, start, end, stride_shift, tlb->freed_tables);
+	/*
+	 * Ensure all paging structure caches on all CPUs are flushed
+	 * when freeing page tables. Otherwise, a lazy CPU might wake
+	 * up and start walking previously-freed page tables and
+	 * caching garbage.
+	 */
+	wake_lazy_cpus = tlb->freed_tables;
+
+	flush_tlb_mm_range(tlb->mm, start, end, stride_shift, wake_lazy_cpus);
 }
 
 static inline void invlpg(unsigned long addr)
diff -puN arch/x86/hyperv/mmu.c~flush_tlb_mm_range-lazy arch/x86/hyperv/mmu.c
--- a/arch/x86/hyperv/mmu.c~flush_tlb_mm_range-lazy	2026-04-23 10:53:05.251268911 -0700
+++ b/arch/x86/hyperv/mmu.c	2026-04-23 10:53:28.622156121 -0700
@@ -63,7 +63,7 @@ static void hyperv_flush_tlb_multi(const
 	struct hv_tlb_flush *flush;
 	u64 status;
 	unsigned long flags;
-	bool do_lazy = !info->freed_tables;
+	bool do_lazy = !info->wake_lazy_cpus;
 
 	trace_hyperv_mmu_flush_tlb_multi(cpus, info);
 
@@ -198,7 +198,7 @@ static u64 hyperv_flush_tlb_others_ex(co
 
 	flush->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
 	nr_bank = cpumask_to_vpset_skip(&flush->hv_vp_set, cpus,
-					info->freed_tables ? NULL : cpu_is_lazy);
+					info->wake_lazy_cpus ? NULL : cpu_is_lazy);
 	if (nr_bank < 0)
 		return HV_STATUS_INVALID_PARAMETER;
 
_