Date: Mon, 5 Jan 2026 11:04:22 +0530
From: Linu Cherian
To: Ryan Roberts
Cc: Will Deacon, Ard Biesheuvel, Catalin Marinas, Mark Rutland,
 Linus Torvalds, Oliver Upton, Marc Zyngier, Dev Jain,
 linux-arm-kernel@lists.infradead.org, linux-kernel@vger.kernel.org
Subject: Re: [PATCH v1 03/13] arm64: mm: Implicitly invalidate user ASID based on TLBI operation
References: <20251216144601.2106412-1-ryan.roberts@arm.com>
 <20251216144601.2106412-4-ryan.roberts@arm.com>
In-Reply-To: <20251216144601.2106412-4-ryan.roberts@arm.com>

Hi Ryan,

On Tue, Dec 16, 2025 at 02:45:48PM +0000, Ryan Roberts wrote:
> When kpti is enabled, separate ASIDs are used for userspace and
> kernelspace, requiring ASID-qualified TLB invalidation by virtual
> address to invalidate both of them.
> 
> Push the logic for invalidating the two ASIDs down into the low-level
> tlbi-op-specific functions and remove the burden from the caller to
> handle the kpti-specific behaviour.
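
For context, the extra invalidation issued by the *e1is helpers below relies
on the existing __tlbi_user() macro, which only fires when kpti is active and
tags the operand with the user ASID. A rough sketch of that helper (based on
the current tlbflush.h; the exact definition may differ in this series), which
mirrors the __tlbi_user_level() macro removed further down:

	/*
	 * Sketch: issue the TLBI against the user ASID only when the
	 * kernel is unmapped at EL0 (kpti), by setting USER_ASID_FLAG
	 * in the operand.
	 */
	#define __tlbi_user(op, arg) do {				\
		if (arm64_kernel_unmapped_at_el0())			\
			__tlbi(op, (arg) | USER_ASID_FLAG);		\
	} while (0)

With that, callers of vae1is()/vale1is()/rvae1is()/rvale1is() no longer need
to handle the user ASID themselves.
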
> Co-developed-by: Will Deacon
> Signed-off-by: Will Deacon
> Signed-off-by: Ryan Roberts
> ---
>  arch/arm64/include/asm/tlbflush.h | 27 ++++++++++-----------------
>  1 file changed, 10 insertions(+), 17 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
> index c5111d2afc66..31f43d953ce2 100644
> --- a/arch/arm64/include/asm/tlbflush.h
> +++ b/arch/arm64/include/asm/tlbflush.h
> @@ -110,6 +110,7 @@ typedef void (*tlbi_op)(u64 arg);
>  static __always_inline void vae1is(u64 arg)
>  {
>  	__tlbi(vae1is, arg);
> +	__tlbi_user(vae1is, arg);
>  }
>  
>  static __always_inline void vae2is(u64 arg)
> @@ -126,6 +127,7 @@ static __always_inline void vale1(u64 arg)
>  static __always_inline void vale1is(u64 arg)
>  {
>  	__tlbi(vale1is, arg);
> +	__tlbi_user(vale1is, arg);
>  }
>  
>  static __always_inline void vale2is(u64 arg)
> @@ -162,11 +164,6 @@ static __always_inline void __tlbi_level(tlbi_op op, u64 addr, u32 level)
>  	op(arg);
>  }
>  
> -#define __tlbi_user_level(op, arg, level) do {				\
> -	if (arm64_kernel_unmapped_at_el0())				\
> -		__tlbi_level(op, (arg | USER_ASID_FLAG), level);	\
> -} while (0)
> -
>  /*
>   * This macro creates a properly formatted VA operand for the TLB RANGE. The
>   * value bit assignments are:
> @@ -435,8 +432,6 @@ static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
>   * @stride:	Flush granularity
>   * @asid:	The ASID of the task (0 for IPA instructions)
>   * @tlb_level:	Translation Table level hint, if known
> - * @tlbi_user:	If 'true', call an additional __tlbi_user()
> - *		(typically for user ASIDs). 'flase' for IPA instructions
>   * @lpa2:	If 'true', the lpa2 scheme is used as set out below
>   *
>   * When the CPU does not support TLB range operations, flush the TLB
> @@ -462,6 +457,7 @@ static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
>  static __always_inline void rvae1is(u64 arg)
>  {
>  	__tlbi(rvae1is, arg);
> +	__tlbi_user(rvae1is, arg);
>  }
>  
>  static __always_inline void rvale1(u64 arg)
> @@ -473,6 +469,7 @@ static __always_inline void rvale1(u64 arg)
>  static __always_inline void rvale1is(u64 arg)
>  {
>  	__tlbi(rvale1is, arg);
> +	__tlbi_user(rvale1is, arg);
>  }
>  
>  static __always_inline void rvaale1is(u64 arg)
> @@ -491,7 +488,7 @@ static __always_inline void __tlbi_range(tlbi_op op, u64 arg)
>  }
>  
>  #define __flush_tlb_range_op(op, start, pages, stride,			\
> -				asid, tlb_level, tlbi_user, lpa2)	\
> +				asid, tlb_level, lpa2)			\
>  do {									\
>  	typeof(start) __flush_start = start;				\
>  	typeof(pages) __flush_pages = pages;				\
> @@ -506,8 +503,6 @@ do {									\
>  		    (lpa2 && __flush_start != ALIGN(__flush_start, SZ_64K))) {	\
>  			addr = __TLBI_VADDR(__flush_start, asid);	\
>  			__tlbi_level(op, addr, tlb_level);		\
> -			if (tlbi_user)					\
> -				__tlbi_user_level(op, addr, tlb_level);	\
>  			__flush_start += stride;			\
>  			__flush_pages -= stride >> PAGE_SHIFT;		\
>  			continue;					\
> @@ -518,8 +513,6 @@ do {									\
>  			addr = __TLBI_VADDR_RANGE(__flush_start >> shift, asid,	\
>  						scale, num, tlb_level);	\
>  			__tlbi_range(r##op, addr);			\
> -			if (tlbi_user)					\
> -				__tlbi_user(r##op, addr);		\
>  			__flush_start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT;	\
>  			__flush_pages -= __TLBI_RANGE_PAGES(num, scale);\
>  		}							\
> @@ -528,7 +521,7 @@ do {									\
>  } while (0)
>  
>  #define __flush_s2_tlb_range_op(op, start, pages, stride, tlb_level)	\
> -	__flush_tlb_range_op(op, start, pages, stride, 0, tlb_level, false, kvm_lpa2_is_enabled());
> +	__flush_tlb_range_op(op, start, pages, stride, 0, tlb_level, kvm_lpa2_is_enabled());
> 
>  static inline bool __flush_tlb_range_limit_excess(unsigned long start,
>  		unsigned long end, unsigned long pages, unsigned long stride)
> @@ -568,10 +561,10 @@ static inline void __flush_tlb_range_nosync(struct mm_struct *mm,
>  
>  	if (last_level)
>  		__flush_tlb_range_op(vale1is, start, pages, stride, asid,
> -				     tlb_level, true, lpa2_is_enabled());
> +				     tlb_level, lpa2_is_enabled());
>  	else
>  		__flush_tlb_range_op(vae1is, start, pages, stride, asid,
> -				     tlb_level, true, lpa2_is_enabled());
> +				     tlb_level, lpa2_is_enabled());
>  
>  	mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, end);
>  }
> @@ -630,7 +623,7 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
>  
>  	dsb(ishst);
>  	__flush_tlb_range_op(vaale1is, start, pages, stride, 0,
> -			     TLBI_TTL_UNKNOWN, false, lpa2_is_enabled());
> +			     TLBI_TTL_UNKNOWN, lpa2_is_enabled());
>  	dsb(ish);
>  	isb();

The __flush_tlb_range_op() call in local_flush_tlb_contpte() appears to have
been missed in this conversion.

-- 
Linu Cherian