Subject: Re: [PATCH v1 03/13] arm64: mm: Implicitly invalidate user ASID based on TLBI operation
From: Ryan Roberts
Date: Mon, 5 Jan 2026 17:13:53 +0000
To: Linu Cherian
Cc: Will Deacon, Ard Biesheuvel, Catalin Marinas, Mark Rutland, Linus Torvalds, Oliver Upton, Marc Zyngier, Dev Jain, linux-arm-kernel@lists.infradead.org, linux-kernel@vger.kernel.org
References: <20251216144601.2106412-1-ryan.roberts@arm.com> <20251216144601.2106412-4-ryan.roberts@arm.com>

On 05/01/2026 05:34, Linu Cherian wrote:
> Hi Ryan,
>
> On Tue, Dec 16, 2025 at 02:45:48PM +0000, Ryan Roberts wrote:
>> When kpti is enabled, separate ASIDs are used for userspace and
>> kernelspace, requiring ASID-qualified TLB invalidation by virtual
>> address to invalidate both of them.
>>
>> Push the logic for invalidating the two ASIDs down into the low-level
>> tlbi-op-specific functions and remove the burden from the caller to
>> handle the kpti-specific behaviour.
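[Aside for anyone skimming the thread: __tlbi_user() is the pre-existing
kpti helper that this patch pushes down into the low-level ops. Modulo
the exact definitions in asm/mmu.h and asm/tlbflush.h, it expands to
something like:

        /* User ASID = kernel ASID with bit 48 set in the TLBI operand. */
        #define USER_ASID_FLAG  (UL(1) << 48)

        /* Re-issue the TLBI for the user ASID, but only when kpti has
         * actually split the kernel and user address spaces. */
        #define __tlbi_user(op, arg) do {                       \
                if (arm64_kernel_unmapped_at_el0())             \
                        __tlbi(op, (arg) | USER_ASID_FLAG);     \
        } while (0)

so each EL1 op taking an ASID-qualified VA now invalidates both ASIDs
itself, and callers no longer need to pass a tlbi_user flag.]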
>>
>> Co-developed-by: Will Deacon
>> Signed-off-by: Will Deacon
>> Signed-off-by: Ryan Roberts
>> ---
>>  arch/arm64/include/asm/tlbflush.h | 27 ++++++++++-----------------
>>  1 file changed, 10 insertions(+), 17 deletions(-)
>>
>> diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
>> index c5111d2afc66..31f43d953ce2 100644
>> --- a/arch/arm64/include/asm/tlbflush.h
>> +++ b/arch/arm64/include/asm/tlbflush.h
>> @@ -110,6 +110,7 @@ typedef void (*tlbi_op)(u64 arg);
>>  static __always_inline void vae1is(u64 arg)
>>  {
>>          __tlbi(vae1is, arg);
>> +        __tlbi_user(vae1is, arg);
>>  }
>>
>>  static __always_inline void vae2is(u64 arg)
>> @@ -126,6 +127,7 @@ static __always_inline void vale1(u64 arg)
>>  static __always_inline void vale1is(u64 arg)
>>  {
>>          __tlbi(vale1is, arg);
>> +        __tlbi_user(vale1is, arg);
>>  }
>>
>>  static __always_inline void vale2is(u64 arg)
>> @@ -162,11 +164,6 @@ static __always_inline void __tlbi_level(tlbi_op op, u64 addr, u32 level)
>>          op(arg);
>>  }
>>
>> -#define __tlbi_user_level(op, arg, level) do {                          \
>> -        if (arm64_kernel_unmapped_at_el0())                              \
>> -                __tlbi_level(op, (arg | USER_ASID_FLAG), level);         \
>> -} while (0)
>> -
>>  /*
>>   * This macro creates a properly formatted VA operand for the TLB RANGE. The
>>   * value bit assignments are:
>> @@ -435,8 +432,6 @@ static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
>>   * @stride: Flush granularity
>>   * @asid: The ASID of the task (0 for IPA instructions)
>>   * @tlb_level: Translation Table level hint, if known
>> - * @tlbi_user: If 'true', call an additional __tlbi_user()
>> - *             (typically for user ASIDs). 'flase' for IPA instructions
>>   * @lpa2: If 'true', the lpa2 scheme is used as set out below
>>   *
>>   * When the CPU does not support TLB range operations, flush the TLB
>> @@ -462,6 +457,7 @@ static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
>>  static __always_inline void rvae1is(u64 arg)
>>  {
>>          __tlbi(rvae1is, arg);
>> +        __tlbi_user(rvae1is, arg);
>>  }
>>
>>  static __always_inline void rvale1(u64 arg)
>> @@ -473,6 +469,7 @@ static __always_inline void rvale1(u64 arg)
>>  static __always_inline void rvale1is(u64 arg)
>>  {
>>          __tlbi(rvale1is, arg);
>> +        __tlbi_user(rvale1is, arg);
>>  }
>>
>>  static __always_inline void rvaale1is(u64 arg)
>> @@ -491,7 +488,7 @@ static __always_inline void __tlbi_range(tlbi_op op, u64 arg)
>>  }
>>
>>  #define __flush_tlb_range_op(op, start, pages, stride,                  \
>> -                             asid, tlb_level, tlbi_user, lpa2)          \
>> +                             asid, tlb_level, lpa2)                     \
>>  do {                                                                    \
>>          typeof(start) __flush_start = start;                            \
>>          typeof(pages) __flush_pages = pages;                            \
>> @@ -506,8 +503,6 @@ do {                                                                    \
>>                      (lpa2 && __flush_start != ALIGN(__flush_start, SZ_64K))) { \
>>                          addr = __TLBI_VADDR(__flush_start, asid);       \
>>                          __tlbi_level(op, addr, tlb_level);              \
>> -                        if (tlbi_user)                                  \
>> -                                __tlbi_user_level(op, addr, tlb_level); \
>>                          __flush_start += stride;                        \
>>                          __flush_pages -= stride >> PAGE_SHIFT;          \
>>                          continue;                                       \
>> @@ -518,8 +513,6 @@ do {                                                                    \
>>                          addr = __TLBI_VADDR_RANGE(__flush_start >> shift, asid, \
>>                                                  scale, num, tlb_level); \
>>                          __tlbi_range(r##op, addr);                      \
>> -                        if (tlbi_user)                                  \
>> -                                __tlbi_user(r##op, addr);               \
>>                          __flush_start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT; \
>>                          __flush_pages -= __TLBI_RANGE_PAGES(num, scale);\
>>                  }                                                       \
>> @@ -528,7 +521,7 @@ do {                                                                    \
>>  } while (0)
>>
>>  #define __flush_s2_tlb_range_op(op, start, pages, stride, tlb_level)    \
>> -        __flush_tlb_range_op(op, start, pages, stride, 0, tlb_level, false, kvm_lpa2_is_enabled());
>> +        __flush_tlb_range_op(op, start, pages, stride, 0, tlb_level, kvm_lpa2_is_enabled());
>>
>>  static inline bool __flush_tlb_range_limit_excess(unsigned long start,
>>                                  unsigned long end, unsigned long pages, unsigned long stride)
>> @@ -568,10 +561,10 @@ static inline void __flush_tlb_range_nosync(struct mm_struct *mm,
>>
>>          if (last_level)
>>                  __flush_tlb_range_op(vale1is, start, pages, stride, asid,
>> -                                     tlb_level, true, lpa2_is_enabled());
>> +                                     tlb_level, lpa2_is_enabled());
>>          else
>>                  __flush_tlb_range_op(vae1is, start, pages, stride, asid,
>> -                                     tlb_level, true, lpa2_is_enabled());
>> +                                     tlb_level, lpa2_is_enabled());
>>
>>          mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, end);
>>  }
>> @@ -630,7 +623,7 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
>>
>>          dsb(ishst);
>>          __flush_tlb_range_op(vaale1is, start, pages, stride, 0,
>> -                             TLBI_TTL_UNKNOWN, false, lpa2_is_enabled());
>> +                             TLBI_TTL_UNKNOWN, lpa2_is_enabled());
>>          dsb(ish);
>>          isb();
>
>
> __flush_tlb_range_op call in local_flush_tlb_contpte is missed out.

Thanks; this is part of my botched rebase. I'll fix it in the next rev.

>
> --
> Linu Cherian
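P.S. The fixup for local_flush_tlb_contpte should be the same mechanical
change as the other callers in this patch, i.e. dropping the tlbi_user
argument. Purely as an illustration (the op and arguments shown here are
hypothetical; the real callsite lives in a later patch in this series):

        /* hypothetical callsite; the real op/args are in the contpte patch */
        __flush_tlb_range_op(vale1, start, pages, stride, asid,
                             tlb_level, lpa2_is_enabled());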