From mboxrd@z Thu Jan 1 00:00:00 1970 Received: from foss.arm.com (foss.arm.com [217.140.110.172]) by smtp.subspace.kernel.org (Postfix) with ESMTP id 2253722A1D5 for ; Thu, 18 Dec 2025 07:05:45 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=217.140.110.172 ARC-Seal:i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1766041549; cv=none; b=h2hUa16+URnmEVqEJ3rxPPZuB/EOEo7XO7x+z/CIudXHf5AGJa5C6B0yBxYFy6jFDCPmW1knAsrJ3gDGr4LhFa7tFUUEETG0qUBvVgyeKeSRhhaBRV65xv5iWjWg7xo8lDC9kOsRkHJxM0O4CAIwxG4Fhrh/Rq1u+jlLbAkwdGE= ARC-Message-Signature:i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1766041549; c=relaxed/simple; bh=0VI/Q0RybeP0UaHL6HNvEaxC1dq1imWIXSNPsPiIdw0=; h=Date:From:To:Cc:Subject:Message-ID:References:MIME-Version: Content-Type:Content-Disposition:In-Reply-To; b=YlMzcCDiLYNvv4tvV0cdIOLIFl0sQ3ewKlp9/jupDEsbx43B/LV/Gbm9flXIOQJR9TRxYiGKHwFkJiws5M3dHSGVjq+PnLlzMX3MmvnsNGrqRNy08ZBJWxKcbFN/rO0wgM3CMqSjEODqhUoFFgvkOkAipEvtMbjKypOf4ZNKos4= ARC-Authentication-Results:i=1; smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=arm.com; spf=pass smtp.mailfrom=arm.com; arc=none smtp.client-ip=217.140.110.172 Authentication-Results: smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=arm.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=arm.com Received: from usa-sjc-imap-foss1.foss.arm.com (unknown [10.121.207.14]) by usa-sjc-mx-foss1.foss.arm.com (Postfix) with ESMTP id 31A8AFEC; Wed, 17 Dec 2025 23:05:38 -0800 (PST) Received: from localhost (a079125.arm.com [10.164.21.37]) by usa-sjc-imap-foss1.foss.arm.com (Postfix) with ESMTPSA id 7933C3F762; Wed, 17 Dec 2025 23:05:44 -0800 (PST) Date: Thu, 18 Dec 2025 12:35:41 +0530 From: Linu Cherian To: Ryan Roberts Cc: Will Deacon , Ard Biesheuvel , Catalin Marinas , Mark Rutland , Linus Torvalds , Oliver Upton , Marc Zyngier , Dev Jain , linux-arm-kernel@lists.infradead.org, linux-kernel@vger.kernel.org Subject: Re: [PATCH 
v1 03/13] arm64: mm: Implicitly invalidate user ASID based on TLBI operation Message-ID: References: <20251216144601.2106412-1-ryan.roberts@arm.com> <20251216144601.2106412-4-ryan.roberts@arm.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Type: text/plain; charset=us-ascii Content-Disposition: inline In-Reply-To: On Thu, Dec 18, 2025 at 12:00:57PM +0530, Linu Cherian wrote: > Ryan, > > On Tue, Dec 16, 2025 at 02:45:48PM +0000, Ryan Roberts wrote: > > When kpti is enabled, separate ASIDs are used for userspace and > > kernelspace, requiring ASID-qualified TLB invalidation by virtual > > address to invalidate both of them. > > > > Push the logic for invalidating the two ASIDs down into the low-level > > tlbi-op-specific functions and remove the burden from the caller to > > handle the kpti-specific behaviour. > > > > Co-developed-by: Will Deacon > > Signed-off-by: Will Deacon > > Signed-off-by: Ryan Roberts > > --- > > arch/arm64/include/asm/tlbflush.h | 27 ++++++++++----------------- > > 1 file changed, 10 insertions(+), 17 deletions(-) > > > > diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h > > index c5111d2afc66..31f43d953ce2 100644 > > --- a/arch/arm64/include/asm/tlbflush.h > > +++ b/arch/arm64/include/asm/tlbflush.h > > @@ -110,6 +110,7 @@ typedef void (*tlbi_op)(u64 arg); > > static __always_inline void vae1is(u64 arg) > > { > > __tlbi(vae1is, arg); > > + __tlbi_user(vae1is, arg); > > } > > > > static __always_inline void vae2is(u64 arg) > > @@ -126,6 +127,7 @@ static __always_inline void vale1(u64 arg) > > static __always_inline void vale1is(u64 arg) > > { > > __tlbi(vale1is, arg); > > + __tlbi_user(vale1is, arg); > > } > > > > static __always_inline void vale2is(u64 arg) > > @@ -162,11 +164,6 @@ static __always_inline void __tlbi_level(tlbi_op op, u64 addr, u32 level) > > op(arg); > > } > > > > -#define __tlbi_user_level(op, arg, level) 
do { \ > > - if (arm64_kernel_unmapped_at_el0()) \ > > - __tlbi_level(op, (arg | USER_ASID_FLAG), level); \ > > -} while (0) > > - > > /* > > * This macro creates a properly formatted VA operand for the TLB RANGE. The > > * value bit assignments are: > > @@ -435,8 +432,6 @@ static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch) > > * @stride: Flush granularity > > * @asid: The ASID of the task (0 for IPA instructions) > > * @tlb_level: Translation Table level hint, if known > > - * @tlbi_user: If 'true', call an additional __tlbi_user() > > - * (typically for user ASIDs). 'flase' for IPA instructions > > * @lpa2: If 'true', the lpa2 scheme is used as set out below > > * > > * When the CPU does not support TLB range operations, flush the TLB > > @@ -462,6 +457,7 @@ static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch) > > static __always_inline void rvae1is(u64 arg) > > { > > __tlbi(rvae1is, arg); > > + __tlbi_user(rvae1is, arg); > > } > > > > static __always_inline void rvale1(u64 arg) > > @@ -473,6 +469,7 @@ static __always_inline void rvale1(u64 arg) > > static __always_inline void rvale1is(u64 arg) > > { > > __tlbi(rvale1is, arg); > > + __tlbi_user(rvale1is, arg); > > } > > > > static __always_inline void rvaale1is(u64 arg) > > @@ -491,7 +488,7 @@ static __always_inline void __tlbi_range(tlbi_op op, u64 arg) > > } > > > > #define __flush_tlb_range_op(op, start, pages, stride, \ > > - asid, tlb_level, tlbi_user, lpa2) \ > > + asid, tlb_level, lpa2) \ > > do { \ > > typeof(start) __flush_start = start; \ > > typeof(pages) __flush_pages = pages; \ > > @@ -506,8 +503,6 @@ do { \ > > (lpa2 && __flush_start != ALIGN(__flush_start, SZ_64K))) { \ > > addr = __TLBI_VADDR(__flush_start, asid); \ > > __tlbi_level(op, addr, tlb_level); \ > > - if (tlbi_user) \ > > - __tlbi_user_level(op, addr, tlb_level); \ > > __flush_start += stride; \ > > __flush_pages -= stride >> PAGE_SHIFT; \ > > continue; \ > > @@ -518,8 +513,6 @@ do 
{ \ > > addr = __TLBI_VADDR_RANGE(__flush_start >> shift, asid, \ > > scale, num, tlb_level); \ > > __tlbi_range(r##op, addr); \ > > - if (tlbi_user) \ > > - __tlbi_user(r##op, addr); \ > > __flush_start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT; \ > > __flush_pages -= __TLBI_RANGE_PAGES(num, scale);\ > > > There are more __tlbi_user invocations in __flush_tlb_mm, __local_flush_tlb_page_nonotify_nosync > and __flush_tlb_page_nosync in this file. Should we not address them as well as > part of this ? > I see that, except for __flush_tlb_mm, the others got addressed in subsequent patches. Should we hint at this in the commit message?