From: Mike Rapoport <rppt@kernel.org>
To: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: linux-kernel@vger.kernel.org, hch@infradead.org,
linux-mm@kvack.org, Paul Mackerras <paulus@samba.org>,
akpm@linux-foundation.org, linuxppc-dev@lists.ozlabs.org
Subject: Re: [RFC V1 04/31] powerpc/mm: Enable ARCH_HAS_VM_GET_PAGE_PROT
Date: Fri, 4 Feb 2022 06:44:38 +0200 [thread overview]
Message-ID: <YfyvNphICBzoVfNs@kernel.org> (raw)
In-Reply-To: <46e15116-78fb-e6fe-e0f0-fe776f9348c3@arm.com>
On Fri, Feb 04, 2022 at 08:27:37AM +0530, Anshuman Khandual wrote:
>
> On 2/3/22 11:45 PM, Mike Rapoport wrote:
> > On Mon, Jan 24, 2022 at 06:26:41PM +0530, Anshuman Khandual wrote:
> >> This defines and exports a platform specific custom vm_get_page_prot() via
> >> subscribing ARCH_HAS_VM_GET_PAGE_PROT. Subsequently all __SXXX and __PXXX
> >> macros can be dropped which are no longer needed. While here, this also
> >> localizes arch_vm_get_page_prot() as powerpc_vm_get_page_prot().
> >>
> >> Cc: Michael Ellerman <mpe@ellerman.id.au>
> >> Cc: Paul Mackerras <paulus@samba.org>
> >> Cc: linuxppc-dev@lists.ozlabs.org
> >> Cc: linux-kernel@vger.kernel.org
> >> Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
> >> ---
> >> arch/powerpc/Kconfig | 1 +
> >> arch/powerpc/include/asm/mman.h | 3 +-
> >> arch/powerpc/include/asm/pgtable.h | 19 ------------
> >> arch/powerpc/mm/mmap.c | 47 ++++++++++++++++++++++++++++++
> >> 4 files changed, 49 insertions(+), 21 deletions(-)
> >>
> >> diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
> >> index b779603978e1..ddb4a3687c05 100644
> >> --- a/arch/powerpc/Kconfig
> >> +++ b/arch/powerpc/Kconfig
> >> @@ -135,6 +135,7 @@ config PPC
> >> select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
> >> select ARCH_HAS_UACCESS_FLUSHCACHE
> >> select ARCH_HAS_UBSAN_SANITIZE_ALL
> >> + select ARCH_HAS_VM_GET_PAGE_PROT
> >> select ARCH_HAVE_NMI_SAFE_CMPXCHG
> >> select ARCH_KEEP_MEMBLOCK
> >> select ARCH_MIGHT_HAVE_PC_PARPORT
> >> diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
> >> index 7cb6d18f5cd6..7b10c2031e82 100644
> >> --- a/arch/powerpc/include/asm/mman.h
> >> +++ b/arch/powerpc/include/asm/mman.h
> >> @@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,
> >> }
> >> #define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey)
> >>
> >> -static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
> >> +static inline pgprot_t powerpc_vm_get_page_prot(unsigned long vm_flags)
> >> {
> >> #ifdef CONFIG_PPC_MEM_KEYS
> >> return (vm_flags & VM_SAO) ?
> >> @@ -34,7 +34,6 @@ static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
> >> return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
> >> #endif
> >> }
> >> -#define arch_vm_get_page_prot(vm_flags) arch_vm_get_page_prot(vm_flags)
> >>
> >> static inline bool arch_validate_prot(unsigned long prot, unsigned long addr)
> >> {
> >> diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
> >> index d564d0ecd4cd..3cbb6de20f9d 100644
> >> --- a/arch/powerpc/include/asm/pgtable.h
> >> +++ b/arch/powerpc/include/asm/pgtable.h
> >> @@ -20,25 +20,6 @@ struct mm_struct;
> >> #include <asm/nohash/pgtable.h>
> >> #endif /* !CONFIG_PPC_BOOK3S */
> >>
> >> -/* Note due to the way vm flags are laid out, the bits are XWR */
> >> -#define __P000 PAGE_NONE
> >> -#define __P001 PAGE_READONLY
> >> -#define __P010 PAGE_COPY
> >> -#define __P011 PAGE_COPY
> >> -#define __P100 PAGE_READONLY_X
> >> -#define __P101 PAGE_READONLY_X
> >> -#define __P110 PAGE_COPY_X
> >> -#define __P111 PAGE_COPY_X
> >> -
> >> -#define __S000 PAGE_NONE
> >> -#define __S001 PAGE_READONLY
> >> -#define __S010 PAGE_SHARED
> >> -#define __S011 PAGE_SHARED
> >> -#define __S100 PAGE_READONLY_X
> >> -#define __S101 PAGE_READONLY_X
> >> -#define __S110 PAGE_SHARED_X
> >> -#define __S111 PAGE_SHARED_X
> >> -
> >> #ifndef __ASSEMBLY__
> >>
> >> #ifndef MAX_PTRS_PER_PGD
> >> diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
> >> index c475cf810aa8..7f05e7903bd2 100644
> >> --- a/arch/powerpc/mm/mmap.c
> >> +++ b/arch/powerpc/mm/mmap.c
> >> @@ -254,3 +254,50 @@ void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
> >> mm->get_unmapped_area = arch_get_unmapped_area_topdown;
> >> }
> >> }
> >> +
> >> +static inline pgprot_t __vm_get_page_prot(unsigned long vm_flags)
> >> +{
> >> + switch (vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)) {
> >> + case VM_NONE:
> >> + return PAGE_NONE;
> >> + case VM_READ:
> >> + return PAGE_READONLY;
> >> + case VM_WRITE:
> >> + return PAGE_COPY;
> >> + case VM_READ | VM_WRITE:
> >> + return PAGE_COPY;
> >> + case VM_EXEC:
> >> + return PAGE_READONLY_X;
> >> + case VM_EXEC | VM_READ:
> >> + return PAGE_READONLY_X;
> >> + case VM_EXEC | VM_WRITE:
> >> + return PAGE_COPY_X;
> >> + case VM_EXEC | VM_READ | VM_WRITE:
> >> + return PAGE_COPY_X;
> >> + case VM_SHARED:
> >> + return PAGE_NONE;
> >> + case VM_SHARED | VM_READ:
> >> + return PAGE_READONLY;
> >> + case VM_SHARED | VM_WRITE:
> >> + return PAGE_SHARED;
> >> + case VM_SHARED | VM_READ | VM_WRITE:
> >> + return PAGE_SHARED;
> >> + case VM_SHARED | VM_EXEC:
> >> + return PAGE_READONLY_X;
> >> + case VM_SHARED | VM_EXEC | VM_READ:
> >> + return PAGE_READONLY_X;
> >> + case VM_SHARED | VM_EXEC | VM_WRITE:
> >> + return PAGE_SHARED_X;
> >> + case VM_SHARED | VM_EXEC | VM_READ | VM_WRITE:
> >> + return PAGE_SHARED_X;
> >> + default:
> >> + BUILD_BUG();
> >> + }
> >> +}
> >> +
> >> +pgprot_t vm_get_page_prot(unsigned long vm_flags)
> >> +{
> >> + return __pgprot(pgprot_val(__vm_get_page_prot(vm_flags)) |
> >> + pgprot_val(powerpc_vm_get_page_prot(vm_flags)));
> > Any reason to keep powerpc_vm_get_page_prot() rather than open code it
> > here?
> >
> > This applies to other architectures that implement arch_vm_get_page_prot()
> > and/or arch_filter_pgprot() as well.
>
> Just to minimize the code churn ! But I will be happy to open code them
> here (and in other platforms) if that will be preferred.
I think this will be clearer because all the processing will be in one place.
Besides, this way include/asm/pgtable.h becomes shorter and less crowded.
--
Sincerely yours,
Mike.
WARNING: multiple messages have this Message-ID (diff)
From: Mike Rapoport <rppt@kernel.org>
To: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: linux-mm@kvack.org, linux-kernel@vger.kernel.org,
hch@infradead.org, akpm@linux-foundation.org,
Michael Ellerman <mpe@ellerman.id.au>,
Paul Mackerras <paulus@samba.org>,
linuxppc-dev@lists.ozlabs.org
Subject: Re: [RFC V1 04/31] powerpc/mm: Enable ARCH_HAS_VM_GET_PAGE_PROT
Date: Fri, 4 Feb 2022 06:44:38 +0200 [thread overview]
Message-ID: <YfyvNphICBzoVfNs@kernel.org> (raw)
In-Reply-To: <46e15116-78fb-e6fe-e0f0-fe776f9348c3@arm.com>
On Fri, Feb 04, 2022 at 08:27:37AM +0530, Anshuman Khandual wrote:
>
> On 2/3/22 11:45 PM, Mike Rapoport wrote:
> > On Mon, Jan 24, 2022 at 06:26:41PM +0530, Anshuman Khandual wrote:
> >> This defines and exports a platform specific custom vm_get_page_prot() via
> >> subscribing ARCH_HAS_VM_GET_PAGE_PROT. Subsequently all __SXXX and __PXXX
> >> macros can be dropped which are no longer needed. While here, this also
> >> localizes arch_vm_get_page_prot() as powerpc_vm_get_page_prot().
> >>
> >> Cc: Michael Ellerman <mpe@ellerman.id.au>
> >> Cc: Paul Mackerras <paulus@samba.org>
> >> Cc: linuxppc-dev@lists.ozlabs.org
> >> Cc: linux-kernel@vger.kernel.org
> >> Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
> >> ---
> >> arch/powerpc/Kconfig | 1 +
> >> arch/powerpc/include/asm/mman.h | 3 +-
> >> arch/powerpc/include/asm/pgtable.h | 19 ------------
> >> arch/powerpc/mm/mmap.c | 47 ++++++++++++++++++++++++++++++
> >> 4 files changed, 49 insertions(+), 21 deletions(-)
> >>
> >> diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
> >> index b779603978e1..ddb4a3687c05 100644
> >> --- a/arch/powerpc/Kconfig
> >> +++ b/arch/powerpc/Kconfig
> >> @@ -135,6 +135,7 @@ config PPC
> >> select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
> >> select ARCH_HAS_UACCESS_FLUSHCACHE
> >> select ARCH_HAS_UBSAN_SANITIZE_ALL
> >> + select ARCH_HAS_VM_GET_PAGE_PROT
> >> select ARCH_HAVE_NMI_SAFE_CMPXCHG
> >> select ARCH_KEEP_MEMBLOCK
> >> select ARCH_MIGHT_HAVE_PC_PARPORT
> >> diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
> >> index 7cb6d18f5cd6..7b10c2031e82 100644
> >> --- a/arch/powerpc/include/asm/mman.h
> >> +++ b/arch/powerpc/include/asm/mman.h
> >> @@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,
> >> }
> >> #define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey)
> >>
> >> -static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
> >> +static inline pgprot_t powerpc_vm_get_page_prot(unsigned long vm_flags)
> >> {
> >> #ifdef CONFIG_PPC_MEM_KEYS
> >> return (vm_flags & VM_SAO) ?
> >> @@ -34,7 +34,6 @@ static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
> >> return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
> >> #endif
> >> }
> >> -#define arch_vm_get_page_prot(vm_flags) arch_vm_get_page_prot(vm_flags)
> >>
> >> static inline bool arch_validate_prot(unsigned long prot, unsigned long addr)
> >> {
> >> diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
> >> index d564d0ecd4cd..3cbb6de20f9d 100644
> >> --- a/arch/powerpc/include/asm/pgtable.h
> >> +++ b/arch/powerpc/include/asm/pgtable.h
> >> @@ -20,25 +20,6 @@ struct mm_struct;
> >> #include <asm/nohash/pgtable.h>
> >> #endif /* !CONFIG_PPC_BOOK3S */
> >>
> >> -/* Note due to the way vm flags are laid out, the bits are XWR */
> >> -#define __P000 PAGE_NONE
> >> -#define __P001 PAGE_READONLY
> >> -#define __P010 PAGE_COPY
> >> -#define __P011 PAGE_COPY
> >> -#define __P100 PAGE_READONLY_X
> >> -#define __P101 PAGE_READONLY_X
> >> -#define __P110 PAGE_COPY_X
> >> -#define __P111 PAGE_COPY_X
> >> -
> >> -#define __S000 PAGE_NONE
> >> -#define __S001 PAGE_READONLY
> >> -#define __S010 PAGE_SHARED
> >> -#define __S011 PAGE_SHARED
> >> -#define __S100 PAGE_READONLY_X
> >> -#define __S101 PAGE_READONLY_X
> >> -#define __S110 PAGE_SHARED_X
> >> -#define __S111 PAGE_SHARED_X
> >> -
> >> #ifndef __ASSEMBLY__
> >>
> >> #ifndef MAX_PTRS_PER_PGD
> >> diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
> >> index c475cf810aa8..7f05e7903bd2 100644
> >> --- a/arch/powerpc/mm/mmap.c
> >> +++ b/arch/powerpc/mm/mmap.c
> >> @@ -254,3 +254,50 @@ void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
> >> mm->get_unmapped_area = arch_get_unmapped_area_topdown;
> >> }
> >> }
> >> +
> >> +static inline pgprot_t __vm_get_page_prot(unsigned long vm_flags)
> >> +{
> >> + switch (vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)) {
> >> + case VM_NONE:
> >> + return PAGE_NONE;
> >> + case VM_READ:
> >> + return PAGE_READONLY;
> >> + case VM_WRITE:
> >> + return PAGE_COPY;
> >> + case VM_READ | VM_WRITE:
> >> + return PAGE_COPY;
> >> + case VM_EXEC:
> >> + return PAGE_READONLY_X;
> >> + case VM_EXEC | VM_READ:
> >> + return PAGE_READONLY_X;
> >> + case VM_EXEC | VM_WRITE:
> >> + return PAGE_COPY_X;
> >> + case VM_EXEC | VM_READ | VM_WRITE:
> >> + return PAGE_COPY_X;
> >> + case VM_SHARED:
> >> + return PAGE_NONE;
> >> + case VM_SHARED | VM_READ:
> >> + return PAGE_READONLY;
> >> + case VM_SHARED | VM_WRITE:
> >> + return PAGE_SHARED;
> >> + case VM_SHARED | VM_READ | VM_WRITE:
> >> + return PAGE_SHARED;
> >> + case VM_SHARED | VM_EXEC:
> >> + return PAGE_READONLY_X;
> >> + case VM_SHARED | VM_EXEC | VM_READ:
> >> + return PAGE_READONLY_X;
> >> + case VM_SHARED | VM_EXEC | VM_WRITE:
> >> + return PAGE_SHARED_X;
> >> + case VM_SHARED | VM_EXEC | VM_READ | VM_WRITE:
> >> + return PAGE_SHARED_X;
> >> + default:
> >> + BUILD_BUG();
> >> + }
> >> +}
> >> +
> >> +pgprot_t vm_get_page_prot(unsigned long vm_flags)
> >> +{
> >> + return __pgprot(pgprot_val(__vm_get_page_prot(vm_flags)) |
> >> + pgprot_val(powerpc_vm_get_page_prot(vm_flags)));
> > Any reason to keep powerpc_vm_get_page_prot() rather than open code it
> > here?
> >
> > This applies to other architectures that implement arch_vm_get_page_prot()
> > and/or arch_filter_pgprot() as well.
>
> Just to minimize the code churn ! But I will be happy to open code them
> here (and in other platforms) if that will be preferred.
I think this will be clearer because all the processing will be in one place.
Besides, this way include/asm/pgtable.h becomes shorter and less crowded.
--
Sincerely yours,
Mike.
next prev parent reply other threads:[~2022-02-04 4:45 UTC|newest]
Thread overview: 70+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-01-24 12:56 [RFC V1 00/31] mm/mmap: Drop protection_map[] and platform's __SXXX/__PXXX requirements Anshuman Khandual
2022-01-24 12:56 ` [RFC V1 01/31] mm/debug_vm_pgtable: Directly use vm_get_page_prot() Anshuman Khandual
2022-01-26 7:15 ` Christoph Hellwig
2022-01-27 4:16 ` Anshuman Khandual
2022-01-24 12:56 ` [RFC V1 02/31] mm/mmap: Clarify protection_map[] indices Anshuman Khandual
2022-01-26 7:16 ` Christoph Hellwig
2022-01-27 4:07 ` Anshuman Khandual
2022-01-27 12:39 ` Mike Rapoport
2022-01-31 3:25 ` Anshuman Khandual
2022-02-05 9:10 ` Firo Yang
2022-02-09 3:23 ` Anshuman Khandual
2022-01-24 12:56 ` [RFC V1 03/31] mm/mmap: Add new config ARCH_HAS_VM_GET_PAGE_PROT Anshuman Khandual
2022-01-24 12:56 ` [RFC V1 04/31] powerpc/mm: Enable ARCH_HAS_VM_GET_PAGE_PROT Anshuman Khandual
2022-01-24 12:56 ` Anshuman Khandual
2022-02-03 18:15 ` Mike Rapoport
2022-02-03 18:15 ` Mike Rapoport
2022-02-04 2:57 ` Anshuman Khandual
2022-02-04 2:57 ` Anshuman Khandual
2022-02-04 4:44 ` Mike Rapoport [this message]
2022-02-04 4:44 ` Mike Rapoport
2022-01-24 12:56 ` [RFC V1 05/31] arm64/mm: " Anshuman Khandual
2022-01-24 12:56 ` Anshuman Khandual
2022-01-24 12:56 ` [RFC V1 06/31] sparc/mm: " Anshuman Khandual
2022-01-24 12:58 ` David Miller
2022-01-24 18:21 ` Khalid Aziz
2022-01-24 12:56 ` [RFC V1 07/31] mips/mm: " Anshuman Khandual
2022-01-24 12:56 ` [RFC V1 08/31] m68k/mm: " Anshuman Khandual
2022-01-24 14:13 ` Andreas Schwab
2022-01-25 3:42 ` Anshuman Khandual
2022-01-24 12:56 ` [RFC V1 09/31] arm/mm: " Anshuman Khandual
2022-01-24 12:56 ` Anshuman Khandual
2022-01-24 17:06 ` Russell King (Oracle)
2022-01-24 17:06 ` Russell King (Oracle)
2022-01-25 3:36 ` Anshuman Khandual
2022-01-25 3:36 ` Anshuman Khandual
2022-01-24 12:56 ` [RFC V1 10/31] x86/mm: " Anshuman Khandual
2022-01-24 12:56 ` [RFC V1 11/31] mm/mmap: Drop protection_map[] Anshuman Khandual
2022-01-24 12:56 ` [RFC V1 12/31] mm/mmap: Drop arch_filter_pgprot() Anshuman Khandual
2022-01-24 12:56 ` [RFC V1 13/31] mm/mmap: Drop arch_vm_get_page_pgprot() Anshuman Khandual
2022-01-24 12:56 ` [RFC V1 14/31] s390/mm: Enable ARCH_HAS_VM_GET_PAGE_PROT Anshuman Khandual
2022-01-24 12:56 ` [RFC V1 15/31] riscv/mm: " Anshuman Khandual
2022-01-24 12:56 ` Anshuman Khandual
2022-01-24 12:56 ` [RFC V1 16/31] alpha/mm: " Anshuman Khandual
2022-01-24 12:56 ` [RFC V1 17/31] sh/mm: " Anshuman Khandual
2022-01-24 12:56 ` [RFC V1 18/31] arc/mm: " Anshuman Khandual
2022-01-24 12:56 ` Anshuman Khandual
2022-01-24 12:56 ` [RFC V1 19/31] csky/mm: " Anshuman Khandual
2022-01-24 12:56 ` [RFC V1 20/31] extensa/mm: " Anshuman Khandual
2022-01-24 12:56 ` [RFC V1 21/31] parisc/mm: " Anshuman Khandual
2022-01-25 16:53 ` Rolf Eike Beer
2022-01-27 4:06 ` Anshuman Khandual
2022-01-24 12:56 ` [OpenRISC] [RFC V1 22/31] openrisc/mm: " Anshuman Khandual
2022-01-24 12:56 ` Anshuman Khandual
2022-02-05 6:58 ` [OpenRISC] " Stafford Horne
2022-02-05 6:58 ` Stafford Horne
2022-01-24 12:57 ` [RFC V1 23/31] um/mm: " Anshuman Khandual
2022-01-24 12:57 ` Anshuman Khandual
2022-01-24 12:57 ` [RFC V1 24/31] microblaze/mm: " Anshuman Khandual
2022-01-24 12:57 ` [RFC V1 25/31] nios2/mm: " Anshuman Khandual
2022-01-26 16:38 ` Dinh Nguyen
2022-01-24 12:57 ` [RFC V1 26/31] hexagon/mm: " Anshuman Khandual
2022-01-24 12:57 ` [RFC V1 27/31] nds32/mm: " Anshuman Khandual
2022-01-24 12:57 ` [RFC V1 28/31] ia64/mm: " Anshuman Khandual
2022-01-24 12:57 ` Anshuman Khandual
2022-01-24 12:57 ` [RFC V1 29/31] mm/mmap: Drop generic vm_get_page_prot() Anshuman Khandual
2022-01-24 12:57 ` [RFC V1 30/31] mm/mmap: Drop ARCH_HAS_VM_GET_PAGE_PROT Anshuman Khandual
2022-01-24 12:57 ` [RFC V1 31/31] mm/mmap: Define macros for vm_flags access permission combinations Anshuman Khandual
2022-02-03 5:24 ` Anshuman Khandual
2022-01-27 12:38 ` [RFC V1 00/31] mm/mmap: Drop protection_map[] and platform's __SXXX/__PXXX requirements Mike Rapoport
2022-01-31 3:35 ` Anshuman Khandual
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=YfyvNphICBzoVfNs@kernel.org \
--to=rppt@kernel.org \
--cc=akpm@linux-foundation.org \
--cc=anshuman.khandual@arm.com \
--cc=hch@infradead.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=linuxppc-dev@lists.ozlabs.org \
--cc=paulus@samba.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.