From: Laurent Dufour <ldufour@linux.ibm.com>
To: Suren Baghdasaryan <surenb@google.com>, akpm@linux-foundation.org
Cc: michel@lespinasse.org, joelaf@google.com, songliubraving@fb.com,
	mhocko@suse.com, david@redhat.com, peterz@infradead.org,
	bigeasy@linutronix.de, peterx@redhat.com, dhowells@redhat.com,
	linux-mm@kvack.org, jglisse@google.com, dave@stgolabs.net,
	minchan@google.com, x86@kernel.org, hughd@google.com,
	willy@infradead.org, laurent.dufour@fr.ibm.com, mgorman@suse.de,
	rientjes@google.com, axelrasmussen@google.com,
	kernel-team@android.com, paulmck@kernel.org,
	liam.howlett@oracle.com, luto@kernel.org, vbabka@suse.cz,
	linux-arm-kernel@lists.infradead.org, kent.overstreet@linux.dev,
	linux-kernel@vger.kernel.org, hannes@cmpxchg.org,
	linuxppc-dev@lists.ozlabs.org
Subject: Re: [RFC PATCH RESEND 13/28] mm: conditionally mark VMA as locked in free_pgtables and unmap_page_range
Date: Fri, 9 Sep 2022 12:33:44 +0200
Message-ID: <667ad41f-21af-ccc8-be7b-7e6c49930195@linux.ibm.com>
In-Reply-To: <20220901173516.702122-14-surenb@google.com>

On 01/09/2022 at 19:35, Suren Baghdasaryan wrote:
> The free_pgtables and unmap_page_range functions can be called with
> mmap_lock held for write (e.g. in mmap_region), held for read (e.g. in
> madvise_pageout), or not held at all (e.g. madvise_remove might drop
> mmap_lock before calling vfs_fallocate, which ends up calling
> unmap_page_range).
> Provide free_pgtables and unmap_page_range with an additional argument
> indicating whether to mark the VMA as locked or not, based on the usage.
> The parameter is set based on whether mmap_lock is held in write mode
> during the call. This ensures no change in behavior between mmap_lock
> and per-vma locks.
> 
> Signed-off-by: Suren Baghdasaryan <surenb@google.com>
> ---
>  include/linux/mm.h |  2 +-
>  mm/internal.h      |  4 ++--
>  mm/memory.c        | 32 +++++++++++++++++++++-----------
>  mm/mmap.c          | 17 +++++++++--------
>  mm/oom_kill.c      |  3 ++-
>  5 files changed, 35 insertions(+), 23 deletions(-)
> 
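Before commenting on the hunks, a condensed view of the lock_vma values
the patch passes at each call site changed below (extracted from the
diff itself, arguments elided for readability):

	unmap_region(..., true);           /* mmap_region() error path: mmap_lock held for write */
	unmap_region(..., !downgrade);     /* __do_munmap(): false once downgraded to read */
	unmap_vmas(..., true);             /* exit_mmap() */
	free_pgtables(..., true);          /* exit_mmap() */
	unmap_single_vma(..., false);      /* zap_page_range() */
	zap_page_range_single(..., true);  /* zap_vma_ptes() */
	zap_page_range_single(..., false); /* unmap_mapping_range_vma() */
	unmap_page_range(..., false);      /* __oom_reap_task_mm(): mmap_lock held for read */
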
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index 476bf936c5f0..dc72be923e5b 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -1874,7 +1874,7 @@ void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
>  void zap_page_range(struct vm_area_struct *vma, unsigned long address,
>  		    unsigned long size);
>  void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
> -		unsigned long start, unsigned long end);
> +		unsigned long start, unsigned long end, bool lock_vma);
>  
>  struct mmu_notifier_range;
>  
> diff --git a/mm/internal.h b/mm/internal.h
> index 785409805ed7..e6c0f999e0cb 100644
> --- a/mm/internal.h
> +++ b/mm/internal.h
> @@ -85,14 +85,14 @@ bool __folio_end_writeback(struct folio *folio);
>  void deactivate_file_folio(struct folio *folio);
>  
>  void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
> -		unsigned long floor, unsigned long ceiling);
> +		unsigned long floor, unsigned long ceiling, bool lock_vma);
>  void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);
>  
>  struct zap_details;
>  void unmap_page_range(struct mmu_gather *tlb,
>  			     struct vm_area_struct *vma,
>  			     unsigned long addr, unsigned long end,
> -			     struct zap_details *details);
> +			     struct zap_details *details, bool lock_vma);
>  
>  void page_cache_ra_order(struct readahead_control *, struct file_ra_state *,
>  		unsigned int order);
> diff --git a/mm/memory.c b/mm/memory.c
> index 4ba73f5aa8bb..9ac9944e8c62 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -403,7 +403,7 @@ void free_pgd_range(struct mmu_gather *tlb,
>  }
>  
>  void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
> -		unsigned long floor, unsigned long ceiling)
> +		unsigned long floor, unsigned long ceiling, bool lock_vma)
>  {
>  	while (vma) {
>  		struct vm_area_struct *next = vma->vm_next;
> @@ -413,6 +413,8 @@ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
>  		 * Hide vma from rmap and truncate_pagecache before freeing
>  		 * pgtables
>  		 */
> +		if (lock_vma)
> +			vma_mark_locked(vma);
>  		unlink_anon_vmas(vma);
>  		unlink_file_vma(vma);
>  
> @@ -427,6 +429,8 @@ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
>  			       && !is_vm_hugetlb_page(next)) {
>  				vma = next;
>  				next = vma->vm_next;
> +				if (lock_vma)
> +					vma_mark_locked(vma);
>  				unlink_anon_vmas(vma);
>  				unlink_file_vma(vma);
>  			}
> @@ -1631,12 +1635,16 @@ static inline unsigned long zap_p4d_range(struct mmu_gather *tlb,
>  void unmap_page_range(struct mmu_gather *tlb,
>  			     struct vm_area_struct *vma,
>  			     unsigned long addr, unsigned long end,
> -			     struct zap_details *details)
> +			     struct zap_details *details,
> +			     bool lock_vma)
>  {
>  	pgd_t *pgd;
>  	unsigned long next;
>  
>  	BUG_ON(addr >= end);
> +	if (lock_vma)
> +		vma_mark_locked(vma);

I'm wondering whether that is really needed here.
The following processing only deals with the page table entries. If this
can already be called today without holding the mmap_lock, it should be
safe not to mark the VMA locked (indeed, the VMA itself is not touched).

Thus unmap_single_vma() below would not need to be changed, nor would
its callers.

Conversely, if locking is required here, I think there is a real
potential issue in the current kernel.
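
To make that concrete, a rough sketch of the alternative (derived from
the hunks above, not a tested change): keep the conditional marking in
free_pgtables(), where the VMA really is unhooked, and leave
unmap_page_range() with its current prototype:

	void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
			   unsigned long floor, unsigned long ceiling, bool lock_vma)
	{
		while (vma) {
			...
			/* VMA is about to be unhooked from rmap: mark it */
			if (lock_vma)
				vma_mark_locked(vma);
			unlink_anon_vmas(vma);
			unlink_file_vma(vma);
			...
		}
	}

	/* Unchanged: only walks and clears page table entries */
	void unmap_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
			      unsigned long addr, unsigned long end,
			      struct zap_details *details);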

> +
>  	tlb_start_vma(tlb, vma);
>  	pgd = pgd_offset(vma->vm_mm, addr);
>  	do {
> @@ -1652,7 +1660,7 @@ void unmap_page_range(struct mmu_gather *tlb,
>  static void unmap_single_vma(struct mmu_gather *tlb,
>  		struct vm_area_struct *vma, unsigned long start_addr,
>  		unsigned long end_addr,
> -		struct zap_details *details)
> +		struct zap_details *details, bool lock_vma)
>  {
>  	unsigned long start = max(vma->vm_start, start_addr);
>  	unsigned long end;
> @@ -1691,7 +1699,7 @@ static void unmap_single_vma(struct mmu_gather *tlb,
>  				i_mmap_unlock_write(vma->vm_file->f_mapping);
>  			}
>  		} else
> -			unmap_page_range(tlb, vma, start, end, details);
> +			unmap_page_range(tlb, vma, start, end, details, lock_vma);
>  	}
>  }
>  
> @@ -1715,7 +1723,7 @@ static void unmap_single_vma(struct mmu_gather *tlb,
>   */
>  void unmap_vmas(struct mmu_gather *tlb,
>  		struct vm_area_struct *vma, unsigned long start_addr,
> -		unsigned long end_addr)
> +		unsigned long end_addr, bool lock_vma)
>  {
>  	struct mmu_notifier_range range;
>  	struct zap_details details = {
> @@ -1728,7 +1736,8 @@ void unmap_vmas(struct mmu_gather *tlb,
>  				start_addr, end_addr);
>  	mmu_notifier_invalidate_range_start(&range);
>  	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)
> -		unmap_single_vma(tlb, vma, start_addr, end_addr, &details);
> +		unmap_single_vma(tlb, vma, start_addr, end_addr, &details,
> +				 lock_vma);
>  	mmu_notifier_invalidate_range_end(&range);
>  }
>  
> @@ -1753,7 +1762,7 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start,
>  	update_hiwater_rss(vma->vm_mm);
>  	mmu_notifier_invalidate_range_start(&range);
>  	for ( ; vma && vma->vm_start < range.end; vma = vma->vm_next)
> -		unmap_single_vma(&tlb, vma, start, range.end, NULL);
> +		unmap_single_vma(&tlb, vma, start, range.end, NULL, false);
>  	mmu_notifier_invalidate_range_end(&range);
>  	tlb_finish_mmu(&tlb);
>  }
> @@ -1768,7 +1777,7 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start,
>   * The range must fit into one VMA.
>   */
>  static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
> -		unsigned long size, struct zap_details *details)
> +		unsigned long size, struct zap_details *details, bool lock_vma)
>  {
>  	struct mmu_notifier_range range;
>  	struct mmu_gather tlb;
> @@ -1779,7 +1788,7 @@ static void zap_page_range_single(struct vm_area_struct *vma, unsigned long addr
>  	tlb_gather_mmu(&tlb, vma->vm_mm);
>  	update_hiwater_rss(vma->vm_mm);
>  	mmu_notifier_invalidate_range_start(&range);
> -	unmap_single_vma(&tlb, vma, address, range.end, details);
> +	unmap_single_vma(&tlb, vma, address, range.end, details, lock_vma);
>  	mmu_notifier_invalidate_range_end(&range);
>  	tlb_finish_mmu(&tlb);
>  }
> @@ -1802,7 +1811,7 @@ void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
>  	    		!(vma->vm_flags & VM_PFNMAP))
>  		return;
>  
> -	zap_page_range_single(vma, address, size, NULL);
> +	zap_page_range_single(vma, address, size, NULL, true);
>  }
>  EXPORT_SYMBOL_GPL(zap_vma_ptes);
>  
> @@ -3483,7 +3492,8 @@ static void unmap_mapping_range_vma(struct vm_area_struct *vma,
>  		unsigned long start_addr, unsigned long end_addr,
>  		struct zap_details *details)
>  {
> -	zap_page_range_single(vma, start_addr, end_addr - start_addr, details);
> +	zap_page_range_single(vma, start_addr, end_addr - start_addr, details,
> +			      false);
>  }
>  
>  static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
> diff --git a/mm/mmap.c b/mm/mmap.c
> index 121544fd90de..094678b4434b 100644
> --- a/mm/mmap.c
> +++ b/mm/mmap.c
> @@ -79,7 +79,7 @@ core_param(ignore_rlimit_data, ignore_rlimit_data, bool, 0644);
>  
>  static void unmap_region(struct mm_struct *mm,
>  		struct vm_area_struct *vma, struct vm_area_struct *prev,
> -		unsigned long start, unsigned long end);
> +		unsigned long start, unsigned long end, bool lock_vma);
>  
>  static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
>  {
> @@ -1866,7 +1866,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
>  	vma->vm_file = NULL;
>  
>  	/* Undo any partial mapping done by a device driver. */
> -	unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
> +	unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end, true);
>  	if (vm_flags & VM_SHARED)
>  		mapping_unmap_writable(file->f_mapping);
>  free_vma: 
> @@ -2626,7 +2626,7 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
>   */
>  static void unmap_region(struct mm_struct *mm,
>  		struct vm_area_struct *vma, struct vm_area_struct *prev,
> -		unsigned long start, unsigned long end)
> +		unsigned long start, unsigned long end, bool lock_vma)
>  {
>  	struct vm_area_struct *next = vma_next(mm, prev);
>  	struct mmu_gather tlb;
> @@ -2634,9 +2634,10 @@ static void unmap_region(struct mm_struct *mm,
>  	lru_add_drain();
>  	tlb_gather_mmu(&tlb, mm);
>  	update_hiwater_rss(mm);
> -	unmap_vmas(&tlb, vma, start, end);
> +	unmap_vmas(&tlb, vma, start, end, lock_vma);
>  	free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
> -				 next ? next->vm_start : USER_PGTABLES_CEILING);
> +				 next ? next->vm_start : USER_PGTABLES_CEILING,
> +				 lock_vma);
>  	tlb_finish_mmu(&tlb);
>  }
>  
> @@ -2849,7 +2850,7 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
>  	if (downgrade)
>  		mmap_write_downgrade(mm);
>  
> -	unmap_region(mm, vma, prev, start, end);
> +	unmap_region(mm, vma, prev, start, end, !downgrade);
>  
>  	/* Fix up all other VM information */
>  	remove_vma_list(mm, vma);
> @@ -3129,8 +3130,8 @@ void exit_mmap(struct mm_struct *mm)
>  	tlb_gather_mmu_fullmm(&tlb, mm);
>  	/* update_hiwater_rss(mm) here? but nobody should be looking */
>  	/* Use -1 here to ensure all VMAs in the mm are unmapped */
> -	unmap_vmas(&tlb, vma, 0, -1);
> -	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
> +	unmap_vmas(&tlb, vma, 0, -1, true);
> +	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING, true);
>  	tlb_finish_mmu(&tlb);
>  
>  	/* Walk the list again, actually closing and freeing it. */
> diff --git a/mm/oom_kill.c b/mm/oom_kill.c
> index 3c6cf9e3cd66..6ffa7c511aa3 100644
> --- a/mm/oom_kill.c
> +++ b/mm/oom_kill.c
> @@ -549,7 +549,8 @@ bool __oom_reap_task_mm(struct mm_struct *mm)
>  				ret = false;
>  				continue;
>  			}
> -			unmap_page_range(&tlb, vma, range.start, range.end, NULL);
> +			unmap_page_range(&tlb, vma, range.start, range.end,
> +					 NULL, false);
>  			mmu_notifier_invalidate_range_end(&range);
>  			tlb_finish_mmu(&tlb);
>  		}

I'm wondering whether the VMA locking should be done here instead of
inside unmap_page_range(), which does not really touch the VMA's fields.

Here the locking would be needed because another thread's page fault
handler may check the MMF_UNSTABLE flag and the VMA's lock before this
loop is entered.
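
A rough sketch of what I mean, assuming the marking is needed here at
all (which depends on the MMF_UNSTABLE ordering above):

	/* mm/oom_kill.c, __oom_reap_task_mm(), inside the per-VMA loop */
	vma_mark_locked(vma);	/* done by the caller, not by unmap_page_range() */
	unmap_page_range(&tlb, vma, range.start, range.end, NULL);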

