From: Doug Berger <opendmb@gmail.com>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: Jonathan Corbet <corbet@lwn.net>, Rob Herring <robh+dt@kernel.org>,
	Krzysztof Kozlowski <krzysztof.kozlowski+dt@linaro.org>,
	Frank Rowand <frowand.list@gmail.com>,
	Mike Kravetz <mike.kravetz@oracle.com>,
	Muchun Song <songmuchun@bytedance.com>,
	Mike Rapoport <rppt@kernel.org>, Christoph Hellwig <hch@lst.de>,
	Marek Szyprowski <m.szyprowski@samsung.com>,
	Robin Murphy <robin.murphy@arm.com>, Borislav Petkov <bp@alien8.de>,
	"Paul E. McKenney" <paulmck@kernel.org>,
	Neeraj Upadhyay <quic_neeraju@quicinc.com>,
	Randy Dunlap <rdunlap@infradead.org>,
	Damien Le Moal <damien.lemoal@opensource.wdc.com>,
	Doug Berger <opendmb@gmail.com>,
	Florian Fainelli <f.fainelli@gmail.com>,
	David Hildenbrand <david@redhat.com>, Zi Yan <ziy@nvidia.com>,
	Oscar Salvador <osalvador@suse.de>
Subject: [PATCH 18/21] mm/cma: support CMA in Designated Movable Blocks
Date: Tue, 13 Sep 2022 12:55:05 -0700
Message-ID: <20220913195508.3511038-19-opendmb@gmail.com>
In-Reply-To: <20220913195508.3511038-1-opendmb@gmail.com>

Allow the page allocator to handle CMA areas that lie within
Designated Movable Blocks differently from ordinary CMA areas.

Specifically, their pageblocks keep the MIGRATE_MOVABLE migratetype
so that the page allocator can utilize them more aggressively. This
also means that the page allocator must not count these pages in the
nr_free_cma metric it uses for managing MIGRATE_CMA pageblocks.
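
The new alloc_contig_range() wrapper reduces to the following
(annotated sketch of the hunk added below; the dmb_intersects()
helper and the DMB_* values come from earlier patches in this
series):

  int alloc_contig_range(unsigned long start, unsigned long end,
			 unsigned int migratetype, gfp_t gfp_mask)
  {
	switch (dmb_intersects(start, end)) {
	case DMB_DISJOINT:
		/* Entirely outside any DMB: unchanged behavior. */
		break;
	case DMB_INTERSECTS:
		/*
		 * Entirely inside a DMB: the pageblocks were left
		 * MIGRATE_MOVABLE, so downgrade MIGRATE_CMA requests.
		 */
		if (migratetype == MIGRATE_CMA)
			migratetype = MIGRATE_MOVABLE;
		else
			return -EBUSY;
		break;
	default:
		/* DMB_MIXED: the range straddles a DMB boundary. */
		return -EBUSY;
	}

	return _alloc_contig_range(start, end, migratetype, gfp_mask);
  }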

Consequently, these areas are removed from the CmaTotal metric
after initialization to avoid confusion.
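
For reference, a minimal sketch of an early-boot caller that places
a CMA area inside a DMB via the new in_dmb argument (the size and
name here are hypothetical, for illustration only):

  static struct cma *dmb_cma;

  /* Hypothetical reservation: any base, 256 MiB, default alignment. */
  int ret = __cma_declare_contiguous_nid(0 /* base */, SZ_256M,
					 0 /* limit */, 0 /* alignment */,
					 0 /* order_per_bit */, false,
					 "dmb-cma", &dmb_cma,
					 NUMA_NO_NODE, true /* in_dmb */);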

Signed-off-by: Doug Berger <opendmb@gmail.com>
---
 include/linux/cma.h | 13 ++++++---
 mm/cma.c            | 55 +++++++++++++++++++++++++-----------
 mm/page_alloc.c     | 69 +++++++++++++++++++++++++++++----------------
 3 files changed, 92 insertions(+), 45 deletions(-)

diff --git a/include/linux/cma.h b/include/linux/cma.h
index 63873b93deaa..ffbb8ea2c5f8 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -31,11 +31,13 @@ extern phys_addr_t cma_get_base(const struct cma *cma);
 extern unsigned long cma_get_size(const struct cma *cma);
 extern const char *cma_get_name(const struct cma *cma);
 
-extern int __init cma_declare_contiguous_nid(phys_addr_t base,
+extern int __init __cma_declare_contiguous_nid(phys_addr_t base,
 			phys_addr_t size, phys_addr_t limit,
 			phys_addr_t alignment, unsigned int order_per_bit,
 			bool fixed, const char *name, struct cma **res_cma,
-			int nid);
+			int nid, bool in_dmb);
+#define cma_declare_contiguous_nid(b, s, l, a, o, f, n, r_c, nid)	\
+	__cma_declare_contiguous_nid(b, s, l, a, o, f, n, r_c, nid, false)
 static inline int __init cma_declare_contiguous(phys_addr_t base,
 			phys_addr_t size, phys_addr_t limit,
 			phys_addr_t alignment, unsigned int order_per_bit,
@@ -44,10 +46,13 @@ static inline int __init cma_declare_contiguous(phys_addr_t base,
 	return cma_declare_contiguous_nid(base, size, limit, alignment,
 			order_per_bit, fixed, name, res_cma, NUMA_NO_NODE);
 }
-extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
+extern int __cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
 					unsigned int order_per_bit,
 					const char *name,
-					struct cma **res_cma);
+					struct cma **res_cma,
+					bool in_dmb);
+#define cma_init_reserved_mem(base, size, order, name, res_cma)		\
+	__cma_init_reserved_mem(base, size, order, name, res_cma, false)
 extern struct page *cma_alloc(struct cma *cma, unsigned long count, unsigned int align,
 			      bool no_warn);
 extern bool cma_pages_valid(struct cma *cma, const struct page *pages, unsigned long count);
diff --git a/mm/cma.c b/mm/cma.c
index 6208a3e1cd9d..4f33cd54db9e 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -33,6 +33,7 @@
 #include <linux/kmemleak.h>
 #include <linux/page-isolation.h>
 #include <trace/events/cma.h>
+#include <linux/dmb.h>
 
 #include "cma.h"
 
@@ -98,6 +99,10 @@ static void __init cma_activate_area(struct cma *cma)
 {
 	unsigned long base_pfn = cma->base_pfn, pfn;
 	struct zone *zone;
+	int is_dmb = dmb_intersects(base_pfn, base_pfn + cma->count);
+
+	if (is_dmb == DMB_MIXED)
+		goto out_error;
 
 	cma->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma), GFP_KERNEL);
 	if (!cma->bitmap)
@@ -116,13 +121,17 @@ static void __init cma_activate_area(struct cma *cma)
 			goto not_in_zone;
 	}
 
-	for (pfn = base_pfn; pfn < base_pfn + cma->count;
-	     pfn += pageblock_nr_pages) {
-		struct page *page = pfn_to_page(pfn);
+	if (is_dmb == DMB_INTERSECTS) {
+		totalcma_pages -= cma->count;
+	} else {
+		for (pfn = base_pfn; pfn < base_pfn + cma->count;
+		     pfn += pageblock_nr_pages) {
+			struct page *page = pfn_to_page(pfn);
 
-		set_pageblock_migratetype(page, MIGRATE_CMA);
-		init_reserved_pageblock(page);
-		page_zone(page)->cma_pages += pageblock_nr_pages;
+			set_pageblock_migratetype(page, MIGRATE_CMA);
+			init_reserved_pageblock(page);
+			page_zone(page)->cma_pages += pageblock_nr_pages;
+		}
 	}
 
 	spin_lock_init(&cma->lock);
@@ -141,7 +150,8 @@ static void __init cma_activate_area(struct cma *cma)
 	if (!cma->reserve_pages_on_error) {
 		for (pfn = base_pfn; pfn < base_pfn + cma->count;
 		     pfn += pageblock_nr_pages)
-			init_reserved_pageblock(pfn_to_page(pfn));
+			if (!dmb_intersects(pfn, pfn + pageblock_nr_pages))
+				init_reserved_pageblock(pfn_to_page(pfn));
 	}
 	totalcma_pages -= cma->count;
 	cma->count = 0;
@@ -166,7 +176,7 @@ void __init cma_reserve_pages_on_error(struct cma *cma)
 }
 
 /**
- * cma_init_reserved_mem() - create custom contiguous area from reserved memory
+ * __cma_init_reserved_mem() - create custom contiguous area in reserved memory
  * @base: Base address of the reserved area
  * @size: Size of the reserved area (in bytes),
  * @order_per_bit: Order of pages represented by one bit on bitmap.
@@ -174,15 +184,18 @@ void __init cma_reserve_pages_on_error(struct cma *cma)
  *        the area will be set to "cmaN", where N is a running counter of
  *        used areas.
  * @res_cma: Pointer to store the created cma region.
+ * @in_dmb: Designate the reserved memory as a Designated Movable Block.
  *
  * This function creates custom contiguous area from already reserved memory.
  */
-int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
-				 unsigned int order_per_bit,
-				 const char *name,
-				 struct cma **res_cma)
+int __init __cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
+				   unsigned int order_per_bit,
+				   const char *name,
+				   struct cma **res_cma,
+				   bool in_dmb)
 {
 	struct cma *cma;
+	int err;
 
 	/* Sanity checks */
 	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
@@ -201,6 +214,14 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
 	if (!IS_ALIGNED(base | size, CMA_MIN_ALIGNMENT_BYTES))
 		return -EINVAL;
 
+	if (in_dmb) {
+		err = dmb_reserve(base, size, NULL);
+		if (err) {
+			pr_err("Cannot reserve DMB for CMA!\n");
+			return err;
+		}
+	}
+
 	/*
 	 * Each reserved area must be initialised later, when more kernel
 	 * subsystems (like slab allocator) are available.
@@ -223,7 +244,7 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
 }
 
 /**
- * cma_declare_contiguous_nid() - reserve custom contiguous area
+ * __cma_declare_contiguous_nid() - reserve custom contiguous area
  * @base: Base address of the reserved area optional, use 0 for any
  * @size: Size of the reserved area (in bytes),
  * @limit: End address of the reserved memory (optional, 0 for any).
@@ -233,6 +254,7 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
  * @name: The name of the area. See function cma_init_reserved_mem()
  * @res_cma: Pointer to store the created cma region.
  * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
+ * @in_dmb: Designate the reserved memory as a Designated Movable Block.
  *
  * This function reserves memory from early allocator. It should be
  * called by arch specific code once the early allocator (memblock or bootmem)
@@ -242,11 +264,11 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
  * If @fixed is true, reserve contiguous area at exactly @base.  If false,
  * reserve in range from @base to @limit.
  */
-int __init cma_declare_contiguous_nid(phys_addr_t base,
+int __init __cma_declare_contiguous_nid(phys_addr_t base,
 			phys_addr_t size, phys_addr_t limit,
 			phys_addr_t alignment, unsigned int order_per_bit,
 			bool fixed, const char *name, struct cma **res_cma,
-			int nid)
+			int nid, bool in_dmb)
 {
 	phys_addr_t memblock_end = memblock_end_of_DRAM();
 	phys_addr_t highmem_start;
@@ -374,7 +396,8 @@ int __init cma_declare_contiguous_nid(phys_addr_t base,
 		base = addr;
 	}
 
-	ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
+	ret = __cma_init_reserved_mem(base, size, order_per_bit, name, res_cma,
+				      in_dmb);
 	if (ret)
 		goto free_mem;
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e38dd1b32771..09d00c178bc8 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -9233,29 +9233,8 @@ int __alloc_contig_migrate_range(struct compact_control *cc,
 	return 0;
 }
 
-/**
- * alloc_contig_range() -- tries to allocate given range of pages
- * @start:	start PFN to allocate
- * @end:	one-past-the-last PFN to allocate
- * @migratetype:	migratetype of the underlying pageblocks (either
- *			#MIGRATE_MOVABLE or #MIGRATE_CMA).  All pageblocks
- *			in range must have the same migratetype and it must
- *			be either of the two.
- * @gfp_mask:	GFP mask to use during compaction
- *
- * The PFN range does not have to be pageblock aligned. The PFN range must
- * belong to a single zone.
- *
- * The first thing this routine does is attempt to MIGRATE_ISOLATE all
- * pageblocks in the range.  Once isolated, the pageblocks should not
- * be modified by others.
- *
- * Return: zero on success or negative error code.  On success all
- * pages which PFN is in [start, end) are allocated for the caller and
- * need to be freed with free_contig_range().
- */
-int alloc_contig_range(unsigned long start, unsigned long end,
-		       unsigned migratetype, gfp_t gfp_mask)
+int _alloc_contig_range(unsigned long start, unsigned long end,
+			unsigned int migratetype, gfp_t gfp_mask)
 {
 	unsigned long outer_start, outer_end;
 	int order;
@@ -9379,6 +9358,46 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 	undo_isolate_page_range(start, end, migratetype);
 	return ret;
 }
+
+/**
+ * alloc_contig_range() -- tries to allocate given range of pages
+ * @start:	start PFN to allocate
+ * @end:	one-past-the-last PFN to allocate
+ * @migratetype:	migratetype of the underlying pageblocks (either
+ *			#MIGRATE_MOVABLE or #MIGRATE_CMA).  All pageblocks
+ *			in range must have the same migratetype and it must
+ *			be either of the two.
+ * @gfp_mask:	GFP mask to use during compaction
+ *
+ * The PFN range does not have to be pageblock aligned. The PFN range must
+ * belong to a single zone.
+ *
+ * The first thing this routine does is attempt to MIGRATE_ISOLATE all
+ * pageblocks in the range.  Once isolated, the pageblocks should not
+ * be modified by others.
+ *
+ * Return: zero on success or negative error code.  On success all
+ * pages which PFN is in [start, end) are allocated for the caller and
+ * need to be freed with free_contig_range().
+ */
+int alloc_contig_range(unsigned long start, unsigned long end,
+		       unsigned int migratetype, gfp_t gfp_mask)
+{
+	switch (dmb_intersects(start, end)) {
+	case DMB_DISJOINT:
+		break;
+	case DMB_INTERSECTS:
+		if (migratetype == MIGRATE_CMA)
+			migratetype = MIGRATE_MOVABLE;
+		else
+			return -EBUSY;
+		break;
+	default:
+		return -EBUSY;
+	}
+
+	return _alloc_contig_range(start, end, migratetype, gfp_mask);
+}
 EXPORT_SYMBOL(alloc_contig_range);
 
 static int __alloc_contig_pages(unsigned long start_pfn,
@@ -9386,8 +9405,8 @@ static int __alloc_contig_pages(unsigned long start_pfn,
 {
 	unsigned long end_pfn = start_pfn + nr_pages;
 
-	return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
-				  gfp_mask);
+	return _alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
+				   gfp_mask);
 }
 
 static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn,
-- 
2.25.1


Thread overview: 59+ messages

2022-09-13 19:54 [PATCH 00/21] mm: introduce Designated Movable Blocks Doug Berger
2022-09-13 19:54 ` [PATCH 01/21] mm/page_isolation: protect cma from isolate_single_pageblock Doug Berger
2022-09-14  0:02   ` Zi Yan
2022-09-14  0:59       ` Doug Berger
2022-09-14  1:09           ` Zi Yan
2022-09-14  1:47             ` Doug Berger
2022-09-14  1:53                 ` Zi Yan
2022-09-14 17:27                     ` Doug Berger
2022-09-16  3:40     ` kernel test robot
2022-09-13 19:54 ` [PATCH 02/21] mm/hugetlb: correct max_huge_pages accounting on demote Doug Berger
2022-09-14 17:23   ` Mike Kravetz
2022-09-14 17:26     ` Florian Fainelli
2022-09-14 18:43         ` Mike Kravetz
2022-09-14 17:30     ` Doug Berger
2022-09-14 20:58     ` Andrew Morton
2022-09-14 21:11       ` Mike Kravetz
2022-09-13 19:54 ` [PATCH 11/21] mm/page_alloc: introduce init_reserved_pageblock() Doug Berger
2022-09-13 19:55 ` [PATCH 13/21] mm/dmb: Introduce Designated Movable Blocks Doug Berger
2022-09-13 19:55 ` [PATCH 21/21] mm/hugetlb: introduce hugetlb_dmb Doug Berger
2022-09-13 19:54   ` [PATCH 03/21] mm/hugetlb: correct demote page offset logic Doug Berger
2022-09-13 23:34       ` Matthew Wilcox
2022-09-14  1:07         ` Doug Berger
2022-09-14 17:08             ` Mike Kravetz
2022-09-15  1:40       ` Muchun Song
2022-09-13 19:54   ` [PATCH 04/21] mm/hugetlb: refactor alloc_and_dissolve_huge_page Doug Berger
2022-09-13 19:54   ` [PATCH 05/21] mm/hugetlb: allow migrated hugepage to dissolve when freed Doug Berger
2022-09-13 19:54   ` [PATCH 06/21] mm/hugetlb: add hugepage isolation support Doug Berger
2022-09-13 19:54   ` [PATCH 07/21] lib/show_mem.c: display MovableOnly Doug Berger
2022-09-13 19:54   ` [PATCH 08/21] mm/vmstat: show start_pfn when zone spans pages Doug Berger
2022-09-13 19:54   ` [PATCH 09/21] mm/page_alloc: calculate node_spanned_pages from pfns Doug Berger
2022-09-13 19:54   ` [PATCH 10/21] mm/page_alloc.c: allow oversized movablecore Doug Berger
2022-09-13 19:54   ` [PATCH 12/21] memblock: introduce MEMBLOCK_MOVABLE flag Doug Berger
2022-09-13 19:55   ` [PATCH 14/21] mm/page_alloc: make alloc_contig_pages DMB aware Doug Berger
2022-09-13 19:55   ` [PATCH 15/21] mm/page_alloc: allow base for movablecore Doug Berger
2022-09-13 19:55   ` [PATCH 16/21] dt-bindings: reserved-memory: introduce designated-movable-block Doug Berger
2022-09-14 14:55       ` Rob Herring
2022-09-14 17:13         ` Doug Berger
2022-09-18 10:31             ` Krzysztof Kozlowski
2022-09-18 23:12               ` Doug Berger
2022-09-19 11:03                 ` Krzysztof Kozlowski
2022-09-21  0:14                   ` Doug Berger
2022-09-21  6:35                       ` Krzysztof Kozlowski
2022-09-18 10:28     ` Krzysztof Kozlowski
2022-09-18 22:41         ` Doug Berger
2022-09-13 19:55   ` [PATCH 17/21] mm/dmb: introduce rmem designated-movable-block Doug Berger
2022-09-13 19:55   ` Doug Berger [this message]
2022-09-14 17:07     ` [PATCH 18/21] mm/cma: support CMA in Designated Movable Blocks kernel test robot
2022-09-14 17:58       ` kernel test robot
2022-09-14 22:03       ` kernel test robot
2022-09-13 19:55   ` [PATCH 19/21] dt-bindings: reserved-memory: shared-dma-pool: support DMB Doug Berger
2022-09-13 19:55   ` [PATCH 20/21] mm/cma: introduce rmem shared-dmb-pool Doug Berger
2022-09-14 13:21   ` [PATCH 00/21] mm: introduce Designated Movable Blocks Rob Herring
2022-09-14 16:57       ` Doug Berger
2022-09-14 18:07           ` Rob Herring
2022-09-19  9:00 ` David Hildenbrand
2022-09-20  1:03     ` Doug Berger
2022-09-23 11:19         ` Mike Rapoport
2022-09-23 22:10             ` Doug Berger
2022-09-29  9:00         ` David Hildenbrand
