public inbox for linux-arch@vger.kernel.org
 help / color / mirror / Atom feed
From: Peter Zijlstra <peterz@infradead.org>
To: Christoph Lameter <clameter@sgi.com>
Cc: akpm@linux-foundation.org, linux-arch@vger.kernel.org,
	Ingo Molnar <mingo@elte.hu>, Thomas Gleixner <tglx@linutronix.de>,
	Steven Rostedt <rostedt@goodmis.org>
Subject: Re: [patch 00/41] cpu alloc / cpu ops v3: Optimize per cpu access
Date: Fri, 30 May 2008 21:35:04 +0200	[thread overview]
Message-ID: <1212176104.24826.53.camel@lappy.programming.kicks-ass.net> (raw)
In-Reply-To: <1212175315.24826.49.camel@lappy.programming.kicks-ass.net>

On Fri, 2008-05-30 at 21:21 +0200, Peter Zijlstra wrote:
> On Fri, 2008-05-30 at 12:10 -0700, Christoph Lameter wrote:

> > Ahh. Okay. This would make the lockless preemptless fastpath impossible 
> > because it would have to use some sort of locking to avoid access to the 
> > same percpu data from multiple processors?
> 
> TBH it's been a while since I attempted slub-rt, but yes that got hairy.
> I think it can be done using cmpxchg and speculative page refs, but I
> can't quite recall.

This is the last version I could find on my disks (2007-11-17) - it does
indeed have a severely handicapped fast-path.

Never got around to testing it properly - so it might be utter bollocks.

---
Subject: rt: make SLUB usable

Spurred by John Corbet's harsh words that SLUB is not available for -rt
I made a quick fix for this.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
CC: Christoph Lameter <clameter@sgi.com>
---
 include/linux/slub_def.h |    3 +
 init/Kconfig             |    1 
 mm/slub.c                |  108 ++++++++++++++++++++++++++++++++++++-----------
 3 files changed, 88 insertions(+), 24 deletions(-)

Index: linux-2.6/init/Kconfig
===================================================================
--- linux-2.6.orig/init/Kconfig
+++ linux-2.6/init/Kconfig
@@ -635,7 +635,6 @@ config SLAB
 
 config SLUB
 	bool "SLUB (Unqueued Allocator)"
-	depends on !PREEMPT_RT
 	help
 	   SLUB is a slab allocator that minimizes cache line usage
 	   instead of managing queues of cached objects (SLAB approach).
Index: linux-2.6/include/linux/slub_def.h
===================================================================
--- linux-2.6.orig/include/linux/slub_def.h
+++ linux-2.6/include/linux/slub_def.h
@@ -17,6 +17,9 @@ struct kmem_cache_cpu {
 	int node;
 	unsigned int offset;
 	unsigned int objsize;
+#ifdef CONFIG_PREEMPT_RT
+	spinlock_t lock;
+#endif
 };
 
 struct kmem_cache_node {
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c
+++ linux-2.6/mm/slub.c
@@ -21,11 +21,13 @@
 #include <linux/ctype.h>
 #include <linux/kallsyms.h>
 #include <linux/memory.h>
+#include <linux/pagemap.h>
 
 /*
  * Lock order:
- *   1. slab_lock(page)
- *   2. slab->list_lock
+ *   1. IRQ disable / c->lock
+ *   2. slab_lock(page)
+ *   3. node->list_lock
  *
  *   The slab_lock protects operations on the object of a particular
  *   slab and its metadata in the page struct. If the slab lock
@@ -270,10 +272,25 @@ static inline struct kmem_cache_node *ge
 
 static inline struct kmem_cache_cpu *get_cpu_slab(struct kmem_cache *s, int cpu)
 {
+	struct kmem_cache_cpu *c;
+
 #ifdef CONFIG_SMP
-	return s->cpu_slab[cpu];
+	c = s->cpu_slab[cpu];
 #else
-	return &s->cpu_slab;
+	c = &s->cpu_slab;
+#endif
+#ifdef CONFIG_PREEMPT_RT
+	if (c)
+		spin_lock(&c->lock);
+#endif
+	return c;
+}
+
+static inline void put_cpu_slab(struct kmem_cache_cpu *c)
+{
+#ifdef CONFIG_PREEMPT_RT
+	if (likely(c))
+		spin_unlock(&c->lock);
 #endif
 }
 
@@ -399,7 +416,7 @@ static void set_track(struct kmem_cache 
 	p += alloc;
 	if (addr) {
 		p->addr = addr;
-		p->cpu = smp_processor_id();
+		p->cpu = raw_smp_processor_id();
 		p->pid = current ? current->pid : -1;
 		p->when = jiffies;
 	} else
@@ -1176,6 +1193,7 @@ static void discard_slab(struct kmem_cac
 /*
  * Per slab locking using the pagelock
  */
+#ifndef CONFIG_PREEMPT_RT
 static __always_inline void slab_lock(struct page *page)
 {
 	bit_spin_lock(PG_locked, &page->flags);
@@ -1193,6 +1211,22 @@ static __always_inline int slab_trylock(
 	rc = bit_spin_trylock(PG_locked, &page->flags);
 	return rc;
 }
+#else
+static __always_inline void slab_lock(struct page *page)
+{
+	lock_page_nosync(page);
+}
+
+static __always_inline void slab_unlock(struct page *page)
+{
+	unlock_page(page);
+}
+
+static __always_inline int slab_trylock(struct page *page)
+{
+	return !TestSetPageLocked(page);
+}
+#endif
 
 /*
  * Management of partially allocated slabs
@@ -1412,25 +1446,31 @@ static inline void __flush_cpu_slab(stru
 
 	if (likely(c && c->page))
 		flush_slab(s, c);
+
+	put_cpu_slab(c);
 }
 
 static void flush_cpu_slab(void *d)
 {
 	struct kmem_cache *s = d;
 
-	__flush_cpu_slab(s, smp_processor_id());
+	__flush_cpu_slab(s, raw_smp_processor_id());
 }
 
 static void flush_all(struct kmem_cache *s)
 {
 #ifdef CONFIG_SMP
+#ifndef CONFIG_PREEMPT_RT
 	on_each_cpu(flush_cpu_slab, s, 1, 1);
 #else
+	schedule_on_each_cpu(flush_cpu_slab, s, 1, 1);
+#endif
+#else
 	unsigned long flags;
 
-	local_irq_save(flags);
+	local_irq_save_nort(flags);
 	flush_cpu_slab(s);
-	local_irq_restore(flags);
+	local_irq_restore_nort(flags);
 #endif
 }
 
@@ -1489,6 +1529,7 @@ load_freelist:
 	c->page->freelist = NULL;
 	c->node = page_to_nid(c->page);
 	slab_unlock(c->page);
+	put_cpu_slab(c);
 	return object;
 
 another_slab:
@@ -1502,15 +1543,16 @@ new_slab:
 	}
 
 	if (gfpflags & __GFP_WAIT)
-		local_irq_enable();
+		local_irq_enable_nort();
 
 	new = new_slab(s, gfpflags, node);
 
 	if (gfpflags & __GFP_WAIT)
-		local_irq_disable();
+		local_irq_disable_nort();
 
 	if (new) {
-		c = get_cpu_slab(s, smp_processor_id());
+		put_cpu_slab(c);
+		c = get_cpu_slab(s, raw_smp_processor_id());
 		if (c->page)
 			flush_slab(s, c);
 		slab_lock(new);
@@ -1518,6 +1560,7 @@ new_slab:
 		c->page = new;
 		goto load_freelist;
 	}
+	put_cpu_slab(c);
 	return NULL;
 debug:
 	object = c->page->freelist;
@@ -1528,6 +1571,7 @@ debug:
 	c->page->freelist = object[c->offset];
 	c->node = -1;
 	slab_unlock(c->page);
+	put_cpu_slab(c);
 	return object;
 }
 
@@ -1548,8 +1592,8 @@ static void __always_inline *slab_alloc(
 	unsigned long flags;
 	struct kmem_cache_cpu *c;
 
-	local_irq_save(flags);
-	c = get_cpu_slab(s, smp_processor_id());
+	local_irq_save_nort(flags);
+	c = get_cpu_slab(s, raw_smp_processor_id());
 	if (unlikely(!c->freelist || !node_match(c, node)))
 
 		object = __slab_alloc(s, gfpflags, node, addr, c);
@@ -1557,8 +1601,9 @@ static void __always_inline *slab_alloc(
 	else {
 		object = c->freelist;
 		c->freelist = object[c->offset];
+		put_cpu_slab(c);
 	}
-	local_irq_restore(flags);
+	local_irq_restore_nort(flags);
 
 	if (unlikely((gfpflags & __GFP_ZERO) && object))
 		memset(object, 0, c->objsize);
@@ -1656,16 +1701,16 @@ static void __always_inline slab_free(st
 	unsigned long flags;
 	struct kmem_cache_cpu *c;
 
-	local_irq_save(flags);
+	local_irq_save_nort(flags);
 	debug_check_no_locks_freed(object, s->objsize);
-	c = get_cpu_slab(s, smp_processor_id());
+	c = get_cpu_slab(s, raw_smp_processor_id());
 	if (likely(page == c->page && c->node >= 0)) {
 		object[c->offset] = c->freelist;
 		c->freelist = object;
 	} else
 		__slab_free(s, page, x, addr, c->offset);
-
-	local_irq_restore(flags);
+	put_cpu_slab(c);
+	local_irq_restore_nort(flags);
 }
 
 void kmem_cache_free(struct kmem_cache *s, void *x)
@@ -1846,6 +1891,9 @@ static void init_kmem_cache_cpu(struct k
 	c->node = 0;
 	c->offset = s->offset / sizeof(void *);
 	c->objsize = s->objsize;
+#ifdef CONFIG_PREEMPT_RT
+	spin_lock_init(&c->lock);
+#endif
 }
 
 static void init_kmem_cache_node(struct kmem_cache_node *n)
@@ -1925,6 +1973,7 @@ static void free_kmem_cache_cpus(struct 
 		if (c) {
 			s->cpu_slab[cpu] = NULL;
 			free_kmem_cache_cpu(c, cpu);
+			put_cpu_slab(c);
 		}
 	}
 }
@@ -1936,8 +1985,10 @@ static int alloc_kmem_cache_cpus(struct 
 	for_each_online_cpu(cpu) {
 		struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
 
-		if (c)
+		if (c) {
+			put_cpu_slab(c);
 			continue;
+		}
 
 		c = alloc_kmem_cache_cpu(s, cpu, flags);
 		if (!c) {
@@ -2962,8 +3013,12 @@ struct kmem_cache *kmem_cache_create(con
 		 * And then we need to update the object size in the
 		 * per cpu structures
 		 */
-		for_each_online_cpu(cpu)
-			get_cpu_slab(s, cpu)->objsize = s->objsize;
+		for_each_online_cpu(cpu) {
+			struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
+
+			c->objsize = s->objsize;
+			put_cpu_slab(c);
+		}
 		s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
 		up_write(&slub_lock);
 		if (sysfs_slab_alias(s, name))
@@ -3024,11 +3079,13 @@ static int __cpuinit slab_cpuup_callback
 		list_for_each_entry(s, &slab_caches, list) {
 			struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
 
-			local_irq_save(flags);
+			local_irq_save_nort(flags);
 			__flush_cpu_slab(s, cpu);
-			local_irq_restore(flags);
+			local_irq_restore_nort(flags);
 			free_kmem_cache_cpu(c, cpu);
 			s->cpu_slab[cpu] = NULL;
+
+			put_cpu_slab(c);
 		}
 		up_read(&slub_lock);
 		break;
@@ -3519,6 +3576,7 @@ static unsigned long slab_objects(struct
 			}
 			per_cpu[node]++;
 		}
+		put_cpu_slab(c);
 	}
 
 	for_each_node_state(node, N_NORMAL_MEMORY) {
@@ -3564,9 +3622,13 @@ static int any_slab_objects(struct kmem_
 	int cpu;
 
 	for_each_possible_cpu(cpu) {
+		int ret = 0;
 		struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
 
 		if (c && c->page)
+			ret = 1;
+		put_cpu_slab(c);
+		if (ret)
 			return 1;
 	}
 

  reply	other threads:[~2008-05-30 19:35 UTC|newest]

Thread overview: 163+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2008-05-30  3:56 [patch 00/41] cpu alloc / cpu ops v3: Optimize per cpu access Christoph Lameter
2008-05-30  3:56 ` [patch 01/41] cpu_alloc: Increase percpu area size to 128k Christoph Lameter
2008-06-02 17:58   ` Luck, Tony
2008-06-02 23:48     ` Rusty Russell
2008-06-10 17:22     ` Christoph Lameter
2008-06-10 17:22       ` Christoph Lameter
2008-06-10 19:54       ` Luck, Tony
2008-05-30  3:56 ` [patch 02/41] cpu alloc: The allocator Christoph Lameter
2008-05-30  4:58   ` Andrew Morton
2008-05-30  5:10     ` Christoph Lameter
2008-05-30  5:31       ` Andrew Morton
2008-06-02  9:29         ` Paul Jackson
2008-05-30  5:56       ` KAMEZAWA Hiroyuki
2008-05-30  6:16         ` Christoph Lameter
2008-06-04 14:48     ` Mike Travis
2008-05-30  5:04   ` Eric Dumazet
2008-05-30  5:20     ` Christoph Lameter
2008-05-30  5:52       ` Rusty Russell
2008-06-04 15:30         ` Mike Travis
2008-06-05 23:48           ` Rusty Russell
2008-05-30  5:54       ` Eric Dumazet
2008-06-04 14:58       ` Mike Travis
2008-06-04 15:11         ` Eric Dumazet
2008-06-06  0:32           ` Rusty Russell
2008-06-06  0:32             ` Rusty Russell
2008-06-10 17:33         ` Christoph Lameter
2008-06-10 18:05           ` Eric Dumazet
2008-06-10 18:28             ` Christoph Lameter
2008-05-30  5:46   ` Rusty Russell
2008-06-04 15:04     ` Mike Travis
2008-06-10 17:34       ` Christoph Lameter
2008-05-31 20:58   ` Pavel Machek
2008-05-30  3:56 ` [patch 03/41] cpu alloc: Use cpu allocator instead of the builtin modules per cpu allocator Christoph Lameter
2008-05-30  4:58   ` Andrew Morton
2008-05-30  5:14     ` Christoph Lameter
2008-05-30  5:34       ` Andrew Morton
2008-05-30  6:08   ` Rusty Russell
2008-05-30  6:21     ` Christoph Lameter
2008-05-30  3:56 ` [patch 04/41] cpu ops: Core piece for generic atomic per cpu operations Christoph Lameter
2008-05-30  4:58   ` Andrew Morton
2008-05-30  5:17     ` Christoph Lameter
2008-05-30  5:38       ` Andrew Morton
2008-05-30  6:12         ` Christoph Lameter
2008-05-30  7:08           ` Rusty Russell
2008-05-30 18:00             ` Christoph Lameter
2008-06-02  2:00               ` Rusty Russell
2008-06-04 18:18                 ` Mike Travis
2008-06-05 23:59                   ` Rusty Russell
2008-06-09 19:00                     ` Christoph Lameter
2008-06-09 23:27                       ` Rusty Russell
2008-06-09 23:54                         ` Christoph Lameter
2008-06-10  2:56                           ` Rusty Russell
2008-06-10  3:18                             ` Christoph Lameter
2008-06-11  0:03                               ` Rusty Russell
2008-06-11  0:15                                 ` Christoph Lameter
2008-06-09 23:09                   ` Christoph Lameter
2008-06-10 17:42                 ` Christoph Lameter
2008-06-11 11:10                   ` Rusty Russell
2008-06-11 23:39                     ` Christoph Lameter
2008-06-12  0:58                       ` Nick Piggin
2008-06-12  2:44                         ` Rusty Russell
2008-06-12  3:40                           ` Nick Piggin
2008-06-12  9:37                             ` Martin Peschke
2008-06-12 11:21                               ` Nick Piggin
2008-06-12 17:19                                 ` Christoph Lameter
2008-06-13  0:38                                   ` Rusty Russell
2008-06-13  2:27                                     ` Christoph Lameter
2008-06-15 10:33                                       ` Rusty Russell
2008-06-15 10:33                                         ` Rusty Russell
2008-06-16 14:52                                         ` Christoph Lameter
2008-06-17  0:24                                           ` Rusty Russell
2008-06-17  2:29                                             ` Christoph Lameter
2008-06-17 14:21                                             ` Mike Travis
2008-05-30  7:05         ` Rusty Russell
2008-05-30  6:32       ` Rusty Russell
2008-05-30  3:56 ` [patch 05/41] cpu alloc: Percpu_counter conversion Christoph Lameter
2008-05-30  6:47   ` Rusty Russell
2008-05-30 17:54     ` Christoph Lameter
2008-05-30  3:56 ` [patch 06/41] cpu alloc: crash_notes conversion Christoph Lameter
2008-05-30  3:56 ` [patch 07/41] cpu alloc: Workqueue conversion Christoph Lameter
2008-05-30  3:56 ` [patch 08/41] cpu alloc: ACPI cstate handling conversion Christoph Lameter
2008-05-30  3:56 ` [patch 09/41] cpu alloc: Genhd statistics conversion Christoph Lameter
2008-05-30  3:56 ` [patch 10/41] cpu alloc: blktrace conversion Christoph Lameter
2008-05-30  3:56 ` [patch 11/41] cpu alloc: SRCU cpu alloc conversion Christoph Lameter
2008-05-30  3:56 ` [patch 12/41] cpu alloc: XFS counter conversion Christoph Lameter
2008-05-30  3:56 ` [patch 13/41] cpu alloc: NFS statistics Christoph Lameter
2008-05-30  3:56 ` [patch 14/41] cpu alloc: Neigbour statistics Christoph Lameter
2008-05-30  3:56 ` [patch 15/41] cpu_alloc: Convert ip route statistics Christoph Lameter
2008-05-30  3:56 ` [patch 16/41] cpu alloc: Tcp statistics conversion Christoph Lameter
2008-05-30  3:56 ` [patch 17/41] cpu alloc: Convert scratches to cpu alloc Christoph Lameter
2008-05-30  3:56 ` [patch 18/41] cpu alloc: Dmaengine conversion Christoph Lameter
2008-05-30  3:56 ` [patch 19/41] cpu alloc: Convert loopback statistics Christoph Lameter
2008-05-30  3:56 ` [patch 20/41] cpu alloc: Veth conversion Christoph Lameter
2008-05-30  3:56 ` [patch 21/41] cpu alloc: Chelsio statistics conversion Christoph Lameter
2008-05-30  3:56 ` [patch 22/41] cpu alloc: Convert network sockets inuse counter Christoph Lameter
2008-05-30  3:56 ` [patch 23/41] cpu alloc: Use it for infiniband Christoph Lameter
2008-05-30  3:56 ` [patch 24/41] cpu alloc: Use in the crypto subsystem Christoph Lameter
2008-05-30  3:56 ` [patch 25/41] cpu alloc: scheduler: Convert cpuusage to cpu_alloc Christoph Lameter
2008-05-30  3:56 ` [patch 26/41] cpu alloc: Convert mib handling to cpu alloc Christoph Lameter
2008-05-30  6:47   ` Eric Dumazet
2008-05-30 18:01     ` Christoph Lameter
2008-05-30  3:56 ` [patch 27/41] cpu alloc: Remove the allocpercpu functionality Christoph Lameter
2008-05-30  4:58   ` Andrew Morton
2008-05-30  3:56 ` [patch 28/41] Module handling: Use CPU_xx ops to dynamically allocate counters Christoph Lameter
2008-05-30  3:56 ` [patch 29/41] x86_64: Use CPU ops for nmi alert counter Christoph Lameter
2008-05-30  3:56 ` [patch 30/41] Remove local_t support Christoph Lameter
2008-05-30  3:56 ` [patch 31/41] VM statistics: Use CPU ops Christoph Lameter
2008-05-30  3:56 ` [patch 32/41] cpu alloc: Use in slub Christoph Lameter
2008-05-30  3:56 ` [patch 33/41] cpu alloc: Remove slub fields Christoph Lameter
2008-05-30  3:56 ` [patch 34/41] cpu alloc: Page allocator conversion Christoph Lameter
2008-05-30  3:56 ` [patch 35/41] Support for CPU ops Christoph Lameter
2008-05-30  4:58   ` Andrew Morton
2008-05-30  5:18     ` Christoph Lameter
2008-05-30  3:56 ` [patch 36/41] Zero based percpu: Infrastructure to rebase the per cpu area to zero Christoph Lameter
2008-05-30  3:56 ` [patch 37/41] x86_64: Fold pda into per cpu area Christoph Lameter
2008-05-30  3:56 ` [patch 38/41] x86: Extend percpu ops to 64 bit Christoph Lameter
2008-05-30  3:56 ` [patch 39/41] x86: Replace cpu_pda() using percpu logic and get rid of _cpu_pda() Christoph Lameter
2008-05-30  3:57 ` [patch 40/41] x86: Replace xxx_pda() operations with x86_xx_percpu() Christoph Lameter
2008-05-30  3:57 ` [patch 41/41] x86_64: Support for cpu ops Christoph Lameter
2008-05-30  4:58 ` [patch 00/41] cpu alloc / cpu ops v3: Optimize per cpu access Andrew Morton
2008-05-30  5:03   ` Christoph Lameter
2008-05-30  5:21     ` Andrew Morton
2008-05-30  5:27       ` Christoph Lameter
2008-05-30  5:49         ` Andrew Morton
2008-05-30  6:16           ` Christoph Lameter
2008-05-30  6:51             ` KAMEZAWA Hiroyuki
2008-05-30 14:38         ` Mike Travis
2008-05-30 17:50           ` Christoph Lameter
2008-05-30 18:00             ` Matthew Wilcox
2008-05-30 18:12               ` Christoph Lameter
2008-05-30  6:01       ` Eric Dumazet
2008-05-30  6:16         ` Andrew Morton
2008-05-30  6:22           ` Christoph Lameter
2008-05-30  6:37             ` Andrew Morton
2008-05-30 11:32               ` Matthew Wilcox
2008-06-04 15:07   ` Mike Travis
2008-06-06  5:33     ` Eric Dumazet
2008-06-06 13:08       ` Mike Travis
2008-06-08  6:00       ` Rusty Russell
2008-06-09 18:44       ` Christoph Lameter
2008-06-09 19:11         ` Andi Kleen
2008-06-09 20:15           ` Eric Dumazet
2008-05-30  9:12 ` Peter Zijlstra
2008-05-30  9:18   ` Ingo Molnar
2008-05-30 18:11     ` Christoph Lameter
2008-05-30 18:40       ` Peter Zijlstra
2008-05-30 18:56         ` Christoph Lameter
2008-05-30 19:13           ` Peter Zijlstra
2008-06-01  3:25             ` Christoph Lameter
2008-06-01  8:19               ` Peter Zijlstra
2008-05-30 18:06   ` Christoph Lameter
2008-05-30 18:19     ` Peter Zijlstra
2008-05-30 18:26       ` Christoph Lameter
2008-05-30 18:47         ` Peter Zijlstra
2008-05-30 19:10           ` Christoph Lameter
2008-05-30 19:21             ` Peter Zijlstra
2008-05-30 19:35               ` Peter Zijlstra [this message]
2008-06-01  3:27               ` Christoph Lameter
2008-05-30 18:08   ` Christoph Lameter
2008-05-30 18:39     ` Peter Zijlstra
2008-05-30 18:51       ` Christoph Lameter
2008-05-30 19:00         ` Peter Zijlstra
2008-05-30 19:11           ` Christoph Lameter

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1212176104.24826.53.camel@lappy.programming.kicks-ass.net \
    --to=peterz@infradead.org \
    --cc=akpm@linux-foundation.org \
    --cc=clameter@sgi.com \
    --cc=linux-arch@vger.kernel.org \
    --cc=mingo@elte.hu \
    --cc=rostedt@goodmis.org \
    --cc=tglx@linutronix.de \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox