linux-pm.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH v3] powercap/rapl: reduce ipi calls
@ 2016-01-15 19:04 Jacob Pan
  2016-02-16  0:54 ` Rafael J. Wysocki
  0 siblings, 1 reply; 5+ messages in thread
From: Jacob Pan @ 2016-01-15 19:04 UTC (permalink / raw)
  To: LKML, Linux PM, Rafael Wysocki
  Cc: Peter Zijlstra, Srinivas Pandruvada, Jacob Pan

Reduce remote CPU calls for MSR access by combining the
read-modify-write into one function. Also optimize the search for an
active CPU on the package so that a remote CPU is not used if the
current CPU is already on the target package.

Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Jacob Pan <jacob.jun.pan@linux.intel.com>
---
 drivers/powercap/intel_rapl.c | 122 ++++++++++++++++++++++++++++++------------
 1 file changed, 87 insertions(+), 35 deletions(-)

diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index 48747c2..2dcd95f 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -133,6 +133,12 @@ struct rapl_domain_data {
 	unsigned long timestamp;
 };
 
+struct msrl_action {
+	u32 msr_no;
+	u64 clear_mask;
+	u64 set_mask;
+	int err;
+};
 
 #define	DOMAIN_STATE_INACTIVE           BIT(0)
 #define	DOMAIN_STATE_POWER_LIMIT_SET    BIT(1)
@@ -263,7 +269,11 @@ static struct rapl_package *find_package_by_id(int id)
 /* caller to ensure CPU hotplug lock is held */
 static int find_active_cpu_on_package(int package_id)
 {
-	int i;
+	/* try to avoid remote cpu call, use raw since we are preemptible */
+	int i = raw_smp_processor_id();
+
+	if (topology_physical_package_id(i) == package_id)
+		return i;
 
 	for_each_online_cpu(i) {
 		if (topology_physical_package_id(i) == package_id)
@@ -800,35 +810,62 @@ static int rapl_read_data_raw(struct rapl_domain *rd,
 	return 0;
 }
 
+
+static int msrl_update_safe(u32 msr_no, u64 clear_mask, u64 set_mask)
+{
+	int err;
+	u64 val;
+
+	err = rdmsrl_safe(msr_no, &val);
+	if (err)
+		goto out;
+
+	val &= ~clear_mask;
+	val |= set_mask;
+
+	err = wrmsrl_safe(msr_no, val);
+
+out:
+	return err;
+}
+
+static void msrl_update_func(void *info)
+{
+	struct msrl_action *ma = info;
+
+	ma->err = msrl_update_safe(ma->msr_no, ma->clear_mask, ma->set_mask);
+}
+
 /* Similar use of primitive info in the read counterpart */
 static int rapl_write_data_raw(struct rapl_domain *rd,
 			enum rapl_primitives prim,
 			unsigned long long value)
 {
-	u64 msr_val;
-	u32 msr;
 	struct rapl_primitive_info *rp = &rpi[prim];
 	int cpu;
+	u64 bits;
+	struct msrl_action ma;
+	int ret;
 
 	cpu = find_active_cpu_on_package(rd->package_id);
 	if (cpu < 0)
 		return cpu;
-	msr = rd->msrs[rp->id];
-	if (rdmsrl_safe_on_cpu(cpu, msr, &msr_val)) {
-		dev_dbg(&rd->power_zone.dev,
-			"failed to read msr 0x%x on cpu %d\n", msr, cpu);
-		return -EIO;
-	}
-	value = rapl_unit_xlate(rd, rd->package_id, rp->unit, value, 1);
-	msr_val &= ~rp->mask;
-	msr_val |= value << rp->shift;
-	if (wrmsrl_safe_on_cpu(cpu, msr, msr_val)) {
-		dev_dbg(&rd->power_zone.dev,
-			"failed to write msr 0x%x on cpu %d\n", msr, cpu);
-		return -EIO;
-	}
 
-	return 0;
+	bits = rapl_unit_xlate(rd, rd->package_id, rp->unit, value, 1);
+	bits |= bits << rp->shift;
+	memset(&ma, 0, sizeof(ma));
+
+	ma.msr_no = rd->msrs[rp->id];
+	ma.clear_mask = rp->mask;
+	ma.set_mask = bits;
+
+	ret = smp_call_function_single(cpu, msrl_update_func, &ma, 1);
+	if (ret)
+		WARN_ON_ONCE(ret);
+	else
+		ret = ma.err;
+
+	return ret;
 }
 
 /*
@@ -893,6 +930,21 @@ static int rapl_check_unit_atom(struct rapl_package *rp, int cpu)
 	return 0;
 }
 
+static void power_limit_irq_save_cpu(void *info)
+{
+	u32 l, h = 0;
+	struct rapl_package *rp = (struct rapl_package *)info;
+
+	/* save the state of PLN irq mask bit before disabling it */
+	rdmsr_safe(MSR_IA32_PACKAGE_THERM_INTERRUPT, &l, &h);
+	if (!(rp->power_limit_irq & PACKAGE_PLN_INT_SAVED)) {
+		rp->power_limit_irq = l & PACKAGE_THERM_INT_PLN_ENABLE;
+		rp->power_limit_irq |= PACKAGE_PLN_INT_SAVED;
+	}
+	l &= ~PACKAGE_THERM_INT_PLN_ENABLE;
+	wrmsr_safe(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
+}
+
 
 /* REVISIT:
  * When package power limit is set artificially low by RAPL, LVT
@@ -906,7 +958,6 @@ static int rapl_check_unit_atom(struct rapl_package *rp, int cpu)
 
 static void package_power_limit_irq_save(int package_id)
 {
-	u32 l, h = 0;
 	int cpu;
 	struct rapl_package *rp;
 
@@ -920,20 +971,27 @@ static void package_power_limit_irq_save(int package_id)
 	cpu = find_active_cpu_on_package(package_id);
 	if (cpu < 0)
 		return;
-	/* save the state of PLN irq mask bit before disabling it */
-	rdmsr_safe_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, &l, &h);
-	if (!(rp->power_limit_irq & PACKAGE_PLN_INT_SAVED)) {
-		rp->power_limit_irq = l & PACKAGE_THERM_INT_PLN_ENABLE;
-		rp->power_limit_irq |= PACKAGE_PLN_INT_SAVED;
-	}
-	l &= ~PACKAGE_THERM_INT_PLN_ENABLE;
-	wrmsr_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
+	smp_call_function_single(cpu, power_limit_irq_save_cpu, rp, 1);
+}
+
+static void power_limit_irq_restore_cpu(void *info)
+{
+	u32 l, h = 0;
+	struct rapl_package *rp = (struct rapl_package *)info;
+
+	rdmsr_safe(MSR_IA32_PACKAGE_THERM_INTERRUPT, &l, &h);
+
+	if (rp->power_limit_irq & PACKAGE_THERM_INT_PLN_ENABLE)
+		l |= PACKAGE_THERM_INT_PLN_ENABLE;
+	else
+		l &= ~PACKAGE_THERM_INT_PLN_ENABLE;
+
+	wrmsr_safe(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
 }
 
 /* restore per package power limit interrupt enable state */
 static void package_power_limit_irq_restore(int package_id)
 {
-	u32 l, h;
 	int cpu;
 	struct rapl_package *rp;
 
@@ -951,14 +1009,8 @@ static void package_power_limit_irq_restore(int package_id)
 	/* irq enable state not saved, nothing to restore */
 	if (!(rp->power_limit_irq & PACKAGE_PLN_INT_SAVED))
 		return;
-	rdmsr_safe_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, &l, &h);
-
-	if (rp->power_limit_irq & PACKAGE_THERM_INT_PLN_ENABLE)
-		l |= PACKAGE_THERM_INT_PLN_ENABLE;
-	else
-		l &= ~PACKAGE_THERM_INT_PLN_ENABLE;
 
-	wrmsr_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
+	smp_call_function_single(cpu, power_limit_irq_restore_cpu, rp, 1);
 }
 
 static void set_floor_freq_default(struct rapl_domain *rd, bool mode)
-- 
1.9.1


^ permalink raw reply related	[flat|nested] 5+ messages in thread

* Re: [PATCH v3] powercap/rapl: reduce ipi calls
  2016-01-15 19:04 [PATCH v3] powercap/rapl: reduce ipi calls Jacob Pan
@ 2016-02-16  0:54 ` Rafael J. Wysocki
  2016-02-17 16:04   ` Thomas Gleixner
  0 siblings, 1 reply; 5+ messages in thread
From: Rafael J. Wysocki @ 2016-02-16  0:54 UTC (permalink / raw)
  To: Jacob Pan, Peter Zijlstra
  Cc: LKML, Linux PM, Rafael Wysocki, Srinivas Pandruvada,
	Thomas Gleixner

On Friday, January 15, 2016 11:04:13 AM Jacob Pan wrote:
> Reduce remote CPU calls for MSR access by combining read
> modify write into one function. Also optimize search for active CPU on
> package such that remote CPU is not used if the current CPU is already
> on the target package.
> 
> Suggested-by: Peter Zijlstra <peterz@infradead.org>
> Signed-off-by: Jacob Pan <jacob.jun.pan@linux.intel.com>

I'm wondering if there are any comments on this one?

> ---
>  drivers/powercap/intel_rapl.c | 122 ++++++++++++++++++++++++++++++------------
>  1 file changed, 87 insertions(+), 35 deletions(-)
> 
> diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
> index 48747c2..2dcd95f 100644
> --- a/drivers/powercap/intel_rapl.c
> +++ b/drivers/powercap/intel_rapl.c
> @@ -133,6 +133,12 @@ struct rapl_domain_data {
>  	unsigned long timestamp;
>  };
>  
> +struct msrl_action {
> +	u32 msr_no;
> +	u64 clear_mask;
> +	u64 set_mask;
> +	int err;
> +};
>  
>  #define	DOMAIN_STATE_INACTIVE           BIT(0)
>  #define	DOMAIN_STATE_POWER_LIMIT_SET    BIT(1)
> @@ -263,7 +269,11 @@ static struct rapl_package *find_package_by_id(int id)
>  /* caller to ensure CPU hotplug lock is held */
>  static int find_active_cpu_on_package(int package_id)
>  {
> -	int i;
> +	/* try to avoid remote cpu call, use raw since we are preemptible */
> +	int i = raw_smp_processor_id();
> +
> +	if (topology_physical_package_id(i) == package_id)
> +		return i;
>  
>  	for_each_online_cpu(i) {
>  		if (topology_physical_package_id(i) == package_id)
> @@ -800,35 +810,62 @@ static int rapl_read_data_raw(struct rapl_domain *rd,
>  	return 0;
>  }
>  
> +
> +static int msrl_update_safe(u32 msr_no, u64 clear_mask, u64 set_mask)
> +{
> +	int err;
> +	u64 val;
> +
> +	err = rdmsrl_safe(msr_no, &val);
> +	if (err)
> +		goto out;
> +
> +	val &= ~clear_mask;
> +	val |= set_mask;
> +
> +	err = wrmsrl_safe(msr_no, val);
> +
> +out:
> +	return err;
> +}
> +
> +static void msrl_update_func(void *info)
> +{
> +	struct msrl_action *ma = info;
> +
> +	ma->err = msrl_update_safe(ma->msr_no, ma->clear_mask, ma->set_mask);
> +}
> +
>  /* Similar use of primitive info in the read counterpart */
>  static int rapl_write_data_raw(struct rapl_domain *rd,
>  			enum rapl_primitives prim,
>  			unsigned long long value)
>  {
> -	u64 msr_val;
> -	u32 msr;
>  	struct rapl_primitive_info *rp = &rpi[prim];
>  	int cpu;
> +	u64 bits;
> +	struct msrl_action ma;
> +	int ret;
>  
>  	cpu = find_active_cpu_on_package(rd->package_id);
>  	if (cpu < 0)
>  		return cpu;
> -	msr = rd->msrs[rp->id];
> -	if (rdmsrl_safe_on_cpu(cpu, msr, &msr_val)) {
> -		dev_dbg(&rd->power_zone.dev,
> -			"failed to read msr 0x%x on cpu %d\n", msr, cpu);
> -		return -EIO;
> -	}
> -	value = rapl_unit_xlate(rd, rd->package_id, rp->unit, value, 1);
> -	msr_val &= ~rp->mask;
> -	msr_val |= value << rp->shift;
> -	if (wrmsrl_safe_on_cpu(cpu, msr, msr_val)) {
> -		dev_dbg(&rd->power_zone.dev,
> -			"failed to write msr 0x%x on cpu %d\n", msr, cpu);
> -		return -EIO;
> -	}
>  
> -	return 0;
> +	bits = rapl_unit_xlate(rd, rd->package_id, rp->unit, value, 1);
> +	bits |= bits << rp->shift;
> +	memset(&ma, 0, sizeof(ma));
> +
> +	ma.msr_no = rd->msrs[rp->id];
> +	ma.clear_mask = rp->mask;
> +	ma.set_mask = bits;
> +
> +	ret = smp_call_function_single(cpu, msrl_update_func, &ma, 1);
> +	if (ret)
> +		WARN_ON_ONCE(ret);
> +	else
> +		ret = ma.err;
> +
> +	return ret;
>  }
>  
>  /*
> @@ -893,6 +930,21 @@ static int rapl_check_unit_atom(struct rapl_package *rp, int cpu)
>  	return 0;
>  }
>  
> +static void power_limit_irq_save_cpu(void *info)
> +{
> +	u32 l, h = 0;
> +	struct rapl_package *rp = (struct rapl_package *)info;
> +
> +	/* save the state of PLN irq mask bit before disabling it */
> +	rdmsr_safe(MSR_IA32_PACKAGE_THERM_INTERRUPT, &l, &h);
> +	if (!(rp->power_limit_irq & PACKAGE_PLN_INT_SAVED)) {
> +		rp->power_limit_irq = l & PACKAGE_THERM_INT_PLN_ENABLE;
> +		rp->power_limit_irq |= PACKAGE_PLN_INT_SAVED;
> +	}
> +	l &= ~PACKAGE_THERM_INT_PLN_ENABLE;
> +	wrmsr_safe(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
> +}
> +
>  
>  /* REVISIT:
>   * When package power limit is set artificially low by RAPL, LVT
> @@ -906,7 +958,6 @@ static int rapl_check_unit_atom(struct rapl_package *rp, int cpu)
>  
>  static void package_power_limit_irq_save(int package_id)
>  {
> -	u32 l, h = 0;
>  	int cpu;
>  	struct rapl_package *rp;
>  
> @@ -920,20 +971,27 @@ static void package_power_limit_irq_save(int package_id)
>  	cpu = find_active_cpu_on_package(package_id);
>  	if (cpu < 0)
>  		return;
> -	/* save the state of PLN irq mask bit before disabling it */
> -	rdmsr_safe_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, &l, &h);
> -	if (!(rp->power_limit_irq & PACKAGE_PLN_INT_SAVED)) {
> -		rp->power_limit_irq = l & PACKAGE_THERM_INT_PLN_ENABLE;
> -		rp->power_limit_irq |= PACKAGE_PLN_INT_SAVED;
> -	}
> -	l &= ~PACKAGE_THERM_INT_PLN_ENABLE;
> -	wrmsr_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
> +	smp_call_function_single(cpu, power_limit_irq_save_cpu, rp, 1);
> +}
> +
> +static void power_limit_irq_restore_cpu(void *info)
> +{
> +	u32 l, h = 0;
> +	struct rapl_package *rp = (struct rapl_package *)info;
> +
> +	rdmsr_safe(MSR_IA32_PACKAGE_THERM_INTERRUPT, &l, &h);
> +
> +	if (rp->power_limit_irq & PACKAGE_THERM_INT_PLN_ENABLE)
> +		l |= PACKAGE_THERM_INT_PLN_ENABLE;
> +	else
> +		l &= ~PACKAGE_THERM_INT_PLN_ENABLE;
> +
> +	wrmsr_safe(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
>  }
>  
>  /* restore per package power limit interrupt enable state */
>  static void package_power_limit_irq_restore(int package_id)
>  {
> -	u32 l, h;
>  	int cpu;
>  	struct rapl_package *rp;
>  
> @@ -951,14 +1009,8 @@ static void package_power_limit_irq_restore(int package_id)
>  	/* irq enable state not saved, nothing to restore */
>  	if (!(rp->power_limit_irq & PACKAGE_PLN_INT_SAVED))
>  		return;
> -	rdmsr_safe_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, &l, &h);
> -
> -	if (rp->power_limit_irq & PACKAGE_THERM_INT_PLN_ENABLE)
> -		l |= PACKAGE_THERM_INT_PLN_ENABLE;
> -	else
> -		l &= ~PACKAGE_THERM_INT_PLN_ENABLE;
>  
> -	wrmsr_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
> +	smp_call_function_single(cpu, power_limit_irq_restore_cpu, rp, 1);
>  }
>  
>  static void set_floor_freq_default(struct rapl_domain *rd, bool mode)
> 


^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH v3] powercap/rapl: reduce ipi calls
  2016-02-16  0:54 ` Rafael J. Wysocki
@ 2016-02-17 16:04   ` Thomas Gleixner
  2016-02-17 18:04     ` Jacob Pan
  0 siblings, 1 reply; 5+ messages in thread
From: Thomas Gleixner @ 2016-02-17 16:04 UTC (permalink / raw)
  To: Rafael J. Wysocki
  Cc: Jacob Pan, Peter Zijlstra, LKML, Linux PM, Rafael Wysocki,
	Srinivas Pandruvada

On Tue, 16 Feb 2016, Rafael J. Wysocki wrote:
> On Friday, January 15, 2016 11:04:13 AM Jacob Pan wrote:
> > Reduce remote CPU calls for MSR access by combining read
> > modify write into one function. Also optimize search for active CPU on
> > package such that remote CPU is not used if the current CPU is already
> > on the target package.
> > 
> > Suggested-by: Peter Zijlstra <peterz@infradead.org>
> > Signed-off-by: Jacob Pan <jacob.jun.pan@linux.intel.com>
> 
> I'm wondering if there are any comments on this one?

Yes.
 
> > ---
> >  drivers/powercap/intel_rapl.c | 122 ++++++++++++++++++++++++++++++------------
> >  1 file changed, 87 insertions(+), 35 deletions(-)
> > 
> > diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
> > index 48747c2..2dcd95f 100644
> > --- a/drivers/powercap/intel_rapl.c
> > +++ b/drivers/powercap/intel_rapl.c
> > @@ -133,6 +133,12 @@ struct rapl_domain_data {
> >  	unsigned long timestamp;
> >  };
> >  
> > +struct msrl_action {
> > +	u32 msr_no;
> > +	u64 clear_mask;
> > +	u64 set_mask;
> > +	int err;
> > +};
> >  
> >  #define	DOMAIN_STATE_INACTIVE           BIT(0)
> >  #define	DOMAIN_STATE_POWER_LIMIT_SET    BIT(1)
> > @@ -263,7 +269,11 @@ static struct rapl_package *find_package_by_id(int id)
> >  /* caller to ensure CPU hotplug lock is held */
> >  static int find_active_cpu_on_package(int package_id)
> >  {
> > -	int i;
> > +	/* try to avoid remote cpu call, use raw since we are preemptible */
> > +	int i = raw_smp_processor_id();
> > +
> > +	if (topology_physical_package_id(i) == package_id)
> > +		return i;
> >  
> >  	for_each_online_cpu(i) {
> >  		if (topology_physical_package_id(i) == package_id)

This is still crap. We really want a proper handling for that which does
not require these silly for_each_cpu() loops.

That code already has a hotplug notifier, so you can track one online cpu in
the package like we do in cqm and other facilities.

The first online cpu of a package sets itself as 'owner'. On unplug it looks
up:

	rpl->cpu = cpumask_any_but(topology_core_cpumask(cpu), cpu);

So if this is the last cpu then rpl->cpu becomes nr_cpu_ids.

So that whole nonsense can be avoided completely.

Thanks,

	tglx







^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH v3] powercap/rapl: reduce ipi calls
  2016-02-17 16:04   ` Thomas Gleixner
@ 2016-02-17 18:04     ` Jacob Pan
  2016-02-17 18:22       ` Thomas Gleixner
  0 siblings, 1 reply; 5+ messages in thread
From: Jacob Pan @ 2016-02-17 18:04 UTC (permalink / raw)
  To: Thomas Gleixner
  Cc: Rafael J. Wysocki, Peter Zijlstra, LKML, Linux PM, Rafael Wysocki,
	Srinivas Pandruvada, jacob.jun.pan

On Wed, 17 Feb 2016 17:04:41 +0100 (CET)
Thomas Gleixner <tglx@linutronix.de> wrote:

> > >  
> > >  	for_each_online_cpu(i) {
> > >  		if (topology_physical_package_id(i) ==
> > > package_id)  
> 
> This is still crap. We really want a proper handling for that which
> does not require these silly for_each_cpu() loops.
> 
> That code already has a hotplug notifier, so you can track one online
> cpu in the package like we do in cqm and other facilities.
> 
> The first online cpu of a package sets itself as 'owner'. On unplug
> it looks up:
> 
> 	rpl->cpu = cpumask_any_but(topology_core_cpumask(cpu), cpu);
> 
> So if this is the last cpu then rpl->cpu becomes nr_cpu_ids.
> 
> So that whole nonsense can be avoided completely.
ok, that would work nicely, similar to cqm_pick_event_reader().
thanks for the tip. just fyi, unlike perf cqm, rapl runtime change is
expected to be low frequency.

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH v3] powercap/rapl: reduce ipi calls
  2016-02-17 18:04     ` Jacob Pan
@ 2016-02-17 18:22       ` Thomas Gleixner
  0 siblings, 0 replies; 5+ messages in thread
From: Thomas Gleixner @ 2016-02-17 18:22 UTC (permalink / raw)
  To: Jacob Pan
  Cc: Rafael J. Wysocki, Peter Zijlstra, LKML, Linux PM, Rafael Wysocki,
	Srinivas Pandruvada

On Wed, 17 Feb 2016, Jacob Pan wrote:
> On Wed, 17 Feb 2016 17:04:41 +0100 (CET)
> Thomas Gleixner <tglx@linutronix.de> wrote:
> 
> > > >  
> > > >  	for_each_online_cpu(i) {
> > > >  		if (topology_physical_package_id(i) ==
> > > > package_id)  
> > 
> > This is still crap. We really want a proper handling for that which
> > does not require these silly for_each_cpu() loops.
> > 
> > That code already has a hotplug notifier, so you can track one online
> > cpu in the package like we do in cqm and other facilities.
> > 
> > The first online cpu of a package sets itself as 'owner'. On unplug
> > it looks up:
> > 
> > 	rpl->cpu = cpumask_any_but(topology_core_cpumask(cpu), cpu);
> > 
> > So if this is the last cpu then rpl->cpu becomes nr_cpu_ids.
> > 
> > So that whole nonsense can be avoided completely.
> ok, that would work nicely, similar to cqm_pick_event_reader().
> thanks for the tip. just fyi, unlike perf cqm, rapl runtime change is
> expected to be low frequency.

That still does not justify crap like this. And please use the version of pick
reader which I posted a couple of hours ago.

       http://marc.info/?l=linux-kernel&m=145572542221627&w=2

Thanks,

	tglx


^ permalink raw reply	[flat|nested] 5+ messages in thread

end of thread, other threads:[~2016-02-17 18:23 UTC | newest]

Thread overview: 5+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2016-01-15 19:04 [PATCH v3] powercap/rapl: reduce ipi calls Jacob Pan
2016-02-16  0:54 ` Rafael J. Wysocki
2016-02-17 16:04   ` Thomas Gleixner
2016-02-17 18:04     ` Jacob Pan
2016-02-17 18:22       ` Thomas Gleixner

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).