* [PATCH 1/2] perf/x86/rapl: Move the pmu allocation out of CPU hotplug
@ 2024-09-13 17:10 kan.liang
2024-09-13 17:10 ` [PATCH 2/2] perf/x86/rapl: Clean up cpumask and hotplug kan.liang
2024-10-08 16:33 ` [PATCH 1/2] perf/x86/rapl: Move the pmu allocation out of CPU hotplug Thomas Gleixner
0 siblings, 2 replies; 8+ messages in thread
From: kan.liang @ 2024-09-13 17:10 UTC (permalink / raw)
To: peterz, mingo, linux-kernel; +Cc: Kan Liang, Oliver Sang, Dhananjay Ugwekar
From: Kan Liang <kan.liang@linux.intel.com>
The rapl pmu just needs to be allocated once. It doesn't matter whether it is
allocated at each CPU hotplug or in the global init_rapl_pmus().
Move the pmu allocation to init_rapl_pmus(), so that the generic hotplug
support can be applied.
Tested-by: Oliver Sang <oliver.sang@intel.com>
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Cc: Dhananjay Ugwekar <Dhananjay.Ugwekar@amd.com>
---
Rebase on top of perf/core branch
commit bac2a553dbf2 ("perf/x86/intel: Add PMU support for ArrowLake-H")
Close the UBSAN issue.
https://lore.kernel.org/oe-lkp/202409111521.c7c6d56f-lkp@intel.com/
arch/x86/events/rapl.c | 50 +++++++++++++++++++++++++++++++-----------
1 file changed, 37 insertions(+), 13 deletions(-)
diff --git a/arch/x86/events/rapl.c b/arch/x86/events/rapl.c
index a481a939862e..a6f31978a5b4 100644
--- a/arch/x86/events/rapl.c
+++ b/arch/x86/events/rapl.c
@@ -602,19 +602,8 @@ static int rapl_cpu_online(unsigned int cpu)
struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
int target;
- if (!pmu) {
- pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
- if (!pmu)
- return -ENOMEM;
-
- raw_spin_lock_init(&pmu->lock);
- INIT_LIST_HEAD(&pmu->active_list);
- pmu->pmu = &rapl_pmus->pmu;
- pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
- rapl_hrtimer_init(pmu);
-
- rapl_pmus->pmus[rapl_pmu_idx] = pmu;
- }
+ if (!pmu)
+ return -ENOMEM;
/*
* Check if there is an online cpu in the package which collects rapl
@@ -707,6 +696,38 @@ static const struct attribute_group *rapl_attr_update[] = {
NULL,
};
+static void __init init_rapl_pmu(void)
+{
+ struct rapl_pmu *pmu;
+ s32 rapl_pmu_idx;
+ int cpu;
+
+ cpus_read_lock();
+
+ for_each_cpu(cpu, cpu_online_mask) {
+ pmu = cpu_to_rapl_pmu(cpu);
+ if (pmu)
+ continue;
+ rapl_pmu_idx = get_rapl_pmu_idx(cpu);
+ if (WARN_ON_ONCE(rapl_pmu_idx < 0))
+ continue;
+
+ pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
+ if (!pmu)
+ continue;
+
+ raw_spin_lock_init(&pmu->lock);
+ INIT_LIST_HEAD(&pmu->active_list);
+ pmu->pmu = &rapl_pmus->pmu;
+ pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
+ rapl_hrtimer_init(pmu);
+
+ rapl_pmus->pmus[rapl_pmu_idx] = pmu;
+ }
+
+ cpus_read_unlock();
+}
+
static int __init init_rapl_pmus(void)
{
int nr_rapl_pmu = topology_max_packages();
@@ -730,6 +751,9 @@ static int __init init_rapl_pmus(void)
rapl_pmus->pmu.read = rapl_pmu_event_read;
rapl_pmus->pmu.module = THIS_MODULE;
rapl_pmus->pmu.capabilities = PERF_PMU_CAP_NO_EXCLUDE;
+
+ init_rapl_pmu();
+
return 0;
}
--
2.38.1
^ permalink raw reply related [flat|nested] 8+ messages in thread
* [PATCH 2/2] perf/x86/rapl: Clean up cpumask and hotplug
2024-09-13 17:10 [PATCH 1/2] perf/x86/rapl: Move the pmu allocation out of CPU hotplug kan.liang
@ 2024-09-13 17:10 ` kan.liang
2024-10-08 16:33 ` [PATCH 1/2] perf/x86/rapl: Move the pmu allocation out of CPU hotplug Thomas Gleixner
1 sibling, 0 replies; 8+ messages in thread
From: kan.liang @ 2024-09-13 17:10 UTC (permalink / raw)
To: peterz, mingo, linux-kernel; +Cc: Kan Liang, Oliver Sang, Dhananjay Ugwekar
From: Kan Liang <kan.liang@linux.intel.com>
The rapl pmu is die scoped, which is now supported by the generic perf_event
subsystem.
Set the scope for the rapl PMU and remove all the cpumask and hotplug
codes.
Tested-by: Oliver Sang <oliver.sang@intel.com>
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Cc: Dhananjay Ugwekar <Dhananjay.Ugwekar@amd.com>
---
arch/x86/events/rapl.c | 90 +++-----------------------------------
include/linux/cpuhotplug.h | 1 -
2 files changed, 6 insertions(+), 85 deletions(-)
diff --git a/arch/x86/events/rapl.c b/arch/x86/events/rapl.c
index a6f31978a5b4..74b057737ea1 100644
--- a/arch/x86/events/rapl.c
+++ b/arch/x86/events/rapl.c
@@ -148,7 +148,6 @@ struct rapl_model {
/* 1/2^hw_unit Joule */
static int rapl_hw_unit[NR_RAPL_DOMAINS] __read_mostly;
static struct rapl_pmus *rapl_pmus;
-static cpumask_t rapl_cpu_mask;
static unsigned int rapl_cntr_mask;
static u64 rapl_timer_ms;
static struct perf_msr *rapl_msrs;
@@ -369,8 +368,6 @@ static int rapl_pmu_event_init(struct perf_event *event)
if (event->cpu < 0)
return -EINVAL;
- event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;
-
if (!cfg || cfg >= NR_RAPL_DOMAINS + 1)
return -EINVAL;
@@ -389,7 +386,6 @@ static int rapl_pmu_event_init(struct perf_event *event)
pmu = cpu_to_rapl_pmu(event->cpu);
if (!pmu)
return -EINVAL;
- event->cpu = pmu->cpu;
event->pmu_private = pmu;
event->hw.event_base = rapl_msrs[bit].msr;
event->hw.config = cfg;
@@ -403,23 +399,6 @@ static void rapl_pmu_event_read(struct perf_event *event)
rapl_event_update(event);
}
-static ssize_t rapl_get_attr_cpumask(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return cpumap_print_to_pagebuf(true, buf, &rapl_cpu_mask);
-}
-
-static DEVICE_ATTR(cpumask, S_IRUGO, rapl_get_attr_cpumask, NULL);
-
-static struct attribute *rapl_pmu_attrs[] = {
- &dev_attr_cpumask.attr,
- NULL,
-};
-
-static struct attribute_group rapl_pmu_attr_group = {
- .attrs = rapl_pmu_attrs,
-};
-
RAPL_EVENT_ATTR_STR(energy-cores, rapl_cores, "event=0x01");
RAPL_EVENT_ATTR_STR(energy-pkg , rapl_pkg, "event=0x02");
RAPL_EVENT_ATTR_STR(energy-ram , rapl_ram, "event=0x03");
@@ -467,7 +446,6 @@ static struct attribute_group rapl_pmu_format_group = {
};
static const struct attribute_group *rapl_attr_groups[] = {
- &rapl_pmu_attr_group,
&rapl_pmu_format_group,
&rapl_pmu_events_group,
NULL,
@@ -570,54 +548,6 @@ static struct perf_msr amd_rapl_msrs[] = {
[PERF_RAPL_PSYS] = { 0, &rapl_events_psys_group, NULL, false, 0 },
};
-static int rapl_cpu_offline(unsigned int cpu)
-{
- struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
- int target;
-
- /* Check if exiting cpu is used for collecting rapl events */
- if (!cpumask_test_and_clear_cpu(cpu, &rapl_cpu_mask))
- return 0;
-
- pmu->cpu = -1;
- /* Find a new cpu to collect rapl events */
- target = cpumask_any_but(get_rapl_pmu_cpumask(cpu), cpu);
-
- /* Migrate rapl events to the new target */
- if (target < nr_cpu_ids) {
- cpumask_set_cpu(target, &rapl_cpu_mask);
- pmu->cpu = target;
- perf_pmu_migrate_context(pmu->pmu, cpu, target);
- }
- return 0;
-}
-
-static int rapl_cpu_online(unsigned int cpu)
-{
- s32 rapl_pmu_idx = get_rapl_pmu_idx(cpu);
- if (rapl_pmu_idx < 0) {
- pr_err("topology_logical_(package/die)_id() returned a negative value");
- return -EINVAL;
- }
- struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
- int target;
-
- if (!pmu)
- return -ENOMEM;
-
- /*
- * Check if there is an online cpu in the package which collects rapl
- * events already.
- */
- target = cpumask_any_and(&rapl_cpu_mask, get_rapl_pmu_cpumask(cpu));
- if (target < nr_cpu_ids)
- return 0;
-
- cpumask_set_cpu(cpu, &rapl_cpu_mask);
- pmu->cpu = cpu;
- return 0;
-}
-
static int rapl_check_hw_unit(struct rapl_model *rm)
{
u64 msr_rapl_power_unit_bits;
@@ -731,9 +661,12 @@ static void __init init_rapl_pmu(void)
static int __init init_rapl_pmus(void)
{
int nr_rapl_pmu = topology_max_packages();
+ int rapl_pmu_scope = PERF_PMU_SCOPE_PKG;
- if (!rapl_pmu_is_pkg_scope())
+ if (!rapl_pmu_is_pkg_scope()) {
nr_rapl_pmu *= topology_max_dies_per_package();
+ rapl_pmu_scope = PERF_PMU_SCOPE_DIE;
+ }
rapl_pmus = kzalloc(struct_size(rapl_pmus, pmus, nr_rapl_pmu), GFP_KERNEL);
if (!rapl_pmus)
@@ -749,6 +682,7 @@ static int __init init_rapl_pmus(void)
rapl_pmus->pmu.start = rapl_pmu_event_start;
rapl_pmus->pmu.stop = rapl_pmu_event_stop;
rapl_pmus->pmu.read = rapl_pmu_event_read;
+ rapl_pmus->pmu.scope = rapl_pmu_scope;
rapl_pmus->pmu.module = THIS_MODULE;
rapl_pmus->pmu.capabilities = PERF_PMU_CAP_NO_EXCLUDE;
@@ -900,24 +834,13 @@ static int __init rapl_pmu_init(void)
if (ret)
return ret;
- /*
- * Install callbacks. Core will call them for each online cpu.
- */
- ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_RAPL_ONLINE,
- "perf/x86/rapl:online",
- rapl_cpu_online, rapl_cpu_offline);
- if (ret)
- goto out;
-
ret = perf_pmu_register(&rapl_pmus->pmu, "power", -1);
if (ret)
- goto out1;
+ goto out;
rapl_advertise();
return 0;
-out1:
- cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE);
out:
pr_warn("Initialization failed (%d), disabled\n", ret);
cleanup_rapl_pmus();
@@ -927,7 +850,6 @@ module_init(rapl_pmu_init);
static void __exit intel_rapl_exit(void)
{
- cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_RAPL_ONLINE);
perf_pmu_unregister(&rapl_pmus->pmu);
cleanup_rapl_pmus();
}
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 2101ae2ecfca..801053c45c88 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -207,7 +207,6 @@ enum cpuhp_state {
CPUHP_AP_PERF_X86_UNCORE_ONLINE,
CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE,
CPUHP_AP_PERF_X86_AMD_POWER_ONLINE,
- CPUHP_AP_PERF_X86_RAPL_ONLINE,
CPUHP_AP_PERF_S390_CF_ONLINE,
CPUHP_AP_PERF_S390_SF_ONLINE,
CPUHP_AP_PERF_ARM_CCI_ONLINE,
--
2.38.1
^ permalink raw reply related [flat|nested] 8+ messages in thread
* Re: [PATCH 1/2] perf/x86/rapl: Move the pmu allocation out of CPU hotplug
2024-09-13 17:10 [PATCH 1/2] perf/x86/rapl: Move the pmu allocation out of CPU hotplug kan.liang
2024-09-13 17:10 ` [PATCH 2/2] perf/x86/rapl: Clean up cpumask and hotplug kan.liang
@ 2024-10-08 16:33 ` Thomas Gleixner
2024-10-08 20:10 ` Liang, Kan
1 sibling, 1 reply; 8+ messages in thread
From: Thomas Gleixner @ 2024-10-08 16:33 UTC (permalink / raw)
To: kan.liang, peterz, mingo, linux-kernel
Cc: Kan Liang, Oliver Sang, Dhananjay Ugwekar
On Fri, Sep 13 2024 at 10:10, kan liang wrote:
> +static void __init init_rapl_pmu(void)
> +{
> + struct rapl_pmu *pmu;
> + s32 rapl_pmu_idx;
> + int cpu;
> +
> + cpus_read_lock();
> +
> + for_each_cpu(cpu, cpu_online_mask) {
How is that supposed to work, when not all CPUs are online when
init_rapl_pmus() is invoked?
Thanks,
tglx
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [PATCH 1/2] perf/x86/rapl: Move the pmu allocation out of CPU hotplug
2024-10-08 16:33 ` [PATCH 1/2] perf/x86/rapl: Move the pmu allocation out of CPU hotplug Thomas Gleixner
@ 2024-10-08 20:10 ` Liang, Kan
2024-10-08 20:23 ` Thomas Gleixner
0 siblings, 1 reply; 8+ messages in thread
From: Liang, Kan @ 2024-10-08 20:10 UTC (permalink / raw)
To: Thomas Gleixner, peterz, mingo, linux-kernel
Cc: Oliver Sang, Dhananjay Ugwekar
On 2024-10-08 12:33 p.m., Thomas Gleixner wrote:
> On Fri, Sep 13 2024 at 10:10, kan liang wrote:
>> +static void __init init_rapl_pmu(void)
>> +{
>> + struct rapl_pmu *pmu;
>> + s32 rapl_pmu_idx;
>> + int cpu;
>> +
>> + cpus_read_lock();
>> +
>> + for_each_cpu(cpu, cpu_online_mask) {
>
> How is that supposed to work, when not all CPUs are online when
> init_rapl_pmus() is invoked?
>
RAPL is a module. The module_init() is called during do_initcalls(),
which is after smp_init(). The cpu_online_mask has been set up in
smp_init().
I also patched the kernel to double check. The cpu_online_mask indeed
shows all the online CPUs.
[ 7.021212] smp: Brought up 1 node, 48 CPUs
[ 7.021212] smpboot: Total of 48 processors activated (211200.00
BogoMIPS)
... ...
[ 16.557323] RAPL PMU: rapl_pmu_init: cpu_online_mask 0xffffffffffff
Thanks,
Kan
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [PATCH 1/2] perf/x86/rapl: Move the pmu allocation out of CPU hotplug
2024-10-08 20:10 ` Liang, Kan
@ 2024-10-08 20:23 ` Thomas Gleixner
2024-10-08 20:36 ` Thomas Gleixner
0 siblings, 1 reply; 8+ messages in thread
From: Thomas Gleixner @ 2024-10-08 20:23 UTC (permalink / raw)
To: Liang, Kan, peterz, mingo, linux-kernel; +Cc: Oliver Sang, Dhananjay Ugwekar
On Tue, Oct 08 2024 at 16:10, Kan Liang wrote:
> On 2024-10-08 12:33 p.m., Thomas Gleixner wrote:
>> On Fri, Sep 13 2024 at 10:10, kan liang wrote:
>>> +static void __init init_rapl_pmu(void)
>>> +{
>>> + struct rapl_pmu *pmu;
>>> + s32 rapl_pmu_idx;
>>> + int cpu;
>>> +
>>> + cpus_read_lock();
>>> +
>>> + for_each_cpu(cpu, cpu_online_mask) {
>>
>> How is that supposed to work, when not all CPUs are online when
>> init_rapl_pmus() is invoked?
>>
>
> RAPL is a module. The module_init() is called during do_initcalls(),
> which is after the smp_init(). The cpu_online_mask has been setup in the
> smp_init().
>
> I also patched the kernel to double check. The cpu_online_mask indeed
> shows all the online CPUs.
>
> [ 7.021212] smp: Brought up 1 node, 48 CPUs
> [ 7.021212] smpboot: Total of 48 processors activated (211200.00
> BogoMIPS)
> ... ...
> [ 16.557323] RAPL PMU: rapl_pmu_init: cpu_online_mask 0xffffffffffff
1) Start your kernel with maxcpus=2 (not recommended, but ...)
2) Load the module
3) Online the rest of the CPUs from userspace
If your machine has more than one die you might be surprised...
Thanks,
tglx
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [PATCH 1/2] perf/x86/rapl: Move the pmu allocation out of CPU hotplug
2024-10-08 20:23 ` Thomas Gleixner
@ 2024-10-08 20:36 ` Thomas Gleixner
2024-10-08 21:05 ` Liang, Kan
0 siblings, 1 reply; 8+ messages in thread
From: Thomas Gleixner @ 2024-10-08 20:36 UTC (permalink / raw)
To: Liang, Kan, peterz, mingo, linux-kernel; +Cc: Oliver Sang, Dhananjay Ugwekar
On Tue, Oct 08 2024 at 22:23, Thomas Gleixner wrote:
> On Tue, Oct 08 2024 at 16:10, Kan Liang wrote:
>> On 2024-10-08 12:33 p.m., Thomas Gleixner wrote:
>>> On Fri, Sep 13 2024 at 10:10, kan liang wrote:
>>>> +static void __init init_rapl_pmu(void)
>>>> +{
>>>> + struct rapl_pmu *pmu;
>>>> + s32 rapl_pmu_idx;
>>>> + int cpu;
>>>> +
>>>> + cpus_read_lock();
>>>> +
>>>> + for_each_cpu(cpu, cpu_online_mask) {
>>>
>>> How is that supposed to work, when not all CPUs are online when
>>> init_rapl_pmus() is invoked?
>>>
>>
>> RAPL is a module. The module_init() is called during do_initcalls(),
>> which is after the smp_init(). The cpu_online_mask has been setup in the
>> smp_init().
>>
>> I also patched the kernel to double check. The cpu_online_mask indeed
>> shows all the online CPUs.
>>
>> [ 7.021212] smp: Brought up 1 node, 48 CPUs
>> [ 7.021212] smpboot: Total of 48 processors activated (211200.00
>> BogoMIPS)
>> ... ...
>> [ 16.557323] RAPL PMU: rapl_pmu_init: cpu_online_mask 0xffffffffffff
>
> 1) Start your kernel with maxcpus=2 (not recommended, but ...)
> 2) Load the module
> 3) Online the rest of the CPUs from userspace
>
> If your machine has more than one die you might be surprised...
You can make this work because the new topology code allows you to
retrieve the possible number of cores/dies/packages even when they have
not been onlined yet.
Thanks,
tglx
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [PATCH 1/2] perf/x86/rapl: Move the pmu allocation out of CPU hotplug
2024-10-08 20:36 ` Thomas Gleixner
@ 2024-10-08 21:05 ` Liang, Kan
2024-10-08 22:38 ` Thomas Gleixner
0 siblings, 1 reply; 8+ messages in thread
From: Liang, Kan @ 2024-10-08 21:05 UTC (permalink / raw)
To: Thomas Gleixner, peterz, mingo, linux-kernel
Cc: Oliver Sang, Dhananjay Ugwekar
On 2024-10-08 4:36 p.m., Thomas Gleixner wrote:
> On Tue, Oct 08 2024 at 22:23, Thomas Gleixner wrote:
>> On Tue, Oct 08 2024 at 16:10, Kan Liang wrote:
>>> On 2024-10-08 12:33 p.m., Thomas Gleixner wrote:
>>>> On Fri, Sep 13 2024 at 10:10, kan liang wrote:
>>>>> +static void __init init_rapl_pmu(void)
>>>>> +{
>>>>> + struct rapl_pmu *pmu;
>>>>> + s32 rapl_pmu_idx;
>>>>> + int cpu;
>>>>> +
>>>>> + cpus_read_lock();
>>>>> +
>>>>> + for_each_cpu(cpu, cpu_online_mask) {
>>>>
>>>> How is that supposed to work, when not all CPUs are online when
>>>> init_rapl_pmus() is invoked?
>>>>
>>>
>>> RAPL is a module. The module_init() is called during do_initcalls(),
>>> which is after the smp_init(). The cpu_online_mask has been setup in the
>>> smp_init().
>>>
>>> I also patched the kernel to double check. The cpu_online_mask indeed
>>> shows all the online CPUs.
>>>
>>> [ 7.021212] smp: Brought up 1 node, 48 CPUs
>>> [ 7.021212] smpboot: Total of 48 processors activated (211200.00
>>> BogoMIPS)
>>> ... ...
>>> [ 16.557323] RAPL PMU: rapl_pmu_init: cpu_online_mask 0xffffffffffff
>>
>> 1) Start your kernel with maxcpus=2 (not recommended, but ...)
>> 2) Load the module
>> 3) Online the rest of the CPUs from userspace
>>
>> If your machine has more than one die you might be surprised...
Thanks. I will find a 2 sockets machine and give it a try.
>
> You can make this work because the new topology code allows you to
> retrieve the possible number of cores/dies/packages even when they have
> not been onlined yet.
>
Actually, I think the possible CPU mask should be good enough here. The
init_rapl_pmu() just intends to allocate the space for a pmu in each die.
The worst case of using a possible mask is that some space may be
wasted, when there is no online CPUs on a die. But it should be an
unusual case. It should be harmless.
Thanks,
Kan
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [PATCH 1/2] perf/x86/rapl: Move the pmu allocation out of CPU hotplug
2024-10-08 21:05 ` Liang, Kan
@ 2024-10-08 22:38 ` Thomas Gleixner
0 siblings, 0 replies; 8+ messages in thread
From: Thomas Gleixner @ 2024-10-08 22:38 UTC (permalink / raw)
To: Liang, Kan, peterz, mingo, linux-kernel; +Cc: Oliver Sang, Dhananjay Ugwekar
On Tue, Oct 08 2024 at 17:05, Kan Liang wrote:
> On 2024-10-08 4:36 p.m., Thomas Gleixner wrote:
>>>
>>> 1) Start your kernel with maxcpus=2 (not recommended, but ...)
>>> 2) Load the module
>>> 3) Online the rest of the CPUs from userspace
>>>
>>> If your machine has more than one die you might be surprised...
>
> Thanks. I will find a 2 sockets machine and give it a try.
>
>>
>> You can make this work because the new topology code allows you to
>> retrieve the possible number of cores/dies/packages even when they have
>> not been onlined yet.
>>
>
> Actually, I think the possible CPU mask should be good enough here. The
> init_rapl_pmu() just intends to allocate the space for a pmu in each die.
>
> The worst case of using a possible mask is that some space may be
> wasted, when there is no online CPUs on a die. But it should be an
> unusual case. It should be harmless.
Right, but you can't use the regular topology functions, which are used
for the cpu to rapl ID mapping, for that because they depend on the CPU
being online. The x86 specific ones, which parse the APIC ID topology
information, can provide that information.
I.e. you only need
topology_max_packages()
topology_max_dies_per_package()
topology_num_cores_per_package()
which provide you the required information to allocate upfront. Later
when the CPUs are actually online the existing mapping functions work.
Thanks,
tglx
^ permalink raw reply [flat|nested] 8+ messages in thread
end of thread, other threads:[~2024-10-08 22:38 UTC | newest]
Thread overview: 8+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2024-09-13 17:10 [PATCH 1/2] perf/x86/rapl: Move the pmu allocation out of CPU hotplug kan.liang
2024-09-13 17:10 ` [PATCH 2/2] perf/x86/rapl: Clean up cpumask and hotplug kan.liang
2024-10-08 16:33 ` [PATCH 1/2] perf/x86/rapl: Move the pmu allocation out of CPU hotplug Thomas Gleixner
2024-10-08 20:10 ` Liang, Kan
2024-10-08 20:23 ` Thomas Gleixner
2024-10-08 20:36 ` Thomas Gleixner
2024-10-08 21:05 ` Liang, Kan
2024-10-08 22:38 ` Thomas Gleixner
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox