* [PATCH RFC] arm64: acpi: reenumerate topology ids
@ 2018-06-22 14:22 Andrew Jones
  2018-06-22 16:20 ` Jeremy Linton
  0 siblings, 1 reply; 3+ messages in thread
From: Andrew Jones @ 2018-06-22 14:22 UTC (permalink / raw)
  To: linux-arm-kernel

When booting with a devicetree that has a cpu-map node, the topology
IDs visible in sysfs are generated with counters. ACPI, on the other
hand, uses ACPI table pointer offsets, which, while guaranteed to be
unique, look a bit weird. Instead, we can generate DT-identical
topology IDs for ACPI by simply using counters for the leaf nodes
and by remapping the non-leaf table pointer offsets to counters.
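
As a minimal, self-contained sketch of the remapping idea (a userspace
illustration only, not the kernel code; the offsets 0x88/0xc0 and the
four-CPU layout are made up, and mktag()/istag() just mirror the
acpi_topology_mktag()/acpi_topology_istag() helpers added below):

#include <stdio.h>

#define mktag(id)	(-((id) + 1))
#define istag(id)	((id) < 0)

int main(void)
{
	/* raw ACPI table offsets for four CPUs' packages, stored as tags */
	int package_id[4] = { mktag(0x88), mktag(0x88), mktag(0xc0), mktag(0xc0) };
	int next_id = 0;

	for (int cpu = 0; cpu < 4; cpu++) {
		if (istag(package_id[cpu])) {
			int tag = package_id[cpu];

			/* remap every CPU sharing this tag to the same counter */
			for (int cpu2 = 0; cpu2 < 4; cpu2++)
				if (package_id[cpu2] == tag)
					package_id[cpu2] = next_id;
			next_id++;
		}
	}

	/* prints package_id 0, 0, 1, 1 instead of 0x88, 0x88, 0xc0, 0xc0 */
	for (int cpu = 0; cpu < 4; cpu++)
		printf("cpu%d: package_id %d\n", cpu, package_id[cpu]);
	return 0;
}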

Cc: Jeremy Linton <jeremy.linton@arm.com>
Signed-off-by: Andrew Jones <drjones@redhat.com>
---
 arch/arm64/kernel/topology.c | 68 ++++++++++++++++++++++++++++--------
 1 file changed, 54 insertions(+), 14 deletions(-)

diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index f845a8617812..4c6aa9eec3d3 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -316,6 +316,10 @@ static void __init reset_cpu_topology(void)
 }
 
 #ifdef CONFIG_ACPI
+
+#define acpi_topology_mktag(id)	(-((id) + 1))
+#define acpi_topology_istag(id)	((id) < 0)
+
 /*
  * Propagate the topology information of the processor_topology_node tree to the
  * cpu_topology array.
@@ -323,27 +327,24 @@ static void __init reset_cpu_topology(void)
 static int __init parse_acpi_topology(void)
 {
 	bool is_threaded;
-	int cpu, topology_id;
+	int package_id = 0, core_id, thread_id;
+	int cpu, ret;
 
 	is_threaded = read_cpuid_mpidr() & MPIDR_MT_BITMASK;
 
 	for_each_possible_cpu(cpu) {
 		int i, cache_id;
 
-		topology_id = find_acpi_cpu_topology(cpu, 0);
-		if (topology_id < 0)
-			return topology_id;
+		ret = find_acpi_cpu_topology(cpu, 0);
+		if (ret < 0)
+			return ret;
 
-		if (is_threaded) {
-			cpu_topology[cpu].thread_id = topology_id;
-			topology_id = find_acpi_cpu_topology(cpu, 1);
-			cpu_topology[cpu].core_id   = topology_id;
-		} else {
-			cpu_topology[cpu].thread_id  = -1;
-			cpu_topology[cpu].core_id    = topology_id;
-		}
-		topology_id = find_acpi_cpu_topology_package(cpu);
-		cpu_topology[cpu].package_id = topology_id;
+		if (is_threaded)
+			ret = find_acpi_cpu_topology(cpu, 1);
+		cpu_topology[cpu].core_id = acpi_topology_mktag(ret);
+
+		ret = find_acpi_cpu_topology_package(cpu);
+		cpu_topology[cpu].package_id = acpi_topology_mktag(ret);
 
 		i = acpi_find_last_cache_level(cpu);
 
@@ -358,6 +359,45 @@ static int __init parse_acpi_topology(void)
 		}
 	}
 
+	for_each_possible_cpu(cpu) {
+		int tag, cpu2;
+
+		if (is_threaded) {
+			if (acpi_topology_istag(cpu_topology[cpu].core_id))
+				thread_id = 0;
+			cpu_topology[cpu].thread_id = thread_id;
+			++thread_id;
+		} else {
+			cpu_topology[cpu].thread_id = -1;
+		}
+
+		if (acpi_topology_istag(cpu_topology[cpu].core_id)) {
+			if (acpi_topology_istag(cpu_topology[cpu].package_id))
+				core_id = 0;
+			tag = cpu_topology[cpu].core_id;
+			cpu_topology[cpu].core_id = core_id;
+
+			if (is_threaded) {
+				for_each_possible_cpu(cpu2) {
+					if (cpu_topology[cpu2].core_id == tag)
+						cpu_topology[cpu2].core_id = core_id;
+				}
+			}
+			++core_id;
+		}
+
+		if (acpi_topology_istag(cpu_topology[cpu].package_id)) {
+			tag = cpu_topology[cpu].package_id;
+			cpu_topology[cpu].package_id = package_id;
+
+			for_each_possible_cpu(cpu2) {
+				if (cpu_topology[cpu2].package_id == tag)
+					cpu_topology[cpu2].package_id = package_id;
+			}
+			++package_id;
+		}
+	}
+
 	return 0;
 }
 
-- 
2.17.1

* [PATCH RFC] arm64: acpi: reenumerate topology ids
  2018-06-22 14:22 [PATCH RFC] arm64: acpi: reenumerate topology ids Andrew Jones
@ 2018-06-22 16:20 ` Jeremy Linton
  2018-06-25  8:03   ` Andrew Jones
  0 siblings, 1 reply; 3+ messages in thread
From: Jeremy Linton @ 2018-06-22 16:20 UTC (permalink / raw)
  To: linux-arm-kernel

Hi,

On 06/22/2018 09:22 AM, Andrew Jones wrote:
> When booting with a devicetree that has a cpu-map node, the topology
> IDs visible in sysfs are generated with counters. ACPI, on the other
> hand, uses ACPI table pointer offsets, which, while guaranteed to be
> unique, look a bit weird. Instead, we can generate DT-identical
> topology IDs for ACPI by simply using counters for the leaf nodes
> and by remapping the non-leaf table pointer offsets to counters.

I think there was some discussion about renumbering the IDs at one
point to make them more 'human readable'; it's a pretty reasonable idea.


> 
> Cc: Jeremy Linton <jeremy.linton@arm.com>
> Signed-off-by: Andrew Jones <drjones@redhat.com>
> ---
>   arch/arm64/kernel/topology.c | 68 ++++++++++++++++++++++++++++--------
>   1 file changed, 54 insertions(+), 14 deletions(-)
> 
> diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
> index f845a8617812..4c6aa9eec3d3 100644
> --- a/arch/arm64/kernel/topology.c
> +++ b/arch/arm64/kernel/topology.c
> @@ -316,6 +316,10 @@ static void __init reset_cpu_topology(void)
>   }
>   
>   #ifdef CONFIG_ACPI
> +
> +#define acpi_topology_mktag(id)	(-((id) + 1))
> +#define acpi_topology_istag(id)	((id) < 0)
> +
>   /*
>    * Propagate the topology information of the processor_topology_node tree to the
>    * cpu_topology array.
> @@ -323,27 +327,24 @@ static void __init reset_cpu_topology(void)
>   static int __init parse_acpi_topology(void)
>   {
>   	bool is_threaded;
> -	int cpu, topology_id;
> +	int package_id = 0, core_id, thread_id;
> +	int cpu, ret;
>   
>   	is_threaded = read_cpuid_mpidr() & MPIDR_MT_BITMASK;
>   
>   	for_each_possible_cpu(cpu) {
>   		int i, cache_id;
>   
> -		topology_id = find_acpi_cpu_topology(cpu, 0);
> -		if (topology_id < 0)
> -			return topology_id;
> +		ret = find_acpi_cpu_topology(cpu, 0);
> +		if (ret < 0)
> +			return ret;
>   
> -		if (is_threaded) {
> -			cpu_topology[cpu].thread_id = topology_id;
> -			topology_id = find_acpi_cpu_topology(cpu, 1);
> -			cpu_topology[cpu].core_id   = topology_id;
> -		} else {
> -			cpu_topology[cpu].thread_id  = -1;
> -			cpu_topology[cpu].core_id    = topology_id;
> -		}
> -		topology_id = find_acpi_cpu_topology_package(cpu);
> -		cpu_topology[cpu].package_id = topology_id;
> +		if (is_threaded)
> +			ret = find_acpi_cpu_topology(cpu, 1);
> +		cpu_topology[cpu].core_id = acpi_topology_mktag(ret);
> +
> +		ret = find_acpi_cpu_topology_package(cpu);
> +		cpu_topology[cpu].package_id = acpi_topology_mktag(ret);
>   
>   		i = acpi_find_last_cache_level(cpu);
>   
> @@ -358,6 +359,45 @@ static int __init parse_acpi_topology(void)
>   		}
>   	}
>   
> +	for_each_possible_cpu(cpu) {
> +		int tag, cpu2;
> +
> +		if (is_threaded) {
> +			if (acpi_topology_istag(cpu_topology[cpu].core_id))
> +				thread_id = 0;
> +			cpu_topology[cpu].thread_id = thread_id;
> +			++thread_id;
> +		} else {
> +			cpu_topology[cpu].thread_id = -1;
> +		}

Doesn't this assume the threads are adjacent to each other? Also, if I'm
understanding this correctly, the threads for the first core are 0..N,
and the threads for the second core are again 0..N? Don't we want to
avoid resetting the thread ID? Although, IIRC, the thread ID isn't
visible, because the core ID dictates the thread siblings.
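
E.g. for a hypothetical single-package machine with two cores of two
threads each, enumerated with the threads adjacent, I read the second
pass as producing:

	cpu0: package_id 0  core_id 0  thread_id 0
	cpu1: package_id 0  core_id 0  thread_id 1
	cpu2: package_id 0  core_id 1  thread_id 0
	cpu3: package_id 0  core_id 1  thread_id 1

so thread_id restarts at 0 for every core, like a DT cpu-map would give.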


> +
> +		if (acpi_topology_istag(cpu_topology[cpu].core_id)) {
> +			if (acpi_topology_istag(cpu_topology[cpu].package_id))
> +				core_id = 0;
> +			tag = cpu_topology[cpu].core_id;
> +			cpu_topology[cpu].core_id = core_id;
> +
> +			if (is_threaded) {
> +				for_each_possible_cpu(cpu2) {
> +					if (cpu_topology[cpu2].core_id == tag)
> +						cpu_topology[cpu2].core_id = core_id;
> +				}
> +			}
> +			++core_id;
> +		}
> +
> +		if (acpi_topology_istag(cpu_topology[cpu].package_id)) {
> +			tag = cpu_topology[cpu].package_id;
> +			cpu_topology[cpu].package_id = package_id;
> +
> +			for_each_possible_cpu(cpu2) {
> +				if (cpu_topology[cpu2].package_id == tag)
> +					cpu_topology[cpu2].package_id = package_id;
> +			}
> +			++package_id;
> +		}
> +	}
> +
>   	return 0;
>   }
>   
> 

* [PATCH RFC] arm64: acpi: reenumerate topology ids
  2018-06-22 16:20 ` Jeremy Linton
@ 2018-06-25  8:03   ` Andrew Jones
  0 siblings, 0 replies; 3+ messages in thread
From: Andrew Jones @ 2018-06-25  8:03 UTC (permalink / raw)
  To: linux-arm-kernel

On Fri, Jun 22, 2018 at 11:20:24AM -0500, Jeremy Linton wrote:
> Hi,
> 
> On 06/22/2018 09:22 AM, Andrew Jones wrote:
> > When booting with a devicetree that has a cpu-map node, the topology
> > IDs visible in sysfs are generated with counters. ACPI, on the other
> > hand, uses ACPI table pointer offsets, which, while guaranteed to be
> > unique, look a bit weird. Instead, we can generate DT-identical
> > topology IDs for ACPI by simply using counters for the leaf nodes
> > and by remapping the non-leaf table pointer offsets to counters.
> 
> I think there was some discussion about renumbering the IDs at one point
> to make them more 'human readable'; it's a pretty reasonable idea.
> 
> 
> > 
> > Cc: Jeremy Linton <jeremy.linton@arm.com>
> > Signed-off-by: Andrew Jones <drjones@redhat.com>
> > ---
> >   arch/arm64/kernel/topology.c | 68 ++++++++++++++++++++++++++++--------
> >   1 file changed, 54 insertions(+), 14 deletions(-)
> > 
> > diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
> > index f845a8617812..4c6aa9eec3d3 100644
> > --- a/arch/arm64/kernel/topology.c
> > +++ b/arch/arm64/kernel/topology.c
> > @@ -316,6 +316,10 @@ static void __init reset_cpu_topology(void)
> >   }
> >   #ifdef CONFIG_ACPI
> > +
> > +#define acpi_topology_mktag(id)	(-((id) + 1))
> > +#define acpi_topology_istag(id)	((id) < 0)
> > +
> >   /*
> >    * Propagate the topology information of the processor_topology_node tree to the
> >    * cpu_topology array.
> > @@ -323,27 +327,24 @@ static void __init reset_cpu_topology(void)
> >   static int __init parse_acpi_topology(void)
> >   {
> >   	bool is_threaded;
> > -	int cpu, topology_id;
> > +	int package_id = 0, core_id, thread_id;
> > +	int cpu, ret;
> >   	is_threaded = read_cpuid_mpidr() & MPIDR_MT_BITMASK;
> >   	for_each_possible_cpu(cpu) {
> >   		int i, cache_id;
> > -		topology_id = find_acpi_cpu_topology(cpu, 0);
> > -		if (topology_id < 0)
> > -			return topology_id;
> > +		ret = find_acpi_cpu_topology(cpu, 0);
> > +		if (ret < 0)
> > +			return ret;
> > -		if (is_threaded) {
> > -			cpu_topology[cpu].thread_id = topology_id;
> > -			topology_id = find_acpi_cpu_topology(cpu, 1);
> > -			cpu_topology[cpu].core_id   = topology_id;
> > -		} else {
> > -			cpu_topology[cpu].thread_id  = -1;
> > -			cpu_topology[cpu].core_id    = topology_id;
> > -		}
> > -		topology_id = find_acpi_cpu_topology_package(cpu);
> > -		cpu_topology[cpu].package_id = topology_id;
> > +		if (is_threaded)
> > +			ret = find_acpi_cpu_topology(cpu, 1);
> > +		cpu_topology[cpu].core_id = acpi_topology_mktag(ret);
> > +
> > +		ret = find_acpi_cpu_topology_package(cpu);
> > +		cpu_topology[cpu].package_id = acpi_topology_mktag(ret);
> >   		i = acpi_find_last_cache_level(cpu);
> > @@ -358,6 +359,45 @@ static int __init parse_acpi_topology(void)
> >   		}
> >   	}
> > +	for_each_possible_cpu(cpu) {
> > +		int tag, cpu2;
> > +
> > +		if (is_threaded) {
> > +			if (acpi_topology_istag(cpu_topology[cpu].core_id))
> > +				thread_id = 0;
> > +			cpu_topology[cpu].thread_id = thread_id;
> > +			++thread_id;
> > +		} else {
> > +			cpu_topology[cpu].thread_id = -1;
> > +		}
> 
> Doesn't this assume the threads are adjacent to each other? Also, if I'm
> understanding this correctly, the threads for the first core are 0..N, and
> the threads for the second core are again 0..N? Don't we want to avoid
> resetting the thread ID? Although, IIRC, the thread ID isn't visible,
> because the core ID dictates the thread siblings.

To make the numbering consistent with DT's cpu-map, the thread_id should
start at 0 for each core. Or, parse_core() should be changed if we'd
rather have topology-wide unique thread IDs. However, I think it makes
more sense to reset them per core, especially considering that if the
topology hasn't already been populated, the fallback in
store_cpu_topology() will simply use MPIDR Aff0, which means a maximum
ID of 255, so the IDs necessarily wrap on machines with more than 255 PEs.
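
As a rough sketch of that last point (an illustration only, not the
store_cpu_topology() code): Aff0 is just the low 8 bits of MPIDR_EL1,
so an ID derived from it alone can never exceed 255 and repeats across
the machine.

#include <stdint.h>
#include <stdio.h>

/* Aff0 is bits [7:0] of MPIDR_EL1, Aff1 is bits [15:8] */
#define AFF0(mpidr)	((unsigned int)((mpidr) & 0xff))

int main(void)
{
	/* hypothetical MPIDRs: the same Aff0 value in two different clusters */
	uint64_t cpu_a = 0x0003;	/* Aff1 = 0, Aff0 = 3 */
	uint64_t cpu_b = 0x0103;	/* Aff1 = 1, Aff0 = 3 */

	/* both print 3: an Aff0-only ID can't stay unique machine-wide */
	printf("%u %u\n", AFF0(cpu_a), AFF0(cpu_b));
	return 0;
}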

I think I understand the concern about the assumption of adjacent table
entries, which also applies to core IDs when they're the leaves. I'll
send a v1 removing that assumption.
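
E.g. with a hypothetical enumeration order that interleaves two cores'
threads (core A thread 0, core B thread 0, core A thread 1, core B
thread 1), I think the second pass as written would give:

	cpu0 (core A): thread_id 0
	cpu1 (core B): thread_id 0
	cpu2 (core A): thread_id 1
	cpu3 (core B): thread_id 2   <-- should be 1

because the thread counter only resets while the core ID is still a tag.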

Thanks,
drew

> 
> 
> > +
> > +		if (acpi_topology_istag(cpu_topology[cpu].core_id)) {
> > +			if (acpi_topology_istag(cpu_topology[cpu].package_id))
> > +				core_id = 0;
> > +			tag = cpu_topology[cpu].core_id;
> > +			cpu_topology[cpu].core_id = core_id;
> > +
> > +			if (is_threaded) {
> > +				for_each_possible_cpu(cpu2) {
> > +					if (cpu_topology[cpu2].core_id == tag)
> > +						cpu_topology[cpu2].core_id = core_id;
> > +				}
> > +			}
> > +			++core_id;
> > +		}
> > +
> > +		if (acpi_topology_istag(cpu_topology[cpu].package_id)) {
> > +			tag = cpu_topology[cpu].package_id;
> > +			cpu_topology[cpu].package_id = package_id;
> > +
> > +			for_each_possible_cpu(cpu2) {
> > +				if (cpu_topology[cpu2].package_id == tag)
> > +					cpu_topology[cpu2].package_id = package_id;
> > +			}
> > +			++package_id;
> > +		}
> > +	}
> > +
> >   	return 0;
> >   }
> > 
> 
