* [PATCH v5 1/3] spapr: move h_home_node_associativity to spapr_numa.c
2020-09-04 13:56 [PATCH v5 0/3] pseries NUMA distance rework Daniel Henrique Barboza
@ 2020-09-04 13:56 ` Daniel Henrique Barboza
2020-09-04 14:42 ` Greg Kurz
2020-09-04 13:56 ` [PATCH v5 2/3] spapr_numa: create a vcpu associativity helper Daniel Henrique Barboza
2020-09-04 13:56 ` [PATCH v5 3/3] spapr_numa: use spapr_numa_get_vcpu_assoc() in home_node hcall Daniel Henrique Barboza
2 siblings, 1 reply; 8+ messages in thread
From: Daniel Henrique Barboza @ 2020-09-04 13:56 UTC
To: qemu-devel; +Cc: Daniel Henrique Barboza, qemu-ppc, groug, david
The implementation of this hypercall will be modified to use
spapr->numa_assoc_array as input. Moving it to spapr_numa.c makes
more sense.
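For illustration, here is a minimal standalone sketch (not QEMU code; the
node_id and procno values are made up) of what the hcall currently hands
back to the guest in R4-R9. The ASSOCIATIVITY() macro mirrors the one
being moved:

#include <stdio.h>
#include <inttypes.h>

/* two 32-bit values folded into one 64-bit return register, the same
 * packing as the ASSOCIATIVITY() macro in the patch below */
#define ASSOCIATIVITY(a, b) (((uint64_t)(a) << 32) | \
                             ((uint64_t)(b) & 0xffffffff))

int main(void)
{
    uint32_t node_id = 1, procno = 2;   /* made-up example values */
    uint64_t args[6];
    int idx = 0;

    /* mirrors the hard-coded sequence the hcall returns today */
    args[idx++] = ASSOCIATIVITY(0, 0);
    args[idx++] = ASSOCIATIVITY(0, node_id);
    args[idx++] = ASSOCIATIVITY(procno, -1);
    for ( ; idx < 6; idx++) {
        args[idx] = -1;
    }

    for (idx = 0; idx < 6; idx++) {
        printf("R%d = 0x%016" PRIx64 "\n", idx + 4, args[idx]);
    }
    return 0;
}

The last three registers come back as all-ones fillers; that hard-coded
behavior is what patch 3/3 replaces with values taken from
spapr->numa_assoc_array.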
Signed-off-by: Daniel Henrique Barboza <danielhb413@gmail.com>
---
hw/ppc/spapr_hcall.c | 40 ---------------------------------------
hw/ppc/spapr_numa.c | 45 ++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 45 insertions(+), 40 deletions(-)
diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c
index c1d01228c6..c2776b6a7d 100644
--- a/hw/ppc/spapr_hcall.c
+++ b/hw/ppc/spapr_hcall.c
@@ -1873,42 +1873,6 @@ static target_ulong h_client_architecture_support(PowerPCCPU *cpu,
return ret;
}
-static target_ulong h_home_node_associativity(PowerPCCPU *cpu,
- SpaprMachineState *spapr,
- target_ulong opcode,
- target_ulong *args)
-{
- target_ulong flags = args[0];
- target_ulong procno = args[1];
- PowerPCCPU *tcpu;
- int idx;
-
- /* only support procno from H_REGISTER_VPA */
- if (flags != 0x1) {
- return H_FUNCTION;
- }
-
- tcpu = spapr_find_cpu(procno);
- if (tcpu == NULL) {
- return H_P2;
- }
-
- /* sequence is the same as in the "ibm,associativity" property */
-
- idx = 0;
-#define ASSOCIATIVITY(a, b) (((uint64_t)(a) << 32) | \
- ((uint64_t)(b) & 0xffffffff))
- args[idx++] = ASSOCIATIVITY(0, 0);
- args[idx++] = ASSOCIATIVITY(0, tcpu->node_id);
- args[idx++] = ASSOCIATIVITY(procno, -1);
- for ( ; idx < 6; idx++) {
- args[idx] = -1;
- }
-#undef ASSOCIATIVITY
-
- return H_SUCCESS;
-}
-
static target_ulong h_get_cpu_characteristics(PowerPCCPU *cpu,
SpaprMachineState *spapr,
target_ulong opcode,
@@ -2139,10 +2103,6 @@ static void hypercall_register_types(void)
spapr_register_hypercall(KVMPPC_H_CAS, h_client_architecture_support);
spapr_register_hypercall(KVMPPC_H_UPDATE_DT, h_update_dt);
-
- /* Virtual Processor Home Node */
- spapr_register_hypercall(H_HOME_NODE_ASSOCIATIVITY,
- h_home_node_associativity);
}
type_init(hypercall_register_types)
diff --git a/hw/ppc/spapr_numa.c b/hw/ppc/spapr_numa.c
index 93a000b729..368c1a494d 100644
--- a/hw/ppc/spapr_numa.c
+++ b/hw/ppc/spapr_numa.c
@@ -165,3 +165,48 @@ void spapr_numa_write_rtas_dt(SpaprMachineState *spapr, void *fdt, int rtas)
_FDT(fdt_setprop(fdt, rtas, "ibm,max-associativity-domains",
maxdomains, sizeof(maxdomains)));
}
+
+static target_ulong h_home_node_associativity(PowerPCCPU *cpu,
+ SpaprMachineState *spapr,
+ target_ulong opcode,
+ target_ulong *args)
+{
+ target_ulong flags = args[0];
+ target_ulong procno = args[1];
+ PowerPCCPU *tcpu;
+ int idx;
+
+ /* only support procno from H_REGISTER_VPA */
+ if (flags != 0x1) {
+ return H_FUNCTION;
+ }
+
+ tcpu = spapr_find_cpu(procno);
+ if (tcpu == NULL) {
+ return H_P2;
+ }
+
+ /* sequence is the same as in the "ibm,associativity" property */
+
+ idx = 0;
+#define ASSOCIATIVITY(a, b) (((uint64_t)(a) << 32) | \
+ ((uint64_t)(b) & 0xffffffff))
+ args[idx++] = ASSOCIATIVITY(0, 0);
+ args[idx++] = ASSOCIATIVITY(0, tcpu->node_id);
+ args[idx++] = ASSOCIATIVITY(procno, -1);
+ for ( ; idx < 6; idx++) {
+ args[idx] = -1;
+ }
+#undef ASSOCIATIVITY
+
+ return H_SUCCESS;
+}
+
+static void spapr_numa_register_types(void)
+{
+ /* Virtual Processor Home Node */
+ spapr_register_hypercall(H_HOME_NODE_ASSOCIATIVITY,
+ h_home_node_associativity);
+}
+
+type_init(spapr_numa_register_types)
--
2.26.2
* Re: [PATCH v5 1/3] spapr: move h_home_node_associativity to spapr_numa.c
2020-09-04 13:56 ` [PATCH v5 1/3] spapr: move h_home_node_associativity to spapr_numa.c Daniel Henrique Barboza
@ 2020-09-04 14:42 ` Greg Kurz
0 siblings, 0 replies; 8+ messages in thread
From: Greg Kurz @ 2020-09-04 14:42 UTC
To: Daniel Henrique Barboza; +Cc: qemu-ppc, qemu-devel, david
On Fri, 4 Sep 2020 10:56:29 -0300
Daniel Henrique Barboza <danielhb413@gmail.com> wrote:
> The implementation of this hypercall will be modified to use
> spapr->numa_assoc_array as input. Moving it to spapr_numa.c makes
> more sense.
>
> Signed-off-by: Daniel Henrique Barboza <danielhb413@gmail.com>
> ---
Not sure if David already applied it to ppc-for-5.2. Anyway:
Reviewed-by: Greg Kurz <groug@kaod.org>
> hw/ppc/spapr_hcall.c | 40 ---------------------------------------
> hw/ppc/spapr_numa.c | 45 ++++++++++++++++++++++++++++++++++++++++++++
> 2 files changed, 45 insertions(+), 40 deletions(-)
>
> diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c
> index c1d01228c6..c2776b6a7d 100644
> --- a/hw/ppc/spapr_hcall.c
> +++ b/hw/ppc/spapr_hcall.c
> @@ -1873,42 +1873,6 @@ static target_ulong h_client_architecture_support(PowerPCCPU *cpu,
> return ret;
> }
>
> -static target_ulong h_home_node_associativity(PowerPCCPU *cpu,
> - SpaprMachineState *spapr,
> - target_ulong opcode,
> - target_ulong *args)
> -{
> - target_ulong flags = args[0];
> - target_ulong procno = args[1];
> - PowerPCCPU *tcpu;
> - int idx;
> -
> - /* only support procno from H_REGISTER_VPA */
> - if (flags != 0x1) {
> - return H_FUNCTION;
> - }
> -
> - tcpu = spapr_find_cpu(procno);
> - if (tcpu == NULL) {
> - return H_P2;
> - }
> -
> - /* sequence is the same as in the "ibm,associativity" property */
> -
> - idx = 0;
> -#define ASSOCIATIVITY(a, b) (((uint64_t)(a) << 32) | \
> - ((uint64_t)(b) & 0xffffffff))
> - args[idx++] = ASSOCIATIVITY(0, 0);
> - args[idx++] = ASSOCIATIVITY(0, tcpu->node_id);
> - args[idx++] = ASSOCIATIVITY(procno, -1);
> - for ( ; idx < 6; idx++) {
> - args[idx] = -1;
> - }
> -#undef ASSOCIATIVITY
> -
> - return H_SUCCESS;
> -}
> -
> static target_ulong h_get_cpu_characteristics(PowerPCCPU *cpu,
> SpaprMachineState *spapr,
> target_ulong opcode,
> @@ -2139,10 +2103,6 @@ static void hypercall_register_types(void)
> spapr_register_hypercall(KVMPPC_H_CAS, h_client_architecture_support);
>
> spapr_register_hypercall(KVMPPC_H_UPDATE_DT, h_update_dt);
> -
> - /* Virtual Processor Home Node */
> - spapr_register_hypercall(H_HOME_NODE_ASSOCIATIVITY,
> - h_home_node_associativity);
> }
>
> type_init(hypercall_register_types)
> diff --git a/hw/ppc/spapr_numa.c b/hw/ppc/spapr_numa.c
> index 93a000b729..368c1a494d 100644
> --- a/hw/ppc/spapr_numa.c
> +++ b/hw/ppc/spapr_numa.c
> @@ -165,3 +165,48 @@ void spapr_numa_write_rtas_dt(SpaprMachineState *spapr, void *fdt, int rtas)
> _FDT(fdt_setprop(fdt, rtas, "ibm,max-associativity-domains",
> maxdomains, sizeof(maxdomains)));
> }
> +
> +static target_ulong h_home_node_associativity(PowerPCCPU *cpu,
> + SpaprMachineState *spapr,
> + target_ulong opcode,
> + target_ulong *args)
> +{
> + target_ulong flags = args[0];
> + target_ulong procno = args[1];
> + PowerPCCPU *tcpu;
> + int idx;
> +
> + /* only support procno from H_REGISTER_VPA */
> + if (flags != 0x1) {
> + return H_FUNCTION;
> + }
> +
> + tcpu = spapr_find_cpu(procno);
> + if (tcpu == NULL) {
> + return H_P2;
> + }
> +
> + /* sequence is the same as in the "ibm,associativity" property */
> +
> + idx = 0;
> +#define ASSOCIATIVITY(a, b) (((uint64_t)(a) << 32) | \
> + ((uint64_t)(b) & 0xffffffff))
> + args[idx++] = ASSOCIATIVITY(0, 0);
> + args[idx++] = ASSOCIATIVITY(0, tcpu->node_id);
> + args[idx++] = ASSOCIATIVITY(procno, -1);
> + for ( ; idx < 6; idx++) {
> + args[idx] = -1;
> + }
> +#undef ASSOCIATIVITY
> +
> + return H_SUCCESS;
> +}
> +
> +static void spapr_numa_register_types(void)
> +{
> + /* Virtual Processor Home Node */
> + spapr_register_hypercall(H_HOME_NODE_ASSOCIATIVITY,
> + h_home_node_associativity);
> +}
> +
> +type_init(spapr_numa_register_types)
* [PATCH v5 2/3] spapr_numa: create a vcpu associativity helper
2020-09-04 13:56 [PATCH v5 0/3] pseries NUMA distance rework Daniel Henrique Barboza
2020-09-04 13:56 ` [PATCH v5 1/3] spapr: move h_home_node_associativity to spapr_numa.c Daniel Henrique Barboza
@ 2020-09-04 13:56 ` Daniel Henrique Barboza
2020-09-04 14:35 ` Greg Kurz
2020-09-04 13:56 ` [PATCH v5 3/3] spapr_numa: use spapr_numa_get_vcpu_assoc() in home_node hcall Daniel Henrique Barboza
2 siblings, 1 reply; 8+ messages in thread
From: Daniel Henrique Barboza @ 2020-09-04 13:56 UTC
To: qemu-devel; +Cc: Daniel Henrique Barboza, qemu-ppc, groug, david
The work to be done in h_home_node_associativity() intersects
with what is already done in spapr_numa_fixup_cpu_dt(). This
patch creates a new helper, spapr_numa_get_vcpu_assoc(), to
be used for both spapr_numa_fixup_cpu_dt() and
h_home_node_associativity().
While we're at it, use memcpy() instead of loop assignment
to create the returned array.
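The layout is easier to see outside of QEMU. Here is a compilable sketch
of the same construction, where GUINT32_TO_BE() stands in for QEMU's
cpu_to_be32() and the domain values are invented:

#include <glib.h>
#include <stdint.h>
#include <string.h>

#define MAX_DISTANCE_REF_POINTS 4
#define NUMA_ASSOC_SIZE (MAX_DISTANCE_REF_POINTS + 1)
#define VCPU_ASSOC_SIZE (NUMA_ASSOC_SIZE + 1)

int main(void)
{
    /* stand-in for spapr->numa_assoc_array[node]: a size word
     * followed by MAX_DISTANCE_REF_POINTS domain values */
    uint32_t node_assoc[NUMA_ASSOC_SIZE] = {
        GUINT32_TO_BE(MAX_DISTANCE_REF_POINTS),
        GUINT32_TO_BE(0), GUINT32_TO_BE(0),
        GUINT32_TO_BE(0), GUINT32_TO_BE(1),
    };
    uint32_t vcpu_id = 2;   /* made-up value */
    uint32_t *vcpu_assoc = g_malloc(VCPU_ASSOC_SIZE * sizeof(uint32_t));

    /* the size at index 0 grows by one for the trailing vcpu_id... */
    vcpu_assoc[0] = GUINT32_TO_BE(MAX_DISTANCE_REF_POINTS + 1);
    vcpu_assoc[VCPU_ASSOC_SIZE - 1] = GUINT32_TO_BE(vcpu_id);
    /* ...and one memcpy() replaces the element-by-element loop */
    memcpy(vcpu_assoc + 1, node_assoc + 1,
           (VCPU_ASSOC_SIZE - 2) * sizeof(uint32_t));

    g_free(vcpu_assoc);
    return 0;
}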
Signed-off-by: Daniel Henrique Barboza <danielhb413@gmail.com>
---
hw/ppc/spapr_numa.c | 33 ++++++++++++++++++++-------------
include/hw/ppc/spapr.h | 7 ++++++-
2 files changed, 26 insertions(+), 14 deletions(-)
diff --git a/hw/ppc/spapr_numa.c b/hw/ppc/spapr_numa.c
index 368c1a494d..674d2ee86d 100644
--- a/hw/ppc/spapr_numa.c
+++ b/hw/ppc/spapr_numa.c
@@ -71,31 +71,38 @@ void spapr_numa_write_associativity_dt(SpaprMachineState *spapr, void *fdt,
sizeof(spapr->numa_assoc_array[nodeid]))));
}
-int spapr_numa_fixup_cpu_dt(SpaprMachineState *spapr, void *fdt,
- int offset, PowerPCCPU *cpu)
+static uint32_t *spapr_numa_get_vcpu_assoc(SpaprMachineState *spapr,
+ PowerPCCPU *cpu)
{
- uint vcpu_assoc_size = NUMA_ASSOC_SIZE + 1;
- uint32_t vcpu_assoc[vcpu_assoc_size];
+ uint32_t *vcpu_assoc = g_malloc(VCPU_ASSOC_SIZE * sizeof(uint32_t));
int index = spapr_get_vcpu_id(cpu);
- int i;
+
+ g_assert(vcpu_assoc != NULL);
/*
* VCPUs have an extra 'cpu_id' value in ibm,associativity
* compared to other resources. Increment the size at index
- * 0, copy all associativity domains already set, then put
- * cpu_id last.
+ * 0, put cpu_id last, then copy the remaining associativity
+ * domains.
*/
vcpu_assoc[0] = cpu_to_be32(MAX_DISTANCE_REF_POINTS + 1);
+ vcpu_assoc[VCPU_ASSOC_SIZE - 1] = cpu_to_be32(index);
+ memcpy(vcpu_assoc + 1, spapr->numa_assoc_array[cpu->node_id] + 1,
+ (VCPU_ASSOC_SIZE - 2) * sizeof(uint32_t));
- for (i = 1; i <= MAX_DISTANCE_REF_POINTS; i++) {
- vcpu_assoc[i] = spapr->numa_assoc_array[cpu->node_id][i];
- }
+ return vcpu_assoc;
+}
+
+int spapr_numa_fixup_cpu_dt(SpaprMachineState *spapr, void *fdt,
+ int offset, PowerPCCPU *cpu)
+{
+ g_autofree uint32_t *vcpu_assoc = NULL;
- vcpu_assoc[vcpu_assoc_size - 1] = cpu_to_be32(index);
+ vcpu_assoc = spapr_numa_get_vcpu_assoc(spapr, cpu);
/* Advertise NUMA via ibm,associativity */
- return fdt_setprop(fdt, offset, "ibm,associativity",
- vcpu_assoc, sizeof(vcpu_assoc));
+ return fdt_setprop(fdt, offset, "ibm,associativity", vcpu_assoc,
+ VCPU_ASSOC_SIZE * sizeof(uint32_t));
}
diff --git a/include/hw/ppc/spapr.h b/include/hw/ppc/spapr.h
index 9a63380801..e50a2672e3 100644
--- a/include/hw/ppc/spapr.h
+++ b/include/hw/ppc/spapr.h
@@ -107,13 +107,18 @@ typedef enum {
/*
* NUMA related macros. MAX_DISTANCE_REF_POINTS was taken
- * from Taken from Linux kernel arch/powerpc/mm/numa.h.
+ * from Linux kernel arch/powerpc/mm/numa.h. It represents the
+ * number of associativity domains for non-CPU resources.
*
* NUMA_ASSOC_SIZE is the base array size of an ibm,associativity
* array for any non-CPU resource.
+ *
+ * VCPU_ASSOC_SIZE represents the size of ibm,associativity array
+ * for CPUs, which has an extra element (vcpu_id) at the end.
*/
#define MAX_DISTANCE_REF_POINTS 4
#define NUMA_ASSOC_SIZE (MAX_DISTANCE_REF_POINTS + 1)
+#define VCPU_ASSOC_SIZE (NUMA_ASSOC_SIZE + 1)
typedef struct SpaprCapabilities SpaprCapabilities;
struct SpaprCapabilities {
--
2.26.2
* Re: [PATCH v5 2/3] spapr_numa: create a vcpu associativity helper
2020-09-04 13:56 ` [PATCH v5 2/3] spapr_numa: create a vcpu associativity helper Daniel Henrique Barboza
@ 2020-09-04 14:35 ` Greg Kurz
2020-09-04 14:44 ` Daniel Henrique Barboza
0 siblings, 1 reply; 8+ messages in thread
From: Greg Kurz @ 2020-09-04 14:35 UTC
To: Daniel Henrique Barboza; +Cc: qemu-ppc, qemu-devel, david
On Fri, 4 Sep 2020 10:56:30 -0300
Daniel Henrique Barboza <danielhb413@gmail.com> wrote:
> The work to be done in h_home_node_associativity() intersects
> with what is already done in spapr_numa_fixup_cpu_dt(). This
> patch creates a new helper, spapr_numa_get_vcpu_assoc(), to
> be used for both spapr_numa_fixup_cpu_dt() and
> h_home_node_associativity().
>
> While we're at it, use memcpy() instead of loop assignment
> to create the returned array.
>
> Signed-off-by: Daniel Henrique Barboza <danielhb413@gmail.com>
> ---
> hw/ppc/spapr_numa.c | 33 ++++++++++++++++++++-------------
> include/hw/ppc/spapr.h | 7 ++++++-
> 2 files changed, 26 insertions(+), 14 deletions(-)
>
> diff --git a/hw/ppc/spapr_numa.c b/hw/ppc/spapr_numa.c
> index 368c1a494d..674d2ee86d 100644
> --- a/hw/ppc/spapr_numa.c
> +++ b/hw/ppc/spapr_numa.c
> @@ -71,31 +71,38 @@ void spapr_numa_write_associativity_dt(SpaprMachineState *spapr, void *fdt,
> sizeof(spapr->numa_assoc_array[nodeid]))));
> }
>
> -int spapr_numa_fixup_cpu_dt(SpaprMachineState *spapr, void *fdt,
> - int offset, PowerPCCPU *cpu)
> +static uint32_t *spapr_numa_get_vcpu_assoc(SpaprMachineState *spapr,
> + PowerPCCPU *cpu)
> {
> - uint vcpu_assoc_size = NUMA_ASSOC_SIZE + 1;
> - uint32_t vcpu_assoc[vcpu_assoc_size];
> + uint32_t *vcpu_assoc = g_malloc(VCPU_ASSOC_SIZE * sizeof(uint32_t));
CODING_STYLE recommends g_new(uint32_t, VCPU_ASSOC_SIZE)
> int index = spapr_get_vcpu_id(cpu);
> - int i;
> +
> + g_assert(vcpu_assoc != NULL);
>
g_malloc() and friends only return NULL when passed a zero size,
which is obviously not the case here.
> /*
> * VCPUs have an extra 'cpu_id' value in ibm,associativity
> * compared to other resources. Increment the size at index
> - * 0, copy all associativity domains already set, then put
> - * cpu_id last.
> + * 0, put cpu_id last, then copy the remaining associativity
> + * domains.
> */
> vcpu_assoc[0] = cpu_to_be32(MAX_DISTANCE_REF_POINTS + 1);
> + vcpu_assoc[VCPU_ASSOC_SIZE - 1] = cpu_to_be32(index);
> + memcpy(vcpu_assoc + 1, spapr->numa_assoc_array[cpu->node_id] + 1,
> + (VCPU_ASSOC_SIZE - 2) * sizeof(uint32_t));
>
> - for (i = 1; i <= MAX_DISTANCE_REF_POINTS; i++) {
> - vcpu_assoc[i] = spapr->numa_assoc_array[cpu->node_id][i];
> - }
> + return vcpu_assoc;
> +}
> +
> +int spapr_numa_fixup_cpu_dt(SpaprMachineState *spapr, void *fdt,
> + int offset, PowerPCCPU *cpu)
> +{
> + g_autofree uint32_t *vcpu_assoc = NULL;
>
> - vcpu_assoc[vcpu_assoc_size - 1] = cpu_to_be32(index);
> + vcpu_assoc = spapr_numa_get_vcpu_assoc(spapr, cpu);
>
> /* Advertise NUMA via ibm,associativity */
> - return fdt_setprop(fdt, offset, "ibm,associativity",
> - vcpu_assoc, sizeof(vcpu_assoc));
> + return fdt_setprop(fdt, offset, "ibm,associativity", vcpu_assoc,
> + VCPU_ASSOC_SIZE * sizeof(uint32_t));
> }
>
>
> diff --git a/include/hw/ppc/spapr.h b/include/hw/ppc/spapr.h
> index 9a63380801..e50a2672e3 100644
> --- a/include/hw/ppc/spapr.h
> +++ b/include/hw/ppc/spapr.h
> @@ -107,13 +107,18 @@ typedef enum {
>
> /*
> * NUMA related macros. MAX_DISTANCE_REF_POINTS was taken
> - * from Taken from Linux kernel arch/powerpc/mm/numa.h.
Heh, a good opportunity to fix the "from Taken from" typo I guess ;)
> + * from Linux kernel arch/powerpc/mm/numa.h. It represents the
> + * number of associativity domains for non-CPU resources.
> *
> * NUMA_ASSOC_SIZE is the base array size of an ibm,associativity
> * array for any non-CPU resource.
> + *
> + * VCPU_ASSOC_SIZE represents the size of ibm,associativity array
> + * for CPUs, which has an extra element (vcpu_id) at the end.
> */
> #define MAX_DISTANCE_REF_POINTS 4
> #define NUMA_ASSOC_SIZE (MAX_DISTANCE_REF_POINTS + 1)
> +#define VCPU_ASSOC_SIZE (NUMA_ASSOC_SIZE + 1)
>
> typedef struct SpaprCapabilities SpaprCapabilities;
> struct SpaprCapabilities {
With the comments on g_malloc() addressed, feel free to add:
Reviewed-by: Greg Kurz <groug@kaod.org>
* Re: [PATCH v5 2/3] spapr_numa: create a vcpu associativity helper
2020-09-04 14:35 ` Greg Kurz
@ 2020-09-04 14:44 ` Daniel Henrique Barboza
0 siblings, 0 replies; 8+ messages in thread
From: Daniel Henrique Barboza @ 2020-09-04 14:44 UTC
To: Greg Kurz; +Cc: qemu-ppc, qemu-devel, david
On 9/4/20 11:35 AM, Greg Kurz wrote:
> On Fri, 4 Sep 2020 10:56:30 -0300
> Daniel Henrique Barboza <danielhb413@gmail.com> wrote:
>
>> The work to be done in h_home_node_associativity() intersects
>> with what is already done in spapr_numa_fixup_cpu_dt(). This
>> patch creates a new helper, spapr_numa_get_vcpu_assoc(), to
>> be used for both spapr_numa_fixup_cpu_dt() and
>> h_home_node_associativity().
>>
>> While we're at it, use memcpy() instead of loop assignment
>> to create the returned array.
>>
>> Signed-off-by: Daniel Henrique Barboza <danielhb413@gmail.com>
>> ---
>> hw/ppc/spapr_numa.c | 33 ++++++++++++++++++++-------------
>> include/hw/ppc/spapr.h | 7 ++++++-
>> 2 files changed, 26 insertions(+), 14 deletions(-)
>>
>> diff --git a/hw/ppc/spapr_numa.c b/hw/ppc/spapr_numa.c
>> index 368c1a494d..674d2ee86d 100644
>> --- a/hw/ppc/spapr_numa.c
>> +++ b/hw/ppc/spapr_numa.c
>> @@ -71,31 +71,38 @@ void spapr_numa_write_associativity_dt(SpaprMachineState *spapr, void *fdt,
>> sizeof(spapr->numa_assoc_array[nodeid]))));
>> }
>>
>> -int spapr_numa_fixup_cpu_dt(SpaprMachineState *spapr, void *fdt,
>> - int offset, PowerPCCPU *cpu)
>> +static uint32_t *spapr_numa_get_vcpu_assoc(SpaprMachineState *spapr,
>> + PowerPCCPU *cpu)
>> {
>> - uint vcpu_assoc_size = NUMA_ASSOC_SIZE + 1;
>> - uint32_t vcpu_assoc[vcpu_assoc_size];
>> + uint32_t *vcpu_assoc = g_malloc(VCPU_ASSOC_SIZE * sizeof(uint32_t));
>
> CODING_STYLE recommends g_new(uint32_t, VCPU_ASSOC_SIZE)
Guess I can't rely solely on scripts/checkpatch.pl anymore ...
>
>> int index = spapr_get_vcpu_id(cpu);
>> - int i;
>> +
>> + g_assert(vcpu_assoc != NULL);
>>
>
> g_malloc() and friends only return NULL when passed a zero size,
> which is obviously not the case here.
This was my attempt to cover the case where g_malloc() might fail,
but reading the docs again I believe failure in g_malloc() would result
in application termination. What I did there makes sense for g_try_malloc().
I'll change it to g_new() and drop the g_assert().
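For anyone skimming the archive, the distinction reduces to this minimal
sketch (VCPU_ASSOC_SIZE is hard-coded here just for the example):

#include <glib.h>
#include <stdint.h>

#define VCPU_ASSOC_SIZE 6

int main(void)
{
    /* g_new() aborts the process on allocation failure, so its
     * result never needs a NULL check; type and count are explicit */
    uint32_t *a = g_new(uint32_t, VCPU_ASSOC_SIZE);

    /* g_try_malloc() is the variant that returns NULL on failure;
     * only here would a NULL check (or g_assert) mean anything */
    uint32_t *b = g_try_malloc(VCPU_ASSOC_SIZE * sizeof(uint32_t));
    if (b == NULL) {
        g_error("allocation failed");
    }

    g_free(a);
    g_free(b);
    return 0;
}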
>
>> /*
>> * VCPUs have an extra 'cpu_id' value in ibm,associativity
>> * compared to other resources. Increment the size at index
>> - * 0, copy all associativity domains already set, then put
>> - * cpu_id last.
>> + * 0, put cpu_id last, then copy the remaining associativity
>> + * domains.
>> */
>> vcpu_assoc[0] = cpu_to_be32(MAX_DISTANCE_REF_POINTS + 1);
>> + vcpu_assoc[VCPU_ASSOC_SIZE - 1] = cpu_to_be32(index);
>> + memcpy(vcpu_assoc + 1, spapr->numa_assoc_array[cpu->node_id] + 1,
>> + (VCPU_ASSOC_SIZE - 2) * sizeof(uint32_t));
>>
>> - for (i = 1; i <= MAX_DISTANCE_REF_POINTS; i++) {
>> - vcpu_assoc[i] = spapr->numa_assoc_array[cpu->node_id][i];
>> - }
>> + return vcpu_assoc;
>> +}
>> +
>> +int spapr_numa_fixup_cpu_dt(SpaprMachineState *spapr, void *fdt,
>> + int offset, PowerPCCPU *cpu)
>> +{
>> + g_autofree uint32_t *vcpu_assoc = NULL;
>>
>> - vcpu_assoc[vcpu_assoc_size - 1] = cpu_to_be32(index);
>> + vcpu_assoc = spapr_numa_get_vcpu_assoc(spapr, cpu);
>>
>> /* Advertise NUMA via ibm,associativity */
>> - return fdt_setprop(fdt, offset, "ibm,associativity",
>> - vcpu_assoc, sizeof(vcpu_assoc));
>> + return fdt_setprop(fdt, offset, "ibm,associativity", vcpu_assoc,
>> + VCPU_ASSOC_SIZE * sizeof(uint32_t));
>> }
>>
>>
>> diff --git a/include/hw/ppc/spapr.h b/include/hw/ppc/spapr.h
>> index 9a63380801..e50a2672e3 100644
>> --- a/include/hw/ppc/spapr.h
>> +++ b/include/hw/ppc/spapr.h
>> @@ -107,13 +107,18 @@ typedef enum {
>>
>> /*
>> * NUMA related macros. MAX_DISTANCE_REF_POINTS was taken
>> - * from Taken from Linux kernel arch/powerpc/mm/numa.h.
>
> Heh a good opportunity to fix the "from Taken from" typo I guess ;)
>
>> + * from Linux kernel arch/powerpc/mm/numa.h. It represents the
>> + * number of associativity domains for non-CPU resources.
>> *
>> * NUMA_ASSOC_SIZE is the base array size of an ibm,associativity
>> * array for any non-CPU resource.
>> + *
>> + * VCPU_ASSOC_SIZE represents the size of ibm,associativity array
>> + * for CPUs, which has an extra element (vcpu_id) at the end.
>> */
>> #define MAX_DISTANCE_REF_POINTS 4
>> #define NUMA_ASSOC_SIZE (MAX_DISTANCE_REF_POINTS + 1)
>> +#define VCPU_ASSOC_SIZE (NUMA_ASSOC_SIZE + 1)
>>
>> typedef struct SpaprCapabilities SpaprCapabilities;
>> struct SpaprCapabilities {
>
> With the comments on g_malloc() addressed, feel free to add:
>
> Reviewed-by: Greg Kurz <groug@kaod.org>
Thanks! I'll change g_malloc() and so on like you suggested and send
this as v6.
DHB
>
* [PATCH v5 3/3] spapr_numa: use spapr_numa_get_vcpu_assoc() in home_node hcall
2020-09-04 13:56 [PATCH v5 0/3] pseries NUMA distance rework Daniel Henrique Barboza
2020-09-04 13:56 ` [PATCH v5 1/3] spapr: move h_home_node_associativity to spapr_numa.c Daniel Henrique Barboza
2020-09-04 13:56 ` [PATCH v5 2/3] spapr_numa: create a vcpu associativity helper Daniel Henrique Barboza
@ 2020-09-04 13:56 ` Daniel Henrique Barboza
2020-09-04 14:41 ` Greg Kurz
2 siblings, 1 reply; 8+ messages in thread
From: Daniel Henrique Barboza @ 2020-09-04 13:56 UTC
To: qemu-devel; +Cc: Daniel Henrique Barboza, qemu-ppc, groug, david
The current implementation of h_home_node_associativity hard-codes
the values of the associativity domains of the vcpus. Let's make
it consider the values already initialized in spapr->numa_assoc_array,
via the spapr_numa_get_vcpu_assoc() helper.
We want to set it and forget it, and for that we also need to
assert that we don't overflow the registers of the hypercall.
From R4 to R9 we can squeeze in 12 associativity domains for
vcpus, so let's assert that VCPU_ASSOC_SIZE - 1 isn't greater
than that.
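The 12 comes from six 64-bit return registers (R4 up to R9) carrying two
32-bit domains each. Here is a standalone sketch of the read-two-at-a-time
loop from the patch, with invented values and the be32 swab omitted:

#include <stdio.h>
#include <inttypes.h>

#define VCPU_ASSOC_SIZE 6   /* size word + 4 domains + vcpu_id */

#define ASSOCIATIVITY(a, b) (((uint64_t)(a) << 32) | \
                             ((uint64_t)(b) & 0xffffffff))

int main(void)
{
    /* stand-in for vcpu_assoc[]; entry 0 is the size word that the
     * loop skips by starting assoc_idx at 1 */
    uint32_t vcpu_assoc[VCPU_ASSOC_SIZE] = { 5, 0, 0, 0, 1, 2 };
    uint64_t args[6];
    int idx, assoc_idx = 1;

    /* 6 registers x 2 domains each = 12 slots, hence the static
     * assert that VCPU_ASSOC_SIZE - 1 <= 12 */
    for (idx = 0; idx < 6; idx++) {
        int32_t a, b;

        a = assoc_idx < VCPU_ASSOC_SIZE ?
            (int32_t)vcpu_assoc[assoc_idx++] : -1;
        b = assoc_idx < VCPU_ASSOC_SIZE ?
            (int32_t)vcpu_assoc[assoc_idx++] : -1;
        args[idx] = ASSOCIATIVITY(a, b);
    }

    for (idx = 0; idx < 6; idx++) {
        printf("R%d = 0x%016" PRIx64 "\n", idx + 4, args[idx]);
    }
    return 0;
}

With five associativity entries after the size word, R4-R6 carry real
data and R7-R9 come back as all-ones fillers, the same shape the
hard-coded version produced.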
Signed-off-by: Daniel Henrique Barboza <danielhb413@gmail.com>
---
hw/ppc/spapr_numa.c | 41 +++++++++++++++++++++++++++++++++--------
1 file changed, 33 insertions(+), 8 deletions(-)
diff --git a/hw/ppc/spapr_numa.c b/hw/ppc/spapr_numa.c
index 674d2ee86d..07eb921737 100644
--- a/hw/ppc/spapr_numa.c
+++ b/hw/ppc/spapr_numa.c
@@ -178,10 +178,11 @@ static target_ulong h_home_node_associativity(PowerPCCPU *cpu,
target_ulong opcode,
target_ulong *args)
{
+ g_autofree uint32_t *vcpu_assoc = NULL;
target_ulong flags = args[0];
target_ulong procno = args[1];
PowerPCCPU *tcpu;
- int idx;
+ int idx, assoc_idx;
/* only support procno from H_REGISTER_VPA */
if (flags != 0x1) {
@@ -193,16 +194,40 @@ static target_ulong h_home_node_associativity(PowerPCCPU *cpu,
return H_P2;
}
- /* sequence is the same as in the "ibm,associativity" property */
+ /*
+ * Given that we want to be flexible with the sizes and indexes,
+ * we must consider that there is a hard limit of how many
+ * associativity domains we can fit in R4 up to R9, which would be
+ * 12 associativity domains for vcpus. Assert and bail if that's
+ * not the case.
+ */
+ G_STATIC_ASSERT((VCPU_ASSOC_SIZE - 1) <= 12);
+
+ vcpu_assoc = spapr_numa_get_vcpu_assoc(spapr, tcpu);
+ /* assoc_idx starts at 1 to skip associativity size */
+ assoc_idx = 1;
- idx = 0;
#define ASSOCIATIVITY(a, b) (((uint64_t)(a) << 32) | \
((uint64_t)(b) & 0xffffffff))
- args[idx++] = ASSOCIATIVITY(0, 0);
- args[idx++] = ASSOCIATIVITY(0, tcpu->node_id);
- args[idx++] = ASSOCIATIVITY(procno, -1);
- for ( ; idx < 6; idx++) {
- args[idx] = -1;
+
+ for (idx = 0; idx < 6; idx++) {
+ int32_t a, b;
+
+ /*
+ * vcpu_assoc[] will contain the associativity domains for tcpu,
+ * including tcpu->node_id and procno, meaning that we don't
+ * need to use these variables here.
+ *
+ * We'll read 2 values at a time to fill up the ASSOCIATIVITY()
+ * macro. The ternary will fill the remaining registers with -1
+ * after we went through vcpu_assoc[].
+ */
+ a = assoc_idx < VCPU_ASSOC_SIZE ?
+ be32_to_cpu(vcpu_assoc[assoc_idx++]) : -1;
+ b = assoc_idx < VCPU_ASSOC_SIZE ?
+ be32_to_cpu(vcpu_assoc[assoc_idx++]) : -1;
+
+ args[idx] = ASSOCIATIVITY(a, b);
}
#undef ASSOCIATIVITY
--
2.26.2
* Re: [PATCH v5 3/3] spapr_numa: use spapr_numa_get_vcpu_assoc() in home_node hcall
2020-09-04 13:56 ` [PATCH v5 3/3] spapr_numa: use spapr_numa_get_vcpu_assoc() in home_node hcall Daniel Henrique Barboza
@ 2020-09-04 14:41 ` Greg Kurz
0 siblings, 0 replies; 8+ messages in thread
From: Greg Kurz @ 2020-09-04 14:41 UTC
To: Daniel Henrique Barboza; +Cc: qemu-ppc, qemu-devel, david
On Fri, 4 Sep 2020 10:56:31 -0300
Daniel Henrique Barboza <danielhb413@gmail.com> wrote:
> The current implementation of h_home_node_associativity hard-codes
> the values of the associativity domains of the vcpus. Let's make
> it consider the values already initialized in spapr->numa_assoc_array,
> via the spapr_numa_get_vcpu_assoc() helper.
>
> We want to set it and forget it, and for that we also need to
> assert that we don't overflow the registers of the hypercall.
> From R4 to R9 we can squeeze in 12 associativity domains for
> vcpus, so let's assert that VCPU_ASSOC_SIZE - 1 isn't greater
> than that.
>
> Signed-off-by: Daniel Henrique Barboza <danielhb413@gmail.com>
> ---
> hw/ppc/spapr_numa.c | 41 +++++++++++++++++++++++++++++++++--------
> 1 file changed, 33 insertions(+), 8 deletions(-)
>
> diff --git a/hw/ppc/spapr_numa.c b/hw/ppc/spapr_numa.c
> index 674d2ee86d..07eb921737 100644
> --- a/hw/ppc/spapr_numa.c
> +++ b/hw/ppc/spapr_numa.c
> @@ -178,10 +178,11 @@ static target_ulong h_home_node_associativity(PowerPCCPU *cpu,
> target_ulong opcode,
> target_ulong *args)
> {
> + g_autofree uint32_t *vcpu_assoc = NULL;
> target_ulong flags = args[0];
> target_ulong procno = args[1];
> PowerPCCPU *tcpu;
> - int idx;
> + int idx, assoc_idx;
>
> /* only support procno from H_REGISTER_VPA */
> if (flags != 0x1) {
> @@ -193,16 +194,40 @@ static target_ulong h_home_node_associativity(PowerPCCPU *cpu,
> return H_P2;
> }
>
> - /* sequence is the same as in the "ibm,associativity" property */
> + /*
> + * Given that we want to be flexible with the sizes and indexes,
> + * we must consider that there is a hard limit of how many
> + * associativity domains we can fit in R4 up to R9, which would be
> + * 12 associativity domains for vcpus. Assert and bail if that's
> + * not the case.
> + */
> + G_STATIC_ASSERT((VCPU_ASSOC_SIZE - 1) <= 12);
> +
> + vcpu_assoc = spapr_numa_get_vcpu_assoc(spapr, tcpu);
> + /* assoc_idx starts at 1 to skip associativity size */
> + assoc_idx = 1;
>
> - idx = 0;
> #define ASSOCIATIVITY(a, b) (((uint64_t)(a) << 32) | \
> ((uint64_t)(b) & 0xffffffff))
> - args[idx++] = ASSOCIATIVITY(0, 0);
> - args[idx++] = ASSOCIATIVITY(0, tcpu->node_id);
> - args[idx++] = ASSOCIATIVITY(procno, -1);
> - for ( ; idx < 6; idx++) {
> - args[idx] = -1;
> +
> + for (idx = 0; idx < 6; idx++) {
> + int32_t a, b;
> +
> + /*
> + * vcpu_assoc[] will contain the associativity domains for tcpu,
> + * including tcpu->node_id and procno, meaning that we don't
> + * need to use these variables here.
> + *
> + * We'll read 2 values at a time to fill up the ASSOCIATIVITY()
> + * macro. The ternary will fill the remaining registers with -1
> + * after we went through vcpu_assoc[].
> + */
> + a = assoc_idx < VCPU_ASSOC_SIZE ?
> + be32_to_cpu(vcpu_assoc[assoc_idx++]) : -1;
> + b = assoc_idx < VCPU_ASSOC_SIZE ?
> + be32_to_cpu(vcpu_assoc[assoc_idx++]) : -1;
> +
> + args[idx] = ASSOCIATIVITY(a, b);
> }
> #undef ASSOCIATIVITY
>
Much clearer with the comment.
Reviewed-by: Greg Kurz <groug@kaod.org>