From: Daniel Henrique Barboza <danielhb413@gmail.com>
To: David Gibson <david@gibson.dropbear.id.au>
Cc: qemu-ppc@nongnu.org, qemu-devel@nongnu.org
Subject: Re: [PATCH v2 5/7] spapr, spapr_numa: move lookup-arrays handling to spapr_numa.c
Date: Thu, 3 Sep 2020 08:22:51 -0300 [thread overview]
Message-ID: <f34dd2cc-49ba-6118-715a-1acce48820fa@gmail.com> (raw)
In-Reply-To: <20200903013430.GG1897@yekko.fritz.box>
On 9/2/20 10:34 PM, David Gibson wrote:
> On Tue, Sep 01, 2020 at 09:56:43AM -0300, Daniel Henrique Barboza wrote:
>> In a similar fashion as the previous patch, let's move the
>> handling of ibm,associativity-lookup-arrays from spapr.c to
>> spapr_numa.c. A spapr_numa_write_assoc_lookup_arrays() helper was
>> created, and spapr_dt_dynamic_reconfiguration_memory() can now
>> use it to advertise the lookup-arrays.
>>
>> Signed-off-by: Daniel Henrique Barboza <danielhb413@gmail.com>
>> ---
>>  hw/ppc/spapr.c              | 25 ++----------------------
>>  hw/ppc/spapr_numa.c         | 39 +++++++++++++++++++++++++++++++++++++
>>  include/hw/ppc/spapr_numa.h |  2 ++
>>  3 files changed, 43 insertions(+), 23 deletions(-)
>>
>> diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
>> index 172f965fe0..65d2ccd578 100644
>> --- a/hw/ppc/spapr.c
>> +++ b/hw/ppc/spapr.c
>> @@ -535,13 +535,10 @@ static int spapr_dt_dynamic_reconfiguration_memory(SpaprMachineState *spapr,
>>                                                      void *fdt)
>>  {
>>      MachineState *machine = MACHINE(spapr);
>> -    int nb_numa_nodes = machine->numa_state->num_nodes;
>> -    int ret, i, offset;
>> +    int ret, offset;
>>      uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE;
>>      uint32_t prop_lmb_size[] = {cpu_to_be32(lmb_size >> 32),
>>                                  cpu_to_be32(lmb_size & 0xffffffff)};
>> -    uint32_t *int_buf, *cur_index, buf_len;
>> -    int nr_nodes = nb_numa_nodes ? nb_numa_nodes : 1;
>>      MemoryDeviceInfoList *dimms = NULL;
>>
>>      /*
>> @@ -582,25 +579,7 @@ static int spapr_dt_dynamic_reconfiguration_memory(SpaprMachineState *spapr,
>>          return ret;
>>      }
>>
>> -    /* ibm,associativity-lookup-arrays */
>> -    buf_len = (nr_nodes * 4 + 2) * sizeof(uint32_t);
>> -    cur_index = int_buf = g_malloc0(buf_len);
>> -    int_buf[0] = cpu_to_be32(nr_nodes);
>> -    int_buf[1] = cpu_to_be32(4); /* Number of entries per associativity list */
>> -    cur_index += 2;
>> -    for (i = 0; i < nr_nodes; i++) {
>> -        uint32_t associativity[] = {
>> -            cpu_to_be32(0x0),
>> -            cpu_to_be32(0x0),
>> -            cpu_to_be32(0x0),
>> -            cpu_to_be32(i)
>> -        };
>> -        memcpy(cur_index, associativity, sizeof(associativity));
>> -        cur_index += 4;
>> -    }
>> -    ret = fdt_setprop(fdt, offset, "ibm,associativity-lookup-arrays", int_buf,
>> -                      (cur_index - int_buf) * sizeof(uint32_t));
>> -    g_free(int_buf);
>> +    ret = spapr_numa_write_assoc_lookup_arrays(spapr, fdt, offset);
>>
>>      return ret;
>>  }
>> diff --git a/hw/ppc/spapr_numa.c b/hw/ppc/spapr_numa.c
>> index b8882d209e..9eb4bdbe80 100644
>> --- a/hw/ppc/spapr_numa.c
>> +++ b/hw/ppc/spapr_numa.c
>> @@ -75,6 +75,45 @@ int spapr_numa_fixup_cpu_dt(SpaprMachineState *spapr, void *fdt,
>>                          vcpu_assoc, sizeof(vcpu_assoc));
>>  }
>>
>> +
>> +int spapr_numa_write_assoc_lookup_arrays(SpaprMachineState *spapr, void *fdt,
>> +                                         int offset)
>> +{
>> +    MachineState *machine = MACHINE(spapr);
>> +    SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
>> +    int nb_numa_nodes = machine->numa_state->num_nodes;
>> +    int nr_nodes = nb_numa_nodes ? nb_numa_nodes : 1;
>> +    uint32_t *int_buf, *cur_index, buf_len;
>> +    int ret, i, j;
>> +
>> +    /* ibm,associativity-lookup-arrays */
>> +    buf_len = (nr_nodes * MAX_DISTANCE_REF_POINTS + 2) * sizeof(uint32_t);
>> +    cur_index = int_buf = g_malloc0(buf_len);
>> +    int_buf[0] = cpu_to_be32(nr_nodes);
>> +    /* Number of entries per associativity list */
>> +    int_buf[1] = cpu_to_be32(MAX_DISTANCE_REF_POINTS);
>> +    cur_index += 2;
>> +    for (i = 0; i < nr_nodes; i++) {
>> +        /*
>> +         * For the lookup-array we use the ibm,associativity array,
>> +         * from numa_assoc_array. without the first element (size).
>> +         */
>> +        uint32_t associativity[MAX_DISTANCE_REF_POINTS];
>> +
>> +        for (j = 0; j < MAX_DISTANCE_REF_POINTS; j++) {
>> +            associativity[j] = smc->numa_assoc_array[i][j + 1];
>> +        }
>> +
>> +        memcpy(cur_index, associativity, sizeof(associativity));
>
> AFAICT, you could just use a single memcpy() to copy from the
> numa_assoc_array() into the property here, rather than using a loop
> and temporary array.
I remember running into some odd problems when memcpy()ing directly from
numa_assoc_array, and this loop was the workaround I ended up with. I'll
try to sort it out again.
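
Something along these lines might do it, if the problem doesn't show up
again (untested sketch; it assumes each numa_assoc_array[i] row keeps the
MAX_DISTANCE_REF_POINTS domain values contiguous, already in big-endian,
right after the size element):

    for (i = 0; i < nr_nodes; i++) {
        /*
         * Copy the associativity domains straight from numa_assoc_array,
         * skipping the first element (the size).
         */
        memcpy(cur_index, &smc->numa_assoc_array[i][1],
               MAX_DISTANCE_REF_POINTS * sizeof(uint32_t));
        cur_index += MAX_DISTANCE_REF_POINTS;
    }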
>
>> +        cur_index += 4;
>
> Shouldn't this be += MAX_DISTANCE_REF_POINTS?
Yeah, it should. Good catch.
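
Will change it to

    cur_index += MAX_DISTANCE_REF_POINTS;

in the next version, so the buffer advances by one full lookup-array
entry per node.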
>
>> +    }
>> +    ret = fdt_setprop(fdt, offset, "ibm,associativity-lookup-arrays", int_buf,
>> +                      (cur_index - int_buf) * sizeof(uint32_t));
>> +    g_free(int_buf);
>> +
>> +    return ret;
>> +}
>> +
>>  /*
>>   * Helper that writes ibm,associativity-reference-points and
>>   * max-associativity-domains in the RTAS pointed by @rtas
>> diff --git a/include/hw/ppc/spapr_numa.h b/include/hw/ppc/spapr_numa.h
>> index f92fb4f28a..f6127501a6 100644
>> --- a/include/hw/ppc/spapr_numa.h
>> +++ b/include/hw/ppc/spapr_numa.h
>> @@ -22,6 +22,8 @@ void spapr_numa_write_associativity_dt(SpaprMachineState *spapr, void *fdt,
>>                                          int offset, int nodeid);
>>  int spapr_numa_fixup_cpu_dt(SpaprMachineState *spapr, void *fdt,
>>                              int offset, PowerPCCPU *cpu);
>> +int spapr_numa_write_assoc_lookup_arrays(SpaprMachineState *spapr, void *fdt,
>> +                                         int offset);
>>
>>
>>  #endif /* HW_SPAPR_NUMA_H */
>