From: Daniel Henrique Barboza <danielhb413@gmail.com>
To: David Gibson <david@gibson.dropbear.id.au>
Cc: qemu-ppc@nongnu.org, qemu-devel@nongnu.org
Subject: Re: [PATCH v2 6/7] spapr_numa: move NVLink2 associativity handling to spapr_numa.c
Date: Thu, 3 Sep 2020 11:20:08 -0300
Message-ID: <606dd354-5c95-9112-1c0f-919e142c9df8@gmail.com>
In-Reply-To: <20200903015632.GL1897@yekko.fritz.box>



On 9/2/20 10:56 PM, David Gibson wrote:
> On Tue, Sep 01, 2020 at 09:56:44AM -0300, Daniel Henrique Barboza wrote:
>> This patch adds a new spapr_numa_write_assoc_nvlink2() helper
>> to handle the ibm,associativity for NVLink2 GPUs.
>>
>> Signed-off-by: Daniel Henrique Barboza <danielhb413@gmail.com>
> 
> Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
> 
> It might be nice to "precompute" the assoc arrays for the gpus as you
> now do for the regular numa nodes.  That can be a later revision, though.

Hmm ... I have the follow-up series with the NUMA calculation ready, and
one of the steps I had to take there was to initialize all the
associativity arrays with 'node_id' instead of leaving them uninitialized
(reason: the kernel treats zeroed associativity entries as matches). In
the end I'm initializing every NUMA node as we do with GPUs.

I'll bring some of this future code into this series and handle GPUs like
a regular NUMA node, as you suggested. A rough sketch of that
pre-initialization follows below. Let's see how it goes.
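
For illustration, a minimal sketch of what that pre-initialization could
look like, assuming the SpaprMachineClass::numa_assoc_array introduced
earlier in this series; the function name, signature, and array placement
here are assumptions, not the final code:

#include "qemu/osdep.h"
#include "hw/ppc/spapr_numa.h"

/*
 * Sketch only: pre-initialize each node's associativity array with its
 * own node id so that no entry is left zeroed (the kernel treats zeroed
 * entries as matching domains). NUMA_ASSOC_SIZE and
 * MAX_DISTANCE_REF_POINTS are the macros this series already uses; the
 * rest is illustrative.
 */
static void spapr_numa_assoc_init_sketch(SpaprMachineState *spapr,
                                         MachineState *machine)
{
    SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
    int nb_numa_nodes = machine->numa_state->num_nodes;
    int i, j;

    for (i = 0; i < nb_numa_nodes; i++) {
        /* First entry is the number of domains, as in the patch below */
        smc->numa_assoc_array[i][0] = cpu_to_be32(MAX_DISTANCE_REF_POINTS);

        for (j = 1; j < NUMA_ASSOC_SIZE; j++) {
            smc->numa_assoc_array[i][j] = cpu_to_be32(i);
        }
    }
}

Handling the GPUs "like a regular NUMA node" would then amount to running
the same inner loop over the GPU numa_ids as well, so
spapr_numa_write_assoc_nvlink2() could simply copy a precomputed array.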



Thanks,

DHB

> 
>> ---
>>   hw/ppc/spapr_numa.c         | 23 +++++++++++++++++++++++
>>   hw/ppc/spapr_pci_nvlink2.c  | 19 ++-----------------
>>   include/hw/ppc/spapr_numa.h |  3 +++
>>   3 files changed, 28 insertions(+), 17 deletions(-)
>>
>> diff --git a/hw/ppc/spapr_numa.c b/hw/ppc/spapr_numa.c
>> index 9eb4bdbe80..785cc24624 100644
>> --- a/hw/ppc/spapr_numa.c
>> +++ b/hw/ppc/spapr_numa.c
>> @@ -15,6 +15,8 @@
>>   #include "hw/ppc/spapr_numa.h"
>>   #include "hw/ppc/fdt.h"
>>   
>> +/* Moved from hw/ppc/spapr_pci_nvlink2.c */
>> +#define SPAPR_GPU_NUMA_ID           (cpu_to_be32(1))
>>   
>>   void spapr_numa_associativity_init(MachineState *machine)
>>   {
>> @@ -114,6 +116,27 @@ int spapr_numa_write_assoc_lookup_arrays(SpaprMachineState *spapr, void *fdt,
>>       return ret;
>>   }
>>   
>> +void spapr_numa_write_assoc_nvlink2(void *fdt, int offset, int numa_id,
>> +                                    SpaprPhbState *sphb)
>> +{
>> +    uint32_t associativity[NUMA_ASSOC_SIZE];
>> +    int i;
>> +
>> +    associativity[0] = cpu_to_be32(MAX_DISTANCE_REF_POINTS);
>> +    for (i = 1; i < NUMA_ASSOC_SIZE; i++) {
>> +        associativity[i] = cpu_to_be32(numa_id);
>> +    };
>> +
>> +    if (sphb->pre_5_1_assoc) {
>> +        associativity[1] = SPAPR_GPU_NUMA_ID;
>> +        associativity[2] = SPAPR_GPU_NUMA_ID;
>> +        associativity[3] = SPAPR_GPU_NUMA_ID;
>> +    }
>> +
>> +    _FDT((fdt_setprop(fdt, offset, "ibm,associativity", associativity,
>> +                      sizeof(associativity))));
>> +}
>> +
>>   /*
>>    * Helper that writes ibm,associativity-reference-points and
>>    * max-associativity-domains in the RTAS pointed by @rtas
>> diff --git a/hw/ppc/spapr_pci_nvlink2.c b/hw/ppc/spapr_pci_nvlink2.c
>> index 76ae77ebc8..662a0af990 100644
>> --- a/hw/ppc/spapr_pci_nvlink2.c
>> +++ b/hw/ppc/spapr_pci_nvlink2.c
>> @@ -29,6 +29,7 @@
>>   #include "qemu/error-report.h"
>>   #include "hw/ppc/fdt.h"
>>   #include "hw/pci/pci_bridge.h"
>> +#include "hw/ppc/spapr_numa.h"
>>   
>>   #define PHANDLE_PCIDEV(phb, pdev)    (0x12000000 | \
>>                                        (((phb)->index) << 16) | ((pdev)->devfn))
>> @@ -37,8 +38,6 @@
>>   #define PHANDLE_NVLINK(phb, gn, nn)  (0x00130000 | (((phb)->index) << 8) | \
>>                                        ((gn) << 4) | (nn))
>>   
>> -#define SPAPR_GPU_NUMA_ID           (cpu_to_be32(1))
>> -
>>   typedef struct SpaprPhbPciNvGpuSlot {
>>           uint64_t tgt;
>>           uint64_t gpa;
>> @@ -360,13 +359,6 @@ void spapr_phb_nvgpu_ram_populate_dt(SpaprPhbState *sphb, void *fdt)
>>           Object *nv_mrobj = object_property_get_link(OBJECT(nvslot->gpdev),
>>                                                       "nvlink2-mr[0]",
>>                                                       &error_abort);
>> -        uint32_t associativity[] = {
>> -            cpu_to_be32(0x4),
>> -            cpu_to_be32(nvslot->numa_id),
>> -            cpu_to_be32(nvslot->numa_id),
>> -            cpu_to_be32(nvslot->numa_id),
>> -            cpu_to_be32(nvslot->numa_id)
>> -        };
>>           uint64_t size = object_property_get_uint(nv_mrobj, "size", NULL);
>>           uint64_t mem_reg[2] = { cpu_to_be64(nvslot->gpa), cpu_to_be64(size) };
>>           char *mem_name = g_strdup_printf("memory@%"PRIx64, nvslot->gpa);
>> @@ -376,14 +368,7 @@ void spapr_phb_nvgpu_ram_populate_dt(SpaprPhbState *sphb, void *fdt)
>>           _FDT((fdt_setprop_string(fdt, off, "device_type", "memory")));
>>           _FDT((fdt_setprop(fdt, off, "reg", mem_reg, sizeof(mem_reg))));
>>   
>> -        if (sphb->pre_5_1_assoc) {
>> -            associativity[1] = SPAPR_GPU_NUMA_ID;
>> -            associativity[2] = SPAPR_GPU_NUMA_ID;
>> -            associativity[3] = SPAPR_GPU_NUMA_ID;
>> -        }
>> -
>> -        _FDT((fdt_setprop(fdt, off, "ibm,associativity", associativity,
>> -                          sizeof(associativity))));
>> +        spapr_numa_write_assoc_nvlink2(fdt, off, nvslot->numa_id, sphb);
>>   
>>           _FDT((fdt_setprop_string(fdt, off, "compatible",
>>                                    "ibm,coherent-device-memory")));
>> diff --git a/include/hw/ppc/spapr_numa.h b/include/hw/ppc/spapr_numa.h
>> index f6127501a6..b6e0721b07 100644
>> --- a/include/hw/ppc/spapr_numa.h
>> +++ b/include/hw/ppc/spapr_numa.h
>> @@ -15,6 +15,7 @@
>>   
>>   #include "hw/boards.h"
>>   #include "hw/ppc/spapr.h"
>> +#include "hw/pci-host/spapr.h"
>>   
>>   void spapr_numa_associativity_init(MachineState *machine);
>>   void spapr_numa_write_rtas_dt(SpaprMachineState *spapr, void *fdt, int rtas);
>> @@ -24,6 +25,8 @@ int spapr_numa_fixup_cpu_dt(SpaprMachineState *spapr, void *fdt,
>>                               int offset, PowerPCCPU *cpu);
>>   int spapr_numa_write_assoc_lookup_arrays(SpaprMachineState *spapr, void *fdt,
>>                                            int offset);
>> +void spapr_numa_write_assoc_nvlink2(void *fdt, int offset, int numa_id,
>> +                                    SpaprPhbState *sphb);
>>   
>>   
>>   #endif /* HW_SPAPR_NUMA_H */
> 


Thread overview: 18+ messages
2020-09-01 12:56 [PATCH v2 0/7] pseries NUMA distance rework Daniel Henrique Barboza
2020-09-01 12:56 ` [PATCH v2 1/7] ppc: introducing spapr_numa.c NUMA code helper Daniel Henrique Barboza
2020-09-01 12:56 ` [PATCH v2 2/7] ppc/spapr_nvdimm: turn spapr_dt_nvdimm() static Daniel Henrique Barboza
2020-09-01 12:56 ` [PATCH v2 3/7] spapr: introduce SpaprMachineClass::numa_assoc_array Daniel Henrique Barboza
2020-09-03  1:51   ` David Gibson
2020-09-03 11:28     ` Daniel Henrique Barboza
2020-09-01 12:56 ` [PATCH v2 4/7] spapr, spapr_numa: handle vcpu ibm,associativity Daniel Henrique Barboza
2020-09-01 12:56 ` [PATCH v2 5/7] spapr, spapr_numa: move lookup-arrays handling to spapr_numa.c Daniel Henrique Barboza
2020-09-03  1:34   ` David Gibson
2020-09-03 11:22     ` Daniel Henrique Barboza
2020-09-01 12:56 ` [PATCH v2 6/7] spapr_numa: move NVLink2 associativity " Daniel Henrique Barboza
2020-09-03  1:56   ` David Gibson
2020-09-03 14:20     ` Daniel Henrique Barboza [this message]
2020-09-01 12:56 ` [PATCH v2 7/7] spapr_hcall: h_home_node_associativity now reads numa_assoc_array Daniel Henrique Barboza
2020-09-03  1:46   ` David Gibson
2020-09-03 11:17     ` Daniel Henrique Barboza
2020-09-03  1:35 ` [PATCH v2 0/7] pseries NUMA distance rework David Gibson
2020-09-03  1:49   ` David Gibson
