From: Bharata B Rao <bharata@linux.vnet.ibm.com>
To: Michael Roth <mdroth@linux.vnet.ibm.com>
Cc: qemu-devel@nongnu.org, qemu-ppc@nongnu.org, afaerber@suse.de,
	david@gibson.dropbear.id.au, imammedo@redhat.com,
	armbru@redhat.com, thuth@redhat.com, aik@ozlabs.ru,
	agraf@suse.de, pbonzini@redhat.com, ehabkost@redhat.com,
	pkrempa@redhat.com, eblake@redhat.com,
	mjrosato@linux.vnet.ibm.com, borntraeger@de.ibm.com
Subject: Re: [Qemu-devel] [RFC PATCH v2.1 10/12] spapr: CPU hotplug support
Date: Thu, 5 May 2016 14:52:44 +0530
Message-ID: <20160505092244.GB16087@in.ibm.com>
In-Reply-To: <20160405234716.589.89901@loki>

On Tue, Apr 05, 2016 at 06:47:16PM -0500, Michael Roth wrote:
> Quoting Bharata B Rao (2016-03-31 03:39:19)
> > Set up device tree entries for the hotplugged CPU core and use the
> > existing RTAS event logging infrastructure to send a CPU hotplug notification
> > to the guest.
> > 
> > Signed-off-by: Bharata B Rao <bharata@linux.vnet.ibm.com>
> > ---
> >  hw/ppc/spapr.c                  | 58 ++++++++++++++++++++++++++++++++++
> >  hw/ppc/spapr_cpu_core.c         | 70 +++++++++++++++++++++++++++++++++++++++++
> >  hw/ppc/spapr_events.c           |  3 ++
> >  hw/ppc/spapr_rtas.c             | 24 ++++++++++++++
> >  include/hw/ppc/spapr.h          |  2 ++
> >  include/hw/ppc/spapr_cpu_core.h |  2 ++
> >  6 files changed, 159 insertions(+)
> > 
> > diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
> > index 1ead043..1a5dbd9 100644
> > --- a/hw/ppc/spapr.c
> > +++ b/hw/ppc/spapr.c
> > @@ -603,6 +603,16 @@ static void spapr_populate_cpu_dt(CPUState *cs, void *fdt, int offset,
> >      size_t page_sizes_prop_size;
> >      uint32_t vcpus_per_socket = smp_threads * smp_cores;
> >      uint32_t pft_size_prop[] = {0, cpu_to_be32(spapr->htab_shift)};
> > +    sPAPRDRConnector *drc;
> > +    sPAPRDRConnectorClass *drck;
> > +    int drc_index;
> > +
> > +    drc = spapr_dr_connector_by_id(SPAPR_DR_CONNECTOR_TYPE_CPU, index);
> > +    if (drc) {
> > +        drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
> > +        drc_index = drck->get_index(drc);
> > +        _FDT((fdt_setprop_cell(fdt, offset, "ibm,my-drc-index", drc_index)));
> > +    }
> > 
> >      /* Note: we keep CI large pages off for now because a 64K capable guest
> >       * provisioned with large pages might otherwise try to map a qemu
> > @@ -987,6 +997,16 @@ static void spapr_finalize_fdt(sPAPRMachineState *spapr,
> >          _FDT(spapr_drc_populate_dt(fdt, 0, NULL, SPAPR_DR_CONNECTOR_TYPE_LMB));
> >      }
> > 
> > +    if (smc->dr_cpu_enabled) {
> > +        int offset = fdt_path_offset(fdt, "/cpus");
> > +        ret = spapr_drc_populate_dt(fdt, offset, NULL,
> > +                                    SPAPR_DR_CONNECTOR_TYPE_CPU);
> > +        if (ret < 0) {
> > +            error_report("Couldn't set up CPU DR device tree properties");
> > +            exit(1);
> > +        }
> > +    }
> > +
> >      _FDT((fdt_pack(fdt)));
> > 
> >      if (fdt_totalsize(fdt) > FDT_MAX_SIZE) {
> > @@ -1622,6 +1642,8 @@ static void spapr_boot_set(void *opaque, const char *boot_device,
> >  void spapr_cpu_init(sPAPRMachineState *spapr, PowerPCCPU *cpu, Error **errp)
> >  {
> >      CPUPPCState *env = &cpu->env;
> > +    CPUState *cs = CPU(cpu);
> > +    int i;
> > 
> >      /* Set time-base frequency to 512 MHz */
> >      cpu_ppc_tb_init(env, TIMEBASE_FREQ);
> > @@ -1646,6 +1668,14 @@ void spapr_cpu_init(sPAPRMachineState *spapr, PowerPCCPU *cpu, Error **errp)
> >          }
> >      }
> > 
> > +    /* Set NUMA node for the added CPUs  */
> > +    for (i = 0; i < nb_numa_nodes; i++) {
> > +        if (test_bit(cs->cpu_index, numa_info[i].node_cpu)) {
> > +            cs->numa_node = i;
> > +            break;
> > +        }
> > +    }
> > +
> >      xics_cpu_setup(spapr->icp, cpu);
> > 
> >      qemu_register_reset(spapr_cpu_reset, cpu);
> > @@ -1825,6 +1855,11 @@ static void ppc_spapr_init(MachineState *machine)
> > 
> >          for (i = 0; i < spapr_max_cores; i++) {
> >              int core_dt_id = i * smt;
> > +            sPAPRDRConnector *drc =
> > +                spapr_dr_connector_new(OBJECT(spapr),
> > +                                       SPAPR_DR_CONNECTOR_TYPE_CPU, core_dt_id);
> > +
> > +            qemu_register_reset(spapr_drc_reset, drc);
> > 
> >              if (i < spapr_cores) {
> >                  char *type = spapr_get_cpu_core_type(machine->cpu_model);
> > @@ -2247,6 +2282,27 @@ out:
> >      error_propagate(errp, local_err);
> >  }
> > 
> > +void *spapr_populate_hotplug_cpu_dt(CPUState *cs, int *fdt_offset,
> > +                                    sPAPRMachineState *spapr)
> > +{
> > +    PowerPCCPU *cpu = POWERPC_CPU(cs);
> > +    DeviceClass *dc = DEVICE_GET_CLASS(cs);
> > +    int id = ppc_get_vcpu_dt_id(cpu);
> > +    void *fdt;
> > +    int offset, fdt_size;
> > +    char *nodename;
> > +
> > +    fdt = create_device_tree(&fdt_size);
> > +    nodename = g_strdup_printf("%s@%x", dc->fw_name, id);
> > +    offset = fdt_add_subnode(fdt, 0, nodename);
> > +
> > +    spapr_populate_cpu_dt(cs, fdt, offset, spapr);
> > +    g_free(nodename);
> > +
> > +    *fdt_offset = offset;
> > +    return fdt;
> > +}
> > +
> >  static void spapr_machine_device_plug(HotplugHandler *hotplug_dev,
> >                                        DeviceState *dev, Error **errp)
> >  {
> > @@ -2287,6 +2343,8 @@ static void spapr_machine_device_plug(HotplugHandler *hotplug_dev,
> >          }
> > 
> >          spapr_memory_plug(hotplug_dev, dev, node, errp);
> > +    } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
> > +        spapr_core_plug(hotplug_dev, dev, errp);
> >      }
> >  }
> > 
> > diff --git a/hw/ppc/spapr_cpu_core.c b/hw/ppc/spapr_cpu_core.c
> > index 640d143..a9ba843 100644
> > --- a/hw/ppc/spapr_cpu_core.c
> > +++ b/hw/ppc/spapr_cpu_core.c
> > @@ -18,6 +18,7 @@
> >  void spapr_core_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
> >                           Error **errp)
> >  {
> > +    sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(qdev_get_machine());
> >      sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
> >      int spapr_max_cores = max_cpus / smp_threads;
> >      int index;
> > @@ -25,6 +26,11 @@ void spapr_core_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
> >      Error *local_err = NULL;
> >      CPUCore *cc = CPU_CORE(dev);
> > 
> > +    if (!smc->dr_cpu_enabled && dev->hotplugged) {
> > +        error_setg(&local_err, "CPU hotplug not supported for this machine");
> > +        goto out;
> > +    }
> > +
> >      if (cc->threads != smp_threads) {
> >          error_setg(&local_err, "threads must be %d", smp_threads);
> >          goto out;
> > @@ -49,6 +55,70 @@ out:
> >      error_propagate(errp, local_err);
> >  }
> > 
> > +void spapr_core_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
> > +                     Error **errp)
> > +{
> > +    sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(qdev_get_machine());
> > +    sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
> > +    sPAPRCPUCore *core = SPAPR_CPU_CORE(OBJECT(dev));
> > +    CPUCore *cc = CPU_CORE(dev);
> > +    CPUState *cs = CPU(&core->threads[0]);
> > +    sPAPRDRConnector *drc;
> > +    sPAPRDRConnectorClass *drck;
> > +    Error *local_err = NULL;
> > +    void *fdt = NULL;
> > +    int fdt_offset = 0;
> > +    int index;
> > +    int smt = kvmppc_smt_threads();
> > +
> > +    drc = spapr_dr_connector_by_id(SPAPR_DR_CONNECTOR_TYPE_CPU, cc->core);
> > +    index = cc->core / smt;
> > +    spapr->cores[index] = OBJECT(dev);
> > +
> > +    if (!smc->dr_cpu_enabled) {
> > +        /*
> > +         * This is a coldplugged CPU core but the machine doesn't support
> > +         * DR. So skip the hotplug path, ensuring that the core is brought
> > +         * up online without an associated DR connector.
> > +         */
> > +        return;
> > +    }
> > +
> > +    g_assert(drc);
> > +
> > +    /*
> > +     * Set up CPU DT entries only for hotplugged CPUs. For boot time or
> > +     * coldplugged CPUs, DT entries are set up in spapr_finalize_fdt().
> > +     */
> > +    if (dev->hotplugged) {
> > +        fdt = spapr_populate_hotplug_cpu_dt(cs, &fdt_offset, spapr);
> > +        dev->hotplugged = true;
> 
> This doesn't seem necessary ^

Yes, this hunk refuses to die; I have now made sure it is finally removed.
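
With the redundant assignment dropped, the start of that block would simply
read as below (a sketch; the rest of the hunk stays as posted):

    /*
     * Set up CPU DT entries only for hotplugged CPUs. For boot time or
     * coldplugged CPUs, DT entries are set up in spapr_finalize_fdt().
     */
    if (dev->hotplugged) {
        fdt = spapr_populate_hotplug_cpu_dt(cs, &fdt_offset, spapr);
        /* ... rest of the hotplug path continues unchanged ... */
    }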

Regards,
Bharata.

Thread overview: 40+ messages
2016-03-31  8:39 [Qemu-devel] [RFC PATCH v2.1 00/12] Core based CPU hotplug for PowerPC sPAPR Bharata B Rao
2016-03-31  8:39 ` [Qemu-devel] [RFC PATCH v2.1 01/12] exec: Remove cpu from cpus list during cpu_exec_exit() Bharata B Rao
2016-03-31  8:39 ` [Qemu-devel] [RFC PATCH v2.1 02/12] exec: Do vmstate unregistration from cpu_exec_exit() Bharata B Rao
2016-03-31  8:39 ` [Qemu-devel] [RFC PATCH v2.1 03/12] cpu: Reclaim vCPU objects Bharata B Rao
2016-03-31  8:39 ` [Qemu-devel] [RFC PATCH v2.1 04/12] cpu: Add a sync version of cpu_remove() Bharata B Rao
2016-03-31  8:39 ` [Qemu-devel] [RFC PATCH v2.1 05/12] qdev: hotplug: Introduce HotplugHandler.pre_plug() callback Bharata B Rao
2016-04-01  3:30   ` David Gibson
2016-04-01 10:38     ` Paolo Bonzini
2016-04-04  0:09       ` David Gibson
2016-03-31  8:39 ` [Qemu-devel] [RFC PATCH v2.1 06/12] cpu: Abstract CPU core type Bharata B Rao
2016-03-31  8:39 ` [Qemu-devel] [RFC PATCH v2.1 07/12] spapr: Abstract CPU core device Bharata B Rao
2016-03-31  8:39 ` [Qemu-devel] [RFC PATCH v2.1 08/12] spapr: Add CPU type specific core devices Bharata B Rao
2016-04-01  5:08   ` David Gibson
2016-04-01  6:12     ` Bharata B Rao
2016-04-04  0:13       ` David Gibson
2016-04-09  2:21         ` Michael Roth
2016-04-04  0:16   ` David Gibson
2016-04-08 23:35   ` Michael Roth
2016-03-31  8:39 ` [Qemu-devel] [RFC PATCH v2.1 09/12] spapr: convert boot CPUs into CPU " Bharata B Rao
2016-04-01  5:12   ` David Gibson
2016-04-08 23:35   ` Michael Roth
2016-05-05  9:19     ` Bharata B Rao
2016-03-31  8:39 ` [Qemu-devel] [RFC PATCH v2.1 10/12] spapr: CPU hotplug support Bharata B Rao
2016-04-04  4:23   ` David Gibson
2016-04-05 23:47   ` Michael Roth
2016-05-05  9:22     ` Bharata B Rao [this message]
2016-05-06  8:57   ` Igor Mammedov
2016-05-06 10:14     ` Bharata B Rao
2016-05-06 11:01       ` Igor Mammedov
2016-03-31  8:39 ` [Qemu-devel] [RFC PATCH v2.1 11/12] xics, xics_kvm: Handle CPU unplug correctly Bharata B Rao
2016-04-06  0:24   ` Michael Roth
2016-04-06  0:43     ` David Gibson
2016-04-08 23:40       ` Michael Roth
2016-03-31  8:39 ` [Qemu-devel] [RFC PATCH v2.1 12/12] spapr: CPU hot unplug support Bharata B Rao
2016-04-04  4:27   ` David Gibson
2016-05-09  4:24     ` Bharata B Rao
2016-04-04 14:44 ` [Qemu-devel] [RFC PATCH v2.1 00/12] Core based CPU hotplug for PowerPC sPAPR Igor Mammedov
2016-04-05 14:55   ` Bharata B Rao
2016-04-05 18:40     ` Igor Mammedov
2016-04-05 21:58     ` Igor Mammedov
