From: Igor Mammedov <imammedo@redhat.com>
To: Xiao Guangrong <guangrong.xiao@linux.intel.com>
Cc: pbonzini@redhat.com, gleb@kernel.org, mtosatti@redhat.com,
stefanha@redhat.com, mst@redhat.com, rth@twiddle.net,
ehabkost@redhat.com, dan.j.williams@intel.com,
kvm@vger.kernel.org, qemu-devel@nongnu.org
Subject: Re: [Qemu-devel] [PATCH v5 2/5] nvdimm acpi: introduce fit buffer
Date: Fri, 4 Nov 2016 11:08:24 +0100
Message-ID: <20161104110824.52ee89cf@nial.brq.redhat.com>
In-Reply-To: <1478198186-45204-3-git-send-email-guangrong.xiao@linux.intel.com>
On Fri, 4 Nov 2016 02:36:23 +0800
Xiao Guangrong <guangrong.xiao@linux.intel.com> wrote:
> The buffer is used to save the FIT info for all the presented nvdimm
> devices which is updated after the nvdimm device is plugged or
> unplugged. In the later patch, it will be used to construct NVDIMM
> ACPI _FIT method which reflects the presented nvdimm devices after
> nvdimm hotplug
>
> As FIT buffer can not completely mapped into guest address space,
s/can not completely mapped/can not be completely mapped/
> OSPM will exit to QEMU multiple times, however, there is the race
> condition - FIT may be changed during these multiple exits, so that
> we mark @dirty whenever the buffer is updated.
>
> @dirty is cleared for the first time OSPM gets fit buffer, if
> dirty is detected in the later access, OSPM will restart the
> access
s/presented/present/g
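As an aside, a minimal sketch of the retry loop this protocol implies,
for readers following along (hypothetical OSPM-side logic; none of the
names below come from this series):

    #include <stdbool.h>

    /* Illustrative only: models the @dirty handshake described in the
     * commit message, not real AML or QEMU code. */
    typedef struct {
        bool dirty;   /* set by QEMU whenever the FIT buffer is rebuilt */
    } FitHandshake;

    static void ospm_read_fit(FitHandshake *hs)
    {
        do {
            hs->dirty = false;  /* cleared on the first read access    */
            /* ... read the FIT in chunks; each chunk is an exit to
             * QEMU, during which hotplug may rebuild the buffer ... */
        } while (hs->dirty);    /* buffer changed mid-read: restart    */
    }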
> Signed-off-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
> ---
> hw/acpi/nvdimm.c | 59 +++++++++++++++++++++++++++++++------------------
> hw/i386/acpi-build.c | 2 +-
> hw/i386/pc.c | 4 ++++
> include/hw/mem/nvdimm.h | 21 +++++++++++++++++-
> 4 files changed, 62 insertions(+), 24 deletions(-)
>
> diff --git a/hw/acpi/nvdimm.c b/hw/acpi/nvdimm.c
> index 58bc5c6..73db3a7 100644
> --- a/hw/acpi/nvdimm.c
> +++ b/hw/acpi/nvdimm.c
> @@ -33,35 +33,30 @@
> #include "hw/nvram/fw_cfg.h"
> #include "hw/mem/nvdimm.h"
>
> -static int nvdimm_plugged_device_list(Object *obj, void *opaque)
> +static int nvdimm_device_list(Object *obj, void *opaque)
> {
> GSList **list = opaque;
>
> if (object_dynamic_cast(obj, TYPE_NVDIMM)) {
> - DeviceState *dev = DEVICE(obj);
> -
> - if (dev->realized) { /* only realized NVDIMMs matter */
> - *list = g_slist_append(*list, DEVICE(obj));
> - }
> + *list = g_slist_append(*list, DEVICE(obj));
> }
>
> - object_child_foreach(obj, nvdimm_plugged_device_list, opaque);
> + object_child_foreach(obj, nvdimm_device_list, opaque);
> return 0;
> }
above hunk should be in a separate patch, to reduce noise caused by rename
>
> /*
> - * inquire plugged NVDIMM devices and link them into the list which is
> + * inquire NVDIMM devices and link them into the list which is
> * returned to the caller.
> *
> * Note: it is the caller's responsibility to free the list to avoid
> * memory leak.
> */
> -static GSList *nvdimm_get_plugged_device_list(void)
> +static GSList *nvdimm_get_device_list(void)
> {
> GSList *list = NULL;
>
> - object_child_foreach(qdev_get_machine(), nvdimm_plugged_device_list,
> - &list);
> + object_child_foreach(qdev_get_machine(), nvdimm_device_list, &list);
> return list;
> }
>
> @@ -219,7 +214,7 @@ static uint32_t nvdimm_slot_to_dcr_index(int slot)
> static NVDIMMDevice *nvdimm_get_device_by_handle(uint32_t handle)
> {
> NVDIMMDevice *nvdimm = NULL;
> - GSList *list, *device_list = nvdimm_get_plugged_device_list();
> + GSList *list, *device_list = nvdimm_get_device_list();
>
> for (list = device_list; list; list = list->next) {
> NVDIMMDevice *nvd = list->data;
> @@ -348,8 +343,9 @@ static void nvdimm_build_structure_dcr(GArray *structures, DeviceState *dev)
> (DSM) in DSM Spec Rev1.*/);
> }
>
> -static GArray *nvdimm_build_device_structure(GSList *device_list)
> +static GArray *nvdimm_build_device_structure(void)
> {
> + GSList *device_list = nvdimm_get_device_list();
> GArray *structures = g_array_new(false, true /* clear */, 1);
>
> for (; device_list; device_list = device_list->next) {
> @@ -367,14 +363,32 @@ static GArray *nvdimm_build_device_structure(GSList *device_list)
> /* build NVDIMM Control Region Structure. */
> nvdimm_build_structure_dcr(structures, dev);
> }
> + g_slist_free(device_list);
>
> return structures;
> }
>
> -static void nvdimm_build_nfit(GSList *device_list, GArray *table_offsets,
> +static void nvdimm_init_fit_buffer(NvdimmFitBuffer *fit_buf)
> +{
> + fit_buf->fit = g_array_new(false, true /* clear */, 1);
> +}
> +
> +static void nvdimm_build_fit_buffer(NvdimmFitBuffer *fit_buf)
> +{
> + g_array_free(fit_buf->fit, true);
> + fit_buf->fit = nvdimm_build_device_structure();
> + fit_buf->dirty = true;
> +}
> +
> +void nvdimm_plug(AcpiNVDIMMState *state)
> +{
> + nvdimm_build_fit_buffer(&state->fit_buf);
> +}
> +
> +static void nvdimm_build_nfit(AcpiNVDIMMState *state, GArray *table_offsets,
> GArray *table_data, BIOSLinker *linker)
> {
> - GArray *structures = nvdimm_build_device_structure(device_list);
> + NvdimmFitBuffer *fit_buf = &state->fit_buf;
> unsigned int header;
>
> acpi_add_table(table_offsets, table_data);
> @@ -383,12 +397,11 @@ static void nvdimm_build_nfit(GSList *device_list, GArray *table_offsets,
> header = table_data->len;
> acpi_data_push(table_data, sizeof(NvdimmNfitHeader));
> /* NVDIMM device structures. */
> - g_array_append_vals(table_data, structures->data, structures->len);
> + g_array_append_vals(table_data, fit_buf->fit->data, fit_buf->fit->len);
>
> build_header(linker, table_data,
> (void *)(table_data->data + header), "NFIT",
> - sizeof(NvdimmNfitHeader) + structures->len, 1, NULL, NULL);
> - g_array_free(structures, true);
> + sizeof(NvdimmNfitHeader) + fit_buf->fit->len, 1, NULL, NULL);
> }
>
> struct NvdimmDsmIn {
> @@ -771,6 +784,8 @@ void nvdimm_init_acpi_state(AcpiNVDIMMState *state, MemoryRegion *io,
> acpi_data_push(state->dsm_mem, sizeof(NvdimmDsmIn));
> fw_cfg_add_file(fw_cfg, NVDIMM_DSM_MEM_FILE, state->dsm_mem->data,
> state->dsm_mem->len);
> +
> + nvdimm_init_fit_buffer(&state->fit_buf);
> }
>
> #define NVDIMM_COMMON_DSM "NCAL"
> @@ -1045,7 +1060,7 @@ static void nvdimm_build_ssdt(GArray *table_offsets, GArray *table_data,
> }
>
> void nvdimm_build_acpi(GArray *table_offsets, GArray *table_data,
> - BIOSLinker *linker, GArray *dsm_dma_arrea,
> + BIOSLinker *linker, AcpiNVDIMMState *state,
> uint32_t ram_slots)
> {
> GSList *device_list;
> @@ -1055,15 +1070,15 @@ void nvdimm_build_acpi(GArray *table_offsets, GArray *table_data,
> return;
> }
>
> - nvdimm_build_ssdt(table_offsets, table_data, linker, dsm_dma_arrea,
> + nvdimm_build_ssdt(table_offsets, table_data, linker, state->dsm_mem,
> ram_slots);
>
> - device_list = nvdimm_get_plugged_device_list();
> + device_list = nvdimm_get_device_list();
> /* no NVDIMM device is plugged. */
> if (!device_list) {
> return;
> }
>
> - nvdimm_build_nfit(device_list, table_offsets, table_data, linker);
> + nvdimm_build_nfit(state, table_offsets, table_data, linker);
> g_slist_free(device_list);
> }
> diff --git a/hw/i386/acpi-build.c b/hw/i386/acpi-build.c
> index 6ae4769..bc49958 100644
> --- a/hw/i386/acpi-build.c
> +++ b/hw/i386/acpi-build.c
> @@ -2767,7 +2767,7 @@ void acpi_build(AcpiBuildTables *tables, MachineState *machine)
> }
> if (pcms->acpi_nvdimm_state.is_enabled) {
> nvdimm_build_acpi(table_offsets, tables_blob, tables->linker,
> - pcms->acpi_nvdimm_state.dsm_mem, machine->ram_slots);
> + &pcms->acpi_nvdimm_state, machine->ram_slots);
> }
>
> /* Add tables supplied by user (if any) */
> diff --git a/hw/i386/pc.c b/hw/i386/pc.c
> index 93ff49c..a4e1509 100644
> --- a/hw/i386/pc.c
> +++ b/hw/i386/pc.c
> @@ -1700,6 +1700,10 @@ static void pc_dimm_plug(HotplugHandler *hotplug_dev,
> goto out;
> }
>
> + if (object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM)) {
> + nvdimm_plug(&pcms->acpi_nvdimm_state);
> + }
> +
> hhc = HOTPLUG_HANDLER_GET_CLASS(pcms->acpi_dev);
> hhc->plug(HOTPLUG_HANDLER(pcms->acpi_dev), dev, &error_abort);
> out:
> diff --git a/include/hw/mem/nvdimm.h b/include/hw/mem/nvdimm.h
> index 63a2b20..e704e55 100644
> --- a/include/hw/mem/nvdimm.h
> +++ b/include/hw/mem/nvdimm.h
> @@ -98,12 +98,30 @@ typedef struct NVDIMMClass NVDIMMClass;
> #define NVDIMM_ACPI_IO_BASE 0x0a18
> #define NVDIMM_ACPI_IO_LEN 4
>
> +/*
> + * NvdimmFitBuffer:
> + * @fit: FIT structures for present NVDIMMs. It is updated after
s/after/when/
> + * the NVDIMM device is plugged or unplugged.
> + * @dirty: It is set whenever the buffer is updated
. It allows OSPM to detect changes and restart any read in progress.
> + so that it
> + * preserves NVDIMM ACPI _FIT method to read incomplete or
> + * obsolete fit info if fit update happens during multiple RFIT
> + * calls.
drop this
> + */
> +struct NvdimmFitBuffer {
> + GArray *fit;
> + bool dirty;
> +};
> +typedef struct NvdimmFitBuffer NvdimmFitBuffer;
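As a side note, here is a rough sketch of how a later read-FIT handler
might consume @dirty (hypothetical: the chunked-read function and its
error convention are made up for illustration; only GArray and MIN come
from GLib):

    #include <glib.h>
    #include <stdint.h>
    #include <string.h>

    /* Sketch only: mirrors the NvdimmFitBuffer fields added above. */
    typedef struct {
        GArray *fit;
        gboolean dirty;
    } FitBufSketch;

    /*
     * Hypothetical chunked read: returns -1 when the FIT was rebuilt
     * between calls, so the guest knows to restart from offset 0.
     */
    static int fit_read_chunk(FitBufSketch *buf, uint32_t offset,
                              void *out, uint32_t len)
    {
        if (offset == 0) {
            buf->dirty = FALSE;        /* a fresh read pass begins   */
        } else if (buf->dirty) {
            return -1;                 /* raced with hot(un)plug     */
        }
        if (offset >= buf->fit->len) {
            return 0;                  /* nothing left to read       */
        }
        len = MIN(len, buf->fit->len - offset);
        memcpy(out, buf->fit->data + offset, len);
        return (int)len;
    }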
> +
> struct AcpiNVDIMMState {
> /* detect if NVDIMM support is enabled. */
> bool is_enabled;
>
> /* the data of the fw_cfg file NVDIMM_DSM_MEM_FILE. */
> GArray *dsm_mem;
> +
> + NvdimmFitBuffer fit_buf;
> +
> /* the IO region used by OSPM to transfer control to QEMU. */
> MemoryRegion io_mr;
> };
> @@ -112,6 +130,7 @@ typedef struct AcpiNVDIMMState AcpiNVDIMMState;
> void nvdimm_init_acpi_state(AcpiNVDIMMState *state, MemoryRegion *io,
> FWCfgState *fw_cfg, Object *owner);
> void nvdimm_build_acpi(GArray *table_offsets, GArray *table_data,
> - BIOSLinker *linker, GArray *dsm_dma_arrea,
> + BIOSLinker *linker, AcpiNVDIMMState *state,
> uint32_t ram_slots);
> +void nvdimm_plug(AcpiNVDIMMState *state);
> #endif
Otherwise the patch looks good.