qemu-devel.nongnu.org archive mirror
 help / color / mirror / Atom feed
From: Yuval Shaia <yuval.shaia@oracle.com>
To: Sukrit Bhatnagar <skrtbhtngr@gmail.com>
Cc: qemu-devel@nongnu.org
Subject: Re: [Qemu-devel] [RFC 1/1] hw/pvrdma: Add live migration support
Date: Tue, 25 Jun 2019 18:38:33 +0300	[thread overview]
Message-ID: <20190625153830.GA10554@lap1> (raw)
In-Reply-To: <20190621144541.13770-2-skrtbhtngr@gmail.com>

On Fri, Jun 21, 2019 at 08:15:41PM +0530, Sukrit Bhatnagar wrote:
> Define and register SaveVMHandlers pvrdma_save and
> pvrdma_load for saving and loading the device state,
> which currently includes only the dma, command slot
> and response slot addresses.
> 
> Remap the DSR, command slot and response slot upon
> loading the addresses in the pvrdma_load function.
> 
> Cc: Marcel Apfelbaum <marcel.apfelbaum@gmail.com>
> Cc: Yuval Shaia <yuval.shaia@oracle.com>
> Signed-off-by: Sukrit Bhatnagar <skrtbhtngr@gmail.com>
> ---
>  hw/rdma/vmw/pvrdma_main.c | 56 +++++++++++++++++++++++++++++++++++++++
>  1 file changed, 56 insertions(+)
> 
> diff --git a/hw/rdma/vmw/pvrdma_main.c b/hw/rdma/vmw/pvrdma_main.c
> index adcf79cd63..cd8573173c 100644
> --- a/hw/rdma/vmw/pvrdma_main.c
> +++ b/hw/rdma/vmw/pvrdma_main.c
> @@ -28,6 +28,7 @@
>  #include "sysemu/sysemu.h"
>  #include "monitor/monitor.h"
>  #include "hw/rdma/rdma.h"
> +#include "migration/register.h"
>  
>  #include "../rdma_rm.h"
>  #include "../rdma_backend.h"
> @@ -592,9 +593,62 @@ static void pvrdma_shutdown_notifier(Notifier *n, void *opaque)
>      pvrdma_fini(pci_dev);
>  }
>  
> +static void pvrdma_save(QEMUFile *f, void *opaque)
> +{
> +    PVRDMADev *dev = PVRDMA_DEV(opaque);
> +
> +    qemu_put_be64(f, dev->dsr_info.dma);
> +    qemu_put_be64(f, dev->dsr_info.dsr->cmd_slot_dma);
> +    qemu_put_be64(f, dev->dsr_info.dsr->resp_slot_dma);
> +}
> +
> +static int pvrdma_load(QEMUFile *f, void *opaque, int version_id)
> +{
> +    PVRDMADev *dev = PVRDMA_DEV(opaque);
> +    PCIDevice *pci_dev = PCI_DEVICE(dev);
> +
> +    // Remap DSR
> +    dev->dsr_info.dma = qemu_get_be64(f);
> +    dev->dsr_info.dsr = rdma_pci_dma_map(pci_dev, dev->dsr_info.dma,
> +                                    sizeof(struct pvrdma_device_shared_region));
> +    if (!dev->dsr_info.dsr) {
> +        rdma_error_report("Failed to map to DSR");
> +        return -1;
> +    }
> +    qemu_log("pvrdma_load: successfully remapped to DSR\n");
> +
> +    // Remap cmd slot
> +    dev->dsr_info.dsr->cmd_slot_dma = qemu_get_be64(f);
> +    dev->dsr_info.req = rdma_pci_dma_map(pci_dev, dev->dsr_info.dsr->cmd_slot_dma,
> +                                     sizeof(union pvrdma_cmd_req));
> +    if (!dev->dsr_info.req) {
> +        rdma_error_report("Failed to map to command slot address");
> +        return -1;
> +    }
> +    qemu_log("pvrdma_load: successfully remapped to cmd slot\n");
> +
> +    // Remap rsp slot
> +    dev->dsr_info.dsr->resp_slot_dma = qemu_get_be64(f);
> +    dev->dsr_info.rsp = rdma_pci_dma_map(pci_dev, dev->dsr_info.dsr->resp_slot_dma,
> +                                     sizeof(union pvrdma_cmd_resp));
> +    if (!dev->dsr_info.rsp) {
> +        rdma_error_report("Failed to map to response slot address");
> +        return -1;
> +    }
> +    qemu_log("pvrdma_load: successfully remapped to rsp slot\n");
> +
> +    return 0;
> +}
> +
> +static SaveVMHandlers savevm_pvrdma = {
> +    .save_state = pvrdma_save,
> +    .load_state = pvrdma_load,
> +};
> +
>  static void pvrdma_realize(PCIDevice *pdev, Error **errp)
>  {
>      int rc = 0;
> +    DeviceState *s = DEVICE(pdev);
>      PVRDMADev *dev = PVRDMA_DEV(pdev);
>      Object *memdev_root;
>      bool ram_shared = false;
> @@ -666,6 +720,8 @@ static void pvrdma_realize(PCIDevice *pdev, Error **errp)
>      dev->shutdown_notifier.notify = pvrdma_shutdown_notifier;
>      qemu_register_shutdown_notifier(&dev->shutdown_notifier);
>  
> +    register_savevm_live(s, "pvrdma", -1, 1, &savevm_pvrdma, dev);
> +

Don't forget to call unregister_savevm() from pvrdma_fini(), so the handlers
are removed when the device is torn down.

>  out:
>      if (rc) {
>          pvrdma_fini(pdev);
> -- 
> 2.21.0
> 
> 


  parent reply	other threads:[~2019-06-25 15:41 UTC|newest]

Thread overview: 17+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2019-06-21 14:45 [Qemu-devel] [RFC 0/1] Add live migration support to the PVRDMA device Sukrit Bhatnagar
2019-06-21 14:45 ` [Qemu-devel] [RFC 1/1] hw/pvrdma: Add live migration support Sukrit Bhatnagar
2019-06-25 15:32   ` Yuval Shaia
2019-06-28 11:26     ` Dr. David Alan Gilbert
2019-06-29 12:45       ` Sukrit Bhatnagar
2019-06-30  8:13         ` Yuval Shaia
2019-07-01  2:15           ` Sukrit Bhatnagar
2019-07-01 12:08             ` Yuval Shaia
2019-07-08 10:54         ` Dr. David Alan Gilbert
2019-06-25 15:38   ` Yuval Shaia [this message]
2019-06-21 17:55 ` [Qemu-devel] [RFC 0/1] Add live migration support to the PVRDMA device no-reply
2019-06-25  8:14 ` Marcel Apfelbaum
2019-06-25  8:39   ` Dmitry Fleytman
2019-06-25  8:49     ` Marcel Apfelbaum
2019-06-25  9:11       ` Dr. David Alan Gilbert
2019-06-25 11:39         ` Marcel Apfelbaum
2019-06-25 10:25       ` Dmitry Fleytman

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20190625153830.GA10554@lap1 \
    --to=yuval.shaia@oracle.com \
    --cc=qemu-devel@nongnu.org \
    --cc=skrtbhtngr@gmail.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).