* Re: [Qemu-devel] [PATCH 01/12] savevm: Use a struct to pass all handlers
       [not found] ` <39651e275488caec46861cb5b11ba1c7961a429c.1340910651.git.quintela@redhat.com>
@ 2012-07-01  8:05   ` Orit Wasserman
       [not found]   ` <4FEF3843.1000907@weilnetz.de>
  1 sibling, 0 replies; 20+ messages in thread
From: Orit Wasserman @ 2012-07-01  8:05 UTC (permalink / raw)
  To: Juan Quintela; +Cc: qemu-devel

On 06/28/2012 10:21 PM, Juan Quintela wrote:
> This will make it easier to add more operations in the next patches.
> 
> Signed-off-by: Juan Quintela <quintela@redhat.com>
> ---
>  savevm.c  |   54 +++++++++++++++++++++++++-----------------------------
>  vmstate.h |    7 +++++++
>  2 files changed, 32 insertions(+), 29 deletions(-)
> 
> diff --git a/savevm.c b/savevm.c
> index a15c163..73626d4 100644
> --- a/savevm.c
> +++ b/savevm.c
> @@ -1171,10 +1171,7 @@ typedef struct SaveStateEntry {
>      int alias_id;
>      int version_id;
>      int section_id;
> -    SaveSetParamsHandler *set_params;
> -    SaveLiveStateHandler *save_live_state;
> -    SaveStateHandler *save_state;
> -    LoadStateHandler *load_state;
> +    SaveVMHandlers *ops;
>      const VMStateDescription *vmsd;
>      void *opaque;
>      CompatEntry *compat;
> @@ -1237,10 +1234,11 @@ int register_savevm_live(DeviceState *dev,
>      se = g_malloc0(sizeof(SaveStateEntry));
>      se->version_id = version_id;
>      se->section_id = global_section_id++;
> -    se->set_params = set_params;
> -    se->save_live_state = save_live_state;
> -    se->save_state = save_state;
> -    se->load_state = load_state;
> +    se->ops = g_malloc0(sizeof(SaveVMHandlers));
> +    se->ops->set_params = set_params;
> +    se->ops->save_live_state = save_live_state;
> +    se->ops->save_state = save_state;
> +    se->ops->load_state = load_state;
>      se->opaque = opaque;
>      se->vmsd = NULL;
>      se->no_migrate = 0;
> @@ -1309,6 +1307,7 @@ void unregister_savevm(DeviceState *dev, const char *idstr, void *opaque)
>              if (se->compat) {
>                  g_free(se->compat);
>              }
> +            g_free(se->ops);
>              g_free(se);
>          }
>      }
> @@ -1327,9 +1326,6 @@ int vmstate_register_with_alias_id(DeviceState *dev, int instance_id,
>      se = g_malloc0(sizeof(SaveStateEntry));
>      se->version_id = vmsd->version_id;
>      se->section_id = global_section_id++;
> -    se->save_live_state = NULL;
> -    se->save_state = NULL;
> -    se->load_state = NULL;
>      se->opaque = opaque;
>      se->vmsd = vmsd;
>      se->alias_id = alias_id;
> @@ -1524,7 +1520,7 @@ void vmstate_save_state(QEMUFile *f, const VMStateDescription *vmsd,
>  static int vmstate_load(QEMUFile *f, SaveStateEntry *se, int version_id)
>  {
>      if (!se->vmsd) {         /* Old style */
> -        return se->load_state(f, se->opaque, version_id);
> +        return se->ops->load_state(f, se->opaque, version_id);
>      }
>      return vmstate_load_state(f, se->vmsd, se->opaque, version_id);
>  }
> @@ -1532,7 +1528,7 @@ static int vmstate_load(QEMUFile *f, SaveStateEntry *se, int version_id)
>  static void vmstate_save(QEMUFile *f, SaveStateEntry *se)
>  {
>      if (!se->vmsd) {         /* Old style */
> -        se->save_state(f, se->opaque);
> +        se->ops->save_state(f, se->opaque);
>          return;
>      }
>      vmstate_save_state(f,se->vmsd, se->opaque);
> @@ -1569,10 +1565,10 @@ int qemu_savevm_state_begin(QEMUFile *f,
>      int ret;
> 
>      QTAILQ_FOREACH(se, &savevm_handlers, entry) {
> -        if(se->set_params == NULL) {
> +        if (!se->ops || !se->ops->set_params) {
>              continue;
>          }
> -        se->set_params(params, se->opaque);
> +        se->ops->set_params(params, se->opaque);
>      }
>      
>      qemu_put_be32(f, QEMU_VM_FILE_MAGIC);
> @@ -1581,9 +1577,9 @@ int qemu_savevm_state_begin(QEMUFile *f,
>      QTAILQ_FOREACH(se, &savevm_handlers, entry) {
>          int len;
> 
> -        if (se->save_live_state == NULL)
> +        if (!se->ops || !se->ops->save_live_state) {
>              continue;
> -
> +        }
>          /* Section type */
>          qemu_put_byte(f, QEMU_VM_SECTION_START);
>          qemu_put_be32(f, se->section_id);
> @@ -1596,7 +1592,7 @@ int qemu_savevm_state_begin(QEMUFile *f,
>          qemu_put_be32(f, se->instance_id);
>          qemu_put_be32(f, se->version_id);
> 
> -        ret = se->save_live_state(f, QEMU_VM_SECTION_START, se->opaque);
> +        ret = se->ops->save_live_state(f, QEMU_VM_SECTION_START, se->opaque);
>          if (ret < 0) {
>              qemu_savevm_state_cancel(f);
>              return ret;
> @@ -1623,9 +1619,9 @@ int qemu_savevm_state_iterate(QEMUFile *f)
>      int ret = 1;
> 
>      QTAILQ_FOREACH(se, &savevm_handlers, entry) {
> -        if (se->save_live_state == NULL)
> +        if (!se->ops || !se->ops->save_live_state) {
>              continue;
> -
> +        }
>          if (qemu_file_rate_limit(f)) {
>              return 0;
>          }
> @@ -1634,7 +1630,7 @@ int qemu_savevm_state_iterate(QEMUFile *f)
>          qemu_put_byte(f, QEMU_VM_SECTION_PART);
>          qemu_put_be32(f, se->section_id);
> 
> -        ret = se->save_live_state(f, QEMU_VM_SECTION_PART, se->opaque);
> +        ret = se->ops->save_live_state(f, QEMU_VM_SECTION_PART, se->opaque);
>          trace_savevm_section_end(se->section_id);
> 
>          if (ret <= 0) {
> @@ -1663,15 +1659,15 @@ int qemu_savevm_state_complete(QEMUFile *f)
>      cpu_synchronize_all_states();
> 
>      QTAILQ_FOREACH(se, &savevm_handlers, entry) {
> -        if (se->save_live_state == NULL)
> +        if (!se->ops || !se->ops->save_live_state) {
>              continue;
> -
> +        }
>          trace_savevm_section_start();
>          /* Section type */
>          qemu_put_byte(f, QEMU_VM_SECTION_END);
>          qemu_put_be32(f, se->section_id);
> 
> -        ret = se->save_live_state(f, QEMU_VM_SECTION_END, se->opaque);
> +        ret = se->ops->save_live_state(f, QEMU_VM_SECTION_END, se->opaque);
>          trace_savevm_section_end(se->section_id);
>          if (ret < 0) {
>              return ret;
> @@ -1681,9 +1677,9 @@ int qemu_savevm_state_complete(QEMUFile *f)
>      QTAILQ_FOREACH(se, &savevm_handlers, entry) {
>          int len;
> 
> -	if (se->save_state == NULL && se->vmsd == NULL)
> +        if ((!se->ops || !se->ops->save_state) && !se->vmsd) {
>  	    continue;
> -
> +        }
>          trace_savevm_section_start();
>          /* Section type */
>          qemu_put_byte(f, QEMU_VM_SECTION_FULL);
> @@ -1711,8 +1707,8 @@ void qemu_savevm_state_cancel(QEMUFile *f)
>      SaveStateEntry *se;
> 
>      QTAILQ_FOREACH(se, &savevm_handlers, entry) {
> -        if (se->save_live_state) {
> -            se->save_live_state(f, -1, se->opaque);
> +        if (se->ops && se->ops->save_live_state) {
> +            se->ops->save_live_state(f, -1, se->opaque);
>          }
>      }
>  }
> @@ -1765,7 +1761,7 @@ static int qemu_save_device_state(QEMUFile *f)
>          if (se->is_ram) {
>              continue;
>          }
> -        if (se->save_state == NULL && se->vmsd == NULL) {
> +        if ((!se->ops || !se->ops->save_state) && !se->vmsd) {
>              continue;
>          }
> 
> diff --git a/vmstate.h b/vmstate.h
> index 5af45e0..909af69 100644
> --- a/vmstate.h
> +++ b/vmstate.h
> @@ -31,6 +31,13 @@ typedef void SaveStateHandler(QEMUFile *f, void *opaque);
>  typedef int SaveLiveStateHandler(QEMUFile *f, int stage, void *opaque);
>  typedef int LoadStateHandler(QEMUFile *f, void *opaque, int version_id);
> 
> +typedef struct SaveVMHandlers {
> +    SaveSetParamsHandler *set_params;
> +    SaveStateHandler *save_state;
> +    SaveLiveStateHandler *save_live_state;
> +    LoadStateHandler *load_state;
> +} SaveVMHandlers;
> +
>  int register_savevm(DeviceState *dev,
>                      const char *idstr,
>                      int instance_id,
> 

Reviewed-by: Orit Wasserman <owasserm@redhat.com>


* Re: [Qemu-devel] [PATCH 02/12] savevm: Live migration handlers register the struct directly
       [not found] ` <620debbcb9bc99dc6db54d87a8683efbc00c2b66.1340910651.git.quintela@redhat.com>
@ 2012-07-01  8:19   ` Orit Wasserman
  2012-07-01 10:20     ` Juan Quintela
  0 siblings, 1 reply; 20+ messages in thread
From: Orit Wasserman @ 2012-07-01  8:19 UTC (permalink / raw)
  To: Juan Quintela; +Cc: qemu-devel

On 06/28/2012 10:22 PM, Juan Quintela wrote:
> Notice that the live migration users never unregister, so there is no problem
> with freeing the ops structure.
> 
> Signed-off-by: Juan Quintela <quintela@redhat.com>
> ---
>  arch_init.c       |    9 +++++++--
>  block-migration.c |   10 ++++++++--
>  migration.h       |    4 ++--
>  savevm.c          |   18 +++++++-----------
>  vl.c              |    3 +--
>  vmstate.h         |    5 +----
>  6 files changed, 26 insertions(+), 23 deletions(-)
> 
> diff --git a/arch_init.c b/arch_init.c
> index 7ce074e..0b7e31f 100644
> --- a/arch_init.c
> +++ b/arch_init.c
> @@ -298,7 +298,7 @@ static void migration_end(void)
> 
>  #define MAX_WAIT 50 /* ms, half buffered_file limit */
> 
> -int ram_save_live(QEMUFile *f, int stage, void *opaque)
> +static int ram_save_live(QEMUFile *f, int stage, void *opaque)
>  {
>      ram_addr_t addr;
>      uint64_t bytes_transferred_last;
> @@ -436,7 +436,7 @@ static inline void *host_from_stream_offset(QEMUFile *f,
>      return NULL;
>  }
> 
> -int ram_load(QEMUFile *f, void *opaque, int version_id)
> +static int ram_load(QEMUFile *f, void *opaque, int version_id)
>  {
>      ram_addr_t addr;
>      int flags, ret = 0;
> @@ -533,6 +533,11 @@ done:
>      return ret;
>  }
> 
> +SaveVMHandlers savevm_ram_handlers = {
> +    .save_live_state = ram_save_live,
> +    .load_state = ram_load,
> +};
> +
>  #ifdef HAS_AUDIO
>  struct soundhw {
>      const char *name;
> diff --git a/block-migration.c b/block-migration.c
> index b95b4e1..00151a0 100644
> --- a/block-migration.c
> +++ b/block-migration.c
> @@ -709,11 +709,17 @@ static void block_set_params(const MigrationParams *params, void *opaque)
>      block_mig_state.blk_enable |= params->shared;
>  }
> 
> +SaveVMHandlers savevm_block_handlers = {
> +    .set_params = block_set_params,
> +    .save_live_state = block_save_live,
> +    .load_state = block_load,
> +};
> +
>  void blk_mig_init(void)
>  {
>      QSIMPLEQ_INIT(&block_mig_state.bmds_list);
>      QSIMPLEQ_INIT(&block_mig_state.blk_list);
> 
> -    register_savevm_live(NULL, "block", 0, 1, block_set_params,
> -                         block_save_live, NULL, block_load, &block_mig_state);
> +    register_savevm_live(NULL, "block", 0, 1, &savevm_block_handlers,
> +                         &block_mig_state);
>  }
> diff --git a/migration.h b/migration.h
> index 3990771..98bceda 100644
> --- a/migration.h
> +++ b/migration.h
> @@ -18,6 +18,7 @@
>  #include "qemu-common.h"
>  #include "notify.h"
>  #include "error.h"
> +#include "vmstate.h"
> 
>  struct MigrationParams {
>      int blk;
> @@ -81,8 +82,7 @@ uint64_t ram_bytes_remaining(void);
>  uint64_t ram_bytes_transferred(void);
>  uint64_t ram_bytes_total(void);
> 
> -int ram_save_live(QEMUFile *f, int stage, void *opaque);
> -int ram_load(QEMUFile *f, void *opaque, int version_id);
> +extern SaveVMHandlers savevm_ram_handlers;
> 
>  /**
>   * @migrate_add_blocker - prevent migration from proceeding
> diff --git a/savevm.c b/savevm.c
> index 73626d4..a451be2 100644
> --- a/savevm.c
> +++ b/savevm.c
> @@ -1223,10 +1223,7 @@ int register_savevm_live(DeviceState *dev,
>                           const char *idstr,
>                           int instance_id,
>                           int version_id,
> -                         SaveSetParamsHandler *set_params,
> -                         SaveLiveStateHandler *save_live_state,
> -                         SaveStateHandler *save_state,
> -                         LoadStateHandler *load_state,
> +                         SaveVMHandlers *ops,
>                           void *opaque)
>  {
>      SaveStateEntry *se;
> @@ -1234,16 +1231,12 @@ int register_savevm_live(DeviceState *dev,
>      se = g_malloc0(sizeof(SaveStateEntry));
>      se->version_id = version_id;
>      se->section_id = global_section_id++;
> -    se->ops = g_malloc0(sizeof(SaveVMHandlers));
> -    se->ops->set_params = set_params;
> -    se->ops->save_live_state = save_live_state;
> -    se->ops->save_state = save_state;
> -    se->ops->load_state = load_state;
> +    se->ops = ops;
>      se->opaque = opaque;
>      se->vmsd = NULL;
>      se->no_migrate = 0;
>      /* if this is a live_savem then set is_ram */
> -    if (save_live_state != NULL) {
> +    if (ops->save_live_state != NULL) {
>          se->is_ram = 1;
>      }
> 
> @@ -1282,8 +1275,11 @@ int register_savevm(DeviceState *dev,
>                      LoadStateHandler *load_state,
>                      void *opaque)
>  {
> +    SaveVMHandlers *ops = g_malloc0(sizeof(SaveVMHandlers));
> +    ops->save_state = save_state;
> +    ops->load_state = load_state;
>      return register_savevm_live(dev, idstr, instance_id, version_id,
> -                                NULL, NULL, save_state, load_state, opaque);
> +                                ops, opaque);
>  }
> 
>  void unregister_savevm(DeviceState *dev, const char *idstr, void *opaque)
> diff --git a/vl.c b/vl.c
> index 1329c30..b1f31e8 100644
> --- a/vl.c
> +++ b/vl.c
> @@ -3438,8 +3438,7 @@ int main(int argc, char **argv, char **envp)
>      default_drive(default_sdcard, snapshot, machine->use_scsi,
>                    IF_SD, 0, SD_OPTS);
> 
> -    register_savevm_live(NULL, "ram", 0, 4, NULL, ram_save_live, NULL,
> -                         ram_load, NULL);
> +    register_savevm_live(NULL, "ram", 0, 4, &savevm_ram_handlers, NULL);
> 

Juan,
Can't we move it to migration.c (migrate_init and qemu_start_incoming_migration)?
This would remove the need for an extern for savevm_ram_handlers.

Orit
>      if (nb_numa_nodes > 0) {
>          int i;
> diff --git a/vmstate.h b/vmstate.h
> index 909af69..4bce53b 100644
> --- a/vmstate.h
> +++ b/vmstate.h
> @@ -50,10 +50,7 @@ int register_savevm_live(DeviceState *dev,
>                           const char *idstr,
>                           int instance_id,
>                           int version_id,
> -                         SaveSetParamsHandler *set_params,
> -                         SaveLiveStateHandler *save_live_state,
> -                         SaveStateHandler *save_state,
> -                         LoadStateHandler *load_state,
> +                         SaveVMHandlers *ops,
>                           void *opaque);
> 
>  void unregister_savevm(DeviceState *dev, const char *idstr, void *opaque);
> 


* Re: [Qemu-devel] [PATCH 03/12] savevm: remove SaveSetParamsHandler
       [not found] ` <704b60a0d2b32fbf6bf229b115082c2b929a4731.1340910651.git.quintela@redhat.com>
@ 2012-07-01  9:40   ` Orit Wasserman
  0 siblings, 0 replies; 20+ messages in thread
From: Orit Wasserman @ 2012-07-01  9:40 UTC (permalink / raw)
  To: Juan Quintela; +Cc: qemu-devel

On 06/28/2012 10:22 PM, Juan Quintela wrote:
> It was used only once, so just unfold it.
> 
> Signed-off-by: Juan Quintela <quintela@redhat.com>
> ---
>  vmstate.h |    3 +--
>  1 file changed, 1 insertion(+), 2 deletions(-)
> 
> diff --git a/vmstate.h b/vmstate.h
> index 4bce53b..5e1a7cc 100644
> --- a/vmstate.h
> +++ b/vmstate.h
> @@ -26,13 +26,12 @@
>  #ifndef QEMU_VMSTATE_H
>  #define QEMU_VMSTATE_H 1
> 
> -typedef void SaveSetParamsHandler(const MigrationParams *params, void * opaque);
>  typedef void SaveStateHandler(QEMUFile *f, void *opaque);
>  typedef int SaveLiveStateHandler(QEMUFile *f, int stage, void *opaque);
>  typedef int LoadStateHandler(QEMUFile *f, void *opaque, int version_id);
> 
>  typedef struct SaveVMHandlers {
> -    SaveSetParamsHandler *set_params;
> +    void (*set_params)(const MigrationParams *params, void * opaque);
>      SaveStateHandler *save_state;
>      SaveLiveStateHandler *save_live_state;
>      LoadStateHandler *load_state;
>

Reviewed-by: Orit Wasserman <owasserm@redhat.com>


* Re: [Qemu-devel] [PATCH 04/12] savevm: remove SaveLiveStateHandler
       [not found] ` <509b501d09f18212b4165ee116ddef4ceba59097.1340910651.git.quintela@redhat.com>
@ 2012-07-01  9:40   ` Orit Wasserman
  0 siblings, 0 replies; 20+ messages in thread
From: Orit Wasserman @ 2012-07-01  9:40 UTC (permalink / raw)
  To: Juan Quintela; +Cc: qemu-devel

On 06/28/2012 10:22 PM, Juan Quintela wrote:
> It was used only once, so just unfold it.
> 
> Signed-off-by: Juan Quintela <quintela@redhat.com>
> ---
>  vmstate.h |    3 +--
>  1 file changed, 1 insertion(+), 2 deletions(-)
> 
> diff --git a/vmstate.h b/vmstate.h
> index 5e1a7cc..0e24834 100644
> --- a/vmstate.h
> +++ b/vmstate.h
> @@ -27,13 +27,12 @@
>  #define QEMU_VMSTATE_H 1
> 
>  typedef void SaveStateHandler(QEMUFile *f, void *opaque);
> -typedef int SaveLiveStateHandler(QEMUFile *f, int stage, void *opaque);
>  typedef int LoadStateHandler(QEMUFile *f, void *opaque, int version_id);
> 
>  typedef struct SaveVMHandlers {
>      void (*set_params)(const MigrationParams *params, void * opaque);
>      SaveStateHandler *save_state;
> -    SaveLiveStateHandler *save_live_state;
> +    int (*save_live_state)(QEMUFile *f, int stage, void *opaque);
>      LoadStateHandler *load_state;
>  } SaveVMHandlers;
> 
Reviewed-by: Orit Wasserman <owasserm@redhat.com>


* Re: [Qemu-devel] [PATCH 05/12] savevm: Refactor cancel operation in its own operation
       [not found] ` <3dad4b557fac0c11eb6fdb06121859d16247e4f0.1340910651.git.quintela@redhat.com>
@ 2012-07-01  9:47   ` Orit Wasserman
  0 siblings, 0 replies; 20+ messages in thread
From: Orit Wasserman @ 2012-07-01  9:47 UTC (permalink / raw)
  To: Juan Quintela; +Cc: qemu-devel

On 06/28/2012 10:22 PM, Juan Quintela wrote:
> Instead of abusing stage with the value -1.
> 
> Signed-off-by: Juan Quintela <quintela@redhat.com>
> ---
>  arch_init.c       |   11 ++++++-----
>  block-migration.c |   10 ++++++----
>  savevm.c          |    4 ++--
>  vmstate.h         |    1 +
>  4 files changed, 15 insertions(+), 11 deletions(-)
> 
> diff --git a/arch_init.c b/arch_init.c
> index 0b7e31f..36e19b0 100644
> --- a/arch_init.c
> +++ b/arch_init.c
> @@ -296,6 +296,11 @@ static void migration_end(void)
>      memory_global_dirty_log_stop();
>  }
> 
> +static void ram_migration_cancel(void *opaque)
> +{
> +    migration_end();
> +}
> +
>  #define MAX_WAIT 50 /* ms, half buffered_file limit */
> 
>  static int ram_save_live(QEMUFile *f, int stage, void *opaque)
> @@ -306,11 +311,6 @@ static int ram_save_live(QEMUFile *f, int stage, void *opaque)
>      int ret;
>      int i;
> 
> -    if (stage < 0) {
> -        migration_end();
> -        return 0;
> -    }
> -
>      memory_global_sync_dirty_bitmap(get_system_memory());
> 
>      if (stage == 1) {
> @@ -536,6 +536,7 @@ done:
>  SaveVMHandlers savevm_ram_handlers = {
>      .save_live_state = ram_save_live,
>      .load_state = ram_load,
> +    .cancel = ram_migration_cancel,
>  };
> 
>  #ifdef HAS_AUDIO
> diff --git a/block-migration.c b/block-migration.c
> index 00151a0..cd8a8dd 100644
> --- a/block-migration.c
> +++ b/block-migration.c
> @@ -536,6 +536,11 @@ static void blk_mig_cleanup(void)
>      }
>  }
> 
> +static void block_migration_cancel(void *opaque)
> +{
> +    blk_mig_cleanup();
> +}
> +
>  static int block_save_live(QEMUFile *f, int stage, void *opaque)
>  {
>      int ret;
> @@ -543,10 +548,6 @@ static int block_save_live(QEMUFile *f, int stage, void *opaque)
>      DPRINTF("Enter save live stage %d submitted %d transferred %d\n",
>              stage, block_mig_state.submitted, block_mig_state.transferred);
> 
> -    if (stage < 0) {
> -        blk_mig_cleanup();
> -        return 0;
> -    }
> 
>      if (block_mig_state.blk_enable != 1) {
>          /* no need to migrate storage */
> @@ -713,6 +714,7 @@ SaveVMHandlers savevm_block_handlers = {
>      .set_params = block_set_params,
>      .save_live_state = block_save_live,
>      .load_state = block_load,
> +    .cancel = block_migration_cancel,
>  };
> 
>  void blk_mig_init(void)
> diff --git a/savevm.c b/savevm.c
> index a451be2..888c5a2 100644
> --- a/savevm.c
> +++ b/savevm.c
> @@ -1703,8 +1703,8 @@ void qemu_savevm_state_cancel(QEMUFile *f)
>      SaveStateEntry *se;
> 
>      QTAILQ_FOREACH(se, &savevm_handlers, entry) {
> -        if (se->ops && se->ops->save_live_state) {
> -            se->ops->save_live_state(f, -1, se->opaque);
> +        if (se->ops && se->ops->cancel) {
> +            se->ops->cancel(se->opaque);
>          }
>      }
>  }
> diff --git a/vmstate.h b/vmstate.h
> index 0e24834..1dd42f5 100644
> --- a/vmstate.h
> +++ b/vmstate.h
> @@ -33,6 +33,7 @@ typedef struct SaveVMHandlers {
>      void (*set_params)(const MigrationParams *params, void * opaque);
>      SaveStateHandler *save_state;
>      int (*save_live_state)(QEMUFile *f, int stage, void *opaque);
> +    void (*cancel)(void *opaque);
>      LoadStateHandler *load_state;
>  } SaveVMHandlers;
> 

Reviewed-by: Orit Wasserman <owasserm@redhat.com>


* Re: [Qemu-devel] Bad mail header?
       [not found]   ` <4FEF3843.1000907@weilnetz.de>
@ 2012-07-01 10:16     ` Juan Quintela
  2012-07-01 10:56       ` Peter Maydell
  0 siblings, 1 reply; 20+ messages in thread
From: Juan Quintela @ 2012-07-01 10:16 UTC (permalink / raw)
  To: Stefan Weil; +Cc: owasserm, qemu-devel

Stefan Weil <sw@weilnetz.de> wrote:
> Am 28.06.2012 21:21, schrieb Juan Quintela:
>> This will make it easier to add more operations in the next patches.
>>
>> Signed-off-by: Juan Quintela <quintela@redhat.com>
>> ---
>> savevm.c | 54 +++++++++++++++++++++++++-----------------------------
>> vmstate.h | 7 +++++++
>> 2 files changed, 32 insertions(+), 29 deletions(-)
>>
>
>
> Hi Juan,

Hi Stefan,

>
> this mail and the other mails from this patch series trigger
> an alert in my mail filter:

Just before we start, I just hate email (TM) :-(.

> X-Amavis-Alert: BAD HEADER SECTION Duplicate header field: "References"

I am looking; the patches generated by the "gfpm" alias just have one of
each header.  Looking into my INBOX.

You are right, I also have duplicate In-Reply-To and References fields.
No clue if the problem is with git or the mailer.  If it makes any
difference, this is the git version:


(trasno *)$ rpm -qa git
git-1.7.10.4-1.fc17.x86_64

> The following extract from the mail header shows that there is really
> a duplication of the fields "In-Reply-To" and "References":
>
> From: Juan Quintela <quintela@redhat.com>
> To: qemu-devel@nongnu.org
> Date: Thu, 28 Jun 2012 21:21:59 +0200
> Message-Id:
> <39651e275488caec46861cb5b11ba1c7961a429c.1340910651.git.quintela@redhat.com>
> In-Reply-To: <cover.1340910651.git.quintela@redhat.com>
> References: <cover.1340910651.git.quintela@redhat.com>
> In-Reply-To: <cover.1340910651.git.quintela@redhat.com>
> References: <cover.1340910651.git.quintela@redhat.com>
> X-Scanned-By: MIMEDefang 2.67 on 10.5.11.12
> X-detected-operating-system: by eggs.gnu.org: Genre and OS details not
> recognized.
> X-Received-From: 209.132.183.28
> Cc: owasserm@redhat.com
> Subject: [Qemu-devel] [PATCH 01/12] savevm: Use a struct to pass all
> handlers
>
> This is not a new issue, and there are lots of other mail senders which
> also send mails with duplications. I noticed that recently and would like
> to understand how it happens. Maybe some tool (git-send-email?) which is
> used for sending mails is buggy.
>
> Do you remember how you created the patch mails?

alias gfpm='/usr/bin/git format-patch --signoff --find-renames --thread --cover-letter'
alias gsend-qemu='/usr/bin/git send-email --smtp-server=smtp.corp.redhat.com --bcc=quintela@redhat.com --from="Juan Quintela <quintela@redhat.com>" --suppress-cc=self --to=qemu-devel@nongnu.org'

cd $QEMU_DIR
gfpm master -o /tmp/folder/
<edit cover letter until happy>
gsend-qemu /tmp/folder/*patch

I have always sent the emails this way (this is why I have the alias).


> Which mailer did you use?

Command line; the Red Hat server is Zimbra (I think it has postfix
underneath, but I would not trust my memory that much).

> Do you also see the duplication in your local copy of that mail?

As said, the files generated with git-format-patch only have one of each
header field, but the mail that I receive has them duplicated.

> Do other users also see the duplicate lines in the full mail header?
>
> Or is this a bug in my MTA?

I also see them, it's just that I don't get the warning from my mailer.
Does anyone have any clue?  I think that the commands I use are the
"normal" way to send a multi-patch series?  And what is more, nobody
complained in the past.

> Regards,

Later, Juan.


* Re: [Qemu-devel] [PATCH 02/12] savevm: Live migration handlers register the struct directly
  2012-07-01  8:19   ` [Qemu-devel] [PATCH 02/12] savevm: Live migration handlers register the struct directly Orit Wasserman
@ 2012-07-01 10:20     ` Juan Quintela
  2012-07-01 11:24       ` Orit Wasserman
  0 siblings, 1 reply; 20+ messages in thread
From: Juan Quintela @ 2012-07-01 10:20 UTC (permalink / raw)
  To: Orit Wasserman; +Cc: qemu-devel

Orit Wasserman <owasserm@redhat.com> wrote:
> On 06/28/2012 10:22 PM, Juan Quintela wrote:
>> Notice that the live migration users never unregister, so no problem
>> about freeing the ops structure.
>>  void unregister_savevm(DeviceState *dev, const char *idstr, void *opaque)
>> diff --git a/vl.c b/vl.c
>> index 1329c30..b1f31e8 100644
>> --- a/vl.c
>> +++ b/vl.c
>> @@ -3438,8 +3438,7 @@ int main(int argc, char **argv, char **envp)
>>      default_drive(default_sdcard, snapshot, machine->use_scsi,
>>                    IF_SD, 0, SD_OPTS);
>> 
>> -    register_savevm_live(NULL, "ram", 0, 4, NULL, ram_save_live, NULL,
>> -                         ram_load, NULL);
>> +    register_savevm_live(NULL, "ram", 0, 4, &savevm_ram_handlers, NULL);
>> 
>
> Juan,
> Can't we move it migration.c (migrate_init and qemu_start_incoming migration)?
> this will remove to use an extern for savevm_ram_handlers.

savevm_ram_handlers is exported from arch_init.c; what do we win if we
move it?  Furthermore, I add more functions to this structure later in
the series, so what does not exporting the structure and having to export
more functions instead bring us?

Thanks, Juan.


* Re: [Qemu-devel] [PATCH 06/12] savevm: introduce is_active method
       [not found]   ` <4FEF7C5F.5010700@gmail.com>
@ 2012-07-01 10:44     ` Juan Quintela
  0 siblings, 0 replies; 20+ messages in thread
From: Juan Quintela @ 2012-07-01 10:44 UTC (permalink / raw)
  To: Igor Mitsyanko; +Cc: owasserm, qemu-devel

Igor Mitsyanko <i.mitsyanko@gmail.com> wrote:
> On 6/28/2012 11:22 PM, Juan Quintela wrote:
>> Enable the creation of a method to tell migration if that section is
>> active and should be migrated.  We use it for blk-migration, which is
>> normally not active.  We don't create the method for RAM, as setups
>> without RAM are very strange O:-)
>>
>> Signed-off-by: Juan Quintela <quintela@redhat.com>
>> @@ -1576,6 +1576,11 @@ int qemu_savevm_state_begin(QEMUFile *f,
>>           if (!se->ops || !se->ops->save_live_state) {
>>               continue;
>>           }
>> +        if (se->ops && se->ops->is_active) {
>> +            if (!se->ops->is_active(se->opaque)) {
>
> If we got here then we know for sure that se->ops != NULL, and then the
> nested condition could be simplified to if (se->ops->is_active &&
> !se->ops->is_active(se->opaque)). Same for qemu_savevm_state_iterate()
> and qemu_savevm_state_complete().
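
For illustration, a minimal sketch of how the combined check in
qemu_savevm_state_begin() might read with that simplification; this is a
sketch, not the code from the posted patch:

    if (!se->ops || !se->ops->save_live_state) {
        continue;
    }
    /* se->ops is already known to be non-NULL here, so one test suffices */
    if (se->ops->is_active && !se->ops->is_active(se->opaque)) {
        continue;
    }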

I tried to maintain consistency with the previous tests, i.e. they all have
the same structure.  I am still not sure which one is better :-()

The approach that I took puts the checks in the same order; yours removes
two lines and one && operand.

Will think about that one, thanks.

Later, Juan.


* Re: [Qemu-devel] Bad mail header?
  2012-07-01 10:16     ` [Qemu-devel] Bad mail header? Juan Quintela
@ 2012-07-01 10:56       ` Peter Maydell
  2012-07-13  7:26         ` Juan Quintela
  0 siblings, 1 reply; 20+ messages in thread
From: Peter Maydell @ 2012-07-01 10:56 UTC (permalink / raw)
  To: quintela; +Cc: Stefan Weil, qemu-devel, owasserm

On 1 July 2012 11:16, Juan Quintela <quintela@redhat.com> wrote:
> Stefan Weil <sw@weilnetz.de> wrote:
>> this mail and the other mails from this patch series trigger
>> an alert in my mail filter:
>> X-Amavis-Alert: BAD HEADER SECTION Duplicate header field: "References"

> You are right,  I also have duplicate In-Reply-To and Reference fields.
> No clue if the problem is with git or mailer.

It seems to have been reported as a Debian bug against git a while
back:
 http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=593542

but the git-send-email manpage suggests that upstream would like
to consider this as user error:

# It is up to the user to ensure that no In-Reply-To header already
# exists when git send-email is asked to add it (especially note that
# git format-patch can be configured to do the threading itself).
# Failure to do so may not produce the expected result in the
# recipient’s MUA.

I think what this boils down to is "don't tell both format-patch
and send-email to do threading". send-email threads by default,
and format-patch doesn't thread by default, so the path of least
resistance is probably not to pass '--thread' to format-patch.
(Alternatively you could tell format-patch --thread and send-email
--no-thread, but I don't know why you'd want to do it that way
around.)

-- PMM


* Re: [Qemu-devel] [PATCH 02/12] savevm: Live migration handlers register the struct directly
  2012-07-01 10:20     ` Juan Quintela
@ 2012-07-01 11:24       ` Orit Wasserman
  2012-07-11 17:01         ` Juan Quintela
  0 siblings, 1 reply; 20+ messages in thread
From: Orit Wasserman @ 2012-07-01 11:24 UTC (permalink / raw)
  To: quintela; +Cc: qemu-devel

On 07/01/2012 01:20 PM, Juan Quintela wrote:
> Orit Wasserman <owasserm@redhat.com> wrote:
>> On 06/28/2012 10:22 PM, Juan Quintela wrote:
>>> Notice that the live migration users never unregister, so no problem
>>> about freeing the ops structure.
>>>  void unregister_savevm(DeviceState *dev, const char *idstr, void *opaque)
>>> diff --git a/vl.c b/vl.c
>>> index 1329c30..b1f31e8 100644
>>> --- a/vl.c
>>> +++ b/vl.c
>>> @@ -3438,8 +3438,7 @@ int main(int argc, char **argv, char **envp)
>>>      default_drive(default_sdcard, snapshot, machine->use_scsi,
>>>                    IF_SD, 0, SD_OPTS);
>>>
>>> -    register_savevm_live(NULL, "ram", 0, 4, NULL, ram_save_live, NULL,
>>> -                         ram_load, NULL);
>>> +    register_savevm_live(NULL, "ram", 0, 4, &savevm_ram_handlers, NULL);
>>>
>>
>> Juan,
>> Can't we move it migration.c (migrate_init and qemu_start_incoming migration)?
>> this will remove to use an extern for savevm_ram_handlers.
> 
> savevm_ram_handlers are exported from arch_init.c, what do we win if we
> move it?  Furthermore, I add more functions to this extructure later in
> the series, so not exporting the structure and having to export more
> functions what bring to us?
Well, it shouldn't be in vl.c; there is no reason (correct me if I'm wrong) to
register the "ram" handlers if there is no migration ...

Orit

> 
> Thanks, Juan.
> 


* Re: [Qemu-devel] [PATCH 07/12] savevm: split save_live_setup from save_live_state
       [not found] ` <5a3139cd92ad66e5028ad0120e4de63c1a089d65.1340910651.git.quintela@redhat.com>
@ 2012-07-01 12:14   ` Orit Wasserman
  0 siblings, 0 replies; 20+ messages in thread
From: Orit Wasserman @ 2012-07-01 12:14 UTC (permalink / raw)
  To: Juan Quintela; +Cc: qemu-devel

On 06/28/2012 10:22 PM, Juan Quintela wrote:
> This patch splits stage 1 into its own function for both save_live
> users, ram and block.  It is just a copy of the function, removing the
> parts of the other stages.  Optimizations will come later.
> 
> Signed-off-by: Juan Quintela <quintela@redhat.com>
> ---
>  arch_init.c       |   86 +++++++++++++++++++++++++++++++++++++++--------------
>  block-migration.c |   35 +++++++++++++++++-----
>  savevm.c          |    4 +--
>  vmstate.h         |    1 +
>  4 files changed, 95 insertions(+), 31 deletions(-)
> 
> diff --git a/arch_init.c b/arch_init.c
> index 36e19b0..b296e17 100644
> --- a/arch_init.c
> +++ b/arch_init.c
> @@ -303,44 +303,85 @@ static void ram_migration_cancel(void *opaque)
> 
>  #define MAX_WAIT 50 /* ms, half buffered_file limit */
> 
> -static int ram_save_live(QEMUFile *f, int stage, void *opaque)
> +static int ram_save_setup(QEMUFile *f, void *opaque)
>  {
>      ram_addr_t addr;
> -    uint64_t bytes_transferred_last;
> +    RAMBlock *block;
>      double bwidth = 0;
>      int ret;
>      int i;
> 
>      memory_global_sync_dirty_bitmap(get_system_memory());
> 
> -    if (stage == 1) {
> -        RAMBlock *block;
> -        bytes_transferred = 0;
> -        last_block = NULL;
> -        last_offset = 0;
> -        sort_ram_list();
> -
> -        /* Make sure all dirty bits are set */
> -        QLIST_FOREACH(block, &ram_list.blocks, next) {
> -            for (addr = 0; addr < block->length; addr += TARGET_PAGE_SIZE) {
> -                if (!memory_region_get_dirty(block->mr, addr, TARGET_PAGE_SIZE,
> -                                             DIRTY_MEMORY_MIGRATION)) {
> -                    memory_region_set_dirty(block->mr, addr, TARGET_PAGE_SIZE);
> -                }
> +    bytes_transferred = 0;
> +    last_block = NULL;
> +    last_offset = 0;
> +    sort_ram_list();
> +
> +    /* Make sure all dirty bits are set */
> +    QLIST_FOREACH(block, &ram_list.blocks, next) {
> +        for (addr = 0; addr < block->length; addr += TARGET_PAGE_SIZE) {
> +            if (!memory_region_get_dirty(block->mr, addr, TARGET_PAGE_SIZE,
> +                                         DIRTY_MEMORY_MIGRATION)) {
> +                memory_region_set_dirty(block->mr, addr, TARGET_PAGE_SIZE);
>              }
>          }
> +    }
> 
> -        memory_global_dirty_log_start();
> +    memory_global_dirty_log_start();
> +
> +    qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);
> +
> +    QLIST_FOREACH(block, &ram_list.blocks, next) {
> +        qemu_put_byte(f, strlen(block->idstr));
> +        qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
> +        qemu_put_be64(f, block->length);
> +    }
> +
> +    bwidth = qemu_get_clock_ns(rt_clock);
> 
> -        qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);
> +    i = 0;
> +    while ((ret = qemu_file_rate_limit(f)) == 0) {
> +        int bytes_sent;
> 
> -        QLIST_FOREACH(block, &ram_list.blocks, next) {
> -            qemu_put_byte(f, strlen(block->idstr));
> -            qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
> -            qemu_put_be64(f, block->length);
> +        bytes_sent = ram_save_block(f);
> +        bytes_transferred += bytes_sent;
> +        if (bytes_sent == 0) { /* no more blocks */
> +            break;
>          }
> +        /* we want to check in the 1st loop, just in case it was the 1st time
> +           and we had to sync the dirty bitmap.
> +           qemu_get_clock_ns() is a bit expensive, so we only check each some
> +           iterations
> +        */
> +        if ((i & 63) == 0) {
> +            uint64_t t1 = (qemu_get_clock_ns(rt_clock) - bwidth) / 1000000;
> +            if (t1 > MAX_WAIT) {
> +                DPRINTF("big wait: %ld milliseconds, %d iterations\n", t1, i);
> +                break;
> +            }
> +        }
> +        i++;
>      }
> 
> +    if (ret < 0) {
> +        return ret;
> +    }
> +
> +    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
> +
> +    return 0;
> +}
> +
> +static int ram_save_live(QEMUFile *f, int stage, void *opaque)
> +{
> +    uint64_t bytes_transferred_last;
> +    double bwidth = 0;
> +    int ret;
> +    int i;
> +
> +    memory_global_sync_dirty_bitmap(get_system_memory());
> +
>      bytes_transferred_last = bytes_transferred;
>      bwidth = qemu_get_clock_ns(rt_clock);
> 
> @@ -534,6 +575,7 @@ done:
>  }
> 
>  SaveVMHandlers savevm_ram_handlers = {
> +    .save_live_setup = ram_save_setup,
>      .save_live_state = ram_save_live,
>      .load_state = ram_load,
>      .cancel = ram_migration_cancel,
> diff --git a/block-migration.c b/block-migration.c
> index 6d37dc1..fc3d1f4 100644
> --- a/block-migration.c
> +++ b/block-migration.c
> @@ -541,20 +541,40 @@ static void block_migration_cancel(void *opaque)
>      blk_mig_cleanup();
>  }
> 
> -static int block_save_live(QEMUFile *f, int stage, void *opaque)
> +static int block_save_setup(QEMUFile *f, void *opaque)
>  {
>      int ret;
> 
> -    DPRINTF("Enter save live stage %d submitted %d transferred %d\n",
> -            stage, block_mig_state.submitted, block_mig_state.transferred);
> +    DPRINTF("Enter save live setup submitted %d transferred %d\n",
> +            block_mig_state.submitted, block_mig_state.transferred);
> 
> -    if (stage == 1) {
> -        init_blk_migration(f);
> +    init_blk_migration(f);
> +
> +    /* start track dirty blocks */
> +    set_dirty_tracking(1);
> +
> +    flush_blks(f);
> 
> -        /* start track dirty blocks */
> -        set_dirty_tracking(1);
> +    ret = qemu_file_get_error(f);
> +    if (ret) {
> +        blk_mig_cleanup();
> +        return ret;
>      }
> 
> +    blk_mig_reset_dirty_cursor();
> +
> +    qemu_put_be64(f, BLK_MIG_FLAG_EOS);
> +
> +    return 0;
> +}
> +
> +static int block_save_live(QEMUFile *f, int stage, void *opaque)
> +{
> +    int ret;
> +
> +    DPRINTF("Enter save live stage %d submitted %d transferred %d\n",
> +            stage, block_mig_state.submitted, block_mig_state.transferred);
> +
>      flush_blks(f);
> 
>      ret = qemu_file_get_error(f);
> @@ -710,6 +730,7 @@ static bool block_is_active(void *opaque)
> 
>  SaveVMHandlers savevm_block_handlers = {
>      .set_params = block_set_params,
> +    .save_live_setup = block_save_setup,
>      .save_live_state = block_save_live,
>      .load_state = block_load,
>      .cancel = block_migration_cancel,
> diff --git a/savevm.c b/savevm.c
> index afa0c9e..0b80a94 100644
> --- a/savevm.c
> +++ b/savevm.c
> @@ -1573,7 +1573,7 @@ int qemu_savevm_state_begin(QEMUFile *f,
>      QTAILQ_FOREACH(se, &savevm_handlers, entry) {
>          int len;
> 
> -        if (!se->ops || !se->ops->save_live_state) {
> +        if (!se->ops || !se->ops->save_live_setup) {
>              continue;
>          }
>          if (se->ops && se->ops->is_active) {
> @@ -1593,7 +1593,7 @@ int qemu_savevm_state_begin(QEMUFile *f,
>          qemu_put_be32(f, se->instance_id);
>          qemu_put_be32(f, se->version_id);
> 
> -        ret = se->ops->save_live_state(f, QEMU_VM_SECTION_START, se->opaque);
> +        ret = se->ops->save_live_setup(f, se->opaque);
>          if (ret < 0) {
>              qemu_savevm_state_cancel(f);
>              return ret;
> diff --git a/vmstate.h b/vmstate.h
> index 96651a5..049f2b7 100644
> --- a/vmstate.h
> +++ b/vmstate.h
> @@ -32,6 +32,7 @@ typedef int LoadStateHandler(QEMUFile *f, void *opaque, int version_id);
>  typedef struct SaveVMHandlers {
>      void (*set_params)(const MigrationParams *params, void * opaque);
>      SaveStateHandler *save_state;
> +    int (*save_live_setup)(QEMUFile *f, void *opaque);
>      int (*save_live_state)(QEMUFile *f, int stage, void *opaque);
>      void (*cancel)(void *opaque);
>      LoadStateHandler *load_state;
> 

Reviewed-by: Orit Wasserman <owasserm@redhat.com>


* Re: [Qemu-devel] [PATCH 11/12] ram: iterate phase
       [not found] ` <cd73a246dbbb7a688e0000204f5af2040afe6578.1340910651.git.quintela@redhat.com>
@ 2012-07-01 12:17   ` Igor Mitsyanko
  2012-07-03 10:48     ` Juan Quintela
  0 siblings, 1 reply; 20+ messages in thread
From: Igor Mitsyanko @ 2012-07-01 12:17 UTC (permalink / raw)
  To: Juan Quintela; +Cc: owasserm, qemu-devel

On 6/28/2012 11:22 PM, Juan Quintela wrote:
> We only need to synchronize the bitmap when the number of dirty pages is low,
> not every time that we call the function.
>
> Signed-off-by: Juan Quintela <quintela@redhat.com>
> ---
>   arch_init.c |    9 ++++++---
>   1 file changed, 6 insertions(+), 3 deletions(-)
>
> diff --git a/arch_init.c b/arch_init.c
> index fe843a7..8299c15 100644
> --- a/arch_init.c
> +++ b/arch_init.c
> @@ -348,8 +348,6 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
>       int i;
>       uint64_t expected_time;
>
> -    memory_global_sync_dirty_bitmap(get_system_memory());
> -
>       bytes_transferred_last = bytes_transferred;
>       bwidth = qemu_get_clock_ns(rt_clock);
>
> @@ -397,7 +395,12 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
>       DPRINTF("ram_save_live: expected(%ld) <= max(%ld)?\n", expected_time,
>               migrate_max_downtime());
>
> -    return expected_time <= migrate_max_downtime();
> +    if (expected_time <= migrate_max_downtime()) {
> +        memory_global_sync_dirty_bitmap(get_system_memory());
> +
> +        return expected_time <= migrate_max_downtime();

Shouldn't expected_time be recalculated after 
memory_global_sync_dirty_bitmap()?
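
For illustration, a minimal sketch of the recalculation being asked about,
reusing bwidth and ram_save_remaining() from earlier in ram_save_iterate();
this is a sketch under that assumption, not part of the posted patch:

    if (expected_time <= migrate_max_downtime()) {
        memory_global_sync_dirty_bitmap(get_system_memory());
        /* refresh the estimate after the sync may have added new dirty pages */
        expected_time = ram_save_remaining() * TARGET_PAGE_SIZE / bwidth;
        return expected_time <= migrate_max_downtime();
    }
    return 0;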

> +    }
> +    return 0;
>   }
>
>   static int ram_save_complete(QEMUFile *f, void *opaque)
>


* Re: [Qemu-devel] [PATCH 08/12] savevm: split save_live into stage2 and stage3
       [not found] ` <d1e17271bb9fdc52dbd0b591e471b096aca2e721.1340910651.git.quintela@redhat.com>
@ 2012-07-02 11:57   ` Orit Wasserman
  0 siblings, 0 replies; 20+ messages in thread
From: Orit Wasserman @ 2012-07-02 11:57 UTC (permalink / raw)
  To: Juan Quintela; +Cc: qemu-devel

On 06/28/2012 10:22 PM, Juan Quintela wrote:
> We split it into 2 functions, foo_live_iterate, and foo_live_complete.
> At this point, we only remove the bits that are for the other stage,
> functionally this is equivalent to previous code.
> 
> Signed-off-by: Juan Quintela <quintela@redhat.com>
> ---
>  arch_init.c       |   71 +++++++++++++++++++++++++++++---------
>  block-migration.c |   99 ++++++++++++++++++++++++++++++++---------------------
>  savevm.c          |   10 +++---
>  vmstate.h         |    3 +-
>  4 files changed, 121 insertions(+), 62 deletions(-)
> 
> diff --git a/arch_init.c b/arch_init.c
> index b296e17..d5d7a78 100644
> --- a/arch_init.c
> +++ b/arch_init.c
> @@ -373,12 +373,13 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
>      return 0;
>  }
> 
> -static int ram_save_live(QEMUFile *f, int stage, void *opaque)
> +static int ram_save_iterate(QEMUFile *f, void *opaque)
>  {
>      uint64_t bytes_transferred_last;
>      double bwidth = 0;
>      int ret;
>      int i;
> +    uint64_t expected_time;
> 
>      memory_global_sync_dirty_bitmap(get_system_memory());
> 
> @@ -422,28 +423,63 @@ static int ram_save_live(QEMUFile *f, int stage, void *opaque)
>          bwidth = 0.000001;
>      }
> 
> -    /* try transferring iterative blocks of memory */
> -    if (stage == 3) {
> -        int bytes_sent;
> +    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
> +
> +    expected_time = ram_save_remaining() * TARGET_PAGE_SIZE / bwidth;
> +
> +    DPRINTF("ram_save_live: expected(%ld) <= max(%ld)?\n", expected_time,
> +            migrate_max_downtime());
> +
> +    return expected_time <= migrate_max_downtime();
> +}
> +
> +static int ram_save_complete(QEMUFile *f, void *opaque)
> +{
> +    double bwidth = 0;
> +    int ret;
> +    int i;
> +    int bytes_sent;
> 
> -        /* flush all remaining blocks regardless of rate limiting */
> -        while ((bytes_sent = ram_save_block(f)) != 0) {
> -            bytes_transferred += bytes_sent;
> +    memory_global_sync_dirty_bitmap(get_system_memory());
> +
> +    bwidth = qemu_get_clock_ns(rt_clock);
> +
> +    i = 0;
> +    while ((ret = qemu_file_rate_limit(f)) == 0) {
> +        bytes_sent = ram_save_block(f);
> +        bytes_transferred += bytes_sent;
> +        if (bytes_sent == 0) { /* no more blocks */
> +            break;
> +        }
> +        /* we want to check in the 1st loop, just in case it was the 1st time
> +           and we had to sync the dirty bitmap.
> +           qemu_get_clock_ns() is a bit expensive, so we only check each some
> +           iterations
> +        */
> +        if ((i & 63) == 0) {
> +            uint64_t t1 = (qemu_get_clock_ns(rt_clock) - bwidth) / 1000000;
> +            if (t1 > MAX_WAIT) {
> +                DPRINTF("big wait: %ld milliseconds, %d iterations\n", t1, i);
> +                break;
> +            }
>          }
> -        memory_global_dirty_log_stop();
> +        i++;
>      }
> 
> -    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
> -
> -    if (stage == 2) {
> -        uint64_t expected_time;
> -        expected_time = ram_save_remaining() * TARGET_PAGE_SIZE / bwidth;
> +    if (ret < 0) {
> +        return ret;
> +    }
> 
> -        DPRINTF("ram_save_live: expected(%ld) <= max(%ld)?\n", expected_time,
> -                migrate_max_downtime());
> +    /* try transferring iterative blocks of memory */
> 
> -        return expected_time <= migrate_max_downtime();
> +    /* flush all remaining blocks regardless of rate limiting */
> +    while ((bytes_sent = ram_save_block(f)) != 0) {
> +        bytes_transferred += bytes_sent;
>      }
> +    memory_global_dirty_log_stop();
> +
> +    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
> +
>      return 0;
>  }
> 
> @@ -576,7 +612,8 @@ done:
> 
>  SaveVMHandlers savevm_ram_handlers = {
>      .save_live_setup = ram_save_setup,
> -    .save_live_state = ram_save_live,
> +    .save_live_iterate = ram_save_iterate,
> +    .save_live_complete = ram_save_complete,
>      .load_state = ram_load,
>      .cancel = ram_migration_cancel,
>  };
> diff --git a/block-migration.c b/block-migration.c
> index fc3d1f4..7def8ab 100644
> --- a/block-migration.c
> +++ b/block-migration.c
> @@ -568,12 +568,12 @@ static int block_save_setup(QEMUFile *f, void *opaque)
>      return 0;
>  }
> 
> -static int block_save_live(QEMUFile *f, int stage, void *opaque)
> +static int block_save_iterate(QEMUFile *f, void *opaque)
>  {
>      int ret;
> 
> -    DPRINTF("Enter save live stage %d submitted %d transferred %d\n",
> -            stage, block_mig_state.submitted, block_mig_state.transferred);
> +    DPRINTF("Enter save live iterate submitted %d transferred %d\n",
> +            block_mig_state.submitted, block_mig_state.transferred);
> 
>      flush_blks(f);
> 
> @@ -585,56 +585,76 @@ static int block_save_live(QEMUFile *f, int stage, void *opaque)
> 
>      blk_mig_reset_dirty_cursor();
> 
> -    if (stage == 2) {
> -        /* control the rate of transfer */
> -        while ((block_mig_state.submitted +
> -                block_mig_state.read_done) * BLOCK_SIZE <
> -               qemu_file_get_rate_limit(f)) {
> -            if (block_mig_state.bulk_completed == 0) {
> -                /* first finish the bulk phase */
> -                if (blk_mig_save_bulked_block(f) == 0) {
> -                    /* finished saving bulk on all devices */
> -                    block_mig_state.bulk_completed = 1;
> -                }
> -            } else {
> -                if (blk_mig_save_dirty_block(f, 1) == 0) {
> -                    /* no more dirty blocks */
> -                    break;
> -                }
> +    /* control the rate of transfer */
> +    while ((block_mig_state.submitted +
> +            block_mig_state.read_done) * BLOCK_SIZE <
> +           qemu_file_get_rate_limit(f)) {
> +        if (block_mig_state.bulk_completed == 0) {
> +            /* first finish the bulk phase */
> +            if (blk_mig_save_bulked_block(f) == 0) {
> +                /* finished saving bulk on all devices */
> +                block_mig_state.bulk_completed = 1;
> +            }
> +        } else {
> +            if (blk_mig_save_dirty_block(f, 1) == 0) {
> +                /* no more dirty blocks */
> +                break;
>              }
>          }
> +    }
> 
> -        flush_blks(f);
> +    flush_blks(f);
> 
> -        ret = qemu_file_get_error(f);
> -        if (ret) {
> -            blk_mig_cleanup();
> -            return ret;
> -        }
> +    ret = qemu_file_get_error(f);
> +    if (ret) {
> +        blk_mig_cleanup();
> +        return ret;
>      }
> 
> -    if (stage == 3) {
> -        /* we know for sure that save bulk is completed and
> -           all async read completed */
> -        assert(block_mig_state.submitted == 0);
> +    qemu_put_be64(f, BLK_MIG_FLAG_EOS);
> +
> +    return is_stage2_completed();
> +}
> +
> +static int block_save_complete(QEMUFile *f, void *opaque)
> +{
> +    int ret;
> +
> +    DPRINTF("Enter save live complete submitted %d transferred %d\n",
> +            block_mig_state.submitted, block_mig_state.transferred);
> +
> +    flush_blks(f);
> 
> -        while (blk_mig_save_dirty_block(f, 0) != 0);
> +    ret = qemu_file_get_error(f);
> +    if (ret) {
>          blk_mig_cleanup();
> +        return ret;
> +    }
> 
> -        /* report completion */
> -        qemu_put_be64(f, (100 << BDRV_SECTOR_BITS) | BLK_MIG_FLAG_PROGRESS);
> +    blk_mig_reset_dirty_cursor();
> 
> -        ret = qemu_file_get_error(f);
> -        if (ret) {
> -            return ret;
> -        }
> +    /* we know for sure that save bulk is completed and
> +       all async read completed */
> +    assert(block_mig_state.submitted == 0);
> +
> +    while (blk_mig_save_dirty_block(f, 0) != 0) {
> +        /* Do nothing */
> +    }
> +    blk_mig_cleanup();
> +
> +    /* report completion */
> +    qemu_put_be64(f, (100 << BDRV_SECTOR_BITS) | BLK_MIG_FLAG_PROGRESS);
> 
> -        DPRINTF("Block migration completed\n");
> +    ret = qemu_file_get_error(f);
> +    if (ret) {
> +        return ret;
>      }
> 
> +    DPRINTF("Block migration completed\n");
> +
>      qemu_put_be64(f, BLK_MIG_FLAG_EOS);
> 
> -    return ((stage == 2) && is_stage2_completed());
> +    return 0;
>  }
> 
>  static int block_load(QEMUFile *f, void *opaque, int version_id)
> @@ -731,7 +751,8 @@ static bool block_is_active(void *opaque)
>  SaveVMHandlers savevm_block_handlers = {
>      .set_params = block_set_params,
>      .save_live_setup = block_save_setup,
> -    .save_live_state = block_save_live,
> +    .save_live_iterate = block_save_iterate,
> +    .save_live_complete = block_save_complete,
>      .load_state = block_load,
>      .cancel = block_migration_cancel,
>      .is_active = block_is_active,
> diff --git a/savevm.c b/savevm.c
> index 0b80a94..6e82b2d 100644
> --- a/savevm.c
> +++ b/savevm.c
> @@ -1236,7 +1236,7 @@ int register_savevm_live(DeviceState *dev,
>      se->vmsd = NULL;
>      se->no_migrate = 0;
>      /* if this is a live_savem then set is_ram */
> -    if (ops->save_live_state != NULL) {
> +    if (ops->save_live_setup != NULL) {
>          se->is_ram = 1;
>      }
> 
> @@ -1620,7 +1620,7 @@ int qemu_savevm_state_iterate(QEMUFile *f)
>      int ret = 1;
> 
>      QTAILQ_FOREACH(se, &savevm_handlers, entry) {
> -        if (!se->ops || !se->ops->save_live_state) {
> +        if (!se->ops || !se->ops->save_live_iterate) {
>              continue;
>          }
>          if (se->ops && se->ops->is_active) {
> @@ -1636,7 +1636,7 @@ int qemu_savevm_state_iterate(QEMUFile *f)
>          qemu_put_byte(f, QEMU_VM_SECTION_PART);
>          qemu_put_be32(f, se->section_id);
> 
> -        ret = se->ops->save_live_state(f, QEMU_VM_SECTION_PART, se->opaque);
> +        ret = se->ops->save_live_iterate(f, se->opaque);
>          trace_savevm_section_end(se->section_id);
> 
>          if (ret <= 0) {
> @@ -1665,7 +1665,7 @@ int qemu_savevm_state_complete(QEMUFile *f)
>      cpu_synchronize_all_states();
> 
>      QTAILQ_FOREACH(se, &savevm_handlers, entry) {
> -        if (!se->ops || !se->ops->save_live_state) {
> +        if (!se->ops || !se->ops->save_live_complete) {
>              continue;
>          }
>          if (se->ops && se->ops->is_active) {
> @@ -1678,7 +1678,7 @@ int qemu_savevm_state_complete(QEMUFile *f)
>          qemu_put_byte(f, QEMU_VM_SECTION_END);
>          qemu_put_be32(f, se->section_id);
> 
> -        ret = se->ops->save_live_state(f, QEMU_VM_SECTION_END, se->opaque);
> +        ret = se->ops->save_live_complete(f, se->opaque);
>          trace_savevm_section_end(se->section_id);
>          if (ret < 0) {
>              return ret;
> diff --git a/vmstate.h b/vmstate.h
> index 049f2b7..5bd2b76 100644
> --- a/vmstate.h
> +++ b/vmstate.h
> @@ -33,7 +33,8 @@ typedef struct SaveVMHandlers {
>      void (*set_params)(const MigrationParams *params, void * opaque);
>      SaveStateHandler *save_state;
>      int (*save_live_setup)(QEMUFile *f, void *opaque);
> -    int (*save_live_state)(QEMUFile *f, int stage, void *opaque);
> +    int (*save_live_iterate)(QEMUFile *f, void *opaque);
> +    int (*save_live_complete)(QEMUFile *f, void *opaque);
>      void (*cancel)(void *opaque);
>      LoadStateHandler *load_state;
>      bool (*is_active)(void *opaque);
> 
Reviewed-by: Orit Wasserman <owasserm@redhat.com>


* Re: [Qemu-devel] [PATCH 09/12] ram: save_live_setup() don't need to sent pages
       [not found] ` <07c8945d9b633bc509ebec0ff6646834ec974ccb.1340910651.git.quintela@redhat.com>
@ 2012-07-02 12:00   ` Orit Wasserman
  0 siblings, 0 replies; 20+ messages in thread
From: Orit Wasserman @ 2012-07-02 12:00 UTC (permalink / raw)
  To: qemu-devel

On 06/28/2012 10:22 PM, Juan Quintela wrote:
> We should send pages in the iterate phase, not in the setup one.  This was a
> "bug".  Just removing the loop does what we want.  Tested that it
> works with the current ram_load().
> 
> Signed-off-by: Juan Quintela <quintela@redhat.com>
> ---
>  arch_init.c |   33 ---------------------------------
>  1 file changed, 33 deletions(-)
> 
> diff --git a/arch_init.c b/arch_init.c
> index d5d7a78..1eab331 100644
> --- a/arch_init.c
> +++ b/arch_init.c
> @@ -307,9 +307,6 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
>  {
>      ram_addr_t addr;
>      RAMBlock *block;
> -    double bwidth = 0;
> -    int ret;
> -    int i;
> 
>      memory_global_sync_dirty_bitmap(get_system_memory());
> 
> @@ -338,36 +335,6 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
>          qemu_put_be64(f, block->length);
>      }
> 
> -    bwidth = qemu_get_clock_ns(rt_clock);
> -
> -    i = 0;
> -    while ((ret = qemu_file_rate_limit(f)) == 0) {
> -        int bytes_sent;
> -
> -        bytes_sent = ram_save_block(f);
> -        bytes_transferred += bytes_sent;
> -        if (bytes_sent == 0) { /* no more blocks */
> -            break;
> -        }
> -        /* we want to check in the 1st loop, just in case it was the 1st time
> -           and we had to sync the dirty bitmap.
> -           qemu_get_clock_ns() is a bit expensive, so we only check each some
> -           iterations
> -        */
> -        if ((i & 63) == 0) {
> -            uint64_t t1 = (qemu_get_clock_ns(rt_clock) - bwidth) / 1000000;
> -            if (t1 > MAX_WAIT) {
> -                DPRINTF("big wait: %ld milliseconds, %d iterations\n", t1, i);
> -                break;
> -            }
> -        }
> -        i++;
> -    }
> -
> -    if (ret < 0) {
> -        return ret;
> -    }
> -
>      qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
> 
>      return 0;
> 
Reviewed-by: Orit Wasserman <owasserm@redhat.com>


* Re: [Qemu-devel] [PATCH 10/12] ram: save_live_complete() only do one loop
       [not found] ` <be6bed86c792fb6f237f016aadfb3fcbda746f15.1340910651.git.quintela@redhat.com>
@ 2012-07-02 12:00   ` Orit Wasserman
  0 siblings, 0 replies; 20+ messages in thread
From: Orit Wasserman @ 2012-07-02 12:00 UTC (permalink / raw)
  To: qemu-devel

On 06/28/2012 10:22 PM, Juan Quintela wrote:
> We were doing the same loop as stage 2, and a new one for stage 3.  We
> only need the one for stage 3.
> 
> Signed-off-by: Juan Quintela <quintela@redhat.com>
> ---
>  arch_init.c |   31 -------------------------------
>  1 file changed, 31 deletions(-)
> 
> diff --git a/arch_init.c b/arch_init.c
> index 1eab331..fe843a7 100644
> --- a/arch_init.c
> +++ b/arch_init.c
> @@ -402,41 +402,10 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
> 
>  static int ram_save_complete(QEMUFile *f, void *opaque)
>  {
> -    double bwidth = 0;
> -    int ret;
> -    int i;
>      int bytes_sent;
> 
>      memory_global_sync_dirty_bitmap(get_system_memory());
> 
> -    bwidth = qemu_get_clock_ns(rt_clock);
> -
> -    i = 0;
> -    while ((ret = qemu_file_rate_limit(f)) == 0) {
> -        bytes_sent = ram_save_block(f);
> -        bytes_transferred += bytes_sent;
> -        if (bytes_sent == 0) { /* no more blocks */
> -            break;
> -        }
> -        /* we want to check in the 1st loop, just in case it was the 1st time
> -           and we had to sync the dirty bitmap.
> -           qemu_get_clock_ns() is a bit expensive, so we only check each some
> -           iterations
> -        */
> -        if ((i & 63) == 0) {
> -            uint64_t t1 = (qemu_get_clock_ns(rt_clock) - bwidth) / 1000000;
> -            if (t1 > MAX_WAIT) {
> -                DPRINTF("big wait: %ld milliseconds, %d iterations\n", t1, i);
> -                break;
> -            }
> -        }
> -        i++;
> -    }
> -
> -    if (ret < 0) {
> -        return ret;
> -    }
> -
>      /* try transferring iterative blocks of memory */
> 
>      /* flush all remaining blocks regardless of rate limiting */
> 
Reviewed-by: Orit Wasserman <owasserm@redhat.com>
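
For orientation, a sketch of ram_save_complete() once the stage-2 style
loop is gone, pieced together from the context lines of the quoted hunk;
the body of the flush loop is an assumption, only the names visible in the
diff are taken from it.

    static int ram_save_complete(QEMUFile *f, void *opaque)
    {
        int bytes_sent;

        memory_global_sync_dirty_bitmap(get_system_memory());

        /* flush all remaining blocks regardless of rate limiting */
        while ((bytes_sent = ram_save_block(f)) != 0) {
            bytes_transferred += bytes_sent;
        }

        /* ... stop dirty logging and write RAM_SAVE_FLAG_EOS (elided) ... */
        return 0;
    }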


* Re: [Qemu-devel] [PATCH 12/12] ram: save_live_setup() we don't need to synchronize the dirty bitmap.
       [not found] ` <ab1ca756b1cfe67df94f7c6674ea86f6d7232a2f.1340910651.git.quintela@redhat.com>
@ 2012-07-02 12:05   ` Orit Wasserman
  0 siblings, 0 replies; 20+ messages in thread
From: Orit Wasserman @ 2012-07-02 12:05 UTC (permalink / raw)
  To: Juan Quintela; +Cc: qemu-devel

On 06/28/2012 10:22 PM, Juan Quintela wrote:
> 1st: we were synchronizing the dirty bitmap before calling
>      memory_global_dirty_log_start().
> 
> 2nd: We are marking all pages as dirty anyway, so there is no reason to
>      go through the whole bitmap to "mark" the same pages dirty twice.
> 
> So, the call is removed.
> 
> Signed-off-by: Juan Quintela <quintela@redhat.com>
> ---
>  arch_init.c |    2 --
>  1 file changed, 2 deletions(-)
> 
> diff --git a/arch_init.c b/arch_init.c
> index 8299c15..c1b4f13 100644
> --- a/arch_init.c
> +++ b/arch_init.c
> @@ -308,8 +308,6 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
>      ram_addr_t addr;
>      RAMBlock *block;
> 
> -    memory_global_sync_dirty_bitmap(get_system_memory());
> -
>      bytes_transferred = 0;
>      last_block = NULL;
>      last_offset = 0;
> 
Reviewed-by: Orit Wasserman <owasserm@redhat.com>


* Re: [Qemu-devel] [PATCH 11/12] ram: iterate phase
  2012-07-01 12:17   ` [Qemu-devel] [PATCH 11/12] ram: iterate phase Igor Mitsyanko
@ 2012-07-03 10:48     ` Juan Quintela
  0 siblings, 0 replies; 20+ messages in thread
From: Juan Quintela @ 2012-07-03 10:48 UTC (permalink / raw)
  To: Igor Mitsyanko; +Cc: owasserm, qemu-devel

Igor Mitsyanko <i.mitsyanko@gmail.com> wrote:
> On 6/28/2012 11:22 PM, Juan Quintela wrote:
>> We only need to synchronize the bitmap when the number of dirty pages is
>> low, not every time we call the function.
>>
>> Signed-off-by: Juan Quintela <quintela@redhat.com>
>> ---
>>   arch_init.c |    9 ++++++---
>>   1 file changed, 6 insertions(+), 3 deletions(-)
>>
>> diff --git a/arch_init.c b/arch_init.c
>> index fe843a7..8299c15 100644
>> --- a/arch_init.c
>> +++ b/arch_init.c
>> @@ -348,8 +348,6 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
>>       int i;
>>       uint64_t expected_time;
>>
>> -    memory_global_sync_dirty_bitmap(get_system_memory());
>> -
>>       bytes_transferred_last = bytes_transferred;
>>       bwidth = qemu_get_clock_ns(rt_clock);
>>
>> @@ -397,7 +395,12 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
>>       DPRINTF("ram_save_live: expected(%ld) <= max(%ld)?\n", expected_time,
>>               migrate_max_downtime());
>>
>> -    return expected_time <= migrate_max_downtime();
>> +    if (expected_time <= migrate_max_downtime()) {
>> +        memory_global_sync_dirty_bitmap(get_system_memory());
>> +
>> +        return expected_time <= migrate_max_downtime();
>
> Shouldn't expected_time be recalculated after
> memory_global_sync_dirty_bitmap()?

It "depends" only of the network speed,
memory_global_sync_dirty_bitmap() should be really fast (notice that
with lots of memory this couldn't be true).

Later, Juan.
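
For context on the question above: the downtime estimate only depends on
the amount of RAM still dirty and the measured transfer bandwidth.  A
sketch of how it is computed in that era's ram_save_iterate(); the exact
expressions are reconstructed from memory and should be treated as an
assumption -- only bwidth, bytes_transferred, expected_time and
migrate_max_downtime() appear in the quoted hunk.

    bwidth = qemu_get_clock_ns(rt_clock) - bwidth;                   /* elapsed ns   */
    bwidth = (bytes_transferred - bytes_transferred_last) / bwidth;  /* bytes per ns */

    /* time needed to push the pages that are still dirty */
    expected_time = ram_save_remaining() * TARGET_PAGE_SIZE / bwidth;

    /* pre-patch decision; the patch additionally syncs the dirty bitmap
     * when this estimate is already within the allowed downtime */
    return expected_time <= migrate_max_downtime();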


* Re: [Qemu-devel] [PATCH 02/12] savevm: Live migration handlers register the struct directly
  2012-07-01 11:24       ` Orit Wasserman
@ 2012-07-11 17:01         ` Juan Quintela
  2012-07-11 17:28           ` Orit Wasserman
  0 siblings, 1 reply; 20+ messages in thread
From: Juan Quintela @ 2012-07-11 17:01 UTC (permalink / raw)
  To: Orit Wasserman; +Cc: qemu-devel

Orit Wasserman <owasserm@redhat.com> wrote:
> On 07/01/2012 01:20 PM, Juan Quintela wrote:
>> Orit Wasserman <owasserm@redhat.com> wrote:
>>> On 06/28/2012 10:22 PM, Juan Quintela wrote:
>>>> Notice that the live migration users never unregister, so no problem
>>>> about freeing the ops structure.
>>>>  void unregister_savevm(DeviceState *dev, const char *idstr, void *opaque)
>>>> diff --git a/vl.c b/vl.c
>>>> index 1329c30..b1f31e8 100644
>>>> --- a/vl.c
>>>> +++ b/vl.c
>>>> @@ -3438,8 +3438,7 @@ int main(int argc, char **argv, char **envp)
>>>>      default_drive(default_sdcard, snapshot, machine->use_scsi,
>>>>                    IF_SD, 0, SD_OPTS);
>>>>
>>>> -    register_savevm_live(NULL, "ram", 0, 4, NULL, ram_save_live, NULL,
>>>> -                         ram_load, NULL);
>>>> +    register_savevm_live(NULL, "ram", 0, 4, &savevm_ram_handlers, NULL);
>>>>
>>>
>>> Juan,
>>> Can't we move it to migration.c (migrate_init and qemu_start_incoming_migration)?
>>> This would remove the need to use an extern for savevm_ram_handlers.
>> 
>> savevm_ram_handlers is exported from arch_init.c, what do we win if we
>> move it?  Furthermore, I add more functions to this structure later in
>> the series, so what does not exporting the structure and having to
>> export more functions bring us?
> well it shouldn't be in vl.c, there is no reason (correct me if I'm wrong) to
> register "ram" handlers if there is no migration ...

We always register migration handlers :-(

The other problem is that there is no other function exported from
arch_init.c onto which we can piggy_back() that registration :-(

Later, Juan.
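
For readers following along, a minimal reconstruction of what the exported
structure looks like at this point in the series; the struct body is
inferred from the arguments of the old call in the quoted hunk, and more
ops are added to it by later patches.

    /* arch_init.c -- reconstruction, not the exact upstream code.  The old
     * call passed save_live_state=ram_save_live and load_state=ram_load,
     * so the structure starts out as: */
    SaveVMHandlers savevm_ram_handlers = {
        .save_live_state = ram_save_live,
        .load_state      = ram_load,
    };

    /* vl.c, as in the quoted hunk: */
    register_savevm_live(NULL, "ram", 0, 4, &savevm_ram_handlers, NULL);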


* Re: [Qemu-devel] [PATCH 02/12] savevm: Live migration handlers register the struct directly
  2012-07-11 17:01         ` Juan Quintela
@ 2012-07-11 17:28           ` Orit Wasserman
  0 siblings, 0 replies; 20+ messages in thread
From: Orit Wasserman @ 2012-07-11 17:28 UTC (permalink / raw)
  To: quintela; +Cc: qemu-devel

On 07/11/2012 08:01 PM, Juan Quintela wrote:
> Orit Wasserman <owasserm@redhat.com> wrote:
>> On 07/01/2012 01:20 PM, Juan Quintela wrote:
>>> Orit Wasserman <owasserm@redhat.com> wrote:
>>>> On 06/28/2012 10:22 PM, Juan Quintela wrote:
>>>>> Notice that the live migration users never unregister, so no problem
>>>>> about freeing the ops structure.
>>>>>  void unregister_savevm(DeviceState *dev, const char *idstr, void *opaque)
>>>>> diff --git a/vl.c b/vl.c
>>>>> index 1329c30..b1f31e8 100644
>>>>> --- a/vl.c
>>>>> +++ b/vl.c
>>>>> @@ -3438,8 +3438,7 @@ int main(int argc, char **argv, char **envp)
>>>>>      default_drive(default_sdcard, snapshot, machine->use_scsi,
>>>>>                    IF_SD, 0, SD_OPTS);
>>>>>
>>>>> -    register_savevm_live(NULL, "ram", 0, 4, NULL, ram_save_live, NULL,
>>>>> -                         ram_load, NULL);
>>>>> +    register_savevm_live(NULL, "ram", 0, 4, &savevm_ram_handlers, NULL);
>>>>>
>>>>
>>>> Juan,
>>>> Can't we move it to migration.c (migrate_init and qemu_start_incoming_migration)?
>>>> This would remove the need to use an extern for savevm_ram_handlers.
>>>
>>> savevm_ram_handlers is exported from arch_init.c, what do we win if we
>>> move it?  Furthermore, I add more functions to this structure later in
>>> the series, so what does not exporting the structure and having to
>>> export more functions bring us?
>> well it shouldn't be in vl.c, there is no reason (correct me if I'm wrong) to
>> register "ram" handlers if there is no migration ...
> 
> We always register migration handlers :-(
> 
> The other problem is that there is no other function exported from
> arch_init.c onto which we can piggy_back() that registration :-(
> 
Well I guess it can stay as it is for a bit longer ...

Orit
> Later, Juan.
> 


* Re: [Qemu-devel] Bad mail header?
  2012-07-01 10:56       ` Peter Maydell
@ 2012-07-13  7:26         ` Juan Quintela
  0 siblings, 0 replies; 20+ messages in thread
From: Juan Quintela @ 2012-07-13  7:26 UTC (permalink / raw)
  To: Peter Maydell; +Cc: Stefan Weil, qemu-devel, owasserm

Peter Maydell <peter.maydell@linaro.org> wrote:
> On 1 July 2012 11:16, Juan Quintela <quintela@redhat.com> wrote:
>> Stefan Weil <sw@weilnetz.de> wrote:
>>> this mail and the other mails from this patch series trigger
>>> an alert in my mail filter:
>>> X-Amavis-Alert: BAD HEADER SECTION Duplicate header field: "References"
>
>> You are right, I also have duplicate In-Reply-To and References fields.
>> No clue if the problem is with git or the mailer.

Thanks Peter, it appears that the problem is now fixed.

For reference:

alias gfpm="/usr/bin/git format-patch --signoff --find-renames --cover-letter"
alias gsend="/usr/bin/git send-email --smtp-server=smtp.corp.redhat.com --bcc=quintela@redhat.com --from=\"Juan Quintela <quintela@redhat.com>\" --suppress-cc=self"

i.e. just remove the --thread from gfpm.

Thanks to both, Juan.


end of thread, other threads:[~2012-07-13  7:27 UTC | newest]

Thread overview: 20+ messages
     [not found] <cover.1340910651.git.quintela@redhat.com>
     [not found] ` <39651e275488caec46861cb5b11ba1c7961a429c.1340910651.git.quintela@redhat.com>
2012-07-01  8:05   ` [Qemu-devel] [PATCH 01/12] savevm: Use a struct to pass all handlers Orit Wasserman
     [not found]   ` <4FEF3843.1000907@weilnetz.de>
2012-07-01 10:16     ` [Qemu-devel] Bad mail header? Juan Quintela
2012-07-01 10:56       ` Peter Maydell
2012-07-13  7:26         ` Juan Quintela
     [not found] ` <620debbcb9bc99dc6db54d87a8683efbc00c2b66.1340910651.git.quintela@redhat.com>
2012-07-01  8:19   ` [Qemu-devel] [PATCH 02/12] savevm: Live migration handlers register the struct directly Orit Wasserman
2012-07-01 10:20     ` Juan Quintela
2012-07-01 11:24       ` Orit Wasserman
2012-07-11 17:01         ` Juan Quintela
2012-07-11 17:28           ` Orit Wasserman
     [not found] ` <704b60a0d2b32fbf6bf229b115082c2b929a4731.1340910651.git.quintela@redhat.com>
2012-07-01  9:40   ` [Qemu-devel] [PATCH 03/12] savevm: remove SaveSetParamsHandler Orit Wasserman
     [not found] ` <509b501d09f18212b4165ee116ddef4ceba59097.1340910651.git.quintela@redhat.com>
2012-07-01  9:40   ` [Qemu-devel] [PATCH 04/12] savevm: remove SaveLiveStateHandler Orit Wasserman
     [not found] ` <3dad4b557fac0c11eb6fdb06121859d16247e4f0.1340910651.git.quintela@redhat.com>
2012-07-01  9:47   ` [Qemu-devel] [PATCH 05/12] savevm: Refactor cancel operation in its own operation Orit Wasserman
     [not found] ` <1e9e8749f384425d04c74bc76fc502621e226352.1340910651.git.quintela@redhat.com>
     [not found]   ` <4FEF7C5F.5010700@gmail.com>
2012-07-01 10:44     ` [Qemu-devel] [PATCH 06/12] savevm: introduce is_active method Juan Quintela
     [not found] ` <5a3139cd92ad66e5028ad0120e4de63c1a089d65.1340910651.git.quintela@redhat.com>
2012-07-01 12:14   ` [Qemu-devel] [PATCH 07/12] savevm: split save_live_setup from save_live_state Orit Wasserman
     [not found] ` <cd73a246dbbb7a688e0000204f5af2040afe6578.1340910651.git.quintela@redhat.com>
2012-07-01 12:17   ` [Qemu-devel] [PATCH 11/12] ram: iterate phase Igor Mitsyanko
2012-07-03 10:48     ` Juan Quintela
     [not found] ` <d1e17271bb9fdc52dbd0b591e471b096aca2e721.1340910651.git.quintela@redhat.com>
2012-07-02 11:57   ` [Qemu-devel] [PATCH 08/12] savevm: split save_live into stage2 and stage3 Orit Wasserman
     [not found] ` <07c8945d9b633bc509ebec0ff6646834ec974ccb.1340910651.git.quintela@redhat.com>
2012-07-02 12:00   ` [Qemu-devel] [PATCH 09/12] ram: save_live_setup() don't need to sent pages Orit Wasserman
     [not found] ` <be6bed86c792fb6f237f016aadfb3fcbda746f15.1340910651.git.quintela@redhat.com>
2012-07-02 12:00   ` [Qemu-devel] [PATCH 10/12] ram: save_live_complete() only do one loop Orit Wasserman
     [not found] ` <ab1ca756b1cfe67df94f7c6674ea86f6d7232a2f.1340910651.git.quintela@redhat.com>
2012-07-02 12:05   ` [Qemu-devel] [PATCH 12/12] ram: save_live_setup() we don't need to synchronize the dirty bitmap Orit Wasserman
