From: Juan Quintela <quintela@redhat.com>
To: qemu-devel@nongnu.org
Cc: amit.shah@redhat.com, dgilbert@redhat.com
Subject: [Qemu-devel] [PULL 11/12] migration: Send the fd number which we are going to use for this page
Date: Mon, 13 Feb 2017 18:19:47 +0100
Message-ID: <1487006388-7966-12-git-send-email-quintela@redhat.com>
In-Reply-To: <1487006388-7966-1-git-send-email-quintela@redhat.com>

We still send the page data itself through the main channel; that will
change later in the series.
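
For reference (not part of the patch itself), this is roughly how the
destination parses the new record, condensed from the ram_load() hunk
below; error handling, the host address lookup and the other
RAM_SAVE_FLAG_* cases are left out:

    addr = qemu_get_be64(f);      /* page offset | RAM_SAVE_FLAG_MULTIFD_PAGE */
    ...
    case RAM_SAVE_FLAG_MULTIFD_PAGE:
        fd_num = qemu_get_be16(f);   /* send thread used for this page, or
                                        UINT16_MAX while the group is still
                                        being filled */
        multifd_recv_page(host, fd_num);
        /* for now the page data itself still comes over the main channel */
        qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
        break;

So the only wire-format change is the extra be16 per page, which is
accounted for in *bytes_transferred on the send side.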

Signed-off-by: Juan Quintela <quintela@redhat.com>
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
---
 migration/ram.c | 71 +++++++++++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 66 insertions(+), 5 deletions(-)

diff --git a/migration/ram.c b/migration/ram.c
index 8d85c49..38789c8 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -527,7 +527,8 @@ void migrate_multifd_send_threads_create(void)
     }
 }

-static int multifd_send_page(uint8_t *address)
+
+static uint16_t multifd_send_page(uint8_t *address, bool last_page)
 {
     int i, j, thread_count;
     static MultiFDPages pages;
@@ -541,8 +542,10 @@ static int multifd_send_page(uint8_t *address)
     pages.address[pages.num] = address;
     pages.num++;

-    if (pages.num < (pages.size - 1)) {
-        return UINT16_MAX;
+    if (!last_page) {
+        if (pages.num < (pages.size - 1)) {
+            return UINT16_MAX;
+        }
     }

     thread_count = migrate_multifd_threads();
@@ -564,16 +567,21 @@ static int multifd_send_page(uint8_t *address)
     qemu_mutex_unlock(&multifd_send[i].mutex);
     qemu_sem_post(&multifd_send[i].sem);

-    return 0;
+    return i;
 }

 struct MultiFDRecvParams {
+    /* not changed once the thread is created */
     QemuThread thread;
     QIOChannel *c;
     QemuSemaphore init;
+    QemuSemaphore ready;
     QemuSemaphore sem;
     QemuMutex mutex;
+    /* protected by param mutex */
     bool quit;
+    MultiFDPages pages;
+    bool done;
 };
 typedef struct MultiFDRecvParams MultiFDRecvParams;

@@ -586,6 +594,7 @@ static void *multifd_recv_thread(void *opaque)

     qio_channel_read(params->c, &start, 1, &error_abort);
     qemu_sem_post(&params->init);
+    qemu_sem_post(&params->ready);

     while (true) {
         qemu_mutex_lock(&params->mutex);
@@ -593,6 +602,13 @@ static void *multifd_recv_thread(void *opaque)
             qemu_mutex_unlock(&params->mutex);
             break;
         }
+        if (params->pages.num) {
+            params->pages.num = 0;
+            params->done = true;
+            qemu_mutex_unlock(&params->mutex);
+            qemu_sem_post(&params->ready);
+            continue;
+        }
         qemu_mutex_unlock(&params->mutex);
         qemu_sem_wait(&params->sem);
     }
@@ -652,7 +668,10 @@ void migrate_multifd_recv_threads_create(void)
         qemu_mutex_init(&p->mutex);
         qemu_sem_init(&p->sem, 0);
         qemu_sem_init(&p->init, 0);
+        qemu_sem_init(&p->ready, 0);
         p->quit = false;
+        p->done = false;
+        multifd_init_group(&p->pages);
         p->c = socket_recv_channel_create();

         if (!p->c) {
@@ -666,6 +685,42 @@ void migrate_multifd_recv_threads_create(void)
     socket_recv_channel_close_listening();
 }

+static void multifd_recv_page(uint8_t *address, uint16_t fd_num)
+{
+    int i, thread_count;
+    MultiFDRecvParams *params;
+    static MultiFDPages pages;
+    static bool once;
+
+    if (!once) {
+        multifd_init_group(&pages);
+        once = true;
+    }
+
+    pages.address[pages.num] = address;
+    pages.num++;
+
+    if (fd_num == UINT16_MAX) {
+        return;
+    }
+
+    thread_count = migrate_multifd_threads();
+    assert(fd_num < thread_count);
+    params = &multifd_recv[fd_num];
+
+    qemu_sem_wait(&params->ready);
+
+    qemu_mutex_lock(&params->mutex);
+    params->done = false;
+    for (i = 0; i < pages.num; i++) {
+        params->pages.address[i] = pages.address[i];
+    }
+    params->pages.num = pages.num;
+    pages.num = 0;
+    qemu_mutex_unlock(&params->mutex);
+    qemu_sem_post(&params->sem);
+}
+
 /**
  * save_page_header: Write page header to wire
  *
@@ -1085,6 +1140,7 @@ static int ram_multifd_page(QEMUFile *f, PageSearchStatus *pss,
                             bool last_stage, uint64_t *bytes_transferred)
 {
     int pages;
+    uint16_t fd_num;
     uint8_t *p;
     RAMBlock *block = pss->block;
     ram_addr_t offset = pss->offset;
@@ -1098,8 +1154,10 @@ static int ram_multifd_page(QEMUFile *f, PageSearchStatus *pss,
     if (pages == -1) {
         *bytes_transferred +=
             save_page_header(f, block, offset | RAM_SAVE_FLAG_MULTIFD_PAGE);
+        fd_num = multifd_send_page(p, migration_dirty_pages == 1);
+        qemu_put_be16(f, fd_num);
+        *bytes_transferred += 2; /* size of fd_num */
         qemu_put_buffer(f, p, TARGET_PAGE_SIZE);
-        multifd_send_page(p);
         *bytes_transferred += TARGET_PAGE_SIZE;
         pages = 1;
         acct_info.norm_pages++;
@@ -2813,6 +2871,7 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
     while (!postcopy_running && !ret && !(flags & RAM_SAVE_FLAG_EOS)) {
         ram_addr_t addr, total_ram_bytes;
         void *host = NULL;
+        uint16_t fd_num;
         uint8_t ch;

         addr = qemu_get_be64(f);
@@ -2910,6 +2969,8 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
             break;

         case RAM_SAVE_FLAG_MULTIFD_PAGE:
+            fd_num = qemu_get_be16(f);
+            multifd_recv_page(host, fd_num);
             qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
             break;

-- 
2.7.4
