From: Juan Quintela <quintela@redhat.com>
To: qemu-devel@nongnu.org
Cc: amit.shah@redhat.com, dgilbert@redhat.com
Subject: [Qemu-devel] [PULL 50/57] Postcopy: Handle userfault requests
Date: Mon,  9 Nov 2015 18:28:54 +0100	[thread overview]
Message-ID: <1447090141-29074-51-git-send-email-quintela@redhat.com> (raw)
In-Reply-To: <1447090141-29074-1-git-send-email-quintela@redhat.com>

From: "Dr. David Alan Gilbert" <dgilbert@redhat.com>

userfaultfd is a Linux syscall that returns a file descriptor delivering
a stream of notifications whenever pages registered with it are accessed;
the program can then resolve each stall and tell the faulting thread to
carry on.

We convert the requests from the kernel into messages back to the
source asking for the pages.
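
For anyone new to the interface, the following is a minimal, self-contained
sketch of the userfaultfd flow this patch builds on: open the fd, perform the
UFFDIO_API handshake, register a region, then read uffd_msg structures as
faults arrive and resolve them.  It is illustrative only (one anonymous page,
missing-mode registration, abbreviated error handling) and is not code taken
from this patch:

    #include <fcntl.h>
    #include <linux/userfaultfd.h>
    #include <poll.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
        /* Open the userfault fd; O_NONBLOCK matches how the fault thread
         * polls it */
        int ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
        if (ufd == -1) {
            perror("userfaultfd");
            return 1;
        }

        /* ABI handshake: agree on the API version before any other ioctl */
        struct uffdio_api api = { .api = UFFD_API };
        if (ioctl(ufd, UFFDIO_API, &api) == -1) {
            perror("UFFDIO_API");
            return 1;
        }

        /* Register one anonymous page for missing-page notifications */
        size_t pagesize = getpagesize();
        void *area = mmap(NULL, pagesize, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        struct uffdio_register reg = {
            .range = { .start = (uintptr_t)area, .len = pagesize },
            .mode = UFFDIO_REGISTER_MODE_MISSING,
        };
        if (ioctl(ufd, UFFDIO_REGISTER, &reg) == -1) {
            perror("UFFDIO_REGISTER");
            return 1;
        }

        /* Another thread touching 'area' would now stall; each stall shows
         * up as one uffd_msg read from the fd.  Poll briefly to show the
         * shape of the handling loop. */
        struct pollfd pfd = { .fd = ufd, .events = POLLIN };
        if (poll(&pfd, 1, 100) > 0) {
            struct uffd_msg msg;
            if (read(ufd, &msg, sizeof(msg)) == sizeof(msg) &&
                msg.event == UFFD_EVENT_PAGEFAULT) {
                printf("fault at 0x%llx\n",
                       (unsigned long long)msg.arg.pagefault.address);
                /* Resolving the fault (UFFDIO_COPY/UFFDIO_ZEROPAGE) also
                 * wakes the stalled thread. */
                struct uffdio_zeropage zp = {
                    .range = { .start = (uintptr_t)area, .len = pagesize },
                };
                ioctl(ufd, UFFDIO_ZEROPAGE, &zp);
            }
        }
        return 0;
    }

In the patch the equivalent read loop runs in postcopy_ram_fault_thread(),
with the resolution step replaced by a page request sent back to the
migration source.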

Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Reviewed-by: Amit Shah <amit.shah@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
---
 include/migration/migration.h |   3 +
 migration/postcopy-ram.c      | 155 +++++++++++++++++++++++++++++++++++++++---
 trace-events                  |   9 +++
 3 files changed, 158 insertions(+), 9 deletions(-)
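
As a reviewer's note, the eventfd-plus-poll "ask the thread to quit" pattern
introduced below (userfault_quit_fd) can be sketched in isolation as follows;
the worker/quit_fd/data_fd names are illustrative and not QEMU code:

    #include <poll.h>
    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/eventfd.h>
    #include <unistd.h>

    static int quit_fd;

    static void *worker(void *opaque)
    {
        int data_fd = *(int *)opaque;

        for (;;) {
            struct pollfd pfd[2] = {
                { .fd = data_fd, .events = POLLIN },
                { .fd = quit_fd, .events = POLLIN }, /* readable on quit */
            };
            if (poll(pfd, 2, -1) == -1) {
                break;
            }
            if (pfd[1].revents) {
                break;          /* cleanup asked us to exit */
            }
            /* ... otherwise service data_fd, as the fault thread does ... */
        }
        return NULL;
    }

    int main(void)
    {
        int pipefd[2];
        pthread_t tid;
        uint64_t one = 1;

        pipe(pipefd);                      /* stand-in for the userfault fd */
        quit_fd = eventfd(0, EFD_CLOEXEC); /* starts at 0, never read back */

        pthread_create(&tid, NULL, worker, &pipefd[0]);

        /* Cleanup path: bump the eventfd, which wakes poll(), then join */
        if (write(quit_fd, &one, sizeof(one)) != sizeof(one)) {
            perror("eventfd write");
        }
        pthread_join(tid, NULL);
        close(quit_fd);
        close(pipefd[0]);
        close(pipefd[1]);
        return 0;
    }

Writing any non-zero value makes the eventfd readable, so a single write()
both wakes poll() and lets the thread notice the quit request without needing
a separate lock.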

diff --git a/include/migration/migration.h b/include/migration/migration.h
index a48471e..329d535 100644
--- a/include/migration/migration.h
+++ b/include/migration/migration.h
@@ -89,11 +89,14 @@ struct MigrationIncomingState {
      */
     QemuEvent main_thread_load_event;

+    bool           have_fault_thread;
     QemuThread     fault_thread;
     QemuSemaphore  fault_thread_sem;

     /* For the kernel to send us notifications */
     int       userfault_fd;
+    /* To tell the fault_thread to quit */
+    int       userfault_quit_fd;
     QEMUFile *to_src_file;
     QemuMutex rp_mutex;    /* We send replies from multiple threads */
     void     *postcopy_tmp_page;
diff --git a/migration/postcopy-ram.c b/migration/postcopy-ram.c
index 58492c0..4f1e329 100644
--- a/migration/postcopy-ram.c
+++ b/migration/postcopy-ram.c
@@ -51,6 +51,8 @@ struct PostcopyDiscardState {
  */
 #if defined(__linux__)

+#include <poll.h>
+#include <sys/eventfd.h>
 #include <sys/mman.h>
 #include <sys/ioctl.h>
 #include <sys/syscall.h>
@@ -267,15 +269,41 @@ int postcopy_ram_incoming_init(MigrationIncomingState *mis, size_t ram_pages)
  */
 int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
 {
-    /* TODO: Join the fault thread once we're sure it will exit */
-    if (qemu_ram_foreach_block(cleanup_range, mis)) {
-        return -1;
+    trace_postcopy_ram_incoming_cleanup_entry();
+
+    if (mis->have_fault_thread) {
+        uint64_t tmp64;
+
+        if (qemu_ram_foreach_block(cleanup_range, mis)) {
+            return -1;
+        }
+        /*
+         * Tell the fault_thread to exit, it's an eventfd that should
+         * currently be at 0, we're going to increment it to 1
+         */
+        tmp64 = 1;
+        if (write(mis->userfault_quit_fd, &tmp64, 8) == 8) {
+            trace_postcopy_ram_incoming_cleanup_join();
+            qemu_thread_join(&mis->fault_thread);
+        } else {
+            /* Not much we can do here, but may as well report it */
+            error_report("%s: incrementing userfault_quit_fd: %s", __func__,
+                         strerror(errno));
+        }
+        trace_postcopy_ram_incoming_cleanup_closeuf();
+        close(mis->userfault_fd);
+        close(mis->userfault_quit_fd);
+        mis->have_fault_thread = false;
     }

+    postcopy_state_set(POSTCOPY_INCOMING_END);
+    migrate_send_rp_shut(mis, qemu_file_get_error(mis->from_src_file) != 0);
+
     if (mis->postcopy_tmp_page) {
         munmap(mis->postcopy_tmp_page, getpagesize());
         mis->postcopy_tmp_page = NULL;
     }
+    trace_postcopy_ram_incoming_cleanup_exit();
     return 0;
 }

@@ -314,31 +342,140 @@ static int ram_block_enable_notify(const char *block_name, void *host_addr,
 static void *postcopy_ram_fault_thread(void *opaque)
 {
     MigrationIncomingState *mis = opaque;
+    struct uffd_msg msg;
+    int ret;
+    size_t hostpagesize = getpagesize();
+    RAMBlock *rb = NULL;
+    RAMBlock *last_rb = NULL; /* last RAMBlock we sent part of */

-    fprintf(stderr, "postcopy_ram_fault_thread\n");
-    /* TODO: In later patch */
+    trace_postcopy_ram_fault_thread_entry();
     qemu_sem_post(&mis->fault_thread_sem);
-    while (1) {
-        /* TODO: In later patch */
+
+    while (true) {
+        ram_addr_t rb_offset;
+        ram_addr_t in_raspace;
+        struct pollfd pfd[2];
+
+        /*
+         * We're mainly waiting for the kernel to give us a faulting HVA,
+         * however we can be told to quit via userfault_quit_fd which is
+         * an eventfd
+         */
+        pfd[0].fd = mis->userfault_fd;
+        pfd[0].events = POLLIN;
+        pfd[0].revents = 0;
+        pfd[1].fd = mis->userfault_quit_fd;
+        pfd[1].events = POLLIN; /* Waiting for eventfd to go positive */
+        pfd[1].revents = 0;
+
+        if (poll(pfd, 2, -1 /* Wait forever */) == -1) {
+            error_report("%s: userfault poll: %s", __func__, strerror(errno));
+            break;
+        }
+
+        if (pfd[1].revents) {
+            trace_postcopy_ram_fault_thread_quit();
+            break;
+        }
+
+        ret = read(mis->userfault_fd, &msg, sizeof(msg));
+        if (ret != sizeof(msg)) {
+            if (errno == EAGAIN) {
+                /*
+                 * if a wake up happens on the other thread just after
+                 * the poll, there is nothing to read.
+                 */
+                continue;
+            }
+            if (ret < 0) {
+                error_report("%s: Failed to read full userfault message: %s",
+                             __func__, strerror(errno));
+                break;
+            } else {
+                error_report("%s: Read %d bytes from userfaultfd expected %zd",
+                             __func__, ret, sizeof(msg));
+                break; /* Lost alignment, don't know what we'd read next */
+            }
+        }
+        if (msg.event != UFFD_EVENT_PAGEFAULT) {
+            error_report("%s: Read unexpected event %ud from userfaultfd",
+                         __func__, msg.event);
+            continue; /* It's not a page fault, shouldn't happen */
+        }
+
+        rb = qemu_ram_block_from_host(
+                 (void *)(uintptr_t)msg.arg.pagefault.address,
+                 true, &in_raspace, &rb_offset);
+        if (!rb) {
+            error_report("postcopy_ram_fault_thread: Fault outside guest: %"
+                         PRIx64, (uint64_t)msg.arg.pagefault.address);
+            break;
+        }
+
+        rb_offset &= ~(hostpagesize - 1);
+        trace_postcopy_ram_fault_thread_request(msg.arg.pagefault.address,
+                                                qemu_ram_get_idstr(rb),
+                                                rb_offset);
+
+        /*
+         * Send the request to the source - we want to request one
+         * of our host page sizes (which is >= TPS)
+         */
+        if (rb != last_rb) {
+            last_rb = rb;
+            migrate_send_rp_req_pages(mis, qemu_ram_get_idstr(rb),
+                                     rb_offset, hostpagesize);
+        } else {
+            /* Save some space */
+            migrate_send_rp_req_pages(mis, NULL,
+                                     rb_offset, hostpagesize);
+        }
     }
-
+    trace_postcopy_ram_fault_thread_exit();
     return NULL;
 }

 int postcopy_ram_enable_notify(MigrationIncomingState *mis)
 {
-    /* Create the fault handler thread and wait for it to be ready */
+    /* Open the fd for the kernel to give us userfaults */
+    mis->userfault_fd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
+    if (mis->userfault_fd == -1) {
+        error_report("%s: Failed to open userfault fd: %s", __func__,
+                     strerror(errno));
+        return -1;
+    }
+
+    /*
+     * Although the host check already tested the API, we need to
+     * do the check again as an ABI handshake on the new fd.
+     */
+    if (!ufd_version_check(mis->userfault_fd)) {
+        return -1;
+    }
+
+    /* Now an eventfd we use to tell the fault-thread to quit */
+    mis->userfault_quit_fd = eventfd(0, EFD_CLOEXEC);
+    if (mis->userfault_quit_fd == -1) {
+        error_report("%s: Opening userfault_quit_fd: %s", __func__,
+                     strerror(errno));
+        close(mis->userfault_fd);
+        return -1;
+    }
+
     qemu_sem_init(&mis->fault_thread_sem, 0);
     qemu_thread_create(&mis->fault_thread, "postcopy/fault",
                        postcopy_ram_fault_thread, mis, QEMU_THREAD_JOINABLE);
     qemu_sem_wait(&mis->fault_thread_sem);
     qemu_sem_destroy(&mis->fault_thread_sem);
+    mis->have_fault_thread = true;

     /* Mark so that we get notified of accesses to unwritten areas */
     if (qemu_ram_foreach_block(ram_block_enable_notify, mis)) {
         return -1;
     }

+    trace_postcopy_ram_enable_notify();
+
     return 0;
 }

diff --git a/trace-events b/trace-events
index ad7afcb..60be299 100644
--- a/trace-events
+++ b/trace-events
@@ -1554,6 +1554,15 @@ postcopy_cleanup_range(const char *ramblock, void *host_addr, size_t offset, siz
 postcopy_init_range(const char *ramblock, void *host_addr, size_t offset, size_t length) "%s: %p offset=%zx length=%zx"
 postcopy_place_page(void *host_addr) "host=%p"
 postcopy_place_page_zero(void *host_addr) "host=%p"
+postcopy_ram_enable_notify(void) ""
+postcopy_ram_fault_thread_entry(void) ""
+postcopy_ram_fault_thread_exit(void) ""
+postcopy_ram_fault_thread_quit(void) ""
+postcopy_ram_fault_thread_request(uint64_t hostaddr, const char *ramblock, size_t offset) "Request for HVA=%" PRIx64 " rb=%s offset=%zx"
+postcopy_ram_incoming_cleanup_closeuf(void) ""
+postcopy_ram_incoming_cleanup_entry(void) ""
+postcopy_ram_incoming_cleanup_exit(void) ""
+postcopy_ram_incoming_cleanup_join(void) ""

 # kvm-all.c
 kvm_ioctl(int type, void *arg) "type 0x%x, arg %p"
-- 
2.5.0

Thread overview: 75+ messages
2015-11-09 17:28 [Qemu-devel] [PULL 00/57] Migration pull Juan Quintela
2015-11-09 17:28 ` [Qemu-devel] [PULL 01/57] Add postcopy documentation Juan Quintela
2015-11-09 17:28 ` [Qemu-devel] [PULL 02/57] Provide runtime Target page information Juan Quintela
2015-11-09 17:28 ` [Qemu-devel] [PULL 03/57] Move configuration section writing Juan Quintela
2015-11-09 17:28 ` [Qemu-devel] [PULL 04/57] Move page_size_init earlier Juan Quintela
2015-11-09 17:28 ` [Qemu-devel] [PULL 05/57] qemu_ram_block_from_host Juan Quintela
2015-11-09 17:28 ` [Qemu-devel] [PULL 06/57] qemu_ram_block_by_name Juan Quintela
2015-11-09 17:28 ` [Qemu-devel] [PULL 07/57] Rename mis->file to from_src_file Juan Quintela
2015-11-09 17:28 ` [Qemu-devel] [PULL 08/57] Add qemu_get_buffer_in_place to avoid copies some of the time Juan Quintela
2015-11-09 17:28 ` [Qemu-devel] [PULL 09/57] Add wrapper for setting blocking status on a QEMUFile Juan Quintela
2015-11-09 17:28 ` [Qemu-devel] [PULL 10/57] Add QEMU_MADV_NOHUGEPAGE Juan Quintela
2015-11-09 17:28 ` [Qemu-devel] [PULL 11/57] ram_debug_dump_bitmap: Dump a migration bitmap as text Juan Quintela
2015-11-09 17:28 ` [Qemu-devel] [PULL 12/57] ram_load: Factor out host_from_stream_offset call and check Juan Quintela
2015-11-09 17:28 ` [Qemu-devel] [PULL 13/57] migrate_init: Call from savevm Juan Quintela
2015-11-09 17:28 ` [Qemu-devel] [PULL 14/57] Rename save_live_complete to save_live_complete_precopy Juan Quintela
2015-11-09 17:28 ` [Qemu-devel] [PULL 15/57] Add Linux userfaultfd.h header Juan Quintela
2015-11-09 17:28 ` [Qemu-devel] [PULL 16/57] Return path: Open a return path on QEMUFile for sockets Juan Quintela
2015-11-09 17:28 ` [Qemu-devel] [PULL 17/57] Return path: socket_writev_buffer: Block even on non-blocking fd's Juan Quintela
2015-11-09 17:28 ` [Qemu-devel] [PULL 18/57] Migration commands Juan Quintela
2015-11-09 17:28 ` [Qemu-devel] [PULL 19/57] Return path: Control commands Juan Quintela
2015-11-09 17:28 ` [Qemu-devel] [PULL 20/57] Return path: Send responses from destination to source Juan Quintela
2015-11-09 17:28 ` [Qemu-devel] [PULL 21/57] migration_is_setup_or_active Juan Quintela
2015-11-09 17:28 ` [Qemu-devel] [PULL 22/57] Return path: Source handling of return path Juan Quintela
2015-11-09 17:28 ` [Qemu-devel] [PULL 23/57] Rework loadvm path for subloops Juan Quintela
2015-11-09 17:28 ` [Qemu-devel] [PULL 24/57] Add migration-capability boolean for postcopy-ram Juan Quintela
2015-11-09 17:28 ` [Qemu-devel] [PULL 25/57] Add wrappers and handlers for sending/receiving the postcopy-ram migration messages Juan Quintela
2015-11-09 17:28 ` [Qemu-devel] [PULL 26/57] MIG_CMD_PACKAGED: Send a packaged chunk of migration stream Juan Quintela
2015-11-09 17:28 ` [Qemu-devel] [PULL 27/57] Modify save_live_pending for postcopy Juan Quintela
2015-11-09 17:28 ` [Qemu-devel] [PULL 28/57] postcopy: OS support test Juan Quintela
2015-11-09 17:28 ` [Qemu-devel] [PULL 29/57] migrate_start_postcopy: Command to trigger transition to postcopy Juan Quintela
2015-11-09 22:19   ` Eric Blake
2015-11-10 10:38     ` Dr. David Alan Gilbert
2015-11-10 16:36       ` Eric Blake
2015-11-09 17:28 ` [Qemu-devel] [PULL 30/57] migration_completion: Take current state Juan Quintela
2015-11-09 17:28 ` [Qemu-devel] [PULL 31/57] MIGRATION_STATUS_POSTCOPY_ACTIVE: Add new migration state Juan Quintela
2015-11-09 17:28 ` [Qemu-devel] [PULL 32/57] Avoid sending vmdescription during postcopy Juan Quintela
2015-11-09 17:28 ` [Qemu-devel] [PULL 33/57] Add qemu_savevm_state_complete_postcopy Juan Quintela
2015-11-09 17:28 ` [Qemu-devel] [PULL 34/57] Postcopy: Maintain unsentmap Juan Quintela
2015-11-09 17:28 ` [Qemu-devel] [PULL 35/57] migration_completion: Take current state Juan Quintela
2015-11-09 17:28 ` [Qemu-devel] [PULL 36/57] postcopy: Incoming initialisation Juan Quintela
2015-11-09 17:28 ` [Qemu-devel] [PULL 37/57] postcopy: ram_enable_notify to switch on userfault Juan Quintela
2015-11-09 17:28 ` [Qemu-devel] [PULL 38/57] Postcopy: Postcopy startup in migration thread Juan Quintela
2015-11-09 17:28 ` [Qemu-devel] [PULL 39/57] Postcopy: End of iteration Juan Quintela
2015-11-09 17:28 ` [Qemu-devel] [PULL 40/57] Page request: Add MIG_RP_MSG_REQ_PAGES reverse command Juan Quintela
2015-11-09 17:28 ` [Qemu-devel] [PULL 41/57] Page request: Process incoming page request Juan Quintela
2015-11-09 17:28 ` [Qemu-devel] [PULL 42/57] Page request: Consume pages off the post-copy queue Juan Quintela
2015-11-09 17:28 ` [Qemu-devel] [PULL 43/57] postcopy_ram.c: place_page and helpers Juan Quintela
2015-11-09 17:28 ` [Qemu-devel] [PULL 44/57] Postcopy: Use helpers to map pages during migration Juan Quintela
2015-11-09 17:28 ` [Qemu-devel] [PULL 45/57] postcopy: Check order of received target pages Juan Quintela
2015-11-09 17:28 ` [Qemu-devel] [PULL 46/57] Don't sync dirty bitmaps in postcopy Juan Quintela
2015-11-09 17:28 ` [Qemu-devel] [PULL 47/57] Don't iterate on precopy-only devices during postcopy Juan Quintela
2015-11-09 17:28 ` [Qemu-devel] [PULL 48/57] Host page!=target page: Cleanup bitmaps Juan Quintela
2015-11-09 17:28 ` [Qemu-devel] [PULL 49/57] Round up RAMBlock sizes to host page sizes Juan Quintela
2015-11-09 17:28 ` Juan Quintela [this message]
2015-11-09 17:28 ` [Qemu-devel] [PULL 51/57] Start up a postcopy/listener thread ready for incoming page data Juan Quintela
2015-11-09 17:28 ` [Qemu-devel] [PULL 52/57] postcopy: Wire up loadvm_postcopy_handle_ commands Juan Quintela
2015-11-09 17:28 ` [Qemu-devel] [PULL 53/57] Postcopy: Mark nohugepage before discard Juan Quintela
2015-11-09 17:28 ` [Qemu-devel] [PULL 54/57] End of migration for postcopy Juan Quintela
2015-11-09 17:28 ` [Qemu-devel] [PULL 55/57] Disable mlock around incoming postcopy Juan Quintela
2015-11-09 17:29 ` [Qemu-devel] [PULL 56/57] Inhibit ballooning during postcopy Juan Quintela
2015-11-09 17:29 ` [Qemu-devel] [PULL 57/57] migration: qemu_savevm_state_cleanup becomes mandatory operation Juan Quintela
2015-11-09 17:33 ` [Qemu-devel] [PULL 00/57] Migration pull Peter Maydell
2015-11-09 17:40   ` Juan Quintela
2015-11-09 17:40   ` Dr. David Alan Gilbert
2015-11-09 17:49     ` Peter Maydell
2015-11-09 17:50 ` Peter Maydell
2015-11-09 18:29   ` Juan Quintela
2015-11-09 22:36     ` Eric Blake
2015-11-10 10:53       ` Peter Maydell
2015-11-10 12:18         ` Peter Maydell
2015-11-10 12:22           ` Dr. David Alan Gilbert
2015-11-10 12:43             ` Peter Maydell
2015-11-09 18:49   ` Juan Quintela
2015-11-09 19:02   ` Markus Armbruster
  -- strict thread matches above, loose matches on Subject: below --
2015-11-10 14:24 [Qemu-devel] [PULL 00/57] Migration pull (take 2) Juan Quintela
2015-11-10 14:25 ` [Qemu-devel] [PULL 50/57] Postcopy: Handle userfault requests Juan Quintela
