From: Pankaj Gupta <pankaj.gupta.linux@gmail.com>
To: David Hildenbrand <david@redhat.com>
Cc: LKML <linux-kernel@vger.kernel.org>,
Linux MM <linux-mm@kvack.org>,
virtualization@lists.linux-foundation.org,
Andrew Morton <akpm@linux-foundation.org>,
"Michael S . Tsirkin" <mst@redhat.com>,
Jason Wang <jasowang@redhat.com>
Subject: Re: [PATCH v1 17/29] virito-mem: subblock states are specific to Sub Block Mode (SBM)
Date: Tue, 20 Oct 2020 11:54:36 +0200 [thread overview]
Message-ID: <CAM9Jb+gz6CpDWsjOddH9JTMBGutkoZdfjxiQRgrd4SrDtLUT1g@mail.gmail.com> (raw)
In-Reply-To: <20201012125323.17509-18-david@redhat.com>
> Let's rename and move accordingly. While at it, rename sb_bitmap to
> "sb_states".
>
> Cc: "Michael S. Tsirkin" <mst@redhat.com>
> Cc: Jason Wang <jasowang@redhat.com>
> Cc: Pankaj Gupta <pankaj.gupta.linux@gmail.com>
> Signed-off-by: David Hildenbrand <david@redhat.com>
> ---
> drivers/virtio/virtio_mem.c | 118 +++++++++++++++++++-----------------
> 1 file changed, 62 insertions(+), 56 deletions(-)
>
> diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c
> index e76d6f769aa5..2cc497ad8298 100644
> --- a/drivers/virtio/virtio_mem.c
> +++ b/drivers/virtio/virtio_mem.c
> @@ -137,17 +137,23 @@ struct virtio_mem {
> * memory in one 4 KiB page.
> */
> uint8_t *mb_states;
> - } sbm;
>
> - /*
> - * $nb_sb_per_mb bit per memory block. Handled similar to sbm.mb_states.
> - *
> - * With 4MB subblocks, we manage 128GB of memory in one page.
> - */
> - unsigned long *sb_bitmap;
> + /*
> + * Bitmap: one bit per subblock. Allocated similar to
> + * sbm.mb_states.
> + *
> + * A set bit means the corresponding subblock is plugged,
> + * otherwise it's unplugged.
> + *
> + * With 4 MiB subblocks, we manage 128 GiB of memory in one
> + * 4 KiB page.
> + */
> + unsigned long *sb_states;
> + } sbm;
>
> /*
> - * Mutex that protects the sbm.mb_count, sbm.mb_states, and sb_bitmap.
> + * Mutex that protects the sbm.mb_count, sbm.mb_states, and
> + * sbm.sb_states.
> *
> * When this lock is held the pointers can't change, ONLINE and
> * OFFLINE blocks can't change the state and no subblocks will get
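
Just double-checking the figure in the new comment for myself: one 4 KiB
page holds 32768 bits, and with one bit per 4 MiB subblock that is indeed
128 GiB of managed memory. A throwaway userspace sketch (the 4 KiB / 4 MiB
values are taken from the comment above; nothing here comes from the
driver itself):

#include <stdio.h>

int main(void)
{
	const unsigned long page_size = 4096;              /* 4 KiB bitmap page */
	const unsigned long bits_per_page = page_size * 8; /* 32768 subblocks */
	const unsigned long sb_size_mib = 4;               /* 4 MiB subblocks */

	/* 32768 bits * 4 MiB = 131072 MiB = 128 GiB */
	printf("one page tracks %lu GiB\n", bits_per_page * sb_size_mib / 1024);
	return 0;
}
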
> @@ -326,13 +332,13 @@ static int virtio_mem_sbm_mb_states_prepare_next_mb(struct virtio_mem *vm)
> *
> * Will not modify the state of the memory block.
> */
> -static void virtio_mem_mb_set_sb_plugged(struct virtio_mem *vm,
> - unsigned long mb_id, int sb_id,
> - int count)
> +static void virtio_mem_sbm_set_sb_plugged(struct virtio_mem *vm,
> + unsigned long mb_id, int sb_id,
> + int count)
> {
> const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb + sb_id;
>
> - __bitmap_set(vm->sb_bitmap, bit, count);
> + __bitmap_set(vm->sbm.sb_states, bit, count);
> }
>
> /*
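
The linear indexing is easy to misread, so a tiny illustration; the
concrete values (first_mb_id = 100, nb_sb_per_mb = 32, i.e. 128 MiB memory
blocks with 4 MiB subblocks) are made up for the example and not taken
from the driver:

#include <stdio.h>

/* same formula as in virtio_mem_sbm_set_sb_plugged() above */
static int sb_bit(unsigned long first_mb_id, int nb_sb_per_mb,
		  unsigned long mb_id, int sb_id)
{
	return (mb_id - first_mb_id) * nb_sb_per_mb + sb_id;
}

int main(void)
{
	/* 4th tracked block (103 - 100 = 3), subblock 5 -> 3 * 32 + 5 = 101 */
	printf("bit = %d\n", sb_bit(100, 32, 103, 5));
	return 0;
}
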
> @@ -340,86 +346,87 @@ static void virtio_mem_mb_set_sb_plugged(struct virtio_mem *vm,
> *
> * Will not modify the state of the memory block.
> */
> -static void virtio_mem_mb_set_sb_unplugged(struct virtio_mem *vm,
> - unsigned long mb_id, int sb_id,
> - int count)
> +static void virtio_mem_sbm_set_sb_unplugged(struct virtio_mem *vm,
> + unsigned long mb_id, int sb_id,
> + int count)
> {
> const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb + sb_id;
>
> - __bitmap_clear(vm->sb_bitmap, bit, count);
> + __bitmap_clear(vm->sbm.sb_states, bit, count);
> }
>
> /*
> * Test if all selected subblocks are plugged.
> */
> -static bool virtio_mem_mb_test_sb_plugged(struct virtio_mem *vm,
> - unsigned long mb_id, int sb_id,
> - int count)
> +static bool virtio_mem_sbm_test_sb_plugged(struct virtio_mem *vm,
> + unsigned long mb_id, int sb_id,
> + int count)
> {
> const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb + sb_id;
>
> if (count == 1)
> - return test_bit(bit, vm->sb_bitmap);
> + return test_bit(bit, vm->sbm.sb_states);
>
> /* TODO: Helper similar to bitmap_set() */
> - return find_next_zero_bit(vm->sb_bitmap, bit + count, bit) >=
> + return find_next_zero_bit(vm->sbm.sb_states, bit + count, bit) >=
> bit + count;
> }
>
> /*
> * Test if all selected subblocks are unplugged.
> */
> -static bool virtio_mem_mb_test_sb_unplugged(struct virtio_mem *vm,
> - unsigned long mb_id, int sb_id,
> - int count)
> +static bool virtio_mem_sbm_test_sb_unplugged(struct virtio_mem *vm,
> + unsigned long mb_id, int sb_id,
> + int count)
> {
> const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb + sb_id;
>
> /* TODO: Helper similar to bitmap_set() */
> - return find_next_bit(vm->sb_bitmap, bit + count, bit) >= bit + count;
> + return find_next_bit(vm->sbm.sb_states, bit + count, bit) >=
> + bit + count;
> }
>
> /*
> * Find the first unplugged subblock. Returns vm->nb_sb_per_mb in case there is
> * none.
> */
> -static int virtio_mem_mb_first_unplugged_sb(struct virtio_mem *vm,
> +static int virtio_mem_sbm_first_unplugged_sb(struct virtio_mem *vm,
> unsigned long mb_id)
> {
> const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb;
>
> - return find_next_zero_bit(vm->sb_bitmap, bit + vm->nb_sb_per_mb, bit) -
> - bit;
> + return find_next_zero_bit(vm->sbm.sb_states,
> + bit + vm->nb_sb_per_mb, bit) - bit;
> }
>
> /*
> * Prepare the subblock bitmap for the next memory block.
> */
> -static int virtio_mem_sb_bitmap_prepare_next_mb(struct virtio_mem *vm)
> +static int virtio_mem_sbm_sb_states_prepare_next_mb(struct virtio_mem *vm)
> {
> const unsigned long old_nb_mb = vm->next_mb_id - vm->first_mb_id;
> const unsigned long old_nb_bits = old_nb_mb * vm->nb_sb_per_mb;
> const unsigned long new_nb_bits = (old_nb_mb + 1) * vm->nb_sb_per_mb;
> int old_pages = PFN_UP(BITS_TO_LONGS(old_nb_bits) * sizeof(long));
> int new_pages = PFN_UP(BITS_TO_LONGS(new_nb_bits) * sizeof(long));
> - unsigned long *new_sb_bitmap, *old_sb_bitmap;
> + unsigned long *new_bitmap, *old_bitmap;
>
> - if (vm->sb_bitmap && old_pages == new_pages)
> + if (vm->sbm.sb_states && old_pages == new_pages)
> return 0;
>
> - new_sb_bitmap = vzalloc(new_pages * PAGE_SIZE);
> - if (!new_sb_bitmap)
> + new_bitmap = vzalloc(new_pages * PAGE_SIZE);
> + if (!new_bitmap)
> return -ENOMEM;
>
> mutex_lock(&vm->hotplug_mutex);
> - if (new_sb_bitmap)
> - memcpy(new_sb_bitmap, vm->sb_bitmap, old_pages * PAGE_SIZE);
> + if (new_bitmap)
> + memcpy(new_bitmap, vm->sbm.sb_states, old_pages * PAGE_SIZE);
>
> - old_sb_bitmap = vm->sb_bitmap;
> - vm->sb_bitmap = new_sb_bitmap;
> + old_bitmap = vm->sbm.sb_states;
> + vm->sbm.sb_states = new_bitmap;
> mutex_unlock(&vm->hotplug_mutex);
>
> - vfree(old_sb_bitmap);
> + vfree(old_bitmap);
> return 0;
> }
>
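
For the range checks the invariant is: every bit in [bit, bit + count) is
set iff the first zero bit found when searching only up to bit + count
lands at or beyond bit + count (the kernel helper returns the size
argument when nothing is found). A small userspace sketch, with
find_next_zero_bit() re-implemented as a trivial loop purely for the demo:

#include <stdbool.h>
#include <stdio.h>

static unsigned long find_next_zero_bit(const unsigned long *map,
					unsigned long size,
					unsigned long offset)
{
	for (; offset < size; offset++)
		if (!(map[offset / (8 * sizeof(long))] &
		      (1UL << (offset % (8 * sizeof(long))))))
			return offset;
	return size;	/* no zero bit in [offset, size) */
}

int main(void)
{
	unsigned long map[1] = { 0x3c };	/* bits 2..5 set */
	const unsigned long bit = 2, count = 4;
	bool all_plugged;

	all_plugged = find_next_zero_bit(map, bit + count, bit) >= bit + count;
	printf("bits 2..5 all set: %d\n", all_plugged);	/* prints 1 */
	return 0;
}

(Side note, with the made-up 32-subblocks-per-block numbers from above: one
4 KiB bitmap page covers 1024 memory blocks, so the vzalloc() + memcpy()
path in virtio_mem_sbm_sb_states_prepare_next_mb() only runs every 1024
blocks; the old_pages == new_pages check returns early in between.)
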
> @@ -630,7 +637,7 @@ static void virtio_mem_notify_going_offline(struct virtio_mem *vm,
> int sb_id;
>
> for (sb_id = 0; sb_id < vm->nb_sb_per_mb; sb_id++) {
> - if (virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id, 1))
> + if (virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id, 1))
> continue;
> pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
> sb_id * vm->subblock_size);
> @@ -646,7 +653,7 @@ static void virtio_mem_notify_cancel_offline(struct virtio_mem *vm,
> int sb_id;
>
> for (sb_id = 0; sb_id < vm->nb_sb_per_mb; sb_id++) {
> - if (virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id, 1))
> + if (virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id, 1))
> continue;
> pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
> sb_id * vm->subblock_size);
> @@ -936,7 +943,7 @@ static void virtio_mem_online_page_cb(struct page *page, unsigned int order)
> * If plugged, online the pages, otherwise, set them fake
> * offline (PageOffline).
> */
> - if (virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id, 1))
> + if (virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id, 1))
> generic_online_page(page, order);
> else
> virtio_mem_set_fake_offline(PFN_DOWN(addr), 1 << order,
> @@ -1071,7 +1078,7 @@ static int virtio_mem_mb_plug_sb(struct virtio_mem *vm, unsigned long mb_id,
>
> rc = virtio_mem_send_plug_request(vm, addr, size);
> if (!rc)
> - virtio_mem_mb_set_sb_plugged(vm, mb_id, sb_id, count);
> + virtio_mem_sbm_set_sb_plugged(vm, mb_id, sb_id, count);
> return rc;
> }
>
> @@ -1092,7 +1099,7 @@ static int virtio_mem_mb_unplug_sb(struct virtio_mem *vm, unsigned long mb_id,
>
> rc = virtio_mem_send_unplug_request(vm, addr, size);
> if (!rc)
> - virtio_mem_mb_set_sb_unplugged(vm, mb_id, sb_id, count);
> + virtio_mem_sbm_set_sb_unplugged(vm, mb_id, sb_id, count);
> return rc;
> }
>
> @@ -1115,14 +1122,14 @@ static int virtio_mem_mb_unplug_any_sb(struct virtio_mem *vm,
> while (*nb_sb) {
> /* Find the next candidate subblock */
> while (sb_id >= 0 &&
> - virtio_mem_mb_test_sb_unplugged(vm, mb_id, sb_id, 1))
> + virtio_mem_sbm_test_sb_unplugged(vm, mb_id, sb_id, 1))
> sb_id--;
> if (sb_id < 0)
> break;
> /* Try to unplug multiple subblocks at a time */
> count = 1;
> while (count < *nb_sb && sb_id > 0 &&
> - virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id - 1, 1)) {
> + virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id - 1, 1)) {
> count++;
> sb_id--;
> }
> @@ -1168,7 +1175,7 @@ static int virtio_mem_prepare_next_mb(struct virtio_mem *vm,
> return rc;
>
> /* Resize the subblock bitmap if required. */
> - rc = virtio_mem_sb_bitmap_prepare_next_mb(vm);
> + rc = virtio_mem_sbm_sb_states_prepare_next_mb(vm);
> if (rc)
> return rc;
>
> @@ -1253,14 +1260,13 @@ static int virtio_mem_mb_plug_any_sb(struct virtio_mem *vm, unsigned long mb_id,
> return -EINVAL;
>
> while (*nb_sb) {
> - sb_id = virtio_mem_mb_first_unplugged_sb(vm, mb_id);
> + sb_id = virtio_mem_sbm_first_unplugged_sb(vm, mb_id);
> if (sb_id >= vm->nb_sb_per_mb)
> break;
> count = 1;
> while (count < *nb_sb &&
> sb_id + count < vm->nb_sb_per_mb &&
> - !virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id + count,
> - 1))
> + !virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id + count, 1))
> count++;
>
> rc = virtio_mem_mb_plug_sb(vm, mb_id, sb_id, count);
> @@ -1277,7 +1283,7 @@ static int virtio_mem_mb_plug_any_sb(struct virtio_mem *vm, unsigned long mb_id,
> virtio_mem_fake_online(pfn, nr_pages);
> }
>
> - if (virtio_mem_mb_test_sb_plugged(vm, mb_id, 0, vm->nb_sb_per_mb)) {
> + if (virtio_mem_sbm_test_sb_plugged(vm, mb_id, 0, vm->nb_sb_per_mb)) {
> if (online)
> virtio_mem_sbm_set_mb_state(vm, mb_id,
> VIRTIO_MEM_SBM_MB_ONLINE);
> @@ -1377,13 +1383,13 @@ static int virtio_mem_mb_unplug_any_sb_offline(struct virtio_mem *vm,
> rc = virtio_mem_mb_unplug_any_sb(vm, mb_id, nb_sb);
>
> /* some subblocks might have been unplugged even on failure */
> - if (!virtio_mem_mb_test_sb_plugged(vm, mb_id, 0, vm->nb_sb_per_mb))
> + if (!virtio_mem_sbm_test_sb_plugged(vm, mb_id, 0, vm->nb_sb_per_mb))
> virtio_mem_sbm_set_mb_state(vm, mb_id,
> VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL);
> if (rc)
> return rc;
>
> - if (virtio_mem_mb_test_sb_unplugged(vm, mb_id, 0, vm->nb_sb_per_mb)) {
> + if (virtio_mem_sbm_test_sb_unplugged(vm, mb_id, 0, vm->nb_sb_per_mb)) {
> /*
> * Remove the block from Linux - this should never fail.
> * Hinder the block from getting onlined by marking it
> @@ -1452,7 +1458,7 @@ static int virtio_mem_mb_unplug_any_sb_online(struct virtio_mem *vm,
>
> /* If possible, try to unplug the complete block in one shot. */
> if (*nb_sb >= vm->nb_sb_per_mb &&
> - virtio_mem_mb_test_sb_plugged(vm, mb_id, 0, vm->nb_sb_per_mb)) {
> + virtio_mem_sbm_test_sb_plugged(vm, mb_id, 0, vm->nb_sb_per_mb)) {
> rc = virtio_mem_mb_unplug_sb_online(vm, mb_id, 0,
> vm->nb_sb_per_mb);
> if (!rc) {
> @@ -1466,7 +1472,7 @@ static int virtio_mem_mb_unplug_any_sb_online(struct virtio_mem *vm,
> for (sb_id = vm->nb_sb_per_mb - 1; sb_id >= 0 && *nb_sb; sb_id--) {
> /* Find the next candidate subblock */
> while (sb_id >= 0 &&
> - !virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id, 1))
> + !virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id, 1))
> sb_id--;
> if (sb_id < 0)
> break;
> @@ -1485,7 +1491,7 @@ static int virtio_mem_mb_unplug_any_sb_online(struct virtio_mem *vm,
> * remove it. This will usually not fail, as no memory is in use
> * anymore - however some other notifiers might NACK the request.
> */
> - if (virtio_mem_mb_test_sb_unplugged(vm, mb_id, 0, vm->nb_sb_per_mb)) {
> + if (virtio_mem_sbm_test_sb_unplugged(vm, mb_id, 0, vm->nb_sb_per_mb)) {
> mutex_unlock(&vm->hotplug_mutex);
> rc = virtio_mem_mb_offline_and_remove(vm, mb_id);
> mutex_lock(&vm->hotplug_mutex);
> @@ -2007,7 +2013,7 @@ static void virtio_mem_remove(struct virtio_device *vdev)
>
> /* remove all tracking data - no locking needed */
> vfree(vm->sbm.mb_states);
> - vfree(vm->sb_bitmap);
> + vfree(vm->sbm.sb_states);
>
> /* reset the device and cleanup the queues */
> vdev->config->reset(vdev);
Reviewed-by: Pankaj Gupta <pankaj.gupta.linux@gmail.com>
Thread overview: 109+ messages
2020-10-12 12:52 [PATCH v1 00/29] virtio-mem: Big Block Mode (BBM) David Hildenbrand
2020-10-12 12:52 ` [PATCH v1 01/29] virtio-mem: determine nid only once using memory_add_physaddr_to_nid() David Hildenbrand
2020-10-15 3:56 ` Wei Yang
2020-10-15 19:26 ` Pankaj Gupta
2020-10-12 12:52 ` [PATCH v1 02/29] virtio-mem: simplify calculation in virtio_mem_mb_state_prepare_next_mb() David Hildenbrand
2020-10-15 4:02 ` Wei Yang
2020-10-15 8:00 ` David Hildenbrand
2020-10-15 10:00 ` Wei Yang
2020-10-15 10:01 ` David Hildenbrand
2020-10-15 20:24 ` Pankaj Gupta
2020-10-16 9:00 ` David Hildenbrand
2020-10-12 12:52 ` [PATCH v1 03/29] virtio-mem: simplify MAX_ORDER - 1 / pageblock_order handling David Hildenbrand
2020-10-15 7:06 ` Wei Yang
2020-10-12 12:52 ` [PATCH v1 04/29] virtio-mem: drop rc2 in virtio_mem_mb_plug_and_add() David Hildenbrand
2020-10-12 13:09 ` Pankaj Gupta
2020-10-15 7:14 ` Wei Yang
2020-10-12 12:52 ` [PATCH v1 05/29] virtio-mem: generalize check for added memory David Hildenbrand
2020-10-15 8:28 ` Wei Yang
2020-10-15 8:50 ` David Hildenbrand
2020-10-16 2:16 ` Wei Yang
2020-10-16 9:11 ` David Hildenbrand
2020-10-16 10:02 ` Wei Yang
2020-10-16 10:32 ` David Hildenbrand
2020-10-16 22:38 ` Wei Yang
2020-10-17 7:39 ` David Hildenbrand
2020-10-18 12:27 ` Wei Yang
2020-10-16 22:39 ` Wei Yang
2020-10-12 12:53 ` [PATCH v1 06/29] virtio-mem: generalize virtio_mem_owned_mb() David Hildenbrand
2020-10-15 8:32 ` Wei Yang
2020-10-15 8:37 ` David Hildenbrand
2020-10-15 20:30 ` Pankaj Gupta
2020-10-12 12:53 ` [PATCH v1 07/29] virtio-mem: generalize virtio_mem_overlaps_range() David Hildenbrand
2020-10-20 9:22 ` Pankaj Gupta
2020-10-12 12:53 ` [PATCH v1 08/29] virtio-mem: drop last_mb_id David Hildenbrand
2020-10-15 8:35 ` Wei Yang
2020-10-15 20:32 ` Pankaj Gupta
2020-10-12 12:53 ` [PATCH v1 09/29] virtio-mem: don't always trigger the workqueue when offlining memory David Hildenbrand
2020-10-16 4:03 ` Wei Yang
2020-10-16 9:18 ` David Hildenbrand
2020-10-18 3:57 ` Wei Yang
2020-10-19 9:04 ` David Hildenbrand
2020-10-20 0:41 ` Wei Yang
2020-10-20 9:09 ` David Hildenbrand
2020-10-12 12:53 ` [PATCH v1 10/29] virtio-mem: generalize handling when memory is getting onlined deferred David Hildenbrand
2020-10-12 12:53 ` [PATCH v1 11/29] virtio-mem: use "unsigned long" for nr_pages when fake onlining/offlining David Hildenbrand
2020-10-15 20:31 ` Pankaj Gupta
2020-10-16 6:11 ` Wei Yang
2020-10-12 12:53 ` [PATCH v1 12/29] virtio-mem: factor out fake-offlining into virtio_mem_fake_offline() David Hildenbrand
2020-10-16 6:24 ` Wei Yang
2020-10-20 9:31 ` Pankaj Gupta
2020-10-12 12:53 ` [PATCH v1 13/29] virtio-mem: factor out handling of fake-offline pages in memory notifier David Hildenbrand
2020-10-16 7:15 ` Wei Yang
2020-10-16 8:00 ` Wei Yang
2020-10-16 8:57 ` David Hildenbrand
2020-10-18 12:37 ` Wei Yang
2020-10-18 12:38 ` Wei Yang
2020-10-12 12:53 ` [PATCH v1 14/29] virtio-mem: retry fake-offlining via alloc_contig_range() on ZONE_MOVABLE David Hildenbrand
2020-10-12 12:53 ` [PATCH v1 15/29] virito-mem: document Sub Block Mode (SBM) David Hildenbrand
2020-10-15 9:33 ` David Hildenbrand
2020-10-20 9:38 ` Pankaj Gupta
2020-10-16 8:03 ` Wei Yang
2020-10-12 12:53 ` [PATCH v1 16/29] virtio-mem: memory block states are specific to " David Hildenbrand
2020-10-16 8:40 ` Wei Yang
2020-10-16 8:43 ` Wei Yang
2020-10-20 9:48 ` Pankaj Gupta
2020-10-12 12:53 ` [PATCH v1 17/29] virito-mem: subblock " David Hildenbrand
2020-10-16 8:43 ` Wei Yang
2020-10-20 9:54 ` Pankaj Gupta [this message]
2020-10-12 12:53 ` [PATCH v1 18/29] virtio-mem: factor out calculation of the bit number within the sb_states bitmap David Hildenbrand
2020-10-16 8:46 ` Wei Yang
2020-10-20 9:58 ` Pankaj Gupta
2020-10-12 12:53 ` [PATCH v1 19/29] virito-mem: existing (un)plug functions are specific to Sub Block Mode (SBM) David Hildenbrand
2020-10-16 8:49 ` Wei Yang
2020-10-12 12:53 ` [PATCH v1 20/29] virtio-mem: nb_sb_per_mb and subblock_size " David Hildenbrand
2020-10-16 8:51 ` Wei Yang
2020-10-16 8:53 ` Wei Yang
2020-10-16 13:17 ` David Hildenbrand
2020-10-18 12:41 ` Wei Yang
2020-10-19 11:57 ` David Hildenbrand
2020-10-12 12:53 ` [PATCH v1 21/29] virtio-mem: memory notifier callbacks " David Hildenbrand
2020-10-19 1:57 ` Wei Yang
2020-10-19 10:22 ` David Hildenbrand
2020-10-12 12:53 ` [PATCH v1 22/29] virtio-mem: memory block ids " David Hildenbrand
2020-10-16 8:54 ` Wei Yang
2020-10-12 12:53 ` [PATCH v1 23/29] virtio-mem: factor out adding/removing memory from Linux David Hildenbrand
2020-10-16 8:59 ` Wei Yang
2020-10-12 12:53 ` [PATCH v1 24/29] virtio-mem: print debug messages from virtio_mem_send_*_request() David Hildenbrand
2020-10-16 9:07 ` Wei Yang
2020-10-12 12:53 ` [PATCH v1 25/29] virtio-mem: Big Block Mode (BBM) memory hotplug David Hildenbrand
2020-10-16 9:38 ` Wei Yang
2020-10-16 13:13 ` David Hildenbrand
2020-10-19 2:26 ` Wei Yang
2020-10-19 9:15 ` David Hildenbrand
2020-10-12 12:53 ` [PATCH v1 26/29] virtio-mem: allow to force Big Block Mode (BBM) and set the big block size David Hildenbrand
2020-10-12 12:53 ` [PATCH v1 27/29] mm/memory_hotplug: extend offline_and_remove_memory() to handle more than one memory block David Hildenbrand
2020-10-15 13:08 ` Michael S. Tsirkin
2020-10-19 3:22 ` Wei Yang
2020-10-12 12:53 ` [PATCH v1 28/29] virtio-mem: Big Block Mode (BBM) - basic memory hotunplug David Hildenbrand
2020-10-19 3:48 ` Wei Yang
2020-10-19 9:12 ` David Hildenbrand
2020-10-12 12:53 ` [PATCH v1 29/29] virtio-mem: Big Block Mode (BBM) - safe " David Hildenbrand
2020-10-19 7:54 ` Wei Yang
2020-10-19 8:50 ` David Hildenbrand
2020-10-20 0:23 ` Wei Yang
2020-10-20 0:24 ` Wei Yang
2020-10-18 12:49 ` [PATCH v1 00/29] virtio-mem: Big Block Mode (BBM) Wei Yang
2020-10-18 16:35 ` David Hildenbrand
2020-10-18 15:29 ` Michael S. Tsirkin
2020-10-18 16:34 ` David Hildenbrand