From: Ackerley Tng <ackerleytng@google.com>
To: pbonzini@redhat.com, seanjc@google.com, tglx@linutronix.de,
x86@kernel.org, kvm@vger.kernel.org,
linux-kernel@vger.kernel.org, linux-kselftest@vger.kernel.org
Cc: mingo@redhat.com, bp@alien8.de, dave.hansen@linux.intel.com,
hpa@zytor.com, shuah@kernel.org, andrew.jones@linux.dev,
ricarkol@google.com, chao.p.peng@linux.intel.com,
tabba@google.com, jarkko@kernel.org, yu.c.zhang@linux.intel.com,
vannapurve@google.com, ackerleytng@google.com,
erdemaktas@google.com, mail@maciej.szmigiero.name,
vbabka@suse.cz, david@redhat.com, qperret@google.com,
michael.roth@amd.com, wei.w.wang@intel.com,
liam.merwick@oracle.com, isaku.yamahata@gmail.com,
kirill.shutemov@linux.intel.com, Sagi Shahar <sagis@google.com>
Subject: [RFC PATCH 09/11] KVM: x86: Handle moving of memory context for intra-host migration
Date: Mon, 7 Aug 2023 23:01:12 +0000 [thread overview]
Message-ID: <a3d025e75df68558dcd8a12656772c27c4a36f97.1691446946.git.ackerleytng@google.com> (raw)
In-Reply-To: <cover.1691446946.git.ackerleytng@google.com>
Migration of the memory context involves moving the per-memslot lpage_info
arrays and the VM-wide mem_attr_array from the source VM to the destination VM.
Co-developed-by: Sagi Shahar <sagis@google.com>
Signed-off-by: Sagi Shahar <sagis@google.com>
Co-developed-by: Vishal Annapurve <vannapurve@google.com>
Signed-off-by: Vishal Annapurve <vannapurve@google.com>
Signed-off-by: Ackerley Tng <ackerleytng@google.com>
---
arch/x86/kvm/x86.c | 110 +++++++++++++++++++++++++++++++++++++++
include/linux/kvm_host.h | 17 ++++++
virt/kvm/guest_mem.c | 25 +++++++++
3 files changed, 152 insertions(+)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a1a28dd77b94..12688754c556 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4402,6 +4402,33 @@ void kvm_unlock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
}
EXPORT_SYMBOL_GPL(kvm_unlock_two_vms);
+/*
+ * Take slots_lock on both VMs, destination first, for the duration of the
+ * memory-context move.  Rejects dst == src.
+ *
+ * Returns 0 with both locks held on success; -EINVAL if the two VMs are the
+ * same; -EINTR if the caller was killed while waiting for either lock.  On
+ * failure neither lock is held.
+ *
+ * NOTE(review): dst is always locked before src with no global ordering, so
+ * two simultaneous migrations in opposite directions could deadlock —
+ * presumably prevented by the migration_in_progress marking done before this
+ * is called (see kvm_vm_move_enc_context_from); verify.
+ */
+static int kvm_lock_vm_memslots(struct kvm *dst_kvm, struct kvm *src_kvm)
+{
+ int r = -EINVAL;
+
+ /* Migrating a VM's memory context onto itself makes no sense. */
+ if (dst_kvm == src_kvm)
+ return r;
+
+ r = -EINTR;
+ if (mutex_lock_killable(&dst_kvm->slots_lock))
+ return r;
+
+ /* SINGLE_DEPTH_NESTING: tell lockdep this is a second lock of the same class. */
+ if (mutex_lock_killable_nested(&src_kvm->slots_lock, SINGLE_DEPTH_NESTING))
+ goto unlock_dst;
+
+ return 0;
+
+unlock_dst:
+ mutex_unlock(&dst_kvm->slots_lock);
+ return r;
+}
+
+/* Drop both slots_lock in reverse acquisition order (src was taken last). */
+static void kvm_unlock_vm_memslots(struct kvm *dst_kvm, struct kvm *src_kvm)
+{
+ mutex_unlock(&src_kvm->slots_lock);
+ mutex_unlock(&dst_kvm->slots_lock);
+}
+
/*
* Read or write a bunch of msrs. All parameters are kernel addresses.
*
@@ -6325,6 +6352,78 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
return 0;
}
+/*
+ * Returns true iff src and dst have pairwise-identical memslot layouts:
+ * same base_gfn, npages and flags for every slot, and — for slots that can
+ * be private — matching guest_mem backing (same inode, same pgoff).
+ *
+ * NOTE(review): userspace_addr is deliberately not compared, so the HVAs
+ * backing the two VMs may differ; confirm this is intended given that
+ * lpage_info (which encodes HVA/gfn alignment) is later moved wholesale in
+ * kvm_move_memory_ctxt_from().
+ */
+static bool memslot_configurations_match(struct kvm_memslots *src_slots,
+ struct kvm_memslots *dst_slots)
+{
+ struct kvm_memslot_iter src_iter;
+ struct kvm_memslot_iter dst_iter;
+
+ kvm_for_each_memslot_pair(&src_iter, src_slots, &dst_iter, dst_slots) {
+ if (src_iter.slot->base_gfn != dst_iter.slot->base_gfn ||
+ src_iter.slot->npages != dst_iter.slot->npages ||
+ src_iter.slot->flags != dst_iter.slot->flags)
+ return false;
+
+ /* Private-capable slots must also share the same gmem backing. */
+ if (kvm_slot_can_be_private(dst_iter.slot) &&
+ !kvm_gmem_params_match(src_iter.slot, dst_iter.slot))
+ return false;
+ }
+
+ /*
+ * The paired loop stops when either tree runs out; configurations only
+ * match if both iterators were exhausted at the same time.
+ */
+ /* There should be no more nodes to iterate if configurations match */
+ return !src_iter.node && !dst_iter.node;
+}
+
+/*
+ * Transfer per-VM memory metadata from src to dst for intra-host migration:
+ * ownership of each slot's lpage_info arrays and of the VM's mem_attr_array
+ * moves to dst, the src copies are nulled out, and src is marked dead.
+ *
+ * Caller must hold both VMs' slots_lock (see kvm_lock_vm_memslots()).
+ *
+ * Returns 0 on success, -EINVAL if the two VMs' memslot configurations do
+ * not match.
+ */
+static int kvm_move_memory_ctxt_from(struct kvm *dst, struct kvm *src)
+{
+ struct kvm_memslot_iter src_iter;
+ struct kvm_memslot_iter dst_iter;
+ struct kvm_memslots *src_slots, *dst_slots;
+ int i;
+
+ /* TODO: Do we also need to check consistency for as_id == SMM? */
+ src_slots = __kvm_memslots(src, 0);
+ dst_slots = __kvm_memslots(dst, 0);
+
+ if (!memslot_configurations_match(src_slots, dst_slots))
+ return -EINVAL;
+
+ /*
+ * Transferring lpage_info is an optimization, lpage_info can be rebuilt
+ * by the destination VM.
+ */
+ kvm_for_each_memslot_pair(&src_iter, src_slots, &dst_iter, dst_slots) {
+ for (i = 1; i < KVM_NR_PAGE_SIZES; ++i) {
+ unsigned long ugfn = dst_iter.slot->userspace_addr >> PAGE_SHIFT;
+ int level = i + 1;
+
+ /*
+ * If the gfn and userspace address are not aligned wrt each
+ * other, skip migrating lpage_info.
+ *
+ * NOTE(review): only dst's userspace_addr is checked here;
+ * if src's slot had a different HVA alignment, its
+ * lpage_info presumably carries disallow flags reflecting
+ * src's alignment — verify against the lpage_info
+ * construction in kvm_alloc_memslot_metadata().
+ */
+ if ((dst_iter.slot->base_gfn ^ ugfn) &
+ (KVM_PAGES_PER_HPAGE(level) - 1))
+ continue;
+
+ /* dst takes ownership; free its own copy and steal src's. */
+ kvfree(dst_iter.slot->arch.lpage_info[i - 1]);
+ dst_iter.slot->arch.lpage_info[i - 1] =
+ src_iter.slot->arch.lpage_info[i - 1];
+ src_iter.slot->arch.lpage_info[i - 1] = NULL;
+ }
+ }
+
+#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
+ /*
+ * For VMs that don't use private memory, this will just be moving an
+ * empty xarray pointer.
+ *
+ * NOTE(review): poking xa_head directly bypasses the XArray API and its
+ * locking; this relies on both VMs being quiesced (slots_lock held, no
+ * running vCPUs) so there are no concurrent readers/writers — confirm,
+ * or use xa_* primitives / a dedicated move helper instead.
+ */
+ dst->mem_attr_array.xa_head = src->mem_attr_array.xa_head;
+ src->mem_attr_array.xa_head = NULL;
+#endif
+
+ /* src has given up its metadata; it must never run again. */
+ kvm_vm_dead(src);
+ return 0;
+}
+
static int kvm_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
{
int r;
@@ -6351,6 +6450,14 @@ static int kvm_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
if (r)
goto out_mark_migration_done;
+ r = kvm_lock_vm_memslots(kvm, source_kvm);
+ if (r)
+ goto out_unlock;
+
+ r = kvm_move_memory_ctxt_from(kvm, source_kvm);
+ if (r)
+ goto out_unlock_memslots;
+
/*
* Different types of VMs will allow userspace to define if moving
* encryption context should be supported.
@@ -6360,6 +6467,9 @@ static int kvm_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
r = static_call(kvm_x86_vm_move_enc_context_from)(kvm, source_kvm);
}
+out_unlock_memslots:
+ kvm_unlock_vm_memslots(kvm, source_kvm);
+out_unlock:
kvm_unlock_two_vms(kvm, source_kvm);
out_mark_migration_done:
kvm_mark_migration_done(kvm, source_kvm);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 3e03eeca279f..2f44b5d294a8 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1144,6 +1144,15 @@ static inline bool kvm_memslot_iter_is_valid(struct kvm_memslot_iter *iter, gfn_
kvm_memslot_iter_is_valid(iter, end); \
kvm_memslot_iter_next(iter))
+/*
+ * Iterate over two memslot sets in parallel, in ascending gfn order,
+ * stopping as soon as either tree is exhausted.  After the loop, callers
+ * may inspect iter1->node / iter2->node to see which iterator (if either)
+ * still has slots remaining.
+ */
+/* Iterate over a pair of memslots in gfn order until one of the trees end */
+#define kvm_for_each_memslot_pair(iter1, slots1, iter2, slots2) \
+ for (kvm_memslot_iter_start(iter1, slots1, 0), \
+ kvm_memslot_iter_start(iter2, slots2, 0); \
+ kvm_memslot_iter_is_valid(iter1, U64_MAX) && \
+ kvm_memslot_iter_is_valid(iter2, U64_MAX); \
+ kvm_memslot_iter_next(iter1), \
+ kvm_memslot_iter_next(iter2))
+
/*
* KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
* - create a new memory slot
@@ -2359,6 +2368,8 @@ static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn)
#ifdef CONFIG_KVM_PRIVATE_MEM
int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
gfn_t gfn, kvm_pfn_t *pfn, int *max_order);
+bool kvm_gmem_params_match(struct kvm_memory_slot *slot1,
+ struct kvm_memory_slot *slot2);
#else
static inline int kvm_gmem_get_pfn(struct kvm *kvm,
struct kvm_memory_slot *slot, gfn_t gfn,
@@ -2367,6 +2378,12 @@ static inline int kvm_gmem_get_pfn(struct kvm *kvm,
KVM_BUG_ON(1, kvm);
return -EIO;
}
+
+/*
+ * Without CONFIG_KVM_PRIVATE_MEM no slot has guest_mem backing, so no two
+ * slots' gmem parameters can ever match.
+ */
+static inline bool kvm_gmem_params_match(struct kvm_memory_slot *slot1,
+ struct kvm_memory_slot *slot2)
+{
+ return false;
+}
#endif /* CONFIG_KVM_PRIVATE_MEM */
#endif
diff --git a/virt/kvm/guest_mem.c b/virt/kvm/guest_mem.c
index 1b3df273f785..2f84e5c67942 100644
--- a/virt/kvm/guest_mem.c
+++ b/virt/kvm/guest_mem.c
@@ -686,6 +686,31 @@ int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
}
EXPORT_SYMBOL_GPL(kvm_gmem_get_pfn);
+/*
+ * Returns true iff slot1 and slot2 are backed by the same guest_mem inode
+ * at the same page offset, i.e. they name the same gmem backing store.
+ * Returns false if either slot has no gmem file currently bound.
+ *
+ * Takes a temporary reference on each slot's gmem file (dropped via fput
+ * before returning), so the result reflects the binding at the time of the
+ * call.
+ */
+bool kvm_gmem_params_match(struct kvm_memory_slot *slot1,
+ struct kvm_memory_slot *slot2)
+{
+ bool ret;
+ struct file *file1;
+ struct file *file2;
+
+ /* Cheap check first: offsets must match before touching the files. */
+ if (slot1->gmem.pgoff != slot2->gmem.pgoff)
+ return false;
+
+ file1 = kvm_gmem_get_file(slot1);
+ file2 = kvm_gmem_get_file(slot2);
+
+ /* Same inode == same gmem backing, even across linked gmem files. */
+ ret = (file1 && file2 &&
+ file_inode(file1) == file_inode(file2));
+
+ if (file1)
+ fput(file1);
+ if (file2)
+ fput(file2);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(kvm_gmem_params_match);
+
static int kvm_gmem_init_fs_context(struct fs_context *fc)
{
if (!init_pseudo(fc, GUEST_MEMORY_MAGIC))
--
2.41.0.640.ga95def55d0-goog
next prev parent reply other threads:[~2023-08-07 23:02 UTC|newest]
Thread overview: 16+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-08-07 23:01 [RFC PATCH 00/11] New KVM ioctl to link a gmem inode to a new gmem file Ackerley Tng
2023-08-07 23:01 ` [RFC PATCH 01/11] KVM: guest_mem: Refactor out kvm_gmem_alloc_file() Ackerley Tng
2023-08-07 23:01 ` [RFC PATCH 02/11] KVM: guest_mem: Add ioctl KVM_LINK_GUEST_MEMFD Ackerley Tng
2023-08-18 23:20 ` Sean Christopherson
2025-05-06 16:05 ` Ackerley Tng
2023-08-07 23:01 ` [RFC PATCH 03/11] KVM: selftests: Add tests for KVM_LINK_GUEST_MEMFD ioctl Ackerley Tng
2023-08-07 23:01 ` [RFC PATCH 04/11] KVM: selftests: Test transferring private memory to another VM Ackerley Tng
2023-08-07 23:01 ` [RFC PATCH 05/11] KVM: x86: Refactor sev's flag migration_in_progress to kvm struct Ackerley Tng
2023-08-07 23:01 ` [RFC PATCH 06/11] KVM: x86: Refactor common code out of sev.c Ackerley Tng
2023-08-07 23:01 ` [RFC PATCH 07/11] KVM: x86: Refactor common migration preparation code out of sev_vm_move_enc_context_from Ackerley Tng
2023-08-07 23:01 ` [RFC PATCH 08/11] KVM: x86: Let moving encryption context be configurable Ackerley Tng
2023-08-10 14:03 ` Paolo Bonzini
2023-08-17 16:53 ` Ackerley Tng
2023-08-07 23:01 ` Ackerley Tng [this message]
2023-08-07 23:01 ` [RFC PATCH 10/11] KVM: selftests: Generalize migration functions from sev_migrate_tests.c Ackerley Tng
2023-08-07 23:01 ` [RFC PATCH 11/11] KVM: selftests: Add tests for migration of private mem Ackerley Tng
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=a3d025e75df68558dcd8a12656772c27c4a36f97.1691446946.git.ackerleytng@google.com \
--to=ackerleytng@google.com \
--cc=andrew.jones@linux.dev \
--cc=bp@alien8.de \
--cc=chao.p.peng@linux.intel.com \
--cc=dave.hansen@linux.intel.com \
--cc=david@redhat.com \
--cc=erdemaktas@google.com \
--cc=hpa@zytor.com \
--cc=isaku.yamahata@gmail.com \
--cc=jarkko@kernel.org \
--cc=kirill.shutemov@linux.intel.com \
--cc=kvm@vger.kernel.org \
--cc=liam.merwick@oracle.com \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-kselftest@vger.kernel.org \
--cc=mail@maciej.szmigiero.name \
--cc=michael.roth@amd.com \
--cc=mingo@redhat.com \
--cc=pbonzini@redhat.com \
--cc=qperret@google.com \
--cc=ricarkol@google.com \
--cc=sagis@google.com \
--cc=seanjc@google.com \
--cc=shuah@kernel.org \
--cc=tabba@google.com \
--cc=tglx@linutronix.de \
--cc=vannapurve@google.com \
--cc=vbabka@suse.cz \
--cc=wei.w.wang@intel.com \
--cc=x86@kernel.org \
--cc=yu.c.zhang@linux.intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox