* [PATCH 0/2] KVM: fix SMM with ept=1/unrestricted_guest=0
From: Paolo Bonzini @ 2015-10-12 12:09 UTC
To: linux-kernel, kvm; +Cc: aderumier, rkrcmar
The fix is explained in the commit message of patch 2; patch 1 does a
little bit of refactoring to prepare for it.
Paolo Bonzini (2):
KVM: x86: build kvm_userspace_memory_region in x86_set_memory_region
KVM: x86: map/unmap private slots in __x86_set_memory_region
arch/x86/include/asm/kvm_host.h | 6 +--
arch/x86/kvm/vmx.c | 26 +++---------
arch/x86/kvm/x86.c | 93 +++++++++++++++++++----------------------
3 files changed, 51 insertions(+), 74 deletions(-)
--
2.5.0
* [PATCH 1/2] KVM: x86: build kvm_userspace_memory_region in x86_set_memory_region
From: Paolo Bonzini @ 2015-10-12 12:09 UTC
To: linux-kernel, kvm; +Cc: aderumier, rkrcmar, stable
The next patch will make x86_set_memory_region fill in the
userspace_addr. Since the struct would no longer pass through
untouched, it makes sense to build it in x86_set_memory_region
directly; this also simplifies the callers.
Reported-by: Alexandre DERUMIER <aderumier@odiso.com>
Cc: stable@vger.kernel.org
Fixes: 9da0e4d5ac969909f6b435ce28ea28135a9cbd69
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
arch/x86/include/asm/kvm_host.h | 6 ++----
arch/x86/kvm/vmx.c | 26 ++++++--------------------
arch/x86/kvm/x86.c | 31 +++++++++++++------------------
3 files changed, 21 insertions(+), 42 deletions(-)
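For illustration, the new calling convention collapses each caller into
a single call; e.g. the vmx.c TSS setup in the hunks below shrinks from
a four-field struct initialization to:

	ret = x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, addr,
				    PAGE_SIZE * 3);

with the slot id, guest physical address and size passed directly.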
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 49ec9038ec14..4e7ad7e022b0 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1199,9 +1199,7 @@ void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
int kvm_is_in_guest(void);
-int __x86_set_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem);
-int x86_set_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem);
+int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
+int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
#endif /* _ASM_X86_KVM_HOST_H */
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 83b7b5cd75d5..562e2013234d 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -4105,17 +4105,13 @@ static void seg_setup(int seg)
static int alloc_apic_access_page(struct kvm *kvm)
{
struct page *page;
- struct kvm_userspace_memory_region kvm_userspace_mem;
int r = 0;
mutex_lock(&kvm->slots_lock);
if (kvm->arch.apic_access_page_done)
goto out;
- kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
- kvm_userspace_mem.flags = 0;
- kvm_userspace_mem.guest_phys_addr = APIC_DEFAULT_PHYS_BASE;
- kvm_userspace_mem.memory_size = PAGE_SIZE;
- r = __x86_set_memory_region(kvm, &kvm_userspace_mem);
+ r = __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
+ APIC_DEFAULT_PHYS_BASE, PAGE_SIZE);
if (r)
goto out;
@@ -4140,17 +4136,12 @@ static int alloc_identity_pagetable(struct kvm *kvm)
{
/* Called with kvm->slots_lock held. */
- struct kvm_userspace_memory_region kvm_userspace_mem;
int r = 0;
BUG_ON(kvm->arch.ept_identity_pagetable_done);
- kvm_userspace_mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT;
- kvm_userspace_mem.flags = 0;
- kvm_userspace_mem.guest_phys_addr =
- kvm->arch.ept_identity_map_addr;
- kvm_userspace_mem.memory_size = PAGE_SIZE;
- r = __x86_set_memory_region(kvm, &kvm_userspace_mem);
+ r = __x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT,
+ kvm->arch.ept_identity_map_addr, PAGE_SIZE);
return r;
}
@@ -4949,14 +4940,9 @@ static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
{
int ret;
- struct kvm_userspace_memory_region tss_mem = {
- .slot = TSS_PRIVATE_MEMSLOT,
- .guest_phys_addr = addr,
- .memory_size = PAGE_SIZE * 3,
- .flags = 0,
- };
- ret = x86_set_memory_region(kvm, &tss_mem);
+ ret = x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, addr,
+ PAGE_SIZE * 3);
if (ret)
return ret;
kvm->arch.tss_addr = addr;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 8f0f6eca69da..a3a4cf900e0c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7714,18 +7714,21 @@ void kvm_arch_sync_events(struct kvm *kvm)
kvm_free_pit(kvm);
}
-int __x86_set_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem)
+int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
{
int i, r;
/* Called with kvm->slots_lock held. */
- BUG_ON(mem->slot >= KVM_MEM_SLOTS_NUM);
+ if (WARN_ON(id >= KVM_MEM_SLOTS_NUM))
+ return -EINVAL;
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
- struct kvm_userspace_memory_region m = *mem;
+ struct kvm_userspace_memory_region m;
- m.slot |= i << 16;
+ m.slot = id | (i << 16);
+ m.flags = 0;
+ m.guest_phys_addr = gpa;
+ m.memory_size = size;
r = __kvm_set_memory_region(kvm, &m);
if (r < 0)
return r;
@@ -7735,13 +7738,12 @@ int __x86_set_memory_region(struct kvm *kvm,
}
EXPORT_SYMBOL_GPL(__x86_set_memory_region);
-int x86_set_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem)
+int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
{
int r;
mutex_lock(&kvm->slots_lock);
- r = __x86_set_memory_region(kvm, mem);
+ r = __x86_set_memory_region(kvm, id, gpa, size);
mutex_unlock(&kvm->slots_lock);
return r;
@@ -7756,16 +7758,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
* unless the memory map has changed due to process exit
* or fd copying.
*/
- struct kvm_userspace_memory_region mem;
- memset(&mem, 0, sizeof(mem));
- mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
- x86_set_memory_region(kvm, &mem);
-
- mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT;
- x86_set_memory_region(kvm, &mem);
-
- mem.slot = TSS_PRIVATE_MEMSLOT;
- x86_set_memory_region(kvm, &mem);
+ x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, 0, 0);
+ x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT, 0, 0);
+ x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0);
}
kvm_iommu_unmap_guest(kvm);
kfree(kvm->arch.vpic);
--
2.5.0
* [PATCH 2/2] KVM: x86: map/unmap private slots in __x86_set_memory_region
From: Paolo Bonzini @ 2015-10-12 12:09 UTC
To: linux-kernel, kvm; +Cc: aderumier, rkrcmar, stable
Otherwise, two copies (one of them never used and thus bogus) are
allocated for the regular and SMM address spaces. This breaks
SMM with EPT but without unrestricted guest support, because the
SMM copy of the identity page map is all zeros.
By moving the allocation to the caller we also remove the last
vestiges of kernel-allocated memory regions (not accessible anymore
in userspace since commit b74a07beed0e, "KVM: Remove kernel-allocated
memory regions", 2010-06-21); that is a nice bonus.
Reported-by: Alexandre DERUMIER <aderumier@odiso.com>
Cc: stable@vger.kernel.org
Fixes: 9da0e4d5ac969909f6b435ce28ea28135a9cbd69
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
arch/x86/kvm/x86.c | 62 ++++++++++++++++++++++++++----------------------------
1 file changed, 30 insertions(+), 32 deletions(-)
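To make the failure mode concrete, here is a rough sketch of the
pre-patch flow, simplified from the hunks below (abbreviated, not the
exact call chain):

	/* __x86_set_memory_region() looped over both address spaces... */
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		m.slot = id | (i << 16);
		__kvm_set_memory_region(kvm, &m);
		/*
		 * ...and kvm_arch_prepare_memory_region() then called
		 * vm_mmap() for each KVM_MR_CREATE, so the SMM copy of
		 * the identity page table landed in a second, freshly
		 * zeroed anonymous mapping that nothing ever wrote to.
		 */
	}

After the patch, vm_mmap() runs once in __x86_set_memory_region() and
the same hva is installed in both address spaces.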
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a3a4cf900e0c..ab59eccb9e78 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7717,23 +7717,53 @@ void kvm_arch_sync_events(struct kvm *kvm)
int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
{
int i, r;
+ u64 hva;
+ struct kvm_memslots *slots = kvm_memslots(kvm);
+ struct kvm_memory_slot *slot, old;
/* Called with kvm->slots_lock held. */
if (WARN_ON(id >= KVM_MEM_SLOTS_NUM))
return -EINVAL;
+ slot = &slots->memslots[slots->id_to_index[id]];
+ if (size) {
+ if (WARN_ON(slot->npages))
+ return -EEXIST;
+
+ /*
+ * MAP_SHARED to prevent internal slot pages from being moved
+ * by fork()/COW.
+ */
+ hva = vm_mmap(NULL, 0, size, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_ANONYMOUS, 0);
+ if (IS_ERR((void *)hva))
+ return PTR_ERR((void *)hva);
+ } else {
+ if (!slot->npages)
+ return 0;
+
+ hva = 0;
+ }
+
+ old = *slot;
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
struct kvm_userspace_memory_region m;
m.slot = id | (i << 16);
m.flags = 0;
m.guest_phys_addr = gpa;
+ m.userspace_addr = hva;
m.memory_size = size;
r = __kvm_set_memory_region(kvm, &m);
if (r < 0)
return r;
}
+ if (!size) {
+ r = vm_munmap(old.userspace_addr, old.npages * PAGE_SIZE);
+ WARN_ON(r < 0);
+ }
+
return 0;
}
EXPORT_SYMBOL_GPL(__x86_set_memory_region);
@@ -7863,27 +7893,6 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
const struct kvm_userspace_memory_region *mem,
enum kvm_mr_change change)
{
- /*
- * Only private memory slots need to be mapped here since
- * KVM_SET_MEMORY_REGION ioctl is no longer supported.
- */
- if ((memslot->id >= KVM_USER_MEM_SLOTS) && (change == KVM_MR_CREATE)) {
- unsigned long userspace_addr;
-
- /*
- * MAP_SHARED to prevent internal slot pages from being moved
- * by fork()/COW.
- */
- userspace_addr = vm_mmap(NULL, 0, memslot->npages * PAGE_SIZE,
- PROT_READ | PROT_WRITE,
- MAP_SHARED | MAP_ANONYMOUS, 0);
-
- if (IS_ERR((void *)userspace_addr))
- return PTR_ERR((void *)userspace_addr);
-
- memslot->userspace_addr = userspace_addr;
- }
-
return 0;
}
@@ -7945,17 +7954,6 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
{
int nr_mmu_pages = 0;
- if (change == KVM_MR_DELETE && old->id >= KVM_USER_MEM_SLOTS) {
- int ret;
-
- ret = vm_munmap(old->userspace_addr,
- old->npages * PAGE_SIZE);
- if (ret < 0)
- printk(KERN_WARNING
- "kvm_vm_ioctl_set_memory_region: "
- "failed to munmap memory\n");
- }
-
if (!kvm->arch.n_requested_mmu_pages)
nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
--
2.5.0
* Re: [PATCH 1/2] KVM: x86: build kvm_userspace_memory_region in x86_set_memory_region
From: Radim Krčmář @ 2015-10-13 15:03 UTC
To: Paolo Bonzini; +Cc: linux-kernel, kvm, aderumier, stable
2015-10-12 14:09+0200, Paolo Bonzini:
> The next patch will make x86_set_memory_region fill in the
> userspace_addr. Since the struct would no longer pass through
> untouched, it makes sense to build it in x86_set_memory_region
> directly; this also simplifies the callers.
>
> Reported-by: Alexandre DERUMIER <aderumier@odiso.com>
> Cc: stable@vger.kernel.org
> Fixes: 9da0e4d5ac969909f6b435ce28ea28135a9cbd69
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> @@ -1199,9 +1199,7 @@ void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
> -int __x86_set_memory_region(struct kvm *kvm,
> - const struct kvm_userspace_memory_region *mem);
> -int x86_set_memory_region(struct kvm *kvm,
> - const struct kvm_userspace_memory_region *mem);
> +int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
> +int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
kvm_userspace_memory_region has a u64 size, but we only use this for a
few pages anyway, and will hopefully get a warning from GCC if that changes.
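A two-line illustration of the truncation hazard (not from the patch;
note that plain GCC only diagnoses the implicit conversion with
-Wconversion):

	u64 len = 1ULL << 32;	/* 4 GiB */
	x86_set_memory_region(kvm, id, gpa, len);	/* u32 parameter: size silently becomes 0 */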
Patch makes the code much better,
Reviewed-by: Radim Krčmář <rkrcmar@redhat.com>
* Re: [PATCH 2/2] KVM: x86: map/unmap private slots in __x86_set_memory_region
From: Radim Krčmář @ 2015-10-13 15:39 UTC
To: Paolo Bonzini; +Cc: linux-kernel, kvm, aderumier, stable
2015-10-12 14:09+0200, Paolo Bonzini:
> Otherwise, two copies (one of them never used and thus bogus) are
> allocated for the regular and SMM address spaces. This breaks
> SMM with EPT but without unrestricted guest support, because the
> SMM copy of the identity page map is all zeros.
(Have you found out why EPT+unrestricted didn't use the alternative SMM
mapping as well?)
> By moving the allocation to the caller we also remove the last
> vestiges of kernel-allocated memory regions (not accessible anymore
> in userspace since commit b74a07beed0e, "KVM: Remove kernel-allocated
> memory regions", 2010-06-21); that is a nice bonus.
>
> Reported-by: Alexandre DERUMIER <aderumier@odiso.com>
> Cc: stable@vger.kernel.org
> Fixes: 9da0e4d5ac969909f6b435ce28ea28135a9cbd69
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
vm_mmap() leaks if __kvm_set_memory_region() fails. It's nothing new,
and the process termination that follows should take care of it,
Reviewed-by: Radim Krčmář <rkrcmar@redhat.com>
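For the record, one way to plug the leak would be roughly the
following, sketched against the new code and untested; it also glosses
over unwinding an address space that was already updated:

	r = __kvm_set_memory_region(kvm, &m);
	if (r < 0) {
		if (size)
			vm_munmap(hva, size);	/* drop the fresh mapping */
		return r;
	}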
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> @@ -7717,23 +7717,53 @@ void kvm_arch_sync_events(struct kvm *kvm)
> int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
> {
> int i, r;
> + u64 hva;
> + struct kvm_memslots *slots = kvm_memslots(kvm);
> + struct kvm_memory_slot *slot, old;
| [...]
> + slot = &slots->memslots[slots->id_to_index[id]];
This seems better written as
slot = id_to_memslot(slots, id);
(Made me remember that I want to refactor the memslot API ...)
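For reference, the helper is roughly this in include/linux/kvm_host.h,
i.e. the same lookup plus a sanity check on the id:

	static inline struct kvm_memory_slot *
	id_to_memslot(struct kvm_memslots *slots, int id)
	{
		int index = slots->id_to_index[id];
		struct kvm_memory_slot *slot;

		slot = &slots->memslots[index];

		WARN_ON(slot->id != id);
		return slot;
	}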
| [...]
> + } else {
> + if (!slot->npages)
> + return 0;
> +
> + hva = 0;
> + }
> +
> + old = *slot;
(Assignment could be in the 'else' == !size branch, GCC would have fun.)
| [...]
> + if (!size) {
> + r = vm_munmap(old.userspace_addr, old.npages * PAGE_SIZE);
> + WARN_ON(r < 0);
> + }
* Re: [PATCH 2/2] KVM: x86: map/unmap private slots in __x86_set_memory_region
From: Paolo Bonzini @ 2015-10-13 16:28 UTC
To: Radim Krčmář; +Cc: linux-kernel, kvm, aderumier, stable
On 13/10/2015 17:39, Radim Krčmář wrote:
> 2015-10-12 14:09+0200, Paolo Bonzini:
>> Otherwise, two copies (one of them never used and thus bogus) are
>> allocated for the regular and SMM address spaces. This breaks
>> SMM with EPT but without unrestricted guest support, because the
>> SMM copy of the identity page map is all zeros.
>
> (Have you found out why EPT+unrestricted didn't use the alternative SMM
> mapping as well?)
Yes, that I already knew; EPT+unrestricted uses CR0.PG=0 directly so
it doesn't use the identity page at all. (CR0.PG=0 w/o unrestricted
instead runs with CR0.PG=1. CR3 load and store exits are enabled,
and the guest CR3 always points to the identity page map while the
guest runs).
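In pseudocode (not the actual vmx.c logic, just the distinction):

	if (enable_unrestricted_guest) {
		/* the guest really runs with CR0.PG = 0;
		 * the identity page table is never walked */
	} else {
		/* hardware CR0.PG stays 1; CR3 loads/stores exit,
		 * and the real CR3 points at the identity page table,
		 * which is why its contents matter in this mode */
	}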
>> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
>> @@ -7717,23 +7717,53 @@ void kvm_arch_sync_events(struct kvm *kvm)
>> int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
>> {
>> int i, r;
>> + u64 hva;
>> + struct kvm_memslots *slots = kvm_memslots(kvm);
>> + struct kvm_memory_slot *slot, old;
> | [...]
>> + slot = &slots->memslots[slots->id_to_index[id]];
>
> This seems better written as
>
> slot = id_to_memslot(slots, id);
Gah, I could not recall the right API! I'll fix it.
> (Made me remember that I want to refactor the memslot API ...)
>
> | [...]
>> + } else {
>> + if (!slot->npages)
>> + return 0;
>> +
>> + hva = 0;
>> + }
>> +
>> + old = *slot;
>
> (Assignment could be in the 'else' == !size branch, GCC would have fun.)
It would have fun _and_ warn, which is why it's not in the else branch. :)
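Reduced to a standalone shape (hypothetical code, just to show the
warning):

	struct kvm_memory_slot old;

	if (size) {
		/* 'old' is never read on this path */
	} else {
		old = *slot;	/* moving the copy here... */
	}

	if (!size)
		/* ...makes GCC report 'old' as possibly used
		 * uninitialized, since it doesn't always connect
		 * the two !size tests. */
		vm_munmap(old.userspace_addr, old.npages * PAGE_SIZE);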
Paolo
* Re: [PATCH 2/2] KVM: x86: map/unmap private slots in __x86_set_memory_region
From: Radim Krčmář @ 2015-10-13 16:45 UTC
To: Paolo Bonzini; +Cc: linux-kernel, kvm, aderumier, stable
2015-10-13 18:28+0200, Paolo Bonzini:
> On 13/10/2015 17:39, Radim Krčmář wrote:
>> 2015-10-12 14:09+0200, Paolo Bonzini:
>>> Otherwise, two copies (one of them never used and thus bogus) are
>>> allocated for the regular and SMM address spaces. This breaks
>>> SMM with EPT but without unrestricted guest support, because the
>>> SMM copy of the identity page map is all zeros.
>>
>> (Have you found out why EPT+unrestricted didn't use the alternative SMM
>> mapping as well?)
>
> Yes, that I already knew; EPT+unrestricted uses CR0.PG=0 directly so
> it doesn't use the identity page at all. (CR0.PG=0 w/o unrestricted
> instead runs with CR0.PG=1. CR3 load and store exits are enabled,
> and the guest CR3 always points to the identity page map while the
> guest runs).
Thank you.
>>> + } else {
>>> + if (!slot->npages)
>>> + return 0;
>>> +
>>> + hva = 0;
>>> + }
>>> +
>>> + old = *slot;
>>
>> (Assignment could be in the 'else' == !size branch, GCC would have fun.)
>
> It would have fun _and_ warn, which is why it's not in the else branch. :)
I wondered if its "used uninitialized" analyzer got any better :)