* [PATCH] RFC: alias rework
@ 2010-01-25 13:53 Izik Eidus
2010-01-25 19:45 ` Marcelo Tosatti
0 siblings, 1 reply; 9+ messages in thread
From: Izik Eidus @ 2010-01-25 13:53 UTC (permalink / raw)
To: kvm
>From f94dcd1ccabbcdb51ed7c37c5f58f00a5c1b7eec Mon Sep 17 00:00:00 2001
From: Izik Eidus <ieidus@redhat.com>
Date: Mon, 25 Jan 2010 15:49:41 +0200
Subject: [PATCH] RFC: alias rework
This patch removes the old way of aliasing inside kvm
and moves to using aliasing with the same virtual addresses
This patch is really just an early RFC to find out whether you guys
like this direction; I still need to clean some parts of it
and test it more before I feel it is ready to be merged...
Comments are more than welcome.
Thanks.
Signed-off-by: Izik Eidus <ieidus@redhat.com>
---
arch/ia64/include/asm/kvm_host.h | 1 +
arch/ia64/kvm/kvm-ia64.c | 5 --
arch/powerpc/kvm/powerpc.c | 5 --
arch/s390/include/asm/kvm_host.h | 1 +
arch/s390/kvm/kvm-s390.c | 5 --
arch/x86/include/asm/kvm_host.h | 19 ------
arch/x86/include/asm/vmx.h | 6 +-
arch/x86/kvm/mmu.c | 19 ++-----
arch/x86/kvm/x86.c | 114 +++++++++++--------------------------
include/linux/kvm_host.h | 11 +--
virt/kvm/kvm_main.c | 80 +++++++++++++++++++-------
11 files changed, 107 insertions(+), 159 deletions(-)
diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
index a362e67..d5377c2 100644
--- a/arch/ia64/include/asm/kvm_host.h
+++ b/arch/ia64/include/asm/kvm_host.h
@@ -24,6 +24,7 @@
#define __ASM_KVM_HOST_H
#define KVM_MEMORY_SLOTS 32
+#define KVM_ALIAS_SLOTS 0
/* memory slots that does not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 4
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 0618898..3d2559e 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -1947,11 +1947,6 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
return vcpu->arch.timer_fired;
}
-gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
-{
- return gfn;
-}
-
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE) ||
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 51aedd7..50b7d5f 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -35,11 +35,6 @@
#define CREATE_TRACE_POINTS
#include "trace.h"
-gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
-{
- return gfn;
-}
-
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
return !(v->arch.msr & MSR_WE) || !!(v->arch.pending_exceptions);
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 27605b6..6a2112e 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -21,6 +21,7 @@
#define KVM_MAX_VCPUS 64
#define KVM_MEMORY_SLOTS 32
+#define KVM_ALIAS_SLOTS 0
/* memory slots that does not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 4
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 8f09959..5d63f6b 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -741,11 +741,6 @@ void kvm_arch_flush_shadow(struct kvm *kvm)
{
}
-gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
-{
- return gfn;
-}
-
static int __init kvm_s390_init(void)
{
int ret;
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index a1f0b5d..2d2509f 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -367,24 +367,7 @@ struct kvm_vcpu_arch {
u64 hv_vapic;
};
-struct kvm_mem_alias {
- gfn_t base_gfn;
- unsigned long npages;
- gfn_t target_gfn;
-#define KVM_ALIAS_INVALID 1UL
- unsigned long flags;
-};
-
-#define KVM_ARCH_HAS_UNALIAS_INSTANTIATION
-
-struct kvm_mem_aliases {
- struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS];
- int naliases;
-};
-
struct kvm_arch {
- struct kvm_mem_aliases *aliases;
-
unsigned int n_free_mmu_pages;
unsigned int n_requested_mmu_pages;
unsigned int n_alloc_mmu_pages;
@@ -674,8 +657,6 @@ void kvm_disable_tdp(void);
int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
int complete_pio(struct kvm_vcpu *vcpu);
-struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn);
-
static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
{
struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 43f1e9b..bf52a32 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -347,9 +347,9 @@ enum vmcs_field {
#define AR_RESERVD_MASK 0xfffe0f00
-#define TSS_PRIVATE_MEMSLOT (KVM_MEMORY_SLOTS + 0)
-#define APIC_ACCESS_PAGE_PRIVATE_MEMSLOT (KVM_MEMORY_SLOTS + 1)
-#define IDENTITY_PAGETABLE_PRIVATE_MEMSLOT (KVM_MEMORY_SLOTS + 2)
+#define TSS_PRIVATE_MEMSLOT (KVM_MEMORY_SLOTS + KVM_ALIAS_SLOTS + 0)
+#define APIC_ACCESS_PAGE_PRIVATE_MEMSLOT (KVM_MEMORY_SLOTS + KVM_ALIAS_SLOTS + 1)
+#define IDENTITY_PAGETABLE_PRIVATE_MEMSLOT (KVM_MEMORY_SLOTS + KVM_ALIAS_SLOTS + 2)
#define VMX_NR_VPIDS (1 << 16)
#define VMX_VPID_EXTENT_SINGLE_CONTEXT 1
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index ff2b2e8..6f78e6a 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -422,9 +422,7 @@ static void account_shadowed(struct kvm *kvm, gfn_t gfn)
int *write_count;
int i;
- gfn = unalias_gfn(kvm, gfn);
-
- slot = gfn_to_memslot_unaliased(kvm, gfn);
+ slot = gfn_to_memslot(kvm, gfn);
for (i = PT_DIRECTORY_LEVEL;
i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
write_count = slot_largepage_idx(gfn, slot, i);
@@ -438,10 +436,9 @@ static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
int *write_count;
int i;
- gfn = unalias_gfn(kvm, gfn);
for (i = PT_DIRECTORY_LEVEL;
i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
- slot = gfn_to_memslot_unaliased(kvm, gfn);
+ slot = gfn_to_memslot(kvm, gfn);
write_count = slot_largepage_idx(gfn, slot, i);
*write_count -= 1;
WARN_ON(*write_count < 0);
@@ -455,8 +452,7 @@ static int has_wrprotected_page(struct kvm *kvm,
struct kvm_memory_slot *slot;
int *largepage_idx;
- gfn = unalias_gfn(kvm, gfn);
- slot = gfn_to_memslot_unaliased(kvm, gfn);
+ slot = gfn_to_memslot(kvm, gfn);
if (slot) {
largepage_idx = slot_largepage_idx(gfn, slot, level);
return *largepage_idx;
@@ -523,7 +519,6 @@ static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
/*
* Take gfn and return the reverse mapping to it.
- * Note: gfn must be unaliased before this function get called
*/
static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level)
@@ -563,7 +558,6 @@ static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
if (!is_rmap_spte(*spte))
return count;
- gfn = unalias_gfn(vcpu->kvm, gfn);
sp = page_header(__pa(spte));
sp->gfns[spte - sp->spt] = gfn;
rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
@@ -700,7 +694,6 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
u64 *spte;
int i, write_protected = 0;
- gfn = unalias_gfn(kvm, gfn);
rmapp = gfn_to_rmap(kvm, gfn, PT_PAGE_TABLE_LEVEL);
spte = rmap_next(kvm, rmapp, NULL);
@@ -879,7 +872,6 @@ static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
sp = page_header(__pa(spte));
- gfn = unalias_gfn(vcpu->kvm, gfn);
rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
kvm_unmap_rmapp(vcpu->kvm, rmapp, 0);
@@ -2896,7 +2888,6 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
if (pt[i] & PT_WRITABLE_MASK)
pt[i] &= ~PT_WRITABLE_MASK;
}
- kvm_flush_remote_tlbs(kvm);
}
void kvm_mmu_zap_all(struct kvm *kvm)
@@ -3403,8 +3394,8 @@ static void audit_write_protection(struct kvm_vcpu *vcpu)
if (sp->unsync)
continue;
- gfn = unalias_gfn(vcpu->kvm, sp->gfn);
- slot = gfn_to_memslot_unaliased(vcpu->kvm, sp->gfn);
+ gfn = sp->gfn;
+ slot = gfn_to_memslot(vcpu->kvm, gfn);
rmapp = &slot->rmap[gfn - slot->base_gfn];
spte = rmap_next(vcpu->kvm, rmapp, NULL);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 56a90a6..7683c0b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2396,42 +2396,6 @@ static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
return kvm->arch.n_alloc_mmu_pages;
}
-gfn_t unalias_gfn_instantiation(struct kvm *kvm, gfn_t gfn)
-{
- int i;
- struct kvm_mem_alias *alias;
- struct kvm_mem_aliases *aliases;
-
- aliases = rcu_dereference(kvm->arch.aliases);
-
- for (i = 0; i < aliases->naliases; ++i) {
- alias = &aliases->aliases[i];
- if (alias->flags & KVM_ALIAS_INVALID)
- continue;
- if (gfn >= alias->base_gfn
- && gfn < alias->base_gfn + alias->npages)
- return alias->target_gfn + gfn - alias->base_gfn;
- }
- return gfn;
-}
-
-gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
-{
- int i;
- struct kvm_mem_alias *alias;
- struct kvm_mem_aliases *aliases;
-
- aliases = rcu_dereference(kvm->arch.aliases);
-
- for (i = 0; i < aliases->naliases; ++i) {
- alias = &aliases->aliases[i];
- if (gfn >= alias->base_gfn
- && gfn < alias->base_gfn + alias->npages)
- return alias->target_gfn + gfn - alias->base_gfn;
- }
- return gfn;
-}
-
/*
* Set a new alias region. Aliases map a portion of physical memory into
* another portion. This is useful for memory windows, for example the PC
@@ -2440,9 +2404,11 @@ gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
struct kvm_memory_alias *alias)
{
- int r, n;
- struct kvm_mem_alias *p;
- struct kvm_mem_aliases *aliases, *old_aliases;
+ int r;
+ struct kvm_userspace_memory_region alias_mem;
+ struct kvm_memory_slot *slot;
+ unsigned long offset_addr;
+ gfn_t target_base_gfn;
r = -EINVAL;
/* General sanity checks */
@@ -2459,45 +2425,27 @@ static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
< alias->target_phys_addr)
goto out;
- r = -ENOMEM;
- aliases = kzalloc(sizeof(struct kvm_mem_aliases), GFP_KERNEL);
- if (!aliases)
- goto out;
-
mutex_lock(&kvm->slots_lock);
- /* invalidate any gfn reference in case of deletion/shrinking */
- memcpy(aliases, kvm->arch.aliases, sizeof(struct kvm_mem_aliases));
- aliases->aliases[alias->slot].flags |= KVM_ALIAS_INVALID;
- old_aliases = kvm->arch.aliases;
- rcu_assign_pointer(kvm->arch.aliases, aliases);
- synchronize_srcu_expedited(&kvm->srcu);
- kvm_mmu_zap_all(kvm);
- kfree(old_aliases);
-
- r = -ENOMEM;
- aliases = kzalloc(sizeof(struct kvm_mem_aliases), GFP_KERNEL);
- if (!aliases)
+ target_base_gfn = alias->target_phys_addr >> PAGE_SHIFT;
+ slot = gfn_to_memslot(kvm, target_base_gfn);
+ if (!slot)
goto out_unlock;
- memcpy(aliases, kvm->arch.aliases, sizeof(struct kvm_mem_aliases));
-
- p = &aliases->aliases[alias->slot];
- p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
- p->npages = alias->memory_size >> PAGE_SHIFT;
- p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;
- p->flags &= ~(KVM_ALIAS_INVALID);
+ if (slot->npages < alias->memory_size >> PAGE_SHIFT)
+ goto out_unlock;
- for (n = KVM_ALIAS_SLOTS; n > 0; --n)
- if (aliases->aliases[n - 1].npages)
- break;
- aliases->naliases = n;
+ alias_mem.slot = alias->slot + KVM_MEMORY_SLOTS;
+ alias_mem.guest_phys_addr = alias->guest_phys_addr;
+ alias_mem.memory_size = alias->memory_size;
+ offset_addr = target_base_gfn - slot->base_gfn;
+ offset_addr = offset_addr << PAGE_SHIFT;
+ alias_mem.userspace_addr = slot->userspace_addr + offset_addr;
+ alias_mem.flags = 0;
- old_aliases = kvm->arch.aliases;
- rcu_assign_pointer(kvm->arch.aliases, aliases);
- synchronize_srcu_expedited(&kvm->srcu);
- kfree(old_aliases);
- r = 0;
+ r = __kvm_set_memory_region(kvm, &alias_mem, 1);
+ if (!r)
+ kvm_mmu_zap_all(kvm);
out_unlock:
mutex_unlock(&kvm->slots_lock);
@@ -2631,6 +2579,8 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
{
int r, n, i;
struct kvm_memory_slot *memslot;
+ struct kvm_memory_slot *alias_memslot;
+ unsigned long size;
unsigned long is_dirty = 0;
unsigned long *dirty_bitmap = NULL;
@@ -2661,7 +2611,18 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
struct kvm_memslots *slots, *old_slots;
spin_lock(&kvm->mmu_lock);
+ for (i = KVM_MEMORY_SLOTS; i < KVM_MEMORY_SLOTS +
+ KVM_ALIAS_SLOTS; ++i) {
+ alias_memslot = &kvm->memslots->memslots[i];
+ size = memslot->npages << PAGE_SHIFT;
+ if (alias_memslot->userspace_addr >=
+ memslot->userspace_addr &&
+ alias_memslot->userspace_addr <
+ memslot->userspace_addr + size)
+ kvm_mmu_slot_remove_write_access(kvm, i);
+ }
kvm_mmu_slot_remove_write_access(kvm, log->slot);
+ kvm_flush_remote_tlbs(kvm);
spin_unlock(&kvm->mmu_lock);
slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
@@ -5484,12 +5445,6 @@ struct kvm *kvm_arch_create_vm(void)
if (!kvm)
return ERR_PTR(-ENOMEM);
- kvm->arch.aliases = kzalloc(sizeof(struct kvm_mem_aliases), GFP_KERNEL);
- if (!kvm->arch.aliases) {
- kfree(kvm);
- return ERR_PTR(-ENOMEM);
- }
-
INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
@@ -5547,8 +5502,6 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
if (kvm->arch.ept_identity_pagetable)
put_page(kvm->arch.ept_identity_pagetable);
cleanup_srcu_struct(&kvm->srcu);
- kfree(kvm->arch.aliases);
- kfree(kvm);
}
int kvm_arch_prepare_memory_region(struct kvm *kvm,
@@ -5613,6 +5566,7 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
}
kvm_mmu_slot_remove_write_access(kvm, mem->slot);
+ kvm_flush_remote_tlbs(kvm);
spin_unlock(&kvm->mmu_lock);
}
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index dfde04b..b777844 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -117,6 +117,8 @@ struct kvm_memory_slot {
} *lpage_info[KVM_NR_PAGE_SIZES - 1];
unsigned long userspace_addr;
int user_alloc;
+ gfn_t real_base_gfn;
+ unsigned long real_npages;
};
struct kvm_kernel_irq_routing_entry {
@@ -156,7 +158,8 @@ struct kvm_irq_routing_table {};
struct kvm_memslots {
int nmemslots;
struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
- KVM_PRIVATE_MEM_SLOTS];
+ KVM_PRIVATE_MEM_SLOTS +
+ KVM_ALIAS_SLOTS];
};
struct kvm {
@@ -267,8 +270,6 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
int user_alloc);
void kvm_disable_largepages(void);
void kvm_arch_flush_shadow(struct kvm *kvm);
-gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
-gfn_t unalias_gfn_instantiation(struct kvm *kvm, gfn_t gfn);
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
@@ -543,10 +544,6 @@ static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_se
}
#endif
-#ifndef KVM_ARCH_HAS_UNALIAS_INSTANTIATION
-#define unalias_gfn_instantiation unalias_gfn
-#endif
-
#ifdef CONFIG_HAVE_KVM_IRQCHIP
#define KVM_MAX_IRQ_ROUTES 1024
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 7c5c873..6b2aa1f 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -450,10 +450,51 @@ out_err_nodisable:
return ERR_PTR(r);
}
+#ifdef CONFIG_X86
+
+static void update_alias_slots(struct kvm *kvm, struct kvm_memory_slot *slot)
+{
+ int i;
+
+ for (i = KVM_MEMORY_SLOTS; i < KVM_MEMORY_SLOTS + KVM_ALIAS_SLOTS;
+ ++i) {
+ struct kvm_memory_slot *alias_memslot =
+ &kvm->memslots->memslots[i];
+ unsigned long size = slot->npages << PAGE_SHIFT;
+
+ if (alias_memslot->real_base_gfn >= slot->base_gfn &&
+ alias_memslot->real_base_gfn < slot->base_gfn + size) {
+ if (slot->dirty_bitmap) {
+ unsigned long bitmap_addr;
+ unsigned long dirty_offset;
+ unsigned long offset_addr =
+ (alias_memslot->real_base_gfn -
+ slot->base_gfn) << PAGE_SHIFT;
+ alias_memslot->userspace_addr =
+ slot->userspace_addr + offset_addr;
+
+ dirty_offset =
+ ALIGN(offset_addr, BITS_PER_LONG) / 8;
+ bitmap_addr = (unsigned long) slot->dirty_bitmap;
+ bitmap_addr += dirty_offset;
+ alias_memslot->dirty_bitmap = (unsigned long *)bitmap_addr;
+ alias_memslot->base_gfn = alias_memslot->real_base_gfn;
+ alias_memslot->npages = alias_memslot->real_npages;
+ } else if (!slot->rmap) {
+ alias_memslot->base_gfn = 0;
+ alias_memslot->npages = 0;
+ }
+ }
+ }
+}
+
+#endif
+
/*
* Free any memory in @free but not in @dont.
*/
-static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
+static void kvm_free_physmem_slot(struct kvm *kvm,
+ struct kvm_memory_slot *free,
struct kvm_memory_slot *dont)
{
int i;
@@ -472,9 +513,13 @@ static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
}
}
- free->npages = 0;
free->dirty_bitmap = NULL;
free->rmap = NULL;
+
+#ifdef CONFIG_X86
+ update_alias_slots(kvm, free);
+#endif
+ free->npages = 0;
}
void kvm_free_physmem(struct kvm *kvm)
@@ -483,7 +528,7 @@ void kvm_free_physmem(struct kvm *kvm)
struct kvm_memslots *slots = kvm->memslots;
for (i = 0; i < slots->nmemslots; ++i)
- kvm_free_physmem_slot(&slots->memslots[i], NULL);
+ kvm_free_physmem_slot(kvm, &slots->memslots[i], NULL);
kfree(kvm->memslots);
}
@@ -563,7 +608,8 @@ int __kvm_set_memory_region(struct kvm *kvm,
goto out;
if (user_alloc && (mem->userspace_addr & (PAGE_SIZE - 1)))
goto out;
- if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
+ if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS +
+ KVM_ALIAS_SLOTS)
goto out;
if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
goto out;
@@ -616,6 +662,8 @@ int __kvm_set_memory_region(struct kvm *kvm,
new.user_alloc = user_alloc;
new.userspace_addr = mem->userspace_addr;
+ new.real_base_gfn = new.base_gfn;
+ new.real_npages = new.npages;
}
if (!npages)
goto skip_lpage;
@@ -740,7 +788,7 @@ skip_lpage:
kvm_arch_commit_memory_region(kvm, mem, old, user_alloc);
- kvm_free_physmem_slot(&old, &new);
+ kvm_free_physmem_slot(kvm, &old, &new);
kfree(old_memslots);
if (flush_shadow)
@@ -749,7 +797,7 @@ skip_lpage:
return 0;
out_free:
- kvm_free_physmem_slot(&new, &old);
+ kvm_free_physmem_slot(kvm, &new, &old);
out:
return r;
@@ -842,7 +890,7 @@ int kvm_is_error_hva(unsigned long addr)
}
EXPORT_SYMBOL_GPL(kvm_is_error_hva);
-struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn)
+struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
int i;
struct kvm_memslots *slots = rcu_dereference(kvm->memslots);
@@ -856,21 +904,14 @@ struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn)
}
return NULL;
}
-EXPORT_SYMBOL_GPL(gfn_to_memslot_unaliased);
-
-struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
-{
- gfn = unalias_gfn(kvm, gfn);
- return gfn_to_memslot_unaliased(kvm, gfn);
-}
+EXPORT_SYMBOL_GPL(gfn_to_memslot);
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
int i;
struct kvm_memslots *slots = rcu_dereference(kvm->memslots);
- gfn = unalias_gfn_instantiation(kvm, gfn);
- for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
+ for (i = 0; i < KVM_MEMORY_SLOTS + KVM_ALIAS_SLOTS; ++i) {
struct kvm_memory_slot *memslot = &slots->memslots[i];
if (memslot->flags & KVM_MEMSLOT_INVALID)
@@ -890,7 +931,6 @@ int memslot_id(struct kvm *kvm, gfn_t gfn)
struct kvm_memslots *slots = rcu_dereference(kvm->memslots);
struct kvm_memory_slot *memslot = NULL;
- gfn = unalias_gfn(kvm, gfn);
for (i = 0; i < slots->nmemslots; ++i) {
memslot = &slots->memslots[i];
@@ -906,8 +946,7 @@ unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
struct kvm_memory_slot *slot;
- gfn = unalias_gfn_instantiation(kvm, gfn);
- slot = gfn_to_memslot_unaliased(kvm, gfn);
+ slot = gfn_to_memslot(kvm, gfn);
if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
return bad_hva();
return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
@@ -1174,8 +1213,7 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
struct kvm_memory_slot *memslot;
- gfn = unalias_gfn(kvm, gfn);
- memslot = gfn_to_memslot_unaliased(kvm, gfn);
+ memslot = gfn_to_memslot(kvm, gfn);
if (memslot && memslot->dirty_bitmap) {
unsigned long rel_gfn = gfn - memslot->base_gfn;
--
1.6.5.2
^ permalink raw reply related [flat|nested] 9+ messages in thread* Re: [PATCH] RFC: alias rework
2010-01-25 13:53 [PATCH] RFC: alias rework Izik Eidus
@ 2010-01-25 19:45 ` Marcelo Tosatti
2010-01-25 19:57 ` Izik Eidus
0 siblings, 1 reply; 9+ messages in thread
From: Marcelo Tosatti @ 2010-01-25 19:45 UTC (permalink / raw)
To: Izik Eidus; +Cc: kvm
Izik,
On Mon, Jan 25, 2010 at 03:53:44PM +0200, Izik Eidus wrote:
> >From f94dcd1ccabbcdb51ed7c37c5f58f00a5c1b7eec Mon Sep 17 00:00:00 2001
> From: Izik Eidus <ieidus@redhat.com>
> Date: Mon, 25 Jan 2010 15:49:41 +0200
> Subject: [PATCH] RFC: alias rework
>
> This patch remove the old way of aliasing inside kvm
> and move into using aliasing with the same virtual addresses
>
> This patch is really just early RFC just to know if you guys
> like this direction, and I need to clean some parts of it
> and test it more before I feel it ready to be merged...
>
> Comments are more than welcome.
>
> Thanks.
>
> Signed-off-by: Izik Eidus <ieidus@redhat.com>
> ---
> arch/ia64/include/asm/kvm_host.h | 1 +
> arch/ia64/kvm/kvm-ia64.c | 5 --
> arch/powerpc/kvm/powerpc.c | 5 --
> arch/s390/include/asm/kvm_host.h | 1 +
> arch/s390/kvm/kvm-s390.c | 5 --
> arch/x86/include/asm/kvm_host.h | 19 ------
> arch/x86/include/asm/vmx.h | 6 +-
> arch/x86/kvm/mmu.c | 19 ++-----
> arch/x86/kvm/x86.c | 114 +++++++++++--------------------------
> include/linux/kvm_host.h | 11 +--
> virt/kvm/kvm_main.c | 80 +++++++++++++++++++-------
> 11 files changed, 107 insertions(+), 159 deletions(-)
>
> @@ -2661,7 +2611,18 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
> struct kvm_memslots *slots, *old_slots;
>
> spin_lock(&kvm->mmu_lock);
> + for (i = KVM_MEMORY_SLOTS; i < KVM_MEMORY_SLOTS +
> + KVM_ALIAS_SLOTS; ++i) {
The plan is to kill KVM_ALIAS_SLOTS (aliases will share the 32 mem
slots), right?
> +#ifdef CONFIG_X86
> +
> +static void update_alias_slots(struct kvm *kvm, struct kvm_memory_slot *slot)
> +{
> + int i;
> +
> + for (i = KVM_MEMORY_SLOTS; i < KVM_MEMORY_SLOTS + KVM_ALIAS_SLOTS;
> + ++i) {
> + struct kvm_memory_slot *alias_memslot =
> + &kvm->memslots->memslots[i];
> + unsigned long size = slot->npages << PAGE_SHIFT;
> +
> + if (alias_memslot->real_base_gfn >= slot->base_gfn &&
> + alias_memslot->real_base_gfn < slot->base_gfn + size) {
> + if (slot->dirty_bitmap) {
> + unsigned long bitmap_addr;
> + unsigned long dirty_offset;
> + unsigned long offset_addr =
> + (alias_memslot->real_base_gfn -
> + slot->base_gfn) << PAGE_SHIFT;
> + alias_memslot->userspace_addr =
> + slot->userspace_addr + offset_addr;
> +
> + dirty_offset =
> + ALIGN(offset_addr, BITS_PER_LONG) / 8;
> + bitmap_addr = (unsigned long) slot->dirty_bitmap;
> + bitmap_addr += dirty_offset;
> + alias_memslot->dirty_bitmap = (unsigned long *)bitmap_addr;
> + alias_memslot->base_gfn = alias_memslot->real_base_gfn;
> + alias_memslot->npages = alias_memslot->real_npages;
> + } else if (!slot->rmap) {
> + alias_memslot->base_gfn = 0;
> + alias_memslot->npages = 0;
> + }
> + }
> + }
> +}
> +
> +#endif
Can't see why is this needed. What is the problem with nuking "child"
aliases when deleting a real memslot?
^ permalink raw reply [flat|nested] 9+ messages in thread* Re: [PATCH] RFC: alias rework
2010-01-25 19:45 ` Marcelo Tosatti
@ 2010-01-25 19:57 ` Izik Eidus
2010-01-25 20:20 ` Marcelo Tosatti
0 siblings, 1 reply; 9+ messages in thread
From: Izik Eidus @ 2010-01-25 19:57 UTC (permalink / raw)
To: Marcelo Tosatti; +Cc: kvm
On Mon, 25 Jan 2010 17:45:53 -0200
Marcelo Tosatti <mtosatti@redhat.com> wrote:
> Izik,
>
> On Mon, Jan 25, 2010 at 03:53:44PM +0200, Izik Eidus wrote:
> > >From f94dcd1ccabbcdb51ed7c37c5f58f00a5c1b7eec Mon Sep 17 00:00:00 2001
> > From: Izik Eidus <ieidus@redhat.com>
> > Date: Mon, 25 Jan 2010 15:49:41 +0200
> > Subject: [PATCH] RFC: alias rework
> >
> > This patch remove the old way of aliasing inside kvm
> > and move into using aliasing with the same virtual addresses
> >
> > This patch is really just early RFC just to know if you guys
> > like this direction, and I need to clean some parts of it
> > and test it more before I feel it ready to be merged...
> >
> > Comments are more than welcome.
> >
> > Thanks.
> >
> > Signed-off-by: Izik Eidus <ieidus@redhat.com>
> > ---
> > arch/ia64/include/asm/kvm_host.h | 1 +
> > arch/ia64/kvm/kvm-ia64.c | 5 --
> > arch/powerpc/kvm/powerpc.c | 5 --
> > arch/s390/include/asm/kvm_host.h | 1 +
> > arch/s390/kvm/kvm-s390.c | 5 --
> > arch/x86/include/asm/kvm_host.h | 19 ------
> > arch/x86/include/asm/vmx.h | 6 +-
> > arch/x86/kvm/mmu.c | 19 ++-----
> > arch/x86/kvm/x86.c | 114 +++++++++++--------------------------
> > include/linux/kvm_host.h | 11 +--
> > virt/kvm/kvm_main.c | 80 +++++++++++++++++++-------
> > 11 files changed, 107 insertions(+), 159 deletions(-)
> >
>
> > @@ -2661,7 +2611,18 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
> > struct kvm_memslots *slots, *old_slots;
> >
> > spin_lock(&kvm->mmu_lock);
> > + for (i = KVM_MEMORY_SLOTS; i < KVM_MEMORY_SLOTS +
> > + KVM_ALIAS_SLOTS; ++i) {
>
> The plan is to kill KVM_ALIAS_SLOTS (aliases will share the 32 mem
> slots), right?
Hrmm, I think we have to keep these additional 4 slots on top of
KVM_MEMORY_SLOTS to preserve the same behavior with old userspaces,
because maybe some userspace apps already use all 32 slots?
I don't mind removing it if you guys don't think this is the case.
>
> > +#ifdef CONFIG_X86
> > +
> > +static void update_alias_slots(struct kvm *kvm, struct kvm_memory_slot *slot)
> > +{
> > + int i;
> > +
> > + for (i = KVM_MEMORY_SLOTS; i < KVM_MEMORY_SLOTS + KVM_ALIAS_SLOTS;
> > + ++i) {
> > + struct kvm_memory_slot *alias_memslot =
> > + &kvm->memslots->memslots[i];
> > + unsigned long size = slot->npages << PAGE_SHIFT;
> > +
> > + if (alias_memslot->real_base_gfn >= slot->base_gfn &&
> > + alias_memslot->real_base_gfn < slot->base_gfn + size) {
> > + if (slot->dirty_bitmap) {
> > + unsigned long bitmap_addr;
> > + unsigned long dirty_offset;
> > + unsigned long offset_addr =
> > + (alias_memslot->real_base_gfn -
> > + slot->base_gfn) << PAGE_SHIFT;
> > + alias_memslot->userspace_addr =
> > + slot->userspace_addr + offset_addr;
> > +
> > + dirty_offset =
> > + ALIGN(offset_addr, BITS_PER_LONG) / 8;
> > + bitmap_addr = (unsigned long) slot->dirty_bitmap;
> > + bitmap_addr += dirty_offset;
> > + alias_memslot->dirty_bitmap = (unsigned long *)bitmap_addr;
> > + alias_memslot->base_gfn = alias_memslot->real_base_gfn;
> > + alias_memslot->npages = alias_memslot->real_npages;
> > + } else if (!slot->rmap) {
> > + alias_memslot->base_gfn = 0;
> > + alias_memslot->npages = 0;
> > + }
> > + }
> > + }
> > +}
> > +
> > +#endif
>
> Can't see why is this needed. What is the problem with nuking "child"
> aliases when deleting a real memslot?
The problem is that this memslot still points into the virtual address space of the host.
This means that gfn_to_memslot/page will still work on gfns and will result in
pages that are mapped into the virtual addresses that the userspace requested to
remove from KVM.
Thanks.
^ permalink raw reply [flat|nested] 9+ messages in thread* Re: [PATCH] RFC: alias rework
2010-01-25 19:57 ` Izik Eidus
@ 2010-01-25 20:20 ` Marcelo Tosatti
2010-01-25 20:40 ` Izik Eidus
0 siblings, 1 reply; 9+ messages in thread
From: Marcelo Tosatti @ 2010-01-25 20:20 UTC (permalink / raw)
To: Izik Eidus; +Cc: kvm
On Mon, Jan 25, 2010 at 09:57:43PM +0200, Izik Eidus wrote:
> On Mon, 25 Jan 2010 17:45:53 -0200
> Marcelo Tosatti <mtosatti@redhat.com> wrote:
>
> > Izik,
> >
> > On Mon, Jan 25, 2010 at 03:53:44PM +0200, Izik Eidus wrote:
> > > >From f94dcd1ccabbcdb51ed7c37c5f58f00a5c1b7eec Mon Sep 17 00:00:00 2001
> > > From: Izik Eidus <ieidus@redhat.com>
> > > Date: Mon, 25 Jan 2010 15:49:41 +0200
> > > Subject: [PATCH] RFC: alias rework
> > >
> > > This patch remove the old way of aliasing inside kvm
> > > and move into using aliasing with the same virtual addresses
> > >
> > > This patch is really just early RFC just to know if you guys
> > > like this direction, and I need to clean some parts of it
> > > and test it more before I feel it ready to be merged...
> > >
> > > Comments are more than welcome.
> > >
> > > Thanks.
> > >
> > > Signed-off-by: Izik Eidus <ieidus@redhat.com>
> > > ---
> > > arch/ia64/include/asm/kvm_host.h | 1 +
> > > arch/ia64/kvm/kvm-ia64.c | 5 --
> > > arch/powerpc/kvm/powerpc.c | 5 --
> > > arch/s390/include/asm/kvm_host.h | 1 +
> > > arch/s390/kvm/kvm-s390.c | 5 --
> > > arch/x86/include/asm/kvm_host.h | 19 ------
> > > arch/x86/include/asm/vmx.h | 6 +-
> > > arch/x86/kvm/mmu.c | 19 ++-----
> > > arch/x86/kvm/x86.c | 114 +++++++++++--------------------------
> > > include/linux/kvm_host.h | 11 +--
> > > virt/kvm/kvm_main.c | 80 +++++++++++++++++++-------
> > > 11 files changed, 107 insertions(+), 159 deletions(-)
> > >
> >
> > > @@ -2661,7 +2611,18 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
> > > struct kvm_memslots *slots, *old_slots;
> > >
> > > spin_lock(&kvm->mmu_lock);
> > > + for (i = KVM_MEMORY_SLOTS; i < KVM_MEMORY_SLOTS +
> > > + KVM_ALIAS_SLOTS; ++i) {
> >
> > The plan is to kill KVM_ALIAS_SLOTS (aliases will share the 32 mem
> > slots), right?
>
> Hrmm I think we got to have this addition 4 KVM_MEMORY_SLOTS to keep
> the same beahivor with old userspaces
> beacuse maybe some userspace apps use 32 slots already?
>
> I dont mind remove it if you guys don`t think this is the case.
I'm fine with sharing (it also makes the code simpler).
Don't know of any apps/configs using more than (or close to) 32 slots.
> > > +#ifdef CONFIG_X86
> > > +
> > > +static void update_alias_slots(struct kvm *kvm, struct kvm_memory_slot *slot)
> > > +{
> > > + int i;
> > > +
> > > + for (i = KVM_MEMORY_SLOTS; i < KVM_MEMORY_SLOTS + KVM_ALIAS_SLOTS;
> > > + ++i) {
> > > + struct kvm_memory_slot *alias_memslot =
> > > + &kvm->memslots->memslots[i];
> > > + unsigned long size = slot->npages << PAGE_SHIFT;
> > > +
> > > + if (alias_memslot->real_base_gfn >= slot->base_gfn &&
> > > + alias_memslot->real_base_gfn < slot->base_gfn + size) {
> > > + if (slot->dirty_bitmap) {
> > > + unsigned long bitmap_addr;
> > > + unsigned long dirty_offset;
> > > + unsigned long offset_addr =
> > > + (alias_memslot->real_base_gfn -
> > > + slot->base_gfn) << PAGE_SHIFT;
> > > + alias_memslot->userspace_addr =
> > > + slot->userspace_addr + offset_addr;
> > > +
> > > + dirty_offset =
> > > + ALIGN(offset_addr, BITS_PER_LONG) / 8;
> > > + bitmap_addr = (unsigned long) slot->dirty_bitmap;
> > > + bitmap_addr += dirty_offset;
> > > + alias_memslot->dirty_bitmap = (unsigned long *)bitmap_addr;
> > > + alias_memslot->base_gfn = alias_memslot->real_base_gfn;
> > > + alias_memslot->npages = alias_memslot->real_npages;
> > > + } else if (!slot->rmap) {
> > > + alias_memslot->base_gfn = 0;
> > > + alias_memslot->npages = 0;
> > > + }
> > > + }
> > > + }
> > > +}
> > > +
> > > +#endif
> >
> > Can't see why is this needed. What is the problem with nuking "child"
> > aliases when deleting a real memslot?
>
> The problem is that this memslot still point in the virtual address of the host,
> This mean that gfn_to_memslot/page will still work on gfns and will result in
> pages that are mapped into the virtual address that the userspace requested to
> remove from KVM.
With current code, if a memslot is deleted, access through any aliases
that use it will fail (BTW it looks like this is not properly handled, but
that's a separate problem).
So AFAICS there is no requirement for an alias to continue "operable"
if its parent memslot is deleted.
Or is this a feature you need?
Motivation is that nuking aliases is simpler than adjusting them.
^ permalink raw reply [flat|nested] 9+ messages in thread* Re: [PATCH] RFC: alias rework
2010-01-25 20:20 ` Marcelo Tosatti
@ 2010-01-25 20:40 ` Izik Eidus
2010-01-25 20:49 ` Marcelo Tosatti
2010-01-26 14:14 ` Avi Kivity
0 siblings, 2 replies; 9+ messages in thread
From: Izik Eidus @ 2010-01-25 20:40 UTC (permalink / raw)
To: Marcelo Tosatti; +Cc: kvm
On Mon, 25 Jan 2010 18:20:39 -0200
Marcelo Tosatti <mtosatti@redhat.com> wrote:
> With current code, if a memslot is deleted, access through any aliases
> that use it will fail (BTW it looks this is not properly handled, but
> thats a separate problem).
Yeah, I still had some open concerns about this code (this is why I sent it as an RFC)
>
> So AFAICS there is no requirement for an alias to continue "operable"
> if its parent memslot is deleted.
With this patch an alias will stop operating when the parent is deleted,
just like the behavior with the current code...
base_gfn will be set to 0 and npages will be set to 0 as well
(the true values will be hidden in real_base_gfn...), so gfn_to_memslot
and gfn_to_page will fail....
>
> Or is this a feature you need?
I don't need it (I asked Avi to do something), so he said he wants to nuke the aliasing
from kvm and keep supporting the old userspaces
Do you have any other way to achieve this?
Btw I do realize it might be better not to push this patch and just keep the old
way of treating aliasing as we have now, I really don`t mind.
>
> Motivation is that nukeing aliases is simpler than adjusting them.
>
Agree.
^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: [PATCH] RFC: alias rework
2010-01-25 20:40 ` Izik Eidus
@ 2010-01-25 20:49 ` Marcelo Tosatti
2010-01-25 21:51 ` Izik Eidus
2010-01-26 14:14 ` Avi Kivity
1 sibling, 1 reply; 9+ messages in thread
From: Marcelo Tosatti @ 2010-01-25 20:49 UTC (permalink / raw)
To: Izik Eidus; +Cc: kvm
On Mon, Jan 25, 2010 at 10:40:32PM +0200, Izik Eidus wrote:
> On Mon, 25 Jan 2010 18:20:39 -0200
> Marcelo Tosatti <mtosatti@redhat.com> wrote:
>
> > With current code, if a memslot is deleted, access through any aliases
> > that use it will fail (BTW it looks this is not properly handled, but
> > thats a separate problem).
>
>
> Yea I had some still open concerns about this code (this why I sent it on RFC)
>
> >
> > So AFAICS there is no requirement for an alias to continue "operable"
> > if its parent memslot is deleted.
>
>
> With this patch alias will stop to opearte when the parent is deleted
> just like the behivor with the current code...
>
> base_gfn will be set to 0 and npages will be set to 0 as well
> (the true values wil be hide in real_base_gfn...), so gfn_to_memslot
> and gfn_to_page will fail....
But you adjust the alias (and keep it valid) if dirty logging is
enabled?
> >
> > Or is this a feature you need?
>
>
> I dont need it (I asked Avi to do something), So he said he want to nuke the aliasing
> from kvm and keep supporting the old userspace`s
With feature i meant keeping the alias around when parent slot is
deleted.
> Do you have any other way to achive this?
No.
> Btw I do realize it might be better not to push this patch and just keep the old
> way of treating aliasing as we have now, I really don`t mind.
>
> >
> > Motivation is that nukeing aliases is simpler than adjusting them.
> >
>
> Agree.
^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: [PATCH] RFC: alias rework
2010-01-25 20:49 ` Marcelo Tosatti
@ 2010-01-25 21:51 ` Izik Eidus
0 siblings, 0 replies; 9+ messages in thread
From: Izik Eidus @ 2010-01-25 21:51 UTC (permalink / raw)
To: Marcelo Tosatti; +Cc: kvm
On Mon, 25 Jan 2010 18:49:25 -0200
Marcelo Tosatti <mtosatti@redhat.com> wrote:
> On Mon, Jan 25, 2010 at 10:40:32PM +0200, Izik Eidus wrote:
> > On Mon, 25 Jan 2010 18:20:39 -0200
> > Marcelo Tosatti <mtosatti@redhat.com> wrote:
> >
> > > With current code, if a memslot is deleted, access through any aliases
> > > that use it will fail (BTW it looks this is not properly handled, but
> > > thats a separate problem).
> >
> >
> > Yea I had some still open concerns about this code (this why I sent it on RFC)
> >
> > >
> > > So AFAICS there is no requirement for an alias to continue "operable"
> > > if its parent memslot is deleted.
> >
> >
> > With this patch alias will stop to opearte when the parent is deleted
> > just like the behivor with the current code...
> >
> > base_gfn will be set to 0 and npages will be set to 0 as well
> > (the true values wil be hide in real_base_gfn...), so gfn_to_memslot
> > and gfn_to_page will fail....
>
> But you adjust the alias (and keep it valid) if dirty logging is
> enabled?
I am sorry, but probably you got confused because the code is wrong.
The adjustment of the aliasing should happen in every case of:
if(slot->rmap -> valid (!NULL)):
this means we got a NEW parent slot that is mapped into the gfn
that the alias is mapped to, and we want the userspace address
of the alias slot to intersect with the new parent slot.
and the latter adjustment of the dirty_bitmap should happen only in the case
of - if(slot->dirty_bitmap -> valid (!NULL)):
the alias slot needs to mark_page_dirty the bitmap of the new parent slot.
I hope this will make things clearer.
(I think there is another small issue there, but I will send it when it won't be an RFC)
>
> > >
> > > Or is this a feature you need?
> >
> >
> > I dont need it (I asked Avi to do something), So he said he want to nuke the aliasing
> > from kvm and keep supporting the old userspace`s
>
> With feature i meant keeping the alias around when parent slot is
> deleted.
The code doesn't try to do this; in fact:
} else if (!slot->rmap) {
alias_memslot->base_gfn = 0;
alias_memslot->npages = 0;
}
came to invalidate the alias slot.
Sorry if I made too much of a mess :).
>
> > Do you have any other way to achive this?
>
> No.
>
> > Btw I do realize it might be better not to push this patch and just keep the old
> > way of treating aliasing as we have now, I really don`t mind.
> >
> > >
> > > Motivation is that nukeing aliases is simpler than adjusting them.
> > >
> >
> > Agree.
>
^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: [PATCH] RFC: alias rework
2010-01-25 20:40 ` Izik Eidus
2010-01-25 20:49 ` Marcelo Tosatti
@ 2010-01-26 14:14 ` Avi Kivity
2010-01-26 14:29 ` Izik Eidus
1 sibling, 1 reply; 9+ messages in thread
From: Avi Kivity @ 2010-01-26 14:14 UTC (permalink / raw)
To: Izik Eidus; +Cc: Marcelo Tosatti, kvm
On 01/25/2010 10:40 PM, Izik Eidus wrote:
>
>> Or is this a feature you need?
>>
>
> I dont need it (I asked Avi to do something), So he said he want to nuke the aliasing
> from kvm and keep supporting the old userspace`s
>
> Do you have any other way to achive this?
>
> Btw I do realize it might be better not to push this patch and just keep the old
> way of treating aliasing as we have now, I really don`t mind.
>
How about implementing an alias pointing at a deleted slot as an invalid
slot?
If the slot comes back later, we can revalidate it.
--
error compiling committee.c: too many arguments to function
^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: [PATCH] RFC: alias rework
2010-01-26 14:14 ` Avi Kivity
@ 2010-01-26 14:29 ` Izik Eidus
0 siblings, 0 replies; 9+ messages in thread
From: Izik Eidus @ 2010-01-26 14:29 UTC (permalink / raw)
To: Avi Kivity; +Cc: Marcelo Tosatti, kvm
On Tue, 26 Jan 2010 16:14:47 +0200
Avi Kivity <avi@redhat.com> wrote:
> On 01/25/2010 10:40 PM, Izik Eidus wrote:
> >
> >> Or is this a feature you need?
> >>
> >
> > I dont need it (I asked Avi to do something), So he said he want to nuke the aliasing
> > from kvm and keep supporting the old userspace`s
> >
> > Do you have any other way to achive this?
> >
> > Btw I do realize it might be better not to push this patch and just keep the old
> > way of treating aliasing as we have now, I really don`t mind.
> >
>
> How about implementing an alias pointing at a deleted slot as an invalid
> slot?
>
> If the slot comes back later, we can revalidate it.
>
Ok, I didn't notice this invalid memslot flag;
I will add this. I will still leave the update_aliased_memslot()
in order to update the userspace virtual address...
^ permalink raw reply [flat|nested] 9+ messages in thread
end of thread, other threads:[~2010-01-26 14:29 UTC | newest]
Thread overview: 9+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2010-01-25 13:53 [PATCH] RFC: alias rework Izik Eidus
2010-01-25 19:45 ` Marcelo Tosatti
2010-01-25 19:57 ` Izik Eidus
2010-01-25 20:20 ` Marcelo Tosatti
2010-01-25 20:40 ` Izik Eidus
2010-01-25 20:49 ` Marcelo Tosatti
2010-01-25 21:51 ` Izik Eidus
2010-01-26 14:14 ` Avi Kivity
2010-01-26 14:29 ` Izik Eidus
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox