public inbox for kvm@vger.kernel.org
 help / color / mirror / Atom feed
* [PATCH 0 of 3] create kvm_x86
@ 2007-11-20 16:57 Hollis Blanchard
  2007-11-20 16:57 ` [PATCH 1 of 3] Use kvm_x86 to hold x86 specific kvm fields Hollis Blanchard
                   ` (2 more replies)
  0 siblings, 3 replies; 23+ messages in thread
From: Hollis Blanchard @ 2007-11-20 16:57 UTC (permalink / raw)
  To: Avi Kivity
  Cc: Zhang-Rn83F4s8Lwc+UXBhvPuGgqsjOiXwFzmk,
	kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f,
	kvm-ppc-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f, Xiantao,
	carsteno-tA70FqPdS9bQT0dZR+AlfA

These patches are based on Xiantao's work to create struct kvm_x86. Patch 1 replaces his "KVM Portability split: Splitting kvm structure (V2)", and patches 2 and 3 build on it.

9 files changed, 180 insertions(+), 116 deletions(-)
drivers/kvm/i8259.c    |    1 
drivers/kvm/ioapic.c   |   16 ++++---
drivers/kvm/irq.h      |    3 -
drivers/kvm/kvm.h      |   30 --------------
drivers/kvm/kvm_main.c |   13 ++----
drivers/kvm/mmu.c      |   99 ++++++++++++++++++++++++++++--------------------
drivers/kvm/vmx.c      |   20 ++++++---
drivers/kvm/x86.c      |   67 ++++++++++++++++++++++----------
drivers/kvm/x86.h      |   47 ++++++++++++++++++++++

-------------------------------------------------------------------------
This SF.net email is sponsored by: Microsoft
Defy all challenges. Microsoft(R) Visual Studio 2005.
http://clk.atdmt.com/MRT/go/vse0120000070mrt/direct/01/

^ permalink raw reply	[flat|nested] 23+ messages in thread

* [PATCH 1 of 3] Use kvm_x86 to hold x86 specific kvm fields
  2007-11-20 16:57 [PATCH 0 of 3] create kvm_x86 Hollis Blanchard
@ 2007-11-20 16:57 ` Hollis Blanchard
  2007-11-28 21:20   ` Anthony Liguori
  2007-11-20 16:57 ` [PATCH 2 of 3] Move vm_stat and apic_access_page to kvm_x86 Hollis Blanchard
  2007-11-21  9:09 ` [PATCH 0 of 3] create kvm_x86 Carsten Otte
  2 siblings, 1 reply; 23+ messages in thread
From: Hollis Blanchard @ 2007-11-20 16:57 UTC (permalink / raw)
  To: Avi Kivity
  Cc: Zhang-Rn83F4s8Lwc+UXBhvPuGgqsjOiXwFzmk,
	kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f,
	kvm-ppc-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f, Xiantao,
	carsteno-tA70FqPdS9bQT0dZR+AlfA

Signed-off-by: Zhang xiantao <xiantao.zhang-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
Signed-off-by: Hollis Blanchard <hollisb-r/Jw6+rmf7HQT0dZR+AlfA@public.gmane.org>
---
Changes from Xiantao's patch:
- Fix whitespace
- Reorder variables to avoid stack padding
- Leave descriptor_table relocation for another patch

8 files changed, 132 insertions(+), 85 deletions(-)
drivers/kvm/ioapic.c   |    7 ++-
drivers/kvm/irq.h      |    1 
drivers/kvm/kvm.h      |   26 --------------
drivers/kvm/kvm_main.c |    9 ++---
drivers/kvm/mmu.c      |   85 ++++++++++++++++++++++++++++--------------------
drivers/kvm/vmx.c      |    9 +++--
drivers/kvm/x86.c      |   37 ++++++++++++--------
drivers/kvm/x86.h      |   43 +++++++++++++++++++++++-


diff --git a/drivers/kvm/ioapic.c b/drivers/kvm/ioapic.c
--- a/drivers/kvm/ioapic.c
+++ b/drivers/kvm/ioapic.c
@@ -276,7 +276,9 @@ static int get_eoi_gsi(struct kvm_ioapic
 
 void kvm_ioapic_update_eoi(struct kvm *kvm, int vector)
 {
-	struct kvm_ioapic *ioapic = kvm->vioapic;
+	struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
+
+	struct kvm_ioapic *ioapic = kvm_x86->vioapic;
 	union ioapic_redir_entry *ent;
 	int gsi;
 
@@ -386,11 +388,12 @@ int kvm_ioapic_init(struct kvm *kvm)
 int kvm_ioapic_init(struct kvm *kvm)
 {
 	struct kvm_ioapic *ioapic;
+	struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
 
 	ioapic = kzalloc(sizeof(struct kvm_ioapic), GFP_KERNEL);
 	if (!ioapic)
 		return -ENOMEM;
-	kvm->vioapic = ioapic;
+	kvm_x86->vioapic = ioapic;
 	kvm_ioapic_reset(ioapic);
 	ioapic->dev.read = ioapic_mmio_read;
 	ioapic->dev.write = ioapic_mmio_write;
diff --git a/drivers/kvm/irq.h b/drivers/kvm/irq.h
--- a/drivers/kvm/irq.h
+++ b/drivers/kvm/irq.h
@@ -23,6 +23,7 @@
 #define __IRQ_H
 
 #include "kvm.h"
+#include "x86.h"
 
 typedef void irq_request_func(void *opaque, int level);
 
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -309,41 +309,15 @@ struct kvm {
 	int nmemslots;
 	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
 					KVM_PRIVATE_MEM_SLOTS];
-	/*
-	 * Hash table of struct kvm_mmu_page.
-	 */
-	struct list_head active_mmu_pages;
-	unsigned int n_free_mmu_pages;
-	unsigned int n_requested_mmu_pages;
-	unsigned int n_alloc_mmu_pages;
-	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
 	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
 	struct list_head vm_list;
 	struct file *filp;
 	struct kvm_io_bus mmio_bus;
 	struct kvm_io_bus pio_bus;
-	struct kvm_pic *vpic;
-	struct kvm_ioapic *vioapic;
 	int round_robin_prev_vcpu;
-	unsigned int tss_addr;
 	struct page *apic_access_page;
 	struct kvm_vm_stat stat;
 };
-
-static inline struct kvm_pic *pic_irqchip(struct kvm *kvm)
-{
-	return kvm->vpic;
-}
-
-static inline struct kvm_ioapic *ioapic_irqchip(struct kvm *kvm)
-{
-	return kvm->vioapic;
-}
-
-static inline int irqchip_in_kernel(struct kvm *kvm)
-{
-	return pic_irqchip(kvm) != NULL;
-}
 
 struct descriptor_table {
 	u16 limit;
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -232,6 +232,7 @@ int __kvm_set_memory_region(struct kvm *
 	unsigned long i;
 	struct kvm_memory_slot *memslot;
 	struct kvm_memory_slot old, new;
+	struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
 
 	r = -EINVAL;
 	/* General sanity checks */
@@ -332,18 +333,18 @@ int __kvm_set_memory_region(struct kvm *
 	if (mem->slot >= kvm->nmemslots)
 		kvm->nmemslots = mem->slot + 1;
 
-	if (!kvm->n_requested_mmu_pages) {
+	if (!kvm_x86->n_requested_mmu_pages) {
 		unsigned int n_pages;
 
 		if (npages) {
 			n_pages = npages * KVM_PERMILLE_MMU_PAGES / 1000;
-			kvm_mmu_change_mmu_pages(kvm, kvm->n_alloc_mmu_pages +
-						 n_pages);
+			kvm_mmu_change_mmu_pages(kvm,
+			                  kvm_x86->n_alloc_mmu_pages + n_pages);
 		} else {
 			unsigned int nr_mmu_pages;
 
 			n_pages = old.npages * KVM_PERMILLE_MMU_PAGES / 1000;
-			nr_mmu_pages = kvm->n_alloc_mmu_pages - n_pages;
+			nr_mmu_pages = kvm_x86->n_alloc_mmu_pages - n_pages;
 			nr_mmu_pages = max(nr_mmu_pages,
 				        (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
 			kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -526,12 +526,14 @@ static void kvm_mmu_free_page(struct kvm
 static void kvm_mmu_free_page(struct kvm *kvm,
 			      struct kvm_mmu_page *page_head)
 {
+	struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
+
 	ASSERT(is_empty_shadow_page(page_head->spt));
 	list_del(&page_head->link);
 	__free_page(virt_to_page(page_head->spt));
 	__free_page(virt_to_page(page_head->gfns));
 	kfree(page_head);
-	++kvm->n_free_mmu_pages;
+	++kvm_x86->n_free_mmu_pages;
 }
 
 static unsigned kvm_page_table_hashfn(gfn_t gfn)
@@ -543,8 +545,9 @@ static struct kvm_mmu_page *kvm_mmu_allo
 					       u64 *parent_pte)
 {
 	struct kvm_mmu_page *page;
-
-	if (!vcpu->kvm->n_free_mmu_pages)
+	struct kvm_x86 *kvm_x86 = to_kvm_x86(vcpu->kvm);
+
+	if (!kvm_x86->n_free_mmu_pages)
 		return NULL;
 
 	page = mmu_memory_cache_alloc(&vcpu->mmu_page_header_cache,
@@ -552,12 +555,12 @@ static struct kvm_mmu_page *kvm_mmu_allo
 	page->spt = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
 	page->gfns = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
 	set_page_private(virt_to_page(page->spt), (unsigned long)page);
-	list_add(&page->link, &vcpu->kvm->active_mmu_pages);
+	list_add(&page->link, &kvm_x86->active_mmu_pages);
 	ASSERT(is_empty_shadow_page(page->spt));
 	page->slot_bitmap = 0;
 	page->multimapped = 0;
 	page->parent_pte = parent_pte;
-	--vcpu->kvm->n_free_mmu_pages;
+	--kvm_x86->n_free_mmu_pages;
 	return page;
 }
 
@@ -643,10 +646,11 @@ static struct kvm_mmu_page *kvm_mmu_look
 	struct hlist_head *bucket;
 	struct kvm_mmu_page *page;
 	struct hlist_node *node;
+	struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
 
 	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
 	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
-	bucket = &kvm->mmu_page_hash[index];
+	bucket = &kvm_x86->mmu_page_hash[index];
 	hlist_for_each_entry(page, node, bucket, hash_link)
 		if (page->gfn == gfn && !page->role.metaphysical) {
 			pgprintk("%s: found role %x\n",
@@ -670,6 +674,7 @@ static struct kvm_mmu_page *kvm_mmu_get_
 	struct hlist_head *bucket;
 	struct kvm_mmu_page *page;
 	struct hlist_node *node;
+	struct kvm_x86 *kvm_x86 = to_kvm_x86(vcpu->kvm);
 
 	role.word = 0;
 	role.glevels = vcpu->mmu.root_level;
@@ -684,7 +689,7 @@ static struct kvm_mmu_page *kvm_mmu_get_
 	pgprintk("%s: looking gfn %lx role %x\n", __FUNCTION__,
 		 gfn, role.word);
 	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
-	bucket = &vcpu->kvm->mmu_page_hash[index];
+	bucket = &kvm_x86->mmu_page_hash[index];
 	hlist_for_each_entry(page, node, bucket, hash_link)
 		if (page->gfn == gfn && page->role.word == role.word) {
 			mmu_page_add_parent_pte(vcpu, page, parent_pte);
@@ -754,6 +759,7 @@ static void kvm_mmu_zap_page(struct kvm 
 			     struct kvm_mmu_page *page)
 {
 	u64 *parent_pte;
+	struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
 
 	++kvm->stat.mmu_shadow_zapped;
 	while (page->multimapped || page->parent_pte) {
@@ -775,7 +781,7 @@ static void kvm_mmu_zap_page(struct kvm 
 		hlist_del(&page->hash_link);
 		kvm_mmu_free_page(kvm, page);
 	} else
-		list_move(&page->link, &kvm->active_mmu_pages);
+		list_move(&page->link, &kvm_x86->active_mmu_pages);
 	kvm_mmu_reset_last_pte_updated(kvm);
 }
 
@@ -785,36 +791,39 @@ static void kvm_mmu_zap_page(struct kvm 
  */
 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
 {
+	struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
+
 	/*
 	 * If we set the number of mmu pages to be smaller be than the
 	 * number of actived pages , we must to free some mmu pages before we
 	 * change the value
 	 */
 
-	if ((kvm->n_alloc_mmu_pages - kvm->n_free_mmu_pages) >
+	if ((kvm_x86->n_alloc_mmu_pages - kvm_x86->n_free_mmu_pages) >
 	    kvm_nr_mmu_pages) {
-		int n_used_mmu_pages = kvm->n_alloc_mmu_pages
-				       - kvm->n_free_mmu_pages;
+		int n_used_mmu_pages = kvm_x86->n_alloc_mmu_pages
+				       - kvm_x86->n_free_mmu_pages;
 
 		while (n_used_mmu_pages > kvm_nr_mmu_pages) {
 			struct kvm_mmu_page *page;
 
-			page = container_of(kvm->active_mmu_pages.prev,
+			page = container_of(kvm_x86->active_mmu_pages.prev,
 					    struct kvm_mmu_page, link);
 			kvm_mmu_zap_page(kvm, page);
 			n_used_mmu_pages--;
 		}
-		kvm->n_free_mmu_pages = 0;
+		kvm_x86->n_free_mmu_pages = 0;
 	}
 	else
-		kvm->n_free_mmu_pages += kvm_nr_mmu_pages
-					 - kvm->n_alloc_mmu_pages;
-
-	kvm->n_alloc_mmu_pages = kvm_nr_mmu_pages;
+		kvm_x86->n_free_mmu_pages += kvm_nr_mmu_pages
+					 - kvm_x86->n_alloc_mmu_pages;
+
+	kvm_x86->n_alloc_mmu_pages = kvm_nr_mmu_pages;
 }
 
 static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 {
+	struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
 	unsigned index;
 	struct hlist_head *bucket;
 	struct kvm_mmu_page *page;
@@ -824,7 +833,7 @@ static int kvm_mmu_unprotect_page(struct
 	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
 	r = 0;
 	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
-	bucket = &kvm->mmu_page_hash[index];
+	bucket = &kvm_x86->mmu_page_hash[index];
 	hlist_for_each_entry_safe(page, node, n, bucket, hash_link)
 		if (page->gfn == gfn && !page->role.metaphysical) {
 			pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn,
@@ -1251,6 +1260,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 		       const u8 *new, int bytes)
 {
+	struct kvm_x86 *kvm_x86 = to_kvm_x86(vcpu->kvm);
 	gfn_t gfn = gpa >> PAGE_SHIFT;
 	struct kvm_mmu_page *page;
 	struct hlist_node *node, *n;
@@ -1280,7 +1290,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
 		vcpu->last_pte_updated = NULL;
 	}
 	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
-	bucket = &vcpu->kvm->mmu_page_hash[index];
+	bucket = &kvm_x86->mmu_page_hash[index];
 	hlist_for_each_entry_safe(page, node, n, bucket, hash_link) {
 		if (page->gfn != gfn || page->role.metaphysical)
 			continue;
@@ -1344,10 +1354,12 @@ int kvm_mmu_unprotect_page_virt(struct k
 
 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 {
-	while (vcpu->kvm->n_free_mmu_pages < KVM_REFILL_PAGES) {
+	struct kvm_x86 *kvm_x86 = to_kvm_x86(vcpu->kvm);
+
+	while (kvm_x86->n_free_mmu_pages < KVM_REFILL_PAGES) {
 		struct kvm_mmu_page *page;
 
-		page = container_of(vcpu->kvm->active_mmu_pages.prev,
+		page = container_of(kvm_x86->active_mmu_pages.prev,
 				    struct kvm_mmu_page, link);
 		kvm_mmu_zap_page(vcpu->kvm, page);
 		++vcpu->kvm->stat.mmu_recycled;
@@ -1397,9 +1409,10 @@ static void free_mmu_pages(struct kvm_vc
 static void free_mmu_pages(struct kvm_vcpu *vcpu)
 {
 	struct kvm_mmu_page *page;
-
-	while (!list_empty(&vcpu->kvm->active_mmu_pages)) {
-		page = container_of(vcpu->kvm->active_mmu_pages.next,
+	struct kvm_x86 *kvm_x86 = to_kvm_x86(vcpu->kvm);
+
+	while (!list_empty(&kvm_x86->active_mmu_pages)) {
+		page = container_of(kvm_x86->active_mmu_pages.next,
 				    struct kvm_mmu_page, link);
 		kvm_mmu_zap_page(vcpu->kvm, page);
 	}
@@ -1408,15 +1421,16 @@ static void free_mmu_pages(struct kvm_vc
 
 static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
 {
+	struct kvm_x86 *kvm_x86 = to_kvm_x86(vcpu->kvm);
 	struct page *page;
 	int i;
 
 	ASSERT(vcpu);
 
-	if (vcpu->kvm->n_requested_mmu_pages)
-		vcpu->kvm->n_free_mmu_pages = vcpu->kvm->n_requested_mmu_pages;
+	if (kvm_x86->n_requested_mmu_pages)
+		kvm_x86->n_free_mmu_pages = kvm_x86->n_requested_mmu_pages;
 	else
-		vcpu->kvm->n_free_mmu_pages = vcpu->kvm->n_alloc_mmu_pages;
+		kvm_x86->n_free_mmu_pages = kvm_x86->n_alloc_mmu_pages;
 	/*
 	 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
 	 * Therefore we need to allocate shadow page tables in the first
@@ -1464,8 +1478,9 @@ void kvm_mmu_slot_remove_write_access(st
 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
 {
 	struct kvm_mmu_page *page;
-
-	list_for_each_entry(page, &kvm->active_mmu_pages, link) {
+	struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
+
+	list_for_each_entry(page, &kvm_x86->active_mmu_pages, link) {
 		int i;
 		u64 *pt;
 
@@ -1483,8 +1498,9 @@ void kvm_mmu_zap_all(struct kvm *kvm)
 void kvm_mmu_zap_all(struct kvm *kvm)
 {
 	struct kvm_mmu_page *page, *node;
-
-	list_for_each_entry_safe(page, node, &kvm->active_mmu_pages, link)
+	struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
+
+	list_for_each_entry_safe(page, node, &kvm_x86->active_mmu_pages, link)
 		kvm_mmu_zap_page(kvm, page);
 
 	kvm_flush_remote_tlbs(kvm);
@@ -1637,7 +1653,7 @@ static int count_writable_mappings(struc
 	struct kvm_mmu_page *page;
 	int i;
 
-	list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
+	list_for_each_entry(page, &kvm_x86->active_mmu_pages, link) {
 		u64 *pt = page->spt;
 
 		if (page->role.level != PT_PAGE_TABLE_LEVEL)
@@ -1672,8 +1688,9 @@ static void audit_write_protection(struc
 	struct kvm_memory_slot *slot;
 	unsigned long *rmapp;
 	gfn_t gfn;
-
-	list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
+	struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
+
+	list_for_each_entry(page, &kvm_x86->active_mmu_pages, link) {
 		if (page->role.metaphysical)
 			continue;
 
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -1141,12 +1141,14 @@ static void enter_pmode(struct kvm_vcpu 
 
 static gva_t rmode_tss_base(struct kvm *kvm)
 {
-	if (!kvm->tss_addr) {
+	struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
+
+	if (!kvm_x86->tss_addr) {
 		gfn_t base_gfn = kvm->memslots[0].base_gfn +
 				 kvm->memslots[0].npages - 3;
 		return base_gfn << PAGE_SHIFT;
 	}
-	return kvm->tss_addr;
+	return kvm_x86->tss_addr;
 }
 
 static void fix_rmode_seg(int seg, struct kvm_save_segment *save)
@@ -1768,6 +1770,7 @@ static void do_interrupt_requests(struct
 
 static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
 {
+	struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
 	int ret;
 	struct kvm_userspace_memory_region tss_mem = {
 		.slot = 8,
@@ -1779,7 +1782,7 @@ static int vmx_set_tss_addr(struct kvm *
 	ret = kvm_set_memory_region(kvm, &tss_mem, 0);
 	if (ret)
 		return ret;
-	kvm->tss_addr = addr;
+	kvm_x86->tss_addr = addr;
 	return 0;
 }
 
diff --git a/drivers/kvm/x86.c b/drivers/kvm/x86.c
--- a/drivers/kvm/x86.c
+++ b/drivers/kvm/x86.c
@@ -815,13 +815,15 @@ static int kvm_vm_ioctl_set_nr_mmu_pages
 static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
 					  u32 kvm_nr_mmu_pages)
 {
+	struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
+
 	if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
 		return -EINVAL;
 
 	mutex_lock(&kvm->lock);
 
 	kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
-	kvm->n_requested_mmu_pages = kvm_nr_mmu_pages;
+	kvm_x86->n_requested_mmu_pages = kvm_nr_mmu_pages;
 
 	mutex_unlock(&kvm->lock);
 	return 0;
@@ -829,7 +831,9 @@ static int kvm_vm_ioctl_set_nr_mmu_pages
 
 static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
 {
-	return kvm->n_alloc_mmu_pages;
+	struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
+
+	return kvm_x86->n_alloc_mmu_pages;
 }
 
 /*
@@ -972,6 +976,7 @@ long kvm_arch_vm_ioctl(struct file *filp
 		       unsigned int ioctl, unsigned long arg)
 {
 	struct kvm *kvm = filp->private_data;
+	struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
 	void __user *argp = (void __user *)arg;
 	int r = -EINVAL;
 
@@ -1018,12 +1023,12 @@ long kvm_arch_vm_ioctl(struct file *filp
 	}
 	case KVM_CREATE_IRQCHIP:
 		r = -ENOMEM;
-		kvm->vpic = kvm_create_pic(kvm);
-		if (kvm->vpic) {
+		kvm_x86->vpic = kvm_create_pic(kvm);
+		if (kvm_x86->vpic) {
 			r = kvm_ioapic_init(kvm);
 			if (r) {
-				kfree(kvm->vpic);
-				kvm->vpic = NULL;
+				kfree(kvm_x86->vpic);
+				kvm_x86->vpic = NULL;
 				goto out;
 			}
 		} else
@@ -1041,7 +1046,7 @@ long kvm_arch_vm_ioctl(struct file *filp
 				kvm_pic_set_irq(pic_irqchip(kvm),
 					irq_event.irq,
 					irq_event.level);
-			kvm_ioapic_set_irq(kvm->vioapic,
+			kvm_ioapic_set_irq(kvm_x86->vioapic,
 					irq_event.irq,
 					irq_event.level);
 			mutex_unlock(&kvm->lock);
@@ -2603,14 +2608,14 @@ void kvm_arch_vcpu_uninit(struct kvm_vcp
 
 struct  kvm *kvm_arch_create_vm(void)
 {
-	struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
-
-	if (!kvm)
+	struct kvm_x86 *kvm_x86 = kzalloc(sizeof(struct kvm_x86), GFP_KERNEL);
+
+	if (!kvm_x86)
 		return ERR_PTR(-ENOMEM);
 
-	INIT_LIST_HEAD(&kvm->active_mmu_pages);
-
-	return kvm;
+	INIT_LIST_HEAD(&kvm_x86->active_mmu_pages);
+
+	return &kvm_x86->kvm;
 }
 
 static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
@@ -2641,8 +2646,10 @@ static void kvm_free_vcpus(struct kvm *k
 
 void kvm_arch_destroy_vm(struct kvm *kvm)
 {
-	kfree(kvm->vpic);
-	kfree(kvm->vioapic);
+	struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
+
+	kfree(kvm_x86->vpic);
+	kfree(kvm_x86->vioapic);
 	kvm_free_vcpus(kvm);
 	kvm_free_physmem(kvm);
 	kfree(kvm);
diff --git a/drivers/kvm/x86.h b/drivers/kvm/x86.h
--- a/drivers/kvm/x86.h
+++ b/drivers/kvm/x86.h
@@ -155,6 +155,45 @@ struct kvm_vcpu {
 
 	struct x86_emulate_ctxt emulate_ctxt;
 };
+
+struct kvm_x86 {
+	struct kvm kvm;
+	/*
+	 * Hash table of struct kvm_mmu_page.
+	 */
+	struct list_head active_mmu_pages;
+	unsigned int n_free_mmu_pages;
+	unsigned int n_requested_mmu_pages;
+	unsigned int n_alloc_mmu_pages;
+	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
+	struct kvm_pic *vpic;
+	struct kvm_ioapic *vioapic;
+	unsigned int tss_addr;
+};
+
+static struct kvm_x86 *to_kvm_x86(struct kvm *kvm)
+{
+	return container_of(kvm, struct kvm_x86, kvm);
+}
+
+static inline struct kvm_pic *pic_irqchip(struct kvm *kvm)
+{
+	struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
+
+	return kvm_x86->vpic;
+}
+
+static inline struct kvm_ioapic *ioapic_irqchip(struct kvm *kvm)
+{
+	struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
+
+	return kvm_x86->vioapic;
+}
+
+static inline int irqchip_in_kernel(struct kvm *kvm)
+{
+	return pic_irqchip(kvm) != NULL;
+}
 
 struct kvm_x86_ops {
 	int (*cpu_has_kvm_support)(void);          /* __init */
@@ -313,7 +352,9 @@ int kvm_mmu_page_fault(struct kvm_vcpu *
 
 static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 {
-	if (unlikely(vcpu->kvm->n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
+	struct kvm_x86 *kvm_x86 = to_kvm_x86(vcpu->kvm);
+
+	if (unlikely(kvm_x86->n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
 		__kvm_mmu_free_some_pages(vcpu);
 }
 

-------------------------------------------------------------------------
This SF.net email is sponsored by: Microsoft
Defy all challenges. Microsoft(R) Visual Studio 2005.
http://clk.atdmt.com/MRT/go/vse0120000070mrt/direct/01/

^ permalink raw reply	[flat|nested] 23+ messages in thread

* [PATCH 2 of 3] Move vm_stat and apic_access_page to kvm_x86
  2007-11-20 16:57 [PATCH 0 of 3] create kvm_x86 Hollis Blanchard
  2007-11-20 16:57 ` [PATCH 1 of 3] Use kvm_x86 to hold x86 specific kvm fields Hollis Blanchard
@ 2007-11-20 16:57 ` Hollis Blanchard
  2007-11-21  9:09 ` [PATCH 0 of 3] create kvm_x86 Carsten Otte
  2 siblings, 0 replies; 23+ messages in thread
From: Hollis Blanchard @ 2007-11-20 16:57 UTC (permalink / raw)
  To: Avi Kivity
  Cc: Zhang-Rn83F4s8Lwc+UXBhvPuGgqsjOiXwFzmk,
	kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f,
	kvm-ppc-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f, Xiantao,
	carsteno-tA70FqPdS9bQT0dZR+AlfA

Signed-off-by: Hollis Blanchard <hollisb-r/Jw6+rmf7HQT0dZR+AlfA@public.gmane.org>

---
5 files changed, 18 insertions(+), 13 deletions(-)
drivers/kvm/kvm.h |    2 --
drivers/kvm/mmu.c |   14 ++++++++------
drivers/kvm/vmx.c |   11 +++++++----
drivers/kvm/x86.c |    2 +-
drivers/kvm/x86.h |    2 ++


diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -315,8 +315,6 @@ struct kvm {
 	struct kvm_io_bus mmio_bus;
 	struct kvm_io_bus pio_bus;
 	int round_robin_prev_vcpu;
-	struct page *apic_access_page;
-	struct kvm_vm_stat stat;
 };
 
 struct descriptor_table {
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -761,7 +761,7 @@ static void kvm_mmu_zap_page(struct kvm 
 	u64 *parent_pte;
 	struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
 
-	++kvm->stat.mmu_shadow_zapped;
+	++kvm_x86->stat.mmu_shadow_zapped;
 	while (page->multimapped || page->parent_pte) {
 		if (!page->multimapped)
 			parent_pte = page->parent_pte;
@@ -1236,12 +1236,14 @@ static void mmu_pte_write_new_pte(struct
 				  const void *new, int bytes,
 				  int offset_in_pte)
 {
+	struct kvm_x86 *kvm_x86 = to_kvm_x86(vcpu->kvm);
+
 	if (page->role.level != PT_PAGE_TABLE_LEVEL) {
-		++vcpu->kvm->stat.mmu_pde_zapped;
+		++kvm_x86->stat.mmu_pde_zapped;
 		return;
 	}
 
-	++vcpu->kvm->stat.mmu_pte_updated;
+	++kvm_x86->stat.mmu_pte_updated;
 	if (page->role.glevels == PT32_ROOT_LEVEL)
 		paging32_update_pte(vcpu, page, spte, new, bytes,
 				    offset_in_pte);
@@ -1277,7 +1279,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
 	int npte;
 
 	pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
-	++vcpu->kvm->stat.mmu_pte_write;
+	++kvm_x86->stat.mmu_pte_write;
 	kvm_mmu_audit(vcpu, "pre pte write");
 	if (gfn == vcpu->last_pt_write_gfn
 	    && !last_updated_pte_accessed(vcpu)) {
@@ -1311,7 +1313,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
 			pgprintk("misaligned: gpa %llx bytes %d role %x\n",
 				 gpa, bytes, page->role.word);
 			kvm_mmu_zap_page(vcpu->kvm, page);
-			++vcpu->kvm->stat.mmu_flooded;
+			++kvm_x86->stat.mmu_flooded;
 			continue;
 		}
 		page_offset = offset;
@@ -1362,7 +1364,7 @@ void __kvm_mmu_free_some_pages(struct kv
 		page = container_of(kvm_x86->active_mmu_pages.prev,
 				    struct kvm_mmu_page, link);
 		kvm_mmu_zap_page(vcpu->kvm, page);
-		++vcpu->kvm->stat.mmu_recycled;
+		++kvm_x86->stat.mmu_recycled;
 	}
 }
 
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -1468,11 +1468,12 @@ static void seg_setup(int seg)
 
 static int alloc_apic_access_page(struct kvm *kvm)
 {
+	struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
 	struct kvm_userspace_memory_region kvm_userspace_mem;
 	int r = 0;
 
 	mutex_lock(&kvm->lock);
-	if (kvm->apic_access_page)
+	if (kvm_x86->apic_access_page)
 		goto out;
 	kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
 	kvm_userspace_mem.flags = 0;
@@ -1481,7 +1482,7 @@ static int alloc_apic_access_page(struct
 	r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, 0);
 	if (r)
 		goto out;
-	kvm->apic_access_page = gfn_to_page(kvm, 0xfee00);
+	kvm_x86->apic_access_page = gfn_to_page(kvm, 0xfee00);
 out:
 	mutex_unlock(&kvm->lock);
 	return r;
@@ -1694,9 +1695,11 @@ static int vmx_vcpu_reset(struct kvm_vcp
 		vmcs_write32(TPR_THRESHOLD, 0);
 	}
 
-	if (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
+	if (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm)) {
+		struct kvm_x86 *kvm_x86 = to_kvm_x86(vmx->vcpu.kvm);
 		vmcs_write64(APIC_ACCESS_ADDR,
-			     page_to_phys(vmx->vcpu.kvm->apic_access_page));
+			     page_to_phys(kvm_x86->apic_access_page));
+	}
 
 	vmx->vcpu.cr0 = 0x60000010;
 	vmx_set_cr0(&vmx->vcpu, vmx->vcpu.cr0); /* enter rmode */
diff --git a/drivers/kvm/x86.c b/drivers/kvm/x86.c
--- a/drivers/kvm/x86.c
+++ b/drivers/kvm/x86.c
@@ -42,7 +42,7 @@
 #define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
 #define EFER_RESERVED_BITS 0xfffffffffffff2fe
 
-#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
+#define VM_STAT(x) offsetof(struct kvm_x86, stat.x), KVM_STAT_VM
 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
 
 struct kvm_x86_ops *kvm_x86_ops;
diff --git a/drivers/kvm/x86.h b/drivers/kvm/x86.h
--- a/drivers/kvm/x86.h
+++ b/drivers/kvm/x86.h
@@ -169,6 +169,8 @@ struct kvm_x86 {
 	struct kvm_pic *vpic;
 	struct kvm_ioapic *vioapic;
 	unsigned int tss_addr;
+	struct page *apic_access_page;
+	struct kvm_vm_stat stat;
 };
 
 static struct kvm_x86 *to_kvm_x86(struct kvm *kvm)

-------------------------------------------------------------------------
This SF.net email is sponsored by: Microsoft
Defy all challenges. Microsoft(R) Visual Studio 2005.
http://clk.atdmt.com/MRT/go/vse0120000070mrt/direct/01/

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH 0 of 3] create kvm_x86
  2007-11-20 16:57 [PATCH 0 of 3] create kvm_x86 Hollis Blanchard
  2007-11-20 16:57 ` [PATCH 1 of 3] Use kvm_x86 to hold x86 specific kvm fields Hollis Blanchard
  2007-11-20 16:57 ` [PATCH 2 of 3] Move vm_stat and apic_access_page to kvm_x86 Hollis Blanchard
@ 2007-11-21  9:09 ` Carsten Otte
       [not found]   ` <4743F5AE.8090707-tA70FqPdS9bQT0dZR+AlfA@public.gmane.org>
  2 siblings, 1 reply; 23+ messages in thread
From: Carsten Otte @ 2007-11-21  9:09 UTC (permalink / raw)
  To: Hollis Blanchard
  Cc: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f,
	kvm-ppc-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f,
	carsteno-tA70FqPdS9bQT0dZR+AlfA, Xiantao, Avi Kivity

Hollis Blanchard wrote:
> These patches are based on Xiantao's work to create struct kvm_x86. Patch 1 replaces his "KVM Portability split: Splitting kvm structure (V2)", and patches 2 and 3 build on it.
Looks like a clean approach with the to_kvm_x86 macro. Whole series:
Acked-by: Carsten Otte <cotte-tA70FqPdS9bQT0dZR+AlfA@public.gmane.org>

-------------------------------------------------------------------------
This SF.net email is sponsored by: Microsoft
Defy all challenges. Microsoft(R) Visual Studio 2005.
http://clk.atdmt.com/MRT/go/vse0120000070mrt/direct/01/

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH 0 of 3] create kvm_x86
       [not found]   ` <4743F5AE.8090707-tA70FqPdS9bQT0dZR+AlfA@public.gmane.org>
@ 2007-11-21  9:18     ` Avi Kivity
       [not found]       ` <4743F7DF.4000107-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
  0 siblings, 1 reply; 23+ messages in thread
From: Avi Kivity @ 2007-11-21  9:18 UTC (permalink / raw)
  To: carsteno-tA70FqPdS9bQT0dZR+AlfA
  Cc: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f,
	kvm-ppc-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f, Xiantao,
	Hollis Blanchard

Carsten Otte wrote:
> Hollis Blanchard wrote:
>   
>> These patches are based on Xiantao's work to create struct kvm_x86. Patch 1 replaces his "KVM Portability split: Splitting kvm structure (V2)", and patches 2 and 3 build on it.
>>     
> Looks like a clean approach with the to_kvm_x86 macro. Whole series:
> Acked-by: Carsten Otte <cotte-tA70FqPdS9bQT0dZR+AlfA@public.gmane.org>
>
>   

Well, I hate to say it, but the resulting code doesn't look too well
(all the kvm_x86 variables), and it's entirely my fault as I recommended
this approach.  Not like it was difficult to predict.

I'm thinking again of

    struct kvm {
        struct kvm_arch a;
        ...
    }

Where each arch defines its own kvm_arch.  Now the changes look like a
bunch of "kvm->blah" to "kvm->a.blah" conversions.

IIRC a downside was mentioned that it is easier to cause a build failure
for another arch now.

Opinions?  In theory correctness should win over style every time, no?

-- 
Do not meddle in the internals of kernels, for they are subtle and quick to panic.


-------------------------------------------------------------------------
This SF.net email is sponsored by: Microsoft
Defy all challenges. Microsoft(R) Visual Studio 2005.
http://clk.atdmt.com/MRT/go/vse0120000070mrt/direct/01/

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH 0 of 3] create kvm_x86
       [not found]       ` <4743F7DF.4000107-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
@ 2007-11-21  9:32         ` Amit Shah
  2007-11-21  9:39         ` Carsten Otte
                           ` (2 subsequent siblings)
  3 siblings, 0 replies; 23+ messages in thread
From: Amit Shah @ 2007-11-21  9:32 UTC (permalink / raw)
  To: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f
  Cc: carsteno-tA70FqPdS9bQT0dZR+AlfA,
	kvm-ppc-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f, Xiantao,
	Avi Kivity, Hollis Blanchard

* Avi Kivity wrote:
> Carsten Otte wrote:
> > Hollis Blanchard wrote:
> >> These patches are based on Xiantao's work to create struct kvm_x86.
> >> Patch 1 replaces his "KVM Portability split: Splitting kvm structure
> >> (V2)", and patches 2 and 3 build on it.
> >
> > Looks like a clean approach with the to_kvm_x86 macro. Whole series:
> > Acked-by: Carsten Otte <cotte-tA70FqPdS9bQT0dZR+AlfA@public.gmane.org>
>
> Well, I hate to say it, but the resulting code doesn't look too well
> (all the kvm_x86 variables), and it's entirely my fault as I recommended
> this approach.  Not like it was difficult to predict.
>
> I'm thinking again of
>
>     struct kvm {
>         struct kvm_arch a;
>         ...
>     }
>
> Where each arch defines its own kvm_arch.  Now the changes look like a
> bunch of "kvm->blah" to "kvm->a.blah" conversions.

I was thinking the exact same thing. Having an arch-specific kvm as the 
super-structure decouples 'struct kvm' from everything and it's extremely 
clumsy to look at and program with. container_of() for getting struct kvm 
might be better than getting the arch-specific thing.

>
> IIRC a downside was mentioned that it is easier to cause a build failure
> for another arch now.

How? I think it would work the same way in either case. Any arch-specific 
stuff happen in kvm_arch and generic stuff in kvm.

>
> Opinions?  In theory correctness should win over style every time, no?

But are those two conflicting here? And if you don't like what you're looking 
at, it's going to be tougher to make it correct without making it look better.

-------------------------------------------------------------------------
This SF.net email is sponsored by: Microsoft
Defy all challenges. Microsoft(R) Visual Studio 2005.
http://clk.atdmt.com/MRT/go/vse0120000070mrt/direct/01/

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH 0 of 3] create kvm_x86
       [not found]       ` <4743F7DF.4000107-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
  2007-11-21  9:32         ` Amit Shah
@ 2007-11-21  9:39         ` Carsten Otte
  2007-11-21  9:42         ` Zhang, Xiantao
  2007-11-28 21:15         ` Hollis Blanchard
  3 siblings, 0 replies; 23+ messages in thread
From: Carsten Otte @ 2007-11-21  9:39 UTC (permalink / raw)
  To: Avi Kivity
  Cc: carsteno-tA70FqPdS9bQT0dZR+AlfA,
	kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f,
	kvm-ppc-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f, Xiantao,
	Hollis Blanchard

Avi Kivity wrote:
> Well, I hate to say it, but the resulting code doesn't look too well
> (all the kvm_x86 variables), and it's entirely my fault as I recommended
> this approach.  Not like it was difficult to predict.
> 
> I'm thinking again of
> 
>     struct kvm {
>         struct kvm_arch a;
>         ...
>     }
> 
> Where each arch defines its own kvm_arch.  Now the changes look like a
> bunch of "kvm->blah" to "kvm->a.blah" conversions.
> 
> IIRC a downside was mentioned that it is easier to cause a build failure
> for another arch now.
> 
> Opinions?  In theory correctness should win over style every time, no?
It's a matter of taste. I favor embedding a kvm_arch too, but I 
recommend naming the member differently. "arch" maybe?

An advantage of this solution is that some (not all) architectures 
can share common code. If kvm_arch contains member foo for x86 and 
ia64, a common function could do kvm->a.foo. With the posted approach, 
we'd have
#ifdef CONFIG_ARCH_X86
	val = to_kvm_x86(kvm).foo;
#else
	val = to_kvm_ia64(kvm).foo;
#endif


-------------------------------------------------------------------------
This SF.net email is sponsored by: Microsoft
Defy all challenges. Microsoft(R) Visual Studio 2005.
http://clk.atdmt.com/MRT/go/vse0120000070mrt/direct/01/

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH 0 of 3] create kvm_x86
       [not found]       ` <4743F7DF.4000107-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
  2007-11-21  9:32         ` Amit Shah
  2007-11-21  9:39         ` Carsten Otte
@ 2007-11-21  9:42         ` Zhang, Xiantao
       [not found]           ` <42DFA526FC41B1429CE7279EF83C6BDC9E7193-wq7ZOvIWXbMAbVU2wMM1CrfspsVTdybXVpNB7YpNyf8@public.gmane.org>
  2007-11-28 21:15         ` Hollis Blanchard
  3 siblings, 1 reply; 23+ messages in thread
From: Zhang, Xiantao @ 2007-11-21  9:42 UTC (permalink / raw)
  To: Avi Kivity, carsteno-tA70FqPdS9bQT0dZR+AlfA
  Cc: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f,
	kvm-ppc-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f, Hollis Blanchard

Avi Kivity wrote:
> Carsten Otte wrote:
>> Hollis Blanchard wrote:
>> 
>>> These patches are based on Xiantao's work to create struct kvm_x86.
>>> Patch 1 replaces his "KVM Portability split: Splitting kvm
>>> structure (V2)", and patches 2 and 3 build on it.  
>>> 
>> Looks like a clean approach with the to_kvm_x86 macro. Whole series:
>> Acked-by: Carsten Otte <cotte-tA70FqPdS9bQT0dZR+AlfA@public.gmane.org>
>> 
>> 
> 
> Well, I hate to say it, but the resulting code doesn't look too well
> (all the kvm_x86 variables), and it's entirely my fault as I
> recommended this approach.  Not like it was difficult to predict.
> 
> I'm thinking again of
> 
>     struct kvm {
>         struct kvm_arch a;
>         ...
>     }

Agree. The kvm_arch approach is natural, and easy to understand. I
prefer it here.

> Where each arch defines its own kvm_arch.  Now the changes look like a
> bunch of "kvm->blah" to "kvm->a.blah" conversions.

container_of approach has similar troubles, and has to use to_kvm_x86 to
translate "kvm" to "kvm_x86" for every arch-specific field access.

> IIRC a downside was mentioned that it is easier to cause a build
> failure for another arch now.

I can't figure out why it can cause more build failures.

-------------------------------------------------------------------------
This SF.net email is sponsored by: Microsoft
Defy all challenges. Microsoft(R) Visual Studio 2005.
http://clk.atdmt.com/MRT/go/vse0120000070mrt/direct/01/

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH 0 of 3] create kvm_x86
       [not found]           ` <42DFA526FC41B1429CE7279EF83C6BDC9E7193-wq7ZOvIWXbMAbVU2wMM1CrfspsVTdybXVpNB7YpNyf8@public.gmane.org>
@ 2007-11-21  9:55             ` Avi Kivity
  0 siblings, 0 replies; 23+ messages in thread
From: Avi Kivity @ 2007-11-21  9:55 UTC (permalink / raw)
  To: Zhang, Xiantao
  Cc: carsteno-tA70FqPdS9bQT0dZR+AlfA,
	kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f,
	kvm-ppc-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f, Hollis Blanchard

Zhang, Xiantao wrote:
>> IIRC a downside was mentioned that it is easier to cause a build
>> failure for another arch now.
>>     
>
> I can't figure out why it can cause more build failure.
>   

Nothing stops you from doing kvm->arch.blah in kvm_main.c, but blah will 
not be present for all architectures.  It would compile for you but not 
others.

With to_kvm_x86(), the function and arch specific structure aren't even 
visible on kvm_main.c, so you can't make that mistake.

IMO this is a serious drawback to kvm->arch.blah.  But kvm_x86->blah 
doesn't look good.

Maybe if the variable was named 'kvm' it would look better.

-- 
error compiling committee.c: too many arguments to function


-------------------------------------------------------------------------
This SF.net email is sponsored by: Microsoft
Defy all challenges. Microsoft(R) Visual Studio 2005.
http://clk.atdmt.com/MRT/go/vse0120000070mrt/direct/01/

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH 0 of 3] create kvm_x86
       [not found]       ` <4743F7DF.4000107-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
                           ` (2 preceding siblings ...)
  2007-11-21  9:42         ` Zhang, Xiantao
@ 2007-11-28 21:15         ` Hollis Blanchard
  2007-11-30  7:26           ` Avi Kivity
  3 siblings, 1 reply; 23+ messages in thread
From: Hollis Blanchard @ 2007-11-28 21:15 UTC (permalink / raw)
  To: Avi Kivity
  Cc: carsteno-tA70FqPdS9bQT0dZR+AlfA,
	kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f,
	kvm-ppc-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f, Xiantao

On Wed, 2007-11-21 at 11:18 +0200, Avi Kivity wrote:
> Carsten Otte wrote:
> > Hollis Blanchard wrote:
> >   
> >> These patches are based on Xiantao's work to create struct kvm_x86. Patch 1 replaces his "KVM Portability split: Splitting kvm structure (V2)", and patches 2 and 3 build on it.
> >>     
> > Looks like a clean approach with the to_kvm_x86 macro. Whole series:
> > Acked-by: Carsten Otte <cotte-tA70FqPdS9bQT0dZR+AlfA@public.gmane.org>
> >
> >   
> 
> Well, I hate to say it, but the resulting code doesn't look too well
> (all the kvm_x86 variables), and it's entirely my fault as I recommended
> this approach.  Not like it was difficult to predict.

I guess we still have reached no conclusion on this question?

> I'm thinking again of
> 
>     struct kvm {
>         struct kvm_arch a;
>         ...
>     }
> 
> Where each arch defines its own kvm_arch.  Now the changes look like a
> bunch of "kvm->blah" to "kvm->a.blah" conversions.

The simplest "container" changes would be a bunch of "kvm->blah" to
"to_x86(kvm)->blah" conversions. How is that worse? If it's the
"kvm_x86" variables you're objecting to, it would be easy enough to
remove them in favor of this approach.

> IIRC a downside was mentioned that it is easier to cause a build failure
> for another arch now.
> 
> Opinions?  In theory correctness should win over style every time, no?

Which approach is not correct?

-- 
Hollis Blanchard
IBM Linux Technology Center


-------------------------------------------------------------------------
SF.Net email is sponsored by: The Future of Linux Business White Paper
from Novell.  From the desktop to the data center, Linux is going
mainstream.  Let it simplify your IT future.
http://altfarm.mediaplex.com/ad/ck/8857-50307-18918-4

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH 1 of 3] Use kvm_x86 to hold x86 specific kvm fields
  2007-11-20 16:57 ` [PATCH 1 of 3] Use kvm_x86 to hold x86 specific kvm fields Hollis Blanchard
@ 2007-11-28 21:20   ` Anthony Liguori
       [not found]     ` <474DDB97.6090400-rdkfGonbjUSkNkDKm+mE6A@public.gmane.org>
  0 siblings, 1 reply; 23+ messages in thread
From: Anthony Liguori @ 2007-11-28 21:20 UTC (permalink / raw)
  To: Hollis Blanchard
  Cc: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f,
	kvm-ppc-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f,
	carsteno-tA70FqPdS9bQT0dZR+AlfA, Xiantao, Avi Kivity

Hollis Blanchard wrote:
> Signed-off-by: Zhang xiantao <xiantao.zhang-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
> Signed-off-by: Hollis Blanchard <hollisb-r/Jw6+rmf7HQT0dZR+AlfA@public.gmane.org>
> ---
> Changes from Xiantao's patch:
> - Fix whitespace
> - Reorder variables to avoid stack padding
> - Leave descriptor_table relocation for another patch
>
> 8 files changed, 132 insertions(+), 85 deletions(-)
> drivers/kvm/ioapic.c   |    7 ++-
> drivers/kvm/irq.h      |    1 
> drivers/kvm/kvm.h      |   26 --------------
> drivers/kvm/kvm_main.c |    9 ++---
> drivers/kvm/mmu.c      |   85 ++++++++++++++++++++++++++++--------------------
> drivers/kvm/vmx.c      |    9 +++--
> drivers/kvm/x86.c      |   37 ++++++++++++--------
> drivers/kvm/x86.h      |   43 +++++++++++++++++++++++-
>
>
> diff --git a/drivers/kvm/ioapic.c b/drivers/kvm/ioapic.c
> --- a/drivers/kvm/ioapic.c
> +++ b/drivers/kvm/ioapic.c
> @@ -276,7 +276,9 @@ static int get_eoi_gsi(struct kvm_ioapic
>  
>  void kvm_ioapic_update_eoi(struct kvm *kvm, int vector)
>  {
> -	struct kvm_ioapic *ioapic = kvm->vioapic;
> +	struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
> +
> +	struct kvm_ioapic *ioapic = kvm_x86->vioapic;
>  	union ioapic_redir_entry *ent;
>  	int gsi;
>  
> @@ -386,11 +388,12 @@ int kvm_ioapic_init(struct kvm *kvm)
>  int kvm_ioapic_init(struct kvm *kvm)
>  {
>  	struct kvm_ioapic *ioapic;
> +	struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
>  
>  	ioapic = kzalloc(sizeof(struct kvm_ioapic), GFP_KERNEL);
>  	if (!ioapic)
>  		return -ENOMEM;
> -	kvm->vioapic = ioapic;
> +	kvm_x86->vioapic = ioapic;
>  	kvm_ioapic_reset(ioapic);
>  	ioapic->dev.read = ioapic_mmio_read;
>  	ioapic->dev.write = ioapic_mmio_write;
>   

If the ioapic depends on x86_specific information, why is it taking a 
generic kvm structure instead of the x86 one?

> diff --git a/drivers/kvm/irq.h b/drivers/kvm/irq.h
> --- a/drivers/kvm/irq.h
> +++ b/drivers/kvm/irq.h
> @@ -23,6 +23,7 @@
>  #define __IRQ_H
>  
>  #include "kvm.h"
> +#include "x86.h"
>  
>  typedef void irq_request_func(void *opaque, int level);
>  
> diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
> --- a/drivers/kvm/kvm.h
> +++ b/drivers/kvm/kvm.h
> @@ -309,41 +309,15 @@ struct kvm {
>  	int nmemslots;
>  	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
>  					KVM_PRIVATE_MEM_SLOTS];
> -	/*
> -	 * Hash table of struct kvm_mmu_page.
> -	 */
> -	struct list_head active_mmu_pages;
> -	unsigned int n_free_mmu_pages;
> -	unsigned int n_requested_mmu_pages;
> -	unsigned int n_alloc_mmu_pages;
> -	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
>  	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
>  	struct list_head vm_list;
>  	struct file *filp;
>  	struct kvm_io_bus mmio_bus;
>  	struct kvm_io_bus pio_bus;
> -	struct kvm_pic *vpic;
> -	struct kvm_ioapic *vioapic;
>  	int round_robin_prev_vcpu;
> -	unsigned int tss_addr;
>  	struct page *apic_access_page;
>  	struct kvm_vm_stat stat;
>  };
> -
> -static inline struct kvm_pic *pic_irqchip(struct kvm *kvm)
> -{
> -	return kvm->vpic;
> -}
> -
> -static inline struct kvm_ioapic *ioapic_irqchip(struct kvm *kvm)
> -{
> -	return kvm->vioapic;
> -}
> -
> -static inline int irqchip_in_kernel(struct kvm *kvm)
> -{
> -	return pic_irqchip(kvm) != NULL;
> -}
>  
>  struct descriptor_table {
>  	u16 limit;
> diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
> --- a/drivers/kvm/kvm_main.c
> +++ b/drivers/kvm/kvm_main.c
> @@ -232,6 +232,7 @@ int __kvm_set_memory_region(struct kvm *
>  	unsigned long i;
>  	struct kvm_memory_slot *memslot;
>  	struct kvm_memory_slot old, new;
> +	struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
>  
>  	r = -EINVAL;
>  	/* General sanity checks */
> @@ -332,18 +333,18 @@ int __kvm_set_memory_region(struct kvm *
>  	if (mem->slot >= kvm->nmemslots)
>  		kvm->nmemslots = mem->slot + 1;
>  
> -	if (!kvm->n_requested_mmu_pages) {
> +	if (!kvm_x86->n_requested_mmu_pages) {
>   

Why is anything in kvm_main.c accessing a member of kvm_x86?  Something 
is broken in the abstraction if that's the case.

>  		unsigned int n_pages;
>  
>  		if (npages) {
>  			n_pages = npages * KVM_PERMILLE_MMU_PAGES / 1000;
> -			kvm_mmu_change_mmu_pages(kvm, kvm->n_alloc_mmu_pages +
> -						 n_pages);
> +			kvm_mmu_change_mmu_pages(kvm,
> +			                  kvm_x86->n_alloc_mmu_pages + n_pages);
>  		} else {
>  			unsigned int nr_mmu_pages;
>  
>  			n_pages = old.npages * KVM_PERMILLE_MMU_PAGES / 1000;
> -			nr_mmu_pages = kvm->n_alloc_mmu_pages - n_pages;
> +			nr_mmu_pages = kvm_x86->n_alloc_mmu_pages - n_pages;
>  			nr_mmu_pages = max(nr_mmu_pages,
>  				        (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
>  			kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
> diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
> --- a/drivers/kvm/mmu.c
> +++ b/drivers/kvm/mmu.c
> @@ -526,12 +526,14 @@ static void kvm_mmu_free_page(struct kvm
>  static void kvm_mmu_free_page(struct kvm *kvm,
>  			      struct kvm_mmu_page *page_head)
>  {
> +	struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
> +
>  	ASSERT(is_empty_shadow_page(page_head->spt));
>  	list_del(&page_head->link);
>  	__free_page(virt_to_page(page_head->spt));
>  	__free_page(virt_to_page(page_head->gfns));
>  	kfree(page_head);
> -	++kvm->n_free_mmu_pages;
> +	++kvm_x86->n_free_mmu_pages;
>  }

The mmu functions should probably take kvm_x86 since this won't be 
shared with any other architecture.

Regards,

Anthony Liguori

>  
>  static unsigned kvm_page_table_hashfn(gfn_t gfn)
> @@ -543,8 +545,9 @@ static struct kvm_mmu_page *kvm_mmu_allo
>  					       u64 *parent_pte)
>  {
>  	struct kvm_mmu_page *page;
> -
> -	if (!vcpu->kvm->n_free_mmu_pages)
> +	struct kvm_x86 *kvm_x86 = to_kvm_x86(vcpu->kvm);
> +
> +	if (!kvm_x86->n_free_mmu_pages)
>  		return NULL;
>  
>  	page = mmu_memory_cache_alloc(&vcpu->mmu_page_header_cache,
> @@ -552,12 +555,12 @@ static struct kvm_mmu_page *kvm_mmu_allo
>  	page->spt = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
>  	page->gfns = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
>  	set_page_private(virt_to_page(page->spt), (unsigned long)page);
> -	list_add(&page->link, &vcpu->kvm->active_mmu_pages);
> +	list_add(&page->link, &kvm_x86->active_mmu_pages);
>  	ASSERT(is_empty_shadow_page(page->spt));
>  	page->slot_bitmap = 0;
>  	page->multimapped = 0;
>  	page->parent_pte = parent_pte;
> -	--vcpu->kvm->n_free_mmu_pages;
> +	--kvm_x86->n_free_mmu_pages;
>  	return page;
>  }
>  
> @@ -643,10 +646,11 @@ static struct kvm_mmu_page *kvm_mmu_look
>  	struct hlist_head *bucket;
>  	struct kvm_mmu_page *page;
>  	struct hlist_node *node;
> +	struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
>  
>  	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
>  	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
> -	bucket = &kvm->mmu_page_hash[index];
> +	bucket = &kvm_x86->mmu_page_hash[index];
>  	hlist_for_each_entry(page, node, bucket, hash_link)
>  		if (page->gfn == gfn && !page->role.metaphysical) {
>  			pgprintk("%s: found role %x\n",
> @@ -670,6 +674,7 @@ static struct kvm_mmu_page *kvm_mmu_get_
>  	struct hlist_head *bucket;
>  	struct kvm_mmu_page *page;
>  	struct hlist_node *node;
> +	struct kvm_x86 *kvm_x86 = to_kvm_x86(vcpu->kvm);
>  
>  	role.word = 0;
>  	role.glevels = vcpu->mmu.root_level;
> @@ -684,7 +689,7 @@ static struct kvm_mmu_page *kvm_mmu_get_
>  	pgprintk("%s: looking gfn %lx role %x\n", __FUNCTION__,
>  		 gfn, role.word);
>  	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
> -	bucket = &vcpu->kvm->mmu_page_hash[index];
> +	bucket = &kvm_x86->mmu_page_hash[index];
>  	hlist_for_each_entry(page, node, bucket, hash_link)
>  		if (page->gfn == gfn && page->role.word == role.word) {
>  			mmu_page_add_parent_pte(vcpu, page, parent_pte);
> @@ -754,6 +759,7 @@ static void kvm_mmu_zap_page(struct kvm 
>  			     struct kvm_mmu_page *page)
>  {
>  	u64 *parent_pte;
> +	struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
>  
>  	++kvm->stat.mmu_shadow_zapped;
>  	while (page->multimapped || page->parent_pte) {
> @@ -775,7 +781,7 @@ static void kvm_mmu_zap_page(struct kvm 
>  		hlist_del(&page->hash_link);
>  		kvm_mmu_free_page(kvm, page);
>  	} else
> -		list_move(&page->link, &kvm->active_mmu_pages);
> +		list_move(&page->link, &kvm_x86->active_mmu_pages);
>  	kvm_mmu_reset_last_pte_updated(kvm);
>  }
>  
> @@ -785,36 +791,39 @@ static void kvm_mmu_zap_page(struct kvm 
>   */
>  void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
>  {
> +	struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
> +
>  	/*
>  	 * If we set the number of mmu pages to be smaller be than the
>  	 * number of actived pages , we must to free some mmu pages before we
>  	 * change the value
>  	 */
>  
> -	if ((kvm->n_alloc_mmu_pages - kvm->n_free_mmu_pages) >
> +	if ((kvm_x86->n_alloc_mmu_pages - kvm_x86->n_free_mmu_pages) >
>  	    kvm_nr_mmu_pages) {
> -		int n_used_mmu_pages = kvm->n_alloc_mmu_pages
> -				       - kvm->n_free_mmu_pages;
> +		int n_used_mmu_pages = kvm_x86->n_alloc_mmu_pages
> +				       - kvm_x86->n_free_mmu_pages;
>  
>  		while (n_used_mmu_pages > kvm_nr_mmu_pages) {
>  			struct kvm_mmu_page *page;
>  
> -			page = container_of(kvm->active_mmu_pages.prev,
> +			page = container_of(kvm_x86->active_mmu_pages.prev,
>  					    struct kvm_mmu_page, link);
>  			kvm_mmu_zap_page(kvm, page);
>  			n_used_mmu_pages--;
>  		}
> -		kvm->n_free_mmu_pages = 0;
> +		kvm_x86->n_free_mmu_pages = 0;
>  	}
>  	else
> -		kvm->n_free_mmu_pages += kvm_nr_mmu_pages
> -					 - kvm->n_alloc_mmu_pages;
> -
> -	kvm->n_alloc_mmu_pages = kvm_nr_mmu_pages;
> +		kvm_x86->n_free_mmu_pages += kvm_nr_mmu_pages
> +					 - kvm_x86->n_alloc_mmu_pages;
> +
> +	kvm_x86->n_alloc_mmu_pages = kvm_nr_mmu_pages;
>  }
>  
>  static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
>  {
> +	struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
>  	unsigned index;
>  	struct hlist_head *bucket;
>  	struct kvm_mmu_page *page;
> @@ -824,7 +833,7 @@ static int kvm_mmu_unprotect_page(struct
>  	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
>  	r = 0;
>  	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
> -	bucket = &kvm->mmu_page_hash[index];
> +	bucket = &kvm_x86->mmu_page_hash[index];
>  	hlist_for_each_entry_safe(page, node, n, bucket, hash_link)
>  		if (page->gfn == gfn && !page->role.metaphysical) {
>  			pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn,
> @@ -1251,6 +1260,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
>  void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
>  		       const u8 *new, int bytes)
>  {
> +	struct kvm_x86 *kvm_x86 = to_kvm_x86(vcpu->kvm);
>  	gfn_t gfn = gpa >> PAGE_SHIFT;
>  	struct kvm_mmu_page *page;
>  	struct hlist_node *node, *n;
> @@ -1280,7 +1290,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
>  		vcpu->last_pte_updated = NULL;
>  	}
>  	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
> -	bucket = &vcpu->kvm->mmu_page_hash[index];
> +	bucket = &kvm_x86->mmu_page_hash[index];
>  	hlist_for_each_entry_safe(page, node, n, bucket, hash_link) {
>  		if (page->gfn != gfn || page->role.metaphysical)
>  			continue;
> @@ -1344,10 +1354,12 @@ int kvm_mmu_unprotect_page_virt(struct k
>  
>  void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
>  {
> -	while (vcpu->kvm->n_free_mmu_pages < KVM_REFILL_PAGES) {
> +	struct kvm_x86 *kvm_x86 = to_kvm_x86(vcpu->kvm);
> +
> +	while (kvm_x86->n_free_mmu_pages < KVM_REFILL_PAGES) {
>  		struct kvm_mmu_page *page;
>  
> -		page = container_of(vcpu->kvm->active_mmu_pages.prev,
> +		page = container_of(kvm_x86->active_mmu_pages.prev,
>  				    struct kvm_mmu_page, link);
>  		kvm_mmu_zap_page(vcpu->kvm, page);
>  		++vcpu->kvm->stat.mmu_recycled;
> @@ -1397,9 +1409,10 @@ static void free_mmu_pages(struct kvm_vc
>  static void free_mmu_pages(struct kvm_vcpu *vcpu)
>  {
>  	struct kvm_mmu_page *page;
> -
> -	while (!list_empty(&vcpu->kvm->active_mmu_pages)) {
> -		page = container_of(vcpu->kvm->active_mmu_pages.next,
> +	struct kvm_x86 *kvm_x86 = to_kvm_x86(vcpu->kvm);
> +
> +	while (!list_empty(&kvm_x86->active_mmu_pages)) {
> +		page = container_of(kvm_x86->active_mmu_pages.next,
>  				    struct kvm_mmu_page, link);
>  		kvm_mmu_zap_page(vcpu->kvm, page);
>  	}
> @@ -1408,15 +1421,16 @@ static void free_mmu_pages(struct kvm_vc
>  
>  static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
>  {
> +	struct kvm_x86 *kvm_x86 = to_kvm_x86(vcpu->kvm);
>  	struct page *page;
>  	int i;
>  
>  	ASSERT(vcpu);
>  
> -	if (vcpu->kvm->n_requested_mmu_pages)
> -		vcpu->kvm->n_free_mmu_pages = vcpu->kvm->n_requested_mmu_pages;
> +	if (kvm_x86->n_requested_mmu_pages)
> +		kvm_x86->n_free_mmu_pages = kvm_x86->n_requested_mmu_pages;
>  	else
> -		vcpu->kvm->n_free_mmu_pages = vcpu->kvm->n_alloc_mmu_pages;
> +		kvm_x86->n_free_mmu_pages = kvm_x86->n_alloc_mmu_pages;
>  	/*
>  	 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
>  	 * Therefore we need to allocate shadow page tables in the first
> @@ -1464,8 +1478,9 @@ void kvm_mmu_slot_remove_write_access(st
>  void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
>  {
>  	struct kvm_mmu_page *page;
> -
> -	list_for_each_entry(page, &kvm->active_mmu_pages, link) {
> +	struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
> +
> +	list_for_each_entry(page, &kvm_x86->active_mmu_pages, link) {
>  		int i;
>  		u64 *pt;
>  
> @@ -1483,8 +1498,9 @@ void kvm_mmu_zap_all(struct kvm *kvm)
>  void kvm_mmu_zap_all(struct kvm *kvm)
>  {
>  	struct kvm_mmu_page *page, *node;
> -
> -	list_for_each_entry_safe(page, node, &kvm->active_mmu_pages, link)
> +	struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
> +
> +	list_for_each_entry_safe(page, node, &kvm_x86->active_mmu_pages, link)
>  		kvm_mmu_zap_page(kvm, page);
>  
>  	kvm_flush_remote_tlbs(kvm);
> @@ -1637,7 +1653,7 @@ static int count_writable_mappings(struc
>  	struct kvm_mmu_page *page;
>  	int i;
>  
> -	list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
> +	list_for_each_entry(page, &kvm_x86->active_mmu_pages, link) {
>  		u64 *pt = page->spt;
>  
>  		if (page->role.level != PT_PAGE_TABLE_LEVEL)
> @@ -1672,8 +1688,9 @@ static void audit_write_protection(struc
>  	struct kvm_memory_slot *slot;
>  	unsigned long *rmapp;
>  	gfn_t gfn;
> -
> -	list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
> +	struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
> +
> +	list_for_each_entry(page, &kvm_x86->active_mmu_pages, link) {
>  		if (page->role.metaphysical)
>  			continue;
>  
> diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
> --- a/drivers/kvm/vmx.c
> +++ b/drivers/kvm/vmx.c
> @@ -1141,12 +1141,14 @@ static void enter_pmode(struct kvm_vcpu 
>  
>  static gva_t rmode_tss_base(struct kvm *kvm)
>  {
> -	if (!kvm->tss_addr) {
> +	struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
> +
> +	if (!kvm_x86->tss_addr) {
>  		gfn_t base_gfn = kvm->memslots[0].base_gfn +
>  				 kvm->memslots[0].npages - 3;
>  		return base_gfn << PAGE_SHIFT;
>  	}
> -	return kvm->tss_addr;
> +	return kvm_x86->tss_addr;
>  }
>  
>  static void fix_rmode_seg(int seg, struct kvm_save_segment *save)
> @@ -1768,6 +1770,7 @@ static void do_interrupt_requests(struct
>  
>  static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
>  {
> +	struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
>  	int ret;
>  	struct kvm_userspace_memory_region tss_mem = {
>  		.slot = 8,
> @@ -1779,7 +1782,7 @@ static int vmx_set_tss_addr(struct kvm *
>  	ret = kvm_set_memory_region(kvm, &tss_mem, 0);
>  	if (ret)
>  		return ret;
> -	kvm->tss_addr = addr;
> +	kvm_x86->tss_addr = addr;
>  	return 0;
>  }
>  
> diff --git a/drivers/kvm/x86.c b/drivers/kvm/x86.c
> --- a/drivers/kvm/x86.c
> +++ b/drivers/kvm/x86.c
> @@ -815,13 +815,15 @@ static int kvm_vm_ioctl_set_nr_mmu_pages
>  static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
>  					  u32 kvm_nr_mmu_pages)
>  {
> +	struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
> +
>  	if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
>  		return -EINVAL;
>  
>  	mutex_lock(&kvm->lock);
>  
>  	kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
> -	kvm->n_requested_mmu_pages = kvm_nr_mmu_pages;
> +	kvm_x86->n_requested_mmu_pages = kvm_nr_mmu_pages;
>  
>  	mutex_unlock(&kvm->lock);
>  	return 0;
> @@ -829,7 +831,9 @@ static int kvm_vm_ioctl_set_nr_mmu_pages
>  
>  static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
>  {
> -	return kvm->n_alloc_mmu_pages;
> +	struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
> +
> +	return kvm_x86->n_alloc_mmu_pages;
>  }
>  
>  /*
> @@ -972,6 +976,7 @@ long kvm_arch_vm_ioctl(struct file *filp
>  		       unsigned int ioctl, unsigned long arg)
>  {
>  	struct kvm *kvm = filp->private_data;
> +	struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
>  	void __user *argp = (void __user *)arg;
>  	int r = -EINVAL;
>  
> @@ -1018,12 +1023,12 @@ long kvm_arch_vm_ioctl(struct file *filp
>  	}
>  	case KVM_CREATE_IRQCHIP:
>  		r = -ENOMEM;
> -		kvm->vpic = kvm_create_pic(kvm);
> -		if (kvm->vpic) {
> +		kvm_x86->vpic = kvm_create_pic(kvm);
> +		if (kvm_x86->vpic) {
>  			r = kvm_ioapic_init(kvm);
>  			if (r) {
> -				kfree(kvm->vpic);
> -				kvm->vpic = NULL;
> +				kfree(kvm_x86->vpic);
> +				kvm_x86->vpic = NULL;
>  				goto out;
>  			}
>  		} else
> @@ -1041,7 +1046,7 @@ long kvm_arch_vm_ioctl(struct file *filp
>  				kvm_pic_set_irq(pic_irqchip(kvm),
>  					irq_event.irq,
>  					irq_event.level);
> -			kvm_ioapic_set_irq(kvm->vioapic,
> +			kvm_ioapic_set_irq(kvm_x86->vioapic,
>  					irq_event.irq,
>  					irq_event.level);
>  			mutex_unlock(&kvm->lock);
> @@ -2603,14 +2608,14 @@ void kvm_arch_vcpu_uninit(struct kvm_vcp
>  
>  struct  kvm *kvm_arch_create_vm(void)
>  {
> -	struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
> -
> -	if (!kvm)
> +	struct kvm_x86 *kvm_x86 = kzalloc(sizeof(struct kvm_x86), GFP_KERNEL);
> +
> +	if (!kvm_x86)
>  		return ERR_PTR(-ENOMEM);
>  
> -	INIT_LIST_HEAD(&kvm->active_mmu_pages);
> -
> -	return kvm;
> +	INIT_LIST_HEAD(&kvm_x86->active_mmu_pages);
> +
> +	return &kvm_x86->kvm;
>  }
>  
>  static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
> @@ -2641,8 +2646,10 @@ static void kvm_free_vcpus(struct kvm *k
>  
>  void kvm_arch_destroy_vm(struct kvm *kvm)
>  {
> -	kfree(kvm->vpic);
> -	kfree(kvm->vioapic);
> +	struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
> +
> +	kfree(kvm_x86->vpic);
> +	kfree(kvm_x86->vioapic);
>  	kvm_free_vcpus(kvm);
>  	kvm_free_physmem(kvm);
>  	kfree(kvm);
> diff --git a/drivers/kvm/x86.h b/drivers/kvm/x86.h
> --- a/drivers/kvm/x86.h
> +++ b/drivers/kvm/x86.h
> @@ -155,6 +155,45 @@ struct kvm_vcpu {
>  
>  	struct x86_emulate_ctxt emulate_ctxt;
>  };
> +
> +struct kvm_x86 {
> +	struct kvm kvm;
> +	/*
> +	 * Hash table of struct kvm_mmu_page.
> +	 */
> +	struct list_head active_mmu_pages;
> +	unsigned int n_free_mmu_pages;
> +	unsigned int n_requested_mmu_pages;
> +	unsigned int n_alloc_mmu_pages;
> +	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
> +	struct kvm_pic *vpic;
> +	struct kvm_ioapic *vioapic;
> +	unsigned int tss_addr;
> +};
> +
> +static struct kvm_x86 *to_kvm_x86(struct kvm *kvm)
> +{
> +	return container_of(kvm, struct kvm_x86, kvm);
> +}
> +
> +static inline struct kvm_pic *pic_irqchip(struct kvm *kvm)
> +{
> +	struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
> +
> +	return kvm_x86->vpic;
> +}
> +
> +static inline struct kvm_ioapic *ioapic_irqchip(struct kvm *kvm)
> +{
> +	struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
> +
> +	return kvm_x86->vioapic;
> +}
> +
> +static inline int irqchip_in_kernel(struct kvm *kvm)
> +{
> +	return pic_irqchip(kvm) != NULL;
> +}
>  
>  struct kvm_x86_ops {
>  	int (*cpu_has_kvm_support)(void);          /* __init */
> @@ -313,7 +352,9 @@ int kvm_mmu_page_fault(struct kvm_vcpu *
>  
>  static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
>  {
> -	if (unlikely(vcpu->kvm->n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
> +	struct kvm_x86 *kvm_x86 = to_kvm_x86(vcpu->kvm);
> +
> +	if (unlikely(kvm_x86->n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
>  		__kvm_mmu_free_some_pages(vcpu);
>  }
>  
>
> -------------------------------------------------------------------------
> This SF.net email is sponsored by: Microsoft
> Defy all challenges. Microsoft(R) Visual Studio 2005.
> http://clk.atdmt.com/MRT/go/vse0120000070mrt/direct/01/
> _______________________________________________
> kvm-devel mailing list
> kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f@public.gmane.org
> https://lists.sourceforge.net/lists/listinfo/kvm-devel
>
>   


-------------------------------------------------------------------------
SF.Net email is sponsored by: The Future of Linux Business White Paper
from Novell.  From the desktop to the data center, Linux is going
mainstream.  Let it simplify your IT future.
http://altfarm.mediaplex.com/ad/ck/8857-50307-18918-4

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH 1 of 3] Use kvm_x86 to hold x86 specific kvm fields
       [not found]     ` <474DDB97.6090400-rdkfGonbjUSkNkDKm+mE6A@public.gmane.org>
@ 2007-11-28 21:43       ` Hollis Blanchard
  0 siblings, 0 replies; 23+ messages in thread
From: Hollis Blanchard @ 2007-11-28 21:43 UTC (permalink / raw)
  To: Anthony Liguori
  Cc: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f,
	kvm-ppc-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f,
	carsteno-tA70FqPdS9bQT0dZR+AlfA, Avi Kivity, Xiantao

On Wed, 2007-11-28 at 15:20 -0600, Anthony Liguori wrote:
> Hollis Blanchard wrote:
> > diff --git a/drivers/kvm/ioapic.c b/drivers/kvm/ioapic.c
> > --- a/drivers/kvm/ioapic.c
> > +++ b/drivers/kvm/ioapic.c
> > @@ -276,7 +276,9 @@ static int get_eoi_gsi(struct kvm_ioapic
> >  
> >  void kvm_ioapic_update_eoi(struct kvm *kvm, int vector)
> >  {
> > -     struct kvm_ioapic *ioapic = kvm->vioapic;
> > +     struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
> > +
> > +     struct kvm_ioapic *ioapic = kvm_x86->vioapic;
> >       union ioapic_redir_entry *ent;
> >       int gsi;
> >  
> > @@ -386,11 +388,12 @@ int kvm_ioapic_init(struct kvm *kvm)
> >  int kvm_ioapic_init(struct kvm *kvm)
> >  {
> >       struct kvm_ioapic *ioapic;
> > +     struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
> >  
> >       ioapic = kzalloc(sizeof(struct kvm_ioapic), GFP_KERNEL);
> >       if (!ioapic)
> >               return -ENOMEM;
> > -     kvm->vioapic = ioapic;
> > +     kvm_x86->vioapic = ioapic;
> >       kvm_ioapic_reset(ioapic);
> >       ioapic->dev.read = ioapic_mmio_read;
> >       ioapic->dev.write = ioapic_mmio_write;
> >   
> 
> If the ioapic depends on x86_specific information, why is it taking a 
> generic kvm structure instead of the x86 one?

Fine, I can move the conversion up into x86.c .

> > diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
> > --- a/drivers/kvm/kvm_main.c
> > +++ b/drivers/kvm/kvm_main.c
> > @@ -232,6 +232,7 @@ int __kvm_set_memory_region(struct kvm *
> >       unsigned long i;
> >       struct kvm_memory_slot *memslot;
> >       struct kvm_memory_slot old, new;
> > +     struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
> >  
> >       r = -EINVAL;
> >       /* General sanity checks */
> > @@ -332,18 +333,18 @@ int __kvm_set_memory_region(struct kvm *
> >       if (mem->slot >= kvm->nmemslots)
> >               kvm->nmemslots = mem->slot + 1;
> >  
> > -     if (!kvm->n_requested_mmu_pages) {
> > +     if (!kvm_x86->n_requested_mmu_pages) {
> >   
> 
> Why is anything in kvm_main.c accessing a member of kvm_x86?
> Something is broken in the abstraction if that's the case.

kvm_main.c was chock full of x86 code. The fact that there's still a
little bit remaining (even after all our work so far) shouldn't be
surprising.

> >               unsigned int n_pages;
> >  
> >               if (npages) {
> >                       n_pages = npages * KVM_PERMILLE_MMU_PAGES /
> 1000;
> > -                     kvm_mmu_change_mmu_pages(kvm,
> kvm->n_alloc_mmu_pages +
> > -                                              n_pages);
> > +                     kvm_mmu_change_mmu_pages(kvm,
> > +                                       kvm_x86->n_alloc_mmu_pages +
> n_pages);
> >               } else {
> >                       unsigned int nr_mmu_pages;
> >  
> >                       n_pages = old.npages *
> KVM_PERMILLE_MMU_PAGES / 1000;
> > -                     nr_mmu_pages = kvm->n_alloc_mmu_pages -
> n_pages;
> > +                     nr_mmu_pages = kvm_x86->n_alloc_mmu_pages -
> n_pages;
> >                       nr_mmu_pages = max(nr_mmu_pages,
> >                                       (unsigned int)
> KVM_MIN_ALLOC_MMU_PAGES);
> >                       kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
> > diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
> > --- a/drivers/kvm/mmu.c
> > +++ b/drivers/kvm/mmu.c
> > @@ -526,12 +526,14 @@ static void kvm_mmu_free_page(struct kvm
> >  static void kvm_mmu_free_page(struct kvm *kvm,
> >                             struct kvm_mmu_page *page_head)
> >  {
> > +     struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
> > +
> >       ASSERT(is_empty_shadow_page(page_head->spt));
> >       list_del(&page_head->link);
> >       __free_page(virt_to_page(page_head->spt));
> >       __free_page(virt_to_page(page_head->gfns));
> >       kfree(page_head);
> > -     ++kvm->n_free_mmu_pages;
> > +     ++kvm_x86->n_free_mmu_pages;
> >  }
> 
> The mmu functions should probably take kvm_x86 since this won't be 
> shared with any other architecture.

Sure, and that can be addressed in the future. However, changing all of
that code at once results in a far larger patch, and this patch works
as-is.

-- 
Hollis Blanchard
IBM Linux Technology Center


-------------------------------------------------------------------------
SF.Net email is sponsored by: The Future of Linux Business White Paper
from Novell.  From the desktop to the data center, Linux is going
mainstream.  Let it simplify your IT future.
http://altfarm.mediaplex.com/ad/ck/8857-50307-18918-4

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH 0 of 3] create kvm_x86
  2007-11-28 21:15         ` Hollis Blanchard
@ 2007-11-30  7:26           ` Avi Kivity
       [not found]             ` <474FBB17.6080800-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
  0 siblings, 1 reply; 23+ messages in thread
From: Avi Kivity @ 2007-11-30  7:26 UTC (permalink / raw)
  To: Hollis Blanchard
  Cc: carsteno-tA70FqPdS9bQT0dZR+AlfA,
	kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f,
	kvm-ppc-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f, Xiantao

Hollis Blanchard wrote:
> On Wed, 2007-11-21 at 11:18 +0200, Avi Kivity wrote:
>   
>> Carsten Otte wrote:
>>     
>>> Hollis Blanchard wrote:
>>>   
>>>       
>>>> These patches are based on Xiantao's work to create struct kvm_x86. Patch 1 replaces his "KVM Portability split: Splitting kvm structure (V2)", and patches 2 and 3 build on it.
>>>>     
>>>>         
>>> Looks like a clean approach with the to_kvm_x86 macro. Whole series:
>>> Acked-by: Carsten Otte <cotte-tA70FqPdS9bQT0dZR+AlfA@public.gmane.org>
>>>
>>>   
>>>       
>> Well, I hate to say it, but the resulting code doesn't look too well
>> (all the kvm_x86 variables), and it's entirely my fault as I recommended
>> this approach.  Not like it was difficult to predict.
>>     
>
> I guess we still have reached no conclusion on this question?
>
>   

Right.  Thanks for re-raising it.

>> I'm thinking again of
>>
>>     struct kvm {
>>         struct kvm_arch a;
>>         ...
>>     }
>>
>> Where each arch defines its own kvm_arch.  Now the changes look like a
>> bunch of "kvm->blah" to "kvm->a.blah" conversions.
>>     
>
> The simplest "container" changes would be a bunch of "kvm->blah" to
> "to_x86(kvm)->blah" conversions. How is that worse? If it's the
> "kvm_x86" variables you're objecting to, it would be easy enough to
> remove them in favor of this approach.
>
>   

It's horribly obfuscated.  I'm accessing a member of a structure but
that is hidden in a no-op function call.


>> IIRC a downside was mentioned that it is easier to cause a build failure
>> for another arch now.
>>
>> Opinions?  In theory correctness should win over style every time, no?
>>     
>
> Which approach is not correct?
>
>   

The nicer one:

   struct kvm {
        struct kvm_arch arch;
        // common fields
   }

or the similar

   struct kvm {
       struct kvm_common s;
       // arch specific fields
   }

"not correct" is an exaggeration; more prone to breaking the build is
more accurate.  Maybe we can set up an hourly cross-compile to
compensate.  Code clarity is important to me.

-- 
Do not meddle in the internals of kernels, for they are subtle and quick to panic.


-------------------------------------------------------------------------
SF.Net email is sponsored by: The Future of Linux Business White Paper
from Novell.  From the desktop to the data center, Linux is going
mainstream.  Let it simplify your IT future.
http://altfarm.mediaplex.com/ad/ck/8857-50307-18918-4

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH 0 of 3] create kvm_x86
       [not found]             ` <474FBB17.6080800-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
@ 2007-11-30  8:36               ` Zhang, Xiantao
       [not found]                 ` <42DFA526FC41B1429CE7279EF83C6BDCA397C1-wq7ZOvIWXbMAbVU2wMM1CrfspsVTdybXVpNB7YpNyf8@public.gmane.org>
  0 siblings, 1 reply; 23+ messages in thread
From: Zhang, Xiantao @ 2007-11-30  8:36 UTC (permalink / raw)
  To: Avi Kivity, Hollis Blanchard
  Cc: carsteno-tA70FqPdS9bQT0dZR+AlfA,
	kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f,
	kvm-ppc-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f

Avi Kivity wrote:
> Hollis Blanchard wrote:
>> On Wed, 2007-11-21 at 11:18 +0200, Avi Kivity wrote:

>>>> 
>>> Well, I hate to say it, but the resulting code doesn't look too well
>>> (all the kvm_x86 variables), and it's entirely my fault as I
>>> recommended this approach.  Not like it was difficult to predict.
>>> 
>> 
>> I guess we still have reached no conclusion on this question?
>> 
>> 
> 
> Right.  Thanks for re-raising it.

Thanks too. I have almost done the rebase work for IA64 support, maybe
we should work out a solution for that :)

>>> I'm thinking again of
>>> 
>>>     struct kvm {
>>>         struct kvm_arch a;
>>>         ...
>>>     }
>>> 
>>> Where each arch defines its own kvm_arch.  Now the changes look
>>> like a bunch of "kvm->blah" to "kvm->a.blah" conversions.
>>> 

>> 
>> 
> 
> The nicer one:
> 
>    struct kvm {
>         struct kvm_arch arch;
>         // common fields
>    }

I prefer this one, seems it is more direct and readable. Same thinking
about kvm_vcpu structure:)

-------------------------------------------------------------------------
SF.Net email is sponsored by: The Future of Linux Business White Paper
from Novell.  From the desktop to the data center, Linux is going
mainstream.  Let it simplify your IT future.
http://altfarm.mediaplex.com/ad/ck/8857-50307-18918-4

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH 0 of 3] create kvm_x86
       [not found]                 ` <42DFA526FC41B1429CE7279EF83C6BDCA397C1-wq7ZOvIWXbMAbVU2wMM1CrfspsVTdybXVpNB7YpNyf8@public.gmane.org>
@ 2007-11-30  9:04                   ` Avi Kivity
       [not found]                     ` <474FD234.5060203-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
  0 siblings, 1 reply; 23+ messages in thread
From: Avi Kivity @ 2007-11-30  9:04 UTC (permalink / raw)
  To: Zhang, Xiantao
  Cc: carsteno-tA70FqPdS9bQT0dZR+AlfA,
	kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f,
	kvm-ppc-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f, Hollis Blanchard

Zhang, Xiantao wrote:
>
>>>       
>> The nicer one:
>>
>>    struct kvm {
>>         struct kvm_arch arch;
>>         // common fields
>>    }
>>     
>
> I prefer this one, seems it is more direct and readable. Same thinking
> about kvm_vcpu structure:)
>   

I agree, kvm_vcpu should use the same method.

-- 
Do not meddle in the internals of kernels, for they are subtle and quick to panic.


-------------------------------------------------------------------------
SF.Net email is sponsored by: The Future of Linux Business White Paper
from Novell.  From the desktop to the data center, Linux is going
mainstream.  Let it simplify your IT future.
http://altfarm.mediaplex.com/ad/ck/8857-50307-18918-4

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH 0 of 3] create kvm_x86
       [not found]                     ` <474FD234.5060203-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
@ 2007-11-30 11:42                       ` Zhang, Xiantao
  2007-11-30 18:43                       ` Hollis Blanchard
  1 sibling, 0 replies; 23+ messages in thread
From: Zhang, Xiantao @ 2007-11-30 11:42 UTC (permalink / raw)
  To: Avi Kivity
  Cc: carsteno-tA70FqPdS9bQT0dZR+AlfA,
	kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f,
	kvm-ppc-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f, Hollis Blanchard

Avi Kivity wrote:
> Zhang, Xiantao wrote:
>> 
>>>> 
>>> The nicer one:
>>> 
>>>    struct kvm {
>>>         struct kvm_arch arch;
>>>         // common fields
>>>    }
>>> 
>> 
>> I prefer this one, seems it is more direct and readable. Same
>> thinking about kvm_vcpu structure:) 
>> 
> 
> I agree, kvm_vcpu should use the same method.

OK, I will rebase the kvm structure split patches according to this
method. :-)

-------------------------------------------------------------------------
SF.Net email is sponsored by: The Future of Linux Business White Paper
from Novell.  From the desktop to the data center, Linux is going
mainstream.  Let it simplify your IT future.
http://altfarm.mediaplex.com/ad/ck/8857-50307-18918-4

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH 0 of 3] create kvm_x86
       [not found]                     ` <474FD234.5060203-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
  2007-11-30 11:42                       ` Zhang, Xiantao
@ 2007-11-30 18:43                       ` Hollis Blanchard
  2007-11-30 20:31                         ` Avi Kivity
  1 sibling, 1 reply; 23+ messages in thread
From: Hollis Blanchard @ 2007-11-30 18:43 UTC (permalink / raw)
  To: Avi Kivity
  Cc: carsteno-tA70FqPdS9bQT0dZR+AlfA,
	kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f,
	kvm-ppc-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f, Zhang,  Xiantao

On Fri, 2007-11-30 at 11:04 +0200, Avi Kivity wrote:
> Zhang, Xiantao wrote:
> >
> >>>       
> >> The nicer one:
> >>
> >>    struct kvm {
> >>         struct kvm_arch arch;
> >>         // common fields
> >>    }
> >>     
> >
> > I prefer this one, seems it is more direct and readable. Same thinking
> > about kvm_vcpu structure:)
> >   
> 
> I agree, kvm_vcpu should use the same method.

And we will convert vcpu_vmx/vcpu_svm as well?

-- 
Hollis Blanchard
IBM Linux Technology Center


-------------------------------------------------------------------------
SF.Net email is sponsored by: The Future of Linux Business White Paper
from Novell.  From the desktop to the data center, Linux is going
mainstream.  Let it simplify your IT future.
http://altfarm.mediaplex.com/ad/ck/8857-50307-18918-4

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH 0 of 3] create kvm_x86
  2007-11-30 18:43                       ` Hollis Blanchard
@ 2007-11-30 20:31                         ` Avi Kivity
       [not found]                           ` <4750732B.7070502-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
  0 siblings, 1 reply; 23+ messages in thread
From: Avi Kivity @ 2007-11-30 20:31 UTC (permalink / raw)
  To: Hollis Blanchard
  Cc: carsteno-tA70FqPdS9bQT0dZR+AlfA,
	kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f,
	kvm-ppc-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f, Zhang,  Xiantao

Hollis Blanchard wrote:
> On Fri, 2007-11-30 at 11:04 +0200, Avi Kivity wrote:
>   
>> Zhang, Xiantao wrote:
>>     
>>>>>       
>>>>>           
>>>> The nicer one:
>>>>
>>>>    struct kvm {
>>>>         struct kvm_arch arch;
>>>>         // common fields
>>>>    }
>>>>     
>>>>         
>>> I prefer this one, seems it is more direct and readable. Same thinking
>>> about kvm_vcpu structure:)
>>>   
>>>       
>> I agree, kvm_vcpu should use the same method.
>>     
>
> And we will convert vcpu_vmx/vcpu_svm as well?
>
>   

These cannot use the same method, since we need to support both vmx and
svm in the same binary.  The arch specific members aren't the same size,
nor do the symbols they use have the same visibility.

-- 
Do not meddle in the internals of kernels, for they are subtle and quick to panic.


-------------------------------------------------------------------------
SF.Net email is sponsored by: The Future of Linux Business White Paper
from Novell.  From the desktop to the data center, Linux is going
mainstream.  Let it simplify your IT future.
http://altfarm.mediaplex.com/ad/ck/8857-50307-18918-4

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH 0 of 3] create kvm_x86
       [not found]                           ` <4750732B.7070502-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
@ 2007-11-30 21:09                             ` Hollis Blanchard
  2007-11-30 21:43                               ` Anthony Liguori
  2007-12-01 10:02                               ` Avi Kivity
  0 siblings, 2 replies; 23+ messages in thread
From: Hollis Blanchard @ 2007-11-30 21:09 UTC (permalink / raw)
  To: Avi Kivity
  Cc: carsteno-tA70FqPdS9bQT0dZR+AlfA,
	kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f,
	kvm-ppc-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f, Zhang,  Xiantao


On Fri, 2007-11-30 at 22:31 +0200, Avi Kivity wrote:
> Hollis Blanchard wrote:
> > On Fri, 2007-11-30 at 11:04 +0200, Avi Kivity wrote:
> >   
> >> Zhang, Xiantao wrote:
> >>     
> >>>>>       
> >>>>>           
> >>>> The nicer one:
> >>>>
> >>>>    struct kvm {
> >>>>         struct kvm_arch arch;
> >>>>         // common fields
> >>>>    }
> >>>>     
> >>>>         
> >>> I prefer this one, seems it is more direct and readable. Same thinking
> >>> about kvm_vcpu structure:)
> >>>   
> >>>       
> >> I agree, kvm_vcpu should use the same method.
> >>     
> >
> > And we will convert vcpu_vmx/vcpu_svm as well?
> >
> >   
> 
> These cannot use the same method, since we need to support both vmx and
> svm in the same binary.  The arch specific members aren't the same size,
> nor do the symbols they use have the same visibility.

I have never understood this. Why on earth do you need to support VMX
and SVM in the same binary? For example, when would you overwrite
kvm_x86_ops after initialization? If you wouldn't, then why are you
using function pointers instead of the linker?

PowerPC will also need to support multiple processor types, and so I
expect to have one kvm_arch structure for each. That also means struct
kvm_arch must be the *last* member in struct kvm, which is not how it is
shown above.

-- 
Hollis Blanchard
IBM Linux Technology Center


-------------------------------------------------------------------------
SF.Net email is sponsored by: The Future of Linux Business White Paper
from Novell.  From the desktop to the data center, Linux is going
mainstream.  Let it simplify your IT future.
http://altfarm.mediaplex.com/ad/ck/8857-50307-18918-4

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH 0 of 3] create kvm_x86
  2007-11-30 21:09                             ` Hollis Blanchard
@ 2007-11-30 21:43                               ` Anthony Liguori
       [not found]                                 ` <47508408.8050202-rdkfGonbjUSkNkDKm+mE6A@public.gmane.org>
  2007-12-01 10:02                               ` Avi Kivity
  1 sibling, 1 reply; 23+ messages in thread
From: Anthony Liguori @ 2007-11-30 21:43 UTC (permalink / raw)
  To: Hollis Blanchard
  Cc: carsteno-tA70FqPdS9bQT0dZR+AlfA,
	kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f,
	kvm-ppc-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f, Zhang,  Xiantao,
	Avi Kivity

Hollis Blanchard wrote:
> On Fri, 2007-11-30 at 22:31 +0200, Avi Kivity wrote:
>   
>> Hollis Blanchard wrote:
>>     
>>> On Fri, 2007-11-30 at 11:04 +0200, Avi Kivity wrote:
>>>   
>>>       
>>>> Zhang, Xiantao wrote:
>>>>     
>>>>         
>>>>>>>       
>>>>>>>           
>>>>>>>               
>>>>>> The nicer one:
>>>>>>
>>>>>>    struct kvm {
>>>>>>         struct kvm_arch arch;
>>>>>>         // common fields
>>>>>>    }
>>>>>>     
>>>>>>         
>>>>>>             
>>>>> I prefer this one, seems it is more direct and readable. Same thinking
>>>>> about kvm_vcpu structure:)
>>>>>   
>>>>>       
>>>>>           
>>>> I agree, kvm_vcpu should use the same method.
>>>>     
>>>>         
>>> And we will convert vcpu_vmx/vcpu_svm as well?
>>>
>>>   
>>>       
>> These cannot use the same method, since we need to support both vmx and
>> svm in the same binary.  The arch specific members aren't the same size,
>> nor do the symbols they use have the same visibility.
>>     
>
> I have never understood this. Why on earth do you need to support VMX
> and SVM in the same binary? For example, when would you overwrite
> kvm_x86_ops after initialization? If you wouldn't, then why are you
> using function pointers instead of the linker?
>   

It's necessary for the distros to be able to ship both AMD and Intel 
support in a single binary.  We aren't talking, in general, about a 
single static binary but instead loadable modules.  There may be some 
cases where it's useful to support both in a static kernel binary.

If you used the linker instead of function pointers, it would be 
impossible to build a static kernel binary that supported both.  Plus, 
depmod would get very confused because two modules would be providing 
the same symbols.  It can be made to work, but it's kind of funky.

> PowerPC will also need to support multiple processor types, and so I
> expect to have one kvm_arch structure for each. That also means struct
> kvm_arch must be the *last* member in struct kvm, which is not how it is
> shown above.
>   

Instead of having a kvm.ko and a kvm-ppc-440.ko, you probably should 
have a kvm.ko and a kvm-ppc.ko and then build the kvm-ppc.ko based on 
the board.  You would never build multiple kvm-ppc-XXX.ko modules in the 
same binary right?

Regards,

Anthony Liguori

>   



-------------------------------------------------------------------------
SF.Net email is sponsored by: The Future of Linux Business White Paper
from Novell.  From the desktop to the data center, Linux is going
mainstream.  Let it simplify your IT future.
http://altfarm.mediaplex.com/ad/ck/8857-50307-18918-4

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH 0 of 3] create kvm_x86
       [not found]                                 ` <47508408.8050202-rdkfGonbjUSkNkDKm+mE6A@public.gmane.org>
@ 2007-11-30 22:08                                   ` Hollis Blanchard
  2007-12-01 10:04                                     ` Avi Kivity
  0 siblings, 1 reply; 23+ messages in thread
From: Hollis Blanchard @ 2007-11-30 22:08 UTC (permalink / raw)
  To: Anthony Liguori
  Cc: carsteno-tA70FqPdS9bQT0dZR+AlfA,
	kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f,
	kvm-ppc-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f, Zhang,  Xiantao,
	Avi Kivity

On Fri, 2007-11-30 at 15:43 -0600, Anthony Liguori wrote:
> Hollis Blanchard wrote:
> > On Fri, 2007-11-30 at 22:31 +0200, Avi Kivity wrote:
> >      
> >> These cannot use the same method, since we need to support both vmx and
> >> svm in the same binary.  The arch specific members aren't the same size,
> >> nor do the symbols they use have the same visibility.
> >>     
> >
> > I have never understood this. Why on earth do you need to support VMX
> > and SVM in the same binary? For example, when would you overwrite
> > kvm_x86_ops after initialization? If you wouldn't, then why are you
> > using function pointers instead of the linker?
> >   
> 
> It's necessary for the distros to be able to ship both AMD and Intel 
> support in a single binary.  We aren't talking, in general, about a 
> single static binary but instead loadable modules.  There maybe some 
> cases where it's useful to support both in a static kernel binary.

I think the monolithic case is the one I overlooked. As long as
everything is a module, there should be no problem loading the
appropriate module for the host processor type. However, once you want
to support both processor types in a monolithic kernel, that's where you
need the function pointer flexibility.

> If you used the linker instead of function pointers, it would be 
> impossible to build a static kernel binary that supported both.  Plus, 
> depmod would get very confused because two modules would be providing 
> the same symbols.  It can be made to work, but it's kind of funky.
> 
> > PowerPC will also need to support multiple processor types, and so I
> > expect to have one kvm_arch structure for each. That also means struct
> > kvm_arch must be the *last* member in struct kvm, which is not how it is
> > shown above.
> >   
> 
> Instead of having a kvm.ko and a kvm-ppc-440.ko, you probably should 
> have a kvm.ko and a kvm-ppc.ko and then build the kvm-ppc.ko based on 
> the board.  You would never build multiple kvm-ppc-XXX.ko modules in the 
> same binary right?

I hope to have multiple kvm-ppc-XXX.ko modules loaded simultaneously to
support different guest types on the same host. I haven't yet figured
out what that interface should look like, but obviously linking is
preferable to function pointers where feasible.

-- 
Hollis Blanchard
IBM Linux Technology Center


-------------------------------------------------------------------------
SF.Net email is sponsored by: The Future of Linux Business White Paper
from Novell.  From the desktop to the data center, Linux is going
mainstream.  Let it simplify your IT future.
http://altfarm.mediaplex.com/ad/ck/8857-50307-18918-4

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH 0 of 3] create kvm_x86
  2007-11-30 21:09                             ` Hollis Blanchard
  2007-11-30 21:43                               ` Anthony Liguori
@ 2007-12-01 10:02                               ` Avi Kivity
  1 sibling, 0 replies; 23+ messages in thread
From: Avi Kivity @ 2007-12-01 10:02 UTC (permalink / raw)
  To: Hollis Blanchard
  Cc: carsteno-tA70FqPdS9bQT0dZR+AlfA,
	kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f,
	kvm-ppc-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f, Zhang,  Xiantao

Hollis Blanchard wrote:
>>
>> These cannot use the same method, since we need to support both vmx and
>> svm in the same binary.  The arch specific members aren't the same size,
>> nor do the symbols they use have the same visibility.
>>     
>
> I have never understood this. Why on earth do you need to support VMX
> and SVM in the same binary? For example, when would you overwrite
> kvm_x86_ops after initialization? If you wouldn't, then why are you
> using function pointers instead of the linker?
>   

Consider a non-modular build; common code needs to call either 
svm_vcpu_load() or vmx_vcpu_load() depending on the current hardware.

I imagine it could be done using linker tricks (duplicating the common 
code to be linked twice, once per subarch, and leaving a stub to detect 
which duplicate needs to be used), but I never found either the time or 
necessity to do them.

> PowerPC will also need to support multiple processor types, and so I
> expect to have one kvm_arch structure for each. That also means struct
> kvm_arch must be the *last* member in struct kvm, which is not how it is
> shown above.
>   

Do you plan to support multiple processor types in a single binary as 
well? Or require modules and compile them multiple times? (that was how 
AMD support was initially added; it was changed before merging).

-- 
Any sufficiently difficult bug is indistinguishable from a feature.


-------------------------------------------------------------------------
SF.Net email is sponsored by: The Future of Linux Business White Paper
from Novell.  From the desktop to the data center, Linux is going
mainstream.  Let it simplify your IT future.
http://altfarm.mediaplex.com/ad/ck/8857-50307-18918-4

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH 0 of 3] create kvm_x86
  2007-11-30 22:08                                   ` Hollis Blanchard
@ 2007-12-01 10:04                                     ` Avi Kivity
  0 siblings, 0 replies; 23+ messages in thread
From: Avi Kivity @ 2007-12-01 10:04 UTC (permalink / raw)
  To: Hollis Blanchard
  Cc: carsteno-tA70FqPdS9bQT0dZR+AlfA,
	kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f,
	kvm-ppc-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f, Zhang,  Xiantao

Hollis Blanchard wrote:
> I hope to have multiple kvm-ppc-XXX.ko modules loaded simultaneously to
> support different guest types on the same host. I haven't yet figured
> out what that interface should look like, but obviously linking is
> preferable to function pointers where feasible.
>   

At least on modern x86, indirect calls are quite cheap. If it can be 
done cleanly, of course direct linking is preferable.

-- 
Any sufficiently difficult bug is indistinguishable from a feature.


-------------------------------------------------------------------------
SF.Net email is sponsored by: The Future of Linux Business White Paper
from Novell.  From the desktop to the data center, Linux is going
mainstream.  Let it simplify your IT future.
http://altfarm.mediaplex.com/ad/ck/8857-50307-18918-4

^ permalink raw reply	[flat|nested] 23+ messages in thread

end of thread, other threads:[~2007-12-01 10:04 UTC | newest]

Thread overview: 23+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2007-11-20 16:57 [PATCH 0 of 3] create kvm_x86 Hollis Blanchard
2007-11-20 16:57 ` [PATCH 1 of 3] Use kvm_x86 to hold x86 specific kvm fields Hollis Blanchard
2007-11-28 21:20   ` Anthony Liguori
     [not found]     ` <474DDB97.6090400-rdkfGonbjUSkNkDKm+mE6A@public.gmane.org>
2007-11-28 21:43       ` Hollis Blanchard
2007-11-20 16:57 ` [PATCH 2 of 3] Move vm_stat and apic_access_page to kvm_x86 Hollis Blanchard
2007-11-21  9:09 ` [PATCH 0 of 3] create kvm_x86 Carsten Otte
     [not found]   ` <4743F5AE.8090707-tA70FqPdS9bQT0dZR+AlfA@public.gmane.org>
2007-11-21  9:18     ` Avi Kivity
     [not found]       ` <4743F7DF.4000107-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
2007-11-21  9:32         ` Amit Shah
2007-11-21  9:39         ` Carsten Otte
2007-11-21  9:42         ` Zhang, Xiantao
     [not found]           ` <42DFA526FC41B1429CE7279EF83C6BDC9E7193-wq7ZOvIWXbMAbVU2wMM1CrfspsVTdybXVpNB7YpNyf8@public.gmane.org>
2007-11-21  9:55             ` Avi Kivity
2007-11-28 21:15         ` Hollis Blanchard
2007-11-30  7:26           ` Avi Kivity
     [not found]             ` <474FBB17.6080800-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
2007-11-30  8:36               ` Zhang, Xiantao
     [not found]                 ` <42DFA526FC41B1429CE7279EF83C6BDCA397C1-wq7ZOvIWXbMAbVU2wMM1CrfspsVTdybXVpNB7YpNyf8@public.gmane.org>
2007-11-30  9:04                   ` Avi Kivity
     [not found]                     ` <474FD234.5060203-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
2007-11-30 11:42                       ` Zhang, Xiantao
2007-11-30 18:43                       ` Hollis Blanchard
2007-11-30 20:31                         ` Avi Kivity
     [not found]                           ` <4750732B.7070502-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
2007-11-30 21:09                             ` Hollis Blanchard
2007-11-30 21:43                               ` Anthony Liguori
     [not found]                                 ` <47508408.8050202-rdkfGonbjUSkNkDKm+mE6A@public.gmane.org>
2007-11-30 22:08                                   ` Hollis Blanchard
2007-12-01 10:04                                     ` Avi Kivity
2007-12-01 10:02                               ` Avi Kivity

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox