public inbox for kvm@vger.kernel.org
 help / color / mirror / Atom feed
* [PATCH] Split kvm_vcpu to support new archs.
@ 2007-10-18  7:34 Zhang, Xiantao
       [not found] ` <42DFA526FC41B1429CE7279EF83C6BDC809A6A-wq7ZOvIWXbMAbVU2wMM1CrfspsVTdybXVpNB7YpNyf8@public.gmane.org>
  0 siblings, 1 reply; 15+ messages in thread
From: Zhang, Xiantao @ 2007-10-18  7:34 UTC (permalink / raw)
  To: Avi Kivity; +Cc: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f

Hi Avi,
According to your and the community's suggestions, I split the kvm_vcpu
structure into two parts. To avoid being too intrusive into the current
code, one is the common part, which is defined as a macro, and the other
is the arch-specific part.
In addition, I have a suggestion to re-organize the header files, such as
kvm.h and x86.h.  IMO, kvm.h would be changed to kvm_comm.h and only include
common code for all archs. Then x86.h would be changed to kvm-x86.h and
linked as kvm.h at compile time.  So, other archs would also define their own
kvm-xx.h to accommodate their arch-specific structures.  What are your ideas?
(This idea is not included in this patch.)

>From 34cebd3a3fc0afba4df511219912bc3277e2a8c7 Mon Sep 17 00:00:00 2001
From: Zhang Xiantao <xiantao.zhang-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
Date: Thu, 18 Oct 2007 12:51:02 +0800
Subject: [PATCH] First step to split kvm_vcpu. Currently, we just use an
macro to define the common fields in kvm_vcpu for all archs, and all
archs need to define its own kvm_vcpu struct.
Signed-off-by: Zhang Xiantao <xiantao.zhang-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
---
 drivers/kvm/ioapic.c      |    2 +
 drivers/kvm/irq.c         |    1 +
 drivers/kvm/kvm.h         |  166
+++++++-------------------------------------
 drivers/kvm/kvm_main.c    |    4 +-
 drivers/kvm/lapic.c       |    2 +
 drivers/kvm/mmu.c         |    1 +
 drivers/kvm/svm.c         |    2 +-
 drivers/kvm/vmx.c         |    1 +
 drivers/kvm/x86.h         |  128 ++++++++++++++++++++++++++++++++++
 drivers/kvm/x86_emulate.c |    1 +
 10 files changed, 166 insertions(+), 142 deletions(-)

diff --git a/drivers/kvm/ioapic.c b/drivers/kvm/ioapic.c
index 1a5e59a..cf1d50b 100644
--- a/drivers/kvm/ioapic.c
+++ b/drivers/kvm/ioapic.c
@@ -27,6 +27,8 @@
  */
 
 #include "kvm.h"
+#include "x86.h"
+
 #include <linux/kvm.h>
 #include <linux/mm.h>
 #include <linux/highmem.h>
diff --git a/drivers/kvm/irq.c b/drivers/kvm/irq.c
index 851995d..22bfeee 100644
--- a/drivers/kvm/irq.c
+++ b/drivers/kvm/irq.c
@@ -22,6 +22,7 @@
 #include <linux/module.h>
 
 #include "kvm.h"
+#include "x86.h"
 #include "irq.h"
 
 /*
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index c38e4a3..28e664e 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -308,93 +308,37 @@ struct kvm_io_device *kvm_io_bus_find_dev(struct
kvm_io_bus *bus, gpa_t addr);
 void kvm_io_bus_register_dev(struct kvm_io_bus *bus,
 			     struct kvm_io_device *dev);
 
-struct kvm_vcpu {
-	struct kvm *kvm;
-	struct preempt_notifier preempt_notifier;
-	int vcpu_id;
-	struct mutex mutex;
-	int   cpu;
-	u64 host_tsc;
-	struct kvm_run *run;
-	int interrupt_window_open;
-	int guest_mode;
-	unsigned long requests;
-	unsigned long irq_summary; /* bit vector: 1 per word in
irq_pending */
-	DECLARE_BITMAP(irq_pending, KVM_NR_INTERRUPTS);
-	unsigned long regs[NR_VCPU_REGS]; /* for rsp:
vcpu_load_rsp_rip() */
-	unsigned long rip;      /* needs vcpu_load_rsp_rip() */
-
-	unsigned long cr0;
-	unsigned long cr2;
-	unsigned long cr3;
-	unsigned long cr4;
-	unsigned long cr8;
-	u64 pdptrs[4]; /* pae */
-	u64 shadow_efer;
-	u64 apic_base;
-	struct kvm_lapic *apic;    /* kernel irqchip context */
-#define VCPU_MP_STATE_RUNNABLE          0
-#define VCPU_MP_STATE_UNINITIALIZED     1
-#define VCPU_MP_STATE_INIT_RECEIVED     2
-#define VCPU_MP_STATE_SIPI_RECEIVED     3
-#define VCPU_MP_STATE_HALTED            4
-	int mp_state;
-	int sipi_vector;
-	u64 ia32_misc_enable_msr;
-
-	struct kvm_mmu mmu;
-
-	struct kvm_mmu_memory_cache mmu_pte_chain_cache;
-	struct kvm_mmu_memory_cache mmu_rmap_desc_cache;
-	struct kvm_mmu_memory_cache mmu_page_cache;
-	struct kvm_mmu_memory_cache mmu_page_header_cache;
-
-	gfn_t last_pt_write_gfn;
-	int   last_pt_write_count;
-	u64  *last_pte_updated;
-
-	struct kvm_guest_debug guest_debug;
-
-	struct i387_fxsave_struct host_fx_image;
-	struct i387_fxsave_struct guest_fx_image;
-	int fpu_active;
-	int guest_fpu_loaded;
-
-	int mmio_needed;
-	int mmio_read_completed;
-	int mmio_is_write;
-	int mmio_size;
-	unsigned char mmio_data[8];
+#ifdef CONFIG_HAS_IOMEM
+#define KVM_VCPU_MMIO \
+	int mmio_needed;\
+	int mmio_read_completed;\
+	int mmio_is_write;\
+	int mmio_size;\
+	unsigned char mmio_data[8];\
 	gpa_t mmio_phys_addr;
-	gva_t mmio_fault_cr2;
-	struct kvm_pio_request pio;
-	void *pio_data;
-	wait_queue_head_t wq;
 
-	int sigset_active;
-	sigset_t sigset;
+#else
+#define KVM_VCPU_MMIO
 
-	struct kvm_stat stat;
+#endif
 
-	struct {
-		int active;
-		u8 save_iopl;
-		struct kvm_save_segment {
-			u16 selector;
-			unsigned long base;
-			u32 limit;
-			u32 ar;
-		} tr, es, ds, fs, gs;
-	} rmode;
-	int halt_request; /* real mode on Intel only */
-
-	int cpuid_nent;
-	struct kvm_cpuid_entry cpuid_entries[KVM_MAX_CPUID_ENTRIES];
-
-	/* emulate context */
-
-	struct x86_emulate_ctxt emulate_ctxt;
-};
+#define KVM_VCPU_COMM \
+	struct kvm *kvm; \
+	struct preempt_notifier preempt_notifier;\
+	int vcpu_id;\
+	struct mutex mutex;\
+	int   cpu;\
+	struct kvm_run *run;\
+	int guest_mode;\
+	unsigned long requests;\
+	struct kvm_guest_debug guest_debug;\
+	int fpu_active; \
+	int guest_fpu_loaded;\
+	wait_queue_head_t wq;\
+	int sigset_active;\
+	sigset_t sigset;\
+	struct kvm_stat stat;\
+	KVM_VCPU_MMIO
 
 struct kvm_mem_alias {
 	gfn_t base_gfn;
@@ -677,62 +621,6 @@ static inline void kvm_guest_exit(void)
 	current->flags &= ~PF_VCPU;
 }
 
-static inline int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
-				     u32 error_code)
-{
-	return vcpu->mmu.page_fault(vcpu, gva, error_code);
-}
-
-static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
-{
-	if (unlikely(vcpu->kvm->n_free_mmu_pages <
KVM_MIN_FREE_MMU_PAGES))
-		__kvm_mmu_free_some_pages(vcpu);
-}
-
-static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
-{
-	if (likely(vcpu->mmu.root_hpa != INVALID_PAGE))
-		return 0;
-
-	return kvm_mmu_load(vcpu);
-}
-
-static inline int is_long_mode(struct kvm_vcpu *vcpu)
-{
-#ifdef CONFIG_X86_64
-	return vcpu->shadow_efer & EFER_LME;
-#else
-	return 0;
-#endif
-}
-
-static inline int is_pae(struct kvm_vcpu *vcpu)
-{
-	return vcpu->cr4 & X86_CR4_PAE;
-}
-
-static inline int is_pse(struct kvm_vcpu *vcpu)
-{
-	return vcpu->cr4 & X86_CR4_PSE;
-}
-
-static inline int is_paging(struct kvm_vcpu *vcpu)
-{
-	return vcpu->cr0 & X86_CR0_PG;
-}
-
-static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot
*slot)
-{
-	return slot - kvm->memslots;
-}
-
-static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
-{
-	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);
-
-	return (struct kvm_mmu_page *)page_private(page);
-}
-
 static inline u16 read_fs(void)
 {
 	u16 seg;
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index aab465d..9ff049c 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -2272,7 +2272,7 @@ static int kvm_vcpu_ioctl_run(struct kvm_vcpu
*vcpu, struct kvm_run *kvm_run)
 		if (r)
 			goto out;
 	}
-
+#if CONFIG_HAS_IOMEM
 	if (vcpu->mmio_needed) {
 		memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
 		vcpu->mmio_read_completed = 1;
@@ -2287,7 +2287,7 @@ static int kvm_vcpu_ioctl_run(struct kvm_vcpu
*vcpu, struct kvm_run *kvm_run)
 			goto out;
 		}
 	}
-
+#endif
 	if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL) {
 		kvm_x86_ops->cache_regs(vcpu);
 		vcpu->regs[VCPU_REGS_RAX] = kvm_run->hypercall.ret;
diff --git a/drivers/kvm/lapic.c b/drivers/kvm/lapic.c
index 1eca94f..68cab3f 100644
--- a/drivers/kvm/lapic.c
+++ b/drivers/kvm/lapic.c
@@ -18,6 +18,8 @@
  */
 
 #include "kvm.h"
+#include "x86.h"
+
 #include <linux/kvm.h>
 #include <linux/mm.h>
 #include <linux/highmem.h>
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index f52604a..750c74a 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -19,6 +19,7 @@
 
 #include "vmx.h"
 #include "kvm.h"
+#include "x86.h"
 
 #include <linux/types.h>
 #include <linux/string.h>
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
index e8ab3b2..87e2448 100644
--- a/drivers/kvm/svm.c
+++ b/drivers/kvm/svm.c
@@ -13,7 +13,7 @@
  * the COPYING file in the top-level directory.
  *
  */
-
+#include "x86.h"
 #include "kvm_svm.h"
 #include "x86_emulate.h"
 #include "irq.h"
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index c8b8045..4690433 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -16,6 +16,7 @@
  */
 
 #include "kvm.h"
+#include "x86.h"
 #include "x86_emulate.h"
 #include "irq.h"
 #include "vmx.h"
diff --git a/drivers/kvm/x86.h b/drivers/kvm/x86.h
index 1e2f71b..266dc2c 100644
--- a/drivers/kvm/x86.h
+++ b/drivers/kvm/x86.h
@@ -13,4 +13,132 @@
 
 #include "kvm.h"
 
+#include <linux/types.h>
+#include <linux/mm.h>
+
+#include <linux/kvm.h>
+#include <linux/kvm_para.h>
+
+struct kvm_vcpu {
+	KVM_VCPU_COMM;
+	u64 host_tsc;
+	int interrupt_window_open;
+	unsigned long irq_summary; /* bit vector: 1 per word in
irq_pending */
+	DECLARE_BITMAP(irq_pending, KVM_NR_INTERRUPTS);
+	unsigned long regs[NR_VCPU_REGS]; /* for rsp:
vcpu_load_rsp_rip() */
+	unsigned long rip;      /* needs vcpu_load_rsp_rip() */
+
+	unsigned long cr0;
+	unsigned long cr2;
+	unsigned long cr3;
+	unsigned long cr4;
+	unsigned long cr8;
+	u64 pdptrs[4]; /* pae */
+	u64 shadow_efer;
+	u64 apic_base;
+	struct kvm_lapic *apic;    /* kernel irqchip context */
+#define VCPU_MP_STATE_RUNNABLE          0
+#define VCPU_MP_STATE_UNINITIALIZED     1
+#define VCPU_MP_STATE_INIT_RECEIVED     2
+#define VCPU_MP_STATE_SIPI_RECEIVED     3
+#define VCPU_MP_STATE_HALTED            4
+	int mp_state;
+	int sipi_vector;
+	u64 ia32_misc_enable_msr;
+
+	struct kvm_mmu mmu;
+
+	struct kvm_mmu_memory_cache mmu_pte_chain_cache;
+	struct kvm_mmu_memory_cache mmu_rmap_desc_cache;
+	struct kvm_mmu_memory_cache mmu_page_cache;
+	struct kvm_mmu_memory_cache mmu_page_header_cache;
+
+	gfn_t last_pt_write_gfn;
+	int   last_pt_write_count;
+	u64  *last_pte_updated;
+
+
+	struct i387_fxsave_struct host_fx_image;
+	struct i387_fxsave_struct guest_fx_image;
+
+	gva_t mmio_fault_cr2;
+	struct kvm_pio_request pio;
+	void *pio_data;
+
+	struct {
+		int active;
+		u8 save_iopl;
+		struct kvm_save_segment {
+			u16 selector;
+			unsigned long base;
+			u32 limit;
+			u32 ar;
+		} tr, es, ds, fs, gs;
+	} rmode;
+	int halt_request; /* real mode on Intel only */
+
+	int cpuid_nent;
+	struct kvm_cpuid_entry cpuid_entries[KVM_MAX_CPUID_ENTRIES];
+
+	/* emulate context */
+
+	struct x86_emulate_ctxt emulate_ctxt;
+};
+
+static inline int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
+				     u32 error_code)
+{
+	return vcpu->mmu.page_fault(vcpu, gva, error_code);
+}
+
+static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
+{
+	if (unlikely(vcpu->kvm->n_free_mmu_pages <
KVM_MIN_FREE_MMU_PAGES))
+		__kvm_mmu_free_some_pages(vcpu);
+}
+
+static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
+{
+	if (likely(vcpu->mmu.root_hpa != INVALID_PAGE))
+		return 0;
+
+	return kvm_mmu_load(vcpu);
+}
+
+static inline int is_long_mode(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_X86_64
+	return vcpu->shadow_efer & EFER_LME;
+#else
+	return 0;
+#endif
+}
+
+static inline int is_pae(struct kvm_vcpu *vcpu)
+{
+	return vcpu->cr4 & X86_CR4_PAE;
+}
+
+static inline int is_pse(struct kvm_vcpu *vcpu)
+{
+	return vcpu->cr4 & X86_CR4_PSE;
+}
+
+static inline int is_paging(struct kvm_vcpu *vcpu)
+{
+	return vcpu->cr0 & X86_CR0_PG;
+}
+
+static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot
*slot)
+{
+	return slot - kvm->memslots;
+}
+
+static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
+{
+	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);
+
+	return (struct kvm_mmu_page *)page_private(page);
+}
+
 #endif
diff --git a/drivers/kvm/x86_emulate.c b/drivers/kvm/x86_emulate.c
index ced0939..8465eba 100644
--- a/drivers/kvm/x86_emulate.c
+++ b/drivers/kvm/x86_emulate.c
@@ -26,6 +26,7 @@
 #define DPRINTF(_f, _a ...) printf(_f , ## _a)
 #else
 #include "kvm.h"
+#include "x86.h"
 #define DPRINTF(x...) do {} while (0)
 #endif
 #include "x86_emulate.h"
-- 
1.5.1.2

-------------------------------------------------------------------------
This SF.net email is sponsored by: Splunk Inc.
Still grepping through log files to find problems?  Stop.
Now Search log events and configuration files using AJAX and a browser.
Download your FREE copy of Splunk now >> http://get.splunk.com/

^ permalink raw reply related	[flat|nested] 15+ messages in thread

* Re: [PATCH] Split kvm_vcpu to support new archs.
       [not found] ` <42DFA526FC41B1429CE7279EF83C6BDC809A6A-wq7ZOvIWXbMAbVU2wMM1CrfspsVTdybXVpNB7YpNyf8@public.gmane.org>
@ 2007-10-18 14:22   ` Avi Kivity
  2007-10-18 20:01   ` Hollis Blanchard
  1 sibling, 0 replies; 15+ messages in thread
From: Avi Kivity @ 2007-10-18 14:22 UTC (permalink / raw)
  To: Zhang, Xiantao; +Cc: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f

Zhang, Xiantao wrote:
> Hi Avi,
> According to your and community's suggestions, I changed the kvm_vcpu
> structure to two parts. To avoid the much intrusive into current code,
> one is common part which is defined as a macro, and the other one is
> arch-specific part.  
> In addition, I have a suggestion to re-organize the head files, such as
> kvm.h and x86.h.  IMO, kvm.h is changed to kvm_comm.h, and only includes
> common code for all archs.Then x86.h will be changed to kvm-x86.h, and
> linked as kvm.h at compile time.  So, other archs also defines its
> kvm-xx.h to accommodate its arch-specific structures.  What's your ideas
> ?(This idea doesn't include in this patch.)
>   

Let's not hurry with file layout, but split off x86 first.

> >From 34cebd3a3fc0afba4df511219912bc3277e2a8c7 Mon Sep 17 00:00:00 2001
> From: Zhang Xiantao <xiantao.zhang-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
> Date: Thu, 18 Oct 2007 12:51:02 +0800
> Subject: [PATCH] First step to split kvm_vcpu. Currently, we just use an
> macro to define the common fields in kvm_vcpu for all archs, and all
> archs need to define its own kvm_vcpu struct.
> Signed-off-by: Zhang Xiantao <xiantao.zhang-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
>   

Patch is good, but your mailer wordwrapped it.  Use git-send-email, or 
another mailer, or attach.

> +#define KVM_VCPU_COMM \
>   

KVM_VCPU_COMMON.

> +	struct kvm *kvm; \
> +	struct preempt_notifier preempt_notifier;\
> +	int vcpu_id;\
> +	struct mutex mutex;\
> +	int   cpu;\
> +	struct kvm_run *run;\
> +	int guest_mode;\
> +	unsigned long requests;\
> +	struct kvm_guest_debug guest_debug;\
> +	int fpu_active; \
> +	int guest_fpu_loaded;\
> +	wait_queue_head_t wq;\
> +	int sigset_active;\
> +	sigset_t sigset;\
> +	struct kvm_stat stat;\
> +	KVM_VCPU_MMIO
>  
>   

Put some spaces between ';' and '\'.

>  
> -static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot
> *slot)
> -{
> -	return slot - kvm->memslots;
> -}
> -
>   

This baby is not x86 specific.


-- 
error compiling committee.c: too many arguments to function


-------------------------------------------------------------------------
This SF.net email is sponsored by: Splunk Inc.
Still grepping through log files to find problems?  Stop.
Now Search log events and configuration files using AJAX and a browser.
Download your FREE copy of Splunk now >> http://get.splunk.com/

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH] Split kvm_vcpu to support new archs.
       [not found] ` <42DFA526FC41B1429CE7279EF83C6BDC809A6A-wq7ZOvIWXbMAbVU2wMM1CrfspsVTdybXVpNB7YpNyf8@public.gmane.org>
  2007-10-18 14:22   ` Avi Kivity
@ 2007-10-18 20:01   ` Hollis Blanchard
  2007-10-18 21:04     ` Anthony Liguori
  2007-10-21  6:40     ` Avi Kivity
  1 sibling, 2 replies; 15+ messages in thread
From: Hollis Blanchard @ 2007-10-18 20:01 UTC (permalink / raw)
  To: Zhang, Xiantao
  Cc: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f, kvm-ppc-devel,
	Avi Kivity

On Thu, 2007-10-18 at 15:34 +0800, Zhang, Xiantao wrote:
> Hi Avi,
> According to your and community's suggestions, I changed the kvm_vcpu
> structure to two parts. To avoid the much intrusive into current code,
> one is common part which is defined as a macro, and the other one is
> arch-specific part.  
> In addition, I have a suggestion to re-organize the head files, such as
> kvm.h and x86.h.  IMO, kvm.h is changed to kvm_comm.h, and only includes
> common code for all archs.Then x86.h will be changed to kvm-x86.h, and
> linked as kvm.h at compile time.  So, other archs also defines its
> kvm-xx.h to accommodate its arch-specific structures.  What's your ideas
> ?(This idea doesn't include in this patch.)

First of all let me say that I hate cpp macros. What is the problem with
embedding an architecture-specific sub-structure, i.e.
        struct kvm_vcpu {
                ...
                struct arch_kvm_vcpu arch_vcpu;
        };

This has a nice software engineering property too: common code will have
to explicitly dereference "arch_vcpu", which in the best case wouldn't
even compile, but even in the worst case is at least a visual red flag.
The way you're using macros, there is nothing obviously wrong about
"vcpu->host_tsc" in shared code.

One more comment below.

> >From 34cebd3a3fc0afba4df511219912bc3277e2a8c7 Mon Sep 17 00:00:00 2001
> From: Zhang Xiantao <xiantao.zhang-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
> Date: Thu, 18 Oct 2007 12:51:02 +0800
> Subject: [PATCH] First step to split kvm_vcpu. Currently, we just use an
> macro to define the common fields in kvm_vcpu for all archs, and all
> archs need to define its own kvm_vcpu struct.
> Signed-off-by: Zhang Xiantao <xiantao.zhang-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
> ---
>  drivers/kvm/ioapic.c      |    2 +
>  drivers/kvm/irq.c         |    1 +
>  drivers/kvm/kvm.h         |  166
> +++++++-------------------------------------
>  drivers/kvm/kvm_main.c    |    4 +-
>  drivers/kvm/lapic.c       |    2 +
>  drivers/kvm/mmu.c         |    1 +
>  drivers/kvm/svm.c         |    2 +-
>  drivers/kvm/vmx.c         |    1 +
>  drivers/kvm/x86.h         |  128 ++++++++++++++++++++++++++++++++++
>  drivers/kvm/x86_emulate.c |    1 +
>  10 files changed, 166 insertions(+), 142 deletions(-)
...
> +#ifdef CONFIG_HAS_IOMEM
> +#define KVM_VCPU_MMIO \
> +	int mmio_needed;\
> +	int mmio_read_completed;\
> +	int mmio_is_write;\
> +	int mmio_size;\
> +	unsigned char mmio_data[8];\
>  	gpa_t mmio_phys_addr;
> -	gva_t mmio_fault_cr2;
> -	struct kvm_pio_request pio;
> -	void *pio_data;
> -	wait_queue_head_t wq;
> 
> -	int sigset_active;
> -	sigset_t sigset;
> +#else
> +#define KVM_VCPU_MMIO
> 
> -	struct kvm_stat stat;
> +#endif

...

> diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
> index aab465d..9ff049c 100644
> --- a/drivers/kvm/kvm_main.c
> +++ b/drivers/kvm/kvm_main.c
> @@ -2272,7 +2272,7 @@ static int kvm_vcpu_ioctl_run(struct kvm_vcpu
> *vcpu, struct kvm_run *kvm_run)
>  		if (r)
>  			goto out;
>  	}
> -
> +#if CONFIG_HAS_IOMEM
>  	if (vcpu->mmio_needed) {
>  		memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
>  		vcpu->mmio_read_completed = 1;
> @@ -2287,7 +2287,7 @@ static int kvm_vcpu_ioctl_run(struct kvm_vcpu
> *vcpu, struct kvm_run *kvm_run)
>  			goto out;
>  		}
>  	}
> -
> +#endif

It does not make sense to share kvm_vcpu_ioctl_run(). Just look at it.

FYI, "char mmio_data[8]" has alignment problems. PowerPC has
endian-reversed load/store instructions, and to use them target data
must be aligned.

Also, memcpy() doesn't work for big-endian systems with sub-word loads.
Imagine if I do a single-byte load: "memcpy(&gpr, mmio_data, 1)" would
set the MSB, but the byte should land in the LSB of the register.

-- 
Hollis Blanchard
IBM Linux Technology Center


-------------------------------------------------------------------------
This SF.net email is sponsored by: Splunk Inc.
Still grepping through log files to find problems?  Stop.
Now Search log events and configuration files using AJAX and a browser.
Download your FREE copy of Splunk now >> http://get.splunk.com/

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH] Split kvm_vcpu to support new archs.
  2007-10-18 20:01   ` Hollis Blanchard
@ 2007-10-18 21:04     ` Anthony Liguori
       [not found]       ` <4717CA4B.7040307-rdkfGonbjUSkNkDKm+mE6A@public.gmane.org>
  2007-10-21  6:40     ` Avi Kivity
  1 sibling, 1 reply; 15+ messages in thread
From: Anthony Liguori @ 2007-10-18 21:04 UTC (permalink / raw)
  To: Hollis Blanchard
  Cc: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f, kvm-ppc-devel,
	Avi Kivity, Zhang, Xiantao

Hollis Blanchard wrote:
> On Thu, 2007-10-18 at 15:34 +0800, Zhang, Xiantao wrote:
>   
>> Hi Avi,
>> According to your and community's suggestions, I changed the kvm_vcpu
>> structure to two parts. To avoid the much intrusive into current code,
>> one is common part which is defined as a macro, and the other one is
>> arch-specific part.  
>> In addition, I have a suggestion to re-organize the head files, such as
>> kvm.h and x86.h.  IMO, kvm.h is changed to kvm_comm.h, and only includes
>> common code for all archs.Then x86.h will be changed to kvm-x86.h, and
>> linked as kvm.h at compile time.  So, other archs also defines its
>> kvm-xx.h to accommodate its arch-specific structures.  What's your ideas
>> ?(This idea doesn't include in this patch.)
>>     
>
> First of all let me say that I hate cpp macros. What is the problem with
> embedding an architecture-specific sub-structure, i.e.
>         struct kvm_vcpu {
>                 ...
>                 struct arch_kvm_vcpu arch_vcpu;
>         };
>   

I think you want the opposite direction of nesting.

There already is such a thing for vt/svm.  What's needed is just another 
level for x86/ppc, etc.  All the necessary hooks are already in place to 
allocate at the very bottom of the stack.

So to summarize, today we have:

struct kvm_vcpu {
  /* stuff common to vt/svm and possibly other archs*/
};

struct vcpu_svm {
   struct kvm_vcpu vcpu;
   /* svm specific stuff */
};
 
But we should move to:

struct kvm_vcpu {
 /* stuff common to x86/ppc/ia64 */
};

struct vcpu_x86 {
  struct kvm_vcpu vcpu;
  /* stuff common to vt/svm */
}

struct vcpu_svm {
  struct vcpu_x86 vcpu;
  /* svm specific stuff  */
};

Regards,

Anthony Liguori

> This has a nice software engineering property too: common code will have
> to explicitly dereference "arch_vcpu", which in the best case wouldn't
> even compile, but even in the worst case is at least a visual red flag.
> The way you're using macros, there is nothing obviously wrong about
> "vcpu->host_tsc" in shared code.
>
> One more comment below.
>
>   
>> >From 34cebd3a3fc0afba4df511219912bc3277e2a8c7 Mon Sep 17 00:00:00 2001
>> From: Zhang Xiantao <xiantao.zhang-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
>> Date: Thu, 18 Oct 2007 12:51:02 +0800
>> Subject: [PATCH] First step to split kvm_vcpu. Currently, we just use an
>> macro to define the common fields in kvm_vcpu for all archs, and all
>> archs need to define its own kvm_vcpu struct.
>> Signed-off-by: Zhang Xiantao <xiantao.zhang-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
>> ---
>>  drivers/kvm/ioapic.c      |    2 +
>>  drivers/kvm/irq.c         |    1 +
>>  drivers/kvm/kvm.h         |  166
>> +++++++-------------------------------------
>>  drivers/kvm/kvm_main.c    |    4 +-
>>  drivers/kvm/lapic.c       |    2 +
>>  drivers/kvm/mmu.c         |    1 +
>>  drivers/kvm/svm.c         |    2 +-
>>  drivers/kvm/vmx.c         |    1 +
>>  drivers/kvm/x86.h         |  128 ++++++++++++++++++++++++++++++++++
>>  drivers/kvm/x86_emulate.c |    1 +
>>  10 files changed, 166 insertions(+), 142 deletions(-)
>>     
> ...
>   
>> +#ifdef CONFIG_HAS_IOMEM
>> +#define KVM_VCPU_MMIO \
>> +	int mmio_needed;\
>> +	int mmio_read_completed;\
>> +	int mmio_is_write;\
>> +	int mmio_size;\
>> +	unsigned char mmio_data[8];\
>>  	gpa_t mmio_phys_addr;
>> -	gva_t mmio_fault_cr2;
>> -	struct kvm_pio_request pio;
>> -	void *pio_data;
>> -	wait_queue_head_t wq;
>>
>> -	int sigset_active;
>> -	sigset_t sigset;
>> +#else
>> +#define KVM_VCPU_MMIO
>>
>> -	struct kvm_stat stat;
>> +#endif
>>     
>
> ...
>
>   
>> diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
>> index aab465d..9ff049c 100644
>> --- a/drivers/kvm/kvm_main.c
>> +++ b/drivers/kvm/kvm_main.c
>> @@ -2272,7 +2272,7 @@ static int kvm_vcpu_ioctl_run(struct kvm_vcpu
>> *vcpu, struct kvm_run *kvm_run)
>>  		if (r)
>>  			goto out;
>>  	}
>> -
>> +#if CONFIG_HAS_IOMEM
>>  	if (vcpu->mmio_needed) {
>>  		memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
>>  		vcpu->mmio_read_completed = 1;
>> @@ -2287,7 +2287,7 @@ static int kvm_vcpu_ioctl_run(struct kvm_vcpu
>> *vcpu, struct kvm_run *kvm_run)
>>  			goto out;
>>  		}
>>  	}
>> -
>> +#endif
>>     
>
> It does not make sense to share kvm_vcpu_ioctl_run(). Just look at it.
>
> FYI, "char mmio_data[8]" has alignment problems. PowerPC has
> endian-reversed load/store instructions, and to use them target data
> must be aligned.
>
> Also, memcpy() doesn't work for big-endian systems with sub-word loads.
> Imagine if I do a single-byte load: "memcpy(&gpr, mmio_data, 1)" would
> set the MSB, but the byte should land in the LSB of the register.
>
>   


-------------------------------------------------------------------------
This SF.net email is sponsored by: Splunk Inc.
Still grepping through log files to find problems?  Stop.
Now Search log events and configuration files using AJAX and a browser.
Download your FREE copy of Splunk now >> http://get.splunk.com/

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH] Split kvm_vcpu to support new archs.
       [not found]       ` <4717CA4B.7040307-rdkfGonbjUSkNkDKm+mE6A@public.gmane.org>
@ 2007-10-18 21:14         ` Hollis Blanchard
  2007-10-18 21:31           ` Anthony Liguori
  0 siblings, 1 reply; 15+ messages in thread
From: Hollis Blanchard @ 2007-10-18 21:14 UTC (permalink / raw)
  To: Anthony Liguori
  Cc: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f, kvm-ppc-devel,
	Avi Kivity, Zhang, Xiantao

On Thu, 2007-10-18 at 16:04 -0500, Anthony Liguori wrote:
> Hollis Blanchard wrote:
> > What is the problem with
> > embedding an architecture-specific sub-structure, i.e.
> >         struct kvm_vcpu {
> >                 ...
> >                 struct arch_kvm_vcpu arch_vcpu;
> >         };
> >   
> 
> I think you want the opposite direction of nesting. 
...
> We should move to:
> 
> struct kvm_vcpu {
>  /* stuff common to x86/ppc/ia64 */
> };
> 
> struct vcpu_x86 {
>   struct kvm_vcpu vcpu;
>   /* stuff common to vt/svm */
> }
> 
> struct vcpu_svm {
>   struct vcpu_x86 vcpu;
>   /* svm specific stuff  */
> };

Why?

-- 
Hollis Blanchard
IBM Linux Technology Center


-------------------------------------------------------------------------
This SF.net email is sponsored by: Splunk Inc.
Still grepping through log files to find problems?  Stop.
Now Search log events and configuration files using AJAX and a browser.
Download your FREE copy of Splunk now >> http://get.splunk.com/

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH] Split kvm_vcpu to support new archs.
  2007-10-18 21:14         ` Hollis Blanchard
@ 2007-10-18 21:31           ` Anthony Liguori
       [not found]             ` <4717D095.40708-rdkfGonbjUSkNkDKm+mE6A@public.gmane.org>
  0 siblings, 1 reply; 15+ messages in thread
From: Anthony Liguori @ 2007-10-18 21:31 UTC (permalink / raw)
  To: Hollis Blanchard
  Cc: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f, kvm-ppc-devel,
	Avi Kivity, Zhang, Xiantao

Hollis Blanchard wrote:
> On Thu, 2007-10-18 at 16:04 -0500, Anthony Liguori wrote:
>   
>> Hollis Blanchard wrote:
>>     
>>> What is the problem with
>>> embedding an architecture-specific sub-structure, i.e.
>>>         struct kvm_vcpu {
>>>                 ...
>>>                 struct arch_kvm_vcpu arch_vcpu;
>>>         };
>>>   
>>>       
>> I think you want the opposite direction of nesting. 
>>     
> ...
>   
>> We should move to:
>>
>> struct kvm_vcpu {
>>  /* stuff common to x86/ppc/ia64 */
>> };
>>
>> struct vcpu_x86 {
>>   struct kvm_vcpu vcpu;
>>   /* stuff common to vt/svm */
>> }
>>
>> struct vcpu_svm {
>>   struct vcpu_x86 vcpu;
>>   /* svm specific stuff  */
>> };
>>     
>
> Why?
>   

It provides better encapsulation.  If you have a kvm_vcpu, unless you do 
container_of(), you can't access the arch_vcpu.  It helps make sure that 
architecture common code remains common.

It also leaves open the possibility of supporting multiple architectures 
at the same time.  I don't know why you would want to do that :-)

Regards,

Anthony Liguori


-------------------------------------------------------------------------
This SF.net email is sponsored by: Splunk Inc.
Still grepping through log files to find problems?  Stop.
Now Search log events and configuration files using AJAX and a browser.
Download your FREE copy of Splunk now >> http://get.splunk.com/

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [kvm-ppc-devel] [PATCH] Split kvm_vcpu to support new archs.
       [not found]             ` <4717D095.40708-rdkfGonbjUSkNkDKm+mE6A@public.gmane.org>
@ 2007-10-18 21:43               ` Hollis Blanchard
  2007-10-18 22:04                 ` Anthony Liguori
  2007-10-19 13:34                 ` Carsten Otte
  0 siblings, 2 replies; 15+ messages in thread
From: Hollis Blanchard @ 2007-10-18 21:43 UTC (permalink / raw)
  To: Anthony Liguori
  Cc: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f, kvm-ppc-devel,
	Zhang,  Xiantao

On Thu, 2007-10-18 at 16:31 -0500, Anthony Liguori wrote:
> Hollis Blanchard wrote:
> > On Thu, 2007-10-18 at 16:04 -0500, Anthony Liguori wrote:
> >   
> >> Hollis Blanchard wrote:
> >>     
> >>> What is the problem with
> >>> embedding an architecture-specific sub-structure, i.e.
> >>>         struct kvm_vcpu {
> >>>                 ...
> >>>                 struct arch_kvm_vcpu arch_vcpu;
> >>>         };
> >>>   
> >>>       
> >> I think you want the opposite direction of nesting. 
> >>     
> > ...
> >   
> >> We should move to:
> >>
> >> struct kvm_vcpu {
> >>  /* stuff common to x86/ppc/ia64 */
> >> };
> >>
> >> struct vcpu_x86 {
> >>   struct kvm_vcpu vcpu;
> >>   /* stuff common to vt/svm */
> >> }
> >>
> >> struct vcpu_svm {
> >>   struct vcpu_x86 vcpu;
> >>   /* svm specific stuff  */
> >> };
> >>     
> >
> > Why?
> >   
> 
> It provides better encapsulation.  If you have a kvm_vcpu, unless you do 
> container_of(), you can't access the arch_vcpu.  It helps make sure that 
> architecture common code remains common.

I must be misunderstanding, because this seems completely backwards to
me. With your nesting, any time architecture code wants to access
architecture state (which is almost all the time), you'd *need*
container_of:

        void arch_func(struct kvm_vcpu *vcpu) {
                struct arch_vcpu *arch = container_of(vcpu, arch_vcpu,
                arch);
                arch->gpr[3] = 0;
        }

In contrast, my nesting proposal would look like this:

        void arch_func(struct kvm_vcpu *vcpu) {
                vcpu->arch.gpr[3] = 0;
        }

> It also leaves open the possibility of supporting multiple architectures 
> at the same time.  I don't know why you would want to do that :-)

That's true, though this could also be accomplished by keeping arch_vcpu
as the last member of kvm_vcpu.

-- 
Hollis Blanchard
IBM Linux Technology Center


-------------------------------------------------------------------------
This SF.net email is sponsored by: Splunk Inc.
Still grepping through log files to find problems?  Stop.
Now Search log events and configuration files using AJAX and a browser.
Download your FREE copy of Splunk now >> http://get.splunk.com/

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [kvm-ppc-devel] [PATCH] Split kvm_vcpu to support new archs.
  2007-10-18 21:43               ` [kvm-ppc-devel] " Hollis Blanchard
@ 2007-10-18 22:04                 ` Anthony Liguori
       [not found]                   ` <4717D87E.5010000-rdkfGonbjUSkNkDKm+mE6A@public.gmane.org>
  2007-10-19 13:34                 ` Carsten Otte
  1 sibling, 1 reply; 15+ messages in thread
From: Anthony Liguori @ 2007-10-18 22:04 UTC (permalink / raw)
  To: Hollis Blanchard
  Cc: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f, kvm-ppc-devel,
	Zhang,  Xiantao

Hollis Blanchard wrote:
> On Thu, 2007-10-18 at 16:31 -0500, Anthony Liguori wrote:
>   
>> Hollis Blanchard wrote:
>>     
>>> On Thu, 2007-10-18 at 16:04 -0500, Anthony Liguori wrote:
>>>   
>>>       
>>>> Hollis Blanchard wrote:
>>>>     
>>>>         
>>>>> What is the problem with
>>>>> embedding an architecture-specific sub-structure, i.e.
>>>>>         struct kvm_vcpu {
>>>>>                 ...
>>>>>                 struct arch_kvm_vcpu arch_vcpu;
>>>>>         };
>>>>>   
>>>>>       
>>>>>           
>>>> I think you want the opposite direction of nesting. 
>>>>     
>>>>         
>>> ...
>>>   
>>>       
>>>> We should move to:
>>>>
>>>> struct kvm_vcpu {
>>>>  /* stuff common to x86/ppc/ia64 */
>>>> };
>>>>
>>>> struct vcpu_x86 {
>>>>   struct kvm_vcpu vcpu;
>>>>   /* stuff common to vt/svm */
>>>> }
>>>>
>>>> struct vcpu_svm {
>>>>   struct vcpu_x86 vcpu;
>>>>   /* svm specific stuff  */
>>>> };
>>>>     
>>>>         
>>> Why?
>>>   
>>>       
>> It provides better encapsulation.  If you have a kvm_vcpu, unless you do 
>> container_of(), you can't access the arch_vcpu.  It helps make sure that 
>> architecture common code remains common.
>>     
>
> I must be misunderstanding, because this seems completely backwards to
> me. With your nesting, any time architecture code wants to access
> architecture state (which is almost all the time), you'd *need*
> container_of:
>
>         void arch_func(struct kvm_vcpu *vcpu) {
>                 struct arch_vcpu *arch = container_of(vcpu, arch_vcpu,
>                 arch);
>                 arch->gpr[3] = 0;
>         }
>
> In contrast, my nesting proposal would look like this:
>
>         void arch_func(struct kvm_vcpu *vcpu) {
>                 vcpu->arch.gpr[3] = 0;
>         }
>   

Well, you'd probably define a to_ppc() and then do something like:

void arch_func(struct kvm_vcpu *vcpu) {
       to_arch(vcpu)->gpr[3] = 0;
}

Which is exactly what's done in the vt/svm backend (see usage of 
to_svm/to_vmx).

Regards,

Anthony Liguori

>> It also leaves open the possibility of supporting multiple architectures 
>> at the same time.  I don't know why you would want to do that :-)
>>     
>
> That's true, though this could also be accomplished by keeping arch_vcpu
> as the last member of kvm_vcpu.
>
>   


-------------------------------------------------------------------------
This SF.net email is sponsored by: Splunk Inc.
Still grepping through log files to find problems?  Stop.
Now Search log events and configuration files using AJAX and a browser.
Download your FREE copy of Splunk now >> http://get.splunk.com/

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [kvm-ppc-devel] [PATCH] Split kvm_vcpu to support new archs.
  2007-10-18 21:43               ` [kvm-ppc-devel] " Hollis Blanchard
  2007-10-18 22:04                 ` Anthony Liguori
@ 2007-10-19 13:34                 ` Carsten Otte
  1 sibling, 0 replies; 15+ messages in thread
From: Carsten Otte @ 2007-10-19 13:34 UTC (permalink / raw)
  To: Hollis Blanchard
  Cc: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f, kvm-ppc-devel,
	Zhang,  Xiantao

Hollis Blanchard wrote:
> I must be misunderstanding, because this seems completely backwards to
> me. With your nesting, any time architecture code wants to access
> architecture state (which is almost all the time), you'd *need*
> container_of:
> 
>         void arch_func(struct kvm_vcpu *vcpu) {
>                 struct arch_vcpu *arch = container_of(vcpu, arch_vcpu,
>                 arch);
>                 arch->gpr[3] = 0;
>         }
> 
> In contrast, my nesting proposal would look like this:
> 
>         void arch_func(struct kvm_vcpu *vcpu) {
>                 vcpu->arch.gpr[3] = 0;
>         }
> 
I'm with Hollis on this one. It looks clearly preferable to me.

-------------------------------------------------------------------------
This SF.net email is sponsored by: Splunk Inc.
Still grepping through log files to find problems?  Stop.
Now Search log events and configuration files using AJAX and a browser.
Download your FREE copy of Splunk now >> http://get.splunk.com/

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [kvm-ppc-devel] [PATCH] Split kvm_vcpu to support new archs.
       [not found]                   ` <4717D87E.5010000-rdkfGonbjUSkNkDKm+mE6A@public.gmane.org>
@ 2007-10-19 17:16                     ` Hollis Blanchard
  0 siblings, 0 replies; 15+ messages in thread
From: Hollis Blanchard @ 2007-10-19 17:16 UTC (permalink / raw)
  To: Anthony Liguori
  Cc: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f, kvm-ppc-devel,
	Carsten Otte, Zhang,  Xiantao

On Thu, 2007-10-18 at 17:04 -0500, Anthony Liguori wrote:
> Hollis Blanchard wrote:
> >
> > I must be misunderstanding, because this seems completely backwards to
> > me. With your nesting, any time architecture code wants to access
> > architecture state (which is almost all the time), you'd *need*
> > container_of:
> >
> >         void arch_func(struct kvm_vcpu *vcpu) {
> >                 struct arch_vcpu *arch = container_of(vcpu, arch_vcpu,
> >                 arch);
> >                 arch->gpr[3] = 0;
> >         }
> >
> > In contrast, my nesting proposal would look like this:
> >
> >         void arch_func(struct kvm_vcpu *vcpu) {
> >                 vcpu->arch.gpr[3] = 0;
> >         }
> >   
> 
> Well, you'd probably define a to_ppc() and then do something like:
> 
> void arch_func(struct kvm_vcpu *vcpu) {
>        to_arch(vcpu)->gpr[3] = 0;
> }
> 
> Which is exactly what's done in the vt/svm backend (see usage of 
> to_svm/to_vmx).

Ah, so you're saying that once we have a "to_x86()" macro, the nesting
becomes irrelevant.

Furthermore, "to_x86()" won't even be defined when compiling shared
code, which will ensure that nobody tries to slip something in.

Finally, it means we don't have to keep the "arch" member at the end of
kvm_vcpu (though I don't think that's a big deal really).

I think I can live with that. Carsten?

[BTW, a quick glance at svm.c turns up weirdness like this:
        
        static int rdmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
        {
        	u32 ecx = svm->vcpu.regs[VCPU_REGS_RCX];
        	u64 data;
        
        	if (svm_get_msr(&svm->vcpu, ecx, &data))
        		svm_inject_gp(&svm->vcpu, 0);
        	...
        }
        
        static void svm_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
        {
        	struct vcpu_svm *svm = to_svm(vcpu);
        	...
        }

There's some unnecessary kvm_vcpu/svm_vcpu conversion going on there.]

-- 
Hollis Blanchard
IBM Linux Technology Center


-------------------------------------------------------------------------
This SF.net email is sponsored by: Splunk Inc.
Still grepping through log files to find problems?  Stop.
Now Search log events and configuration files using AJAX and a browser.
Download your FREE copy of Splunk now >> http://get.splunk.com/

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH] Split kvm_vcpu to support new archs.
  2007-10-18 20:01   ` Hollis Blanchard
  2007-10-18 21:04     ` Anthony Liguori
@ 2007-10-21  6:40     ` Avi Kivity
       [not found]       ` <471AF450.9040202-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
  1 sibling, 1 reply; 15+ messages in thread
From: Avi Kivity @ 2007-10-21  6:40 UTC (permalink / raw)
  To: Hollis Blanchard
  Cc: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f, kvm-ppc-devel,
	Zhang,  Xiantao

Hollis Blanchard wrote:
> On Thu, 2007-10-18 at 15:34 +0800, Zhang, Xiantao wrote:
>   
>> Hi Avi,
>> According to your and community's suggestions, I changed the kvm_vcpu
>> structure to two parts. To avoid the much intrusive into current code,
>> one is common part which is defined as a macro, and the other one is
>> arch-specific part.  
>> In addition, I have a suggestion to re-organize the head files, such as
>> kvm.h and x86.h.  IMO, kvm.h is changed to kvm_comm.h, and only includes
>> common code for all archs.Then x86.h will be changed to kvm-x86.h, and
>> linked as kvm.h at compile time.  So, other archs also defines its
>> kvm-xx.h to accommodate its arch-specific structures.  What's your ideas
>> ?(This idea doesn't include in this patch.)
>>     
>
> First of all let me say that I hate cpp macros. What is the problem with
> embedding an architecture-specific sub-structure, i.e.
>         struct kvm_vcpu {
>                 ...
>                 struct arch_kvm_vcpu arch_vcpu;
>         };
>
> This has a nice software engineering property too: common code will have
> to explicitly dereference "arch_vcpu", which in the best case wouldn't
> even compile, but even in the worst case is at least a visual red flag.
> The way you're using macros, there is nothing obviously wrong about
> "vcpu->host_tsc" in shared code.
>
>   

The usage of the macro is only for an intermediate stage, so this patch 
shows the changes in the data structures, while the next one will be 
littered with code changes due to the changes in the way fields are 
addressed.

I was initially in favor of doing

    struct kvm_vcpu {
        struct kvm_vcpu_common common;
        ...
    };

in order to avoid the majority of fields requiring an 'arch.' prefix 
(most fields are arch dependent, very few are common), but using 
container_of() as someone suggested seems to be a better idea.


> It does not make sense to share kvm_vcpu_ioctl_run(). Just look at it.
>
>   

Agree.


-- 
error compiling committee.c: too many arguments to function


-------------------------------------------------------------------------
This SF.net email is sponsored by: Splunk Inc.
Still grepping through log files to find problems?  Stop.
Now Search log events and configuration files using AJAX and a browser.
Download your FREE copy of Splunk now >> http://get.splunk.com/

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH] Split kvm_vcpu to support new archs.
       [not found]       ` <471AF450.9040202-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
@ 2007-10-22 19:18         ` Hollis Blanchard
  2007-10-23 12:14           ` Carsten Otte
  2007-10-25  2:56           ` Jerone Young
  0 siblings, 2 replies; 15+ messages in thread
From: Hollis Blanchard @ 2007-10-22 19:18 UTC (permalink / raw)
  To: Avi Kivity
  Cc: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f, kvm-ppc-devel,
	Zhang,  Xiantao

On Sun, 2007-10-21 at 08:40 +0200, Avi Kivity wrote:
> 
> The usage of the macro is only for an intermediate stage, so this
> patch shows the changes in the data structures, while the next one
> will be littered with code changes due to the changes in the way
> fields are addressed.

OK.

What is the plan here Xiantao? If I want to begin PPC integration,
should I submit some patches too (hopefully in areas where we will not
conflict)? Or should I just review your submissions and hold off on PPC
code changes until the dust settles?

> I was initially in favor of doing
> 
>     struct kvm_vcpu {
>         struct kvm_vcpu_common common;
>         ...
>     };
> 
> in order to avoid the majority of fields requiring an 'arch.' prefix 
> (most fields are arch dependent, very few are common), but using 
> container_of() as someone suggested seems to be a better idea. 

Note: container_of() enables the above layout, and I agree with that
approach. To avoid misunderstandings, this is what we're talking about:
        
        kvm_common_foo(struct kvm_vcpu_common *vcpu)
        {
        	kvm_arch_foo(vcpu);
        }
        
        kvm_common_bar(struct kvm_vcpu_common *vcpu)
        {
        	...
        }
        
        ----------
        
        struct kvm_vcpu_ppc440 {
        	struct kvm_vcpu_common common;
        	u32 gpr[32];
        };
        
        #define to_ppc440(v) container_of(...)
        
        kvm_arch_foo(struct kvm_vcpu_common *vcpu)
        {
        	struct kvm_vcpu_ppc440 *ppc440 = to_ppc440(vcpu);
        
        	ppc440->gpr[3] = 0;
        
        	kvm_common_bar(ppc440->common);
        }
        
I've chosen specific PPC names since I expect to support more than one
PowerPC processor type simultaneously, e.g. "modprobe kvm-powerpc-440
kvm-powerpc-e500". (This will require some additional "kvm_ppc_ops"
support not shown here.)

Personally I think "common" is too much typing, but I've left the name
as you suggested for now. :)

-- 
Hollis Blanchard
IBM Linux Technology Center


-------------------------------------------------------------------------
This SF.net email is sponsored by: Splunk Inc.
Still grepping through log files to find problems?  Stop.
Now Search log events and configuration files using AJAX and a browser.
Download your FREE copy of Splunk now >> http://get.splunk.com/

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH] Split kvm_vcpu to support new archs.
  2007-10-22 19:18         ` Hollis Blanchard
@ 2007-10-23 12:14           ` Carsten Otte
       [not found]             ` <471DE5B2.4030709-tA70FqPdS9bQT0dZR+AlfA@public.gmane.org>
  2007-10-25  2:56           ` Jerone Young
  1 sibling, 1 reply; 15+ messages in thread
From: Carsten Otte @ 2007-10-23 12:14 UTC (permalink / raw)
  To: Hollis Blanchard
  Cc: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f, kvm-ppc-devel,
	Zhang,  Xiantao, Avi Kivity

Hollis Blanchard wrote:
> What is the plan here Xiantao? If I want to begin PPC integration,
> should I submit some patches too (hopefully in areas where we will not
> conflict)? Or should I just review your submissions and hold off on PPC
> code changes until the dust settles?
Oh, let's throw more patches at Avi. Now that our kvm test machine is 
up and running again, I'll continue to do so as well. This way, we'll 
have something that works for all four archs once the dust settles.


-------------------------------------------------------------------------
This SF.net email is sponsored by: Splunk Inc.
Still grepping through log files to find problems?  Stop.
Now Search log events and configuration files using AJAX and a browser.
Download your FREE copy of Splunk now >> http://get.splunk.com/

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH] Split kvm_vcpu to support new archs.
       [not found]             ` <471DE5B2.4030709-tA70FqPdS9bQT0dZR+AlfA@public.gmane.org>
@ 2007-10-24 11:44               ` Zhang, Xiantao
  0 siblings, 0 replies; 15+ messages in thread
From: Zhang, Xiantao @ 2007-10-24 11:44 UTC (permalink / raw)
  To: carsteno-tA70FqPdS9bQT0dZR+AlfA, Hollis Blanchard
  Cc: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f, kvm-ppc-devel,
	Avi Kivity

Carsten Otte wrote:
> Hollis Blanchard wrote:
>> What is the plan here Xiantao? If I want to begin PPC integration,
>> should I submit some patches too (hopefully in areas where we will
>> not conflict)? Or should I just review your submissions and hold off
>> on PPC code changes until the dust settles?
> Oh, let's throw more patches at Avi. Now that our kvm test machine is
> up and running again, I'll continue to do so as well. This way, we'll
> have something that works for all four archs once the dust settles.

Yes, we are working on it too. In fact, I also prefer using the
container_of approach for the kvm_vcpu split, because the macro is not
elegant from a coding-style point of view.  OK, let's work together to
make it happen.  I will prepare the patch. :)

Thanks 
Xiantao

-------------------------------------------------------------------------
This SF.net email is sponsored by: Splunk Inc.
Still grepping through log files to find problems?  Stop.
Now Search log events and configuration files using AJAX and a browser.
Download your FREE copy of Splunk now >> http://get.splunk.com/

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH] Split kvm_vcpu to support new archs.
  2007-10-22 19:18         ` Hollis Blanchard
  2007-10-23 12:14           ` Carsten Otte
@ 2007-10-25  2:56           ` Jerone Young
  1 sibling, 0 replies; 15+ messages in thread
From: Jerone Young @ 2007-10-25  2:56 UTC (permalink / raw)
  To: Hollis Blanchard
  Cc: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f, kvm-ppc-devel,
	Zhang,  Xiantao, Avi Kivity

This may be helpful to everyone if Hollis's explanation of container_of
didn't help:

Look up "container_of" or look at the section named "Lists"
at this link:
http://www.win.tue.nl/~aeb/linux/lk/lk-2.html

It also contains the macro used in the kernel.





On Mon, 2007-10-22 at 14:18 -0500, Hollis Blanchard wrote:
> On Sun, 2007-10-21 at 08:40 +0200, Avi Kivity wrote:
> > 
> > The usage of the macro is only for an intermediate stage, so this
> > patch shows the changes in the data structures, while the next one
> > will be littered with code changes due to the changes in the way
> > fields are addressed.
> 
> OK.
> 
> What is the plan here Xiantao? If I want to begin PPC integration,
> should I submit some patches too (hopefully in areas where we will not
> conflict)? Or should I just review your submissions and hold off on PPC
> code changes until the dust settles?
> 
> > I was initially in favor of doing
> > 
> >     struct kvm_vcpu {
> >         struct kvm_vcpu_common common;
> >         ...
> >     };
> > 
> > in order to avoid the majority of fields requiring an 'arch.' prefix 
> > (most fields are arch dependent, very few are common), but using 
> > container_of() as someone suggested seems to be a better idea. 
> 
> Note: container_of() enables the above layout, and I agree with that
> approach. To avoid misunderstandings, this is what we're talking about:
>         
>         kvm_common_foo(struct kvm_vcpu_common *vcpu)
>         {
>         	kvm_arch_foo(vcpu);
>         }
>         
>         kvm_common_bar(struct kvm_vcpu_common *vcpu)
>         {
>         	...
>         }
>         
>         ----------
>         
>         struct kvm_vcpu_ppc440 {
>         	struct kvm_vcpu_common common;
>         	u32 gpr[32];
>         };
>         
>         #define to_ppc440(v) container_of(...)
>         
>         kvm_arch_foo(struct kvm_vcpu_common *vcpu)
>         {
>         	struct kvm_vcpu_ppc440 *ppc440 = to_ppc440(vcpu);
>         
>         	ppc440->gpr[3] = 0;
>         
>         	kvm_common_bar(ppc440->common);
>         }
>         
> I've chosen specific PPC names since I expect to support more than one
> PowerPC processor type simultaneously, e.g. "modprobe kvm-powerpc-440
> kvm-powerpc-e500". (This will require some additional "kvm_ppc_ops"
> support not shown here.)
> 
> Personally I think "common" is too much typing, but I've left the name
> as you suggested for now. :)
> 


-------------------------------------------------------------------------
This SF.net email is sponsored by: Splunk Inc.
Still grepping through log files to find problems?  Stop.
Now Search log events and configuration files using AJAX and a browser.
Download your FREE copy of Splunk now >> http://get.splunk.com/

^ permalink raw reply	[flat|nested] 15+ messages in thread

end of thread, other threads:[~2007-10-25  2:56 UTC | newest]

Thread overview: 15+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2007-10-18  7:34 [PATCH] Split kvm_vcpu to support new archs Zhang, Xiantao
     [not found] ` <42DFA526FC41B1429CE7279EF83C6BDC809A6A-wq7ZOvIWXbMAbVU2wMM1CrfspsVTdybXVpNB7YpNyf8@public.gmane.org>
2007-10-18 14:22   ` Avi Kivity
2007-10-18 20:01   ` Hollis Blanchard
2007-10-18 21:04     ` Anthony Liguori
     [not found]       ` <4717CA4B.7040307-rdkfGonbjUSkNkDKm+mE6A@public.gmane.org>
2007-10-18 21:14         ` Hollis Blanchard
2007-10-18 21:31           ` Anthony Liguori
     [not found]             ` <4717D095.40708-rdkfGonbjUSkNkDKm+mE6A@public.gmane.org>
2007-10-18 21:43               ` [kvm-ppc-devel] " Hollis Blanchard
2007-10-18 22:04                 ` Anthony Liguori
     [not found]                   ` <4717D87E.5010000-rdkfGonbjUSkNkDKm+mE6A@public.gmane.org>
2007-10-19 17:16                     ` Hollis Blanchard
2007-10-19 13:34                 ` Carsten Otte
2007-10-21  6:40     ` Avi Kivity
     [not found]       ` <471AF450.9040202-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
2007-10-22 19:18         ` Hollis Blanchard
2007-10-23 12:14           ` Carsten Otte
     [not found]             ` <471DE5B2.4030709-tA70FqPdS9bQT0dZR+AlfA@public.gmane.org>
2007-10-24 11:44               ` Zhang, Xiantao
2007-10-25  2:56           ` Jerone Young

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox