* [PATCH RFC] paravirt: cleanup lazy mode handling
@ 2007-10-01 23:46 Jeremy Fitzhardinge
2007-10-02 1:34 ` Rusty Russell
2007-10-02 5:48 ` Avi Kivity
0 siblings, 2 replies; 7+ messages in thread
From: Jeremy Fitzhardinge @ 2007-10-01 23:46 UTC (permalink / raw)
To: Virtualization Mailing List
Cc: Linux Kernel Mailing List, Andi Kleen, Zachary Amsden,
Rusty Russell, Avi Kivity, Anthony Liguori,
Glauber de Oliveira Costa, Nakajima, Jun
Currently, the set_lazy_mode pv_op is overloaded with 5 functions:
1. enter lazy cpu mode
2. leave lazy cpu mode
3. enter lazy mmu mode
4. leave lazy mmu mode
5. flush pending batched operations
This complicates each paravirt backend, since it needs to deal with
all the possible state transitions, handling flushing, etc. In
particular, flushing is quite distinct from the other 4 functions, and
seems to just cause complication.
This patch removes the set_lazy_mode operation, and adds "enter" and
"leave" lazy mode operations on mmu_ops and cpu_ops. All the logic
associated with entering and leaving lazy states is now in common code
(basically BUG_ONs to make sure that no mode is current when entering
a lazy mode, and make sure that the mode is current when leaving).
Also, flush is handled in a common way, by simply leaving and
re-entering the lazy mode.
The result is that the Xen and VMI lazy mode implementations are much
simpler; as would lguest's be.
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: Andi Kleen <ak@suse.de>
Cc: Zach Amsden <zach@vmware.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Avi Kivity <avi@qumranet.com>
Cc: Anthony Liguori <aliguori@us.ibm.com>
Cc: "Glauber de Oliveira Costa" <glommer@gmail.com>
Cc: "Nakajima, Jun" <jun.nakajima@intel.com>
---
arch/i386/kernel/paravirt.c | 78 ++++++++++++++++++++++++++++++++++++++++---
arch/i386/kernel/vmi.c | 42 ++++++++++++-----------
arch/i386/xen/enlighten.c | 43 +++++++----------------
arch/i386/xen/mmu.c | 2 -
arch/i386/xen/multicalls.h | 2 -
arch/i386/xen/xen-ops.h | 7 ---
include/asm-i386/paravirt.h | 67 ++++++++++++++----------------------
7 files changed, 137 insertions(+), 104 deletions(-)
===================================================================
--- a/arch/i386/kernel/paravirt.c
+++ b/arch/i386/kernel/paravirt.c
@@ -265,6 +265,69 @@ int paravirt_disable_iospace(void)
}
return ret;
+}
+
+static DEFINE_PER_CPU(enum paravirt_lazy_mode, paravirt_lazy_mode) = PARAVIRT_LAZY_NONE;
+
+static void enter_lazy(enum paravirt_lazy_mode mode, struct pv_lazy_ops *ops)
+{
+ BUG_ON(x86_read_percpu(paravirt_lazy_mode) != PARAVIRT_LAZY_NONE);
+ BUG_ON(preemptible());
+
+ x86_write_percpu(paravirt_lazy_mode, mode);
+ (*ops->enter)();
+}
+
+static void leave_lazy(enum paravirt_lazy_mode mode, struct pv_lazy_ops *ops)
+{
+ BUG_ON(x86_read_percpu(paravirt_lazy_mode) != mode);
+ BUG_ON(preemptible());
+
+ x86_write_percpu(paravirt_lazy_mode, PARAVIRT_LAZY_NONE);
+ (*ops->leave)();
+}
+
+static void flush_lazy(enum paravirt_lazy_mode mode, struct pv_lazy_ops *ops)
+{
+ if (x86_read_percpu(paravirt_lazy_mode) == mode) {
+ (*ops->leave)();
+ (*ops->enter)();
+ }
+}
+
+void arch_enter_lazy_mmu_mode(void)
+{
+ enter_lazy(PARAVIRT_LAZY_MMU, &pv_mmu_ops.lazy_mode);
+}
+
+void arch_leave_lazy_mmu_mode(void)
+{
+ leave_lazy(PARAVIRT_LAZY_MMU, &pv_mmu_ops.lazy_mode);
+}
+
+void arch_flush_lazy_mmu_mode(void)
+{
+ flush_lazy(PARAVIRT_LAZY_MMU, &pv_mmu_ops.lazy_mode);
+}
+
+void arch_enter_lazy_cpu_mode(void)
+{
+ enter_lazy(PARAVIRT_LAZY_CPU, &pv_cpu_ops.lazy_mode);
+}
+
+void arch_leave_lazy_cpu_mode(void)
+{
+ leave_lazy(PARAVIRT_LAZY_CPU, &pv_cpu_ops.lazy_mode);
+}
+
+void arch_flush_lazy_cpu_mode(void)
+{
+ flush_lazy(PARAVIRT_LAZY_CPU, &pv_cpu_ops.lazy_mode);
+}
+
+enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
+{
+ return x86_read_percpu(paravirt_lazy_mode);
}
struct paravirt_ops paravirt_ops = {
@@ -333,6 +396,11 @@ struct paravirt_ops paravirt_ops = {
.set_iopl_mask = native_set_iopl_mask,
.io_delay = native_io_delay,
+
+ .lazy_mode = {
+ .enter = paravirt_nop,
+ .leave = paravirt_nop,
+ },
},
.pv_apic_ops = {
@@ -346,10 +414,6 @@ struct paravirt_ops paravirt_ops = {
#endif
},
- .pv_misc_ops = {
- .set_lazy_mode = paravirt_nop,
- },
-
.pv_mmu_ops = {
.pagetable_setup_start = native_pagetable_setup_start,
.pagetable_setup_done = native_pagetable_setup_done,
@@ -400,6 +464,11 @@ struct paravirt_ops paravirt_ops = {
.dup_mmap = paravirt_nop,
.exit_mmap = paravirt_nop,
.activate_mm = paravirt_nop,
+
+ .lazy_mode = {
+ .enter = paravirt_nop,
+ .leave = paravirt_nop,
+ },
},
};
@@ -417,7 +486,6 @@ static void __init __used pv_aliases(voi
substructure(pv_info);
substructure(pv_init_ops);
- substructure(pv_misc_ops);
substructure(pv_time_ops);
substructure(pv_cpu_ops);
substructure(pv_irq_ops);
===================================================================
--- a/arch/i386/kernel/vmi.c
+++ b/arch/i386/kernel/vmi.c
@@ -552,24 +552,19 @@ vmi_startup_ipi_hook(int phys_apicid, un
}
#endif
-static void vmi_set_lazy_mode(enum paravirt_lazy_mode mode)
-{
- static DEFINE_PER_CPU(enum paravirt_lazy_mode, lazy_mode);
-
- if (!vmi_ops.set_lazy_mode)
- return;
-
- /* Modes should never nest or overlap */
- BUG_ON(__get_cpu_var(lazy_mode) && !(mode == PARAVIRT_LAZY_NONE ||
- mode == PARAVIRT_LAZY_FLUSH));
-
- if (mode == PARAVIRT_LAZY_FLUSH) {
- vmi_ops.set_lazy_mode(0);
- vmi_ops.set_lazy_mode(__get_cpu_var(lazy_mode));
- } else {
- vmi_ops.set_lazy_mode(mode);
- __get_cpu_var(lazy_mode) = mode;
- }
+static void vmi_enter_lazy_cpu(void)
+{
+ vmi_ops.set_lazy_mode(2);
+}
+
+static void vmi_enter_lazy_mmu(void)
+{
+ vmi_ops.set_lazy_mode(1);
+}
+
+static void vmi_leave_lazy(void)
+{
+ vmi_ops.set_lazy_mode(0);
}
static inline int __init check_vmi_rom(struct vrom_header *rom)
@@ -798,7 +793,16 @@ static inline int __init activate_vmi(vo
para_wrap(pv_cpu_ops.load_esp0, vmi_load_esp0, set_kernel_stack, UpdateKernelStack);
para_fill(pv_cpu_ops.set_iopl_mask, SetIOPLMask);
para_fill(pv_cpu_ops.io_delay, IODelay);
- para_wrap(pv_misc_ops.set_lazy_mode, vmi_set_lazy_mode, set_lazy_mode, SetLazyMode);
+
+ para_wrap(pv_cpu_ops.lazy_mode.enter, vmi_enter_lazy_cpu,
+ set_lazy_mode, SetLazyMode);
+ para_wrap(pv_cpu_ops.lazy_mode.leave, vmi_leave_lazy,
+ set_lazy_mode, SetLazyMode);
+
+ para_wrap(pv_mmu_ops.lazy_mode.enter, vmi_enter_lazy_mmu,
+ set_lazy_mode, SetLazyMode);
+ para_wrap(pv_mmu_ops.lazy_mode.leave, vmi_leave_lazy,
+ set_lazy_mode, SetLazyMode);
/* user and kernel flush are just handled with different flags to FlushTLB */
para_wrap(pv_mmu_ops.flush_tlb_user, vmi_flush_tlb_user, _flush_tlb, FlushTLB);
===================================================================
--- a/arch/i386/xen/enlighten.c
+++ b/arch/i386/xen/enlighten.c
@@ -51,8 +51,6 @@
EXPORT_SYMBOL_GPL(hypercall_page);
-DEFINE_PER_CPU(enum paravirt_lazy_mode, xen_lazy_mode);
-
DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
DEFINE_PER_CPU(unsigned long, xen_cr3);
@@ -248,29 +246,9 @@ static void xen_halt(void)
xen_safe_halt();
}
-static void xen_set_lazy_mode(enum paravirt_lazy_mode mode)
-{
- BUG_ON(preemptible());
-
- switch (mode) {
- case PARAVIRT_LAZY_NONE:
- BUG_ON(x86_read_percpu(xen_lazy_mode) == PARAVIRT_LAZY_NONE);
- break;
-
- case PARAVIRT_LAZY_MMU:
- case PARAVIRT_LAZY_CPU:
- BUG_ON(x86_read_percpu(xen_lazy_mode) != PARAVIRT_LAZY_NONE);
- break;
-
- case PARAVIRT_LAZY_FLUSH:
- /* flush if necessary, but don't change state */
- if (x86_read_percpu(xen_lazy_mode) != PARAVIRT_LAZY_NONE)
- xen_mc_flush();
- return;
- }
-
+static void xen_leave_lazy(void)
+{
xen_mc_flush();
- x86_write_percpu(xen_lazy_mode, mode);
}
static unsigned long xen_store_tr(void)
@@ -357,7 +335,7 @@ static void xen_load_tls(struct thread_s
* loaded properly. This will go away as soon as Xen has been
* modified to not save/restore %gs for normal hypercalls.
*/
- if (xen_get_lazy_mode() == PARAVIRT_LAZY_CPU)
+ if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU)
loadsegment(gs, 0);
}
@@ -961,6 +939,11 @@ static const struct pv_cpu_ops xen_cpu_o
.set_iopl_mask = xen_set_iopl_mask,
.io_delay = xen_io_delay,
+
+ .lazy_mode = {
+ .enter = paravirt_nop,
+ .leave = xen_leave_lazy,
+ },
};
static const struct pv_irq_ops xen_irq_ops __initdata = {
@@ -1036,10 +1019,11 @@ static const struct pv_mmu_ops xen_mmu_o
.activate_mm = xen_activate_mm,
.dup_mmap = xen_dup_mmap,
.exit_mmap = xen_exit_mmap,
-};
-
-static const struct pv_misc_ops xen_misc_ops __initdata = {
- .set_lazy_mode = xen_set_lazy_mode,
+
+ .lazy_mode = {
+ .enter = paravirt_nop,
+ .leave = xen_leave_lazy,
+ },
};
#ifdef CONFIG_SMP
@@ -1113,7 +1097,6 @@ asmlinkage void __init xen_start_kernel(
pv_irq_ops = xen_irq_ops;
pv_apic_ops = xen_apic_ops;
pv_mmu_ops = xen_mmu_ops;
- pv_misc_ops = xen_misc_ops;
machine_ops = xen_machine_ops;
===================================================================
--- a/arch/i386/xen/mmu.c
+++ b/arch/i386/xen/mmu.c
@@ -154,7 +154,7 @@ void xen_set_pte_at(struct mm_struct *mm
pte_t *ptep, pte_t pteval)
{
if (mm == current->mm || mm == &init_mm) {
- if (xen_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
+ if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
struct multicall_space mcs;
mcs = xen_mc_entry(0);
===================================================================
--- a/arch/i386/xen/multicalls.h
+++ b/arch/i386/xen/multicalls.h
@@ -35,7 +35,7 @@ void xen_mc_flush(void);
/* Issue a multicall if we're not in a lazy mode */
static inline void xen_mc_issue(unsigned mode)
{
- if ((xen_get_lazy_mode() & mode) == 0)
+ if ((paravirt_get_lazy_mode() & mode) == 0)
xen_mc_flush();
/* restore flags saved in xen_mc_batch */
===================================================================
--- a/arch/i386/xen/xen-ops.h
+++ b/arch/i386/xen/xen-ops.h
@@ -31,13 +31,6 @@ bool xen_vcpu_stolen(int vcpu);
void xen_mark_init_mm_pinned(void);
-DECLARE_PER_CPU(enum paravirt_lazy_mode, xen_lazy_mode);
-
-static inline unsigned xen_get_lazy_mode(void)
-{
- return x86_read_percpu(xen_lazy_mode);
-}
-
void __init xen_fill_possible_map(void);
void __init xen_setup_vcpu_info_placement(void);
===================================================================
--- a/include/asm-i386/paravirt.h
+++ b/include/asm-i386/paravirt.h
@@ -24,15 +24,6 @@ struct tss_struct;
struct tss_struct;
struct mm_struct;
struct desc_struct;
-
-/* Lazy mode for batching updates / context switch */
-enum paravirt_lazy_mode {
- PARAVIRT_LAZY_NONE = 0,
- PARAVIRT_LAZY_MMU = 1,
- PARAVIRT_LAZY_CPU = 2,
- PARAVIRT_LAZY_FLUSH = 3,
-};
-
/* general info */
struct pv_info {
@@ -64,9 +55,10 @@ struct pv_init_ops {
};
-struct pv_misc_ops {
+struct pv_lazy_ops {
/* Set deferred update mode, used for batching operations. */
- void (*set_lazy_mode)(enum paravirt_lazy_mode mode);
+ void (*enter)(void);
+ void (*leave)(void);
};
struct pv_time_ops {
@@ -131,6 +123,8 @@ struct pv_cpu_ops {
/* These two are jmp to, not actually called. */
void (*irq_enable_sysexit)(void);
void (*iret)(void);
+
+ struct pv_lazy_ops lazy_mode;
};
struct pv_irq_ops {
@@ -244,13 +238,16 @@ struct pv_mmu_ops {
#ifdef CONFIG_HIGHPTE
void *(*kmap_atomic_pte)(struct page *page, enum km_type type);
#endif
+
+ struct pv_lazy_ops lazy_mode;
};
struct paravirt_ops
{
struct pv_info pv_info;
struct pv_init_ops pv_init_ops;
- struct pv_misc_ops pv_misc_ops;
+ struct pv_lazy_ops pv_lazy_cpu_ops;
+ struct pv_lazy_ops pv_lazy_mmu_ops;
struct pv_time_ops pv_time_ops;
struct pv_cpu_ops pv_cpu_ops;
struct pv_irq_ops pv_irq_ops;
@@ -260,7 +257,8 @@ struct paravirt_ops
extern struct pv_info pv_info;
extern struct pv_init_ops pv_init_ops;
-extern struct pv_misc_ops pv_misc_ops;
+extern struct pv_lazy_ops pv_lazy_cpu_ops;
+extern struct pv_lazy_ops pv_lazy_mmu_ops;
extern struct pv_time_ops pv_time_ops;
extern struct pv_cpu_ops pv_cpu_ops;
extern struct pv_irq_ops pv_irq_ops;
@@ -952,37 +950,24 @@ static inline void set_pmd(pmd_t *pmdp,
#endif /* CONFIG_X86_PAE */
#define __HAVE_ARCH_ENTER_LAZY_CPU_MODE
-static inline void arch_enter_lazy_cpu_mode(void)
-{
- PVOP_VCALL1(pv_misc_ops.set_lazy_mode, PARAVIRT_LAZY_CPU);
-}
-
-static inline void arch_leave_lazy_cpu_mode(void)
-{
- PVOP_VCALL1(pv_misc_ops.set_lazy_mode, PARAVIRT_LAZY_NONE);
-}
-
-static inline void arch_flush_lazy_cpu_mode(void)
-{
- PVOP_VCALL1(pv_misc_ops.set_lazy_mode, PARAVIRT_LAZY_FLUSH);
-}
+void arch_enter_lazy_cpu_mode(void);
+void arch_leave_lazy_cpu_mode(void);
+void arch_flush_lazy_cpu_mode(void);
#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
-static inline void arch_enter_lazy_mmu_mode(void)
-{
- PVOP_VCALL1(pv_misc_ops.set_lazy_mode, PARAVIRT_LAZY_MMU);
-}
-
-static inline void arch_leave_lazy_mmu_mode(void)
-{
- PVOP_VCALL1(pv_misc_ops.set_lazy_mode, PARAVIRT_LAZY_NONE);
-}
-
-static inline void arch_flush_lazy_mmu_mode(void)
-{
- PVOP_VCALL1(pv_misc_ops.set_lazy_mode, PARAVIRT_LAZY_FLUSH);
-}
+void arch_enter_lazy_mmu_mode(void);
+void arch_leave_lazy_mmu_mode(void);
+void arch_flush_lazy_mmu_mode(void);
+
+/* Lazy mode for batching updates / context switch */
+enum paravirt_lazy_mode {
+ PARAVIRT_LAZY_NONE,
+ PARAVIRT_LAZY_MMU,
+ PARAVIRT_LAZY_CPU,
+};
+
+enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
void _paravirt_nop(void);
#define paravirt_nop ((void *)_paravirt_nop)
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH RFC] paravirt: cleanup lazy mode handling
2007-10-01 23:46 [PATCH RFC] paravirt: cleanup lazy mode handling Jeremy Fitzhardinge
@ 2007-10-02 1:34 ` Rusty Russell
2007-10-02 6:29 ` Jeremy Fitzhardinge
2007-10-02 5:48 ` Avi Kivity
1 sibling, 1 reply; 7+ messages in thread
From: Rusty Russell @ 2007-10-02 1:34 UTC (permalink / raw)
To: Jeremy Fitzhardinge
Cc: Virtualization Mailing List, Linux Kernel Mailing List,
Andi Kleen, Zachary Amsden, Avi Kivity, Anthony Liguori,
Glauber de Oliveira Costa, Nakajima, Jun
On Mon, 2007-10-01 at 16:46 -0700, Jeremy Fitzhardinge wrote:
> This patch removes the set_lazy_mode operation, and adds "enter" and
> "leave" lazy mode operations on mmu_ops and cpu_ops. All the logic
> associated with enter and leaving lazy states is now in common code
> (basically BUG_ONs to make sure that no mode is current when entering
> a lazy mode, and make sure that the mode is current when leaving).
> Also, flush is handled in a common way, by simply leaving and
> re-entering the lazy mode.
That's good, but this code does lose on native because we no longer
simply replace the entire thing with noops.
Perhaps inverting this and having (inline) helpers is the way to go?
I'm thinking something like:
static inline void paravirt_enter_lazy(enum paravirt_lazy_mode mode)
{
BUG_ON(x86_read_percpu(paravirt_lazy_mode) != PARAVIRT_LAZY_NONE);
BUG_ON(preemptible());
x86_write_percpu(paravirt_lazy_mode, mode);
}
static inline void paravirt_exit_lazy(enum paravirt_lazy_mode mode)
{
BUG_ON(x86_read_percpu(paravirt_lazy_mode) != mode);
BUG_ON(preemptible());
x86_write_percpu(paravirt_lazy_mode, PARAVIRT_LAZY_NONE);
}
The only trick would be that the flushes are so rarely required it's
probably worth putting the unlikely() in the top level:
static void arch_flush_lazy_cpu_mode(void)
{
if (unlikely(x86_read_percpu(paravirt_lazy_mode))) {
PVOP_VCALL0(cpu_ops.enter_lazy);
PVOP_VCALL0(cpu_ops.exit_lazy);
}
}
static void arch_flush_lazy_mmu_mode(void)
{
if (unlikely(x86_read_percpu(paravirt_lazy_mode))) {
PVOP_VCALL0(mmu_ops.enter_lazy);
PVOP_VCALL0(mmu_ops.exit_lazy);
}
}
Thoughts?
Rusty.
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH RFC] paravirt: cleanup lazy mode handling
2007-10-01 23:46 [PATCH RFC] paravirt: cleanup lazy mode handling Jeremy Fitzhardinge
2007-10-02 1:34 ` Rusty Russell
@ 2007-10-02 5:48 ` Avi Kivity
2007-10-02 6:24 ` Jeremy Fitzhardinge
1 sibling, 1 reply; 7+ messages in thread
From: Avi Kivity @ 2007-10-02 5:48 UTC (permalink / raw)
To: Jeremy Fitzhardinge
Cc: Virtualization Mailing List, Linux Kernel Mailing List,
Andi Kleen, Zachary Amsden, Rusty Russell, Anthony Liguori,
Glauber de Oliveira Costa, Nakajima, Jun
Jeremy Fitzhardinge wrote:
> Currently, the set_lazy_mode pv_op is overloaded with 5 functions:
> 1. enter lazy cpu mode
> 2. leave lazy cpu mode
> 3. enter lazy mmu mode
> 4. leave lazy mmu mode
> 5. flush pending batched operations
>
> This complicates each paravirt backend, since it needs to deal with
> all the possible state transitions, handling flushing, etc. In
> particular, flushing is quite distinct from the other 4 functions, and
> seems to just cause complication.
>
> This patch removes the set_lazy_mode operation, and adds "enter" and
> "leave" lazy mode operations on mmu_ops and cpu_ops. All the logic
> associated with enter and leaving lazy states is now in common code
> (basically BUG_ONs to make sure that no mode is current when entering
> a lazy mode, and make sure that the mode is current when leaving).
> Also, flush is handled in a common way, by simply leaving and
> re-entering the lazy mode.
>
> The result is that the Xen and VMI lazy mode implementations are much
> simpler; as would lguest's be.
>
>
The code doesn't support having both lazy modes active at once. Maybe
that's not an issue, but aren't the two modes orthogonal?
> --- a/arch/i386/xen/multicalls.h
> +++ b/arch/i386/xen/multicalls.h
> @@ -35,7 +35,7 @@ void xen_mc_flush(void);
> /* Issue a multicall if we're not in a lazy mode */
> static inline void xen_mc_issue(unsigned mode)
> {
> - if ((xen_get_lazy_mode() & mode) == 0)
> + if ((paravirt_get_lazy_mode() & mode) == 0)
> xen_mc_flush();
This snippet looks like it wants to support concurrently active lazy modes.
--
Any sufficiently difficult bug is indistinguishable from a feature.
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH RFC] paravirt: cleanup lazy mode handling
2007-10-02 5:48 ` Avi Kivity
@ 2007-10-02 6:24 ` Jeremy Fitzhardinge
0 siblings, 0 replies; 7+ messages in thread
From: Jeremy Fitzhardinge @ 2007-10-02 6:24 UTC (permalink / raw)
To: Avi Kivity
Cc: Virtualization Mailing List, Linux Kernel Mailing List,
Andi Kleen, Zachary Amsden, Rusty Russell, Anthony Liguori,
Glauber de Oliveira Costa, Nakajima, Jun
Avi Kivity wrote:
> The code doesn't support having both lazy modes active at once. Maybe
> that's not an issue, but aren't the two modes orthogonal?
Hm, well, that's a good question. The initial semantics of the lazy
mode calls were "what VMI wants", and they're still not really nailed
down. VMI doesn't support having both active at once, and it's a little
unclear what it would mean anyway. For now, we don't support multiple
lazy modes active at once, and the kernel never tries to do it (in fact,
it would be a bug, since lazy mmu must be preempt disabled, and lazy cpu
is only used for context switches).
>
>> --- a/arch/i386/xen/multicalls.h
>> +++ b/arch/i386/xen/multicalls.h
>> @@ -35,7 +35,7 @@ void xen_mc_flush(void);
>> /* Issue a multicall if we're not in a lazy mode */
>> static inline void xen_mc_issue(unsigned mode)
>> {
>> - if ((xen_get_lazy_mode() & mode) == 0)
>> + if ((paravirt_get_lazy_mode() & mode) == 0)
>> xen_mc_flush();
>
> This snippet looks like it wants to support concurrently active lazy
> modes.
Yeah, it's a little more general than it needs to be.
J
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH RFC] paravirt: cleanup lazy mode handling
2007-10-02 1:34 ` Rusty Russell
@ 2007-10-02 6:29 ` Jeremy Fitzhardinge
2007-10-02 7:53 ` Rusty Russell
0 siblings, 1 reply; 7+ messages in thread
From: Jeremy Fitzhardinge @ 2007-10-02 6:29 UTC (permalink / raw)
To: Rusty Russell
Cc: Virtualization Mailing List, Linux Kernel Mailing List,
Andi Kleen, Zachary Amsden, Avi Kivity, Anthony Liguori,
Glauber de Oliveira Costa, Nakajima, Jun
Rusty Russell wrote:
> That's good, but this code does lose on native because we no longer
> simply replace the entire thing with noops.
>
> Perhaps inverting this and having (inline) helpers is the way to go?
>
I'm thinking that the overhead will be unmeasurably small, and it's not
really worth any more complexity. That's almost certainly true for lazy
mmu mode, but lazy cpu is used in the middle of a context switch, so
it's probably worth a bit more attention.
> I'm thinking something like:
>
> static inline void paravirt_enter_lazy(enum paravirt_lazy_mode mode)
> {
> BUG_ON(x86_read_percpu(paravirt_lazy_mode) != PARAVIRT_LAZY_NONE);
> BUG_ON(preemptible());
>
> x86_write_percpu(paravirt_lazy_mode, mode);
> }
>
> static inline void paravirt_exit_lazy(enum paravirt_lazy_mode mode)
> {
> BUG_ON(x86_read_percpu(paravirt_lazy_mode) != mode);
> BUG_ON(preemptible());
>
> x86_write_percpu(paravirt_lazy_mode, PARAVIRT_LAZY_NONE);
> }
>
Er, they should probably call something to make the switch actually
happen, no?
> The only trick would be that the flushes are so rarely required it's
> probably worth putting the unlikely() in the top level:
>
Sure, I guess. Would it make any difference? (I've never personally
noticed likely/unlikely change the generated code in any seriously
positive way.)
J
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH RFC] paravirt: cleanup lazy mode handling
2007-10-02 6:29 ` Jeremy Fitzhardinge
@ 2007-10-02 7:53 ` Rusty Russell
2007-10-02 22:43 ` Jeremy Fitzhardinge
0 siblings, 1 reply; 7+ messages in thread
From: Rusty Russell @ 2007-10-02 7:53 UTC (permalink / raw)
To: Jeremy Fitzhardinge
Cc: Virtualization Mailing List, Linux Kernel Mailing List,
Andi Kleen, Zachary Amsden, Avi Kivity, Anthony Liguori,
Glauber de Oliveira Costa, Nakajima, Jun
On Mon, 2007-10-01 at 23:29 -0700, Jeremy Fitzhardinge wrote:
> Rusty Russell wrote:
> > That's good, but this code does lose on native because we no longer
> > simply replace the entire thing with noops.
> >
> > Perhaps inverting this and having (inline) helpers is the way to go?
> >
> > static inline void paravirt_enter_lazy(enum paravirt_lazy_mode mode)
> > {
> > BUG_ON(x86_read_percpu(paravirt_lazy_mode) != PARAVIRT_LAZY_NONE);
> > BUG_ON(preemptible());
> >
> > x86_write_percpu(paravirt_lazy_mode, mode);
> > }
> >
> > static inline void paravirt_exit_lazy(enum paravirt_lazy_mode mode)
> > {
> > BUG_ON(x86_read_percpu(paravirt_lazy_mode) != mode);
> > BUG_ON(preemptible());
> >
> > x86_write_percpu(paravirt_lazy_mode, PARAVIRT_LAZY_NONE);
> > }
> >
>
> Er, they should probably call something to make the switch actually
> happen, no?
No, they're helpers. eg:
static void lguest_exit_lazy(enum paravirt_lazy_mode mode)
{
paravirt_exit_lazy(mode);
lguest_flush_hcalls();
}
> > The only trick would be that the flushes are so rarely required it's
> > probably worth putting the unlikely() in the top level:
>
> Sure, I guess. Would it make any difference? (I've never personally
> noticed likely/unlikely change the generated code in any seriously
> positive way.)
Probably overkill (I was trying to avoid the branch for the case where
we don't need to flush, as that's always what happens).
So just expose a flush hook:
static inline void arch_flush_lazy_cpu_mode(void)
{
PVOP_VCALL1(flush_lazy_mode, PARAVIRT_LAZY_CPU);
}
....
static void lguest_flush_lazy_mode(enum paravirt_lazy_mode mode)
{
if (unlikely(x86_read_percpu(paravirt_lazy_mode) == mode)) {
lguest_lazy_cpu_leave();
lguest_lazy_cpu_enter();
}
}
Cheers,
Rusty.
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH RFC] paravirt: cleanup lazy mode handling
2007-10-02 7:53 ` Rusty Russell
@ 2007-10-02 22:43 ` Jeremy Fitzhardinge
0 siblings, 0 replies; 7+ messages in thread
From: Jeremy Fitzhardinge @ 2007-10-02 22:43 UTC (permalink / raw)
To: Rusty Russell
Cc: Virtualization Mailing List, Linux Kernel Mailing List,
Andi Kleen, Zachary Amsden, Avi Kivity, Anthony Liguori,
Glauber de Oliveira Costa, Nakajima, Jun
Rusty Russell wrote:
> No, they're helpers. eg:
>
> static void lguest_exit_lazy(enum paravirt_lazy_mode mode)
> {
> paravirt_exit_lazy(mode);
> lguest_flush_hcalls();
> }
>
OK, how does this sit with you?
Subject: paravirt: clean up lazy mode handling
Currently, the set_lazy_mode pv_op is overloaded with 5 functions:
1. enter lazy cpu mode
2. leave lazy cpu mode
3. enter lazy mmu mode
4. leave lazy mmu mode
5. flush pending batched operations
This complicates each paravirt backend, since it needs to deal with
all the possible state transitions, handling flushing, etc. In
particular, flushing is quite distinct from the other 4 functions, and
seems to just cause complication.
This patch removes the set_lazy_mode operation, and adds "enter" and
"leave" lazy mode operations on mmu_ops and cpu_ops. All the logic
associated with entering and leaving lazy states is now in common code
(basically BUG_ONs to make sure that no mode is current when entering
a lazy mode, and make sure that the mode is current when leaving).
Also, flush is handled in a common way, by simply leaving and
re-entering the lazy mode.
The result is that the Xen and VMI lazy mode implementations are much
simpler; as would lguest's be.
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: Andi Kleen <ak@suse.de>
Cc: Zach Amsden <zach@vmware.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Avi Kivity <avi@qumranet.com>
Cc: Anthony Liguori <aliguori@us.ibm.com>
Cc: "Glauber de Oliveira Costa" <glommer@gmail.com>
Cc: "Nakajima, Jun" <jun.nakajima@intel.com>
---
arch/i386/kernel/paravirt.c | 58 +++++++++++++++++++++++++++++++++++++++----
arch/i386/kernel/vmi.c | 45 +++++++++++++++++++--------------
arch/i386/xen/enlighten.c | 44 ++++++++++----------------------
arch/i386/xen/mmu.c | 2 -
arch/i386/xen/multicalls.h | 2 -
arch/i386/xen/xen-ops.h | 7 -----
include/asm-i386/paravirt.h | 52 ++++++++++++++++++++++++--------------
7 files changed, 128 insertions(+), 82 deletions(-)
===================================================================
--- a/arch/i386/kernel/paravirt.c
+++ b/arch/i386/kernel/paravirt.c
@@ -265,6 +265,49 @@ int paravirt_disable_iospace(void)
}
return ret;
+}
+
+static DEFINE_PER_CPU(enum paravirt_lazy_mode, paravirt_lazy_mode) = PARAVIRT_LAZY_NONE;
+
+static inline void enter_lazy(enum paravirt_lazy_mode mode)
+{
+ BUG_ON(x86_read_percpu(paravirt_lazy_mode) != PARAVIRT_LAZY_NONE);
+ BUG_ON(preemptible());
+
+ x86_write_percpu(paravirt_lazy_mode, mode);
+}
+
+void paravirt_leave_lazy(enum paravirt_lazy_mode mode)
+{
+ BUG_ON(x86_read_percpu(paravirt_lazy_mode) != mode);
+ BUG_ON(preemptible());
+
+ x86_write_percpu(paravirt_lazy_mode, PARAVIRT_LAZY_NONE);
+}
+
+void paravirt_enter_lazy_mmu(void)
+{
+ enter_lazy(PARAVIRT_LAZY_MMU);
+}
+
+void paravirt_leave_lazy_mmu(void)
+{
+ paravirt_leave_lazy(PARAVIRT_LAZY_MMU);
+}
+
+void paravirt_enter_lazy_cpu(void)
+{
+ enter_lazy(PARAVIRT_LAZY_CPU);
+}
+
+void paravirt_leave_lazy_cpu(void)
+{
+ paravirt_leave_lazy(PARAVIRT_LAZY_CPU);
+}
+
+enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
+{
+ return x86_read_percpu(paravirt_lazy_mode);
}
struct paravirt_ops paravirt_ops = {
@@ -333,6 +376,11 @@ struct paravirt_ops paravirt_ops = {
.set_iopl_mask = native_set_iopl_mask,
.io_delay = native_io_delay,
+
+ .lazy_mode = {
+ .enter = paravirt_nop,
+ .leave = paravirt_nop,
+ },
},
.pv_apic_ops = {
@@ -346,10 +394,6 @@ struct paravirt_ops paravirt_ops = {
#endif
},
- .pv_misc_ops = {
- .set_lazy_mode = paravirt_nop,
- },
-
.pv_mmu_ops = {
.pagetable_setup_start = native_pagetable_setup_start,
.pagetable_setup_done = native_pagetable_setup_done,
@@ -400,6 +444,11 @@ struct paravirt_ops paravirt_ops = {
.dup_mmap = paravirt_nop,
.exit_mmap = paravirt_nop,
.activate_mm = paravirt_nop,
+
+ .lazy_mode = {
+ .enter = paravirt_nop,
+ .leave = paravirt_nop,
+ },
},
};
@@ -417,7 +466,6 @@ static void __init __used pv_aliases(voi
substructure(pv_info);
substructure(pv_init_ops);
- substructure(pv_misc_ops);
substructure(pv_time_ops);
substructure(pv_cpu_ops);
substructure(pv_irq_ops);
===================================================================
--- a/arch/i386/kernel/vmi.c
+++ b/arch/i386/kernel/vmi.c
@@ -552,24 +552,22 @@ vmi_startup_ipi_hook(int phys_apicid, un
}
#endif
-static void vmi_set_lazy_mode(enum paravirt_lazy_mode mode)
-{
- static DEFINE_PER_CPU(enum paravirt_lazy_mode, lazy_mode);
-
- if (!vmi_ops.set_lazy_mode)
- return;
-
- /* Modes should never nest or overlap */
- BUG_ON(__get_cpu_var(lazy_mode) && !(mode == PARAVIRT_LAZY_NONE ||
- mode == PARAVIRT_LAZY_FLUSH));
-
- if (mode == PARAVIRT_LAZY_FLUSH) {
- vmi_ops.set_lazy_mode(0);
- vmi_ops.set_lazy_mode(__get_cpu_var(lazy_mode));
- } else {
- vmi_ops.set_lazy_mode(mode);
- __get_cpu_var(lazy_mode) = mode;
- }
+static void vmi_enter_lazy_cpu(void)
+{
+ paravirt_enter_lazy_cpu();
+ vmi_ops.set_lazy_mode(2);
+}
+
+static void vmi_enter_lazy_mmu(void)
+{
+ paravirt_enter_lazy_mmu();
+ vmi_ops.set_lazy_mode(1);
+}
+
+static void vmi_leave_lazy(void)
+{
+ paravirt_leave_lazy(paravirt_get_lazy_mode());
+ vmi_ops.set_lazy_mode(0);
}
static inline int __init check_vmi_rom(struct vrom_header *rom)
@@ -798,7 +796,16 @@ static inline int __init activate_vmi(vo
para_wrap(pv_cpu_ops.load_esp0, vmi_load_esp0, set_kernel_stack, UpdateKernelStack);
para_fill(pv_cpu_ops.set_iopl_mask, SetIOPLMask);
para_fill(pv_cpu_ops.io_delay, IODelay);
- para_wrap(pv_misc_ops.set_lazy_mode, vmi_set_lazy_mode, set_lazy_mode, SetLazyMode);
+
+ para_wrap(pv_cpu_ops.lazy_mode.enter, vmi_enter_lazy_cpu,
+ set_lazy_mode, SetLazyMode);
+ para_wrap(pv_cpu_ops.lazy_mode.leave, vmi_leave_lazy,
+ set_lazy_mode, SetLazyMode);
+
+ para_wrap(pv_mmu_ops.lazy_mode.enter, vmi_enter_lazy_mmu,
+ set_lazy_mode, SetLazyMode);
+ para_wrap(pv_mmu_ops.lazy_mode.leave, vmi_leave_lazy,
+ set_lazy_mode, SetLazyMode);
/* user and kernel flush are just handled with different flags to FlushTLB */
para_wrap(pv_mmu_ops.flush_tlb_user, vmi_flush_tlb_user, _flush_tlb, FlushTLB);
===================================================================
--- a/arch/i386/xen/enlighten.c
+++ b/arch/i386/xen/enlighten.c
@@ -51,8 +51,6 @@
EXPORT_SYMBOL_GPL(hypercall_page);
-DEFINE_PER_CPU(enum paravirt_lazy_mode, xen_lazy_mode);
-
DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
DEFINE_PER_CPU(unsigned long, xen_cr3);
@@ -248,29 +246,10 @@ static void xen_halt(void)
xen_safe_halt();
}
-static void xen_set_lazy_mode(enum paravirt_lazy_mode mode)
-{
- BUG_ON(preemptible());
-
- switch (mode) {
- case PARAVIRT_LAZY_NONE:
- BUG_ON(x86_read_percpu(xen_lazy_mode) == PARAVIRT_LAZY_NONE);
- break;
-
- case PARAVIRT_LAZY_MMU:
- case PARAVIRT_LAZY_CPU:
- BUG_ON(x86_read_percpu(xen_lazy_mode) != PARAVIRT_LAZY_NONE);
- break;
-
- case PARAVIRT_LAZY_FLUSH:
- /* flush if necessary, but don't change state */
- if (x86_read_percpu(xen_lazy_mode) != PARAVIRT_LAZY_NONE)
- xen_mc_flush();
- return;
- }
-
+static void xen_leave_lazy(void)
+{
+ paravirt_leave_lazy(paravirt_get_lazy_mode());
xen_mc_flush();
- x86_write_percpu(xen_lazy_mode, mode);
}
static unsigned long xen_store_tr(void)
@@ -357,7 +336,7 @@ static void xen_load_tls(struct thread_s
* loaded properly. This will go away as soon as Xen has been
* modified to not save/restore %gs for normal hypercalls.
*/
- if (xen_get_lazy_mode() == PARAVIRT_LAZY_CPU)
+ if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU)
loadsegment(gs, 0);
}
@@ -961,6 +940,11 @@ static const struct pv_cpu_ops xen_cpu_o
.set_iopl_mask = xen_set_iopl_mask,
.io_delay = xen_io_delay,
+
+ .lazy_mode = {
+ .enter = paravirt_enter_lazy_cpu,
+ .leave = xen_leave_lazy,
+ },
};
static const struct pv_irq_ops xen_irq_ops __initdata = {
@@ -1036,10 +1020,11 @@ static const struct pv_mmu_ops xen_mmu_o
.activate_mm = xen_activate_mm,
.dup_mmap = xen_dup_mmap,
.exit_mmap = xen_exit_mmap,
-};
-
-static const struct pv_misc_ops xen_misc_ops __initdata = {
- .set_lazy_mode = xen_set_lazy_mode,
+
+ .lazy_mode = {
+ .enter = paravirt_enter_lazy_mmu,
+ .leave = xen_leave_lazy,
+ },
};
#ifdef CONFIG_SMP
@@ -1113,7 +1098,6 @@ asmlinkage void __init xen_start_kernel(
pv_irq_ops = xen_irq_ops;
pv_apic_ops = xen_apic_ops;
pv_mmu_ops = xen_mmu_ops;
- pv_misc_ops = xen_misc_ops;
machine_ops = xen_machine_ops;
===================================================================
--- a/arch/i386/xen/mmu.c
+++ b/arch/i386/xen/mmu.c
@@ -154,7 +154,7 @@ void xen_set_pte_at(struct mm_struct *mm
pte_t *ptep, pte_t pteval)
{
if (mm == current->mm || mm == &init_mm) {
- if (xen_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
+ if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
struct multicall_space mcs;
mcs = xen_mc_entry(0);
===================================================================
--- a/arch/i386/xen/multicalls.h
+++ b/arch/i386/xen/multicalls.h
@@ -35,7 +35,7 @@ void xen_mc_flush(void);
/* Issue a multicall if we're not in a lazy mode */
static inline void xen_mc_issue(unsigned mode)
{
- if ((xen_get_lazy_mode() & mode) == 0)
+ if ((paravirt_get_lazy_mode() & mode) == 0)
xen_mc_flush();
/* restore flags saved in xen_mc_batch */
===================================================================
--- a/arch/i386/xen/xen-ops.h
+++ b/arch/i386/xen/xen-ops.h
@@ -31,13 +31,6 @@ bool xen_vcpu_stolen(int vcpu);
void xen_mark_init_mm_pinned(void);
-DECLARE_PER_CPU(enum paravirt_lazy_mode, xen_lazy_mode);
-
-static inline unsigned xen_get_lazy_mode(void)
-{
- return x86_read_percpu(xen_lazy_mode);
-}
-
void __init xen_fill_possible_map(void);
void __init xen_setup_vcpu_info_placement(void);
===================================================================
--- a/include/asm-i386/paravirt.h
+++ b/include/asm-i386/paravirt.h
@@ -24,15 +24,6 @@ struct tss_struct;
struct tss_struct;
struct mm_struct;
struct desc_struct;
-
-/* Lazy mode for batching updates / context switch */
-enum paravirt_lazy_mode {
- PARAVIRT_LAZY_NONE = 0,
- PARAVIRT_LAZY_MMU = 1,
- PARAVIRT_LAZY_CPU = 2,
- PARAVIRT_LAZY_FLUSH = 3,
-};
-
/* general info */
struct pv_info {
@@ -64,9 +55,10 @@ struct pv_init_ops {
};
-struct pv_misc_ops {
+struct pv_lazy_ops {
/* Set deferred update mode, used for batching operations. */
- void (*set_lazy_mode)(enum paravirt_lazy_mode mode);
+ void (*enter)(void);
+ void (*leave)(void);
};
struct pv_time_ops {
@@ -131,6 +123,8 @@ struct pv_cpu_ops {
/* These two are jmp to, not actually called. */
void (*irq_enable_sysexit)(void);
void (*iret)(void);
+
+ struct pv_lazy_ops lazy_mode;
};
struct pv_irq_ops {
@@ -244,13 +238,14 @@ struct pv_mmu_ops {
#ifdef CONFIG_HIGHPTE
void *(*kmap_atomic_pte)(struct page *page, enum km_type type);
#endif
+
+ struct pv_lazy_ops lazy_mode;
};
struct paravirt_ops
{
struct pv_info pv_info;
struct pv_init_ops pv_init_ops;
- struct pv_misc_ops pv_misc_ops;
struct pv_time_ops pv_time_ops;
struct pv_cpu_ops pv_cpu_ops;
struct pv_irq_ops pv_irq_ops;
@@ -260,7 +255,6 @@ struct paravirt_ops
extern struct pv_info pv_info;
extern struct pv_init_ops pv_init_ops;
-extern struct pv_misc_ops pv_misc_ops;
extern struct pv_time_ops pv_time_ops;
extern struct pv_cpu_ops pv_cpu_ops;
extern struct pv_irq_ops pv_irq_ops;
@@ -951,37 +945,57 @@ static inline void set_pmd(pmd_t *pmdp,
}
#endif /* CONFIG_X86_PAE */
+/* Lazy mode for batching updates / context switch */
+enum paravirt_lazy_mode {
+ PARAVIRT_LAZY_NONE,
+ PARAVIRT_LAZY_MMU,
+ PARAVIRT_LAZY_CPU,
+};
+
+enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
+void paravirt_enter_lazy_cpu(void);
+void paravirt_leave_lazy_cpu(void);
+void paravirt_enter_lazy_mmu(void);
+void paravirt_leave_lazy_mmu(void);
+void paravirt_leave_lazy(enum paravirt_lazy_mode mode);
+
#define __HAVE_ARCH_ENTER_LAZY_CPU_MODE
static inline void arch_enter_lazy_cpu_mode(void)
{
- PVOP_VCALL1(pv_misc_ops.set_lazy_mode, PARAVIRT_LAZY_CPU);
+ PVOP_VCALL0(pv_cpu_ops.lazy_mode.enter);
}
static inline void arch_leave_lazy_cpu_mode(void)
{
- PVOP_VCALL1(pv_misc_ops.set_lazy_mode, PARAVIRT_LAZY_NONE);
+ PVOP_VCALL0(pv_cpu_ops.lazy_mode.leave);
}
static inline void arch_flush_lazy_cpu_mode(void)
{
- PVOP_VCALL1(pv_misc_ops.set_lazy_mode, PARAVIRT_LAZY_FLUSH);
+ if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU)) {
+ arch_leave_lazy_cpu_mode();
+ arch_enter_lazy_cpu_mode();
+ }
}
#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
- PVOP_VCALL1(pv_misc_ops.set_lazy_mode, PARAVIRT_LAZY_MMU);
+ PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
}
static inline void arch_leave_lazy_mmu_mode(void)
{
- PVOP_VCALL1(pv_misc_ops.set_lazy_mode, PARAVIRT_LAZY_NONE);
+ PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
}
static inline void arch_flush_lazy_mmu_mode(void)
{
- PVOP_VCALL1(pv_misc_ops.set_lazy_mode, PARAVIRT_LAZY_FLUSH);
+ if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU)) {
+ arch_leave_lazy_mmu_mode();
+ arch_enter_lazy_mmu_mode();
+ }
}
void _paravirt_nop(void);
^ permalink raw reply [flat|nested] 7+ messages in thread
end of thread, other threads:[~2007-10-02 22:43 UTC | newest]
Thread overview: 7+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2007-10-01 23:46 [PATCH RFC] paravirt: cleanup lazy mode handling Jeremy Fitzhardinge
2007-10-02 1:34 ` Rusty Russell
2007-10-02 6:29 ` Jeremy Fitzhardinge
2007-10-02 7:53 ` Rusty Russell
2007-10-02 22:43 ` Jeremy Fitzhardinge
2007-10-02 5:48 ` Avi Kivity
2007-10-02 6:24 ` Jeremy Fitzhardinge
This is a public inbox; see the mirroring instructions
for how to clone and mirror all data and code used for this inbox,
as well as URLs for NNTP newsgroup(s).