* [PATCHEs]: support more than 32 VCPUs in guests
@ 2010-06-09 23:09 Mukesh Rathor
2010-06-09 23:44 ` Jeremy Fitzhardinge
0 siblings, 1 reply; 15+ messages in thread
From: Mukesh Rathor @ 2010-06-09 23:09 UTC (permalink / raw)
To: Xen-devel@lists.xensource.com; +Cc: Jeremy Fitzhardinge
[-- Attachment #1: Type: text/plain, Size: 905 bytes --]
Hi,
I am attaching three patches, in case anyone is interested. These
patches allow linux guests to support more than 32 VCPUs (64bit mode
only), up to whatever limit linux supports. I tested all 3 with 128 vcpus on a
system with 128 CPU threads. Some scalability work is needed at 128
vcpus (some soft lockups during load) as expected.
Jeremy, pv ops is OK as it is on 128 vcpus, but I reworked the
xen_vcpu_setup() a little to address more than 32 vcpus on xen that
doesn't have vcpu placement. Please take a look.
1. Patch for 5u5 (2.6.18-190*): tested 64bit. compiled 32bit.
2. Patch for 5u4 (2.6.18-164*): tested 64bit. not compiled on 32bit.
(NOTE: increased NR_DYNIRQS to 1024)
3. Patch for PVOPS: minor change to xen_vcpu_setup(). tested 64bit.
not compiled on 32bit.
thanks,
Mukesh
PS: make sure to do full build: vmlinux, modules, etc.. when using el5
kernel patches.
[-- Attachment #2: 5u5.diff --]
[-- Type: text/x-patch, Size: 9976 bytes --]
diff --git a/arch/i386/kernel/time-xen.c b/arch/i386/kernel/time-xen.c
index 8809ff8..4b716b8 100644
--- a/arch/i386/kernel/time-xen.c
+++ b/arch/i386/kernel/time-xen.c
@@ -236,7 +236,7 @@ void init_cpu_khz(void)
{
u64 __cpu_khz = 1000000ULL << US_SCALE;
struct vcpu_time_info *info;
- info = &HYPERVISOR_shared_info->vcpu_info[0].time;
+ info = &((vcpu_infop(0))->time);
do_div(__cpu_khz, info->tsc_to_system_mul);
if (info->tsc_shift < 0)
cpu_khz = __cpu_khz << -info->tsc_shift;
@@ -306,11 +306,10 @@ static void update_wallclock(void)
*/
static void get_time_values_from_xen(void)
{
- shared_info_t *s = HYPERVISOR_shared_info;
struct vcpu_time_info *src;
struct shadow_time_info *dst;
- src = &s->vcpu_info[smp_processor_id()].time;
+ src = &(vcpu_infop(smp_processor_id())->time);
dst = &per_cpu(shadow_time, smp_processor_id());
do {
@@ -331,7 +330,7 @@ static inline int time_values_up_to_date(int cpu)
struct vcpu_time_info *src;
struct shadow_time_info *dst;
- src = &HYPERVISOR_shared_info->vcpu_info[cpu].time;
+ src = &(vcpu_infop(cpu)->time);
dst = &per_cpu(shadow_time, cpu);
rmb();
diff --git a/arch/x86_64/kernel/irqflags-xen.c b/arch/x86_64/kernel/irqflags-xen.c
index e3b7ab5..2826dc6 100644
--- a/arch/x86_64/kernel/irqflags-xen.c
+++ b/arch/x86_64/kernel/irqflags-xen.c
@@ -17,7 +17,7 @@ unsigned long __raw_local_save_flags(void)
unsigned long flags;
preempt_disable();
- _vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];
+ _vcpu = per_cpu(xen_vcpup, __vcpu_id);
flags = _vcpu->evtchn_upcall_mask;
preempt_enable();
@@ -29,7 +29,7 @@ void raw_local_irq_restore(unsigned long flags)
{
struct vcpu_info *_vcpu;
preempt_disable();
- _vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];
+ _vcpu = per_cpu(xen_vcpup, __vcpu_id);
if ((_vcpu->evtchn_upcall_mask = flags) == 0) {
barrier(); /* unmask then check (avoid races) */
if ( unlikely(_vcpu->evtchn_upcall_pending) )
@@ -45,7 +45,7 @@ void raw_local_irq_disable(void)
struct vcpu_info *_vcpu;
preempt_disable();
- _vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];
+ _vcpu = per_cpu(xen_vcpup, __vcpu_id);
_vcpu->evtchn_upcall_mask = 1;
preempt_enable_no_resched();
}
@@ -56,7 +56,7 @@ void raw_local_irq_enable(void)
struct vcpu_info *_vcpu;
preempt_disable();
- _vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];
+ _vcpu = per_cpu(xen_vcpup, __vcpu_id);
_vcpu->evtchn_upcall_mask = 0;
barrier(); /* unmask then check (avoid races) */
if ( unlikely(_vcpu->evtchn_upcall_pending) )
@@ -75,7 +75,7 @@ unsigned long __raw_local_irq_save(void)
unsigned long flags;
preempt_disable();
- _vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];
+ _vcpu = per_cpu(xen_vcpup, __vcpu_id);
flags = _vcpu->evtchn_upcall_mask;
_vcpu->evtchn_upcall_mask = 1;
preempt_enable_no_resched();
@@ -91,7 +91,7 @@ int raw_irqs_disabled(void)
int disabled;
preempt_disable();
- _vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];
+ _vcpu = per_cpu(xen_vcpup, __vcpu_id);
disabled = (_vcpu->evtchn_upcall_mask != 0);
preempt_enable_no_resched();
diff --git a/arch/x86_64/kernel/xen_entry.S b/arch/x86_64/kernel/xen_entry.S
index b3d7f19..8f2d77d 100644
--- a/arch/x86_64/kernel/xen_entry.S
+++ b/arch/x86_64/kernel/xen_entry.S
@@ -12,19 +12,18 @@
//#define preempt_enable(reg) decl threadinfo_preempt_count(reg)
#define preempt_disable(reg)
#define preempt_enable(reg)
-#define XEN_GET_VCPU_INFO(reg) preempt_disable(%rbp) ; \
- movq %gs:pda_cpunumber,reg ; \
- shl $32, reg ; \
- shr $32-sizeof_vcpu_shift,reg ; \
- addq HYPERVISOR_shared_info,reg
#define XEN_PUT_VCPU_INFO(reg) preempt_enable(%rbp) ; \
#define XEN_PUT_VCPU_INFO_fixup .byte 0xff,0xff,0xff
#else
-#define XEN_GET_VCPU_INFO(reg) movq HYPERVISOR_shared_info,reg
#define XEN_PUT_VCPU_INFO(reg)
#define XEN_PUT_VCPU_INFO_fixup
#endif
+#define XEN_GET_VCPU_INFO(reg) preempt_disable(%rbp) ; \
+ movq %gs:pda_data_offset,reg ; \
+ addq $per_cpu__xen_vcpup,reg ; \
+ movq (reg), reg
+
#define XEN_LOCKED_BLOCK_EVENTS(reg) movb $1,evtchn_upcall_mask(reg)
#define XEN_LOCKED_UNBLOCK_EVENTS(reg) movb $0,evtchn_upcall_mask(reg)
#define XEN_BLOCK_EVENTS(reg) XEN_GET_VCPU_INFO(reg) ; \
diff --git a/arch/x86_64/mm/fault-xen.c b/arch/x86_64/mm/fault-xen.c
index 8a9a09d..6e6f983 100644
--- a/arch/x86_64/mm/fault-xen.c
+++ b/arch/x86_64/mm/fault-xen.c
@@ -411,8 +411,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
prefetchw(&mm->mmap_sem);
/* get the address */
- address = HYPERVISOR_shared_info->vcpu_info[
- smp_processor_id()].arch.cr2;
+ address = (__get_cpu_var(xen_vcpup))->arch.cr2;
info.si_code = SEGV_MAPERR;
diff --git a/arch/x86_64/mm/init-xen.c b/arch/x86_64/mm/init-xen.c
index d433ecb..16ad19d 100644
--- a/arch/x86_64/mm/init-xen.c
+++ b/arch/x86_64/mm/init-xen.c
@@ -792,6 +792,8 @@ void __init paging_init(void)
__set_fixmap(FIX_ISAMAP_BEGIN - i,
virt_to_mfn(empty_zero_page) << PAGE_SHIFT,
PAGE_KERNEL_RO);
+
+ per_cpu(xen_vcpup, 0) = &HYPERVISOR_shared_info->vcpu_info[0];
}
#endif
diff --git a/drivers/xen/core/evtchn.c b/drivers/xen/core/evtchn.c
index 323b85f..8939f0a 100644
--- a/drivers/xen/core/evtchn.c
+++ b/drivers/xen/core/evtchn.c
@@ -219,7 +219,7 @@ asmlinkage void evtchn_do_upcall(struct pt_regs *regs)
unsigned int l1i, l2i, port, count;
int irq, cpu = smp_processor_id();
shared_info_t *s = HYPERVISOR_shared_info;
- vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];
+ vcpu_info_t *vcpu_info = vcpu_infop(cpu);
do {
@@ -788,7 +788,7 @@ void unmask_evtchn(int port)
{
shared_info_t *s = HYPERVISOR_shared_info;
unsigned int cpu = smp_processor_id();
- vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];
+ vcpu_info_t *vcpu_info = vcpu_infop(cpu);
BUG_ON(!irqs_disabled());
diff --git a/drivers/xen/core/smpboot.c b/drivers/xen/core/smpboot.c
index d2d1182..0d8b5b1 100644
--- a/drivers/xen/core/smpboot.c
+++ b/drivers/xen/core/smpboot.c
@@ -80,6 +80,46 @@ EXPORT_SYMBOL(x86_cpu_to_apicid);
unsigned int maxcpus = NR_CPUS;
#endif
+#ifdef __x86_64__
+DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
+DEFINE_PER_CPU(struct vcpu_info *, xen_vcpup) =
+ { (struct vcpu_info *)empty_zero_page };
+EXPORT_PER_CPU_SYMBOL(xen_vcpup);
+
+static void check_relocate_vcpus(void)
+{
+ struct vcpu_register_vcpu_info info;
+ struct vcpu_info *vcpup;
+ int rc, cpu, relocate=0;
+
+ if (num_possible_cpus() > MAX_VIRT_CPUS)
+ relocate = 1;
+
+ for_each_possible_cpu (cpu) {
+ if (relocate) {
+ vcpup = &per_cpu(xen_vcpu_info, cpu);
+ info.mfn = virt_to_mfn(vcpup);
+ info.offset = offset_in_page(vcpup);
+ rc = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu,
+ &info);
+ if (rc == ENOSYS) {
+ printk(KERN_ERR "Current xen does not support "
+ "more than 32 VCPUs\n");
+ } else if (rc)
+ printk(KERN_ERR "VCPUOP failed. rc:%d\n", rc);
+
+ BUG_ON(rc);
+ } else {
+ /* use shared page so we can run on older xen without
+ * VCPUOP_register_vcpu_info */
+ vcpup = &HYPERVISOR_shared_info->vcpu_info[cpu];
+ }
+
+ per_cpu(xen_vcpup, cpu) = vcpup;
+ }
+}
+#endif
+
void __init prefill_possible_map(void)
{
int i, rc;
@@ -359,6 +399,9 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
if (!skip_ioapic_setup && nr_ioapics)
setup_IO_APIC();
#endif
+#ifdef __x86_64__
+ check_relocate_vcpus();
+#endif
}
void __devinit smp_prepare_boot_cpu(void)
diff --git a/include/asm-i386/mach-xen/asm/hypervisor.h b/include/asm-i386/mach-xen/asm/hypervisor.h
index 89cde62..7568584 100644
--- a/include/asm-i386/mach-xen/asm/hypervisor.h
+++ b/include/asm-i386/mach-xen/asm/hypervisor.h
@@ -103,6 +103,14 @@ void xen_tlb_flush_mask(cpumask_t *mask);
void xen_invlpg_mask(cpumask_t *mask, unsigned long ptr);
#endif
+#ifdef __x86_64__
+#include <asm/percpu.h>
+DECLARE_PER_CPU(struct vcpu_info *, xen_vcpup);
+#define vcpu_infop(cpu) (per_cpu(xen_vcpup, cpu))
+#else
+#define vcpu_infop(cpu) (HYPERVISOR_shared_info->vcpu_info + (cpu))
+#endif
+
/* Returns zero on success else negative errno. */
int xen_create_contiguous_region(
unsigned long vstart, unsigned int order, unsigned int address_bits);
diff --git a/include/xen/interface/vcpu.h b/include/xen/interface/vcpu.h
index eb8b5a1..42a131a 100644
--- a/include/xen/interface/vcpu.h
+++ b/include/xen/interface/vcpu.h
@@ -108,7 +108,23 @@ struct vcpu_register_runstate_memory_area {
};
typedef struct vcpu_register_runstate_memory_area vcpu_register_runstate_memory_area_t;
+/*
+ * Register a memory location in the guest address space for the
+ * vcpu_info structure. This allows the guest to place the vcpu_info
+ * structure in a convenient place, such as in a per-cpu data area.
+ * The pointer need not be page aligned, but the structure must not
+ * cross a page boundary.
+ *
+ * This may be called only once per vcpu.
+ */
#define VCPUOP_register_vcpu_info 10 /* arg == vcpu_register_vcpu_info_t */
+struct vcpu_register_vcpu_info {
+ uint64_t mfn; /* mfn of page to place vcpu_info */
+ uint32_t offset; /* offset within page */
+ uint32_t rsvd; /* unused */
+};
+typedef struct vcpu_register_vcpu_info vcpu_register_vcpu_info_t;
+DEFINE_XEN_GUEST_HANDLE(vcpu_register_vcpu_info_t);
/*
* Get the physical ID information for a pinned vcpu's underlying physical
[-- Attachment #3: 5u4.diff --]
[-- Type: text/x-patch, Size: 6555 bytes --]
diff --git a/arch/x86_64/kernel/xen_entry.S b/arch/x86_64/kernel/xen_entry.S
index 66efa07..2ffb030 100644
--- a/arch/x86_64/kernel/xen_entry.S
+++ b/arch/x86_64/kernel/xen_entry.S
@@ -12,19 +12,18 @@
//#define preempt_enable(reg) decl threadinfo_preempt_count(reg)
#define preempt_disable(reg)
#define preempt_enable(reg)
-#define XEN_GET_VCPU_INFO(reg) preempt_disable(%rbp) ; \
- movq %gs:pda_cpunumber,reg ; \
- shl $32, reg ; \
- shr $32-sizeof_vcpu_shift,reg ; \
- addq HYPERVISOR_shared_info,reg
#define XEN_PUT_VCPU_INFO(reg) preempt_enable(%rbp) ; \
#define XEN_PUT_VCPU_INFO_fixup .byte 0xff,0xff,0xff
#else
-#define XEN_GET_VCPU_INFO(reg) movq HYPERVISOR_shared_info,reg
#define XEN_PUT_VCPU_INFO(reg)
#define XEN_PUT_VCPU_INFO_fixup
#endif
+#define XEN_GET_VCPU_INFO(reg) preempt_disable(%rbp) ; \
+ movq %gs:pda_data_offset,reg ; \
+ addq $per_cpu__xen_vcpu,reg ; \
+ movq (reg), reg
+
#define XEN_LOCKED_BLOCK_EVENTS(reg) movb $1,evtchn_upcall_mask(reg)
#define XEN_LOCKED_UNBLOCK_EVENTS(reg) movb $0,evtchn_upcall_mask(reg)
#define XEN_BLOCK_EVENTS(reg) XEN_GET_VCPU_INFO(reg) ; \
diff --git a/arch/x86_64/mm/init-xen.c b/arch/x86_64/mm/init-xen.c
index 72fbf89..d6bc553 100644
--- a/arch/x86_64/mm/init-xen.c
+++ b/arch/x86_64/mm/init-xen.c
@@ -728,6 +728,7 @@ static void xen_finish_init_mapping(void)
/* Switch to the real shared_info page, and clear the dummy page. */
set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);
HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
+ per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];
memset(empty_zero_page, 0, sizeof(empty_zero_page));
/* Set up mapping of lowest 1MB of physical memory. */
diff --git a/drivers/xen/core/evtchn.c b/drivers/xen/core/evtchn.c
index 63ea957..554d1f0 100644
--- a/drivers/xen/core/evtchn.c
+++ b/drivers/xen/core/evtchn.c
@@ -242,14 +242,13 @@ asmlinkage void evtchn_do_upcall(struct pt_regs *regs)
int irq;
unsigned int cpu = smp_processor_id();
shared_info_t *s = HYPERVISOR_shared_info;
- vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];
exit_idle();
irq_enter();
do {
/* Avoid a callback storm when we reenable delivery. */
- vcpu_info->evtchn_upcall_pending = 0;
+ vcpu_info(cpu)->evtchn_upcall_pending = 0;
/* Nested invocations bail immediately. */
if (unlikely(per_cpu(upcall_count, cpu)++))
@@ -259,7 +258,7 @@ asmlinkage void evtchn_do_upcall(struct pt_regs *regs)
/* Clear master flag /before/ clearing selector flag. */
wmb();
#endif
- l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
+ l1 = xchg(&vcpu_info(cpu)->evtchn_pending_sel, 0);
l1i = per_cpu(last_processed_l1i, cpu);
l2i = per_cpu(last_processed_l2i, cpu);
@@ -943,7 +942,6 @@ void unmask_evtchn(int port)
{
shared_info_t *s = HYPERVISOR_shared_info;
unsigned int cpu = smp_processor_id();
- vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];
BUG_ON(!irqs_disabled());
@@ -959,8 +957,8 @@ void unmask_evtchn(int port)
/* Did we miss an interrupt 'edge'? Re-fire if so. */
if (synch_test_bit(port, s->evtchn_pending) &&
!synch_test_and_set_bit(port / BITS_PER_LONG,
- &vcpu_info->evtchn_pending_sel))
- vcpu_info->evtchn_upcall_pending = 1;
+ &vcpu_info(cpu)->evtchn_pending_sel))
+ vcpu_info(cpu)->evtchn_upcall_pending = 1;
}
EXPORT_SYMBOL_GPL(unmask_evtchn);
diff --git a/drivers/xen/core/smpboot.c b/drivers/xen/core/smpboot.c
index 80bd1bb..b285b8a 100644
--- a/drivers/xen/core/smpboot.c
+++ b/drivers/xen/core/smpboot.c
@@ -66,6 +66,40 @@ EXPORT_SYMBOL(x86_cpu_to_apicid);
unsigned int maxcpus = NR_CPUS;
#endif
+#ifdef __x86_64__
+DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
+DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu) =
+ { (struct vcpu_info *)empty_zero_page };
+EXPORT_PER_CPU_SYMBOL(xen_vcpu);
+
+static void check_relocate_vcpus(void)
+{
+ struct vcpu_register_vcpu_info info;
+ struct vcpu_info *vcpup;
+ int rc, cpu, relocate=0;
+
+ if (num_possible_cpus() > MAX_VIRT_CPUS)
+ relocate = 1;
+
+ for_each_possible_cpu (cpu) {
+ if (relocate) {
+ vcpup = &per_cpu(xen_vcpu_info, cpu);
+ info.mfn = virt_to_mfn(vcpup);
+ info.offset = offset_in_page(vcpup);
+ rc = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu,
+ &info);
+ BUG_ON(rc);
+ } else {
+ /* use shared page so we can run on older xen without
+ * VCPUOP_register_vcpu_info */
+ vcpup = &HYPERVISOR_shared_info->vcpu_info[cpu];
+ }
+
+ per_cpu(xen_vcpu, cpu) = vcpup;
+ }
+}
+#endif
+
void __init prefill_possible_map(void)
{
int i, rc;
@@ -364,6 +398,9 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
if (!skip_ioapic_setup && nr_ioapics)
setup_IO_APIC();
#endif
+#ifdef __x86_64__
+ check_relocate_vcpus();
+#endif
}
void __devinit smp_prepare_boot_cpu(void)
diff --git a/include/asm-i386/mach-xen/asm/hypervisor.h b/include/asm-i386/mach-xen/asm/hypervisor.h
index b38ab1b..dbe13e5 100644
--- a/include/asm-i386/mach-xen/asm/hypervisor.h
+++ b/include/asm-i386/mach-xen/asm/hypervisor.h
@@ -55,10 +55,16 @@
#elif defined(__x86_64__) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
# include <asm-generic/pgtable-nopud.h>
#endif
+#if defined(__x86_64__)
+#include <asm/percpu.h>
+DECLARE_PER_CPU(struct vcpu_info *, xen_vcpu);
+#define vcpu_info(cpu) (per_cpu(xen_vcpu, cpu))
+#else
+#define vcpu_info(cpu) (&HYPERVISOR_shared_info->vcpu_info[cpu])
+#endif
extern shared_info_t *HYPERVISOR_shared_info;
-#define vcpu_info(cpu) (HYPERVISOR_shared_info->vcpu_info + (cpu))
#ifdef CONFIG_SMP
#define current_vcpu_info() vcpu_info(smp_processor_id())
#else
diff --git a/include/asm-x86_64/mach-xen/irq_vectors.h b/include/asm-x86_64/mach-xen/irq_vectors.h
index 4391b08..594048b 100644
--- a/include/asm-x86_64/mach-xen/irq_vectors.h
+++ b/include/asm-x86_64/mach-xen/irq_vectors.h
@@ -109,7 +109,7 @@
#define NR_PIRQS 256
#define DYNIRQ_BASE (PIRQ_BASE + NR_PIRQS)
-#define NR_DYNIRQS 256
+#define NR_DYNIRQS 1024
#define NR_IRQS (NR_PIRQS + NR_DYNIRQS)
#define NR_IRQ_VECTORS NR_IRQS
[-- Attachment #4: pvops.diff --]
[-- Type: text/x-patch, Size: 2829 bytes --]
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 615897c..4653c83 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -111,40 +111,43 @@ static int have_vcpu_info_placement = 1;
static void xen_vcpu_setup(int cpu)
{
- struct vcpu_register_vcpu_info info;
- int err;
- struct vcpu_info *vcpup;
-
- BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
- per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
-
- if (!have_vcpu_info_placement)
- return; /* already tested, not available */
-
- vcpup = &per_cpu(xen_vcpu_info, cpu);
-
- info.mfn = arbitrary_virt_to_mfn(vcpup);
- info.offset = offset_in_page(vcpup);
-
- printk(KERN_DEBUG "trying to map vcpu_info %d at %p, mfn %llx, offset %d\n",
- cpu, vcpup, info.mfn, info.offset);
-
- /* Check to see if the hypervisor will put the vcpu_info
- structure where we want it, which allows direct access via
- a percpu-variable. */
- err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);
-
- if (err) {
- printk(KERN_DEBUG "register_vcpu_info failed: err=%d\n", err);
- have_vcpu_info_placement = 0;
- } else {
- /* This cpu is using the registered vcpu info, even if
- later ones fail to. */
- per_cpu(xen_vcpu, cpu) = vcpup;
-
- printk(KERN_DEBUG "cpu %d using vcpu_info at %p\n",
- cpu, vcpup);
- }
+ struct vcpu_register_vcpu_info info;
+ int err;
+ struct vcpu_info *vcpup;
+
+ BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
+
+ if (cpu < MAX_VIRT_CPUS) {
+ per_cpu(xen_vcpu,cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
+ if (!have_vcpu_info_placement)
+ return;
+ } else
+ BUG_ON(!have_vcpu_info_placement);
+
+ vcpup = &per_cpu(xen_vcpu_info, cpu);
+ info.mfn = arbitrary_virt_to_mfn(vcpup);
+ info.offset = offset_in_page(vcpup);
+
+ printk(KERN_DEBUG "trying to map vcpu_info %d at %p, mfn %llx, offset %d\n",
+ cpu, vcpup, info.mfn, info.offset);
+
+ /* Check to see if the hypervisor will put the vcpu_info
+ structure where we want it, which allows direct access via
+ a percpu-variable. */
+ err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);
+
+ if (err) {
+ printk(KERN_DEBUG "register_vcpu_info failed: err=%d\n", err);
+ have_vcpu_info_placement = 0;
+ BUG_ON(cpu >= MAX_VIRT_CPUS);
+ } else {
+ /* This cpu is using the registered vcpu info, even if
+ later ones fail to. */
+ per_cpu(xen_vcpu, cpu) = vcpup;
+
+ printk(KERN_DEBUG "cpu %d using vcpu_info at %p\n",
+ cpu, vcpup);
+ }
}
/*
[-- Attachment #5: Type: text/plain, Size: 138 bytes --]
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel
^ permalink raw reply related [flat|nested] 15+ messages in thread
* Re: [PATCHEs]: support more than 32 VCPUs in guests
2010-06-09 23:09 [PATCHEs]: support more than 32 VCPUs in guests Mukesh Rathor
@ 2010-06-09 23:44 ` Jeremy Fitzhardinge
2010-06-10 0:08 ` Mukesh Rathor
0 siblings, 1 reply; 15+ messages in thread
From: Jeremy Fitzhardinge @ 2010-06-09 23:44 UTC (permalink / raw)
To: Mukesh Rathor; +Cc: Xen-devel@lists.xensource.com
On 06/09/2010 04:09 PM, Mukesh Rathor wrote:
> Jeremy, pv ops is OK as it is on 128 vcpus, but I reworked the
> xen_vcpu_setup() a little to address more than 32vcpus on xen that
> doesn't have vcpu placement. Please take a look.
>
Why BUG_ON if the number of cpus is too high? Why not just ignore the
excess ones?
J
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCHEs]: support more than 32 VCPUs in guests
2010-06-09 23:44 ` Jeremy Fitzhardinge
@ 2010-06-10 0:08 ` Mukesh Rathor
2010-06-10 0:49 ` Jeremy Fitzhardinge
0 siblings, 1 reply; 15+ messages in thread
From: Mukesh Rathor @ 2010-06-10 0:08 UTC (permalink / raw)
To: Jeremy Fitzhardinge; +Cc: Jan, Xen-devel@lists.xensource.com
On Wed, 09 Jun 2010 16:44:02 -0700
Jeremy Fitzhardinge <jeremy@goop.org> wrote:
> On 06/09/2010 04:09 PM, Mukesh Rathor wrote:
> > Jeremy, pv ops is OK as it is on 128 vcpus, but I reworked the
> > xen_vcpu_setup() a little to address more than 32vcpus on xen that
> > doesn't have vcpu placement. Please take a look.
> >
>
> Why BUG_ON if the number of cpus is too high? Why not just ignore the
> excess ones?
>
> J
Yeah, that was my first thought also... but then I realized I couldn't
just ignore the excess cpus in that function, but would need to go back
and fixup all the cpu_present, cpu_online, etc maps (and any assoc data
structs, if any), and it just didn't seem worth it in the 2.6.18*
kernels at least. Would have been easier to do if the vcpu setup
function returned a value instead of being void.
The 2.6.18 kernel will BUG_ON() somewhere right now with excess
cpus anyways, so it is not a regression in that sense :)...
thanks,
Mukesh
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCHEs]: support more than 32 VCPUs in guests
2010-06-10 0:08 ` Mukesh Rathor
@ 2010-06-10 0:49 ` Jeremy Fitzhardinge
2010-06-10 2:13 ` Mukesh Rathor
0 siblings, 1 reply; 15+ messages in thread
From: Jeremy Fitzhardinge @ 2010-06-10 0:49 UTC (permalink / raw)
To: Mukesh Rathor; +Cc: Xen-devel@lists.xensource.com
On 06/09/2010 05:08 PM, Mukesh Rathor wrote:
>> Why BUG_ON if the number of cpus is too high? Why not just ignore the
>> excess ones?
>>
> Yeah, that was my first thought also... but then i realized i couldn't
> just ignore the excess cpus in that function, but would need to go back
> and fixup all the cpu_present, cpu_online, etc maps (and any assoc data
> structs, if any), and it just didn't seem worth it in the 2.6.18*
> kernels at least. Would have been easier to do if the vcpu setup
> function returned a value instead of being void.
>
Yes, but if have_vcpu_info_placement ends up being false (which is
tested before any other cpus are brought up) then you can simply fail to
online the ones above the limit.
BUG_ON is way too brutal. You need to fail more softly.
J
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCHEs]: support more than 32 VCPUs in guests
2010-06-10 0:49 ` Jeremy Fitzhardinge
@ 2010-06-10 2:13 ` Mukesh Rathor
2010-06-14 9:37 ` Jeremy Fitzhardinge
0 siblings, 1 reply; 15+ messages in thread
From: Mukesh Rathor @ 2010-06-10 2:13 UTC (permalink / raw)
To: Jeremy Fitzhardinge; +Cc: Jan, Xen-devel@lists.xensource.com
On Wed, 09 Jun 2010 17:49:52 -0700
Jeremy Fitzhardinge <jeremy@goop.org> wrote:
> On 06/09/2010 05:08 PM, Mukesh Rathor wrote:
> >> Why BUG_ON if the number of cpus is too high? Why not just ignore
> >> the excess ones?
> >>
> > Yeah, that was my first thought also... but then i realized i
> > couldn't just ignore the excess cpus in that function, but would
> > need to go back and fixup all the cpu_present, cpu_online, etc maps
> > (and any assoc data structs, if any), and it just didn't seem worth
> > it in the 2.6.18* kernels at least. Would have been easier to do if
> > the vcpu setup function returned a value instead of being void.
> >
>
> Yes, but if have_vcpu_info_placement ends up being false (which is
> tested before any other cpus are brought up) then you can simply fail
> to online the ones above the limit.
>
> BUG_ON is way too brutal. You need to fail more softly.
>
> J
Well, BUG_ON is only triggered if booting more than 32 VCPUs on a *very
old* xen (pre xen 3.1.0).
Looking at the code closely, we could just set setup_max_cpus to 32
somewhere in a xen function, perhaps even in xen_vcpu_setup(). That way
later in smp_init() it would just be ok.
One thing tho, the per-cpu areas are already set up at that point, so that
would need to be cleaned. BTW, I don't understand why
have_vcpu_info_placement is set to 0 in xen_guest_init()?
What minimum version of xen is required to run pvops kernel?
thanks,
Mukesh
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCHEs]: support more than 32 VCPUs in guests
2010-06-10 2:13 ` Mukesh Rathor
@ 2010-06-14 9:37 ` Jeremy Fitzhardinge
2010-06-15 2:49 ` Mukesh Rathor
0 siblings, 1 reply; 15+ messages in thread
From: Jeremy Fitzhardinge @ 2010-06-14 9:37 UTC (permalink / raw)
To: Mukesh Rathor; +Cc: Xen-devel@lists.xensource.com
On 06/10/2010 03:13 AM, Mukesh Rathor wrote:
> Well, BUG_ON is only triggered if booting more than 32 VCPUs on a *very
> old* xen (pre xen 3.1.0).
>
> Looking at code closely, we could just set setup_max_cpus to 32 some
> where in xen function, perhaps even in xen_vcpu_setup(). That way
> later in smp_init() it would just be ok.
>
Yes.
> One thing tho, the per cpus areas are already setup at that point, so that
> would need to be cleaned. BTW, I don't understand why
> have_vcpu_info_placement is set to 0 in xen_guest_init()?
>
xen_guest_init is used by the pvhvm path, and hvm domains don't have a
notion of vcpu info placement.
> What minimum version of xen is required to run pvops kernel?
>
In theory it should be back-compatible for all Xen 3, but in practice it
tweaks lots of bugs in older Xens (particularly 32-on-64). I don't know
that anyone has definitively established an earliest version. I
implemented vcpu info placement for use in pvops kernels, but it was
never my intention that it be an absolute requirement.
J
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCHEs]: support more than 32 VCPUs in guests
2010-06-14 9:37 ` Jeremy Fitzhardinge
@ 2010-06-15 2:49 ` Mukesh Rathor
2010-06-15 5:02 ` Konrad Rzeszutek Wilk
2010-06-15 8:30 ` Jeremy Fitzhardinge
0 siblings, 2 replies; 15+ messages in thread
From: Mukesh Rathor @ 2010-06-15 2:49 UTC (permalink / raw)
To: Jeremy Fitzhardinge; +Cc: Jan, Xen-devel@lists.xensource.com
[-- Attachment #1: Type: text/plain, Size: 1358 bytes --]
On Mon, 14 Jun 2010 10:37:30 +0100
Jeremy Fitzhardinge <jeremy@goop.org> wrote:
> On 06/10/2010 03:13 AM, Mukesh Rathor wrote:
> > Well, BUG_ON is only triggered if booting more than 32 VCPUs on a
> > *very old* xen (pre xen 3.1.0).
> >
> > Looking at code closely, we could just set setup_max_cpus to 32 some
> > where in xen function, perhaps even in xen_vcpu_setup(). That way
> > later in smp_init() it would just be ok.
> >
>
> Yes.
>
> > One thing tho, the per cpus areas are already setup at that point,
> > so that would need to be cleaned. BTW, I don't understand why
> > have_vcpu_info_placement is set to 0 in xen_guest_init()?
> >
>
> xen_guest_init is used by the pvhvm path, and hvm domains don't have a
> notion of vcpu info placement.
>
> > What minimum version of xen is required to run pvops kernel?
> >
>
> In theory it should be back-compatible for all Xen 3, but in practice
> it tweaks lots of bugs in older Xens (particularly 32-on-64). I
> don't know that anyone has definitively established an earliest
> version. I implemented vcpu info placement for use in pvops kernels,
> but it was never my intention that it be an absolute requirement.
>
> J
Ok, attached patch without BUG_ON. Please feel free to modify
to your liking also.
Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
thanks,
Mukesh
[-- Attachment #2: pvops.diff --]
[-- Type: text/x-patch, Size: 2954 bytes --]
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 615897c..5dc7667 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -111,40 +111,46 @@ static int have_vcpu_info_placement = 1;
static void xen_vcpu_setup(int cpu)
{
- struct vcpu_register_vcpu_info info;
- int err;
- struct vcpu_info *vcpup;
-
- BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
- per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
-
- if (!have_vcpu_info_placement)
- return; /* already tested, not available */
-
- vcpup = &per_cpu(xen_vcpu_info, cpu);
-
- info.mfn = arbitrary_virt_to_mfn(vcpup);
- info.offset = offset_in_page(vcpup);
-
- printk(KERN_DEBUG "trying to map vcpu_info %d at %p, mfn %llx, offset %d\n",
- cpu, vcpup, info.mfn, info.offset);
-
- /* Check to see if the hypervisor will put the vcpu_info
- structure where we want it, which allows direct access via
- a percpu-variable. */
- err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);
-
- if (err) {
- printk(KERN_DEBUG "register_vcpu_info failed: err=%d\n", err);
- have_vcpu_info_placement = 0;
- } else {
- /* This cpu is using the registered vcpu info, even if
- later ones fail to. */
- per_cpu(xen_vcpu, cpu) = vcpup;
-
- printk(KERN_DEBUG "cpu %d using vcpu_info at %p\n",
- cpu, vcpup);
- }
+ struct vcpu_register_vcpu_info info;
+ int err;
+ struct vcpu_info *vcpup;
+
+ BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
+
+ if (cpu < MAX_VIRT_CPUS)
+ per_cpu(xen_vcpu,cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
+
+ if (!have_vcpu_info_placement) {
+ if (cpu >= MAX_VIRT_CPUS && setup_max_cpus > MAX_VIRT_CPUS)
+ setup_max_cpus = MAX_VIRT_CPUS;
+ return;
+ }
+
+ vcpup = &per_cpu(xen_vcpu_info, cpu);
+ info.mfn = arbitrary_virt_to_mfn(vcpup);
+ info.offset = offset_in_page(vcpup);
+
+ printk(KERN_DEBUG "trying to map vcpu_info %d at %p, mfn %llx, offset %d\n",
+ cpu, vcpup, info.mfn, info.offset);
+
+ /* Check to see if the hypervisor will put the vcpu_info
+ structure where we want it, which allows direct access via
+ a percpu-variable. */
+ err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);
+
+ if (err) {
+ printk(KERN_DEBUG "register_vcpu_info failed: err=%d\n", err);
+ have_vcpu_info_placement = 0;
+ if (setup_max_cpus > MAX_VIRT_CPUS)
+ setup_max_cpus = MAX_VIRT_CPUS;
+ } else {
+ /* This cpu is using the registered vcpu info, even if
+ later ones fail to. */
+ per_cpu(xen_vcpu, cpu) = vcpup;
+
+ printk(KERN_DEBUG "cpu %d using vcpu_info at %p\n",
+ cpu, vcpup);
+ }
}
/*
[-- Attachment #3: Type: text/plain, Size: 138 bytes --]
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel
^ permalink raw reply related [flat|nested] 15+ messages in thread
* Re: Re: [PATCHEs]: support more than 32 VCPUs in guests
2010-06-15 2:49 ` Mukesh Rathor
@ 2010-06-15 5:02 ` Konrad Rzeszutek Wilk
2010-06-15 8:30 ` Jeremy Fitzhardinge
1 sibling, 0 replies; 15+ messages in thread
From: Konrad Rzeszutek Wilk @ 2010-06-15 5:02 UTC (permalink / raw)
To: Mukesh Rathor; +Cc: Jeremy Fitzhardinge, Xen-devel@lists.xensource.com, Jan
> - struct vcpu_register_vcpu_info info;
> - int err;
> - struct vcpu_info *vcpup;
> -
Why the tab to space conversion?
> + struct vcpu_register_vcpu_info info;
> + int err;
> + struct vcpu_info *vcpup;
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCHEs]: support more than 32 VCPUs in guests
2010-06-15 2:49 ` Mukesh Rathor
2010-06-15 5:02 ` Konrad Rzeszutek Wilk
@ 2010-06-15 8:30 ` Jeremy Fitzhardinge
2010-06-15 18:45 ` Mukesh Rathor
1 sibling, 1 reply; 15+ messages in thread
From: Jeremy Fitzhardinge @ 2010-06-15 8:30 UTC (permalink / raw)
To: Mukesh Rathor; +Cc: Xen-devel@lists.xensource.com
On 06/15/2010 03:49 AM, Mukesh Rathor wrote:
> On Mon, 14 Jun 2010 10:37:30 +0100
> Jeremy Fitzhardinge <jeremy@goop.org> wrote:
>
>
>> On 06/10/2010 03:13 AM, Mukesh Rathor wrote:
>>
>>> Well, BUG_ON is only triggered if booting more than 32 VCPUs on a
>>> *very old* xen (pre xen 3.1.0).
>>>
>>> Looking at code closely, we could just set setup_max_cpus to 32 some
>>> where in xen function, perhaps even in xen_vcpu_setup(). That way
>>> later in smp_init() it would just be ok.
>>>
>>>
>> Yes.
>>
>>
>>> One thing tho, the per cpus areas are already setup at that point,
>>> so that would need to be cleaned. BTW, I don't understand why
>>> have_vcpu_info_placement is set to 0 in xen_guest_init()?
>>>
>>>
>> xen_guest_init is used by the pvhvm path, and hvm domains don't have a
>> notion of vcpu info placement.
>>
>>
>>> What minimum version of xen is required to run pvops kernel?
>>>
>>>
>> In theory it should be back-compatible for all Xen 3, but in practice
>> it tweaks lots of bugs in older Xens (particularly 32-on-64). I
>> don't know that anyone has definitively established an earliest
>> version. I implemented vcpu info placement for use in pvops kernels,
>> but it was never my intention that it be an absolute requirement.
>>
>> J
>>
> Ok, attached patch without BUG_ON. Please feel free to modify
> to your liking also.
>
It looks like you smashed all the tabs into spaces so its hard to see
what you've changed in the diff. I'll fix it up and give it a look-over.
J
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCHEs]: support more than 32 VCPUs in guests
2010-06-15 8:30 ` Jeremy Fitzhardinge
@ 2010-06-15 18:45 ` Mukesh Rathor
2010-07-17 1:06 ` Mukesh Rathor
2010-07-26 22:57 ` Jeremy Fitzhardinge
0 siblings, 2 replies; 15+ messages in thread
From: Mukesh Rathor @ 2010-06-15 18:45 UTC (permalink / raw)
To: Jeremy Fitzhardinge; +Cc: Jan, Xen-devel@lists.xensource.com
[-- Attachment #1: Type: text/plain, Size: 1969 bytes --]
On Tue, 15 Jun 2010 09:30:35 +0100
Jeremy Fitzhardinge <jeremy@goop.org> wrote:
> On 06/15/2010 03:49 AM, Mukesh Rathor wrote:
> > On Mon, 14 Jun 2010 10:37:30 +0100
> > Jeremy Fitzhardinge <jeremy@goop.org> wrote:
> >
> >
> >> On 06/10/2010 03:13 AM, Mukesh Rathor wrote:
> >>
> >>> Well, BUG_ON is only triggered if booting more than 32 VCPUs on a
> >>> *very old* xen (pre xen 3.1.0).
> >>>
> >>> Looking at code closely, we could just set setup_max_cpus to 32
> >>> some where in xen function, perhaps even in xen_vcpu_setup().
> >>> That way later in smp_init() it would just be ok.
> >>>
> >>>
> >> Yes.
> >>
> >>
> >>> One thing tho, the per cpus areas are already setup at that point,
> >>> so that would need to be cleaned. BTW, I don't understand why
> >>> have_vcpu_info_placement is set to 0 in xen_guest_init()?
> >>>
> >>>
> >> xen_guest_init is used by the pvhvm path, and hvm domains don't
> >> have a notion of vcpu info placement.
> >>
> >>
> >>> What minimum version of xen is required to run pvops kernel?
> >>>
> >>>
> >> In theory it should be back-compatible for all Xen 3, but in
> >> practice it tweaks lots of bugs in older Xens (particularly
> >> 32-on-64). I don't know that anyone has definitively established
> >> an earliest version. I implemented vcpu info placement for use in
> >> pvops kernels, but it was never my intention that it be an
> >> absolute requirement.
> >>
> >> J
> >>
> > Ok, attached patch without BUG_ON. Please feel free to modify
> > to your liking also.
> >
>
> It looks like you smashed all the tabs into spaces so its hard to see
> what you've changed in the diff. I'll fix it up and give it a
> look-over.
>
> J
Sorry, I have tabs turned off because patches I submit to another product I
work on must be tab-free. Anyway, I re-attached a new one with tabs.
thanks again,
Mukesh
Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
[-- Attachment #2: pvops.diff --]
[-- Type: text/x-patch, Size: 1208 bytes --]
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 615897c..fe24c32 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -116,13 +116,17 @@ static void xen_vcpu_setup(int cpu)
struct vcpu_info *vcpup;
BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
- per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
- if (!have_vcpu_info_placement)
- return; /* already tested, not available */
+ if (cpu < MAX_VIRT_CPUS)
+ per_cpu(xen_vcpu,cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
- vcpup = &per_cpu(xen_vcpu_info, cpu);
+ if (!have_vcpu_info_placement) {
+ if (cpu >= MAX_VIRT_CPUS && setup_max_cpus > MAX_VIRT_CPUS)
+ setup_max_cpus = MAX_VIRT_CPUS;
+ return;
+ }
+ vcpup = &per_cpu(xen_vcpu_info, cpu);
info.mfn = arbitrary_virt_to_mfn(vcpup);
info.offset = offset_in_page(vcpup);
@@ -137,6 +141,8 @@ static void xen_vcpu_setup(int cpu)
if (err) {
printk(KERN_DEBUG "register_vcpu_info failed: err=%d\n", err);
have_vcpu_info_placement = 0;
+ if (setup_max_cpus > MAX_VIRT_CPUS)
+ setup_max_cpus = MAX_VIRT_CPUS;
} else {
/* This cpu is using the registered vcpu info, even if
later ones fail to. */
[-- Attachment #3: Type: text/plain, Size: 138 bytes --]
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel
^ permalink raw reply related [flat|nested] 15+ messages in thread
* Re: [PATCHEs]: support more than 32 VCPUs in guests
2010-06-15 18:45 ` Mukesh Rathor
@ 2010-07-17 1:06 ` Mukesh Rathor
2010-07-17 1:09 ` Jeremy Fitzhardinge
2010-07-26 22:57 ` Jeremy Fitzhardinge
1 sibling, 1 reply; 15+ messages in thread
From: Mukesh Rathor @ 2010-07-17 1:06 UTC (permalink / raw)
To: Mukesh Rathor; +Cc: Jeremy Fitzhardinge, Xen-devel@lists.xensource.com
On Tue, 15 Jun 2010 11:45:43 -0700
Mukesh Rathor <mukesh.rathor@oracle.com> wrote:
> On Tue, 15 Jun 2010 09:30:35 +0100
> Jeremy Fitzhardinge <jeremy@goop.org> wrote:
>
> > On 06/15/2010 03:49 AM, Mukesh Rathor wrote:
> > > On Mon, 14 Jun 2010 10:37:30 +0100
> > > Jeremy Fitzhardinge <jeremy@goop.org> wrote:
> > >
> > >
> > >> On 06/10/2010 03:13 AM, Mukesh Rathor wrote:
> > >>
> > >>> Well, BUG_ON is only triggered if booting more than 32 VCPUs on
> > >>> a *very old* xen (pre xen 3.1.0).
> > >>>
> > >>> Looking at code closely, we could just set setup_max_cpus to 32
> > >>> some where in xen function, perhaps even in xen_vcpu_setup().
> > >>> That way later in smp_init() it would just be ok.
> > >>>
> > >>>
> > >> Yes.
> > >>
> > >>
> > >>> One thing tho, the per cpus areas are already setup at that
> > >>> point, so that would need to be cleaned. BTW, I don't
> > >>> understand why have_vcpu_info_placement is set to 0 in
> > >>> xen_guest_init()?
> > >>>
> > >> xen_guest_init is used by the pvhvm path, and hvm domains don't
> > >> have a notion of vcpu info placement.
> > >>
> > >>
> > >>> What minimum version of xen is required to run pvops kernel?
> > >>>
> > >>>
> > >> In theory it should be back-compatible for all Xen 3, but in
> > >> practice it tweaks lots of bugs in older Xens (particularly
> > >> 32-on-64). I don't know that anyone has definitively established
> > >> an earliest version. I implemented vcpu info placement for use
> > >> in pvops kernels, but it was never my intention that it be an
> > >> absolute requirement.
> > >>
> > >> J
> > >>
> > > Ok, attached patch without BUG_ON. Please feel free to modify
> > > to your liking also.
> > >
> >
> > It looks like you smashed all the tabs into spaces so its hard to
> > see what you've changed in the diff. I'll fix it up and give it a
> > look-over.
> >
> > J
>
> Sorry, I've tabs turned off because patches I submit to other product
> I work on must be tab free. Anyways, re attached a new one with tabs.
>
> thanks again,
> Mukesh
>
> Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
Hi Jeremy,
Just curious, did this patch ever make it?
thanks
Mukesh
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCHEs]: support more than 32 VCPUs in guests
2010-07-17 1:06 ` Mukesh Rathor
@ 2010-07-17 1:09 ` Jeremy Fitzhardinge
2010-07-17 1:11 ` Mukesh Rathor
0 siblings, 1 reply; 15+ messages in thread
From: Jeremy Fitzhardinge @ 2010-07-17 1:09 UTC (permalink / raw)
To: Mukesh Rathor; +Cc: Xen-devel@lists.xensource.com
On 07/16/2010 06:06 PM, Mukesh Rathor wrote:
> On Tue, 15 Jun 2010 11:45:43 -0700
> Mukesh Rathor <mukesh.rathor@oracle.com> wrote:
>
>
>> On Tue, 15 Jun 2010 09:30:35 +0100
>> Jeremy Fitzhardinge <jeremy@goop.org> wrote:
>>
>>
>>> On 06/15/2010 03:49 AM, Mukesh Rathor wrote:
>>>
>>>> On Mon, 14 Jun 2010 10:37:30 +0100
>>>> Jeremy Fitzhardinge <jeremy@goop.org> wrote:
>>>>
>>>>
>>>>
>>>>> On 06/10/2010 03:13 AM, Mukesh Rathor wrote:
>>>>>
>>>>>
>>>>>> Well, BUG_ON is only triggered if booting more than 32 VCPUs on
>>>>>> a *very old* xen (pre xen 3.1.0).
>>>>>>
>>>>>> Looking at code closely, we could just set setup_max_cpus to 32
>>>>>> some where in xen function, perhaps even in xen_vcpu_setup().
>>>>>> That way later in smp_init() it would just be ok.
>>>>>>
>>>>>>
>>>>>>
>>>>> Yes.
>>>>>
>>>>>
>>>>>
>>>>>> One thing tho, the per cpus areas are already setup at that
>>>>>> point, so that would need to be cleaned. BTW, I don't
>>>>>> understand why have_vcpu_info_placement is set to 0 in
>>>>>> xen_guest_init()?
>>>>>>
>>>>>>
>>>>> xen_guest_init is used by the pvhvm path, and hvm domains don't
>>>>> have a notion of vcpu info placement.
>>>>>
>>>>>
>>>>>
>>>>>> What minimum version of xen is required to run pvops kernel?
>>>>>>
>>>>>>
>>>>>>
>>>>> In theory it should be back-compatible for all Xen 3, but in
>>>>> practice it tweaks lots of bugs in older Xens (particularly
>>>>> 32-on-64). I don't know that anyone has definitively established
>>>>> an earliest version. I implemented vcpu info placement for use
>>>>> in pvops kernels, but it was never my intention that it be an
>>>>> absolute requirement.
>>>>>
>>>>> J
>>>>>
>>>>>
>>>> Ok, attached patch without BUG_ON. Please feel free to modify
>>>> to your liking also.
>>>>
>>>>
>>> It looks like you smashed all the tabs into spaces so its hard to
>>> see what you've changed in the diff. I'll fix it up and give it a
>>> look-over.
>>>
>>> J
>>>
>> Sorry, I've tabs turned off because patches I submit to other product
>> I work on must be tab free. Anyways, re attached a new one with tabs.
>>
>> thanks again,
>> Mukesh
>>
>> Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
>>
> Hi Jeremy,
>
> Just curious, did this patch ever make it?
>
Probably not. Looks like I forgot to tag it as "mail containing patch"
so it fell through the cracks.
J
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCHEs]: support more than 32 VCPUs in guests
2010-07-17 1:09 ` Jeremy Fitzhardinge
@ 2010-07-17 1:11 ` Mukesh Rathor
0 siblings, 0 replies; 15+ messages in thread
From: Mukesh Rathor @ 2010-07-17 1:11 UTC (permalink / raw)
To: Jeremy Fitzhardinge; +Cc: Jan, Xen-devel@lists.xensource.com
On Fri, 16 Jul 2010 18:09:44 -0700
Jeremy Fitzhardinge <jeremy@goop.org> wrote:
> On 07/16/2010 06:06 PM, Mukesh Rathor wrote:
> > On Tue, 15 Jun 2010 11:45:43 -0700
> > Mukesh Rathor <mukesh.rathor@oracle.com> wrote:
> >
> >
> >> On Tue, 15 Jun 2010 09:30:35 +0100
> >> Jeremy Fitzhardinge <jeremy@goop.org> wrote:
> >>
> >>
> >>> On 06/15/2010 03:49 AM, Mukesh Rathor wrote:
> >>>
> >>>> On Mon, 14 Jun 2010 10:37:30 +0100
> >>>> Jeremy Fitzhardinge <jeremy@goop.org> wrote:
> >>>>
> >>>>
> >>>>
> >>>>> On 06/10/2010 03:13 AM, Mukesh Rathor wrote:
> >>>>>
> >>>>>
> >>>>>> Well, BUG_ON is only triggered if booting more than 32 VCPUs on
> >>>>>> a *very old* xen (pre xen 3.1.0).
> >>>>>>
> >>>>>> Looking at code closely, we could just set setup_max_cpus to 32
> >>>>>> some where in xen function, perhaps even in xen_vcpu_setup().
> >>>>>> That way later in smp_init() it would just be ok.
> >>>>>>
> >>>>>>
> >>>>>>
> >>>>> Yes.
> >>>>>
> >>>>>
> >>>>>
> >>>>>> One thing tho, the per cpus areas are already setup at that
> >>>>>> point, so that would need to be cleaned. BTW, I don't
> >>>>>> understand why have_vcpu_info_placement is set to 0 in
> >>>>>> xen_guest_init()?
> >>>>>>
> >>>>>>
> >>>>> xen_guest_init is used by the pvhvm path, and hvm domains don't
> >>>>> have a notion of vcpu info placement.
> >>>>>
> >>>>>
> >>>>>
> >>>>>> What minimum version of xen is required to run pvops kernel?
> >>>>>>
> >>>>>>
> >>>>>>
> >>>>> In theory it should be back-compatible for all Xen 3, but in
> >>>>> practice it tweaks lots of bugs in older Xens (particularly
> >>>>> 32-on-64). I don't know that anyone has definitively
> >>>>> established an earliest version. I implemented vcpu info
> >>>>> placement for use in pvops kernels, but it was never my
> >>>>> intention that it be an absolute requirement.
> >>>>>
> >>>>> J
> >>>>>
> >>>>>
> >>>> Ok, attached patch without BUG_ON. Please feel free to modify
> >>>> to your liking also.
> >>>>
> >>>>
> >>> It looks like you smashed all the tabs into spaces so its hard to
> >>> see what you've changed in the diff. I'll fix it up and give it a
> >>> look-over.
> >>>
> >>> J
> >>>
> >> Sorry, I've tabs turned off because patches I submit to other
> >> product I work on must be tab free. Anyways, re attached a new
> >> one with tabs.
> >>
> >> thanks again,
> >> Mukesh
> >>
> >> Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
> >>
> > Hi Jeremy,
> >
> > Just curious, did this patch ever make it?
> >
>
> Probably not. Looks like I forgot to tag it as "mail containing
> patch" so it fell through the cracks.
>
> J
Did you want me to resubmit it, or can you find the patch?
thanks,
m
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCHEs]: support more than 32 VCPUs in guests
2010-06-15 18:45 ` Mukesh Rathor
2010-07-17 1:06 ` Mukesh Rathor
@ 2010-07-26 22:57 ` Jeremy Fitzhardinge
2010-07-27 0:37 ` Jeremy Fitzhardinge
1 sibling, 1 reply; 15+ messages in thread
From: Jeremy Fitzhardinge @ 2010-07-26 22:57 UTC (permalink / raw)
To: Mukesh Rathor; +Cc: Xen-devel@lists.xensource.com
On 06/15/2010 11:45 AM, Mukesh Rathor wrote:
> On Tue, 15 Jun 2010 09:30:35 +0100
> Jeremy Fitzhardinge<jeremy@goop.org> wrote:
>
>> On 06/15/2010 03:49 AM, Mukesh Rathor wrote:
>>> On Mon, 14 Jun 2010 10:37:30 +0100
>>> Jeremy Fitzhardinge<jeremy@goop.org> wrote:
>>>
>>>
>>>> On 06/10/2010 03:13 AM, Mukesh Rathor wrote:
>>>>
>>>>> Well, BUG_ON is only triggered if booting more than 32 VCPUs on a
>>>>> *very old* xen (pre xen 3.1.0).
>>>>>
>>>>> Looking at code closely, we could just set setup_max_cpus to 32
>>>>> some where in xen function, perhaps even in xen_vcpu_setup().
>>>>> That way later in smp_init() it would just be ok.
>>>>>
>>>>>
>>>> Yes.
>>>>
>>>>
>>>>> One thing tho, the per cpus areas are already setup at that point,
>>>>> so that would need to be cleaned. BTW, I don't understand why
>>>>> have_vcpu_info_placement is set to 0 in xen_guest_init()?
>>>>>
>>>>>
>>>> xen_guest_init is used by the pvhvm path, and hvm domains don't
>>>> have a notion of vcpu info placement.
>>>>
>>>>
>>>>> What minimum version of xen is required to run pvops kernel?
>>>>>
>>>>>
>>>> In theory it should be back-compatible for all Xen 3, but in
>>>> practice it tweaks lots of bugs in older Xens (particularly
>>>> 32-on-64). I don't know that anyone has definitively established
>>>> an earliest version. I implemented vcpu info placement for use in
>>>> pvops kernels, but it was never my intention that it be an
>>>> absolute requirement.
>>>>
>>>> J
>>>>
>>> Ok, attached patch without BUG_ON. Please feel free to modify
>>> to your liking also.
>>>
>> It looks like you smashed all the tabs into spaces so its hard to see
>> what you've changed in the diff. I'll fix it up and give it a
>> look-over.
>>
>> J
> Sorry, I've tabs turned off because patches I submit to other product I
> work on must be tab free. Anyways, re attached a new one with tabs.
This doesn't compile with CONFIG_SMP=n because setup_max_cpus doesn't
exist. For now I've just put a couple of #ifdef CONFIG_SMPs in there to
avoid compile errors, but could you look at coming up with a cleaner
solution?
Thanks,
J
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: Re: [PATCHEs]: support more than 32 VCPUs in guests
2010-07-26 22:57 ` Jeremy Fitzhardinge
@ 2010-07-27 0:37 ` Jeremy Fitzhardinge
0 siblings, 0 replies; 15+ messages in thread
From: Jeremy Fitzhardinge @ 2010-07-27 0:37 UTC (permalink / raw)
To: Mukesh Rathor; +Cc: Xen-devel@lists.xensource.com
On 07/26/2010 03:57 PM, Jeremy Fitzhardinge wrote:
> On 06/15/2010 11:45 AM, Mukesh Rathor wrote:
>> On Tue, 15 Jun 2010 09:30:35 +0100
>> Jeremy Fitzhardinge<jeremy@goop.org> wrote:
>>
>>> On 06/15/2010 03:49 AM, Mukesh Rathor wrote:
>>>> On Mon, 14 Jun 2010 10:37:30 +0100
>>>> Jeremy Fitzhardinge<jeremy@goop.org> wrote:
>>>>
>>>>
>>>>> On 06/10/2010 03:13 AM, Mukesh Rathor wrote:
>>>>>
>>>>>> Well, BUG_ON is only triggered if booting more than 32 VCPUs on a
>>>>>> *very old* xen (pre xen 3.1.0).
>>>>>>
>>>>>> Looking at code closely, we could just set setup_max_cpus to 32
>>>>>> some where in xen function, perhaps even in xen_vcpu_setup().
>>>>>> That way later in smp_init() it would just be ok.
>>>>>>
>>>>>>
>>>>> Yes.
>>>>>
>>>>>
>>>>>> One thing tho, the per cpus areas are already setup at that point,
>>>>>> so that would need to be cleaned. BTW, I don't understand why
>>>>>> have_vcpu_info_placement is set to 0 in xen_guest_init()?
>>>>>>
>>>>>>
>>>>> xen_guest_init is used by the pvhvm path, and hvm domains don't
>>>>> have a notion of vcpu info placement.
>>>>>
>>>>>
>>>>>> What minimum version of xen is required to run pvops kernel?
>>>>>>
>>>>>>
>>>>> In theory it should be back-compatible for all Xen 3, but in
>>>>> practice it tweaks lots of bugs in older Xens (particularly
>>>>> 32-on-64). I don't know that anyone has definitively established
>>>>> an earliest version. I implemented vcpu info placement for use in
>>>>> pvops kernels, but it was never my intention that it be an
>>>>> absolute requirement.
>>>>>
>>>>> J
>>>>>
>>>> Ok, attached patch without BUG_ON. Please feel free to modify
>>>> to your liking also.
>>>>
>>> It looks like you smashed all the tabs into spaces so its hard to see
>>> what you've changed in the diff. I'll fix it up and give it a
>>> look-over.
>>>
>>> J
>> Sorry, I've tabs turned off because patches I submit to other product I
>> work on must be tab free. Anyways, re attached a new one with tabs.
>
> This doesn't compile with CONFIG_SMP=n because setup_max_cpus doesn't
> exist. For now I've just put a couple of #ifdef CONFIG_SMPs in there
> to avoid compile errors, but could you look at coming up with a
> cleaner solution?
Hm, I committed a tidier version. I don't see it getting much better.
J
^ permalink raw reply [flat|nested] 15+ messages in thread
end of thread, other threads:[~2010-07-27 0:37 UTC | newest]
Thread overview: 15+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2010-06-09 23:09 [PATCHEs]: support more than 32 VCPUs in guests Mukesh Rathor
2010-06-09 23:44 ` Jeremy Fitzhardinge
2010-06-10 0:08 ` Mukesh Rathor
2010-06-10 0:49 ` Jeremy Fitzhardinge
2010-06-10 2:13 ` Mukesh Rathor
2010-06-14 9:37 ` Jeremy Fitzhardinge
2010-06-15 2:49 ` Mukesh Rathor
2010-06-15 5:02 ` Konrad Rzeszutek Wilk
2010-06-15 8:30 ` Jeremy Fitzhardinge
2010-06-15 18:45 ` Mukesh Rathor
2010-07-17 1:06 ` Mukesh Rathor
2010-07-17 1:09 ` Jeremy Fitzhardinge
2010-07-17 1:11 ` Mukesh Rathor
2010-07-26 22:57 ` Jeremy Fitzhardinge
2010-07-27 0:37 ` Jeremy Fitzhardinge
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).