kvm.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH kvm-unit-tests 0/7] x86: introduce and apply on_cpus
@ 2017-06-01 15:48 Andrew Jones
  2017-06-01 15:48 ` [PATCH kvm-unit-tests 1/7] x86/unittests.cfg: create hyperv group Andrew Jones
                   ` (7 more replies)
  0 siblings, 8 replies; 11+ messages in thread
From: Andrew Jones @ 2017-06-01 15:48 UTC (permalink / raw)
  To: kvm; +Cc: pbonzini, rkrcmar

This is RFC-ish. There shouldn't be any functional change, and the
value of the diff stats is debatable, but I figured that I already did
the work, so I might as well post and let Paolo and Radim decide.

Andrew Jones (7):
  x86/unittests.cfg: create hyperv group
  lib/x86/smp: introduce on_cpus
  x86/hyperv_clock: apply on_cpus
  x86/hyperv_stimer: apply on_cpus
  x86/hyperv_synic: apply on_cpus
  x86/kvmclock_test: apply on_cpus
  x86/vmexit: apply on_cpus

 lib/x86/smp.c       | 20 ++++++++++++++++++++
 lib/x86/smp.h       |  2 ++
 x86/hyperv_clock.c  | 31 ++++++++-----------------------
 x86/hyperv_stimer.c | 51 ++++++++++++++-------------------------------------
 x86/hyperv_synic.c  | 44 +++++++++++++-------------------------------
 x86/kvmclock.c      |  4 ++--
 x86/kvmclock.h      |  4 ++--
 x86/kvmclock_test.c | 38 ++++++++++++++------------------------
 x86/unittests.cfg   |  3 +++
 x86/vmexit.c        | 19 +++++--------------
 10 files changed, 83 insertions(+), 133 deletions(-)

-- 
2.9.4

^ permalink raw reply	[flat|nested] 11+ messages in thread

* [PATCH kvm-unit-tests 1/7] x86/unittests.cfg: create hyperv group
  2017-06-01 15:48 [PATCH kvm-unit-tests 0/7] x86: introduce and apply on_cpus Andrew Jones
@ 2017-06-01 15:48 ` Andrew Jones
  2017-06-01 15:48 ` [PATCH kvm-unit-tests 2/7] lib/x86/smp: introduce on_cpus Andrew Jones
                   ` (6 subsequent siblings)
  7 siblings, 0 replies; 11+ messages in thread
From: Andrew Jones @ 2017-06-01 15:48 UTC (permalink / raw)
  To: kvm; +Cc: pbonzini, rkrcmar

Signed-off-by: Andrew Jones <drjones@redhat.com>
---
 x86/unittests.cfg | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/x86/unittests.cfg b/x86/unittests.cfg
index 5ab46671d631..42f1ad454c6d 100644
--- a/x86/unittests.cfg
+++ b/x86/unittests.cfg
@@ -502,16 +502,19 @@ arch = x86_64
 file = hyperv_synic.flat
 smp = 2
 extra_params = -cpu kvm64,hv_synic -device hyperv-testdev
+groups = hyperv
 
 [hyperv_stimer]
 file = hyperv_stimer.flat
 smp = 2
 extra_params = -cpu kvm64,hv_time,hv_synic,hv_stimer -device hyperv-testdev
+groups = hyperv
 
 [hyperv_clock]
 file = hyperv_clock.flat
 smp = 2
 extra_params = -cpu kvm64,hv_time
+groups = hyperv
 
 [intel_iommu]
 file = intel-iommu.flat
-- 
2.9.4

^ permalink raw reply related	[flat|nested] 11+ messages in thread

* [PATCH kvm-unit-tests 2/7] lib/x86/smp: introduce on_cpus
  2017-06-01 15:48 [PATCH kvm-unit-tests 0/7] x86: introduce and apply on_cpus Andrew Jones
  2017-06-01 15:48 ` [PATCH kvm-unit-tests 1/7] x86/unittests.cfg: create hyperv group Andrew Jones
@ 2017-06-01 15:48 ` Andrew Jones
  2017-06-07 14:48   ` Radim Krčmář
  2017-06-01 15:48 ` [PATCH kvm-unit-tests 3/7] x86/hyperv_clock: apply on_cpus Andrew Jones
                   ` (5 subsequent siblings)
  7 siblings, 1 reply; 11+ messages in thread
From: Andrew Jones @ 2017-06-01 15:48 UTC (permalink / raw)
  To: kvm; +Cc: pbonzini, rkrcmar

Signed-off-by: Andrew Jones <drjones@redhat.com>
---
 lib/x86/smp.c | 20 ++++++++++++++++++++
 lib/x86/smp.h |  2 ++
 2 files changed, 22 insertions(+)

diff --git a/lib/x86/smp.c b/lib/x86/smp.c
index 4bdbeaeb8b68..b16c98c02ad2 100644
--- a/lib/x86/smp.c
+++ b/lib/x86/smp.c
@@ -1,5 +1,6 @@
 
 #include <libcflat.h>
+#include "atomic.h"
 #include "smp.h"
 #include "apic.h"
 #include "fwcfg.h"
@@ -15,6 +16,7 @@ static void *volatile ipi_data;
 static volatile int ipi_done;
 static volatile bool ipi_wait;
 static int _cpu_count;
+static atomic_t active_cpus;
 
 static __attribute__((used)) void ipi()
 {
@@ -27,6 +29,7 @@ static __attribute__((used)) void ipi()
 	apic_write(APIC_EOI, 0);
     }
     function(data);
+    atomic_dec(&active_cpus);
     if (wait) {
 	ipi_done = 1;
 	apic_write(APIC_EOI, 0);
@@ -68,6 +71,7 @@ static void __on_cpu(int cpu, void (*function)(void *data), void *data,
     if (cpu == smp_id())
 	function(data);
     else {
+	atomic_inc(&active_cpus);
 	ipi_done = 0;
 	ipi_function = function;
 	ipi_data = data;
@@ -91,6 +95,21 @@ void on_cpu_async(int cpu, void (*function)(void *data), void *data)
     __on_cpu(cpu, function, data, 0);
 }
 
+void on_cpus(void (*func)(void))
+{
+    int cpu;
+
+    for (cpu = cpu_count() - 1; cpu >= 0; --cpu)
+        on_cpu_async(cpu, (ipi_function_type)func, NULL);
+
+    while (cpus_active())
+        ;
+}
+
+int cpus_active(void)
+{
+    return atomic_read(&active_cpus) > 1;
+}
 
 void smp_init(void)
 {
@@ -106,4 +125,5 @@ void smp_init(void)
     for (i = 1; i < cpu_count(); ++i)
         on_cpu(i, setup_smp_id, 0);
 
+    atomic_inc(&active_cpus);
 }
diff --git a/lib/x86/smp.h b/lib/x86/smp.h
index afabac8495f1..5b8deb4a185e 100644
--- a/lib/x86/smp.h
+++ b/lib/x86/smp.h
@@ -6,7 +6,9 @@ void smp_init(void);
 
 int cpu_count(void);
 int smp_id(void);
+int cpus_active(void);
 void on_cpu(int cpu, void (*function)(void *data), void *data);
 void on_cpu_async(int cpu, void (*function)(void *data), void *data);
+void on_cpus(void (*func)(void));
 
 #endif
-- 
2.9.4

^ permalink raw reply related	[flat|nested] 11+ messages in thread

* [PATCH kvm-unit-tests 3/7] x86/hyperv_clock: apply on_cpus
  2017-06-01 15:48 [PATCH kvm-unit-tests 0/7] x86: introduce and apply on_cpus Andrew Jones
  2017-06-01 15:48 ` [PATCH kvm-unit-tests 1/7] x86/unittests.cfg: create hyperv group Andrew Jones
  2017-06-01 15:48 ` [PATCH kvm-unit-tests 2/7] lib/x86/smp: introduce on_cpus Andrew Jones
@ 2017-06-01 15:48 ` Andrew Jones
  2017-06-01 15:48 ` [PATCH kvm-unit-tests 4/7] x86/hyperv_stimer: " Andrew Jones
                   ` (4 subsequent siblings)
  7 siblings, 0 replies; 11+ messages in thread
From: Andrew Jones @ 2017-06-01 15:48 UTC (permalink / raw)
  To: kvm; +Cc: pbonzini, rkrcmar

Signed-off-by: Andrew Jones <drjones@redhat.com>
---
 x86/hyperv_clock.c | 31 ++++++++-----------------------
 1 file changed, 8 insertions(+), 23 deletions(-)

diff --git a/x86/hyperv_clock.c b/x86/hyperv_clock.c
index 6c4dd56f948d..83d653231c71 100644
--- a/x86/hyperv_clock.c
+++ b/x86/hyperv_clock.c
@@ -55,13 +55,12 @@ uint64_t hv_clock_read(void)
 	return hvclock_tsc_to_ticks(&shadow, rdtsc());
 }
 
-atomic_t cpus_left;
 bool ok[MAX_CPU];
 uint64_t loops[MAX_CPU];
 
 #define iabs(x)   ((x) < 0 ? -(x) : (x))
 
-static void hv_clock_test(void *data)
+static void hv_clock_test(void)
 {
 	int i = smp_id();
 	uint64_t t = rdmsr(HV_X64_MSR_TIME_REF_COUNT);
@@ -99,7 +98,6 @@ static void hv_clock_test(void *data)
 	if (!got_drift)
 		printf("delta on CPU %d was %d...%d\n", smp_id(), min_delta, max_delta);
 	barrier();
-	atomic_dec(&cpus_left);
 }
 
 static void check_test(int ncpus)
@@ -107,13 +105,7 @@ static void check_test(int ncpus)
 	int i;
 	bool pass;
 
-	atomic_set(&cpus_left, ncpus);
-	for (i = ncpus - 1; i >= 0; i--)
-		on_cpu_async(i, hv_clock_test, NULL);
-
-	/* Wait for the end of other vcpu */
-	while(atomic_read(&cpus_left))
-		;
+	on_cpus(hv_clock_test);
 
 	pass = true;
 	for (i = ncpus - 1; i >= 0; i--)
@@ -122,7 +114,7 @@ static void check_test(int ncpus)
 	report("TSC reference precision test", pass);
 }
 
-static void hv_perf_test(void *data)
+static void hv_perf_test(void)
 {
 	uint64_t t = hv_clock_read();
 	uint64_t end = t + 1000000000 / 100;
@@ -134,7 +126,6 @@ static void hv_perf_test(void *data)
 	} while(t < end);
 
 	loops[smp_id()] = local_loops;
-	atomic_dec(&cpus_left);
 }
 
 static void perf_test(int ncpus)
@@ -142,13 +133,7 @@ static void perf_test(int ncpus)
 	int i;
 	uint64_t total_loops;
 
-	atomic_set(&cpus_left, ncpus);
-	for (i = ncpus - 1; i >= 0; i--)
-		on_cpu_async(i, hv_perf_test, NULL);
-
-	/* Wait for the end of other vcpu */
-	while(atomic_read(&cpus_left))
-		;
+	on_cpus(hv_perf_test);
 
 	total_loops = 0;
 	for (i = ncpus - 1; i >= 0; i--)
@@ -167,6 +152,10 @@ int main(int ac, char **av)
 	setup_vm();
 	smp_init();
 
+	ncpus = cpu_count();
+	if (ncpus > MAX_CPU)
+		report_abort("number cpus exceeds %d", MAX_CPU);
+
 	hv_clock = alloc_page();
 	wrmsr(HV_X64_MSR_REFERENCE_TSC, (u64)(uintptr_t)hv_clock | 1);
 	report("MSR value after enabling",
@@ -195,10 +184,6 @@ int main(int ac, char **av)
 	       "TSC reference %" PRId64" (delta %" PRId64")\n",
 	       ref2, ref2 - ref1, tsc2, t2, t2 - t1);
 
-	ncpus = cpu_count();
-	if (ncpus > MAX_CPU)
-		ncpus = MAX_CPU;
-
 	check_test(ncpus);
 	perf_test(ncpus);
 
-- 
2.9.4

^ permalink raw reply related	[flat|nested] 11+ messages in thread

* [PATCH kvm-unit-tests 4/7] x86/hyperv_stimer: apply on_cpus
  2017-06-01 15:48 [PATCH kvm-unit-tests 0/7] x86: introduce and apply on_cpus Andrew Jones
                   ` (2 preceding siblings ...)
  2017-06-01 15:48 ` [PATCH kvm-unit-tests 3/7] x86/hyperv_clock: apply on_cpus Andrew Jones
@ 2017-06-01 15:48 ` Andrew Jones
  2017-06-01 15:48 ` [PATCH kvm-unit-tests 5/7] x86/hyperv_synic: " Andrew Jones
                   ` (3 subsequent siblings)
  7 siblings, 0 replies; 11+ messages in thread
From: Andrew Jones @ 2017-06-01 15:48 UTC (permalink / raw)
  To: kvm; +Cc: pbonzini, rkrcmar

Signed-off-by: Andrew Jones <drjones@redhat.com>
---
 x86/hyperv_stimer.c | 51 ++++++++++++++-------------------------------------
 1 file changed, 14 insertions(+), 37 deletions(-)

diff --git a/x86/hyperv_stimer.c b/x86/hyperv_stimer.c
index 5292523709ea..dcbe1363fe4e 100644
--- a/x86/hyperv_stimer.c
+++ b/x86/hyperv_stimer.c
@@ -19,9 +19,8 @@
 #define SINT2_NUM 3
 #define ONE_MS_IN_100NS 10000
 
-static atomic_t g_cpus_comp_count;
-static int g_cpus_count;
 static struct spinlock g_synic_alloc_lock;
+static ulong cr3;
 
 struct stimer {
     int sint;
@@ -216,20 +215,15 @@ static void synic_disable(void)
     synic_free_page(svcpu->msg_page);
 }
 
-static void cpu_comp(void)
-{
-    atomic_inc(&g_cpus_comp_count);
-}
 
-static void stimer_test_prepare(void *ctx)
+static void stimer_test_prepare(void)
 {
     int vcpu = smp_id();
 
-    write_cr3((ulong)ctx);
+    write_cr3(cr3);
     synic_enable();
     synic_sint_create(vcpu, SINT1_NUM, SINT1_VEC, false);
     synic_sint_create(vcpu, SINT2_NUM, SINT2_VEC, true);
-    cpu_comp();
 }
 
 static void stimer_test_periodic(int vcpu, struct stimer *timer1,
@@ -280,7 +274,7 @@ static void stimer_test_auto_enable_periodic(int vcpu, struct stimer *timer)
     stimer_shutdown(timer);
 }
 
-static void stimer_test(void *ctx)
+static void stimer_test(void)
 {
     int vcpu = smp_id();
     struct svcpu *svcpu = &g_synic_vcpu[vcpu];
@@ -297,10 +291,9 @@ static void stimer_test(void *ctx)
     stimer_test_auto_enable_periodic(vcpu, timer1);
 
     irq_disable();
-    cpu_comp();
 }
 
-static void stimer_test_cleanup(void *ctx)
+static void stimer_test_cleanup(void)
 {
     int vcpu = smp_id();
 
@@ -308,20 +301,6 @@ static void stimer_test_cleanup(void *ctx)
     synic_sint_destroy(vcpu, SINT1_NUM);
     synic_sint_destroy(vcpu, SINT2_NUM);
     synic_disable();
-    cpu_comp();
-}
-
-static void on_each_cpu_async_wait(void (*func)(void *ctx), void *ctx)
-{
-    int i;
-
-    atomic_set(&g_cpus_comp_count, 0);
-    for (i = 0; i < g_cpus_count; i++) {
-        on_cpu_async(i, func, ctx);
-    }
-    while (atomic_read(&g_cpus_comp_count) != g_cpus_count) {
-        pause();
-    }
 }
 
 static void stimer_test_all(void)
@@ -332,20 +311,18 @@ static void stimer_test_all(void)
     smp_init();
     enable_apic();
 
-    handle_irq(SINT1_VEC, stimer_isr);
-    handle_irq(SINT2_VEC, stimer_isr_auto_eoi);
-
     ncpus = cpu_count();
-    if (ncpus > MAX_CPUS) {
-        ncpus = MAX_CPUS;
-    }
-
+    if (ncpus > MAX_CPUS)
+        report_abort("number cpus exceeds %d", MAX_CPUS);
     printf("cpus = %d\n", ncpus);
-    g_cpus_count = ncpus;
 
-    on_each_cpu_async_wait(stimer_test_prepare, (void *)read_cr3());
-    on_each_cpu_async_wait(stimer_test, NULL);
-    on_each_cpu_async_wait(stimer_test_cleanup, NULL);
+    handle_irq(SINT1_VEC, stimer_isr);
+    handle_irq(SINT2_VEC, stimer_isr_auto_eoi);
+
+    cr3 = read_cr3();
+    on_cpus(stimer_test_prepare);
+    on_cpus(stimer_test);
+    on_cpus(stimer_test_cleanup);
 }
 
 int main(int ac, char **av)
-- 
2.9.4

^ permalink raw reply related	[flat|nested] 11+ messages in thread

* [PATCH kvm-unit-tests 5/7] x86/hyperv_synic: apply on_cpus
  2017-06-01 15:48 [PATCH kvm-unit-tests 0/7] x86: introduce and apply on_cpus Andrew Jones
                   ` (3 preceding siblings ...)
  2017-06-01 15:48 ` [PATCH kvm-unit-tests 4/7] x86/hyperv_stimer: " Andrew Jones
@ 2017-06-01 15:48 ` Andrew Jones
  2017-06-01 15:48 ` [PATCH kvm-unit-tests 6/7] x86/kvmclock_test: " Andrew Jones
                   ` (2 subsequent siblings)
  7 siblings, 0 replies; 11+ messages in thread
From: Andrew Jones @ 2017-06-01 15:48 UTC (permalink / raw)
  To: kvm; +Cc: pbonzini, rkrcmar

Signed-off-by: Andrew Jones <drjones@redhat.com>
---
 x86/hyperv_synic.c | 44 +++++++++++++-------------------------------
 1 file changed, 13 insertions(+), 31 deletions(-)

diff --git a/x86/hyperv_synic.c b/x86/hyperv_synic.c
index a08e69f7da5f..fd74d1d79ee3 100644
--- a/x86/hyperv_synic.c
+++ b/x86/hyperv_synic.c
@@ -12,7 +12,7 @@
 #define MAX_CPUS 4
 
 static atomic_t isr_enter_count[MAX_CPUS];
-static atomic_t cpus_comp_count;
+static ulong cr3;
 
 static void synic_sint_auto_eoi_isr(isr_regs_t *regs)
 {
@@ -73,12 +73,12 @@ static void synic_sints_prepare(int vcpu)
     }
 }
 
-static void synic_test_prepare(void *ctx)
+static void synic_test_prepare(void)
 {
     u64 r;
     int i = 0;
 
-    write_cr3((ulong)ctx);
+    write_cr3(cr3);
     irq_enable();
 
     rdmsr(HV_X64_MSR_SVERSION);
@@ -91,7 +91,7 @@ static void synic_test_prepare(void *ctx)
     r = rdmsr(HV_X64_MSR_EOM);
     if (r != 0) {
         report("Hyper-V SynIC test, EOM read %#" PRIx64, false, r);
-        goto ret;
+        return;
     }
 
     wrmsr(HV_X64_MSR_SIMP, (u64)virt_to_phys(alloc_page()) |
@@ -101,8 +101,6 @@ static void synic_test_prepare(void *ctx)
     wrmsr(HV_X64_MSR_SCONTROL, HV_SYNIC_CONTROL_ENABLE);
 
     synic_sints_prepare(smp_id());
-ret:
-    atomic_inc(&cpus_comp_count);
 }
 
 static void synic_sints_test(int dst_vcpu)
@@ -125,10 +123,9 @@ static void synic_test(void *ctx)
 
     irq_enable();
     synic_sints_test(dst_vcpu);
-    atomic_inc(&cpus_comp_count);
 }
 
-static void synic_test_cleanup(void *ctx)
+static void synic_test_cleanup(void)
 {
     int vcpu = smp_id();
     int i;
@@ -142,7 +139,6 @@ static void synic_test_cleanup(void *ctx)
     wrmsr(HV_X64_MSR_SCONTROL, 0);
     wrmsr(HV_X64_MSR_SIMP, 0);
     wrmsr(HV_X64_MSR_SIEFP, 0);
-    atomic_inc(&cpus_comp_count);
 }
 
 int main(int ac, char **av)
@@ -156,40 +152,26 @@ int main(int ac, char **av)
         smp_init();
         enable_apic();
 
-        synic_prepare_sint_vecs();
-
         ncpus = cpu_count();
-        if (ncpus > MAX_CPUS) {
-            ncpus = MAX_CPUS;
-        }
+        if (ncpus > MAX_CPUS)
+            report_abort("number cpus exceeds %d", MAX_CPUS);
         printf("ncpus = %d\n", ncpus);
 
-        atomic_set(&cpus_comp_count, 0);
-        for (i = 0; i < ncpus; i++) {
-            on_cpu_async(i, synic_test_prepare, (void *)read_cr3());
-        }
+        synic_prepare_sint_vecs();
+
         printf("prepare\n");
-        while (atomic_read(&cpus_comp_count) != ncpus) {
-            pause();
-        }
+        cr3 = read_cr3();
+        on_cpus(synic_test_prepare);
 
-        atomic_set(&cpus_comp_count, 0);
         for (i = 0; i < ncpus; i++) {
             printf("test %d -> %d\n", i, ncpus - 1 - i);
             on_cpu_async(i, synic_test, (void *)(ulong)(ncpus - 1 - i));
         }
-        while (atomic_read(&cpus_comp_count) != ncpus) {
+        while (cpus_active())
             pause();
-        }
 
-        atomic_set(&cpus_comp_count, 0);
-        for (i = 0; i < ncpus; i++) {
-            on_cpu_async(i, synic_test_cleanup, NULL);
-        }
         printf("cleanup\n");
-        while (atomic_read(&cpus_comp_count) != ncpus) {
-            pause();
-        }
+        on_cpus(synic_test_cleanup);
 
         ok = true;
         for (i = 0; i < ncpus; ++i) {
-- 
2.9.4

^ permalink raw reply related	[flat|nested] 11+ messages in thread

* [PATCH kvm-unit-tests 6/7] x86/kvmclock_test: apply on_cpus
  2017-06-01 15:48 [PATCH kvm-unit-tests 0/7] x86: introduce and apply on_cpus Andrew Jones
                   ` (4 preceding siblings ...)
  2017-06-01 15:48 ` [PATCH kvm-unit-tests 5/7] x86/hyperv_synic: " Andrew Jones
@ 2017-06-01 15:48 ` Andrew Jones
  2017-06-01 15:48 ` [PATCH kvm-unit-tests 7/7] x86/vmexit: " Andrew Jones
  2017-06-01 15:49 ` [PATCH kvm-unit-tests 0/7] x86: introduce and " Paolo Bonzini
  7 siblings, 0 replies; 11+ messages in thread
From: Andrew Jones @ 2017-06-01 15:48 UTC (permalink / raw)
  To: kvm; +Cc: pbonzini, rkrcmar

Signed-off-by: Andrew Jones <drjones@redhat.com>
---
 x86/kvmclock.c      |  4 ++--
 x86/kvmclock.h      |  4 ++--
 x86/kvmclock_test.c | 38 ++++++++++++++------------------------
 3 files changed, 18 insertions(+), 28 deletions(-)

diff --git a/x86/kvmclock.c b/x86/kvmclock.c
index bad078495a4a..abfbd4272ef0 100644
--- a/x86/kvmclock.c
+++ b/x86/kvmclock.c
@@ -232,7 +232,7 @@ cycle_t kvm_clock_read()
         return ret;
 }
 
-void kvm_clock_init(void *data)
+void kvm_clock_init(void)
 {
         int index = smp_id();
         struct pvclock_vcpu_time_info *hvc = &hv_clock[index];
@@ -241,7 +241,7 @@ void kvm_clock_init(void *data)
         wrmsr(MSR_KVM_SYSTEM_TIME_NEW, (unsigned long)hvc | 1);
 }
 
-void kvm_clock_clear(void *data)
+void kvm_clock_clear(void)
 {
         wrmsr(MSR_KVM_SYSTEM_TIME_NEW, 0LL);
 }
diff --git a/x86/kvmclock.h b/x86/kvmclock.h
index dff680212758..a9e470a70af6 100644
--- a/x86/kvmclock.h
+++ b/x86/kvmclock.h
@@ -38,7 +38,7 @@ struct timespec {
 void pvclock_set_flags(unsigned char flags);
 cycle_t kvm_clock_read();
 void kvm_get_wallclock(struct timespec *ts);
-void kvm_clock_init(void *data);
-void kvm_clock_clear(void *data);
+void kvm_clock_init(void);
+void kvm_clock_clear(void);
 
 #endif
diff --git a/x86/kvmclock_test.c b/x86/kvmclock_test.c
index b90203e0f624..19e4cacfe75e 100644
--- a/x86/kvmclock_test.c
+++ b/x86/kvmclock_test.c
@@ -17,11 +17,11 @@ struct test_info {
         u64 stalls;               /* stall count */
         long long worst;          /* worst warp */
         volatile cycle_t last;    /* last cycle seen by test */
-        atomic_t ncpus;           /* number of cpu in the test*/
         int check;                /* check cycle ? */
 };
 
 struct test_info ti[4];
+struct test_info *hv_test_info;
 
 static void wallclock_test(void *data)
 {
@@ -42,9 +42,8 @@ static void wallclock_test(void *data)
         }
 }
 
-static void kvm_clock_test(void *data)
+static void kvm_clock_test(void)
 {
-        struct test_info *hv_test_info = (struct test_info *)data;
         long i, check = hv_test_info->check;
 
         for (i = 0; i < loops; i++){
@@ -78,29 +77,21 @@ static void kvm_clock_test(void *data)
                 if (!((unsigned long)i & 31))
                         asm volatile("rep; nop");
         }
-
-        atomic_dec(&hv_test_info->ncpus);
 }
 
-static int cycle_test(int ncpus, int check, struct test_info *ti)
+static int cycle_test(int check, struct test_info *ti)
 {
-        int i;
         unsigned long long begin, end;
 
         begin = rdtsc();
 
-        atomic_set(&ti->ncpus, ncpus);
         ti->check = check;
-        for (i = ncpus - 1; i >= 0; i--)
-                on_cpu_async(i, kvm_clock_test, (void *)ti);
-
-        /* Wait for the end of other vcpu */
-        while(atomic_read(&ti->ncpus))
-                ;
+        hv_test_info = ti;
+        on_cpus(kvm_clock_test);
 
         end = rdtsc();
 
-        printf("Total vcpus: %d\n", ncpus);
+        printf("Total vcpus: %d\n", cpu_count());
         printf("Test  loops: %ld\n", loops);
         if (check == 1) {
                 printf("Total warps:  %" PRId64 "\n", ti->warps);
@@ -129,9 +120,9 @@ int main(int ac, char **av)
 
         ncpus = cpu_count();
         if (ncpus > MAX_CPU)
-                ncpus = MAX_CPU;
-        for (i = 0; i < ncpus; ++i)
-                on_cpu(i, kvm_clock_init, (void *)0);
+                report_abort("number cpus exceeds %d", MAX_CPU);
+
+        on_cpus(kvm_clock_init);
 
         if (ac > 2) {
                 printf("Wallclock test, threshold %ld\n", threshold);
@@ -143,26 +134,25 @@ int main(int ac, char **av)
         printf("Check the stability of raw cycle ...\n");
         pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT
                           | PVCLOCK_RAW_CYCLE_BIT);
-        if (cycle_test(ncpus, 1, &ti[0]))
+        if (cycle_test(1, &ti[0]))
                 printf("Raw cycle is not stable\n");
         else
                 printf("Raw cycle is stable\n");
 
         pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT);
         printf("Monotonic cycle test:\n");
-        nerr += cycle_test(ncpus, 1, &ti[1]);
+        nerr += cycle_test(1, &ti[1]);
 
         printf("Measure the performance of raw cycle ...\n");
         pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT
                           | PVCLOCK_RAW_CYCLE_BIT);
-        cycle_test(ncpus, 0, &ti[2]);
+        cycle_test(0, &ti[2]);
 
         printf("Measure the performance of adjusted cycle ...\n");
         pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT);
-        cycle_test(ncpus, 0, &ti[3]);
+        cycle_test(0, &ti[3]);
 
-        for (i = 0; i < ncpus; ++i)
-                on_cpu(i, kvm_clock_clear, (void *)0);
+        on_cpus(kvm_clock_clear);
 
         return nerr > 0 ? 1 : 0;
 }
-- 
2.9.4

^ permalink raw reply related	[flat|nested] 11+ messages in thread

* [PATCH kvm-unit-tests 7/7] x86/vmexit: apply on_cpus
  2017-06-01 15:48 [PATCH kvm-unit-tests 0/7] x86: introduce and apply on_cpus Andrew Jones
                   ` (5 preceding siblings ...)
  2017-06-01 15:48 ` [PATCH kvm-unit-tests 6/7] x86/kvmclock_test: " Andrew Jones
@ 2017-06-01 15:48 ` Andrew Jones
  2017-06-01 15:49 ` [PATCH kvm-unit-tests 0/7] x86: introduce and " Paolo Bonzini
  7 siblings, 0 replies; 11+ messages in thread
From: Andrew Jones @ 2017-06-01 15:48 UTC (permalink / raw)
  To: kvm; +Cc: pbonzini, rkrcmar

Signed-off-by: Andrew Jones <drjones@redhat.com>
---
 x86/vmexit.c | 19 +++++--------------
 1 file changed, 5 insertions(+), 14 deletions(-)

diff --git a/x86/vmexit.c b/x86/vmexit.c
index 8d2bf1a31982..7533ef3a3e0a 100644
--- a/x86/vmexit.c
+++ b/x86/vmexit.c
@@ -419,24 +419,20 @@ static struct test tests[] = {
 };
 
 unsigned iterations;
-static atomic_t nr_cpus_done;
+void (*func)(void);
 
-static void run_test(void *_func)
+static void run_test(void)
 {
     int i;
-    void (*func)(void) = _func;
 
     for (i = 0; i < iterations; ++i)
         func();
-
-    atomic_inc(&nr_cpus_done);
 }
 
 static bool do_test(struct test *test)
 {
 	int i;
 	unsigned long long t1, t2;
-        void (*func)(void);
 
         iterations = 32;
 
@@ -463,11 +459,7 @@ static bool do_test(struct test *test)
 			for (i = 0; i < iterations; ++i)
 				func();
 		} else {
-			atomic_set(&nr_cpus_done, 0);
-			for (i = cpu_count(); i > 0; i--)
-				on_cpu_async(i-1, run_test, func);
-			while (atomic_read(&nr_cpus_done) < cpu_count())
-				;
+			on_cpus(run_test);
 		}
 		t2 = rdtsc();
 	} while ((t2 - t1) < GOAL);
@@ -475,7 +467,7 @@ static bool do_test(struct test *test)
 	return test->next;
 }
 
-static void enable_nx(void *junk)
+static void enable_nx(void)
 {
 	if (cpuid(0x80000001).d & (1 << 20))
 		wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_NX_MASK);
@@ -509,8 +501,7 @@ int main(int ac, char **av)
 	nr_cpus = cpu_count();
 
 	irq_enable();
-	for (i = cpu_count(); i > 0; i--)
-		on_cpu(i-1, enable_nx, 0);
+	on_cpus(enable_nx);
 
 	fadt = find_acpi_table_addr(FACP_SIGNATURE);
 	pm_tmr_blk = fadt->pm_tmr_blk;
-- 
2.9.4

^ permalink raw reply related	[flat|nested] 11+ messages in thread

* Re: [PATCH kvm-unit-tests 0/7] x86: introduce and apply on_cpus
  2017-06-01 15:48 [PATCH kvm-unit-tests 0/7] x86: introduce and apply on_cpus Andrew Jones
                   ` (6 preceding siblings ...)
  2017-06-01 15:48 ` [PATCH kvm-unit-tests 7/7] x86/vmexit: " Andrew Jones
@ 2017-06-01 15:49 ` Paolo Bonzini
  7 siblings, 0 replies; 11+ messages in thread
From: Paolo Bonzini @ 2017-06-01 15:49 UTC (permalink / raw)
  To: Andrew Jones, kvm; +Cc: rkrcmar



On 01/06/2017 17:48, Andrew Jones wrote:
> This is RFC-ish. There shouldn't be any functional change, and the
> value of the diff stats is debatable, but I figured that I already did
> the work, so I might as well post and let Paolo and Radim decide.

Actually I was going to ask you about this or do it.  So thanks---though
I'll let Radim look at the patches, because it's almost long weekend
time. :)

Thanks,

Paolo

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH kvm-unit-tests 2/7] lib/x86/smp: introduce on_cpus
  2017-06-01 15:48 ` [PATCH kvm-unit-tests 2/7] lib/x86/smp: introduce on_cpus Andrew Jones
@ 2017-06-07 14:48   ` Radim Krčmář
  2017-06-13  8:14     ` Andrew Jones
  0 siblings, 1 reply; 11+ messages in thread
From: Radim Krčmář @ 2017-06-07 14:48 UTC (permalink / raw)
  To: Andrew Jones; +Cc: kvm, pbonzini

2017-06-01 17:48+0200, Andrew Jones:
> Signed-off-by: Andrew Jones <drjones@redhat.com>
> ---
> diff --git a/lib/x86/smp.c b/lib/x86/smp.c
> @@ -1,5 +1,6 @@
> @@ -91,6 +95,21 @@ void on_cpu_async(int cpu, void (*function)(void *data), void *data)
>      __on_cpu(cpu, function, data, 0);
>  }
>  

(I'm sorry the review took so long.)

> +void on_cpus(void (*func)(void))
> +{
> +    int cpu;
> +
> +    for (cpu = cpu_count() - 1; cpu >= 0; --cpu)
> +        on_cpu_async(cpu, (ipi_function_type)func, NULL);

Calling a casted function pointer is undefined behavior in C and I think
that keeping the argument is better anyway -- the API is consistent that
way and you don't need to introduce a global in some patches.

> +
> +    while (cpus_active())
> +        ;

Add a pause() here,

Thanks.

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH kvm-unit-tests 2/7] lib/x86/smp: introduce on_cpus
  2017-06-07 14:48   ` Radim Krčmář
@ 2017-06-13  8:14     ` Andrew Jones
  0 siblings, 0 replies; 11+ messages in thread
From: Andrew Jones @ 2017-06-13  8:14 UTC (permalink / raw)
  To: Radim Krčmář; +Cc: kvm, pbonzini

On Wed, Jun 07, 2017 at 04:48:34PM +0200, Radim Krčmář wrote:
> 2017-06-01 17:48+0200, Andrew Jones:
> > Signed-off-by: Andrew Jones <drjones@redhat.com>
> > ---
> > diff --git a/lib/x86/smp.c b/lib/x86/smp.c
> > @@ -1,5 +1,6 @@
> > @@ -91,6 +95,21 @@ void on_cpu_async(int cpu, void (*function)(void *data), void *data)
> >      __on_cpu(cpu, function, data, 0);
> >  }
> >  
> 
> (I'm sorry the review took so long.)
> 
> > +void on_cpus(void (*func)(void))
> > +{
> > +    int cpu;
> > +
> > +    for (cpu = cpu_count() - 1; cpu >= 0; --cpu)
> > +        on_cpu_async(cpu, (ipi_function_type)func, NULL);
> 
> Calling a casted function pointer is undefined behavior in C and I think
> that keeping the argument is better anyway -- the API is consistent that
> way and you don't need to introduce a global in some patches.

I agree I shouldn't use undefined behavior and I have no strong preference
on the API, so I'll change this, and I'll fix ARM too.

> 
> > +
> > +    while (cpus_active())
> > +        ;
> 
> Add a pause() here,

Will do.

Thanks,
drew

^ permalink raw reply	[flat|nested] 11+ messages in thread

end of thread, other threads:[~2017-06-13  8:14 UTC | newest]

Thread overview: 11+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-06-01 15:48 [PATCH kvm-unit-tests 0/7] x86: introduce and apply on_cpus Andrew Jones
2017-06-01 15:48 ` [PATCH kvm-unit-tests 1/7] x86/unittests.cfg: create hyperv group Andrew Jones
2017-06-01 15:48 ` [PATCH kvm-unit-tests 2/7] lib/x86/smp: introduce on_cpus Andrew Jones
2017-06-07 14:48   ` Radim Krčmář
2017-06-13  8:14     ` Andrew Jones
2017-06-01 15:48 ` [PATCH kvm-unit-tests 3/7] x86/hyperv_clock: apply on_cpus Andrew Jones
2017-06-01 15:48 ` [PATCH kvm-unit-tests 4/7] x86/hyperv_stimer: " Andrew Jones
2017-06-01 15:48 ` [PATCH kvm-unit-tests 5/7] x86/hyperv_synic: " Andrew Jones
2017-06-01 15:48 ` [PATCH kvm-unit-tests 6/7] x86/kvmclock_test: " Andrew Jones
2017-06-01 15:48 ` [PATCH kvm-unit-tests 7/7] x86/vmexit: " Andrew Jones
2017-06-01 15:49 ` [PATCH kvm-unit-tests 0/7] x86: introduce and " Paolo Bonzini

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).