From mboxrd@z Thu Jan 1 00:00:00 1970
From: David Vrabel <david.vrabel@citrix.com>
Subject: Re: [PATCH 0/3] xen: evtchn and gntdev device fixes and perf improvements
Date: Fri, 19 Jul 2013 15:57:10 +0100
Message-ID: <51E953C6.8050906@citrix.com>
References: <1374245520-19270-1-git-send-email-david.vrabel@citrix.com>
In-Reply-To: <1374245520-19270-1-git-send-email-david.vrabel@citrix.com>
MIME-Version: 1.0
Content-Type: multipart/mixed; boundary="------------080003000807070700080303"
Sender: xen-devel-bounces@lists.xen.org
Errors-To: xen-devel-bounces@lists.xen.org
To: David Vrabel <david.vrabel@citrix.com>
Cc: xen-devel@lists.xen.org
List-Id: xen-devel@lists.xenproject.org

--------------080003000807070700080303
Content-Type: text/plain; charset="ISO-8859-1"
Content-Transfer-Encoding: 7bit

On 19/07/13 15:51, David Vrabel wrote:
>
> Patch 3 improves the scalability of the evtchn device when it is used
> by multiple processes (e.g., multiple qemus).  As you can see from the
> graph[1] it is a significant improvement but still less than ideal.  I
> suspect that this may be due to the per-domain event lock inside Xen
> rather than anything kernel-side.
>
> The graphed data was collected from dom0 with 8 VCPUs on a host with 8
> CPUs.

Attached is the (slightly cheesy) test program I used to generate the
results.  This also triggered the deadlock fixed by patch 1.

It spawns N threads, each of which opens /dev/xen/evtchn and sets up an
event channel with both ends in the same domain.  The event channels
are manually distributed between the VCPUs.
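
In outline, each test thread does roughly this (a condensed sketch of the
attached evtchn-stress.c; error handling and the /proc/irq affinity setup
are left out, and the function name here is just illustrative):

    #include <xenctrl.h>

    static void ping_pong(unsigned max_events)
    {
        xc_evtchn *xce;
        evtchn_port_t lport, rport, port;
        unsigned n;

        xce = xc_evtchn_open(NULL, 0);

        /* Bind both ends of an event channel within this domain (dom0). */
        lport = xc_evtchn_bind_unbound_port(xce, 0);
        rport = xc_evtchn_bind_interdomain(xce, 0, lport);

        xc_evtchn_notify(xce, lport);            /* start the ping-pong */

        for (n = 0; n < max_events; n++) {
            port = xc_evtchn_pending(xce);       /* blocks until an event arrives */
            if (port == lport)
                xc_evtchn_notify(xce, lport);    /* bounce it to the other end */
            else
                xc_evtchn_notify(xce, rport);
            xc_evtchn_unmask(xce, port);
        }

        xc_evtchn_close(xce);
    }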

David

--------------080003000807070700080303
Content-Type: text/x-csrc; name="evtchn-stress.c"
Content-Transfer-Encoding: 7bit
Content-Disposition: attachment; filename="evtchn-stress.c"

/* evtchn-stress.c: stress /dev/xen/evtchn by ping-ponging events over
 * loopback (intra-domain) event channels from multiple threads. */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <pthread.h>
#include <time.h>

#include <xenctrl.h>

#define MAX_THREADS 128
#define MAX_EVENTS 100000
#define FIRST_IRQ 62
#define MAX_VCPUS 8

static unsigned long long elapsed[MAX_THREADS];

/* Pin the IRQ backing an event channel port to a single CPU, assuming
 * event channel IRQs start at FIRST_IRQ on the test host. */
static void bind_irq(unsigned port, unsigned cpu)
{
    char buf[64];
    FILE *f;
    unsigned irq = port + FIRST_IRQ;

    snprintf(buf, sizeof(buf), "/proc/irq/%d/smp_affinity", irq);

    f = fopen(buf, "w");
    if (!f)
        return;

    snprintf(buf, sizeof(buf), "%x", 1 << cpu);
    fwrite(buf, strlen(buf), 1, f);

    fclose(f);
}

void *test_thread(void *arg)
{
    unsigned i = (intptr_t)arg;
    xc_evtchn *xce;
    evtchn_port_or_error_t lport;
    evtchn_port_or_error_t rport;
    unsigned events = 0;
    struct timespec start, end;

    xce = xc_evtchn_open(NULL, 0);
    if (!xce)
        return NULL;

    /* Bind both ends of an event channel within this domain. */
    lport = xc_evtchn_bind_unbound_port(xce, 0);
    if (lport < 0)
        return NULL;
    rport = xc_evtchn_bind_interdomain(xce, 0, lport);

    bind_irq(lport, lport % MAX_VCPUS);
    bind_irq(rport, rport % MAX_VCPUS);

    xc_evtchn_notify(xce, lport);

    clock_gettime(CLOCK_MONOTONIC, &start);

    /* Bounce MAX_EVENTS events back and forth between the two ports. */
    while (events < MAX_EVENTS) {
        evtchn_port_t port;

        port = xc_evtchn_pending(xce);
        if (port == lport)
            xc_evtchn_notify(xce, lport);
        else
            xc_evtchn_notify(xce, rport);
        xc_evtchn_unmask(xce, port);
        events++;
    }

    clock_gettime(CLOCK_MONOTONIC, &end);

    xc_evtchn_close(xce);

    /* Elapsed time in nanoseconds. */
    elapsed[i] = (end.tv_sec - start.tv_sec) * 1000000000ull
        + (end.tv_nsec - start.tv_nsec);

    return NULL;
}

static pthread_t threads[MAX_THREADS];

int start_thread(unsigned i)
{
    return pthread_create(&threads[i], NULL, test_thread,
                          (void *)(intptr_t)i);
}

void run_test(unsigned num_threads)
{
    unsigned i;
    double accum = 0.0;

    for (i = 0; i < num_threads; i++)
        start_thread(i);

    for (i = 0; i < num_threads; i++)
        pthread_join(threads[i], NULL);

    /* Sum per-thread rates (events/ns); report the mean in events/s. */
    for (i = 0; i < num_threads; i++)
        accum += MAX_EVENTS / (double)elapsed[i];

    printf("  num_threads: %u\n", num_threads);
    printf("  rate: %.1f event/s\n", accum / num_threads * 1000000000.0);
}

int main(int argc, char *argv[])
{
    unsigned num_threads;
    unsigned num_iters;
    unsigned i;

    if (argc < 3) {
        fprintf(stderr, "Usage: %s <iters> <threads>\n", argv[0]);
        return 1;
    }

    num_iters = atoi(argv[1]);
    num_threads = atoi(argv[2]);

    for (i = 0; i < num_iters; i++) {
        printf("iter: %d\n", i);
        run_test(num_threads);
    }

    return 0;
}

--------------080003000807070700080303
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Content-Disposition: inline

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel

--------------080003000807070700080303--