Subject: [PATCH] xl: introduce specific VCPU to PCPU mapping in config file
From: Dario Faggioli
Date: 2012-05-11 23:20 UTC
To: xen-devel
Cc: Ian Jackson, Ian Campbell

xm supports the following syntax (in the config file) for
specific VCPU to PCPU mapping:

cpus = ["2", "3"] # VCPU0 runs on CPU2, VCPU1 runs on CPU3

Allow for the same to be done in xl.
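
For illustration, a fuller config fragment using this syntax could look as
follows (the domain name and CPU numbers here are made up, not taken from a
real guest configuration):

name  = "example-guest"
vcpus = 2
cpus  = ["4", "6"]   # VCPU0 runs on CPU4, VCPU1 runs on CPU6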

Signed-off-by: Dario Faggioli <dario.faggioli@citrix.com>

diff --git a/tools/libxl/xl_cmdimpl.c b/tools/libxl/xl_cmdimpl.c
--- a/tools/libxl/xl_cmdimpl.c
+++ b/tools/libxl/xl_cmdimpl.c
@@ -71,6 +71,8 @@ static uint32_t domid;
 static const char *common_domname;
 static int fd_lock = -1;
 
+/* Stash for specific vcpu to pcpu mapping */
+static int *vcpu_to_pcpu;
 
 static const char savefileheader_magic[32]=
     "Xen saved domain, xl format\n \0 \r";
@@ -630,6 +632,21 @@ static void parse_config_data(const char
             exit(1);
         }
 
+        /* Prepare the array for single vcpu to pcpu mappings */
+        vcpu_to_pcpu = xmalloc(sizeof(int) * b_info->max_vcpus);
+        memset(vcpu_to_pcpu, -1, sizeof(int) * b_info->max_vcpus);
+
+        /*
+         * The idea here is to let libxl think that all the domain's
+         * vcpus have affinity with all the pcpus on the list. It is
+         * then xl itself that matches each vcpu to its pcpu, after
+         * the domain has been created (which is why we stash that
+         * information in the vcpu_to_pcpu array now). Doing it this
+         * way saves us from passing some big array of individual
+         * mappings to libxl. Also, using the cpumap derived from the
+         * list ensures memory is allocated on the proper NUMA nodes
+         * anyway.
+         */
         libxl_cpumap_set_none(&b_info->cpumap);
         while ((buf = xlu_cfg_get_listitem(cpus, n_cpus)) != NULL) {
             i = atoi(buf);
@@ -638,6 +655,8 @@ static void parse_config_data(const char
                 exit(1);
             }
             libxl_cpumap_set(&b_info->cpumap, i);
+            if (n_cpus < b_info->max_vcpus)
+                vcpu_to_pcpu[n_cpus] = i;
             n_cpus++;
         }
     }
@@ -1709,6 +1728,31 @@ start:
     if ( ret )
         goto error_out;
 
+    /* If single vcpu to pcpu mapping was requested, honour it */
+    if (vcpu_to_pcpu) {
+        libxl_cpumap vcpu_cpumap;
+
+        libxl_cpumap_alloc(ctx, &vcpu_cpumap);
+        for (i = 0; i < d_config.b_info.max_vcpus; i++) {
+
+            if (vcpu_to_pcpu[i] != -1) {
+                libxl_cpumap_set_none(&vcpu_cpumap);
+                libxl_cpumap_set(&vcpu_cpumap, vcpu_to_pcpu[i]);
+            } else {
+                libxl_cpumap_set_any(&vcpu_cpumap);
+            }
+            if (libxl_set_vcpuaffinity(ctx, domid, i, &vcpu_cpumap)) {
+                fprintf(stderr, "setting affinity failed on vcpu `%d'.\n", i);
+                libxl_cpumap_dispose(&vcpu_cpumap);
+                free(vcpu_to_pcpu);
+                ret = ERROR_FAIL;
+                goto error_out;
+            }
+        }
+        libxl_cpumap_dispose(&vcpu_cpumap);
+        free(vcpu_to_pcpu);
+    }
+
     ret = libxl_userdata_store(ctx, domid, "xl",
                                     config_data, config_len);
     if (ret) {

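As a usage sketch (not part of the patch; "example-guest" and the config file
name are hypothetical, and the exact output layout of xl may vary between
versions), the effect of the per-vcpu mapping can be checked after creating
the domain:

xl create example-guest.cfg   # config containing the cpus = [...] list above
xl vcpu-list example-guest    # each vcpu's CPU and affinity should reflect the mapping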