xen-devel.lists.xenproject.org archive mirror
* [Patch] adjust the cpu-affinity to more than 64 cpus
@ 2010-03-17  8:56 James (song wei)
  2010-03-17  9:25 ` Jan Beulich
  0 siblings, 1 reply; 14+ messages in thread
From: James (song wei) @ 2010-03-17  8:56 UTC (permalink / raw)
  To: xen-devel


Hi,

There are more than 64 CPUs on new Intel platforms, especially NUMA
systems, so we need to break the 64-pCPU limit when setting the
affinity of a VCPU.
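
As an illustration (this caller sketch is not part of the patch; the
helper name is hypothetical, nr_cpus is assumed to come from
xc_physinfo(), and it needs <stdint.h> and <stdlib.h>):

    /* Pin one VCPU to a single CPU with the widened interface. */
    static int pin_vcpu_to_cpu(int xc_handle, uint32_t domid, int vcpu,
                               int cpu, int nr_cpus)
    {
        int words = (nr_cpus + 63) / 64;         /* round up to 64-bit words */
        int cpusize = words * sizeof(uint64_t);  /* byte length of the map */
        uint64_t *cpumap = calloc(words, sizeof(uint64_t));
        int rc;

        if ( cpumap == NULL || cpu < 0 || cpu >= words * 64 )
        {
            free(cpumap);
            return -1;
        }
        cpumap[cpu / 64] |= (uint64_t)1 << (cpu % 64);
        rc = xc_vcpu_setaffinity(xc_handle, domid, vcpu, cpumap, cpusize);
        free(cpumap);
        return rc;
    }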

-James (Song Wei)

Signed-off-by: James (song wei) <jsong@novell.com> 
diff -r c1f272c3a441 tools/libxc/xc_domain.c
--- a/tools/libxc/xc_domain.c	Mon Mar 15 17:08:29 2010 +0000
+++ b/tools/libxc/xc_domain.c	Wed Mar 17 16:51:07 2010 +0800
@@ -98,23 +98,28 @@
 int xc_vcpu_setaffinity(int xc_handle,
                         uint32_t domid,
                         int vcpu,
-                        uint64_t cpumap)
+                        uint64_t *cpumap, int cpusize)
 {
     DECLARE_DOMCTL;
     int ret = -1;
-    uint8_t local[sizeof (cpumap)];
+    uint8_t *local = malloc(cpusize);
 
+    if ( local == NULL )
+    {
+        PERROR("Could not alloc memory for Xen hypercall");
+        goto out;
+    }
     domctl.cmd = XEN_DOMCTL_setvcpuaffinity;
     domctl.domain = (domid_t)domid;
     domctl.u.vcpuaffinity.vcpu    = vcpu;
 
-    bitmap_64_to_byte(local, &cpumap, sizeof(cpumap) * 8);
+    bitmap_64_to_byte(local, cpumap, cpusize * 8);
 
     set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);
 
-    domctl.u.vcpuaffinity.cpumap.nr_cpus = sizeof(cpumap) * 8;
+    domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8;
     
-    if ( lock_pages(local, sizeof(local)) != 0 )
+    if ( lock_pages(local, cpusize) != 0 )
     {
         PERROR("Could not lock memory for Xen hypercall");
         goto out;
@@ -122,9 +127,10 @@
 
     ret = do_domctl(xc_handle, &domctl);
 
-    unlock_pages(local, sizeof(local));
+    unlock_pages(local, cpusize);
 
  out:
+    free(local);
     return ret;
 }
 
@@ -132,18 +138,26 @@
 int xc_vcpu_getaffinity(int xc_handle,
                         uint32_t domid,
                         int vcpu,
-                        uint64_t *cpumap)
+                        uint64_t *cpumap,
+                        int cpusize)
 {
     DECLARE_DOMCTL;
     int ret = -1;
-    uint8_t local[sizeof (cpumap)];
+    uint8_t *local = malloc(cpusize);
+
+    if ( local == NULL )
+    {
+        PERROR("Could not alloc memory for Xen hypercall");
+        goto out;
+    }
 
     domctl.cmd = XEN_DOMCTL_getvcpuaffinity;
     domctl.domain = (domid_t)domid;
     domctl.u.vcpuaffinity.vcpu = vcpu;
 
+
     set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);
-    domctl.u.vcpuaffinity.cpumap.nr_cpus = sizeof(cpumap) * 8;
+    domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8;
     
-    if ( lock_pages(local, sizeof(local)) != 0 )
+    if ( lock_pages(local, cpusize) != 0 )
     {
@@ -154,8 +168,9 @@
     ret = do_domctl(xc_handle, &domctl);
 
-    unlock_pages(local, sizeof (local));
+    unlock_pages(local, cpusize);
-    bitmap_byte_to_64(cpumap, local, sizeof(local) * 8);
- out:
+    bitmap_byte_to_64(cpumap, local, cpusize * 8);
+out:
+    free(local);
     return ret;
 }
 
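For context: bitmap_64_to_byte marshals the uint64_t words into the
byte-granular bitmap the domctl interface expects, with bit i of the
map landing in bit i%8 of byte i/8. A minimal sketch of that
conversion (not the actual libxc source; needs <string.h>):

    static void bitmap_64_to_byte_sketch(uint8_t *bp, const uint64_t *lp,
                                         int nbits)
    {
        int i;

        memset(bp, 0, (nbits + 7) / 8);
        for ( i = 0; i < nbits; i++ )
            if ( lp[i / 64] & ((uint64_t)1 << (i % 64)) )
                bp[i / 8] |= (uint8_t)(1 << (i % 8));
    }

bitmap_byte_to_64 performs the inverse conversion on the getaffinity
path.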
diff -r c1f272c3a441 tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h	Mon Mar 15 17:08:29 2010 +0000
+++ b/tools/libxc/xenctrl.h	Wed Mar 17 16:51:07 2010 +0800
@@ -309,11 +309,13 @@
 int xc_vcpu_setaffinity(int xc_handle,
                         uint32_t domid,
                         int vcpu,
-                        uint64_t cpumap);
+                        uint64_t *cpumap,
+                        int cpusize);
 int xc_vcpu_getaffinity(int xc_handle,
                         uint32_t domid,
                         int vcpu,
-                        uint64_t *cpumap);
+                        uint64_t *cpumap,
+                        int cpusize);
 
 /**
  * This function will return information about one or more domains. It is
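
Existing callers of these two functions must be updated for the new
prototypes. A sketch of a getaffinity caller under the new signature
(error handling trimmed; nr_cpus assumed to come from xc_physinfo();
needs <stdio.h> and <stdlib.h>):

    int words = (nr_cpus + 63) / 64;
    uint64_t *cpumap = calloc(words, sizeof(uint64_t));
    int i;

    if ( cpumap != NULL &&
         xc_vcpu_getaffinity(xc_handle, domid, vcpu, cpumap,
                             words * sizeof(uint64_t)) == 0 )
        for ( i = 0; i < nr_cpus; i++ )
            if ( cpumap[i / 64] & ((uint64_t)1 << (i % 64)) )
                printf("vcpu may run on cpu %d\n", i);
    free(cpumap);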
diff -r c1f272c3a441 tools/python/xen/lowlevel/xc/xc.c
--- a/tools/python/xen/lowlevel/xc/xc.c	Mon Mar 15 17:08:29 2010 +0000
+++ b/tools/python/xen/lowlevel/xc/xc.c	Wed Mar 17 16:51:07 2010 +0800
@@ -215,35 +215,65 @@
 {
     uint32_t dom;
     int vcpu = 0, i;
-    uint64_t  cpumap = ~0ULL;
+    uint64_t  *cpumap;
     PyObject *cpulist = NULL;
+    int nr_cpus, size;
+    xc_physinfo_t info; 
+    xc_cpu_to_node_t map[1];
+    uint64_t cpumap_size = sizeof(*cpumap);
 
     static char *kwd_list[] = { "domid", "vcpu", "cpumap", NULL };
+    
 
     if ( !PyArg_ParseTupleAndKeywords(args, kwds, "i|iO", kwd_list, 
                                       &dom, &vcpu, &cpulist) )
         return NULL;
 
+    set_xen_guest_handle(info.cpu_to_node, map);
+    info.max_cpu_id = 1;
+    if ( xc_physinfo(self->xc_handle, &info) != 0 )
+        return pyxc_error_to_exception();
+  
+    nr_cpus = info.nr_cpus;
+
+    size = (nr_cpus + cpumap_size * 8 - 1) / (cpumap_size * 8);
+    cpumap = malloc(cpumap_size * size);
+    if ( cpumap == NULL )
+        return pyxc_error_to_exception();
+
+    /* With no cpulist given, default to allowing all CPUs, matching
+     * the old fixed-size ~0ULL behaviour. */
+    for ( i = 0; i < size; i++ )
+        cpumap[i] = ~0ULL;
+
     if ( (cpulist != NULL) && PyList_Check(cpulist) )
     {
-        cpumap = 0ULL;
+        for ( i = 0; i < size; i++)
+        {
+            cpumap[i] = 0ULL;
+        }
         for ( i = 0; i < PyList_Size(cpulist); i++ ) 
         {
             long cpu = PyInt_AsLong(PyList_GetItem(cpulist, i));
-            if ( cpu >= 64 )
+            if ( cpu < 0 || cpu >= size * cpumap_size * 8 )
             {
                 errno = EINVAL;
                 PyErr_SetFromErrno(xc_error_obj);
+                free(cpumap);
                 return NULL;
             }
-            cpumap |= (uint64_t)1 << cpu;
+            *(cpumap + cpu / (cpumap_size * 8)) |= (uint64_t)1 << (cpu % (cpumap_size * 8));
         }
     }
   
-    if ( xc_vcpu_setaffinity(self->xc_handle, dom, vcpu, cpumap) != 0 )
+    if ( xc_vcpu_setaffinity(self->xc_handle, dom, vcpu, cpumap, size * cpumap_size) != 0 )
+    {
+        free(cpumap);
         return pyxc_error_to_exception();
-    
+    }
+
     Py_INCREF(zero);
+    free(cpumap); 
     return zero;
 }
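
A worked example of the sizing arithmetic above: with 8-byte uint64_t
words, cpumap_size * 8 is 64 bits per word, so a 128-CPU host needs
size = (128 + 63) / 64 = 2 words, and CPU 100 is recorded as:

    cpumap[100 / 64] |= (uint64_t)1 << (100 % 64);   /* word 1, bit 36 */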
 
@@ -362,7 +381,11 @@
     uint32_t dom, vcpu = 0;
     xc_vcpuinfo_t info;
     int rc, i;
-    uint64_t cpumap;
+    uint64_t *cpumap;
+    int nr_cpus, size;
+    xc_physinfo_t pinfo = { 0 };
+    xc_cpu_to_node_t map[1];
+    uint64_t cpumap_size = sizeof(*cpumap);
 
     static char *kwd_list[] = { "domid", "vcpu", NULL };
     
@@ -370,12 +393,25 @@
                                       &dom, &vcpu) )
         return NULL;
 
+    set_xen_guest_handle(pinfo.cpu_to_node, map);
+    pinfo.max_cpu_id = 1;
+    if ( xc_physinfo(self->xc_handle, &pinfo) != 0 ) 
+        return pyxc_error_to_exception();
+    nr_cpus = pinfo.nr_cpus;
     rc = xc_vcpu_getinfo(self->xc_handle, dom, vcpu, &info);
     if ( rc < 0 )
         return pyxc_error_to_exception();
-    rc = xc_vcpu_getaffinity(self->xc_handle, dom, vcpu, &cpumap);
+    size = (nr_cpus + cpumap_size * 8 - 1) / (cpumap_size * 8);
+
+    if ( (cpumap = malloc(cpumap_size * size)) == NULL )
+        return pyxc_error_to_exception();
+
+    rc = xc_vcpu_getaffinity(self->xc_handle, dom, vcpu, cpumap, cpumap_size * size);
     if ( rc < 0 )
+    {
+        free(cpumap);
         return pyxc_error_to_exception();
+    }
 
     info_dict = Py_BuildValue("{s:i,s:i,s:i,s:L,s:i}",
                               "online",   info.online,
@@ -385,17 +421,18 @@
                               "cpu",      info.cpu);
 
     cpulist = PyList_New(0);
-    for ( i = 0; cpumap != 0; i++ )
+    for ( i = 0; i < size * cpumap_size * 8; i++ )
     {
-        if ( cpumap & 1 ) {
+        if (*(cpumap + i / (cpumap_size * 8)) & 1 ) {
             PyObject *pyint = PyInt_FromLong(i);
             PyList_Append(cpulist, pyint);
             Py_DECREF(pyint);
         }
-        cpumap >>= 1;
+        *(cpumap + i / (cpumap_size * 8)) >>= 1;
     }
     PyDict_SetItemString(info_dict, "cpumap", cpulist);
     Py_DECREF(cpulist);
+    free(cpumap);
     return info_dict;
 }
 

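A note on the decode loop above: it shifts each word right once per
bit visited, so a word is fully consumed exactly as i crosses into the
next word. An equivalent non-destructive form (a sketch, not a
required change) would test each bit in place:

    for ( i = 0; i < size * cpumap_size * 8; i++ )
        if ( cpumap[i / (cpumap_size * 8)] &
             ((uint64_t)1 << (i % (cpumap_size * 8))) )
        {
            PyObject *pyint = PyInt_FromLong(i);
            PyList_Append(cpulist, pyint);
            Py_DECREF(pyint);
        }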
http://old.nabble.com/file/p27928229/adjust_vcpuaffinity_more_cpu.patch
adjust_vcpuaffinity_more_cpu.patch 



Thread overview: 14+ messages
2010-03-17  8:56 [Patch] adjust the cpu-affinity to more than 64 cpus James (song wei)
2010-03-17  9:25 ` Jan Beulich
2010-03-18  2:26   ` James (song wei)
2010-03-18  3:41     ` James (song wei)
2010-03-19  3:14       ` Masaki Kanno
2010-03-19  9:09         ` James Song
2010-03-19  9:39           ` issue with c/s 21046 (was Re: [Patch] adjust the cpu-affinity to more than 64 cpus) Jan Beulich
2010-03-19 10:28           ` [Patch] adjust the cpu-affinity to more than 64 cpus Masaki Kanno
2010-03-19 10:47           ` issue with c/s 21046 (was Re: [Patch] adjust the cpu-affinity to more than 64 cpus) James Song
2010-03-19 11:10           ` Jan Beulich
2010-03-30 18:23           ` [Patch] adjust the cpu-affinity to more than 64 cpus Liu, Jinsong
2010-03-31  2:12             ` James Song
2010-03-31  6:29             ` Masaki Kanno
2010-03-31  8:26               ` Liu, Jinsong
