* [PATCH 05/11] [XEN] Basic cpumap utils
@ 2010-04-04 19:30 Dulloor
From: Dulloor @ 2010-04-04 19:30 UTC (permalink / raw)
To: xen-devel
[-- Attachment #1: Type: text/plain, Size: 314 bytes --]
This patch implements very basic helper functions (all of ~130 lines)
for struct xenctl_cpumap. The patch also converts
xc_vcpu_getaffinity/xc_vcpu_setaffinity to use these helpers, as an
example. There are other callers of xc_vcpu_get/setaffinity too, which
I will attend to later.
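
For illustration, a minimal usage sketch (not part of the patch;
assumes a valid hypercall handle xc_handle and a domain id domid):

    /* Sketch: pin vcpu 0 of 'domid' to physical cpus 0 and 2 with the
     * new helpers, then walk the map we built. */
    #include <stdio.h>
    #include "xenctrl.h"
    #include "xc_cpumap.h"

    static int pin_vcpu0(int xc_handle, uint32_t domid)
    {
        struct xenctl_cpumap cpumap;
        int cpu, rc;

        /* Allocate a zeroed bitmap sized for this host's cpus. */
        if (xc_cpumap_allocz_bitmap(xc_handle, &cpumap))
            return -1;

        xc_cpumap_set_cpu(0, cpumap);
        xc_cpumap_set_cpu(2, cpumap);

        rc = xc_vcpu_setaffinity(xc_handle, domid, 0 /* vcpu */, &cpumap);

        /* Iterate over exactly the cpus set in the map. */
        xc_for_each_cpu(cpu, cpumap)
            fprintf(stderr, "cpu %d in affinity mask\n", cpu);

        xc_cpumap_free_bitmap(&cpumap);
        return rc;
    }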
-dulloor
Signed-off-by: Dulloor Rao <dulloor@gatech.edu>
[-- Attachment #2: xen-05-basic-cpumap-utils.patch --]
[-- Type: text/x-patch, Size: 12644 bytes --]
diff -r d6b7abf0d2d5 -r 6eba62092cb4 tools/libxc/xc_cpumap.c
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/libxc/xc_cpumap.c Thu Apr 01 13:06:14 2010 -0400
@@ -0,0 +1,91 @@
+#include "xc_cpumap.h"
+#include <stdio.h>
+
+/* Author : Dulloor (dulloor@gatech.edu) */
+
+uint32_t xc_cpumap_next(int cpu, struct xenctl_cpumap *srcp)
+{
+ uint8_t *p, pos;
+ uint8_t *addr = xc_cpumap_bits(srcp);
+ uint32_t size = xc_cpumap_len(srcp);
+ uint32_t offset = cpu+1; /* Find the next set cpu */
+
+ if (offset >= size)
+ return size;
+
+ p = addr + XC_BITMAP_BYTE(offset);
+ pos = XC_BITMAP_BYTE_OFFSET(offset);
+
+ do {
+ for (; (pos < XC_BITS_PER_BYTE) && !((*p)&(1<<pos)); pos++);
+ if (pos < XC_BITS_PER_BYTE)
+ break;
+ pos = 0; p++;
+ } while (p < (addr + XC_BITS_TO_BYTES(size)));
+
+ return (((p-addr)*XC_BITS_PER_BYTE) + pos);
+}
+
+void __xc_cpumap_or(struct xenctl_cpumap *dstp,
+ struct xenctl_cpumap *src1p, struct xenctl_cpumap *src2p)
+{
+ uint8_t *dp = xc_cpumap_bits(dstp);
+ uint8_t *s1p = xc_cpumap_bits(src1p);
+ uint8_t *s2p = xc_cpumap_bits(src2p);
+ int nr = XC_BITS_TO_BYTES(xc_cpumap_len(dstp));
+ int k;
+ for (k=0; k<nr; k++)
+ dp[k] = s1p[k] | s2p[k];
+}
+
+/* Count the set bits in a byte (bytewise population count). */
+static inline uint8_t hweight8(uint8_t w)
+{
+ uint8_t res = (w & 0x55) + ((w >> 1) & 0x55);
+ res = (res & 0x33) + ((res >> 2) & 0x33);
+ return (res & 0x0F) + ((res >> 4) & 0x0F);
+}
+
+int __xc_cpumap_weight(struct xenctl_cpumap *srcp)
+{
+ const uint8_t *sp = xc_cpumap_bits(srcp);
+ int k, w = 0, lim = XC_BITS_TO_BYTES(xc_cpumap_len(srcp));
+ for (k=0; k <lim; k++)
+ w += hweight8(sp[k]);
+ return w;
+}
+
+/* Format a cpumap as comma-separated hex chunks of CHUNKSZ bits,
+ * most significant chunk first. */
+#define CHUNKSZ 8
+/* Round val up to a multiple of modulus (a power of 2). */
+#define roundup_power2(val,modulus) (((val) + (modulus) - 1) & ~((modulus) - 1))
+
+int __xc_cpumap_snprintf(char *buf, unsigned int buflen,
+ const struct xenctl_cpumap *cpumap)
+{
+ const uint8_t *maskp = xc_cpumap_bits(cpumap);
+ int nmaskbits = xc_cpumap_len(cpumap);
+ int i, word, bit, len = 0;
+ unsigned long val;
+ const char *sep = "";
+ int chunksz;
+ uint8_t chunkmask;
+
+ chunksz = nmaskbits & (CHUNKSZ - 1);
+ if (chunksz == 0)
+ chunksz = CHUNKSZ;
+
+ i = roundup_power2(nmaskbits, CHUNKSZ) - CHUNKSZ;
+ for (; i >= 0; i -= CHUNKSZ) {
+ chunkmask = ((1ULL << chunksz) - 1);
+ word = i / XC_BITS_PER_BYTE;
+ bit = i % XC_BITS_PER_BYTE;
+ val = (maskp[word] >> bit) & chunkmask;
+ len += snprintf(buf+len, (len < (int)buflen) ? buflen-len : 0,
+ "%s%0*lx", sep, (chunksz+3)/4, val);
+ chunksz = CHUNKSZ;
+ sep = ",";
+ }
+ return len;
+}
diff -r d6b7abf0d2d5 -r 6eba62092cb4 tools/libxc/xc_cpumap.h
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/libxc/xc_cpumap.h Thu Apr 01 13:06:14 2010 -0400
@@ -0,0 +1,151 @@
+#ifndef __XENCTL_CPUMAP_H
+#define __XENCTL_CPUMAP_H
+
+#include "xc_private.h"
+#include <stdint.h>
+#include <string.h>
+#include <stdlib.h> /* malloc/free, used by the inline helpers below */
+
+#define XC_BITS_PER_BYTE 8
+#define XC_BITS_TO_BYTES(bits) \
+ (((bits)+XC_BITS_PER_BYTE-1)/XC_BITS_PER_BYTE)
+#define XC_BITMAP_BIT(nr) (1 << (nr))
+#define XC_BITMAP_BIT_MASK(nr) (1 << ((nr) % XC_BITS_PER_BYTE))
+#define XC_BITMAP_BYTE(nr) ((nr) / XC_BITS_PER_BYTE)
+#define XC_BITMAP_BYTE_OFFSET(nr) ((nr) % XC_BITS_PER_BYTE)
+#define XC_BITMAP_BYTE_MASK (0xFF)
+#define XC_BITMAP_LAST_BYTE_MASK(nbits) \
+ (((nbits) % XC_BITS_PER_BYTE) ? \
+ ((1<<((nbits) % XC_BITS_PER_BYTE))-1) : \
+ XC_BITMAP_BYTE_MASK)
+
+#define xc_cpumap_bits(maskp) \
+ ({ uint8_t *bitmap; \
+ get_xen_guest_handle(bitmap, (maskp)->bitmap); \
+ bitmap; })
+#define xc_cpumap_len(maskp) ((maskp)->nr_cpus)
+
+/* For iterating over the cpus set in the cpumap */
+#define xc_for_each_cpu(cpu, mask) \
+ __xc_for_each_cpu(cpu, &(mask))
+#define __xc_for_each_cpu(cpu, mask) \
+ for ((cpu) = -1; \
+ (cpu) = xc_cpumap_next((cpu), (mask)), \
+ (cpu) < xc_cpumap_len(mask);)
+extern uint32_t xc_cpumap_next(int n, struct xenctl_cpumap *srcp);
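+/*
+ * Usage sketch (illustrative); 'map' is a struct xenctl_cpumap:
+ *
+ *     int cpu;
+ *     xc_for_each_cpu(cpu, map)
+ *         printf("cpu %d is set\n", cpu);
+ */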
+
+#define xc_cpumap_set_cpu(cpu, dst) __xc_cpumap_set_cpu(cpu, &(dst))
+static inline void __xc_cpumap_set_cpu(int cpu, struct xenctl_cpumap *dstp)
+{
+ uint8_t mask = XC_BITMAP_BIT_MASK(cpu);
+ uint8_t *p = ((uint8_t *)xc_cpumap_bits(dstp)) + XC_BITMAP_BYTE(cpu);
+ *p |= mask;
+}
+
+#define xc_cpumap_clear_cpu(cpu, dst) __xc_cpumap_clear_cpu(cpu, &(dst))
+static inline void __xc_cpumap_clear_cpu(int cpu, struct xenctl_cpumap *dstp)
+{
+ uint8_t mask = XC_BITMAP_BIT_MASK(cpu);
+ uint8_t *p = ((uint8_t *)xc_cpumap_bits(dstp)) + XC_BITMAP_BYTE(cpu);
+ *p &= ~mask;
+}
+
+#define xc_cpumap_test_cpu(cpu, dst) __xc_cpumap_test_cpu(cpu, &(dst))
+static inline int __xc_cpumap_test_cpu(int cpu, struct xenctl_cpumap *dstp)
+{
+ uint8_t mask = XC_BITMAP_BIT_MASK(cpu);
+ uint8_t *p = ((uint8_t *)xc_cpumap_bits(dstp)) + XC_BITMAP_BYTE(cpu);
+ return *p & mask;
+}
+
+#define xc_cpumap_setall(dst) __xc_cpumap_setall(&(dst))
+static inline void __xc_cpumap_setall(struct xenctl_cpumap *dstp)
+{
+ uint8_t *dp = xc_cpumap_bits(dstp);
+ int nbits = xc_cpumap_len(dstp);
+ size_t nbytes = XC_BITS_TO_BYTES(nbits);
+ if (nbytes > 1)
+ memset(dp, 0xff, nbytes);
+ dp[nbytes-1] = XC_BITMAP_LAST_BYTE_MASK(nbits);
+}
+
+#define xc_cpumap_clearall(dst) __xc_cpumap_clearall(&(dst))
+static inline void __xc_cpumap_clearall(struct xenctl_cpumap *dstp)
+{
+ size_t nbytes = XC_BITS_TO_BYTES(xc_cpumap_len(dstp));
+ memset(xc_cpumap_bits(dstp), 0x00, nbytes);
+}
+
+#define xc_cpumap_or(dst, src1, src2) \
+ __xc_cpumap_or(&(dst), &(src1), &(src2))
+extern void __xc_cpumap_or(struct xenctl_cpumap *dstp,
+ struct xenctl_cpumap *src1p, struct xenctl_cpumap *src2p);
+
+#define xc_cpumap_weight(src) __xc_cpumap_weight(&(src))
+extern int __xc_cpumap_weight(struct xenctl_cpumap *srcp);
+
+#define xc_cpumap_snprintf(buf, len, src) \
+ __xc_cpumap_snprintf((buf), (len), &(src))
+extern int __xc_cpumap_snprintf(char *buf, unsigned int len,
+ const struct xenctl_cpumap *srcp);
+
+/***********************************************************************/
+
+static inline int
+xc_cpumap_allocz_bitmap(int xc_handle, struct xenctl_cpumap *map)
+{
+ int nr_cpus;
+ uint8_t *bitmap;
+ xc_physinfo_t pinfo = { 0 };
+
+ if (xc_physinfo(xc_handle, &pinfo))
+ goto failed;
+ nr_cpus = pinfo.max_cpu_id+1;
+
+ if (!(bitmap = malloc(XC_BITS_TO_BYTES(nr_cpus))))
+ goto failed;
+ memset(bitmap, 0x00, XC_BITS_TO_BYTES(nr_cpus));
+
+ map->nr_cpus = nr_cpus;
+ set_xen_guest_handle(map->bitmap, bitmap);
+ return 0;
+failed:
+ return -1;
+}
+
+static inline void
+xc_cpumap_free_bitmap(struct xenctl_cpumap *map)
+{
+ uint8_t *bitmap;
+ get_xen_guest_handle(bitmap, map->bitmap);
+ free(bitmap);
+}
+
+static inline int
+xc_cpumap_lock_pages(struct xenctl_cpumap *map)
+{
+ uint8_t *bitmap;
+ uint32_t nr_bytes = XC_BITS_TO_BYTES(map->nr_cpus);
+ get_xen_guest_handle(bitmap, map->bitmap);
+ if (lock_pages(bitmap, nr_bytes))
+ return -1;
+ return 0;
+}
+
+static inline void
+xc_cpumap_unlock_pages(struct xenctl_cpumap *map)
+{
+ uint8_t *bitmap;
+ uint32_t nr_bytes = XC_BITS_TO_BYTES(map->nr_cpus);
+ get_xen_guest_handle(bitmap, map->bitmap);
+ unlock_pages(bitmap, nr_bytes);
+}
+
+#endif /* __XENCTL_CPUMAP_H */
diff -r d6b7abf0d2d5 -r 6eba62092cb4 tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h Wed Mar 31 19:39:58 2010 -0400
+++ b/tools/libxc/xenctrl.h Thu Apr 01 13:06:14 2010 -0400
@@ -309,13 +309,11 @@
int xc_vcpu_setaffinity(int xc_handle,
uint32_t domid,
int vcpu,
- uint64_t *cpumap,
- int cpusize);
+ struct xenctl_cpumap *cpumap);
int xc_vcpu_getaffinity(int xc_handle,
uint32_t domid,
int vcpu,
- uint64_t *cpumap,
- int cpusize);
+ struct xenctl_cpumap *cpumap);
/**
* This function will return information about one or more domains. It is
diff -r d6b7abf0d2d5 -r 6eba62092cb4 tools/python/xen/lowlevel/xc/xc.c
--- a/tools/python/xen/lowlevel/xc/xc.c Wed Mar 31 19:39:58 2010 -0400
+++ b/tools/python/xen/lowlevel/xc/xc.c Thu Apr 01 13:06:14 2010 -0400
@@ -18,6 +18,7 @@
#include <arpa/inet.h>
#include "xenctrl.h"
+#include "xc_cpumap.h"
#include <xen/elfnote.h>
#include <xen/tmem.h>
#include "xc_dom.h"
@@ -215,11 +216,8 @@
{
uint32_t dom;
int vcpu = 0, i;
- uint64_t *cpumap;
+ struct xenctl_cpumap cpumap;
PyObject *cpulist = NULL;
- int nr_cpus, size;
- xc_physinfo_t info = {0};
- uint64_t cpumap_size = sizeof(*cpumap);
static char *kwd_list[] = { "domid", "vcpu", "cpumap", NULL };
@@ -227,36 +225,25 @@
&dom, &vcpu, &cpulist) )
return NULL;
- if ( xc_physinfo(self->xc_handle, &info) != 0 )
+ if (xc_cpumap_allocz_bitmap(self->xc_handle, &cpumap))
return pyxc_error_to_exception();
- nr_cpus = info.nr_cpus;
-
- size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8);
- cpumap = malloc(cpumap_size * size);
- if(cpumap == NULL)
- return pyxc_error_to_exception();
-
if ( (cpulist != NULL) && PyList_Check(cpulist) )
{
- for ( i = 0; i < size; i++)
- {
- cpumap[i] = 0ULL;
- }
for ( i = 0; i < PyList_Size(cpulist); i++ )
{
long cpu = PyInt_AsLong(PyList_GetItem(cpulist, i));
- cpumap[cpu / (cpumap_size * 8)] |= (uint64_t)1 << (cpu % (cpumap_size * 8));
+ if ( cpu >= 0 && cpu < (long)cpumap.nr_cpus )
+ xc_cpumap_set_cpu(cpu, cpumap);
}
- }
-
- if ( xc_vcpu_setaffinity(self->xc_handle, dom, vcpu, cpumap, size * cpumap_size) != 0 )
+ }
+
+ if ( xc_vcpu_setaffinity(self->xc_handle, dom, vcpu, &cpumap) != 0 )
{
- free(cpumap);
+ xc_cpumap_free_bitmap(&cpumap);
return pyxc_error_to_exception();
}
Py_INCREF(zero);
- free(cpumap);
+ xc_cpumap_free_bitmap(&cpumap);
return zero;
}
@@ -375,10 +362,7 @@
uint32_t dom, vcpu = 0;
xc_vcpuinfo_t info;
int rc, i;
- uint64_t *cpumap;
- int nr_cpus, size;
- xc_physinfo_t pinfo = { 0 };
- uint64_t cpumap_size = sizeof(*cpumap);
+ struct xenctl_cpumap cpumap;
static char *kwd_list[] = { "domid", "vcpu", NULL };
@@ -386,23 +370,15 @@
&dom, &vcpu) )
return NULL;
- if ( xc_physinfo(self->xc_handle, &pinfo) != 0 )
- return pyxc_error_to_exception();
- nr_cpus = pinfo.nr_cpus;
-
- rc = xc_vcpu_getinfo(self->xc_handle, dom, vcpu, &info);
- if ( rc < 0 )
+ if ( xc_cpumap_allocz_bitmap(self->xc_handle, &cpumap) )
return pyxc_error_to_exception();
- size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8);
- if((cpumap = malloc(cpumap_size * size)) == NULL)
- return pyxc_error_to_exception();
- memset(cpumap, 0, cpumap_size * size);
+ if ((rc = xc_vcpu_getinfo(self->xc_handle, dom, vcpu, &info)) < 0)
+ {
+ xc_cpumap_free_bitmap(&cpumap);
+ return pyxc_error_to_exception();
+ }
- rc = xc_vcpu_getaffinity(self->xc_handle, dom, vcpu, cpumap, cpumap_size * size);
- if ( rc < 0 )
+ if ((rc = xc_vcpu_getaffinity(self->xc_handle, dom, vcpu, &cpumap)) < 0)
{
- free(cpumap);
+ xc_cpumap_free_bitmap(&cpumap);
return pyxc_error_to_exception();
}
@@ -413,18 +389,15 @@
"cpu_time", info.cpu_time,
"cpu", info.cpu);
cpulist = PyList_New(0);
- for ( i = 0; i < nr_cpus; i++ )
+ xc_for_each_cpu(i, cpumap)
{
- if (*(cpumap + i / (cpumap_size * 8)) & 1 ) {
- PyObject *pyint = PyInt_FromLong(i);
- PyList_Append(cpulist, pyint);
- Py_DECREF(pyint);
- }
- cpumap[i / (cpumap_size * 8)] >>= 1;
+ PyObject *pyint = PyInt_FromLong(i);
+ PyList_Append(cpulist, pyint);
+ Py_DECREF(pyint);
}
PyDict_SetItemString(info_dict, "cpumap", cpulist);
Py_DECREF(cpulist);
- free(cpumap);
+ xc_cpumap_free_bitmap(&cpumap);
return info_dict;
}
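
A sketch of the text format produced by the snprintf helper
(illustrative fragment; assumes a valid xc_handle, the usual includes,
and a hypothetical 16-cpu host):

    /* With nr_cpus == 16 and cpus 0-3 set, the map prints as "00,0f":
     * 8-bit chunks in hex, most significant chunk first. */
    char buf[16];
    struct xenctl_cpumap map;
    int cpu;

    if (xc_cpumap_allocz_bitmap(xc_handle, &map) == 0)
    {
        for (cpu = 0; cpu < 4; cpu++)
            xc_cpumap_set_cpu(cpu, map);
        xc_cpumap_snprintf(buf, sizeof(buf), map);
        printf("affinity: %s\n", buf); /* "00,0f" */
        xc_cpumap_free_bitmap(&map);
    }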