From: Dulloor <dulloor@gmail.com>
To: xen-devel@lists.xensource.com
Subject: [PATCH] libxc bitmap utils and vcpu-affinity
Date: Sun, 21 Mar 2010 23:33:09 -0400
Message-ID: <940bcfd21003212033t3c3bf074h663337b56dd5954e@mail.gmail.com>
[-- Attachment #1: Type: text/plain, Size: 575 bytes --]
This patch adds:
* A byte-based cpumask type (xenctl_cpumask) for setting vcpu-affinity,
numa-node-affinity, etc. in libxc.
* Common bitmap utilities in libxc, which can be used for xenctl_cpumask
(and, with small changes, for xenctl_cpumap if desired), so that common
cpumask operations are easy to perform.
As opposed to xenctl_cpumap, xenctl_cpumask is a static structure
(just 4 bytes larger for 128 cpus), but it keeps the interface and code
cleaner. The domctl interface version keeps the size of xenctl_cpumask
consistent between xen and xen-tools.
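
For illustration, a rough sketch of how a caller would drive the new
interface (the pin_vcpu0 helper and its cpu range are invented for this
example, and the handle is assumed to come from the usual
xc_interface_open()):

    #include <stdio.h>
    #include "xenctrl.h"
    #include "xc_cpumask.h"

    /* Pin vcpu 0 of a domain to cpus 0-3 and print the resulting mask. */
    static int pin_vcpu0(int xc_handle, uint32_t domid)
    {
        struct xenctl_cpumask mask = XC_CPUMASK_NONE;
        char buf[128];
        int cpu;

        for (cpu = 0; cpu < 4; cpu++)
            xc_cpumask_set_cpu(cpu, mask);

        /* One static structure -- no separate nr_cpus/size argument. */
        if (xc_vcpu_setaffinity(xc_handle, domid, 0, &mask))
            return -1;

        xc_cpumask_snprintf(buf, sizeof(buf), mask);
        printf("vcpu0 affinity: %s\n", buf);
        return 0;
    }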
-dulloor
[-- Attachment #2: xc-vcpu-affinity.patch --]
[-- Type: text/x-patch, Size: 31760 bytes --]
diff -r 04cb0829d138 tools/libxc/Makefile
--- a/tools/libxc/Makefile Wed Mar 17 14:10:43 2010 +0000
+++ b/tools/libxc/Makefile Sun Mar 21 23:22:26 2010 -0400
@@ -25,6 +25,7 @@
CTRL_SRCS-y += xc_mem_event.c
CTRL_SRCS-y += xc_mem_paging.c
CTRL_SRCS-y += xc_memshr.c
+CTRL_SRCS-y += xc_bitmap.c
CTRL_SRCS-$(CONFIG_X86) += xc_pagetab.c
CTRL_SRCS-$(CONFIG_Linux) += xc_linux.c
CTRL_SRCS-$(CONFIG_SunOS) += xc_solaris.c
diff -r 04cb0829d138 tools/libxc/xc_bitmap.c
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/libxc/xc_bitmap.c Sun Mar 21 23:22:26 2010 -0400
@@ -0,0 +1,250 @@
+#include "xc_bitmap.h"
+#include <stdio.h>
+
+/*
+ * xc_bitmap_find_next_bit is adapted from the generic find_next_bit
+ * in Linux, which carries the following copyright:
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * Adapted for byte-based bitmaps by Dulloor (dulloor@gatech.edu)
+ */
+
+/**
+ * __xc_ffs - find the first set bit in a byte.
+ * @byte: The byte to search
+ *
+ * The result is undefined if no bit is set, so callers should check
+ * against 0 first.
+ */
+static inline int __xc_ffs(uint8_t byte)
+{
+ int num = 0;
+
+ if ((byte & 0xff) == 0) {
+ num += 8;
+ byte >>= 8;
+ }
+ if ((byte & 0xf) == 0) {
+ num += 4;
+ byte >>= 4;
+ }
+ if ((byte & 0x3) == 0) {
+ num += 2;
+ byte >>= 2;
+ }
+ if ((byte & 0x1) == 0)
+ num += 1;
+ return num;
+}
+
+int
+xc_bitmap_find_next_bit(const uint8_t *addr, uint32_t size, uint32_t offset)
+{
+ const uint8_t *p;
+ uint32_t result;
+ uint8_t tmp;
+
+ if (offset >= size)
+ return size;
+
+ p = addr + XC_BITMAP_BYTE(offset);
+ result = offset & ~(XC_BITS_PER_BYTE-1);
+
+ size -= result;
+ offset %= XC_BITS_PER_BYTE;
+ if (offset) {
+ tmp = *(p++);
+ tmp &= (0xff << offset);
+ if (size < XC_BITS_PER_BYTE)
+ goto found_first;
+ if (tmp)
+ goto found_middle;
+ size -= XC_BITS_PER_BYTE;
+ result += XC_BITS_PER_BYTE;
+ }
+ while (size & ~(XC_BITS_PER_BYTE-1)) {
+ if ((tmp = *(p++)))
+ goto found_middle;
+ result += XC_BITS_PER_BYTE;
+ size -= XC_BITS_PER_BYTE;
+ }
+ if (!size)
+ return result;
+ tmp = *p;
+
+found_first:
+ tmp &= (0xff >> (XC_BITS_PER_BYTE - size));
+ if (!tmp)
+ return result+size;
+found_middle:
+ return result + __xc_ffs(tmp);
+}
+
+void __xc_bitmap_and(uint8_t *dp, uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+ int k;
+ int nr = XC_BITS_TO_BYTES(nbits);
+
+ for (k=0; k<nr; k++)
+ dp[k] = s1p[k] & s2p[k];
+}
+
+void __xc_bitmap_or(uint8_t *dp, uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+ int k;
+ int nr = XC_BITS_TO_BYTES(nbits);
+
+ for (k=0; k<nr; k++)
+ dp[k] = s1p[k] | s2p[k];
+}
+
+void __xc_bitmap_xor(uint8_t *dp, uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+ int k;
+ int nr = XC_BITS_TO_BYTES(nbits);
+
+ for (k=0; k<nr; k++)
+ dp[k] = s1p[k] ^ s2p[k];
+}
+
+void __xc_bitmap_andnot(uint8_t *dp, uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+ int k;
+ int nr = XC_BITS_TO_BYTES(nbits);
+
+ for (k=0; k<nr; k++)
+ dp[k] = s1p[k] & ~s2p[k];
+}
+
+void __xc_bitmap_complement(uint8_t *dp, uint8_t *sp, int nbits)
+{
+ int k, lim = nbits/XC_BITS_PER_BYTE;
+ for (k=0; k<lim; k++)
+ dp[k] = ~sp[k];
+
+ if (nbits % XC_BITS_PER_BYTE)
+ dp[k] = ~sp[k] & XC_BITMAP_LAST_BYTE_MASK(nbits);
+}
+
+int __xc_bitmap_equal(uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+ int k, lim = nbits/XC_BITS_PER_BYTE;
+ for (k=0; k<lim; k++)
+ if (s1p[k] != s2p[k])
+ return 0;
+
+ if (nbits % XC_BITS_PER_BYTE)
+ if ((s1p[k] ^ s2p[k]) & XC_BITMAP_LAST_BYTE_MASK(nbits))
+ return 0;
+
+ return 1;
+}
+
+int __xc_bitmap_intersects(uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+ int k, lim = nbits/XC_BITS_PER_BYTE;
+ for (k=0; k<lim; k++)
+ if (s1p[k] & s2p[k])
+ return 1;
+
+ if (nbits % XC_BITS_PER_BYTE)
+ if ((s1p[k] & s2p[k]) & XC_BITMAP_LAST_BYTE_MASK(nbits))
+ return 1;
+
+ return 0;
+}
+
+int __xc_bitmap_subset(uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+ int k, lim = nbits/XC_BITS_PER_BYTE;
+ for (k=0; k<lim; k++)
+ if (s1p[k] & ~s2p[k])
+ return 0;
+
+ if (nbits % XC_BITS_PER_BYTE)
+ if ((s1p[k] & ~s2p[k]) & XC_BITMAP_LAST_BYTE_MASK(nbits))
+ return 0;
+
+ return 1;
+}
+
+int __xc_bitmap_empty(uint8_t *sp, int nbits)
+{
+ int k, lim = nbits/XC_BITS_PER_BYTE;
+ for (k=0; k<lim; k++)
+ if (sp[k])
+ return 0;
+
+ if (nbits % XC_BITS_PER_BYTE)
+ if (sp[k] & XC_BITMAP_LAST_BYTE_MASK(nbits))
+ return 0;
+
+ return 1;
+}
+
+int __xc_bitmap_full(uint8_t *sp, int nbits)
+{
+ int k, lim = nbits/XC_BITS_PER_BYTE;
+ for (k=0; k<lim; k++)
+ if (~sp[k] & XC_BITMAP_BYTE_MASK)
+ return 0;
+
+ if (nbits % XC_BITS_PER_BYTE)
+ if (~sp[k] & XC_BITMAP_LAST_BYTE_MASK(nbits))
+ return 0;
+
+ return 1;
+}
+
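+/*
+ * Population count of a byte (SWAR method): sum adjacent bits, then
+ * adjacent 2-bit sums, then the two nibble sums.
+ */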
+static inline uint8_t hweight8(uint8_t w)
+{
+ uint8_t res = (w & 0x55) + ((w >> 1) & 0x55);
+ res = (res & 0x33) + ((res >> 2) & 0x33);
+ return (res & 0x0F) + ((res >> 4) & 0x0F);
+}
+
+int __xc_bitmap_weight(const uint8_t *sp, int nbits)
+{
+ int k, w = 0, lim = nbits/XC_BITS_PER_BYTE;
+
+ for (k=0; k <lim; k++)
+ w += hweight8(sp[k]);
+
+ if (nbits % XC_BITS_PER_BYTE)
+ w += hweight8(sp[k] & XC_BITMAP_LAST_BYTE_MASK(nbits));
+
+ return w;
+}
+
+/* Bitmap print functions (used for xenctl_cpumask) */
+#define CHUNKSZ 8
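+/* Round val up to the next multiple of modulus (a power of 2). */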
+#define roundup_power2(val,modulus) (((val) + (modulus) - 1) & ~((modulus) - 1))
+
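+/*
+ * Format the bitmap as comma-separated hex chunks of CHUNKSZ bits,
+ * most significant chunk first (e.g. a 128-bit mask with cpus 0-3
+ * set prints as "00,00,...,00,0f").
+ */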
+int xc_bitmap_snprintf(char *buf, unsigned int buflen,
+ const uint8_t *maskp, int nmaskbits)
+{
+ int i, word, bit, len = 0;
+ unsigned long val;
+ const char *sep = "";
+ int chunksz;
+ uint8_t chunkmask;
+
+ chunksz = nmaskbits & (CHUNKSZ - 1);
+ if (chunksz == 0)
+ chunksz = CHUNKSZ;
+
+ i = roundup_power2(nmaskbits, CHUNKSZ) - CHUNKSZ;
+ for (; i >= 0; i -= CHUNKSZ) {
+ chunkmask = ((1ULL << chunksz) - 1);
+ word = i / XC_BITS_PER_BYTE;
+ bit = i % XC_BITS_PER_BYTE;
+ val = (maskp[word] >> bit) & chunkmask;
+ len += snprintf(buf+len, buflen-len, "%s%0*lx", sep,
+ (chunksz+3)/4, val);
+ chunksz = CHUNKSZ;
+ sep = ",";
+ }
+ return len;
+}
diff -r 04cb0829d138 tools/libxc/xc_bitmap.h
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/libxc/xc_bitmap.h Sun Mar 21 23:22:26 2010 -0400
@@ -0,0 +1,193 @@
+#ifndef __XENCTL_BITMAP_H
+#define __XENCTL_BITMAP_H
+
+#include <stdint.h>
+#include <string.h>
+
+#define XC_BITS_PER_BYTE 8
+#define XC_BITS_TO_BYTES(bits) \
+ (((bits)+XC_BITS_PER_BYTE-1)/XC_BITS_PER_BYTE)
+#define XC_BITMAP_BIT(nr) (1 << (nr))
+#define XC_BITMAP_BIT_MASK(nr) (1 << ((nr) % XC_BITS_PER_BYTE))
+#define XC_BITMAP_BYTE(nr) ((nr) / XC_BITS_PER_BYTE)
+
+#define XC_BITMAP_BYTE_MASK (0xff)
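+/* Mask of the valid bits in the final (possibly partial) byte of an
+ * nbits-sized bitmap. */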
+#define XC_BITMAP_LAST_BYTE_MASK(nbits) \
+ (((nbits) % XC_BITS_PER_BYTE) ? \
+ ((1<<((nbits) % XC_BITS_PER_BYTE))-1) : \
+ XC_BITMAP_BYTE_MASK)
+
+#define xc_bitmap_find_first_bit(addr, size) \
+ xc_bitmap_find_next_bit(addr, size, 0)
+extern int
+xc_bitmap_find_next_bit(const uint8_t *addr, uint32_t size, uint32_t offset);
+
+#define xc_bitmap_find_first_zero_bit(addr, size) \
+ xc_bitmap_find_next_zero_bit(addr, size, 0)
+extern int xc_bitmap_find_next_zero_bit(
+ const uint8_t *addr, uint32_t size, uint32_t offset);
+
+extern void __xc_bitmap_and(uint8_t *dp, uint8_t *s1p, uint8_t *s2p, int nbits);
+extern void __xc_bitmap_or(uint8_t *dp, uint8_t *s1p, uint8_t *s2p, int nbits);
+extern void __xc_bitmap_xor(uint8_t *dp, uint8_t *s1p, uint8_t *s2p, int nbits);
+extern void
+__xc_bitmap_andnot(uint8_t *dp, uint8_t *s1p, uint8_t *s2p, int nbits);
+extern void __xc_bitmap_complement(uint8_t *dp, uint8_t *sp, int nbits);
+extern int __xc_bitmap_equal(uint8_t *s1p, uint8_t *s2p, int nbits);
+extern int __xc_bitmap_intersects(uint8_t *s1p, uint8_t *s2p, int nbits);
+extern int __xc_bitmap_subset(uint8_t *s1p, uint8_t *s2p, int nbits);
+extern int __xc_bitmap_empty(uint8_t *sp, int nbits);
+extern int __xc_bitmap_full(uint8_t *sp, int nbits);
+extern int __xc_bitmap_weight(const uint8_t *sp, int nbits);
+
+extern int xc_bitmap_snprintf(char *buf, unsigned int buflen,
+ const uint8_t *maskp, int nmaskbits);
+
+
+static inline void xc_bitmap_set_bit(int nr, volatile uint8_t *addr)
+{
+ uint8_t mask = XC_BITMAP_BIT_MASK(nr);
+ uint8_t *p = ((uint8_t *)addr) + XC_BITMAP_BYTE(nr);
+ *p |= mask;
+}
+
+static inline void xc_bitmap_clear_bit(int nr, volatile uint8_t *addr)
+{
+ uint8_t mask = XC_BITMAP_BIT_MASK(nr);
+ uint8_t *p = ((uint8_t *)addr) + XC_BITMAP_BYTE(nr);
+ *p &= ~mask;
+}
+
+static inline int xc_bitmap_test_bit(int nr, volatile uint8_t *addr)
+{
+ uint8_t mask = XC_BITMAP_BIT_MASK(nr);
+ uint8_t *p = ((uint8_t *)addr) + XC_BITMAP_BYTE(nr);
+ return *p & mask;
+}
+
+static inline void xc_bitmap_fill(uint8_t *dp, int nbits)
+{
+ size_t nbytes = XC_BITS_TO_BYTES(nbits);
+ if (nbytes > 1)
+ memset(dp, 0xff, nbytes-1);
+ dp[nbytes-1] = XC_BITMAP_LAST_BYTE_MASK(nbits);
+}
+
+static inline void xc_bitmap_zero(uint8_t *dp, int nbits)
+{
+ size_t nbytes = XC_BITS_TO_BYTES(nbits);
+ if (nbytes > 1)
+ memset(dp, 0x00, nbytes-1);
+ dp[nbytes-1] = 0;
+}
+
+
+static inline void
+xc_bitmap_and(uint8_t *dp, uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+ if (nbits <= XC_BITS_PER_BYTE)
+ *dp = *s1p & *s2p;
+ else
+ __xc_bitmap_and(dp, s1p, s2p, nbits);
+}
+
+static inline void
+xc_bitmap_or(uint8_t *dp, uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+ if (nbits <= XC_BITS_PER_BYTE)
+ *dp = *s1p | *s2p;
+ else
+ __xc_bitmap_or(dp, s1p, s2p, nbits);
+}
+
+static inline void
+xc_bitmap_xor(uint8_t *dp, uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+ if (nbits <= XC_BITS_PER_BYTE)
+ *dp = *s1p ^ *s2p;
+ else
+ __xc_bitmap_xor(dp, s1p, s2p, nbits);
+}
+
+static inline void
+xc_bitmap_andnot(uint8_t *dp, uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+ if (nbits <= XC_BITS_PER_BYTE)
+ *dp = *s1p & ~(*s2p);
+ else
+ __xc_bitmap_andnot(dp, s1p, s2p, nbits);
+}
+
+static inline void
+xc_bitmap_complement(uint8_t *dp, uint8_t *sp, int nbits)
+{
+ if (nbits <= XC_BITS_PER_BYTE)
+ *dp = ~(*sp) & XC_BITMAP_LAST_BYTE_MASK(nbits);
+ else
+ __xc_bitmap_complement(dp, sp, nbits);
+}
+
+static inline int
+xc_bitmap_equal(uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+ if (nbits <= XC_BITS_PER_BYTE)
+ return !((*s1p ^ *s2p) & XC_BITMAP_LAST_BYTE_MASK(nbits));
+
+ return __xc_bitmap_equal(s1p, s2p, nbits);
+}
+
+static inline int
+xc_bitmap_intersects(uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+ if (nbits <= XC_BITS_PER_BYTE)
+ return ((*s1p & *s2p) & XC_BITMAP_LAST_BYTE_MASK(nbits));
+
+ return __xc_bitmap_intersects(s1p, s2p, nbits);
+}
+
+static inline int
+xc_bitmap_subset(uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+ if (nbits <= XC_BITS_PER_BYTE)
+ return !((*s1p & ~(*s2p)) & XC_BITMAP_LAST_BYTE_MASK(nbits));
+
+ return __xc_bitmap_subset(s1p, s2p, nbits);
+}
+
+static inline int
+xc_bitmap_empty(uint8_t *sp, int nbits)
+{
+ if (nbits <= XC_BITS_PER_BYTE)
+ return ! (*sp & XC_BITMAP_LAST_BYTE_MASK(nbits));
+ else
+ return __xc_bitmap_empty(sp, nbits);
+}
+
+static inline int
+xc_bitmap_full(uint8_t *sp, int nbits)
+{
+ if (nbits <= XC_BITS_PER_BYTE)
+ return ! (~(*sp) & XC_BITMAP_LAST_BYTE_MASK(nbits));
+ else
+ return __xc_bitmap_full(sp, nbits);
+}
+
+static inline uint32_t
+xc_bitmap_weight(const uint8_t *sp, int nbits)
+{
+ return __xc_bitmap_weight(sp, nbits);
+}
+
+
+static inline void
+xc_bitmap_copy(uint8_t *dp, const uint8_t *sp, int nbits)
+{
+ if (nbits <= XC_BITS_PER_BYTE)
+ *dp = *sp;
+ else
+ memcpy(dp, sp, XC_BITS_TO_BYTES(nbits));
+}
+
+#endif
diff -r 04cb0829d138 tools/libxc/xc_cpumask.h
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/libxc/xc_cpumask.h Sun Mar 21 23:22:26 2010 -0400
@@ -0,0 +1,211 @@
+#ifndef __XENCTL_CPUMASK_H
+#define __XENCTL_CPUMASK_H
+
+#include <xen/domctl.h>
+#include "xc_bitmap.h"
+
+/* Number of cpus set in the bitmap */
+#define xc_cpumask_num_cpus(mask) xc_cpumask_weight(mask)
+
+/**
+ * xc_cpumask_first - get the first cpu in a xenctl_cpumask
+ * @srcp: the xenctl_cpumask pointer
+ *
+ * Returns >= xc_cpumask_len(srcp) if no cpus set.
+ */
+static inline unsigned int
+xc_cpumask_first(struct xenctl_cpumask *srcp)
+{
+ return xc_bitmap_find_first_bit(xc_cpumask_bits(srcp),
+ xc_cpumask_len(srcp));
+}
+
+/**
+ * xc_cpumask_next - get the next cpu in a xenctl_cpumask
+ * @n: the cpu prior to the place to search (ie. return will be > @n)
+ * @srcp: the xenctl_cpumask pointer
+ *
+ * Returns >= xc_cpumask_len(srcp) if no further cpus set.
+ */
+static inline uint32_t
+xc_cpumask_next(int n, struct xenctl_cpumask *srcp)
+{
+ return xc_bitmap_find_next_bit(xc_cpumask_bits(srcp),
+ xc_cpumask_len(srcp), n+1);
+}
+
+#if 0
+static inline uint32_t
+xc_cpumask_next_zero(int n, struct xenctl_cpumask *srcp)
+{
+ return xc_bitmap_find_next_zero_bit(xc_cpumask_bits(srcp),
+ xc_cpumask_len(srcp), n+1);
+}
+#endif
+
+/**
+ * xc_for_each_cpu - iterate over every cpu in a mask
+ * @cpu: the (optionally unsigned) integer iterator
+ * @mask: the xenctl_cpumask (the struct itself, not a pointer)
+ *
+ * After the loop, cpu is >= xc_cpumask_len(mask)
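+ *
+ * Example:
+ * xc_for_each_cpu(cpu, mask)
+ * printf("cpu%d\n", cpu);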
+ */
+#define xc_for_each_cpu(cpu, mask) \
+ __xc_for_each_cpu(cpu, &(mask))
+
+#define __xc_for_each_cpu(cpu, mask) \
+ for ((cpu) = -1; \
+ (cpu) = xc_cpumask_next((cpu), (mask)), \
+ (cpu) < xc_cpumask_len(mask);)
+
+
+#define xc_cpumask_equal(src1, src2) __xc_cpumask_equal(&(src1), &(src2))
+static inline int
+__xc_cpumask_equal(struct xenctl_cpumask *s1p, struct xenctl_cpumask *s2p)
+{
+ return xc_bitmap_equal(xc_cpumask_bits(s1p), xc_cpumask_bits(s2p),
+ xc_cpumask_len(s1p));
+}
+
+#define xc_cpumask_set_cpu(cpu, dst) __xc_cpumask_set_cpu(cpu, &(dst))
+static inline void __xc_cpumask_set_cpu(int cpu, struct xenctl_cpumask *dstp)
+{
+ xc_bitmap_set_bit(cpu, xc_cpumask_bits(dstp));
+}
+
+#define xc_cpumask_clear_cpu(cpu, dst) __xc_cpumask_clear_cpu(cpu, &(dst))
+static inline void __xc_cpumask_clear_cpu(int cpu, struct xenctl_cpumask *dstp)
+{
+ xc_bitmap_clear_bit(cpu, xc_cpumask_bits(dstp));
+}
+
+#define xc_cpumask_test_cpu(cpu, dst) __xc_cpumask_test_cpu(cpu, &(dst))
+static inline int __xc_cpumask_test_cpu(int cpu, struct xenctl_cpumask *dstp)
+{
+ return xc_bitmap_test_bit(cpu, xc_cpumask_bits(dstp));
+}
+
+
+#define xc_cpumask_setall(dst) __xc_cpumask_setall(&(dst))
+static inline void __xc_cpumask_setall(struct xenctl_cpumask *dstp)
+{
+ xc_bitmap_fill(xc_cpumask_bits(dstp), xc_cpumask_len(dstp));
+}
+
+#define xc_cpumask_clearall(dst) __xc_cpumask_clearall(&(dst))
+static inline void __xc_cpumask_clearall(struct xenctl_cpumask *dstp)
+{
+ xc_bitmap_zero(xc_cpumask_bits(dstp), xc_cpumask_len(dstp));
+}
+
+#define xc_cpumask_and(dst, src1, src2) \
+ __xc_cpumask_and(&(dst), &(src1), &(src2))
+static inline void __xc_cpumask_and(struct xenctl_cpumask *dstp,
+ struct xenctl_cpumask *src1p, struct xenctl_cpumask *src2p)
+{
+ xc_bitmap_and(xc_cpumask_bits(dstp), xc_cpumask_bits(src1p),
+ xc_cpumask_bits(src2p), xc_cpumask_len(dstp));
+}
+
+#define xc_cpumask_or(dst, src1, src2) \
+ __xc_cpumask_or(&(dst), &(src1), &(src2))
+static inline void __xc_cpumask_or(struct xenctl_cpumask *dstp,
+ struct xenctl_cpumask *src1p, struct xenctl_cpumask *src2p)
+{
+ xc_bitmap_or(xc_cpumask_bits(dstp), xc_cpumask_bits(src1p),
+ xc_cpumask_bits(src2p), xc_cpumask_len(dstp));
+}
+
+#define xc_cpumask_xor(dst, src1, src2) \
+ __xc_cpumask_xor(&(dst), &(src1), &(src2))
+static inline void __xc_cpumask_xor(struct xenctl_cpumask *dstp,
+ struct xenctl_cpumask *src1p, struct xenctl_cpumask *src2p)
+{
+ xc_bitmap_xor(xc_cpumask_bits(dstp), xc_cpumask_bits(src1p),
+ xc_cpumask_bits(src2p), xc_cpumask_len(dstp));
+}
+
+#define xc_cpumask_andnot(dst, src1, src2) \
+ __xc_cpumask_andnot(&(dst), &(src1), &(src2))
+static inline void __xc_cpumask_andnot(struct xenctl_cpumask *dstp,
+ struct xenctl_cpumask *src1p, struct xenctl_cpumask *src2p)
+{
+ xc_bitmap_andnot(xc_cpumask_bits(dstp), xc_cpumask_bits(src1p),
+ xc_cpumask_bits(src2p), xc_cpumask_len(dstp));
+}
+
+#define xc_cpumask_complement(dst, src) \
+ __xc_cpumask_complement(&(dst), &(src))
+static inline void __xc_cpumask_complement(struct xenctl_cpumask *dstp,
+ struct xenctl_cpumask *srcp)
+{
+ xc_bitmap_complement(xc_cpumask_bits(dstp), xc_cpumask_bits(srcp),
+ xc_cpumask_len(dstp));
+}
+
+#define xc_cpumask_intersects(src1, src2) \
+ __xc_cpumask_intersects(&(src1), &(src2))
+static inline int __xc_cpumask_intersects(struct xenctl_cpumask *src1p,
+ struct xenctl_cpumask *src2p)
+{
+ return xc_bitmap_intersects(xc_cpumask_bits(src1p), xc_cpumask_bits(src2p),
+ xc_cpumask_len(src1p));
+}
+
+#define xc_cpumask_subset(src1, src2) \
+ __xc_cpumask_subset(&(src1), &(src2))
+static inline int __xc_cpumask_subset(struct xenctl_cpumask *src1p,
+ struct xenctl_cpumask *src2p)
+{
+ return xc_bitmap_subset(xc_cpumask_bits(src1p), xc_cpumask_bits(src2p),
+ xc_cpumask_len(src1p));
+}
+
+#define xc_cpumask_empty(src) __xc_cpumask_empty(&(src))
+static inline int __xc_cpumask_empty(struct xenctl_cpumask *srcp)
+{
+ return xc_bitmap_empty(xc_cpumask_bits(srcp), xc_cpumask_len(srcp));
+}
+
+#define xc_cpumask_full(src) __xc_cpumask_full(&(src))
+static inline int __xc_cpumask_full(struct xenctl_cpumask *srcp)
+{
+ return xc_bitmap_full(xc_cpumask_bits(srcp), xc_cpumask_len(srcp));
+}
+
+#define xc_cpumask_weight(src) __xc_cpumask_weight(&(src))
+static inline uint32_t __xc_cpumask_weight(struct xenctl_cpumask *srcp)
+{
+ return xc_bitmap_weight(xc_cpumask_bits(srcp), xc_cpumask_len(srcp));
+}
+
+#define xc_cpumask_copy(dst, src) __xc_cpumask_copy(&(dst), &(src))
+static inline void __xc_cpumask_copy(struct xenctl_cpumask *dstp,
+ struct xenctl_cpumask *srcp)
+{
+ xc_bitmap_copy(xc_cpumask_bits(dstp), xc_cpumask_bits(srcp),
+ xc_cpumask_len(dstp));
+}
+
+#define XC_CPU_MASK_LAST_BYTE XC_BITMAP_LAST_BYTE_MASK(XENCTL_NR_CPUS)
+
+#define XC_CPUMASK_ALL \
+/*(xenctl_cpumask)*/ { { \
+ [0 ... XC_BITS_TO_BYTES(XENCTL_NR_CPUS)-2] = 0xff, \
+ [XC_BITS_TO_BYTES(XENCTL_NR_CPUS)-1] = XC_CPU_MASK_LAST_BYTE \
+} }
+
+#define XC_CPUMASK_NONE \
+/*(xenctl_cpumask)*/ { { \
+ [0 ... XC_BITS_TO_BYTES(XENCTL_NR_CPUS)-1] = 0 \
+} }
+
+#define xc_cpumask_snprintf(buf, len, src) \
+ __xc_cpumask_snprintf((buf), (len), &(src), XENCTL_NR_CPUS)
+static inline int __xc_cpumask_snprintf(char *buf, int len,
+ const struct xenctl_cpumask *srcp, int nbits)
+{
+ return xc_bitmap_snprintf(buf, len, srcp->bits, nbits);
+}
+
+#endif /* __XENCTL_CPUMASK_H */
diff -r 04cb0829d138 tools/libxc/xc_domain.c
--- a/tools/libxc/xc_domain.c Wed Mar 17 14:10:43 2010 +0000
+++ b/tools/libxc/xc_domain.c Sun Mar 21 23:22:26 2010 -0400
@@ -8,6 +8,7 @@
#include "xc_private.h"
#include "xg_save_restore.h"
+#include "xc_cpumask.h"
#include <xen/memory.h>
#include <xen/hvm/hvm_op.h>
@@ -98,39 +99,18 @@
int xc_vcpu_setaffinity(int xc_handle,
uint32_t domid,
int vcpu,
- uint64_t *cpumap, int cpusize)
+ struct xenctl_cpumask *cpumask)
{
DECLARE_DOMCTL;
int ret = -1;
- uint8_t *local = malloc(cpusize);
- if(local == NULL)
- {
- PERROR("Could not alloc memory for Xen hypercall");
- goto out;
- }
domctl.cmd = XEN_DOMCTL_setvcpuaffinity;
domctl.domain = (domid_t)domid;
domctl.u.vcpuaffinity.vcpu = vcpu;
-
- bitmap_64_to_byte(local, cpumap, cpusize * 8);
-
- set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);
-
- domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8;
+ domctl.u.vcpuaffinity.cpumask = *cpumask;
- if ( lock_pages(local, cpusize) != 0 )
- {
- PERROR("Could not lock memory for Xen hypercall");
- goto out;
- }
-
ret = do_domctl(xc_handle, &domctl);
- unlock_pages(local, cpusize);
-
- out:
- free(local);
return ret;
}
@@ -138,39 +118,19 @@
int xc_vcpu_getaffinity(int xc_handle,
uint32_t domid,
int vcpu,
- uint64_t *cpumap,
- int cpusize)
+ struct xenctl_cpumask *cpumask)
{
DECLARE_DOMCTL;
int ret = -1;
- uint8_t * local = malloc(cpusize);
-
- if(local == NULL)
- {
- PERROR("Could not alloc memory for Xen hypercall");
- goto out;
- }
domctl.cmd = XEN_DOMCTL_getvcpuaffinity;
domctl.domain = (domid_t)domid;
domctl.u.vcpuaffinity.vcpu = vcpu;
-
-
- set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);
- domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8;
-
- if ( lock_pages(local, sizeof(local)) != 0 )
- {
- PERROR("Could not lock memory for Xen hypercall");
- goto out;
- }
+ xc_cpumask_clearall(domctl.u.vcpuaffinity.cpumask);
ret = do_domctl(xc_handle, &domctl);
- unlock_pages(local, sizeof (local));
- bitmap_byte_to_64(cpumap, local, cpusize * 8);
-out:
- free(local);
+ *cpumask = domctl.u.vcpuaffinity.cpumask;
return ret;
}
diff -r 04cb0829d138 tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h Wed Mar 17 14:10:43 2010 +0000
+++ b/tools/libxc/xenctrl.h Sun Mar 21 23:22:26 2010 -0400
@@ -309,13 +309,11 @@
int xc_vcpu_setaffinity(int xc_handle,
uint32_t domid,
int vcpu,
- uint64_t *cpumap,
- int cpusize);
+ struct xenctl_cpumask *cpumask);
int xc_vcpu_getaffinity(int xc_handle,
uint32_t domid,
int vcpu,
- uint64_t *cpumap,
- int cpusize);
+ struct xenctl_cpumask *cpumask);
/**
* This function will return information about one or more domains. It is
diff -r 04cb0829d138 tools/python/xen/lowlevel/xc/xc.c
--- a/tools/python/xen/lowlevel/xc/xc.c Wed Mar 17 14:10:43 2010 +0000
+++ b/tools/python/xen/lowlevel/xc/xc.c Sun Mar 21 23:22:26 2010 -0400
@@ -23,6 +23,7 @@
#include "xc_dom.h"
#include <xen/hvm/hvm_info_table.h>
#include <xen/hvm/params.h>
+#include "xc_cpumask.h"
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
@@ -215,12 +216,8 @@
{
uint32_t dom;
int vcpu = 0, i;
- uint64_t *cpumap;
PyObject *cpulist = NULL;
- int nr_cpus, size;
- xc_physinfo_t info;
- xc_cpu_to_node_t map[1];
- uint64_t cpumap_size = sizeof(cpumap);
+ struct xenctl_cpumask cpumask = XC_CPUMASK_NONE;
static char *kwd_list[] = { "domid", "vcpu", "cpumap", NULL };
@@ -229,40 +226,19 @@
&dom, &vcpu, &cpulist) )
return NULL;
- set_xen_guest_handle(info.cpu_to_node, map);
- info.max_cpu_id = 1;
- if ( xc_physinfo(self->xc_handle, &info) != 0 )
- return pyxc_error_to_exception();
-
- nr_cpus = info.nr_cpus;
-
- size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8);
- cpumap = malloc(cpumap_size * size);
- if(cpumap == NULL)
- return pyxc_error_to_exception();
-
-
if ( (cpulist != NULL) && PyList_Check(cpulist) )
{
- for ( i = 0; i < size; i++)
- {
- cpumap[i] = 0ULL;
- }
for ( i = 0; i < PyList_Size(cpulist); i++ )
{
long cpu = PyInt_AsLong(PyList_GetItem(cpulist, i));
- *(cpumap + cpu / (cpumap_size * 8)) |= (uint64_t)1 << (cpu % (cpumap_size * 8));
+ /* Ignore out-of-range cpus to avoid writing past the mask. */
+ if (cpu >= 0 && cpu < XENCTL_NR_CPUS)
+ xc_cpumask_set_cpu(cpu, cpumask);
}
}
- if ( xc_vcpu_setaffinity(self->xc_handle, dom, vcpu, cpumap, size * cpumap_size) != 0 )
- {
- free(cpumap);
+ if (xc_vcpu_setaffinity(self->xc_handle, dom, vcpu, &cpumask))
return pyxc_error_to_exception();
- }
Py_INCREF(zero);
- free(cpumap);
return zero;
}
@@ -377,15 +353,10 @@
PyObject *kwds)
{
PyObject *info_dict, *cpulist;
-
uint32_t dom, vcpu = 0;
xc_vcpuinfo_t info;
int rc, i;
- uint64_t *cpumap;
- int nr_cpus, size;
- xc_physinfo_t pinfo = { 0 };
- xc_cpu_to_node_t map[1];
- uint64_t cpumap_size = sizeof(cpumap);
+ struct xenctl_cpumask cpumask = XC_CPUMASK_NONE;
static char *kwd_list[] = { "domid", "vcpu", NULL };
@@ -393,25 +364,13 @@
&dom, &vcpu) )
return NULL;
- set_xen_guest_handle(pinfo.cpu_to_node, map);
- pinfo.max_cpu_id = 1;
- if ( xc_physinfo(self->xc_handle, &pinfo) != 0 )
- return pyxc_error_to_exception();
- nr_cpus = pinfo.nr_cpus;
rc = xc_vcpu_getinfo(self->xc_handle, dom, vcpu, &info);
if ( rc < 0 )
return pyxc_error_to_exception();
- size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8);
- if((cpumap = malloc(cpumap_size * size)) == NULL)
- return pyxc_error_to_exception();
-
- rc = xc_vcpu_getaffinity(self->xc_handle, dom, vcpu, cpumap, cpumap_size * size);
+ rc = xc_vcpu_getaffinity(self->xc_handle, dom, vcpu, &cpumask);
if ( rc < 0 )
- {
- free(cpumap);
return pyxc_error_to_exception();
- }
info_dict = Py_BuildValue("{s:i,s:i,s:i,s:L,s:i}",
"online", info.online,
@@ -421,18 +380,14 @@
"cpu", info.cpu);
cpulist = PyList_New(0);
- for ( i = 0; i < size * cpumap_size * 8; i++ )
+ xc_for_each_cpu(i, cpumask)
{
- if (*(cpumap + i / (cpumap_size * 8)) & 1 ) {
- PyObject *pyint = PyInt_FromLong(i);
- PyList_Append(cpulist, pyint);
- Py_DECREF(pyint);
- }
- *(cpumap + i / (cpumap_size * 8)) >>= 1;
+ PyObject *pyint = PyInt_FromLong(i);
+ PyList_Append(cpulist, pyint);
+ Py_DECREF(pyint);
}
PyDict_SetItemString(info_dict, "cpumap", cpulist);
Py_DECREF(cpulist);
- free(cpumap);
return info_dict;
}
diff -r 04cb0829d138 xen/common/domctl.c
--- a/xen/common/domctl.c Wed Mar 17 14:10:43 2010 +0000
+++ b/xen/common/domctl.c Sun Mar 21 23:22:26 2010 -0400
@@ -76,6 +76,36 @@
bitmap_byte_to_long(cpus_addr(*cpumask), bytemap, NR_CPUS);
}
+void cpumask_to_xenctl_cpumask(
+ struct xenctl_cpumask *xenctl_cpumask, cpumask_t *cpumask)
+{
+ unsigned int nr_cpus;
+ uint8_t *bytemap;
+
+ /* The caller must zero out the destination cpumask first. */
+ bytemap = xc_cpumask_bits(xenctl_cpumask);
+ nr_cpus =
+ min_t(unsigned int, xc_cpumask_len(xenctl_cpumask), NR_CPUS);
+
+ bitmap_long_to_byte(bytemap, cpus_addr(*cpumask), nr_cpus);
+}
+
+void xenctl_cpumask_to_cpumask(
+ cpumask_t *cpumask, struct xenctl_cpumask *xenctl_cpumask)
+{
+ unsigned int nr_cpus;
+ uint8_t *bytemap;
+
+ bytemap = xc_cpumask_bits(xenctl_cpumask);
+
+ nr_cpus =
+ min_t(unsigned int, xc_cpumask_len(xenctl_cpumask), NR_CPUS);
+
+ bitmap_byte_to_long(cpus_addr(*cpumask), bytemap, nr_cpus);
+
+ cpus_and(*cpumask, *cpumask, cpu_online_map);
+}
+
static inline int is_free_domid(domid_t dom)
{
struct domain *d;
@@ -574,15 +604,18 @@
if ( op->cmd == XEN_DOMCTL_setvcpuaffinity )
{
- xenctl_cpumap_to_cpumask(
- &new_affinity, &op->u.vcpuaffinity.cpumap);
+ xenctl_cpumask_to_cpumask(
+ &new_affinity, &op->u.vcpuaffinity.cpumask);
ret = vcpu_set_affinity(v, &new_affinity);
}
else
{
- cpumask_to_xenctl_cpumap(
- &op->u.vcpuaffinity.cpumap, &v->cpu_affinity);
- ret = 0;
+ cpumask_to_xenctl_cpumask(
+ &op->u.vcpuaffinity.cpumask, &v->cpu_affinity);
+ if ( copy_to_guest(u_domctl, op, 1) )
+ ret = -EFAULT;
+ else
+ ret = 0;
}
vcpuaffinity_out:
diff -r 04cb0829d138 xen/include/public/domctl.h
--- a/xen/include/public/domctl.h Wed Mar 17 14:10:43 2010 +0000
+++ b/xen/include/public/domctl.h Sun Mar 21 23:22:26 2010 -0400
@@ -35,11 +35,32 @@
#include "xen.h"
#include "grant_table.h"
-#define XEN_DOMCTL_INTERFACE_VERSION 0x00000006
+#define XEN_DOMCTL_INTERFACE_VERSION 0x00000007
+
+#define XENCTL_NR_CPUS 128
+#define XENCTL_BITS_PER_BYTE 8
+
+#define XENCTL_BITS_TO_BYTES(bits) \
+ (((bits)+XENCTL_BITS_PER_BYTE-1)/XENCTL_BITS_PER_BYTE)
+#define XENCTL_DECLARE_BITMAP(name,bits) \
+ uint8_t name[XENCTL_BITS_TO_BYTES(bits)]
+
+/* xenctl_cpumask:
+ * A static structure, as opposed to the handle-based xenctl_cpumap.
+ * No nr_cpus parameter is passed; instead XENCTL_NR_CPUS is kept
+ * consistent between Xen and the tools via the interface version.
+ * Note that XENCTL_NR_CPUS and NR_CPUS should be equal.
+ */
+
+#define xc_cpumask_bits(maskp) ((maskp)->bits)
+#define xc_cpumask_len(maskp) (XENCTL_NR_CPUS)
+struct xenctl_cpumask {
+ XENCTL_DECLARE_BITMAP(bits, XENCTL_NR_CPUS);
+};
struct xenctl_cpumap {
+ uint32_t nr_cpus;
XEN_GUEST_HANDLE_64(uint8) bitmap;
- uint32_t nr_cpus;
};
/*
@@ -285,7 +306,7 @@
/* XEN_DOMCTL_getvcpuaffinity */
struct xen_domctl_vcpuaffinity {
uint32_t vcpu; /* IN */
- struct xenctl_cpumap cpumap; /* IN/OUT */
+ struct xenctl_cpumask cpumask; /* IN/OUT */
};
typedef struct xen_domctl_vcpuaffinity xen_domctl_vcpuaffinity_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpuaffinity_t);
diff -r 04cb0829d138 xen/include/xen/cpumask.h
--- a/xen/include/xen/cpumask.h Wed Mar 17 14:10:43 2010 +0000
+++ b/xen/include/xen/cpumask.h Sun Mar 21 23:22:26 2010 -0400
@@ -425,8 +425,15 @@
/* Copy to/from cpumap provided by control tools. */
struct xenctl_cpumap;
void cpumask_to_xenctl_cpumap(
- struct xenctl_cpumap *enctl_cpumap, cpumask_t *cpumask);
+ struct xenctl_cpumap *xenctl_cpumap, cpumask_t *cpumask);
void xenctl_cpumap_to_cpumask(
cpumask_t *cpumask, struct xenctl_cpumap *enctl_cpumap);
+/* Copy to/from cpumask used by control tools. */
+struct xenctl_cpumask;
+void cpumask_to_xenctl_cpumask(
+ struct xenctl_cpumask *xenctl_cpumask, cpumask_t *cpumask);
+void xenctl_cpumask_to_cpumask(
+ cpumask_t *cpumask, struct xenctl_cpumask *xenctl_cpumask);
+
#endif /* __XEN_CPUMASK_H */
[-- Attachment #3: Type: text/plain, Size: 138 bytes --]
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel