Signed-off-by: Juergen Gross <juergen.gross@ts.fujitsu.com>

diff -r fadf63ab49e7 tools/python/xen/lowlevel/xc/xc.c
--- a/tools/python/xen/lowlevel/xc/xc.c Mon Apr 19 17:57:28 2010 +0100
+++ b/tools/python/xen/lowlevel/xc/xc.c Tue Apr 20 11:10:40 2010 +0200
@@ -97,17 +97,18 @@ static PyObject *pyxc_domain_create(XcOb
                                     PyObject *args,
                                     PyObject *kwds)
 {
-    uint32_t dom = 0, ssidref = 0, flags = 0, target = 0;
+    uint32_t dom = 0, ssidref = 0, flags = 0, target = 0, cpupool = 0;
     int      ret, i;
     PyObject *pyhandle = NULL;
     xen_domain_handle_t handle = {
         0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef,
         0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef };
 
-    static char *kwd_list[] = { "domid", "ssidref", "handle", "flags", "target", NULL };
+    static char *kwd_list[] = { "domid", "ssidref", "handle", "flags", "target", "cpupool", NULL };
 
-    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "|iiOii", kwd_list,
-                                      &dom, &ssidref, &pyhandle, &flags, &target))
+    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "|iiOiii", kwd_list, &dom,
+                                      &ssidref, &pyhandle, &flags, &target,
+                                      &cpupool))
         return NULL;
 
     if ( pyhandle != NULL )
     {
@@ -124,8 +125,9 @@ static PyObject *pyxc_domain_create(XcOb
         }
     }
 
+    flags |= XEN_DOMCTL_CDF_pool;
     if ( (ret = xc_domain_create(self->xc_handle, ssidref,
-                                 handle, flags, &dom)) < 0 )
+                                 handle, flags, &dom, cpupool)) < 0 )
         return pyxc_error_to_exception();
 
     if ( target )
@@ -329,7 +331,7 @@ static PyObject *pyxc_domain_getinfo(XcO
     {
         info_dict = Py_BuildValue(
             "{s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i"
-            ",s:L,s:L,s:L,s:i,s:i}",
+            ",s:L,s:L,s:L,s:i,s:i,s:i}",
             "domid",           (int)info[i].domid,
             "online_vcpus",    info[i].nr_online_vcpus,
             "max_vcpu_id",     info[i].max_vcpu_id,
@@ -344,7 +346,8 @@ static PyObject *pyxc_domain_getinfo(XcO
             "cpu_time",        (long long)info[i].cpu_time,
             "maxmem_kb",       (long long)info[i].max_memkb,
             "ssidref",         (int)info[i].ssidref,
-            "shutdown_reason", info[i].shutdown_reason);
+            "shutdown_reason", info[i].shutdown_reason,
+            "cpupool",         (int)info[i].cpupool);
         pyhandle = PyList_New(sizeof(xen_domain_handle_t));
         if ( (pyhandle == NULL) || (info_dict == NULL) )
         {
@@ -1893,6 +1896,178 @@ static PyObject *pyxc_dom_set_memshr(XcO
     return zero;
 }
 
+static PyObject *cpumap_to_cpulist(uint64_t cpumap)
+{
+    PyObject *cpulist = NULL;
+    uint32_t i;
+
+    cpulist = PyList_New(0);
+    for ( i = 0; cpumap != 0; i++ )
+    {
+        if ( cpumap & 1 )
+        {
+            PyObject* pyint = PyInt_FromLong(i);
+
+            PyList_Append(cpulist, pyint);
+            Py_DECREF(pyint);
+        }
+        cpumap >>= 1;
+    }
+    return cpulist;
+}
+
+static PyObject *pyxc_cpupool_create(XcObject *self,
+                                     PyObject *args,
+                                     PyObject *kwds)
+{
+    uint32_t cpupool = 0, sched = XEN_SCHEDULER_CREDIT;
+
+    static char *kwd_list[] = { "pool", "sched", NULL };
+
+    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "|ii", kwd_list, &cpupool,
+                                      &sched))
+        return NULL;
+
+    if ( xc_cpupool_create(self->xc_handle, &cpupool, sched) < 0 )
+        return pyxc_error_to_exception();
+
+    return PyInt_FromLong(cpupool);
+}
+
+static PyObject *pyxc_cpupool_destroy(XcObject *self,
+                                      PyObject *args)
+{
+    uint32_t cpupool;
+
+    if (!PyArg_ParseTuple(args, "i", &cpupool))
+        return NULL;
+
+    if (xc_cpupool_destroy(self->xc_handle, cpupool) != 0)
+        return pyxc_error_to_exception();
+
+    Py_INCREF(zero);
+    return zero;
+}
+
+static PyObject *pyxc_cpupool_getinfo(XcObject *self,
+                                      PyObject *args,
+                                      PyObject *kwds)
+{
+    PyObject *list, *info_dict;
+
+    uint32_t first_pool = 0;
+    int max_pools = 1024, nr_pools, i;
+    xc_cpupoolinfo_t *info;
+
+    static char *kwd_list[] = { "first_pool", "max_pools", NULL };
+
+    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "|ii", kwd_list,
+                                      &first_pool, &max_pools) )
+        return NULL;
+
+    info = calloc(max_pools, sizeof(xc_cpupoolinfo_t));
+    if (info == NULL)
+        return PyErr_NoMemory();
+
+    nr_pools = xc_cpupool_getinfo(self->xc_handle, first_pool, max_pools, info);
+
+    if (nr_pools < 0)
+    {
+        free(info);
+        return pyxc_error_to_exception();
+    }
+
+    list = PyList_New(nr_pools);
+    for ( i = 0 ; i < nr_pools; i++ )
+    {
+        info_dict = Py_BuildValue(
+            "{s:i,s:i,s:i,s:N}",
+            "cpupool",         (int)info[i].cpupool_id,
+            "sched",           info[i].sched_id,
+            "n_dom",           info[i].n_dom,
+            "cpulist",         cpumap_to_cpulist(info[i].cpumap));
+        if ( info_dict == NULL )
+        {
+            Py_DECREF(list);
+            free(info);
+            return NULL;
+        }
+        PyList_SetItem(list, i, info_dict);
+    }
+
+    free(info);
+
+    return list;
+}
+
+static PyObject *pyxc_cpupool_addcpu(XcObject *self,
+                                     PyObject *args,
+                                     PyObject *kwds)
+{
+    uint32_t cpupool;
+    int cpu = -1;
+
+    static char *kwd_list[] = { "cpupool", "cpu", NULL };
+
+    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "i|i", kwd_list,
+                                      &cpupool, &cpu) )
+        return NULL;
+
+    if (xc_cpupool_addcpu(self->xc_handle, cpupool, cpu) != 0)
+        return pyxc_error_to_exception();
+
+    Py_INCREF(zero);
+    return zero;
+}
+
+static PyObject *pyxc_cpupool_removecpu(XcObject *self,
+                                        PyObject *args,
+                                        PyObject *kwds)
+{
+    uint32_t cpupool;
+    int cpu = -1;
+
+    static char *kwd_list[] = { "cpupool", "cpu", NULL };
+
+    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "i|i", kwd_list,
+                                      &cpupool, &cpu) )
+        return NULL;
+
+    if (xc_cpupool_removecpu(self->xc_handle, cpupool, cpu) != 0)
+        return pyxc_error_to_exception();
+
+    Py_INCREF(zero);
+    return zero;
+}
+
+static PyObject *pyxc_cpupool_movedomain(XcObject *self,
+                                         PyObject *args,
+                                         PyObject *kwds)
+{
+    uint32_t cpupool, domid;
+
+    static char *kwd_list[] = { "cpupool", "domid", NULL };
+
+    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "ii", kwd_list,
+                                      &cpupool, &domid) )
+        return NULL;
+
+    if (xc_cpupool_movedomain(self->xc_handle, cpupool, domid) != 0)
+        return pyxc_error_to_exception();
+
+    Py_INCREF(zero);
+    return zero;
+}
+
+static PyObject *pyxc_cpupool_freeinfo(XcObject *self)
+{
+    uint64_t cpumap;
+
+    if (xc_cpupool_freeinfo(self->xc_handle, &cpumap) != 0)
+        return pyxc_error_to_exception();
+
+    return cpumap_to_cpulist(cpumap);
+}
 
 static PyMethodDef pyxc_methods[] = {
     { "handle",
@@ -2008,7 +2183,8 @@ static PyMethodDef pyxc_methods[] = {
       " maxmem_kb [int]: Maximum memory limit, in kilobytes\n"
      " cpu_time [long]: CPU time consumed, in nanoseconds\n"
       " shutdown_reason [int]: Numeric code from guest OS, explaining "
-      "reason why it shut itself down.\n" },
+      "reason why it shut itself down.\n"
+      " cpupool   [int]: Id of the cpupool the domain is bound to.\n" },
 
     { "vcpu_getinfo",
       (PyCFunction)pyxc_vcpu_getinfo,
@@ -2148,6 +2324,24 @@ static PyMethodDef pyxc_methods[] = {
       METH_VARARGS, "\n"
       "Get the scheduling parameters for a domain when running with the\n"
       "SMP credit scheduler.\n"
+      " domid     [int]:   domain id to get\n"
+      "Returns:   [dict]\n"
+      " weight    [short]: domain's scheduling weight\n"},
+
+    { "sched_credit2_domain_set",
+      (PyCFunction)pyxc_sched_credit2_domain_set,
+      METH_KEYWORDS, "\n"
+      "Set the scheduling parameters for a domain when running with the\n"
+      "SMP credit2 scheduler.\n"
+      " domid     [int]:   domain id to set\n"
+      " weight    [short]: domain's scheduling weight\n"
+      "Returns: [int] 0 on success; -1 on error.\n" },
+
+    { "sched_credit2_domain_get",
+      (PyCFunction)pyxc_sched_credit2_domain_get,
+      METH_VARARGS, "\n"
+      "Get the scheduling parameters for a domain when running with the\n"
+      "SMP credit2 scheduler.\n"
       " domid     [int]:   domain id to get\n"
       "Returns:   [dict]\n"
       " weight    [short]: domain's scheduling weight\n"},
@@ -2438,6 +2632,66 @@ static PyMethodDef pyxc_methods[] = {
       " enable [int,0|1]:    Disable or enable?\n"
       "Returns: [int] 0 on success; -1 on error.\n" },
 
+    { "cpupool_create",
+      (PyCFunction)pyxc_cpupool_create,
+      METH_VARARGS | METH_KEYWORDS, "\n"
+      "Create new cpupool.\n"
+      " pool    [int, 0]: cpupool identifier to use (allocated if zero).\n"
+      " sched   [int]: scheduler to use (credit if unspecified).\n\n"
+      "Returns: [int] new cpupool identifier; -1 on error.\n" },
+
+    { "cpupool_destroy",
+      (PyCFunction)pyxc_cpupool_destroy,
+      METH_VARARGS, "\n"
+      "Destroy a cpupool.\n"
+      " pool [int]:    Identifier of cpupool to be destroyed.\n\n"
+      "Returns: [int] 0 on success; -1 on error.\n" },
+
+    { "cpupool_getinfo",
+      (PyCFunction)pyxc_cpupool_getinfo,
+      METH_VARARGS | METH_KEYWORDS, "\n"
+      "Get information regarding a set of cpupools, in increasing id order.\n"
+      " first_pool [int, 0]:    First cpupool to retrieve info about.\n"
+      " max_pools  [int, 1024]: Maximum number of cpupools to retrieve info"
+      " about.\n\n"
+      "Returns: [list of dicts] if list length is less than 'max_pools'\n"
+      "         parameter then there was an error, or the end of the\n"
+      "         cpupool-id space was reached.\n"
+      " pool     [int]:  Identifier of cpupool to which this info pertains\n"
+      " sched    [int]:  Scheduler used for this cpupool\n"
+      " n_dom    [int]:  Number of Domains in this cpupool\n"
+      " cpulist  [list]: List of CPUs this cpupool is using\n" },
+
+    { "cpupool_addcpu",
+      (PyCFunction)pyxc_cpupool_addcpu,
+      METH_VARARGS | METH_KEYWORDS, "\n"
+      "Add a cpu to a cpupool.\n"
+      " pool [int]:    Identifier of cpupool.\n"
+      " cpu  [int, -1]: Cpu to add (lowest free if -1)\n\n"
+      "Returns: [int] 0 on success; -1 on error.\n" },
+
+    { "cpupool_removecpu",
+      (PyCFunction)pyxc_cpupool_removecpu,
+      METH_VARARGS | METH_KEYWORDS, "\n"
+      "Remove a cpu from a cpupool.\n"
+      " pool [int]:    Identifier of cpupool.\n"
+      " cpu  [int, -1]: Cpu to remove (highest used if -1)\n\n"
+      "Returns: [int] 0 on success; -1 on error.\n" },
+
+    { "cpupool_movedomain",
+      (PyCFunction)pyxc_cpupool_movedomain,
+      METH_VARARGS | METH_KEYWORDS, "\n"
+      "Move a domain to another cpupool.\n"
+      " pool [int]:   Identifier of cpupool to move domain to.\n"
+      " dom  [int]:   Domain to move\n\n"
+      "Returns: [int] 0 on success; -1 on error.\n" },
+
+    { "cpupool_freeinfo",
+      (PyCFunction)pyxc_cpupool_freeinfo,
+      METH_NOARGS, "\n"
+      "Get info about cpus not in any cpupool.\n"
+      "Returns: [list]: List of CPUs\n" },
+
     { NULL, NULL, 0, NULL }
 };
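[Usage note, not part of the diff] The bindings above expose the libxc cpupool calls one-to-one, so they compose in the obvious way. A rough Python 2 sketch, using only the entry points and defaults documented in the method table (assumes a cpupool-capable hypervisor and root privileges in dom0; in normal operation xend drives these calls itself):

    # Sketch only: exercise the new cpupool bindings.
    from xen.lowlevel.xc import xc

    handle = xc()

    # Create a pool (id allocated by Xen, credit scheduler by default)
    # and give it one CPU that is not yet bound to any pool.
    pool_id = handle.cpupool_create()
    free_cpus = handle.cpupool_freeinfo()        # CPUs in no pool at all
    if free_cpus:
        handle.cpupool_addcpu(pool_id, free_cpus[0])

    # Each entry is the dict built in pyxc_cpupool_getinfo().
    for pool in handle.cpupool_getinfo():
        print pool['cpupool'], pool['sched'], pool['n_dom'], pool['cpulist']

    # Domains can be created directly into a pool, or moved later;
    # domain_getinfo() now reports the binding via the new 'cpupool' key.
    domid = handle.domain_create(cpupool=pool_id)
    handle.cpupool_movedomain(pool_id, domid)
    print handle.domain_getinfo(domid, 1)[0]['cpupool']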
kwds, "|ii", kwd_list, + &first_pool, &max_pools) ) + return NULL; + + info = calloc(max_pools, sizeof(xc_cpupoolinfo_t)); + if (info == NULL) + return PyErr_NoMemory(); + + nr_pools = xc_cpupool_getinfo(self->xc_handle, first_pool, max_pools, info); + + if (nr_pools < 0) + { + free(info); + return pyxc_error_to_exception(); + } + + list = PyList_New(nr_pools); + for ( i = 0 ; i < nr_pools; i++ ) + { + info_dict = Py_BuildValue( + "{s:i,s:i,s:i,s:N}", + "cpupool", (int)info[i].cpupool_id, + "sched", info[i].sched_id, + "n_dom", info[i].n_dom, + "cpulist", cpumap_to_cpulist(info[i].cpumap)); + if ( info_dict == NULL ) + { + Py_DECREF(list); + if ( info_dict != NULL ) { Py_DECREF(info_dict); } + free(info); + return NULL; + } + PyList_SetItem(list, i, info_dict); + } + + free(info); + + return list; +} + +static PyObject *pyxc_cpupool_addcpu(XcObject *self, + PyObject *args, + PyObject *kwds) +{ + uint32_t cpupool; + int cpu = -1; + + static char *kwd_list[] = { "cpupool", "cpu", NULL }; + + if ( !PyArg_ParseTupleAndKeywords(args, kwds, "i|i", kwd_list, + &cpupool, &cpu) ) + return NULL; + + if (xc_cpupool_addcpu(self->xc_handle, cpupool, cpu) != 0) + return pyxc_error_to_exception(); + + Py_INCREF(zero); + return zero; +} + +static PyObject *pyxc_cpupool_removecpu(XcObject *self, + PyObject *args, + PyObject *kwds) +{ + uint32_t cpupool; + int cpu = -1; + + static char *kwd_list[] = { "cpupool", "cpu", NULL }; + + if ( !PyArg_ParseTupleAndKeywords(args, kwds, "i|i", kwd_list, + &cpupool, &cpu) ) + return NULL; + + if (xc_cpupool_removecpu(self->xc_handle, cpupool, cpu) != 0) + return pyxc_error_to_exception(); + + Py_INCREF(zero); + return zero; +} + +static PyObject *pyxc_cpupool_movedomain(XcObject *self, + PyObject *args, + PyObject *kwds) +{ + uint32_t cpupool, domid; + + static char *kwd_list[] = { "cpupool", "domid", NULL }; + + if ( !PyArg_ParseTupleAndKeywords(args, kwds, "ii", kwd_list, + &cpupool, &domid) ) + return NULL; + + if (xc_cpupool_movedomain(self->xc_handle, cpupool, domid) != 0) + return pyxc_error_to_exception(); + + Py_INCREF(zero); + return zero; +} + +static PyObject *pyxc_cpupool_freeinfo(XcObject *self) +{ + uint64_t cpumap; + + if (xc_cpupool_freeinfo(self->xc_handle, &cpumap) != 0) + return pyxc_error_to_exception(); + + return cpumap_to_cpulist(cpumap); +} static PyMethodDef pyxc_methods[] = { { "handle", @@ -2008,7 +2184,8 @@ static PyMethodDef pyxc_methods[] = { " maxmem_kb [int]: Maximum memory limit, in kilobytes\n" " cpu_time [long]: CPU time consumed, in nanoseconds\n" " shutdown_reason [int]: Numeric code from guest OS, explaining " - "reason why it shut itself down.\n" }, + "reason why it shut itself down.\n" + " cpupool [int] Id of cpupool domain is bound to.\n" }, { "vcpu_getinfo", (PyCFunction)pyxc_vcpu_getinfo, @@ -2148,6 +2325,24 @@ static PyMethodDef pyxc_methods[] = { METH_VARARGS, "\n" "Get the scheduling parameters for a domain when running with the\n" "SMP credit scheduler.\n" + " domid [int]: domain id to get\n" + "Returns: [dict]\n" + " weight [short]: domain's scheduling weight\n"}, + + { "sched_credit2_domain_set", + (PyCFunction)pyxc_sched_credit2_domain_set, + METH_KEYWORDS, "\n" + "Set the scheduling parameters for a domain when running with the\n" + "SMP credit2 scheduler.\n" + " domid [int]: domain id to set\n" + " weight [short]: domain's scheduling weight\n" + "Returns: [int] 0 on success; -1 on error.\n" }, + + { "sched_credit2_domain_get", + (PyCFunction)pyxc_sched_credit2_domain_get, + METH_VARARGS, "\n" + "Get the scheduling 
diff -r fadf63ab49e7 tools/python/xen/xend/XendConfig.py
--- a/tools/python/xen/xend/XendConfig.py Mon Apr 19 17:57:28 2010 +0100
+++ b/tools/python/xen/xend/XendConfig.py Tue Apr 20 11:10:40 2010 +0200
@@ -128,6 +128,7 @@ XENAPI_CFG_TO_LEGACY_CFG = {
     'PV_bootloader': 'bootloader',
     'PV_bootloader_args': 'bootloader_args',
     'Description': 'description',
+    'pool_name' : 'pool_name',
 }
 
 LEGACY_CFG_TO_XENAPI_CFG = reverse_dict(XENAPI_CFG_TO_LEGACY_CFG)
@@ -233,6 +234,7 @@ XENAPI_CFG_TYPES = {
     's3_integrity' : int,
     'superpages' : int,
     'memory_sharing': int,
+    'pool_name' : str,
     'Description': str,
 }
 
@@ -279,6 +281,7 @@ LEGACY_CFG_TYPES = {
     'bootloader':    str,
     'bootloader_args': str,
     'description':   str,
+    'pool_name':     str,
 }
 
 # Values that should be stored in xenstore's /vm/<uuid> that is used
@@ -300,6 +303,7 @@ LEGACY_XENSTORE_VM_PARAMS = [
     'on_xend_stop',
     'bootloader',
     'bootloader_args',
+    'pool_name',
 ]
 
 ##
@@ -408,6 +412,7 @@ class XendConfig(dict):
             'other_config': {},
             'platform': {},
             'target': 0,
+            'pool_name' : 'Pool-0',
             'superpages': 0,
             'description': '',
         }
 
diff -r fadf63ab49e7 tools/python/xen/xend/XendConstants.py
--- a/tools/python/xen/xend/XendConstants.py Mon Apr 19 17:57:28 2010 +0100
+++ b/tools/python/xen/xend/XendConstants.py Tue Apr 20 11:10:40 2010 +0200
@@ -133,6 +133,8 @@ VTPM_DELETE_SCRIPT = auxbin.scripts_dir(
 
 XS_VMROOT = "/vm/"
 
+XS_POOLROOT = "/local/pool/"
+
 NR_PCI_FUNC = 8
 NR_PCI_DEV = 32
 NR_PCI_DEVFN = NR_PCI_FUNC * NR_PCI_DEV
diff -r fadf63ab49e7 tools/python/xen/xend/XendDomainInfo.py
--- a/tools/python/xen/xend/XendDomainInfo.py Mon Apr 19 17:57:28 2010 +0100
+++ b/tools/python/xen/xend/XendDomainInfo.py Tue Apr 20 11:10:40 2010 +0200
@@ -60,6 +60,7 @@ from xen.xend.xenstore.xswatch import xs
 from xen.xend.xenstore.xswatch import xswatch
 from xen.xend.XendConstants import *
 from xen.xend.XendAPIConstants import *
+from xen.xend.XendCPUPool import XendCPUPool
 from xen.xend.server.DevConstants import xenbusState
 from xen.xend.server.BlktapController import TAPDISK_DEVICE, parseDeviceString
 
@@ -2540,6 +2541,19 @@ class XendDomainInfo:
         oos = self.info['platform'].get('oos', 1)
         oos_off = 1 - int(oos)
 
+        # look up the id of the pool the domain is to be created in
+        pool_name = self.info['pool_name']
+        if len(pool_name) == 0:
+            pool_name = "Pool-0"
+
+        pool = XendCPUPool.lookup_pool(pool_name)
+
+        if pool is None:
+            raise VmError("unknown pool %s" % pool_name)
+        pool_id = pool.query_pool_id()
+        if pool_id is None:
+            raise VmError("pool %s not activated" % pool_name)
+
         flags = (int(hvm) << 0) | (int(hap) << 1) | (int(s3_integrity) << 2) | (int(oos_off) << 3)
 
         try:
@@ -2548,6 +2562,7 @@ class XendDomainInfo:
                 ssidref = ssidref,
                 handle = uuid.fromString(self.info['uuid']),
                 flags = flags,
+                cpupool = pool_id,
                 target = self.info.target())
         except Exception, e:
             # may get here if due to ACM the operation is not permitted
@@ -3585,6 +3600,11 @@ class XendDomainInfo:
             retval = xc.sched_credit_domain_get(self.getDomid())
         return retval
 
+    def get_cpu_pool(self):
+        if self.getDomid() is None:
+            return None
+        xeninfo = dom_get(self.domid)
+        return xeninfo['cpupool']
     def get_power_state(self):
         return XEN_API_VM_POWER_STATE[self._stateGet()]
     def get_platform(self):
 
diff -r fadf63ab49e7 tools/python/xen/xend/XendError.py
--- a/tools/python/xen/xend/XendError.py Mon Apr 19 17:57:28 2010 +0100
+++ b/tools/python/xen/xend/XendError.py Tue Apr 20 11:10:40 2010 +0200
@@ -18,6 +18,7 @@
 
 from xmlrpclib import Fault
 
+import types
 import XendClient
 
 class XendInvalidDomain(Fault):
@@ -186,6 +187,26 @@ class DirectPCIError(XendAPIError):
     def __str__(self):
         return 'DIRECT_PCI_ERROR: %s' % self.error
 
+class PoolError(XendAPIError):
+    def __init__(self, error, spec=None):
+        XendAPIError.__init__(self)
+        self.spec = []
+        if spec:
+            if isinstance(spec, types.ListType):
+                self.spec = spec
+            else:
+                self.spec = [spec]
+        self.error = error
+
+    def get_api_error(self):
+        return [self.error] + self.spec
+
+    def __str__(self):
+        if self.spec:
+            return '%s: %s' % (self.error, self.spec)
+        else:
+            return '%s' % self.error
+
 class VDIError(XendAPIError):
     def __init__(self, error, vdi):
         XendAPIError.__init__(self)
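[Usage note, not part of the diff] PoolError mirrors the other XendAPIError subclasses: the first argument is the Xen-API error code, spec carries the offending value(s), normalised to a list. A short illustration; the error codes shown here are made up, since the real ones belong to XendCPUPool, which is outside this patch:

    from xen.xend.XendError import PoolError

    err = PoolError('UNKNOWN_POOL', 'Pool-1')   # hypothetical code
    print err.get_api_error()                   # ['UNKNOWN_POOL', 'Pool-1']
    print str(err)                              # UNKNOWN_POOL: ['Pool-1']
    print str(PoolError('POOL_BAD_STATE'))      # POOL_BAD_STATE (no spec)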
diff -r fadf63ab49e7 tools/python/xen/xend/XendNode.py
--- a/tools/python/xen/xend/XendNode.py Mon Apr 19 17:57:28 2010 +0100
+++ b/tools/python/xen/xend/XendNode.py Tue Apr 20 11:10:40 2010 +0200
@@ -43,6 +43,7 @@ from XendMonitor import XendMonitor
 from XendMonitor import XendMonitor
 from XendPPCI import XendPPCI
 from XendPSCSI import XendPSCSI, XendPSCSI_HBA
+from xen.xend.XendCPUPool import XendCPUPool
 
 class XendNode:
     """XendNode - Represents a Domain 0 Host."""
@@ -158,6 +159,8 @@ class XendNode:
 
         self._init_PPCIs()
         self._init_PSCSIs()
+
+        self._init_cpu_pools()
 
 
     def _init_networks(self):
@@ -361,6 +364,18 @@ class XendNode:
         for physical_host, pscsi_HBA_uuid in pscsi_HBA_table.items():
             XendPSCSI_HBA(pscsi_HBA_uuid, {'physical_host': physical_host})
 
+    def _init_cpu_pools(self):
+        # Initialise cpu_pools
+        saved_cpu_pools = self.state_store.load_state(XendCPUPool.getClass())
+        if saved_cpu_pools:
+            for cpu_pool_uuid, cpu_pool in saved_cpu_pools.items():
+                try:
+                    XendCPUPool.recreate(cpu_pool, cpu_pool_uuid)
+                except CreateUnspecifiedAttributeError:
+                    log.warn("Error recreating %s %s",
+                             XendCPUPool.getClass(), cpu_pool_uuid)
+        XendCPUPool.recreate_active_pools()
+
 
     def add_network(self, interface):
         # TODO
@@ -581,6 +596,7 @@ class XendNode:
         self.save_PPCIs()
         self.save_PSCSIs()
         self.save_PSCSI_HBAs()
+        self.save_cpu_pools()
 
     def save_PIFs(self):
         pif_records = dict([(pif_uuid, XendAPIStore.get(
@@ -622,6 +638,12 @@ class XendNode:
                                     pscsi_HBA_uuid, "PSCSI_HBA").get_record())
                                 for pscsi_HBA_uuid in XendPSCSI_HBA.get_all()])
         self.state_store.save_state('pscsi_HBA', pscsi_HBA_records)
+
+    def save_cpu_pools(self):
+        cpu_pool_records = dict([(cpu_pool_uuid, XendAPIStore.get(
+                                      cpu_pool_uuid, XendCPUPool.getClass()).get_record())
+                                 for cpu_pool_uuid in XendCPUPool.get_all_managed()])
+        self.state_store.save_state(XendCPUPool.getClass(), cpu_pool_records)
 
     def shutdown(self):
         return 0
@@ -925,6 +947,7 @@ class XendNode:
         # physinfo is in KiB, need it in MiB
        info['total_memory'] = info['total_memory'] / 1024
         info['free_memory']  = info['free_memory'] / 1024
+        info['free_cpus'] = len(XendCPUPool.unbound_cpus())
 
         ITEM_ORDER = ['nr_cpus',
                       'nr_nodes',
@@ -935,6 +958,7 @@ class XendNode:
                       'virt_caps',
                       'total_memory',
                       'free_memory',
+                      'free_cpus',
                       ]
 
         if show_numa != 0:
diff -r fadf63ab49e7 tools/python/xen/xend/server/SrvServer.py
--- a/tools/python/xen/xend/server/SrvServer.py Mon Apr 19 17:57:28 2010 +0100
+++ b/tools/python/xen/xend/server/SrvServer.py Tue Apr 20 11:10:40 2010 +0200
@@ -52,6 +52,7 @@ from xen.xend.XendLogging import log
 from xen.xend.XendLogging import log
 from xen.xend.XendClient import XEN_API_SOCKET
 from xen.xend.XendDomain import instance as xenddomain
+from xen.xend.XendCPUPool import XendCPUPool
 from xen.web.SrvDir import SrvDir
 
 from SrvRoot import SrvRoot
@@ -146,6 +147,12 @@ class XendServers:
                     status.close()
                     status = None
 
+                # auto start pools before domains are started
+                try:
+                    XendCPUPool.autostart_pools()
+                except Exception, e:
+                    log.exception("Failed while autostarting pools")
+
                 # Reaching this point means we can auto start domains
                 try:
                     xenddomain().autostart_domains()
 
diff -r fadf63ab49e7 tools/python/xen/xend/server/XMLRPCServer.py
--- a/tools/python/xen/xend/server/XMLRPCServer.py Mon Apr 19 17:57:28 2010 +0100
+++ b/tools/python/xen/xend/server/XMLRPCServer.py Tue Apr 20 11:10:40 2010 +0200
@@ -33,6 +33,7 @@ from xen.xend.XendConstants import DOM_S
 from xen.xend.XendConstants import DOM_STATE_RUNNING
 from xen.xend.XendLogging import log
 from xen.xend.XendError import XendInvalidDomain
+from xen.xend.XendCPUPool import XendCPUPool
 
 # vcpu_avail is a long and is not needed by the clients.  It's far easier
 # to just remove it then to try and marshal the long.
@@ -97,6 +98,10 @@ methods = ['device_create', 'device_conf
            'getRestartCount', 'getBlockDeviceClass']
 
 exclude = ['domain_create', 'domain_restore']
+
+POOL_FUNCS = ['pool_create', 'pool_new', 'pool_start', 'pool_list',
+              'pool_destroy', 'pool_delete', 'pool_cpu_add', 'pool_cpu_remove',
+              'pool_migrate']
 
 class XMLRPCServer:
     def __init__(self, auth, use_xenapi, use_tcp = False,
@@ -197,6 +202,11 @@ class XMLRPCServer:
                 if name not in exclude:
                     self.server.register_function(fn, "xend.domain.%s" % name[7:])
 
+        # Functions in XendCPUPool
+        for name in POOL_FUNCS:
+            fn = getattr(XendCPUPool, name)
+            self.server.register_function(fn, "xend.cpu_pool.%s" % name[5:])
+
         # Functions in XendNode and XendDmesg
         for type, lst, n in [(XendNode,
                               ['info', 'pciinfo', 'send_debug_keys',
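[Usage note, not part of the diff] With the registration loop above, the pool operations become reachable as xend.cpu_pool.create/new/start/list/destroy/delete/cpu_add/cpu_remove/migrate, next to the existing xend.domain.* methods. A client sketch over the legacy unix-socket transport; the socket path follows xend's own XendClient, and the argument shapes are guesses, since the XendCPUPool signatures are not part of this patch:

    # Sketch: legacy XML-RPC access to the new pool methods.
    from xen.util.xmlrpclib2 import ServerProxy

    server = ServerProxy('httpu:///var/run/xend/xmlrpc.sock')

    # Method names follow "xend.cpu_pool.%s" % name[5:] from the loop above.
    print server.xend.cpu_pool.list()               # argument shapes assumed
    server.xend.cpu_pool.start('Pool-1')
    server.xend.cpu_pool.migrate('mydomain', 'Pool-1')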
diff -r fadf63ab49e7 tools/python/xen/xm/create.dtd
--- a/tools/python/xen/xm/create.dtd Mon Apr 19 17:57:28 2010 +0100
+++ b/tools/python/xen/xm/create.dtd Tue Apr 20 11:10:40 2010 +0200
@@ -50,6 +50,7 @@
                  s3_integrity           CDATA #REQUIRED
                  vcpus_max              CDATA #REQUIRED
                  vcpus_at_startup       CDATA #REQUIRED
+                 pool_name              CDATA #REQUIRED
                  actions_after_shutdown %NORMAL_EXIT; #REQUIRED
                  actions_after_reboot   %NORMAL_EXIT; #REQUIRED
                  actions_after_crash    %CRASH_BEHAVIOUR; #REQUIRED
 
diff -r fadf63ab49e7 tools/python/xen/xm/create.py
--- a/tools/python/xen/xm/create.py Mon Apr 19 17:57:28 2010 +0100
+++ b/tools/python/xen/xm/create.py Tue Apr 20 11:10:40 2010 +0200
@@ -659,6 +659,10 @@ gopts.var('suppress_spurious_page_faults
           fn=set_bool, default=None,
           use="""Do not inject spurious page faults into this guest""")
 
+gopts.var('pool', val='POOL NAME',
+          fn=set_value, default=None,
+          use="""CPU pool to use for the VM""")
+
 gopts.var('pci_msitranslate', val='TRANSLATE',
           fn=set_int, default=1,
           use="""Global PCI MSI-INTx translation flag (0=disable;
@@ -1147,6 +1151,8 @@ def make_config(vals):
         config.append(['localtime', vals.localtime])
     if vals.oos:
         config.append(['oos', vals.oos])
+    if vals.pool:
+        config.append(['pool_name', vals.pool])
 
     config_image = configure_image(vals)
     if vals.bootloader:
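[Usage note, not part of the diff] In a domain config file the new option is just one more assignment; xm create turns it into the pool_name entry consumed by XendConfig/XendDomainInfo above. A minimal fragment, all other values arbitrary:

    # /etc/xen/mydomain -- xm config files are Python syntax
    kernel = "/boot/vmlinuz-2.6-xen"
    memory = 512
    name   = "mydomain"
    vcpus  = 2
    pool   = "Pool-1"    # new: start the domain in this CPU pool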
diff -r fadf63ab49e7 tools/python/xen/xm/main.py
--- a/tools/python/xen/xm/main.py Mon Apr 19 17:57:28 2010 +0100
+++ b/tools/python/xen/xm/main.py Tue Apr 20 11:10:40 2010 +0200
@@ -56,6 +56,7 @@ import xen.util.xsm.xsm as security
 import xen.util.xsm.xsm as security
 from xen.util.xsm.xsm import XSMError
 from xen.util.acmpolicy import ACM_LABEL_UNLABELED_DISPLAY
+from xen.util.sxputils import sxp2map, map2sxp as map_to_sxp
 from xen.util import auxbin
 
 import XenAPI
@@ -238,6 +239,23 @@ SUBCOMMAND_HELP = {
     'tmem-freeable'  :  ('', 'Print freeable tmem (in MiB).'),
     'tmem-shared-auth' :  ('[<Domain>|-a|--all] [--uuid=<uuid>] [--auth=<0|1>]', 'De/authenticate shared tmem pool.'),
 
+    #
+    # pool commands
+    #
+    'pool-create'   :  ('<ConfigFile> [vars]',
+                        'Create a CPU pool based on a ConfigFile.'),
+    'pool-new'      :  ('<ConfigFile> [vars]',
+                        'Adds a CPU pool to Xend CPU pool management'),
+    'pool-start'    :  ('<CPU Pool>', 'Starts a Xend CPU pool'),
+    'pool-list'     :  ('[<CPU Pool>] [-l|--long] [-c|--cpus]', 'List CPU pools on host'),
+    'pool-destroy'  :  ('<CPU Pool>', 'Deactivates a CPU pool'),
+    'pool-delete'   :  ('<CPU Pool>',
+                        'Removes a CPU pool from Xend management'),
+    'pool-cpu-add'  :  ('<CPU Pool> <CPU nr>', 'Adds a CPU to a CPU pool'),
+    'pool-cpu-remove': ('<CPU Pool> <CPU nr>', 'Removes a CPU from a CPU pool'),
+    'pool-migrate'  :  ('<Domain> <CPU Pool>',
+                        'Moves a domain into a CPU pool'),
+
     # security
 
     'addlabel'      :  ('