From: David Hildenbrand <david@redhat.com>
To: qemu-devel@nongnu.org
Cc: "David Hildenbrand" <david@redhat.com>,
"Michal Privoznik" <mprivozn@redhat.com>,
"Igor Mammedov" <imammedo@redhat.com>,
"Michael S. Tsirkin" <mst@redhat.com>,
"Paolo Bonzini" <pbonzini@redhat.com>,
"Daniel P. Berrangé" <berrange@redhat.com>,
"Eduardo Habkost" <eduardo@habkost.net>,
"Dr . David Alan Gilbert" <dgilbert@redhat.com>,
"Eric Blake" <eblake@redhat.com>,
"Markus Armbruster" <armbru@redhat.com>,
"Richard Henderson" <richard.henderson@linaro.org>,
"Stefan Weil" <sw@weilnetz.de>
Subject: [PATCH RFC 4/7] util: Add write-only "node-affinity" property for ThreadContext
Date: Thu, 21 Jul 2022 14:07:29 +0200
Message-ID: <20220721120732.118133-5-david@redhat.com>
In-Reply-To: <20220721120732.118133-1-david@redhat.com>

Let's make it easier to pin threads created via a ThreadContext to
all current CPUs belonging to the given NUMA nodes.

As "node-affinity" is simply a shortcut for setting "cpu-affinity", that
property cannot be read back, and if the CPUs belonging to a node change
due to CPU hotplug, the CPU affinity will not get updated.
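
As an illustration (a usage sketch only, not taken from this patch; the
exact -object spelling for list-valued properties, in particular for
specifying several nodes at once, may differ), a thread context whose
threads are pinned to the CPUs of host NUMA node 1 could be created via:

    # hypothetical command line: pin the context thread (and any threads
    # it will create) to the CPUs currently belonging to host node 1
    qemu-system-x86_64 ... \
        -object thread-context,id=tc1,node-affinity=1
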
Signed-off-by: David Hildenbrand <david@redhat.com>
---
qapi/qom.json | 7 +++-
util/meson.build | 2 +-
util/thread-context.c | 84 +++++++++++++++++++++++++++++++++++++++++++
3 files changed, 91 insertions(+), 2 deletions(-)
diff --git a/qapi/qom.json b/qapi/qom.json
index 4775a333ed..d36bf3355f 100644
--- a/qapi/qom.json
+++ b/qapi/qom.json
@@ -838,10 +838,15 @@
# @cpu-affinity: the CPU affinity for all threads created in the thread
# context (default: QEMU main thread affinity)
#
+# @node-affinity: shortcut for looking up the current CPUs for the given nodes
+# and setting @cpu-affinity (default: QEMU main thread
+# affinity)
+#
# Since: 7.2
##
{ 'struct': 'ThreadContextProperties',
- 'data': { '*cpu-affinity': ['uint16'] } }
+ 'data': { '*cpu-affinity': ['uint16'],
+ '*node-affinity': ['uint16'] } }
##
diff --git a/util/meson.build b/util/meson.build
index e97cd2d779..c0a7bc54d4 100644
--- a/util/meson.build
+++ b/util/meson.build
@@ -1,5 +1,5 @@
util_ss.add(files('osdep.c', 'cutils.c', 'unicode.c', 'qemu-timer-common.c'))
-util_ss.add(files('thread-context.c'))
+util_ss.add(files('thread-context.c'), numa)
if not config_host_data.get('CONFIG_ATOMIC64')
util_ss.add(files('atomic64.c'))
endif
diff --git a/util/thread-context.c b/util/thread-context.c
index dcd607c532..880f0441be 100644
--- a/util/thread-context.c
+++ b/util/thread-context.c
@@ -22,6 +22,10 @@
#include "qemu/module.h"
#include "qemu/bitmap.h"
+#ifdef CONFIG_NUMA
+#include <numa.h>
+#endif
+
enum {
TC_CMD_NONE = 0,
TC_CMD_STOP,
@@ -89,6 +93,11 @@ static void thread_context_set_cpu_affinity(Object *obj, Visitor *v,
int nbits = 0, ret;
Error *err = NULL;
+ if (tc->init_cpu_bitmap) {
+ error_setg(errp, "Mixing CPU and node affinity not supported");
+ return;
+ }
+
visit_type_uint16List(v, name, &host_cpus, &err);
if (err) {
error_propagate(errp, err);
@@ -160,6 +169,79 @@ static void thread_context_get_cpu_affinity(Object *obj, Visitor *v,
qapi_free_uint16List(host_cpus);
}
+static void thread_context_set_node_affinity(Object *obj, Visitor *v,
+ const char *name, void *opaque,
+ Error **errp)
+{
+#ifdef CONFIG_NUMA
+ const int nbits = numa_num_possible_cpus();
+ ThreadContext *tc = THREAD_CONTEXT(obj);
+ uint16List *l, *host_nodes = NULL;
+ unsigned long *bitmap = NULL;
+ struct bitmask *tmp_cpus;
+ Error *err = NULL;
+ int ret, i;
+
+ if (tc->init_cpu_bitmap) {
+ error_setg(errp, "Mixing CPU and node affinity not supported");
+ return;
+ }
+
+ visit_type_uint16List(v, name, &host_nodes, &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+
+ if (!host_nodes) {
+ error_setg(errp, "Node list is empty");
+ goto out;
+ }
+
+ bitmap = bitmap_new(nbits);
+ tmp_cpus = numa_allocate_cpumask();
+ for (l = host_nodes; l; l = l->next) {
+ numa_bitmask_clearall(tmp_cpus);
+ ret = numa_node_to_cpus(l->value, tmp_cpus);
+ if (ret) {
+ /* We ignore any errors, such as impossible nodes. */
+ continue;
+ }
+ for (i = 0; i < nbits; i++) {
+ if (numa_bitmask_isbitset(tmp_cpus, i)) {
+ set_bit(i, bitmap);
+ }
+ }
+ }
+ numa_free_cpumask(tmp_cpus);
+
+ if (bitmap_empty(bitmap, nbits)) {
+        error_setg(errp, "The selected nodes contain no CPUs");
+ goto out;
+ }
+
+ if (tc->thread_id != -1) {
+ /*
+         * Note: for now we only adjust the affinity of the context thread
+         * itself, not of any threads it has already spawned.
+ */
+ ret = qemu_thread_set_affinity(&tc->thread, bitmap, nbits);
+ if (ret) {
+ error_setg(errp, "Setting CPU affinity failed: %s", strerror(ret));
+ }
+ } else {
+ tc->init_cpu_bitmap = bitmap;
+ bitmap = NULL;
+ tc->init_cpu_nbits = nbits;
+ }
+out:
+ g_free(bitmap);
+ qapi_free_uint16List(host_nodes);
+#else
+ error_setg(errp, "NUMA node affinity is not supported by this QEMU");
+#endif
+}
+
static void thread_context_get_thread_id(Object *obj, Visitor *v,
const char *name, void *opaque,
Error **errp)
@@ -209,6 +291,8 @@ static void thread_context_class_init(ObjectClass *oc, void *data)
object_class_property_add(oc, "cpu-affinity", "int",
thread_context_get_cpu_affinity,
thread_context_set_cpu_affinity, NULL, NULL);
+ object_class_property_add(oc, "node-affinity", "int", NULL,
+ thread_context_set_node_affinity, NULL, NULL);
}
static void thread_context_instance_init(Object *obj)
--
2.35.3