From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
To: stable@vger.kernel.org
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
	patches@lists.linux.dev, Ming Lei <ming.lei@redhat.com>,
	Thomas Gleixner <tglx@linutronix.de>,
	Christoph Hellwig <hch@lst.de>, Jens Axboe <axboe@kernel.dk>,
	Sasha Levin <sashal@kernel.org>
Subject: [PATCH 6.1 116/150] genirq/affinity: Move group_cpus_evenly() into lib/
Date: Mon,  8 Jan 2024 16:36:07 +0100
Message-ID: <20240108153516.532546137@linuxfoundation.org>
In-Reply-To: <20240108153511.214254205@linuxfoundation.org>

6.1-stable review patch.  If anyone has any objections, please let me know.

------------------

From: Ming Lei <ming.lei@redhat.com>

[ Upstream commit f7b3ea8cf72f3d6060fe08e461805181e7450a13 ]

group_cpus_evenly() has become a generic function which can be used by
subsystems other than the interrupt subsystem, so move it into lib/.
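
For reference, a minimal sketch of how a built-in caller outside the
interrupt subsystem might use the relocated helper. This is purely
illustrative and not part of the patch; the function name and the
numgrps value are hypothetical:

	#include <linux/kernel.h>
	#include <linux/group_cpus.h>
	#include <linux/slab.h>

	/* Illustrative only: split the CPUs into four locality-aware groups. */
	static int example_build_queue_groups(void)
	{
		unsigned int numgrps = 4;	/* hypothetical queue count */
		struct cpumask *masks;
		unsigned int i;

		masks = group_cpus_evenly(numgrps);
		if (!masks)
			return -ENOMEM;

		for (i = 0; i < numgrps; i++)
			pr_info("group %u: %*pbl\n", i, cpumask_pr_args(&masks[i]));

		/* The array is allocated with kcalloc(); the caller owns and frees it. */
		kfree(masks);
		return 0;
	}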

Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jens Axboe <axboe@kernel.dk>
Link: https://lore.kernel.org/r/20221227022905.352674-6-ming.lei@redhat.com
Stable-dep-of: 0263f92fadbb ("lib/group_cpus.c: avoid acquiring cpu hotplug lock in group_cpus_evenly")
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 MAINTAINERS                |   2 +
 include/linux/group_cpus.h |  14 ++
 kernel/irq/affinity.c      | 398 +---------------------------------
 lib/Makefile               |   2 +
 lib/group_cpus.c           | 427 +++++++++++++++++++++++++++++++++++++
 5 files changed, 446 insertions(+), 397 deletions(-)
 create mode 100644 include/linux/group_cpus.h
 create mode 100644 lib/group_cpus.c

diff --git a/MAINTAINERS b/MAINTAINERS
index 07a9c274c0e29..13d1078808bb5 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -10803,6 +10803,8 @@ L:	linux-kernel@vger.kernel.org
 S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
 F:	kernel/irq/
+F:	include/linux/group_cpus.h
+F:	lib/group_cpus.c
 
 IRQCHIP DRIVERS
 M:	Thomas Gleixner <tglx@linutronix.de>
diff --git a/include/linux/group_cpus.h b/include/linux/group_cpus.h
new file mode 100644
index 0000000000000..e42807ec61f6e
--- /dev/null
+++ b/include/linux/group_cpus.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2016 Thomas Gleixner.
+ * Copyright (C) 2016-2017 Christoph Hellwig.
+ */
+
+#ifndef __LINUX_GROUP_CPUS_H
+#define __LINUX_GROUP_CPUS_H
+#include <linux/kernel.h>
+#include <linux/cpu.h>
+
+struct cpumask *group_cpus_evenly(unsigned int numgrps);
+
+#endif
diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
index 54083331f1bcb..44a4eba80315c 100644
--- a/kernel/irq/affinity.c
+++ b/kernel/irq/affinity.c
@@ -7,403 +7,7 @@
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/cpu.h>
-#include <linux/sort.h>
-
-static void grp_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk,
-				unsigned int cpus_per_grp)
-{
-	const struct cpumask *siblmsk;
-	int cpu, sibl;
-
-	for ( ; cpus_per_grp > 0; ) {
-		cpu = cpumask_first(nmsk);
-
-		/* Should not happen, but I'm too lazy to think about it */
-		if (cpu >= nr_cpu_ids)
-			return;
-
-		cpumask_clear_cpu(cpu, nmsk);
-		cpumask_set_cpu(cpu, irqmsk);
-		cpus_per_grp--;
-
-		/* If the cpu has siblings, use them first */
-		siblmsk = topology_sibling_cpumask(cpu);
-		for (sibl = -1; cpus_per_grp > 0; ) {
-			sibl = cpumask_next(sibl, siblmsk);
-			if (sibl >= nr_cpu_ids)
-				break;
-			if (!cpumask_test_and_clear_cpu(sibl, nmsk))
-				continue;
-			cpumask_set_cpu(sibl, irqmsk);
-			cpus_per_grp--;
-		}
-	}
-}
-
-static cpumask_var_t *alloc_node_to_cpumask(void)
-{
-	cpumask_var_t *masks;
-	int node;
-
-	masks = kcalloc(nr_node_ids, sizeof(cpumask_var_t), GFP_KERNEL);
-	if (!masks)
-		return NULL;
-
-	for (node = 0; node < nr_node_ids; node++) {
-		if (!zalloc_cpumask_var(&masks[node], GFP_KERNEL))
-			goto out_unwind;
-	}
-
-	return masks;
-
-out_unwind:
-	while (--node >= 0)
-		free_cpumask_var(masks[node]);
-	kfree(masks);
-	return NULL;
-}
-
-static void free_node_to_cpumask(cpumask_var_t *masks)
-{
-	int node;
-
-	for (node = 0; node < nr_node_ids; node++)
-		free_cpumask_var(masks[node]);
-	kfree(masks);
-}
-
-static void build_node_to_cpumask(cpumask_var_t *masks)
-{
-	int cpu;
-
-	for_each_possible_cpu(cpu)
-		cpumask_set_cpu(cpu, masks[cpu_to_node(cpu)]);
-}
-
-static int get_nodes_in_cpumask(cpumask_var_t *node_to_cpumask,
-				const struct cpumask *mask, nodemask_t *nodemsk)
-{
-	int n, nodes = 0;
-
-	/* Calculate the number of nodes in the supplied affinity mask */
-	for_each_node(n) {
-		if (cpumask_intersects(mask, node_to_cpumask[n])) {
-			node_set(n, *nodemsk);
-			nodes++;
-		}
-	}
-	return nodes;
-}
-
-struct node_groups {
-	unsigned id;
-
-	union {
-		unsigned ngroups;
-		unsigned ncpus;
-	};
-};
-
-static int ncpus_cmp_func(const void *l, const void *r)
-{
-	const struct node_groups *ln = l;
-	const struct node_groups *rn = r;
-
-	return ln->ncpus - rn->ncpus;
-}
-
-/*
- * Allocate group number for each node, so that for each node:
- *
- * 1) the allocated number is >= 1
- *
- * 2) the allocated number is <= active CPU number of this node
- *
- * The actual allocated total groups may be less than @numgrps when
- * active total CPU number is less than @numgrps.
- *
- * Active CPUs means the CPUs in '@cpu_mask AND @node_to_cpumask[]'
- * for each node.
- */
-static void alloc_nodes_groups(unsigned int numgrps,
-			       cpumask_var_t *node_to_cpumask,
-			       const struct cpumask *cpu_mask,
-			       const nodemask_t nodemsk,
-			       struct cpumask *nmsk,
-			       struct node_groups *node_groups)
-{
-	unsigned n, remaining_ncpus = 0;
-
-	for (n = 0; n < nr_node_ids; n++) {
-		node_groups[n].id = n;
-		node_groups[n].ncpus = UINT_MAX;
-	}
-
-	for_each_node_mask(n, nodemsk) {
-		unsigned ncpus;
-
-		cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]);
-		ncpus = cpumask_weight(nmsk);
-
-		if (!ncpus)
-			continue;
-		remaining_ncpus += ncpus;
-		node_groups[n].ncpus = ncpus;
-	}
-
-	numgrps = min_t(unsigned, remaining_ncpus, numgrps);
-
-	sort(node_groups, nr_node_ids, sizeof(node_groups[0]),
-	     ncpus_cmp_func, NULL);
-
-	/*
-	 * Allocate groups for each node according to the ratio of this
-	 * node's nr_cpus to remaining un-assigned ncpus. 'numgrps' is
-	 * bigger than number of active numa nodes. Always start the
-	 * allocation from the node with minimized nr_cpus.
-	 *
-	 * This way guarantees that each active node gets allocated at
-	 * least one group, and the theory is simple: over-allocation
-	 * is only done when this node is assigned by one group, so
-	 * other nodes will be allocated >= 1 groups, since 'numgrps' is
-	 * bigger than number of numa nodes.
-	 *
-	 * One perfect invariant is that number of allocated groups for
-	 * each node is <= CPU count of this node:
-	 *
-	 * 1) suppose there are two nodes: A and B
-	 * 	ncpu(X) is CPU count of node X
-	 * 	grps(X) is the group count allocated to node X via this
-	 * 	algorithm
-	 *
-	 * 	ncpu(A) <= ncpu(B)
-	 * 	ncpu(A) + ncpu(B) = N
-	 * 	grps(A) + grps(B) = G
-	 *
-	 * 	grps(A) = max(1, round_down(G * ncpu(A) / N))
-	 * 	grps(B) = G - grps(A)
-	 *
-	 * 	both N and G are integer, and 2 <= G <= N, suppose
-	 * 	G = N - delta, and 0 <= delta <= N - 2
-	 *
-	 * 2) obviously grps(A) <= ncpu(A) because:
-	 *
-	 * 	if grps(A) is 1, then grps(A) <= ncpu(A) given
-	 * 	ncpu(A) >= 1
-	 *
-	 * 	otherwise,
-	 * 		grps(A) <= G * ncpu(A) / N <= ncpu(A), given G <= N
-	 *
-	 * 3) prove how grps(B) <= ncpu(B):
-	 *
-	 * 	if round_down(G * ncpu(A) / N) == 0, vecs(B) won't be
-	 * 	over-allocated, so grps(B) <= ncpu(B),
-	 *
-	 * 	otherwise:
-	 *
-	 * 	grps(A) =
-	 * 		round_down(G * ncpu(A) / N) =
-	 * 		round_down((N - delta) * ncpu(A) / N) =
-	 * 		round_down((N * ncpu(A) - delta * ncpu(A)) / N)	 >=
-	 * 		round_down((N * ncpu(A) - delta * N) / N)	 =
-	 * 		cpu(A) - delta
-	 *
-	 * 	then:
-	 *
-	 * 	grps(A) - G >= ncpu(A) - delta - G
-	 * 	=>
-	 * 	G - grps(A) <= G + delta - ncpu(A)
-	 * 	=>
-	 * 	grps(B) <= N - ncpu(A)
-	 * 	=>
-	 * 	grps(B) <= cpu(B)
-	 *
-	 * For nodes >= 3, it can be thought as one node and another big
-	 * node given that is exactly what this algorithm is implemented,
-	 * and we always re-calculate 'remaining_ncpus' & 'numgrps', and
-	 * finally for each node X: grps(X) <= ncpu(X).
-	 *
-	 */
-	for (n = 0; n < nr_node_ids; n++) {
-		unsigned ngroups, ncpus;
-
-		if (node_groups[n].ncpus == UINT_MAX)
-			continue;
-
-		WARN_ON_ONCE(numgrps == 0);
-
-		ncpus = node_groups[n].ncpus;
-		ngroups = max_t(unsigned, 1,
-				 numgrps * ncpus / remaining_ncpus);
-		WARN_ON_ONCE(ngroups > ncpus);
-
-		node_groups[n].ngroups = ngroups;
-
-		remaining_ncpus -= ncpus;
-		numgrps -= ngroups;
-	}
-}
-
-static int __group_cpus_evenly(unsigned int startgrp, unsigned int numgrps,
-			       cpumask_var_t *node_to_cpumask,
-			       const struct cpumask *cpu_mask,
-			       struct cpumask *nmsk, struct cpumask *masks)
-{
-	unsigned int i, n, nodes, cpus_per_grp, extra_grps, done = 0;
-	unsigned int last_grp = numgrps;
-	unsigned int curgrp = startgrp;
-	nodemask_t nodemsk = NODE_MASK_NONE;
-	struct node_groups *node_groups;
-
-	if (cpumask_empty(cpu_mask))
-		return 0;
-
-	nodes = get_nodes_in_cpumask(node_to_cpumask, cpu_mask, &nodemsk);
-
-	/*
-	 * If the number of nodes in the mask is greater than or equal the
-	 * number of groups we just spread the groups across the nodes.
-	 */
-	if (numgrps <= nodes) {
-		for_each_node_mask(n, nodemsk) {
-			/* Ensure that only CPUs which are in both masks are set */
-			cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]);
-			cpumask_or(&masks[curgrp], &masks[curgrp], nmsk);
-			if (++curgrp == last_grp)
-				curgrp = 0;
-		}
-		return numgrps;
-	}
-
-	node_groups = kcalloc(nr_node_ids,
-			       sizeof(struct node_groups),
-			       GFP_KERNEL);
-	if (!node_groups)
-		return -ENOMEM;
-
-	/* allocate group number for each node */
-	alloc_nodes_groups(numgrps, node_to_cpumask, cpu_mask,
-			   nodemsk, nmsk, node_groups);
-	for (i = 0; i < nr_node_ids; i++) {
-		unsigned int ncpus, v;
-		struct node_groups *nv = &node_groups[i];
-
-		if (nv->ngroups == UINT_MAX)
-			continue;
-
-		/* Get the cpus on this node which are in the mask */
-		cpumask_and(nmsk, cpu_mask, node_to_cpumask[nv->id]);
-		ncpus = cpumask_weight(nmsk);
-		if (!ncpus)
-			continue;
-
-		WARN_ON_ONCE(nv->ngroups > ncpus);
-
-		/* Account for rounding errors */
-		extra_grps = ncpus - nv->ngroups * (ncpus / nv->ngroups);
-
-		/* Spread allocated groups on CPUs of the current node */
-		for (v = 0; v < nv->ngroups; v++, curgrp++) {
-			cpus_per_grp = ncpus / nv->ngroups;
-
-			/* Account for extra groups to compensate rounding errors */
-			if (extra_grps) {
-				cpus_per_grp++;
-				--extra_grps;
-			}
-
-			/*
-			 * wrapping has to be considered given 'startgrp'
-			 * may start anywhere
-			 */
-			if (curgrp >= last_grp)
-				curgrp = 0;
-			grp_spread_init_one(&masks[curgrp], nmsk,
-						cpus_per_grp);
-		}
-		done += nv->ngroups;
-	}
-	kfree(node_groups);
-	return done;
-}
-
-/*
- * build affinity in two stages for each group, and try to put close CPUs
- * in viewpoint of CPU and NUMA locality into same group, and we run
- * two-stage grouping:
- *
- *	1) allocate present CPUs on these groups evenly first
- *	2) allocate other possible CPUs on these groups evenly
- */
-static struct cpumask *group_cpus_evenly(unsigned int numgrps)
-{
-	unsigned int curgrp = 0, nr_present = 0, nr_others = 0;
-	cpumask_var_t *node_to_cpumask;
-	cpumask_var_t nmsk, npresmsk;
-	int ret = -ENOMEM;
-	struct cpumask *masks = NULL;
-
-	if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
-		return NULL;
-
-	if (!zalloc_cpumask_var(&npresmsk, GFP_KERNEL))
-		goto fail_nmsk;
-
-	node_to_cpumask = alloc_node_to_cpumask();
-	if (!node_to_cpumask)
-		goto fail_npresmsk;
-
-	masks = kcalloc(numgrps, sizeof(*masks), GFP_KERNEL);
-	if (!masks)
-		goto fail_node_to_cpumask;
-
-	/* Stabilize the cpumasks */
-	cpus_read_lock();
-	build_node_to_cpumask(node_to_cpumask);
-
-	/* grouping present CPUs first */
-	ret = __group_cpus_evenly(curgrp, numgrps, node_to_cpumask,
-				  cpu_present_mask, nmsk, masks);
-	if (ret < 0)
-		goto fail_build_affinity;
-	nr_present = ret;
-
-	/*
-	 * Allocate non present CPUs starting from the next group to be
-	 * handled. If the grouping of present CPUs already exhausted the
-	 * group space, assign the non present CPUs to the already
-	 * allocated out groups.
-	 */
-	if (nr_present >= numgrps)
-		curgrp = 0;
-	else
-		curgrp = nr_present;
-	cpumask_andnot(npresmsk, cpu_possible_mask, cpu_present_mask);
-	ret = __group_cpus_evenly(curgrp, numgrps, node_to_cpumask,
-				  npresmsk, nmsk, masks);
-	if (ret >= 0)
-		nr_others = ret;
-
- fail_build_affinity:
-	cpus_read_unlock();
-
-	if (ret >= 0)
-		WARN_ON(nr_present + nr_others < numgrps);
-
- fail_node_to_cpumask:
-	free_node_to_cpumask(node_to_cpumask);
-
- fail_npresmsk:
-	free_cpumask_var(npresmsk);
-
- fail_nmsk:
-	free_cpumask_var(nmsk);
-	if (ret < 0) {
-		kfree(masks);
-		return NULL;
-	}
-	return masks;
-}
+#include <linux/group_cpus.h>
 
 static void default_calc_sets(struct irq_affinity *affd, unsigned int affvecs)
 {
diff --git a/lib/Makefile b/lib/Makefile
index 5ffe72ec99797..6f1611d053e6a 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -361,6 +361,8 @@ obj-$(CONFIG_SBITMAP) += sbitmap.o
 
 obj-$(CONFIG_PARMAN) += parman.o
 
+obj-y += group_cpus.o
+
 # GCC library routines
 obj-$(CONFIG_GENERIC_LIB_ASHLDI3) += ashldi3.o
 obj-$(CONFIG_GENERIC_LIB_ASHRDI3) += ashrdi3.o
diff --git a/lib/group_cpus.c b/lib/group_cpus.c
new file mode 100644
index 0000000000000..99f08c6cb9d97
--- /dev/null
+++ b/lib/group_cpus.c
@@ -0,0 +1,427 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2016 Thomas Gleixner.
+ * Copyright (C) 2016-2017 Christoph Hellwig.
+ */
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/cpu.h>
+#include <linux/sort.h>
+#include <linux/group_cpus.h>
+
+static void grp_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk,
+				unsigned int cpus_per_grp)
+{
+	const struct cpumask *siblmsk;
+	int cpu, sibl;
+
+	for ( ; cpus_per_grp > 0; ) {
+		cpu = cpumask_first(nmsk);
+
+		/* Should not happen, but I'm too lazy to think about it */
+		if (cpu >= nr_cpu_ids)
+			return;
+
+		cpumask_clear_cpu(cpu, nmsk);
+		cpumask_set_cpu(cpu, irqmsk);
+		cpus_per_grp--;
+
+		/* If the cpu has siblings, use them first */
+		siblmsk = topology_sibling_cpumask(cpu);
+		for (sibl = -1; cpus_per_grp > 0; ) {
+			sibl = cpumask_next(sibl, siblmsk);
+			if (sibl >= nr_cpu_ids)
+				break;
+			if (!cpumask_test_and_clear_cpu(sibl, nmsk))
+				continue;
+			cpumask_set_cpu(sibl, irqmsk);
+			cpus_per_grp--;
+		}
+	}
+}
+
+static cpumask_var_t *alloc_node_to_cpumask(void)
+{
+	cpumask_var_t *masks;
+	int node;
+
+	masks = kcalloc(nr_node_ids, sizeof(cpumask_var_t), GFP_KERNEL);
+	if (!masks)
+		return NULL;
+
+	for (node = 0; node < nr_node_ids; node++) {
+		if (!zalloc_cpumask_var(&masks[node], GFP_KERNEL))
+			goto out_unwind;
+	}
+
+	return masks;
+
+out_unwind:
+	while (--node >= 0)
+		free_cpumask_var(masks[node]);
+	kfree(masks);
+	return NULL;
+}
+
+static void free_node_to_cpumask(cpumask_var_t *masks)
+{
+	int node;
+
+	for (node = 0; node < nr_node_ids; node++)
+		free_cpumask_var(masks[node]);
+	kfree(masks);
+}
+
+static void build_node_to_cpumask(cpumask_var_t *masks)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		cpumask_set_cpu(cpu, masks[cpu_to_node(cpu)]);
+}
+
+static int get_nodes_in_cpumask(cpumask_var_t *node_to_cpumask,
+				const struct cpumask *mask, nodemask_t *nodemsk)
+{
+	int n, nodes = 0;
+
+	/* Calculate the number of nodes in the supplied affinity mask */
+	for_each_node(n) {
+		if (cpumask_intersects(mask, node_to_cpumask[n])) {
+			node_set(n, *nodemsk);
+			nodes++;
+		}
+	}
+	return nodes;
+}
+
+struct node_groups {
+	unsigned id;
+
+	union {
+		unsigned ngroups;
+		unsigned ncpus;
+	};
+};
+
+static int ncpus_cmp_func(const void *l, const void *r)
+{
+	const struct node_groups *ln = l;
+	const struct node_groups *rn = r;
+
+	return ln->ncpus - rn->ncpus;
+}
+
+/*
+ * Allocate group number for each node, so that for each node:
+ *
+ * 1) the allocated number is >= 1
+ *
+ * 2) the allocated number is <= active CPU number of this node
+ *
+ * The actual allocated total groups may be less than @numgrps when
+ * active total CPU number is less than @numgrps.
+ *
+ * Active CPUs means the CPUs in '@cpu_mask AND @node_to_cpumask[]'
+ * for each node.
+ */
+static void alloc_nodes_groups(unsigned int numgrps,
+			       cpumask_var_t *node_to_cpumask,
+			       const struct cpumask *cpu_mask,
+			       const nodemask_t nodemsk,
+			       struct cpumask *nmsk,
+			       struct node_groups *node_groups)
+{
+	unsigned n, remaining_ncpus = 0;
+
+	for (n = 0; n < nr_node_ids; n++) {
+		node_groups[n].id = n;
+		node_groups[n].ncpus = UINT_MAX;
+	}
+
+	for_each_node_mask(n, nodemsk) {
+		unsigned ncpus;
+
+		cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]);
+		ncpus = cpumask_weight(nmsk);
+
+		if (!ncpus)
+			continue;
+		remaining_ncpus += ncpus;
+		node_groups[n].ncpus = ncpus;
+	}
+
+	numgrps = min_t(unsigned, remaining_ncpus, numgrps);
+
+	sort(node_groups, nr_node_ids, sizeof(node_groups[0]),
+	     ncpus_cmp_func, NULL);
+
+	/*
+	 * Allocate groups for each node according to the ratio of this
+	 * node's nr_cpus to the remaining un-assigned ncpus. 'numgrps'
+	 * is bigger than the number of active numa nodes. Always start
+	 * the allocation from the node with the smallest nr_cpus.
+	 *
+	 * This guarantees that each active node gets at least one group:
+	 * over-allocation can only happen when a node is assigned exactly
+	 * one group, so every other node is still allocated >= 1 group,
+	 * since 'numgrps' is bigger than the number of active numa
+	 * nodes.
+	 *
+	 * One perfect invariant is that number of allocated groups for
+	 * each node is <= CPU count of this node:
+	 *
+	 * 1) suppose there are two nodes: A and B
+	 * 	ncpu(X) is CPU count of node X
+	 * 	grps(X) is the group count allocated to node X via this
+	 * 	algorithm
+	 *
+	 * 	ncpu(A) <= ncpu(B)
+	 * 	ncpu(A) + ncpu(B) = N
+	 * 	grps(A) + grps(B) = G
+	 *
+	 * 	grps(A) = max(1, round_down(G * ncpu(A) / N))
+	 * 	grps(B) = G - grps(A)
+	 *
+	 * 	both N and G are integer, and 2 <= G <= N, suppose
+	 * 	G = N - delta, and 0 <= delta <= N - 2
+	 *
+	 * 2) obviously grps(A) <= ncpu(A) because:
+	 *
+	 * 	if grps(A) is 1, then grps(A) <= ncpu(A) given
+	 * 	ncpu(A) >= 1
+	 *
+	 * 	otherwise,
+	 * 		grps(A) <= G * ncpu(A) / N <= ncpu(A), given G <= N
+	 *
+	 * 3) prove how grps(B) <= ncpu(B):
+	 *
+	 * 	if round_down(G * ncpu(A) / N) == 0, grps(B) won't be
+	 * 	over-allocated, so grps(B) <= ncpu(B),
+	 *
+	 * 	otherwise:
+	 *
+	 * 	grps(A) =
+	 * 		round_down(G * ncpu(A) / N) =
+	 * 		round_down((N - delta) * ncpu(A) / N) =
+	 * 		round_down((N * ncpu(A) - delta * ncpu(A)) / N)	 >=
+	 * 		round_down((N * ncpu(A) - delta * N) / N)	 =
+	 * 		ncpu(A) - delta
+	 *
+	 * 	then:
+	 *
+	 * 	grps(A) - G >= ncpu(A) - delta - G
+	 * 	=>
+	 * 	G - grps(A) <= G + delta - ncpu(A)
+	 * 	=>
+	 * 	grps(B) <= N - ncpu(A)
+	 * 	=>
+	 * 	grps(B) <= ncpu(B)
+	 *
+	 * For three or more nodes, the allocation can be viewed as one
+	 * node versus one big combined node, which is exactly how this
+	 * algorithm is implemented: 'remaining_ncpus' & 'numgrps' are
+	 * re-calculated each iteration, so for each node X: grps(X) <= ncpu(X).
+	 *
+	 */
+	for (n = 0; n < nr_node_ids; n++) {
+		unsigned ngroups, ncpus;
+
+		if (node_groups[n].ncpus == UINT_MAX)
+			continue;
+
+		WARN_ON_ONCE(numgrps == 0);
+
+		ncpus = node_groups[n].ncpus;
+		ngroups = max_t(unsigned, 1,
+				 numgrps * ncpus / remaining_ncpus);
+		WARN_ON_ONCE(ngroups > ncpus);
+
+		node_groups[n].ngroups = ngroups;
+
+		remaining_ncpus -= ncpus;
+		numgrps -= ngroups;
+	}
+}
+
+static int __group_cpus_evenly(unsigned int startgrp, unsigned int numgrps,
+			       cpumask_var_t *node_to_cpumask,
+			       const struct cpumask *cpu_mask,
+			       struct cpumask *nmsk, struct cpumask *masks)
+{
+	unsigned int i, n, nodes, cpus_per_grp, extra_grps, done = 0;
+	unsigned int last_grp = numgrps;
+	unsigned int curgrp = startgrp;
+	nodemask_t nodemsk = NODE_MASK_NONE;
+	struct node_groups *node_groups;
+
+	if (cpumask_empty(cpu_mask))
+		return 0;
+
+	nodes = get_nodes_in_cpumask(node_to_cpumask, cpu_mask, &nodemsk);
+
+	/*
+	 * If the number of nodes in the mask is greater than or equal to the
+	 * number of groups, just spread the groups across the nodes.
+	 */
+	if (numgrps <= nodes) {
+		for_each_node_mask(n, nodemsk) {
+			/* Ensure that only CPUs which are in both masks are set */
+			cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]);
+			cpumask_or(&masks[curgrp], &masks[curgrp], nmsk);
+			if (++curgrp == last_grp)
+				curgrp = 0;
+		}
+		return numgrps;
+	}
+
+	node_groups = kcalloc(nr_node_ids,
+			       sizeof(struct node_groups),
+			       GFP_KERNEL);
+	if (!node_groups)
+		return -ENOMEM;
+
+	/* allocate group number for each node */
+	alloc_nodes_groups(numgrps, node_to_cpumask, cpu_mask,
+			   nodemsk, nmsk, node_groups);
+	for (i = 0; i < nr_node_ids; i++) {
+		unsigned int ncpus, v;
+		struct node_groups *nv = &node_groups[i];
+
+		if (nv->ngroups == UINT_MAX)
+			continue;
+
+		/* Get the cpus on this node which are in the mask */
+		cpumask_and(nmsk, cpu_mask, node_to_cpumask[nv->id]);
+		ncpus = cpumask_weight(nmsk);
+		if (!ncpus)
+			continue;
+
+		WARN_ON_ONCE(nv->ngroups > ncpus);
+
+		/* Account for rounding errors */
+		extra_grps = ncpus - nv->ngroups * (ncpus / nv->ngroups);
+
+		/* Spread allocated groups on CPUs of the current node */
+		for (v = 0; v < nv->ngroups; v++, curgrp++) {
+			cpus_per_grp = ncpus / nv->ngroups;
+
+			/* Account for extra groups to compensate rounding errors */
+			if (extra_grps) {
+				cpus_per_grp++;
+				--extra_grps;
+			}
+
+			/*
+			 * wrapping has to be considered given 'startgrp'
+			 * may start anywhere
+			 */
+			if (curgrp >= last_grp)
+				curgrp = 0;
+			grp_spread_init_one(&masks[curgrp], nmsk,
+						cpus_per_grp);
+		}
+		done += nv->ngroups;
+	}
+	kfree(node_groups);
+	return done;
+}
+
+#ifdef CONFIG_SMP
+/**
+ * group_cpus_evenly - Group all CPUs evenly per NUMA/CPU locality
+ * @numgrps: number of groups
+ *
+ * Return: cpumask array if successful, NULL otherwise. Each element
+ * contains the CPUs assigned to that group.
+ *
+ * Try to put CPUs that are close in terms of CPU and NUMA locality
+ * into the same group, using two-stage grouping:
+ *	1) allocate present CPUs on these groups evenly first
+ *	2) allocate other possible CPUs on these groups evenly
+ *
+ * The resulting grouping guarantees that all CPUs are covered and
+ * that no CPU is assigned to multiple groups.
+ */
+struct cpumask *group_cpus_evenly(unsigned int numgrps)
+{
+	unsigned int curgrp = 0, nr_present = 0, nr_others = 0;
+	cpumask_var_t *node_to_cpumask;
+	cpumask_var_t nmsk, npresmsk;
+	int ret = -ENOMEM;
+	struct cpumask *masks = NULL;
+
+	if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
+		return NULL;
+
+	if (!zalloc_cpumask_var(&npresmsk, GFP_KERNEL))
+		goto fail_nmsk;
+
+	node_to_cpumask = alloc_node_to_cpumask();
+	if (!node_to_cpumask)
+		goto fail_npresmsk;
+
+	masks = kcalloc(numgrps, sizeof(*masks), GFP_KERNEL);
+	if (!masks)
+		goto fail_node_to_cpumask;
+
+	/* Stabilize the cpumasks */
+	cpus_read_lock();
+	build_node_to_cpumask(node_to_cpumask);
+
+	/* grouping present CPUs first */
+	ret = __group_cpus_evenly(curgrp, numgrps, node_to_cpumask,
+				  cpu_present_mask, nmsk, masks);
+	if (ret < 0)
+		goto fail_build_affinity;
+	nr_present = ret;
+
+	/*
+	 * Allocate non present CPUs starting from the next group to be
+	 * handled. If the grouping of present CPUs already exhausted the
+	 * group space, assign the non-present CPUs to the groups that
+	 * were already allocated.
+	 */
+	if (nr_present >= numgrps)
+		curgrp = 0;
+	else
+		curgrp = nr_present;
+	cpumask_andnot(npresmsk, cpu_possible_mask, cpu_present_mask);
+	ret = __group_cpus_evenly(curgrp, numgrps, node_to_cpumask,
+				  npresmsk, nmsk, masks);
+	if (ret >= 0)
+		nr_others = ret;
+
+ fail_build_affinity:
+	cpus_read_unlock();
+
+	if (ret >= 0)
+		WARN_ON(nr_present + nr_others < numgrps);
+
+ fail_node_to_cpumask:
+	free_node_to_cpumask(node_to_cpumask);
+
+ fail_npresmsk:
+	free_cpumask_var(npresmsk);
+
+ fail_nmsk:
+	free_cpumask_var(nmsk);
+	if (ret < 0) {
+		kfree(masks);
+		return NULL;
+	}
+	return masks;
+}
+#else
+struct cpumask *group_cpus_evenly(unsigned int numgrps)
+{
+	struct cpumask *masks = kcalloc(numgrps, sizeof(*masks), GFP_KERNEL);
+
+	if (!masks)
+		return NULL;
+
+	/* assign all possible CPUs (just CPU 0 on UP) to the first group */
+	cpumask_copy(&masks[0], cpu_possible_mask);
+	return masks;
+}
+#endif
-- 
2.43.0
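
As a quick sanity check of the grps(X) <= ncpu(X) invariant proved in
the alloc_nodes_groups() comment, the following standalone user-space
sketch (illustrative, not part of the patch) reproduces the two-node
case: with ncpu(A) = 2, ncpu(B) = 6 and numgrps = 4, node A gets
max(1, 4 * 2 / 8) = 1 group and node B gets the remaining 3, both
within their CPU counts.

	/*
	 * Standalone illustration of the ratio-based allocation used by
	 * alloc_nodes_groups(); the node sizes are hypothetical.
	 */
	#include <stdio.h>

	int main(void)
	{
		unsigned int ncpus[] = { 2, 6 };	/* nodes sorted by CPU count */
		unsigned int remaining_ncpus = 8, numgrps = 4;
		int n;

		for (n = 0; n < 2; n++) {
			/* Same max(1, numgrps * ncpus / remaining_ncpus) rounding as the kernel code. */
			unsigned int ngroups = numgrps * ncpus[n] / remaining_ncpus;

			if (ngroups < 1)
				ngroups = 1;
			printf("node %d: %u CPUs -> %u groups\n", n, ncpus[n], ngroups);
			remaining_ncpus -= ncpus[n];
			numgrps -= ngroups;
		}
		return 0;
	}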



