* [PATCH v14 2/3] sched: Move task_mm_cid_work to mm timer
[not found] <20250707144824.117014-1-gmonaco@redhat.com>
@ 2025-07-07 14:48 ` Gabriele Monaco
2025-07-07 15:19 ` Mathieu Desnoyers
2025-07-10 4:56 ` kernel test robot
0 siblings, 2 replies; 7+ messages in thread
From: Gabriele Monaco @ 2025-07-07 14:48 UTC (permalink / raw)
To: linux-kernel, Andrew Morton, David Hildenbrand, Ingo Molnar,
Peter Zijlstra, Mathieu Desnoyers, Paul E. McKenney, linux-mm
Cc: Gabriele Monaco, Ingo Molnar
Currently, the task_mm_cid_work function is called in a task work
triggered by a scheduler tick to frequently compact the mm_cids of each
process. This can delay the execution of the corresponding thread for
the entire duration of the function, negatively affecting the response
in case of real time tasks. In practice, we observe task_mm_cid_work
increasing the latency by 30-35us on a 128-core system; this order of
magnitude is meaningful under PREEMPT_RT.
Run the task_mm_cid_work in a new timer connected to the mm_struct
rather than in the task context before returning to userspace.
This timer is initialised together with the mm and disabled before the mm is freed.
The timer is armed while returning to userspace in
__rseq_handle_notify_resume, with an expiration of MM_CID_SCAN_DELAY.
To make sure this happens predictably also on long-running tasks,
trigger a call to __rseq_handle_notify_resume also from the scheduler
tick if the runtime exceeded a 100ms threshold.
The main advantage of this change is that the function can be offloaded
to a different CPU and even preempted by RT tasks.
Moreover, this new behaviour is more predictable with periodic tasks
with short runtime, which may rarely run during a scheduler tick.
Now, the timer is always scheduled when the task returns to userspace.
The timer is disabled during mmdrop: since the function cannot sleep in
all kernel configurations, we cannot wait for a possibly running timer
to terminate. Make sure the mm is valid in case the task is terminating
by reserving it with mmgrab/mmdrop, returning early if the timer
handler turns out to be the last user of the mm when it runs.
This situation is unlikely since the timer is not armed for exiting
tasks, but it cannot be ruled out.
Fixes: 223baf9d17f2 ("sched: Fix performance regression introduced by mm_cid")
Signed-off-by: Gabriele Monaco <gmonaco@redhat.com>
---
include/linux/mm_types.h | 23 +++++++--
include/linux/sched.h | 8 ++-
kernel/rseq.c | 2 +
kernel/sched/core.c | 103 +++++++++++++++++++++------------------
kernel/sched/sched.h | 8 +--
5 files changed, 88 insertions(+), 56 deletions(-)
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index d6b91e8a66d6d..9c159cf70a16c 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -990,11 +990,11 @@ struct mm_struct {
*/
struct mm_cid __percpu *pcpu_cid;
/*
- * @mm_cid_next_scan: Next mm_cid scan (in jiffies).
+ * @mm_cid_last_scan: Last mm_cid scan (in jiffies).
*
- * When the next mm_cid scan is due (in jiffies).
+ * When the last mm_cid scan occurred (in jiffies).
*/
- unsigned long mm_cid_next_scan;
+ unsigned long mm_cid_last_scan;
/**
* @nr_cpus_allowed: Number of CPUs allowed for mm.
*
@@ -1017,6 +1017,10 @@ struct mm_struct {
* mm nr_cpus_allowed updates.
*/
raw_spinlock_t cpus_allowed_lock;
+ /*
+ * @cid_timer: Timer to run the mm_cid scan.
+ */
+ struct timer_list cid_timer;
#endif
#ifdef CONFIG_MMU
atomic_long_t pgtables_bytes; /* size of all page tables */
@@ -1321,6 +1325,8 @@ enum mm_cid_state {
MM_CID_LAZY_PUT = (1U << 31),
};
+extern void task_mm_cid_scan(struct timer_list *timer);
+
static inline bool mm_cid_is_unset(int cid)
{
return cid == MM_CID_UNSET;
@@ -1393,12 +1399,14 @@ static inline int mm_alloc_cid_noprof(struct mm_struct *mm, struct task_struct *
if (!mm->pcpu_cid)
return -ENOMEM;
mm_init_cid(mm, p);
+ timer_setup(&mm->cid_timer, task_mm_cid_scan, TIMER_DEFERRABLE);
return 0;
}
#define mm_alloc_cid(...) alloc_hooks(mm_alloc_cid_noprof(__VA_ARGS__))
static inline void mm_destroy_cid(struct mm_struct *mm)
{
+ timer_shutdown(&mm->cid_timer);
free_percpu(mm->pcpu_cid);
mm->pcpu_cid = NULL;
}
@@ -1420,6 +1428,11 @@ static inline void mm_set_cpus_allowed(struct mm_struct *mm, const struct cpumas
WRITE_ONCE(mm->nr_cpus_allowed, cpumask_weight(mm_allowed));
raw_spin_unlock(&mm->cpus_allowed_lock);
}
+
+static inline bool mm_cid_scan_pending(struct mm_struct *mm)
+{
+ return mm && timer_pending(&mm->cid_timer);
+}
#else /* CONFIG_SCHED_MM_CID */
static inline void mm_init_cid(struct mm_struct *mm, struct task_struct *p) { }
static inline int mm_alloc_cid(struct mm_struct *mm, struct task_struct *p) { return 0; }
@@ -1430,6 +1443,10 @@ static inline unsigned int mm_cid_size(void)
return 0;
}
static inline void mm_set_cpus_allowed(struct mm_struct *mm, const struct cpumask *cpumask) { }
+static inline bool mm_cid_scan_pending(struct mm_struct *mm)
+{
+ return false;
+}
#endif /* CONFIG_SCHED_MM_CID */
struct mmu_gather;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4f78a64beb52c..e90bc52dece3e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1432,7 +1432,7 @@ struct task_struct {
int last_mm_cid; /* Most recent cid in mm */
int migrate_from_cpu;
int mm_cid_active; /* Whether cid bitmap is active */
- struct callback_head cid_work;
+ unsigned long last_cid_reset; /* Time of last reset in jiffies */
#endif
struct tlbflush_unmap_batch tlb_ubc;
@@ -2277,4 +2277,10 @@ static __always_inline void alloc_tag_restore(struct alloc_tag *tag, struct allo
#define alloc_tag_restore(_tag, _old) do {} while (0)
#endif
+#ifdef CONFIG_SCHED_MM_CID
+extern void task_queue_mm_cid(struct task_struct *curr);
+#else
+static inline void task_queue_mm_cid(struct task_struct *curr) { }
+#endif
+
#endif
diff --git a/kernel/rseq.c b/kernel/rseq.c
index b7a1ec327e811..9ce0f79e35bfb 100644
--- a/kernel/rseq.c
+++ b/kernel/rseq.c
@@ -441,6 +441,8 @@ void __rseq_handle_notify_resume(struct ksignal *ksig, struct pt_regs *regs)
}
if (unlikely(rseq_update_cpu_node_id(t)))
goto error;
+ if (!mm_cid_scan_pending(t->mm))
+ task_queue_mm_cid(t);
return;
error:
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index ec68fc686bd74..ed316f0a31d9d 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4542,7 +4542,6 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
p->wake_entry.u_flags = CSD_TYPE_TTWU;
p->migration_pending = NULL;
#endif
- init_sched_mm_cid(p);
}
DEFINE_STATIC_KEY_FALSE(sched_numa_balancing);
@@ -10594,37 +10593,15 @@ static void sched_mm_cid_remote_clear_weight(struct mm_struct *mm, int cpu,
sched_mm_cid_remote_clear(mm, pcpu_cid, cpu);
}
-static void task_mm_cid_work(struct callback_head *work)
+void task_mm_cid_scan(struct timer_list *timer)
{
- unsigned long now = jiffies, old_scan, next_scan;
- struct task_struct *t = current;
struct cpumask *cidmask;
- struct mm_struct *mm;
+ struct mm_struct *mm = container_of(timer, struct mm_struct, cid_timer);
int weight, cpu;
- WARN_ON_ONCE(t != container_of(work, struct task_struct, cid_work));
-
- work->next = work; /* Prevent double-add */
- if (t->flags & PF_EXITING)
- return;
- mm = t->mm;
- if (!mm)
- return;
- old_scan = READ_ONCE(mm->mm_cid_next_scan);
- next_scan = now + msecs_to_jiffies(MM_CID_SCAN_DELAY);
- if (!old_scan) {
- unsigned long res;
-
- res = cmpxchg(&mm->mm_cid_next_scan, old_scan, next_scan);
- if (res != old_scan)
- old_scan = res;
- else
- old_scan = next_scan;
- }
- if (time_before(now, old_scan))
- return;
- if (!try_cmpxchg(&mm->mm_cid_next_scan, &old_scan, next_scan))
- return;
+ /* We are the last user, process already terminated. */
+ if (atomic_read(&mm->mm_count) == 1)
+ goto out_drop;
cidmask = mm_cidmask(mm);
/* Clear cids that were not recently used. */
for_each_possible_cpu(cpu)
@@ -10636,35 +10613,65 @@ static void task_mm_cid_work(struct callback_head *work)
*/
for_each_possible_cpu(cpu)
sched_mm_cid_remote_clear_weight(mm, cpu, weight);
+ WRITE_ONCE(mm->mm_cid_last_scan, jiffies);
+out_drop:
+ mmdrop(mm);
}
-void init_sched_mm_cid(struct task_struct *t)
+void task_tick_mm_cid(struct rq *rq, struct task_struct *t)
{
- struct mm_struct *mm = t->mm;
- int mm_users = 0;
+ u64 rtime = t->se.sum_exec_runtime - t->se.prev_sum_exec_runtime;
- if (mm) {
- mm_users = atomic_read(&mm->mm_users);
- if (mm_users == 1)
- mm->mm_cid_next_scan = jiffies + msecs_to_jiffies(MM_CID_SCAN_DELAY);
+ /*
+ * If a task is running unpreempted for a long time, it won't get its
+ * mm_cid compacted and won't update its mm_cid value after a
+ * compaction occurs.
+ * For such a task, this function does two things:
+ * A) trigger the mm_cid recompaction,
+ * B) trigger an update of the task's rseq->mm_cid field at some point
+ * after recompaction, so it can get a mm_cid value closer to 0.
+ * A change in the mm_cid triggers an rseq_preempt.
+ *
+ * B occurs once after the compaction work completes, both A and B
+ * don't run as long as the compaction work is pending.
+ */
+ if (!t->mm || (t->flags & (PF_EXITING | PF_KTHREAD)) ||
+ mm_cid_scan_pending(t->mm))
+ return;
+ if (rtime < RSEQ_UNPREEMPTED_THRESHOLD)
+ return;
+ if (time_after(t->mm->mm_cid_last_scan, t->last_cid_reset)) {
+ /* Update mm_cid field */
+ int old_cid = t->mm_cid;
+
+ if (!t->mm_cid_active)
+ return;
+ mm_cid_snapshot_time(rq, t->mm);
+ mm_cid_put_lazy(t);
+ t->last_mm_cid = t->mm_cid = mm_cid_get(rq, t, t->mm);
+ if (old_cid != t->mm_cid)
+ rseq_preempt(t);
+ } else {
+ /* Trigger mm_cid recompaction */
+ rseq_set_notify_resume(t);
}
- t->cid_work.next = &t->cid_work; /* Protect against double add */
- init_task_work(&t->cid_work, task_mm_cid_work);
}
-void task_tick_mm_cid(struct rq *rq, struct task_struct *curr)
+void task_queue_mm_cid(struct task_struct *curr)
{
- struct callback_head *work = &curr->cid_work;
- unsigned long now = jiffies;
+ int requeued;
- if (!curr->mm || (curr->flags & (PF_EXITING | PF_KTHREAD)) ||
- work->next != work)
- return;
- if (time_before(now, READ_ONCE(curr->mm->mm_cid_next_scan)))
- return;
-
- /* No page allocation under rq lock */
- task_work_add(curr, work, TWA_RESUME);
+ /*
+ * @curr must be a user thread and the timer must not be pending.
+ * Access to this timer is not serialised across threads sharing the
+ * same mm: ensure racing threads don't postpone enqueued timers and
+ * don't mmgrab() if they didn't enqueue the timer themselves.
+ * mmgrab() is necessary to ensure the mm exists until the timer runs.
+ */
+ requeued = timer_reduce(&curr->mm->cid_timer,
+ jiffies + msecs_to_jiffies(MM_CID_SCAN_DELAY));
+ if (!requeued && timer_pending(&curr->mm->cid_timer))
+ mmgrab(curr->mm);
}
void sched_mm_cid_exit_signals(struct task_struct *t)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 475bb5998295e..3e72323fbde06 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -3606,14 +3606,14 @@ extern const char *preempt_modes[];
#define SCHED_MM_CID_PERIOD_NS (100ULL * 1000000) /* 100ms */
#define MM_CID_SCAN_DELAY 100 /* 100ms */
+#define RSEQ_UNPREEMPTED_THRESHOLD SCHED_MM_CID_PERIOD_NS
extern raw_spinlock_t cid_lock;
extern int use_cid_lock;
extern void sched_mm_cid_migrate_from(struct task_struct *t);
extern void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t);
-extern void task_tick_mm_cid(struct rq *rq, struct task_struct *curr);
-extern void init_sched_mm_cid(struct task_struct *t);
+extern void task_tick_mm_cid(struct rq *rq, struct task_struct *t);
static inline void __mm_cid_put(struct mm_struct *mm, int cid)
{
@@ -3809,6 +3809,7 @@ static inline int mm_cid_get(struct rq *rq, struct task_struct *t,
int cid;
lockdep_assert_rq_held(rq);
+ t->last_cid_reset = jiffies;
cpumask = mm_cidmask(mm);
cid = __this_cpu_read(pcpu_cid->cid);
if (mm_cid_is_valid(cid)) {
@@ -3881,8 +3882,7 @@ static inline void switch_mm_cid(struct rq *rq,
static inline void switch_mm_cid(struct rq *rq, struct task_struct *prev, struct task_struct *next) { }
static inline void sched_mm_cid_migrate_from(struct task_struct *t) { }
static inline void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t) { }
-static inline void task_tick_mm_cid(struct rq *rq, struct task_struct *curr) { }
-static inline void init_sched_mm_cid(struct task_struct *t) { }
+static inline void task_tick_mm_cid(struct rq *rq, struct task_struct *t) { }
#endif /* !CONFIG_SCHED_MM_CID */
extern u64 avg_vruntime(struct cfs_rq *cfs_rq);
--
2.50.0
^ permalink raw reply related [flat|nested] 7+ messages in thread
* Re: [PATCH v14 2/3] sched: Move task_mm_cid_work to mm timer
2025-07-07 14:48 ` [PATCH v14 2/3] sched: Move task_mm_cid_work to mm timer Gabriele Monaco
@ 2025-07-07 15:19 ` Mathieu Desnoyers
2025-07-10 4:56 ` kernel test robot
1 sibling, 0 replies; 7+ messages in thread
From: Mathieu Desnoyers @ 2025-07-07 15:19 UTC (permalink / raw)
To: Gabriele Monaco, linux-kernel, Andrew Morton, David Hildenbrand,
Ingo Molnar, Peter Zijlstra, Paul E. McKenney, linux-mm
Cc: Ingo Molnar
On 2025-07-07 10:48, Gabriele Monaco wrote:
[...]
>
> -void task_tick_mm_cid(struct rq *rq, struct task_struct *curr)
> +void task_queue_mm_cid(struct task_struct *curr)
> {
> - struct callback_head *work = &curr->cid_work;
> - unsigned long now = jiffies;
> + int requeued;
>
> - if (!curr->mm || (curr->flags & (PF_EXITING | PF_KTHREAD)) ||
> - work->next != work)
> - return;
> - if (time_before(now, READ_ONCE(curr->mm->mm_cid_next_scan)))
> - return;
> -
> - /* No page allocation under rq lock */
> - task_work_add(curr, work, TWA_RESUME);
> + /*
> + * @curr must be a user thread and the timer must not be pending.
> + * Access to this timer is not serialised across threads sharing the
> + * same mm: ensure racing threads don't postpone enqueued timers and
> + * don't mmgrab() if they didn't enqueue the timer themselves.
> + * mmgrab() is necessary to ensure the mm exists until the timer runs.
> + */
> + requeued = timer_reduce(&curr->mm->cid_timer,
> + jiffies + msecs_to_jiffies(MM_CID_SCAN_DELAY));
> + if (!requeued && timer_pending(&curr->mm->cid_timer))
> + mmgrab(curr->mm);
> }
>
In v13 we had:
- task_work_add(curr, work, TWA_RESUME);
+/* Call only when curr is a user thread. */
+void task_queue_mm_cid(struct task_struct *curr)
+{
+ /* Ensure the mm exists when we run. */
+ mmgrab(curr->mm);
+ queue_work(system_unbound_wq, &curr->mm->cid_work);
}
The new pattern is to do mmgrab *after* timer_reduce has enqueued
the timer. This seems to be racy with timer execution. What prevents
the timer to run before mmgrab() is done ?
Thanks,
Mathieu
--
Mathieu Desnoyers
EfficiOS Inc.
https://www.efficios.com
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH v14 2/3] sched: Move task_mm_cid_work to mm timer
2025-07-07 14:48 ` [PATCH v14 2/3] sched: Move task_mm_cid_work to mm timer Gabriele Monaco
2025-07-07 15:19 ` Mathieu Desnoyers
@ 2025-07-10 4:56 ` kernel test robot
2025-07-10 13:23 ` Mathieu Desnoyers
2025-07-10 13:47 ` Gabriele Monaco
1 sibling, 2 replies; 7+ messages in thread
From: kernel test robot @ 2025-07-10 4:56 UTC (permalink / raw)
To: Gabriele Monaco
Cc: oe-lkp, lkp, linux-mm, linux-kernel, aubrey.li, yu.c.chen,
Andrew Morton, David Hildenbrand, Ingo Molnar, Peter Zijlstra,
Mathieu Desnoyers, Paul E. McKenney, Gabriele Monaco, Ingo Molnar,
oliver.sang
Hello,
kernel test robot noticed "WARNING:inconsistent_lock_state" on:
commit: d06e66c6025e44136e6715d24c23fb821a415577 ("[PATCH v14 2/3] sched: Move task_mm_cid_work to mm timer")
url: https://github.com/intel-lab-lkp/linux/commits/Gabriele-Monaco/sched-Add-prev_sum_exec_runtime-support-for-RT-DL-and-SCX-classes/20250707-224959
patch link: https://lore.kernel.org/all/20250707144824.117014-3-gmonaco@redhat.com/
patch subject: [PATCH v14 2/3] sched: Move task_mm_cid_work to mm timer
in testcase: boot
config: x86_64-randconfig-003-20250708
compiler: gcc-11
test machine: qemu-system-x86_64 -enable-kvm -cpu SandyBridge -smp 2 -m 16G
(please refer to attached dmesg/kmsg for entire log/backtrace)
+-------------------------------------------------+------------+------------+
| | 50c1dc07ee | d06e66c602 |
+-------------------------------------------------+------------+------------+
| WARNING:inconsistent_lock_state | 0 | 12 |
| inconsistent{SOFTIRQ-ON-W}->{IN-SOFTIRQ-W}usage | 0 | 12 |
+-------------------------------------------------+------------+------------+
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <oliver.sang@intel.com>
| Closes: https://lore.kernel.org/oe-lkp/202507100606.90787fe6-lkp@intel.com
[ 26.556715][ C0] WARNING: inconsistent lock state
[ 26.557127][ C0] 6.16.0-rc5-00002-gd06e66c6025e #1 Tainted: G T
[ 26.557730][ C0] --------------------------------
[ 26.558133][ C0] inconsistent {SOFTIRQ-ON-W} -> {IN-SOFTIRQ-W} usage.
[ 26.558662][ C0] stdbuf/386 [HC0[0]:SC1[1]:HE1:SE0] takes:
[ 26.559118][ C0] ffffffff870d4438 (pgd_lock){+.?.}-{3:3}, at: pgd_free (arch/x86/mm/pgtable.c:67 arch/x86/mm/pgtable.c:98 arch/x86/mm/pgtable.c:379)
[ 26.559786][ C0] {SOFTIRQ-ON-W} state was registered at:
[ 26.560232][ C0] mark_usage (kernel/locking/lockdep.c:4669)
[ 26.560561][ C0] __lock_acquire (kernel/locking/lockdep.c:5194)
[ 26.560929][ C0] lock_acquire (kernel/locking/lockdep.c:473 kernel/locking/lockdep.c:5873)
[ 26.561267][ C0] _raw_spin_lock (include/linux/spinlock_api_smp.h:134 kernel/locking/spinlock.c:154)
[ 26.561617][ C0] pgd_alloc (arch/x86/mm/pgtable.c:86 arch/x86/mm/pgtable.c:353)
[ 26.561950][ C0] mm_init+0x64f/0xbfb
[ 26.562342][ C0] mm_alloc (kernel/fork.c:1109)
[ 26.562655][ C0] dma_resv_lockdep (drivers/dma-buf/dma-resv.c:784)
[ 26.563020][ C0] do_one_initcall (init/main.c:1274)
[ 26.563389][ C0] do_initcalls (init/main.c:1335 init/main.c:1352)
[ 26.563744][ C0] kernel_init_freeable (init/main.c:1588)
[ 26.564144][ C0] kernel_init (init/main.c:1476)
[ 26.564402][ C0] ret_from_fork (arch/x86/kernel/process.c:154)
[ 26.564633][ C0] ret_from_fork_asm (arch/x86/entry/entry_64.S:258)
[ 26.564871][ C0] irq event stamp: 4774
[ 26.565070][ C0] hardirqs last enabled at (4774): _raw_spin_unlock_irq (arch/x86/include/asm/irqflags.h:42 arch/x86/include/asm/irqflags.h:119 include/linux/spinlock_api_smp.h:159 kernel/locking/spinlock.c:202)
[ 26.565526][ C0] hardirqs last disabled at (4773): _raw_spin_lock_irq (arch/x86/include/asm/preempt.h:80 include/linux/spinlock_api_smp.h:118 kernel/locking/spinlock.c:170)
[ 26.565971][ C0] softirqs last enabled at (4256): local_bh_enable (include/linux/bottom_half.h:33)
[ 26.566408][ C0] softirqs last disabled at (4771): __do_softirq (kernel/softirq.c:614)
[ 26.566823][ C0]
[ 26.566823][ C0] other info that might help us debug this:
[ 26.567198][ C0] Possible unsafe locking scenario:
[ 26.567198][ C0]
[ 26.567548][ C0] CPU0
[ 26.567709][ C0] ----
[ 26.567869][ C0] lock(pgd_lock);
[ 26.568060][ C0] <Interrupt>
[ 26.568255][ C0] lock(pgd_lock);
[ 26.568452][ C0]
[ 26.568452][ C0] *** DEADLOCK ***
[ 26.568452][ C0]
[ 26.568830][ C0] 3 locks held by stdbuf/386:
[ 26.569056][ C0] #0: ffff888170d5c1a8 (&sb->s_type->i_mutex_key){++++}-{4:4}, at: lookup_slow (fs/namei.c:1834)
[ 26.569535][ C0] #1: ffff888170cf5850 (&lockref->lock){+.+.}-{3:3}, at: d_alloc (include/linux/dcache.h:319 fs/dcache.c:1777)
[ 26.569961][ C0] #2: ffffc90000007d40 ((&mm->cid_timer)){+.-.}-{0:0}, at: call_timer_fn (kernel/time/timer.c:1744)
[ 26.570421][ C0]
[ 26.570421][ C0] stack backtrace:
[ 26.570704][ C0] CPU: 0 UID: 0 PID: 386 Comm: stdbuf Tainted: G T 6.16.0-rc5-00002-gd06e66c6025e #1 PREEMPT(voluntary) 39c5cbdaf5b4eb171776daa7d42daa95c0766676
[ 26.570716][ C0] Tainted: [T]=RANDSTRUCT
[ 26.570719][ C0] Call Trace:
[ 26.570723][ C0] <IRQ>
[ 26.570727][ C0] dump_stack_lvl (lib/dump_stack.c:122 (discriminator 4))
[ 26.570735][ C0] dump_stack (lib/dump_stack.c:130)
[ 26.570740][ C0] print_usage_bug (kernel/locking/lockdep.c:4047)
[ 26.570748][ C0] valid_state (kernel/locking/lockdep.c:4060)
[ 26.570755][ C0] mark_lock_irq (kernel/locking/lockdep.c:4270)
[ 26.570762][ C0] ? save_trace (kernel/locking/lockdep.c:592)
[ 26.570773][ C0] ? mark_lock (kernel/locking/lockdep.c:4728 (discriminator 3))
[ 26.570780][ C0] mark_lock (kernel/locking/lockdep.c:4756)
[ 26.570787][ C0] mark_usage (kernel/locking/lockdep.c:4645)
[ 26.570796][ C0] __lock_acquire (kernel/locking/lockdep.c:5194)
[ 26.570804][ C0] lock_acquire (kernel/locking/lockdep.c:473 kernel/locking/lockdep.c:5873)
[ 26.570811][ C0] ? pgd_free (arch/x86/mm/pgtable.c:67 arch/x86/mm/pgtable.c:98 arch/x86/mm/pgtable.c:379)
[ 26.570822][ C0] ? validate_chain (kernel/locking/lockdep.c:3826 kernel/locking/lockdep.c:3879)
[ 26.570828][ C0] ? wake_up_new_task (kernel/sched/core.c:10597)
[ 26.570839][ C0] _raw_spin_lock (include/linux/spinlock_api_smp.h:134 kernel/locking/spinlock.c:154)
[ 26.570845][ C0] ? pgd_free (arch/x86/mm/pgtable.c:67 arch/x86/mm/pgtable.c:98 arch/x86/mm/pgtable.c:379)
[ 26.570854][ C0] pgd_free (arch/x86/mm/pgtable.c:67 arch/x86/mm/pgtable.c:98 arch/x86/mm/pgtable.c:379)
[ 26.570863][ C0] ? wake_up_new_task (kernel/sched/core.c:10597)
[ 26.570873][ C0] __mmdrop (kernel/fork.c:681)
[ 26.570882][ C0] ? wake_up_new_task (kernel/sched/core.c:10597)
[ 26.570891][ C0] mmdrop (include/linux/sched/mm.h:55)
[ 26.570901][ C0] task_mm_cid_scan (kernel/sched/core.c:10619 (discriminator 3))
[ 26.570910][ C0] ? lock_is_held (include/linux/lockdep.h:249)
[ 26.570918][ C0] ? wake_up_new_task (kernel/sched/core.c:10597)
[ 26.570928][ C0] call_timer_fn (arch/x86/include/asm/atomic.h:23 include/linux/atomic/atomic-arch-fallback.h:457 include/linux/jump_label.h:262 include/trace/events/timer.h:127 kernel/time/timer.c:1748)
[ 26.570935][ C0] ? trace_timer_base_idle (kernel/time/timer.c:1724)
[ 26.570943][ C0] ? wake_up_new_task (kernel/sched/core.c:10597)
[ 26.570953][ C0] ? wake_up_new_task (kernel/sched/core.c:10597)
[ 26.570962][ C0] __run_timers (kernel/time/timer.c:1799 kernel/time/timer.c:2372)
[ 26.570970][ C0] ? add_timer_global (kernel/time/timer.c:2343)
[ 26.570977][ C0] ? __kasan_check_write (mm/kasan/shadow.c:38)
[ 26.570988][ C0] ? do_raw_spin_lock (arch/x86/include/asm/atomic.h:107 include/linux/atomic/atomic-arch-fallback.h:2170 include/linux/atomic/atomic-instrumented.h:1302 include/asm-generic/qspinlock.h:111 kernel/locking/spinlock_debug.c:116)
[ 26.570996][ C0] ? __raw_spin_lock_init (kernel/locking/spinlock_debug.c:114)
[ 26.571006][ C0] __run_timer_base (kernel/time/timer.c:2385)
[ 26.571014][ C0] run_timer_base (kernel/time/timer.c:2394)
[ 26.571021][ C0] run_timer_softirq (arch/x86/include/asm/atomic.h:23 include/linux/atomic/atomic-arch-fallback.h:457 include/linux/jump_label.h:262 kernel/time/timer.c:342 kernel/time/timer.c:2406)
[ 26.571028][ C0] handle_softirqs (arch/x86/include/asm/atomic.h:23 include/linux/atomic/atomic-arch-fallback.h:457 include/linux/jump_label.h:262 include/trace/events/irq.h:142 kernel/softirq.c:580)
[ 26.571039][ C0] __do_softirq (kernel/softirq.c:614)
[ 26.571046][ C0] __irq_exit_rcu (kernel/softirq.c:453 kernel/softirq.c:680)
[ 26.571055][ C0] irq_exit_rcu (kernel/softirq.c:698)
[ 26.571064][ C0] sysvec_apic_timer_interrupt (arch/x86/kernel/apic/apic.c:1050 arch/x86/kernel/apic/apic.c:1050)
[ 26.571076][ C0] </IRQ>
[ 26.571078][ C0] <TASK>
[ 26.571081][ C0] asm_sysvec_apic_timer_interrupt (arch/x86/include/asm/idtentry.h:574)
[ 26.571088][ C0] RIP: 0010:d_alloc (fs/dcache.c:1778)
[ 26.571100][ C0] Code: 8d 7c 24 50 b8 ff ff 37 00 ff 83 f8 00 00 00 48 89 fa 48 c1 e0 2a 48 c1 ea 03 80 3c 02 00 74 05 e8 5f f3 f6 ff 49 89 5c 24 50 <49> 8d bc 24 10 01 00 00 48 8d b3 20 01 00 00 e8 87 bc ff ff 4c 89
All code
========
0: 8d 7c 24 50 lea 0x50(%rsp),%edi
4: b8 ff ff 37 00 mov $0x37ffff,%eax
9: ff 83 f8 00 00 00 incl 0xf8(%rbx)
f: 48 89 fa mov %rdi,%rdx
12: 48 c1 e0 2a shl $0x2a,%rax
16: 48 c1 ea 03 shr $0x3,%rdx
1a: 80 3c 02 00 cmpb $0x0,(%rdx,%rax,1)
1e: 74 05 je 0x25
20: e8 5f f3 f6 ff call 0xfffffffffff6f384
25: 49 89 5c 24 50 mov %rbx,0x50(%r12)
2a:* 49 8d bc 24 10 01 00 lea 0x110(%r12),%rdi <-- trapping instruction
31: 00
32: 48 8d b3 20 01 00 00 lea 0x120(%rbx),%rsi
39: e8 87 bc ff ff call 0xffffffffffffbcc5
3e: 4c rex.WR
3f: 89 .byte 0x89
Code starting with the faulting instruction
===========================================
0: 49 8d bc 24 10 01 00 lea 0x110(%r12),%rdi
7: 00
8: 48 8d b3 20 01 00 00 lea 0x120(%rbx),%rsi
f: e8 87 bc ff ff call 0xffffffffffffbc9b
14: 4c rex.WR
15: 89 .byte 0x89
The kernel config and materials to reproduce are available at:
https://download.01.org/0day-ci/archive/20250710/202507100606.90787fe6-lkp@intel.com
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH v14 2/3] sched: Move task_mm_cid_work to mm timer
2025-07-10 4:56 ` kernel test robot
@ 2025-07-10 13:23 ` Mathieu Desnoyers
2025-07-10 13:40 ` Gabriele Monaco
2025-07-10 13:47 ` Gabriele Monaco
1 sibling, 1 reply; 7+ messages in thread
From: Mathieu Desnoyers @ 2025-07-10 13:23 UTC (permalink / raw)
To: kernel test robot, Gabriele Monaco
Cc: oe-lkp, lkp, linux-mm, linux-kernel, aubrey.li, yu.c.chen,
Andrew Morton, David Hildenbrand, Ingo Molnar, Peter Zijlstra,
Paul E. McKenney, Ingo Molnar
On 2025-07-10 00:56, kernel test robot wrote:
>
>
> Hello,
>
> kernel test robot noticed "WARNING:inconsistent_lock_state" on:
>
> commit: d06e66c6025e44136e6715d24c23fb821a415577 ("[PATCH v14 2/3] sched: Move task_mm_cid_work to mm timer")
> url: https://github.com/intel-lab-lkp/linux/commits/Gabriele-Monaco/sched-Add-prev_sum_exec_runtime-support-for-RT-DL-and-SCX-classes/20250707-224959
> patch link: https://lore.kernel.org/all/20250707144824.117014-3-gmonaco@redhat.com/
> patch subject: [PATCH v14 2/3] sched: Move task_mm_cid_work to mm timer
>
> in testcase: boot
>
> config: x86_64-randconfig-003-20250708
> compiler: gcc-11
> test machine: qemu-system-x86_64 -enable-kvm -cpu SandyBridge -smp 2 -m 16G
>
> (please refer to attached dmesg/kmsg for entire log/backtrace)
>
>
> +-------------------------------------------------+------------+------------+
> | | 50c1dc07ee | d06e66c602 |
> +-------------------------------------------------+------------+------------+
> | WARNING:inconsistent_lock_state | 0 | 12 |
> | inconsistent{SOFTIRQ-ON-W}->{IN-SOFTIRQ-W}usage | 0 | 12 |
> +-------------------------------------------------+------------+------------+
>
I suspect the issue comes from calling mmdrop(mm) from timer context in a scenario
where the mm_count can drop to 0.
This causes calls to pgd_free() and such to take the pgd_lock in softirq
context, when in other cases it's taken with softirqs enabled.
See "mmdrop_sched()" for RT. I think we need something similar for the
non-RT case, e.g. a:
static inline void __mmdrop_delayed(struct rcu_head *rhp)
{
struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop);
__mmdrop(mm);
}
static inline void mmdrop_timer(struct mm_struct *mm)
{
/* Provides a full memory barrier. See mmdrop() */
if (atomic_dec_and_test(&mm->mm_count))
call_rcu(&mm->delayed_drop, __mmdrop_delayed);
}
Thoughts ?
Thanks,
Mathieu
>
> If you fix the issue in a separate patch/commit (i.e. not just a new version of
> the same patch/commit), kindly add following tags
> | Reported-by: kernel test robot <oliver.sang@intel.com>
> | Closes: https://lore.kernel.org/oe-lkp/202507100606.90787fe6-lkp@intel.com
>
>
> [ 26.556715][ C0] WARNING: inconsistent lock state
> [ 26.557127][ C0] 6.16.0-rc5-00002-gd06e66c6025e #1 Tainted: G T
> [ 26.557730][ C0] --------------------------------
> [ 26.558133][ C0] inconsistent {SOFTIRQ-ON-W} -> {IN-SOFTIRQ-W} usage.
> [ 26.558662][ C0] stdbuf/386 [HC0[0]:SC1[1]:HE1:SE0] takes:
> [ 26.559118][ C0] ffffffff870d4438 (pgd_lock){+.?.}-{3:3}, at: pgd_free (arch/x86/mm/pgtable.c:67 arch/x86/mm/pgtable.c:98 arch/x86/mm/pgtable.c:379)
> [ 26.559786][ C0] {SOFTIRQ-ON-W} state was registered at:
> [ 26.560232][ C0] mark_usage (kernel/locking/lockdep.c:4669)
> [ 26.560561][ C0] __lock_acquire (kernel/locking/lockdep.c:5194)
> [ 26.560929][ C0] lock_acquire (kernel/locking/lockdep.c:473 kernel/locking/lockdep.c:5873)
> [ 26.561267][ C0] _raw_spin_lock (include/linux/spinlock_api_smp.h:134 kernel/locking/spinlock.c:154)
> [ 26.561617][ C0] pgd_alloc (arch/x86/mm/pgtable.c:86 arch/x86/mm/pgtable.c:353)
> [ 26.561950][ C0] mm_init+0x64f/0xbfb
> [ 26.562342][ C0] mm_alloc (kernel/fork.c:1109)
> [ 26.562655][ C0] dma_resv_lockdep (drivers/dma-buf/dma-resv.c:784)
> [ 26.563020][ C0] do_one_initcall (init/main.c:1274)
> [ 26.563389][ C0] do_initcalls (init/main.c:1335 init/main.c:1352)
> [ 26.563744][ C0] kernel_init_freeable (init/main.c:1588)
> [ 26.564144][ C0] kernel_init (init/main.c:1476)
> [ 26.564402][ C0] ret_from_fork (arch/x86/kernel/process.c:154)
> [ 26.564633][ C0] ret_from_fork_asm (arch/x86/entry/entry_64.S:258)
> [ 26.564871][ C0] irq event stamp: 4774
> [ 26.565070][ C0] hardirqs last enabled at (4774): _raw_spin_unlock_irq (arch/x86/include/asm/irqflags.h:42 arch/x86/include/asm/irqflags.h:119 include/linux/spinlock_api_smp.h:159 kernel/locking/spinlock.c:202)
> [ 26.565526][ C0] hardirqs last disabled at (4773): _raw_spin_lock_irq (arch/x86/include/asm/preempt.h:80 include/linux/spinlock_api_smp.h:118 kernel/locking/spinlock.c:170)
> [ 26.565971][ C0] softirqs last enabled at (4256): local_bh_enable (include/linux/bottom_half.h:33)
> [ 26.566408][ C0] softirqs last disabled at (4771): __do_softirq (kernel/softirq.c:614)
> [ 26.566823][ C0]
> [ 26.566823][ C0] other info that might help us debug this:
> [ 26.567198][ C0] Possible unsafe locking scenario:
> [ 26.567198][ C0]
> [ 26.567548][ C0] CPU0
> [ 26.567709][ C0] ----
> [ 26.567869][ C0] lock(pgd_lock);
> [ 26.568060][ C0] <Interrupt>
> [ 26.568255][ C0] lock(pgd_lock);
> [ 26.568452][ C0]
> [ 26.568452][ C0] *** DEADLOCK ***
> [ 26.568452][ C0]
> [ 26.568830][ C0] 3 locks held by stdbuf/386:
> [ 26.569056][ C0] #0: ffff888170d5c1a8 (&sb->s_type->i_mutex_key){++++}-{4:4}, at: lookup_slow (fs/namei.c:1834)
> [ 26.569535][ C0] #1: ffff888170cf5850 (&lockref->lock){+.+.}-{3:3}, at: d_alloc (include/linux/dcache.h:319 fs/dcache.c:1777)
> [ 26.569961][ C0] #2: ffffc90000007d40 ((&mm->cid_timer)){+.-.}-{0:0}, at: call_timer_fn (kernel/time/timer.c:1744)
> [ 26.570421][ C0]
> [ 26.570421][ C0] stack backtrace:
> [ 26.570704][ C0] CPU: 0 UID: 0 PID: 386 Comm: stdbuf Tainted: G T 6.16.0-rc5-00002-gd06e66c6025e #1 PREEMPT(voluntary) 39c5cbdaf5b4eb171776daa7d42daa95c0766676
> [ 26.570716][ C0] Tainted: [T]=RANDSTRUCT
> [ 26.570719][ C0] Call Trace:
> [ 26.570723][ C0] <IRQ>
> [ 26.570727][ C0] dump_stack_lvl (lib/dump_stack.c:122 (discriminator 4))
> [ 26.570735][ C0] dump_stack (lib/dump_stack.c:130)
> [ 26.570740][ C0] print_usage_bug (kernel/locking/lockdep.c:4047)
> [ 26.570748][ C0] valid_state (kernel/locking/lockdep.c:4060)
> [ 26.570755][ C0] mark_lock_irq (kernel/locking/lockdep.c:4270)
> [ 26.570762][ C0] ? save_trace (kernel/locking/lockdep.c:592)
> [ 26.570773][ C0] ? mark_lock (kernel/locking/lockdep.c:4728 (discriminator 3))
> [ 26.570780][ C0] mark_lock (kernel/locking/lockdep.c:4756)
> [ 26.570787][ C0] mark_usage (kernel/locking/lockdep.c:4645)
> [ 26.570796][ C0] __lock_acquire (kernel/locking/lockdep.c:5194)
> [ 26.570804][ C0] lock_acquire (kernel/locking/lockdep.c:473 kernel/locking/lockdep.c:5873)
> [ 26.570811][ C0] ? pgd_free (arch/x86/mm/pgtable.c:67 arch/x86/mm/pgtable.c:98 arch/x86/mm/pgtable.c:379)
> [ 26.570822][ C0] ? validate_chain (kernel/locking/lockdep.c:3826 kernel/locking/lockdep.c:3879)
> [ 26.570828][ C0] ? wake_up_new_task (kernel/sched/core.c:10597)
> [ 26.570839][ C0] _raw_spin_lock (include/linux/spinlock_api_smp.h:134 kernel/locking/spinlock.c:154)
> [ 26.570845][ C0] ? pgd_free (arch/x86/mm/pgtable.c:67 arch/x86/mm/pgtable.c:98 arch/x86/mm/pgtable.c:379)
> [ 26.570854][ C0] pgd_free (arch/x86/mm/pgtable.c:67 arch/x86/mm/pgtable.c:98 arch/x86/mm/pgtable.c:379)
> [ 26.570863][ C0] ? wake_up_new_task (kernel/sched/core.c:10597)
> [ 26.570873][ C0] __mmdrop (kernel/fork.c:681)
> [ 26.570882][ C0] ? wake_up_new_task (kernel/sched/core.c:10597)
> [ 26.570891][ C0] mmdrop (include/linux/sched/mm.h:55)
> [ 26.570901][ C0] task_mm_cid_scan (kernel/sched/core.c:10619 (discriminator 3))
> [ 26.570910][ C0] ? lock_is_held (include/linux/lockdep.h:249)
> [ 26.570918][ C0] ? wake_up_new_task (kernel/sched/core.c:10597)
> [ 26.570928][ C0] call_timer_fn (arch/x86/include/asm/atomic.h:23 include/linux/atomic/atomic-arch-fallback.h:457 include/linux/jump_label.h:262 include/trace/events/timer.h:127 kernel/time/timer.c:1748)
> [ 26.570935][ C0] ? trace_timer_base_idle (kernel/time/timer.c:1724)
> [ 26.570943][ C0] ? wake_up_new_task (kernel/sched/core.c:10597)
> [ 26.570953][ C0] ? wake_up_new_task (kernel/sched/core.c:10597)
> [ 26.570962][ C0] __run_timers (kernel/time/timer.c:1799 kernel/time/timer.c:2372)
> [ 26.570970][ C0] ? add_timer_global (kernel/time/timer.c:2343)
> [ 26.570977][ C0] ? __kasan_check_write (mm/kasan/shadow.c:38)
> [ 26.570988][ C0] ? do_raw_spin_lock (arch/x86/include/asm/atomic.h:107 include/linux/atomic/atomic-arch-fallback.h:2170 include/linux/atomic/atomic-instrumented.h:1302 include/asm-generic/qspinlock.h:111 kernel/locking/spinlock_debug.c:116)
> [ 26.570996][ C0] ? __raw_spin_lock_init (kernel/locking/spinlock_debug.c:114)
> [ 26.571006][ C0] __run_timer_base (kernel/time/timer.c:2385)
> [ 26.571014][ C0] run_timer_base (kernel/time/timer.c:2394)
> [ 26.571021][ C0] run_timer_softirq (arch/x86/include/asm/atomic.h:23 include/linux/atomic/atomic-arch-fallback.h:457 include/linux/jump_label.h:262 kernel/time/timer.c:342 kernel/time/timer.c:2406)
> [ 26.571028][ C0] handle_softirqs (arch/x86/include/asm/atomic.h:23 include/linux/atomic/atomic-arch-fallback.h:457 include/linux/jump_label.h:262 include/trace/events/irq.h:142 kernel/softirq.c:580)
> [ 26.571039][ C0] __do_softirq (kernel/softirq.c:614)
> [ 26.571046][ C0] __irq_exit_rcu (kernel/softirq.c:453 kernel/softirq.c:680)
> [ 26.571055][ C0] irq_exit_rcu (kernel/softirq.c:698)
> [ 26.571064][ C0] sysvec_apic_timer_interrupt (arch/x86/kernel/apic/apic.c:1050 arch/x86/kernel/apic/apic.c:1050)
> [ 26.571076][ C0] </IRQ>
> [ 26.571078][ C0] <TASK>
> [ 26.571081][ C0] asm_sysvec_apic_timer_interrupt (arch/x86/include/asm/idtentry.h:574)
> [ 26.571088][ C0] RIP: 0010:d_alloc (fs/dcache.c:1778)
> [ 26.571100][ C0] Code: 8d 7c 24 50 b8 ff ff 37 00 ff 83 f8 00 00 00 48 89 fa 48 c1 e0 2a 48 c1 ea 03 80 3c 02 00 74 05 e8 5f f3 f6 ff 49 89 5c 24 50 <49> 8d bc 24 10 01 00 00 48 8d b3 20 01 00 00 e8 87 bc ff ff 4c 89
> All code
> ========
> 0: 8d 7c 24 50 lea 0x50(%rsp),%edi
> 4: b8 ff ff 37 00 mov $0x37ffff,%eax
> 9: ff 83 f8 00 00 00 incl 0xf8(%rbx)
> f: 48 89 fa mov %rdi,%rdx
> 12: 48 c1 e0 2a shl $0x2a,%rax
> 16: 48 c1 ea 03 shr $0x3,%rdx
> 1a: 80 3c 02 00 cmpb $0x0,(%rdx,%rax,1)
> 1e: 74 05 je 0x25
> 20: e8 5f f3 f6 ff call 0xfffffffffff6f384
> 25: 49 89 5c 24 50 mov %rbx,0x50(%r12)
> 2a:* 49 8d bc 24 10 01 00 lea 0x110(%r12),%rdi <-- trapping instruction
> 31: 00
> 32: 48 8d b3 20 01 00 00 lea 0x120(%rbx),%rsi
> 39: e8 87 bc ff ff call 0xffffffffffffbcc5
> 3e: 4c rex.WR
> 3f: 89 .byte 0x89
>
> Code starting with the faulting instruction
> ===========================================
> 0: 49 8d bc 24 10 01 00 lea 0x110(%r12),%rdi
> 7: 00
> 8: 48 8d b3 20 01 00 00 lea 0x120(%rbx),%rsi
> f: e8 87 bc ff ff call 0xffffffffffffbc9b
> 14: 4c rex.WR
> 15: 89 .byte 0x89
>
>
> The kernel config and materials to reproduce are available at:
> https://download.01.org/0day-ci/archive/20250710/202507100606.90787fe6-lkp@intel.com
>
>
>
--
Mathieu Desnoyers
EfficiOS Inc.
https://www.efficios.com
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH v14 2/3] sched: Move task_mm_cid_work to mm timer
2025-07-10 13:23 ` Mathieu Desnoyers
@ 2025-07-10 13:40 ` Gabriele Monaco
2025-07-10 14:18 ` Mathieu Desnoyers
0 siblings, 1 reply; 7+ messages in thread
From: Gabriele Monaco @ 2025-07-10 13:40 UTC (permalink / raw)
To: Mathieu Desnoyers, kernel test robot
Cc: oe-lkp, lkp, linux-mm, linux-kernel, aubrey.li, yu.c.chen,
Andrew Morton, David Hildenbrand, Ingo Molnar, Peter Zijlstra,
Paul E. McKenney, Ingo Molnar
On Thu, 2025-07-10 at 09:23 -0400, Mathieu Desnoyers wrote:
> On 2025-07-10 00:56, kernel test robot wrote:
> >
> >
> > Hello,
> >
> > kernel test robot noticed "WARNING:inconsistent_lock_state" on:
> >
> > commit: d06e66c6025e44136e6715d24c23fb821a415577 ("[PATCH v14 2/3]
> > sched: Move task_mm_cid_work to mm timer")
> > url:
> > https://github.com/intel-lab-lkp/linux/commits/Gabriele-Monaco/sched-Add-prev_sum_exec_runtime-support-for-RT-DL-and-SCX-classes/20250707-224959
> > patch link:
> > https://lore.kernel.org/all/20250707144824.117014-3-gmonaco@redhat.com/
> > patch subject: [PATCH v14 2/3] sched: Move task_mm_cid_work to mm
> > timer
> >
> > in testcase: boot
> >
> > config: x86_64-randconfig-003-20250708
> > compiler: gcc-11
> > test machine: qemu-system-x86_64 -enable-kvm -cpu SandyBridge -smp
> > 2 -m 16G
> >
> > (please refer to attached dmesg/kmsg for entire log/backtrace)
> >
> >
> > +-------------------------------------------------+------------+---
> > ---------+
> > > | 50c1dc07ee |
> > > d06e66c602 |
> > +-------------------------------------------------+------------+---
> > ---------+
> > > WARNING:inconsistent_lock_state | 0 |
> > > 12 |
> > > inconsistent{SOFTIRQ-ON-W}->{IN-SOFTIRQ-W}usage | 0 |
> > > 12 |
> > +-------------------------------------------------+------------+---
> > ---------+
> >
>
> I suspect the issue comes from calling mmdrop(mm) from timer context
> in a scenario
> where the mm_count can drop to 0.
>
> This causes calls to pgd_free() and such to take the pgd_lock in
> softirq
> context, when in other cases it's taken with softirqs enabled.
>
> See "mmdrop_sched()" for RT. I think we need something similar for
> the
> non-RT case, e.g. a:
>
> static inline void __mmdrop_delayed(struct rcu_head *rhp)
> {
> struct mm_struct *mm = container_of(rhp, struct mm_struct,
> delayed_drop);
>
> __mmdrop(mm);
> }
>
> static inline void mmdrop_timer(struct mm_struct *mm)
> {
> /* Provides a full memory barrier. See mmdrop() */
> if (atomic_dec_and_test(&mm->mm_count))
> call_rcu(&mm->delayed_drop, __mmdrop_delayed);
> }
>
> Thoughts ?
>
Thanks for the suggestion.
I noticed the problem is in the mmdrop over there, but I'm seeing this
is getting unnecessarily complicated.
I'm not sure it's worth going down this path, also considering that
pushing the timer wheel like this might lead to unintended effects, as
happened with the workqueue.
I am going to try the alternative approach of running the scan in
batches [1], still using a task_work but triggering it from
__rseq_handle_notify_resume as done here.
If that works in the original use case, I guess it's better to keep it
that way.
What do you think?
Thanks,
Gabriele
[1] -
https://lore.kernel.org/lkml/20250217112317.258716-1-gmonaco@redhat.com
> Thanks,
>
> Mathieu
>
> >
> > If you fix the issue in a separate patch/commit (i.e. not just a
> > new version of
> > the same patch/commit), kindly add following tags
> > > Reported-by: kernel test robot <oliver.sang@intel.com>
> > > Closes:
> > > https://lore.kernel.org/oe-lkp/202507100606.90787fe6-lkp@intel.com
> >
> >
> > [ 26.556715][ C0] WARNING: inconsistent lock state
> > [ 26.557127][ C0] 6.16.0-rc5-00002-gd06e66c6025e #1 Tainted:
> > G T
> > [ 26.557730][ C0] --------------------------------
> > [ 26.558133][ C0] inconsistent {SOFTIRQ-ON-W} -> {IN-SOFTIRQ-
> > W} usage.
> > [ 26.558662][ C0] stdbuf/386 [HC0[0]:SC1[1]:HE1:SE0] takes:
> > [ 26.559118][ C0] ffffffff870d4438 (pgd_lock){+.?.}-{3:3}, at:
> > pgd_free (arch/x86/mm/pgtable.c:67 arch/x86/mm/pgtable.c:98
> > arch/x86/mm/pgtable.c:379)
> > [ 26.559786][ C0] {SOFTIRQ-ON-W} state was registered at:
> > [ 26.560232][ C0] mark_usage (kernel/locking/lockdep.c:4669)
> > [ 26.560561][ C0] __lock_acquire (kernel/locking/lockdep.c:5194)
> > [ 26.560929][ C0] lock_acquire (kernel/locking/lockdep.c:473
> > kernel/locking/lockdep.c:5873)
> > [ 26.561267][ C0] _raw_spin_lock
> > (include/linux/spinlock_api_smp.h:134
> > kernel/locking/spinlock.c:154)
> > [ 26.561617][ C0] pgd_alloc (arch/x86/mm/pgtable.c:86
> > arch/x86/mm/pgtable.c:353)
> > [ 26.561950][ C0] mm_init+0x64f/0xbfb
> > [ 26.562342][ C0] mm_alloc (kernel/fork.c:1109)
> > [ 26.562655][ C0] dma_resv_lockdep (drivers/dma-buf/dma-resv.c:784)
> > [ 26.563020][ C0] do_one_initcall (init/main.c:1274)
> > [ 26.563389][ C0] do_initcalls (init/main.c:1335 init/main.c:1352)
> > [ 26.563744][ C0] kernel_init_freeable (init/main.c:1588)
> > [ 26.564144][ C0] kernel_init (init/main.c:1476)
> > [ 26.564402][ C0] ret_from_fork (arch/x86/kernel/process.c:154)
> > [ 26.564633][ C0] ret_from_fork_asm (arch/x86/entry/entry_64.S:258)
> > [ 26.564871][ C0] irq event stamp: 4774
> > [ 26.565070][ C0] hardirqs last enabled at (4774):
> > _raw_spin_unlock_irq (arch/x86/include/asm/irqflags.h:42
> > arch/x86/include/asm/irqflags.h:119
> > include/linux/spinlock_api_smp.h:159 kernel/locking/spinlock.c:202)
> > [ 26.565526][ C0] hardirqs last disabled at (4773):
> > _raw_spin_lock_irq (arch/x86/include/asm/preempt.h:80
> > include/linux/spinlock_api_smp.h:118 kernel/locking/spinlock.c:170)
> > [ 26.565971][ C0] softirqs last enabled at (4256): local_bh_enable
> > (include/linux/bottom_half.h:33)
> > [ 26.566408][ C0] softirqs last disabled at (4771): __do_softirq
> > (kernel/softirq.c:614)
> > [ 26.566823][ C0]
> > [ 26.566823][ C0] other info that might help us debug this:
> > [ 26.567198][ C0] Possible unsafe locking scenario:
> > [ 26.567198][ C0]
> > [ 26.567548][ C0] CPU0
> > [ 26.567709][ C0] ----
> > [ 26.567869][ C0] lock(pgd_lock);
> > [ 26.568060][ C0] <Interrupt>
> > [ 26.568255][ C0] lock(pgd_lock);
> > [ 26.568452][ C0]
> > [ 26.568452][ C0] *** DEADLOCK ***
> > [ 26.568452][ C0]
> > [ 26.568830][ C0] 3 locks held by stdbuf/386:
> > [ 26.569056][ C0] #0: ffff888170d5c1a8 (&sb->s_type-
> > >i_mutex_key){++++}-{4:4}, at: lookup_slow (fs/namei.c:1834)
> > [ 26.569535][ C0] #1: ffff888170cf5850 (&lockref->lock){+.+.}-
> > {3:3}, at: d_alloc (include/linux/dcache.h:319 fs/dcache.c:1777)
> > [ 26.569961][ C0] #2: ffffc90000007d40 ((&mm->cid_timer)){+.-.}-
> > {0:0}, at: call_timer_fn (kernel/time/timer.c:1744)
> > [ 26.570421][ C0]
> > [ 26.570421][ C0] stack backtrace:
> > [ 26.570704][ C0] CPU: 0 UID: 0 PID: 386 Comm: stdbuf Tainted:
> > G T 6.16.0-rc5-00002-gd06e66c6025e #1
> > PREEMPT(voluntary) 39c5cbdaf5b4eb171776daa7d42daa95c0766676
> > [ 26.570716][ C0] Tainted: [T]=RANDSTRUCT
> > [ 26.570719][ C0] Call Trace:
> > [ 26.570723][ C0] <IRQ>
> > [ 26.570727][ C0] dump_stack_lvl (lib/dump_stack.c:122
> > (discriminator 4))
> > [ 26.570735][ C0] dump_stack (lib/dump_stack.c:130)
> > [ 26.570740][ C0] print_usage_bug (kernel/locking/lockdep.c:4047)
> > [ 26.570748][ C0] valid_state (kernel/locking/lockdep.c:4060)
> > [ 26.570755][ C0] mark_lock_irq (kernel/locking/lockdep.c:4270)
> > [ 26.570762][ C0] ? save_trace (kernel/locking/lockdep.c:592)
> > [ 26.570773][ C0] ? mark_lock (kernel/locking/lockdep.c:4728
> > (discriminator 3))
> > [ 26.570780][ C0] mark_lock (kernel/locking/lockdep.c:4756)
> > [ 26.570787][ C0] mark_usage (kernel/locking/lockdep.c:4645)
> > [ 26.570796][ C0] __lock_acquire (kernel/locking/lockdep.c:5194)
> > [ 26.570804][ C0] lock_acquire (kernel/locking/lockdep.c:473
> > kernel/locking/lockdep.c:5873)
> > [ 26.570811][ C0] ? pgd_free (arch/x86/mm/pgtable.c:67
> > arch/x86/mm/pgtable.c:98 arch/x86/mm/pgtable.c:379)
> > [ 26.570822][ C0] ? validate_chain (kernel/locking/lockdep.c:3826
> > kernel/locking/lockdep.c:3879)
> > [ 26.570828][ C0] ? wake_up_new_task (kernel/sched/core.c:10597)
> > [ 26.570839][ C0] _raw_spin_lock
> > (include/linux/spinlock_api_smp.h:134
> > kernel/locking/spinlock.c:154)
> > [ 26.570845][ C0] ? pgd_free (arch/x86/mm/pgtable.c:67
> > arch/x86/mm/pgtable.c:98 arch/x86/mm/pgtable.c:379)
> > [ 26.570854][ C0] pgd_free (arch/x86/mm/pgtable.c:67
> > arch/x86/mm/pgtable.c:98 arch/x86/mm/pgtable.c:379)
> > [ 26.570863][ C0] ? wake_up_new_task (kernel/sched/core.c:10597)
> > [ 26.570873][ C0] __mmdrop (kernel/fork.c:681)
> > [ 26.570882][ C0] ? wake_up_new_task (kernel/sched/core.c:10597)
> > [ 26.570891][ C0] mmdrop (include/linux/sched/mm.h:55)
> > [ 26.570901][ C0] task_mm_cid_scan (kernel/sched/core.c:10619
> > (discriminator 3))
> > [ 26.570910][ C0] ? lock_is_held (include/linux/lockdep.h:249)
> > [ 26.570918][ C0] ? wake_up_new_task (kernel/sched/core.c:10597)
> > [ 26.570928][ C0] call_timer_fn (arch/x86/include/asm/atomic.h:23
> > include/linux/atomic/atomic-arch-fallback.h:457
> > include/linux/jump_label.h:262 include/trace/events/timer.h:127
> > kernel/time/timer.c:1748)
> > [ 26.570935][ C0] ? trace_timer_base_idle
> > (kernel/time/timer.c:1724)
> > [ 26.570943][ C0] ? wake_up_new_task (kernel/sched/core.c:10597)
> > [ 26.570953][ C0] ? wake_up_new_task (kernel/sched/core.c:10597)
> > [ 26.570962][ C0] __run_timers (kernel/time/timer.c:1799
> > kernel/time/timer.c:2372)
> > [ 26.570970][ C0] ? add_timer_global (kernel/time/timer.c:2343)
> > [ 26.570977][ C0] ? __kasan_check_write (mm/kasan/shadow.c:38)
> > [ 26.570988][ C0] ? do_raw_spin_lock
> > (arch/x86/include/asm/atomic.h:107 include/linux/atomic/atomic-
> > arch-fallback.h:2170 include/linux/atomic/atomic-
> > instrumented.h:1302 include/asm-generic/qspinlock.h:111
> > kernel/locking/spinlock_debug.c:116)
> > [ 26.570996][ C0] ? __raw_spin_lock_init
> > (kernel/locking/spinlock_debug.c:114)
> > [ 26.571006][ C0] __run_timer_base (kernel/time/timer.c:2385)
> > [ 26.571014][ C0] run_timer_base (kernel/time/timer.c:2394)
> > [ 26.571021][ C0] run_timer_softirq
> > (arch/x86/include/asm/atomic.h:23 include/linux/atomic/atomic-arch-
> > fallback.h:457 include/linux/jump_label.h:262
> > kernel/time/timer.c:342 kernel/time/timer.c:2406)
> > [ 26.571028][ C0] handle_softirqs (arch/x86/include/asm/atomic.h:23
> > include/linux/atomic/atomic-arch-fallback.h:457
> > include/linux/jump_label.h:262 include/trace/events/irq.h:142
> > kernel/softirq.c:580)
> > [ 26.571039][ C0] __do_softirq (kernel/softirq.c:614)
> > [ 26.571046][ C0] __irq_exit_rcu (kernel/softirq.c:453
> > kernel/softirq.c:680)
> > [ 26.571055][ C0] irq_exit_rcu (kernel/softirq.c:698)
> > [ 26.571064][ C0] sysvec_apic_timer_interrupt
> > (arch/x86/kernel/apic/apic.c:1050 arch/x86/kernel/apic/apic.c:1050)
> > [ 26.571076][ C0] </IRQ>
> > [ 26.571078][ C0] <TASK>
> > [ 26.571081][ C0] asm_sysvec_apic_timer_interrupt
> > (arch/x86/include/asm/idtentry.h:574)
> > [ 26.571088][ C0] RIP: 0010:d_alloc (fs/dcache.c:1778)
> > [ 26.571100][ C0] Code: 8d 7c 24 50 b8 ff ff 37 00 ff 83 f8 00 00
> > 00 48 89 fa 48 c1 e0 2a 48 c1 ea 03 80 3c 02 00 74 05 e8 5f f3 f6
> > ff 49 89 5c 24 50 <49> 8d bc 24 10 01 00 00 48 8d b3 20 01 00 00 e8
> > 87 bc ff ff 4c 89
> > All code
> > ========
> > 0: 8d 7c 24 50 lea 0x50(%rsp),%edi
> > 4: b8 ff ff 37 00 mov $0x37ffff,%eax
> > 9: ff 83 f8 00 00 00 incl 0xf8(%rbx)
> > f: 48 89 fa mov %rdi,%rdx
> > 12: 48 c1 e0 2a shl $0x2a,%rax
> > 16: 48 c1 ea 03 shr $0x3,%rdx
> > 1a: 80 3c 02 00 cmpb $0x0,(%rdx,%rax,1)
> > 1e: 74 05 je 0x25
> > 20: e8 5f f3 f6 ff call 0xfffffffffff6f384
> > 25: 49 89 5c 24 50 mov %rbx,0x50(%r12)
> > 2a:* 49 8d bc 24 10 01 00 lea
> > 0x110(%r12),%rdi <-- trapping instruction
> > 31: 00
> > 32: 48 8d b3 20 01 00 00 lea 0x120(%rbx),%rsi
> > 39: e8 87 bc ff ff call 0xffffffffffffbcc5
> > 3e: 4c rex.WR
> > 3f: 89 .byte 0x89
> >
> > Code starting with the faulting instruction
> > ===========================================
> > 0: 49 8d bc 24 10 01 00 lea 0x110(%r12),%rdi
> > 7: 00
> > 8: 48 8d b3 20 01 00 00 lea 0x120(%rbx),%rsi
> > f: e8 87 bc ff ff call 0xffffffffffffbc9b
> > 14: 4c rex.WR
> > 15: 89 .byte 0x89
> >
> >
> > The kernel config and materials to reproduce are available at:
> > https://download.01.org/0day-ci/archive/20250710/202507100606.90787fe6-lkp@intel.com
> >
> >
> >
>
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH v14 2/3] sched: Move task_mm_cid_work to mm timer
2025-07-10 4:56 ` kernel test robot
2025-07-10 13:23 ` Mathieu Desnoyers
@ 2025-07-10 13:47 ` Gabriele Monaco
1 sibling, 0 replies; 7+ messages in thread
From: Gabriele Monaco @ 2025-07-10 13:47 UTC (permalink / raw)
To: kernel test robot, Mathieu Desnoyers
Cc: oe-lkp, lkp, linux-mm, linux-kernel, aubrey.li, yu.c.chen,
Andrew Morton, David Hildenbrand, Ingo Molnar, Peter Zijlstra,
Paul E. McKenney, Ingo Molnar
On Thu, 2025-07-10 at 12:56 +0800, kernel test robot wrote:
>
>
> Hello,
>
> kernel test robot noticed "WARNING:inconsistent_lock_state" on:
>
> commit: d06e66c6025e44136e6715d24c23fb821a415577 ("[PATCH v14 2/3]
> sched: Move task_mm_cid_work to mm timer")
> url:
> https://github.com/intel-lab-lkp/linux/commits/Gabriele-Monaco/sched-Add-prev_sum_exec_runtime-support-for-RT-DL-and-SCX-classes/20250707-224959
> patch link:
> https://lore.kernel.org/all/20250707144824.117014-3-gmonaco@redhat.com/
> patch subject: [PATCH v14 2/3] sched: Move task_mm_cid_work to mm
> timer
>
> in testcase: boot
>
It seems calling mmdrop in task_mm_cid_scan (previously
task_mm_cid_work) is not safe in some configurations, as it runs from
softirq context.
There are solutions to this (mmdrop asynchronously or find another way
to make sure the work runs with a valid mm), but this is getting
unnecessarily complicated.
I am going to proceed with the alternative approach of running the scan
in batches [1], integrating the findings of this series for more
predictability, while still using a task_work.
[1] -
https://lore.kernel.org/lkml/20250217112317.258716-1-gmonaco@redhat.com
Thanks,
Gabriele
> config: x86_64-randconfig-003-20250708
> compiler: gcc-11
> test machine: qemu-system-x86_64 -enable-kvm -cpu SandyBridge -smp 2
> -m 16G
>
> (please refer to attached dmesg/kmsg for entire log/backtrace)
>
>
> +-------------------------------------------------+------------+-----
> -------+
> > | 50c1dc07ee |
> > d06e66c602 |
> +-------------------------------------------------+------------+-----
> -------+
> > WARNING:inconsistent_lock_state | 0 |
> > 12 |
> > inconsistent{SOFTIRQ-ON-W}->{IN-SOFTIRQ-W}usage | 0 |
> > 12 |
> +-------------------------------------------------+------------+-----
> -------+
>
>
> If you fix the issue in a separate patch/commit (i.e. not just a new
> version of
> the same patch/commit), kindly add following tags
> > Reported-by: kernel test robot <oliver.sang@intel.com>
> > Closes:
> > https://lore.kernel.org/oe-lkp/202507100606.90787fe6-lkp@intel.com
>
>
> [ 26.556715][ C0] WARNING: inconsistent lock state
> [ 26.557127][ C0] 6.16.0-rc5-00002-gd06e66c6025e #1 Tainted:
> G T
> [ 26.557730][ C0] --------------------------------
> [ 26.558133][ C0] inconsistent {SOFTIRQ-ON-W} -> {IN-SOFTIRQ-W}
> usage.
> [ 26.558662][ C0] stdbuf/386 [HC0[0]:SC1[1]:HE1:SE0] takes:
> [ 26.559118][ C0] ffffffff870d4438 (pgd_lock){+.?.}-{3:3}, at:
> pgd_free (arch/x86/mm/pgtable.c:67 arch/x86/mm/pgtable.c:98
> arch/x86/mm/pgtable.c:379)
> [ 26.559786][ C0] {SOFTIRQ-ON-W} state was registered at:
> [ 26.560232][ C0] mark_usage (kernel/locking/lockdep.c:4669)
> [ 26.560561][ C0] __lock_acquire (kernel/locking/lockdep.c:5194)
> [ 26.560929][ C0] lock_acquire (kernel/locking/lockdep.c:473
> kernel/locking/lockdep.c:5873)
> [ 26.561267][ C0] _raw_spin_lock
> (include/linux/spinlock_api_smp.h:134 kernel/locking/spinlock.c:154)
> [ 26.561617][ C0] pgd_alloc (arch/x86/mm/pgtable.c:86
> arch/x86/mm/pgtable.c:353)
> [ 26.561950][ C0] mm_init+0x64f/0xbfb
> [ 26.562342][ C0] mm_alloc (kernel/fork.c:1109)
> [ 26.562655][ C0] dma_resv_lockdep (drivers/dma-buf/dma-resv.c:784)
> [ 26.563020][ C0] do_one_initcall (init/main.c:1274)
> [ 26.563389][ C0] do_initcalls (init/main.c:1335 init/main.c:1352)
> [ 26.563744][ C0] kernel_init_freeable (init/main.c:1588)
> [ 26.564144][ C0] kernel_init (init/main.c:1476)
> [ 26.564402][ C0] ret_from_fork (arch/x86/kernel/process.c:154)
> [ 26.564633][ C0] ret_from_fork_asm (arch/x86/entry/entry_64.S:258)
> [ 26.564871][ C0] irq event stamp: 4774
> [ 26.565070][ C0] hardirqs last enabled at (4774):
> _raw_spin_unlock_irq (arch/x86/include/asm/irqflags.h:42
> arch/x86/include/asm/irqflags.h:119
> include/linux/spinlock_api_smp.h:159 kernel/locking/spinlock.c:202)
> [ 26.565526][ C0] hardirqs last disabled at (4773):
> _raw_spin_lock_irq (arch/x86/include/asm/preempt.h:80
> include/linux/spinlock_api_smp.h:118 kernel/locking/spinlock.c:170)
> [ 26.565971][ C0] softirqs last enabled at (4256): local_bh_enable
> (include/linux/bottom_half.h:33)
> [ 26.566408][ C0] softirqs last disabled at (4771): __do_softirq
> (kernel/softirq.c:614)
> [ 26.566823][ C0]
> [ 26.566823][ C0] other info that might help us debug this:
> [ 26.567198][ C0] Possible unsafe locking scenario:
> [ 26.567198][ C0]
> [ 26.567548][ C0] CPU0
> [ 26.567709][ C0] ----
> [ 26.567869][ C0] lock(pgd_lock);
> [ 26.568060][ C0] <Interrupt>
> [ 26.568255][ C0] lock(pgd_lock);
> [ 26.568452][ C0]
> [ 26.568452][ C0] *** DEADLOCK ***
> [ 26.568452][ C0]
> [ 26.568830][ C0] 3 locks held by stdbuf/386:
> [ 26.569056][ C0] #0: ffff888170d5c1a8 (&sb->s_type-
> >i_mutex_key){++++}-{4:4}, at: lookup_slow (fs/namei.c:1834)
> [ 26.569535][ C0] #1: ffff888170cf5850 (&lockref->lock){+.+.}-{3:3},
> at: d_alloc (include/linux/dcache.h:319 fs/dcache.c:1777)
> [ 26.569961][ C0] #2: ffffc90000007d40 ((&mm->cid_timer)){+.-.}-
> {0:0}, at: call_timer_fn (kernel/time/timer.c:1744)
> [ 26.570421][ C0]
> [ 26.570421][ C0] stack backtrace:
> [ 26.570704][ C0] CPU: 0 UID: 0 PID: 386 Comm: stdbuf Tainted:
> G T 6.16.0-rc5-00002-gd06e66c6025e #1
> PREEMPT(voluntary) 39c5cbdaf5b4eb171776daa7d42daa95c0766676
> [ 26.570716][ C0] Tainted: [T]=RANDSTRUCT
> [ 26.570719][ C0] Call Trace:
> [ 26.570723][ C0] <IRQ>
> [ 26.570727][ C0] dump_stack_lvl (lib/dump_stack.c:122 (discriminator
> 4))
> [ 26.570735][ C0] dump_stack (lib/dump_stack.c:130)
> [ 26.570740][ C0] print_usage_bug (kernel/locking/lockdep.c:4047)
> [ 26.570748][ C0] valid_state (kernel/locking/lockdep.c:4060)
> [ 26.570755][ C0] mark_lock_irq (kernel/locking/lockdep.c:4270)
> [ 26.570762][ C0] ? save_trace (kernel/locking/lockdep.c:592)
> [ 26.570773][ C0] ? mark_lock (kernel/locking/lockdep.c:4728
> (discriminator 3))
> [ 26.570780][ C0] mark_lock (kernel/locking/lockdep.c:4756)
> [ 26.570787][ C0] mark_usage (kernel/locking/lockdep.c:4645)
> [ 26.570796][ C0] __lock_acquire (kernel/locking/lockdep.c:5194)
> [ 26.570804][ C0] lock_acquire (kernel/locking/lockdep.c:473
> kernel/locking/lockdep.c:5873)
> [ 26.570811][ C0] ? pgd_free (arch/x86/mm/pgtable.c:67
> arch/x86/mm/pgtable.c:98 arch/x86/mm/pgtable.c:379)
> [ 26.570822][ C0] ? validate_chain (kernel/locking/lockdep.c:3826
> kernel/locking/lockdep.c:3879)
> [ 26.570828][ C0] ? wake_up_new_task (kernel/sched/core.c:10597)
> [ 26.570839][ C0] _raw_spin_lock
> (include/linux/spinlock_api_smp.h:134 kernel/locking/spinlock.c:154)
> [ 26.570845][ C0] ? pgd_free (arch/x86/mm/pgtable.c:67
> arch/x86/mm/pgtable.c:98 arch/x86/mm/pgtable.c:379)
> [ 26.570854][ C0] pgd_free (arch/x86/mm/pgtable.c:67
> arch/x86/mm/pgtable.c:98 arch/x86/mm/pgtable.c:379)
> [ 26.570863][ C0] ? wake_up_new_task (kernel/sched/core.c:10597)
> [ 26.570873][ C0] __mmdrop (kernel/fork.c:681)
> [ 26.570882][ C0] ? wake_up_new_task (kernel/sched/core.c:10597)
> [ 26.570891][ C0] mmdrop (include/linux/sched/mm.h:55)
> [ 26.570901][ C0] task_mm_cid_scan (kernel/sched/core.c:10619
> (discriminator 3))
> [ 26.570910][ C0] ? lock_is_held (include/linux/lockdep.h:249)
> [ 26.570918][ C0] ? wake_up_new_task (kernel/sched/core.c:10597)
> [ 26.570928][ C0] call_timer_fn (arch/x86/include/asm/atomic.h:23
> include/linux/atomic/atomic-arch-fallback.h:457
> include/linux/jump_label.h:262 include/trace/events/timer.h:127
> kernel/time/timer.c:1748)
> [ 26.570935][ C0] ? trace_timer_base_idle (kernel/time/timer.c:1724)
> [ 26.570943][ C0] ? wake_up_new_task (kernel/sched/core.c:10597)
> [ 26.570953][ C0] ? wake_up_new_task (kernel/sched/core.c:10597)
> [ 26.570962][ C0] __run_timers (kernel/time/timer.c:1799
> kernel/time/timer.c:2372)
> [ 26.570970][ C0] ? add_timer_global (kernel/time/timer.c:2343)
> [ 26.570977][ C0] ? __kasan_check_write (mm/kasan/shadow.c:38)
> [ 26.570988][ C0] ? do_raw_spin_lock
> (arch/x86/include/asm/atomic.h:107 include/linux/atomic/atomic-arch-
> fallback.h:2170 include/linux/atomic/atomic-instrumented.h:1302
> include/asm-generic/qspinlock.h:111
> kernel/locking/spinlock_debug.c:116)
> [ 26.570996][ C0] ? __raw_spin_lock_init
> (kernel/locking/spinlock_debug.c:114)
> [ 26.571006][ C0] __run_timer_base (kernel/time/timer.c:2385)
> [ 26.571014][ C0] run_timer_base (kernel/time/timer.c:2394)
> [ 26.571021][ C0] run_timer_softirq (arch/x86/include/asm/atomic.h:23
> include/linux/atomic/atomic-arch-fallback.h:457
> include/linux/jump_label.h:262 kernel/time/timer.c:342
> kernel/time/timer.c:2406)
> [ 26.571028][ C0] handle_softirqs (arch/x86/include/asm/atomic.h:23
> include/linux/atomic/atomic-arch-fallback.h:457
> include/linux/jump_label.h:262 include/trace/events/irq.h:142
> kernel/softirq.c:580)
> [ 26.571039][ C0] __do_softirq (kernel/softirq.c:614)
> [ 26.571046][ C0] __irq_exit_rcu (kernel/softirq.c:453
> kernel/softirq.c:680)
> [ 26.571055][ C0] irq_exit_rcu (kernel/softirq.c:698)
> [ 26.571064][ C0] sysvec_apic_timer_interrupt
> (arch/x86/kernel/apic/apic.c:1050 arch/x86/kernel/apic/apic.c:1050)
> [ 26.571076][ C0] </IRQ>
> [ 26.571078][ C0] <TASK>
> [ 26.571081][ C0] asm_sysvec_apic_timer_interrupt
> (arch/x86/include/asm/idtentry.h:574)
> [ 26.571088][ C0] RIP: 0010:d_alloc (fs/dcache.c:1778)
> [ 26.571100][ C0] Code: 8d 7c 24 50 b8 ff ff 37 00 ff 83 f8 00 00 00
> 48 89 fa 48 c1 e0 2a 48 c1 ea 03 80 3c 02 00 74 05 e8 5f f3 f6 ff 49
> 89 5c 24 50 <49> 8d bc 24 10 01 00 00 48 8d b3 20 01 00 00 e8 87 bc
> ff ff 4c 89
> All code
> ========
> 0: 8d 7c 24 50 lea 0x50(%rsp),%edi
> 4: b8 ff ff 37 00 mov $0x37ffff,%eax
> 9: ff 83 f8 00 00 00 incl 0xf8(%rbx)
> f: 48 89 fa mov %rdi,%rdx
> 12: 48 c1 e0 2a shl $0x2a,%rax
> 16: 48 c1 ea 03 shr $0x3,%rdx
> 1a: 80 3c 02 00 cmpb $0x0,(%rdx,%rax,1)
> 1e: 74 05 je 0x25
> 20: e8 5f f3 f6 ff call 0xfffffffffff6f384
> 25: 49 89 5c 24 50 mov %rbx,0x50(%r12)
> 2a:* 49 8d bc 24 10 01 00 lea
> 0x110(%r12),%rdi <-- trapping instruction
> 31: 00
> 32: 48 8d b3 20 01 00 00 lea 0x120(%rbx),%rsi
> 39: e8 87 bc ff ff call 0xffffffffffffbcc5
> 3e: 4c rex.WR
> 3f: 89 .byte 0x89
>
> Code starting with the faulting instruction
> ===========================================
> 0: 49 8d bc 24 10 01 00 lea 0x110(%r12),%rdi
> 7: 00
> 8: 48 8d b3 20 01 00 00 lea 0x120(%rbx),%rsi
> f: e8 87 bc ff ff call 0xffffffffffffbc9b
> 14: 4c rex.WR
> 15: 89 .byte 0x89
>
>
> The kernel config and materials to reproduce are available at:
> https://download.01.org/0day-ci/archive/20250710/202507100606.90787fe6-lkp@intel.com
>
>
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH v14 2/3] sched: Move task_mm_cid_work to mm timer
2025-07-10 13:40 ` Gabriele Monaco
@ 2025-07-10 14:18 ` Mathieu Desnoyers
0 siblings, 0 replies; 7+ messages in thread
From: Mathieu Desnoyers @ 2025-07-10 14:18 UTC (permalink / raw)
To: Gabriele Monaco, kernel test robot
Cc: oe-lkp, lkp, linux-mm, linux-kernel, aubrey.li, yu.c.chen,
Andrew Morton, David Hildenbrand, Ingo Molnar, Peter Zijlstra,
Paul E. McKenney, Ingo Molnar
On 2025-07-10 09:40, Gabriele Monaco wrote:
>
>
> On Thu, 2025-07-10 at 09:23 -0400, Mathieu Desnoyers wrote:
>> On 2025-07-10 00:56, kernel test robot wrote:
>>>
>>>
>>> Hello,
>>>
>>> kernel test robot noticed "WARNING:inconsistent_lock_state" on:
>>>
>>> commit: d06e66c6025e44136e6715d24c23fb821a415577 ("[PATCH v14 2/3]
>>> sched: Move task_mm_cid_work to mm timer")
>>> url:
>>> https://github.com/intel-lab-lkp/linux/commits/Gabriele-Monaco/sched-Add-prev_sum_exec_runtime-support-for-RT-DL-and-SCX-classes/20250707-224959
>>> patch link:
>>> https://lore.kernel.org/all/20250707144824.117014-3-gmonaco@redhat.com/
>>> patch subject: [PATCH v14 2/3] sched: Move task_mm_cid_work to mm
>>> timer
>>>
>>> in testcase: boot
>>>
>>> config: x86_64-randconfig-003-20250708
>>> compiler: gcc-11
>>> test machine: qemu-system-x86_64 -enable-kvm -cpu SandyBridge -smp
>>> 2 -m 16G
>>>
>>> (please refer to attached dmesg/kmsg for entire log/backtrace)
>>>
>>>
>>> +-------------------------------------------------+------------+---
>>> ---------+
>>>> | 50c1dc07ee |
>>>> d06e66c602 |
>>> +-------------------------------------------------+------------+---
>>> ---------+
>>>> WARNING:inconsistent_lock_state | 0 |
>>>> 12 |
>>>> inconsistent{SOFTIRQ-ON-W}->{IN-SOFTIRQ-W}usage | 0 |
>>>> 12 |
>>> +-------------------------------------------------+------------+---
>>> ---------+
>>>
>>
>> I suspect the issue comes from calling mmdrop(mm) from timer context
>> in a scenario
>> where the mm_count can drop to 0.
>>
>> This causes calls to pgd_free() and such to take the pgd_lock in
>> softirq
>> context, when in other cases it's taken with softirqs enabled.
>>
>> See "mmdrop_sched()" for RT. I think we need something similar for
>> the
>> non-RT case, e.g. a:
>>
>> static inline void __mmdrop_delayed(struct rcu_head *rhp)
>> {
>> struct mm_struct *mm = container_of(rhp, struct mm_struct,
>> delayed_drop);
>>
>> __mmdrop(mm);
>> }
>>
>> static inline void mmdrop_timer(struct mm_struct *mm)
>> {
>> /* Provides a full memory barrier. See mmdrop() */
>> if (atomic_dec_and_test(&mm->mm_count))
>> call_rcu(&mm->delayed_drop, __mmdrop_delayed);
>> }
>>
>> Thoughts ?
>>
>
> Thanks for the suggestion.
>
> I noticed the problem is in the mmdrop over there, but I'm seeing this
> is getting unnecessarily complicated.
> I'm not sure it's worth going down this path, also considering pushing
> the timer wheel like this might end up in unintended effects like it
> happened with the workqueue.
>
> I am going to try the alternative approach of running the scan in
> batches [1] still using a task_work but triggering it from
> __rseq_handle_notify_resume like here.
> If that works in the original usecase, I guess it's better to keep it
> that way.
>
> What do you think?
Yes, I think the batching approach makes sense considering the overhead
of worker threads when used periodically at 100ms intervals, the
complexity that arises from doing mmdrop() from timer context, and also
the fact that doing task_mm_cid_scan (iteration on all possible cpus)
from timer context may introduce latency on configurations that
implement timers with softirqs.
It will increase the time it takes for cid compaction to react to
exiting threads, though (wrt "selftests/rseq: Add test for mm_cid
compaction"). We will probably want to update this test to take into
account that the time it takes for compaction to complete depends on
the number of possible cpus.
Thanks,
Mathieu
>
> Thanks,
> Gabriele
>
> [1] -
> https://lore.kernel.org/lkml/20250217112317.258716-1-gmonaco@redhat.com
>
>> Thanks,
>>
>> Mathieu
>>
>>>
>>> If you fix the issue in a separate patch/commit (i.e. not just a
>>> new version of
>>> the same patch/commit), kindly add following tags
>>>> Reported-by: kernel test robot <oliver.sang@intel.com>
>>>> Closes:
>>>> https://lore.kernel.org/oe-lkp/202507100606.90787fe6-lkp@intel.com
>>>
>>>
>>> [ 26.556715][ C0] WARNING: inconsistent lock state
>>> [ 26.557127][ C0] 6.16.0-rc5-00002-gd06e66c6025e #1 Tainted:
>>> G T
>>> [ 26.557730][ C0] --------------------------------
>>> [ 26.558133][ C0] inconsistent {SOFTIRQ-ON-W} -> {IN-SOFTIRQ-
>>> W} usage.
>>> [ 26.558662][ C0] stdbuf/386 [HC0[0]:SC1[1]:HE1:SE0] takes:
>>> [ 26.559118][ C0] ffffffff870d4438 (pgd_lock){+.?.}-{3:3}, at:
>>> pgd_free (arch/x86/mm/pgtable.c:67 arch/x86/mm/pgtable.c:98
>>> arch/x86/mm/pgtable.c:379)
>>> [ 26.559786][ C0] {SOFTIRQ-ON-W} state was registered at:
>>> [ 26.560232][ C0] mark_usage (kernel/locking/lockdep.c:4669)
>>> [ 26.560561][ C0] __lock_acquire (kernel/locking/lockdep.c:5194)
>>> [ 26.560929][ C0] lock_acquire (kernel/locking/lockdep.c:473
>>> kernel/locking/lockdep.c:5873)
>>> [ 26.561267][ C0] _raw_spin_lock
>>> (include/linux/spinlock_api_smp.h:134
>>> kernel/locking/spinlock.c:154)
>>> [ 26.561617][ C0] pgd_alloc (arch/x86/mm/pgtable.c:86
>>> arch/x86/mm/pgtable.c:353)
>>> [ 26.561950][ C0] mm_init+0x64f/0xbfb
>>> [ 26.562342][ C0] mm_alloc (kernel/fork.c:1109)
>>> [ 26.562655][ C0] dma_resv_lockdep (drivers/dma-buf/dma-resv.c:784)
>>> [ 26.563020][ C0] do_one_initcall (init/main.c:1274)
>>> [ 26.563389][ C0] do_initcalls (init/main.c:1335 init/main.c:1352)
>>> [ 26.563744][ C0] kernel_init_freeable (init/main.c:1588)
>>> [ 26.564144][ C0] kernel_init (init/main.c:1476)
>>> [ 26.564402][ C0] ret_from_fork (arch/x86/kernel/process.c:154)
>>> [ 26.564633][ C0] ret_from_fork_asm (arch/x86/entry/entry_64.S:258)
>>> [ 26.564871][ C0] irq event stamp: 4774
>>> [ 26.565070][ C0] hardirqs last enabled at (4774):
>>> _raw_spin_unlock_irq (arch/x86/include/asm/irqflags.h:42
>>> arch/x86/include/asm/irqflags.h:119
>>> include/linux/spinlock_api_smp.h:159 kernel/locking/spinlock.c:202)
>>> [ 26.565526][ C0] hardirqs last disabled at (4773):
>>> _raw_spin_lock_irq (arch/x86/include/asm/preempt.h:80
>>> include/linux/spinlock_api_smp.h:118 kernel/locking/spinlock.c:170)
>>> [ 26.565971][ C0] softirqs last enabled at (4256): local_bh_enable
>>> (include/linux/bottom_half.h:33)
>>> [ 26.566408][ C0] softirqs last disabled at (4771): __do_softirq
>>> (kernel/softirq.c:614)
>>> [ 26.566823][ C0]
>>> [ 26.566823][ C0] other info that might help us debug this:
>>> [ 26.567198][ C0] Possible unsafe locking scenario:
>>> [ 26.567198][ C0]
>>> [ 26.567548][ C0] CPU0
>>> [ 26.567709][ C0] ----
>>> [ 26.567869][ C0] lock(pgd_lock);
>>> [ 26.568060][ C0] <Interrupt>
>>> [ 26.568255][ C0] lock(pgd_lock);
>>> [ 26.568452][ C0]
>>> [ 26.568452][ C0] *** DEADLOCK ***
>>> [ 26.568452][ C0]
>>> [ 26.568830][ C0] 3 locks held by stdbuf/386:
>>> [ 26.569056][ C0] #0: ffff888170d5c1a8 (&sb->s_type-
>>>> i_mutex_key){++++}-{4:4}, at: lookup_slow (fs/namei.c:1834)
>>> [ 26.569535][ C0] #1: ffff888170cf5850 (&lockref->lock){+.+.}-
>>> {3:3}, at: d_alloc (include/linux/dcache.h:319 fs/dcache.c:1777)
>>> [ 26.569961][ C0] #2: ffffc90000007d40 ((&mm->cid_timer)){+.-.}-
>>> {0:0}, at: call_timer_fn (kernel/time/timer.c:1744)
>>> [ 26.570421][ C0]
>>> [ 26.570421][ C0] stack backtrace:
>>> [ 26.570704][ C0] CPU: 0 UID: 0 PID: 386 Comm: stdbuf Tainted:
>>> G T 6.16.0-rc5-00002-gd06e66c6025e #1
>>> PREEMPT(voluntary) 39c5cbdaf5b4eb171776daa7d42daa95c0766676
>>> [ 26.570716][ C0] Tainted: [T]=RANDSTRUCT
>>> [ 26.570719][ C0] Call Trace:
>>> [ 26.570723][ C0] <IRQ>
>>> [ 26.570727][ C0] dump_stack_lvl (lib/dump_stack.c:122
>>> (discriminator 4))
>>> [ 26.570735][ C0] dump_stack (lib/dump_stack.c:130)
>>> [ 26.570740][ C0] print_usage_bug (kernel/locking/lockdep.c:4047)
>>> [ 26.570748][ C0] valid_state (kernel/locking/lockdep.c:4060)
>>> [ 26.570755][ C0] mark_lock_irq (kernel/locking/lockdep.c:4270)
>>> [ 26.570762][ C0] ? save_trace (kernel/locking/lockdep.c:592)
>>> [ 26.570773][ C0] ? mark_lock (kernel/locking/lockdep.c:4728
>>> (discriminator 3))
>>> [ 26.570780][ C0] mark_lock (kernel/locking/lockdep.c:4756)
>>> [ 26.570787][ C0] mark_usage (kernel/locking/lockdep.c:4645)
>>> [ 26.570796][ C0] __lock_acquire (kernel/locking/lockdep.c:5194)
>>> [ 26.570804][ C0] lock_acquire (kernel/locking/lockdep.c:473
>>> kernel/locking/lockdep.c:5873)
>>> [ 26.570811][ C0] ? pgd_free (arch/x86/mm/pgtable.c:67
>>> arch/x86/mm/pgtable.c:98 arch/x86/mm/pgtable.c:379)
>>> [ 26.570822][ C0] ? validate_chain (kernel/locking/lockdep.c:3826
>>> kernel/locking/lockdep.c:3879)
>>> [ 26.570828][ C0] ? wake_up_new_task (kernel/sched/core.c:10597)
>>> [ 26.570839][ C0] _raw_spin_lock
>>> (include/linux/spinlock_api_smp.h:134
>>> kernel/locking/spinlock.c:154)
>>> [ 26.570845][ C0] ? pgd_free (arch/x86/mm/pgtable.c:67
>>> arch/x86/mm/pgtable.c:98 arch/x86/mm/pgtable.c:379)
>>> [ 26.570854][ C0] pgd_free (arch/x86/mm/pgtable.c:67
>>> arch/x86/mm/pgtable.c:98 arch/x86/mm/pgtable.c:379)
>>> [ 26.570863][ C0] ? wake_up_new_task (kernel/sched/core.c:10597)
>>> [ 26.570873][ C0] __mmdrop (kernel/fork.c:681)
>>> [ 26.570882][ C0] ? wake_up_new_task (kernel/sched/core.c:10597)
>>> [ 26.570891][ C0] mmdrop (include/linux/sched/mm.h:55)
>>> [ 26.570901][ C0] task_mm_cid_scan (kernel/sched/core.c:10619
>>> (discriminator 3))
>>> [ 26.570910][ C0] ? lock_is_held (include/linux/lockdep.h:249)
>>> [ 26.570918][ C0] ? wake_up_new_task (kernel/sched/core.c:10597)
>>> [ 26.570928][ C0] call_timer_fn (arch/x86/include/asm/atomic.h:23
>>> include/linux/atomic/atomic-arch-fallback.h:457
>>> include/linux/jump_label.h:262 include/trace/events/timer.h:127
>>> kernel/time/timer.c:1748)
>>> [ 26.570935][ C0] ? trace_timer_base_idle
>>> (kernel/time/timer.c:1724)
>>> [ 26.570943][ C0] ? wake_up_new_task (kernel/sched/core.c:10597)
>>> [ 26.570953][ C0] ? wake_up_new_task (kernel/sched/core.c:10597)
>>> [ 26.570962][ C0] __run_timers (kernel/time/timer.c:1799
>>> kernel/time/timer.c:2372)
>>> [ 26.570970][ C0] ? add_timer_global (kernel/time/timer.c:2343)
>>> [ 26.570977][ C0] ? __kasan_check_write (mm/kasan/shadow.c:38)
>>> [ 26.570988][ C0] ? do_raw_spin_lock
>>> (arch/x86/include/asm/atomic.h:107 include/linux/atomic/atomic-
>>> arch-fallback.h:2170 include/linux/atomic/atomic-
>>> instrumented.h:1302 include/asm-generic/qspinlock.h:111
>>> kernel/locking/spinlock_debug.c:116)
>>> [ 26.570996][ C0] ? __raw_spin_lock_init
>>> (kernel/locking/spinlock_debug.c:114)
>>> [ 26.571006][ C0] __run_timer_base (kernel/time/timer.c:2385)
>>> [ 26.571014][ C0] run_timer_base (kernel/time/timer.c:2394)
>>> [ 26.571021][ C0] run_timer_softirq
>>> (arch/x86/include/asm/atomic.h:23 include/linux/atomic/atomic-arch-
>>> fallback.h:457 include/linux/jump_label.h:262
>>> kernel/time/timer.c:342 kernel/time/timer.c:2406)
>>> [ 26.571028][ C0] handle_softirqs (arch/x86/include/asm/atomic.h:23
>>> include/linux/atomic/atomic-arch-fallback.h:457
>>> include/linux/jump_label.h:262 include/trace/events/irq.h:142
>>> kernel/softirq.c:580)
>>> [ 26.571039][ C0] __do_softirq (kernel/softirq.c:614)
>>> [ 26.571046][ C0] __irq_exit_rcu (kernel/softirq.c:453
>>> kernel/softirq.c:680)
>>> [ 26.571055][ C0] irq_exit_rcu (kernel/softirq.c:698)
>>> [ 26.571064][ C0] sysvec_apic_timer_interrupt
>>> (arch/x86/kernel/apic/apic.c:1050 arch/x86/kernel/apic/apic.c:1050)
>>> [ 26.571076][ C0] </IRQ>
>>> [ 26.571078][ C0] <TASK>
>>> [ 26.571081][ C0] asm_sysvec_apic_timer_interrupt
>>> (arch/x86/include/asm/idtentry.h:574)
>>> [ 26.571088][ C0] RIP: 0010:d_alloc (fs/dcache.c:1778)
>>> [ 26.571100][ C0] Code: 8d 7c 24 50 b8 ff ff 37 00 ff 83 f8 00 00
>>> 00 48 89 fa 48 c1 e0 2a 48 c1 ea 03 80 3c 02 00 74 05 e8 5f f3 f6
>>> ff 49 89 5c 24 50 <49> 8d bc 24 10 01 00 00 48 8d b3 20 01 00 00 e8
>>> 87 bc ff ff 4c 89
>>> All code
>>> ========
>>> 0: 8d 7c 24 50 lea 0x50(%rsp),%edi
>>> 4: b8 ff ff 37 00 mov $0x37ffff,%eax
>>> 9: ff 83 f8 00 00 00 incl 0xf8(%rbx)
>>> f: 48 89 fa mov %rdi,%rdx
>>> 12: 48 c1 e0 2a shl $0x2a,%rax
>>> 16: 48 c1 ea 03 shr $0x3,%rdx
>>> 1a: 80 3c 02 00 cmpb $0x0,(%rdx,%rax,1)
>>> 1e: 74 05 je 0x25
>>> 20: e8 5f f3 f6 ff call 0xfffffffffff6f384
>>> 25: 49 89 5c 24 50 mov %rbx,0x50(%r12)
>>> 2a:* 49 8d bc 24 10 01 00 lea
>>> 0x110(%r12),%rdi <-- trapping instruction
>>> 31: 00
>>> 32: 48 8d b3 20 01 00 00 lea 0x120(%rbx),%rsi
>>> 39: e8 87 bc ff ff call 0xffffffffffffbcc5
>>> 3e: 4c rex.WR
>>> 3f: 89 .byte 0x89
>>>
>>> Code starting with the faulting instruction
>>> ===========================================
>>> 0: 49 8d bc 24 10 01 00 lea 0x110(%r12),%rdi
>>> 7: 00
>>> 8: 48 8d b3 20 01 00 00 lea 0x120(%rbx),%rsi
>>> f: e8 87 bc ff ff call 0xffffffffffffbc9b
>>> 14: 4c rex.WR
>>> 15: 89 .byte 0x89
>>>
>>>
>>> The kernel config and materials to reproduce are available at:
>>> https://download.01.org/0day-ci/archive/20250710/202507100606.90787fe6-lkp@intel.com
>>>
>>>
>>>
>>
>
--
Mathieu Desnoyers
EfficiOS Inc.
https://www.efficios.com
^ permalink raw reply [flat|nested] 7+ messages in thread
end of thread, other threads:[~2025-07-10 14:18 UTC | newest]
Thread overview: 7+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
[not found] <20250707144824.117014-1-gmonaco@redhat.com>
2025-07-07 14:48 ` [PATCH v14 2/3] sched: Move task_mm_cid_work to mm timer Gabriele Monaco
2025-07-07 15:19 ` Mathieu Desnoyers
2025-07-10 4:56 ` kernel test robot
2025-07-10 13:23 ` Mathieu Desnoyers
2025-07-10 13:40 ` Gabriele Monaco
2025-07-10 14:18 ` Mathieu Desnoyers
2025-07-10 13:47 ` Gabriele Monaco
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).