From: kernel test robot <lkp@intel.com>
To: Dmitry Vyukov <dvyukov@google.com>
Cc: llvm@lists.linux.dev, oe-kbuild-all@lists.linux.dev
Subject: [dvyukov:pr/4 1/1] kernel/sched/membarrier.c:337:28: error: no member named 'fence_seq' in 'struct mm_mm_cid'
Date: Tue, 24 Mar 2026 09:29:41 +0800 [thread overview]
Message-ID: <202603240914.TYyGqkoC-lkp@intel.com> (raw)
tree: https://github.com/dvyukov/linux pr/4
head: 1284e3723047cb5afd247f75c53de43efc18db82
commit: 1284e3723047cb5afd247f75c53de43efc18db82 [1/1] rseq: always overwrite cpu_id after membarrier
config: hexagon-allnoconfig (https://download.01.org/0day-ci/archive/20260324/202603240914.TYyGqkoC-lkp@intel.com/config)
compiler: clang version 23.0.0git (https://github.com/llvm/llvm-project 054e11d1a17e5ba88bb1a8ef32fad3346e80b186)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20260324/202603240914.TYyGqkoC-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add the following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202603240914.TYyGqkoC-lkp@intel.com/
All errors (new ones prefixed by >>):
In file included from kernel/sched/build_utility.c:97:
>> kernel/sched/membarrier.c:337:28: error: no member named 'fence_seq' in 'struct mm_mm_cid'
337 | atomic64_inc(&mm->mm_cid.fence_seq);
| ~~~~~~~~~~ ^
1 error generated.
vim +337 kernel/sched/membarrier.c
316
317 static int membarrier_private_expedited(int flags, int cpu_id)
318 {
319 cpumask_var_t tmpmask;
320 struct mm_struct *mm = current->mm;
321 smp_call_func_t ipi_func = ipi_mb;
322
323 if (flags == MEMBARRIER_FLAG_SYNC_CORE) {
324 if (!IS_ENABLED(CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE))
325 return -EINVAL;
326 if (!(atomic_read(&mm->membarrier_state) &
327 MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY))
328 return -EPERM;
329 ipi_func = ipi_sync_core;
330 prepare_sync_core_cmd(mm);
331 } else if (flags == MEMBARRIER_FLAG_RSEQ) {
332 if (!IS_ENABLED(CONFIG_RSEQ))
333 return -EINVAL;
334 if (!(atomic_read(&mm->membarrier_state) &
335 MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY))
336 return -EPERM;
> 337 atomic64_inc(&mm->mm_cid.fence_seq);
338 rseq_force_update();
339 ipi_func = ipi_rseq;
340 } else {
341 WARN_ON_ONCE(flags);
342 if (!(atomic_read(&mm->membarrier_state) &
343 MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY))
344 return -EPERM;
345 }
346
347 if (flags != MEMBARRIER_FLAG_SYNC_CORE &&
348 (atomic_read(&mm->mm_users) == 1 || num_online_cpus() == 1))
349 return 0;
350
351 /*
352 * Matches memory barriers after rq->curr modification in
353 * scheduler.
354 *
355 * On RISC-V, this barrier pairing is also needed for the
356 * SYNC_CORE command when switching between processes, cf.
357 * the inline comments in membarrier_arch_switch_mm().
358 */
359 smp_mb(); /* system call entry is not a mb. */
360
361 if (cpu_id < 0 && !zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
362 return -ENOMEM;
363
364 SERIALIZE_IPI();
365 cpus_read_lock();
366
367 if (cpu_id >= 0) {
368 struct task_struct *p;
369
370 if (cpu_id >= nr_cpu_ids || !cpu_online(cpu_id))
371 goto out;
372 rcu_read_lock();
373 p = rcu_dereference(cpu_rq(cpu_id)->curr);
374 if (!p || p->mm != mm) {
375 rcu_read_unlock();
376 goto out;
377 }
378 rcu_read_unlock();
379 } else {
380 int cpu;
381
382 rcu_read_lock();
383 for_each_online_cpu(cpu) {
384 struct task_struct *p;
385
386 p = rcu_dereference(cpu_rq(cpu)->curr);
387 if (p && p->mm == mm)
388 __cpumask_set_cpu(cpu, tmpmask);
389 }
390 rcu_read_unlock();
391 }
392
393 if (cpu_id >= 0) {
394 /*
395 * smp_call_function_single() will call ipi_func() if cpu_id
396 * is the calling CPU.
397 */
398 smp_call_function_single(cpu_id, ipi_func, NULL, 1);
399 } else {
400 /*
401 * For regular membarrier, we can save a few cycles by
402 * skipping the current cpu -- we're about to do smp_mb()
403 * below, and if we migrate to a different cpu, this cpu
404 * and the new cpu will execute a full barrier in the
405 * scheduler.
406 *
407 * For SYNC_CORE, we do need a barrier on the current cpu --
408 * otherwise, if we are migrated and replaced by a different
409 * task in the same mm just before, during, or after
410 * membarrier, we will end up with some thread in the mm
411 * running without a core sync.
412 *
413 * For RSEQ, don't invoke rseq_sched_switch_event() on the
414 * caller. User code is not supposed to issue syscalls at
415 * all from inside an rseq critical section.
416 */
417 if (flags != MEMBARRIER_FLAG_SYNC_CORE) {
418 preempt_disable();
419 smp_call_function_many(tmpmask, ipi_func, NULL, true);
420 preempt_enable();
421 } else {
422 on_each_cpu_mask(tmpmask, ipi_func, NULL, true);
423 }
424 }
425
426 out:
427 if (cpu_id < 0)
428 free_cpumask_var(tmpmask);
429 cpus_read_unlock();
430
431 /*
432 * Memory barrier on the caller thread _after_ we finished
433 * waiting for the last IPI. Matches memory barriers before
434 * rq->curr modification in scheduler.
435 */
436 smp_mb(); /* exit from system call is not a mb */
437
438 return 0;
439 }
440
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
reply other threads:[~2026-03-24 1:30 UTC|newest]
Thread overview: [no followups] expand[flat|nested] mbox.gz Atom feed
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=202603240914.TYyGqkoC-lkp@intel.com \
--to=lkp@intel.com \
--cc=dvyukov@google.com \
--cc=llvm@lists.linux.dev \
--cc=oe-kbuild-all@lists.linux.dev \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox