From: kernel test robot <lkp@intel.com>
To: Gregory Price <gourry@gourry.net>
Cc: llvm@lists.linux.dev, oe-kbuild-all@lists.linux.dev,
Gregory Price <gourry@gourry.net>
Subject: [gourryinverse:scratch/gourry/isolation/linus 6/10] kernel/cgroup/cpuset.c:4339:13: warning: variable 'allowed' is used uninitialized whenever 'if' condition is false
Date: Tue, 11 Nov 2025 22:29:20 +0800 [thread overview]
Message-ID: <202511112256.k3RvmZ7Q-lkp@intel.com> (raw)
tree: https://github.com/gourryinverse/linux scratch/gourry/isolation/linus
head: c74a85115777a0e4ff7ba671c4631adb4ba32ecc
commit: 81b4628c13856d1bc84678ed6655180ca0554688 [6/10] cpuset: introduce cpuset.mems.default
config: x86_64-kexec (https://download.01.org/0day-ci/archive/20251111/202511112256.k3RvmZ7Q-lkp@intel.com/config)
compiler: clang version 20.1.8 (https://github.com/llvm/llvm-project 87f0227cb60147a26a1eeb4fb06e3b505e9c7261)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20251111/202511112256.k3RvmZ7Q-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202511112256.k3RvmZ7Q-lkp@intel.com/
All warnings (new ones prefixed by >>):
>> kernel/cgroup/cpuset.c:4339:13: warning: variable 'allowed' is used uninitialized whenever 'if' condition is false [-Wsometimes-uninitialized]
4339 | } else if (node_isset(node, current->mems_default))
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
include/linux/nodemask.h:149:36: note: expanded from macro 'node_isset'
149 | #define node_isset(node, nodemask) test_bit((node), (nodemask).bits)
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
include/linux/bitops.h:60:29: note: expanded from macro 'test_bit'
60 | #define test_bit(nr, addr) bitop(_test_bit, nr, addr)
| ^~~~~~~~~~~~~~~~~~~~~~~~~~
include/linux/bitops.h:43:2: note: expanded from macro 'bitop'
43 | ((__builtin_constant_p(nr) && \
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
44 | __builtin_constant_p((uintptr_t)(addr) != (uintptr_t)NULL) && \
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
45 | (uintptr_t)(addr) != (uintptr_t)NULL && \
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
46 | __builtin_constant_p(*(const unsigned long *)(addr))) ? \
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
47 | const##op(nr, addr) : op(nr, addr))
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
kernel/cgroup/cpuset.c:4342:6: note: uninitialized use occurs here
4342 | if (allowed)
| ^~~~~~~
kernel/cgroup/cpuset.c:4339:9: note: remove the 'if' if its condition is always true
4339 | } else if (node_isset(node, current->mems_default))
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
4340 | allowed = true;
kernel/cgroup/cpuset.c:4327:14: note: initialize the variable 'allowed' to silence this warning
4327 | bool allowed; /* is allocation in zone z allowed? */
| ^
| = 0
1 warning generated.
vim +4339 kernel/cgroup/cpuset.c
4282
4283 /*
4284 * cpuset_current_node_allowed - Can current task allocate on a memory node?
4285 * @node: is this an allowed node?
4286 * @gfp_mask: memory allocation flags
4287 *
4288 * If we're in interrupt, yes, we can always allocate. If @node is set in
4289 * current's mems_default, yes. If it's not a __GFP_HARDWALL request and this
4290 * node is set in the nearest hardwalled cpuset ancestor to current's cpuset,
4291 * yes. If current has access to memory reserves as an oom victim, yes.
4292 * Otherwise, no.
4293 *
4294 * GFP_USER allocations are marked with the __GFP_HARDWALL bit,
4295 * and do not allow allocations outside the current tasks cpuset
4296 * unless the task has been OOM killed.
4297 * GFP_KERNEL allocations are not so marked, so can escape to the
4298 * nearest enclosing hardwalled ancestor cpuset.
4299 *
4300 * Scanning up parent cpusets requires callback_lock. The
4301 * __alloc_pages() routine only calls here with __GFP_HARDWALL bit
4302 * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the
4303 * current tasks mems_default came up empty on the first pass over
4304 * the zonelist. So only GFP_KERNEL allocations, if all nodes in the
4305 * cpuset are short of memory, might require taking the callback_lock.
4306 *
4307 * The first call here from mm/page_alloc:get_page_from_freelist()
4308 * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets,
4309 * so no allocation on a node outside the cpuset is allowed (unless
4310 * in interrupt, of course).
4311 *
4312 * The second pass through get_page_from_freelist() doesn't even call
4313 * here for GFP_ATOMIC calls. For those calls, the __alloc_pages()
4314 * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set
4315 * in alloc_flags. That logic and the checks below have the combined
 4316  *    effect that:
4317 * in_interrupt - any node ok (current task context irrelevant)
4318 * GFP_ATOMIC - any node ok
4319 * tsk_is_oom_victim - any node ok
4320 * GFP_KERNEL - any node in enclosing hardwalled cpuset ok
4321 * GFP_USER - only nodes in current tasks mems allowed ok.
4322 * GFP_PROTECTED - allow non-sysram nodes in mems_allowed
4323 */
4324 bool cpuset_current_node_allowed(int node, gfp_t gfp_mask)
4325 {
4326 struct cpuset *cs; /* current cpuset ancestors */
4327 bool allowed; /* is allocation in zone z allowed? */
4328 unsigned long flags;
4329 bool protected_node = gfp_mask & __GFP_PROTECTED;
4330
4331 if (in_interrupt())
4332 return true;
4333
4334 if (protected_node) {
4335 rcu_read_lock();
4336 cs = task_cs(current);
4337 allowed = node_isset(node, cs->mems_allowed);
4338 rcu_read_unlock();
> 4339 } else if (node_isset(node, current->mems_default))
4340 allowed = true;
4341
4342 if (allowed)
4343 return allowed;
4344
4345 /*
4346 * Allow tasks that have access to memory reserves because they have
4347 * been OOM killed to get memory anywhere.
4348 */
4349 if (unlikely(tsk_is_oom_victim(current)))
4350 return true;
4351 if (gfp_mask & __GFP_HARDWALL) /* If hardwall request, stop here */
4352 return false;
4353
4354 if (current->flags & PF_EXITING) /* Let dying task have memory */
4355 return true;
4356
4357 /* Not hardwall and node outside mems_allowed: scan up cpusets */
4358 spin_lock_irqsave(&callback_lock, flags);
4359
4360 cs = nearest_hardwall_ancestor(task_cs(current));
4361 allowed = node_isset(node, cs->mems_allowed); /* include protected */
4362 if (!protected_node && !nodes_empty(mt_sysram_nodelist))
4363 allowed &= node_isset(node, mt_sysram_nodelist);
4364
4365 spin_unlock_irqrestore(&callback_lock, flags);
4366 return allowed;
4367 }
4368
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
reply other threads:[~2025-11-11 14:29 UTC|newest]
Thread overview: [no followups] expand[flat|nested] mbox.gz Atom feed
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=202511112256.k3RvmZ7Q-lkp@intel.com \
--to=lkp@intel.com \
--cc=gourry@gourry.net \
--cc=llvm@lists.linux.dev \
--cc=oe-kbuild-all@lists.linux.dev \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox