public inbox for llvm@lists.linux.dev
 help / color / mirror / Atom feed
* [kas:uffd/rfc-v2 3/7] mm/memory.c:6347:7: error: call to undeclared function 'userfaultfd_rwp'; ISO C99 and later do not support implicit function declarations
@ 2026-04-18 10:55 kernel test robot
  0 siblings, 0 replies; only message in thread
From: kernel test robot @ 2026-04-18 10:55 UTC (permalink / raw)
  To: Kiryl Shutsemau (Meta); +Cc: llvm, oe-kbuild-all

tree:   https://git.kernel.org/pub/scm/linux/kernel/git/kas/linux.git uffd/rfc-v2
head:   8f55f0ce2de130272cb80eee2434a185922b3340
commit: a5a0f28bfc37d5fb52d9268238be0f8280ca737c [3/7] userfaultfd: add UFFDIO_REGISTER_MODE_RWP
config: hexagon-allnoconfig (https://download.01.org/0day-ci/archive/20260418/202604181821.IQDdBaRk-lkp@intel.com/config)
compiler: clang version 23.0.0git (https://github.com/llvm/llvm-project 5bac06718f502014fade905512f1d26d578a18f3)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20260418/202604181821.IQDdBaRk-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add the following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202604181821.IQDdBaRk-lkp@intel.com/

All error/warnings (new ones prefixed by >>):

>> mm/memory.c:1485:26: warning: shift count >= width of type [-Wshift-count-overflow]
    1485 |         if (dst_vma->vm_flags & VM_COPY_ON_FORK)
         |                                 ^~~~~~~~~~~~~~~
   include/linux/mm.h:643:65: note: expanded from macro 'VM_COPY_ON_FORK'
     643 | #define VM_COPY_ON_FORK (VM_PFNMAP | VM_MIXEDMAP | VM_UFFD_WP | VM_UFFD_RWP | \
         |                                                                 ^~~~~~~~~~~
   include/linux/mm.h:501:21: note: expanded from macro 'VM_UFFD_RWP'
     501 | #define VM_UFFD_RWP     INIT_VM_FLAG(UFFD_RWP)
         |                         ^~~~~~~~~~~~~~~~~~~~~~
   include/linux/mm.h:402:28: note: expanded from macro 'INIT_VM_FLAG'
     402 | #define INIT_VM_FLAG(name) BIT((__force int) VMA_ ## name ## _BIT)
         |                            ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
   include/vdso/bits.h:7:26: note: expanded from macro 'BIT'
       7 | #define BIT(nr)                 (UL(1) << (nr))
         |                                        ^  ~~~~
   mm/memory.c:6069:31: warning: shift count >= width of type [-Wshift-count-overflow]
    6069 |         return handle_userfault(vmf, VM_UFFD_RWP);
         |                                      ^~~~~~~~~~~
   include/linux/mm.h:501:21: note: expanded from macro 'VM_UFFD_RWP'
     501 | #define VM_UFFD_RWP     INIT_VM_FLAG(UFFD_RWP)
         |                         ^~~~~~~~~~~~~~~~~~~~~~
   include/linux/mm.h:402:28: note: expanded from macro 'INIT_VM_FLAG'
     402 | #define INIT_VM_FLAG(name) BIT((__force int) VMA_ ## name ## _BIT)
         |                            ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
   include/vdso/bits.h:7:26: note: expanded from macro 'BIT'
       7 | #define BIT(nr)                 (UL(1) << (nr))
         |                                        ^  ~~~~
>> mm/memory.c:6347:7: error: call to undeclared function 'userfaultfd_rwp'; ISO C99 and later do not support implicit function declarations [-Wimplicit-function-declaration]
    6347 |                 if (userfaultfd_rwp(vmf->vma))
         |                     ^
   mm/memory.c:6347:7: note: did you mean 'userfaultfd_wp'?
   include/linux/userfaultfd_k.h:369:20: note: 'userfaultfd_wp' declared here
     369 | static inline bool userfaultfd_wp(struct vm_area_struct *vma)
         |                    ^
   mm/memory.c:6465:8: error: call to undeclared function 'userfaultfd_rwp'; ISO C99 and later do not support implicit function declarations [-Wimplicit-function-declaration]
    6465 |                         if (userfaultfd_rwp(vma))
         |                             ^
   2 warnings and 2 errors generated.


vim +/userfaultfd_rwp +6347 mm/memory.c

  6242	
  6243	/*
  6244	 * The page faults may be spurious because of the racy access to the
  6245	 * page table.  For example, a non-populated virtual page is accessed
  6246	 * on 2 CPUs simultaneously, thus the page faults are triggered on
  6247	 * both CPUs.  However, it's possible that one CPU (say CPU A) cannot
  6248	 * find the reason for the page fault if the other CPU (say CPU B) has
  6249	 * changed the page table before the PTE is checked on CPU A.  Most of
  6250	 * the time, the spurious page faults can be ignored safely.  However,
  6251	 * if the page fault is for the write access, it's possible that a
  6252	 * stale read-only TLB entry exists in the local CPU and needs to be
  6253	 * flushed on some architectures.  This is called the spurious page
  6254	 * fault fixing.
  6255	 *
  6256	 * Note: flush_tlb_fix_spurious_fault() is defined as flush_tlb_page()
  6257	 * by default and used as such on most architectures, while
  6258	 * flush_tlb_fix_spurious_fault_pmd() is defined as NOP by default and
  6259	 * used as such on most architectures.
  6260	 */
  6261	static void fix_spurious_fault(struct vm_fault *vmf,
  6262				       enum pgtable_level ptlevel)
  6263	{
  6264		/* Skip spurious TLB flush for retried page fault */
  6265		if (vmf->flags & FAULT_FLAG_TRIED)
  6266			return;
  6267		/*
  6268		 * This is needed only for protection faults but the arch code
  6269		 * is not yet telling us if this is a protection fault or not.
  6270		 * This still avoids useless tlb flushes for .text page faults
  6271		 * with threads.
  6272		 */
  6273		if (vmf->flags & FAULT_FLAG_WRITE) {
  6274			if (ptlevel == PGTABLE_LEVEL_PTE)
  6275				flush_tlb_fix_spurious_fault(vmf->vma, vmf->address,
  6276							     vmf->pte);
  6277			else
  6278				flush_tlb_fix_spurious_fault_pmd(vmf->vma, vmf->address,
  6279								 vmf->pmd);
  6280		}
  6281	}
  6282	/*
  6283	 * These routines also need to handle stuff like marking pages dirty
  6284	 * and/or accessed for architectures that don't do it in hardware (most
  6285	 * RISC architectures).  The early dirtying is also good on the i386.
  6286	 *
  6287	 * There is also a hook called "update_mmu_cache()" that architectures
  6288	 * with external mmu caches can use to update those (ie the Sparc or
  6289	 * PowerPC hashed page tables that act as extended TLBs).
  6290	 *
  6291	 * We enter with non-exclusive mmap_lock (to exclude vma changes, but allow
  6292	 * concurrent faults).
  6293	 *
  6294	 * The mmap_lock may have been released depending on flags and our return value.
  6295	 * See filemap_fault() and __folio_lock_or_retry().
  6296	 */
  6297	static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
  6298	{
  6299		pte_t entry;
  6300	
  6301		if (unlikely(pmd_none(*vmf->pmd))) {
  6302			/*
  6303			 * Leave __pte_alloc() until later: because vm_ops->fault may
  6304			 * want to allocate huge page, and if we expose page table
  6305			 * for an instant, it will be difficult to retract from
  6306			 * concurrent faults and from rmap lookups.
  6307			 */
  6308			vmf->pte = NULL;
  6309			vmf->flags &= ~FAULT_FLAG_ORIG_PTE_VALID;
  6310		} else {
  6311			pmd_t dummy_pmdval;
  6312	
  6313			/*
  6314			 * A regular pmd is established and it can't morph into a huge
  6315			 * pmd by anon khugepaged, since that takes mmap_lock in write
  6316			 * mode; but shmem or file collapse to THP could still morph
  6317			 * it into a huge pmd: just retry later if so.
  6318			 *
  6319			 * Use the maywrite version to indicate that vmf->pte may be
  6320			 * modified, but since we will use pte_same() to detect the
  6321			 * change of the !pte_none() entry, there is no need to recheck
  6322			 * the pmdval. Here we choose to pass a dummy variable instead
  6323			 * of NULL, which helps new user think about why this place is
  6324			 * special.
  6325			 */
  6326			vmf->pte = pte_offset_map_rw_nolock(vmf->vma->vm_mm, vmf->pmd,
  6327							    vmf->address, &dummy_pmdval,
  6328							    &vmf->ptl);
  6329			if (unlikely(!vmf->pte))
  6330				return 0;
  6331			vmf->orig_pte = ptep_get_lockless(vmf->pte);
  6332			vmf->flags |= FAULT_FLAG_ORIG_PTE_VALID;
  6333	
  6334			if (pte_none(vmf->orig_pte)) {
  6335				pte_unmap(vmf->pte);
  6336				vmf->pte = NULL;
  6337			}
  6338		}
  6339	
  6340		if (!vmf->pte)
  6341			return do_pte_missing(vmf);
  6342	
  6343		if (!pte_present(vmf->orig_pte))
  6344			return do_swap_page(vmf);
  6345	
  6346		if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma)) {
> 6347			if (userfaultfd_rwp(vmf->vma))
  6348				return do_uffd_rwp(vmf);
  6349			return do_numa_page(vmf);
  6350		}
  6351	
  6352		spin_lock(vmf->ptl);
  6353		entry = vmf->orig_pte;
  6354		if (unlikely(!pte_same(ptep_get(vmf->pte), entry))) {
  6355			update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
  6356			goto unlock;
  6357		}
  6358		if (vmf->flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) {
  6359			if (!pte_write(entry))
  6360				return do_wp_page(vmf);
  6361			else if (likely(vmf->flags & FAULT_FLAG_WRITE))
  6362				entry = pte_mkdirty(entry);
  6363		}
  6364		entry = pte_mkyoung(entry);
  6365		if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry,
  6366					vmf->flags & FAULT_FLAG_WRITE))
  6367			update_mmu_cache_range(vmf, vmf->vma, vmf->address,
  6368					vmf->pte, 1);
  6369		else
  6370			fix_spurious_fault(vmf, PGTABLE_LEVEL_PTE);
  6371	unlock:
  6372		pte_unmap_unlock(vmf->pte, vmf->ptl);
  6373		return 0;
  6374	}
  6375	

-- 
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki

^ permalink raw reply	[flat|nested] only message in thread

only message in thread, other threads: [~2026-04-18 10:55 UTC | newest]

Thread overview: (only message) (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2026-04-18 10:55 [kas:uffd/rfc-v2 3/7] mm/memory.c:6347:7: error: call to undeclared function 'userfaultfd_rwp'; ISO C99 and later do not support implicit function declarations kernel test robot

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox