public inbox for llvm@lists.linux.dev
 help / color / mirror / Atom feed
From: kernel test robot <lkp@intel.com>
To: Kiryl Shutsemau <kas@kernel.org>
Cc: llvm@lists.linux.dev, oe-kbuild-all@lists.linux.dev
Subject: [kas:pte_size 26/30] mm/huge_memory.c:3098:20: error: too many arguments to function call, expected 5, have 6
Date: Sun, 22 Mar 2026 16:55:33 +0800	[thread overview]
Message-ID: <202603221654.IVQxREaL-lkp@intel.com> (raw)

tree:   https://git.kernel.org/pub/scm/linux/kernel/git/kas/linux.git pte_size
head:   3ecd2bc82d7d0382233099de8d07616df26745c4
commit: cbef1d0b647d13181325aba9b7fb964d22e03829 [26/30] tmp
config: sparc64-allmodconfig (https://download.01.org/0day-ci/archive/20260322/202603221654.IVQxREaL-lkp@intel.com/config)
compiler: clang version 23.0.0git (https://github.com/llvm/llvm-project 4abb927bacf37f18f6359a41639a6d1b3bffffb5)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20260322/202603221654.IVQxREaL-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add the following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202603221654.IVQxREaL-lkp@intel.com/

All errors (new ones prefixed by >>):

>> mm/huge_memory.c:3098:20: error: too many arguments to function call, expected 5, have 6
    3097 |                         folio_add_anon_rmap_ptes(folio, page, HPAGE_PMD_NR,
         |                         ~~~~~~~~~~~~~~~~~~~~~~~~
    3098 |                                                  vma, haddr, rmap_flags);
         |                                                              ^~~~~~~~~~
   include/linux/rmap.h:405:6: note: 'folio_add_anon_rmap_ptes' declared here
     405 | void folio_add_anon_rmap_ptes(struct folio *, struct page *, int nr_ptes,
         |      ^                        ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
     406 |                 struct vm_area_struct *, rmap_t flags);
         |                 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
   1 error generated.


vim +3098 mm/huge_memory.c

eef1b3ba053aa6 Kiryl Shutsemau   2016-01-15  2986  
eef1b3ba053aa6 Kiryl Shutsemau   2016-01-15  2987  static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
ba98828088ad3f Kiryl Shutsemau   2016-01-15  2988  		unsigned long haddr, bool freeze)
eef1b3ba053aa6 Kiryl Shutsemau   2016-01-15  2989  {
eef1b3ba053aa6 Kiryl Shutsemau   2016-01-15  2990  	struct mm_struct *mm = vma->vm_mm;
91b2978a348073 David Hildenbrand 2023-12-20  2991  	struct folio *folio;
eef1b3ba053aa6 Kiryl Shutsemau   2016-01-15  2992  	struct page *page;
eef1b3ba053aa6 Kiryl Shutsemau   2016-01-15  2993  	pgtable_t pgtable;
423ac9af3ceff9 Aneesh Kumar K.V  2018-01-31  2994  	pmd_t old_pmd, _pmd;
1462872900233e Balbir Singh      2025-10-01  2995  	bool soft_dirty, uffd_wp = false, young = false, write = false;
0ccf7f168e17bb Peter Xu          2022-08-11  2996  	bool anon_exclusive = false, dirty = false;
2ac015e293bbe3 Kiryl Shutsemau   2016-02-24  2997  	unsigned long addr;
c9c1ee20ee84b1 Hugh Dickins      2023-06-08  2998  	pte_t *pte;
eef1b3ba053aa6 Kiryl Shutsemau   2016-01-15  2999  	int i;
eef1b3ba053aa6 Kiryl Shutsemau   2016-01-15  3000  
eef1b3ba053aa6 Kiryl Shutsemau   2016-01-15  3001  	VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
eef1b3ba053aa6 Kiryl Shutsemau   2016-01-15  3002  	VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
eef1b3ba053aa6 Kiryl Shutsemau   2016-01-15  3003  	VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma);
1462872900233e Balbir Singh      2025-10-01  3004  
0ac881efe16468 Lorenzo Stoakes   2025-11-10  3005  	VM_WARN_ON_ONCE(!pmd_is_valid_softleaf(*pmd) && !pmd_trans_huge(*pmd));
eef1b3ba053aa6 Kiryl Shutsemau   2016-01-15  3006  
eef1b3ba053aa6 Kiryl Shutsemau   2016-01-15  3007  	count_vm_event(THP_SPLIT_PMD);
eef1b3ba053aa6 Kiryl Shutsemau   2016-01-15  3008  
d21b9e57c74ce8 Kiryl Shutsemau   2016-07-26  3009  	if (!vma_is_anonymous(vma)) {
ec8832d007cb7b Alistair Popple   2023-07-25  3010  		old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd);
953c66c2b22a30 Aneesh Kumar K.V  2016-12-12  3011  		/*
953c66c2b22a30 Aneesh Kumar K.V  2016-12-12  3012  		 * We are going to unmap this huge page. So
953c66c2b22a30 Aneesh Kumar K.V  2016-12-12  3013  		 * just go ahead and zap it
953c66c2b22a30 Aneesh Kumar K.V  2016-12-12  3014  		 */
953c66c2b22a30 Aneesh Kumar K.V  2016-12-12  3015  		if (arch_needs_pgtable_deposit())
953c66c2b22a30 Aneesh Kumar K.V  2016-12-12  3016  			zap_deposited_table(mm, pmd);
38607c62b34b46 Alistair Popple   2025-02-28  3017  		if (!vma_is_dax(vma) && vma_is_special_huge(vma))
d21b9e57c74ce8 Kiryl Shutsemau   2016-07-26  3018  			return;
0ac881efe16468 Lorenzo Stoakes   2025-11-10  3019  		if (unlikely(pmd_is_migration_entry(old_pmd))) {
0ac881efe16468 Lorenzo Stoakes   2025-11-10  3020  			const softleaf_t old_entry = softleaf_from_pmd(old_pmd);
99fa8a48203d62 Hugh Dickins      2021-06-15  3021  
0ac881efe16468 Lorenzo Stoakes   2025-11-10  3022  			folio = softleaf_to_folio(old_entry);
38607c62b34b46 Alistair Popple   2025-02-28  3023  		} else if (is_huge_zero_pmd(old_pmd)) {
38607c62b34b46 Alistair Popple   2025-02-28  3024  			return;
99fa8a48203d62 Hugh Dickins      2021-06-15  3025  		} else {
99fa8a48203d62 Hugh Dickins      2021-06-15  3026  			page = pmd_page(old_pmd);
a8e61d584eda0d David Hildenbrand 2023-12-20  3027  			folio = page_folio(page);
a8e61d584eda0d David Hildenbrand 2023-12-20  3028  			if (!folio_test_dirty(folio) && pmd_dirty(old_pmd))
db44c658f798ad David Hildenbrand 2024-01-22  3029  				folio_mark_dirty(folio);
a8e61d584eda0d David Hildenbrand 2023-12-20  3030  			if (!folio_test_referenced(folio) && pmd_young(old_pmd))
a8e61d584eda0d David Hildenbrand 2023-12-20  3031  				folio_set_referenced(folio);
a8e61d584eda0d David Hildenbrand 2023-12-20  3032  			folio_remove_rmap_pmd(folio, page, vma);
a8e61d584eda0d David Hildenbrand 2023-12-20  3033  			folio_put(folio);
99fa8a48203d62 Hugh Dickins      2021-06-15  3034  		}
6b27cc6c66abf0 Kefeng Wang       2024-01-11  3035  		add_mm_counter(mm, mm_counter_file(folio), -HPAGE_PMD_NR);
eef1b3ba053aa6 Kiryl Shutsemau   2016-01-15  3036  		return;
99fa8a48203d62 Hugh Dickins      2021-06-15  3037  	}
99fa8a48203d62 Hugh Dickins      2021-06-15  3038  
3b77e8c8cde581 Hugh Dickins      2021-06-15  3039  	if (is_huge_zero_pmd(*pmd)) {
4645b9fe84bf48 Jérôme Glisse     2017-11-15  3040  		/*
4645b9fe84bf48 Jérôme Glisse     2017-11-15  3041  		 * FIXME: Do we want to invalidate secondary mmu by calling
1af5a8109904b7 Alistair Popple   2023-07-25  3042  		 * mmu_notifier_arch_invalidate_secondary_tlbs() see comments below
1af5a8109904b7 Alistair Popple   2023-07-25  3043  		 * inside __split_huge_pmd() ?
4645b9fe84bf48 Jérôme Glisse     2017-11-15  3044  		 *
4645b9fe84bf48 Jérôme Glisse     2017-11-15  3045  		 * We are going from a zero huge page write protected to zero
4645b9fe84bf48 Jérôme Glisse     2017-11-15  3046  		 * small page also write protected so it does not seem useful
4645b9fe84bf48 Jérôme Glisse     2017-11-15  3047  		 * to invalidate secondary mmu at this time.
4645b9fe84bf48 Jérôme Glisse     2017-11-15  3048  		 */
eef1b3ba053aa6 Kiryl Shutsemau   2016-01-15  3049  		return __split_huge_zero_page_pmd(vma, haddr, pmd);
eef1b3ba053aa6 Kiryl Shutsemau   2016-01-15  3050  	}
eef1b3ba053aa6 Kiryl Shutsemau   2016-01-15  3051  
0ac881efe16468 Lorenzo Stoakes   2025-11-10  3052  	if (pmd_is_migration_entry(*pmd)) {
0ac881efe16468 Lorenzo Stoakes   2025-11-10  3053  		softleaf_t entry;
84c3fc4e9c563d Zi Yan            2017-09-08  3054  
3a5a8d343e1cf9 Ryan Roberts      2024-05-01  3055  		old_pmd = *pmd;
0ac881efe16468 Lorenzo Stoakes   2025-11-10  3056  		entry = softleaf_from_pmd(old_pmd);
0ac881efe16468 Lorenzo Stoakes   2025-11-10  3057  		page = softleaf_to_page(entry);
1462872900233e Balbir Singh      2025-10-01  3058  		folio = page_folio(page);
1462872900233e Balbir Singh      2025-10-01  3059  
1462872900233e Balbir Singh      2025-10-01  3060  		soft_dirty = pmd_swp_soft_dirty(old_pmd);
1462872900233e Balbir Singh      2025-10-01  3061  		uffd_wp = pmd_swp_uffd_wp(old_pmd);
1462872900233e Balbir Singh      2025-10-01  3062  
0ac881efe16468 Lorenzo Stoakes   2025-11-10  3063  		write = softleaf_is_migration_write(entry);
6c287605fd5646 David Hildenbrand 2022-05-09  3064  		if (PageAnon(page))
0ac881efe16468 Lorenzo Stoakes   2025-11-10  3065  			anon_exclusive = softleaf_is_migration_read_exclusive(entry);
0ac881efe16468 Lorenzo Stoakes   2025-11-10  3066  		young = softleaf_is_migration_young(entry);
0ac881efe16468 Lorenzo Stoakes   2025-11-10  3067  		dirty = softleaf_is_migration_dirty(entry);
0ac881efe16468 Lorenzo Stoakes   2025-11-10  3068  	} else if (pmd_is_device_private_entry(*pmd)) {
0ac881efe16468 Lorenzo Stoakes   2025-11-10  3069  		softleaf_t entry;
0ac881efe16468 Lorenzo Stoakes   2025-11-10  3070  
1462872900233e Balbir Singh      2025-10-01  3071  		old_pmd = *pmd;
0ac881efe16468 Lorenzo Stoakes   2025-11-10  3072  		entry = softleaf_from_pmd(old_pmd);
0ac881efe16468 Lorenzo Stoakes   2025-11-10  3073  		page = softleaf_to_page(entry);
1462872900233e Balbir Singh      2025-10-01  3074  		folio = page_folio(page);
1462872900233e Balbir Singh      2025-10-01  3075  
2e83ee1d8694a6 Peter Xu          2018-12-21  3076  		soft_dirty = pmd_swp_soft_dirty(old_pmd);
f45ec5ff16a75f Peter Xu          2020-04-06  3077  		uffd_wp = pmd_swp_uffd_wp(old_pmd);
1462872900233e Balbir Singh      2025-10-01  3078  
0ac881efe16468 Lorenzo Stoakes   2025-11-10  3079  		write = softleaf_is_device_private_write(entry);
1462872900233e Balbir Singh      2025-10-01  3080  		anon_exclusive = PageAnonExclusive(page);
1462872900233e Balbir Singh      2025-10-01  3081  
1462872900233e Balbir Singh      2025-10-01  3082  		/*
1462872900233e Balbir Singh      2025-10-01  3083  		 * Device private THP should be treated the same as regular
1462872900233e Balbir Singh      2025-10-01  3084  		 * folios w.r.t anon exclusive handling. See the comments for
1462872900233e Balbir Singh      2025-10-01  3085  		 * folio handling and anon_exclusive below.
1462872900233e Balbir Singh      2025-10-01  3086  		 */
1462872900233e Balbir Singh      2025-10-01  3087  		if (freeze && anon_exclusive &&
1462872900233e Balbir Singh      2025-10-01  3088  		    folio_try_share_anon_rmap_pmd(folio, page))
1462872900233e Balbir Singh      2025-10-01  3089  			freeze = false;
1462872900233e Balbir Singh      2025-10-01  3090  		if (!freeze) {
1462872900233e Balbir Singh      2025-10-01  3091  			rmap_t rmap_flags = RMAP_NONE;
1462872900233e Balbir Singh      2025-10-01  3092  
1462872900233e Balbir Singh      2025-10-01  3093  			folio_ref_add(folio, HPAGE_PMD_NR - 1);
1462872900233e Balbir Singh      2025-10-01  3094  			if (anon_exclusive)
1462872900233e Balbir Singh      2025-10-01  3095  				rmap_flags |= RMAP_EXCLUSIVE;
1462872900233e Balbir Singh      2025-10-01  3096  
1462872900233e Balbir Singh      2025-10-01  3097  			folio_add_anon_rmap_ptes(folio, page, HPAGE_PMD_NR,
1462872900233e Balbir Singh      2025-10-01 @3098  						 vma, haddr, rmap_flags);
1462872900233e Balbir Singh      2025-10-01  3099  		}
2e83ee1d8694a6 Peter Xu          2018-12-21  3100  	} else {
3a5a8d343e1cf9 Ryan Roberts      2024-05-01  3101  		/*
3a5a8d343e1cf9 Ryan Roberts      2024-05-01  3102  		 * Up to this point the pmd is present and huge and userland has
3a5a8d343e1cf9 Ryan Roberts      2024-05-01  3103  		 * the whole access to the hugepage during the split (which
3a5a8d343e1cf9 Ryan Roberts      2024-05-01  3104  		 * happens in place). If we overwrite the pmd with the not-huge
3a5a8d343e1cf9 Ryan Roberts      2024-05-01  3105  		 * version pointing to the pte here (which of course we could if
3a5a8d343e1cf9 Ryan Roberts      2024-05-01  3106  		 * all CPUs were bug free), userland could trigger a small page
3a5a8d343e1cf9 Ryan Roberts      2024-05-01  3107  		 * size TLB miss on the small sized TLB while the hugepage TLB
3a5a8d343e1cf9 Ryan Roberts      2024-05-01  3108  		 * entry is still established in the huge TLB. Some CPU doesn't
3a5a8d343e1cf9 Ryan Roberts      2024-05-01  3109  		 * like that. See
3a5a8d343e1cf9 Ryan Roberts      2024-05-01  3110  		 * http://support.amd.com/TechDocs/41322_10h_Rev_Gd.pdf, Erratum
3a5a8d343e1cf9 Ryan Roberts      2024-05-01  3111  		 * 383 on page 105. Intel should be safe but it also warns that
3a5a8d343e1cf9 Ryan Roberts      2024-05-01  3112  		 * it's only safe if the permission and cache attributes of the
3a5a8d343e1cf9 Ryan Roberts      2024-05-01  3113  		 * two entries loaded in the two TLB is identical (which should
3a5a8d343e1cf9 Ryan Roberts      2024-05-01  3114  		 * be the case here). But it is generally safer to never allow
3a5a8d343e1cf9 Ryan Roberts      2024-05-01  3115  		 * small and huge TLB entries for the same virtual address to be
3a5a8d343e1cf9 Ryan Roberts      2024-05-01  3116  		 * loaded simultaneously. So instead of doing "pmd_populate();
3a5a8d343e1cf9 Ryan Roberts      2024-05-01  3117  		 * flush_pmd_tlb_range();" we first mark the current pmd
3a5a8d343e1cf9 Ryan Roberts      2024-05-01  3118  		 * notpresent (atomically because here the pmd_trans_huge must
3a5a8d343e1cf9 Ryan Roberts      2024-05-01  3119  		 * remain set at all times on the pmd until the split is
3a5a8d343e1cf9 Ryan Roberts      2024-05-01  3120  		 * complete for this pmd), then we flush the SMP TLB and finally
3a5a8d343e1cf9 Ryan Roberts      2024-05-01  3121  		 * we write the non-huge version of the pmd entry with
3a5a8d343e1cf9 Ryan Roberts      2024-05-01  3122  		 * pmd_populate.
3a5a8d343e1cf9 Ryan Roberts      2024-05-01  3123  		 */
3a5a8d343e1cf9 Ryan Roberts      2024-05-01  3124  		old_pmd = pmdp_invalidate(vma, haddr, pmd);
423ac9af3ceff9 Aneesh Kumar K.V  2018-01-31  3125  		page = pmd_page(old_pmd);
91b2978a348073 David Hildenbrand 2023-12-20  3126  		folio = page_folio(page);
0ccf7f168e17bb Peter Xu          2022-08-11  3127  		if (pmd_dirty(old_pmd)) {
0ccf7f168e17bb Peter Xu          2022-08-11  3128  			dirty = true;
91b2978a348073 David Hildenbrand 2023-12-20  3129  			folio_set_dirty(folio);
0ccf7f168e17bb Peter Xu          2022-08-11  3130  		}
423ac9af3ceff9 Aneesh Kumar K.V  2018-01-31  3131  		write = pmd_write(old_pmd);
423ac9af3ceff9 Aneesh Kumar K.V  2018-01-31  3132  		young = pmd_young(old_pmd);
423ac9af3ceff9 Aneesh Kumar K.V  2018-01-31  3133  		soft_dirty = pmd_soft_dirty(old_pmd);
292924b2602474 Peter Xu          2020-04-06  3134  		uffd_wp = pmd_uffd_wp(old_pmd);
6c287605fd5646 David Hildenbrand 2022-05-09  3135  
91b2978a348073 David Hildenbrand 2023-12-20  3136  		VM_WARN_ON_FOLIO(!folio_ref_count(folio), folio);
91b2978a348073 David Hildenbrand 2023-12-20  3137  		VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
6c287605fd5646 David Hildenbrand 2022-05-09  3138  
6c287605fd5646 David Hildenbrand 2022-05-09  3139  		/*
6c287605fd5646 David Hildenbrand 2022-05-09  3140  		 * Without "freeze", we'll simply split the PMD, propagating the
6c287605fd5646 David Hildenbrand 2022-05-09  3141  		 * PageAnonExclusive() flag for each PTE by setting it for
6c287605fd5646 David Hildenbrand 2022-05-09  3142  		 * each subpage -- no need to (temporarily) clear.
6c287605fd5646 David Hildenbrand 2022-05-09  3143  		 *
6c287605fd5646 David Hildenbrand 2022-05-09  3144  		 * With "freeze" we want to replace mapped pages by
6c287605fd5646 David Hildenbrand 2022-05-09  3145  		 * migration entries right away. This is only possible if we
6c287605fd5646 David Hildenbrand 2022-05-09  3146  		 * managed to clear PageAnonExclusive() -- see
6c287605fd5646 David Hildenbrand 2022-05-09  3147  		 * set_pmd_migration_entry().
6c287605fd5646 David Hildenbrand 2022-05-09  3148  		 *
6c287605fd5646 David Hildenbrand 2022-05-09  3149  		 * In case we cannot clear PageAnonExclusive(), split the PMD
6c287605fd5646 David Hildenbrand 2022-05-09  3150  		 * only and let try_to_migrate_one() fail later.
088b8aa537c2c7 David Hildenbrand 2022-09-01  3151  		 *
e3b4b1374f87c7 David Hildenbrand 2023-12-20  3152  		 * See folio_try_share_anon_rmap_pmd(): invalidate PMD first.
6c287605fd5646 David Hildenbrand 2022-05-09  3153  		 */
91b2978a348073 David Hildenbrand 2023-12-20  3154  		anon_exclusive = PageAnonExclusive(page);
e3b4b1374f87c7 David Hildenbrand 2023-12-20  3155  		if (freeze && anon_exclusive &&
e3b4b1374f87c7 David Hildenbrand 2023-12-20  3156  		    folio_try_share_anon_rmap_pmd(folio, page))
6c287605fd5646 David Hildenbrand 2022-05-09  3157  			freeze = false;
91b2978a348073 David Hildenbrand 2023-12-20  3158  		if (!freeze) {
91b2978a348073 David Hildenbrand 2023-12-20  3159  			rmap_t rmap_flags = RMAP_NONE;
91b2978a348073 David Hildenbrand 2023-12-20  3160  
91b2978a348073 David Hildenbrand 2023-12-20  3161  			folio_ref_add(folio, HPAGE_PMD_NR - 1);
91b2978a348073 David Hildenbrand 2023-12-20  3162  			if (anon_exclusive)
91b2978a348073 David Hildenbrand 2023-12-20  3163  				rmap_flags |= RMAP_EXCLUSIVE;
91b2978a348073 David Hildenbrand 2023-12-20  3164  			folio_add_anon_rmap_ptes(folio, page, HPAGE_PMD_NR,
cbef1d0b647d13 Kiryl Shutsemau   2026-02-12  3165  						 vma, rmap_flags);
91b2978a348073 David Hildenbrand 2023-12-20  3166  		}
9d84604b845c38 Hugh Dickins      2022-03-22  3167  	}
eef1b3ba053aa6 Kiryl Shutsemau   2016-01-15  3168  
423ac9af3ceff9 Aneesh Kumar K.V  2018-01-31  3169  	/*
423ac9af3ceff9 Aneesh Kumar K.V  2018-01-31  3170  	 * Withdraw the table only after we mark the pmd entry invalid.
423ac9af3ceff9 Aneesh Kumar K.V  2018-01-31  3171  	 * This's critical for some architectures (Power).
423ac9af3ceff9 Aneesh Kumar K.V  2018-01-31  3172  	 */
eef1b3ba053aa6 Kiryl Shutsemau   2016-01-15  3173  	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
eef1b3ba053aa6 Kiryl Shutsemau   2016-01-15  3174  	pmd_populate(mm, &_pmd, pgtable);
eef1b3ba053aa6 Kiryl Shutsemau   2016-01-15  3175  
c9c1ee20ee84b1 Hugh Dickins      2023-06-08  3176  	pte = pte_offset_map(&_pmd, haddr);
c9c1ee20ee84b1 Hugh Dickins      2023-06-08  3177  	VM_BUG_ON(!pte);
2bdba9868a4ffc Ryan Roberts      2024-02-15  3178  
eef1b3ba053aa6 Kiryl Shutsemau   2016-01-15  3179  	/*
2bdba9868a4ffc Ryan Roberts      2024-02-15  3180  	 * Note that NUMA hinting access restrictions are not transferred to
2bdba9868a4ffc Ryan Roberts      2024-02-15  3181  	 * avoid any possibility of altering permissions across VMAs.
eef1b3ba053aa6 Kiryl Shutsemau   2016-01-15  3182  	 */
0ac881efe16468 Lorenzo Stoakes   2025-11-10  3183  	if (freeze || pmd_is_migration_entry(old_pmd)) {
2bdba9868a4ffc Ryan Roberts      2024-02-15  3184  		pte_t entry;
ba98828088ad3f Kiryl Shutsemau   2016-01-15  3185  		swp_entry_t swp_entry;
2bdba9868a4ffc Ryan Roberts      2024-02-15  3186  
1462872900233e Balbir Singh      2025-10-01  3187  		for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
4dd845b5a3e57a Alistair Popple   2021-06-30  3188  			if (write)
4dd845b5a3e57a Alistair Popple   2021-06-30  3189  				swp_entry = make_writable_migration_entry(
4dd845b5a3e57a Alistair Popple   2021-06-30  3190  							page_to_pfn(page + i));
6c287605fd5646 David Hildenbrand 2022-05-09  3191  			else if (anon_exclusive)
6c287605fd5646 David Hildenbrand 2022-05-09  3192  				swp_entry = make_readable_exclusive_migration_entry(
6c287605fd5646 David Hildenbrand 2022-05-09  3193  							page_to_pfn(page + i));
4dd845b5a3e57a Alistair Popple   2021-06-30  3194  			else
4dd845b5a3e57a Alistair Popple   2021-06-30  3195  				swp_entry = make_readable_migration_entry(
4dd845b5a3e57a Alistair Popple   2021-06-30  3196  							page_to_pfn(page + i));
2e3468778dbe3e Peter Xu          2022-08-11  3197  			if (young)
2e3468778dbe3e Peter Xu          2022-08-11  3198  				swp_entry = make_migration_entry_young(swp_entry);
2e3468778dbe3e Peter Xu          2022-08-11  3199  			if (dirty)
2e3468778dbe3e Peter Xu          2022-08-11  3200  				swp_entry = make_migration_entry_dirty(swp_entry);
ba98828088ad3f Kiryl Shutsemau   2016-01-15  3201  			entry = swp_entry_to_pte(swp_entry);
804dd150468cfd Andrea Arcangeli  2016-08-25  3202  			if (soft_dirty)
804dd150468cfd Andrea Arcangeli  2016-08-25  3203  				entry = pte_swp_mksoft_dirty(entry);
f45ec5ff16a75f Peter Xu          2020-04-06  3204  			if (uffd_wp)
f45ec5ff16a75f Peter Xu          2020-04-06  3205  				entry = pte_swp_mkuffd_wp(entry);
2bdba9868a4ffc Ryan Roberts      2024-02-15  3206  			VM_WARN_ON(!pte_none(ptep_get(pte + i)));
2bdba9868a4ffc Ryan Roberts      2024-02-15  3207  			set_pte_at(mm, addr, pte + i, entry);
2bdba9868a4ffc Ryan Roberts      2024-02-15  3208  		}
0ac881efe16468 Lorenzo Stoakes   2025-11-10  3209  	} else if (pmd_is_device_private_entry(old_pmd)) {
2bdba9868a4ffc Ryan Roberts      2024-02-15  3210  		pte_t entry;
1462872900233e Balbir Singh      2025-10-01  3211  		swp_entry_t swp_entry;
2bdba9868a4ffc Ryan Roberts      2024-02-15  3212  
1462872900233e Balbir Singh      2025-10-01  3213  		for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
1462872900233e Balbir Singh      2025-10-01  3214  			/*
1462872900233e Balbir Singh      2025-10-01  3215  			 * anon_exclusive was already propagated to the relevant
1462872900233e Balbir Singh      2025-10-01  3216  			 * pages corresponding to the pte entries when freeze
1462872900233e Balbir Singh      2025-10-01  3217  			 * is false.
1462872900233e Balbir Singh      2025-10-01  3218  			 */
1462c52e9f2b99 David Hildenbrand 2023-04-11  3219  			if (write)
1462872900233e Balbir Singh      2025-10-01  3220  				swp_entry = make_writable_device_private_entry(
1462872900233e Balbir Singh      2025-10-01  3221  							page_to_pfn(page + i));
1462872900233e Balbir Singh      2025-10-01  3222  			else
1462872900233e Balbir Singh      2025-10-01  3223  				swp_entry = make_readable_device_private_entry(
1462872900233e Balbir Singh      2025-10-01  3224  							page_to_pfn(page + i));
1462872900233e Balbir Singh      2025-10-01  3225  			/*
1462872900233e Balbir Singh      2025-10-01  3226  			 * Young and dirty bits are not propagated via swp_entry
1462872900233e Balbir Singh      2025-10-01  3227  			 */
1462872900233e Balbir Singh      2025-10-01  3228  			entry = swp_entry_to_pte(swp_entry);
1462872900233e Balbir Singh      2025-10-01  3229  			if (soft_dirty)
1462872900233e Balbir Singh      2025-10-01  3230  				entry = pte_swp_mksoft_dirty(entry);
1462872900233e Balbir Singh      2025-10-01  3231  			if (uffd_wp)
1462872900233e Balbir Singh      2025-10-01  3232  				entry = pte_swp_mkuffd_wp(entry);
2bdba9868a4ffc Ryan Roberts      2024-02-15  3233  			VM_WARN_ON(!pte_none(ptep_get(pte + i)));
2bdba9868a4ffc Ryan Roberts      2024-02-15  3234  			set_pte_at(mm, addr, pte + i, entry);
2bdba9868a4ffc Ryan Roberts      2024-02-15  3235  		}
ba98828088ad3f Kiryl Shutsemau   2016-01-15  3236  	} else {
2bdba9868a4ffc Ryan Roberts      2024-02-15  3237  		pte_t entry;
2bdba9868a4ffc Ryan Roberts      2024-02-15  3238  
2bdba9868a4ffc Ryan Roberts      2024-02-15  3239  		entry = mk_pte(page, READ_ONCE(vma->vm_page_prot));
1462c52e9f2b99 David Hildenbrand 2023-04-11  3240  		if (write)
161e393c0f6359 Rick Edgecombe    2023-06-12  3241  			entry = pte_mkwrite(entry, vma);
eef1b3ba053aa6 Kiryl Shutsemau   2016-01-15  3242  		if (!young)
eef1b3ba053aa6 Kiryl Shutsemau   2016-01-15  3243  			entry = pte_mkold(entry);
e833bc50340502 Peter Xu          2022-11-25  3244  		/* NOTE: this may set soft-dirty too on some archs */
e833bc50340502 Peter Xu          2022-11-25  3245  		if (dirty)
e833bc50340502 Peter Xu          2022-11-25  3246  			entry = pte_mkdirty(entry);
804dd150468cfd Andrea Arcangeli  2016-08-25  3247  		if (soft_dirty)
804dd150468cfd Andrea Arcangeli  2016-08-25  3248  			entry = pte_mksoft_dirty(entry);
292924b2602474 Peter Xu          2020-04-06  3249  		if (uffd_wp)
292924b2602474 Peter Xu          2020-04-06  3250  			entry = pte_mkuffd_wp(entry);
2bdba9868a4ffc Ryan Roberts      2024-02-15  3251  
2bdba9868a4ffc Ryan Roberts      2024-02-15  3252  		for (i = 0; i < HPAGE_PMD_NR; i++)
2bdba9868a4ffc Ryan Roberts      2024-02-15  3253  			VM_WARN_ON(!pte_none(ptep_get(pte + i)));
2bdba9868a4ffc Ryan Roberts      2024-02-15  3254  
2bdba9868a4ffc Ryan Roberts      2024-02-15  3255  		set_ptes(mm, haddr, pte, entry, HPAGE_PMD_NR);
ba98828088ad3f Kiryl Shutsemau   2016-01-15  3256  	}
2bdba9868a4ffc Ryan Roberts      2024-02-15  3257  	pte_unmap(pte);
eef1b3ba053aa6 Kiryl Shutsemau   2016-01-15  3258  
0ac881efe16468 Lorenzo Stoakes   2025-11-10  3259  	if (!pmd_is_migration_entry(*pmd))
a8e61d584eda0d David Hildenbrand 2023-12-20  3260  		folio_remove_rmap_pmd(folio, page, vma);
96d82deb743ab4 Hugh Dickins      2022-11-22  3261  	if (freeze)
96d82deb743ab4 Hugh Dickins      2022-11-22  3262  		put_page(page);
eef1b3ba053aa6 Kiryl Shutsemau   2016-01-15  3263  
eef1b3ba053aa6 Kiryl Shutsemau   2016-01-15  3264  	smp_wmb(); /* make pte visible before pmd */
eef1b3ba053aa6 Kiryl Shutsemau   2016-01-15  3265  	pmd_populate(mm, pmd, pgtable);
eef1b3ba053aa6 Kiryl Shutsemau   2016-01-15  3266  }
eef1b3ba053aa6 Kiryl Shutsemau   2016-01-15  3267  

:::::: The code at line 3098 was first introduced by commit
:::::: 1462872900233e58fb2f9fc8babc24a0d5c03fd9 mm/huge_memory: implement device-private THP splitting

:::::: TO: Balbir Singh <balbirs@nvidia.com>
:::::: CC: Andrew Morton <akpm@linux-foundation.org>

-- 
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki

                 reply	other threads:[~2026-03-22  8:56 UTC|newest]

Thread overview: [no followups] expand[flat|nested]  mbox.gz  Atom feed

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=202603221654.IVQxREaL-lkp@intel.com \
    --to=lkp@intel.com \
    --cc=kas@kernel.org \
    --cc=llvm@lists.linux.dev \
    --cc=oe-kbuild-all@lists.linux.dev \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox