Building the Linux kernel with Clang and LLVM
 help / color / mirror / Atom feed
* Re: [PATCH v4 3/7] KVM: x86/mmu: Recover TDP MMU NX huge pages using MMU read lock
       [not found] <20250613202315.2790592-4-jthoughton@google.com>
@ 2025-06-14 14:03 ` kernel test robot
  2025-06-16 18:11   ` James Houghton
  0 siblings, 1 reply; 2+ messages in thread
From: kernel test robot @ 2025-06-14 14:03 UTC (permalink / raw)
  To: James Houghton, Paolo Bonzini, Sean Christopherson
  Cc: llvm, oe-kbuild-all, Vipin Sharma, David Matlack, James Houghton,
	kvm, linux-kernel

Hi James,

kernel test robot noticed the following build errors:

[auto build test ERROR on 8046d29dde17002523f94d3e6e0ebe486ce52166]

url:    https://github.com/intel-lab-lkp/linux/commits/James-Houghton/KVM-x86-mmu-Track-TDP-MMU-NX-huge-pages-separately/20250614-042620
base:   8046d29dde17002523f94d3e6e0ebe486ce52166
patch link:    https://lore.kernel.org/r/20250613202315.2790592-4-jthoughton%40google.com
patch subject: [PATCH v4 3/7] KVM: x86/mmu: Recover TDP MMU NX huge pages using MMU read lock
config: i386-buildonly-randconfig-003-20250614 (https://download.01.org/0day-ci/archive/20250614/202506142129.ClBlxdtW-lkp@intel.com/config)
compiler: clang version 20.1.2 (https://github.com/llvm/llvm-project 58df0ef89dd64126512e4ee27b4ac3fd8ddf6247)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20250614/202506142129.ClBlxdtW-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add the following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202506142129.ClBlxdtW-lkp@intel.com/

All errors (new ones prefixed by >>):

   arch/x86/kvm/mmu/mmu.c:7570:28: error: use of undeclared identifier 'KVM_TDP_MMU'
    7570 |         bool is_tdp = mmu_type == KVM_TDP_MMU;
         |                                   ^
>> arch/x86/kvm/mmu/mmu.c:7594:25: error: no member named 'tdp_mmu_pages_lock' in 'struct kvm_arch'
    7594 |                         spin_lock(&kvm->arch.tdp_mmu_pages_lock);
         |                                    ~~~~~~~~~ ^
   arch/x86/kvm/mmu/mmu.c:7597:28: error: no member named 'tdp_mmu_pages_lock' in 'struct kvm_arch'
    7597 |                                 spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
         |                                              ~~~~~~~~~ ^
   arch/x86/kvm/mmu/mmu.c:7617:27: error: no member named 'tdp_mmu_pages_lock' in 'struct kvm_arch'
    7617 |                         spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
         |                                      ~~~~~~~~~ ^
   4 errors generated.


vim +7594 arch/x86/kvm/mmu/mmu.c

  7565	
  7566	static void kvm_recover_nx_huge_pages(struct kvm *kvm,
  7567					      enum kvm_mmu_type mmu_type)
  7568	{
  7569		unsigned long to_zap = nx_huge_pages_to_zap(kvm, mmu_type);
> 7570		bool is_tdp = mmu_type == KVM_TDP_MMU;
  7571		struct list_head *nx_huge_pages;
  7572		struct kvm_mmu_page *sp;
  7573		LIST_HEAD(invalid_list);
  7574		bool flush = false;
  7575		int rcu_idx;
  7576	
  7577		nx_huge_pages = &kvm->arch.possible_nx_huge_pages[mmu_type].pages;
  7578	
  7579		rcu_idx = srcu_read_lock(&kvm->srcu);
  7580		if (is_tdp)
  7581			read_lock(&kvm->mmu_lock);
  7582		else
  7583			write_lock(&kvm->mmu_lock);
  7584	
  7585		/*
  7586		 * Zapping TDP MMU shadow pages, including the remote TLB flush, must
  7587		 * be done under RCU protection, because the pages are freed via RCU
  7588		 * callback.
  7589		 */
  7590		rcu_read_lock();
  7591	
  7592		for ( ; to_zap; --to_zap) {
  7593			if (is_tdp)
> 7594				spin_lock(&kvm->arch.tdp_mmu_pages_lock);
  7595			if (list_empty(nx_huge_pages)) {
  7596				if (is_tdp)
  7597					spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
  7598				break;
  7599			}
  7600	
  7601			/*
  7602		 * We use a separate list instead of just using active_mmu_pages
  7603		 * because the number of shadow pages that can be replaced with an
  7604			 * NX huge page is expected to be relatively small compared to
  7605			 * the total number of shadow pages.  And because the TDP MMU
  7606			 * doesn't use active_mmu_pages.
  7607			 */
  7608			sp = list_first_entry(nx_huge_pages,
  7609					      struct kvm_mmu_page,
  7610					      possible_nx_huge_page_link);
  7611			WARN_ON_ONCE(!sp->nx_huge_page_disallowed);
  7612			WARN_ON_ONCE(!sp->role.direct);
  7613	
  7614			unaccount_nx_huge_page(kvm, sp);
  7615	
  7616			if (is_tdp)
  7617				spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
  7618	
  7619			/*
  7620			 * Do not attempt to recover any NX Huge Pages that are being
  7621			 * dirty tracked, as they would just be faulted back in as 4KiB
  7622			 * pages. The NX Huge Pages in this slot will be recovered,
  7623			 * along with all the other huge pages in the slot, when dirty
  7624			 * logging is disabled.
  7625			 */
  7626			if (!kvm_mmu_sp_dirty_logging_enabled(kvm, sp)) {
  7627				if (is_tdp)
  7628					flush |= kvm_tdp_mmu_zap_possible_nx_huge_page(kvm, sp);
  7629				else
  7630					kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
  7631	
  7632			}
  7633	
  7634			WARN_ON_ONCE(sp->nx_huge_page_disallowed);
  7635	
  7636			if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
  7637				kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
  7638				rcu_read_unlock();
  7639	
  7640				if (is_tdp)
  7641					cond_resched_rwlock_read(&kvm->mmu_lock);
  7642				else
  7643					cond_resched_rwlock_write(&kvm->mmu_lock);
  7644	
  7645				flush = false;
  7646				rcu_read_lock();
  7647			}
  7648		}
  7649		kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
  7650	
  7651		rcu_read_unlock();
  7652	
  7653		if (is_tdp)
  7654			read_unlock(&kvm->mmu_lock);
  7655		else
  7656			write_unlock(&kvm->mmu_lock);
  7657		srcu_read_unlock(&kvm->srcu, rcu_idx);
  7658	}
  7659	

-- 
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki

^ permalink raw reply	[flat|nested] 2+ messages in thread

* Re: [PATCH v4 3/7] KVM: x86/mmu: Recover TDP MMU NX huge pages using MMU read lock
  2025-06-14 14:03 ` [PATCH v4 3/7] KVM: x86/mmu: Recover TDP MMU NX huge pages using MMU read lock kernel test robot
@ 2025-06-16 18:11   ` James Houghton
  0 siblings, 0 replies; 2+ messages in thread
From: James Houghton @ 2025-06-16 18:11 UTC (permalink / raw)
  To: lkp, pbonzini, seanjc
  Cc: dmatlack, jthoughton, kvm, linux-kernel, llvm, oe-kbuild-all,
	vipinsh

> All errors (new ones prefixed by >>):
> 
>    arch/x86/kvm/mmu/mmu.c:7570:28: error: use of undeclared identifier 'KVM_TDP_MMU'
>     7570 |         bool is_tdp = mmu_type == KVM_TDP_MMU;
>          |                                   ^
> >> arch/x86/kvm/mmu/mmu.c:7594:25: error: no member named 'tdp_mmu_pages_lock' in 'struct kvm_arch'
>     7594 |                         spin_lock(&kvm->arch.tdp_mmu_pages_lock);
>          |                                    ~~~~~~~~~ ^
>    arch/x86/kvm/mmu/mmu.c:7597:28: error: no member named 'tdp_mmu_pages_lock' in 'struct kvm_arch'
>     7597 |                                 spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
>          |                                              ~~~~~~~~~ ^
>    arch/x86/kvm/mmu/mmu.c:7617:27: error: no member named 'tdp_mmu_pages_lock' in 'struct kvm_arch'
>     7617 |                         spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
>          |                                      ~~~~~~~~~ ^
>    4 errors generated.

Fixup for this below.

I also realized that the variable name `is_tdp` is bad/misleading, so I've
changed it to `is_tdp_mmu` as part of this fixup too. Sean/Paolo, let me know
if I should just go ahead and post the fixed series, given the size of this
fixup.

I don't really like having to #ifdef all the places where we take
tdp_mmu_pages_lock, but I couldn't find a way to avoid that. Even doing

  #ifndef CONFIG_X86_64
  #define is_tdp_mmu false
  #endif

didn't work. :(

Anyway, here's the fixup:

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 10ba328b664d7..7df1b4ead705b 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -7564,10 +7564,10 @@ static bool kvm_mmu_sp_dirty_logging_enabled(struct kvm *kvm,
 }
 
 static void kvm_recover_nx_huge_pages(struct kvm *kvm,
-				      enum kvm_mmu_type mmu_type)
+				      const enum kvm_mmu_type mmu_type)
 {
 	unsigned long to_zap = nx_huge_pages_to_zap(kvm, mmu_type);
-	bool is_tdp = mmu_type == KVM_TDP_MMU;
+	bool is_tdp_mmu = mmu_type == KVM_TDP_MMU;
 	struct list_head *nx_huge_pages;
 	struct kvm_mmu_page *sp;
 	LIST_HEAD(invalid_list);
@@ -7577,7 +7577,7 @@ static void kvm_recover_nx_huge_pages(struct kvm *kvm,
 	nx_huge_pages = &kvm->arch.possible_nx_huge_pages[mmu_type].pages;
 
 	rcu_idx = srcu_read_lock(&kvm->srcu);
-	if (is_tdp)
+	if (is_tdp_mmu)
 		read_lock(&kvm->mmu_lock);
 	else
 		write_lock(&kvm->mmu_lock);
@@ -7590,11 +7590,15 @@ static void kvm_recover_nx_huge_pages(struct kvm *kvm,
 	rcu_read_lock();
 
 	for ( ; to_zap; --to_zap) {
-		if (is_tdp)
+#ifdef CONFIG_X86_64
+		if (is_tdp_mmu)
 			spin_lock(&kvm->arch.tdp_mmu_pages_lock);
+#endif
 		if (list_empty(nx_huge_pages)) {
-			if (is_tdp)
+#ifdef CONFIG_X86_64
+			if (is_tdp_mmu)
 				spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
+#endif
 			break;
 		}
 
@@ -7613,8 +7617,10 @@ static void kvm_recover_nx_huge_pages(struct kvm *kvm,
 
 		unaccount_nx_huge_page(kvm, sp);
 
-		if (is_tdp)
+#ifdef CONFIG_X86_64
+		if (is_tdp_mmu)
 			spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
+#endif
 
 		/*
 		 * Do not attempt to recover any NX Huge Pages that are being
@@ -7624,7 +7630,7 @@ static void kvm_recover_nx_huge_pages(struct kvm *kvm,
 		 * logging is disabled.
 		 */
 		if (!kvm_mmu_sp_dirty_logging_enabled(kvm, sp)) {
-			if (is_tdp)
+			if (is_tdp_mmu)
 				flush |= kvm_tdp_mmu_zap_possible_nx_huge_page(kvm, sp);
 			else
 				kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
@@ -7637,7 +7643,7 @@ static void kvm_recover_nx_huge_pages(struct kvm *kvm,
 			kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
 			rcu_read_unlock();
 
-			if (is_tdp)
+			if (is_tdp_mmu)
 				cond_resched_rwlock_read(&kvm->mmu_lock);
 			else
 				cond_resched_rwlock_write(&kvm->mmu_lock);
@@ -7650,7 +7656,7 @@ static void kvm_recover_nx_huge_pages(struct kvm *kvm,
 
 	rcu_read_unlock();
 
-	if (is_tdp)
+	if (is_tdp_mmu)
 		read_unlock(&kvm->mmu_lock);
 	else
 		write_unlock(&kvm->mmu_lock);

^ permalink raw reply related	[flat|nested] 2+ messages in thread

end of thread, other threads:[~2025-06-16 18:11 UTC | newest]

Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
     [not found] <20250613202315.2790592-4-jthoughton@google.com>
2025-06-14 14:03 ` [PATCH v4 3/7] KVM: x86/mmu: Recover TDP MMU NX huge pages using MMU read lock kernel test robot
2025-06-16 18:11   ` James Houghton

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox