From: Sean Christopherson <seanjc@google.com>
To: Paolo Bonzini <pbonzini@redhat.com>,
	Marc Zyngier <maz@kernel.org>,
	 Oliver Upton <oliver.upton@linux.dev>,
	Tianrui Zhao <zhaotianrui@loongson.cn>,
	 Bibo Mao <maobibo@loongson.cn>,
	Huacai Chen <chenhuacai@kernel.org>,
	 Michael Ellerman <mpe@ellerman.id.au>,
	Anup Patel <anup@brainfault.org>,
	 Paul Walmsley <paul.walmsley@sifive.com>,
	Palmer Dabbelt <palmer@dabbelt.com>,
	 Albert Ou <aou@eecs.berkeley.edu>,
	Christian Borntraeger <borntraeger@linux.ibm.com>,
	 Janosch Frank <frankja@linux.ibm.com>,
	Claudio Imbrenda <imbrenda@linux.ibm.com>,
	 Sean Christopherson <seanjc@google.com>
Cc: kvm@vger.kernel.org, linux-arm-kernel@lists.infradead.org,
	kvmarm@lists.linux.dev, loongarch@lists.linux.dev,
	linux-mips@vger.kernel.org, linuxppc-dev@lists.ozlabs.org,
	kvm-riscv@lists.infradead.org, linux-riscv@lists.infradead.org,
	linux-kernel@vger.kernel.org,
	"Alex Bennée" <alex.bennee@linaro.org>,
	"Yan Zhao" <yan.y.zhao@intel.com>,
	"David Matlack" <dmatlack@google.com>,
	"David Stevens" <stevensd@chromium.org>,
	"Andrew Jones" <ajones@ventanamicro.com>
Subject: [PATCH v13 06/85] KVM: x86/mmu: Invert @can_unsync and rename it to @synchronizing
Date: Thu, 10 Oct 2024 11:23:08 -0700	[thread overview]
Message-ID: <20241010182427.1434605-7-seanjc@google.com> (raw)
In-Reply-To: <20241010182427.1434605-1-seanjc@google.com>

Invert the polarity of "can_unsync" and rename the parameter to
"synchronizing" to allow a future change to set the Accessed bit if KVM
is synchronizing an existing SPTE.  Querying "can_unsync" in that case is
nonsensical, as the fact that KVM can't unsync SPTEs doesn't provide any
justification for setting the Accessed bit.

Signed-off-by: Sean Christopherson <seanjc@google.com>
---
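[Editor's note, placed below the fold so it stays out of git history: a
minimal sketch of how the inverted flag reads at the two kinds of call
sites, plus a *hypothetical* use of it that anticipates patch 07/85
("Mark new SPTE as Accessed when synchronizing existing SPTE").  The
call lines mirror the diff below; the Accessed-bit hunk is an assumption
about the follow-up, simplified to apply shadow_accessed_mask directly,
and is not part of this patch.]

	/* Fault paths install new SPTEs, i.e. are never synchronizing. */
	wrprot = make_spte(vcpu, sp, slot, pte_access, gfn, pfn, *sptep,
			   prefetch, /*synchronizing=*/false,
			   host_writable, &spte);

	/* FNAME(sync_spte) re-derives an existing unsync SPTE. */
	make_spte(vcpu, sp, slot, pte_access, gfn, spte_to_pfn(spte), spte,
		  /*prefetch=*/true, /*synchronizing=*/true,
		  host_writable, &spte);

	/*
	 * Hypothetical follow-up: an SPTE that is being synchronized has
	 * necessarily been used by the guest, so it can be made Accessed
	 * outright, whereas the old "can_unsync" conveyed nothing about
	 * guest accesses.
	 */
	if (synchronizing)
		spte |= shadow_accessed_mask;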
 arch/x86/kvm/mmu/mmu.c          | 12 ++++++------
 arch/x86/kvm/mmu/mmu_internal.h |  2 +-
 arch/x86/kvm/mmu/paging_tmpl.h  |  2 +-
 arch/x86/kvm/mmu/spte.c         |  4 ++--
 arch/x86/kvm/mmu/spte.h         |  2 +-
 arch/x86/kvm/mmu/tdp_mmu.c      |  4 ++--
 6 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index a8c64069aa89..0f21d6f76cab 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -2795,7 +2795,7 @@ static void kvm_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
  * be write-protected.
  */
 int mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot,
-			    gfn_t gfn, bool can_unsync, bool prefetch)
+			    gfn_t gfn, bool synchronizing, bool prefetch)
 {
 	struct kvm_mmu_page *sp;
 	bool locked = false;
@@ -2810,12 +2810,12 @@ int mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot,
 
 	/*
 	 * The page is not write-tracked, mark existing shadow pages unsync
-	 * unless KVM is synchronizing an unsync SP (can_unsync = false).  In
-	 * that case, KVM must complete emulation of the guest TLB flush before
-	 * allowing shadow pages to become unsync (writable by the guest).
+	 * unless KVM is synchronizing an unsync SP.  In that case, KVM must
+	 * complete emulation of the guest TLB flush before allowing shadow
+	 * pages to become unsync (writable by the guest).
 	 */
 	for_each_gfn_valid_sp_with_gptes(kvm, sp, gfn) {
-		if (!can_unsync)
+		if (synchronizing)
 			return -EPERM;
 
 		if (sp->unsync)
@@ -2941,7 +2941,7 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
 	}
 
 	wrprot = make_spte(vcpu, sp, slot, pte_access, gfn, pfn, *sptep, prefetch,
-			   true, host_writable, &spte);
+			   false, host_writable, &spte);
 
 	if (*sptep == spte) {
 		ret = RET_PF_SPURIOUS;
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index c98827840e07..4da83544c4e1 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -164,7 +164,7 @@ static inline gfn_t gfn_round_for_level(gfn_t gfn, int level)
 }
 
 int mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot,
-			    gfn_t gfn, bool can_unsync, bool prefetch);
+			    gfn_t gfn, bool synchronizing, bool prefetch);
 
 void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
 void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index ae7d39ff2d07..6e7bd8921c6f 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -963,7 +963,7 @@ static int FNAME(sync_spte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, int
 	host_writable = spte & shadow_host_writable_mask;
 	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
 	make_spte(vcpu, sp, slot, pte_access, gfn,
-		  spte_to_pfn(spte), spte, true, false,
+		  spte_to_pfn(spte), spte, true, true,
 		  host_writable, &spte);
 
 	return mmu_spte_update(sptep, spte);
diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c
index 5521608077ec..0e47fea1a2d9 100644
--- a/arch/x86/kvm/mmu/spte.c
+++ b/arch/x86/kvm/mmu/spte.c
@@ -157,7 +157,7 @@ bool spte_has_volatile_bits(u64 spte)
 bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	       const struct kvm_memory_slot *slot,
 	       unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
-	       u64 old_spte, bool prefetch, bool can_unsync,
+	       u64 old_spte, bool prefetch, bool synchronizing,
 	       bool host_writable, u64 *new_spte)
 {
 	int level = sp->role.level;
@@ -248,7 +248,7 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 		 * e.g. it's write-tracked (upper-level SPs) or has one or more
 		 * shadow pages and unsync'ing pages is not allowed.
 		 */
-		if (mmu_try_to_unsync_pages(vcpu->kvm, slot, gfn, can_unsync, prefetch)) {
+		if (mmu_try_to_unsync_pages(vcpu->kvm, slot, gfn, synchronizing, prefetch)) {
 			wrprot = true;
 			pte_access &= ~ACC_WRITE_MASK;
 			spte &= ~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);
diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
index 2cb816ea2430..c81cac9358e0 100644
--- a/arch/x86/kvm/mmu/spte.h
+++ b/arch/x86/kvm/mmu/spte.h
@@ -499,7 +499,7 @@ bool spte_has_volatile_bits(u64 spte);
 bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	       const struct kvm_memory_slot *slot,
 	       unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
-	       u64 old_spte, bool prefetch, bool can_unsync,
+	       u64 old_spte, bool prefetch, bool synchronizing,
 	       bool host_writable, u64 *new_spte);
 u64 make_huge_page_split_spte(struct kvm *kvm, u64 huge_spte,
 		      	      union kvm_mmu_page_role role, int index);
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 3c6583468742..76bca7a726c1 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -1033,8 +1033,8 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu,
 		new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
 	else
 		wrprot = make_spte(vcpu, sp, fault->slot, ACC_ALL, iter->gfn,
-					 fault->pfn, iter->old_spte, fault->prefetch, true,
-					 fault->map_writable, &new_spte);
+				   fault->pfn, iter->old_spte, fault->prefetch,
+				   false, fault->map_writable, &new_spte);
 
 	if (new_spte == iter->old_spte)
 		ret = RET_PF_SPURIOUS;
-- 
2.47.0.rc1.288.g06298d1525-goog



Thread overview: 99+ messages
2024-10-10 18:23 [PATCH v13 00/85] KVM: Stop grabbing references to PFNMAP'd pages Sean Christopherson
2024-10-10 18:23 ` [PATCH v13 01/85] KVM: Drop KVM_ERR_PTR_BAD_PAGE and instead return NULL to indicate an error Sean Christopherson
2024-10-10 18:23 ` [PATCH v13 02/85] KVM: Allow calling kvm_release_page_{clean,dirty}() on a NULL page pointer Sean Christopherson
2024-10-10 18:23 ` [PATCH v13 03/85] KVM: Add kvm_release_page_unused() API to put pages that KVM never consumes Sean Christopherson
2024-10-10 18:23 ` [PATCH v13 04/85] KVM: x86/mmu: Skip the "try unsync" path iff the old SPTE was a leaf SPTE Sean Christopherson
2024-10-10 18:23 ` [PATCH v13 05/85] KVM: x86/mmu: Don't overwrite shadow-present MMU SPTEs when prefaulting Sean Christopherson
2024-10-10 18:23 ` [PATCH v13 06/85] KVM: x86/mmu: Invert @can_unsync and rename it to @synchronizing Sean Christopherson [this message]
2024-10-10 18:23 ` [PATCH v13 07/85] KVM: x86/mmu: Mark new SPTE as Accessed when synchronizing existing SPTE Sean Christopherson
2024-10-10 18:23 ` [PATCH v13 08/85] KVM: x86/mmu: Mark folio dirty when creating SPTE, not when zapping/modifying Sean Christopherson
2024-10-10 18:23 ` [PATCH v13 09/85] KVM: x86/mmu: Mark page/folio accessed only when zapping leaf SPTEs Sean Christopherson
2024-10-10 18:23 ` [PATCH v13 10/85] KVM: x86/mmu: Use gfn_to_page_many_atomic() when prefetching indirect PTEs Sean Christopherson
2024-10-10 18:23 ` [PATCH v13 11/85] KVM: Rename gfn_to_page_many_atomic() to kvm_prefetch_pages() Sean Christopherson
2024-10-10 18:23 ` [PATCH v13 12/85] KVM: Drop @atomic param from gfn=>pfn and hva=>pfn APIs Sean Christopherson
2024-10-10 18:23 ` [PATCH v13 13/85] KVM: Annotate that all paths in hva_to_pfn() might sleep Sean Christopherson
2024-10-10 18:23 ` [PATCH v13 14/85] KVM: Return ERR_SIGPENDING from hva_to_pfn() if GUP returns -EAGAIN Sean Christopherson
2024-10-10 18:23 ` [PATCH v13 15/85] KVM: Drop extra GUP (via check_user_page_hwpoison()) to detect poisoned page Sean Christopherson
2024-10-10 18:23 ` [PATCH v13 16/85] KVM: Replace "async" pointer in gfn=>pfn with "no_wait" and error code Sean Christopherson
2024-10-10 18:23 ` [PATCH v13 17/85] KVM: x86/mmu: Drop kvm_page_fault.hva, i.e. don't track intermediate hva Sean Christopherson
2024-10-10 18:23 ` [PATCH v13 18/85] KVM: Drop unused "hva" pointer from __gfn_to_pfn_memslot() Sean Christopherson
2024-10-10 18:23 ` [PATCH v13 19/85] KVM: Introduce kvm_follow_pfn() to eventually replace "gfn_to_pfn" APIs Sean Christopherson
2024-10-21  8:49   ` Yan Zhao
2024-10-21 18:08     ` Sean Christopherson
2024-10-22  1:25       ` Yan Zhao
2024-10-10 18:23 ` [PATCH v13 20/85] KVM: Remove pointless sanity check on @map param to kvm_vcpu_(un)map() Sean Christopherson
2024-10-10 18:23 ` [PATCH v13 21/85] KVM: Explicitly initialize all fields at the start of kvm_vcpu_map() Sean Christopherson
2024-10-10 18:23 ` [PATCH v13 22/85] KVM: Use NULL for struct page pointer to indicate mremapped memory Sean Christopherson
2024-10-10 18:23 ` [PATCH v13 23/85] KVM: nVMX: Rely on kvm_vcpu_unmap() to track validity of eVMCS mapping Sean Christopherson
2024-10-10 18:23 ` [PATCH v13 24/85] KVM: nVMX: Drop pointless msr_bitmap_map field from struct nested_vmx Sean Christopherson
2024-10-10 18:23 ` [PATCH v13 25/85] KVM: nVMX: Add helper to put (unmap) vmcs12 pages Sean Christopherson
2024-10-10 18:23 ` [PATCH v13 26/85] KVM: Use plain "struct page" pointer instead of single-entry array Sean Christopherson
2024-10-10 18:23 ` [PATCH v13 27/85] KVM: Provide refcounted page as output field in struct kvm_follow_pfn Sean Christopherson
2024-10-10 18:23 ` [PATCH v13 28/85] KVM: Move kvm_{set,release}_page_{clean,dirty}() helpers up in kvm_main.c Sean Christopherson
2024-10-10 18:23 ` [PATCH v13 29/85] KVM: pfncache: Precisely track refcounted pages Sean Christopherson
2024-10-10 18:23 ` [PATCH v13 30/85] KVM: Migrate kvm_vcpu_map() to kvm_follow_pfn() Sean Christopherson
2024-10-10 18:23 ` [PATCH v13 31/85] KVM: Pin (as in FOLL_PIN) pages during kvm_vcpu_map() Sean Christopherson
2024-10-10 18:23 ` [PATCH v13 32/85] KVM: nVMX: Mark vmcs12's APIC access page dirty when unmapping Sean Christopherson
2024-10-10 18:23 ` [PATCH v13 33/85] KVM: Pass in write/dirty to kvm_vcpu_map(), not kvm_vcpu_unmap() Sean Christopherson
2024-10-10 18:23 ` [PATCH v13 34/85] KVM: Get writable mapping for __kvm_vcpu_map() only when necessary Sean Christopherson
2024-10-21  9:25   ` Yan Zhao
2024-10-21 18:13     ` Sean Christopherson
2024-10-22  1:51       ` Yan Zhao
2024-10-10 18:23 ` [PATCH v13 35/85] KVM: Disallow direct access (w/o mmu_notifier) to unpinned pfn by default Sean Christopherson
2024-10-10 18:23 ` [PATCH v13 36/85] KVM: x86: Don't fault-in APIC access page during initial allocation Sean Christopherson
2024-10-10 18:23 ` [PATCH v13 37/85] KVM: x86/mmu: Add "mmu" prefix fault-in helpers to free up generic names Sean Christopherson
2024-10-10 18:23 ` [PATCH v13 38/85] KVM: x86/mmu: Put direct prefetched pages via kvm_release_page_clean() Sean Christopherson
2024-10-10 18:23 ` [PATCH v13 39/85] KVM: x86/mmu: Add common helper to handle prefetching SPTEs Sean Christopherson
2024-10-10 18:23 ` [PATCH v13 40/85] KVM: x86/mmu: Add helper to "finish" handling a guest page fault Sean Christopherson
2024-10-10 18:23 ` [PATCH v13 41/85] KVM: x86/mmu: Mark pages/folios dirty at the origin of make_spte() Sean Christopherson
2024-10-10 18:23 ` [PATCH v13 42/85] KVM: Move declarations of memslot accessors up in kvm_host.h Sean Christopherson
2024-10-10 18:23 ` [PATCH v13 43/85] KVM: Add kvm_faultin_pfn() to specifically service guest page faults Sean Christopherson
2024-10-10 18:23 ` [PATCH v13 44/85] KVM: x86/mmu: Convert page fault paths to kvm_faultin_pfn() Sean Christopherson
2024-10-10 18:23 ` [PATCH v13 45/85] KVM: guest_memfd: Pass index, not gfn, to __kvm_gmem_get_pfn() Sean Christopherson
2024-10-10 18:23 ` [PATCH v13 46/85] KVM: guest_memfd: Provide "struct page" as output from kvm_gmem_get_pfn() Sean Christopherson
2024-10-10 18:23 ` [PATCH v13 47/85] KVM: x86/mmu: Put refcounted pages instead of blindly releasing pfns Sean Christopherson
2024-10-10 18:23 ` [PATCH v13 48/85] KVM: x86/mmu: Don't mark unused faultin pages as accessed Sean Christopherson
2024-10-10 18:23 ` [PATCH v13 49/85] KVM: Move x86's API to release a faultin page to common KVM Sean Christopherson
2024-10-10 18:23 ` [PATCH v13 50/85] KVM: VMX: Hold mmu_lock until page is released when updating APIC access page Sean Christopherson
2024-10-10 18:23 ` [PATCH v13 51/85] KVM: VMX: Use __kvm_faultin_page() to get APIC access page/pfn Sean Christopherson
2024-10-21 10:22   ` Yan Zhao
2024-10-21 18:57     ` Sean Christopherson
2024-10-22  2:15       ` Yan Zhao
2024-10-10 18:23 ` [PATCH v13 52/85] KVM: PPC: e500: Mark "struct page" dirty in kvmppc_e500_shadow_map() Sean Christopherson
2024-10-10 18:23 ` [PATCH v13 53/85] KVM: PPC: e500: Mark "struct page" pfn accessed before dropping mmu_lock Sean Christopherson
2024-10-10 18:23 ` [PATCH v13 54/85] KVM: PPC: e500: Use __kvm_faultin_pfn() to handle page faults Sean Christopherson
2024-10-10 18:23 ` [PATCH v13 55/85] KVM: arm64: Mark "struct page" pfns accessed/dirty before dropping mmu_lock Sean Christopherson
2024-10-10 18:23 ` [PATCH v13 56/85] KVM: arm64: Use __kvm_faultin_pfn() to handle memory aborts Sean Christopherson
2024-10-10 18:23 ` [PATCH v13 57/85] KVM: RISC-V: Mark "struct page" pfns dirty iff a stage-2 PTE is installed Sean Christopherson
2024-10-10 18:24 ` [PATCH v13 58/85] KVM: RISC-V: Mark "struct page" pfns accessed before dropping mmu_lock Sean Christopherson
2024-10-10 18:24 ` [PATCH v13 59/85] KVM: RISC-V: Use kvm_faultin_pfn() when mapping pfns into the guest Sean Christopherson
2024-10-10 18:24 ` [PATCH v13 60/85] KVM: PPC: Use __kvm_faultin_pfn() to handle page faults on Book3s HV Sean Christopherson
2024-10-10 18:24 ` [PATCH v13 61/85] KVM: PPC: Use __kvm_faultin_pfn() to handle page faults on Book3s Radix Sean Christopherson
2024-10-10 18:24 ` [PATCH v13 62/85] KVM: PPC: Drop unused @kvm_ro param from kvmppc_book3s_instantiate_page() Sean Christopherson
2024-10-10 18:24 ` [PATCH v13 63/85] KVM: PPC: Book3S: Mark "struct page" pfns dirty/accessed after installing PTE Sean Christopherson
2024-10-10 18:24 ` [PATCH v13 64/85] KVM: PPC: Use kvm_faultin_pfn() to handle page faults on Book3s PR Sean Christopherson
2024-10-10 18:24 ` [PATCH v13 65/85] KVM: LoongArch: Mark "struct page" pfns dirty only in "slow" page fault path Sean Christopherson
2024-10-10 18:24 ` [PATCH v13 66/85] KVM: LoongArch: Mark "struct page" pfns accessed " Sean Christopherson
2024-10-10 18:24 ` [PATCH v13 67/85] KVM: LoongArch: Mark "struct page" pfn accessed before dropping mmu_lock Sean Christopherson
2024-10-10 18:24 ` [PATCH v13 68/85] KVM: LoongArch: Use kvm_faultin_pfn() to map pfns into the guest Sean Christopherson
2024-10-10 18:24 ` [PATCH v13 69/85] KVM: MIPS: Mark "struct page" pfns dirty only in "slow" page fault path Sean Christopherson
2024-10-10 18:24 ` [PATCH v13 70/85] KVM: MIPS: Mark "struct page" pfns accessed " Sean Christopherson
2024-10-10 18:24 ` [PATCH v13 71/85] KVM: MIPS: Mark "struct page" pfns accessed prior to dropping mmu_lock Sean Christopherson
2024-10-10 18:24 ` [PATCH v13 72/85] KVM: MIPS: Use kvm_faultin_pfn() to map pfns into the guest Sean Christopherson
2024-10-10 18:24 ` [PATCH v13 73/85] KVM: PPC: Remove extra get_page() to fix page refcount leak Sean Christopherson
2024-10-10 18:24 ` [PATCH v13 74/85] KVM: PPC: Use kvm_vcpu_map() to map guest memory to patch dcbz instructions Sean Christopherson
2024-10-10 18:24 ` [PATCH v13 75/85] KVM: Convert gfn_to_page() to use kvm_follow_pfn() Sean Christopherson
2024-10-10 18:24 ` [PATCH v13 76/85] KVM: Add support for read-only usage of gfn_to_page() Sean Christopherson
2024-10-10 18:24 ` [PATCH v13 77/85] KVM: arm64: Use __gfn_to_page() when copying MTE tags to/from userspace Sean Christopherson
2024-10-10 18:24 ` [PATCH v13 78/85] KVM: PPC: Explicitly require struct page memory for Ultravisor sharing Sean Christopherson
2024-10-10 18:24 ` [PATCH v13 79/85] KVM: Drop gfn_to_pfn() APIs now that all users are gone Sean Christopherson
2024-10-10 18:24 ` [PATCH v13 80/85] KVM: s390: Use kvm_release_page_dirty() to unpin "struct page" memory Sean Christopherson
2024-10-10 18:24 ` [PATCH v13 81/85] KVM: Make kvm_follow_pfn.refcounted_page a required field Sean Christopherson
2024-10-10 18:24 ` [PATCH v13 82/85] KVM: x86/mmu: Don't mark "struct page" accessed when zapping SPTEs Sean Christopherson
2024-10-10 18:24 ` [PATCH v13 83/85] KVM: arm64: Don't mark "struct page" accessed when making SPTE young Sean Christopherson
2024-10-10 18:24 ` [PATCH v13 84/85] KVM: Drop APIs that manipulate "struct page" via pfns Sean Christopherson
2024-10-10 18:24 ` [PATCH v13 85/85] KVM: Don't grab reference on VM_MIXEDMAP pfns that have a "struct page" Sean Christopherson
2024-10-17 17:40 ` [PATCH v13 00/85] KVM: Stop grabbing references to PFNMAP'd pages Paolo Bonzini
2024-10-22  0:25   ` Sean Christopherson
2024-10-25 17:41     ` Paolo Bonzini
2024-10-24  3:37 ` Dmitry Osipenko
