public inbox for linux-riscv@lists.infradead.org
 help / color / mirror / Atom feed
From: Sean Christopherson <seanjc@google.com>
To: Paolo Bonzini <pbonzini@redhat.com>,
	Marc Zyngier <maz@kernel.org>,
	 Oliver Upton <oliver.upton@linux.dev>,
	Tianrui Zhao <zhaotianrui@loongson.cn>,
	 Bibo Mao <maobibo@loongson.cn>,
	Huacai Chen <chenhuacai@kernel.org>,
	 Michael Ellerman <mpe@ellerman.id.au>,
	Anup Patel <anup@brainfault.org>,
	 Paul Walmsley <paul.walmsley@sifive.com>,
	Palmer Dabbelt <palmer@dabbelt.com>,
	 Albert Ou <aou@eecs.berkeley.edu>,
	Christian Borntraeger <borntraeger@linux.ibm.com>,
	 Janosch Frank <frankja@linux.ibm.com>,
	Claudio Imbrenda <imbrenda@linux.ibm.com>,
	 Sean Christopherson <seanjc@google.com>
Cc: kvm@vger.kernel.org, linux-arm-kernel@lists.infradead.org,
	 kvmarm@lists.linux.dev, loongarch@lists.linux.dev,
	linux-mips@vger.kernel.org,  linuxppc-dev@lists.ozlabs.org,
	kvm-riscv@lists.infradead.org,  linux-riscv@lists.infradead.org,
	linux-kernel@vger.kernel.org,
	 David Matlack <dmatlack@google.com>,
	David Stevens <stevensd@chromium.org>
Subject: [PATCH v12 17/84] KVM: Introduce kvm_follow_pfn() to eventually replace "gfn_to_pfn" APIs
Date: Fri, 26 Jul 2024 16:51:26 -0700	[thread overview]
Message-ID: <20240726235234.228822-18-seanjc@google.com> (raw)
In-Reply-To: <20240726235234.228822-1-seanjc@google.com>

From: David Stevens <stevensd@chromium.org>

Introduce kvm_follow_pfn() to eventually supplant the various "gfn_to_pfn"
APIs, albeit by adding more wrappers.  The primary motivation of the new
helper is to pass a structure instead of an ever-changing set of parameters,
e.g. so that tweaking the behavior, inputs, and/or outputs of the "to pfn"
helpers doesn't require churning half of KVM.

In the more distant future, the APIs exposed to arch code could also
follow suit, e.g. by adding something akin to x86's "struct kvm_page_fault"
when faulting in guest memory.  But for now, the goal is purely to clean
up KVM's "internal" MMU code.

As part of the conversion, replace the write_fault, interruptible, and
no-wait boolean flags with FOLL_WRITE, FOLL_INTERRUPTIBLE, and FOLL_NOWAIT
respectively.  Collecting the various FOLL_* flags into a single field
will again ease the pain of passing new flags.

Signed-off-by: David Stevens <stevensd@chromium.org>
Co-developed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 virt/kvm/kvm_main.c | 166 +++++++++++++++++++++++---------------------
 virt/kvm/kvm_mm.h   |  20 +++++-
 virt/kvm/pfncache.c |   9 ++-
 3 files changed, 111 insertions(+), 84 deletions(-)

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 6e3bb202c1b3..56c2d11761e0 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2761,8 +2761,7 @@ static inline int check_user_page_hwpoison(unsigned long addr)
  * true indicates success, otherwise false is returned.  It's also the
  * only part that runs if we can in atomic context.
  */
-static bool hva_to_pfn_fast(unsigned long addr, bool write_fault,
-			    bool *writable, kvm_pfn_t *pfn)
+static bool hva_to_pfn_fast(struct kvm_follow_pfn *kfp, kvm_pfn_t *pfn)
 {
 	struct page *page[1];
 
@@ -2771,14 +2770,13 @@ static bool hva_to_pfn_fast(unsigned long addr, bool write_fault,
 	 * or the caller allows to map a writable pfn for a read fault
 	 * request.
 	 */
-	if (!(write_fault || writable))
+	if (!((kfp->flags & FOLL_WRITE) || kfp->map_writable))
 		return false;
 
-	if (get_user_page_fast_only(addr, FOLL_WRITE, page)) {
+	if (get_user_page_fast_only(kfp->hva, FOLL_WRITE, page)) {
 		*pfn = page_to_pfn(page[0]);
-
-		if (writable)
-			*writable = true;
+		if (kfp->map_writable)
+			*kfp->map_writable = true;
 		return true;
 	}
 
@@ -2789,8 +2787,7 @@ static bool hva_to_pfn_fast(unsigned long addr, bool write_fault,
  * The slow path to get the pfn of the specified host virtual address,
  * 1 indicates success, -errno is returned if error is detected.
  */
-static int hva_to_pfn_slow(unsigned long addr, bool no_wait, bool write_fault,
-			   bool interruptible, bool *writable, kvm_pfn_t *pfn)
+static int hva_to_pfn_slow(struct kvm_follow_pfn *kfp, kvm_pfn_t *pfn)
 {
 	/*
 	 * When a VCPU accesses a page that is not mapped into the secondary
@@ -2803,34 +2800,30 @@ static int hva_to_pfn_slow(unsigned long addr, bool no_wait, bool write_fault,
 	 * Note that get_user_page_fast_only() and FOLL_WRITE for now
 	 * implicitly honor NUMA hinting faults and don't need this flag.
 	 */
-	unsigned int flags = FOLL_HWPOISON | FOLL_HONOR_NUMA_FAULT;
-	struct page *page;
+	unsigned int flags = FOLL_HWPOISON | FOLL_HONOR_NUMA_FAULT | kfp->flags;
+	struct page *page, *wpage;
 	int npages;
 
-	if (writable)
-		*writable = write_fault;
-
-	if (write_fault)
-		flags |= FOLL_WRITE;
-	if (no_wait)
-		flags |= FOLL_NOWAIT;
-	if (interruptible)
-		flags |= FOLL_INTERRUPTIBLE;
-
-	npages = get_user_pages_unlocked(addr, 1, &page, flags);
+	npages = get_user_pages_unlocked(kfp->hva, 1, &page, flags);
 	if (npages != 1)
 		return npages;
 
+	if (!kfp->map_writable)
+		goto out;
+
+	if (kfp->flags & FOLL_WRITE) {
+		*kfp->map_writable = true;
+		goto out;
+	}
+
 	/* map read fault as writable if possible */
-	if (unlikely(!write_fault) && writable) {
-		struct page *wpage;
-
-		if (get_user_page_fast_only(addr, FOLL_WRITE, &wpage)) {
-			*writable = true;
-			put_page(page);
-			page = wpage;
-		}
+	if (get_user_page_fast_only(kfp->hva, FOLL_WRITE, &wpage)) {
+		*kfp->map_writable = true;
+		put_page(page);
+		page = wpage;
 	}
+
+out:
 	*pfn = page_to_pfn(page);
 	return npages;
 }
@@ -2857,23 +2850,23 @@ static int kvm_try_get_pfn(kvm_pfn_t pfn)
 }
 
 static int hva_to_pfn_remapped(struct vm_area_struct *vma,
-			       unsigned long addr, bool write_fault,
-			       bool *writable, kvm_pfn_t *p_pfn)
+			       struct kvm_follow_pfn *kfp, kvm_pfn_t *p_pfn)
 {
 	kvm_pfn_t pfn;
 	pte_t *ptep;
 	pte_t pte;
 	spinlock_t *ptl;
+	bool write_fault = kfp->flags & FOLL_WRITE;
 	int r;
 
-	r = follow_pte(vma, addr, &ptep, &ptl);
+	r = follow_pte(vma, kfp->hva, &ptep, &ptl);
 	if (r) {
 		/*
 		 * get_user_pages fails for VM_IO and VM_PFNMAP vmas and does
 		 * not call the fault handler, so do it here.
 		 */
 		bool unlocked = false;
-		r = fixup_user_fault(current->mm, addr,
+		r = fixup_user_fault(current->mm, kfp->hva,
 				     (write_fault ? FAULT_FLAG_WRITE : 0),
 				     &unlocked);
 		if (unlocked)
@@ -2881,7 +2874,7 @@ static int hva_to_pfn_remapped(struct vm_area_struct *vma,
 		if (r)
 			return r;
 
-		r = follow_pte(vma, addr, &ptep, &ptl);
+		r = follow_pte(vma, kfp->hva, &ptep, &ptl);
 		if (r)
 			return r;
 	}
@@ -2893,8 +2886,8 @@ static int hva_to_pfn_remapped(struct vm_area_struct *vma,
 		goto out;
 	}
 
-	if (writable)
-		*writable = pte_write(pte);
+	if (kfp->map_writable)
+		*kfp->map_writable = pte_write(pte);
 	pfn = pte_pfn(pte);
 
 	/*
@@ -2924,22 +2917,7 @@ static int hva_to_pfn_remapped(struct vm_area_struct *vma,
 	return r;
 }
 
-/*
- * Pin guest page in memory and return its pfn.
- * @addr: host virtual address which maps memory to the guest
- * @interruptible: whether the process can be interrupted by non-fatal signals
- * @no_wait: whether or not this function need to wait IO complete if the
- *	     host page is not in the memory
- * @write_fault: whether we should get a writable host page
- * @writable: whether it allows to map a writable host page for !@write_fault
- *
- * The function will map a writable host page for these two cases:
- * 1): @write_fault = true
- * 2): @write_fault = false && @writable, @writable will tell the caller
- *     whether the mapping is writable.
- */
-kvm_pfn_t hva_to_pfn(unsigned long addr, bool interruptible, bool no_wait,
-		     bool write_fault, bool *writable)
+kvm_pfn_t hva_to_pfn(struct kvm_follow_pfn *kfp)
 {
 	struct vm_area_struct *vma;
 	kvm_pfn_t pfn;
@@ -2947,11 +2925,10 @@ kvm_pfn_t hva_to_pfn(unsigned long addr, bool interruptible, bool no_wait,
 
 	might_sleep();
 
-	if (hva_to_pfn_fast(addr, write_fault, writable, &pfn))
+	if (hva_to_pfn_fast(kfp, &pfn))
 		return pfn;
 
-	npages = hva_to_pfn_slow(addr, no_wait, write_fault, interruptible,
-				 writable, &pfn);
+	npages = hva_to_pfn_slow(kfp, &pfn);
 	if (npages == 1)
 		return pfn;
 	if (npages == -EINTR)
@@ -2959,24 +2936,25 @@ kvm_pfn_t hva_to_pfn(unsigned long addr, bool interruptible, bool no_wait,
 
 	mmap_read_lock(current->mm);
 	if (npages == -EHWPOISON ||
-	    (!no_wait && check_user_page_hwpoison(addr))) {
+	    (!(kfp->flags & FOLL_NOWAIT) && check_user_page_hwpoison(kfp->hva))) {
 		pfn = KVM_PFN_ERR_HWPOISON;
 		goto exit;
 	}
 
 retry:
-	vma = vma_lookup(current->mm, addr);
+	vma = vma_lookup(current->mm, kfp->hva);
 
 	if (vma == NULL)
 		pfn = KVM_PFN_ERR_FAULT;
 	else if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
-		r = hva_to_pfn_remapped(vma, addr, write_fault, writable, &pfn);
+		r = hva_to_pfn_remapped(vma, kfp, &pfn);
 		if (r == -EAGAIN)
 			goto retry;
 		if (r < 0)
 			pfn = KVM_PFN_ERR_FAULT;
 	} else {
-		if (no_wait && vma_is_valid(vma, write_fault))
+		if ((kfp->flags & FOLL_NOWAIT) &&
+		    vma_is_valid(vma, kfp->flags & FOLL_WRITE))
 			pfn = KVM_PFN_ERR_NEEDS_IO;
 		else
 			pfn = KVM_PFN_ERR_FAULT;
@@ -2986,41 +2964,69 @@ kvm_pfn_t hva_to_pfn(unsigned long addr, bool interruptible, bool no_wait,
 	return pfn;
 }
 
+static kvm_pfn_t kvm_follow_pfn(struct kvm_follow_pfn *kfp)
+{
+	kfp->hva = __gfn_to_hva_many(kfp->slot, kfp->gfn, NULL,
+				     kfp->flags & FOLL_WRITE);
+
+	if (kfp->hva == KVM_HVA_ERR_RO_BAD)
+		return KVM_PFN_ERR_RO_FAULT;
+
+	if (kvm_is_error_hva(kfp->hva))
+		return KVM_PFN_NOSLOT;
+
+	if (memslot_is_readonly(kfp->slot) && kfp->map_writable) {
+		*kfp->map_writable = false;
+		kfp->map_writable = NULL;
+	}
+
+	return hva_to_pfn(kfp);
+}
+
 kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn,
 			       bool interruptible, bool no_wait,
 			       bool write_fault, bool *writable)
 {
-	unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault);
-
-	if (kvm_is_error_hva(addr)) {
-		if (writable)
-			*writable = false;
-
-		return addr == KVM_HVA_ERR_RO_BAD ? KVM_PFN_ERR_RO_FAULT :
-						    KVM_PFN_NOSLOT;
-	}
-
-	/* Do not map writable pfn in the readonly memslot. */
-	if (writable && memslot_is_readonly(slot)) {
-		*writable = false;
-		writable = NULL;
-	}
-
-	return hva_to_pfn(addr, interruptible, no_wait, write_fault, writable);
+	struct kvm_follow_pfn kfp = {
+		.slot = slot,
+		.gfn = gfn,
+		.map_writable = writable,
+	};
+
+	if (write_fault)
+		kfp.flags |= FOLL_WRITE;
+	if (no_wait)
+		kfp.flags |= FOLL_NOWAIT;
+	if (interruptible)
+		kfp.flags |= FOLL_INTERRUPTIBLE;
+
+	return kvm_follow_pfn(&kfp);
 }
 EXPORT_SYMBOL_GPL(__gfn_to_pfn_memslot);
 
 kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
 		      bool *writable)
 {
-	return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, false,
-				    write_fault, writable);
+	struct kvm_follow_pfn kfp = {
+		.slot = gfn_to_memslot(kvm, gfn),
+		.gfn = gfn,
+		.flags = write_fault ? FOLL_WRITE : 0,
+		.map_writable = writable,
+	};
+
+	return kvm_follow_pfn(&kfp);
 }
 EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);
 
 kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn)
 {
-	return __gfn_to_pfn_memslot(slot, gfn, false, false, true, NULL);
+	struct kvm_follow_pfn kfp = {
+		.slot = slot,
+		.gfn = gfn,
+		.flags = FOLL_WRITE,
+	};
+
+	return kvm_follow_pfn(&kfp);
 }
 EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot);
 
diff --git a/virt/kvm/kvm_mm.h b/virt/kvm/kvm_mm.h
index 51f3fee4ca3f..d5a215958f06 100644
--- a/virt/kvm/kvm_mm.h
+++ b/virt/kvm/kvm_mm.h
@@ -20,8 +20,24 @@
 #define KVM_MMU_UNLOCK(kvm)		spin_unlock(&(kvm)->mmu_lock)
 #endif /* KVM_HAVE_MMU_RWLOCK */
 
-kvm_pfn_t hva_to_pfn(unsigned long addr, bool interruptible, bool no_wait,
-		     bool write_fault, bool *writable);
+
+struct kvm_follow_pfn {
+	const struct kvm_memory_slot *slot;
+	const gfn_t gfn;
+
+	unsigned long hva;
+
+	/* FOLL_* flags modifying lookup behavior, e.g. FOLL_WRITE. */
+	unsigned int flags;
+
+	/*
+	 * If non-NULL, try to get a writable mapping even for a read fault.
+	 * Set to true if a writable mapping was obtained.
+	 */
+	bool *map_writable;
+};
+
+kvm_pfn_t hva_to_pfn(struct kvm_follow_pfn *kfp);
 
 #ifdef CONFIG_HAVE_KVM_PFNCACHE
 void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm,
diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c
index 32dc61f48c81..067daf9ad6ef 100644
--- a/virt/kvm/pfncache.c
+++ b/virt/kvm/pfncache.c
@@ -159,6 +159,12 @@ static kvm_pfn_t hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc)
 	kvm_pfn_t new_pfn = KVM_PFN_ERR_FAULT;
 	void *new_khva = NULL;
 	unsigned long mmu_seq;
+	struct kvm_follow_pfn kfp = {
+		.slot = gpc->memslot,
+		.gfn = gpa_to_gfn(gpc->gpa),
+		.flags = FOLL_WRITE,
+		.hva = gpc->uhva,
+	};
 
 	lockdep_assert_held(&gpc->refresh_lock);
 
@@ -197,8 +203,7 @@ static kvm_pfn_t hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc)
 			cond_resched();
 		}
 
-		/* We always request a writable mapping */
-		new_pfn = hva_to_pfn(gpc->uhva, false, false, true, NULL);
+		new_pfn = hva_to_pfn(&kfp);
 		if (is_error_noslot_pfn(new_pfn))
 			goto out_error;
 
-- 
2.46.0.rc1.232.g9752f9e123-goog


_______________________________________________
linux-riscv mailing list
linux-riscv@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-riscv

  parent reply	other threads:[~2024-07-26 23:59 UTC|newest]

Thread overview: 150+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2024-07-26 23:51 [PATCH v12 00/84] KVM: Stop grabbing references to PFNMAP'd pages Sean Christopherson
2024-07-26 23:51 ` [PATCH v12 01/84] KVM: arm64: Release pfn, i.e. put page, if copying MTE tags hits ZONE_DEVICE Sean Christopherson
2024-07-31 16:23   ` Alex Bennée
2024-07-31 20:36     ` Sean Christopherson
2024-08-01 10:07   ` Marc Zyngier
2024-08-07 14:15   ` Catalin Marinas
2024-08-08  9:54     ` Steven Price
2024-08-22 14:24   ` (subset) " Marc Zyngier
2024-07-26 23:51 ` [PATCH v12 02/84] KVM: arm64: Disallow copying MTE to guest memory while KVM is dirty logging Sean Christopherson
2024-08-01  7:34   ` Aneesh Kumar K.V
2024-08-01 18:01     ` Sean Christopherson
2024-08-05  7:57       ` Aneesh Kumar K.V
2024-08-05 22:09         ` Sean Christopherson
2024-08-07 16:21   ` Catalin Marinas
2024-08-08  9:54     ` Steven Price
2024-08-22 14:24   ` (subset) " Marc Zyngier
2024-07-26 23:51 ` [PATCH v12 03/84] KVM: Drop KVM_ERR_PTR_BAD_PAGE and instead return NULL to indicate an error Sean Christopherson
2024-08-01  8:57   ` Alex Bennée
2024-07-26 23:51 ` [PATCH v12 04/84] KVM: Allow calling kvm_release_page_{clean,dirty}() on a NULL page pointer Sean Christopherson
2024-08-01  9:03   ` Alex Bennée
2024-07-26 23:51 ` [PATCH v12 05/84] KVM: Add kvm_release_page_unused() API to put pages that KVM never consumes Sean Christopherson
2024-08-01  9:20   ` Alex Bennée
2024-08-01 14:43     ` Sean Christopherson
2024-07-26 23:51 ` [PATCH v12 06/84] KVM: x86/mmu: Skip the "try unsync" path iff the old SPTE was a leaf SPTE Sean Christopherson
2024-07-26 23:51 ` [PATCH v12 07/84] KVM: x86/mmu: Mark folio dirty when creating SPTE, not when zapping/modifying Sean Christopherson
2024-07-26 23:51 ` [PATCH v12 08/84] KVM: x86/mmu: Mark page/folio accessed only when zapping leaf SPTEs Sean Christopherson
2024-07-26 23:51 ` [PATCH v12 09/84] KVM: x86/mmu: Don't force flush if SPTE update clears Accessed bit Sean Christopherson
2024-07-26 23:51 ` [PATCH v12 10/84] KVM: x86/mmu: Use gfn_to_page_many_atomic() when prefetching indirect PTEs Sean Christopherson
2024-07-26 23:51 ` [PATCH v12 11/84] KVM: Rename gfn_to_page_many_atomic() to kvm_prefetch_pages() Sean Christopherson
2024-08-02 11:16   ` Alex Bennée
2024-07-26 23:51 ` [PATCH v12 12/84] KVM: Drop @atomic param from gfn=>pfn and hva=>pfn APIs Sean Christopherson
2024-08-01  9:31   ` Alex Bennée
2024-07-26 23:51 ` [PATCH v12 13/84] KVM: Annotate that all paths in hva_to_pfn() might sleep Sean Christopherson
2024-08-08 12:00   ` Alex Bennée
2024-08-08 13:16     ` Sean Christopherson
2024-08-08 15:18       ` Alex Bennée
2024-08-08 15:31         ` Sean Christopherson
2024-08-08 16:16           ` Alex Bennée
2024-07-26 23:51 ` [PATCH v12 14/84] KVM: Replace "async" pointer in gfn=>pfn with "no_wait" and error code Sean Christopherson
2024-07-26 23:51 ` [PATCH v12 15/84] KVM: x86/mmu: Drop kvm_page_fault.hva, i.e. don't track intermediate hva Sean Christopherson
2024-07-26 23:51 ` [PATCH v12 16/84] KVM: Drop unused "hva" pointer from __gfn_to_pfn_memslot() Sean Christopherson
2024-07-26 23:51 ` Sean Christopherson [this message]
2024-07-26 23:51 ` [PATCH v12 18/84] KVM: Remove pointless sanity check on @map param to kvm_vcpu_(un)map() Sean Christopherson
2024-07-26 23:51 ` [PATCH v12 19/84] KVM: Explicitly initialize all fields at the start of kvm_vcpu_map() Sean Christopherson
2024-07-26 23:51 ` [PATCH v12 20/84] KVM: Use NULL for struct page pointer to indicate mremapped memory Sean Christopherson
2024-07-26 23:51 ` [PATCH v12 21/84] KVM: nVMX: Rely on kvm_vcpu_unmap() to track validity of eVMCS mapping Sean Christopherson
2024-07-26 23:51 ` [PATCH v12 22/84] KVM: nVMX: Drop pointless msr_bitmap_map field from struct nested_vmx Sean Christopherson
2024-07-26 23:51 ` [PATCH v12 23/84] KVM: nVMX: Add helper to put (unmap) vmcs12 pages Sean Christopherson
2024-07-26 23:51 ` [PATCH v12 24/84] KVM: Use plain "struct page" pointer instead of single-entry array Sean Christopherson
2024-08-01  9:53   ` Alex Bennée
2024-07-26 23:51 ` [PATCH v12 25/84] KVM: Provide refcounted page as output field in struct kvm_follow_pfn Sean Christopherson
2024-07-26 23:51 ` [PATCH v12 26/84] KVM: Move kvm_{set,release}_page_{clean,dirty}() helpers up in kvm_main.c Sean Christopherson
2024-08-01  9:55   ` Alex Bennée
2024-07-26 23:51 ` [PATCH v12 27/84] KVM: pfncache: Precisely track refcounted pages Sean Christopherson
2024-07-26 23:51 ` [PATCH v12 28/84] KVM: Migrate kvm_vcpu_map() to kvm_follow_pfn() Sean Christopherson
2024-07-26 23:51 ` [PATCH v12 29/84] KVM: Pin (as in FOLL_PIN) pages during kvm_vcpu_map() Sean Christopherson
2024-07-26 23:51 ` [PATCH v12 30/84] KVM: nVMX: Mark vmcs12's APIC access page dirty when unmapping Sean Christopherson
2024-07-26 23:51 ` [PATCH v12 31/84] KVM: Pass in write/dirty to kvm_vcpu_map(), not kvm_vcpu_unmap() Sean Christopherson
2024-07-26 23:51 ` [PATCH v12 32/84] KVM: Get writable mapping for __kvm_vcpu_map() only when necessary Sean Christopherson
2024-07-26 23:51 ` [PATCH v12 33/84] KVM: Disallow direct access (w/o mmu_notifier) to unpinned pfn by default Sean Christopherson
2024-07-26 23:51 ` [PATCH v12 34/84] KVM: Add a helper to lookup a pfn without grabbing a reference Sean Christopherson
2024-07-30 10:41   ` Paolo Bonzini
2024-07-30 20:15     ` Sean Christopherson
2024-07-31 10:11       ` Paolo Bonzini
2024-07-26 23:51 ` [PATCH v12 35/84] KVM: x86: Use kvm_lookup_pfn() to check if retrying #PF is useful Sean Christopherson
2024-07-26 23:51 ` [PATCH v12 36/84] KVM: x86: Use kvm_lookup_pfn() to check if APIC access page was installed Sean Christopherson
2024-07-26 23:51 ` [PATCH v12 37/84] KVM: x86/mmu: Add "mmu" prefix fault-in helpers to free up generic names Sean Christopherson
2024-07-26 23:51 ` [PATCH v12 38/84] KVM: x86/mmu: Put direct prefetched pages via kvm_release_page_clean() Sean Christopherson
2024-07-26 23:51 ` [PATCH v12 39/84] KVM: x86/mmu: Add common helper to handle prefetching SPTEs Sean Christopherson
2024-07-26 23:51 ` [PATCH v12 40/84] KVM: x86/mmu: Add helper to "finish" handling a guest page fault Sean Christopherson
2024-07-26 23:51 ` [PATCH v12 41/84] KVM: x86/mmu: Mark pages/folios dirty at the origin of make_spte() Sean Christopherson
2024-07-30  8:57   ` Paolo Bonzini
2024-07-26 23:51 ` [PATCH v12 42/84] KVM: Move declarations of memslot accessors up in kvm_host.h Sean Christopherson
2024-07-26 23:51 ` [PATCH v12 43/84] KVM: Add kvm_faultin_pfn() to specifically service guest page faults Sean Christopherson
2024-07-26 23:51 ` [PATCH v12 44/84] KVM: x86/mmu: Convert page fault paths to kvm_faultin_pfn() Sean Christopherson
2024-07-26 23:51 ` [PATCH v12 45/84] KVM: guest_memfd: Provide "struct page" as output from kvm_gmem_get_pfn() Sean Christopherson
2024-07-30  9:05   ` Paolo Bonzini
2024-07-30 20:00     ` Sean Christopherson
2024-07-31 10:12       ` Paolo Bonzini
2024-07-26 23:51 ` [PATCH v12 46/84] KVM: x86/mmu: Put refcounted pages instead of blindly releasing pfns Sean Christopherson
2024-07-26 23:51 ` [PATCH v12 47/84] KVM: x86/mmu: Don't mark unused faultin pages as accessed Sean Christopherson
2024-07-26 23:51 ` [PATCH v12 48/84] KVM: Move x86's API to release a faultin page to common KVM Sean Christopherson
2024-07-30  8:58   ` Paolo Bonzini
2024-07-30 19:15     ` Sean Christopherson
2024-07-31 10:18       ` Paolo Bonzini
2024-07-26 23:51 ` [PATCH v12 49/84] KVM: VMX: Hold mmu_lock until page is released when updating APIC access page Sean Christopherson
2024-07-26 23:51 ` [PATCH v12 50/84] KVM: VMX: Use __kvm_faultin_page() to get APIC access page/pfn Sean Christopherson
2024-07-30  8:59   ` Paolo Bonzini
2024-07-26 23:52 ` [PATCH v12 51/84] KVM: PPC: e500: Mark "struct page" dirty in kvmppc_e500_shadow_map() Sean Christopherson
2024-07-26 23:52 ` [PATCH v12 52/84] KVM: PPC: e500: Mark "struct page" pfn accessed before dropping mmu_lock Sean Christopherson
2024-07-26 23:52 ` [PATCH v12 53/84] KVM: PPC: e500: Use __kvm_faultin_pfn() to handle page faults Sean Christopherson
2024-07-26 23:52 ` [PATCH v12 54/84] KVM: arm64: Mark "struct page" pfns accessed/dirty before dropping mmu_lock Sean Christopherson
2024-08-05 23:25   ` Oliver Upton
2024-08-05 23:26     ` Oliver Upton
2024-08-05 23:53       ` Sean Christopherson
2024-08-05 23:56         ` Oliver Upton
2024-08-06  8:55       ` Marc Zyngier
2024-08-06 15:19         ` Sean Christopherson
2024-08-06  8:24     ` Fuad Tabba
2024-07-26 23:52 ` [PATCH v12 55/84] KVM: arm64: Use __kvm_faultin_pfn() to handle memory aborts Sean Christopherson
2024-07-26 23:52 ` [PATCH v12 56/84] KVM: RISC-V: Mark "struct page" pfns dirty iff a stage-2 PTE is installed Sean Christopherson
2024-07-31  8:11   ` Andrew Jones
2024-08-06 15:03   ` Anup Patel
2024-07-26 23:52 ` [PATCH v12 57/84] KVM: RISC-V: Mark "struct page" pfns accessed before dropping mmu_lock Sean Christopherson
2024-07-31  8:12   ` Andrew Jones
2024-08-06 15:04   ` Anup Patel
2024-07-26 23:52 ` [PATCH v12 58/84] KVM: RISC-V: Use kvm_faultin_pfn() when mapping pfns into the guest Sean Christopherson
2024-07-31  8:11   ` Andrew Jones
2024-08-06 15:04   ` Anup Patel
2024-07-26 23:52 ` [PATCH v12 59/84] KVM: PPC: Use __kvm_faultin_pfn() to handle page faults on Book3s HV Sean Christopherson
2024-07-26 23:52 ` [PATCH v12 60/84] KVM: PPC: Use __kvm_faultin_pfn() to handle page faults on Book3s Radix Sean Christopherson
2024-07-26 23:52 ` [PATCH v12 61/84] KVM: PPC: Drop unused @kvm_ro param from kvmppc_book3s_instantiate_page() Sean Christopherson
2024-07-26 23:52 ` [PATCH v12 62/84] KVM: PPC: Book3S: Mark "struct page" pfns dirty/accessed after installing PTE Sean Christopherson
2024-07-26 23:52 ` [PATCH v12 63/84] KVM: PPC: Use kvm_faultin_pfn() to handle page faults on Book3s PR Sean Christopherson
2024-07-26 23:52 ` [PATCH v12 64/84] KVM: LoongArch: Mark "struct page" pfns dirty only in "slow" page fault path Sean Christopherson
2024-08-02  7:53   ` maobibo
2024-08-02 19:32     ` Sean Christopherson
2024-08-03  3:02       ` maobibo
2024-08-05 23:22         ` Sean Christopherson
2024-08-06  1:16           ` maobibo
2024-08-08 11:38   ` maobibo
2024-07-26 23:52 ` [PATCH v12 65/84] KVM: LoongArch: Mark "struct page" pfns accessed " Sean Christopherson
2024-08-02  7:34   ` maobibo
2024-07-26 23:52 ` [PATCH v12 66/84] KVM: LoongArch: Mark "struct page" pfn accessed before dropping mmu_lock Sean Christopherson
2024-08-08 11:47   ` maobibo
2024-07-26 23:52 ` [PATCH v12 67/84] KVM: LoongArch: Use kvm_faultin_pfn() to map pfns into the guest Sean Christopherson
2024-07-26 23:52 ` [PATCH v12 68/84] KVM: MIPS: Mark "struct page" pfns dirty only in "slow" page fault path Sean Christopherson
2024-07-26 23:52 ` [PATCH v12 69/84] KVM: MIPS: Mark "struct page" pfns accessed " Sean Christopherson
2024-07-26 23:52 ` [PATCH v12 70/84] KVM: MIPS: Mark "struct page" pfns accessed prior to dropping mmu_lock Sean Christopherson
2024-07-26 23:52 ` [PATCH v12 71/84] KVM: MIPS: Use kvm_faultin_pfn() to map pfns into the guest Sean Christopherson
2024-07-26 23:52 ` [PATCH v12 72/84] KVM: PPC: Remove extra get_page() to fix page refcount leak Sean Christopherson
2024-07-26 23:52 ` [PATCH v12 73/84] KVM: PPC: Use kvm_vcpu_map() to map guest memory to patch dcbz instructions Sean Christopherson
2024-07-26 23:52 ` [PATCH v12 74/84] KVM: Convert gfn_to_page() to use kvm_follow_pfn() Sean Christopherson
2024-07-26 23:52 ` [PATCH v12 75/84] KVM: Add support for read-only usage of gfn_to_page() Sean Christopherson
2024-07-26 23:52 ` [PATCH v12 76/84] KVM: arm64: Use __gfn_to_page() when copying MTE tags to/from userspace Sean Christopherson
2024-07-26 23:52 ` [PATCH v12 77/84] KVM: PPC: Explicitly require struct page memory for Ultravisor sharing Sean Christopherson
2024-07-26 23:52 ` [PATCH v12 78/84] KVM: Drop gfn_to_pfn() APIs now that all users are gone Sean Christopherson
2024-07-26 23:52 ` [PATCH v12 79/84] KVM: s390: Use kvm_release_page_dirty() to unpin "struct page" memory Sean Christopherson
2024-07-26 23:52 ` [PATCH v12 80/84] KVM: Make kvm_follow_pfn.refcounted_page a required field Sean Christopherson
2024-07-26 23:52 ` [PATCH v12 81/84] KVM: x86/mmu: Don't mark "struct page" accessed when zapping SPTEs Sean Christopherson
2024-07-26 23:52 ` [PATCH v12 82/84] KVM: arm64: Don't mark "struct page" accessed when making SPTE young Sean Christopherson
2024-07-26 23:52 ` [PATCH v12 83/84] KVM: Drop APIs that manipulate "struct page" via pfns Sean Christopherson
2024-08-02 11:03   ` Alex Bennée
2024-07-26 23:52 ` [PATCH v12 84/84] KVM: Don't grab reference on VM_MIXEDMAP pfns that have a "struct page" Sean Christopherson
2024-07-30 11:38   ` Paolo Bonzini
2024-07-30 20:21     ` Sean Christopherson
2024-07-31  9:50       ` Paolo Bonzini
2024-07-30 11:52 ` [PATCH v12 00/84] KVM: Stop grabbing references to PFNMAP'd pages Paolo Bonzini
2024-07-30 22:35   ` Sean Christopherson
2024-08-27  9:06 ` Alex Bennée

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20240726235234.228822-18-seanjc@google.com \
    --to=seanjc@google.com \
    --cc=anup@brainfault.org \
    --cc=aou@eecs.berkeley.edu \
    --cc=borntraeger@linux.ibm.com \
    --cc=chenhuacai@kernel.org \
    --cc=dmatlack@google.com \
    --cc=frankja@linux.ibm.com \
    --cc=imbrenda@linux.ibm.com \
    --cc=kvm-riscv@lists.infradead.org \
    --cc=kvm@vger.kernel.org \
    --cc=kvmarm@lists.linux.dev \
    --cc=linux-arm-kernel@lists.infradead.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-mips@vger.kernel.org \
    --cc=linux-riscv@lists.infradead.org \
    --cc=linuxppc-dev@lists.ozlabs.org \
    --cc=loongarch@lists.linux.dev \
    --cc=maobibo@loongson.cn \
    --cc=maz@kernel.org \
    --cc=mpe@ellerman.id.au \
    --cc=oliver.upton@linux.dev \
    --cc=palmer@dabbelt.com \
    --cc=paul.walmsley@sifive.com \
    --cc=pbonzini@redhat.com \
    --cc=stevensd@chromium.org \
    --cc=zhaotianrui@loongson.cn \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox