From: Nikolas Wipper <nikwip@amazon.de>
To: Paolo Bonzini <pbonzini@redhat.com>,
Sean Christopherson <seanjc@google.com>,
Vitaly Kuznetsov <vkuznets@redhat.com>
Cc: Nicolas Saenz Julienne <nsaenz@amazon.com>,
Alexander Graf <graf@amazon.de>,
James Gowans <jgowans@amazon.com>, <nh-open-source@amazon.com>,
Thomas Gleixner <tglx@linutronix.de>,
"Ingo Molnar" <mingo@redhat.com>, Borislav Petkov <bp@alien8.de>,
Dave Hansen <dave.hansen@linux.intel.com>,
<linux-kernel@vger.kernel.org>, <kvm@vger.kernel.org>,
<x86@kernel.org>, <linux-doc@vger.kernel.org>,
<linux-kselftest@vger.kernel.org>, <kvmarm@lists.linux.dev>,
<kvm-riscv@lists.infradead.org>,
Nikolas Wipper <nikwip@amazon.de>
Subject: [PATCH 09/15] KVM: x86/mmu: Introduce status parameter to page walker
Date: Tue, 10 Sep 2024 15:22:01 +0000 [thread overview]
Message-ID: <20240910152207.38974-10-nikwip@amazon.de> (raw)
In-Reply-To: <20240910152207.38974-1-nikwip@amazon.de>
Introduce the status parameter to walk_addr_generic(). It will be used
by later patches in this series to report back to the caller whether
setting the accessed/dirty bits succeeded.
No functional change intended.
Signed-off-by: Nikolas Wipper <nikwip@amazon.de>
---
arch/x86/include/asm/kvm_host.h | 2 +-
arch/x86/kvm/hyperv.c | 2 +-
arch/x86/kvm/mmu.h | 8 +++++---
arch/x86/kvm/mmu/mmu.c | 5 +++--
arch/x86/kvm/mmu/paging_tmpl.h | 26 ++++++++++++++++----------
arch/x86/kvm/x86.c | 25 ++++++++++++++-----------
6 files changed, 40 insertions(+), 28 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index cd2c391d6a24..1c5aaf55c683 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -460,7 +460,7 @@ struct kvm_mmu {
struct x86_exception *fault);
gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
gpa_t gva_or_gpa, u64 access, u64 flags,
- struct x86_exception *exception);
+ struct x86_exception *exception, u16 *status);
int (*sync_spte)(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *sp, int i);
struct kvm_mmu_root_info root;
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index b237231ace61..30d5b86bc306 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -2037,7 +2037,7 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
*/
if (!hc->fast && is_guest_mode(vcpu)) {
hc->ingpa = translate_nested_gpa(vcpu, hc->ingpa, 0,
- PWALK_SET_ALL, NULL);
+ PWALK_SET_ALL, NULL, NULL);
if (unlikely(hc->ingpa == INVALID_GPA))
return HV_STATUS_INVALID_HYPERCALL_INPUT;
}
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 35030f6466b5..272ce93f855f 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -275,15 +275,17 @@ static inline void kvm_update_page_stats(struct kvm *kvm, int level, int count)
}
gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u64 access,
- u64 flags, struct x86_exception *exception);
+ u64 flags, struct x86_exception *exception,
+ u16 *status);
static inline gpa_t kvm_translate_gpa(struct kvm_vcpu *vcpu,
struct kvm_mmu *mmu,
gpa_t gpa, u64 access, u64 flags,
- struct x86_exception *exception)
+ struct x86_exception *exception,
+ u16 *status)
{
if (mmu != &vcpu->arch.nested_mmu)
return gpa;
- return translate_nested_gpa(vcpu, gpa, access, flags, exception);
+ return translate_nested_gpa(vcpu, gpa, access, flags, exception, status);
}
#endif
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 50c635142bf7..2ab0437edf54 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4103,11 +4103,12 @@ void kvm_mmu_sync_prev_roots(struct kvm_vcpu *vcpu)
static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
gpa_t vaddr, u64 access, u64 flags,
- struct x86_exception *exception)
+ struct x86_exception *exception, u16 *status)
{
if (exception)
exception->error_code = 0;
- return kvm_translate_gpa(vcpu, mmu, vaddr, access, flags, exception);
+ return kvm_translate_gpa(vcpu, mmu, vaddr, access, flags, exception,
+ status);
}
static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct)
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 2cc40fd17f53..985a19dda603 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -197,7 +197,8 @@ static inline unsigned FNAME(gpte_access)(u64 gpte)
static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
struct kvm_mmu *mmu,
struct guest_walker *walker,
- gpa_t addr, int write_fault)
+ gpa_t addr, int write_fault,
+ u16 *status)
{
unsigned level, index;
pt_element_t pte, orig_pte;
@@ -301,7 +302,8 @@ static inline bool FNAME(is_last_gpte)(struct kvm_mmu *mmu,
*/
static int FNAME(walk_addr_generic)(struct guest_walker *walker,
struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
- gpa_t addr, u64 access, u64 flags)
+ gpa_t addr, u64 access, u64 flags,
+ u16 *status)
{
int ret;
pt_element_t pte;
@@ -344,6 +346,9 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
walker->fault.flags = 0;
+ if (status)
+ *status = 0;
+
/*
* FIXME: on Intel processors, loads of the PDPTE registers for PAE paging
* by the MOV to CR instruction are treated as reads and do not cause the
@@ -383,7 +388,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
real_gpa = kvm_translate_gpa(vcpu, mmu, gfn_to_gpa(table_gfn),
nested_access, flags,
- &walker->fault);
+ &walker->fault, status);
/*
* FIXME: This can happen if emulation (for of an INS/OUTS
@@ -453,8 +458,8 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
gfn += pse36_gfn_delta(pte);
#endif
- real_gpa = kvm_translate_gpa(vcpu, mmu, gfn_to_gpa(gfn), access,
- flags, &walker->fault);
+ real_gpa = kvm_translate_gpa(vcpu, mmu, gfn_to_gpa(gfn), access, flags,
+ &walker->fault, status);
if (real_gpa == INVALID_GPA)
goto late_exit;
@@ -473,7 +478,8 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
if (unlikely(set_accessed && !accessed_dirty)) {
ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker, addr,
- write_fault && set_dirty);
+ write_fault && set_dirty,
+ status);
if (unlikely(ret < 0))
goto error;
else if (ret)
@@ -538,7 +544,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
*/
++walker->level;
FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker, addr,
- false);
+ false, status);
--walker->level;
}
return 0;
@@ -548,7 +554,7 @@ static int FNAME(walk_addr)(struct guest_walker *walker, struct kvm_vcpu *vcpu,
gpa_t addr, u64 access, u64 flags)
{
return FNAME(walk_addr_generic)(walker, vcpu, vcpu->arch.mmu, addr,
- access, flags);
+ access, flags, NULL);
}
static bool
@@ -891,7 +897,7 @@ static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp)
/* Note, @addr is a GPA when gva_to_gpa() translates an L2 GPA to an L1 GPA. */
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
gpa_t addr, u64 access, u64 flags,
- struct x86_exception *exception)
+ struct x86_exception *exception, u16 *status)
{
struct guest_walker walker;
gpa_t gpa = INVALID_GPA;
@@ -902,7 +908,7 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
WARN_ON_ONCE((addr >> 32) && mmu == vcpu->arch.walk_mmu);
#endif
- r = FNAME(walk_addr_generic)(&walker, vcpu, mmu, addr, access, flags);
+ r = FNAME(walk_addr_generic)(&walker, vcpu, mmu, addr, access, flags, status);
if (r) {
gpa = gfn_to_gpa(walker.gfn);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 32e81cd502ee..be696b60aba6 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1068,7 +1068,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
*/
real_gpa = kvm_translate_gpa(vcpu, mmu, gfn_to_gpa(pdpt_gfn),
PFERR_USER_MASK | PFERR_WRITE_MASK,
- PWALK_SET_ALL, NULL);
+ PWALK_SET_ALL, NULL, NULL);
if (real_gpa == INVALID_GPA)
return 0;
@@ -7561,7 +7561,8 @@ void kvm_get_segment(struct kvm_vcpu *vcpu,
}
gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u64 access,
- u64 flags, struct x86_exception *exception)
+ u64 flags, struct x86_exception *exception,
+ u16 *status)
{
struct kvm_mmu *mmu = vcpu->arch.mmu;
gpa_t t_gpa;
@@ -7570,7 +7571,8 @@ gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u64 access,
/* NPT walks are always user-walks */
access |= PFERR_USER_MASK;
- t_gpa = mmu->gva_to_gpa(vcpu, mmu, gpa, access, flags, exception);
+ t_gpa = mmu->gva_to_gpa(vcpu, mmu, gpa, access, flags, exception,
+ status);
return t_gpa;
}
@@ -7582,7 +7584,7 @@ gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
u64 access = (kvm_x86_call(get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
return mmu->gva_to_gpa(vcpu, mmu, gva, access, PWALK_SET_ALL,
- exception);
+ exception, NULL);
}
EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_read);
@@ -7594,7 +7596,7 @@ gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
u64 access = (kvm_x86_call(get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
access |= PFERR_WRITE_MASK;
return mmu->gva_to_gpa(vcpu, mmu, gva, access, PWALK_SET_ALL,
- exception);
+ exception, NULL);
}
EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_write);
@@ -7604,7 +7606,7 @@ gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
{
struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
- return mmu->gva_to_gpa(vcpu, mmu, gva, 0, PWALK_SET_ALL, exception);
+ return mmu->gva_to_gpa(vcpu, mmu, gva, 0, PWALK_SET_ALL, exception, NULL);
}
static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
@@ -7617,7 +7619,7 @@ static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
while (bytes) {
gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access,
- PWALK_SET_ALL, exception);
+ PWALK_SET_ALL, exception, NULL);
unsigned offset = addr & (PAGE_SIZE-1);
unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
int ret;
@@ -7652,7 +7654,8 @@ static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
/* Inline kvm_read_guest_virt_helper for speed. */
gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access | PFERR_FETCH_MASK,
- PWALK_SET_ALL, exception);
+ PWALK_SET_ALL,
+ exception, NULL);
if (unlikely(gpa == INVALID_GPA))
return X86EMUL_PROPAGATE_FAULT;
@@ -7710,7 +7713,7 @@ static int kvm_write_guest_virt_helper(gva_t addr, void *val, unsigned int bytes
while (bytes) {
gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access,
- PWALK_SET_ALL, exception);
+ PWALK_SET_ALL, exception, NULL);
unsigned offset = addr & (PAGE_SIZE-1);
unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
int ret;
@@ -7830,7 +7833,7 @@ static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
return 1;
}
- *gpa = mmu->gva_to_gpa(vcpu, mmu, gva, access, PWALK_SET_ALL, exception);
+ *gpa = mmu->gva_to_gpa(vcpu, mmu, gva, access, PWALK_SET_ALL, exception, NULL);
if (*gpa == INVALID_GPA)
return -1;
@@ -13651,7 +13654,7 @@ void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_c
if (!(error_code & PFERR_PRESENT_MASK) ||
mmu->gva_to_gpa(vcpu, mmu, gva, access, PWALK_SET_ALL,
- &fault) != INVALID_GPA) {
+ &fault, NULL) != INVALID_GPA) {
/*
* If vcpu->arch.walk_mmu->gva_to_gpa succeeded, the page
* tables probably do not match the TLB. Just proceed
--
2.40.1
Amazon Web Services Development Center Germany GmbH
Krausenstr. 38
10117 Berlin
Geschaeftsfuehrung: Christian Schlaeger, Jonathan Weiss
Eingetragen am Amtsgericht Charlottenburg unter HRB 257764 B
Sitz: Berlin
Ust-ID: DE 365 538 597
next prev parent reply other threads:[~2024-09-10 15:24 UTC|newest]
Thread overview: 19+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-09-10 15:21 [PATCH 00/15] KVM: x86: Introduce new ioctl KVM_TRANSLATE2 Nikolas Wipper
2024-09-10 15:21 ` [PATCH 01/15] KVM: Add API documentation for KVM_TRANSLATE2 Nikolas Wipper
2024-09-10 15:21 ` [PATCH 02/15] KVM: x86/mmu: Abort page walk if permission checks fail Nikolas Wipper
2024-09-10 15:21 ` [PATCH 03/15] KVM: x86/mmu: Introduce exception flag for unmapped GPAs Nikolas Wipper
2024-09-10 15:21 ` [PATCH 04/15] KVM: x86/mmu: Store GPA in exception if applicable Nikolas Wipper
2024-09-10 15:21 ` [PATCH 05/15] KVM: x86/mmu: Introduce flags parameter to page walker Nikolas Wipper
2024-09-10 15:21 ` [PATCH 06/15] KVM: x86/mmu: Implement PWALK_SET_ACCESSED in " Nikolas Wipper
2024-09-10 15:21 ` [PATCH 07/15] KVM: x86/mmu: Implement PWALK_SET_DIRTY " Nikolas Wipper
2024-09-10 15:22 ` [PATCH 08/15] KVM: x86/mmu: Implement PWALK_FORCE_SET_ACCESSED " Nikolas Wipper
2024-09-10 15:22 ` Nikolas Wipper [this message]
2024-09-10 15:22 ` [PATCH 10/15] KVM: x86/mmu: Implement PWALK_STATUS_READ_ONLY_PTE_GPA " Nikolas Wipper
2024-09-10 15:22 ` [PATCH 11/15] KVM: x86: Introduce generic gva to gpa translation function Nikolas Wipper
2024-09-10 15:22 ` [PATCH 12/15] KVM: Introduce KVM_TRANSLATE2 Nikolas Wipper
2024-09-10 15:22 ` [PATCH 13/15] KVM: Add KVM_TRANSLATE2 stub Nikolas Wipper
2024-09-10 15:22 ` [PATCH 14/15] KVM: x86: Implement KVM_TRANSLATE2 Nikolas Wipper
2024-12-11 22:06 ` Sean Christopherson
2024-09-10 15:22 ` [PATCH 15/15] KVM: selftests: Add test for KVM_TRANSLATE2 Nikolas Wipper
2024-10-04 10:44 ` [PATCH 00/15] KVM: x86: Introduce new ioctl KVM_TRANSLATE2 Nikolas Wipper
2024-12-11 22:05 ` Sean Christopherson
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20240910152207.38974-10-nikwip@amazon.de \
--to=nikwip@amazon.de \
--cc=bp@alien8.de \
--cc=dave.hansen@linux.intel.com \
--cc=graf@amazon.de \
--cc=jgowans@amazon.com \
--cc=kvm-riscv@lists.infradead.org \
--cc=kvm@vger.kernel.org \
--cc=kvmarm@lists.linux.dev \
--cc=linux-doc@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-kselftest@vger.kernel.org \
--cc=mingo@redhat.com \
--cc=nh-open-source@amazon.com \
--cc=nsaenz@amazon.com \
--cc=pbonzini@redhat.com \
--cc=seanjc@google.com \
--cc=tglx@linutronix.de \
--cc=vkuznets@redhat.com \
--cc=x86@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox