From: Nicolas Saenz Julienne <nsaenz@amazon.com>
To: <linux-kernel@vger.kernel.org>, <kvm@vger.kernel.org>
Cc: <pbonzini@redhat.com>, <seanjc@google.com>, <vkuznets@redhat.com>,
<linux-doc@vger.kernel.org>, <linux-hyperv@vger.kernel.org>,
<linux-arch@vger.kernel.org>,
<linux-trace-kernel@vger.kernel.org>, <graf@amazon.de>,
<dwmw2@infradead.org>, <paul@amazon.com>, <nsaenz@amazon.com>,
<mlevitsk@redhat.com>, <jgowans@amazon.com>, <corbet@lwn.net>,
<decui@microsoft.com>, <tglx@linutronix.de>, <mingo@redhat.com>,
<bp@alien8.de>, <dave.hansen@linux.intel.com>, <x86@kernel.org>,
<amoorthy@google.com>
Subject: [PATCH 16/18] KVM: x86: Take mem attributes into account when faulting memory
Date: Sun, 9 Jun 2024 15:49:45 +0000 [thread overview]
Message-ID: <20240609154945.55332-17-nsaenz@amazon.com> (raw)
In-Reply-To: <20240609154945.55332-1-nsaenz@amazon.com>
Take access-restriction memory attributes into account when faulting
guest memory. Prohibited memory accesses will cause a user-space fault
exit.
Additionally, bypass a warning in the !tdp case. Access restrictions in
guest page tables might not necessarily match the host pte's when memory
attributes are in use.
Signed-off-by: Nicolas Saenz Julienne <nsaenz@amazon.com>
---
arch/x86/kvm/mmu/mmu.c | 64 ++++++++++++++++++++++++++++------
arch/x86/kvm/mmu/mmutrace.h | 29 +++++++++++++++
arch/x86/kvm/mmu/paging_tmpl.h | 2 +-
include/linux/kvm_host.h | 4 +++
4 files changed, 87 insertions(+), 12 deletions(-)
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 91edd873dcdbc..dfe50c9c31f7b 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -754,7 +754,8 @@ static u32 kvm_mmu_page_get_access(struct kvm_mmu_page *sp, int index)
return sp->role.access;
}
-static void kvm_mmu_page_set_translation(struct kvm_mmu_page *sp, int index,
+static void kvm_mmu_page_set_translation(struct kvm *kvm,
+ struct kvm_mmu_page *sp, int index,
gfn_t gfn, unsigned int access)
{
if (sp_has_gptes(sp)) {
@@ -762,10 +763,17 @@ static void kvm_mmu_page_set_translation(struct kvm_mmu_page *sp, int index,
return;
}
- WARN_ONCE(access != kvm_mmu_page_get_access(sp, index),
- "access mismatch under %s page %llx (expected %u, got %u)\n",
- sp->role.passthrough ? "passthrough" : "direct",
- sp->gfn, kvm_mmu_page_get_access(sp, index), access);
+ /*
+ * Userspace might have introduced memory attributes for this gfn,
+ * breaking the assumption that the spte's access restrictions match
+ * the guest's. Userspace is also responsible for taking care of
+ * faults caused by these 'artificial' access restrictions.
+ */
+ WARN_ONCE(access != kvm_mmu_page_get_access(sp, index) &&
+ !kvm_get_memory_attributes(kvm, gfn),
+ "access mismatch under %s page %llx (expected %u, got %u)\n",
+ sp->role.passthrough ? "passthrough" : "direct", sp->gfn,
+ kvm_mmu_page_get_access(sp, index), access);
WARN_ONCE(gfn != kvm_mmu_page_get_gfn(sp, index),
"gfn mismatch under %s page %llx (expected %llx, got %llx)\n",
@@ -773,12 +781,12 @@ static void kvm_mmu_page_set_translation(struct kvm_mmu_page *sp, int index,
sp->gfn, kvm_mmu_page_get_gfn(sp, index), gfn);
}
-static void kvm_mmu_page_set_access(struct kvm_mmu_page *sp, int index,
- unsigned int access)
+static void kvm_mmu_page_set_access(struct kvm *kvm, struct kvm_mmu_page *sp,
+ int index, unsigned int access)
{
gfn_t gfn = kvm_mmu_page_get_gfn(sp, index);
- kvm_mmu_page_set_translation(sp, index, gfn, access);
+ kvm_mmu_page_set_translation(kvm, sp, index, gfn, access);
}
/*
@@ -1607,7 +1615,7 @@ static void __rmap_add(struct kvm *kvm,
int rmap_count;
sp = sptep_to_sp(spte);
- kvm_mmu_page_set_translation(sp, spte_index(spte), gfn, access);
+ kvm_mmu_page_set_translation(kvm, sp, spte_index(spte), gfn, access);
kvm_update_page_stats(kvm, sp->role.level, 1);
rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
@@ -2928,7 +2936,8 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
rmap_add(vcpu, slot, sptep, gfn, pte_access);
} else {
/* Already rmapped but the pte_access bits may have changed. */
- kvm_mmu_page_set_access(sp, spte_index(sptep), pte_access);
+ kvm_mmu_page_set_access(vcpu->kvm, sp, spte_index(sptep),
+ pte_access);
}
return ret;
@@ -4320,6 +4329,38 @@ static int kvm_faultin_pfn_private(struct kvm_vcpu *vcpu,
return RET_PF_CONTINUE;
}
+static int kvm_mem_attributes_faultin_access_prots(struct kvm_vcpu *vcpu,
+ struct kvm_page_fault *fault)
+{
+ bool may_read, may_write, may_exec;
+ unsigned long attrs;
+
+ attrs = kvm_get_memory_attributes(vcpu->kvm, fault->gfn);
+ if (!attrs)
+ return RET_PF_CONTINUE;
+
+ if (!kvm_mem_attributes_valid(vcpu->kvm, attrs)) {
+ kvm_err("Invalid mem attributes 0x%lx found for address 0x%016llx\n",
+ attrs, fault->addr);
+ return -EFAULT;
+ }
+
+ trace_kvm_mem_attributes_faultin_access_prots(vcpu, fault, attrs);
+
+ may_read = kvm_mem_attributes_may_read(attrs);
+ may_write = kvm_mem_attributes_may_write(attrs);
+ may_exec = kvm_mem_attributes_may_exec(attrs);
+
+ if ((fault->user && !may_read) || (fault->write && !may_write) ||
+ (fault->exec && !may_exec))
+ return -EFAULT;
+
+ fault->map_writable = may_write;
+ fault->map_executable = may_exec;
+
+ return RET_PF_CONTINUE;
+}
+
static int __kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
{
bool async;
@@ -4375,7 +4416,8 @@ static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
* Now that we have a snapshot of mmu_invalidate_seq we can check for a
* private vs. shared mismatch.
*/
- if (fault->is_private != kvm_mem_is_private(vcpu->kvm, fault->gfn)) {
+ if (fault->is_private != kvm_mem_is_private(vcpu->kvm, fault->gfn) ||
+ kvm_mem_attributes_faultin_access_prots(vcpu, fault)) {
kvm_mmu_prepare_memory_fault_exit(vcpu, fault);
return -EFAULT;
}
diff --git a/arch/x86/kvm/mmu/mmutrace.h b/arch/x86/kvm/mmu/mmutrace.h
index 195d98bc8de85..ddbdd7396e9fa 100644
--- a/arch/x86/kvm/mmu/mmutrace.h
+++ b/arch/x86/kvm/mmu/mmutrace.h
@@ -440,6 +440,35 @@ TRACE_EVENT(
__entry->gfn, __entry->spte, __entry->level, __entry->errno)
);
+TRACE_EVENT(kvm_mem_attributes_faultin_access_prots,
+ TP_PROTO(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
+ u64 mem_attrs),
+ TP_ARGS(vcpu, fault, mem_attrs),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, vcpu_id)
+ __field(unsigned long, guest_rip)
+ __field(u64, fault_address)
+ __field(bool, write)
+ __field(bool, exec)
+ __field(u64, mem_attrs)
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_id = vcpu->vcpu_id;
+ __entry->guest_rip = kvm_rip_read(vcpu);
+ __entry->fault_address = fault->addr;
+ __entry->write = fault->write;
+ __entry->exec = fault->exec;
+ __entry->mem_attrs = mem_attrs;
+ ),
+
+ TP_printk("vcpu %d rip 0x%lx gfn 0x%016llx access %s mem_attrs 0x%llx",
+ __entry->vcpu_id, __entry->guest_rip, __entry->fault_address,
+ __entry->exec ? "X" : (__entry->write ? "W" : "R"),
+ __entry->mem_attrs)
+);
+
#endif /* _TRACE_KVMMMU_H */
#undef TRACE_INCLUDE_PATH
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index d3dbcf382ed2d..166f5f0e885e0 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -954,7 +954,7 @@ static int FNAME(sync_spte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, int
return 0;
/* Update the shadowed access bits in case they changed. */
- kvm_mmu_page_set_access(sp, i, pte_access);
+ kvm_mmu_page_set_access(vcpu->kvm, sp, i, pte_access);
sptep = &sp->spt[i];
spte = *sptep;
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 85378345e8e77..9c26161d13dea 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -2463,6 +2463,10 @@ static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn)
{
return false;
}
+static inline unsigned long kvm_get_memory_attributes(struct kvm *kvm, gfn_t gfn)
+{
+ return 0;
+}
#endif /* CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES */
#ifdef CONFIG_KVM_PRIVATE_MEM
--
2.40.1
next prev parent reply other threads:[~2024-06-09 15:58 UTC|newest]
Thread overview: 40+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-06-09 15:49 [PATCH 00/18] Introducing Core Building Blocks for Hyper-V VSM Emulation Nicolas Saenz Julienne
2024-06-09 15:49 ` [PATCH 01/18] KVM: x86: hyper-v: Introduce XMM output support Nicolas Saenz Julienne
2024-07-08 14:59 ` Vitaly Kuznetsov
2024-07-17 14:12 ` Nicolas Saenz Julienne
2024-07-29 13:53 ` Vitaly Kuznetsov
2024-08-05 14:08 ` Nicolas Saenz Julienne
2024-06-09 15:49 ` [PATCH 02/18] KVM: x86: hyper-v: Introduce helpers to check if VSM is exposed to guest Nicolas Saenz Julienne
2024-06-09 15:49 ` [PATCH 03/18] hyperv-tlfs: Update struct hv_send_ipi{_ex}'s declarations Nicolas Saenz Julienne
2024-06-09 15:49 ` [PATCH 04/18] KVM: x86: hyper-v: Introduce VTL awareness to Hyper-V's PV-IPIs Nicolas Saenz Julienne
2024-09-13 18:02 ` Sean Christopherson
2024-09-16 14:52 ` Nicolas Saenz Julienne
2024-06-09 15:49 ` [PATCH 05/18] KVM: x86: hyper-v: Introduce MP_STATE_HV_INACTIVE_VTL Nicolas Saenz Julienne
2024-09-13 19:01 ` Sean Christopherson
2024-09-16 15:33 ` Nicolas Saenz Julienne
2024-09-18 7:56 ` Sean Christopherson
2024-06-09 15:49 ` [PATCH 06/18] KVM: x86: hyper-v: Exit on Get/SetVpRegisters hcall Nicolas Saenz Julienne
2024-06-09 15:49 ` [PATCH 07/18] KVM: x86: hyper-v: Exit on TranslateVirtualAddress hcall Nicolas Saenz Julienne
2024-06-09 15:49 ` [PATCH 08/18] KVM: x86: hyper-v: Exit on StartVirtualProcessor and GetVpIndexFromApicId hcalls Nicolas Saenz Julienne
2024-06-09 15:49 ` [PATCH 09/18] KVM: Define and communicate KVM_EXIT_MEMORY_FAULT RWX flags to userspace Nicolas Saenz Julienne
2024-06-09 15:49 ` [PATCH 10/18] KVM: x86: Keep track of instruction length during faults Nicolas Saenz Julienne
2024-09-13 19:10 ` Sean Christopherson
2024-06-09 15:49 ` [PATCH 11/18] KVM: x86: Pass the instruction length on memory fault user-space exits Nicolas Saenz Julienne
2024-09-13 19:11 ` Sean Christopherson
2024-09-16 15:53 ` Nicolas Saenz Julienne
2024-06-09 15:49 ` [PATCH 12/18] KVM: x86/mmu: Introduce infrastructure to handle non-executable mappings Nicolas Saenz Julienne
2024-06-09 15:49 ` [PATCH 13/18] KVM: x86/mmu: Avoid warning when installing non-private memory attributes Nicolas Saenz Julienne
2024-09-13 19:13 ` Sean Christopherson
2024-06-09 15:49 ` [PATCH 14/18] KVM: x86/mmu: Init memslot if memory attributes available Nicolas Saenz Julienne
2024-06-09 15:49 ` [PATCH 15/18] KVM: Introduce RWX memory attributes Nicolas Saenz Julienne
2024-06-09 15:49 ` Nicolas Saenz Julienne [this message]
2024-08-22 15:21 ` [PATCH 16/18] KVM: x86: Take mem attributes into account when faulting memory Nicolas Saenz Julienne
2024-08-22 16:58 ` Sean Christopherson
2024-09-13 18:26 ` Nicolas Saenz Julienne
2024-06-09 15:49 ` [PATCH 17/18] KVM: Introduce traces to track memory attributes modification Nicolas Saenz Julienne
2024-06-09 15:49 ` [PATCH 18/18] KVM: x86: hyper-v: Handle VSM hcalls in user-space Nicolas Saenz Julienne
2024-07-03 9:55 ` [PATCH 00/18] Introducing Core Building Blocks for Hyper-V VSM Emulation Nicolas Saenz Julienne
2024-07-03 12:48 ` Vitaly Kuznetsov
2024-07-03 13:18 ` Nicolas Saenz Julienne
2024-09-13 19:19 ` Sean Christopherson
2024-09-16 16:32 ` Nicolas Saenz Julienne
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20240609154945.55332-17-nsaenz@amazon.com \
--to=nsaenz@amazon.com \
--cc=amoorthy@google.com \
--cc=bp@alien8.de \
--cc=corbet@lwn.net \
--cc=dave.hansen@linux.intel.com \
--cc=decui@microsoft.com \
--cc=dwmw2@infradead.org \
--cc=graf@amazon.de \
--cc=jgowans@amazon.com \
--cc=kvm@vger.kernel.org \
--cc=linux-arch@vger.kernel.org \
--cc=linux-doc@vger.kernel.org \
--cc=linux-hyperv@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-trace-kernel@vger.kernel.org \
--cc=mingo@redhat.com \
--cc=mlevitsk@redhat.com \
--cc=paul@amazon.com \
--cc=pbonzini@redhat.com \
--cc=seanjc@google.com \
--cc=tglx@linutronix.de \
--cc=vkuznets@redhat.com \
--cc=x86@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).