From: Yi Liu <yi.l.liu@intel.com>
To: Zhenzhong Duan <zhenzhong.duan@intel.com>, <qemu-devel@nongnu.org>
Cc: <alex.williamson@redhat.com>, <clg@redhat.com>,
<eric.auger@redhat.com>, <mst@redhat.com>, <peterx@redhat.com>,
<jasowang@redhat.com>, <jgg@nvidia.com>, <nicolinc@nvidia.com>,
<joao.m.martins@oracle.com>, <clement.mathieu--drif@eviden.com>,
<kevin.tian@intel.com>, <chao.p.peng@intel.com>,
Yi Sun <yi.y.sun@linux.intel.com>,
Marcel Apfelbaum <marcel.apfelbaum@gmail.com>,
Paolo Bonzini <pbonzini@redhat.com>,
"Richard Henderson" <richard.henderson@linaro.org>,
Eduardo Habkost <eduardo@habkost.net>
Subject: Re: [PATCH v4 06/17] intel_iommu: Implement stage-1 translation
Date: Sun, 3 Nov 2024 22:21:50 +0800
Message-ID: <750c9440-b787-4500-b039-a4827e5d7d20@intel.com>
In-Reply-To: <20240930092631.2997543-7-zhenzhong.duan@intel.com>
On 2024/9/30 17:26, Zhenzhong Duan wrote:
> From: Yi Liu <yi.l.liu@intel.com>
>
> This adds stage-1 page table walking to support stage-1 only
> translation in scalable modern mode.
>
> Signed-off-by: Yi Liu <yi.l.liu@intel.com>
> Co-developed-by: Clément Mathieu--Drif <clement.mathieu--drif@eviden.com>
> Signed-off-by: Clément Mathieu--Drif <clement.mathieu--drif@eviden.com>
> Signed-off-by: Yi Sun <yi.y.sun@linux.intel.com>
> Signed-off-by: Zhenzhong Duan <zhenzhong.duan@intel.com>
> Acked-by: Jason Wang <jasowang@redhat.com>
> ---
> hw/i386/intel_iommu_internal.h | 24 ++++++
> hw/i386/intel_iommu.c | 143 ++++++++++++++++++++++++++++++++-
> 2 files changed, 163 insertions(+), 4 deletions(-)
>
> diff --git a/hw/i386/intel_iommu_internal.h b/hw/i386/intel_iommu_internal.h
> index 20fcc73938..38bf0c7a06 100644
> --- a/hw/i386/intel_iommu_internal.h
> +++ b/hw/i386/intel_iommu_internal.h
> @@ -428,6 +428,22 @@ typedef union VTDInvDesc VTDInvDesc;
> #define VTD_SPTE_LPAGE_L3_RSVD_MASK(aw) \
> (0x3ffff800ULL | ~(VTD_HAW_MASK(aw) | VTD_SL_IGN_COM))
>
> +/* Rsvd field masks for fpte */
> +#define VTD_FS_UPPER_IGNORED 0xfff0000000000000ULL
> +#define VTD_FPTE_PAGE_L1_RSVD_MASK(aw) \
> + (~(VTD_HAW_MASK(aw) | VTD_FS_UPPER_IGNORED))
> +#define VTD_FPTE_PAGE_L2_RSVD_MASK(aw) \
> + (~(VTD_HAW_MASK(aw) | VTD_FS_UPPER_IGNORED))
> +#define VTD_FPTE_PAGE_L3_RSVD_MASK(aw) \
> + (~(VTD_HAW_MASK(aw) | VTD_FS_UPPER_IGNORED))
> +#define VTD_FPTE_PAGE_L4_RSVD_MASK(aw) \
> + (0x80ULL | ~(VTD_HAW_MASK(aw) | VTD_FS_UPPER_IGNORED))
> +
> +#define VTD_FPTE_LPAGE_L2_RSVD_MASK(aw) \
> + (0x1fe000ULL | ~(VTD_HAW_MASK(aw) | VTD_FS_UPPER_IGNORED))
> +#define VTD_FPTE_LPAGE_L3_RSVD_MASK(aw) \
> + (0x3fffe000ULL | ~(VTD_HAW_MASK(aw) | VTD_FS_UPPER_IGNORED))
> +
> /* Masks for PIOTLB Invalidate Descriptor */
> #define VTD_INV_DESC_PIOTLB_G (3ULL << 4)
> #define VTD_INV_DESC_PIOTLB_ALL_IN_PASID (2ULL << 4)
> @@ -520,6 +536,14 @@ typedef struct VTDRootEntry VTDRootEntry;
> #define VTD_SM_PASID_ENTRY_AW 7ULL /* Adjusted guest-address-width */
> #define VTD_SM_PASID_ENTRY_DID(val) ((val) & VTD_DOMAIN_ID_MASK)
>
> +#define VTD_SM_PASID_ENTRY_FLPM 3ULL
> +#define VTD_SM_PASID_ENTRY_FLPTPTR (~0xfffULL)
> +
> +/* First Level Paging Structure */
> +/* Masks for First Level Paging Entry */
> +#define VTD_FL_P 1ULL
> +#define VTD_FL_RW (1ULL << 1)
> +
> /* Second Level Page Translation Pointer*/
> #define VTD_SM_PASID_ENTRY_SLPTPTR (~0xfffULL)
>
> diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c
> index 6f2414898c..56d5933e93 100644
> --- a/hw/i386/intel_iommu.c
> +++ b/hw/i386/intel_iommu.c
> @@ -48,6 +48,8 @@
>
> /* pe operations */
> #define VTD_PE_GET_TYPE(pe) ((pe)->val[0] & VTD_SM_PASID_ENTRY_PGTT)
> +#define VTD_PE_GET_FL_LEVEL(pe) \
> + (4 + (((pe)->val[2] >> 2) & VTD_SM_PASID_ENTRY_FLPM))
> #define VTD_PE_GET_SL_LEVEL(pe) \
> (2 + (((pe)->val[0] >> 2) & VTD_SM_PASID_ENTRY_AW))
>
> @@ -755,6 +757,11 @@ static inline bool vtd_is_sl_level_supported(IntelIOMMUState *s, uint32_t level)
> (1ULL << (level - 2 + VTD_CAP_SAGAW_SHIFT));
> }
>
> +static inline bool vtd_is_fl_level_supported(IntelIOMMUState *s, uint32_t level)
> +{
> + return level == VTD_PML4_LEVEL;
> +}
> +
> /* Return true if check passed, otherwise false */
> static inline bool vtd_pe_type_check(X86IOMMUState *x86_iommu,
> VTDPASIDEntry *pe)
> @@ -838,6 +845,11 @@ static int vtd_get_pe_in_pasid_leaf_table(IntelIOMMUState *s,
> return -VTD_FR_PASID_TABLE_ENTRY_INV;
> }
>
> + if (pgtt == VTD_SM_PASID_ENTRY_FLT &&
> + !vtd_is_fl_level_supported(s, VTD_PE_GET_FL_LEVEL(pe))) {
> + return -VTD_FR_PASID_TABLE_ENTRY_INV;
> + }
> +
> return 0;
> }
>
> @@ -973,7 +985,11 @@ static uint32_t vtd_get_iova_level(IntelIOMMUState *s,
>
> if (s->root_scalable) {
> vtd_ce_get_rid2pasid_entry(s, ce, &pe, pasid);
> - return VTD_PE_GET_SL_LEVEL(&pe);
> + if (s->scalable_modern) {
> + return VTD_PE_GET_FL_LEVEL(&pe);
> + } else {
> + return VTD_PE_GET_SL_LEVEL(&pe);
> + }
> }
>
> return vtd_ce_get_level(ce);
> @@ -1060,7 +1076,11 @@ static dma_addr_t vtd_get_iova_pgtbl_base(IntelIOMMUState *s,
>
> if (s->root_scalable) {
> vtd_ce_get_rid2pasid_entry(s, ce, &pe, pasid);
> - return pe.val[0] & VTD_SM_PASID_ENTRY_SLPTPTR;
> + if (s->scalable_modern) {
> + return pe.val[2] & VTD_SM_PASID_ENTRY_FLPTPTR;
> + } else {
> + return pe.val[0] & VTD_SM_PASID_ENTRY_SLPTPTR;
> + }
> }
>
> return vtd_ce_get_slpt_base(ce);
> @@ -1862,6 +1882,104 @@ out:
> trace_vtd_pt_enable_fast_path(source_id, success);
> }
>
> +/*
> + * Rsvd field masks for fpte:
> + * vtd_fpte_rsvd 4k pages
> + * vtd_fpte_rsvd_large large pages
> + *
> + * We support only 4-level page tables.
> + */
> +#define VTD_FPTE_RSVD_LEN 5
> +static uint64_t vtd_fpte_rsvd[VTD_FPTE_RSVD_LEN];
> +static uint64_t vtd_fpte_rsvd_large[VTD_FPTE_RSVD_LEN];
> +
> +static bool vtd_flpte_nonzero_rsvd(uint64_t flpte, uint32_t level)
> +{
> + uint64_t rsvd_mask;
> +
> + /*
> + * We should have caught a guest-mis-programmed level earlier,
> + * via vtd_is_fl_level_supported.
> + */
> + assert(level < VTD_FPTE_RSVD_LEN);
> + /*
> + * Zero level doesn't exist. The smallest level is VTD_PT_LEVEL=1 and
> + * checked by vtd_is_last_pte().
> + */
> + assert(level);
> +
> + if ((level == VTD_PD_LEVEL || level == VTD_PDP_LEVEL) &&
> + (flpte & VTD_PT_PAGE_SIZE_MASK)) {
> + /* large page */
> + rsvd_mask = vtd_fpte_rsvd_large[level];
> + } else {
> + rsvd_mask = vtd_fpte_rsvd[level];
> + }
> +
> + return flpte & rsvd_mask;
> +}
> +
> +static inline bool vtd_flpte_present(uint64_t flpte)
> +{
> + return !!(flpte & VTD_FL_P);
> +}
> +
> +/*
> + * Given the @iova, get relevant @flptep. @flpte_level will be the last level
> + * of the translation, can be used for deciding the size of large page.
> + */
> +static int vtd_iova_to_flpte(IntelIOMMUState *s, VTDContextEntry *ce,
> + uint64_t iova, bool is_write,
> + uint64_t *flptep, uint32_t *flpte_level,
> + bool *reads, bool *writes, uint8_t aw_bits,
> + uint32_t pasid)
> +{
> + dma_addr_t addr = vtd_get_iova_pgtbl_base(s, ce, pasid);
> + uint32_t level = vtd_get_iova_level(s, ce, pasid);
> + uint32_t offset;
> + uint64_t flpte;
> +
> + while (true) {
> + offset = vtd_iova_level_offset(iova, level);
> + flpte = vtd_get_pte(addr, offset);
> +
> + if (flpte == (uint64_t)-1) {
> + if (level == vtd_get_iova_level(s, ce, pasid)) {
> + /* Invalid programming of context-entry */
> + return -VTD_FR_CONTEXT_ENTRY_INV;
> + } else {
> + return -VTD_FR_PAGING_ENTRY_INV;
> + }
> + }
> + if (!vtd_flpte_present(flpte)) {
> + *reads = false;
> + *writes = false;
> + return -VTD_FR_PAGING_ENTRY_INV;
> + }
> + *reads = true;
> + *writes = (*writes) && (flpte & VTD_FL_RW);
> + if (is_write && !(flpte & VTD_FL_RW)) {
> + return -VTD_FR_WRITE;
> + }
> + if (vtd_flpte_nonzero_rsvd(flpte, level)) {
> + error_report_once("%s: detected flpte reserved non-zero "
> + "iova=0x%" PRIx64 ", level=0x%" PRIx32
> + "flpte=0x%" PRIx64 ", pasid=0x%" PRIX32 ")",
> + __func__, iova, level, flpte, pasid);
> + return -VTD_FR_PAGING_ENTRY_RSVD;
> + }
> +
> + if (vtd_is_last_pte(flpte, level)) {
> + *flptep = flpte;
> + *flpte_level = level;
> + return 0;
> + }
> +
> + addr = vtd_get_pte_addr(flpte, aw_bits);
> + level--;
> + }
As I replied to the previous version, the translation result should also be
checked against the interrupt address range (0xFEEx_xxxx). I saw your
reply [1], but that check only covers the input address; my comment here is
about the output address.
[1] https://lore.kernel.org/qemu-devel/SJ0PR11MB6744D2B572D278DAF8BF267692762@SJ0PR11MB6744.namprd11.prod.outlook.com/
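To illustrate, something along the lines below in vtd_iova_to_flpte() is what
I have in mind. This is only a rough sketch on top of this patch: it assumes
the existing VTD_INTERRUPT_ADDR_FIRST/LAST macros can be reused for the check,
and the fault code returned is just a placeholder, not a final choice.

    if (vtd_is_last_pte(flpte, level)) {
        uint64_t page_addr = vtd_get_pte_addr(flpte, aw_bits);

        /*
         * Check the output (translated) address against the interrupt
         * address range and fault if it falls inside.
         */
        if (page_addr >= VTD_INTERRUPT_ADDR_FIRST &&
            page_addr <= VTD_INTERRUPT_ADDR_LAST) {
            error_report_once("%s: translation result 0x%" PRIx64
                              " hits the interrupt address range"
                              " (iova=0x%" PRIx64 ", pasid=0x%" PRIx32 ")",
                              __func__, page_addr, iova, pasid);
            /* Placeholder fault code; the proper one still needs picking */
            return -VTD_FR_PAGING_ENTRY_RSVD;
        }

        *flptep = flpte;
        *flpte_level = level;
        return 0;
    }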
> +}
> +
> static void vtd_report_fault(IntelIOMMUState *s,
> int err, bool is_fpd_set,
> uint16_t source_id,
> @@ -2010,8 +2128,13 @@ static bool vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus,
> }
> }
>
> - ret_fr = vtd_iova_to_slpte(s, &ce, addr, is_write, &pte, &level,
> - &reads, &writes, s->aw_bits, pasid);
> + if (s->scalable_modern && s->root_scalable) {
> + ret_fr = vtd_iova_to_flpte(s, &ce, addr, is_write, &pte, &level,
> + &reads, &writes, s->aw_bits, pasid);
> + } else {
> + ret_fr = vtd_iova_to_slpte(s, &ce, addr, is_write, &pte, &level,
> + &reads, &writes, s->aw_bits, pasid);
> + }
> if (ret_fr) {
> vtd_report_fault(s, -ret_fr, is_fpd_set, source_id,
> addr, is_write, pasid != PCI_NO_PASID, pasid);
> @@ -4239,6 +4362,18 @@ static void vtd_init(IntelIOMMUState *s)
> vtd_spte_rsvd_large[2] = VTD_SPTE_LPAGE_L2_RSVD_MASK(s->aw_bits);
> vtd_spte_rsvd_large[3] = VTD_SPTE_LPAGE_L3_RSVD_MASK(s->aw_bits);
>
> + /*
> + * Rsvd field masks for fpte
> + */
> + vtd_fpte_rsvd[0] = ~0ULL;
> + vtd_fpte_rsvd[1] = VTD_FPTE_PAGE_L1_RSVD_MASK(s->aw_bits);
> + vtd_fpte_rsvd[2] = VTD_FPTE_PAGE_L2_RSVD_MASK(s->aw_bits);
> + vtd_fpte_rsvd[3] = VTD_FPTE_PAGE_L3_RSVD_MASK(s->aw_bits);
> + vtd_fpte_rsvd[4] = VTD_FPTE_PAGE_L4_RSVD_MASK(s->aw_bits);
> +
> + vtd_fpte_rsvd_large[2] = VTD_FPTE_LPAGE_L2_RSVD_MASK(s->aw_bits);
> + vtd_fpte_rsvd_large[3] = VTD_FPTE_LPAGE_L3_RSVD_MASK(s->aw_bits);
> +
> if (s->scalable_mode || s->snoop_control) {
> vtd_spte_rsvd[1] &= ~VTD_SPTE_SNP;
> vtd_spte_rsvd_large[2] &= ~VTD_SPTE_SNP;
--
Regards,
Yi Liu