iommu.lists.linux-foundation.org archive mirror
 help / color / mirror / Atom feed
From: Nick Sarnie <commendsarnex@gmail.com>
To: Tom Lendacky <thomas.lendacky@amd.com>
Cc: linux-arch@vger.kernel.org, linux-efi@vger.kernel.org,
	kvm@vger.kernel.org, linux-doc@vger.kernel.org, x86@kernel.org,
	kexec@lists.infradead.org, linux-kernel@vger.kernel.org,
	kasan-dev@googlegroups.com, linux-mm@kvack.org,
	iommu@lists.linux-foundation.org,
	"Thomas Gleixner" <tglx@linutronix.de>,
	"Rik van Riel" <riel@redhat.com>,
	"Brijesh Singh" <brijesh.singh@amd.com>,
	"Toshimitsu Kani" <toshi.kani@hpe.com>,
	"Arnd Bergmann" <arnd@arndb.de>,
	"Jonathan Corbet" <corbet@lwn.net>,
	"Matt Fleming" <matt@codeblueprint.co.uk>,
	"Radim Krčmář" <rkrcmar@redhat.com>,
	"Andrey Ryabinin" <aryabinin@virtuozzo.com>,
	"Ingo Molnar" <mingo@redhat.com>,
	"Michael S. Tsirkin" <mst@redhat.com>,
	"Andy Lutomirski" <luto@kernel.org>,
	"H. Peter Anvin" <hpa@zytor.com>,
	"Borislav Petkov" <bp@alien8.de>,
	"Paolo Bonzini" <pbonzini@redhat.com>,
	"Alexander Potapenko" <glider@google.com>
Subject: Re: [PATCH v6 26/34] iommu/amd: Allow the AMD IOMMU to work with memory encryption
Date: Wed, 7 Jun 2017 22:38:14 -0400	[thread overview]
Message-ID: <CAOcCaLZ5QNx+CdnLn4eHtYOJOezJAsmq2whf8mggOdeMQDWOhw@mail.gmail.com> (raw)
In-Reply-To: <20170607191745.28645.81756.stgit@tlendack-t1.amdoffice.net>

On Wed, Jun 7, 2017 at 3:17 PM, Tom Lendacky <thomas.lendacky@amd.com> wrote:
> The IOMMU is programmed with physical addresses for the various tables
> and buffers that are used to communicate between the device and the
> driver. When the driver allocates this memory it is encrypted. In order
> for the IOMMU to access the memory as encrypted the encryption mask needs
> to be included in these physical addresses during configuration.
>
> The PTE entries created by the IOMMU should also include the encryption
> mask so that when the device behind the IOMMU performs a DMA, the DMA
> will be performed to encrypted memory.
>
> Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
> ---
>  arch/x86/include/asm/mem_encrypt.h |    7 +++++++
>  arch/x86/mm/mem_encrypt.c          |   30 ++++++++++++++++++++++++++++++
>  drivers/iommu/amd_iommu.c          |   36 +++++++++++++++++++-----------------
>  drivers/iommu/amd_iommu_init.c     |   18 ++++++++++++------
>  drivers/iommu/amd_iommu_proto.h    |   10 ++++++++++
>  drivers/iommu/amd_iommu_types.h    |    2 +-
>  include/asm-generic/mem_encrypt.h  |    5 +++++
>  7 files changed, 84 insertions(+), 24 deletions(-)
>
> diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
> index c7a2525..d86e544 100644
> --- a/arch/x86/include/asm/mem_encrypt.h
> +++ b/arch/x86/include/asm/mem_encrypt.h
> @@ -31,6 +31,8 @@ void __init sme_early_decrypt(resource_size_t paddr,
>
>  void __init sme_early_init(void);
>
> +bool sme_iommu_supported(void);
> +
>  /* Architecture __weak replacement functions */
>  void __init mem_encrypt_init(void);
>
> @@ -62,6 +64,11 @@ static inline void __init sme_early_init(void)
>  {
>  }
>
> +static inline bool sme_iommu_supported(void)
> +{
> +       return true;
> +}
> +
>  #endif /* CONFIG_AMD_MEM_ENCRYPT */
>
>  static inline bool sme_active(void)
> diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
> index 5d7c51d..018b58a 100644
> --- a/arch/x86/mm/mem_encrypt.c
> +++ b/arch/x86/mm/mem_encrypt.c
> @@ -197,6 +197,36 @@ void __init sme_early_init(void)
>                 protection_map[i] = pgprot_encrypted(protection_map[i]);
>  }
>
> +bool sme_iommu_supported(void)
> +{
> +       struct cpuinfo_x86 *c = &boot_cpu_data;
> +
> +       if (!sme_me_mask || (c->x86 != 0x17))
> +               return true;
> +
> +       /* For Fam17h, a specific level of support is required */
> +       switch (c->microcode & 0xf000) {
> +       case 0x0000:
> +               return false;
> +       case 0x1000:
> +               switch (c->microcode & 0x0f00) {
> +               case 0x0000:
> +                       return false;
> +               case 0x0100:
> +                       if ((c->microcode & 0xff) < 0x26)
> +                               return false;
> +                       break;
> +               case 0x0200:
> +                       if ((c->microcode & 0xff) < 0x05)
> +                               return false;
> +                       break;
> +               }
> +               break;
> +       }
> +
> +       return true;
> +}
> +
>  /* Architecture __weak replacement functions */
>  void __init mem_encrypt_init(void)
>  {
> diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
> index 63cacf5..94eb130 100644
> --- a/drivers/iommu/amd_iommu.c
> +++ b/drivers/iommu/amd_iommu.c
> @@ -544,7 +544,7 @@ static void dump_dte_entry(u16 devid)
>
>  static void dump_command(unsigned long phys_addr)
>  {
> -       struct iommu_cmd *cmd = phys_to_virt(phys_addr);
> +       struct iommu_cmd *cmd = iommu_phys_to_virt(phys_addr);
>         int i;
>
>         for (i = 0; i < 4; ++i)
> @@ -863,13 +863,15 @@ static void copy_cmd_to_buffer(struct amd_iommu *iommu,
>         writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
>  }
>
> -static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
> +static void build_completion_wait(struct iommu_cmd *cmd, volatile u64 *sem)
>  {
> +       u64 address = iommu_virt_to_phys((void *)sem);
> +
>         WARN_ON(address & 0x7ULL);
>
>         memset(cmd, 0, sizeof(*cmd));
> -       cmd->data[0] = lower_32_bits(__pa(address)) | CMD_COMPL_WAIT_STORE_MASK;
> -       cmd->data[1] = upper_32_bits(__pa(address));
> +       cmd->data[0] = lower_32_bits(address) | CMD_COMPL_WAIT_STORE_MASK;
> +       cmd->data[1] = upper_32_bits(address);
>         cmd->data[2] = 1;
>         CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
>  }
> @@ -1033,7 +1035,7 @@ static int __iommu_queue_command_sync(struct amd_iommu *iommu,
>
>                 iommu->cmd_sem = 0;
>
> -               build_completion_wait(&sync_cmd, (u64)&iommu->cmd_sem);
> +               build_completion_wait(&sync_cmd, &iommu->cmd_sem);
>                 copy_cmd_to_buffer(iommu, &sync_cmd, tail);
>
>                 if ((ret = wait_on_sem(&iommu->cmd_sem)) != 0)
> @@ -1083,7 +1085,7 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
>                 return 0;
>
>
> -       build_completion_wait(&cmd, (u64)&iommu->cmd_sem);
> +       build_completion_wait(&cmd, &iommu->cmd_sem);
>
>         spin_lock_irqsave(&iommu->lock, flags);
>
> @@ -1328,7 +1330,7 @@ static bool increase_address_space(struct protection_domain *domain,
>                 return false;
>
>         *pte             = PM_LEVEL_PDE(domain->mode,
> -                                       virt_to_phys(domain->pt_root));
> +                                       iommu_virt_to_phys(domain->pt_root));
>         domain->pt_root  = pte;
>         domain->mode    += 1;
>         domain->updated  = true;
> @@ -1365,7 +1367,7 @@ static u64 *alloc_pte(struct protection_domain *domain,
>                         if (!page)
>                                 return NULL;
>
> -                       __npte = PM_LEVEL_PDE(level, virt_to_phys(page));
> +                       __npte = PM_LEVEL_PDE(level, iommu_virt_to_phys(page));
>
>                         /* pte could have been changed somewhere. */
>                         if (cmpxchg64(pte, __pte, __npte) != __pte) {
> @@ -1481,10 +1483,10 @@ static int iommu_map_page(struct protection_domain *dom,
>                         return -EBUSY;
>
>         if (count > 1) {
> -               __pte = PAGE_SIZE_PTE(phys_addr, page_size);
> +               __pte = PAGE_SIZE_PTE(__sme_set(phys_addr), page_size);
>                 __pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_P | IOMMU_PTE_FC;
>         } else
> -               __pte = phys_addr | IOMMU_PTE_P | IOMMU_PTE_FC;
> +               __pte = __sme_set(phys_addr) | IOMMU_PTE_P | IOMMU_PTE_FC;
>
>         if (prot & IOMMU_PROT_IR)
>                 __pte |= IOMMU_PTE_IR;
> @@ -1700,7 +1702,7 @@ static void free_gcr3_tbl_level1(u64 *tbl)
>                 if (!(tbl[i] & GCR3_VALID))
>                         continue;
>
> -               ptr = __va(tbl[i] & PAGE_MASK);
> +               ptr = iommu_phys_to_virt(tbl[i] & PAGE_MASK);
>
>                 free_page((unsigned long)ptr);
>         }
> @@ -1715,7 +1717,7 @@ static void free_gcr3_tbl_level2(u64 *tbl)
>                 if (!(tbl[i] & GCR3_VALID))
>                         continue;
>
> -               ptr = __va(tbl[i] & PAGE_MASK);
> +               ptr = iommu_phys_to_virt(tbl[i] & PAGE_MASK);
>
>                 free_gcr3_tbl_level1(ptr);
>         }
> @@ -1807,7 +1809,7 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats)
>         u64 flags = 0;
>
>         if (domain->mode != PAGE_MODE_NONE)
> -               pte_root = virt_to_phys(domain->pt_root);
> +               pte_root = iommu_virt_to_phys(domain->pt_root);
>
>         pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
>                     << DEV_ENTRY_MODE_SHIFT;
> @@ -1819,7 +1821,7 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats)
>                 flags |= DTE_FLAG_IOTLB;
>
>         if (domain->flags & PD_IOMMUV2_MASK) {
> -               u64 gcr3 = __pa(domain->gcr3_tbl);
> +               u64 gcr3 = iommu_virt_to_phys(domain->gcr3_tbl);
>                 u64 glx  = domain->glx;
>                 u64 tmp;
>
> @@ -3470,10 +3472,10 @@ static u64 *__get_gcr3_pte(u64 *root, int level, int pasid, bool alloc)
>                         if (root == NULL)
>                                 return NULL;
>
> -                       *pte = __pa(root) | GCR3_VALID;
> +                       *pte = iommu_virt_to_phys(root) | GCR3_VALID;
>                 }
>
> -               root = __va(*pte & PAGE_MASK);
> +               root = iommu_phys_to_virt(*pte & PAGE_MASK);
>
>                 level -= 1;
>         }
> @@ -3652,7 +3654,7 @@ static void set_dte_irq_entry(u16 devid, struct irq_remap_table *table)
>
>         dte     = amd_iommu_dev_table[devid].data[2];
>         dte     &= ~DTE_IRQ_PHYS_ADDR_MASK;
> -       dte     |= virt_to_phys(table->table);
> +       dte     |= iommu_virt_to_phys(table->table);
>         dte     |= DTE_IRQ_REMAP_INTCTL;
>         dte     |= DTE_IRQ_TABLE_LEN;
>         dte     |= DTE_IRQ_REMAP_ENABLE;
> diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
> index 5a11328..2870a6b 100644
> --- a/drivers/iommu/amd_iommu_init.c
> +++ b/drivers/iommu/amd_iommu_init.c
> @@ -29,6 +29,7 @@
>  #include <linux/export.h>
>  #include <linux/iommu.h>
>  #include <linux/kmemleak.h>
> +#include <linux/mem_encrypt.h>
>  #include <asm/pci-direct.h>
>  #include <asm/iommu.h>
>  #include <asm/gart.h>
> @@ -346,7 +347,7 @@ static void iommu_set_device_table(struct amd_iommu *iommu)
>
>         BUG_ON(iommu->mmio_base == NULL);
>
> -       entry = virt_to_phys(amd_iommu_dev_table);
> +       entry = iommu_virt_to_phys(amd_iommu_dev_table);
>         entry |= (dev_table_size >> 12) - 1;
>         memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
>                         &entry, sizeof(entry));
> @@ -602,7 +603,7 @@ static void iommu_enable_command_buffer(struct amd_iommu *iommu)
>
>         BUG_ON(iommu->cmd_buf == NULL);
>
> -       entry = (u64)virt_to_phys(iommu->cmd_buf);
> +       entry = iommu_virt_to_phys(iommu->cmd_buf);
>         entry |= MMIO_CMD_SIZE_512;
>
>         memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
> @@ -631,7 +632,7 @@ static void iommu_enable_event_buffer(struct amd_iommu *iommu)
>
>         BUG_ON(iommu->evt_buf == NULL);
>
> -       entry = (u64)virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;
> +       entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;
>
>         memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
>                     &entry, sizeof(entry));
> @@ -664,7 +665,7 @@ static void iommu_enable_ppr_log(struct amd_iommu *iommu)
>         if (iommu->ppr_log == NULL)
>                 return;
>
> -       entry = (u64)virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;
> +       entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;
>
>         memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
>                     &entry, sizeof(entry));
> @@ -744,10 +745,10 @@ static int iommu_init_ga_log(struct amd_iommu *iommu)
>         if (!iommu->ga_log_tail)
>                 goto err_out;
>
> -       entry = (u64)virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
> +       entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
>         memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
>                     &entry, sizeof(entry));
> -       entry = ((u64)virt_to_phys(iommu->ga_log) & 0xFFFFFFFFFFFFFULL) & ~7ULL;
> +       entry = (iommu_virt_to_phys(iommu->ga_log) & 0xFFFFFFFFFFFFFULL) & ~7ULL;
>         memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
>                     &entry, sizeof(entry));
>         writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
> @@ -2552,6 +2553,11 @@ int __init amd_iommu_detect(void)
>         if (amd_iommu_disabled)
>                 return -ENODEV;
>
> +       if (!sme_iommu_supported()) {
> +               pr_notice("AMD-Vi: IOMMU not supported when SME is active\n");
> +               return -ENODEV;
> +       }
> +
>         ret = iommu_go_to_state(IOMMU_IVRS_DETECTED);
>         if (ret)
>                 return ret;
> diff --git a/drivers/iommu/amd_iommu_proto.h b/drivers/iommu/amd_iommu_proto.h
> index 466260f..3f12fb2 100644
> --- a/drivers/iommu/amd_iommu_proto.h
> +++ b/drivers/iommu/amd_iommu_proto.h
> @@ -87,4 +87,14 @@ static inline bool iommu_feature(struct amd_iommu *iommu, u64 f)
>         return !!(iommu->features & f);
>  }
>
> +static inline u64 iommu_virt_to_phys(void *vaddr)
> +{
> +       return (u64)__sme_set(virt_to_phys(vaddr));
> +}
> +
> +static inline void *iommu_phys_to_virt(unsigned long paddr)
> +{
> +       return phys_to_virt(__sme_clr(paddr));
> +}
> +
>  #endif /* _ASM_X86_AMD_IOMMU_PROTO_H  */
> diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
> index 4de8f41..3ce587d 100644
> --- a/drivers/iommu/amd_iommu_types.h
> +++ b/drivers/iommu/amd_iommu_types.h
> @@ -343,7 +343,7 @@
>
>  #define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL)
>  #define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_P)
> -#define IOMMU_PTE_PAGE(pte) (phys_to_virt((pte) & IOMMU_PAGE_MASK))
> +#define IOMMU_PTE_PAGE(pte) (iommu_phys_to_virt((pte) & IOMMU_PAGE_MASK))
>  #define IOMMU_PTE_MODE(pte) (((pte) >> 9) & 0x07)
>
>  #define IOMMU_PROT_MASK 0x03
> diff --git a/include/asm-generic/mem_encrypt.h b/include/asm-generic/mem_encrypt.h
> index fb02ff0..bbc49e1 100644
> --- a/include/asm-generic/mem_encrypt.h
> +++ b/include/asm-generic/mem_encrypt.h
> @@ -27,6 +27,11 @@ static inline u64 sme_dma_mask(void)
>         return 0ULL;
>  }
>
> +static inline bool sme_iommu_supported(void)
> +{
> +       return true;
> +}
> +
>  /*
>   * The __sme_set() and __sme_clr() macros are useful for adding or removing
>   * the encryption mask from a value (e.g. when dealing with pagetable
>
> _______________________________________________
> iommu mailing list
> iommu@lists.linux-foundation.org
> https://lists.linuxfoundation.org/mailman/listinfo/iommu


Hi Tom,

This sounds like a cool feature. I'm trying to test it on my Ryzen
system, but c->microcode & 0xf000 is evaluating as 0, so IOMMU is not
being enabled on my system. I'm using the latest microcode for AGESA
1.0.0.6, 0x08001126. Is this work reliant on a future microcode
update, or is there some other issue?

Thanks,
Sarnex

--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org.  For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org">dont@kvack.org</a>

  reply	other threads:[~2017-06-08  2:38 UTC|newest]

Thread overview: 94+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2017-06-07 19:13 [PATCH v6 00/34] x86: Secure Memory Encryption (AMD) Tom Lendacky
2017-06-07 19:13 ` [PATCH v6 04/34] x86/CPU/AMD: Add the Secure Memory Encryption CPU feature Tom Lendacky
2017-06-09 10:55   ` Borislav Petkov
2017-06-07 19:14 ` [PATCH v6 07/34] x86/mm: Don't use phys_to_virt in ioremap() if SME is active Tom Lendacky
2017-06-07 19:14 ` [PATCH v6 08/34] x86/mm: Add support to enable SME in early boot processing Tom Lendacky
2017-06-07 19:14 ` [PATCH v6 09/34] x86/mm: Simplify p[gum]d_page() macros Tom Lendacky
2017-06-10 10:44   ` Borislav Petkov
2017-06-07 19:14 ` [PATCH v6 10/34] x86, x86/mm, x86/xen, olpc: Use __va() against just the physical address in cr3 Tom Lendacky
2017-06-07 22:06   ` Boris Ostrovsky
     [not found]     ` <b15e8924-4069-b5fa-adb2-86c164b1dd36-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org>
2017-06-08 13:42       ` Tom Lendacky
2017-06-08 20:51         ` Boris Ostrovsky
2017-06-08 21:02           ` Tom Lendacky
2017-06-08 21:17             ` Boris Ostrovsky
2017-06-08 22:01               ` [Xen-devel] " Andrew Cooper
     [not found]                 ` <d37917b1-8e49-e8a8-b9ac-59491331640f-Sxgqhf6Nn4DQT0dZR+AlfA@public.gmane.org>
2017-06-09 18:36                   ` Tom Lendacky
2017-06-09 18:43                     ` Boris Ostrovsky
2017-06-09 18:54                       ` Andrew Cooper
2017-06-09 18:59                       ` Tom Lendacky
2017-06-09 19:42                         ` Boris Ostrovsky
2017-06-08  6:05   ` Andy Lutomirski
2017-06-08 22:38     ` Tom Lendacky
     [not found]       ` <85355c4c-2cc1-daac-d8fe-ac6965b34606-5C7GfCeVMHo@public.gmane.org>
2017-06-09 18:46         ` Andy Lutomirski
     [not found]           ` <CALCETrVyHbgJxk6hTRuM0CCpWTvFSX0MxuUQjKOf=7hYCt-yEg-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
2017-06-09 21:20             ` Tom Lendacky
     [not found]   ` <20170607191453.28645.92256.stgit-qCXWGYdRb2BnqfbPTmsdiZQ+2ll4COg0XqFh9Ls21Oc@public.gmane.org>
2017-06-08  7:39     ` kbuild test robot
2017-06-07 19:15 ` [PATCH v6 11/34] x86/mm: Provide general kernel support for memory encryption Tom Lendacky
2017-06-07 19:15 ` [PATCH v6 12/34] x86/mm: Extend early_memremap() support with additional attrs Tom Lendacky
2017-06-07 19:15 ` [PATCH v6 13/34] x86/mm: Add support for early encrypt/decrypt of memory Tom Lendacky
2017-06-10 15:56   ` Borislav Petkov
2017-06-07 19:16 ` [PATCH v6 16/34] efi: Add an EFI table address match function Tom Lendacky
2017-06-07 19:16 ` [PATCH v6 17/34] efi: Update efi_mem_type() to return an error rather than 0 Tom Lendacky
2017-06-07 19:16 ` [PATCH v6 19/34] x86/mm: Add support to access boot related data in the clear Tom Lendacky
     [not found]   ` <20170607191636.28645.98914.stgit-qCXWGYdRb2BnqfbPTmsdiZQ+2ll4COg0XqFh9Ls21Oc@public.gmane.org>
2017-06-08  4:24     ` kbuild test robot
2017-06-07 19:17 ` [PATCH v6 22/34] x86/mm: Add support for changing the memory encryption attribute Tom Lendacky
2017-06-14 16:25   ` Borislav Petkov
2017-06-07 19:17 ` [PATCH v6 24/34] x86, swiotlb: Add memory encryption support Tom Lendacky
2017-06-14 16:45   ` Borislav Petkov
     [not found]     ` <20170614164553.jwcfgugpizz5pc2e-fF5Pk5pvG8Y@public.gmane.org>
2017-06-14 19:38       ` Tom Lendacky
2017-06-07 19:17 ` [PATCH v6 26/34] iommu/amd: Allow the AMD IOMMU to work with memory encryption Tom Lendacky
2017-06-08  2:38   ` Nick Sarnie [this message]
2017-06-08 14:26     ` Tom Lendacky
2017-06-14 17:42   ` Borislav Petkov
2017-06-14 20:40     ` Tom Lendacky
2017-06-15  9:41       ` Borislav Petkov
     [not found]         ` <20170615094111.wga334kg2bhxqib3-fF5Pk5pvG8Y@public.gmane.org>
2017-06-15 14:59           ` Tom Lendacky
2017-06-15 15:33             ` Borislav Petkov
2017-06-15 16:33               ` Tom Lendacky
2017-06-19 17:18                 ` Borislav Petkov
2017-06-15 20:13               ` Konrad Rzeszutek Wilk
2017-06-21 15:37         ` Joerg Roedel
2017-06-21 16:59           ` Borislav Petkov
     [not found]             ` <20170621165921.tv2jfhf5dz7hsjsy-fF5Pk5pvG8Y@public.gmane.org>
2017-06-21 18:40               ` Tom Lendacky
2017-06-07 19:18 ` [PATCH v6 28/34] x86, drm, fbdev: Do not specify encrypted memory for video mappings Tom Lendacky
2017-06-07 19:18 ` [PATCH v6 29/34] kvm: x86: svm: Support Secure Memory Encryption within KVM Tom Lendacky
2017-06-15  9:55   ` Borislav Petkov
2017-06-07 19:18 ` [PATCH v6 30/34] x86/mm, kexec: Allow kexec to be used with SME Tom Lendacky
2017-06-15 10:03   ` Borislav Petkov
     [not found]     ` <20170615100345.76pn5ruf6cm3ktpe-fF5Pk5pvG8Y@public.gmane.org>
2017-06-15 17:43       ` Tom Lendacky
     [not found] ` <20170607191309.28645.15241.stgit-qCXWGYdRb2BnqfbPTmsdiZQ+2ll4COg0XqFh9Ls21Oc@public.gmane.org>
2017-06-07 19:13   ` [PATCH v6 01/34] x86: Document AMD Secure Memory Encryption (SME) Tom Lendacky
2017-06-07 19:13   ` [PATCH v6 02/34] x86/mm/pat: Set write-protect cache mode for full PAT support Tom Lendacky
2017-06-07 19:13   ` [PATCH v6 03/34] x86, mpparse, x86/acpi, x86/PCI, x86/dmi, SFI: Use memremap for RAM mappings Tom Lendacky
2017-06-07 19:14   ` [PATCH v6 05/34] x86/CPU/AMD: Handle SME reduction in physical address size Tom Lendacky
     [not found]     ` <20170607191404.28645.41148.stgit-qCXWGYdRb2BnqfbPTmsdiZQ+2ll4COg0XqFh9Ls21Oc@public.gmane.org>
2017-06-09 16:30       ` Borislav Petkov
2017-06-07 19:14   ` [PATCH v6 06/34] x86/mm: Add Secure Memory Encryption (SME) support Tom Lendacky
2017-06-09 16:43     ` Borislav Petkov
2017-06-07 19:15   ` [PATCH v6 14/34] x86/mm: Insure that boot memory areas are mapped properly Tom Lendacky
2017-06-10 16:01     ` Borislav Petkov
     [not found]       ` <20170610160119.bnx5ir5dj3i27igx-fF5Pk5pvG8Y@public.gmane.org>
2017-06-12 13:31         ` Tom Lendacky
2017-06-07 19:15   ` [PATCH v6 15/34] x86/boot/e820: Add support to determine the E820 type of an address Tom Lendacky
2017-06-07 19:16   ` [PATCH v6 18/34] x86/efi: Update EFI pagetable creation to work with SME Tom Lendacky
     [not found]     ` <20170607191627.28645.4398.stgit-qCXWGYdRb2BnqfbPTmsdiZQ+2ll4COg0XqFh9Ls21Oc@public.gmane.org>
2017-06-11 19:44       ` Borislav Petkov
2017-06-07 19:16   ` [PATCH v6 20/34] x86, mpparse: Use memremap to map the mpf and mpc data Tom Lendacky
2017-06-14 16:07     ` Borislav Petkov
     [not found]       ` <20170614160754.c4ywbf5ktqwgc4ij-fF5Pk5pvG8Y@public.gmane.org>
2017-06-14 17:06         ` Tom Lendacky
     [not found]           ` <86f31710-76d0-5fee-f4a7-8cdb4b9b9a8e-5C7GfCeVMHo@public.gmane.org>
2017-06-14 17:27             ` Borislav Petkov
2017-06-07 19:16   ` [PATCH v6 21/34] x86/mm: Add support to access persistent memory in the clear Tom Lendacky
2017-06-07 19:17   ` [PATCH v6 23/34] x86, realmode: Decrypt trampoline area if memory encryption is active Tom Lendacky
2017-06-14 16:24     ` Borislav Petkov
     [not found]       ` <20170614162416.ksa54esy5ql7sjgz-fF5Pk5pvG8Y@public.gmane.org>
2017-06-14 16:38         ` Tom Lendacky
2017-06-07 19:17   ` [PATCH v6 25/34] swiotlb: Add warnings for use of bounce buffers with SME Tom Lendacky
2017-06-08  5:53     ` kbuild test robot
     [not found]       ` <201706081348.u0hG73ce%fengguang.wu-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
2017-06-08 21:09         ` Tom Lendacky
2017-06-08  7:58     ` Christoph Hellwig
2017-06-08 23:04       ` Tom Lendacky
2017-06-14 16:50     ` Borislav Petkov
2017-06-14 19:49       ` Tom Lendacky
2017-06-15  9:08         ` Borislav Petkov
2017-06-15 13:23           ` Tom Lendacky
2017-06-07 19:17   ` [PATCH v6 27/34] x86, realmode: Check for memory encryption on the APs Tom Lendacky
2017-06-07 19:18   ` [PATCH v6 31/34] x86/mm: Use proper encryption attributes with /dev/mem Tom Lendacky
2017-06-07 19:18   ` [PATCH v6 32/34] x86/mm: Add support to encrypt the kernel in-place Tom Lendacky
2017-06-07 19:19 ` [PATCH v6 33/34] x86/boot: Add early cmdline parsing for options with arguments Tom Lendacky
2017-06-07 19:19 ` [PATCH v6 34/34] x86/mm: Add support to make use of Secure Memory Encryption Tom Lendacky
2017-06-08  2:40 ` [PATCH v6 00/34] x86: Secure Memory Encryption (AMD) Nick Sarnie
     [not found]   ` <CAOcCaLYWoOu0c-Fkee-=wegNqkzUp9pLFLmaFrXuhiXRnUZ3Xw-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
2017-06-08 16:14     ` Tom Lendacky

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=CAOcCaLZ5QNx+CdnLn4eHtYOJOezJAsmq2whf8mggOdeMQDWOhw@mail.gmail.com \
    --to=commendsarnex@gmail.com \
    --cc=arnd@arndb.de \
    --cc=aryabinin@virtuozzo.com \
    --cc=bp@alien8.de \
    --cc=brijesh.singh@amd.com \
    --cc=corbet@lwn.net \
    --cc=glider@google.com \
    --cc=hpa@zytor.com \
    --cc=iommu@lists.linux-foundation.org \
    --cc=kasan-dev@googlegroups.com \
    --cc=kexec@lists.infradead.org \
    --cc=kvm@vger.kernel.org \
    --cc=linux-arch@vger.kernel.org \
    --cc=linux-doc@vger.kernel.org \
    --cc=linux-efi@vger.kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-mm@kvack.org \
    --cc=luto@kernel.org \
    --cc=matt@codeblueprint.co.uk \
    --cc=mingo@redhat.com \
    --cc=mst@redhat.com \
    --cc=pbonzini@redhat.com \
    --cc=riel@redhat.com \
    --cc=rkrcmar@redhat.com \
    --cc=tglx@linutronix.de \
    --cc=thomas.lendacky@amd.com \
    --cc=toshi.kani@hpe.com \
    --cc=x86@kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).