From: Alexey Kardashevskiy <aik@amd.com>
To: <kvm@vger.kernel.org>
Cc: <iommu@lists.linux.dev>, <linux-coco@lists.linux.dev>,
<linux-pci@vger.kernel.org>,
Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>,
Alex Williamson <alex.williamson@redhat.com>,
Dan Williams <dan.j.williams@intel.com>,
<pratikrajesh.sampat@amd.com>, <michael.day@amd.com>,
<david.kaplan@amd.com>, <dhaval.giani@amd.com>,
Santosh Shukla <santosh.shukla@amd.com>,
Tom Lendacky <thomas.lendacky@amd.com>,
Michael Roth <michael.roth@amd.com>,
"Alexander Graf" <agraf@suse.de>,
Nikunj A Dadhania <nikunj@amd.com>,
Vasant Hegde <vasant.hegde@amd.com>,
Lukas Wunner <lukas@wunner.de>,
Alexey Kardashevskiy <aik@amd.com>
Subject: [RFC PATCH 07/21] pci/tdisp: Introduce tsm module
Date: Fri, 23 Aug 2024 23:21:21 +1000 [thread overview]
Message-ID: <20240823132137.336874-8-aik@amd.com> (raw)
In-Reply-To: <20240823132137.336874-1-aik@amd.com>
The module responsibilities are:
1. detect TEE support in a device and create nodes in the device's sysfs
entry;
2. allow binding a PCI device to a VM for passing it through in a trusted
manner;
3. store measurements/certificates/reports and provide access to them
for userspace via sysfs.
This relies on the platform to register a set of callbacks,
for both host and guest.
It also adds a tdi_enabled flag to the device struct.
Signed-off-by: Alexey Kardashevskiy <aik@amd.com>
---
drivers/virt/coco/Makefile | 1 +
include/linux/device.h | 5 +
include/linux/tsm.h | 263 ++++
drivers/virt/coco/tsm.c | 1336 ++++++++++++++++++++
Documentation/virt/coco/tsm.rst | 62 +
drivers/virt/coco/Kconfig | 11 +
6 files changed, 1678 insertions(+)
diff --git a/drivers/virt/coco/Makefile b/drivers/virt/coco/Makefile
index 75defec514f8..5d1aefb62714 100644
--- a/drivers/virt/coco/Makefile
+++ b/drivers/virt/coco/Makefile
@@ -3,6 +3,7 @@
# Confidential computing related collateral
#
obj-$(CONFIG_TSM_REPORTS) += tsm-report.o
+obj-$(CONFIG_TSM) += tsm.o
obj-$(CONFIG_EFI_SECRET) += efi_secret/
obj-$(CONFIG_SEV_GUEST) += sev-guest/
obj-$(CONFIG_INTEL_TDX_GUEST) += tdx-guest/
diff --git a/include/linux/device.h b/include/linux/device.h
index 34eb20f5966f..bb58ed1fb8da 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -45,6 +45,7 @@ struct fwnode_handle;
struct iommu_group;
struct dev_pin_info;
struct dev_iommu;
+struct tsm_tdi;
struct msi_device_data;
/**
@@ -801,6 +802,7 @@ struct device {
void (*release)(struct device *dev);
struct iommu_group *iommu_group;
struct dev_iommu *iommu;
+ struct tsm_tdi *tdi;
struct device_physical_location *physical_location;
@@ -822,6 +824,9 @@ struct device {
#ifdef CONFIG_DMA_NEED_SYNC
bool dma_skip_sync:1;
#endif
+#if defined(CONFIG_TSM) || defined(CONFIG_TSM_MODULE)
+ bool tdi_enabled:1;
+#endif
};
/**
diff --git a/include/linux/tsm.h b/include/linux/tsm.h
new file mode 100644
index 000000000000..d48eceaf5bc0
--- /dev/null
+++ b/include/linux/tsm.h
@@ -0,0 +1,263 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef LINUX_TSM_H
+#define LINUX_TSM_H
+
+#include <linux/cdev.h>
+
+/* SPDM control structure for DOE */
+struct tsm_spdm {
+ unsigned long req_len;
+ void *req;
+ unsigned long rsp_len;
+ void *rsp;
+
+ struct pci_doe_mb *doe_mb;
+ struct pci_doe_mb *doe_mb_secured;
+};
+
+/* Data object for measurements/certificates/attestation report */
+struct tsm_blob {
+ void *data;
+ size_t len;
+ struct kref kref;
+ void (*release)(struct tsm_blob *b);
+};
+
+struct tsm_blob *tsm_blob_new(void *data, size_t len, void (*release)(struct tsm_blob *b));
+struct tsm_blob *tsm_blob_get(struct tsm_blob *b);
+void tsm_blob_put(struct tsm_blob *b);
+
+/**
+ * struct tdisp_interface_id - TDISP INTERFACE_ID Definition
+ *
+ * @function_id: Identifies the function of the device hosting the TDI
+ * 15:0: @rid: Requester ID
+ * 23:16: @rseg: Requester Segment (Reserved if Requester Segment Valid is Clear)
+ * 24: @rseg_valid: Requester Segment Valid
+ * 31:25: Reserved
+ * 8B - Reserved
+ */
+struct tdisp_interface_id {
+ union {
+ struct {
+ u32 function_id;
+ u8 reserved[8];
+ };
+ struct {
+ u16 rid;
+ u8 rseg;
+ u8 rseg_valid:1;
+ };
+ };
+} __packed;
+
+/*
+ * Measurement block as defined in SPDM DSP0274.
+ */
+struct spdm_measurement_block_header {
+ u8 index;
+ u8 spec; /* MeasurementSpecification */
+ u16 size;
+} __packed;
+
+struct dmtf_measurement_block_header {
+ u8 type; /* DMTFSpecMeasurementValueType */
+ u16 size; /* DMTFSpecMeasurementValueSize */
+} __packed;
+
+struct dmtf_measurement_block_device_mode {
+ u32 opmode_cap; /* OperationalModeCapabilities */
+ u32 opmode_sta; /* OperationalModeState */
+ u32 devmode_cap; /* DeviceModeCapabilities */
+ u32 devmode_sta; /* DeviceModeState */
+} __packed;
+
+struct spdm_certchain_block_header {
+ u16 length;
+ u16 reserved;
+} __packed;
+
+/*
+ * TDI Report Structure as defined in TDISP.
+ */
+struct tdi_report_header {
+ union {
+ u16 interface_info;
+ struct {
+ u16 no_fw_update:1; /* fw updates not permitted in CONFIG_LOCKED or RUN */
+ u16 dma_no_pasid:1; /* TDI generates DMA requests without PASID */
+ u16 dma_pasid:1; /* TDI generates DMA requests with PASID */
+ u16 ats:1; /* ATS supported and enabled for the TDI */
+ u16 prs:1; /* PRS supported and enabled for the TDI */
+ u16 reserved1:11;
+ };
+ };
+ u16 reserved2;
+ u16 msi_x_message_control;
+ u16 lnr_control;
+ u32 tph_control;
+ u32 mmio_range_count;
+} __packed;
+
+/*
+ * Each MMIO Range of the TDI is reported with the MMIO reporting offset added.
+ * Base and size in units of 4K pages
+ */
+struct tdi_report_mmio_range {
+ u64 first_page; /* First 4K page with offset added */
+ u32 num; /* Number of 4K pages in this range */
+ union {
+ u32 range_attributes;
+ struct {
+ u32 msix_table:1;
+ u32 msix_pba:1;
+ u32 is_non_tee_mem:1;
+ u32 is_mem_attr_updatable:1;
+ u32 reserved:12;
+ u32 range_id:16;
+ };
+ };
+} __packed;
+
+struct tdi_report_footer {
+ u32 device_specific_info_len;
+ u8 device_specific_info[];
+} __packed;
+
+#define TDI_REPORT_HDR(rep) ((struct tdi_report_header *) ((rep)->data))
+#define TDI_REPORT_MR_NUM(rep) (TDI_REPORT_HDR(rep)->mmio_range_count)
+#define TDI_REPORT_MR_OFF(rep) ((struct tdi_report_mmio_range *) (TDI_REPORT_HDR(rep) + 1))
+#define TDI_REPORT_MR(rep, rangeid) TDI_REPORT_MR_OFF(rep)[rangeid]
+#define TDI_REPORT_FTR(rep) ((struct tdi_report_footer *) &TDI_REPORT_MR((rep), \
+ TDI_REPORT_MR_NUM(rep)))
+
+/* Physical device descriptor responsible for IDE/TDISP setup */
+struct tsm_dev {
+ struct kref kref;
+ const struct attribute_group *ag;
+ struct pci_dev *pdev; /* Physical PCI function #0 */
+ struct tsm_spdm spdm;
+ struct mutex spdm_mutex;
+
+ u8 tc_mask;
+ u8 cert_slot;
+ u8 connected;
+ struct {
+ u8 enabled:1;
+ u8 enable:1;
+ u8 def:1;
+ u8 dev_ide_cfg:1;
+ u8 dev_tee_limited:1;
+ u8 rootport_ide_cfg:1;
+ u8 rootport_tee_limited:1;
+ u8 id;
+ } selective_ide[256];
+ bool ide_pre;
+
+ struct tsm_blob *meas;
+ struct tsm_blob *certs;
+
+ void *data; /* Platform specific data */
+};
+
+/* PCI function for passing through, can be the same as tsm_dev::pdev */
+struct tsm_tdi {
+ const struct attribute_group *ag;
+ struct pci_dev *pdev;
+ struct tsm_dev *tdev;
+
+ u8 rseg;
+ u8 rseg_valid;
+ bool validated;
+
+ struct tsm_blob *report;
+
+ void *data; /* Platform specific data */
+
+ u64 vmid;
+ u32 asid;
+ u16 guest_rid; /* BDFn of PCI Fn in the VM */
+};
+
+struct tsm_dev_status {
+ u8 ctx_state;
+ u8 tc_mask;
+ u8 certs_slot;
+ u16 device_id;
+ u16 segment_id;
+ u8 no_fw_update;
+ u16 ide_stream_id[8];
+};
+
+enum tsm_spdm_algos {
+ TSM_TDI_SPDM_ALGOS_DHE_SECP256R1,
+ TSM_TDI_SPDM_ALGOS_DHE_SECP384R1,
+ TSM_TDI_SPDM_ALGOS_AEAD_AES_128_GCM,
+ TSM_TDI_SPDM_ALGOS_AEAD_AES_256_GCM,
+ TSM_TDI_SPDM_ALGOS_ASYM_TPM_ALG_RSASSA_3072,
+ TSM_TDI_SPDM_ALGOS_ASYM_TPM_ALG_ECDSA_ECC_NIST_P256,
+ TSM_TDI_SPDM_ALGOS_ASYM_TPM_ALG_ECDSA_ECC_NIST_P384,
+ TSM_TDI_SPDM_ALGOS_HASH_TPM_ALG_SHA_256,
+ TSM_TDI_SPDM_ALGOS_HASH_TPM_ALG_SHA_384,
+ TSM_TDI_SPDM_ALGOS_KEY_SCHED_SPDM_KEY_SCHEDULE,
+};
+
+enum tsm_tdisp_state {
+ TDISP_STATE_UNAVAIL,
+ TDISP_STATE_CONFIG_UNLOCKED,
+ TDISP_STATE_CONFIG_LOCKED,
+ TDISP_STATE_RUN,
+ TDISP_STATE_ERROR,
+};
+
+struct tsm_tdi_status {
+ bool valid;
+ u8 meas_digest_fresh:1;
+ u8 meas_digest_valid:1;
+ u8 all_request_redirect:1;
+ u8 bind_p2p:1;
+ u8 lock_msix:1;
+ u8 no_fw_update:1;
+ u16 cache_line_size;
+ u64 spdm_algos; /* Bitmask of tsm_spdm_algos */
+ u8 certs_digest[48];
+ u8 meas_digest[48];
+ u8 interface_report_digest[48];
+
+ /* HV only */
+ struct tdisp_interface_id id;
+ u8 guest_report_id[16];
+ enum tsm_tdisp_state state;
+};
+
+struct tsm_ops {
+ /* HV hooks */
+ int (*dev_connect)(struct tsm_dev *tdev, void *private_data);
+ int (*dev_reclaim)(struct tsm_dev *tdev, void *private_data);
+ int (*dev_status)(struct tsm_dev *tdev, void *private_data, struct tsm_dev_status *s);
+ int (*ide_refresh)(struct tsm_dev *tdev, void *private_data);
+ int (*tdi_bind)(struct tsm_tdi *tdi, u32 bdfn, u64 vmid, u32 asid, void *private_data);
+ int (*tdi_reclaim)(struct tsm_tdi *tdi, void *private_data);
+
+ int (*guest_request)(struct tsm_tdi *tdi, u32 guest_rid, u64 vmid, void *req_data,
+ enum tsm_tdisp_state *state, void *private_data);
+
+ /* VM hooks */
+ int (*tdi_validate)(struct tsm_tdi *tdi, bool invalidate, void *private_data);
+
+ /* HV and VM hooks */
+ int (*tdi_status)(struct tsm_tdi *tdi, void *private_data, struct tsm_tdi_status *ts);
+};
+
+void tsm_set_ops(struct tsm_ops *ops, void *private_data);
+struct tsm_tdi *tsm_tdi_get(struct device *dev);
+int tsm_tdi_bind(struct tsm_tdi *tdi, u32 guest_rid, u64 vmid, u32 asid);
+void tsm_tdi_unbind(struct tsm_tdi *tdi);
+int tsm_guest_request(struct tsm_tdi *tdi, enum tsm_tdisp_state *state, void *req_data);
+struct tsm_tdi *tsm_tdi_find(u32 guest_rid, u64 vmid);
+
+int pci_dev_tdi_validate(struct pci_dev *pdev);
+ssize_t tsm_report_gen(struct tsm_blob *report, char *b, size_t len);
+
+#endif /* LINUX_TSM_H */
diff --git a/drivers/virt/coco/tsm.c b/drivers/virt/coco/tsm.c
new file mode 100644
index 000000000000..e90455a0267f
--- /dev/null
+++ b/drivers/virt/coco/tsm.c
@@ -0,0 +1,1336 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pci-doe.h>
+#include <linux/pci-ide.h>
+#include <linux/file.h>
+#include <linux/fdtable.h>
+#include <linux/tsm.h>
+#include <linux/kvm_host.h>
+
+#define DRIVER_VERSION "0.1"
+#define DRIVER_AUTHOR "aik@amd.com"
+#define DRIVER_DESC "TSM TDISP driver"
+
+static struct {
+ struct tsm_ops *ops;
+ void *private_data;
+
+ uint tc_mask;
+ uint cert_slot;
+ bool physfn;
+} tsm;
+
+module_param_named(tc_mask, tsm.tc_mask, uint, 0644);
+MODULE_PARM_DESC(tc_mask, "Mask of traffic classes enabled in the device");
+
+module_param_named(cert_slot, tsm.cert_slot, uint, 0644);
+MODULE_PARM_DESC(cert_slot, "Slot number of the certificate requested for constructing the SPDM session");
+
+module_param_named(physfn, tsm.physfn, bool, 0644);
+MODULE_PARM_DESC(physfn, "Allow TDI on SR IOV of a physical function");
+
+struct tsm_blob *tsm_blob_new(void *data, size_t len, void (*release)(struct tsm_blob *b))
+{
+ struct tsm_blob *b;
+
+ if (!len || !data)
+ return NULL;
+
+ b = kzalloc(sizeof(*b) + len, GFP_KERNEL);
+ if (!b)
+ return NULL;
+
+ b->data = (void *)b + sizeof(*b);
+ b->len = len;
+ b->release = release;
+ memcpy(b->data, data, len);
+ kref_init(&b->kref);
+
+ return b;
+}
+EXPORT_SYMBOL_GPL(tsm_blob_new);
+
+static void tsm_blob_release(struct kref *kref)
+{
+ struct tsm_blob *b = container_of(kref, struct tsm_blob, kref);
+
+ b->release(b);
+ kfree(b);
+}
+
+struct tsm_blob *tsm_blob_get(struct tsm_blob *b)
+{
+ if (!b)
+ return NULL;
+
+ if (!kref_get_unless_zero(&b->kref))
+ return NULL;
+
+ return b;
+}
+EXPORT_SYMBOL_GPL(tsm_blob_get);
+
+void tsm_blob_put(struct tsm_blob *b)
+{
+ if (!b)
+ return;
+
+ kref_put(&b->kref, tsm_blob_release);
+}
+EXPORT_SYMBOL_GPL(tsm_blob_put);
+
+static struct tsm_dev *tsm_dev_get(struct device *dev)
+{
+ struct tsm_tdi *tdi = dev->tdi;
+
+ if (!tdi || !tdi->tdev || !kref_get_unless_zero(&tdi->tdev->kref))
+ return NULL;
+
+ return tdi->tdev;
+}
+
+static void tsm_dev_free(struct kref *kref);
+static void tsm_dev_put(struct tsm_dev *tdev)
+{
+ kref_put(&tdev->kref, tsm_dev_free);
+}
+
+struct tsm_tdi *tsm_tdi_get(struct device *dev)
+{
+ struct tsm_tdi *tdi = dev->tdi;
+
+ return tdi;
+}
+EXPORT_SYMBOL_GPL(tsm_tdi_get);
+
+static int spdm_forward(struct tsm_spdm *spdm, u8 type)
+{
+ struct pci_doe_mb *doe_mb;
+ int rc;
+
+ if (type == PCI_DOE_PROTOCOL_SECURED_CMA_SPDM)
+ doe_mb = spdm->doe_mb_secured;
+ else if (type == PCI_DOE_PROTOCOL_CMA_SPDM)
+ doe_mb = spdm->doe_mb;
+ else
+ return -EINVAL;
+
+ if (!doe_mb)
+ return -EFAULT;
+
+ rc = pci_doe(doe_mb, PCI_VENDOR_ID_PCI_SIG, type,
+ spdm->req, spdm->req_len, spdm->rsp, spdm->rsp_len);
+ if (rc >= 0)
+ spdm->rsp_len = rc;
+
+ return rc;
+}
+
+/*
+ * Enables IDE between the RC and the device.
+ * TEE Limited, IDE Cfg space and other bits are hardcoded
+ * as this is a sketch.
+ */
+static int tsm_set_sel_ide(struct tsm_dev *tdev)
+{
+ struct pci_dev *rootport;
+ bool printed = false;
+ unsigned int i;
+ int ret = 0;
+
+ rootport = tdev->pdev->bus->self;
+ for (i = 0; i < ARRAY_SIZE(tdev->selective_ide); ++i) {
+ if (!tdev->selective_ide[i].enable)
+ continue;
+
+ if (!printed) {
+ pci_info(rootport, "Configuring IDE with %s\n",
+ pci_name(tdev->pdev));
+ printed = true;
+ }
+ WARN_ON_ONCE(tdev->selective_ide[i].enabled);
+
+ ret = pci_ide_set_sel_rid_assoc(tdev->pdev, i, true, 0, 0, 0xFFFF);
+ if (ret)
+ pci_warn(tdev->pdev,
+ "Failed configuring SelectiveIDE#%d rid1 with %d\n",
+ i, ret);
+ ret = pci_ide_set_sel_addr_assoc(tdev->pdev, i, 0/* RID# */, true,
+ 0, 0xFFFFFFFFFFF00000ULL);
+ if (ret)
+ pci_warn(tdev->pdev,
+ "Failed configuring SelectiveIDE#%d RID#0 with %d\n",
+ i, ret);
+
+ ret = pci_ide_set_sel(tdev->pdev, i,
+ tdev->selective_ide[i].id,
+ tdev->selective_ide[i].enable,
+ tdev->selective_ide[i].def,
+ tdev->selective_ide[i].dev_tee_limited,
+ tdev->selective_ide[i].dev_ide_cfg);
+ if (ret) {
+ pci_warn(tdev->pdev,
+ "Failed configuring SelectiveIDE#%d with %d\n",
+ i, ret);
+ break;
+ }
+
+ ret = pci_ide_set_sel_rid_assoc(rootport, i, true, 0, 0, 0xFFFF);
+ if (ret)
+ pci_warn(rootport,
+ "Failed configuring SelectiveIDE#%d rid1 with %d\n",
+ i, ret);
+
+ ret = pci_ide_set_sel(rootport, i,
+ tdev->selective_ide[i].id,
+ tdev->selective_ide[i].enable,
+ tdev->selective_ide[i].def,
+ tdev->selective_ide[i].rootport_tee_limited,
+ tdev->selective_ide[i].rootport_ide_cfg);
+ if (ret)
+ pci_warn(rootport,
+ "Failed configuring SelectiveIDE#%d with %d\n",
+ i, ret);
+
+ tdev->selective_ide[i].enabled = 1;
+ }
+
+ return ret;
+}
+
+static void tsm_unset_sel_ide(struct tsm_dev *tdev)
+{
+ struct pci_dev *rootport = tdev->pdev->bus->self;
+ bool printed = false;
+
+ for (unsigned int i = 0; i < ARRAY_SIZE(tdev->selective_ide); ++i) {
+ if (!tdev->selective_ide[i].enabled)
+ continue;
+
+ if (!printed) {
+ pci_info(rootport, "Deconfiguring IDE with %s\n", pci_name(tdev->pdev));
+ printed = true;
+ }
+
+ pci_ide_set_sel(rootport, i, 0, 0, 0, false, false);
+ pci_ide_set_sel(tdev->pdev, i, 0, 0, 0, false, false);
+ tdev->selective_ide[i].enabled = 0;
+ }
+}
+
+static int tsm_dev_connect(struct tsm_dev *tdev, void *private_data, unsigned int val)
+{
+ int ret;
+
+ if (WARN_ON(!tsm.ops->dev_connect))
+ return -EPERM;
+
+ tdev->ide_pre = val == 2;
+ if (tdev->ide_pre)
+ tsm_set_sel_ide(tdev);
+
+ mutex_lock(&tdev->spdm_mutex);
+ while (1) {
+ ret = tsm.ops->dev_connect(tdev, tsm.private_data);
+ if (ret <= 0)
+ break;
+
+ ret = spdm_forward(&tdev->spdm, ret);
+ if (ret < 0)
+ break;
+ }
+ mutex_unlock(&tdev->spdm_mutex);
+
+ if (!tdev->ide_pre)
+ ret = tsm_set_sel_ide(tdev);
+
+ tdev->connected = (ret == 0);
+
+ return ret;
+}
+
+static int tsm_dev_reclaim(struct tsm_dev *tdev, void *private_data)
+{
+ struct pci_dev *pdev = NULL;
+ int ret;
+
+ if (WARN_ON(!tsm.ops->dev_reclaim))
+ return -EPERM;
+
+ /* Do not disconnect with active TDIs */
+ for_each_pci_dev(pdev) {
+ struct tsm_tdi *tdi = tsm_tdi_get(&pdev->dev);
+
+ if (tdi && tdi->tdev == tdev && tdi->data)
+ return -EBUSY;
+ }
+
+ if (!tdev->ide_pre)
+ tsm_unset_sel_ide(tdev);
+
+ mutex_lock(&tdev->spdm_mutex);
+ while (1) {
+ ret = tsm.ops->dev_reclaim(tdev, private_data);
+ if (ret <= 0)
+ break;
+
+ ret = spdm_forward(&tdev->spdm, ret);
+ if (ret < 0)
+ break;
+ }
+ mutex_unlock(&tdev->spdm_mutex);
+
+ if (tdev->ide_pre)
+ tsm_unset_sel_ide(tdev);
+
+ if (!ret)
+ tdev->connected = false;
+
+ return ret;
+}
+
+static int tsm_dev_status(struct tsm_dev *tdev, void *private_data, struct tsm_dev_status *s)
+{
+ if (WARN_ON(!tsm.ops->dev_status))
+ return -EPERM;
+
+ return tsm.ops->dev_status(tdev, private_data, s);
+}
+
+static int tsm_ide_refresh(struct tsm_dev *tdev, void *private_data)
+{
+ int ret;
+
+ if (!tsm.ops->ide_refresh)
+ return -EPERM;
+
+ mutex_lock(&tdev->spdm_mutex);
+ while (1) {
+ ret = tsm.ops->ide_refresh(tdev, private_data);
+ if (ret <= 0)
+ break;
+
+ ret = spdm_forward(&tdev->spdm, ret);
+ if (ret < 0)
+ break;
+ }
+ mutex_unlock(&tdev->spdm_mutex);
+
+ return ret;
+}
+
+static void tsm_tdi_reclaim(struct tsm_tdi *tdi, void *private_data)
+{
+ int ret;
+
+ if (WARN_ON(!tsm.ops->tdi_reclaim))
+ return;
+
+ mutex_lock(&tdi->tdev->spdm_mutex);
+ while (1) {
+ ret = tsm.ops->tdi_reclaim(tdi, private_data);
+ if (ret <= 0)
+ break;
+
+ ret = spdm_forward(&tdi->tdev->spdm, ret);
+ if (ret < 0)
+ break;
+ }
+ mutex_unlock(&tdi->tdev->spdm_mutex);
+}
+
+static int tsm_tdi_validate(struct tsm_tdi *tdi, bool invalidate, void *private_data)
+{
+ int ret;
+
+ if (!tdi || !tsm.ops->tdi_validate)
+ return -EPERM;
+
+ ret = tsm.ops->tdi_validate(tdi, invalidate, private_data);
+ if (ret) {
+ pci_err(tdi->pdev, "Validation failed, ret=%d", ret);
+ tdi->pdev->dev.tdi_enabled = false;
+ }
+
+ return ret;
+}
+
+/* In case BUS_NOTIFY_PCI_BUS_MASTER is no good, a driver can call pci_dev_tdi_validate() */
+int pci_dev_tdi_validate(struct pci_dev *pdev)
+{
+ struct tsm_tdi *tdi = tsm_tdi_get(&pdev->dev);
+
+ return tsm_tdi_validate(tdi, false, tsm.private_data);
+}
+EXPORT_SYMBOL_GPL(pci_dev_tdi_validate);
+
+static int tsm_tdi_status(struct tsm_tdi *tdi, void *private_data, struct tsm_tdi_status *ts)
+{
+ struct tsm_tdi_status tstmp = { 0 };
+ int ret;
+
+ if (WARN_ON(!tsm.ops->tdi_status))
+ return -EPERM;
+
+ mutex_lock(&tdi->tdev->spdm_mutex);
+ while (1) {
+ ret = tsm.ops->tdi_status(tdi, private_data, &tstmp);
+ if (ret <= 0)
+ break;
+
+ ret = spdm_forward(&tdi->tdev->spdm, ret);
+ if (ret < 0)
+ break;
+ }
+ mutex_unlock(&tdi->tdev->spdm_mutex);
+
+ *ts = tstmp;
+
+ return ret;
+}
+
+static ssize_t tsm_cert_slot_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct tsm_dev *tdev = tsm_dev_get(dev);
+ ssize_t ret = count;
+ unsigned long val;
+
+ if (kstrtoul(buf, 0, &val) < 0)
+ ret = -EINVAL;
+ else
+ tdev->cert_slot = val;
+
+ tsm_dev_put(tdev);
+
+ return ret;
+}
+
+static ssize_t tsm_cert_slot_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct tsm_dev *tdev = tsm_dev_get(dev);
+ ssize_t ret = sysfs_emit(buf, "%u\n", tdev->cert_slot);
+
+ tsm_dev_put(tdev);
+ return ret;
+}
+
+static DEVICE_ATTR_RW(tsm_cert_slot);
+
+static ssize_t tsm_tc_mask_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct tsm_dev *tdev = tsm_dev_get(dev);
+ ssize_t ret = count;
+ unsigned long val;
+
+ if (kstrtoul(buf, 0, &val) < 0)
+ ret = -EINVAL;
+ else
+ tdev->tc_mask = val;
+ tsm_dev_put(tdev);
+
+ return ret;
+}
+
+static ssize_t tsm_tc_mask_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct tsm_dev *tdev = tsm_dev_get(dev);
+ ssize_t ret = sysfs_emit(buf, "%#x\n", tdev->tc_mask);
+
+ tsm_dev_put(tdev);
+ return ret;
+}
+
+static DEVICE_ATTR_RW(tsm_tc_mask);
+
+static ssize_t tsm_dev_connect_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct tsm_dev *tdev = tsm_dev_get(dev);
+ unsigned long val;
+ ssize_t ret = -EIO;
+
+ if (kstrtoul(buf, 0, &val) < 0)
+ ret = -EINVAL;
+ else if (val && !tdev->connected)
+ ret = tsm_dev_connect(tdev, tsm.private_data, val);
+ else if (!val && tdev->connected)
+ ret = tsm_dev_reclaim(tdev, tsm.private_data);
+
+ if (!ret)
+ ret = count;
+
+ tsm_dev_put(tdev);
+
+ return ret;
+}
+
+static ssize_t tsm_dev_connect_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct tsm_dev *tdev = tsm_dev_get(dev);
+ ssize_t ret = sysfs_emit(buf, "%u\n", tdev->connected);
+
+ tsm_dev_put(tdev);
+ return ret;
+}
+
+static DEVICE_ATTR_RW(tsm_dev_connect);
+
+static ssize_t tsm_sel_stream_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned int ide_dev = false, tee_dev = true, ide_rp = true, tee_rp = false;
+ unsigned int sel_index, id, def, en;
+ struct tsm_dev *tdev;
+
+ if (sscanf(buf, "%u %u %u %u %u %u %u %u", &sel_index, &id, &def, &en,
+ &ide_dev, &tee_dev, &ide_rp, &tee_rp) != 8) {
+ if (sscanf(buf, "%u %u %u %u", &sel_index, &id, &def, &en) != 4)
+ return -EINVAL;
+ }
+
+ if (sel_index >= ARRAY_SIZE(tdev->selective_ide) || id > 0x100)
+ return -EINVAL;
+
+ tdev = tsm_dev_get(dev);
+ if (en) {
+ tdev->selective_ide[sel_index].id = id;
+ tdev->selective_ide[sel_index].def = def;
+ tdev->selective_ide[sel_index].enable = 1;
+ tdev->selective_ide[sel_index].enabled = 0;
+ tdev->selective_ide[sel_index].dev_ide_cfg = ide_dev;
+ tdev->selective_ide[sel_index].dev_tee_limited = tee_dev;
+ tdev->selective_ide[sel_index].rootport_ide_cfg = ide_rp;
+ tdev->selective_ide[sel_index].rootport_tee_limited = tee_rp;
+ } else {
+ memset(&tdev->selective_ide[sel_index], 0, sizeof(tdev->selective_ide[0]));
+ }
+
+ tsm_dev_put(tdev);
+ return count;
+}
+
+static ssize_t tsm_sel_stream_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tsm_dev *tdev = tsm_dev_get(dev);
+ struct pci_dev *rootport = tdev->pdev->bus->self;
+ unsigned int i;
+ char *buf1;
+ ssize_t ret = 0, sz = PAGE_SIZE;
+
+ buf1 = kmalloc(sz, GFP_KERNEL);
+ if (!buf1)
+ return -ENOMEM;
+
+ buf1[0] = 0;
+ for (i = 0; i < ARRAY_SIZE(tdev->selective_ide); ++i) {
+ if (!tdev->selective_ide[i].enable)
+ continue;
+
+ ret += snprintf(buf1 + ret, sz - ret - 1, "%u: %d%s",
+ i,
+ tdev->selective_ide[i].id,
+ tdev->selective_ide[i].def ? " DEF" : "");
+ if (tdev->selective_ide[i].enabled) {
+ u32 devst = 0, rcst = 0;
+
+ pci_ide_get_sel_sta(tdev->pdev, i, &devst);
+ pci_ide_get_sel_sta(rootport, i, &rcst);
+ ret += snprintf(buf1 + ret, sz - ret - 1,
+ " %x%s %s%s<-> %x%s %s%s rootport:%s",
+ devst,
+ PCI_IDE_SEL_STS_STATUS(devst) == 2 ? "=SECURE" : "",
+ tdev->selective_ide[i].dev_ide_cfg ? "IDECfg " : "",
+ tdev->selective_ide[i].dev_tee_limited ? "TeeLim " : "",
+ rcst,
+ PCI_IDE_SEL_STS_STATUS(rcst) == 2 ? "=SECURE" : "",
+ tdev->selective_ide[i].rootport_ide_cfg ? "IDECfg " : "",
+ tdev->selective_ide[i].rootport_tee_limited ? "TeeLim " : "",
+ pci_name(rootport)
+ );
+ }
+ ret += snprintf(buf1 + ret, sz - ret - 1, "\n");
+ }
+ tsm_dev_put(tdev);
+
+ ret = sysfs_emit(buf, buf1);
+ kfree(buf1);
+
+ return ret;
+}
+
+static DEVICE_ATTR_RW(tsm_sel_stream);
+
+static ssize_t tsm_ide_refresh_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct tsm_dev *tdev = tsm_dev_get(dev);
+ int ret;
+
+ ret = tsm_ide_refresh(tdev, tsm.private_data);
+ tsm_dev_put(tdev);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static DEVICE_ATTR_WO(tsm_ide_refresh);
+
+static ssize_t blob_show(struct tsm_blob *blob, char *buf)
+{
+ unsigned int n, m;
+
+ if (!blob)
+ return sysfs_emit(buf, "none\n");
+
+ n = snprintf(buf, PAGE_SIZE, "%lu %u\n", blob->len,
+ kref_read(&blob->kref));
+ m = hex_dump_to_buffer(blob->data, blob->len, 32, 1,
+ buf + n, PAGE_SIZE - n, false);
+ n += min(PAGE_SIZE - n, m);
+ n += snprintf(buf + n, PAGE_SIZE - n, "...\n");
+ return n;
+}
+
+static ssize_t tsm_certs_gen(struct tsm_blob *certs, char *buf, size_t len)
+{
+ struct spdm_certchain_block_header *h;
+ unsigned int n = 0, m, i, off, o2;
+ u8 *p;
+
+ for (i = 0, off = 0; off < certs->len; ++i) {
+ h = (struct spdm_certchain_block_header *) ((u8 *)certs->data + off);
+ if (WARN_ON_ONCE(h->length > certs->len - off))
+ return 0;
+
+ n += snprintf(buf + n, len - n, "[%d] len=%d:\n", i, h->length);
+
+ for (o2 = 0, p = (u8 *)&h[1]; o2 < h->length; o2 += 32) {
+ m = hex_dump_to_buffer(p + o2, h->length - o2, 32, 1,
+ buf + n, len - n, true);
+ n += min(len - n, m);
+ n += snprintf(buf + n, len - n, "\n");
+ }
+
+ off += h->length; /* Includes the header */
+ }
+
+ return n;
+}
+
+static ssize_t tsm_certs_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct tsm_dev *tdev = tsm_dev_get(dev);
+ ssize_t n = 0;
+
+ if (!tdev->certs) {
+ n = sysfs_emit(buf, "none\n");
+ } else {
+ n = tsm_certs_gen(tdev->certs, buf, PAGE_SIZE);
+ if (!n)
+ n = blob_show(tdev->certs, buf);
+ }
+
+ tsm_dev_put(tdev);
+ return n;
+}
+
+static DEVICE_ATTR_RO(tsm_certs);
+
+static ssize_t tsm_meas_gen(struct tsm_blob *meas, char *buf, size_t len)
+{
+ static const char * const whats[] = {
+ "ImmuROM", "MutFW", "HWCfg", "FWCfg",
+ "MeasMft", "DevDbg", "MutFWVer", "MutFWVerSec"
+ };
+ struct dmtf_measurement_block_device_mode *dm;
+ struct spdm_measurement_block_header *mb;
+ struct dmtf_measurement_block_header *h;
+ unsigned int n = 0, m, off, what;
+ bool dmtf;
+
+ for (off = 0; off < meas->len; ) {
+ mb = (struct spdm_measurement_block_header *)(((u8 *) meas->data) + off);
+ dmtf = mb->spec & 1;
+
+ n += snprintf(buf + n, len - n, "#%d (%d) ", mb->index, mb->size);
+ if (dmtf) {
+ h = (void *) &mb[1];
+
+ if (WARN_ON_ONCE(mb->size != (sizeof(*h) + h->size)))
+ return -EINVAL;
+
+ what = h->type & 0x7F;
+ n += snprintf(buf + n, len - n, "%x=[%s %s]: ",
+ h->type,
+ h->type & 0x80 ? "digest" : "raw",
+ what < ARRAY_SIZE(whats) ? whats[what] : "reserved");
+
+ if (what == 5) {
+ dm = (struct dmtf_measurement_block_device_mode *) &h[1];
+ n += snprintf(buf + n, len - n, " %x %x %x %x",
+ dm->opmode_cap, dm->opmode_sta,
+ dm->devmode_cap, dm->devmode_sta);
+ } else {
+ m = hex_dump_to_buffer(&h[1], h->size, 32, 1,
+ buf + n, len - n, false);
+ n += min(PAGE_SIZE - n, m);
+ }
+ } else {
+ n += snprintf(buf + n, len - n, "spec=%x: ", mb->spec);
+ m = hex_dump_to_buffer(&mb[1], min(len - off, mb->size),
+ 32, 1, buf + n, len - n, false);
+ n += min(PAGE_SIZE - n, m);
+ }
+
+ off += sizeof(*mb) + mb->size;
+ n += snprintf(buf + n, PAGE_SIZE - n, "...\n");
+ }
+
+ return n;
+}
+
+static ssize_t tsm_meas_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct tsm_dev *tdev = tsm_dev_get(dev);
+ ssize_t n = 0;
+
+ if (!tdev->meas) {
+ n = sysfs_emit(buf, "none\n");
+ } else {
+ if (!n)
+ n = tsm_meas_gen(tdev->meas, buf, PAGE_SIZE);
+ if (!n)
+ n = blob_show(tdev->meas, buf);
+ }
+
+ tsm_dev_put(tdev);
+ return n;
+}
+
+static DEVICE_ATTR_RO(tsm_meas);
+
+static ssize_t tsm_dev_status_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct tsm_dev *tdev = tsm_dev_get(dev);
+ struct tsm_dev_status s = { 0 };
+ int ret = tsm_dev_status(tdev, tsm.private_data, &s);
+ ssize_t ret1;
+
+ ret1 = sysfs_emit(buf, "ret=%d\n"
+ "ctx_state=%x\n"
+ "tc_mask=%x\n"
+ "certs_slot=%x\n"
+ "device_id=%x\n"
+ "segment_id=%x\n"
+ "no_fw_update=%x\n",
+ ret,
+ s.ctx_state,
+ s.tc_mask,
+ s.certs_slot,
+ s.device_id,
+ s.segment_id,
+ s.no_fw_update);
+
+ tsm_dev_put(tdev);
+ return ret1;
+}
+
+static DEVICE_ATTR_RO(tsm_dev_status);
+
+static struct attribute *host_dev_attrs[] = {
+ &dev_attr_tsm_cert_slot.attr,
+ &dev_attr_tsm_tc_mask.attr,
+ &dev_attr_tsm_dev_connect.attr,
+ &dev_attr_tsm_sel_stream.attr,
+ &dev_attr_tsm_ide_refresh.attr,
+ &dev_attr_tsm_certs.attr,
+ &dev_attr_tsm_meas.attr,
+ &dev_attr_tsm_dev_status.attr,
+ NULL,
+};
+static const struct attribute_group host_dev_group = {
+ .attrs = host_dev_attrs,
+};
+
+static struct attribute *guest_dev_attrs[] = {
+ &dev_attr_tsm_certs.attr,
+ &dev_attr_tsm_meas.attr,
+ NULL,
+};
+static const struct attribute_group guest_dev_group = {
+ .attrs = guest_dev_attrs,
+};
+
+static ssize_t tsm_tdi_bind_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct tsm_tdi *tdi = tsm_tdi_get(dev);
+
+ if (!tdi->vmid)
+ return sysfs_emit(buf, "not bound\n");
+
+ return sysfs_emit(buf, "VM=%#llx ASID=%d BDFn=%x:%x.%d\n",
+ tdi->vmid, tdi->asid,
+ PCI_BUS_NUM(tdi->guest_rid), PCI_SLOT(tdi->guest_rid),
+ PCI_FUNC(tdi->guest_rid));
+}
+
+static DEVICE_ATTR_RO(tsm_tdi_bind);
+
+static ssize_t tsm_tdi_validate_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct tsm_tdi *tdi = tsm_tdi_get(dev);
+ unsigned long val;
+ ssize_t ret;
+
+ if (kstrtoul(buf, 0, &val) < 0)
+ return -EINVAL;
+
+ if (val) {
+ ret = tsm_tdi_validate(tdi, false, tsm.private_data);
+ if (ret)
+ return ret;
+ } else {
+ tsm_tdi_validate(tdi, true, tsm.private_data);
+ }
+
+ tdi->validated = val;
+
+ return count;
+}
+
+static ssize_t tsm_tdi_validate_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct tsm_tdi *tdi = tsm_tdi_get(dev);
+
+ return sysfs_emit(buf, "%u\n", tdi->validated);
+}
+
+static DEVICE_ATTR_RW(tsm_tdi_validate);
+
+ssize_t tsm_report_gen(struct tsm_blob *report, char *buf, size_t len)
+{
+ struct tdi_report_header *h = TDI_REPORT_HDR(report);
+ struct tdi_report_mmio_range *mr = TDI_REPORT_MR_OFF(report);
+ struct tdi_report_footer *f = TDI_REPORT_FTR(report);
+ unsigned int n, m, i;
+
+ n = snprintf(buf, len,
+ "no_fw_update=%u\ndma_no_pasid=%u\ndma_pasid=%u\nats=%u\nprs=%u\n",
+ h->no_fw_update, h->dma_no_pasid, h->dma_pasid, h->ats, h->prs);
+ n += snprintf(buf + n, len - n,
+ "msi_x_message_control=%#04x\nlnr_control=%#04x\n",
+ h->msi_x_message_control, h->lnr_control);
+ n += snprintf(buf + n, len - n, "tph_control=%#08x\n", h->tph_control);
+
+ for (i = 0; i < h->mmio_range_count; ++i) {
+ n += snprintf(buf + n, len - n,
+ "[%i] #%u %#016llx +%#lx MSIX%c PBA%c NonTEE%c Upd%c\n",
+ i, mr[i].range_id, mr[i].first_page << PAGE_SHIFT,
+ (unsigned long) mr[i].num << PAGE_SHIFT,
+ mr[i].msix_table ? '+':'-',
+ mr[i].msix_pba ? '+':'-',
+ mr[i].is_non_tee_mem ? '+':'-',
+ mr[i].is_mem_attr_updatable ? '+':'-');
+ if (mr[i].reserved)
+ n += snprintf(buf + n, len - n,
+ "[%i] WARN: reserved=%#x\n", i, mr[i].range_attributes);
+ }
+
+ if (f->device_specific_info_len) {
+ unsigned int num = report->len - ((u8 *)f->device_specific_info - (u8 *)h);
+
+ num = min(num, f->device_specific_info_len);
+ n += snprintf(buf + n, len - n, "DevSp len=%d%s",
+ f->device_specific_info_len, num ? ": " : "");
+ m = hex_dump_to_buffer(f->device_specific_info, num, 32, 1,
+ buf + n, len - n, false);
+ n += min(len - n, m);
+ n += snprintf(buf + n, len - n, m ? "\n" : "...\n");
+ }
+
+ return n;
+}
+EXPORT_SYMBOL_GPL(tsm_report_gen);
+
+static ssize_t tsm_report_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct tsm_tdi *tdi = tsm_tdi_get(dev);
+ ssize_t n = 0;
+
+ if (!tdi->report) {
+ n = sysfs_emit(buf, "none\n");
+ } else {
+ if (!n)
+ n = tsm_report_gen(tdi->report, buf, PAGE_SIZE);
+ if (!n)
+ n = blob_show(tdi->report, buf);
+ }
+
+ return n;
+}
+
+static DEVICE_ATTR_RO(tsm_report);
+
+/*
+ * Render the negotiated SPDM algorithm bitmask as a space-separated list of
+ * names into @buf (at most @len bytes, always NUL-terminated).
+ */
+static char *spdm_algos_to_str(u64 algos, char *buf, size_t len)
+{
+	static const struct {
+		unsigned int bit;
+		const char *name;
+	} algo_names[] = {
+		{ TSM_TDI_SPDM_ALGOS_DHE_SECP256R1, "DHE_SECP256R1" },
+		{ TSM_TDI_SPDM_ALGOS_DHE_SECP384R1, "DHE_SECP384R1" },
+		{ TSM_TDI_SPDM_ALGOS_AEAD_AES_128_GCM, "AEAD_AES_128_GCM" },
+		{ TSM_TDI_SPDM_ALGOS_AEAD_AES_256_GCM, "AEAD_AES_256_GCM" },
+		{ TSM_TDI_SPDM_ALGOS_ASYM_TPM_ALG_RSASSA_3072, "ASYM_TPM_ALG_RSASSA_3072" },
+		{ TSM_TDI_SPDM_ALGOS_ASYM_TPM_ALG_ECDSA_ECC_NIST_P256, "ASYM_TPM_ALG_ECDSA_ECC_NIST_P256" },
+		{ TSM_TDI_SPDM_ALGOS_ASYM_TPM_ALG_ECDSA_ECC_NIST_P384, "ASYM_TPM_ALG_ECDSA_ECC_NIST_P384" },
+		{ TSM_TDI_SPDM_ALGOS_HASH_TPM_ALG_SHA_256, "HASH_TPM_ALG_SHA_256" },
+		{ TSM_TDI_SPDM_ALGOS_HASH_TPM_ALG_SHA_384, "HASH_TPM_ALG_SHA_384" },
+		{ TSM_TDI_SPDM_ALGOS_KEY_SCHED_SPDM_KEY_SCHEDULE, "KEY_SCHED_SPDM_KEY_SCHEDULE" },
+	};
+	size_t pos = 0;
+	unsigned int i;
+
+	buf[0] = 0;
+	for (i = 0; i < ARRAY_SIZE(algo_names); i++) {
+		if (pos >= len)
+			break;
+		if (!(algos & (1ULL << algo_names[i].bit)))
+			continue;
+		pos += snprintf(buf + pos, len - pos, "%s ", algo_names[i].name);
+	}
+
+	return buf;
+}
+
+/* Human-readable name for a TDISP interface state, for sysfs output */
+static const char *tdisp_state_to_str(enum tsm_tdisp_state state)
+{
+	switch (state) {
+	case TDISP_STATE_UNAVAIL:
+		return "TDISP state unavailable";
+	case TDISP_STATE_CONFIG_UNLOCKED:
+		return "CONFIG_UNLOCKED";
+	case TDISP_STATE_CONFIG_LOCKED:
+		return "CONFIG_LOCKED";
+	case TDISP_STATE_RUN:
+		return "RUN";
+	case TDISP_STATE_ERROR:
+		return "ERROR";
+	default:
+		return "unknown";
+	}
+}
+
+/*
+ * sysfs "tsm_tdi_status" attribute: queries the TSM for the current TDI
+ * state and renders it, including negotiated SPDM algorithms and the
+ * certificates/measurements/report digests.
+ */
+static ssize_t tsm_tdi_status_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct tsm_tdi *tdi = tsm_tdi_get(dev);
+	struct tsm_tdi_status ts = { 0 };
+	char algos[256] = "";
+	size_t n, m;
+	int ret;
+
+	/* The attribute can race with TDI teardown via the bus notifier */
+	if (!tdi)
+		return sysfs_emit(buf, "not available\n");
+
+	ret = tsm_tdi_status(tdi, tsm.private_data, &ts);
+	if (ret < 0)
+		return sysfs_emit(buf, "ret=%d\n\n", ret);
+
+	if (!ts.valid)
+		return sysfs_emit(buf, "ret=%d\nstate=%d:%s\n",
+				  ret, ts.state, tdisp_state_to_str(ts.state));
+
+	n = snprintf(buf, PAGE_SIZE,
+		     "ret=%d\n"
+		     "state=%d:%s\n"
+		     "meas_digest_fresh=%x\n"
+		     "meas_digest_valid=%x\n"
+		     "all_request_redirect=%x\n"
+		     "bind_p2p=%x\n"
+		     "lock_msix=%x\n"
+		     "no_fw_update=%x\n"
+		     "cache_line_size=%d\n"
+		     "algos=%#llx:%s\n"
+		     ,
+		     ret,
+		     ts.state, tdisp_state_to_str(ts.state),
+		     ts.meas_digest_fresh,
+		     ts.meas_digest_valid,
+		     ts.all_request_redirect,
+		     ts.bind_p2p,
+		     ts.lock_msix,
+		     ts.no_fw_update,
+		     ts.cache_line_size,
+		     ts.spdm_algos, spdm_algos_to_str(ts.spdm_algos, algos, sizeof(algos) - 1));
+
+	/* size_t n/m keeps min() arguments the same type as PAGE_SIZE - n */
+	n += snprintf(buf + n, PAGE_SIZE - n, "Certs digest: ");
+	m = hex_dump_to_buffer(ts.certs_digest, sizeof(ts.certs_digest), 32, 1,
+			       buf + n, PAGE_SIZE - n, false);
+	n += min(PAGE_SIZE - n, m);
+	n += snprintf(buf + n, PAGE_SIZE - n, "...\nMeasurements digest: ");
+	m = hex_dump_to_buffer(ts.meas_digest, sizeof(ts.meas_digest), 32, 1,
+			       buf + n, PAGE_SIZE - n, false);
+	n += min(PAGE_SIZE - n, m);
+	n += snprintf(buf + n, PAGE_SIZE - n, "...\nInterface report digest: ");
+	m = hex_dump_to_buffer(ts.interface_report_digest, sizeof(ts.interface_report_digest),
+			       32, 1, buf + n, PAGE_SIZE - n, false);
+	n += min(PAGE_SIZE - n, m);
+	n += snprintf(buf + n, PAGE_SIZE - n, "...\n");
+
+	return n;
+}
+
+static DEVICE_ATTR_RO(tsm_tdi_status);
+
+/* TDI sysfs attributes installed when running on the host (ops->dev_connect set) */
+static struct attribute *host_tdi_attrs[] = {
+	&dev_attr_tsm_tdi_bind.attr,
+	&dev_attr_tsm_report.attr,
+	&dev_attr_tsm_tdi_status.attr,
+	NULL,
+};
+
+static const struct attribute_group host_tdi_group = {
+	.attrs = host_tdi_attrs,
+};
+
+/* TDI sysfs attributes installed when running inside a guest VM */
+static struct attribute *guest_tdi_attrs[] = {
+	&dev_attr_tsm_tdi_validate.attr,
+	&dev_attr_tsm_report.attr,
+	&dev_attr_tsm_tdi_status.attr,
+	NULL,
+};
+
+static const struct attribute_group guest_tdi_group = {
+	.attrs = guest_tdi_attrs,
+};
+
+/*
+ * Create the TDI state for @pdev, attach it to @tdev (taking a reference)
+ * and expose the host or guest sysfs attribute group plus a "tsm_dev"
+ * symlink to the owning physical function.
+ */
+static int tsm_tdi_init(struct tsm_dev *tdev, struct pci_dev *pdev)
+{
+	struct tsm_tdi *tdi;
+	int ret = 0;
+
+	dev_info(&pdev->dev, "Initializing tdi\n");
+	if (!tdev)
+		return -ENODEV;
+
+	tdi = kzalloc(sizeof(*tdi), GFP_KERNEL);
+	if (!tdi)
+		return -ENOMEM;
+
+	/* tsm_dev_get() requires pdev->dev.tdi which is set later */
+	if (!kref_get_unless_zero(&tdev->kref)) {
+		ret = -EPERM;
+		goto free_exit;
+	}
+
+	if (tsm.ops->dev_connect)
+		tdi->ag = &host_tdi_group;
+	else
+		tdi->ag = &guest_tdi_group;
+
+	ret = sysfs_create_link(&pdev->dev.kobj, &tdev->pdev->dev.kobj, "tsm_dev");
+	if (ret)
+		goto put_exit;
+
+	ret = device_add_group(&pdev->dev, tdi->ag);
+	if (ret)
+		goto sysfs_unlink_exit;
+
+	tdi->tdev = tdev;
+	tdi->pdev = pci_dev_get(pdev);
+
+	pdev->dev.tdi_enabled = !pdev->is_physfn || tsm.physfn;
+	pdev->dev.tdi = tdi;
+	pci_info(pdev, "TDI enabled=%d\n", pdev->dev.tdi_enabled);
+
+	return 0;
+
+sysfs_unlink_exit:
+	sysfs_remove_link(&pdev->dev.kobj, "tsm_dev");
+put_exit:
+	/* Drop the tdev reference taken above, it would leak otherwise */
+	tsm_dev_put(tdev);
+free_exit:
+	kfree(tdi);
+
+	return ret;
+}
+
+/*
+ * Undo tsm_tdi_init(): remove the sysfs group/link, detach the TDI from the
+ * device and drop the tdev and pdev references.  The references are dropped
+ * last so tdi->pdev is not dereferenced after its refcount may have hit zero.
+ */
+static void tsm_tdi_free(struct tsm_tdi *tdi)
+{
+	device_remove_group(&tdi->pdev->dev, tdi->ag);
+	sysfs_remove_link(&tdi->pdev->dev.kobj, "tsm_dev");
+	tdi->pdev->dev.tdi = NULL;
+	tdi->pdev->dev.tdi_enabled = false;
+
+	tsm_dev_put(tdi->tdev);
+	pci_dev_put(tdi->pdev);
+	kfree(tdi);
+}
+
+/*
+ * Create the per-physical-function TSM state for @pdev: take one reference
+ * on the device, add the host/guest sysfs group and, on the host, locate the
+ * (secured) CMA SPDM DOE mailboxes.  Released via the tdev kref.
+ */
+static int tsm_dev_init(struct pci_dev *pdev, struct tsm_dev **ptdev)
+{
+	struct tsm_dev *tdev;
+	int ret = 0;
+
+	dev_info(&pdev->dev, "Initializing tdev\n");
+	tdev = kzalloc(sizeof(*tdev), GFP_KERNEL);
+	if (!tdev)
+		return -ENOMEM;
+
+	kref_init(&tdev->kref);
+	tdev->tc_mask = tsm.tc_mask;
+	tdev->cert_slot = tsm.cert_slot;
+	/* The only reference taken here, balanced in tsm_dev_free() */
+	tdev->pdev = pci_dev_get(pdev);
+	mutex_init(&tdev->spdm_mutex);
+
+	if (tsm.ops->dev_connect)
+		tdev->ag = &host_dev_group;
+	else
+		tdev->ag = &guest_dev_group;
+
+	ret = device_add_group(&pdev->dev, tdev->ag);
+	if (ret)
+		goto put_free_exit;
+
+	if (tsm.ops->dev_connect) {
+		ret = -EPERM;
+		tdev->spdm.doe_mb = pci_find_doe_mailbox(tdev->pdev,
+							 PCI_VENDOR_ID_PCI_SIG,
+							 PCI_DOE_PROTOCOL_CMA_SPDM);
+		if (!tdev->spdm.doe_mb)
+			goto group_remove_exit;
+
+		tdev->spdm.doe_mb_secured = pci_find_doe_mailbox(tdev->pdev,
+							 PCI_VENDOR_ID_PCI_SIG,
+							 PCI_DOE_PROTOCOL_SECURED_CMA_SPDM);
+		if (!tdev->spdm.doe_mb_secured)
+			goto group_remove_exit;
+	}
+
+	*ptdev = tdev;
+	return 0;
+
+group_remove_exit:
+	device_remove_group(&pdev->dev, tdev->ag);
+put_free_exit:
+	pci_dev_put(pdev);
+	kfree(tdev);
+
+	return ret;
+}
+
+/*
+ * kref release callback: tears down the per-physical-function state.
+ * Runs when the last tsm_dev_put() drops the refcount to zero.
+ */
+static void tsm_dev_free(struct kref *kref)
+{
+	struct tsm_dev *tdev = container_of(kref, struct tsm_dev, kref);
+
+	device_remove_group(&tdev->pdev->dev, tdev->ag);
+
+	/* Let the TSM reclaim the device if it is still connected */
+	if (tdev->connected)
+		tsm_dev_reclaim(tdev, tsm.private_data);
+
+	dev_info(&tdev->pdev->dev, "Freeing TDEV\n");
+	pci_dev_put(tdev->pdev);
+	kfree(tdev);
+}
+
+/*
+ * Set up TSM state for a newly discovered PCI device, if it is TEE-IO
+ * capable.  In a guest every capable function gets a tdev+tdi; on the host
+ * function 0 of a physical function gets a tdev+tdi and virtual functions
+ * get a tdi attached to their PF0's tdev.
+ */
+static int tsm_alloc_device(struct pci_dev *pdev)
+{
+	int ret = 0;
+
+	/* It is guest VM == TVM */
+	if (!tsm.ops->dev_connect) {
+		if (pdev->devcap & PCI_EXP_DEVCAP_TEE_IO) {
+			struct tsm_dev *tdev = NULL;
+
+			ret = tsm_dev_init(pdev, &tdev);
+			if (ret)
+				return ret;
+
+			ret = tsm_tdi_init(tdev, pdev);
+			tsm_dev_put(tdev);
+			return ret;
+		}
+		return 0;
+	}
+
+	if (pdev->is_physfn && (PCI_FUNC(pdev->devfn) == 0) &&
+	    (pdev->devcap & PCI_EXP_DEVCAP_TEE_IO)) {
+		struct tsm_dev *tdev = NULL;
+
+		ret = tsm_dev_init(pdev, &tdev);
+		if (ret)
+			return ret;
+
+		ret = tsm_tdi_init(tdev, pdev);
+		tsm_dev_put(tdev);
+		return ret;
+	}
+
+	if (pdev->is_virtfn) {
+		struct pci_dev *pf0 = pci_get_slot(pdev->physfn->bus,
+						   pdev->physfn->devfn & ~7);
+
+		if (pf0 && (pf0->devcap & PCI_EXP_DEVCAP_TEE_IO)) {
+			struct tsm_dev *tdev = tsm_dev_get(&pf0->dev);
+
+			ret = tsm_tdi_init(tdev, pdev);
+			tsm_dev_put(tdev);
+		}
+		/* pci_get_slot() elevated the refcount; NULL-safe */
+		pci_dev_put(pf0);
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * Tear down the TDI of a device going away (bus notifier or module unload).
+ * No-op for devices which never had a TDI attached.
+ */
+static void tsm_dev_freeice(struct device *dev)
+{
+	struct tsm_tdi *tdi = tsm_tdi_get(dev);
+
+	if (!tdi)
+		return;
+
+	tsm_tdi_free(tdi);
+}
+
+/*
+ * PCI bus notifier: creates/destroys TSM state on device hotplug and
+ * invalidates a TDI when its driver is unbound.
+ */
+static int tsm_pci_bus_notifier(struct notifier_block *nb, unsigned long action, void *data)
+{
+	struct tsm_tdi *tdi;
+
+	switch (action) {
+	case BUS_NOTIFY_ADD_DEVICE:
+		tsm_alloc_device(to_pci_dev(data));
+		break;
+	case BUS_NOTIFY_DEL_DEVICE:
+		tsm_dev_freeice(data);
+		break;
+	case BUS_NOTIFY_UNBOUND_DRIVER:
+		/* Not every PCI device has a TDI attached */
+		tdi = tsm_tdi_get(data);
+		if (tdi)
+			tsm_tdi_validate(tdi, true, tsm.private_data);
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block tsm_pci_bus_nb = {
+	.notifier_call = tsm_pci_bus_notifier,
+};
+
+static int __init tsm_init(void)
+{
+	/* Device discovery happens when a platform calls tsm_set_ops() */
+	pr_info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
+	return 0;
+}
+
+static void __exit tsm_cleanup(void)
+{
+}
+
+/*
+ * Register (or, with @ops == NULL, unregister) the platform TSM callbacks.
+ * Registration scans the PCI bus for TEE-IO capable devices and installs a
+ * bus notifier to catch hotplug; unregistration undoes both.
+ */
+void tsm_set_ops(struct tsm_ops *ops, void *private_data)
+{
+	struct pci_dev *pdev = NULL;
+	int ret;
+
+	if (!tsm.ops && ops) {
+		tsm.ops = ops;
+		tsm.private_data = private_data;
+
+		for_each_pci_dev(pdev) {
+			ret = tsm_alloc_device(pdev);
+			if (ret) {
+				/* Drop the reference held by the iterator */
+				pci_dev_put(pdev);
+				break;
+			}
+		}
+		bus_register_notifier(&pci_bus_type, &tsm_pci_bus_nb);
+	} else {
+		bus_unregister_notifier(&pci_bus_type, &tsm_pci_bus_nb);
+		for_each_pci_dev(pdev)
+			tsm_dev_freeice(&pdev->dev);
+		tsm.ops = ops;
+	}
+}
+EXPORT_SYMBOL_GPL(tsm_set_ops);
+
+/*
+ * Bind a TDI to a guest VM: record the guest identifiers and run the
+ * platform's tdi_bind() hook, forwarding SPDM traffic (positive return
+ * values request a spdm_forward() round-trip) until the hook completes
+ * with 0 or fails with a negative value.  On failure the TDI is unbound.
+ * Serialized against other SPDM users via tdev->spdm_mutex.
+ */
+int tsm_tdi_bind(struct tsm_tdi *tdi, u32 guest_rid, u64 vmid, u32 asid)
+{
+	int ret;
+
+	if (WARN_ON(!tsm.ops->tdi_bind))
+		return -EPERM;
+
+	tdi->guest_rid = guest_rid;
+	tdi->vmid = vmid;
+	tdi->asid = asid;
+
+	mutex_lock(&tdi->tdev->spdm_mutex);
+	while (1) {
+		ret = tsm.ops->tdi_bind(tdi, guest_rid, vmid, asid, tsm.private_data);
+		if (ret < 0)
+			break;
+
+		if (!ret)
+			break;
+
+		/* ret > 0: the hook produced SPDM data to exchange with the device */
+		ret = spdm_forward(&tdi->tdev->spdm, ret);
+		if (ret < 0)
+			break;
+	}
+	mutex_unlock(&tdi->tdev->spdm_mutex);
+
+	if (ret) {
+		tsm_tdi_unbind(tdi);
+		return ret;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(tsm_tdi_bind);
+
+/* Unbind a TDI from its guest: reclaim it from the TSM and clear the ids */
+void tsm_tdi_unbind(struct tsm_tdi *tdi)
+{
+	tsm_tdi_reclaim(tdi, tsm.private_data);
+	tdi->vmid = 0;
+	tdi->asid = 0;
+	tdi->guest_rid = 0;
+}
+EXPORT_SYMBOL_GPL(tsm_tdi_unbind);
+
+/*
+ * Relay a guest request for @tdi to the platform, forwarding SPDM traffic
+ * (positive return values) until the request completes (<= 0).
+ * Serialized against other SPDM users via tdev->spdm_mutex.
+ */
+int tsm_guest_request(struct tsm_tdi *tdi, enum tsm_tdisp_state *state, void *req_data)
+{
+	int ret;
+
+	if (!tsm.ops->guest_request)
+		return -EPERM;
+
+	mutex_lock(&tdi->tdev->spdm_mutex);
+	while (1) {
+		ret = tsm.ops->guest_request(tdi, tdi->guest_rid, tdi->vmid, req_data,
+					     state, tsm.private_data);
+		if (ret <= 0)
+			break;
+
+		ret = spdm_forward(&tdi->tdev->spdm, ret);
+		if (ret < 0)
+			break;
+	}
+	mutex_unlock(&tdi->tdev->spdm_mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(tsm_guest_request);
+
+/*
+ * Look up the TDI bound to a given guest BDF (@guest_rid) of VM @vmid.
+ *
+ * NOTE(review): for_each_pci_dev() holds a reference on the device being
+ * visited; returning from inside the loop leaves that reference elevated
+ * and nothing here drops it — confirm whether the caller is expected to
+ * put it, or whether this is an intentional pin for the returned TDI.
+ */
+struct tsm_tdi *tsm_tdi_find(u32 guest_rid, u64 vmid)
+{
+	struct pci_dev *pdev = NULL;
+	struct tsm_tdi *tdi;
+
+	for_each_pci_dev(pdev) {
+		tdi = tsm_tdi_get(&pdev->dev);
+		if (!tdi)
+			continue;
+
+		if (tdi->vmid == vmid && tdi->guest_rid == guest_rid)
+			return tdi;
+	}
+
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(tsm_tdi_find);
+
+module_init(tsm_init);
+module_exit(tsm_cleanup);
+
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/Documentation/virt/coco/tsm.rst b/Documentation/virt/coco/tsm.rst
new file mode 100644
index 000000000000..3be6e8491e42
--- /dev/null
+++ b/Documentation/virt/coco/tsm.rst
@@ -0,0 +1,62 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+What it is
+==========
+
+This is for PCI passthrough in confidential computing (CoCo: SEV-SNP, TDX, CoVE).
+Currently, passing a PCI device through to a CoCo VM requires bouncing DMA via
+SWIOTLB buffers located in pre-shared memory.
+
+PCIe IDE (Integrity and Data Encryption) and TDISP (TEE Device Interface Security
+Protocol) are protocols to enable encryption over the PCIe link and DMA to encrypted
+memory. This document focuses on DMA to an encrypted VM; encrypted host memory is
+out of scope.
+
+
+Protocols
+=========
+
+PCIe r6 DOE is a mailbox protocol to read/write objects from/to a device.
+Objects are of plain SPDM or secure SPDM type. SPDM is responsible for authenticating
+devices and creating a secure link between a device and the TSM.
+IDE_KM manages PCIe link encryption keys; it works on top of secure SPDM.
+TDISP manages the state of a passed-through PCI function; it also works on top of
+secure SPDM. Additionally, PCIe defines the IDE capability which provides the host OS
+a way to enable streams on the PCIe link.
+
+
+TSM module
+==========
+
+This is the common place to trigger device authentication and key management.
+It exposes certificates/measurements/reports/status via sysfs and provides control
+over the link (limited though by the TSM capabilities).
+A platform is expected to register a specific set of hooks. The same module works
+in both the host and the guest OS, although the set of required platform hooks differs.
+
+
+Flow
+====
+
+At boot time, tsm.ko scans the PCI bus to find and set up TDISP-capable
+devices; it also listens to hotplug events. If setup was successful, tsm-prefixed
+nodes will appear in sysfs.
+
+Then, the user enables IDE by writing to /sys/bus/pci/devices/0000:e1:00.0/tsm_dev_connect
+and this is how PCIe encryption is enabled.
+
+To pass the device through, a modified VMM is required.
+
+In the VM, the same tsm.ko loads. In addition to the host's setup, the VM wants
+to receive the report and enable secure DMA or/and secure MMIO, via some VM<->HV
+protocol (such as AMD GHCB). Once this is done, a VM can access validated MMIO
+with the Cbit set and the device can DMA to encrypted memory.
+
+
+References
+==========
+
+[1] TEE Device Interface Security Protocol - TDISP - v2022-07-27
+https://members.pcisig.com/wg/PCI-SIG/document/18268?downloadRevision=21500
+[2] Security Protocol and Data Model (SPDM)
+https://www.dmtf.org/sites/default/files/standards/documents/DSP0274_1.2.1.pdf
diff --git a/drivers/virt/coco/Kconfig b/drivers/virt/coco/Kconfig
index 87d142c1f932..67a9c9daf96d 100644
--- a/drivers/virt/coco/Kconfig
+++ b/drivers/virt/coco/Kconfig
@@ -7,6 +7,17 @@ config TSM_REPORTS
select CONFIGFS_FS
tristate
+config TSM
+ tristate "Platform support for TEE Device Interface Security Protocol (TDISP)"
+ default m
+ depends on AMD_MEM_ENCRYPT
+ select PCI_DOE
+ select PCI_IDE
+ help
+ Add a common place for user visible platform support for PCIe TDISP.
+ TEE Device Interface Security Protocol (TDISP) from PCI-SIG,
+ https://pcisig.com/tee-device-interface-security-protocol-tdisp
+
source "drivers/virt/coco/efi_secret/Kconfig"
source "drivers/virt/coco/sev-guest/Kconfig"
--
2.45.2
next prev parent reply other threads:[~2024-08-23 13:29 UTC|newest]
Thread overview: 128+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-08-23 13:21 [RFC PATCH 00/21] Secure VFIO, TDISP, SEV TIO Alexey Kardashevskiy
2024-08-23 13:21 ` [RFC PATCH 01/21] tsm-report: Rename module to reflect what it does Alexey Kardashevskiy
2024-08-23 22:17 ` Bjorn Helgaas
2024-08-28 13:49 ` Jonathan Cameron
2024-08-30 0:13 ` Dan Williams
2024-09-02 1:29 ` Alexey Kardashevskiy
2024-08-23 13:21 ` [RFC PATCH 02/21] pci/doe: Define protocol types and make those public Alexey Kardashevskiy
2024-08-23 22:18 ` Bjorn Helgaas
2024-08-30 2:15 ` Dan Williams
2024-08-23 13:21 ` [RFC PATCH 03/21] pci: Define TEE-IO bit in PCIe device capabilities Alexey Kardashevskiy
2024-08-23 22:19 ` Bjorn Helgaas
2024-08-28 13:54 ` Jonathan Cameron
2024-08-30 2:21 ` Dan Williams
2024-08-30 4:04 ` Alexey Kardashevskiy
2024-08-30 21:37 ` Dan Williams
2024-08-23 13:21 ` [RFC PATCH 04/21] PCI/IDE: Define Integrity and Data Encryption (IDE) extended capability Alexey Kardashevskiy
2024-08-23 22:28 ` Bjorn Helgaas
2024-08-28 14:24 ` Jonathan Cameron
2024-08-30 2:41 ` Dan Williams
2024-08-23 13:21 ` [RFC PATCH 05/21] crypto/ccp: Make some SEV helpers public Alexey Kardashevskiy
2024-08-30 2:45 ` Dan Williams
2024-08-23 13:21 ` [RFC PATCH 06/21] crypto: ccp: Enable SEV-TIO feature in the PSP when supported Alexey Kardashevskiy
2024-08-28 14:32 ` Jonathan Cameron
2024-09-03 21:27 ` Dan Williams
2024-09-05 2:29 ` Alexey Kardashevskiy
2024-09-05 17:40 ` Dan Williams
2024-08-23 13:21 ` Alexey Kardashevskiy [this message]
2024-08-27 12:32 ` [RFC PATCH 07/21] pci/tdisp: Introduce tsm module Jason Gunthorpe
2024-08-28 3:00 ` Alexey Kardashevskiy
2024-08-28 23:42 ` Jason Gunthorpe
2024-08-29 0:00 ` Dan Williams
2024-08-29 0:09 ` Jason Gunthorpe
2024-08-29 0:20 ` Dan Williams
2024-08-29 12:03 ` Jason Gunthorpe
2024-08-29 4:57 ` Alexey Kardashevskiy
2024-08-29 12:07 ` Jason Gunthorpe
2024-09-02 0:52 ` Alexey Kardashevskiy
2024-08-28 15:04 ` Jonathan Cameron
2024-09-02 6:50 ` Aneesh Kumar K.V
2024-09-02 7:26 ` Alexey Kardashevskiy
2024-09-03 23:51 ` Dan Williams
2024-09-04 11:13 ` Alexey Kardashevskiy
2024-09-04 23:28 ` Dan Williams
2024-08-23 13:21 ` [RFC PATCH 08/21] crypto/ccp: Implement SEV TIO firmware interface Alexey Kardashevskiy
2024-08-28 15:39 ` Jonathan Cameron
2024-08-23 13:21 ` [RFC PATCH 09/21] kvm: Export kvm_vm_set_mem_attributes Alexey Kardashevskiy
2024-08-23 13:21 ` [RFC PATCH 10/21] vfio: Export helper to get vfio_device from fd Alexey Kardashevskiy
2024-08-23 13:21 ` [RFC PATCH 11/21] KVM: SEV: Add TIO VMGEXIT and bind TDI Alexey Kardashevskiy
2024-08-29 10:08 ` Xu Yilun
2024-08-30 4:00 ` Alexey Kardashevskiy
2024-08-30 7:02 ` Xu Yilun
2024-09-02 1:24 ` Alexey Kardashevskiy
2024-09-13 13:50 ` Zhi Wang
2024-09-13 22:08 ` Dan Williams
2024-09-14 2:47 ` Tian, Kevin
2024-09-14 5:19 ` Zhi Wang
2024-09-18 10:45 ` Xu Yilun
2024-09-20 3:41 ` Tian, Kevin
2024-08-23 13:21 ` [RFC PATCH 12/21] KVM: IOMMUFD: MEMFD: Map private pages Alexey Kardashevskiy
2024-08-26 8:39 ` Tian, Kevin
2024-08-26 12:30 ` Jason Gunthorpe
2024-08-29 9:34 ` Xu Yilun
2024-08-29 12:15 ` Jason Gunthorpe
2024-08-30 3:47 ` Alexey Kardashevskiy
2024-08-30 12:35 ` Jason Gunthorpe
2024-09-02 1:09 ` Alexey Kardashevskiy
2024-09-02 23:52 ` Jason Gunthorpe
2024-09-03 0:03 ` Alexey Kardashevskiy
2024-09-03 0:37 ` Jason Gunthorpe
2024-08-30 5:20 ` Xu Yilun
2024-08-30 12:36 ` Jason Gunthorpe
2024-09-03 20:34 ` Dan Williams
2024-09-04 0:02 ` Jason Gunthorpe
2024-09-04 0:59 ` Dan Williams
2024-09-05 8:29 ` Tian, Kevin
2024-09-05 12:02 ` Jason Gunthorpe
2024-09-05 12:07 ` Tian, Kevin
2024-09-05 12:00 ` Jason Gunthorpe
2024-09-05 12:17 ` Tian, Kevin
2024-09-05 12:23 ` Jason Gunthorpe
2024-09-05 20:53 ` Dan Williams
2024-09-05 23:06 ` Jason Gunthorpe
2024-09-06 2:46 ` Tian, Kevin
2024-09-06 13:54 ` Jason Gunthorpe
2024-09-06 2:41 ` Tian, Kevin
2024-08-27 2:27 ` Alexey Kardashevskiy
2024-08-27 2:31 ` Tian, Kevin
2024-09-15 21:07 ` Jason Gunthorpe
2024-09-20 21:10 ` Vishal Annapurve
2024-09-23 5:35 ` Tian, Kevin
2024-09-23 6:34 ` Vishal Annapurve
2024-09-23 8:24 ` Tian, Kevin
2024-09-23 16:02 ` Jason Gunthorpe
2024-09-23 23:52 ` Tian, Kevin
2024-09-24 12:07 ` Jason Gunthorpe
2024-09-25 8:44 ` Vishal Annapurve
2024-09-25 15:41 ` Jason Gunthorpe
2024-09-23 20:53 ` Vishal Annapurve
2024-09-23 23:55 ` Tian, Kevin
2024-08-23 13:21 ` [RFC PATCH 13/21] KVM: X86: Handle private MMIO as shared Alexey Kardashevskiy
2024-08-30 16:57 ` Xu Yilun
2024-09-02 2:22 ` Alexey Kardashevskiy
2024-09-03 5:13 ` Xu Yilun
2024-09-06 3:31 ` Alexey Kardashevskiy
2024-09-09 10:07 ` Xu Yilun
2024-09-10 1:28 ` Alexey Kardashevskiy
2024-08-23 13:21 ` [RFC PATCH 14/21] RFC: iommu/iommufd/amd: Add IOMMU_HWPT_TRUSTED flag, tweak DTE's DomainID, IOTLB Alexey Kardashevskiy
2024-08-27 12:17 ` Jason Gunthorpe
2024-08-23 13:21 ` [RFC PATCH 15/21] coco/sev-guest: Allow multiple source files in the driver Alexey Kardashevskiy
2024-08-23 13:21 ` [RFC PATCH 16/21] coco/sev-guest: Make SEV-to-PSP request helpers public Alexey Kardashevskiy
2024-08-23 13:21 ` [RFC PATCH 17/21] coco/sev-guest: Implement the guest side of things Alexey Kardashevskiy
2024-08-28 15:54 ` Jonathan Cameron
2024-09-14 7:19 ` Zhi Wang
2024-09-16 1:18 ` Alexey Kardashevskiy
2024-08-23 13:21 ` [RFC PATCH 18/21] RFC: pci: Add BUS_NOTIFY_PCI_BUS_MASTER event Alexey Kardashevskiy
2024-08-23 13:21 ` [RFC PATCH 19/21] sev-guest: Stop changing encrypted page state for TDISP devices Alexey Kardashevskiy
2024-08-23 13:21 ` [RFC PATCH 20/21] pci: Allow encrypted MMIO mapping via sysfs Alexey Kardashevskiy
2024-08-23 22:37 ` Bjorn Helgaas
2024-09-02 8:22 ` Alexey Kardashevskiy
2024-09-03 21:46 ` Bjorn Helgaas
2024-08-23 13:21 ` [RFC PATCH 21/21] pci: Define pci_iomap_range_encrypted Alexey Kardashevskiy
2024-08-28 20:43 ` [RFC PATCH 00/21] Secure VFIO, TDISP, SEV TIO Dan Williams
2024-08-29 14:13 ` Alexey Kardashevskiy
2024-08-29 23:41 ` Dan Williams
2024-08-30 4:38 ` Alexey Kardashevskiy
2024-08-30 21:57 ` Dan Williams
2024-09-05 8:21 ` Tian, Kevin
2024-09-03 15:56 ` Sean Christopherson
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20240823132137.336874-8-aik@amd.com \
--to=aik@amd.com \
--cc=agraf@suse.de \
--cc=alex.williamson@redhat.com \
--cc=dan.j.williams@intel.com \
--cc=david.kaplan@amd.com \
--cc=dhaval.giani@amd.com \
--cc=iommu@lists.linux.dev \
--cc=kvm@vger.kernel.org \
--cc=linux-coco@lists.linux.dev \
--cc=linux-pci@vger.kernel.org \
--cc=lukas@wunner.de \
--cc=michael.day@amd.com \
--cc=michael.roth@amd.com \
--cc=nikunj@amd.com \
--cc=pratikrajesh.sampat@amd.com \
--cc=santosh.shukla@amd.com \
--cc=suravee.suthikulpanit@amd.com \
--cc=thomas.lendacky@amd.com \
--cc=vasant.hegde@amd.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).