* [Patch V4 1/3] spi: Add TPM HW flow flag
2023-02-24 15:49 [Patch V4 0/3] Tegra TPM driver with HW flow control Krishna Yarlagadda
@ 2023-02-24 15:49 ` Krishna Yarlagadda
2023-02-24 15:49 ` [Patch V4 2/3] tpm_tis-spi: Support hardware wait polling Krishna Yarlagadda
2023-02-24 15:49 ` [Patch V4 3/3] spi: tegra210-quad: set half duplex flag Krishna Yarlagadda
2 siblings, 0 replies; 6+ messages in thread
From: Krishna Yarlagadda @ 2023-02-24 15:49 UTC (permalink / raw)
To: robh+dt, broonie, peterhuewe, jgg, jarkko, krzysztof.kozlowski+dt,
linux-spi, linux-tegra, linux-integrity, linux-kernel
Cc: thierry.reding, jonathanh, skomatineni, ldewangan,
Krishna Yarlagadda
TPM spec defines flow control over SPI. Client device can insert a wait
state on MISO when address is transmitted by controller on MOSI. It can
work only on full duplex.
Half duplex controllers need to implement flow control in HW.
Add a flag for TPM to indicate flow control is expected in controller.
Signed-off-by: Krishna Yarlagadda <kyarlagadda@nvidia.com>
---
include/linux/spi/spi.h | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 988aabc31871..b88494e31239 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -184,8 +184,9 @@ struct spi_device {
u8 chip_select;
u8 bits_per_word;
bool rt;
-#define SPI_NO_TX BIT(31) /* No transmit wire */
-#define SPI_NO_RX BIT(30) /* No receive wire */
+#define SPI_NO_TX BIT(31) /* No transmit wire */
+#define SPI_NO_RX BIT(30) /* No receive wire */
+#define SPI_TPM_HW_FLOW BIT(29) /* TPM flow control */
/*
* All bits defined above should be covered by SPI_MODE_KERNEL_MASK.
* The SPI_MODE_KERNEL_MASK has the SPI_MODE_USER_MASK counterpart,
@@ -195,7 +196,7 @@ struct spi_device {
* These bits must not overlap. A static assert check should make sure of that.
* If adding extra bits, make sure to decrease the bit index below as well.
*/
-#define SPI_MODE_KERNEL_MASK (~(BIT(30) - 1))
+#define SPI_MODE_KERNEL_MASK (~(BIT(29) - 1))
u32 mode;
int irq;
void *controller_state;
--
2.17.1
^ permalink raw reply related [flat|nested] 6+ messages in thread* [Patch V4 2/3] tpm_tis-spi: Support hardware wait polling
2023-02-24 15:49 [Patch V4 0/3] Tegra TPM driver with HW flow control Krishna Yarlagadda
2023-02-24 15:49 ` [Patch V4 1/3] spi: Add TPM HW flow flag Krishna Yarlagadda
@ 2023-02-24 15:49 ` Krishna Yarlagadda
2023-02-24 15:49 ` [Patch V4 3/3] spi: tegra210-quad: set half duplex flag Krishna Yarlagadda
2 siblings, 0 replies; 6+ messages in thread
From: Krishna Yarlagadda @ 2023-02-24 15:49 UTC (permalink / raw)
To: robh+dt, broonie, peterhuewe, jgg, jarkko, krzysztof.kozlowski+dt,
linux-spi, linux-tegra, linux-integrity, linux-kernel
Cc: thierry.reding, jonathanh, skomatineni, ldewangan,
Krishna Yarlagadda
TPM devices raise wait signal on last addr cycle. This can be detected
by software driver by reading MISO line on same clock which requires
full duplex support. In case of half duplex controllers wait detection
has to be implemented in HW.
Support hardware wait state detection by sending entire message and let
controller handle flow control.
QSPI controller in Tegra234 & Tegra241 implement TPM wait polling.
Signed-off-by: Krishna Yarlagadda <kyarlagadda@nvidia.com>
---
drivers/char/tpm/tpm_tis_spi_main.c | 96 ++++++++++++++++++++++++++++-
1 file changed, 94 insertions(+), 2 deletions(-)
diff --git a/drivers/char/tpm/tpm_tis_spi_main.c b/drivers/char/tpm/tpm_tis_spi_main.c
index a0963a3e92bd..b193c950f133 100644
--- a/drivers/char/tpm/tpm_tis_spi_main.c
+++ b/drivers/char/tpm/tpm_tis_spi_main.c
@@ -71,8 +71,78 @@ static int tpm_tis_spi_flow_control(struct tpm_tis_spi_phy *phy,
return 0;
}
-int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
- u8 *in, const u8 *out)
+/*
+ * Half duplex controller with support for TPM wait state detection like
+ * Tegra241 need cmd, addr & data sent in single message to manage HW flow
+ * control. Each phase sent in different transfer for controller to idenity
+ * phase.
+ */
+int tpm_tis_spi_hw_flow_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
+ u8 *in, const u8 *out)
+{
+ struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
+ struct spi_transfer spi_xfer[3];
+ struct spi_message m;
+ u8 transfer_len;
+ int ret;
+
+ spi_bus_lock(phy->spi_device->master);
+
+ while (len) {
+ transfer_len = min_t(u16, len, MAX_SPI_FRAMESIZE);
+
+ spi_message_init(&m);
+ phy->iobuf[0] = (in ? 0x80 : 0) | (transfer_len - 1);
+ phy->iobuf[1] = 0xd4;
+ phy->iobuf[2] = addr >> 8;
+ phy->iobuf[3] = addr;
+
+ memset(&spi_xfer, 0, sizeof(spi_xfer));
+
+ spi_xfer[0].tx_buf = phy->iobuf;
+ spi_xfer[0].len = 1;
+ spi_message_add_tail(&spi_xfer[0], &m);
+
+ spi_xfer[1].tx_buf = phy->iobuf + 1;
+ spi_xfer[1].len = 3;
+ spi_message_add_tail(&spi_xfer[1], &m);
+
+ if (out) {
+ spi_xfer[2].tx_buf = &phy->iobuf[4];
+ spi_xfer[2].rx_buf = NULL;
+ memcpy(&phy->iobuf[4], out, transfer_len);
+ out += transfer_len;
+ }
+
+ if (in) {
+ spi_xfer[2].tx_buf = NULL;
+ spi_xfer[2].rx_buf = &phy->iobuf[4];
+ }
+
+ spi_xfer[2].len = transfer_len;
+ spi_message_add_tail(&spi_xfer[2], &m);
+
+ reinit_completion(&phy->ready);
+
+ ret = spi_sync_locked(phy->spi_device, &m);
+ if (ret < 0)
+ goto exit;
+
+ if (in) {
+ memcpy(in, &phy->iobuf[4], transfer_len);
+ in += transfer_len;
+ }
+
+ len -= transfer_len;
+ }
+
+exit:
+ spi_bus_unlock(phy->spi_device->master);
+ return ret;
+}
+
+int tpm_tis_spi_sw_flow_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
+ u8 *in, const u8 *out)
{
struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
int ret = 0;
@@ -140,6 +210,28 @@ int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
return ret;
}
+int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
+ u8 *in, const u8 *out)
+{
+ struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
+ struct spi_controller *ctlr = phy->spi_device->controller;
+
+ /*
+ * TPM flow control over SPI requires full duplex support.
+ * Send entire message to a half duplex controller to handle
+ * wait polling in controller.
+ * Set TPM HW flow control flag..
+ */
+ if (ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) {
+ phy->spi_device->mode |= SPI_TPM_HW_FLOW;
+ return tpm_tis_spi_hw_flow_transfer(data, addr, len, in,
+ out);
+ } else {
+ return tpm_tis_spi_sw_flow_transfer(data, addr, len, in,
+ out);
+ }
+}
+
static int tpm_tis_spi_read_bytes(struct tpm_tis_data *data, u32 addr,
u16 len, u8 *result, enum tpm_tis_io_mode io_mode)
{
--
2.17.1
^ permalink raw reply related [flat|nested] 6+ messages in thread* [Patch V4 3/3] spi: tegra210-quad: set half duplex flag
2023-02-24 15:49 [Patch V4 0/3] Tegra TPM driver with HW flow control Krishna Yarlagadda
2023-02-24 15:49 ` [Patch V4 1/3] spi: Add TPM HW flow flag Krishna Yarlagadda
2023-02-24 15:49 ` [Patch V4 2/3] tpm_tis-spi: Support hardware wait polling Krishna Yarlagadda
@ 2023-02-24 15:49 ` Krishna Yarlagadda
2023-02-24 16:37 ` Mark Brown
2 siblings, 1 reply; 6+ messages in thread
From: Krishna Yarlagadda @ 2023-02-24 15:49 UTC (permalink / raw)
To: robh+dt, broonie, peterhuewe, jgg, jarkko, krzysztof.kozlowski+dt,
linux-spi, linux-tegra, linux-integrity, linux-kernel
Cc: thierry.reding, jonathanh, skomatineni, ldewangan,
Krishna Yarlagadda
Tegra QSPI controller only supports half duplex transfers.
Set half duplex constraint flag.
Signed-off-by: Krishna Yarlagadda <kyarlagadda@nvidia.com>
---
drivers/spi/spi-tegra210-quad.c | 1 +
1 file changed, 1 insertion(+)
diff --git a/drivers/spi/spi-tegra210-quad.c b/drivers/spi/spi-tegra210-quad.c
index 39dec2dc161b..fe15fa6eecd1 100644
--- a/drivers/spi/spi-tegra210-quad.c
+++ b/drivers/spi/spi-tegra210-quad.c
@@ -1553,6 +1553,7 @@ static int tegra_qspi_probe(struct platform_device *pdev)
master->mode_bits = SPI_MODE_0 | SPI_MODE_3 | SPI_CS_HIGH |
SPI_TX_DUAL | SPI_RX_DUAL | SPI_TX_QUAD | SPI_RX_QUAD;
master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) | SPI_BPW_MASK(8);
+ master->flags = SPI_CONTROLLER_HALF_DUPLEX;
master->setup = tegra_qspi_setup;
master->transfer_one_message = tegra_qspi_transfer_one_message;
master->num_chipselect = 1;
--
2.17.1
^ permalink raw reply related [flat|nested] 6+ messages in thread