linux-spi.vger.kernel.org archive mirror
* [PATCH v2 for-next] spi: spi-pci1xxxx: Add support for per-instance DMA interrupt vectors
@ 2025-06-24  3:30 Thangaraj Samynathan
  2025-06-24 17:33 ` Mark Brown
  0 siblings, 1 reply; 2+ messages in thread
From: Thangaraj Samynathan @ 2025-06-24  3:30 UTC (permalink / raw)
  To: broonie, linux-spi, linux-kernel

Add support for dedicated DMA interrupt vectors for each SPI hardware
instance in the pci1xxxx driver. This improves scalability and interrupt
handling for systems using multiple SPI instances with DMA.

Introduce a constant `NUM_VEC_PER_INST` to define the number of IRQ
vectors per instance (main, DMA write, DMA read). Update the
`pci1xxxx_spi_internal` structure to use an IRQ array.
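
For reference, the resulting MSI vector layout can be summarised with a
small sketch (illustrative only, not part of the patch; the helper names
are hypothetical, but the index arithmetic mirrors the allocation done in
pci1xxxx_spi_dma_init() below):

  /*
   * With hw_inst_cnt instances and NUM_VEC_PER_INST == 3:
   *   vector i                     -> main SPI IRQ of instance i
   *   vector hw_inst_cnt + 2*i     -> DMA write IRQ of instance i
   *   vector hw_inst_cnt + 2*i + 1 -> DMA read IRQ of instance i
   */
  static inline int pci1xxxx_dma_wr_vector(int hw_inst_cnt, int i)
  {
          return hw_inst_cnt + 2 * i;
  }

  static inline int pci1xxxx_dma_rd_vector(int hw_inst_cnt, int i)
  {
          return hw_inst_cnt + 2 * i + 1;
  }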

Refactor IRQ allocation and DMA initialization logic:
- Assign separate IRQ vectors for DMA read and write interrupts.
- Split the original DMA ISR into two handlers:
  `pci1xxxx_spi_isr_dma_rd` and `pci1xxxx_spi_isr_dma_wr`.
- Configure IMWR registers per instance using cached MSI data (a packing
  sketch follows this list).
- Move DMA register configuration into a new helper function,
  `pci1xxxx_spi_dma_config()`.
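
A minimal sketch of the IMWR data packing mentioned above (illustrative
only; it assumes, as the patch does, that each 32-bit IMWR DATA register
carries one 16-bit MSI data value per DMA channel, and the helper name is
hypothetical):

  static inline u32 pci1xxxx_pack_imwr_data(u32 regval, u16 msi_data,
                                            unsigned int ch)
  {
          /* channel ch occupies bits [16 * ch + 15 : 16 * ch] */
          return regval | ((u32)msi_data << (ch * 16));
  }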

Invoke the DMA initialization after all instances are configured to
ensure correct IRQ vector mapping.

Signed-off-by: Thangaraj Samynathan <thangaraj.s@microchip.com>
---
v2
- Resubmitted due to patch apply failures.

v1
- Initial submission

 drivers/spi/spi-pci1xxxx.c | 212 +++++++++++++++++++++++++------------
 1 file changed, 144 insertions(+), 68 deletions(-)

diff --git a/drivers/spi/spi-pci1xxxx.c b/drivers/spi/spi-pci1xxxx.c
index a6c8bf228288..f44fe5841139 100644
--- a/drivers/spi/spi-pci1xxxx.c
+++ b/drivers/spi/spi-pci1xxxx.c
@@ -97,8 +97,8 @@
 #define SPI_DMA_CH1_DONE_INT		BIT(1)
 #define SPI_DMA_CH0_ABORT_INT		BIT(16)
 #define SPI_DMA_CH1_ABORT_INT		BIT(17)
-#define SPI_DMA_DONE_INT_MASK		(SPI_DMA_CH0_DONE_INT | SPI_DMA_CH1_DONE_INT)
-#define SPI_DMA_ABORT_INT_MASK		(SPI_DMA_CH0_ABORT_INT | SPI_DMA_CH1_ABORT_INT)
+#define SPI_DMA_DONE_INT_MASK(x)	(1 << (x))
+#define SPI_DMA_ABORT_INT_MASK(x)	(1 << (16 + (x)))
 #define DMA_CH_CONTROL_LIE		BIT(3)
 #define DMA_CH_CONTROL_RIE		BIT(4)
 #define DMA_INTR_EN			(DMA_CH_CONTROL_RIE | DMA_CH_CONTROL_LIE)
@@ -132,10 +132,12 @@
 #define SPI_SUSPEND_CONFIG 0x101
 #define SPI_RESUME_CONFIG 0x203
 
+#define NUM_VEC_PER_INST 3
+
 struct pci1xxxx_spi_internal {
 	u8 hw_inst;
 	u8 clkdiv;
-	int irq;
+	int irq[NUM_VEC_PER_INST];
 	int mode;
 	bool spi_xfer_in_progress;
 	void *rx_buf;
@@ -193,6 +195,9 @@ static const struct pci_device_id pci1xxxx_spi_pci_id_table[] = {
 
 MODULE_DEVICE_TABLE(pci, pci1xxxx_spi_pci_id_table);
 
+static irqreturn_t pci1xxxx_spi_isr_dma_rd(int irq, void *dev);
+static irqreturn_t pci1xxxx_spi_isr_dma_wr(int irq, void *dev);
+
 static int pci1xxxx_set_sys_lock(struct pci1xxxx_spi *par)
 {
 	writel(SPI_SYSLOCK, par->reg_base + SPI_SYSLOCK_REG);
@@ -213,13 +218,16 @@ static void pci1xxxx_release_sys_lock(struct pci1xxxx_spi *par)
 	writel(0x0, par->reg_base + SPI_SYSLOCK_REG);
 }
 
-static int pci1xxxx_check_spi_can_dma(struct pci1xxxx_spi *spi_bus, int irq)
+static int pci1xxxx_check_spi_can_dma(struct pci1xxxx_spi *spi_bus, int hw_inst, int num_vector)
 {
 	struct pci_dev *pdev = spi_bus->dev;
 	u32 pf_num;
 	u32 regval;
 	int ret;
 
+	if (num_vector != hw_inst * NUM_VEC_PER_INST)
+		return -EOPNOTSUPP;
+
 	/*
 	 * DEV REV Registers is a system register, HW Syslock bit
 	 * should be acquired before accessing the register
@@ -247,16 +255,6 @@ static int pci1xxxx_check_spi_can_dma(struct pci1xxxx_spi *spi_bus, int irq)
 	if (spi_bus->dev_rev < 0xC0 || pf_num)
 		return -EOPNOTSUPP;
 
-	/*
-	 * DMA Supported only with MSI Interrupts
-	 * One of the SPI instance's MSI vector address and data
-	 * is used for DMA Interrupt
-	 */
-	if (!irq_get_msi_desc(irq)) {
-		dev_warn(&pdev->dev, "Error MSI Interrupt not supported, will operate in PIO mode\n");
-		return -EOPNOTSUPP;
-	}
-
 	spi_bus->dma_offset_bar = pcim_iomap(pdev, 2, pci_resource_len(pdev, 2));
 	if (!spi_bus->dma_offset_bar) {
 		dev_warn(&pdev->dev, "Error failed to map dma bar, will operate in PIO mode\n");
@@ -273,29 +271,90 @@ static int pci1xxxx_check_spi_can_dma(struct pci1xxxx_spi *spi_bus, int irq)
 	return 0;
 }
 
-static int pci1xxxx_spi_dma_init(struct pci1xxxx_spi *spi_bus, int irq)
+static void pci1xxxx_spi_dma_config(struct pci1xxxx_spi *spi_bus)
 {
+	struct pci1xxxx_spi_internal *spi_sub_ptr;
+	u8 iter, irq_index;
 	struct msi_msg msi;
+	u32 regval;
+	u16 data;
+
+	irq_index = spi_bus->total_hw_instances;
+	for (iter = 0; iter < spi_bus->total_hw_instances; iter++) {
+		spi_sub_ptr = spi_bus->spi_int[iter];
+		get_cached_msi_msg(spi_sub_ptr->irq[1], &msi);
+		if (iter == 0) {
+			writel(msi.address_hi, spi_bus->dma_offset_bar +
+			       SPI_DMA_INTR_IMWR_WDONE_HIGH);
+			writel(msi.address_hi, spi_bus->dma_offset_bar +
+			       SPI_DMA_INTR_IMWR_WABORT_HIGH);
+			writel(msi.address_hi, spi_bus->dma_offset_bar +
+			       SPI_DMA_INTR_IMWR_RDONE_HIGH);
+			writel(msi.address_hi, spi_bus->dma_offset_bar +
+			       SPI_DMA_INTR_IMWR_RABORT_HIGH);
+			writel(msi.address_lo, spi_bus->dma_offset_bar +
+			       SPI_DMA_INTR_IMWR_WDONE_LOW);
+			writel(msi.address_lo, spi_bus->dma_offset_bar +
+			       SPI_DMA_INTR_IMWR_WABORT_LOW);
+			writel(msi.address_lo, spi_bus->dma_offset_bar +
+			       SPI_DMA_INTR_IMWR_RDONE_LOW);
+			writel(msi.address_lo, spi_bus->dma_offset_bar +
+			       SPI_DMA_INTR_IMWR_RABORT_LOW);
+			writel(0, spi_bus->dma_offset_bar + SPI_DMA_INTR_WR_IMWR_DATA);
+			writel(0, spi_bus->dma_offset_bar + SPI_DMA_INTR_RD_IMWR_DATA);
+		}
+		regval = readl(spi_bus->dma_offset_bar + SPI_DMA_INTR_WR_IMWR_DATA);
+		data = msi.data + irq_index;
+		writel((regval | (data << (iter * 16))), spi_bus->dma_offset_bar +
+		       SPI_DMA_INTR_WR_IMWR_DATA);
+		regval = readl(spi_bus->dma_offset_bar + SPI_DMA_INTR_WR_IMWR_DATA);
+		irq_index++;
+
+		data = msi.data + irq_index;
+		regval = readl(spi_bus->dma_offset_bar + SPI_DMA_INTR_RD_IMWR_DATA);
+		writel(regval | (data << (iter * 16)), spi_bus->dma_offset_bar +
+		       SPI_DMA_INTR_RD_IMWR_DATA);
+		regval = readl(spi_bus->dma_offset_bar + SPI_DMA_INTR_RD_IMWR_DATA);
+		irq_index++;
+	}
+}
+
+static int pci1xxxx_spi_dma_init(struct pci1xxxx_spi *spi_bus, int hw_inst, int num_vector)
+{
+	struct pci1xxxx_spi_internal *spi_sub_ptr;
+	u8 iter, irq_index;
 	int ret;
 
-	ret = pci1xxxx_check_spi_can_dma(spi_bus, irq);
+	irq_index = hw_inst;
+	ret = pci1xxxx_check_spi_can_dma(spi_bus, hw_inst, num_vector);
 	if (ret)
 		return ret;
 
 	spin_lock_init(&spi_bus->dma_reg_lock);
-	get_cached_msi_msg(irq, &msi);
 	writel(SPI_DMA_ENGINE_EN, spi_bus->dma_offset_bar + SPI_DMA_GLOBAL_WR_ENGINE_EN);
 	writel(SPI_DMA_ENGINE_EN, spi_bus->dma_offset_bar + SPI_DMA_GLOBAL_RD_ENGINE_EN);
-	writel(msi.address_hi, spi_bus->dma_offset_bar + SPI_DMA_INTR_IMWR_WDONE_HIGH);
-	writel(msi.address_hi, spi_bus->dma_offset_bar + SPI_DMA_INTR_IMWR_WABORT_HIGH);
-	writel(msi.address_hi, spi_bus->dma_offset_bar + SPI_DMA_INTR_IMWR_RDONE_HIGH);
-	writel(msi.address_hi, spi_bus->dma_offset_bar + SPI_DMA_INTR_IMWR_RABORT_HIGH);
-	writel(msi.address_lo, spi_bus->dma_offset_bar + SPI_DMA_INTR_IMWR_WDONE_LOW);
-	writel(msi.address_lo, spi_bus->dma_offset_bar + SPI_DMA_INTR_IMWR_WABORT_LOW);
-	writel(msi.address_lo, spi_bus->dma_offset_bar + SPI_DMA_INTR_IMWR_RDONE_LOW);
-	writel(msi.address_lo, spi_bus->dma_offset_bar + SPI_DMA_INTR_IMWR_RABORT_LOW);
-	writel(msi.data, spi_bus->dma_offset_bar + SPI_DMA_INTR_WR_IMWR_DATA);
-	writel(msi.data, spi_bus->dma_offset_bar + SPI_DMA_INTR_RD_IMWR_DATA);
+
+	for (iter = 0; iter < hw_inst; iter++) {
+		spi_sub_ptr = spi_bus->spi_int[iter];
+		spi_sub_ptr->irq[1] = pci_irq_vector(spi_bus->dev, irq_index);
+		ret = devm_request_irq(&spi_bus->dev->dev, spi_sub_ptr->irq[1],
+				       pci1xxxx_spi_isr_dma_wr, PCI1XXXX_IRQ_FLAGS,
+				       pci_name(spi_bus->dev), spi_sub_ptr);
+		if (ret < 0)
+			return ret;
+
+		irq_index++;
+
+		spi_sub_ptr->irq[2] = pci_irq_vector(spi_bus->dev, irq_index);
+		ret = devm_request_irq(&spi_bus->dev->dev, spi_sub_ptr->irq[2],
+				       pci1xxxx_spi_isr_dma_rd, PCI1XXXX_IRQ_FLAGS,
+				       pci_name(spi_bus->dev), spi_sub_ptr);
+		if (ret < 0)
+			return ret;
+
+		irq_index++;
+	}
+	pci1xxxx_spi_dma_config(spi_bus);
 	dma_set_max_seg_size(&spi_bus->dev->dev, PCI1XXXX_SPI_BUFFER_SIZE);
 	spi_bus->can_dma = true;
 	return 0;
@@ -401,13 +460,13 @@ static void pci1xxxx_spi_setup(struct pci1xxxx_spi *par, u8 hw_inst, u32 mode,
 	writel(regval, par->reg_base + SPI_MST_CTL_REG_OFFSET(hw_inst));
 }
 
-static void pci1xxxx_start_spi_xfer(struct pci1xxxx_spi_internal *p, u8 hw_inst)
+static void pci1xxxx_start_spi_xfer(struct pci1xxxx_spi_internal *p)
 {
 	u32 regval;
 
-	regval = readl(p->parent->reg_base + SPI_MST_CTL_REG_OFFSET(hw_inst));
+	regval = readl(p->parent->reg_base + SPI_MST_CTL_REG_OFFSET(p->hw_inst));
 	regval |= SPI_MST_CTL_GO;
-	writel(regval, p->parent->reg_base + SPI_MST_CTL_REG_OFFSET(hw_inst));
+	writel(regval, p->parent->reg_base + SPI_MST_CTL_REG_OFFSET(p->hw_inst));
 }
 
 static int pci1xxxx_spi_transfer_with_io(struct spi_controller *spi_ctlr,
@@ -451,7 +510,7 @@ static int pci1xxxx_spi_transfer_with_io(struct spi_controller *spi_ctlr,
 				    &tx_buf[bytes_transfered], len);
 			bytes_transfered += len;
 			pci1xxxx_spi_setup(par, p->hw_inst, spi->mode, clkdiv, len);
-			pci1xxxx_start_spi_xfer(p, p->hw_inst);
+			pci1xxxx_start_spi_xfer(p);
 
 			/* Wait for DMA_TERM interrupt */
 			result = wait_for_completion_timeout(&p->spi_xfer_done,
@@ -627,7 +686,7 @@ static void pci1xxxx_spi_setup_next_dma_transfer(struct pci1xxxx_spi_internal *p
 	}
 }
 
-static irqreturn_t pci1xxxx_spi_isr_dma(int irq, void *dev)
+static irqreturn_t pci1xxxx_spi_isr_dma_rd(int irq, void *dev)
 {
 	struct pci1xxxx_spi_internal *p = dev;
 	irqreturn_t spi_int_fired = IRQ_NONE;
@@ -637,36 +696,53 @@ static irqreturn_t pci1xxxx_spi_isr_dma(int irq, void *dev)
 	spin_lock_irqsave(&p->parent->dma_reg_lock, flags);
 	/* Clear the DMA RD INT and start spi xfer*/
 	regval = readl(p->parent->dma_offset_bar + SPI_DMA_INTR_RD_STS);
-	if (regval & SPI_DMA_DONE_INT_MASK) {
-		if (regval & SPI_DMA_CH0_DONE_INT)
-			pci1xxxx_start_spi_xfer(p, SPI0);
-		if (regval & SPI_DMA_CH1_DONE_INT)
-			pci1xxxx_start_spi_xfer(p, SPI1);
-		spi_int_fired = IRQ_HANDLED;
-	}
-	if (regval & SPI_DMA_ABORT_INT_MASK) {
-		p->dma_aborted_rd = true;
-		spi_int_fired = IRQ_HANDLED;
+	if (regval) {
+		if (regval & SPI_DMA_DONE_INT_MASK(p->hw_inst)) {
+			pci1xxxx_start_spi_xfer(p);
+			spi_int_fired = IRQ_HANDLED;
+		}
+		if (regval & SPI_DMA_ABORT_INT_MASK(p->hw_inst)) {
+			p->dma_aborted_rd = true;
+			spi_int_fired = IRQ_HANDLED;
+		}
 	}
-	writel(regval, p->parent->dma_offset_bar + SPI_DMA_INTR_RD_CLR);
+	writel((SPI_DMA_DONE_INT_MASK(p->hw_inst) | SPI_DMA_ABORT_INT_MASK(p->hw_inst)),
+	       p->parent->dma_offset_bar + SPI_DMA_INTR_RD_CLR);
+	spin_unlock_irqrestore(&p->parent->dma_reg_lock, flags);
+	return spi_int_fired;
+}
 
+static irqreturn_t pci1xxxx_spi_isr_dma_wr(int irq, void *dev)
+{
+	struct pci1xxxx_spi_internal *p = dev;
+	irqreturn_t spi_int_fired = IRQ_NONE;
+	unsigned long flags;
+	u32 regval;
+
+	spin_lock_irqsave(&p->parent->dma_reg_lock, flags);
 	/* Clear the DMA WR INT */
 	regval = readl(p->parent->dma_offset_bar + SPI_DMA_INTR_WR_STS);
-	if (regval & SPI_DMA_DONE_INT_MASK) {
-		if (regval & SPI_DMA_CH0_DONE_INT)
-			pci1xxxx_spi_setup_next_dma_transfer(p->parent->spi_int[SPI0]);
-
-		if (regval & SPI_DMA_CH1_DONE_INT)
-			pci1xxxx_spi_setup_next_dma_transfer(p->parent->spi_int[SPI1]);
-
-		spi_int_fired = IRQ_HANDLED;
-	}
-	if (regval & SPI_DMA_ABORT_INT_MASK) {
-		p->dma_aborted_wr = true;
-		spi_int_fired = IRQ_HANDLED;
+	if (regval) {
+		if (regval & SPI_DMA_DONE_INT_MASK(p->hw_inst)) {
+			pci1xxxx_spi_setup_next_dma_transfer(p);
+			spi_int_fired = IRQ_HANDLED;
+		}
+		if (regval & SPI_DMA_ABORT_INT_MASK(p->hw_inst)) {
+			p->dma_aborted_wr = true;
+			spi_int_fired = IRQ_HANDLED;
+		}
 	}
-	writel(regval, p->parent->dma_offset_bar + SPI_DMA_INTR_WR_CLR);
+	writel((SPI_DMA_DONE_INT_MASK(p->hw_inst) | SPI_DMA_ABORT_INT_MASK(p->hw_inst)),
+	       p->parent->dma_offset_bar + SPI_DMA_INTR_WR_CLR);
 	spin_unlock_irqrestore(&p->parent->dma_reg_lock, flags);
+	return spi_int_fired;
+}
+
+static irqreturn_t pci1xxxx_spi_isr_dma(int irq, void *dev)
+{
+	struct pci1xxxx_spi_internal *p = dev;
+	irqreturn_t spi_int_fired = IRQ_NONE;
+	u32 regval;
 
 	/* Clear the SPI GO_BIT Interrupt */
 	regval = readl(p->parent->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst));
@@ -764,7 +840,7 @@ static int pci1xxxx_spi_probe(struct pci_dev *pdev, const struct pci_device_id *
 			if (!spi_bus->reg_base)
 				return -EINVAL;
 
-			num_vector = pci_alloc_irq_vectors(pdev, 1, hw_inst_cnt,
+			num_vector = pci_alloc_irq_vectors(pdev, 1, hw_inst_cnt * NUM_VEC_PER_INST,
 							   PCI_IRQ_INTX | PCI_IRQ_MSI);
 			if (num_vector < 0) {
 				dev_err(&pdev->dev, "Error allocating MSI vectors\n");
@@ -778,27 +854,23 @@ static int pci1xxxx_spi_probe(struct pci_dev *pdev, const struct pci_device_id *
 			regval &= ~SPI_INTR;
 			writel(regval, spi_bus->reg_base +
 			       SPI_MST_EVENT_MASK_REG_OFFSET(spi_sub_ptr->hw_inst));
-			spi_sub_ptr->irq = pci_irq_vector(pdev, 0);
+			spi_sub_ptr->irq[0] = pci_irq_vector(pdev, 0);
 
 			if (num_vector >= hw_inst_cnt)
-				ret = devm_request_irq(&pdev->dev, spi_sub_ptr->irq,
+				ret = devm_request_irq(&pdev->dev, spi_sub_ptr->irq[0],
 						       pci1xxxx_spi_isr, PCI1XXXX_IRQ_FLAGS,
 						       pci_name(pdev), spi_sub_ptr);
 			else
-				ret = devm_request_irq(&pdev->dev, spi_sub_ptr->irq,
+				ret = devm_request_irq(&pdev->dev, spi_sub_ptr->irq[0],
 						       pci1xxxx_spi_shared_isr,
 						       PCI1XXXX_IRQ_FLAGS | IRQF_SHARED,
 						       pci_name(pdev), spi_bus);
 			if (ret < 0) {
 				dev_err(&pdev->dev, "Unable to request irq : %d",
-					spi_sub_ptr->irq);
+					spi_sub_ptr->irq[0]);
 				return -ENODEV;
 			}
 
-			ret = pci1xxxx_spi_dma_init(spi_bus, spi_sub_ptr->irq);
-			if (ret && ret != -EOPNOTSUPP)
-				return ret;
-
 			/* This register is only applicable for 1st instance */
 			regval = readl(spi_bus->reg_base + SPI_PCI_CTRL_REG_OFFSET(0));
 			if (!only_sec_inst)
@@ -820,13 +892,13 @@ static int pci1xxxx_spi_probe(struct pci_dev *pdev, const struct pci_device_id *
 			writel(regval, spi_bus->reg_base +
 			       SPI_MST_EVENT_MASK_REG_OFFSET(spi_sub_ptr->hw_inst));
 			if (num_vector >= hw_inst_cnt) {
-				spi_sub_ptr->irq = pci_irq_vector(pdev, iter);
-				ret = devm_request_irq(&pdev->dev, spi_sub_ptr->irq,
+				spi_sub_ptr->irq[0] = pci_irq_vector(pdev, iter);
+				ret = devm_request_irq(&pdev->dev, spi_sub_ptr->irq[0],
 						       pci1xxxx_spi_isr, PCI1XXXX_IRQ_FLAGS,
 						       pci_name(pdev), spi_sub_ptr);
 				if (ret < 0) {
 					dev_err(&pdev->dev, "Unable to request irq : %d",
-						spi_sub_ptr->irq);
+						spi_sub_ptr->irq[0]);
 					return -ENODEV;
 				}
 			}
@@ -849,6 +921,10 @@ static int pci1xxxx_spi_probe(struct pci_dev *pdev, const struct pci_device_id *
 		if (ret)
 			return ret;
 	}
+	ret = pci1xxxx_spi_dma_init(spi_bus, hw_inst_cnt, num_vector);
+	if (ret && ret != -EOPNOTSUPP)
+		return ret;
+
 	pci_set_drvdata(pdev, spi_bus);
 
 	return 0;
-- 
2.25.1



* Re: [PATCH v2 for-next] spi: spi-pci1xxxx: Add support for per-instance DMA interrupt vectors
  2025-06-24  3:30 [PATCH v2 for-next] spi: spi-pci1xxxx: Add support for per-instance DMA interrupt vectors Thangaraj Samynathan
@ 2025-06-24 17:33 ` Mark Brown
  0 siblings, 0 replies; 2+ messages in thread
From: Mark Brown @ 2025-06-24 17:33 UTC (permalink / raw)
  To: linux-spi, linux-kernel, Thangaraj Samynathan

On Tue, 24 Jun 2025 09:00:28 +0530, Thangaraj Samynathan wrote:
> Add support for dedicated DMA interrupt vectors for each SPI hardware
> instance in the pci1xxxx driver. This improves scalability and interrupt
> handling for systems using multiple SPI instances with DMA.
> 
> Introduce a constant `NUM_VEC_PER_INST` to define the number of IRQ
> vectors per instance (main, DMA write, DMA read). Update the
> `pci1xxxx_spi_internal` structure to use an IRQ array.
> 
> [...]

Applied to

   https://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi.git for-next

Thanks!

[1/1] spi: spi-pci1xxxx: Add support for per-instance DMA interrupt vectors
      commit: 3e36c822506d924894ff7de549b9377d3114c2d7

All being well, this means that it will be integrated into the linux-next
tree (usually sometime in the next 24 hours) and sent to Linus during
the next merge window (or sooner if it is a bug fix); however, if
problems are discovered then the patch may be dropped or reverted.

You may get further e-mails resulting from automated or manual testing
and review of the tree; please engage with people reporting problems and
send follow-up patches addressing any issues that are reported, if needed.

If any updates are required or you are submitting further changes, they
should be sent as incremental updates against current git; existing
patches will not be replaced.

Please add any relevant lists and maintainers to the CCs when replying
to this mail.

Thanks,
Mark

