public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
From: Jonathan Cameron <jonathan.cameron@huawei.com>
To: Nathan Lynch via B4 Relay <devnull+nathan.lynch.amd.com@kernel.org>
Cc: <nathan.lynch@amd.com>, Vinod Koul <vkoul@kernel.org>,
	Wei Huang <wei.huang2@amd.com>,
	Mario Limonciello <mario.limonciello@amd.com>,
	"Bjorn Helgaas" <bhelgaas@google.com>,
	<linux-pci@vger.kernel.org>, <linux-kernel@vger.kernel.org>,
	<dmaengine@vger.kernel.org>
Subject: Re: [PATCH RFC 11/13] dmaengine: sdxi: Add DMA engine provider
Date: Mon, 15 Sep 2025 16:16:40 +0100	[thread overview]
Message-ID: <20250915161640.00004630@huawei.com> (raw)
In-Reply-To: <20250905-sdxi-base-v1-11-d0341a1292ba@amd.com>

On Fri, 05 Sep 2025 13:48:34 -0500
Nathan Lynch via B4 Relay <devnull+nathan.lynch.amd.com@kernel.org> wrote:

> From: Nathan Lynch <nathan.lynch@amd.com>
> 
> Add support for memcpy and interrupt capabilities. Register one
> channel per SDXI function discovered for now.
> 
> Co-developed-by: Wei Huang <wei.huang2@amd.com>
> Signed-off-by: Wei Huang <wei.huang2@amd.com>
> Signed-off-by: Nathan Lynch <nathan.lynch@amd.com>
A few superficial comments inline.

Good to see support for this standard device btw.

Thanks,

Jonathan

> ---
>  drivers/dma/sdxi/device.c |   4 +
>  drivers/dma/sdxi/dma.c    | 409 ++++++++++++++++++++++++++++++++++++++++++++++
>  drivers/dma/sdxi/dma.h    |  12 ++
>  3 files changed, 425 insertions(+)

> diff --git a/drivers/dma/sdxi/dma.c b/drivers/dma/sdxi/dma.c
> new file mode 100644
> index 0000000000000000000000000000000000000000..ad8515deba53898b2b4ea0d38c40042b566abe1f
> --- /dev/null
> +++ b/drivers/dma/sdxi/dma.c

> +static int sdxi_dma_start_desc(struct sdxi_dma_desc *dma_desc)
> +{
> +	struct sdxi_dev *sdxi;
> +	struct sdxi_cmd *sdxi_cmd;
> +	struct sdxi_cxt *cxt;
> +	struct sdxi_desc desc;
> +	struct sdxi_copy copy;
> +	struct sdxi_cst_blk *cst_blk;
> +	dma_addr_t cst_blk_dma;
> +	int err;
> +
> +	sdxi_cmd = &dma_desc->sdxi_cmd;
> +	sdxi = sdxi_cmd->cxt->sdxi;
> +
> +	cxt = dma_desc->cxt;
Probably makes sense to combine with the declarations above.

> +
> +	if (sdxi_cmd->len > MAX_DMA_COPY_BYTES)
> +		return -EINVAL;
> +
> +	copy = (typeof(copy)) {
> +		.src = sdxi_cmd->src_addr,
> +		.dst = sdxi_cmd->dst_addr,
> +		.src_akey = 0,
> +		.dst_akey = 0,
> +		.len = sdxi_cmd->len,
> +	};
> +
> +	err = sdxi_encode_copy(&desc, &copy);
> +	if (err)
> +		return err;
> +
> +	err = sdxi_encode_copy(&desc, &copy);
> +	if (err)
> +		return err;
> +
> +	/* FIXME convert to pool */
> +	cst_blk = dma_alloc_coherent(sdxi_to_dev(sdxi), sizeof(*cst_blk),
> +				     &cst_blk_dma, GFP_NOWAIT);
> +	if (!cst_blk)
> +		return -ENOMEM;
> +
> +	cst_blk->signal = cpu_to_le64(0xff);
> +
> +	sdxi_cmd->cst_blk = cst_blk;
> +	sdxi_cmd->cst_blk_dma = cst_blk_dma;
> +	sdxi_cmd->ret = 0; /* TODO: get desc submit status & update ret value */
> +
> +	sdxi_desc_set_csb(&desc, cst_blk_dma);
> +	err = sdxi_submit_desc(cxt, &desc);
> +	if (err)
> +		goto free_cst_blk;
> +
> +	sdxi->tdata.cmd = sdxi_cmd; /* FIXME: this is not compatible w/multiple clients */
> +	dma_desc->issued_to_hw = 1;
> +	return 0;
> +free_cst_blk:
> +	dma_free_coherent(sdxi_to_dev(sdxi), sizeof(*cst_blk),
> +			  cst_blk, cst_blk_dma);
> +	return err;
> +}

> +static struct sdxi_dma_desc *sdxi_handle_active_desc(struct sdxi_dma_chan *chan,
> +						     struct sdxi_dma_desc *desc)
> +{
> +	struct dma_async_tx_descriptor *tx_desc;
> +	struct virt_dma_desc *vd;
> +	unsigned long flags;
> +
> +	/* Loop over descriptors until one is found with commands */
> +	do {
> +		if (desc) {
> +			if (!desc->issued_to_hw) {
> +				/* No errors, keep going */
> +				if (desc->status != DMA_ERROR)
> +					return desc;
> +			}
> +
> +			tx_desc = &desc->vd.tx;
> +			vd = &desc->vd;
> +		} else {
> +			tx_desc = NULL;
> +		}
> +
> +		spin_lock_irqsave(&chan->vc.lock, flags);
> +
> +		if (desc) {
> +

No blank line here.

> +			if (desc->status != DMA_COMPLETE) {
> +				if (desc->status != DMA_ERROR)
> +					desc->status = DMA_COMPLETE;
> +
> +				dma_cookie_complete(tx_desc);
> +				dma_descriptor_unmap(tx_desc);
> +				list_del(&desc->vd.node);
> +			} else {
> +				/* Don't handle it twice */
> +				tx_desc = NULL;
> +			}
> +		}
> +
> +		desc = sdxi_next_dma_desc(chan);
> +
> +		spin_unlock_irqrestore(&chan->vc.lock, flags);
> +
> +		if (tx_desc) {
> +			dmaengine_desc_get_callback_invoke(tx_desc, NULL);
> +			dma_run_dependencies(tx_desc);
> +			vchan_vdesc_fini(vd);
> +		}
> +	} while (desc);
> +
> +	return NULL;
> +}
> +
> +static void sdxi_cmd_callback(void *data, int err)
> +{
> +	struct sdxi_dma_desc *desc = data;
> +	struct dma_chan *dma_chan;
> +	struct sdxi_dma_chan *chan;
> +	int ret;
> +
> +	if (err == -EINPROGRESS)
> +		return;
> +
> +	dma_chan = desc->vd.tx.chan;
> +	chan = to_sdxi_dma_chan(dma_chan);
> +
> +	if (err)
> +		desc->status = DMA_ERROR;
> +
> +	while (true) {
> +		/* Check for DMA descriptor completion */
> +		desc = sdxi_handle_active_desc(chan, desc);
> +
> +		/* Don't submit cmd if no descriptor or DMA is paused */
> +		if (!desc)
Perhaps return here instead of break?
> +			break;
> +
> +		ret = sdxi_dma_start_desc(desc);
> +		if (!ret)
> +			break;
Perhaps return to make it clear that there is nothing else to do.
> +
> +		desc->status = DMA_ERROR;
> +	}
> +}
> +

> +
> +static struct sdxi_dma_desc *sdxi_dma_create_desc(struct dma_chan *dma_chan,
> +						  dma_addr_t dst,
> +						  dma_addr_t src,
> +						  unsigned int len,
> +						  unsigned long flags)
> +{
> +	struct sdxi_dma_chan *chan = to_sdxi_dma_chan(dma_chan);
> +	struct sdxi_dma_desc *desc;
> +	struct sdxi_cmd *sdxi_cmd;
> +
> +	desc = sdxi_dma_alloc_dma_desc(chan, flags);
> +	if (!desc)
> +		return NULL;
> +
> +	sdxi_cmd = &desc->sdxi_cmd;
Maybe

	*sdxi_cmd = (struct sdxi_cmd) {
		.cxt = chan->cxt,
		...
	};
> +	sdxi_cmd->cxt = chan->cxt;
> +	sdxi_cmd->cxt->sdxi = chan->cxt->sdxi;
> +	sdxi_cmd->src_addr = src;
> +	sdxi_cmd->dst_addr = dst;
> +	sdxi_cmd->len = len;
> +	sdxi_cmd->sdxi_cmd_callback = sdxi_cmd_callback;
> +	sdxi_cmd->data = desc;
> +
> +	return desc;
> +}

> +
> +static void sdxi_check_trans_status(struct sdxi_dma_chan *chan)
> +{
> +	struct sdxi_cxt *cxt = chan->cxt;
> +	struct sdxi_cmd *cmd;
> +
> +	if (!cxt)
> +		return;
> +
> +	cmd = cxt->sdxi->tdata.cmd;
> +
> +	if (le64_to_cpu(cmd->cst_blk->signal) == 0xfe)

Given that's a magic looking value, I think this 0xfe needs a define.

> +		sdxi_cmd_callback(cmd->data, cmd->ret);
> +}

> +
> +int sdxi_dma_register(struct sdxi_cxt *dma_cxt)
> +{
> +	struct sdxi_dma_chan *chan;
> +	struct sdxi_dev *sdxi = dma_cxt->sdxi;
> +	struct device *dev = sdxi_to_dev(sdxi);
> +	struct dma_device *dma_dev = &sdxi->dma_dev;
> +	int ret = 0;
> +
> +	sdxi->sdxi_dma_chan = devm_kzalloc(dev, sizeof(*sdxi->sdxi_dma_chan),
> +					   GFP_KERNEL);
This results in a mix of manual cleanup and devm.  That's generally something
we want to avoid because it makes code hard to review for race conditions etc.
I'd consider using custom actions and devm_add_action_or_reset() to ensure
that everything up to the first thing you want to not manage is done with
devm and ensure everything after that is done by hand.

Or use devm for everything.

> +	if (!sdxi->sdxi_dma_chan)
> +		return -ENOMEM;
> +
> +	sdxi->sdxi_dma_chan->cxt = dma_cxt;
> +
> +	dma_dev->dev = dev;
> +	dma_dev->src_addr_widths = DMA_SLAVE_BUSWIDTH_64_BYTES;
> +	dma_dev->dst_addr_widths = DMA_SLAVE_BUSWIDTH_64_BYTES;
> +	dma_dev->directions = BIT(DMA_MEM_TO_MEM);
> +	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
> +	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
> +	dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
> +
> +	dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
> +
> +	INIT_LIST_HEAD(&dma_dev->channels);
> +
> +	chan = sdxi->sdxi_dma_chan;
> +	chan->cxt->sdxi = sdxi;
> +
> +	/* Set base and prep routines */
> +	dma_dev->device_free_chan_resources = sdxi_dma_free_chan_resources;
> +	dma_dev->device_prep_dma_memcpy = sdxi_dma_prep_memcpy;
> +	dma_dev->device_prep_dma_interrupt = sdxi_prep_dma_interrupt;
> +	dma_dev->device_issue_pending = sdxi_dma_issue_pending;
> +	dma_dev->device_tx_status = sdxi_tx_status;
> +	dma_dev->device_terminate_all = sdxi_dma_terminate_all;
> +	dma_dev->device_synchronize = sdxi_dma_synchronize;
> +
> +	chan->vc.desc_free = sdxi_do_cleanup;
> +	vchan_init(&chan->vc, dma_dev);
> +
> +	dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
> +
> +	ret = dma_async_device_register(dma_dev);
> +	if (ret)
> +		goto err_reg;
> +
> +	return 0;
> +
> +err_reg:

Just return early unless there is something to do.

> +	return ret;
> +}
> +
> +void sdxi_dma_unregister(struct sdxi_cxt *dma_cxt)
> +{
> +	dma_async_device_unregister(&dma_cxt->sdxi->dma_dev);
> +}



  reply	other threads:[~2025-09-15 15:16 UTC|newest]

Thread overview: 43+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-09-05 18:48 [PATCH RFC 00/13] dmaengine: Smart Data Accelerator Interface (SDXI) basic support Nathan Lynch via B4 Relay
2025-09-05 18:48 ` [PATCH RFC 01/13] PCI: Add SNIA SDXI accelerator sub-class Nathan Lynch via B4 Relay
2025-09-15 17:25   ` Bjorn Helgaas
2025-09-15 20:17     ` Nathan Lynch
2025-09-05 18:48 ` [PATCH RFC 02/13] dmaengine: sdxi: Add control structure definitions Nathan Lynch via B4 Relay
2025-09-05 18:48 ` [PATCH RFC 03/13] dmaengine: sdxi: Add descriptor encoding and unit tests Nathan Lynch via B4 Relay
2025-09-15 11:52   ` Jonathan Cameron
2025-09-15 19:30     ` Nathan Lynch
2025-09-16 14:20       ` Jonathan Cameron
2025-09-16 19:06         ` Nathan Lynch
2025-09-05 18:48 ` [PATCH RFC 04/13] dmaengine: sdxi: Add MMIO register definitions Nathan Lynch via B4 Relay
2025-09-05 18:48 ` [PATCH RFC 05/13] dmaengine: sdxi: Add software data structures Nathan Lynch via B4 Relay
2025-09-15 11:59   ` Jonathan Cameron
2025-09-16 19:07     ` Nathan Lynch
2025-09-16  9:38   ` Markus Elfring
2025-09-05 18:48 ` [PATCH RFC 06/13] dmaengine: sdxi: Add error reporting support Nathan Lynch via B4 Relay
2025-09-15 12:11   ` Jonathan Cameron
2025-09-15 20:42     ` Nathan Lynch
2025-09-16 14:23       ` Jonathan Cameron
2025-09-05 18:48 ` [PATCH RFC 07/13] dmaengine: sdxi: Import descriptor enqueue code from spec Nathan Lynch via B4 Relay
2025-09-15 12:18   ` Jonathan Cameron
2025-09-16 17:05   ` [External] : " ALOK TIWARI
2025-09-05 18:48 ` [PATCH RFC 08/13] dmaengine: sdxi: Context creation/removal, descriptor submission Nathan Lynch via B4 Relay
2025-09-15 14:12   ` Jonathan Cameron
2025-09-16 20:40     ` Nathan Lynch
2025-09-17 13:34       ` Jonathan Cameron
2025-09-15 19:42   ` Markus Elfring
2025-09-05 18:48 ` [PATCH RFC 09/13] dmaengine: sdxi: Add core device management code Nathan Lynch via B4 Relay
2025-09-15 14:23   ` Jonathan Cameron
2025-09-16 21:23     ` Nathan Lynch
2025-09-05 18:48 ` [PATCH RFC 10/13] dmaengine: sdxi: Add PCI driver support Nathan Lynch via B4 Relay
2025-09-05 19:14   ` Mario Limonciello
2025-09-10 15:25     ` Nathan Lynch
2025-09-05 20:05   ` Bjorn Helgaas
2025-09-10 15:28     ` Nathan Lynch
2025-09-15 15:03   ` Jonathan Cameron
2025-09-16 16:43   ` [External] : " ALOK TIWARI
2025-09-05 18:48 ` [PATCH RFC 11/13] dmaengine: sdxi: Add DMA engine provider Nathan Lynch via B4 Relay
2025-09-15 15:16   ` Jonathan Cameron [this message]
2025-09-05 18:48 ` [PATCH RFC 12/13] dmaengine: sdxi: Add Kconfig and Makefile Nathan Lynch via B4 Relay
2025-09-15 15:08   ` Jonathan Cameron
2025-09-15 16:44     ` Nathan Lynch
2025-09-05 18:48 ` [PATCH RFC 13/13] MAINTAINERS: Add entry for SDXI driver Nathan Lynch via B4 Relay

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20250915161640.00004630@huawei.com \
    --to=jonathan.cameron@huawei.com \
    --cc=bhelgaas@google.com \
    --cc=devnull+nathan.lynch.amd.com@kernel.org \
    --cc=dmaengine@vger.kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-pci@vger.kernel.org \
    --cc=mario.limonciello@amd.com \
    --cc=nathan.lynch@amd.com \
    --cc=vkoul@kernel.org \
    --cc=wei.huang2@amd.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox