From: Konrad Dybcio <konrad.dybcio@linaro.org>
To: Vijaya Krishna Nivarthi <quic_vnivarth@quicinc.com>,
agross@kernel.org, andersson@kernel.org, broonie@kernel.org,
dianders@chromium.org, linux-arm-msm@vger.kernel.org,
linux-spi@vger.kernel.org, linux-kernel@vger.kernel.org
Cc: quic_msavaliy@quicinc.com, mka@chromium.org, swboyd@chromium.org,
quic_vtanuku@quicinc.com, quic_ptalari@quicinc.com
Subject: Re: [PATCH v2 2/2] spi: spi-geni-qcom: Do not do DMA map/unmap inside driver, use framework instead
Date: Tue, 6 Jun 2023 18:56:26 +0200
Message-ID: <9a1cf84c-e48c-f41d-2e2f-5c106fc1fed4@linaro.org>
In-Reply-To: <1684325894-30252-3-git-send-email-quic_vnivarth@quicinc.com>
On 17.05.2023 14:18, Vijaya Krishna Nivarthi wrote:
> In SE DMA mode, unlike GSI DMA, the spi geni driver does not make use of
> the DMA mapping functionality available in the framework.
> The driver does the mapping internally, which leaves the DMA buffer fields
> available in the spi_transfer struct superfluous while requiring additional
> members in the spi_geni_master struct.
>
> Conform to the design by having the framework handle map/unmap and doing
> only the DMA transfer in the driver; this also simplifies the code a bit.
>
> Fixes: e5f0dfa78ac7 ("spi: spi-geni-qcom: Add support for SE DMA mode")
> Suggested-by: Douglas Anderson <dianders@chromium.org>
> Signed-off-by: Vijaya Krishna Nivarthi <quic_vnivarth@quicinc.com>
> ---
I don't really have good insight into this code, but these changes
seem sane.
Acked-by: Konrad Dybcio <konrad.dybcio@linaro.org>
Konrad
> v1 -> v2:
> - pass dma address to new geni interfaces instead of pointer
> - set max_dma_len as per HPG
> - remove expendable local variables
> ---
> drivers/spi/spi-geni-qcom.c | 103 +++++++++++++++++++++-----------------------
> 1 file changed, 50 insertions(+), 53 deletions(-)
>
> diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
> index e423efc..206cc04 100644
> --- a/drivers/spi/spi-geni-qcom.c
> +++ b/drivers/spi/spi-geni-qcom.c
> @@ -97,8 +97,6 @@ struct spi_geni_master {
> struct dma_chan *tx;
> struct dma_chan *rx;
> int cur_xfer_mode;
> - dma_addr_t tx_se_dma;
> - dma_addr_t rx_se_dma;
> };
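Side note for anyone following along: the per-controller dma_addr_t
bookkeeping can go away because, once the core does the mapping, the DMA
addresses live in the transfer itself. From memory (double check against
include/linux/spi/spi.h), the relevant spi_transfer fields are:

	struct sg_table tx_sg;	/* filled in by the core for TX when can_dma() says yes */
	struct sg_table rx_sg;	/* filled in by the core for RX */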
>
> static int get_spi_clk_cfg(unsigned int speed_hz,
> @@ -174,7 +172,7 @@ static void handle_se_timeout(struct spi_master *spi,
> unmap_if_dma:
> if (mas->cur_xfer_mode == GENI_SE_DMA) {
> if (xfer) {
> - if (xfer->tx_buf && mas->tx_se_dma) {
> + if (xfer->tx_buf) {
> spin_lock_irq(&mas->lock);
> reinit_completion(&mas->tx_reset_done);
> writel(1, se->base + SE_DMA_TX_FSM_RST);
> @@ -182,9 +180,8 @@ static void handle_se_timeout(struct spi_master *spi,
> time_left = wait_for_completion_timeout(&mas->tx_reset_done, HZ);
> if (!time_left)
> dev_err(mas->dev, "DMA TX RESET failed\n");
> - geni_se_tx_dma_unprep(se, mas->tx_se_dma, xfer->len);
> }
> - if (xfer->rx_buf && mas->rx_se_dma) {
> + if (xfer->rx_buf) {
> spin_lock_irq(&mas->lock);
> reinit_completion(&mas->rx_reset_done);
> writel(1, se->base + SE_DMA_RX_FSM_RST);
> @@ -192,7 +189,6 @@ static void handle_se_timeout(struct spi_master *spi,
> time_left = wait_for_completion_timeout(&mas->rx_reset_done, HZ);
> if (!time_left)
> dev_err(mas->dev, "DMA RX RESET failed\n");
> - geni_se_rx_dma_unprep(se, mas->rx_se_dma, xfer->len);
> }
> } else {
> /*
> @@ -523,17 +519,36 @@ static int setup_gsi_xfer(struct spi_transfer *xfer, struct spi_geni_master *mas
> return 1;
> }
>
> +static u32 get_xfer_len_in_words(struct spi_transfer *xfer,
> + struct spi_geni_master *mas)
> +{
> + u32 len;
> +
> + if (!(mas->cur_bits_per_word % MIN_WORD_LEN))
> + len = xfer->len * BITS_PER_BYTE / mas->cur_bits_per_word;
> + else
> + len = xfer->len / (mas->cur_bits_per_word / BITS_PER_BYTE + 1);
> + len &= TRANS_LEN_MSK;
> +
> + return len;
> +}
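Not part of the patch, just a standalone sketch of what this word-length
math works out to for a few cases (MIN_WORD_LEN is 4 in this driver; the
helper name below is made up for the example):

	#include <stdio.h>

	#define BITS_PER_BYTE	8
	#define MIN_WORD_LEN	4

	/* Same arithmetic as get_xfer_len_in_words(), minus the TRANS_LEN_MSK clamp. */
	static unsigned int words_for(unsigned int len_bytes, unsigned int bpw)
	{
		if (!(bpw % MIN_WORD_LEN))
			return len_bytes * BITS_PER_BYTE / bpw;
		/* odd word sizes occupy (bpw/8 + 1) bytes per word */
		return len_bytes / (bpw / BITS_PER_BYTE + 1);
	}

	int main(void)
	{
		printf("%u\n", words_for(10, 8));	/* 10 words, 1 byte each */
		printf("%u\n", words_for(10, 16));	/* 5 words, 2 bytes each */
		printf("%u\n", words_for(10, 14));	/* 5 words, padded to 2 bytes */
		return 0;
	}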
> +
> static bool geni_can_dma(struct spi_controller *ctlr,
> struct spi_device *slv, struct spi_transfer *xfer)
> {
> struct spi_geni_master *mas = spi_master_get_devdata(slv->master);
> + u32 len, fifo_size;
>
> - /*
> - * Return true if transfer needs to be mapped prior to
> - * calling transfer_one which is the case only for GPI_DMA.
> - * For SE_DMA mode, map/unmap is done in geni_se_*x_dma_prep.
> - */
> - return mas->cur_xfer_mode == GENI_GPI_DMA;
> + if (mas->cur_xfer_mode == GENI_GPI_DMA)
> + return true;
> +
> + len = get_xfer_len_in_words(xfer, mas);
> + fifo_size = mas->tx_fifo_depth * mas->fifo_width_bits / mas->cur_bits_per_word;
> +
> + if (len > fifo_size)
> + return true;
> + else
> + return false;
> }
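For anyone wondering where the map/unmap went: as far as I remember the
core flow in drivers/spi/spi.c, once can_dma() returns true the message is
mapped before transfer_one() is called and unmapped again when the message
is finalized, which is also why the unprep calls in the timeout path above
can be dropped. A trimmed, from-memory approximation (not a verbatim copy
of __spi_map_msg(); error handling, dummy-buffer handling and the tx_dev/
rx_dev lookup omitted):

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		if (xfer->tx_buf)	/* builds xfer->tx_sg via dma_map_sg() */
			spi_map_buf(ctlr, tx_dev, &xfer->tx_sg,
				    (void *)xfer->tx_buf, xfer->len,
				    DMA_TO_DEVICE);
		if (xfer->rx_buf)	/* builds xfer->rx_sg */
			spi_map_buf(ctlr, rx_dev, &xfer->rx_sg,
				    xfer->rx_buf, xfer->len,
				    DMA_FROM_DEVICE);
	}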
>
> static int spi_geni_prepare_message(struct spi_master *spi,
> @@ -772,7 +787,7 @@ static int setup_se_xfer(struct spi_transfer *xfer,
> u16 mode, struct spi_master *spi)
> {
> u32 m_cmd = 0;
> - u32 len, fifo_size;
> + u32 len;
> struct geni_se *se = &mas->se;
> int ret;
>
> @@ -804,11 +819,7 @@ static int setup_se_xfer(struct spi_transfer *xfer,
> mas->tx_rem_bytes = 0;
> mas->rx_rem_bytes = 0;
>
> - if (!(mas->cur_bits_per_word % MIN_WORD_LEN))
> - len = xfer->len * BITS_PER_BYTE / mas->cur_bits_per_word;
> - else
> - len = xfer->len / (mas->cur_bits_per_word / BITS_PER_BYTE + 1);
> - len &= TRANS_LEN_MSK;
> + len = get_xfer_len_in_words(xfer, mas);
>
> mas->cur_xfer = xfer;
> if (xfer->tx_buf) {
> @@ -823,9 +834,20 @@ static int setup_se_xfer(struct spi_transfer *xfer,
> mas->rx_rem_bytes = xfer->len;
> }
>
> - /* Select transfer mode based on transfer length */
> - fifo_size = mas->tx_fifo_depth * mas->fifo_width_bits / mas->cur_bits_per_word;
> - mas->cur_xfer_mode = (len <= fifo_size) ? GENI_SE_FIFO : GENI_SE_DMA;
> + /*
> + * Select DMA mode if sgt are present; and with only 1 entry
> + * This is not a serious limitation because the xfer buffers are
> +	 * expected to fit into 1 entry almost always, and if any
> + * doesn't for any reason we fall back to FIFO mode anyway
> + */
> + if (!xfer->tx_sg.nents && !xfer->rx_sg.nents)
> + mas->cur_xfer_mode = GENI_SE_FIFO;
> + else if (xfer->tx_sg.nents > 1 || xfer->rx_sg.nents > 1) {
> + dev_warn_once(mas->dev, "Doing FIFO, cannot handle tx_nents-%d, rx_nents-%d\n",
> + xfer->tx_sg.nents, xfer->rx_sg.nents);
> + mas->cur_xfer_mode = GENI_SE_FIFO;
> + } else
> + mas->cur_xfer_mode = GENI_SE_DMA;
> geni_se_select_mode(se, mas->cur_xfer_mode);
>
> /*
> @@ -836,35 +858,17 @@ static int setup_se_xfer(struct spi_transfer *xfer,
> geni_se_setup_m_cmd(se, m_cmd, FRAGMENTATION);
>
> if (mas->cur_xfer_mode == GENI_SE_DMA) {
> - if (m_cmd & SPI_RX_ONLY) {
> - ret = geni_se_rx_dma_prep(se, xfer->rx_buf,
> - xfer->len, &mas->rx_se_dma);
> - if (ret) {
> - dev_err(mas->dev, "Failed to setup Rx dma %d\n", ret);
> - mas->rx_se_dma = 0;
> - goto unlock_and_return;
> - }
> - }
> - if (m_cmd & SPI_TX_ONLY) {
> - ret = geni_se_tx_dma_prep(se, (void *)xfer->tx_buf,
> - xfer->len, &mas->tx_se_dma);
> - if (ret) {
> - dev_err(mas->dev, "Failed to setup Tx dma %d\n", ret);
> - mas->tx_se_dma = 0;
> - if (m_cmd & SPI_RX_ONLY) {
> - /* Unmap rx buffer if duplex transfer */
> - geni_se_rx_dma_unprep(se, mas->rx_se_dma, xfer->len);
> - mas->rx_se_dma = 0;
> - }
> - goto unlock_and_return;
> - }
> - }
> + if (m_cmd & SPI_RX_ONLY)
> + geni_se_rx_init_dma(se, sg_dma_address(xfer->rx_sg.sgl),
> + sg_dma_len(xfer->rx_sg.sgl));
> + if (m_cmd & SPI_TX_ONLY)
> + geni_se_tx_init_dma(se, sg_dma_address(xfer->tx_sg.sgl),
> + sg_dma_len(xfer->tx_sg.sgl));
> } else if (m_cmd & SPI_TX_ONLY) {
> if (geni_spi_handle_tx(mas))
> writel(mas->tx_wm, se->base + SE_GENI_TX_WATERMARK_REG);
> }
>
> -unlock_and_return:
> spin_unlock_irq(&mas->lock);
> return ret;
> }
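The single-entry assumption earlier in this function is also why plain
sg_dma_address()/sg_dma_len() on ->sgl is enough here. Purely as an
illustration (setup_one_segment() is made up, not a suggestion to change
anything), hardware that could chain descriptors would walk the mapped
table along these lines:

	struct scatterlist *sg;
	int i;

	for_each_sg(xfer->tx_sg.sgl, sg, xfer->tx_sg.nents, i)
		setup_one_segment(sg_dma_address(sg), sg_dma_len(sg));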
> @@ -965,14 +969,6 @@ static irqreturn_t geni_spi_isr(int irq, void *data)
> if (dma_rx_status & RX_RESET_DONE)
> complete(&mas->rx_reset_done);
> if (!mas->tx_rem_bytes && !mas->rx_rem_bytes && xfer) {
> - if (xfer->tx_buf && mas->tx_se_dma) {
> - geni_se_tx_dma_unprep(se, mas->tx_se_dma, xfer->len);
> - mas->tx_se_dma = 0;
> - }
> - if (xfer->rx_buf && mas->rx_se_dma) {
> - geni_se_rx_dma_unprep(se, mas->rx_se_dma, xfer->len);
> - mas->rx_se_dma = 0;
> - }
> spi_finalize_current_transfer(spi);
> mas->cur_xfer = NULL;
> }
> @@ -1057,6 +1053,7 @@ static int spi_geni_probe(struct platform_device *pdev)
> spi->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
> spi->num_chipselect = 4;
> spi->max_speed_hz = 50000000;
> + spi->max_dma_len = 0xffff0; /* 24 bits for tx/rx dma length */
> spi->prepare_message = spi_geni_prepare_message;
> spi->transfer_one = spi_geni_transfer_one;
> spi->can_dma = geni_can_dma;
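On max_dma_len: if I remember spi_map_buf() right, the core uses it to cap
how long each sg entry may be when it builds the tables for a regular
(non-vmalloc) buffer, roughly (from memory, worth double checking against
drivers/spi/spi.c):

	desc_len = min_t(size_t, ctlr->max_dma_len, dma_get_max_seg_size(dev));
	sgs = DIV_ROUND_UP(len, desc_len);

so the value chosen here bounds the length handed to a single SE DMA
descriptor.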