From: Andy Shevchenko
To: Mans Rullgard, Viresh Kumar, Dan Williams, Vinod Koul, dmaengine@vger.kernel.org, linux-kernel@vger.kernel.org
Subject: Re: [PATCH v2 2/2] dmaengine: dw: fix cyclic transfer callbacks
Date: Mon, 11 Jan 2016 17:04:04 +0200
Message-ID: <1452524644.26146.34.camel@linux.intel.com>
In-Reply-To: <1452517469-26922-3-git-send-email-mans@mansr.com>
References: <1452517469-26922-1-git-send-email-mans@mansr.com> <1452517469-26922-3-git-send-email-mans@mansr.com>
Organization: Intel Finland Oy

On Mon, 2016-01-11 at 13:04 +0000, Mans Rullgard wrote:
> Cyclic transfer callbacks rely on block completion interrupts which were
> disabled in commit ff7b05f29fd4 ("dmaengine/dw_dmac: Don't handle block
> interrupts").  This re-enables block interrupts so the cyclic callbacks
> can work.  Other transfer types are not affected as they set the INT_EN
> bit only on the last block.
>
> Fixes: ff7b05f29fd4 ("dmaengine/dw_dmac: Don't handle block interrupts")
> Signed-off-by: Mans Rullgard

How did you test that?

From my understanding, the custom API that does cyclic transfers prepares a
set of descriptors per period, which at the end of the transfer will generate
an XFER interrupt. The next period will go the same way.

Maybe I missed something.
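(For illustration only: a stand-alone sketch of the ring-of-periods model the
patch relies on, with made-up types and field names rather than the actual
dw_dmac structures. Each period is one descriptor in a closed LLP ring with
its interrupt-enable bit set, so it is the completion of each block, not of
the whole transfer, that can drive the period callback.)

#include <stdio.h>

/* Hypothetical descriptor: one node per period, linked into a ring so the
 * controller keeps looping without CPU intervention. */
struct period_desc {
	unsigned int src;		/* start of this period inside the buffer */
	unsigned int len;		/* period length in bytes */
	int int_en;			/* raise an interrupt when this block completes */
	struct period_desc *llp;	/* next descriptor in the ring */
};

static void build_cyclic_ring(struct period_desc *d, unsigned int periods,
			      unsigned int buf_addr, unsigned int period_len)
{
	unsigned int i;

	for (i = 0; i < periods; i++) {
		d[i].src = buf_addr + i * period_len;
		d[i].len = period_len;
		d[i].int_en = 1;			/* every block signals completion */
		d[i].llp = &d[(i + 1) % periods];	/* last node wraps back to the first */
	}
}

int main(void)
{
	struct period_desc ring[4];
	struct period_desc *cur = ring;
	int i;

	build_cyclic_ring(ring, 4, 0x1000, 4096);

	/* Walk the ring twice: the transfer never ends on its own, so only
	 * per-block (per-period) events can trigger the period callback. */
	for (i = 0; i < 8; i++) {
		printf("period callback, block at 0x%x\n", cur->src);
		cur = cur->llp;
	}
	return 0;
}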
> ---
> Changes:
> - new patch
> ---
>  drivers/dma/dw/core.c | 21 +++++++++++++++------
>  1 file changed, 15 insertions(+), 6 deletions(-)
>
> diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
> index af2b92f8501e..b92662722404 100644
> --- a/drivers/dma/dw/core.c
> +++ b/drivers/dma/dw/core.c
> @@ -156,6 +156,7 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
>  
>  	/* Enable interrupts */
>  	channel_set_bit(dw, MASK.XFER, dwc->mask);
> +	channel_set_bit(dw, MASK.BLOCK, dwc->mask);
>  	channel_set_bit(dw, MASK.ERROR, dwc->mask);
>  
>  	dwc->initialized = true;
> @@ -536,16 +537,17 @@ EXPORT_SYMBOL(dw_dma_get_dst_addr);
>  
>  /* Called with dwc->lock held and all DMAC interrupts disabled */
>  static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
> -		u32 status_err, u32 status_xfer)
> +		u32 status_block, u32 status_err, u32 status_xfer)
>  {
>  	unsigned long flags;
>  
> -	if (dwc->mask) {
> +	if (status_block & dwc->mask) {
>  		void (*callback)(void *param);
>  		void *callback_param;
>  
>  		dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
>  				channel_readl(dwc, LLP));
> +		dma_writel(dw, CLEAR.BLOCK, dwc->mask);
>  
>  		callback = dwc->cdesc->period_callback;
>  		callback_param = dwc->cdesc->period_callback_param;
> @@ -577,6 +579,7 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
>  		channel_writel(dwc, CTL_LO, 0);
>  		channel_writel(dwc, CTL_HI, 0);
>  
> +		dma_writel(dw, CLEAR.BLOCK, dwc->mask);
>  		dma_writel(dw, CLEAR.ERROR, dwc->mask);
>  		dma_writel(dw, CLEAR.XFER, dwc->mask);
>  
> @@ -593,10 +596,12 @@ static void dw_dma_tasklet(unsigned long data)
>  {
>  	struct dw_dma *dw = (struct dw_dma *)data;
>  	struct dw_dma_chan *dwc;
> +	u32 status_block;
>  	u32 status_xfer;
>  	u32 status_err;
>  	int i;
>  
> +	status_block = dma_readl(dw, RAW.BLOCK);
>  	status_xfer = dma_readl(dw, RAW.XFER);
>  	status_err = dma_readl(dw, RAW.ERROR);
>  
> @@ -605,7 +610,8 @@ static void dw_dma_tasklet(unsigned long data)
>  	for (i = 0; i < dw->dma.chancnt; i++) {
>  		dwc = &dw->chan[i];
>  		if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
> -			dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
> +			dwc_handle_cyclic(dw, dwc, status_block, status_err,
> +					  status_xfer);
>  		else if (status_err & (1 << i))
>  			dwc_handle_error(dw, dwc);
>  		else if (status_xfer & (1 << i))
> @@ -616,6 +622,7 @@ static void dw_dma_tasklet(unsigned long data)
>  	 * Re-enable interrupts.
>  	 */
>  	channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
> +	channel_set_bit(dw, MASK.BLOCK, dw->all_chan_mask);
>  	channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
>  }
>  
> @@ -635,6 +642,7 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
>  	 * softirq handler.
>  	 */
>  	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
> +	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
>  	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
>  
>  	status = dma_readl(dw, STATUS_INT);
> @@ -645,6 +653,7 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
>  
>  		/* Try to recover */
>  		channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
> +		channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
>  		channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
>  		channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
>  		channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
> @@ -1111,6 +1120,7 @@ static void dw_dma_off(struct dw_dma *dw)
>  	dma_writel(dw, CFG, 0);
>  
>  	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
> +	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
>  	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
>  	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
>  	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
> @@ -1216,6 +1226,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
>  
>  	/* Disable interrupts */
>  	channel_clear_bit(dw, MASK.XFER, dwc->mask);
> +	channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
>  	channel_clear_bit(dw, MASK.ERROR, dwc->mask);
>  
>  	spin_unlock_irqrestore(&dwc->lock, flags);
> @@ -1458,6 +1469,7 @@ void dw_dma_cyclic_free(struct dma_chan *chan)
>  
>  	dwc_chan_disable(dw, dwc);
>  
> +	dma_writel(dw, CLEAR.BLOCK, dwc->mask);
>  	dma_writel(dw, CLEAR.ERROR, dwc->mask);
>  	dma_writel(dw, CLEAR.XFER, dwc->mask);
>  
> @@ -1546,9 +1558,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
>  	/* Force dma off, just in case */
>  	dw_dma_off(dw);
>  
> -	/* Disable BLOCK interrupts as well */
> -	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
> -
>  	/* Create a pool of consistent memory blocks for hardware descriptors */
>  	dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev,
>  					 sizeof(struct dw_desc), 4, 0);

-- 
Andy Shevchenko
Intel Finland Oy