Merge tag 'dmaengine-fix-5.17' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine

Pull dmaengine fixes from Vinod Koul:
 "A bunch of driver fixes for:

   - ptdma error handling in init

   - lock fix in at_xdmac

   - error path and error num fix for sh dma

   - pm balance fix for stm32"

* tag 'dmaengine-fix-5.17' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine:
  dmaengine: shdma: Fix runtime PM imbalance on error
  dmaengine: sh: rcar-dmac: Check for error num after dma_set_max_seg_size
  dmaengine: stm32-dmamux: Fix PM disable depth imbalance in stm32_dmamux_probe
  dmaengine: sh: rcar-dmac: Check for error num after setting mask
  dmaengine: at_xdmac: Fix missing unlock in at_xdmac_tasklet()
  dmaengine: ptdma: Fix the error handling path in pt_core_init()
This commit is contained in:
Linus Torvalds
2022-02-20 11:30:18 -08:00
5 changed files with 25 additions and 13 deletions

View File

@@ -1681,8 +1681,10 @@ static void at_xdmac_tasklet(struct tasklet_struct *t)
 		__func__, atchan->irq_status);

 	if (!(atchan->irq_status & AT_XDMAC_CIS_LIS) &&
-	    !(atchan->irq_status & error_mask))
+	    !(atchan->irq_status & error_mask)) {
+		spin_unlock_irq(&atchan->lock);
 		return;
+	}

 	if (atchan->irq_status & error_mask)
 		at_xdmac_handle_error(atchan);

View File

@@ -207,7 +207,7 @@ int pt_core_init(struct pt_device *pt)
 	if (!cmd_q->qbase) {
 		dev_err(dev, "unable to allocate command queue\n");
 		ret = -ENOMEM;
-		goto e_dma_alloc;
+		goto e_destroy_pool;
 	}

 	cmd_q->qidx = 0;
cmd_q->qidx = 0; cmd_q->qidx = 0;
@@ -229,8 +229,10 @@ int pt_core_init(struct pt_device *pt)
 	/* Request an irq */
 	ret = request_irq(pt->pt_irq, pt_core_irq_handler, 0, dev_name(pt->dev), pt);
-	if (ret)
-		goto e_pool;
+	if (ret) {
+		dev_err(dev, "unable to allocate an IRQ\n");
+		goto e_free_dma;
+	}

 	/* Update the device registers with queue information. */
 	cmd_q->qcontrol &= ~CMD_Q_SIZE;
@@ -250,21 +252,20 @@ int pt_core_init(struct pt_device *pt)
 	/* Register the DMA engine support */
 	ret = pt_dmaengine_register(pt);
 	if (ret)
-		goto e_dmaengine;
+		goto e_free_irq;

 	/* Set up debugfs entries */
 	ptdma_debugfs_setup(pt);

 	return 0;

-e_dmaengine:
+e_free_irq:
 	free_irq(pt->pt_irq, pt);

-e_dma_alloc:
+e_free_dma:
 	dma_free_coherent(dev, cmd_q->qsize, cmd_q->qbase, cmd_q->qbase_dma);

-e_pool:
-	dev_err(dev, "unable to allocate an IRQ\n");
+e_destroy_pool:
 	dma_pool_destroy(pt->cmd_q.dma_pool);

 	return ret;

View File

@@ -1868,8 +1868,13 @@ static int rcar_dmac_probe(struct platform_device *pdev)
 	dmac->dev = &pdev->dev;
 	platform_set_drvdata(pdev, dmac);
-	dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK);
-	dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40));
+	ret = dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK);
+	if (ret)
+		return ret;
+
+	ret = dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40));
+	if (ret)
+		return ret;

 	ret = rcar_dmac_parse_of(&pdev->dev, dmac);
 	if (ret < 0)

View File

@@ -115,8 +115,10 @@ static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx)
 	ret = pm_runtime_get(schan->dev);

 	spin_unlock_irq(&schan->chan_lock);
-	if (ret < 0)
+	if (ret < 0) {
 		dev_err(schan->dev, "%s(): GET = %d\n", __func__, ret);
+		pm_runtime_put(schan->dev);
+	}

 	pm_runtime_barrier(schan->dev);

View File

@@ -292,10 +292,12 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
 	ret = of_dma_router_register(node, stm32_dmamux_route_allocate,
 				     &stm32_dmamux->dmarouter);
 	if (ret)
-		goto err_clk;
+		goto pm_disable;

 	return 0;

+pm_disable:
+	pm_runtime_disable(&pdev->dev);
+
 err_clk:
 	clk_disable_unprepare(stm32_dmamux->clk);