This patch removes a bit of duplicated code by introducing a new
function that implements calculations for DMA copy size, and
prepares for changes to the copy size calculation that will
happen in following patches.

Suggested-by: Vinod Koul <vk...@kernel.org>
Signed-off-by: Andrea Merello <andrea.mere...@gmail.com>
Reviewed-by: Radhey Shyam Pandey <radhey.shyam.pan...@xilinx.com>
---
Changes in v4:
        - introduce this patch in the patch series
Changes in v5:
        None
Changes in v6:
        - 2/7 was basically redoing what is done here. Anticipate
          here the introduction of a local temporary variable
          so that 2/7 just adds new code
        - add dma chan ptr argument to xilinx_dma_calc_copysize()
          to prepare for 2/7
        - introduce max_buffer_len variable in advance, to prepare
          for 4/7
        - reword for above changes
---
 drivers/dma/xilinx/xilinx_dma.c | 39 ++++++++++++++++++++++++++-------
 1 file changed, 31 insertions(+), 8 deletions(-)

diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index c12442312595..2c1db500284f 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -423,6 +423,7 @@ struct xilinx_dma_config {
  * @rxs_clk: DMA s2mm stream clock
  * @nr_channels: Number of channels DMA device supports
  * @chan_id: DMA channel identifier
+ * @max_buffer_len: Max buffer length
  */
 struct xilinx_dma_device {
        void __iomem *regs;
@@ -442,6 +443,7 @@ struct xilinx_dma_device {
        struct clk *rxs_clk;
        u32 nr_channels;
        u32 chan_id;
+       u32 max_buffer_len;
 };
 
 /* Macros */
@@ -957,6 +959,25 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
        return 0;
 }
 
+/**
+ * xilinx_dma_calc_copysize - Calculate the amount of data to copy
+ * @chan: Driver specific DMA channel
+ * @size: Total data that needs to be copied
+ * @done: Amount of data that has been already copied
+ *
+ * Return: Amount of data that has to be copied
+ */
+static int xilinx_dma_calc_copysize(struct xilinx_dma_chan *chan,
+                                   int size, int done)
+{
+       size_t copy;
+
+       copy = min_t(size_t, size - done,
+                    chan->xdev->max_buffer_len);
+
+       return copy;
+}
+
 /**
  * xilinx_dma_tx_status - Get DMA transaction status
  * @dchan: DMA channel
@@ -990,7 +1011,7 @@ static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
                        list_for_each_entry(segment, &desc->segments, node) {
                                hw = &segment->hw;
                                residue += (hw->control - hw->status) &
-                                          XILINX_DMA_MAX_TRANS_LEN;
+                                          chan->xdev->max_buffer_len;
                        }
                }
                spin_unlock_irqrestore(&chan->lock, flags);
@@ -1250,7 +1271,7 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
 
                /* Start the transfer */
                dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
-                               hw->control & XILINX_DMA_MAX_TRANS_LEN);
+                               hw->control & chan->xdev->max_buffer_len);
        }
 
        list_splice_tail_init(&chan->pending_list, &chan->active_list);
@@ -1353,7 +1374,7 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
 
                /* Start the transfer */
                dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
-                              hw->control & XILINX_DMA_MAX_TRANS_LEN);
+                              hw->control & chan->xdev->max_buffer_len);
        }
 
        list_splice_tail_init(&chan->pending_list, &chan->active_list);
@@ -1714,7 +1735,7 @@ xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
        struct xilinx_cdma_tx_segment *segment;
        struct xilinx_cdma_desc_hw *hw;
 
-       if (!len || len > XILINX_DMA_MAX_TRANS_LEN)
+       if (!len || len > chan->xdev->max_buffer_len)
                return NULL;
 
        desc = xilinx_dma_alloc_tx_descriptor(chan);
@@ -1804,8 +1825,8 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
                         * Calculate the maximum number of bytes to transfer,
                         * making sure it is less than the hw limit
                         */
-                       copy = min_t(size_t, sg_dma_len(sg) - sg_used,
-                                    XILINX_DMA_MAX_TRANS_LEN);
+                       copy = xilinx_dma_calc_copysize(chan, sg_dma_len(sg),
+                                                       sg_used);
                        hw = &segment->hw;
 
                        /* Fill in the descriptor */
@@ -1909,8 +1930,8 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
                         * Calculate the maximum number of bytes to transfer,
                         * making sure it is less than the hw limit
                         */
-                       copy = min_t(size_t, period_len - sg_used,
-                                    XILINX_DMA_MAX_TRANS_LEN);
+                       copy = xilinx_dma_calc_copysize(chan, period_len,
+                                                       sg_used);
                        hw = &segment->hw;
                        xilinx_axidma_buf(chan, hw, buf_addr, sg_used,
                                          period_len * i);
@@ -2624,6 +2645,8 @@ static int xilinx_dma_probe(struct platform_device *pdev)
 
        /* Retrieve the DMA engine properties from the device tree */
        xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg");
+       xdev->max_buffer_len = XILINX_DMA_MAX_TRANS_LEN;
+
        if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
                xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma");
 
-- 
2.17.1

Reply via email to