On 05-09-19, 22:06, Radhey Shyam Pandey wrote:
> From: Nicholas Graumann <nick.graum...@gmail.com>
> 
> Introduce a function that can calculate the residue for the IPs that
> support it: AXI DMA and CDMA.
> 
> Signed-off-by: Nicholas Graumann <nick.graum...@gmail.com>
> Signed-off-by: Radhey Shyam Pandey <radhey.shyam.pan...@xilinx.com>
> ---
>  drivers/dma/xilinx/xilinx_dma.c | 75 ++++++++++++++++++++++++++++++-----------
>  1 file changed, 56 insertions(+), 19 deletions(-)
> 
> diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
> index 9909bfb..4094adb 100644
> --- a/drivers/dma/xilinx/xilinx_dma.c
> +++ b/drivers/dma/xilinx/xilinx_dma.c
> @@ -787,6 +787,51 @@ static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
>  }
>  
>  /**
> + * xilinx_dma_get_residue - Compute residue for a given descriptor
> + * @chan: Driver specific dma channel
> + * @desc: dma transaction descriptor
> + *
> + * Return: The number of residue bytes for the descriptor.
> + */
> +static u32 xilinx_dma_get_residue(struct xilinx_dma_chan *chan,
> +                               struct xilinx_dma_tx_descriptor *desc)
> +{
> +     struct xilinx_cdma_tx_segment *cdma_seg;
> +     struct xilinx_axidma_tx_segment *axidma_seg;
> +     struct xilinx_cdma_desc_hw *cdma_hw;
> +     struct xilinx_axidma_desc_hw *axidma_hw;
> +     struct list_head *entry;
> +     u32 residue = 0;
> +
> +     /**

/** opens a kernel-doc comment; for a plain comment inside a function it
should be:

        /*
         * comment...
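
Applied to the comment in question, only the opening marker changes:

        /*
         * VDMA and simple mode do not support residue reporting, so the
         * residue field will always be 0.
         */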

> +      * VDMA and simple mode do not support residue reporting, so the
> +      * residue field will always be 0.
> +      */
> +     if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA || !chan->has_sg)
> +             return residue;

Why not check this in the status callback instead?
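
For instance (an untested sketch, reusing the names from this patch),
xilinx_dma_tx_status() could bail out before taking the lock:

        ret = dma_cookie_status(dchan, cookie, txstate);
        if (ret == DMA_COMPLETE || !txstate)
                return ret;

        /* VDMA and simple mode do not support residue reporting */
        if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA || !chan->has_sg)
                return ret;

        spin_lock_irqsave(&chan->lock, flags);
        ...

dma_cookie_status() has already zeroed txstate->residue at that point, so the
early return still reports a residue of 0, and xilinx_dma_get_residue() stays
free of the special case.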

> +
> +     list_for_each(entry, &desc->segments) {
> +             if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
> +                     cdma_seg = list_entry(entry,
> +                                           struct xilinx_cdma_tx_segment,
> +                                           node);
> +                     cdma_hw = &cdma_seg->hw;
> +                     residue += (cdma_hw->control - cdma_hw->status) &
> +                                chan->xdev->max_buffer_len;
> +             } else {
> +                     axidma_seg = list_entry(entry,
> +                                             struct xilinx_axidma_tx_segment,
> +                                             node);
> +                     axidma_hw = &axidma_seg->hw;
> +                     residue += (axidma_hw->control - axidma_hw->status) &
> +                                chan->xdev->max_buffer_len;
> +             }
> +     }
> +
> +     return residue;
> +}
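
A short comment on the arithmetic would help readers here, e.g. (assuming the
usual descriptor layout, where the low bits of hw->control hold the programmed
transfer length and the low bits of hw->status the transferred byte count):

        /*
         * The low bits of control hold the requested length, the low
         * bits of status the bytes the hardware actually moved; the
         * masked difference is what is still outstanding.
         */
        residue += (cdma_hw->control - cdma_hw->status) &
                   chan->xdev->max_buffer_len;
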
> +
> +/**
>   * xilinx_dma_chan_handle_cyclic - Cyclic dma callback
>   * @chan: Driver specific dma channel
>   * @desc: dma transaction descriptor
> @@ -995,33 +1040,22 @@ static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
>  {
>       struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
>       struct xilinx_dma_tx_descriptor *desc;
> -     struct xilinx_axidma_tx_segment *segment;
> -     struct xilinx_axidma_desc_hw *hw;
>       enum dma_status ret;
>       unsigned long flags;
> -     u32 residue = 0;
>  
>       ret = dma_cookie_status(dchan, cookie, txstate);
>       if (ret == DMA_COMPLETE || !txstate)
>               return ret;
>  
> -     if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
> -             spin_lock_irqsave(&chan->lock, flags);
> +     spin_lock_irqsave(&chan->lock, flags);
>  
> -             desc = list_last_entry(&chan->active_list,
> -                                    struct xilinx_dma_tx_descriptor, node);
> -             if (chan->has_sg) {
> -                     list_for_each_entry(segment, &desc->segments, node) {
> -                             hw = &segment->hw;
> -                             residue += (hw->control - hw->status) &
> -                                        chan->xdev->max_buffer_len;
> -                     }
> -             }
> -             spin_unlock_irqrestore(&chan->lock, flags);
> +     desc = list_last_entry(&chan->active_list,
> +                            struct xilinx_dma_tx_descriptor, node);
> +     chan->residue = xilinx_dma_get_residue(chan, desc);
>  
> -             chan->residue = residue;
> -             dma_set_residue(txstate, chan->residue);
> -     }
> +     spin_unlock_irqrestore(&chan->lock, flags);
> +
> +     dma_set_residue(txstate, chan->residue);
>  
>       return ret;
>  }
> @@ -2701,12 +2735,15 @@ static int xilinx_dma_probe(struct platform_device *pdev)
>                                         xilinx_dma_prep_dma_cyclic;
>               xdev->common.device_prep_interleaved_dma =
>                                       xilinx_dma_prep_interleaved;
> -             /* Residue calculation is supported by only AXI DMA */
> +             /* Residue calculation is supported only by AXI DMA and CDMA */
>               xdev->common.residue_granularity =
>                                         DMA_RESIDUE_GRANULARITY_SEGMENT;
>       } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
>               dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
>               xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
> +             /* Residue calculation is supported only by AXI DMA and CDMA */
> +             xdev->common.residue_granularity =
> +                                     DMA_RESIDUE_GRANULARITY_SEGMENT;
>       } else {
>               xdev->common.device_prep_interleaved_dma =
>                               xilinx_vdma_dma_prep_interleaved;
> -- 
> 2.7.4

-- 
~Vinod
