On Fri, May 15, 2020 at 01:47:49PM +0300, Serge Semin wrote:
> Each channel of a DMA controller may have a limited burst transaction
> length (the number of IO operations performed at once in a single
> DMA client request). This parameter can be used to set up optimal
> DMA Tx/Rx data level values. In order to avoid Tx buffer overrun,
> the DMA Tx level can be set to the FIFO depth minus the maximum
> burst transaction length. To prevent Rx buffer underflow, the DMA
> Rx level should be set to the maximum burst transaction length.
> This commit sets up the DMA channels and the DW SPI DMA Tx/Rx
> levels in accordance with these rules.
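
Just to make the rule concrete with numbers: with, say, a 16-entry FIFO
and a maximum burst of 8, DMATDLR would be set to 16 - 8 = 8 and DMARDLR
to 8 - 1 = 7.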

It's a good one, but see my comments below.

I think this patch should go before the previous one (and without the
changes regarding FIFO length).

> +static void mid_spi_maxburst_init(struct dw_spi *dws)
> +{
> +     struct dma_slave_caps caps;
> +     u32 max_burst, def_burst;
> +     int ret;
> +
> +     def_burst = dws->fifo_len / 2;
> +
> +     ret = dma_get_slave_caps(dws->rxchan, &caps);
> +     if (!ret && caps.max_burst)
> +             max_burst = caps.max_burst;
> +     else
> +             max_burst = RX_BURST_LEVEL;
> +

> +     dws->rxburst = (def_burst > max_burst) ? max_burst : def_burst;

min() ?
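
I.e. something like this (just a sketch; both operands are u32, so the
plain min() macro applies):

	dws->rxburst = min(def_burst, max_burst);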

> +
> +     ret = dma_get_slave_caps(dws->txchan, &caps);
> +     if (!ret && caps.max_burst)
> +             max_burst = caps.max_burst;
> +     else
> +             max_burst = TX_BURST_LEVEL;
> +

> +     dws->txburst = (def_burst > max_burst) ? max_burst : def_burst;

Ditto.

> +}
> +
>  static int mid_spi_dma_init_mfld(struct device *dev, struct dw_spi *dws)
>  {
>       struct dw_dma_slave slave = {0};
> @@ -67,6 +92,8 @@ static int mid_spi_dma_init_mfld(struct device *dev, struct dw_spi *dws)
>       dws->master->dma_rx = dws->rxchan;
>       dws->master->dma_tx = dws->txchan;
>  
> +     mid_spi_maxburst_init(dws);
> +
>       return 0;
>  
>  free_rxchan:
> @@ -92,6 +119,8 @@ static int mid_spi_dma_init_generic(struct device *dev, struct dw_spi *dws)
>       dws->master->dma_rx = dws->rxchan;
>       dws->master->dma_tx = dws->txchan;
>  
> +     mid_spi_maxburst_init(dws);
> +
>       return 0;
>  }
>  
> @@ -195,7 +224,7 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_tx(struct dw_spi *dws,
>       memset(&txconf, 0, sizeof(txconf));
>       txconf.direction = DMA_MEM_TO_DEV;
>       txconf.dst_addr = dws->dma_addr;
> -     txconf.dst_maxburst = TX_BURST_LEVEL;
> +     txconf.dst_maxburst = dws->txburst;
>       txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
>       txconf.dst_addr_width = convert_dma_width(dws->n_bytes);
>       txconf.device_fc = false;
> @@ -268,7 +297,7 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws,
>       memset(&rxconf, 0, sizeof(rxconf));
>       rxconf.direction = DMA_DEV_TO_MEM;
>       rxconf.src_addr = dws->dma_addr;
> -     rxconf.src_maxburst = RX_BURST_LEVEL;
> +     rxconf.src_maxburst = dws->rxburst;
>       rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
>       rxconf.src_addr_width = convert_dma_width(dws->n_bytes);
>       rxconf.device_fc = false;
> @@ -293,8 +322,8 @@ static int mid_spi_dma_setup(struct dw_spi *dws, struct spi_transfer *xfer)
>  {
>       u16 imr = 0, dma_ctrl = 0;
>  
> -     dw_writel(dws, DW_SPI_DMARDLR, RX_BURST_LEVEL - 1);
> -     dw_writel(dws, DW_SPI_DMATDLR, dws->fifo_len - TX_BURST_LEVEL);
> +     dw_writel(dws, DW_SPI_DMARDLR, dws->rxburst - 1);
> +     dw_writel(dws, DW_SPI_DMATDLR, dws->fifo_len - dws->txburst);
>  
>       if (xfer->tx_buf) {
>               dma_ctrl |= SPI_DMA_TDMAE;

...

>       /* DMA info */
>       struct dma_chan         *txchan;
> +     u32                     txburst;
>       struct dma_chan         *rxchan;
> +     u32                     rxburst;

Keep the two u32 members together; on 64-bit architectures the pointers
need natural alignment, so interleaving a u32 between them forces the
ABI to insert padding.
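
I.e. something like this (just a sketch of the suggested ordering, which
packs without holes on 64-bit):

	/* DMA info */
	struct dma_chan		*txchan;
	struct dma_chan		*rxchan;
	u32			txburst;
	u32			rxburst;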

-- 
With Best Regards,
Andy Shevchenko

