It isn't good to have numeric literals in the code, especially if
there are multiple of them and they are related. Moreover, in the
current implementation the Tx DMA transfer activation level isn't
optimal: it's hardwired to the 16-32 bytes level, while it's better
to keep the SPI Tx FIFO as full as possible until all available data
has been submitted. So let's introduce DMA burst level
parametrization macros with optimal values: issue an Rx transfer if
at least 16 bytes are available in the buffer, and execute a Tx
transaction if room for at least 16 bytes has opened up in the SPI
Tx FIFO.
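
For reference, the watermark programming this boils down to is
sketched below (illustrative only; the 32-entry Tx FIFO assumed in
the comments is hypothetical, the driver takes the real depth from
dws->fifo_len):

	/* Raise the Rx DMA request once at least RX_BURST_LEVEL entries
	 * sit in the Rx FIFO: DMARDLR holds the threshold minus one.
	 */
	dw_writel(dws, DW_SPI_DMARDLR, RX_BURST_LEVEL - 1);		/* 15 */

	/* Raise the Tx DMA request once at least TX_BURST_LEVEL entries
	 * are free in the Tx FIFO: DMATDLR holds the fill level at or
	 * below which the request is asserted.
	 */
	dw_writel(dws, DW_SPI_DMATDLR, dws->fifo_len - TX_BURST_LEVEL);	/* 32 - 16 = 16 */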

Co-developed-by: Georgy Vlasov <[email protected]>
Signed-off-by: Georgy Vlasov <[email protected]>
Co-developed-by: Ramil Zaripov <[email protected]>
Signed-off-by: Ramil Zaripov <[email protected]>
Signed-off-by: Serge Semin <[email protected]>
Cc: Alexey Malahov <[email protected]>
Cc: Thomas Bogendoerfer <[email protected]>
Cc: Paul Burton <[email protected]>
Cc: Ralf Baechle <[email protected]>
Cc: Arnd Bergmann <[email protected]>
Cc: Allison Randal <[email protected]>
Cc: Andy Shevchenko <[email protected]>
Cc: Gareth Williams <[email protected]>
Cc: Rob Herring <[email protected]>
Cc: [email protected]
Cc: [email protected]
---
 drivers/spi/spi-dw-mid.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
index ca8813a693d8..e43914dbcadf 100644
--- a/drivers/spi/spi-dw-mid.c
+++ b/drivers/spi/spi-dw-mid.c
@@ -20,7 +20,9 @@
 
 #define WAIT_RETRIES   5
 #define RX_BUSY                0
+#define RX_BURST_LEVEL 16
 #define TX_BUSY                1
+#define TX_BURST_LEVEL 16
 
 static bool mid_spi_dma_chan_filter(struct dma_chan *chan, void *param)
 {
@@ -193,7 +195,7 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_tx(struct dw_spi *dws,
        memset(&txconf, 0, sizeof(txconf));
        txconf.direction = DMA_MEM_TO_DEV;
        txconf.dst_addr = dws->dma_addr;
-       txconf.dst_maxburst = 16;
+       txconf.dst_maxburst = TX_BURST_LEVEL;
        txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        txconf.dst_addr_width = convert_dma_width(dws->n_bytes);
        txconf.device_fc = false;
@@ -266,7 +268,7 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws,
        memset(&rxconf, 0, sizeof(rxconf));
        rxconf.direction = DMA_DEV_TO_MEM;
        rxconf.src_addr = dws->dma_addr;
-       rxconf.src_maxburst = 16;
+       rxconf.src_maxburst = RX_BURST_LEVEL;
        rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        rxconf.src_addr_width = convert_dma_width(dws->n_bytes);
        rxconf.device_fc = false;
@@ -291,8 +293,8 @@ static int mid_spi_dma_setup(struct dw_spi *dws, struct spi_transfer *xfer)
 {
        u16 imr = 0, dma_ctrl = 0;
 
-       dw_writel(dws, DW_SPI_DMARDLR, 0xf);
-       dw_writel(dws, DW_SPI_DMATDLR, 0x10);
+       dw_writel(dws, DW_SPI_DMARDLR, RX_BURST_LEVEL - 1);
+       dw_writel(dws, DW_SPI_DMATDLR, dws->fifo_len - TX_BURST_LEVEL);
 
        if (xfer->tx_buf) {
                dma_ctrl |= SPI_DMA_TDMAE;
-- 
2.25.1
