Check return values in prepare_dma() and s3c64xx_spi_config() and
propagate errors upwards.

Signed-off-by: Łukasz Stelmach <l.stelm...@samsung.com>
---
 drivers/spi/spi-s3c64xx.c | 47 ++++++++++++++++++++++++++++++++-------
 1 file changed, 39 insertions(+), 8 deletions(-)

diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 6381a7557def..02de734b8ab1 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -269,12 +269,13 @@ static void s3c64xx_spi_dmacb(void *data)
        spin_unlock_irqrestore(&sdd->lock, flags);
 }
 
-static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
+static int prepare_dma(struct s3c64xx_spi_dma_data *dma,
                        struct sg_table *sgt)
 {
        struct s3c64xx_spi_driver_data *sdd;
        struct dma_slave_config config;
        struct dma_async_tx_descriptor *desc;
+       int ret;
 
        memset(&config, 0, sizeof(config));
 
@@ -298,12 +299,24 @@ static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
 
        desc = dmaengine_prep_slave_sg(dma->ch, sgt->sgl, sgt->nents,
                                       dma->direction, DMA_PREP_INTERRUPT);
+       if (!desc) {
+               dev_err(&sdd->pdev->dev, "unable to prepare %s scatterlist",
+                       dma->direction == DMA_DEV_TO_MEM ? "rx" : "tx");
+               return -ENOMEM;
+       }
 
        desc->callback = s3c64xx_spi_dmacb;
        desc->callback_param = dma;
 
        dma->cookie = dmaengine_submit(desc);
+       ret = dma_submit_error(dma->cookie);
+       if (ret) {
+               dev_err(&sdd->pdev->dev, "DMA submission failed");
+               return -EIO;
+       }
+
        dma_async_issue_pending(dma->ch);
+       return 0;
 }
 
 static void s3c64xx_spi_set_cs(struct spi_device *spi, bool enable)
@@ -353,11 +366,12 @@ static bool s3c64xx_spi_can_dma(struct spi_master *master,
        return xfer->len > (FIFO_LVL_MASK(sdd) >> 1) + 1;
 }
 
-static void s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
+static int s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
                                    struct spi_transfer *xfer, int dma_mode)
 {
        void __iomem *regs = sdd->regs;
        u32 modecfg, chcfg;
+       int ret = 0;
 
        modecfg = readl(regs + S3C64XX_SPI_MODE_CFG);
        modecfg &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
@@ -383,7 +397,7 @@ static void s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
                chcfg |= S3C64XX_SPI_CH_TXCH_ON;
                if (dma_mode) {
                        modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
-                       prepare_dma(&sdd->tx_dma, &xfer->tx_sg);
+                       ret = prepare_dma(&sdd->tx_dma, &xfer->tx_sg);
                } else {
                        switch (sdd->cur_bpw) {
                        case 32:
@@ -415,12 +429,17 @@ static void s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
                        writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
                                        | S3C64XX_SPI_PACKET_CNT_EN,
                                        regs + S3C64XX_SPI_PACKET_CNT);
-                       prepare_dma(&sdd->rx_dma, &xfer->rx_sg);
+                       ret = prepare_dma(&sdd->rx_dma, &xfer->rx_sg);
                }
        }
 
+       if (ret)
+               return ret;
+
        writel(modecfg, regs + S3C64XX_SPI_MODE_CFG);
        writel(chcfg, regs + S3C64XX_SPI_CH_CFG);
+
+       return 0;
 }
 
 static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
@@ -553,9 +572,10 @@ static int s3c64xx_wait_for_pio(struct s3c64xx_spi_driver_data *sdd,
        return 0;
 }
 
-static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
+static int s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
 {
        void __iomem *regs = sdd->regs;
+       int ret;
        u32 val;
 
        /* Disable Clock */
@@ -603,7 +623,9 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
 
        if (sdd->port_conf->clk_from_cmu) {
                /* The src_clk clock is divided internally by 2 */
-               clk_set_rate(sdd->src_clk, sdd->cur_speed * 2);
+               ret = clk_set_rate(sdd->src_clk, sdd->cur_speed * 2);
+               if (ret)
+                       return ret;
        } else {
                /* Configure Clock */
                val = readl(regs + S3C64XX_SPI_CLK_CFG);
@@ -617,6 +639,8 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
                val |= S3C64XX_SPI_ENCLK_ENABLE;
                writel(val, regs + S3C64XX_SPI_CLK_CFG);
        }
+
+       return 0;
 }
 
 #define XFER_DMAADDR_INVALID DMA_BIT_MASK(32)
@@ -659,7 +683,9 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
                sdd->cur_bpw = bpw;
                sdd->cur_speed = speed;
                sdd->cur_mode = spi->mode;
-               s3c64xx_spi_config(sdd);
+               status = s3c64xx_spi_config(sdd);
+               if (status)
+                       return status;
        }
 
        if (!is_polling(sdd) && (xfer->len > fifo_len) &&
@@ -686,10 +712,15 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
                /* Start the signals */
                s3c64xx_spi_set_cs(spi, true);
 
-               s3c64xx_enable_datapath(sdd, xfer, use_dma);
+               status = s3c64xx_enable_datapath(sdd, xfer, use_dma);
 
                spin_unlock_irqrestore(&sdd->lock, flags);
 
+               if (status) {
+                       dev_err(&spi->dev, "failed to enable data path for transfer: %d\n", status);
+                       break;
+               }
+
                if (use_dma)
                        status = s3c64xx_wait_for_dma(sdd, xfer);
                else
-- 
2.26.2

Reply via email to