While preparing the sunxi-mmc code for posting the next version, and also
debugging the cubietruck wifi issues to make sure that they are not caused
by the sunxi-mmc code, I noticed 2 problems:

1) We have a race in our irq handling. When a request completes, we queue a
tasklet to finish it. If another irq (e.g. an sdio irq) then comes in before
the tasklet has run, the status bits still indicate that we should finish the
request, and we queue the tasklet a second time.

We have a check for this in the tasklet, but it is loud (it logs an error),
and worse: if a higher layer queues a new request before the 2nd run of the
tasklet, the tasklet will mark that new request as done, even though it is
still being processed.
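
To make the race concrete, the pre-patch flow roughly looks like this (a
condensed sketch of the existing code, not a literal copy; status
reading/acking is omitted):

  /* irq handler: only looks at the status bits, so a second irq with
   * the "done" bits still set schedules the tasklet again */
  static irqreturn_t sunxi_mmc_irq(int irq, void *dev_id)
  {
          struct sunxi_mmc_host *host = dev_id;
          bool finalize;

          /* ... read and ack interrupt status, compute finalize ... */
          if (finalize)
                  tasklet_schedule(&host->tasklet);
          return IRQ_HANDLED;
  }

  /* tasklet: finalizes whatever host->mrq points to when it runs, which
   * on a second run may already be a new request queued by the core */
  static void sunxi_mmc_tasklet(unsigned long data)
  {
          struct sunxi_mmc_host *host = (struct sunxi_mmc_host *)data;

          sunxi_mmc_finalize_request(host);
  }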

2) 99.9% of the time we don't need the tasklet at all, since
mmc_request_done() is safe to call from interrupt context. We only need a
tasklet when we're doing error handling (sending a manual stop), because then
we poll the controller for the error handling to complete.
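
With the patch, normal completion therefore happens directly in the interrupt
handler, along these lines (condensed from the hunks below):

  /* inside sunxi_mmc_irq(), still holding host->lock */
  if (finalize) {
          /* clears host->mrq; returns -EBUSY only when a manual stop
           * (error handling) is needed and the tasklet takes over */
          if (sunxi_mmc_finalize_request(host) == 0)
                  complete = true;
  }

  spin_unlock(&host->lock);

  if (complete)
          mmc_request_done(host->mmc, mrq);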

This patch (which I'm posting standalone now FYI / for review, but which
will be squashed into the next version of the code) solves both problems:
the tasklet is no longer used in the normal path, and when we do need the
tasklet, the mrq pointer, which stores the pending request, still gets set
to NULL, with a pointer to the request that needs error handling stored in a
new manual_stop_mrq pointer, so that the same request can never be finalized
twice.

This patch also makes the locking in sunxi_mmc_request() more robust, so
that no global state is modified without holding the lock.
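
Concretely, sunxi_mmc_request() now checks and updates all shared state
while holding the lock (condensed from the hunk below):

  spin_lock_irqsave(&host->lock, iflags);

  /* refuse a new request while one is still pending, or while the
   * manual stop tasklet still owns a request */
  if (host->mrq || host->manual_stop_mrq) {
          spin_unlock_irqrestore(&host->lock, iflags);
          /* ... unmap dma, complete mrq with -EBUSY ... */
          return;
  }

  host->mrq = mrq;
  /* ... write REG_IMASK, REG_CARG and REG_CMDR while still holding
   * the lock ... */
  spin_unlock_irqrestore(&host->lock, iflags);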

Signed-off-by: Hans de Goede <hdego...@redhat.com>
---
 drivers/mmc/host/sunxi-mmc.c | 138 +++++++++++++++++++++++++++----------------
 1 file changed, 88 insertions(+), 50 deletions(-)

diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index 05e4dfe..a36fea5 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -230,7 +230,7 @@ struct sunxi_mmc_host {
        void __iomem    *reg_base;
 
        spinlock_t      lock;
-       struct tasklet_struct tasklet;
+       struct tasklet_struct manual_stop_tasklet;
 
        /* clock management */
        struct clk      *clk_ahb;
@@ -255,6 +255,7 @@ struct sunxi_mmc_host {
        void            *sg_cpu;
 
        struct mmc_request *mrq;
+       struct mmc_request *manual_stop_mrq;
        u32             ferror;
 };
 
@@ -395,10 +396,10 @@ static enum dma_data_direction sunxi_mmc_get_dma_dir(struct mmc_data *data)
                return DMA_FROM_DEVICE;
 }
 
-static int sunxi_mmc_prepare_dma(struct sunxi_mmc_host *smc_host,
-                                struct mmc_data *data)
+static int sunxi_mmc_map_dma(struct sunxi_mmc_host *smc_host,
+                            struct mmc_data *data)
 {
-       u32 i, dma_len, rval;
+       u32 i, dma_len;
        struct scatterlist *sg;
 
        dma_len = dma_map_sg(mmc_dev(smc_host->mmc), data->sg, data->sg_len,
@@ -417,6 +418,14 @@ static int sunxi_mmc_prepare_dma(struct sunxi_mmc_host *smc_host,
                }
        }
 
+       return 0;
+}
+
+static void sunxi_mmc_start_dma(struct sunxi_mmc_host *smc_host,
+                               struct mmc_data *data)
+{
+       u32 rval;
+
        sunxi_mmc_init_idma_des(smc_host, data);
 
        rval = mci_readl(smc_host, REG_GCTRL);
@@ -432,8 +441,6 @@ static int sunxi_mmc_prepare_dma(struct sunxi_mmc_host *smc_host,
 
        mci_writel(smc_host, REG_DMAC,
                   SDXC_IDMAC_FIX_BURST | SDXC_IDMAC_IDMA_ON);
-
-       return 0;
 }
 
 static void sunxi_mmc_send_manual_stop(struct sunxi_mmc_host *host,
@@ -491,19 +498,13 @@ static void sunxi_mmc_dump_errinfo(struct sunxi_mmc_host *smc_host)
                );
 }
 
-static void sunxi_mmc_finalize_request(struct sunxi_mmc_host *host)
+/* Called in interrupt context! */
+static int sunxi_mmc_finalize_request(struct sunxi_mmc_host *host)
 {
-       struct mmc_request *mrq;
-       unsigned long iflags;
+       struct mmc_request *mrq = host->mrq;
 
-       spin_lock_irqsave(&host->lock, iflags);
-
-       mrq = host->mrq;
-       if (!mrq) {
-               spin_unlock_irqrestore(&host->lock, iflags);
-               dev_err(mmc_dev(host->mmc), "no request to finalize\n");
-               return;
-       }
+       mci_writel(host, REG_IMASK, host->sdio_imask);
+       mci_writel(host, REG_IDIE, 0);
 
        if (host->int_sum & SDXC_INTERRUPT_ERROR_BIT) {
                sunxi_mmc_dump_errinfo(host);
@@ -556,22 +557,22 @@ static void sunxi_mmc_finalize_request(struct sunxi_mmc_host *host)
        host->int_sum = 0;
        host->wait_dma = false;
 
-       spin_unlock_irqrestore(&host->lock, iflags);
-
        if (mrq->data && mrq->data->error) {
-               dev_err(mmc_dev(host->mmc),
-                       "data error, sending stop command\n");
-               sunxi_mmc_send_manual_stop(host, mrq);
+               host->manual_stop_mrq = mrq;
+               tasklet_schedule(&host->manual_stop_tasklet);
+               return -EBUSY;
        }
 
-       mmc_request_done(host->mmc, mrq);
+       return 0;
 }
 
 static irqreturn_t sunxi_mmc_irq(int irq, void *dev_id)
 {
        struct sunxi_mmc_host *host = dev_id;
-       u32 finalize = 0;
-       u32 sdio_int = 0;
+       struct mmc_request *mrq;
+       bool finalize = false;
+       bool complete = false;
+       bool sdio_int = false;
        u32 msk_int;
        u32 idma_int;
 
@@ -583,7 +584,8 @@ static irqreturn_t sunxi_mmc_irq(int irq, void *dev_id)
        dev_dbg(mmc_dev(host->mmc), "irq: rq %p mi %08x idi %08x\n",
                host->mrq, msk_int, idma_int);
 
-       if (host->mrq) {
+       mrq = host->mrq;
+       if (mrq) {
                if (idma_int & SDXC_IDMAC_RECEIVE_INTERRUPT)
                        host->wait_dma = false;
 
@@ -596,27 +598,27 @@ static irqreturn_t sunxi_mmc_irq(int irq, void *dev_id)
                                   host->sdio_imask | SDXC_COMMAND_DONE);
                /* Don't wait for dma on error */
                else if (host->int_sum & SDXC_INTERRUPT_ERROR_BIT)
-                       finalize = 1;
-               else if (host->int_sum & SDXC_INTERRUPT_DONE_BIT &&
+                       finalize = true;
+               else if ((host->int_sum & SDXC_INTERRUPT_DONE_BIT) &&
                                !host->wait_dma)
-                       finalize = 1;
-
-               if (finalize) {
-                       mci_writel(host, REG_IMASK, host->sdio_imask);
-                       mci_writel(host, REG_IDIE, 0);
-               }
+                       finalize = true;
        }
 
        if (msk_int & SDXC_SDIO_INTERRUPT)
-               sdio_int = 1;
+               sdio_int = true;
 
        mci_writel(host, REG_RINTR, msk_int);
        mci_writel(host, REG_IDST, idma_int);
 
+       if (finalize) {
+               if (sunxi_mmc_finalize_request(host) == 0)
+                       complete = true;
+       }
+
        spin_unlock(&host->lock);
 
-       if (finalize)
-               tasklet_schedule(&host->tasklet);
+       if (complete)
+               mmc_request_done(host->mmc, mrq);
 
        if (sdio_int)
                mmc_signal_sdio_irq(host->mmc);
@@ -624,10 +626,29 @@ static irqreturn_t sunxi_mmc_irq(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-static void sunxi_mmc_tasklet(unsigned long data)
+static void sunxi_mmc_manual_stop_tasklet(unsigned long data)
 {
-       struct sunxi_mmc_host *smc_host = (struct sunxi_mmc_host *) data;
-       sunxi_mmc_finalize_request(smc_host);
+       struct sunxi_mmc_host *host = (struct sunxi_mmc_host *) data;
+       struct mmc_request *mrq;
+       unsigned long iflags;
+
+       spin_lock_irqsave(&host->lock, iflags);
+       mrq = host->manual_stop_mrq;
+       spin_unlock_irqrestore(&host->lock, iflags);
+
+       if (!mrq) {
+               dev_err(mmc_dev(host->mmc), "no request for manual stop\n");
+               return;
+       }
+
+       dev_err(mmc_dev(host->mmc), "data error, sending stop command\n");
+       sunxi_mmc_send_manual_stop(host, mrq);
+
+       spin_lock_irqsave(&host->lock, iflags);
+       host->manual_stop_mrq = NULL;
+       spin_unlock_irqrestore(&host->lock, iflags);
+
+       mmc_request_done(host->mmc, mrq);
 }
 
 static void sunxi_mmc_oclk_onoff(struct sunxi_mmc_host *host, u32 oclk_en)
@@ -835,7 +856,6 @@ static void sunxi_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
        unsigned long iflags;
        u32 imask = SDXC_INTERRUPT_ERROR_BIT;
        u32 cmd_val = SDXC_START | (cmd->opcode & 0x3f);
-       u32 byte_cnt = 0;
        int ret;
 
        if (!mmc_gpio_get_cd(mmc) || host->ferror) {
@@ -846,12 +866,9 @@ static void sunxi_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
        }
 
        if (data) {
-               byte_cnt = data->blksz * data->blocks;
-               mci_writel(host, REG_BLKSZ, data->blksz);
-               mci_writel(host, REG_BCNTR, byte_cnt);
-               ret = sunxi_mmc_prepare_dma(host, data);
+               ret = sunxi_mmc_map_dma(host, data);
                if (ret < 0) {
-                       dev_err(mmc_dev(host->mmc), "prepare DMA failed\n");
+                       dev_err(mmc_dev(host->mmc), "map DMA failed\n");
                        cmd->error = ret;
                        cmd->data->error = ret;
                        mmc_request_done(host->mmc, mrq);
@@ -909,12 +926,32 @@ static void sunxi_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
                mrq->data ? mrq->data->blksz * mrq->data->blocks : 0);
 
        spin_lock_irqsave(&host->lock, iflags);
+
+       if (host->mrq || host->manual_stop_mrq) {
+               spin_unlock_irqrestore(&host->lock, iflags);
+
+               if (data)
+                       dma_unmap_sg(mmc_dev(host->mmc), data->sg,
+                               data->sg_len, sunxi_mmc_get_dma_dir(data));
+
+               dev_err(mmc_dev(host->mmc), "request already pending\n");
+               mrq->cmd->error = -EBUSY;
+               mmc_request_done(host->mmc, mrq);
+               return;
+       }
+
+       if (data) {
+               mci_writel(host, REG_BLKSZ, data->blksz);
+               mci_writel(host, REG_BCNTR, data->blksz * data->blocks);
+               sunxi_mmc_start_dma(host, data);
+       }
+
        host->mrq = mrq;
        mci_writel(host, REG_IMASK, host->sdio_imask | imask);
-       spin_unlock_irqrestore(&host->lock, iflags);
-
        mci_writel(host, REG_CARG, cmd->arg);
        mci_writel(host, REG_CMDR, cmd_val);
+
+       spin_unlock_irqrestore(&host->lock, iflags);
 }
 
 static const struct of_device_id sunxi_mmc_of_match[] = {
@@ -1005,7 +1042,8 @@ static int sunxi_mmc_probe(struct platform_device *pdev)
        host = mmc_priv(mmc);
        host->mmc = mmc;
        spin_lock_init(&host->lock);
-       tasklet_init(&host->tasklet, sunxi_mmc_tasklet, (unsigned long)host);
+       tasklet_init(&host->manual_stop_tasklet,
+                    sunxi_mmc_manual_stop_tasklet, (unsigned long)host);
 
        ret = sunxi_mmc_resource_request(host, pdev);
        if (ret)
@@ -1064,7 +1102,7 @@ static int sunxi_mmc_remove(struct platform_device *pdev)
 
        mmc_remove_host(mmc);
        sunxi_mmc_exit_host(host);
-       tasklet_disable(&host->tasklet);
+       tasklet_disable(&host->manual_stop_tasklet);
        dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
        mmc_free_host(mmc);
 
-- 
1.9.0
