From: Anders Berg <anders.b...@lsi.com>

Use the devm_* helpers to avoid messy error recovery and make use of the
virt-dma interface to avoid duplicated code.

Signed-off-by: Anders Berg <anders.b...@lsi.com>
---
 drivers/dma/Kconfig     |    1 +
 drivers/dma/lsi-dma32.c |  527 ++++++++++++++++-------------------------------
 drivers/dma/lsi-dma32.h |   41 ++--
 3 files changed, 200 insertions(+), 369 deletions(-)

diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 2ad1215..03eb991 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -91,6 +91,7 @@ config AT_HDMAC
 config LSI_DMA
        tristate "LSI General Purpose DMA support"
        select DMA_ENGINE
+       select DMA_VIRTUAL_CHANNELS
        select ASYNC_TX_ENABLE_CHANNEL_SWITCH
        help
          Enable support for the LSI General Purpose DMA controller found
diff --git a/drivers/dma/lsi-dma32.c b/drivers/dma/lsi-dma32.c
index b16df68..0c679bf 100644
--- a/drivers/dma/lsi-dma32.c
+++ b/drivers/dma/lsi-dma32.c
@@ -23,10 +23,10 @@
 #include <linux/stat.h>
 #include <linux/module.h>
 #include <linux/init.h>
-#include <linux/kref.h>
 #include <linux/mm.h>
 #include <linux/interrupt.h>
 #include <linux/slab.h>
+#include <linux/io.h>
 #include <linux/platform_device.h>
 #include <linux/of_platform.h>
 #include <linux/of_address.h>
@@ -36,11 +36,9 @@
 #include <asm/page.h>
 #include <linux/bitops.h>
 #include <linux/atomic.h>
+#include "virt-dma.h"
 #include "lsi-dma32.h"
 
-#define rd32(_addr)         readl((_addr))
-#define wr32(_value, _addr) writel((_value), (_addr))
-
 #ifdef DEBUG
 #define engine_dbg(engine, fmt, ...) \
        do { \
@@ -54,7 +52,7 @@
                struct gpdma_channel *_c = dmac; \
                (void)_c; \
                pr_debug("dma0ch%d: [%s] " fmt, \
-                       dmac->channel, __func__, ##__VA_ARGS__); \
+                       dmac->id, __func__, ##__VA_ARGS__); \
        } while (0)
 #else
 #define engine_dbg(engine, fmt, ...) do {} while (0)
@@ -62,29 +60,26 @@
 #endif
 
 
-static dma_cookie_t gpdma_tx_submit(struct dma_async_tx_descriptor *txd);
-
-
 static void reset_channel(struct gpdma_channel *dmac)
 {
        const int WAIT = 1024;
        int i;
 
        /* Pause channel */
-       wr32(DMA_STATUS_CH_PAUS_WR_EN | DMA_STATUS_CH_PAUSE,
+       writel(DMA_STATUS_CH_PAUS_WR_EN | DMA_STATUS_CH_PAUSE,
             dmac->base+DMA_STATUS);
        wmb();
 
        /* Disable channel */
-       wr32(0, dmac->base+DMA_CHANNEL_CONFIG);
-       for (i = 0; rd32(dmac->base+DMA_CHANNEL_CONFIG) && i < WAIT; i++)
+       writel(0, dmac->base+DMA_CHANNEL_CONFIG);
+       for (i = 0; readl(dmac->base+DMA_CHANNEL_CONFIG) && i < WAIT; i++)
                cpu_relax();
        if (i == WAIT)
                ch_dbg(dmac, "Failed to DISABLE channel\n");
 
        /* Clear FIFO */
-       wr32(DMA_CONFIG_CLEAR_FIFO, dmac->base+DMA_CHANNEL_CONFIG);
-       for (i = 0; rd32(dmac->base+DMA_CHANNEL_CONFIG) && i < WAIT; i++)
+       writel(DMA_CONFIG_CLEAR_FIFO, dmac->base+DMA_CHANNEL_CONFIG);
+       for (i = 0; readl(dmac->base+DMA_CHANNEL_CONFIG) && i < WAIT; i++)
                cpu_relax();
        if (i == WAIT)
                ch_dbg(dmac, "Failed to clear FIFO\n");
@@ -100,7 +95,7 @@ static void soft_reset(struct gpdma_engine *engine)
                reset_channel(&engine->channel[i]);
 
        /* Reset GPDMA by writing Magic Number to reset reg */
-       wr32(GPDMA_MAGIC, engine->gbase + SOFT_RESET);
+       writel(GPDMA_MAGIC, engine->gbase + SOFT_RESET);
        wmb();
 
        cfg = (engine->pool.phys & 0xfff00000) | GEN_CONFIG_EXT_MEM;
@@ -110,7 +105,7 @@ static void soft_reset(struct gpdma_engine *engine)
                        cfg |= GEN_CONFIG_INT_EDGE(i);
                engine_dbg(engine, "Using edge-triggered interrupts\n");
        }
-       wr32(cfg, engine->gbase + GEN_CONFIG);
+       writel(cfg, engine->gbase + GEN_CONFIG);
        engine_dbg(engine, "engine->desc.phys & 0xfff00000 == %llx\n",
                   (engine->pool.phys & 0xfff00000));
 
@@ -164,28 +159,31 @@ static struct gpdma_desc *get_descriptor(struct gpdma_engine *engine)
        unsigned long flags;
        struct gpdma_desc *desc = NULL;
 
-       raw_spin_lock_irqsave(&engine->lock, flags);
+       spin_lock_irqsave(&engine->lock, flags);
        if (engine->pool.next < GPDMA_MAX_DESCRIPTORS)
                desc = engine->pool.free[engine->pool.next++];
-       raw_spin_unlock_irqrestore(&engine->lock, flags);
+       spin_unlock_irqrestore(&engine->lock, flags);
+       if (desc)
+               desc->engine = engine;
 
        return desc;
 }
 
-static void
-free_descriptor(struct gpdma_engine *engine, struct gpdma_desc *desc)
+static void free_descriptor(struct virt_dma_desc *vd)
 {
+       struct gpdma_desc *desc = to_gpdma_desc(vd);
+       struct gpdma_engine *engine = desc->engine;
        unsigned long flags;
 
-       raw_spin_lock_irqsave(&engine->lock, flags);
+       spin_lock_irqsave(&engine->lock, flags);
        BUG_ON(engine->pool.next == 0);
        engine->pool.free[--engine->pool.next] = desc;
-       raw_spin_unlock_irqrestore(&engine->lock, flags);
+       spin_unlock_irqrestore(&engine->lock, flags);
 }
 
 static int segment_match(struct gpdma_engine *engine, struct gpdma_desc *desc)
 {
-       unsigned int gpreg_dma = rd32(engine->gpreg);
+       unsigned int gpreg_dma = readl(engine->gpreg);
        unsigned int seg_src = (gpreg_dma >> 0) & 0x3f;
        unsigned int seg_dst = (gpreg_dma >> 8) & 0x3f;
 
@@ -193,17 +190,23 @@ static int segment_match(struct gpdma_engine *engine, 
struct gpdma_desc *desc)
                seg_dst == ((desc->dst >> 32) & 0x3f));
 }
 
-static int gpdma_start_job(struct gpdma_channel *dmac)
+static void gpdma_start(struct gpdma_channel *dmac)
 {
-       void __iomem      *base = BASE(dmac);
-       struct gpdma_desc *desc;
-       struct descriptor *d;
-       phys_addr_t        paddr;
+       struct virt_dma_desc *vdesc;
+       struct gpdma_desc    *desc;
+       phys_addr_t           paddr;
 
-       desc = list_first_entry(&dmac->waiting, struct gpdma_desc, node);
-       d = &desc->hw;
-       paddr = virt_to_phys(d);
-       WARN_ON(paddr & 0xf);
+       vdesc = vchan_next_desc(&dmac->vc);
+       if (!vdesc) {
+               clear_bit(dmac->id, &dmac->engine->ch_busy);
+               dmac->active = NULL;
+               return;
+       }
+
+       /* Remove from list and mark as active */
+       list_del(&vdesc->node);
+       desc = to_gpdma_desc(vdesc);
+       dmac->active = desc;
 
        if (!(dmac->engine->chip->flags & LSIDMA_SEG_REGS)) {
                /*
@@ -211,118 +214,41 @@ static int gpdma_start_job(struct gpdma_channel *dmac)
                 * running descriptor on any other channel.
                 */
-               if (dmac->engine->ch_busy && !segment_match(dmac->engine, desc))
-                       return -EBUSY;
+               if (dmac->engine->ch_busy &&
+                   !segment_match(dmac->engine, desc)) {
+                       /* Requeue descriptor so it isn't lost; it will be
+                        * retried from the next issue_pending/completion. */
+                       list_add(&vdesc->node, &dmac->vc.desc_issued);
+                       dmac->active = NULL;
+                       return;
+               }
        }
 
-       /* Remove from 'waiting' list and mark as active */
-       list_del(&desc->node);
-       desc->dma_status = DMA_IN_PROGRESS;
-       dmac->active = desc;
-       set_bit(dmac->channel, &dmac->engine->ch_busy);
-       ch_dbg(dmac, "Load desc va=%p pa=%llx\n", d, paddr);
-
+       paddr = virt_to_phys(&desc->hw);
+       WARN_ON(paddr & 0xf);
        if (dmac->engine->chip->flags & LSIDMA_NEXT_FULL) {
                /* Physical address of descriptor to load */
-               wr32((u32)paddr | 0x8, base+DMA_NXT_DESCR);
+               writel((u32)paddr | 0x8, dmac->base + DMA_NXT_DESCR);
        } else {
-               wr32((u32)paddr & 0xfffff, base+DMA_NXT_DESCR);
+               writel((u32)paddr & 0xfffff, dmac->base + DMA_NXT_DESCR);
        }
 
        if (dmac->engine->chip->flags & LSIDMA_SEG_REGS) {
                /* Segment bits [39..32] of descriptor, src and dst addresses */
-               wr32(paddr >> 32, base+DMA_DESCR_ADDR_SEG);
-               wr32(desc->src >> 32, base+DMA_SRC_ADDR_SEG);
-               wr32(desc->dst >> 32, base+DMA_DST_ADDR_SEG);
+               writel(paddr >> 32, dmac->base + DMA_DESCR_ADDR_SEG);
+               writel(desc->src >> 32, dmac->base + DMA_SRC_ADDR_SEG);
+               writel(desc->dst >> 32, dmac->base + DMA_DST_ADDR_SEG);
        } else {
                unsigned int seg_src = (desc->src >> 32) & 0x3f;
                unsigned int seg_dst = (desc->dst >> 32) & 0x3f;
-               wr32((seg_dst << 8) | seg_src, dmac->engine->gpreg);
+               writel((seg_dst << 8) | seg_src, dmac->engine->gpreg);
        }
        wmb();
-       wr32(DMA_CONFIG_DSC_LOAD, base+DMA_CHANNEL_CONFIG);
-
-       return 0;
-}
-
-static inline void
-gpdma_job_complete(struct gpdma_channel *dmac, struct gpdma_desc *desc)
-{
-       dmac->last_completed = desc->txd.cookie;
-
-       if (desc->txd.callback)
-               desc->txd.callback(desc->txd.callback_param);
-       ch_dbg(dmac, "cookie %d status %d\n",
-               desc->txd.cookie, desc->dma_status);
-       free_descriptor(dmac->engine, desc);
-       clear_bit(dmac->channel, &dmac->engine->ch_busy);
-}
-
-static void flush_channel(struct gpdma_channel *dmac)
-{
-       struct gpdma_desc *desc, *tmp;
-
-       ch_dbg(dmac, "active=%p\n", dmac->active);
-       reset_channel(dmac);
-       if (dmac->active) {
-               gpdma_job_complete(dmac, dmac->active);
-               dmac->active = NULL;
-       }
-       list_for_each_entry_safe(desc, tmp, &dmac->waiting, node) {
-               ch_dbg(dmac, "del wating %p\n", desc);
-               list_del(&desc->node);
-               gpdma_job_complete(dmac, desc);
-       }
-}
-
-static inline void gpdma_sched_job_handler(struct gpdma_engine *engine)
-{
-       if (likely(test_bit(GPDMA_INIT, &engine->state)))
-               tasklet_schedule(&engine->job_task);
-}
-
-static void job_tasklet(unsigned long data)
-{
-       struct gpdma_engine *engine = (struct gpdma_engine *)data;
-       unsigned long        flags;
-       int i;
-
-       /* Handle completed jobs */
-       for (i = 0; i < engine->chip->num_channels; i++) {
-               struct gpdma_channel *dmac = &engine->channel[i];
-               struct gpdma_desc    *desc = dmac->active;
-
-               desc = dmac->active;
-               if (desc != NULL) {
-                       /* Check if job completed */
-                       if (desc->dma_status != DMA_IN_PROGRESS) {
-                               dmac->active = NULL;
-                               gpdma_job_complete(dmac, desc);
-                       }
-               }
-       }
-
-       /* Start new jobs */
-       for (i = 0; i < engine->chip->num_channels; i++) {
-               struct gpdma_channel *dmac = &engine->channel[i];
-
-               raw_spin_lock_irqsave(&dmac->lock, flags);
-
-               if (dmac->active == NULL && !list_empty(&dmac->waiting)) {
-                       /* Start next job */
-                       if (gpdma_start_job(dmac) != 0) {
-                               raw_spin_unlock_irqrestore(&dmac->lock, flags);
-                               break;
-                       }
-               }
-
-               raw_spin_unlock_irqrestore(&dmac->lock, flags);
-       }
+       writel(DMA_CONFIG_DSC_LOAD, dmac->base + DMA_CHANNEL_CONFIG);
+       set_bit(dmac->id, &dmac->engine->ch_busy);
 }
 
 static irqreturn_t gpdma_isr_err(int irqno, void *_engine)
 {
        struct gpdma_engine *engine = _engine;
-       u32 status = rd32(engine->gbase + GEN_STAT);
+       u32 status = readl(engine->gbase + GEN_STAT);
        u32 ch = (status & GEN_STAT_CH0_ERROR) ? 0 : 1;
        struct gpdma_channel *dmac = &engine->channel[ch];
 
@@ -330,10 +252,10 @@ static irqreturn_t gpdma_isr_err(int irqno, void *_engine)
                return IRQ_NONE;
 
        /* Read the channel status bits and dump the error */
-       status = rd32(dmac->base + DMA_STATUS);
-       pr_err("dma: channel%d error %08x\n", dmac->channel, status);
+       status = readl(dmac->base + DMA_STATUS);
+       pr_err("dma: channel%d error %08x\n", dmac->id, status);
        /* Clear the error indication */
-       wr32(DMA_STATUS_ERROR, dmac->base+DMA_STATUS);
+       writel(DMA_STATUS_ERROR, dmac->base+DMA_STATUS);
 
        return IRQ_HANDLED;
 }
@@ -345,11 +267,9 @@ static irqreturn_t gpdma_isr(int irqno, void *_dmac)
        u32                  status;
        u32                  error;
 
-       WARN_ON(desc == NULL);
-
-       status = rd32(dmac->base+DMA_STATUS);
+       status = readl(dmac->base+DMA_STATUS);
        error = status & DMA_STATUS_ERROR;
-       wr32(DMA_STATUS_CLEAR, dmac->base+DMA_STATUS);
+       writel(DMA_STATUS_CLEAR, dmac->base+DMA_STATUS);
 
        ch_dbg(dmac, "irq%u channel status %08x, error %08x\n",
                irqno, status, error);
@@ -360,25 +280,49 @@ static irqreturn_t gpdma_isr(int irqno, void *_dmac)
                if (error & DMA_STATUS_UNALIGNED_ERR) {
                        dev_warn(dmac->engine->dev,
                                 "Unaligned transaction on ch%d (status=%#x)\n",
-                                dmac->channel, status);
+                                dmac->id, status);
                        reset_channel(dmac);
                } else {
                        dev_warn(dmac->engine->dev,
                                 "DMA transaction error on ch%d (status=%#x)\n",
-                                dmac->channel, status);
+                                dmac->id, status);
                }
        }
 
-       dmac->active->dma_status = (error ? DMA_ERROR : DMA_SUCCESS);
+       BUG_ON(desc == NULL);
 
-       wr32(0, dmac->base+DMA_CHANNEL_CONFIG);
-       wr32(DMA_CONFIG_CLEAR_FIFO, dmac->base+DMA_CHANNEL_CONFIG);
-
-       gpdma_sched_job_handler(dmac->engine);
+       spin_lock(&dmac->vc.lock);
+       vchan_cookie_complete(&desc->vdesc);
+       dmac->active = NULL;
+       if (vchan_next_desc(&dmac->vc)) {
+               gpdma_start(dmac);
+       } else {
+               /* Stop channel */
+               writel(0, dmac->base + DMA_CHANNEL_CONFIG);
+               writel(DMA_CONFIG_CLEAR_FIFO, dmac->base + DMA_CHANNEL_CONFIG);
+               clear_bit(dmac->id, &dmac->engine->ch_busy);
+       }
+       spin_unlock(&dmac->vc.lock);
 
        return IRQ_HANDLED;
 }
 
+static void flush_channel(struct gpdma_channel *dmac)
+{
+       unsigned long flags;
+       LIST_HEAD(head);
+
+       spin_lock_irqsave(&dmac->vc.lock, flags);
+       reset_channel(dmac);
+       if (dmac->active) {
+               free_descriptor(&dmac->active->vdesc);
+               dmac->active = NULL;
+       }
+       vchan_get_all_descriptors(&dmac->vc, &head);
+       spin_unlock_irqrestore(&dmac->vc.lock, flags);
+       vchan_dma_desc_free_list(&dmac->vc, &head);
+}
+
 /*
  * Perform soft reset procedure on DMA Engine.  Needed occasionally to work
  * around nasty bug ACP3400 sRIO HW.
@@ -394,41 +338,37 @@ reset_engine(struct device *dev,
        if (!engine)
                return -EINVAL;
 
-       /* Disable interrupts and acquire each channel lock */
-       for (i = 0; i < engine->chip->num_channels; i++)
-               disable_irq(engine->channel[i].irq);
-
-       /* Disable tasklet (synchronized) */
-       tasklet_disable(&engine->job_task);
+       /* Disable interrupts and tasklet and acquire each channel lock */
+       for (i = 0; i < engine->chip->num_channels; i++) {
+               struct gpdma_channel *dmac = &engine->channel[i];
+               disable_irq(dmac->irq);
+               tasklet_disable(&dmac->vc.task);
+               spin_lock(&dmac->vc.lock);
+       }
 
        soft_reset(engine);
 
-       /* Re-queue any active jobs */
        for (i = 0; i < engine->chip->num_channels; i++) {
                struct gpdma_channel *dmac = &engine->channel[i];
-               struct gpdma_desc  *active;
-               raw_spin_lock(&engine->channel[i].lock);
-               active = dmac->active;
-               if (active && active->dma_status == DMA_IN_PROGRESS) {
-                       /* Restart active job after soft reset */
-                       list_add(&active->node, &dmac->waiting);
+               tasklet_enable(&dmac->vc.task);
+               /* Restart any active jobs */
+               if (dmac->active) {
+                       struct gpdma_desc *active = dmac->active;
                        dmac->active = NULL;
+                       list_add(&active->vdesc.node, &dmac->vc.desc_submitted);
+                       if (vchan_issue_pending(&dmac->vc))
+                               gpdma_start(dmac);
                }
-               raw_spin_unlock(&engine->channel[i].lock);
+               spin_unlock(&dmac->vc.lock);
+               enable_irq(dmac->irq);
        }
 
-       tasklet_enable(&engine->job_task);
-
-       for (i = 0; i < engine->chip->num_channels; i++)
-               enable_irq(engine->channel[i].irq);
-
-       gpdma_sched_job_handler(engine);
-
        return count;
 }
 
 static DEVICE_ATTR(soft_reset, S_IWUSR, NULL, reset_engine);
 
+
 /*
  *===========================================================================
  *
@@ -445,7 +385,7 @@ static DEVICE_ATTR(soft_reset, S_IWUSR, NULL, reset_engine);
  */
 static int gpdma_alloc_chan_resources(struct dma_chan *chan)
 {
-       struct gpdma_channel *dmac = dchan_to_gchan(chan);
+       struct gpdma_channel *dmac = to_gpdma_chan(chan);
 
        (void) dmac;
        return 1;
@@ -457,7 +397,7 @@ static int gpdma_alloc_chan_resources(struct dma_chan *chan)
  */
 static void gpdma_free_chan_resources(struct dma_chan *chan)
 {
-       struct gpdma_channel *dmac = dchan_to_gchan(chan);
+       struct gpdma_channel *dmac = to_gpdma_chan(chan);
 
        (void) dmac;
 }
@@ -468,12 +408,12 @@ static void gpdma_free_chan_resources(struct dma_chan 
*chan)
  */
 static struct dma_async_tx_descriptor *
 gpdma_prep_memcpy(struct dma_chan *chan,
-                dma_addr_t dst,
-                dma_addr_t src,
-                size_t size,
-                unsigned long dma_flags)
+                 dma_addr_t dst,
+                 dma_addr_t src,
+                 size_t size,
+                 unsigned long dma_flags)
 {
-       struct gpdma_channel *dmac = dchan_to_gchan(chan);
+       struct gpdma_channel *dmac = to_gpdma_chan(chan);
        struct gpdma_desc *desc;
        u16 rot_len, x_count, src_size, access_size;
 
@@ -532,44 +472,10 @@ gpdma_prep_memcpy(struct dma_chan *chan,
        desc->hw.dst_addr      = cpu_to_le32(dst & 0xffffffff);
 
        /* Setup sw descriptor */
-       INIT_LIST_HEAD(&desc->node);
        desc->src        = src;
        desc->dst        = dst;
-       desc->dma_status = DMA_IN_PROGRESS;
-       dma_async_tx_descriptor_init(&desc->txd, chan);
-       desc->txd.tx_submit = gpdma_tx_submit;
-       desc->txd.flags     = DMA_CTRL_ACK;
-
-       return &desc->txd;
-}
-
-/**
- * gpdma_tx_submit
- *
- */
-static dma_cookie_t gpdma_tx_submit(struct dma_async_tx_descriptor *txd)
-{
-       struct gpdma_channel *dmac = dchan_to_gchan(txd->chan);
-       struct gpdma_desc    *desc = txd_to_desc(txd);
-       unsigned long         flags;
-
-       raw_spin_lock_irqsave(&dmac->lock, flags);
-
-       /* Update last used cookie */
-       if (++dmac->chan.cookie < 0)
-               dmac->chan.cookie = DMA_MIN_COOKIE;
-
-       txd->cookie = dmac->chan.cookie;
-       list_add_tail(&desc->node, &dmac->waiting);
 
-       raw_spin_unlock_irqrestore(&dmac->lock, flags);
-
-       ch_dbg(dmac, "cookie=%d last_completed=%d\n",
-               txd->cookie, dmac->last_completed);
-
-       gpdma_sched_job_handler(dmac->engine);
-
-       return txd->cookie;
+       return vchan_tx_prep(&dmac->vc, &desc->vdesc, DMA_CTRL_ACK);
 }
 
 /**
@@ -578,9 +484,13 @@ static dma_cookie_t gpdma_tx_submit(struct 
dma_async_tx_descriptor *txd)
  */
 static void gpdma_issue_pending(struct dma_chan *chan)
 {
-       struct gpdma_channel *dmac = dchan_to_gchan(chan);
+       struct gpdma_channel *dmac = to_gpdma_chan(chan);
+       unsigned long flags;
 
-       gpdma_sched_job_handler(dmac->engine);
+       spin_lock_irqsave(&dmac->vc.lock, flags);
+       if (vchan_issue_pending(&dmac->vc) && !dmac->active)
+               gpdma_start(dmac);
+       spin_unlock_irqrestore(&dmac->vc.lock, flags);
 }
 
 /**
@@ -593,13 +503,7 @@ static enum dma_status gpdma_tx_status(struct dma_chan 
*chan,
                                       dma_cookie_t cookie,
                                       struct dma_tx_state *txstate)
 {
-       struct gpdma_channel *dmac = dchan_to_gchan(chan);
-       enum dma_status ret = 0;
-
-       ret = dma_async_is_complete(cookie, dmac->last_completed, chan->cookie);
-       dma_set_tx_state(txstate, dmac->last_completed, chan->cookie, 0);
-
-       return ret;
+       return dma_cookie_status(chan, cookie, txstate);
 }
 
 
@@ -608,20 +512,18 @@ static enum dma_status gpdma_tx_status(struct dma_chan 
*chan,
  * returns zero or error code.
  *
  */
-static int gpdma_device_control(struct dma_chan *dchan,
+static int gpdma_device_control(struct dma_chan *chan,
                                enum dma_ctrl_cmd cmd,
                                unsigned long arg)
 {
-       struct gpdma_channel *dmac = dchan_to_gchan(dchan);
+       struct gpdma_channel *dmac = to_gpdma_chan(chan);
 
        if (!dmac)
                return -EINVAL;
 
        switch (cmd) {
        case DMA_TERMINATE_ALL:
-               tasklet_disable(&dmac->engine->job_task);
                flush_channel(dmac);
-               tasklet_enable(&dmac->engine->job_task);
                break;
 
        default:
@@ -631,88 +533,28 @@ static int gpdma_device_control(struct dma_chan *dchan,
        return 0;
 }
 
-static void gpdma_engine_release(struct kref *kref)
-{
-       struct gpdma_engine *engine =
-               container_of(kref, struct gpdma_engine, kref);
-
-       kfree(engine);
-}
-
-/**
- * gpdma_of_remove
- *
- *
- */
-static int gpdma_of_remove(struct platform_device *op)
-{
-       struct gpdma_engine *engine = dev_get_drvdata(&op->dev);
-       int i;
-
-       dev_dbg(&op->dev, "%s\n", __func__);
-
-       if (engine) {
-               if (test_and_clear_bit(GPDMA_INIT, &engine->state)) {
-                       device_remove_file(&op->dev, &dev_attr_soft_reset);
-                       dma_async_device_unregister(&engine->dma_device);
-               }
-
-               tasklet_disable(&engine->job_task);
-               tasklet_kill(&engine->job_task);
-
-               if (engine->err_irq)
-                       free_irq(engine->err_irq, engine);
-
-               for (i = 0; i < engine->chip->num_channels; i++) {
-                       struct gpdma_channel *dmac = &engine->channel[i];
-
-                       if (dmac->irq)
-                               free_irq(dmac->irq, dmac);
-               }
-
-               free_desc_table(engine);
-               iounmap(engine->iobase);
-               kref_put(&engine->kref, gpdma_engine_release);
-               dev_set_drvdata(&op->dev, NULL);
-       }
-
-       return 0;
-}
-
-static int
-setup_channel(struct gpdma_engine *engine, struct device_node *child, int id)
+static int setup_channel(struct gpdma_channel *dmac, struct device_node *child)
 {
-       struct gpdma_channel *dmac;
-       int rc = -ENODEV;
+       struct gpdma_engine *engine = dmac->engine;
+       int rc;
 
-       if (id >= engine->chip->num_channels) {
-               dev_dbg(engine->dev, "Too many channels (%d)\n", id);
-               goto err_init;
-       }
-
-       dmac = &engine->channel[id];
-       dmac->channel = id;
-       dmac->engine = engine;
-       raw_spin_lock_init(&dmac->lock);
-       INIT_LIST_HEAD(&dmac->waiting);
-       dmac->chan.device = &engine->dma_device;
-
-       dmac->base = engine->iobase + id*engine->chip->chregs_offset;
-       dev_dbg(engine->dev, "channel%d base @ %p\n", id, dmac->base);
+       dmac->base = engine->iobase + dmac->id * engine->chip->chregs_offset;
+       dev_dbg(engine->dev, "channel%d base @ %p\n", dmac->id, dmac->base);
 
        /* Find the IRQ line, if it exists in the device tree */
        dmac->irq = irq_of_parse_and_map(child, 0);
-       dev_dbg(engine->dev, "channel %d, irq %d\n", id, dmac->irq);
-       rc = request_irq(dmac->irq, gpdma_isr, IRQF_SHARED, "lsi-dma", dmac);
+       dev_dbg(engine->dev, "channel %d, irq %d\n", dmac->id, dmac->irq);
+       rc = devm_request_irq(engine->dev, dmac->irq, gpdma_isr, 0,
+                             "lsi-dma", dmac);
        if (rc) {
                dev_err(engine->dev, "failed to request_irq, error = %d\n", rc);
-               goto err_init;
+               return rc;
        }
-       /* Add the channel to the DMAC list */
-       list_add_tail(&dmac->chan.device_node, &engine->dma_device.channels);
+       /* Initialize the virt-channel */
+       dmac->vc.desc_free = free_descriptor;
+       vchan_init(&dmac->vc, &engine->dma_device);
 
-err_init:
-       return rc;
+       return 0;
 }
 
 static struct lsidma_hw lsi_dma32 = {
@@ -750,16 +592,12 @@ static const struct of_device_id gpdma_of_ids[] = {
        { }
 };
 
-/**
- * gpdma_of_probe
- *
- *
- */
 static int gpdma_of_probe(struct platform_device *op)
 {
        struct gpdma_engine *engine;
        struct dma_device   *dma;
        struct device_node *child;
+       struct resource *res;
        const struct of_device_id *match;
        int rc = -ENOMEM;
        int id = 0;
@@ -768,14 +606,11 @@ static int gpdma_of_probe(struct platform_device *op)
        if (!match)
                return -EINVAL;
 
-       engine = kzalloc(sizeof(*engine), GFP_KERNEL);
+       engine = devm_kzalloc(&op->dev, sizeof(*engine), GFP_KERNEL);
        if (!engine)
                return -ENOMEM;
 
-       dev_set_drvdata(&op->dev, engine);
-
-       kref_init(&engine->kref);
-       raw_spin_lock_init(&engine->lock);
+       spin_lock_init(&engine->lock);
        engine->dev = &op->dev;
        engine->chip = (struct lsidma_hw *)match->data;
 
@@ -794,50 +629,57 @@ static int gpdma_of_probe(struct platform_device *op)
        dma->device_control = gpdma_device_control;
        INIT_LIST_HEAD(&dma->channels);
 
-       /*
-        * Map device I/O memory
+       /* Map device I/O memory
         */
-       engine->iobase = of_iomap(op->dev.of_node, 0);
-       if (!engine->iobase) {
-               rc = -EINVAL;
-               goto err_init;
-       }
+       res = platform_get_resource(op, IORESOURCE_MEM, 0);
+       engine->iobase = devm_ioremap_resource(&op->dev, res);
+       if (IS_ERR(engine->iobase))
+               return PTR_ERR(engine->iobase);
        dev_dbg(&op->dev, "mapped base @ %p\n", engine->iobase);
 
-       engine->err_irq = irq_of_parse_and_map(op->dev.of_node, 1);
+       res = platform_get_resource(op, IORESOURCE_MEM, 1);
+       if (res) {
+               engine->gpreg = devm_ioremap_nocache(&op->dev,
+                                                    res->start,
+                                                    resource_size(res));
+               if (!engine->gpreg)
+                       return -ENOMEM;
+               dev_dbg(&op->dev, "mapped gpreg @ %p\n", engine->gpreg);
+       }
+
+       engine->err_irq = platform_get_irq(op, 1);
        if (engine->err_irq) {
-               rc = request_irq(engine->err_irq, gpdma_isr_err,
-                                IRQF_SHARED, "lsi-dma-err", engine);
+               rc = devm_request_irq(&op->dev, engine->err_irq,
+                                     gpdma_isr_err, 0, "lsi-dma-err", engine);
                if (rc) {
-                       dev_err(engine->dev,
-                               "failed to request irq%d\n",
+                       dev_err(engine->dev, "failed to request irq%d\n",
                                engine->err_irq);
                        engine->err_irq = 0;
                }
        }
 
-       if (!(engine->chip->flags & LSIDMA_SEG_REGS)) {
-               struct device_node *gp_node;
-               gp_node = of_find_compatible_node(NULL, NULL, "lsi,gpreg");
-               if (!gp_node) {
-                       dev_err(engine->dev, "FDT is missing node 'gpreg'\n");
-                       rc = -EINVAL;
-                       goto err_init;
-               }
-               engine->gpreg = of_iomap(gp_node, 0);
-       }
-
        /* General registes at device specific offset */
        engine->gbase = engine->iobase + engine->chip->genregs_offset;
 
-       if (alloc_desc_table(engine))
-               goto err_init;
+       rc = alloc_desc_table(engine);
+       if (rc)
+               return rc;
 
        /* Setup channels */
        for_each_child_of_node(op->dev.of_node, child) {
-               rc = setup_channel(engine, child, id++);
-               if (rc != 0)
-                       goto err_init;
+               struct gpdma_channel *dmac = &engine->channel[id];
+
+               if (id >= engine->chip->num_channels) {
+                       dev_dbg(engine->dev, "Too many channels (%d)\n", id);
+                       return -ENODEV;
+               }
+
+               dmac->id = id;
+               dmac->engine = engine;
+               rc = setup_channel(dmac, child);
+               if (rc)
+                       return rc;
+               ++id;
        }
 
        soft_reset(engine);
@@ -845,19 +687,27 @@ static int gpdma_of_probe(struct platform_device *op)
        rc = dma_async_device_register(&engine->dma_device);
        if (rc) {
                dev_err(engine->dev, "unable to register\n");
-               goto err_init;
+               return rc;
        }
-       tasklet_init(&engine->job_task, job_tasklet, (unsigned long)engine);
-       device_create_file(&op->dev, &dev_attr_soft_reset);
 
-       set_bit(GPDMA_INIT, &engine->state);
+       device_create_file(&op->dev, &dev_attr_soft_reset);
+       dev_set_drvdata(&op->dev, engine);
 
        return 0;
+}
 
-err_init:
-       gpdma_of_remove(op);
+static int gpdma_of_remove(struct platform_device *op)
+{
+       struct gpdma_engine *engine = dev_get_drvdata(&op->dev);
+
+       dev_dbg(&op->dev, "%s\n", __func__);
+
+       device_remove_file(&op->dev, &dev_attr_soft_reset);
+       dma_async_device_unregister(&engine->dma_device);
+       free_desc_table(engine);
        dev_set_drvdata(&op->dev, NULL);
-       return rc;
+
+       return 0;
 }
 
 static struct platform_driver gpdma_of_driver = {
@@ -870,18 +720,7 @@ static struct platform_driver gpdma_of_driver = {
        .remove = gpdma_of_remove,
 };
 
-static __init int gpdma_init(void)
-{
-       return platform_driver_register(&gpdma_of_driver);
-}
-
-static void __exit gpdma_exit(void)
-{
-       platform_driver_unregister(&gpdma_of_driver);
-}
-
-subsys_initcall(gpdma_init);
-module_exit(gpdma_exit);
+module_platform_driver(gpdma_of_driver);
 
 MODULE_DESCRIPTION("LSI DMA driver");
 MODULE_LICENSE("GPL");
diff --git a/drivers/dma/lsi-dma32.h b/drivers/dma/lsi-dma32.h
index 5f10198..b4248cd 100644
--- a/drivers/dma/lsi-dma32.h
+++ b/drivers/dma/lsi-dma32.h
@@ -15,9 +15,10 @@
 
 #include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
 #include <linux/device.h>
 #include <linux/spinlock.h>
 #include <linux/types.h>
+#include "virt-dma.h"
 
 #define MAX_GPDMA_CHANNELS      4
 #define GPDMA_MAX_DESCRIPTORS  128
@@ -151,14 +153,16 @@ struct gpdma_engine;
 
 struct gpdma_desc {
        struct descriptor               hw;
-       struct list_head                node;
        dma_addr_t                      src;
        dma_addr_t                      dst;
-       enum dma_status                 dma_status;
-       struct dma_async_tx_descriptor  txd;
+       struct gpdma_engine            *engine;
+       struct virt_dma_desc            vdesc;
 } __aligned(32);
 
-#define txd_to_desc(n) container_of(n, struct gpdma_desc, txd)
+static struct gpdma_desc *to_gpdma_desc(struct virt_dma_desc *vdesc)
+{
+       return container_of(vdesc, struct gpdma_desc, vdesc);
+}
 
 struct gpdma_channel {
        /* Back reference to DMA engine */
@@ -166,22 +170,19 @@ struct gpdma_channel {
        /* Channel registers */
        void __iomem                    *base;
        /* Channel id */
-       int                             channel;
+       int                             id;
        /* IRQ number as passed to request_irq() */
        int                             irq;
-       /* Cookie for last completed transaction */
-       dma_cookie_t                    last_completed;
-       /* List of prep'ed descriptors */
-       struct list_head                waiting;
        /* Currently running descriptor */
        struct gpdma_desc               *active;
-       /* Lock for accessing 'waiting' and 'active' */
-       raw_spinlock_t                  lock;
        /* Channel parameters (DMA engine framework) */
-       struct dma_chan                 chan;
+       struct virt_dma_chan            vc;
 };
 
-#define dchan_to_gchan(n) container_of(n, struct gpdma_channel, chan)
+static inline struct gpdma_channel *to_gpdma_chan(struct dma_chan *chan)
+{
+       return container_of(chan, struct gpdma_channel, vc.chan);
+}
 
 struct lsidma_hw {
        unsigned int num_channels;
@@ -194,20 +195,16 @@ struct lsidma_hw {
 };
 
 struct gpdma_engine {
-       unsigned long                   state;
-#define GPDMA_INIT      0U
-       struct kref                     kref;
        struct device                   *dev;
        struct lsidma_hw                *chip;
        struct gpdma_channel            channel[MAX_GPDMA_CHANNELS];
-       /* Bit mask where bit[n] == 1 if channel busy */
+       /** Bit mask where bit[n] == 1 if channel busy */
        unsigned long                   ch_busy;
-       struct tasklet_struct           job_task;
        int                             err_irq;
        void __iomem                    *iobase;
        void __iomem                    *gbase;
        void __iomem                    *gpreg;
-       raw_spinlock_t                  lock;
+       spinlock_t                      lock;
        struct {
                u32                     order;
                dma_addr_t              phys;
@@ -220,10 +217,4 @@ struct gpdma_engine {
 
 #define desc_to_engine(n) container_of(n, struct gpdma_engine, desc)
 
-
-static inline void __iomem *BASE(struct gpdma_channel *dmac)
-{
-       return dmac->base;
-}
-
 #endif
-- 
1.7.9.5

-- 
_______________________________________________
linux-yocto mailing list
linux-yocto@yoctoproject.org
https://lists.yoctoproject.org/listinfo/linux-yocto

Reply via email to