On Sat, Mar 21, 2015 at 12:42:06PM -0700, Maxime Ripard wrote:
> This framework aims at easing the development of dmaengine drivers by 
> providing
> generic implementations of the functions usually required by dmaengine, while
> abstracting away most of the logic required.
> 
> It is very relevant for controllers that have more requests than channels,
> where you need to have some scheduling that is usually very bug prone, and
> shouldn't be implemented in each and every driver.
> 
> This introduces a new set of callbacks for drivers to implement the device
> specific behaviour. These callbacks aim at providing both the channel
> related operations (start the transfer of a new descriptor, pause,
> resume, etc.) and the LLI related operations (iterator and various
> accessors).
> 
> So far, the only transfer types supported are memcpy and slave transfers, but
> it should eventually support new transfer types as more drivers get converted.
A good direction indeed; a few comments below...

> 
> Signed-off-by: Maxime Ripard <maxime.rip...@free-electrons.com>
> ---
>  drivers/dma/Kconfig         |   4 +
>  drivers/dma/Makefile        |   1 +
>  drivers/dma/scheduled-dma.c | 571 ++++++++++++++++++++++++++++++++++++++++++++
>  drivers/dma/scheduled-dma.h | 140 +++++++++++
>  4 files changed, 716 insertions(+)
>  create mode 100644 drivers/dma/scheduled-dma.c
>  create mode 100644 drivers/dma/scheduled-dma.h
> 
> diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
> index a874b6ec6650..032bf5fcd58b 100644
> --- a/drivers/dma/Kconfig
> +++ b/drivers/dma/Kconfig
> @@ -406,6 +406,7 @@ config DMA_SUN6I
>       depends on RESET_CONTROLLER
>       select DMA_ENGINE
>       select DMA_VIRTUAL_CHANNELS
> +     select DMA_SCHEDULED
Umm, selecting this without converting the driver?
>       help
>         Support for the DMA engine first found in Allwinner A31 SoCs.
>  
> @@ -431,6 +432,9 @@ config DMA_ENGINE
>  config DMA_VIRTUAL_CHANNELS
>       tristate
>  
> +config DMA_SCHEDULED
> +     bool
It might make sense to also select DMA_VIRTUAL_CHANNELS here, but this is fine too.
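i.e. something like:

	config DMA_SCHEDULED
		bool
		select DMA_VIRTUAL_CHANNELS

since the framework calls straight into the virt-dma helpers anyway.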
> +
>  config DMA_ACPI
>       def_bool y
>       depends on ACPI
> diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
> index f915f61ec574..1db31814c749 100644
> --- a/drivers/dma/Makefile
> +++ b/drivers/dma/Makefile
> @@ -5,6 +5,7 @@ obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
>  obj-$(CONFIG_DMA_VIRTUAL_CHANNELS) += virt-dma.o
>  obj-$(CONFIG_DMA_ACPI) += acpi-dma.o
>  obj-$(CONFIG_DMA_OF) += of-dma.o
> +obj-$(CONFIG_DMA_SCHEDULED) += scheduled-dma.o
>  
>  obj-$(CONFIG_INTEL_MID_DMAC) += intel_mid_dma.o
>  obj-$(CONFIG_DMATEST) += dmatest.o
> diff --git a/drivers/dma/scheduled-dma.c b/drivers/dma/scheduled-dma.c
> new file mode 100644
> index 000000000000..482d04f2ccbc
> --- /dev/null
> +++ b/drivers/dma/scheduled-dma.c
> @@ -0,0 +1,571 @@
> +/*
> + * Copyright (C) 2015 Maxime Ripard
> + * Maxime Ripard <maxime.rip...@free-electrons.com>
> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License as published by
> + * the Free Software Foundation; either version 2 of the License, or
> + * (at your option) any later version.
> + */
> +
> +#include <linux/dmaengine.h>
> +#include <linux/dmapool.h>
> +#include <linux/list.h>
> +#include <linux/slab.h>
> +
> +#include "scheduled-dma.h"
> +#include "virt-dma.h"
> +
> +static struct sdma_request *sdma_pop_queued_transfer(struct sdma *sdma,
> +                                                  struct sdma_channel *schan)
> +{
> +     struct sdma_request *sreq = NULL;
> +     unsigned long flags;
> +
> +     spin_lock_irqsave(&sdma->lock, flags);
> +
> +     /* No requests are awaiting an available channel */
> +     if (list_empty(&sdma->pend_reqs))
> +             goto out;
> +
> +     /*
> +      * If we don't have any validate_request callback, any request
> +      * can be dispatched to any channel.
> +      *
> +      * Remove the first entry and return it.
> +      */
> +     if (!sdma->ops->validate_request) {
> +             sreq = list_first_entry(&sdma->pend_reqs,
> +                                     struct sdma_request, node);
> +             list_del_init(&sreq->node);
> +             goto out;
> +     }
> +
> +     list_for_each_entry(sreq, &sdma->pend_reqs, node) {
> +             /*
> +              * Ask the driver to validate that the request can
> +              * happen on the channel.
> +              */
> +             if (sdma->ops->validate_request(schan, sreq)) {
> +                     list_del_init(&sreq->node);
> +                     goto out;
> +             }
> +
> +             /* Otherwise, just keep looping */
> +     }
> +
> +out:
> +     spin_unlock_irqrestore(&sdma->lock, flags);
> +
> +     return sreq;
> +}
> +
> +/*
> + * Besoin d'une fonction pour pusher un descriptor vers un pchan
> + *
> + * Flow normal:
> + *   - Election d'un pchan (Framework)
> + *   - Push d'un descripteur vers le pchan (Driver)
> + *   - idle....
> + *   - interrupt (driver)
> + *   - Transfert terminé, notification vers le framework (driver)
> + *   - Nouveau transfert dans la queue?
> + *     + Si oui, on est reparti
> + *     + Si non, on sort de l'interrupt, le pchan est libéré
Sorry, I can't understand this; French comments are a real problem for
non-French speakers, please convert them to English. Roughly translated:
"We need a function to push a descriptor to a pchan. Normal flow:
election of a pchan (framework); push of a descriptor to the pchan
(driver); idle...; interrupt (driver); transfer complete, the framework
is notified (driver); a new transfer in the queue? If yes, off we go
again; if not, we leave the interrupt and the pchan is released."

> + */
> +
> +struct sdma_desc *sdma_report(struct sdma *sdma,
> +                           struct sdma_channel *schan,
> +                           enum sdma_report_status status)
> +{
> +     struct sdma_desc *sdesc = NULL;
> +     struct virt_dma_desc *vdesc;
> +     struct sdma_request *sreq;
> +
> +     switch (status) {
> +     case SDMA_REPORT_TRANSFER:
> +             /*
> +              * We're done with the current transfer that was in this
> +              * physical channel.
> +              */
> +             vchan_cookie_complete(&schan->desc->vdesc);
> +
> +             /*
> +              * Now, try to see if there's any queued transfer
> +              * awaiting an available channel.
> +              *
> +              * If not, just bail out, and mark the pchan as
> +              * available.
> +              *
> +              * If there's such a transfer, validate that the
> +              * driver can handle it, and ask it to do the
> +              * transfer.
> +              */
> +             sreq = sdma_pop_queued_transfer(sdma, schan);
> +             if (!sreq) {
> +                     list_add_tail(&schan->node, &sdma->avail_chans);
> +                     return NULL;
> +             }
> +
> +             /* Mark the request as assigned to a particular channel */
> +             sreq->chan = schan;
> +
> +             /* Retrieve the next transfer descriptor */
> +             vdesc = vchan_next_desc(&sreq->vchan);
> +             schan->desc = sdesc = to_sdma_desc(&vdesc->tx);
> +
> +             break;
> +
> +     default:
> +             break;
Since you have only one case, the switch seems like overkill here! Or is
the plan to add chunk support eventually? But then...

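Until the other report types exist, something like this would read more
simply (untested):

	if (status != SDMA_REPORT_TRANSFER)
		return NULL;

	vchan_cookie_complete(&schan->desc->vdesc);
	/* ... rest of the transfer-complete handling ... */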
> +     }
> +
> +     return sdesc;
> +}
> +EXPORT_SYMBOL_GPL(sdma_report);
> +
> +static enum dma_status sdma_tx_status(struct dma_chan *chan,
> +                                   dma_cookie_t cookie,
> +                                   struct dma_tx_state *state)
> +{
> +     struct sdma_request *sreq = to_sdma_request(chan);
> +     struct sdma *sdma = to_sdma(chan->device);
> +     struct sdma_channel *schan = sreq->chan;
> +     struct virt_dma_desc *vd;
> +     struct sdma_desc *desc;
> +     enum dma_status ret;
> +     unsigned long flags;
> +     size_t bytes = 0;
> +     void *lli;
> +
> +     spin_lock_irqsave(&sreq->vchan.lock, flags);
> +
> +     ret = dma_cookie_status(chan, cookie, state);
> +     if (ret == DMA_COMPLETE)
> +             goto out;
> +
> +     vd = vchan_find_desc(&sreq->vchan, cookie);
> +     desc = to_sdma_desc(&vd->tx);
> +
> +     if (vd) {
> +             lli = desc->v_lli;
> +             while (true) {
> +                     bytes += sdma->ops->lli_size(lli);
> +
> +                     if (!sdma->ops->lli_has_next(lli))
> +                             break;
> +
> +                     lli = sdma->ops->lli_next(lli);
Wouldn't it be nicer to just use a linked list here, embedded in a
generic struct which in turn is embedded in a driver-specific one, so
that we don't actually care what the driver-specific implementation
looks like? That way we can get rid of the lli_size, lli_has_next and
lli_next callbacks. Further, that will allow the driver/framework to
break a transaction into multiple smaller transactions internally.

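Roughly like this (untested sketch, all names made up):

	/* generic item, owned by the framework */
	struct sdma_lli {
		struct list_head	node;	/* chained into the descriptor */
		dma_addr_t		p_lli;	/* dma address of this hw item */
		size_t			len;	/* bytes covered by this item */
		/* driver-specific hw descriptor follows */
	};

with sdma_desc carrying a list_head to chain them on, so the residue
walk above becomes:

	list_for_each_entry(lli, &desc->lli_list, node)
		bytes += lli->len;

and the framework can split or free a chain without any driver help.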
> +             }
> +     } else if (chan) {
> +             bytes = sdma->ops->channel_residue(schan);
> +     }
> +
> +     dma_set_residue(state, bytes);
> +
> +out:
> +     spin_unlock_irqrestore(&sreq->vchan.lock, flags);
> +
> +     return ret;
> +};
> +
> +static int sdma_config(struct dma_chan *chan,
> +                    struct dma_slave_config *config)
> +{
> +     struct sdma_request *sreq = to_sdma_request(chan);
> +     unsigned long flags;
> +
> +     spin_lock_irqsave(&sreq->vchan.lock, flags);
> +     memcpy(&sreq->cfg, config, sizeof(*config));
> +     spin_unlock_irqrestore(&sreq->vchan.lock, flags);
> +
> +     return 0;
> +}
> +
> +static int sdma_pause(struct dma_chan *chan)
> +{
> +     struct sdma_request *sreq = to_sdma_request(chan);
> +     struct sdma_channel *schan = sreq->chan;
> +     struct sdma *sdma = to_sdma(chan->device);
> +     unsigned long flags;
> +
> +     spin_lock_irqsave(&sreq->vchan.lock, flags);
> +
> +     /*
> +      * If the request is currently scheduled on a channel, just
> +      * pause the channel.
> +      *
> +      * If not, remove the request from the pending list.
The current API pauses a channel, not a request, so this part isn't
clear to me; can you please explain?

> +      */
> +     if (schan) {
> +             sdma->ops->channel_pause(schan);
Make sure you document that this and the rest of these callbacks may be
invoked with the lock held!
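Something along these lines in the sdma_ops kernel-doc would do (wording
is mine):

	/**
	 * @channel_pause: pause the transfer running on @chan. May be
	 *	called with the request's vchan lock held and interrupts
	 *	disabled, so it must not sleep.
	 */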
> +     } else {
> +             spin_lock(&sdma->lock);
> +             list_del_init(&sreq->node);
> +             spin_unlock(&sdma->lock);
> +     }
> +
> +     spin_unlock_irqrestore(&sreq->vchan.lock, flags);
> +
> +     return 0;
> +}
> +
> +static int sdma_resume(struct dma_chan *chan)
> +{
> +     struct sdma_request *sreq = to_sdma_request(chan);
> +     struct sdma_channel *schan = sreq->chan;
> +     struct sdma *sdma = to_sdma(chan->device);
> +     unsigned long flags;
> +
> +     spin_lock_irqsave(&sreq->vchan.lock, flags);
> +
> +     /*
> +      * If the request is currently scheduled on a channel, just
> +      * resume the channel.
> +      *
> +      * If not, add the request from the pending list.
Same comment here as for pause.
> +      */
> +     if (schan) {
> +             sdma->ops->channel_resume(schan);
> +     } else {
> +             spin_lock(&sdma->lock);
> +             list_add_tail(&sreq->node, &sdma->pend_reqs);
> +             spin_unlock(&sdma->lock);
> +     }
> +
> +     spin_unlock_irqrestore(&sreq->vchan.lock, flags);
> +
> +     return 0;
> +}
> +
> +static int sdma_terminate(struct dma_chan *chan)
> +{
> +     struct sdma_request *sreq = to_sdma_request(chan);
> +     struct sdma_channel *schan = sreq->chan;
> +     struct sdma *sdma = to_sdma(chan->device);
> +     unsigned long flags;
> +
> +     spin_lock_irqsave(&sreq->vchan.lock, flags);
> +
> +     /*
> +      * If the request is currently scheduled on a channel,
> +      * terminate the channel.
> +      *
> +      * If not, prevent the request from being scheduled.
> +      */
> +     if (schan) {
> +             sdma->ops->channel_terminate(schan);
> +     } else {
> +             spin_lock(&sdma->lock);
> +             list_del_init(&sreq->node);
> +             spin_unlock(&sdma->lock);
> +     }
> +
> +     spin_unlock_irqrestore(&sreq->vchan.lock, flags);
> +
> +     /*
> +      * Flush all the pending descriptors from our vchan
> +      */
> +     vchan_free_chan_resources(&sreq->vchan);
I think the cleanup is not right; we need more work here.
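I would expect the usual virt-dma pattern here, roughly (untested):

	LIST_HEAD(head);

	spin_lock_irqsave(&sreq->vchan.lock, flags);
	/* ... stop the channel / unlink the request as above ... */
	sreq->chan = NULL;
	vchan_get_all_descriptors(&sreq->vchan, &head);
	spin_unlock_irqrestore(&sreq->vchan.lock, flags);

	vchan_dma_desc_free_list(&sreq->vchan, &head);

plus putting the physical channel back on avail_chans.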
> +
> +     return 0;
> +}
> +
> +
> +static struct dma_async_tx_descriptor *sdma_prep_memcpy(struct dma_chan *chan,
> +                                                     dma_addr_t dest, dma_addr_t src,
> +                                                     size_t len, unsigned long flags)
> +{
> +     struct sdma_request *req = to_sdma_request(chan);
> +     struct sdma *sdma = to_sdma(chan->device);
> +     struct sdma_desc *desc;
> +     dma_addr_t p_lli;
> +     void *v_lli;
> +
> +     if (!len)
> +             return NULL;
> +
> +     /* Allocate our representation of a descriptor */
> +     desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
> +     if (!desc)
> +             return NULL;
> +
> +     v_lli = dma_pool_alloc(sdma->pool, GFP_NOWAIT, &p_lli);
> +     if (!v_lli)
> +             goto err_desc_free;
> +
> +     /* Ask the driver to initialise its hardware descriptor */
> +     if (sdma->ops->lli_init(v_lli, req->private,
> +                             SDMA_TRANSFER_MEMCPY,
> +                             DMA_MEM_TO_MEM, src, dest, len,
> +                             NULL))
> +             goto err_lli_free;
> +
> +     /* Create our single item LLI */
> +     sdma->ops->lli_queue(NULL, v_lli, p_lli);
> +     desc->p_lli = p_lli;
> +     desc->v_lli = v_lli;
> +
> +     return vchan_tx_prep(&req->vchan, &desc->vdesc, flags);
Typically the driver would do the calculations for the descriptor here,
setting up the register values to be programmed later, so a driver
callback would be nice at this point. That way there is less work in the
start function, which is good, as I am assuming start will be invoked
from irq context.

Also, this needs to take the dma_slave_config into account, which is
allowed to be modified for subsequent transactions.
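e.g. a hook like this (hypothetical name):

	/* let the driver precompute register values in process context */
	if (sdma->ops->desc_prepare &&
	    sdma->ops->desc_prepare(req, desc, &req->cfg))
		goto err_lli_free;

so that channel_start() only has to write them out.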
> +
> +err_lli_free:
> +     dma_pool_free(sdma->pool, v_lli, p_lli);
> +err_desc_free:
> +     kfree(desc);
> +
> +     return NULL;
> +}
> +
> +static struct dma_async_tx_descriptor *sdma_prep_slave_sg(struct dma_chan *chan,
> +                                                       struct scatterlist *sgl,
> +                                                       unsigned int sg_len,
> +                                                       enum dma_transfer_direction dir,
> +                                                       unsigned long flags, void *context)
> +{
> +     struct sdma_request *req = to_sdma_request(chan);
> +     struct dma_slave_config *config = &req->cfg;
> +     struct sdma *sdma = to_sdma(chan->device);
> +     void *v_lli, *prev_v_lli = NULL;
> +     struct scatterlist *sg;
> +     struct sdma_desc *desc;
> +     dma_addr_t p_lli;
> +     int i;
> +
> +     if (!sgl || !sg_len)
> +             return NULL;
> +
> +     /* Allocate our representation of a descriptor */
> +     desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
> +     if (!desc)
> +             return NULL;
> +
> +     /*
> +      * For each scatter list entry, build up our representation of
> +      * the LLI, and ask the driver to create its hardware
> +      * descriptor.
> +      */
> +     for_each_sg(sgl, sg, sg_len, i) {
> +             v_lli = dma_pool_alloc(sdma->pool, GFP_NOWAIT, &p_lli);
> +             if (!v_lli)
> +                     goto err_lli_free;
> +
> +             /* Ask the driver to initialise its hardware descriptor */
> +             if (sdma->ops->lli_init(v_lli, req->private,
> +                                     SDMA_TRANSFER_SLAVE,
> +                                     dir, sg_dma_address(sg),
> +                                     config->dst_addr, sg_dma_len(sg),
> +                                     config))
> +                     goto err_lli_free;
> +
> +             /*
> +              * If it's our first item, initialise our descriptor
> +              * pointers to the lli.
> +              *
> +              * Otherwise, queue it to the end of the LLI.
> +              */
> +             if (!prev_v_lli) {
> +                     desc->p_lli = p_lli;
> +                     desc->v_lli = v_lli;
> +                     prev_v_lli = v_lli;
> +             } else {
> +                     /* And to queue it at the end of its hardware LLI */
> +                     prev_v_lli = sdma->ops->lli_queue(prev_v_lli, v_lli, p_lli);
> +             }
> +     }
> +
> +     return vchan_tx_prep(&req->vchan, &desc->vdesc, flags);
> +
> +err_lli_free:
> +#warning "Free the LLI"
Why a non-standard preprocessor #warning?

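With the list approach suggested for tx_status above, the error path
becomes trivial (sketch, assuming each item records its own dma
address):

	struct sdma_lli *lli, *tmp;

	list_for_each_entry_safe(lli, tmp, &desc->lli_list, node) {
		list_del(&lli->node);
		dma_pool_free(sdma->pool, lli, lli->p_lli);
	}

As it stands only the head's p_lli is recorded, so the intermediate
items cannot even be freed generically.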
> +
> +     kfree(desc);
> +     return NULL;
> +}
> +
> +static void sdma_issue_pending(struct dma_chan *chan)
> +{
> +     struct sdma_request *sreq = to_sdma_request(chan);
> +     struct sdma *sdma = to_sdma(chan->device);
> +     struct virt_dma_desc *vdesc;
> +     struct sdma_channel *schan;
> +     struct sdma_desc *sdesc;
> +     unsigned long flags;
> +
> +     spin_lock_irqsave(&sreq->vchan.lock, flags);
> +
> +     /* See if we have anything to do */
> +     if (!vchan_issue_pending(&sreq->vchan))
> +             goto out_chan_unlock;
> +
> +     /* Is some work in progress already? */
> +     if (sreq->chan)
> +             goto out_chan_unlock;
> +
> +     spin_lock(&sdma->lock);
> +
> +     /* Is there an available channel */
> +     if (list_empty(&sdma->avail_chans))
> +             goto out_main_unlock;
> +
> +     /*
> +      * If there's no validate_request callback, it means that all
> +      * channels can transfer any request. Pick the first available
> +      * channel.
> +      *
> +      * Otherwise, iterate over all the pending channels and call
> +      * validate_request.
> +      */
> +     if (!sdma->ops->validate_request) {
> +             schan = list_first_entry(&sdma->avail_chans,
> +                                      struct sdma_channel, node);
> +     } else {
> +             list_for_each_entry(schan, &sdma->avail_chans, node) {
> +                     if (sdma->ops->validate_request(schan, sreq)) {
> +                             list_del_init(&schan->node);
> +                             break;
> +                     }
> +             }
> +     }
Why not reuse sdma_pop_queued_transfer() here instead of duplicating the
validate_request loop?
> +
> +     if (!schan)
> +             goto out_main_unlock;
> +
> +     sreq->chan = schan;
> +
> +     /* Retrieve the next transfer descriptor */
> +     vdesc = vchan_next_desc(&sreq->vchan);
> +     schan->desc = sdesc = to_sdma_desc(&vdesc->tx);
> +
> +     sdma->ops->channel_start(schan, sdesc);
> +
> +out_main_unlock:
> +     spin_unlock(&sdma->lock);
> +out_chan_unlock:
> +     spin_unlock_irqrestore(&sreq->vchan.lock, flags);
> +}
> +
> +static void sdma_free_chan_resources(struct dma_chan *chan)
> +{
> +     struct sdma_request *sreq = to_sdma_request(chan);
> +     unsigned long flags;
> +
> +     spin_lock_irqsave(&sreq->vchan.lock, flags);
> +     list_del_init(&sreq->node);
> +     spin_unlock_irqrestore(&sreq->vchan.lock, flags);
> +
> +     vchan_free_chan_resources(&sreq->vchan);
> +}
> +
> +static void sdma_free_desc(struct virt_dma_desc *vdesc)
> +{
> +#warning "Free the descriptors"
?? Another #warning stub instead of an implementation.

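Presumably this wants to be something like (untested, reusing the same
hypothetical per-item list):

	static void sdma_free_desc(struct virt_dma_desc *vdesc)
	{
		struct sdma_desc *desc = to_sdma_desc(&vdesc->tx);
		struct sdma *sdma = to_sdma(vdesc->tx.chan->device);
		struct sdma_lli *lli, *tmp;

		list_for_each_entry_safe(lli, tmp, &desc->lli_list, node) {
			list_del(&lli->node);
			dma_pool_free(sdma->pool, lli, lli->p_lli);
		}

		kfree(desc);
	}

otherwise every completed descriptor is leaked.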
> +}
> +
> +struct sdma *sdma_alloc(struct device *dev,
> +                     unsigned int channels,
> +                     unsigned int requests,
> +                     ssize_t lli_size,
> +                     ssize_t priv_size)
> +{
> +     struct sdma *sdma;
> +     int ret, i;
> +
> +     sdma = devm_kzalloc(dev, sizeof(*sdma) + priv_size, GFP_KERNEL);
> +     if (!sdma) {
> +             ret = -ENOMEM;
> +             goto out;
> +     }
> +     INIT_LIST_HEAD(&sdma->pend_reqs);
> +     INIT_LIST_HEAD(&sdma->avail_chans);
> +     spin_lock_init(&sdma->lock);
> +
> +     sdma->pool = dmam_pool_create(dev_name(dev), dev, lli_size, 4, 0);
> +     if (!sdma->pool) {
> +             ret = -ENOMEM;
> +             goto out;
> +     }
> +
> +     sdma->channels_nr = channels;
> +     sdma->channels = devm_kcalloc(dev, channels, sizeof(*sdma->channels), GFP_KERNEL);
> +     if (!sdma->channels) {
> +             ret = -ENOMEM;
> +             goto out;
> +     }
> +
> +     for (i = 0; i < channels; i++) {
> +             struct sdma_channel *schan = &sdma->channels[i];
> +
> +             list_add_tail(&schan->node, &sdma->avail_chans);
> +             schan->index = i;
> +     }
> +
> +     sdma->requests_nr = requests;
> +     sdma->requests = devm_kcalloc(dev, requests, sizeof(*sdma->requests), GFP_KERNEL);
> +     if (!sdma->channels) {
> +             ret = -ENOMEM;
> +             goto out;
> +     }
> +
> +     INIT_LIST_HEAD(&sdma->ddev.channels);
> +
> +     for (i = 0; i < requests; i++) {
> +             struct sdma_request *sreq = &sdma->requests[i];
> +
> +             INIT_LIST_HEAD(&sreq->node);
> +             sreq->vchan.desc_free = sdma_free_desc;
> +             vchan_init(&sreq->vchan, &sdma->ddev);
> +     }
> +
> +     return sdma;
> +
> +out:
> +     return ERR_PTR(ret);
> +}
> +EXPORT_SYMBOL(sdma_alloc);
> +
> +void sdma_free(struct sdma *sdma)
> +{
> +     return;
> +}
> +EXPORT_SYMBOL(sdma_free);
> +
> +int sdma_register(struct sdma *sdma,
> +               struct sdma_ops *ops)
> +{
> +     struct dma_device *ddev = &sdma->ddev;
> +
> +     sdma->ops = ops;
> +
> +     ddev->device_config = sdma_config;
> +     ddev->device_tx_status = sdma_tx_status;
> +     ddev->device_issue_pending = sdma_issue_pending;
> +     ddev->device_free_chan_resources = sdma_free_chan_resources;
> +
> +     if (ops->channel_pause)
> +             ddev->device_pause = sdma_pause;
> +
> +     if (ops->channel_resume)
> +             ddev->device_resume = sdma_resume;
> +
> +     if (ops->channel_terminate)
> +             ddev->device_terminate_all = sdma_terminate;
> +
> +     if (dma_has_cap(DMA_SLAVE, ddev->cap_mask))
> +             ddev->device_prep_slave_sg = sdma_prep_slave_sg;
> +
> +     if (dma_has_cap(DMA_MEMCPY, ddev->cap_mask))
> +             ddev->device_prep_dma_memcpy = sdma_prep_memcpy;
> +
> +     dma_async_device_register(ddev);
> +
> +     return 0;
> +}
> +EXPORT_SYMBOL(sdma_register);
> +
> +int sdma_unregister(struct sdma *sdma)
> +{
> +     dma_async_device_unregister(&sdma->ddev);
> +
> +     return 0;
> +}
> +EXPORT_SYMBOL(sdma_unregister);
> diff --git a/drivers/dma/scheduled-dma.h b/drivers/dma/scheduled-dma.h
> new file mode 100644
> index 000000000000..d24c8143b2b6
> --- /dev/null
> +++ b/drivers/dma/scheduled-dma.h
> @@ -0,0 +1,140 @@
> +/*
> + * Copyright (C) 2015 Maxime Ripard
> + * Maxime Ripard <maxime.rip...@free-electrons.com>
> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License as published by
> + * the Free Software Foundation; either version 2 of the License, or
> + * (at your option) any later version.
> + */
> +
> +#include "virt-dma.h"
> +
> +#ifndef _SCHEDULED_DMA_H_
> +#define _SCHEDULED_DMA_H_
> +
> +enum sdma_transfer_type {
> +     SDMA_TRANSFER_MEMCPY,
> +     SDMA_TRANSFER_SLAVE,
> +};
> +
> +enum sdma_report_status {
> +     SDMA_REPORT_CHUNK,
> +     SDMA_REPORT_TRANSFER,
> +};
Can you please add kernel-doc style documentation for these and the
structs below?
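e.g. (my guess at the intended meanings):

	/**
	 * enum sdma_report_status - status reported by drivers to sdma_report()
	 * @SDMA_REPORT_CHUNK: one chunk of the current descriptor completed
	 * @SDMA_REPORT_TRANSFER: the whole descriptor completed
	 */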
> +
> +struct sdma_desc {
> +     struct virt_dma_desc    vdesc;
> +
> +     /* Entry point to our LLI */
> +     dma_addr_t              p_lli;
> +     void                    *v_lli;
> +};
> +
> +struct sdma_channel {
> +     struct sdma_desc        *desc;
> +     unsigned int            index;
> +     struct list_head        node;
> +     void                    *private;
> +};
> +
> +struct sdma_request {
> +     struct dma_slave_config cfg;
> +     struct list_head        node;
> +     struct virt_dma_chan    vchan;
> +
> +     struct sdma_channel     *chan;
> +     void                    *private;
> +};
> +
> +struct sdma_ops {
> +     /* LLI management operations */
> +     bool                    (*lli_has_next)(void *v_lli);
> +     void                    *(*lli_next)(void *v_lli);
> +     int                     (*lli_init)(void *v_lli, void *sreq_priv,
> +                                         enum sdma_transfer_type type,
> +                                         enum dma_transfer_direction dir,
> +                                         dma_addr_t src,
> +                                         dma_addr_t dst, u32 len,
> +                                         struct dma_slave_config *config);
> +     void                    *(*lli_queue)(void *prev_v_lli, void *v_lli, dma_addr_t p_lli);
> +     size_t                  (*lli_size)(void *v_lli);
If we can manage these using the standard list_head (as suggested above
for the residue walk), it would simplify the code a lot as well.

> +
> +     /* Scheduler helper */
> +     struct sdma_request     *(*validate_request)(struct sdma_channel *chan,
> +                                                  struct sdma_request *req);
> +
> +     /* Transfer Management Functions */
> +     int                     (*channel_pause)(struct sdma_channel *chan);
> +     int                     (*channel_resume)(struct sdma_channel *chan);
> +     int                     (*channel_start)(struct sdma_channel *chan, struct sdma_desc *sdesc);
> +     int                     (*channel_terminate)(struct sdma_channel *chan);
> +     size_t                  (*channel_residue)(struct sdma_channel *chan);
> +};
> +
> +struct sdma {
> +     struct dma_device       ddev;
> +     struct sdma_ops         *ops;
> +
> +     struct dma_pool         *pool;
> +
> +     struct sdma_channel     *channels;
> +     int                     channels_nr;
> +     struct sdma_request     *requests;
> +     int                     requests_nr;
> +
> +     struct list_head        avail_chans;
> +     struct list_head        pend_reqs;
> +
> +     spinlock_t              lock;
> +
> +     unsigned long           private[];
> +};
> +
> +static inline struct sdma *to_sdma(struct dma_device *d)
> +{
> +     return container_of(d, struct sdma, ddev);
> +}
> +
> +static inline struct sdma_request *to_sdma_request(struct dma_chan *chan)
> +{
> +     return container_of(chan, struct sdma_request, vchan.chan);
> +}
> +
> +static inline struct sdma_desc *to_sdma_desc(struct dma_async_tx_descriptor *tx)
> +{
> +     return container_of(tx, struct sdma_desc, vdesc.tx);
> +}
> +
> +static inline void *sdma_priv(struct sdma *sdma)
> +{
> +     return (void*)sdma->private;
> +}
> +
> +static inline void sdma_set_chan_private(struct sdma *sdma, void *ptr)
> +{
> +     int i;
> +
> +     for (i = 0; i < sdma->channels_nr; i++) {
> +             struct sdma_channel *schan = &sdma->channels[i];
> +
> +             schan->private = ptr;
> +     }
> +}
> +
> +struct sdma_desc *sdma_report(struct sdma *sdma,
> +                           struct sdma_channel *chan,
> +                           enum sdma_report_status status);
> +
> +struct sdma *sdma_alloc(struct device *dev,
> +                     unsigned int channels,
> +                     unsigned int requests,
> +                     ssize_t lli_size,
> +                     ssize_t priv_size);
> +void sdma_free(struct sdma *sdma);
> +
> +int sdma_register(struct sdma *sdma,
> +               struct sdma_ops *ops);
> +int sdma_unregister(struct sdma *sdma);
> +
> +#endif /* _SCHEDULED_DMA_H_ */
> -- 
> 2.3.3