Re: [PATCH v6 11/11] mmc: add handling for two parallel block requests in issue_rw_rq

2011-06-21 Thread Per Forlin
On 20 June 2011 17:17, Kishore Kadiyala kishorek.kadiy...@gmail.com wrote:
 On Mon, Jun 20, 2011 at 2:47 AM, Per Forlin per.for...@linaro.org wrote:
 Change mmc_blk_issue_rw_rq() to become asynchronous.
 The execution flow looks like this:
 The mmc-queue calls issue_rw_rq(), which sends the request
 to the host and returns back to the mmc-queue. The mmc-queue calls
 issue_rw_rq() again with a new request. This new request is prepared,
 in issue_rw_rq(), then it waits for the active request to complete before
 pushing it to the host. When the mmc-queue is empty it will call
 issue_rw_rq() with req=NULL to finish off the active request
 without starting a new request.

 Signed-off-by: Per Forlin per.for...@linaro.org
 ---
  drivers/mmc/card/block.c |  121 
 +-
  drivers/mmc/card/queue.c |   17 +--
  drivers/mmc/card/queue.h |    1 +
  3 files changed, 101 insertions(+), 38 deletions(-)

 diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
 index 6a84a75..66db77a 100644
 --- a/drivers/mmc/card/block.c
 +++ b/drivers/mmc/card/block.c
 @@ -108,6 +108,7 @@ static DEFINE_MUTEX(open_lock);

  enum mmc_blk_status {
        MMC_BLK_SUCCESS = 0,
 +       MMC_BLK_PARTIAL,
        MMC_BLK_RETRY,
        MMC_BLK_DATA_ERR,
        MMC_BLK_CMD_ERR,
 @@ -668,14 +669,16 @@ static inline void mmc_apply_rel_rw(struct 
 mmc_blk_request *brq,
        }
  }

 -static enum mmc_blk_status mmc_blk_err_check(struct mmc_blk_request *brq,
 -                                            struct request *req,
 -                                            struct mmc_card *card,
 -                                            struct mmc_blk_data *md)
 +static int mmc_blk_err_check(struct mmc_card *card,
 +                            struct mmc_async_req *areq)
  {
        struct mmc_command cmd;
        u32 status = 0;
        enum mmc_blk_status ret = MMC_BLK_SUCCESS;
 +       struct mmc_queue_req *mq_mrq = container_of(areq, struct 
 mmc_queue_req,
 +                                                   mmc_active);
 +       struct mmc_blk_request *brq = mq_mrq-brq;
 +       struct request *req = mq_mrq-req;

        /*
         * Check for errors here, but don't jump to cmd_err
 @@ -770,7 +773,11 @@ static enum mmc_blk_status mmc_blk_err_check(struct 
 mmc_blk_request *brq,
                else
                        ret = MMC_BLK_DATA_ERR;
        }
 -out:
 +
  +       if (ret == MMC_BLK_SUCCESS &&
  +           blk_rq_bytes(req) != brq->data.bytes_xfered)
 +               ret = MMC_BLK_PARTIAL;
 + out:
        return ret;
  }

 @@ -901,27 +908,59 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req 
 *mqrq,
                brq-data.sg_len = i;
        }

 +       mqrq-mmc_active.mrq = brq-mrq;
 +       mqrq-mmc_active.err_check = mmc_blk_err_check;
 +
        mmc_queue_bounce_pre(mqrq);
  }

 -static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 +static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
  {
        struct mmc_blk_data *md = mq-data;
        struct mmc_card *card = md-queue.card;
 -       struct mmc_blk_request *brq = mq-mqrq_cur-brq;
 -       int ret = 1, disable_multi = 0;
 +       struct mmc_blk_request *brq;
 +       int ret = 1;
 +       int disable_multi = 0;
        enum mmc_blk_status status;
 +       struct mmc_queue_req *mq_rq;
 +       struct request *req;
 +       struct mmc_async_req *areq;
 +
  +       if (!rqc && !mq->mqrq_prev->req)
 +               goto out;

        do {
 -               mmc_blk_rw_rq_prep(mq-mqrq_cur, card, disable_multi, mq);
 -               mmc_wait_for_req(card-host, brq-mrq);
 +               if (rqc) {
 +                       mmc_blk_rw_rq_prep(mq-mqrq_cur, card, 0, mq);
 +                       areq = mq-mqrq_cur-mmc_active;
 +               } else
 +                       areq = NULL;
 +               areq = mmc_start_req(card-host, areq, (int *) status);

 I think 'status' is used uninitialized.
status is an out parameter. From that perspective it is always initialised.
I should update the doc description of mmc_start_req to clarify this.

 With this struct mmc_async_req *mmc_start_req in your first patch
 if (error)
        *error = err;
 return data;
 condition which always passes.

 You can have
 enum mmc_blk_status status = MMC_BLK_SUCCESS;

 struct mmc_async_req *mmc_start_req  {
 err = host-areq-err_check(host-card, host-areq);
                if (err) {
                             ...
                             ...
                             *error = err;
                }

 no need to update * error here in success case
 return data
 }
I agree, makes the code more clear.

Thanks,
Per
--
To unsubscribe from this list: send the line unsubscribe linux-mmc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH v6 02/11] omap_hsmmc: add support for pre_req and post_req

2011-06-21 Thread Per Forlin
On 21 June 2011 07:41, Kishore Kadiyala kishorek.kadiy...@gmail.com wrote:
 snip

 +
 +static void omap_hsmmc_pre_req(struct mmc_host *mmc, struct mmc_request 
 *mrq,
 +                              bool is_first_req)

 I don't see the usage of is_first_req below.
 Is it required?

It is not required. It is only an indication that this request is the
first in a series of request. The host driver may do various
optimisations based on this information. The first request in a series
of jobs can't be prepared in parallel to the previous job. The host
driver can do the following to minimise latency for the first job.
 * Preparing the cache while the MMC read/write cmd is being
processed. In this case the pre_req could do nothing and the job is
instead run in parallel to the read/write cmd being sent. If the
is_first_req is false pre_req will run in parallel to an active
transfer, in this case it is more efficient to prepare the request in
pre_req.
 * Run PIO mode instead of DMA
 * Maybe there can be power related optimisations based on if it is
one single transfer or multiple ones.

 +{
 +       struct omap_hsmmc_host *host = mmc_priv(mmc);
 +
 +       if (mrq-data-host_cookie) {
 +               mrq-data-host_cookie = 0;
 +               return ;
 +       }
 +
 +       if (host-use_dma)
 +               if (omap_hsmmc_pre_dma_transfer(host, mrq-data,
 +                                               host-next_data))
 +                       mrq-data-host_cookie = 0;
 +}
 +
  /*


Thanks for your comments,
Per
--
To unsubscribe from this list: send the line unsubscribe linux-mmc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH v6 11/11] mmc: add handling for two parallel block requests in issue_rw_rq

2011-06-21 Thread Per Forlin
On 21 June 2011 08:40, Per Forlin per.for...@linaro.org wrote:
 On 20 June 2011 17:17, Kishore Kadiyala kishorek.kadiy...@gmail.com wrote:
 On Mon, Jun 20, 2011 at 2:47 AM, Per Forlin per.for...@linaro.org wrote:
 Change mmc_blk_issue_rw_rq() to become asynchronous.
 The execution flow looks like this:
 The mmc-queue calls issue_rw_rq(), which sends the request
 to the host and returns back to the mmc-queue. The mmc-queue calls
 issue_rw_rq() again with a new request. This new request is prepared,
 in issue_rw_rq(), then it waits for the active request to complete before
 pushing it to the host. When the mmc-queue is empty it will call
 issue_rw_rq() with req=NULL to finish off the active request
 without starting a new request.

 Signed-off-by: Per Forlin per.for...@linaro.org
 ---
  drivers/mmc/card/block.c |  121 
 +-
  drivers/mmc/card/queue.c |   17 +--
  drivers/mmc/card/queue.h |    1 +
  3 files changed, 101 insertions(+), 38 deletions(-)

 diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
 index 6a84a75..66db77a 100644
 --- a/drivers/mmc/card/block.c
 +++ b/drivers/mmc/card/block.c
 @@ -108,6 +108,7 @@ static DEFINE_MUTEX(open_lock);

  enum mmc_blk_status {
        MMC_BLK_SUCCESS = 0,
 +       MMC_BLK_PARTIAL,
        MMC_BLK_RETRY,
        MMC_BLK_DATA_ERR,
        MMC_BLK_CMD_ERR,
 @@ -668,14 +669,16 @@ static inline void mmc_apply_rel_rw(struct 
 mmc_blk_request *brq,
        }
  }

 -static enum mmc_blk_status mmc_blk_err_check(struct mmc_blk_request *brq,
 -                                            struct request *req,
 -                                            struct mmc_card *card,
 -                                            struct mmc_blk_data *md)
 +static int mmc_blk_err_check(struct mmc_card *card,
 +                            struct mmc_async_req *areq)
  {
        struct mmc_command cmd;
        u32 status = 0;
        enum mmc_blk_status ret = MMC_BLK_SUCCESS;
 +       struct mmc_queue_req *mq_mrq = container_of(areq, struct 
 mmc_queue_req,
 +                                                   mmc_active);
 +       struct mmc_blk_request *brq = mq_mrq-brq;
 +       struct request *req = mq_mrq-req;

        /*
         * Check for errors here, but don't jump to cmd_err
 @@ -770,7 +773,11 @@ static enum mmc_blk_status mmc_blk_err_check(struct 
 mmc_blk_request *brq,
                else
                        ret = MMC_BLK_DATA_ERR;
        }
 -out:
 +
  +       if (ret == MMC_BLK_SUCCESS &&
  +           blk_rq_bytes(req) != brq->data.bytes_xfered)
 +               ret = MMC_BLK_PARTIAL;
 + out:
        return ret;
  }

 @@ -901,27 +908,59 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req 
 *mqrq,
                brq-data.sg_len = i;
        }

 +       mqrq-mmc_active.mrq = brq-mrq;
 +       mqrq-mmc_active.err_check = mmc_blk_err_check;
 +
        mmc_queue_bounce_pre(mqrq);
  }

 -static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 +static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
  {
        struct mmc_blk_data *md = mq-data;
        struct mmc_card *card = md-queue.card;
 -       struct mmc_blk_request *brq = mq-mqrq_cur-brq;
 -       int ret = 1, disable_multi = 0;
 +       struct mmc_blk_request *brq;
 +       int ret = 1;
 +       int disable_multi = 0;
        enum mmc_blk_status status;
 +       struct mmc_queue_req *mq_rq;
 +       struct request *req;
 +       struct mmc_async_req *areq;
 +
  +       if (!rqc && !mq->mqrq_prev->req)
 +               goto out;

        do {
 -               mmc_blk_rw_rq_prep(mq-mqrq_cur, card, disable_multi, mq);
 -               mmc_wait_for_req(card-host, brq-mrq);
 +               if (rqc) {
 +                       mmc_blk_rw_rq_prep(mq-mqrq_cur, card, 0, mq);
 +                       areq = mq-mqrq_cur-mmc_active;
 +               } else
 +                       areq = NULL;
 +               areq = mmc_start_req(card-host, areq, (int *) status);

 I think 'status' is used uninitialized.
 status is an out parameter. From that perspective it is always initialised.
 I should update the doc description of mmc_start_req to clarify this.

 With this struct mmc_async_req *mmc_start_req in your first patch
 if (error)
        *error = err;
 return data;
 condition which always passes.

 You can have
 enum mmc_blk_status status = MMC_BLK_SUCCESS;
In core.c there is no access to the type enum mmc_blk_status status,
this type is block.c specific.
The intention is to make this function available for SDIO as well.
int err = 0; is set at the top of mmc_start_req(). Default err condition is 0.

What do you think about the following changes?

  * @areq: async request to start
- * @error: non zero in case of error
+ * @error: out parameter returns 0 for success, otherwise non zero
  *
  * Start a new MMC custom command request for a host.
  * If there is on ongoing async request wait for completion
@@ -334,9 +334,7 

Re: [PATCH v6 11/11] mmc: add handling for two parallel block requests in issue_rw_rq

2011-06-21 Thread Per Forlin
On 19 June 2011 23:17, Per Forlin per.for...@linaro.org wrote:
 Change mmc_blk_issue_rw_rq() to become asynchronous.
 The execution flow looks like this:
 The mmc-queue calls issue_rw_rq(), which sends the request
 to the host and returns back to the mmc-queue. The mmc-queue calls
 issue_rw_rq() again with a new request. This new request is prepared,
 in issue_rw_rq(), then it waits for the active request to complete before
 pushing it to the host. When the mmc-queue is empty it will call
 issue_rw_rq() with req=NULL to finish off the active request
 without starting a new request.

 Signed-off-by: Per Forlin per.for...@linaro.org
 ---
  drivers/mmc/card/block.c |  121 
 +-
  drivers/mmc/card/queue.c |   17 +--
  drivers/mmc/card/queue.h |    1 +
  3 files changed, 101 insertions(+), 38 deletions(-)

 diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
 index 6a84a75..66db77a 100644
 --- a/drivers/mmc/card/block.c
 +++ b/drivers/mmc/card/block.c
 @@ -108,6 +108,7 @@ static DEFINE_MUTEX(open_lock);

  enum mmc_blk_status {
        MMC_BLK_SUCCESS = 0,
 +       MMC_BLK_PARTIAL,
        MMC_BLK_RETRY,
        MMC_BLK_DATA_ERR,
        MMC_BLK_CMD_ERR,
 @@ -668,14 +669,16 @@ static inline void mmc_apply_rel_rw(struct 
 mmc_blk_request *brq,
        }
  }

 -static enum mmc_blk_status mmc_blk_err_check(struct mmc_blk_request *brq,
 -                                            struct request *req,
 -                                            struct mmc_card *card,
 -                                            struct mmc_blk_data *md)
 +static int mmc_blk_err_check(struct mmc_card *card,
 +                            struct mmc_async_req *areq)
  {
        struct mmc_command cmd;
        u32 status = 0;
        enum mmc_blk_status ret = MMC_BLK_SUCCESS;
 +       struct mmc_queue_req *mq_mrq = container_of(areq, struct 
 mmc_queue_req,
 +                                                   mmc_active);
 +       struct mmc_blk_request *brq = mq_mrq-brq;
 +       struct request *req = mq_mrq-req;

        /*
         * Check for errors here, but don't jump to cmd_err
 @@ -770,7 +773,11 @@ static enum mmc_blk_status mmc_blk_err_check(struct 
 mmc_blk_request *brq,
                else
                        ret = MMC_BLK_DATA_ERR;
        }
 -out:
 +
  +       if (ret == MMC_BLK_SUCCESS &&
  +           blk_rq_bytes(req) != brq->data.bytes_xfered)
 +               ret = MMC_BLK_PARTIAL;
 + out:
        return ret;
  }

 @@ -901,27 +908,59 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req 
 *mqrq,
                brq-data.sg_len = i;
        }

 +       mqrq-mmc_active.mrq = brq-mrq;
 +       mqrq-mmc_active.err_check = mmc_blk_err_check;
 +
        mmc_queue_bounce_pre(mqrq);
  }

 -static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 +static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
  {
        struct mmc_blk_data *md = mq-data;
        struct mmc_card *card = md-queue.card;
 -       struct mmc_blk_request *brq = mq-mqrq_cur-brq;
 -       int ret = 1, disable_multi = 0;
 +       struct mmc_blk_request *brq;
 +       int ret = 1;
 +       int disable_multi = 0;
        enum mmc_blk_status status;
 +       struct mmc_queue_req *mq_rq;
 +       struct request *req;
 +       struct mmc_async_req *areq;
 +
  +       if (!rqc && !mq->mqrq_prev->req)
 +               goto out;

        do {
 -               mmc_blk_rw_rq_prep(mq-mqrq_cur, card, disable_multi, mq);
 -               mmc_wait_for_req(card-host, brq-mrq);
 +               if (rqc) {
 +                       mmc_blk_rw_rq_prep(mq-mqrq_cur, card, 0, mq);
 +                       areq = mq-mqrq_cur-mmc_active;
 +               } else
 +                       areq = NULL;
 +               areq = mmc_start_req(card-host, areq, (int *) status);
 +               if (!areq)
 +                       goto out;

 -               mmc_queue_bounce_post(mq-mqrq_cur);
 -               status = mmc_blk_err_check(brq, req, card, md);
 +               mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
 +               brq = mq_rq-brq;
 +               req = mq_rq-req;
 +               mmc_queue_bounce_post(mq_rq);

                switch (status) {
 -               case MMC_BLK_CMD_ERR:
 -                       goto cmd_err;
 +               case MMC_BLK_SUCCESS:
 +               case MMC_BLK_PARTIAL:
 +                       /*
 +                        * A block was successfully transferred.
 +                        */
 +                       spin_lock_irq(md-lock);
 +                       ret = __blk_end_request(req, 0,
 +                                               brq-data.bytes_xfered);
 +                       spin_unlock_irq(md-lock);
 +                       if (status == MMC_BLK_SUCCESS  ret) {
 +                               /* If this happen it is a bug */
 +                               printk(KERN_ERR %s BUG rq_tot %d d_xfer 
 %d\n,
 +           

Re: [PATCH v6 00/11] mmc: use nonblock mmc requests to minimize latency

2011-06-21 Thread Russell King - ARM Linux
On Sun, Jun 19, 2011 at 11:17:26PM +0200, Per Forlin wrote:
 How significant is the cache maintenance over head?

Per,

Can you measure how much difference this has before and after your
patch set please?  This moves the dsb() out of the individual cache
maintanence functions, such that we will only perform one dsb() per
dma_*_sg call rather than one per SG entry.

Thanks.

 arch/arm/include/asm/dma-mapping.h |   11 +++
 arch/arm/mm/cache-fa.S |6 --
 arch/arm/mm/cache-v4wb.S   |2 --
 arch/arm/mm/cache-v6.S |6 --
 arch/arm/mm/cache-v7.S |3 ---
 arch/arm/mm/dma-mapping.c  |8 
 6 files changed, 19 insertions(+), 17 deletions(-)

diff --git a/arch/arm/include/asm/dma-mapping.h 
b/arch/arm/include/asm/dma-mapping.h
index 4fff837..853eba5 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -115,6 +115,11 @@ static inline void __dma_page_dev_to_cpu(struct page 
*page, unsigned long off,
___dma_page_dev_to_cpu(page, off, size, dir);
 }
 
+static inline void __dma_sync(void)
+{
+   dsb();
+}
+
 /*
  * Return whether the given device DMA address mask can be supported
  * properly.  For example, if your device can only drive the low 24-bits
@@ -378,6 +383,7 @@ static inline dma_addr_t dma_map_single(struct device *dev, 
void *cpu_addr,
BUG_ON(!valid_dma_direction(dir));
 
addr = __dma_map_single(dev, cpu_addr, size, dir);
+   __dma_sync();
debug_dma_map_page(dev, virt_to_page(cpu_addr),
(unsigned long)cpu_addr  ~PAGE_MASK, size,
dir, addr, true);
@@ -407,6 +413,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, 
struct page *page,
BUG_ON(!valid_dma_direction(dir));
 
addr = __dma_map_page(dev, page, offset, size, dir);
+   __dma_sync();
debug_dma_map_page(dev, page, offset, size, dir, addr, false);
 
return addr;
@@ -431,6 +438,7 @@ static inline void dma_unmap_single(struct device *dev, 
dma_addr_t handle,
 {
debug_dma_unmap_page(dev, handle, size, dir, true);
__dma_unmap_single(dev, handle, size, dir);
+   __dma_sync();
 }
 
 /**
@@ -452,6 +460,7 @@ static inline void dma_unmap_page(struct device *dev, 
dma_addr_t handle,
 {
debug_dma_unmap_page(dev, handle, size, dir, false);
__dma_unmap_page(dev, handle, size, dir);
+   __dma_sync();
 }
 
 /**
@@ -484,6 +493,7 @@ static inline void dma_sync_single_range_for_cpu(struct 
device *dev,
return;
 
__dma_single_dev_to_cpu(dma_to_virt(dev, handle) + offset, size, dir);
+   __dma_sync();
 }
 
 static inline void dma_sync_single_range_for_device(struct device *dev,
@@ -498,6 +508,7 @@ static inline void dma_sync_single_range_for_device(struct 
device *dev,
return;
 
__dma_single_cpu_to_dev(dma_to_virt(dev, handle) + offset, size, dir);
+   __dma_sync();
 }
 
 static inline void dma_sync_single_for_cpu(struct device *dev,
diff --git a/arch/arm/mm/cache-fa.S b/arch/arm/mm/cache-fa.S
index 1fa6f71..6eeb734 100644
--- a/arch/arm/mm/cache-fa.S
+++ b/arch/arm/mm/cache-fa.S
@@ -179,8 +179,6 @@ fa_dma_inv_range:
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
-   mov r0, #0
-   mcr p15, 0, r0, c7, c10, 4  @ drain write buffer
mov pc, lr
 
 /*
@@ -197,8 +195,6 @@ fa_dma_clean_range:
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
-   mov r0, #0  
-   mcr p15, 0, r0, c7, c10, 4  @ drain write buffer
mov pc, lr
 
 /*
@@ -212,8 +208,6 @@ ENTRY(fa_dma_flush_range)
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
-   mov r0, #0  
-   mcr p15, 0, r0, c7, c10, 4  @ drain write buffer
mov pc, lr
 
 /*
diff --git a/arch/arm/mm/cache-v4wb.S b/arch/arm/mm/cache-v4wb.S
index f40c696..523c0cb 100644
--- a/arch/arm/mm/cache-v4wb.S
+++ b/arch/arm/mm/cache-v4wb.S
@@ -194,7 +194,6 @@ v4wb_dma_inv_range:
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
-   mcr p15, 0, r0, c7, c10, 4  @ drain write buffer
mov pc, lr
 
 /*
@@ -211,7 +210,6 @@ v4wb_dma_clean_range:
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
-   mcr p15, 0, r0, c7, c10, 4  @ drain write buffer
mov pc, lr
 
 /*
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index 73b4a8b..7a842dd 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -239,8 +239,6 @@ v6_dma_inv_range:
strlo   r2, [r0]@ write for ownership
 #endif
blo 1b
-   mov r0, #0
-   mcr p15, 0, r0, c7, c10, 4  @ drain write buffer
mov pc, lr
 
 /*
@@ -262,8 +260,6 @@ 

Re: [PATCH v6 00/11] mmc: use nonblock mmc requests to minimize latency

2011-06-21 Thread Per Forlin
On 21 June 2011 09:53, Russell King - ARM Linux li...@arm.linux.org.uk wrote:
 On Sun, Jun 19, 2011 at 11:17:26PM +0200, Per Forlin wrote:
 How significant is the cache maintenance over head?

 Per,

 Can you measure how much difference this has before and after your
 patch set please?
Absolutely, I can run the mmc_tests to get the measurement. The cache
affect is greater the faster the flash memory is. Currently I only
have access to a SD card (20 MiB/S). By the end of this week I can run
on eMMC (45 MiB/s) if this will be needed.

Thanks for your input,
Per
--
To unsubscribe from this list: send the line unsubscribe linux-mmc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH 4/5] ARM: mach-shmobile: ag5evm: consistently name sdhi info structures

2011-06-21 Thread Paul Mundt
On Tue, Jun 21, 2011 at 08:00:11AM +0900, Simon Horman wrote:
 Name the SDHI1 instance sh_sdhi1_info to be consistent with sh_sdhi0_info.
 
 Signed-off-by: Simon Horman ho...@verge.net.au
 
 ---
 
 Dependencies: None known

Applied, thanks.
--
To unsubscribe from this list: send the line unsubscribe linux-mmc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH v6 00/11] mmc: use nonblock mmc requests to minimize latency

2011-06-21 Thread Per Forlin
On 21 June 2011 10:09, Per Forlin per.for...@linaro.org wrote:
 On 21 June 2011 09:53, Russell King - ARM Linux li...@arm.linux.org.uk 
 wrote:
 On Sun, Jun 19, 2011 at 11:17:26PM +0200, Per Forlin wrote:
 How significant is the cache maintenance over head?

 Per,

 Can you measure how much difference this has before and after your
 patch set please?
 Absolutely, I can run the mmc_tests to get the measurement. The cache
 affect is greater the faster the flash memory is. Currently I only
 have access to a SD card (20 MiB/S). By the end of this week I can run
 on eMMC (45 MiB/s) if this will be needed.

Russell,

Here are the results.

mmc_test results without your DSB patch:
mmc0: Starting tests of card mmc0:80ca...
mmc0: Test case 37. Write performance with blocking req 4k to 4MB...
mmc0: Transfer of 32768 x 8 sectors (32768 x 4 KiB) took 17.907140069
seconds (7495 kB/s, 7319 KiB/s, 1829.88 IOPS)
mmc0: Transfer of 16384 x 16 sectors (16384 x 8 KiB) took 10.977203519
seconds (12226 kB/s, 11940 KiB/s, 1492.54 IOPS)
mmc0: Transfer of 8192 x 32 sectors (8192 x 16 KiB) took 8.618723194
seconds (15572 kB/s, 15207 KiB/s, 950.48 IOPS)
mmc0: Transfer of 4096 x 64 sectors (4096 x 32 KiB) took 7.452392708
seconds (18010 kB/s, 17587 KiB/s, 549.62 IOPS)
mmc0: Transfer of 2048 x 128 sectors (2048 x 64 KiB) took 6.839447152
seconds (19624 kB/s, 19164 KiB/s, 299.43 IOPS)
mmc0: Transfer of 1024 x 256 sectors (1024 x 128 KiB) took 6.533447450
seconds (20543 kB/s, 20061 KiB/s, 156.73 IOPS)
mmc0: Transfer of 512 x 512 sectors (512 x 256 KiB) took 6.355529943
seconds (21118 kB/s, 20623 KiB/s, 80.55 IOPS)
mmc0: Transfer of 256 x 1024 sectors (256 x 512 KiB) took 6.227417019
seconds (21552 kB/s, 21047 KiB/s, 41.10 IOPS)
mmc0: Transfer of 128 x 2048 sectors (128 x 1024 KiB) took 6.047821091
seconds (22192 kB/s, 21672 KiB/s, 21.16 IOPS)
mmc0: Transfer of 32 x 8192 sectors (32 x 4096 KiB) took 5.983120236
seconds (22432 kB/s, 21906 KiB/s, 5.34 IOPS)
mmc0: Result: OK
mmc0: Tests completed.
mmc0: Starting tests of card mmc0:80ca...
mmc0: Test case 38. Write performance with non-blocking req 4k to 4MB...
mmc0: Transfer of 32768 x 8 sectors (32768 x 4 KiB) took 17.004930158
seconds (7892 kB/s, 7707 KiB/s, 1926.97 IOPS)
mmc0: Transfer of 16384 x 16 sectors (16384 x 8 KiB) took 10.397338972
seconds (12908 kB/s, 12606 KiB/s, 1575.78 IOPS)
mmc0: Transfer of 8192 x 32 sectors (8192 x 16 KiB) took 8.127319360
seconds (16514 kB/s, 16127 KiB/s, 1007.95 IOPS)
mmc0: Transfer of 4096 x 64 sectors (4096 x 32 KiB) took 7.061096329
seconds (19008 kB/s, 18562 KiB/s, 580.07 IOPS)
mmc0: Transfer of 2048 x 128 sectors (2048 x 64 KiB) took 6.503535845
seconds (20637 kB/s, 20153 KiB/s, 314.90 IOPS)
mmc0: Transfer of 1024 x 256 sectors (1024 x 128 KiB) took 6.222897631
seconds (21568 kB/s, 21062 KiB/s, 164.55 IOPS)
mmc0: Transfer of 512 x 512 sectors (512 x 256 KiB) took 6.082733285
seconds (22065 kB/s, 21548 KiB/s, 84.17 IOPS)
mmc0: Transfer of 256 x 1024 sectors (256 x 512 KiB) took 5.928009056
seconds (22641 kB/s, 22110 KiB/s, 43.18 IOPS)
mmc0: Transfer of 128 x 2048 sectors (128 x 1024 KiB) took 5.891113751
seconds (22783 kB/s, 22249 KiB/s, 21.72 IOPS)
mmc0: Transfer of 32 x 8192 sectors (32 x 4096 KiB) took 5.878531233
seconds (22831 kB/s, 22296 KiB/s, 5.44 IOPS)
mmc0: Result: OK
mmc0: Tests completed.
mmc0: Starting tests of card mmc0:80ca...
mmc0: Test case 39. Read performance with blocking req 4k to 4MB...
mmc0: Transfer of 32768 x 8 sectors (32768 x 4 KiB) took 20.904750140
seconds (6420 kB/s, 6269 KiB/s, 1567.49 IOPS)
mmc0: Transfer of 16384 x 16 sectors (16384 x 8 KiB) took 12.929870605
seconds (10380 kB/s, 10137 KiB/s, 1267.14 IOPS)
mmc0: Transfer of 8192 x 32 sectors (8192 x 16 KiB) took 10.115753174
seconds (13268 kB/s, 12957 KiB/s, 809.82 IOPS)
mmc0: Transfer of 4096 x 64 sectors (4096 x 32 KiB) took 7.533538819
seconds (17816 kB/s, 17398 KiB/s, 543.70 IOPS)
mmc0: Transfer of 2048 x 128 sectors (2048 x 64 KiB) took 6.937011718
seconds (19348 kB/s, 18894 KiB/s, 295.22 IOPS)
mmc0: Transfer of 1024 x 256 sectors (1024 x 128 KiB) took 6.638824464
seconds (20217 kB/s, 19743 KiB/s, 154.24 IOPS)
mmc0: Transfer of 512 x 512 sectors (512 x 256 KiB) took 6.489288330
seconds (20682 kB/s, 20198 KiB/s, 78.89 IOPS)
mmc0: Transfer of 256 x 1024 sectors (256 x 512 KiB) took 6.414489746
seconds (20924 kB/s, 20433 KiB/s, 39.90 IOPS)
mmc0: Transfer of 128 x 2048 sectors (128 x 1024 KiB) took 6.376800426
seconds (21047 kB/s, 20554 KiB/s, 20.07 IOPS)
mmc0: Transfer of 32 x 8192 sectors (32 x 4096 KiB) took 6.348991821
seconds (21140 kB/s, 20644 KiB/s, 5.04 IOPS)
mmc0: Result: OK
mmc0: Tests completed.
mmc0: Starting tests of card mmc0:80ca...
mmc0: Test case 40. Read performance with non-blocking req 4k to 4MB...
mmc0: Transfer of 32768 x 8 sectors (32768 x 4 KiB) took 20.906376527
seconds (6419 kB/s, 6269 KiB/s, 1567.36 IOPS)
mmc0: Transfer of 16384 x 16 sectors (16384 x 8 KiB) took 12.929779053
seconds (10380 kB/s, 10137 KiB/s, 1267.15 IOPS)
mmc0: Transfer of 

Re: [PATCH 4/4] PCI: make cardbus-bridge resources nice-to-have

2011-06-21 Thread Dominik Brodowski
On Mon, Jun 20, 2011 at 03:47:17PM -0700, Ram Pai wrote:
 Allocate resources to cardbus bridge only after all other genuine
 resources requests are satisfied. Dont retry if resource allocation
 for cardbus-bridge fails.

Well, for those who use cardbus cards, cardbus resources aren't nice to
have, they are absolutely required. Of course, not all cardbus cards need
as many resources as are currently assigned, so I wouldn't oppose a patch
which marks _some_ of the currently assigned resources as nice to have.
But this approach -- 0 required, all nice to have -- seems wrong to me.

Best,
Dominik
--
To unsubscribe from this list: send the line unsubscribe linux-mmc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH v3 0/4] Extend sdhci-esdhc-imx card_detect and write_protect support for mx5

2011-06-21 Thread Rtp
Shawn Guo shawn@freescale.com writes:

 Hi Arnaud,

Hi,


 Would you please give a test on the series, as it fixed the issue
 you reported?  TIA.

I tested it yesterday on my efika platforms and the issue is
gone. Moreover, the different card slots on these platforms each use
a different card-detect type (internally connected, gpio, no card detect) and
they are all working as expected. I'm a little bit annoyed by the
polling on the slot without card detect, but this has nothing to do with
this patchset.

Side note: it turns out that the driver uses dev_warn when failing to
get the cd gpio/irq. Given that in these cases it jumps to
no_card_detect_pin and makes the probe fail, maybe a dev_err would be
better?

Tested-by: Arnaud Patard arnaud.pat...@rtp-net.org

Thanks,
Arnaud
--
To unsubscribe from this list: send the line unsubscribe linux-mmc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH v3 2/4] mmc: sdhci-esdhc-imx: SDHCI_CARD_PRESENT does not get cleared

2011-06-21 Thread Wolfram Sang
On Mon, Jun 20, 2011 at 06:38:43PM +0800, Shawn Guo wrote:
 The function esdhc_readl_le intends to clear bit SDHCI_CARD_PRESENT,
 when the card detect gpio tells there is no card.  But it does not
 clear the bit actually.  The patch gives a fix on that.
 
 Signed-off-by: Shawn Guo shawn@linaro.org

For the third time ;)

Acked-by: Wolfram Sang w.s...@pengutronix.de

Should go to stable.

-- 
Pengutronix e.K.   | Wolfram Sang|
Industrial Linux Solutions | http://www.pengutronix.de/  |


signature.asc
Description: Digital signature


Re: [PATCH 0/3] Improve MMC error handling (3rd rev)

2011-06-21 Thread Pawel Moll
On Mon, 2011-06-20 at 20:09 +0100, Russell King - ARM Linux wrote:
 This is the third revision of my improvements to the MMC block
 device error handling, which makes error handling more robust
 and permits MMC/SD to continue working in the presence of
 not-quite-perfect setups.

Tested-by: Pawel Moll pawel.m...@arm.com

Cheers!

Paweł


--
To unsubscribe from this list: send the line unsubscribe linux-mmc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH v6 11/11] mmc: add handling for two parallel block requests in issue_rw_rq

2011-06-21 Thread Kishore Kadiyala
Hi Per,

snip

 --- a/drivers/mmc/card/block.c
 +++ b/drivers/mmc/card/block.c
 @@ -108,6 +108,7 @@ static DEFINE_MUTEX(open_lock);

  enum mmc_blk_status {
        MMC_BLK_SUCCESS = 0,
 +       MMC_BLK_PARTIAL,
        MMC_BLK_RETRY,
        MMC_BLK_DATA_ERR,
        MMC_BLK_CMD_ERR,

snip

 -static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 +static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
  {
        struct mmc_blk_data *md = mq-data;
        struct mmc_card *card = md-queue.card;
 -       struct mmc_blk_request *brq = mq-mqrq_cur-brq;
 -       int ret = 1, disable_multi = 0;
 +       struct mmc_blk_request *brq;
 +       int ret = 1;
 +       int disable_multi = 0;
        enum mmc_blk_status status;
Can initialize here
enum mmc_blk_status = MMC_BLK_SUCCESS

 +       struct mmc_queue_req *mq_rq;
 +       struct request *req;
 +       struct mmc_async_req *areq;
 +
 +       if (!rqc  !mq-mqrq_prev-req)
 +               goto out;

snip

I meant doing initialization in block.c itself and not core.c as above.

 The intention is to make this function available for SDIO as well.

Totally agree, having the API access to SDIO

 int err = 0; is set at the top of mmc_start_req(). Default err condition is 
 0.

 What do you think about the following changes?

  *     @areq: async request to start
 - *     @error: non zero in case of error
 + *     @error: out parameter returns 0 for success, otherwise non zero
  *
  *     Start a new MMC custom command request for a host.
  *     If there is on ongoing async request wait for completion
 @@ -334,9 +334,7 @@ struct mmc_async_req *mmc_start_req(struct mmc_host *host,
                                mmc_post_req(host, areq-mrq, -EINVAL);

                        host-areq = NULL;
 -                       if (error)
 -                               *error = err;
 -                       return data;
 +                       goto out;
                }
        }

 @@ -347,6 +345,7 @@ struct mmc_async_req *mmc_start_req(struct mmc_host *host,
                mmc_post_req(host, host-areq-mrq, 0);

        host-areq = areq;
 + out:
        if (error)
                *error = err;
        return data;


The above change reduced the code size but still since 'error' is
pointer to the 'status' which is uninitialized,
so if(error) will be always true.
If you want to update *error in success/failure case [based on 'err'
]then can remove the if(error) check
else to update the error case only can look at the way proposed in my
previous mail.

Regards,
Kishore
--
To unsubscribe from this list: send the line unsubscribe linux-mmc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH v6 02/11] omap_hsmmc: add support for pre_req and post_req

2011-06-21 Thread Kishore Kadiyala
Hi Per,

On Tue, Jun 21, 2011 at 12:21 PM, Per Forlin per.for...@linaro.org wrote:
 On 21 June 2011 07:41, Kishore Kadiyala kishorek.kadiy...@gmail.com wrote:
 snip

 +
 +static void omap_hsmmc_pre_req(struct mmc_host *mmc, struct mmc_request 
 *mrq,
 +                              bool is_first_req)

 I don't see the usage of is_first_req below.
 Is it required?

 It is not required. It is only an indication that this request is the
 first in a series of request. The host driver may do various
 optimisations based on this information. The first request in a series
 of jobs can't be prepared in parallel to the previous job. The host
 driver can do the following to minimise latency for the first job.
  * Preparing the cache while the MMC read/write cmd is being
 processed. In this case the pre_req could do nothing and the job is
 instead run in parallel to the read/write cmd being sent. If the
 is_first_req is false pre_req will run in parallel to an active
 transfer, in this case it is more efficient to prepare the request in
 pre_req.
  * Run PIO mode instead of DMA
  * Maybe there can be power related optimisations based on if it is
 one single transfer or multiple ones.

Ok, thanks for making things clear.

snip

Regards,
Kishore
--
To unsubscribe from this list: send the line unsubscribe linux-mmc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH v3 0/4] Extend sdhci-esdhc-imx card_detect and write_protect support for mx5

2011-06-21 Thread Shawn Guo
On Tue, Jun 21, 2011 at 11:44:00AM +0200, Arnaud Patard wrote:
 Shawn Guo shawn@freescale.com writes:
 
  Hi Arnaud,
 
 Hi,
 
 
  Would you please give a test on the series, as it fixed the issue
  you reported?  TIA.
 
 I've tested it yesterday on my efika platforms and the issue is
 gone. Moreover, the different card slots on these platforms are using
 each card detect type (internally connected, gpio, no card detect) and
 they are all working as expected. I'm a little bit annoyed by the
 polling on the slot without card detect but this has nothing to do with
 this patchset.
 
That's great.

 Side note: turns out that the driver is using dev_warn when failing to
 get cd gpio/irq. Given that in these cases, it jumps to
 no_card_detect_pin and makes the probe fail, maybe a dev_err would be
 better ? 
 
Though it's the existing code, I respect the comment and will make the
change in the v4.

 Tested-by: Arnaud Patard arnaud.pat...@rtp-net.org
 
Thanks for testing.

-- 
Regards,
Shawn

--
To unsubscribe from this list: send the line unsubscribe linux-mmc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH v3 2/4] mmc: sdhci-esdhc-imx: SDHCI_CARD_PRESENT does not get cleared

2011-06-21 Thread Chris Ball
Hi Shawn,

On Tue, Jun 21 2011, Shawn Guo wrote:
 Acked-by: Wolfram Sang w.s...@pengutronix.de
 
 Sorry, my bad.  Will add in v4.

 Should go to stable.
 
 I suppose that Chris will take care of it, otherwise please let me
 know what I should do.  (I'm still new to the process)

Just add:

Cc: sta...@kernel.org

to the commit message, please.  (That's all that's necessary to get a
patch seen by the stable@ maintainers.)

Thanks,

- Chris.
-- 
Chris Ball   c...@laptop.org   http://printf.net/
One Laptop Per Child
--
To unsubscribe from this list: send the line unsubscribe linux-mmc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v4 0/4] Extend sdhci-esdhc-imx card_detect and write_protect support for mx5

2011-06-21 Thread Shawn Guo
The card-present polling for sdhci based driver is very expensive
in terms of the impact to system performance.  We observe a few
system performance issues from Freescale and Linaro on mx5 platforms,
which have been proven to be related to card polling.

The patch set extends the current sdhci-esdhc-imx card_detect and
write_protect support to cover mx5 platforms, and solves above
performance issues.

Changes since v3:
 * Address comment from Philip Rakity on sdhci_set_card_detection
 * Change a couple of dev_warn to dev_err in sdhci-esdhc-imx.c per
   Arnaud Patard's comment
 * Add Arnaud Patard's Tested-by
 * Add missing Acked-by, Cc stable on patch #2

Changes since v2:
 * Fix the issue reported by Arnaud Patard:
   http://article.gmane.org/gmane.linux.ports.arm.kernel/120790

Changes since v1:
 * Rebase on today's linux-next
 * Take the suggestion from Arnaud Patard to add default pdata in
   imx_add_sdhci_esdhc_imx(), to avoid touching every single board
   file for the platform_data changes
 * Add comment for sdhci.c change
 * Change ESDHC_CD(WP)_SIGNAL to ESDHC_CD(WP)_CONTROLLER for a more
   descriptive name
 * Add missing NONE case handling in esdhc_pltfm_get_ro
 * Improve a couple comment wording per suggestion from Wolfram Sang

Shawn Guo (4):
  mmc: sdhci: fix interrupt storm from card detection
  mmc: sdhci-esdhc-imx: SDHCI_CARD_PRESENT does not get cleared
  mmc: sdhci-esdhc-imx: remove WP from flag ESDHC_FLAG_GPIO_FOR_CD_WP
  mmc: sdhci-esdhc-imx: extend card_detect and write_protect support for mx5

 arch/arm/mach-imx/eukrea_mbimxsd25-baseboard.c |3 +-
 arch/arm/mach-imx/eukrea_mbimxsd35-baseboard.c |3 +-
 arch/arm/mach-imx/mach-mx25_3ds.c  |2 +
 arch/arm/mach-imx/mach-pcm043.c|2 +
 arch/arm/mach-mx5/board-mx51_babbage.c |   14 +-
 arch/arm/mach-mx5/board-mx53_loco.c|4 +
 .../plat-mxc/devices/platform-sdhci-esdhc-imx.c|   12 ++
 arch/arm/plat-mxc/include/mach/esdhc.h |   25 +++-
 drivers/mmc/host/sdhci-esdhc-imx.c |  140 
 drivers/mmc/host/sdhci.c   |   29 -
 10 files changed, 165 insertions(+), 69 deletions(-)
--
To unsubscribe from this list: send the line unsubscribe linux-mmc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v4 1/4] mmc: sdhci: fix interrupt storm from card detection

2011-06-21 Thread Shawn Guo
The issue was initially found by Eric Benard as below.

http://permalink.gmane.org/gmane.linux.ports.arm.kernel/108031

Not sure about other SDHCI based controller, but on Freescale eSDHC,
the SDHCI_INT_CARD_INSERT bits will be immediately set again when it
gets cleared, if a card is inserted. The driver needs to mask the irq
to prevent interrupt storm which will freeze the system.  And the
SDHCI_INT_CARD_REMOVE gets the same situation.

The patch fixes the problem based on the initial idea from
Eric Benard.

Signed-off-by: Shawn Guo shawn@linaro.org
Cc: Eric Benard e...@eukrea.com
Tested-by: Arnaud Patard arnaud.pat...@rtp-net.org
---
 drivers/mmc/host/sdhci.c |   29 +
 1 files changed, 25 insertions(+), 4 deletions(-)

diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 91d9892..790f959 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -127,11 +127,15 @@ static void sdhci_mask_irqs(struct sdhci_host *host, u32 
irqs)
 
 static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
 {
-   u32 irqs = SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT;
+   u32 present, irqs;
 
if (host-quirks  SDHCI_QUIRK_BROKEN_CARD_DETECTION)
return;
 
+   present = sdhci_readl(host, SDHCI_PRESENT_STATE) 
+ SDHCI_CARD_PRESENT;
+   irqs = present ? SDHCI_INT_CARD_REMOVE : SDHCI_INT_CARD_INSERT;
+
if (enable)
sdhci_unmask_irqs(host, irqs);
else
@@ -2154,13 +2158,30 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
mmc_hostname(host-mmc), intmask);
 
if (intmask  (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
+   u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) 
+ SDHCI_CARD_PRESENT;
+
+   /*
+* There is a observation on i.mx esdhc.  INSERT bit will be
+* immediately set again when it gets cleared, if a card is
+* inserted.  We have to mask the irq to prevent interrupt
+* storm which will freeze the system.  And the REMOVE gets
+* the same situation.
+*
+* More testing are needed here to ensure it works for other
+* platforms though.
+*/
+   sdhci_mask_irqs(host, present ? SDHCI_INT_CARD_INSERT :
+   SDHCI_INT_CARD_REMOVE);
+   sdhci_unmask_irqs(host, present ? SDHCI_INT_CARD_REMOVE :
+ SDHCI_INT_CARD_INSERT);
+
sdhci_writel(host, intmask  (SDHCI_INT_CARD_INSERT |
-   SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
+SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
+   intmask = ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
tasklet_schedule(host-card_tasklet);
}
 
-   intmask = ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
-
if (intmask  SDHCI_INT_CMD_MASK) {
sdhci_writel(host, intmask  SDHCI_INT_CMD_MASK,
SDHCI_INT_STATUS);
-- 
1.7.4.1

--
To unsubscribe from this list: send the line unsubscribe linux-mmc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v4 2/4] mmc: sdhci-esdhc-imx: SDHCI_CARD_PRESENT does not get cleared

2011-06-21 Thread Shawn Guo
The function esdhc_readl_le intends to clear bit SDHCI_CARD_PRESENT,
when the card detect gpio tells there is no card.  But it does not
clear the bit actually.  The patch gives a fix on that.

Signed-off-by: Shawn Guo shawn@linaro.org
Acked-by: Wolfram Sang w.s...@pengutronix.de
Cc: sta...@kernel.org
---
 drivers/mmc/host/sdhci-esdhc-imx.c |2 +-
 1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c 
b/drivers/mmc/host/sdhci-esdhc-imx.c
index 248b8e5..94097c0 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -84,7 +84,7 @@ static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
if (boarddata  gpio_is_valid(boarddata-cd_gpio)
 gpio_get_value(boarddata-cd_gpio))
/* no card, if a valid gpio says so... */
-   val = SDHCI_CARD_PRESENT;
+   val = ~SDHCI_CARD_PRESENT;
else
/* ... in all other cases assume card is present */
val |= SDHCI_CARD_PRESENT;
-- 
1.7.4.1

--
To unsubscribe from this list: send the line unsubscribe linux-mmc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v4 3/4] mmc: sdhci-esdhc-imx: remove WP from flag ESDHC_FLAG_GPIO_FOR_CD_WP

2011-06-21 Thread Shawn Guo
The use of flag ESDHC_FLAG_GPIO_FOR_CD_WP is all CD related.  It does
not necessarily need to bother WP in the flag name.

Signed-off-by: Shawn Guo shawn@linaro.org
---
 drivers/mmc/host/sdhci-esdhc-imx.c |8 
 1 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c 
b/drivers/mmc/host/sdhci-esdhc-imx.c
index 94097c0..79b7a9a 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -29,7 +29,7 @@
 #define SDHCI_VENDOR_SPEC  0xC0
 #define  SDHCI_VENDOR_SPEC_SDIO_QUIRK  0x0002
 
-#define ESDHC_FLAG_GPIO_FOR_CD_WP  (1  0)
+#define ESDHC_FLAG_GPIO_FOR_CD (1  0)
 /*
  * The CMDTYPE of the CMD register (offset 0xE) should be set to
  * 11 when the STOP CMD12 is issued on imx53 to abort one
@@ -77,7 +77,7 @@ static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
u32 val = readl(host-ioaddr + reg);
 
if (unlikely((reg == SDHCI_PRESENT_STATE)
-(imx_data-flags  ESDHC_FLAG_GPIO_FOR_CD_WP))) {
+(imx_data-flags  ESDHC_FLAG_GPIO_FOR_CD))) {
struct esdhc_platform_data *boarddata =
host-mmc-parent-platform_data;
 
@@ -99,7 +99,7 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, 
int reg)
struct pltfm_imx_data *imx_data = pltfm_host-priv;
 
if (unlikely((reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)
-(imx_data-flags  ESDHC_FLAG_GPIO_FOR_CD_WP)))
+(imx_data-flags  ESDHC_FLAG_GPIO_FOR_CD)))
/*
 * these interrupts won't work with a custom card_detect gpio
 * (only applied to mx25/35)
@@ -308,7 +308,7 @@ static int __devinit sdhci_esdhc_imx_probe(struct 
platform_device *pdev)
goto no_card_detect_irq;
}
 
-   imx_data-flags |= ESDHC_FLAG_GPIO_FOR_CD_WP;
+   imx_data-flags |= ESDHC_FLAG_GPIO_FOR_CD;
/* Now we have a working card_detect again */
host-quirks = ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
}
-- 
1.7.4.1

--
To unsubscribe from this list: send the line unsubscribe linux-mmc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v4 4/4] mmc: sdhci-esdhc-imx: extend card_detect and write_protect support for mx5

2011-06-21 Thread Shawn Guo
The patch extends card_detect and write_protect support to get mx5
family and more scenarios supported.  The changes include:

 * Turn platform_data from optional to mandatory
 * Add cd_types and wp_types into platform_data to cover more use
   cases
 * Remove the use of flag ESDHC_FLAG_GPIO_FOR_CD
 * Adjust some machine codes to adopt the platform_data changes
 * Work around the issue that software reset will get card detection
   circuit stop working

With this patch, card_detect and write_protect gets supported on
mx5 based platforms.

Signed-off-by: Shawn Guo shawn@linaro.org
Acked-by: Wolfram Sang w.s...@pengutronix.de
Tested-by: Arnaud Patard arnaud.pat...@rtp-net.org
---
 arch/arm/mach-imx/eukrea_mbimxsd25-baseboard.c |3 +-
 arch/arm/mach-imx/eukrea_mbimxsd35-baseboard.c |3 +-
 arch/arm/mach-imx/mach-mx25_3ds.c  |2 +
 arch/arm/mach-imx/mach-pcm043.c|2 +
 arch/arm/mach-mx5/board-mx51_babbage.c |   14 +-
 arch/arm/mach-mx5/board-mx53_loco.c|4 +
 .../plat-mxc/devices/platform-sdhci-esdhc-imx.c|   12 ++
 arch/arm/plat-mxc/include/mach/esdhc.h |   25 +++-
 drivers/mmc/host/sdhci-esdhc-imx.c |  138 
 9 files changed, 139 insertions(+), 64 deletions(-)

diff --git a/arch/arm/mach-imx/eukrea_mbimxsd25-baseboard.c 
b/arch/arm/mach-imx/eukrea_mbimxsd25-baseboard.c
index 01ebcb3..66e8726 100644
--- a/arch/arm/mach-imx/eukrea_mbimxsd25-baseboard.c
+++ b/arch/arm/mach-imx/eukrea_mbimxsd25-baseboard.c
@@ -225,7 +225,8 @@ struct imx_ssi_platform_data eukrea_mbimxsd_ssi_pdata 
__initconst = {
 
 static struct esdhc_platform_data sd1_pdata = {
.cd_gpio = GPIO_SD1CD,
-   .wp_gpio = -EINVAL,
+   .cd_type = ESDHC_CD_GPIO,
+   .wp_type = ESDHC_WP_NONE,
 };
 
 /*
diff --git a/arch/arm/mach-imx/eukrea_mbimxsd35-baseboard.c 
b/arch/arm/mach-imx/eukrea_mbimxsd35-baseboard.c
index 558eb52..0f0af02 100644
--- a/arch/arm/mach-imx/eukrea_mbimxsd35-baseboard.c
+++ b/arch/arm/mach-imx/eukrea_mbimxsd35-baseboard.c
@@ -236,7 +236,8 @@ struct imx_ssi_platform_data eukrea_mbimxsd_ssi_pdata 
__initconst = {
 
 static struct esdhc_platform_data sd1_pdata = {
.cd_gpio = GPIO_SD1CD,
-   .wp_gpio = -EINVAL,
+   .cd_type = ESDHC_CD_GPIO,
+   .wp_type = ESDHC_WP_NONE,
 };
 
 /*
diff --git a/arch/arm/mach-imx/mach-mx25_3ds.c 
b/arch/arm/mach-imx/mach-mx25_3ds.c
index 01534bb..7f66a91 100644
--- a/arch/arm/mach-imx/mach-mx25_3ds.c
+++ b/arch/arm/mach-imx/mach-mx25_3ds.c
@@ -215,6 +215,8 @@ static const struct imxi2c_platform_data mx25_3ds_i2c0_data 
__initconst = {
 static const struct esdhc_platform_data mx25pdk_esdhc_pdata __initconst = {
.wp_gpio = SD1_GPIO_WP,
.cd_gpio = SD1_GPIO_CD,
+   .wp_type = ESDHC_WP_GPIO,
+   .cd_type = ESDHC_CD_GPIO,
 };
 
 static void __init mx25pdk_init(void)
diff --git a/arch/arm/mach-imx/mach-pcm043.c b/arch/arm/mach-imx/mach-pcm043.c
index 163cc31..660ec3e 100644
--- a/arch/arm/mach-imx/mach-pcm043.c
+++ b/arch/arm/mach-imx/mach-pcm043.c
@@ -349,6 +349,8 @@ __setup(otg_mode=, pcm043_otg_mode);
 static struct esdhc_platform_data sd1_pdata = {
.wp_gpio = SD1_GPIO_WP,
.cd_gpio = SD1_GPIO_CD,
+   .wp_type = ESDHC_WP_GPIO,
+   .cd_type = ESDHC_CD_GPIO,
 };
 
 /*
diff --git a/arch/arm/mach-mx5/board-mx51_babbage.c 
b/arch/arm/mach-mx5/board-mx51_babbage.c
index 15c6000..e400b09 100644
--- a/arch/arm/mach-mx5/board-mx51_babbage.c
+++ b/arch/arm/mach-mx5/board-mx51_babbage.c
@@ -41,8 +41,6 @@
 #define BABBAGE_POWER_KEY  IMX_GPIO_NR(2, 21)
 #define BABBAGE_ECSPI1_CS0 IMX_GPIO_NR(4, 24)
 #define BABBAGE_ECSPI1_CS1 IMX_GPIO_NR(4, 25)
-#define BABBAGE_SD1_CD IMX_GPIO_NR(1, 0)
-#define BABBAGE_SD1_WP IMX_GPIO_NR(1, 1)
 #define BABBAGE_SD2_CD IMX_GPIO_NR(1, 6)
 #define BABBAGE_SD2_WP IMX_GPIO_NR(1, 5)
 
@@ -146,8 +144,9 @@ static iomux_v3_cfg_t mx51babbage_pads[] = {
MX51_PAD_SD1_DATA1__SD1_DATA1,
MX51_PAD_SD1_DATA2__SD1_DATA2,
MX51_PAD_SD1_DATA3__SD1_DATA3,
-   MX51_PAD_GPIO1_0__GPIO1_0,
-   MX51_PAD_GPIO1_1__GPIO1_1,
+   /* CD/WP from controller */
+   MX51_PAD_GPIO1_0__SD1_CD,
+   MX51_PAD_GPIO1_1__SD1_WP,
 
/* SD 2 */
MX51_PAD_SD2_CMD__SD2_CMD,
@@ -156,6 +155,7 @@ static iomux_v3_cfg_t mx51babbage_pads[] = {
MX51_PAD_SD2_DATA1__SD2_DATA1,
MX51_PAD_SD2_DATA2__SD2_DATA2,
MX51_PAD_SD2_DATA3__SD2_DATA3,
+   /* CD/WP gpio */
MX51_PAD_GPIO1_6__GPIO1_6,
MX51_PAD_GPIO1_5__GPIO1_5,
 
@@ -340,13 +340,15 @@ static const struct spi_imx_master mx51_babbage_spi_pdata 
__initconst = {
 };
 
 static const struct esdhc_platform_data mx51_babbage_sd1_data __initconst = {
-   .cd_gpio = BABBAGE_SD1_CD,
-   .wp_gpio = BABBAGE_SD1_WP,
+   .cd_type = ESDHC_CD_CONTROLLER,
+   .wp_type = ESDHC_WP_CONTROLLER,
 };
 
 static const struct 

Re: [PATCH v3 2/4] mmc: sdhci-esdhc-imx: SDHCI_CARD_PRESENT does not get cleared

2011-06-21 Thread Wolfram Sang

  Should go to stable.
  
 I suppose that Chris will take care of it,

That's correct. You can add the cc as Chris suggested, so the maintainer knows
that you think it should go to stable. The final decision is up to the
maintainer, though.

-- 
Pengutronix e.K.   | Wolfram Sang|
Industrial Linux Solutions | http://www.pengutronix.de/  |


signature.asc
Description: Digital signature


Re: [PATCH v4 1/4] mmc: sdhci: fix interrupt storm from card detection

2011-06-21 Thread Philip Rakity

On Jun 21, 2011, at 7:41 AM, Shawn Guo wrote:

 The issue was initially found by Eric Benard as below.
 
 http://permalink.gmane.org/gmane.linux.ports.arm.kernel/108031
 
 Not sure about other SDHCI based controller, but on Freescale eSDHC,
 the SDHCI_INT_CARD_INSERT bits will be immediately set again when it
 gets cleared, if a card is inserted. The driver need to mask the irq
 to prevent interrupt storm which will freeze the system.  And the
 SDHCI_INT_CARD_REMOVE gets the same situation.
 
 The patch fixes the problem based on the initial idea from
 Eric Benard.
 
 Signed-off-by: Shawn Guo shawn@linaro.org
 Cc: Eric Benard e...@eukrea.com
 Tested-by: Arnaud Patard arnaud.pat...@rtp-net.org
 ---
 drivers/mmc/host/sdhci.c |   29 +
 1 files changed, 25 insertions(+), 4 deletions(-)
 
 diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
 index 91d9892..790f959 100644
 --- a/drivers/mmc/host/sdhci.c
 +++ b/drivers/mmc/host/sdhci.c
 @@ -127,11 +127,15 @@ static void sdhci_mask_irqs(struct sdhci_host *host, 
 u32 irqs)
 
 static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
 {
 - u32 irqs = SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT;
 + u32 present, irqs;
 
   if (host-quirks  SDHCI_QUIRK_BROKEN_CARD_DETECTION)
   return;
 
 + present = sdhci_readl(host, SDHCI_PRESENT_STATE) 
 +   SDHCI_CARD_PRESENT;
 + irqs = present ? SDHCI_INT_CARD_REMOVE : SDHCI_INT_CARD_INSERT;
 +
   if (enable)
   sdhci_unmask_irqs(host, irqs);
   else
 @@ -2154,13 +2158,30 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
   mmc_hostname(host-mmc), intmask);
 
   if (intmask  (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
 + u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) 
 +   SDHCI_CARD_PRESENT;
 +
 + /*
 +  * There is a observation on i.mx esdhc.  INSERT bit will be
 +  * immediately set again when it gets cleared, if a card is
 +  * inserted.  We have to mask the irq to prevent interrupt
 +  * storm which will freeze the system.  And the REMOVE gets
 +  * the same situation.
 +  *
 +  * More testing are needed here to ensure it works for other
 +  * platforms though.
 +  */
 + sdhci_mask_irqs(host, present ? SDHCI_INT_CARD_INSERT :
 + SDHCI_INT_CARD_REMOVE);
 + sdhci_unmask_irqs(host, present ? SDHCI_INT_CARD_REMOVE :
 +   SDHCI_INT_CARD_INSERT);
 +


   sdhci_writel(host, intmask  (SDHCI_INT_CARD_INSERT |
 - SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
 +  SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);


Please keep the old formatting since code does not change anything.  Makes it 
easier to find
the real change

 + intmask = ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
   tasklet_schedule(host-card_tasklet);
   }
 
 - intmask = ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
 -
   if (intmask  SDHCI_INT_CMD_MASK) {
   sdhci_writel(host, intmask  SDHCI_INT_CMD_MASK,
   SDHCI_INT_STATUS);
 -- 
 1.7.4.1
 


Reviewed-by: Philip Rakity prak...@marvell.com

--
To unsubscribe from this list: send the line unsubscribe linux-mmc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH 5/5] ARM: mach-shmobile: ag5evm: SDHI requires waiting for idle

2011-06-21 Thread Chris Ball
Hi Paul,

On Mon, Jun 20 2011, Simon Horman wrote:
 The SDHI block on the ag5evm requires waiting for idle
 before writing to some registers.

 Cc: Guennadi Liakhovetski g.liakhovet...@gmx.de
 Cc: Magnus Damm magnus.d...@gmail.com
 Signed-off-by: Simon Horman ho...@verge.net.au

 ---

 Dependencies:
   mmc: sdhi: Add write16_hook
   ARM: mach-shmobile: ag5evm: consistently name sdhi info structures
 ---
  arch/arm/mach-shmobile/board-ag5evm.c |3 ++-
  1 files changed, 2 insertions(+), 1 deletions(-)

 diff --git a/arch/arm/mach-shmobile/board-ag5evm.c 
 b/arch/arm/mach-shmobile/board-ag5evm.c
 index ce5c251..cdfdd62 100644
 --- a/arch/arm/mach-shmobile/board-ag5evm.c
 +++ b/arch/arm/mach-shmobile/board-ag5evm.c
 @@ -341,6 +341,7 @@ static struct platform_device mipidsi0_device = {
  static struct sh_mobile_sdhi_info sdhi0_info = {
   .dma_slave_tx   = SHDMA_SLAVE_SDHI0_TX,
   .dma_slave_rx   = SHDMA_SLAVE_SDHI0_RX,
 + .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT,
   .tmio_caps  = MMC_CAP_SD_HIGHSPEED,
   .tmio_ocr_mask  = MMC_VDD_27_28 | MMC_VDD_28_29,
  };
 @@ -382,7 +383,7 @@ void ag5evm_sdhi1_set_pwr(struct platform_device *pdev, 
 int state)
  }
  
  static struct sh_mobile_sdhi_info sh_sdhi1_info = {
 - .tmio_flags = TMIO_MMC_WRPROTECT_DISABLE,
 + .tmio_flags = TMIO_MMC_WRPROTECT_DISABLE | TMIO_MMC_HAS_IDLE_WAIT,
   .tmio_caps  = MMC_CAP_NONREMOVABLE | MMC_CAP_SDIO_IRQ,
   .tmio_ocr_mask  = MMC_VDD_32_33 | MMC_VDD_33_34,
   .set_pwr= ag5evm_sdhi1_set_pwr,

Could I get your ACK for this one, so that I can merge patches 1/2/3/5
together via the MMC tree?

Thanks,

- Chris.
-- 
Chris Ball   c...@laptop.org   http://printf.net/
One Laptop Per Child
--
To unsubscribe from this list: send the line unsubscribe linux-mmc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH] cb710: fix #ifdef HAVE_EFFICIENT_UNALIGNED_ACCESS

2011-06-21 Thread Chris Ball
Hi,

On Tue, Jun 21 2011, Michał Mirosław wrote:
 On Tue, Jun 21, 2011 at 10:55:34AM +0100, James Hogan wrote:
 HAVE_EFFICIENT_UNALIGNED_ACCESS is a config option, therefore it needs
 the CONFIG_ before it when used by the preprocessor.
 
 Signed-off-by: James Hogan ja...@albanarts.com
 ---
  drivers/misc/cb710/sgbuf2.c |2 +-
  1 files changed, 1 insertions(+), 1 deletions(-)
 
 diff --git a/drivers/misc/cb710/sgbuf2.c b/drivers/misc/cb710/sgbuf2.c
 index d019746..2a40d0e 100644
 --- a/drivers/misc/cb710/sgbuf2.c
 +++ b/drivers/misc/cb710/sgbuf2.c
 @@ -47,7 +47,7 @@ static uint32_t sg_dwiter_read_buffer(struct
 sg_mapping_iter *miter)
 
  static inline bool needs_unaligned_copy(const void *ptr)
  {
 -#ifdef HAVE_EFFICIENT_UNALIGNED_ACCESS
 +#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
  return false;
  #else
  return ((ptr - NULL)  3) != 0;

 Acked-by: Michał Mirosław mirq-li...@rere.qmqm.pl

Thanks, pushed to mmc-next for 3.0.

(James, your patch was corrupt due to a hard line break on the @@
line -- please fix that next time around.)

- Chris.
-- 
Chris Ball   c...@laptop.org   http://printf.net/
One Laptop Per Child
--
To unsubscribe from this list: send the line unsubscribe linux-mmc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH 4/4] PCI: make cardbus-bridge resources nice-to-have

2011-06-21 Thread Ram Pai
On Tue, Jun 21, 2011 at 09:57:00AM +0200, Dominik Brodowski wrote:
 On Mon, Jun 20, 2011 at 03:47:17PM -0700, Ram Pai wrote:
  Allocate resources to cardbus bridge only after all other genuine
  resources requests are satisfied. Dont retry if resource allocation
  for cardbus-bridge fails.
 
 Well, for those who use cardbus cards, cardbus resources aren't nice to
 have, they are absolutely required. Of course, not all cardbus cards need
 as many resources as are currently assigned, so I wouldn't oppose a patch
 which marks _some_ of the currently assigned resources as nice to have.
 But this approach -- 0 required, all nice to have -- seems wrong to me.

Do you know how much minimal resource is good enough?  The value, before
this patch, was 256 for IO ports and  64M for memory.

BTW: If the BIOS has already assigned enough resources for all the devices on
the system, no devices will be starved including the cardbus. The OS intervenes
and is forced to make this hard choice only when it sees unassigned resources to
some devices along with resource contention.

RP
--
To unsubscribe from this list: send the line unsubscribe linux-mmc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH 0/3] Improve MMC error handling (3rd rev)

2011-06-21 Thread Chris Ball
Hi Russell,

On Mon, Jun 20 2011, Russell King - ARM Linux wrote:
 This is the third revision of my improvements to the MMC block
 device error handling, which makes error handling more robust
 and permits MMC/SD to continue working in the presence of
 not-quite-perfect setups.

 Without this, my Versatile Express tends to fail to mount its rootfs
 on SD.  With this, it can successfully read and write data from the
 card in the presence of FIFO overruns and underruns, and also sensibly
 recover from command channel errors.

 There is more to come, but this is the safer bits of the improvements.

  drivers/mmc/card/block.c |  279 ++---
  include/linux/mmc/mmc.h  |   10 ++
  2 files changed, 221 insertions(+), 68 deletions(-)

Thanks, pushed to mmc-next for 3.1.

(I'll do some more testing myself and let you know if I find anything
unexpected.)

- Chris.
-- 
Chris Ball   c...@laptop.org   http://printf.net/
One Laptop Per Child
--
To unsubscribe from this list: send the line unsubscribe linux-mmc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH 5/5] ARM: mach-shmobile: ag5evm: SDHI requires waiting for idle

2011-06-21 Thread Paul Mundt
On Tue, Jun 21, 2011 at 12:10:22PM -0400, Chris Ball wrote:
 On Mon, Jun 20 2011, Simon Horman wrote:
  Dependencies:
  mmc: sdhi: Add write16_hook
  ARM: mach-shmobile: ag5evm: consistently name sdhi info structures
 
 Could I get your ACK for this one, so that I can merge patches 1/2/3/5
 together via the MMC tree?
 
Well, this has a dependency on 4/5 which I've already applied. I was
simply going to wait for 1-3 to be merged via your tree and then roll in
this last one on top of that. If you want to take them all that's ok too,
git should be able to deal with any merge conflicts fine.
--
To unsubscribe from this list: send the line unsubscribe linux-mmc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH 5/5] ARM: mach-shmobile: ag5evm: SDHI requires waiting for idle

2011-06-21 Thread Chris Ball
Hi,

On Tue, Jun 21 2011, Paul Mundt wrote:
 On Tue, Jun 21, 2011 at 12:10:22PM -0400, Chris Ball wrote:
 On Mon, Jun 20 2011, Simon Horman wrote:
  Dependencies:
 mmc: sdhi: Add write16_hook
 ARM: mach-shmobile: ag5evm: consistently name sdhi info structures
 
 Could I get your ACK for this one, so that I can merge patches 1/2/3/5
 together via the MMC tree?
 
 Well, this has a dependency on 4/5 which I've already applied. I was
 simply going to wait for 1-3 to be merged via your tree and then roll in
 this last one on top of that. If you want to take them all that's ok too,
 git should be able to deal with any merge conflicts fine.

Ah, okay -- I've pushed 1-3 now to mmc-next for 3.1, you can take 4-5.

(I was thinking I should try to avoid requiring you to wait for Linus
to pull MMC before you can send sh to him.)

- Chris.
-- 
Chris Ball   c...@laptop.org   http://printf.net/
One Laptop Per Child
--
To unsubscribe from this list: send the line unsubscribe linux-mmc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH] cb710: fix #ifdef HAVE_EFFICIENT_UNALIGNED_ACCESS

2011-06-21 Thread James Hogan
Hi,

2011/6/21 Chris Ball c...@laptop.org:
 Hi,

 On Tue, Jun 21 2011, Michał Mirosław wrote:
 On Tue, Jun 21, 2011 at 10:55:34AM +0100, James Hogan wrote:
 HAVE_EFFICIENT_UNALIGNED_ACCESS is a config option, therefore it needs
 the CONFIG_ before it when used by the preprocessor.

 Signed-off-by: James Hogan ja...@albanarts.com
 ---
  drivers/misc/cb710/sgbuf2.c |    2 +-
  1 files changed, 1 insertions(+), 1 deletions(-)

 diff --git a/drivers/misc/cb710/sgbuf2.c b/drivers/misc/cb710/sgbuf2.c
 index d019746..2a40d0e 100644
 --- a/drivers/misc/cb710/sgbuf2.c
 +++ b/drivers/misc/cb710/sgbuf2.c
 @@ -47,7 +47,7 @@ static uint32_t sg_dwiter_read_buffer(struct
 sg_mapping_iter *miter)

  static inline bool needs_unaligned_copy(const void *ptr)
  {
 -#ifdef HAVE_EFFICIENT_UNALIGNED_ACCESS
 +#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
      return false;
  #else
      return ((ptr - NULL)  3) != 0;

 Acked-by: Michał Mirosław mirq-li...@rere.qmqm.pl

 Thanks, pushed to mmc-next for 3.0.

 (James, your patch was corrupt due to a hard line break on the @@
 line -- please fix that next time around.)

Ahh, sorry about that Chris, I thought I had that problem sorted!

Thanks
James


 - Chris.
 --
 Chris Ball   c...@laptop.org   http://printf.net/
 One Laptop Per Child




-- 
James Hogan
--
To unsubscribe from this list: send the line unsubscribe linux-mmc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH v3 RESEND] dma-mapping: add new API for max_segment_number

2011-06-21 Thread FUJITA Tomonori
On Fri, 17 Jun 2011 06:40:35 -0600
Matthew Wilcox matt...@wil.cx wrote:

 On Thu, Jun 16, 2011 at 08:30:53PM +0800, Shawn Guo wrote:
  Here is the user story that tells the need of the new api.  The
  mxs-mmc is the mmc host controller for Freescale MXS architecture.
  There are a pair of  mmc host specific parameters max_seg_size and
  max_segs that mxs-mmc host driver needs to tell mmc core, so that
  mmc core can know how big each data segment could be and how many
  segments could be handled one time in a scatter list by host driver.
  
  The mxs-mmc driver is one user of dmaengine mxs-dma, and it will call
  mxs-dma to transfer data in scatter list.  That is to say mxs-mmc has
  no idea of what max_seg_size and max_segs should be, because they are
  all mxs-dma capability parameters, and mxs-mmc needs to query them
  from mxs-dma.
 
 This approach would make sense if mxs-mmc were generic, but it's tied to
 mxs-dma, so it can just as well call mxs-dma to find out how many segments
 it supports.

SCSI HBA drivers stores the max number of sg entries in
host-sg_tablesize (and scsi-ml tells the block layer about the
limit)? So if we have the generic API to handle the value, scsi HBA
drivers (and scsi-ml) could use it too?
--
To unsubscribe from this list: send the line unsubscribe linux-mmc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH 4/4] PCI: make cardbus-bridge resources nice-to-have

2011-06-21 Thread Jesse Barnes
On Tue, 21 Jun 2011 09:23:21 -0700
Ram Pai linux...@us.ibm.com wrote:

 On Tue, Jun 21, 2011 at 09:57:00AM +0200, Dominik Brodowski wrote:
  On Mon, Jun 20, 2011 at 03:47:17PM -0700, Ram Pai wrote:
   Allocate resources to cardbus bridge only after all other genuine
   resources requests are satisfied. Dont retry if resource allocation
   for cardbus-bridge fails.
  
  Well, for those who use cardbus cards, cardbus resources aren't nice to
  have, they are absolutely required. Of course, not all cardbus cards need
  as many resources as are currently assigned, so I wouldn't oppose a patch
  which marks _some_ of the currently assigned resources as nice to have.
  But this approach -- 0 required, all nice to have -- seems wrong to me.
 
 Do you know how much minimal resource is good enough?  The value, before
 this patch, was 256 for IO ports and  64M for memory.
 
 BTW: If the BIOS has already assigned enough resources for all the devices on
 the system, no devices will be starved including the cardbus. The OS 
 intervenes
 and is forced to make this hard choice only when it sees unassigned resources 
 to
 some devices along with resource contention.

I just know this is going to trigger regressions, so I think Dominik's
concern is valid.  We'll have some existing machine with a device whose
resource was never assigned, but we didn't care because it was unused.
Now this code will try to give it some address space at the expense of
something that *is* being used.

But OTOH this will at least try to allocate *some* space to cardbus, we
just won't try as hard as with some other resources.  I'd mainly like
to avoid the situation Dominik pointed out, where we have perfectly
good cardbus resources assigned (unlike in Oliver's case) but they're
stolen for a bridge that may get a hotplugged device or some other
device that didn't have a BIOS assignment.

-- 
Jesse Barnes, Intel Open Source Technology Center
--
To unsubscribe from this list: send the line unsubscribe linux-mmc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH v6 02/11] omap_hsmmc: add support for pre_req and post_req

2011-06-21 Thread Per Forlin
On 21 June 2011 21:18, Nicolas Pitre nicolas.pi...@linaro.org wrote:
 On Tue, 21 Jun 2011, Per Forlin wrote:

 On 21 June 2011 07:41, Kishore Kadiyala kishorek.kadiy...@gmail.com wrote:
  snip
 
  +
  +static void omap_hsmmc_pre_req(struct mmc_host *mmc, struct mmc_request 
  *mrq,
  +                              bool is_first_req)
 
  I don't see the usage of is_first_req below.
  Is it required?
 
 It is not required. It is only an indication that this request is the
 first in a series of requests. The host driver may do various
 optimisations based on this information. The first request in a series
 of jobs can't be prepared in parallel to the previous job. The host
 driver can do the following to minimise latency for the first job.
  * Preparing the cache while the MMC read/write cmd is being
 processed. In this case the pre_req could do nothing and the job is
 instead run in parallel to the read/write cmd being sent. If the
 is_first_req is false pre_req will run in parallel to an active
 transfer, in this case it is more efficient to prepare the request in
 pre_req.
  * Run PIO mode instead of DMA

 That is never going to be a good tradeoff.  If the CPU is busy doing
 PIO, it won't have a chance to prepare a subsequent request in parallel
 to the first transfer.

If you have two CPUs and the MMC interrupts are scheduled on the CPU
1, CPU 0 can prepare the next one. I'm still in favor of the preparing
cache in parallel to cmd. I have run tests and for small req like 4k
there is a good performance gain. Another option, if the mmc
controller support it, would be to start with PIO and switch to DMA on
the fly when cache is ready. Bottom line, it is up to the host driver
to do something clever based on is_first_req.

/Per
--
To unsubscribe from this list: send the line unsubscribe linux-mmc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH v6 02/11] omap_hsmmc: add support for pre_req and post_req

2011-06-21 Thread Nicolas Pitre
On Tue, 21 Jun 2011, Per Forlin wrote:

 On 21 June 2011 21:18, Nicolas Pitre nicolas.pi...@linaro.org wrote:
  On Tue, 21 Jun 2011, Per Forlin wrote:
 
  On 21 June 2011 07:41, Kishore Kadiyala kishorek.kadiy...@gmail.com 
  wrote:
   snip
  
   +
   +static void omap_hsmmc_pre_req(struct mmc_host *mmc, struct 
   mmc_request *mrq,
   +                              bool is_first_req)
  
   I don't see the usage of is_first_req below.
   Is it required?
  
  It is not required. It is only an indication that this request is the
  first in a series of request. The host driver may do various
  optimisations based on this information. The first request in a series
  of jobs can't be prepared in parallel to the previous job. The host
  driver can do the following to minimise latency for the first job.
   * Preparing the cache while the MMC read/write cmd is being
  processed. In this case the pre_req could do nothing and the job is
  instead run in parallel to the read/write cmd being sent. If the
  is_first_req is false pre_req will run in parallel to an active
  transfer, in this case it is more efficient to prepare the request in
  pre_req.
   * Run PIO mode instead of DMA
 
  That is never going to be a good tradeoff.  If the CPU is busy doing
  PIO, it won't have a chance to prepare a subsequent request in parallel
  to the first transfer.
 
 If you have two CPUs and the MMC interrupts are scheduled on the CPU
 1, CPU 0 can prepare the next one.

Well, it is true that in theory the PIO operation shouldn't take all the 
CPU anyway, so maybe there are some cycles left in between FIFO 
interrupts.

The danger here is of course to be presented with a trickle of single 
requests.  Doing them all with PIO is going to waste more power or 
prevent other tasks from running with 100% CPU which might impact the 
system latency more than the latency we're trying to avoid here.

In other words this is something that should be evaluated and not 
applied freely.


Nicolas

Re: [PATCH v6 11/11] mmc: add handling for two parallel block requests in issue_rw_rq

2011-06-21 Thread Per Forlin
On 20 June 2011 17:17, Kishore Kadiyala kishorek.kadiy...@gmail.com wrote:
 On Mon, Jun 20, 2011 at 2:47 AM, Per Forlin per.for...@linaro.org wrote:
 Change mmc_blk_issue_rw_rq() to become asynchronous.
 The execution flow looks like this:
 The mmc-queue calls issue_rw_rq(), which sends the request
 to the host and returns back to the mmc-queue. The mmc-queue calls
 issue_rw_rq() again with a new request. This new request is prepared,
 in issue_rw_rq(), then it waits for the active request to complete before
 pushing it to the host. When the mmc-queue is empty it will call
 issue_rw_rq() with req=NULL to finish off the active request
 without starting a new request.

 Signed-off-by: Per Forlin per.for...@linaro.org
 ---
  drivers/mmc/card/block.c |  121 
 +-
  drivers/mmc/card/queue.c |   17 +--
  drivers/mmc/card/queue.h |    1 +
  3 files changed, 101 insertions(+), 38 deletions(-)

 diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
 index 6a84a75..66db77a 100644
 --- a/drivers/mmc/card/block.c
 +++ b/drivers/mmc/card/block.c
 @@ -108,6 +108,7 @@ static DEFINE_MUTEX(open_lock);

  enum mmc_blk_status {
        MMC_BLK_SUCCESS = 0,
 +       MMC_BLK_PARTIAL,
        MMC_BLK_RETRY,
        MMC_BLK_DATA_ERR,
        MMC_BLK_CMD_ERR,
 @@ -668,14 +669,16 @@ static inline void mmc_apply_rel_rw(struct 
 mmc_blk_request *brq,
        }
  }

 -static enum mmc_blk_status mmc_blk_err_check(struct mmc_blk_request *brq,
 -                                            struct request *req,
 -                                            struct mmc_card *card,
 -                                            struct mmc_blk_data *md)
 +static int mmc_blk_err_check(struct mmc_card *card,
 +                            struct mmc_async_req *areq)
  {
        struct mmc_command cmd;
        u32 status = 0;
        enum mmc_blk_status ret = MMC_BLK_SUCCESS;
 +       struct mmc_queue_req *mq_mrq = container_of(areq, struct 
 mmc_queue_req,
 +                                                   mmc_active);
 +       struct mmc_blk_request *brq = mq_mrq-brq;
 +       struct request *req = mq_mrq-req;

        /*
         * Check for errors here, but don't jump to cmd_err
 @@ -770,7 +773,11 @@ static enum mmc_blk_status mmc_blk_err_check(struct 
 mmc_blk_request *brq,
                else
                        ret = MMC_BLK_DATA_ERR;
        }
 -out:
 +
 +       if (ret == MMC_BLK_SUCCESS 
 +           blk_rq_bytes(req) != brq-data.bytes_xfered)
 +               ret = MMC_BLK_PARTIAL;
 + out:
        return ret;
  }

 @@ -901,27 +908,59 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req 
 *mqrq,
                brq-data.sg_len = i;
        }

 +       mqrq-mmc_active.mrq = brq-mrq;
 +       mqrq-mmc_active.err_check = mmc_blk_err_check;
 +
        mmc_queue_bounce_pre(mqrq);
  }

 -static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 +static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
  {
        struct mmc_blk_data *md = mq-data;
        struct mmc_card *card = md-queue.card;
 -       struct mmc_blk_request *brq = mq-mqrq_cur-brq;
 -       int ret = 1, disable_multi = 0;
 +       struct mmc_blk_request *brq;
 +       int ret = 1;
 +       int disable_multi = 0;
        enum mmc_blk_status status;
 +       struct mmc_queue_req *mq_rq;
 +       struct request *req;
 +       struct mmc_async_req *areq;
 +
 +       if (!rqc  !mq-mqrq_prev-req)
 +               goto out;

        do {
 -               mmc_blk_rw_rq_prep(mq-mqrq_cur, card, disable_multi, mq);
 -               mmc_wait_for_req(card-host, brq-mrq);
 +               if (rqc) {
 +                       mmc_blk_rw_rq_prep(mq-mqrq_cur, card, 0, mq);
 +                       areq = mq-mqrq_cur-mmc_active;
 +               } else
 +                       areq = NULL;
 +               areq = mmc_start_req(card-host, areq, (int *) status);

 I think 'status' is used uninitialized.
 With this struct mmc_async_req *mmc_start_req in your first patch
 if (error)
        *error = err;
 return data;
 condition which always passes.
It's valid to pass in NULL as status.
Do you suggest to make the status pointer mandatory?


 You can have
 enum mmc_blk_status status = MMC_BLK_SUCCESS;

status will be set to MMC_BLK_SUCCESS for the first iteration of the
do-while loop.
It may look like:
MMC_BLK_RETRY
MMC_BLK_PARTIAL
MMC_BLK_PARTIAL
MMC_BLK_PARTIAL
MMC_BLK_SUCCESS

This means mmc_start_req needs to return MMC_BLK_SUCCESS.

Regards,
Per
--
To unsubscribe from this list: send the line unsubscribe linux-mmc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH 4/4] PCI: make cardbus-bridge resources nice-to-have

2011-06-21 Thread Jesse Barnes
On Tue, 21 Jun 2011 09:23:21 -0700
Ram Pai linux...@us.ibm.com wrote:

 On Tue, Jun 21, 2011 at 09:57:00AM +0200, Dominik Brodowski wrote:
  On Mon, Jun 20, 2011 at 03:47:17PM -0700, Ram Pai wrote:
   Allocate resources to cardbus bridge only after all other genuine
   resources requests are satisfied. Dont retry if resource allocation
   for cardbus-bridge fails.
  
  Well, for those who use cardbus cards, cardbus resources aren't nice to
  have, they are absolutely required. Of course, not all cardbus cards need
  as many resources as are currently assigned, so I wouldn't oppose a patch
  which marks _some_ of the currently assigned resources as nice to have.
  But this approach -- 0 required, all nice to have -- seems wrong to me.
 
 Do you know how much minimal resource is good enough?  The value, before
 this patch, was 256 for IO ports and  64M for memory.
 
 BTW: If the BIOS has already assigned enough resources for all the devices on
 the system, no devices will be starved including the cardbus. The OS 
 intervenes
 and is forced to make this hard choice only when it sees unassigned resources 
 to
 some devices along with resource contention.

Dominik, presumably you have a few good cardbus test machines; can you
give Ram's patches a try?  If we know they break existing
configurations, I'm afraid we'll just have to revert the whole
re-allocation patch yet again.  If your stuff survives, I'll ping Linus
to see what he thinks, though he'll probably want to revert in any
case...

Thanks,
-- 
Jesse Barnes, Intel Open Source Technology Center
--
To unsubscribe from this list: send the line unsubscribe linux-mmc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH 4/4] PCI: make cardbus-bridge resources nice-to-have

2011-06-21 Thread Dominik Brodowski
Hey,

On Tue, Jun 21, 2011 at 02:36:22PM -0700, Jesse Barnes wrote:
 On Tue, 21 Jun 2011 09:23:21 -0700
 Ram Pai linux...@us.ibm.com wrote:
 
  On Tue, Jun 21, 2011 at 09:57:00AM +0200, Dominik Brodowski wrote:
   On Mon, Jun 20, 2011 at 03:47:17PM -0700, Ram Pai wrote:
Allocate resources to cardbus bridge only after all other genuine
resources requests are satisfied. Dont retry if resource allocation
for cardbus-bridge fails.
   
   Well, for those who use cardbus cards, cardbus resources aren't nice to
   have, they are absolutely required. Of course, not all cardbus cards need
   as many resources as are currently assigned, so I wouldn't oppose a patch
   which marks _some_ of the currently assigned resources as nice to have.
   But this approach -- 0 required, all nice to have -- seems wrong to me.
  
  Do you know how much minimal resource is good enough?  The value, before
  this patch, was 256 for IO ports and  64M for memory.
  
  BTW: If the BIOS has already assigned enough resources for all the devices 
  on
  the system, no devices will be starved including the cardbus. The OS 
  intervenes
  and is forced to make this hard choice only when it sees unassigned 
  resources to
  some devices along with resource contention.
 
 Dominik, presumably you have a few good cardbus test machines; can you
 give Ram's patches a try?  If we know they break existing
 configurations, I'm afraid we'll just have to revert the whole
 re-allocation patch yet again.  If your stuff survives, I'll ping Linus
 to see what he thinks, though he'll probably want to revert in any
 case...

Actually, I only have one cardbus-capable test machine, which does work in
very most cases, and also I do care much more about the PCMCIA side of
things than the PCI/CardBus side... Therefore, all I could do is some more
or less informed guessing about how much minimal resource we should try to
allocate...

Best,
Dominik
--
To unsubscribe from this list: send the line unsubscribe linux-mmc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v7 00/11] use nonblock mmc requests to minimize latency

2011-06-21 Thread Per Forlin
How significant is the cache maintenance over head?
It depends, the eMMC are much faster now
compared to a few years ago and cache maintenance cost more due to
multiple cache levels and speculative cache pre-fetch. In relation the
cost for handling the caches have increased and is now a bottle neck
dealing with fast eMMC together with DMA.

The intention for introducing non-blocking mmc requests is to minimize the
time between a mmc request ends and another mmc request starts. In the
current implementation the MMC controller is idle when dma_map_sg and
dma_unmap_sg is processing. Introducing non-blocking mmc request makes it
possible to prepare the caches for next job in parallel to an active
mmc request.

This is done by making the issue_rw_rq() non-blocking.
The increase in throughput is proportional to the time it takes to
prepare (major part of preparations is dma_map_sg and dma_unmap_sg)
a request and how fast the memory is. The faster the MMC/SD is
the more significant the prepare request time becomes. Measurements on U5500
and Panda on eMMC and SD shows significant performance gain for large
reads when running DMA mode. In the PIO case the performance is unchanged.

There are two optional hooks pre_req() and post_req() that the host driver
may implement in order to move work to before and after the actual mmc_request
function is called. In the DMA case pre_req() may do dma_map_sg() and prepare
the dma descriptor and post_req runs the dma_unmap_sg.

Details on measurements from IOZone and mmc_test:
https://wiki.linaro.org/WorkingGroups/Kernel/Specs/StoragePerfMMC-async-req

Changes since v6:
 * minor update of doc for mmc_start_req and code clean up.
 * Identified a bug running tests on ext4 with discard enabled.
   The test procedure is documented here: 
https://wiki.linaro.org/WorkingGroups/Kernel/Specs/StoragePerfMMC-async-req#Liability_test
 * Resolved bug by preventing mmc async request run in parallel
   to discard (mmc_erase).

Per Forlin (11):
  mmc: add non-blocking mmc request function
  omap_hsmmc: add support for pre_req and post_req
  mmci: implement pre_req() and post_req()
  mmc: mmc_test: add debugfs file to list all tests
  mmc: mmc_test: add test for non-blocking transfers
  mmc: add member in mmc queue struct to hold request data
  mmc: add a block request prepare function
  mmc: move error code in mmc_block_issue_rw_rq to a separate function.
  mmc: add a second mmc queue request member
  mmc: test: add random fault injection in core.c
  mmc: add handling for two parallel block requests in issue_rw_rq

 drivers/mmc/card/block.c  |  537 -
 drivers/mmc/card/mmc_test.c   |  361 +++-
 drivers/mmc/card/queue.c  |  184 +-
 drivers/mmc/card/queue.h  |   33 ++-
 drivers/mmc/core/core.c   |  164 -
 drivers/mmc/core/debugfs.c|5 +
 drivers/mmc/host/mmci.c   |  146 ++-
 drivers/mmc/host/mmci.h   |8 +
 drivers/mmc/host/omap_hsmmc.c |   87 +++-
 include/linux/mmc/core.h  |6 +-
 include/linux/mmc/host.h  |   24 ++
 lib/Kconfig.debug |   11 +
 12 files changed, 1237 insertions(+), 329 deletions(-)

-- 
1.7.4.1

--
To unsubscribe from this list: send the line unsubscribe linux-mmc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v7 01/11] mmc: add non-blocking mmc request function

2011-06-21 Thread Per Forlin
Previously there has only been one function mmc_wait_for_req()
to start and wait for a request. This patch adds
 * mmc_start_req() - starts a request without waiting
   If there is an ongoing request wait for completion
   of that request and start the new one and return.
   Does not wait for the new command to complete.

This patch also adds new function members in struct mmc_host_ops
only called from core.c
 * pre_req - asks the host driver to prepare for the next job
 * post_req - asks the host driver to clean up after a completed job

The intention is to use pre_req() and post_req() to do cache maintenance
while a request is active. pre_req() can be called while a request is active
to minimize latency to start next job. post_req() can be used after the next
job is started to clean up the request. This will minimize the host driver
request end latency. post_req() is typically used before ending the block
request and handing over the buffer to the block layer.

Add a host-private member in mmc_data to be used by
pre_req to mark the data. The host driver will then
check this mark to see if the data is prepared or not.

Signed-off-by: Per Forlin per.for...@linaro.org
---
 drivers/mmc/core/core.c  |  110 +
 include/linux/mmc/core.h |6 ++-
 include/linux/mmc/host.h |   21 +
 3 files changed, 126 insertions(+), 11 deletions(-)

diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 68091dd..c82fa3b 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -198,9 +198,106 @@ mmc_start_request(struct mmc_host *host, struct 
mmc_request *mrq)
 
 static void mmc_wait_done(struct mmc_request *mrq)
 {
-   complete(mrq-done_data);
+   complete(mrq-completion);
 }
 
+static void __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
+{
+   init_completion(mrq-completion);
+   mrq-done = mmc_wait_done;
+   mmc_start_request(host, mrq);
+}
+
+static void mmc_wait_for_req_done(struct mmc_host *host,
+ struct mmc_request *mrq)
+{
+   wait_for_completion(mrq-completion);
+}
+
+/**
+ * mmc_pre_req - Prepare for a new request
+ * @host: MMC host to prepare command
+ * @mrq: MMC request to prepare for
+ * @is_first_req: true if there is no previous started request
+ * that may run in parellel to this call, otherwise false
+ *
+ * mmc_pre_req() is called in prior to mmc_start_req() to let
+ * host prepare for the new request. Preparation of a request may be
+ * performed while another request is running on the host.
+ */
+static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
+bool is_first_req)
+{
+   if (host-ops-pre_req)
+   host-ops-pre_req(host, mrq, is_first_req);
+}
+
+/**
+ * mmc_post_req - Post process a completed request
+ * @host: MMC host to post process command
+ * @mrq: MMC request to post process for
+ * @err: Error, if non zero, clean up any resources made in pre_req
+ *
+ * Let the host post process a completed request. Post processing of
+ * a request may be performed while another reuqest is running.
+ */
+static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
+int err)
+{
+   if (host-ops-post_req)
+   host-ops-post_req(host, mrq, err);
+}
+
+/**
+ * mmc_start_req - start a non-blocking request
+ * @host: MMC host to start command
+ * @areq: async request to start
+ * @error: out parameter returns 0 for success, otherwise non zero
+ *
+ * Start a new MMC custom command request for a host.
+ * If there is on ongoing async request wait for completion
+ * of that request and start the new one and return.
+ * Does not wait for the new request to complete.
+ *
+ * Returns the completed async request, NULL in case of none completed.
+ */
+struct mmc_async_req *mmc_start_req(struct mmc_host *host,
+   struct mmc_async_req *areq, int *error)
+{
+   int err = 0;
+   struct mmc_async_req *data = host-areq;
+
+   /* Prepare a new request */
+   if (areq)
+   mmc_pre_req(host, areq-mrq, !host-areq);
+
+   if (host-areq) {
+   mmc_wait_for_req_done(host, host-areq-mrq);
+   err = host-areq-err_check(host-card, host-areq);
+   if (err) {
+   mmc_post_req(host, host-areq-mrq, 0);
+   if (areq)
+   mmc_post_req(host, areq-mrq, -EINVAL);
+
+   host-areq = NULL;
+   goto out;
+   }
+   }
+
+   if (areq)
+   __mmc_start_req(host, areq-mrq);
+
+   if (host-areq)
+   mmc_post_req(host, host-areq-mrq, 0);
+
+   host-areq = areq;
+ out:
+   if (error)
+   *error = err;
+   return data;
+}

[PATCH v7 03/11] mmci: implement pre_req() and post_req()

2011-06-21 Thread Per Forlin
pre_req() runs dma_map_sg() and prepares the dma descriptor
for the next mmc data transfer. post_req() runs dma_unmap_sg.
If not calling pre_req() before mmci_request(), mmci_request()
will prepare the cache and dma just like it did it before.
It is optional to use pre_req() and post_req() for mmci.

Signed-off-by: Per Forlin per.for...@linaro.org
---
 drivers/mmc/host/mmci.c |  146 ++
 drivers/mmc/host/mmci.h |8 +++
 2 files changed, 141 insertions(+), 13 deletions(-)

diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 6e27433..f8c837b 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -322,7 +322,8 @@ static void mmci_dma_unmap(struct mmci_host *host, struct 
mmc_data *data)
dir = DMA_FROM_DEVICE;
}
 
-   dma_unmap_sg(chan-device-dev, data-sg, data-sg_len, dir);
+   if (!data-host_cookie)
+   dma_unmap_sg(chan-device-dev, data-sg, data-sg_len, dir);
 
/*
 * Use of DMA with scatter-gather is impossible.
@@ -340,7 +341,8 @@ static void mmci_dma_data_error(struct mmci_host *host)
dmaengine_terminate_all(host-dma_current);
 }
 
-static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
+static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
+ struct mmci_host_next *next)
 {
struct variant_data *variant = host-variant;
struct dma_slave_config conf = {
@@ -351,13 +353,20 @@ static int mmci_dma_start_data(struct mmci_host *host, 
unsigned int datactrl)
.src_maxburst = variant-fifohalfsize  2, /* # of words */
.dst_maxburst = variant-fifohalfsize  2, /* # of words */
};
-   struct mmc_data *data = host-data;
struct dma_chan *chan;
struct dma_device *device;
struct dma_async_tx_descriptor *desc;
int nr_sg;
 
-   host-dma_current = NULL;
+   /* Check if next job is already prepared */
+   if (data-host_cookie  !next 
+   host-dma_current  host-dma_desc_current)
+   return 0;
+
+   if (!next) {
+   host-dma_current = NULL;
+   host-dma_desc_current = NULL;
+   }
 
if (data-flags  MMC_DATA_READ) {
conf.direction = DMA_FROM_DEVICE;
@@ -372,7 +381,7 @@ static int mmci_dma_start_data(struct mmci_host *host, 
unsigned int datactrl)
return -EINVAL;
 
/* If less than or equal to the fifo size, don't bother with DMA */
-   if (host-size = variant-fifosize)
+   if (data-blksz * data-blocks = variant-fifosize)
return -EINVAL;
 
device = chan-device;
@@ -386,14 +395,38 @@ static int mmci_dma_start_data(struct mmci_host *host, 
unsigned int datactrl)
if (!desc)
goto unmap_exit;
 
-   /* Okay, go for it. */
-   host-dma_current = chan;
+   if (next) {
+   next-dma_chan = chan;
+   next-dma_desc = desc;
+   } else {
+   host-dma_current = chan;
+   host-dma_desc_current = desc;
+   }
+
+   return 0;
 
+ unmap_exit:
+   if (!next)
+   dmaengine_terminate_all(chan);
+   dma_unmap_sg(device-dev, data-sg, data-sg_len, conf.direction);
+   return -ENOMEM;
+}
+
+static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
+{
+   int ret;
+   struct mmc_data *data = host-data;
+
+   ret = mmci_dma_prep_data(host, host-data, NULL);
+   if (ret)
+   return ret;
+
+   /* Okay, go for it. */
dev_vdbg(mmc_dev(host-mmc),
 Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags 
%08x\n,
 data-sg_len, data-blksz, data-blocks, data-flags);
-   dmaengine_submit(desc);
-   dma_async_issue_pending(chan);
+   dmaengine_submit(host-dma_desc_current);
+   dma_async_issue_pending(host-dma_current);
 
datactrl |= MCI_DPSM_DMAENABLE;
 
@@ -408,14 +441,90 @@ static int mmci_dma_start_data(struct mmci_host *host, 
unsigned int datactrl)
writel(readl(host-base + MMCIMASK0) | MCI_DATAENDMASK,
   host-base + MMCIMASK0);
return 0;
+}
 
-unmap_exit:
-   dmaengine_terminate_all(chan);
-   dma_unmap_sg(device-dev, data-sg, data-sg_len, conf.direction);
-   return -ENOMEM;
+static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
+{
+   struct mmci_host_next *next = host-next_data;
+
+   if (data-host_cookie  data-host_cookie != next-cookie) {
+   printk(KERN_WARNING [%s] invalid cookie: data-host_cookie %d
+   host-next_data.cookie %d\n,
+  __func__, data-host_cookie, host-next_data.cookie);
+   data-host_cookie = 0;
+   }
+
+   if (!data-host_cookie)
+   return;
+
+   host-dma_desc_current = next-dma_desc;
+   

[PATCH v7 05/11] mmc: mmc_test: add test for non-blocking transfers

2011-06-21 Thread Per Forlin
Add four tests for read and write performance per
different transfer size, 4k to 4M.
 * Read using blocking mmc request
 * Read using non-blocking mmc request
 * Write using blocking mmc request
 * Write using non-blocking mmc request

The host driver must support pre_req() and post_req()
in order to run the non-blocking test cases.

Signed-off-by: Per Forlin per.for...@linaro.org
---
 drivers/mmc/card/mmc_test.c |  322 +--
 1 files changed, 313 insertions(+), 9 deletions(-)

diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index e8508e9..5325049 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -22,6 +22,7 @@
 #include linux/debugfs.h
 #include linux/uaccess.h
 #include linux/seq_file.h
+#include linux/random.h
 
 #define RESULT_OK  0
 #define RESULT_FAIL1
@@ -51,10 +52,12 @@ struct mmc_test_pages {
  * struct mmc_test_mem - allocated memory.
  * @arr: array of allocations
  * @cnt: number of allocations
+ * @size_min_cmn: lowest common size in array of allocations
  */
 struct mmc_test_mem {
struct mmc_test_pages *arr;
unsigned int cnt;
+   unsigned int size_min_cmn;
 };
 
 /**
@@ -148,6 +151,26 @@ struct mmc_test_card {
struct mmc_test_general_result  *gr;
 };
 
+enum mmc_test_prep_media {
+   MMC_TEST_PREP_NONE = 0,
+   MMC_TEST_PREP_WRITE_FULL = 1  0,
+   MMC_TEST_PREP_ERASE = 1  1,
+};
+
+struct mmc_test_multiple_rw {
+   unsigned int *bs;
+   unsigned int len;
+   unsigned int size;
+   bool do_write;
+   bool do_nonblock_req;
+   enum mmc_test_prep_media prepare;
+};
+
+struct mmc_test_async_req {
+   struct mmc_async_req areq;
+   struct mmc_test_card *test;
+};
+
 /***/
 /*  General helper functions   */
 /***/
@@ -302,6 +325,7 @@ static struct mmc_test_mem *mmc_test_alloc_mem(unsigned 
long min_sz,
unsigned long max_seg_page_cnt = DIV_ROUND_UP(max_seg_sz, PAGE_SIZE);
unsigned long page_cnt = 0;
unsigned long limit = nr_free_buffer_pages()  4;
+   unsigned int min_cmn = 0;
struct mmc_test_mem *mem;
 
if (max_page_cnt  limit)
@@ -345,6 +369,12 @@ static struct mmc_test_mem *mmc_test_alloc_mem(unsigned 
long min_sz,
mem-arr[mem-cnt].page = page;
mem-arr[mem-cnt].order = order;
mem-cnt += 1;
+   if (!min_cmn)
+   min_cmn = PAGE_SIZE  order;
+   else
+   min_cmn = min(min_cmn,
+ (unsigned int) (PAGE_SIZE  order));
+
if (max_page_cnt = (1UL  order))
break;
max_page_cnt -= 1UL  order;
@@ -355,6 +385,7 @@ static struct mmc_test_mem *mmc_test_alloc_mem(unsigned 
long min_sz,
break;
}
}
+   mem-size_min_cmn = min_cmn;
 
return mem;
 
@@ -381,7 +412,6 @@ static int mmc_test_map_sg(struct mmc_test_mem *mem, 
unsigned long sz,
do {
for (i = 0; i  mem-cnt; i++) {
unsigned long len = PAGE_SIZE  mem-arr[i].order;
-
if (len  sz)
len = sz;
if (len  max_seg_sz)
@@ -661,7 +691,7 @@ static void mmc_test_prepare_broken_mrq(struct 
mmc_test_card *test,
  * Checks that a normal transfer didn't have any errors
  */
 static int mmc_test_check_result(struct mmc_test_card *test,
-   struct mmc_request *mrq)
+struct mmc_request *mrq)
 {
int ret;
 
@@ -685,6 +715,16 @@ static int mmc_test_check_result(struct mmc_test_card 
*test,
return ret;
 }
 
+
+static int mmc_test_check_result_async(struct mmc_card *card,
+  struct mmc_async_req *areq)
+{
+   struct mmc_test_async_req *test_async =
+   container_of(areq, struct mmc_test_async_req, areq);
+
+   return mmc_test_check_result(test_async-test, areq-mrq);
+}
+
 /*
  * Checks that a short transfer behaved as expected
  */
@@ -720,6 +760,89 @@ static int mmc_test_check_broken_result(struct 
mmc_test_card *test,
 }
 
 /*
+ * Tests nonblock transfer with certain parameters
+ */
+static void mmc_test_nonblock_reset(struct mmc_request *mrq,
+   struct mmc_command *cmd,
+   struct mmc_command *stop,
+   struct mmc_data *data)
+{
+   memset(mrq, 0, sizeof(struct mmc_request));
+   memset(cmd, 0, sizeof(struct mmc_command));
+   memset(data, 0, sizeof(struct mmc_data));
+   memset(stop, 0, sizeof(struct mmc_command));
+
+   mrq-cmd = cmd;
+   mrq-data = data;
+   mrq-stop = stop;
+}

[PATCH v7 07/11] mmc: add a block request prepare function

2011-06-21 Thread Per Forlin
Break out code from mmc_blk_issue_rw_rq to create a
block request prepare function. This doesn't change
any functionality. This helps when handling more
than one active block request.

Signed-off-by: Per Forlin per.for...@linaro.org
---
 drivers/mmc/card/block.c |  222 --
 1 files changed, 117 insertions(+), 105 deletions(-)

diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 3d11690..7a0fabd 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -661,12 +661,15 @@ static inline void mmc_apply_rel_rw(struct 
mmc_blk_request *brq,
}
 }
 
-static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
+static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
+  struct mmc_card *card,
+  int disable_multi,
+  struct mmc_queue *mq)
 {
+   u32 readcmd, writecmd;
+   struct mmc_blk_request *brq = mqrq-brq;
+   struct request *req = mqrq-req;
struct mmc_blk_data *md = mq-data;
-   struct mmc_card *card = md-queue.card;
-   struct mmc_blk_request *brq = mq-mqrq_cur-brq;
-   int ret = 1, disable_multi = 0;
 
/*
 * Reliable writes are used to implement Forced Unit Access and
@@ -677,120 +680,129 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, 
struct request *req)
(rq_data_dir(req) == WRITE) 
(md-flags  MMC_BLK_REL_WR);
 
-   do {
-   struct mmc_command cmd = {0};
-   u32 readcmd, writecmd, status = 0;
-
-   memset(brq, 0, sizeof(struct mmc_blk_request));
-   brq-mrq.cmd = brq-cmd;
-   brq-mrq.data = brq-data;
-
-   brq-cmd.arg = blk_rq_pos(req);
-   if (!mmc_card_blockaddr(card))
-   brq-cmd.arg = 9;
-   brq-cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
-   brq-data.blksz = 512;
-   brq-stop.opcode = MMC_STOP_TRANSMISSION;
-   brq-stop.arg = 0;
-   brq-stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
-   brq-data.blocks = blk_rq_sectors(req);
-
-   /*
-* The block layer doesn't support all sector count
-* restrictions, so we need to be prepared for too big
-* requests.
-*/
-   if (brq-data.blocks  card-host-max_blk_count)
-   brq-data.blocks = card-host-max_blk_count;
+   memset(brq, 0, sizeof(struct mmc_blk_request));
+   brq-mrq.cmd = brq-cmd;
+   brq-mrq.data = brq-data;
 
-   /*
-* After a read error, we redo the request one sector at a time
-* in order to accurately determine which sectors can be read
-* successfully.
-*/
-   if (disable_multi  brq-data.blocks  1)
-   brq-data.blocks = 1;
+   brq-cmd.arg = blk_rq_pos(req);
+   if (!mmc_card_blockaddr(card))
+   brq-cmd.arg = 9;
+   brq-cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+   brq-data.blksz = 512;
+   brq-stop.opcode = MMC_STOP_TRANSMISSION;
+   brq-stop.arg = 0;
+   brq-stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+   brq-data.blocks = blk_rq_sectors(req);
 
-   if (brq-data.blocks  1 || do_rel_wr) {
-   /* SPI multiblock writes terminate using a special
-* token, not a STOP_TRANSMISSION request.
-*/
-   if (!mmc_host_is_spi(card-host) ||
-   rq_data_dir(req) == READ)
-   brq-mrq.stop = brq-stop;
-   readcmd = MMC_READ_MULTIPLE_BLOCK;
-   writecmd = MMC_WRITE_MULTIPLE_BLOCK;
-   } else {
-   brq-mrq.stop = NULL;
-   readcmd = MMC_READ_SINGLE_BLOCK;
-   writecmd = MMC_WRITE_BLOCK;
-   }
-   if (rq_data_dir(req) == READ) {
-   brq-cmd.opcode = readcmd;
-   brq-data.flags |= MMC_DATA_READ;
-   } else {
-   brq-cmd.opcode = writecmd;
-   brq-data.flags |= MMC_DATA_WRITE;
-   }
+   /*
+* The block layer doesn't support all sector count
+* restrictions, so we need to be prepared for too big
+* requests.
+*/
+   if (brq-data.blocks  card-host-max_blk_count)
+   brq-data.blocks = card-host-max_blk_count;
 
-   if (do_rel_wr)
-   mmc_apply_rel_rw(brq, card, req);
+   /*
+* After a read error, we redo the request one sector at a time
+* in order to accurately determine which sectors can be read
+* successfully.
+ 

[PATCH v7 09/11] mmc: add a second mmc queue request member

2011-06-21 Thread Per Forlin
Add an additional mmc queue request instance to make way for
two active block requests. One request may be active while the
other request is being prepared.

Signed-off-by: Per Forlin per.for...@linaro.org
---
 drivers/mmc/card/queue.c |   44 ++--
 drivers/mmc/card/queue.h |3 ++-
 2 files changed, 44 insertions(+), 3 deletions(-)

diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 81d0eef..0757a39 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -130,6 +130,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card 
*card, spinlock_t *lock
u64 limit = BLK_BOUNCE_HIGH;
int ret;
struct mmc_queue_req *mqrq_cur = mq-mqrq[0];
+   struct mmc_queue_req *mqrq_prev = mq-mqrq[1];
 
if (mmc_dev(host)-dma_mask  *mmc_dev(host)-dma_mask)
limit = *mmc_dev(host)-dma_mask;
@@ -140,7 +141,9 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card 
*card, spinlock_t *lock
return -ENOMEM;
 
memset(mq-mqrq_cur, 0, sizeof(mq-mqrq_cur));
+   memset(mq-mqrq_prev, 0, sizeof(mq-mqrq_prev));
mq-mqrq_cur = mqrq_cur;
+   mq-mqrq_prev = mqrq_prev;
mq-queue-queuedata = mq;
 
blk_queue_prep_rq(mq-queue, mmc_prep_request);
@@ -181,9 +184,17 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card 
*card, spinlock_t *lock
allocate bounce cur buffer\n,
mmc_card_name(card));
}
+   mqrq_prev-bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
+   if (!mqrq_prev-bounce_buf) {
+   printk(KERN_WARNING %s: unable to 
+   allocate bounce prev buffer\n,
+   mmc_card_name(card));
+   kfree(mqrq_cur-bounce_buf);
+   mqrq_cur-bounce_buf = NULL;
+   }
}
 
-   if (mqrq_cur-bounce_buf) {
+   if (mqrq_cur-bounce_buf  mqrq_prev-bounce_buf) {
blk_queue_bounce_limit(mq-queue, BLK_BOUNCE_ANY);
blk_queue_max_hw_sectors(mq-queue, bouncesz / 512);
blk_queue_max_segments(mq-queue, bouncesz / 512);
@@ -198,11 +209,19 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card 
*card, spinlock_t *lock
if (ret)
goto cleanup_queue;
 
+   mqrq_prev-sg = mmc_alloc_sg(1, ret);
+   if (ret)
+   goto cleanup_queue;
+
+   mqrq_prev-bounce_sg =
+   mmc_alloc_sg(bouncesz / 512, ret);
+   if (ret)
+   goto cleanup_queue;
}
}
 #endif
 
-   if (!mqrq_cur-bounce_buf) {
+   if (!mqrq_cur-bounce_buf  !mqrq_prev-bounce_buf) {
blk_queue_bounce_limit(mq-queue, limit);
blk_queue_max_hw_sectors(mq-queue,
min(host-max_blk_count, host-max_req_size / 512));
@@ -213,6 +232,10 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card 
*card, spinlock_t *lock
if (ret)
goto cleanup_queue;
 
+
+   mqrq_prev-sg = mmc_alloc_sg(host-max_segs, ret);
+   if (ret)
+   goto cleanup_queue;
}
 
sema_init(mq-thread_sem, 1);
@@ -229,6 +252,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card 
*card, spinlock_t *lock
  free_bounce_sg:
kfree(mqrq_cur-bounce_sg);
mqrq_cur-bounce_sg = NULL;
+   kfree(mqrq_prev-bounce_sg);
+   mqrq_prev-bounce_sg = NULL;
 
  cleanup_queue:
kfree(mqrq_cur-sg);
@@ -236,6 +261,11 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card 
*card, spinlock_t *lock
kfree(mqrq_cur-bounce_buf);
mqrq_cur-bounce_buf = NULL;
 
+   kfree(mqrq_prev-sg);
+   mqrq_prev-sg = NULL;
+   kfree(mqrq_prev-bounce_buf);
+   mqrq_prev-bounce_buf = NULL;
+
blk_cleanup_queue(mq-queue);
return ret;
 }
@@ -245,6 +275,7 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
struct request_queue *q = mq-queue;
unsigned long flags;
struct mmc_queue_req *mqrq_cur = mq-mqrq_cur;
+   struct mmc_queue_req *mqrq_prev = mq-mqrq_prev;
 
/* Make sure the queue isn't suspended, as that will deadlock */
mmc_queue_resume(mq);
@@ -267,6 +298,15 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
kfree(mqrq_cur-bounce_buf);
mqrq_cur-bounce_buf = NULL;
 
+   kfree(mqrq_prev-bounce_sg);
+   mqrq_prev-bounce_sg = NULL;
+
+   kfree(mqrq_prev-sg);
+   mqrq_prev-sg = NULL;
+
+   kfree(mqrq_prev-bounce_buf);
+   mqrq_prev-bounce_buf = NULL;
+
 

[PATCH v7 08/11] mmc: move error code in mmc_block_issue_rw_rq to a separate function.

2011-06-21 Thread Per Forlin
Break out code without functional changes. This simplifies the code and
makes way for handling two parallel requests.

Signed-off-by: Per Forlin per.for...@linaro.org
---
 drivers/mmc/card/block.c |  244 +++---
 1 files changed, 142 insertions(+), 102 deletions(-)

diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 7a0fabd..6a84a75 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -106,6 +106,13 @@ struct mmc_blk_data {
 
 static DEFINE_MUTEX(open_lock);
 
+enum mmc_blk_status {
+   MMC_BLK_SUCCESS = 0,
+   MMC_BLK_RETRY,
+   MMC_BLK_DATA_ERR,
+   MMC_BLK_CMD_ERR,
+};
+
 module_param(perdev_minors, int, 0444);
 MODULE_PARM_DESC(perdev_minors, Minors numbers to allocate per device);
 
@@ -661,6 +668,112 @@ static inline void mmc_apply_rel_rw(struct 
mmc_blk_request *brq,
}
 }
 
+static enum mmc_blk_status mmc_blk_err_check(struct mmc_blk_request *brq,
+struct request *req,
+struct mmc_card *card,
+struct mmc_blk_data *md)
+{
+   struct mmc_command cmd;
+   u32 status = 0;
+   enum mmc_blk_status ret = MMC_BLK_SUCCESS;
+
+   /*
+* Check for errors here, but don't jump to cmd_err
+* until later as we need to wait for the card to leave
+* programming mode even when things go wrong.
+*/
+   if (brq-sbc.error || brq-cmd.error ||
+   brq-data.error || brq-stop.error) {
+   if (brq-data.blocks  1  rq_data_dir(req) == READ) {
+   /* Redo read one sector at a time */
+   printk(KERN_WARNING %s: retrying using single 
+  block read\n, req-rq_disk-disk_name);
+   ret = MMC_BLK_RETRY;
+   goto out;
+   }
+   status = get_card_status(card, req);
+   }
+
+   if (brq-sbc.error) {
+   printk(KERN_ERR %s: error %d sending SET_BLOCK_COUNT 
+  command, response %#x, card status %#x\n,
+  req-rq_disk-disk_name, brq-sbc.error,
+  brq-sbc.resp[0], status);
+   }
+
+   if (brq-cmd.error) {
+   printk(KERN_ERR %s: error %d sending read/write 
+  command, response %#x, card status %#x\n,
+  req-rq_disk-disk_name, brq-cmd.error,
+  brq-cmd.resp[0], status);
+   }
+
+   if (brq-data.error) {
+   if (brq-data.error == -ETIMEDOUT  brq-mrq.stop)
+   /* 'Stop' response contains card status */
+   status = brq-mrq.stop-resp[0];
+   printk(KERN_ERR %s: error %d transferring data,
+   sector %u, nr %u, card status %#x\n,
+  req-rq_disk-disk_name, brq-data.error,
+  (unsigned)blk_rq_pos(req),
+  (unsigned)blk_rq_sectors(req), status);
+   }
+
+   if (brq-stop.error) {
+   printk(KERN_ERR %s: error %d sending stop command, 
+  response %#x, card status %#x\n,
+  req-rq_disk-disk_name, brq-stop.error,
+  brq-stop.resp[0], status);
+   }
+
+   if (!mmc_host_is_spi(card-host)  rq_data_dir(req) != READ) {
+   do {
+   int err;
+
+   cmd.opcode = MMC_SEND_STATUS;
+   cmd.arg = card-rca  16;
+   cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
+   err = mmc_wait_for_cmd(card-host, cmd, 5);
+   if (err) {
+   printk(KERN_ERR %s: error %d requesting 
status\n,
+  req-rq_disk-disk_name, err);
+   ret = MMC_BLK_CMD_ERR;
+   goto out;
+   }
+   /*
+* Some cards mishandle the status bits,
+* so make sure to check both the busy
+* indication and the card state.
+*/
+   } while (!(cmd.resp[0]  R1_READY_FOR_DATA) ||
+(R1_CURRENT_STATE(cmd.resp[0]) == 7));
+
+#if 0
+   if (cmd.resp[0]  ~0x0900)
+   printk(KERN_ERR %s: status = %08x\n,
+  req-rq_disk-disk_name, cmd.resp[0]);
+   if (mmc_decode_status(cmd.resp)) {
+   ret = MMC_BLK_CMD_ERR;
+   goto out;
+   }
+#endif
+   }
+
+   if (brq-cmd.error || brq-stop.error || brq-data.error) {
+   if (rq_data_dir(req) == READ)
+   /*
+* After an error, we redo I/O one sector at a
+  

[PATCH v7 11/11] mmc: add handling for two parallel block requests in issue_rw_rq

2011-06-21 Thread Per Forlin
Change mmc_blk_issue_rw_rq() to become asynchronous.
The execution flow looks like this:
The mmc-queue calls issue_rw_rq(), which sends the request
to the host and returns back to the mmc-queue. The mmc-queue calls
issue_rw_rq() again with a new request. This new request is prepared,
in issue_rw_rq(), then it waits for the active request to complete before
pushing it to the host. When the mmc-queue is empty it will call
issue_rw_rq() with req=NULL to finish off the active request
without starting a new request.

Signed-off-by: Per Forlin per.for...@linaro.org
---
 drivers/mmc/card/block.c |  124 +
 drivers/mmc/card/queue.c |   17 +--
 drivers/mmc/card/queue.h |1 +
 3 files changed, 104 insertions(+), 38 deletions(-)

diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 6a84a75..22acdd7 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -108,6 +108,7 @@ static DEFINE_MUTEX(open_lock);
 
 enum mmc_blk_status {
MMC_BLK_SUCCESS = 0,
+   MMC_BLK_PARTIAL,
MMC_BLK_RETRY,
MMC_BLK_DATA_ERR,
MMC_BLK_CMD_ERR,
@@ -668,14 +669,16 @@ static inline void mmc_apply_rel_rw(struct 
mmc_blk_request *brq,
}
 }
 
-static enum mmc_blk_status mmc_blk_err_check(struct mmc_blk_request *brq,
-struct request *req,
-struct mmc_card *card,
-struct mmc_blk_data *md)
+static int mmc_blk_err_check(struct mmc_card *card,
+struct mmc_async_req *areq)
 {
struct mmc_command cmd;
u32 status = 0;
enum mmc_blk_status ret = MMC_BLK_SUCCESS;
+   struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
+   mmc_active);
+   struct mmc_blk_request *brq = mq_mrq-brq;
+   struct request *req = mq_mrq-req;
 
/*
 * Check for errors here, but don't jump to cmd_err
@@ -770,7 +773,11 @@ static enum mmc_blk_status mmc_blk_err_check(struct 
mmc_blk_request *brq,
else
ret = MMC_BLK_DATA_ERR;
}
-out:
+
+   if (ret == MMC_BLK_SUCCESS 
+   blk_rq_bytes(req) != brq-data.bytes_xfered)
+   ret = MMC_BLK_PARTIAL;
+ out:
return ret;
 }
 
@@ -901,27 +908,59 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
brq-data.sg_len = i;
}
 
+   mqrq-mmc_active.mrq = brq-mrq;
+   mqrq-mmc_active.err_check = mmc_blk_err_check;
+
mmc_queue_bounce_pre(mqrq);
 }
 
-static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
+static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
 {
struct mmc_blk_data *md = mq-data;
struct mmc_card *card = md-queue.card;
-   struct mmc_blk_request *brq = mq-mqrq_cur-brq;
-   int ret = 1, disable_multi = 0;
+   struct mmc_blk_request *brq;
+   int ret = 1;
+   int disable_multi = 0;
enum mmc_blk_status status;
+   struct mmc_queue_req *mq_rq;
+   struct request *req;
+   struct mmc_async_req *areq;
+
+   if (!rqc  !mq-mqrq_prev-req)
+   goto out;
 
do {
-   mmc_blk_rw_rq_prep(mq-mqrq_cur, card, disable_multi, mq);
-   mmc_wait_for_req(card-host, brq-mrq);
+   if (rqc) {
+   mmc_blk_rw_rq_prep(mq-mqrq_cur, card, 0, mq);
+   areq = mq-mqrq_cur-mmc_active;
+   } else
+   areq = NULL;
+   areq = mmc_start_req(card-host, areq, (int *) status);
+   if (!areq)
+   goto out;
 
-   mmc_queue_bounce_post(mq-mqrq_cur);
-   status = mmc_blk_err_check(brq, req, card, md);
+   mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
+   brq = mq_rq-brq;
+   req = mq_rq-req;
+   mmc_queue_bounce_post(mq_rq);
 
switch (status) {
-   case MMC_BLK_CMD_ERR:
-   goto cmd_err;
+   case MMC_BLK_SUCCESS:
+   case MMC_BLK_PARTIAL:
+   /*
+* A block was successfully transferred.
+*/
+   spin_lock_irq(md-lock);
+   ret = __blk_end_request(req, 0,
+   brq-data.bytes_xfered);
+   spin_unlock_irq(md-lock);
+   if (status == MMC_BLK_SUCCESS  ret) {
+   /* If this happen it is a bug */
+   printk(KERN_ERR %s BUG rq_tot %d d_xfer %d\n,
+  __func__, blk_rq_bytes(req),
+  brq-data.bytes_xfered);
+   goto 

[PATCH v7 10/11] mmc: test: add random fault injection in core.c

2011-06-21 Thread Per Forlin
This simple fault injection proved to be very useful to
test the error handling in the block.c rw_rq(). It may
still be useful to test if the host driver handles
pre_req() and post_req() correctly in case of errors.

Signed-off-by: Per Forlin per.for...@linaro.org
---
 drivers/mmc/core/core.c|   54 
 drivers/mmc/core/debugfs.c |5 
 include/linux/mmc/host.h   |3 ++
 lib/Kconfig.debug  |   11 +
 4 files changed, 73 insertions(+), 0 deletions(-)

diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index c82fa3b..481c557 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -23,6 +23,8 @@
 #include linux/log2.h
 #include linux/regulator/consumer.h
 #include linux/pm_runtime.h
+#include <linux/fault-inject.h>
+#include <linux/random.h>
 
 #include <linux/mmc/card.h>
 #include <linux/mmc/host.h>
@@ -82,6 +84,56 @@ static void mmc_flush_scheduled_work(void)
flush_workqueue(workqueue);
 }
 
+#ifdef CONFIG_FAIL_MMC_REQUEST
+
+static DECLARE_FAULT_ATTR(fail_mmc_request);
+
+static int __init setup_fail_mmc_request(char *str)
+{
+   return setup_fault_attr(fail_mmc_request, str);
+}
+__setup(fail_mmc_request=, setup_fail_mmc_request);
+
+static void mmc_should_fail_request(struct mmc_host *host,
+   struct mmc_request *mrq)
+{
+   struct mmc_command *cmd = mrq-cmd;
+   struct mmc_data *data = mrq-data;
+   static const int data_errors[] = {
+   -ETIMEDOUT,
+   -EILSEQ,
+   -EIO,
+   };
+
+   if (!data)
+   return;
+
+   if (cmd-error || data-error || !host-make_it_fail ||
+   !should_fail(fail_mmc_request, data-blksz * data-blocks))
+   return;
+
+   data-error = data_errors[random32() % ARRAY_SIZE(data_errors)];
+   data-bytes_xfered = (random32() % (data-bytes_xfered  9))  9;
+}
+
+static int __init fail_mmc_request_debugfs(void)
+{
+   return init_fault_attr_dentries(fail_mmc_request,
+   fail_mmc_request);
+}
+
+late_initcall(fail_mmc_request_debugfs);
+
+#else /* CONFIG_FAIL_MMC_REQUEST */
+
+static void mmc_should_fail_request(struct mmc_host *host,
+   struct mmc_request *mrq)
+{
+}
+
+#endif /* CONFIG_FAIL_MMC_REQUEST */
+
+
 /**
  * mmc_request_done - finish processing an MMC request
  * @host: MMC host which completed request
@@ -108,6 +160,8 @@ void mmc_request_done(struct mmc_host *host, struct 
mmc_request *mrq)
cmd-error = 0;
host-ops-request(host, mrq);
} else {
+   mmc_should_fail_request(host, mrq);
+
led_trigger_event(host-led, LED_OFF);
 
pr_debug(%s: req done (CMD%u): %d: %08x %08x %08x %08x\n,
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index 998797e..588e76f 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -188,6 +188,11 @@ void mmc_add_host_debugfs(struct mmc_host *host)
root, host-clk_delay))
goto err_node;
 #endif
+#ifdef CONFIG_FAIL_MMC_REQUEST
+   if (!debugfs_create_u8(make-it-fail, S_IRUSR | S_IWUSR,
+  root, host-make_it_fail))
+   goto err_node;
+#endif
return;
 
 err_node:
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 8ae44d8..981cbe8 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -302,6 +302,9 @@ struct mmc_host {
 
struct mmc_async_req*areq;  /* active async req */
 
+#ifdef CONFIG_FAIL_MMC_REQUEST
+   u8  make_it_fail;
+#endif
unsigned long   private[0] cacheline_aligned;
 };
 
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index c768bcd..330fc70 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1057,6 +1057,17 @@ config FAIL_IO_TIMEOUT
  Only works with drivers that use the generic timeout handling,
  for others it wont do anything.
 
+config FAIL_MMC_REQUEST
+   bool Fault-injection capability for MMC IO
+   select DEBUG_FS
+   depends on FAULT_INJECTION
+   help
+ Provide fault-injection capability for MMC IO.
+ This will make the mmc core return data errors. This is
+ useful for testing the error handling in the mmc block device
+ and how the mmc host driver handle retries from
+ the block device.
+
 config FAULT_INJECTION_DEBUG_FS
bool Debugfs entries for fault-injection capabilities
depends on FAULT_INJECTION  SYSFS  DEBUG_FS
-- 
1.7.4.1

--
To unsubscribe from this list: send the line unsubscribe linux-mmc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v7 06/11] mmc: add member in mmc queue struct to hold request data

2011-06-21 Thread Per Forlin
The way the request data is organized in the mmc queue struct
it only allows processing of one request at the time.
This patch adds a new struct to hold mmc queue request data such as
sg list, request, blk request and bounce buffers, and updates any functions
depending on the mmc queue struct. This lays the groundwork for
using multiple active requests for one mmc queue.

Signed-off-by: Per Forlin per.for...@linaro.org
---
 drivers/mmc/card/block.c |  125 +---
 drivers/mmc/card/queue.c |  129 --
 drivers/mmc/card/queue.h |   31 ---
 3 files changed, 149 insertions(+), 136 deletions(-)

diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 71da564..3d11690 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -427,14 +427,6 @@ static const struct block_device_operations mmc_bdops = {
 #endif
 };
 
-struct mmc_blk_request {
-   struct mmc_request  mrq;
-   struct mmc_command  sbc;
-   struct mmc_command  cmd;
-   struct mmc_command  stop;
-   struct mmc_data data;
-};
-
 static inline int mmc_blk_part_switch(struct mmc_card *card,
  struct mmc_blk_data *md)
 {
@@ -673,7 +665,7 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct 
request *req)
 {
struct mmc_blk_data *md = mq-data;
struct mmc_card *card = md-queue.card;
-   struct mmc_blk_request brq;
+   struct mmc_blk_request *brq = mq-mqrq_cur-brq;
int ret = 1, disable_multi = 0;
 
/*
@@ -689,56 +681,56 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, 
struct request *req)
struct mmc_command cmd = {0};
u32 readcmd, writecmd, status = 0;
 
-   memset(brq, 0, sizeof(struct mmc_blk_request));
-   brq.mrq.cmd = brq.cmd;
-   brq.mrq.data = brq.data;
+   memset(brq, 0, sizeof(struct mmc_blk_request));
+   brq-mrq.cmd = brq-cmd;
+   brq-mrq.data = brq-data;
 
-   brq.cmd.arg = blk_rq_pos(req);
+   brq-cmd.arg = blk_rq_pos(req);
if (!mmc_card_blockaddr(card))
-   brq.cmd.arg = 9;
-   brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
-   brq.data.blksz = 512;
-   brq.stop.opcode = MMC_STOP_TRANSMISSION;
-   brq.stop.arg = 0;
-   brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
-   brq.data.blocks = blk_rq_sectors(req);
+   brq-cmd.arg = 9;
+   brq-cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+   brq-data.blksz = 512;
+   brq-stop.opcode = MMC_STOP_TRANSMISSION;
+   brq-stop.arg = 0;
+   brq-stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+   brq-data.blocks = blk_rq_sectors(req);
 
/*
 * The block layer doesn't support all sector count
 * restrictions, so we need to be prepared for too big
 * requests.
 */
-   if (brq.data.blocks  card-host-max_blk_count)
-   brq.data.blocks = card-host-max_blk_count;
+   if (brq-data.blocks  card-host-max_blk_count)
+   brq-data.blocks = card-host-max_blk_count;
 
/*
 * After a read error, we redo the request one sector at a time
 * in order to accurately determine which sectors can be read
 * successfully.
 */
-   if (disable_multi  brq.data.blocks  1)
-   brq.data.blocks = 1;
+   if (disable_multi  brq-data.blocks  1)
+   brq-data.blocks = 1;
 
-   if (brq.data.blocks  1 || do_rel_wr) {
+   if (brq-data.blocks  1 || do_rel_wr) {
/* SPI multiblock writes terminate using a special
 * token, not a STOP_TRANSMISSION request.
 */
if (!mmc_host_is_spi(card-host) ||
rq_data_dir(req) == READ)
-   brq.mrq.stop = brq.stop;
+   brq-mrq.stop = brq-stop;
readcmd = MMC_READ_MULTIPLE_BLOCK;
writecmd = MMC_WRITE_MULTIPLE_BLOCK;
} else {
-   brq.mrq.stop = NULL;
+   brq-mrq.stop = NULL;
readcmd = MMC_READ_SINGLE_BLOCK;
writecmd = MMC_WRITE_BLOCK;
}
if (rq_data_dir(req) == READ) {
-   brq.cmd.opcode = readcmd;
-   brq.data.flags |= MMC_DATA_READ;
+   brq-cmd.opcode = readcmd;
+   

[PATCH v7 02/11] omap_hsmmc: add support for pre_req and post_req

2011-06-21 Thread Per Forlin
pre_req() runs dma_map_sg(), post_req() runs dma_unmap_sg.
If not calling pre_req() before omap_hsmmc_request()
dma_map_sg will be issued before starting the transfer.
It is optional to use pre_req(). If issuing pre_req()
post_req() must be called as well.

Signed-off-by: Per Forlin per.for...@linaro.org
---
 drivers/mmc/host/omap_hsmmc.c |   87 +++--
 1 files changed, 83 insertions(+), 4 deletions(-)

diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index dedf3da..b0c6910 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -141,6 +141,11 @@
 #define OMAP_HSMMC_WRITE(base, reg, val) \
__raw_writel((val), (base) + OMAP_HSMMC_##reg)
 
+struct omap_hsmmc_next {
+   unsigned intdma_len;
+   s32 cookie;
+};
+
 struct omap_hsmmc_host {
struct  device  *dev;
struct  mmc_host*mmc;
@@ -184,6 +189,7 @@ struct omap_hsmmc_host {
int reqs_blocked;
int use_reg;
int req_in_progress;
+   struct omap_hsmmc_next  next_data;
 
struct  omap_mmc_platform_data  *pdata;
 };
@@ -1346,8 +1352,9 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, 
void *cb_data)
return;
}
 
-   dma_unmap_sg(mmc_dev(host-mmc), data-sg, data-sg_len,
-   omap_hsmmc_get_dma_dir(host, data));
+   if (!data-host_cookie)
+   dma_unmap_sg(mmc_dev(host-mmc), data-sg, data-sg_len,
+omap_hsmmc_get_dma_dir(host, data));
 
req_in_progress = host-req_in_progress;
dma_ch = host-dma_ch;
@@ -1365,6 +1372,45 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, 
void *cb_data)
}
 }
 
+static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
+  struct mmc_data *data,
+  struct omap_hsmmc_next *next)
+{
+   int dma_len;
+
+   if (!next  data-host_cookie 
+   data-host_cookie != host-next_data.cookie) {
+   printk(KERN_WARNING [%s] invalid cookie: data-host_cookie %d
+   host-next_data.cookie %d\n,
+  __func__, data-host_cookie, host-next_data.cookie);
+   data-host_cookie = 0;
+   }
+
+   /* Check if next job is already prepared */
+   if (next ||
+   (!next  data-host_cookie != host-next_data.cookie)) {
+   dma_len = dma_map_sg(mmc_dev(host-mmc), data-sg,
+data-sg_len,
+omap_hsmmc_get_dma_dir(host, data));
+
+   } else {
+   dma_len = host-next_data.dma_len;
+   host-next_data.dma_len = 0;
+   }
+
+
+   if (dma_len == 0)
+   return -EINVAL;
+
+   if (next) {
+   next-dma_len = dma_len;
+   data-host_cookie = ++next-cookie  0 ? 1 : next-cookie;
+   } else
+   host-dma_len = dma_len;
+
+   return 0;
+}
+
 /*
  * Routine to configure and start DMA for the MMC card
  */
@@ -1398,9 +1444,10 @@ static int omap_hsmmc_start_dma_transfer(struct 
omap_hsmmc_host *host,
mmc_hostname(host-mmc), ret);
return ret;
}
+   ret = omap_hsmmc_pre_dma_transfer(host, data, NULL);
+   if (ret)
+   return ret;
 
-   host-dma_len = dma_map_sg(mmc_dev(host-mmc), data-sg,
-   data-sg_len, omap_hsmmc_get_dma_dir(host, data));
host-dma_ch = dma_ch;
host-dma_sg_idx = 0;
 
@@ -1480,6 +1527,35 @@ omap_hsmmc_prepare_data(struct omap_hsmmc_host *host, 
struct mmc_request *req)
return 0;
 }
 
+static void omap_hsmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
+   int err)
+{
+   struct omap_hsmmc_host *host = mmc_priv(mmc);
+   struct mmc_data *data = mrq-data;
+
+   if (host-use_dma) {
+   dma_unmap_sg(mmc_dev(host-mmc), data-sg, data-sg_len,
+omap_hsmmc_get_dma_dir(host, data));
+   data-host_cookie = 0;
+   }
+}
+
+static void omap_hsmmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
+  bool is_first_req)
+{
+   struct omap_hsmmc_host *host = mmc_priv(mmc);
+
+   if (mrq-data-host_cookie) {
+   mrq-data-host_cookie = 0;
+   return ;
+   }
+
+   if (host-use_dma)
+   if (omap_hsmmc_pre_dma_transfer(host, mrq-data,
+   host-next_data))
+   mrq-data-host_cookie = 0;
+}
+
 /*
  * Request function. for read/write operation
  */
@@ -1928,6 +2004,8 @@ static int omap_hsmmc_disable_fclk(struct mmc_host *mmc, 
int lazy)
 static const struct mmc_host_ops omap_hsmmc_ops = {
.enable = omap_hsmmc_enable_fclk,
 

[PATCH v7 04/11] mmc: mmc_test: add debugfs file to list all tests

2011-06-21 Thread Per Forlin
Add a debugfs file testlist to print all available tests

Signed-off-by: Per Forlin per.for...@linaro.org
---
 drivers/mmc/card/mmc_test.c |   39 ++-
 1 files changed, 38 insertions(+), 1 deletions(-)

diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index 233cdfa..e8508e9 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -2445,6 +2445,32 @@ static const struct file_operations mmc_test_fops_test = 
{
.release= single_release,
 };
 
+static int mtf_testlist_show(struct seq_file *sf, void *data)
+{
+   int i;
+
+   mutex_lock(mmc_test_lock);
+
+   for (i = 0; i  ARRAY_SIZE(mmc_test_cases); i++)
+   seq_printf(sf, %d:\t%s\n, i+1, mmc_test_cases[i].name);
+
+   mutex_unlock(mmc_test_lock);
+
+   return 0;
+}
+
+static int mtf_testlist_open(struct inode *inode, struct file *file)
+{
+   return single_open(file, mtf_testlist_show, inode-i_private);
+}
+
+static const struct file_operations mmc_test_fops_testlist = {
+   .open   = mtf_testlist_open,
+   .read   = seq_read,
+   .llseek = seq_lseek,
+   .release= single_release,
+};
+
 static void mmc_test_free_file_test(struct mmc_card *card)
 {
struct mmc_test_dbgfs_file *df, *dfs;
@@ -2476,7 +2502,18 @@ static int mmc_test_register_file_test(struct mmc_card 
*card)
 
if (IS_ERR_OR_NULL(file)) {
dev_err(card-dev,
-   Can't create file. Perhaps debugfs is disabled.\n);
+   Can't create test. Perhaps debugfs is disabled.\n);
+   ret = -ENODEV;
+   goto err;
+   }
+
+   if (card-debugfs_root)
+   file = debugfs_create_file(testlist, S_IRUGO,
+   card-debugfs_root, card, mmc_test_fops_testlist);
+
+   if (IS_ERR_OR_NULL(file)) {
+   dev_err(card-dev,
+   Can't create testlist. Perhaps debugfs is 
disabled.\n);
ret = -ENODEV;
goto err;
}
-- 
1.7.4.1

--
To unsubscribe from this list: send the line unsubscribe linux-mmc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH 4/4] PCI: make cardbus-bridge resources nice-to-have

2011-06-21 Thread Ram Pai
On Wed, Jun 22, 2011 at 12:13:01AM +0200, Dominik Brodowski wrote:
 Hey,
 
 On Tue, Jun 21, 2011 at 02:36:22PM -0700, Jesse Barnes wrote:
  On Tue, 21 Jun 2011 09:23:21 -0700
  Ram Pai linux...@us.ibm.com wrote:
  
   On Tue, Jun 21, 2011 at 09:57:00AM +0200, Dominik Brodowski wrote:
On Mon, Jun 20, 2011 at 03:47:17PM -0700, Ram Pai wrote:
 Allocate resources to cardbus bridge only after all other genuine
 resources requests are satisfied. Dont retry if resource allocation
 for cardbus-bridge fails.

Well, for those who use cardbus cards, cardbus resources aren't nice to
have, they are absolutely required. Of course, not all cardbus cards 
need
as many resources as are currently assigned, so I wouldn't oppose a 
patch
which marks _some_ of the currently assigned resources as nice to 
have.
But this approach -- 0 required, all nice to have -- seems wrong to 
me.
   
   Do you know how much minimal resource is good enough?  The value, before
   this patch, was 256 for IO ports and  64M for memory.
   
   BTW: If the BIOS has already assigned enough resources for all the 
   devices on
   the system, no devices will be starved including the cardbus. The OS 
   intervenes
   and is forced to make this hard choice only when it sees unassigned 
   resources to
   some devices along with resource contention.
  
  Dominik, presumably you have a few good cardbus test machines; can you
  give Ram's patches a try?  If we know they break existing
  configurations, I'm afraid we'll just have to revert the whole
  re-allocation patch yet again.  If your stuff survives, I'll ping Linus
  to see what he thinks, though he'll probably want to revert in any
  case...
 
 Actually, I only have one cardbus-capable test machine, which does work in
 very most cases, and also I do care much more about the PCMCIA side of
 things than the PCI/CardBus side... Therefore, all I could do is some more
 or less informed guessing about how much minimal resource we should try to
 allocate...

I assume majority of the platforms will have enough resources to satisfy all
the resource requests, and their BIOS would have done a decent job.

Even if the BIOS has not done a decent job, and there are enough resources
available we should not see a regression.

The only platforms that would expose a regression are those where resources are
under contention and the BIOS has assigned enough resources to the cardbus bridge
but not to some other device. It will be hard to find such a platform, but I am
sure there is one out there somewhere.

I am sure we will, some day, see reports of regression because some platform
has exactly the right characteristics to expose the issue. But then that
platform is a highly constrained platform in the first place. It's debatable
whether that should be characterised as a regression, or as a platform that was
riding on good luck till now.

Even Oliver's platform is a highly constrained platform, and we probably can
treat his platform as 'riding on good luck till now'.

We won't be able to satisfy all the platforms with resource constraints.  I
think our probable choice is to select a solution that breaks least number of
platforms and special case those broken platforms through kernel command line
parameters.

RP
--
To unsubscribe from this list: send the line unsubscribe linux-mmc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH v4 1/4] mmc: sdhci: fix interrupt storm from card detection

2011-06-21 Thread Shawn Guo
On Tue, Jun 21, 2011 at 09:05:25AM -0700, Philip Rakity wrote:
 
 On Jun 21, 2011, at 7:41 AM, Shawn Guo wrote:
 
  The issue was initially found by Eric Benard as below.
  
  http://permalink.gmane.org/gmane.linux.ports.arm.kernel/108031
  
  Not sure about other SDHCI based controller, but on Freescale eSDHC,
  the SDHCI_INT_CARD_INSERT bits will be immediately set again when it
  gets cleared, if a card is inserted. The driver need to mask the irq
  to prevent interrupt storm which will freeze the system.  And the
  SDHCI_INT_CARD_REMOVE gets the same situation.
  
  The patch fixes the problem based on the initial idea from
  Eric Benard.
  
  Signed-off-by: Shawn Guo shawn@linaro.org
  Cc: Eric Benard e...@eukrea.com
  Tested-by: Arnaud Patard arnaud.pat...@rtp-net.org
  ---
  drivers/mmc/host/sdhci.c |   29 +
  1 files changed, 25 insertions(+), 4 deletions(-)
  
  diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
  index 91d9892..790f959 100644
  --- a/drivers/mmc/host/sdhci.c
  +++ b/drivers/mmc/host/sdhci.c
  @@ -127,11 +127,15 @@ static void sdhci_mask_irqs(struct sdhci_host *host, 
  u32 irqs)
  
  static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
  {
  -   u32 irqs = SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT;
  +   u32 present, irqs;
  
  if (host-quirks  SDHCI_QUIRK_BROKEN_CARD_DETECTION)
  return;
  
  +   present = sdhci_readl(host, SDHCI_PRESENT_STATE) 
  + SDHCI_CARD_PRESENT;
  +   irqs = present ? SDHCI_INT_CARD_REMOVE : SDHCI_INT_CARD_INSERT;
  +
  if (enable)
  sdhci_unmask_irqs(host, irqs);
  else
  @@ -2154,13 +2158,30 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
  mmc_hostname(host-mmc), intmask);
  
  if (intmask  (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
  +   u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) 
  + SDHCI_CARD_PRESENT;
  +
  +   /*
  +* There is a observation on i.mx esdhc.  INSERT bit will be
  +* immediately set again when it gets cleared, if a card is
  +* inserted.  We have to mask the irq to prevent interrupt
  +* storm which will freeze the system.  And the REMOVE gets
  +* the same situation.
  +*
  +* More testing are needed here to ensure it works for other
  +* platforms though.
  +*/
  +   sdhci_mask_irqs(host, present ? SDHCI_INT_CARD_INSERT :
  +   SDHCI_INT_CARD_REMOVE);
  +   sdhci_unmask_irqs(host, present ? SDHCI_INT_CARD_REMOVE :
  + SDHCI_INT_CARD_INSERT);
  +
 
 
  sdhci_writel(host, intmask  (SDHCI_INT_CARD_INSERT |
  -   SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
  +SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
 
 
 Please keep the old formatting since code does not change anything.  Makes it 
 easier to find
 the real change
 

I actually rewrote the body of the if-statement, and fixed one
indentation in that specific line.

  +   intmask = ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
  tasklet_schedule(host-card_tasklet);
  }
  
  -   intmask = ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
  -
  if (intmask  SDHCI_INT_CMD_MASK) {
  sdhci_writel(host, intmask  SDHCI_INT_CMD_MASK,
  SDHCI_INT_STATUS);
  -- 
  1.7.4.1
  
 
 
 Reviewed-by: Philip Rakity prak...@marvell.com
 

-- 
Regards,
Shawn

--
To unsubscribe from this list: send the line unsubscribe linux-mmc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH v4 1/4] mmc: sdhci: fix interrupt storm from card detection

2011-06-21 Thread Philip Rakity

On Jun 21, 2011, at 9:40 PM, Shawn Guo wrote:

 On Tue, Jun 21, 2011 at 09:05:25AM -0700, Philip Rakity wrote:
 
 On Jun 21, 2011, at 7:41 AM, Shawn Guo wrote:
 
 The issue was initially found by Eric Benard as below.
 
 http://permalink.gmane.org/gmane.linux.ports.arm.kernel/108031
 
 Not sure about other SDHCI based controller, but on Freescale eSDHC,
 the SDHCI_INT_CARD_INSERT bits will be immediately set again when it
 gets cleared, if a card is inserted. The driver need to mask the irq
 to prevent interrupt storm which will freeze the system.  And the
 SDHCI_INT_CARD_REMOVE gets the same situation.
 
 The patch fixes the problem based on the initial idea from
 Eric Benard.
 
 Signed-off-by: Shawn Guo shawn@linaro.org
 Cc: Eric Benard e...@eukrea.com
 Tested-by: Arnaud Patard arnaud.pat...@rtp-net.org
 ---
 drivers/mmc/host/sdhci.c |   29 +
 1 files changed, 25 insertions(+), 4 deletions(-)
 
 diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
 index 91d9892..790f959 100644
 --- a/drivers/mmc/host/sdhci.c
 +++ b/drivers/mmc/host/sdhci.c
 @@ -127,11 +127,15 @@ static void sdhci_mask_irqs(struct sdhci_host *host, 
 u32 irqs)
 
 static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
 {
 -   u32 irqs = SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT;
 +   u32 present, irqs;
 
 if (host-quirks  SDHCI_QUIRK_BROKEN_CARD_DETECTION)
 return;
 
 +   present = sdhci_readl(host, SDHCI_PRESENT_STATE) 
 + SDHCI_CARD_PRESENT;
 +   irqs = present ? SDHCI_INT_CARD_REMOVE : SDHCI_INT_CARD_INSERT;
 +
 if (enable)
 sdhci_unmask_irqs(host, irqs);
 else
 @@ -2154,13 +2158,30 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
 mmc_hostname(host-mmc), intmask);
 
 if (intmask  (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
 +   u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) 
 + SDHCI_CARD_PRESENT;
 +
 +   /*
 +* There is a observation on i.mx esdhc.  INSERT bit will be
 +* immediately set again when it gets cleared, if a card is
 +* inserted.  We have to mask the irq to prevent interrupt
 +* storm which will freeze the system.  And the REMOVE gets
 +* the same situation.
 +*
 +* More testing are needed here to ensure it works for other
 +* platforms though.
 +*/
 +   sdhci_mask_irqs(host, present ? SDHCI_INT_CARD_INSERT :
 +   SDHCI_INT_CARD_REMOVE);
 +   sdhci_unmask_irqs(host, present ? SDHCI_INT_CARD_REMOVE :
 + SDHCI_INT_CARD_INSERT);
 +
 
 
 sdhci_writel(host, intmask  (SDHCI_INT_CARD_INSERT |
 -   SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
 +SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
 
 
 Please keep the old formatting since code does not change anything.  Makes 
 it easier to find
 the real change
 
 
 I actually rewrote the body of the if-statement, and fixed one
 indentation in that specific line.

understand -- I'm happy with the results !  
Philip

 
 +   intmask = ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
 tasklet_schedule(host-card_tasklet);
 }
 
 -   intmask = ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
 -
 if (intmask  SDHCI_INT_CMD_MASK) {
 sdhci_writel(host, intmask  SDHCI_INT_CMD_MASK,
 SDHCI_INT_STATUS);
 -- 
 1.7.4.1
 
 
 
 Reviewed-by: Philip Rakity prak...@marvell.com
 
 
 -- 
 Regards,
 Shawn
 

--
To unsubscribe from this list: send the line unsubscribe linux-mmc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html