[PATCH v4] mmc: card: Adding support for sanitize in eMMC 4.5

2013-04-30 Thread Maya Erez
The sanitize support is added as a user-app ioctl call, and
was removed from the block-device request, since its purpose is
to be invoked not via File-System but by a user.
This feature deletes the unmap memory region of the eMMC card,
by writing to a specific register in the EXT_CSD.
unmap region is the memory region that was previously deleted
(by erase, trim or discard operation).
In order to avoid timeout when sanitizing large-scale cards,
the timeout for sanitize operation is 240 seconds.

Signed-off-by: Yaniv Gardi yga...@codeaurora.org
Signed-off-by: Maya Erez me...@codeaurora.org
---
 drivers/mmc/card/block.c   |   67 ++--
 drivers/mmc/card/queue.c   |2 +-
 drivers/mmc/core/core.c|   22 ++
 drivers/mmc/core/mmc_ops.c |2 +
 include/linux/mmc/core.h   |2 +
 5 files changed, 73 insertions(+), 22 deletions(-)

diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index e12a03c..d3fbc21 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -58,6 +58,8 @@ MODULE_ALIAS(mmc:block);
 #define INAND_CMD38_ARG_SECTRIM1 0x81
 #define INAND_CMD38_ARG_SECTRIM2 0x88
 #define MMC_BLK_TIMEOUT_MS  (10 * 60 * 1000)/* 10 minute timeout */
+#define MMC_SANITIZE_REQ_TIMEOUT 240000
+#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
 
 #define mmc_req_rel_wr(req)(((req-cmd_flags  REQ_FUA) || \
  (req-cmd_flags  REQ_META))  \
@@ -409,6 +411,34 @@ static int ioctl_rpmb_card_status_poll(struct mmc_card 
*card, u32 *status,
return err;
 }
 
+static int ioctl_do_sanitize(struct mmc_card *card)
+{
+   int err;
+
+   if (!mmc_can_sanitize(card)) {
+   pr_warn(%s: %s - SANITIZE is not supported\n,
+   mmc_hostname(card-host), __func__);
+   err = -EOPNOTSUPP;
+   goto out;
+   }
+
+   pr_debug(%s: %s - SANITIZE IN PROGRESS...\n,
+   mmc_hostname(card-host), __func__);
+
+   err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+   EXT_CSD_SANITIZE_START, 1,
+   MMC_SANITIZE_REQ_TIMEOUT);
+
+   if (err)
+   pr_err(%s: %s - EXT_CSD_SANITIZE_START failed. err=%d\n,
+  mmc_hostname(card-host), __func__, err);
+
+   pr_debug(%s: %s - SANITIZE COMPLETED\n, mmc_hostname(card-host),
+__func__);
+out:
+   return err;
+}
+
 static int mmc_blk_ioctl_cmd(struct block_device *bdev,
struct mmc_ioc_cmd __user *ic_ptr)
 {
@@ -511,6 +541,16 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
goto cmd_rel_host;
}
 
+   if (MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) {
+   err = ioctl_do_sanitize(card);
+
+   if (err)
+   pr_err(%s: ioctl_do_sanitize() failed. err = %d,
+  __func__, err);
+
+   goto cmd_rel_host;
+   }
+
mmc_wait_for_req(card-host, mrq);
 
if (cmd.error) {
@@ -940,10 +980,10 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue 
*mq,
 {
struct mmc_blk_data *md = mq-data;
struct mmc_card *card = md-queue.card;
-   unsigned int from, nr, arg, trim_arg, erase_arg;
+   unsigned int from, nr, arg;
int err = 0, type = MMC_BLK_SECDISCARD;
 
-   if (!(mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))) {
+   if (!(mmc_can_secure_erase_trim(card))) {
err = -EOPNOTSUPP;
goto out;
}
@@ -951,23 +991,11 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue 
*mq,
from = blk_rq_pos(req);
nr = blk_rq_sectors(req);
 
-   /* The sanitize operation is supported at v4.5 only */
-   if (mmc_can_sanitize(card)) {
-   erase_arg = MMC_ERASE_ARG;
-   trim_arg = MMC_TRIM_ARG;
-   } else {
-   erase_arg = MMC_SECURE_ERASE_ARG;
-   trim_arg = MMC_SECURE_TRIM1_ARG;
-   }
+   if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
+   arg = MMC_SECURE_TRIM1_ARG;
+   else
+   arg = MMC_SECURE_ERASE_ARG;
 
-   if (mmc_erase_group_aligned(card, from, nr))
-   arg = erase_arg;
-   else if (mmc_can_trim(card))
-   arg = trim_arg;
-   else {
-   err = -EINVAL;
-   goto out;
-   }
 retry:
if (card-quirks  MMC_QUIRK_INAND_CMD38) {
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
@@ -1003,9 +1031,6 @@ retry:
goto out;
}
 
-   if (mmc_can_sanitize(card))
-   err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
-EXT_CSD_SANITIZE_START, 1, 0);
 out_retry:
if (err  !mmc_blk_reset(md, card-host, type))
goto retry

[PATCH v6] mmc: block: Add write packing control

2013-04-29 Thread Maya Erez
The write packing control will ensure that read requests latency is
not increased due to long write packed commands.

The trigger for enabling the write packing is calculated by the relation
between the number of potential packed write requests and the mean
value of all previous potential values:
If the current potential is greater than the mean potential then
the heuristic is that the following workload will contain many write
requests, therefore we lower the packed trigger. In the opposite case
we want to increase the trigger in order to get less packing events.
The trigger for disabling the write packing is fetching a read request.

Signed-off-by: Maya Erez me...@codeaurora.org
Signed-off-by: Lee Susman lsus...@codeaurora.org
---
Our experiments showed that the write packing can increase the worst case read 
latency.
Since the read latency is critical for user experience we added a write packing 
control
mechanism that disables the write packing in case of read requests.
This will ensure that read requests latency is not increased due to long write 
packed commands.

The trigger for enabling the write packing is managing to pack several write 
requests.
The number of potential packed requests that will trigger the packing can be 
configured via sysfs.
The trigger for disabling the write packing is a fetch of a read request.

Changes in v6:
- Dynamic calculation of the trigger for enabling te write packing (instead 
of a hardcoded value)

Changes in v5:
- Revert v4 changes
- fix the device attribute removal in case of failure of device_create_file

Changes in v4:
- Move MMC specific attributes to mmc sub-directory

Changes in v3:
- Fix the settings of num_of_potential_packed_wr_reqs

Changes in v2:
- Move the attribute for setting the packing enabling trigger to the block 
device
- Add documentation of the new attribute
---
 drivers/mmc/card/block.c |  131 ++
 drivers/mmc/card/queue.c |8 +++
 drivers/mmc/card/queue.h |3 +
 include/linux/mmc/host.h |1 +
 4 files changed, 143 insertions(+), 0 deletions(-)

diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index e12a03c..e0ed0b4 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -64,6 +64,13 @@ MODULE_ALIAS(mmc:block);
  (rq_data_dir(req) == WRITE))
 #define PACKED_CMD_VER 0x01
 #define PACKED_CMD_WR  0x02
+#define PACKED_TRIGGER_MAX_ELEMENTS5000
+#define PCKD_TRGR_INIT_MEAN_POTEN  17
+#define PCKD_TRGR_POTEN_LOWER_BOUND5
+#define PCKD_TRGR_URGENT_PENALTY   2
+#define PCKD_TRGR_LOWER_BOUND  5
+#define PCKD_TRGR_PRECISION_MULTIPLIER 100
+
 
 static DEFINE_MUTEX(block_mutex);
 
@@ -1405,6 +1412,122 @@ static inline u8 mmc_calc_packed_hdr_segs(struct 
request_queue *q,
return nr_segs;
 }
 
+static int get_packed_trigger(int potential, struct mmc_card *card,
+ struct request *req, int curr_trigger)
+{
+   static int num_mean_elements = 1;
+   static unsigned long mean_potential = PCKD_TRGR_INIT_MEAN_POTEN;
+   unsigned int trigger = curr_trigger;
+   unsigned int pckd_trgr_upper_bound = card-ext_csd.max_packed_writes;
+
+   /* scale down the upper bound to 75% */
+   pckd_trgr_upper_bound = (pckd_trgr_upper_bound * 3) / 4;
+
+   /*
+* since the most common calls for this function are with small
+* potential write values and since we don't want these calls to affect
+* the packed trigger, set a lower bound and ignore calls with
+* potential lower than that bound
+*/
+   if (potential <= PCKD_TRGR_POTEN_LOWER_BOUND)
+   return trigger;
+
+   /*
+* this is to prevent integer overflow in the following calculation:
+* once every PACKED_TRIGGER_MAX_ELEMENTS reset the algorithm
+*/
+   if (num_mean_elements > PACKED_TRIGGER_MAX_ELEMENTS) {
+   num_mean_elements = 1;
+   mean_potential = PCKD_TRGR_INIT_MEAN_POTEN;
+   }
+
+   /*
+* get next mean value based on previous mean value and current
+* potential packed writes. Calculation is as follows:
+* mean_pot[i+1] =
+*  ((mean_pot[i] * num_mean_elem) + potential)/(num_mean_elem + 1)
+*/
+   mean_potential *= num_mean_elements;
+   /*
+* add num_mean_elements so that the division of two integers doesn't
+* lower mean_potential too much
+*/
+   if (potential > mean_potential)
+   mean_potential += num_mean_elements;
+   mean_potential += potential;
+   /* this is for gaining more precision when dividing two integers */
+   mean_potential *= PCKD_TRGR_PRECISION_MULTIPLIER;
+   /* this completes the mean calculation */
+   mean_potential /= ++num_mean_elements;
+   mean_potential /= PCKD_TRGR_PRECISION_MULTIPLIER;
+
+   /*
+* if current potential

[PATCH v3] mmc: card: Adding support for sanitize in eMMC 4.5

2013-04-18 Thread Maya Erez
The sanitize support is added as a user-app ioctl call, and
was removed from the block-device request, since its purpose is
to be invoked not via File-System but by a user.
This feature deletes the unmap memory region of the eMMC card,
by writing to a specific register in the EXT_CSD.
unmap region is the memory region that was previously deleted
(by erase, trim or discard operation).
In order to avoid timeout when sanitizing large-scale cards,
the timeout for sanitize operation is 240 seconds.

Signed-off-by: Yaniv Gardi yga...@codeaurora.org
Signed-off-by: Maya Erez me...@codeaurora.org
---
 drivers/mmc/card/block.c   |   68 ++-
 drivers/mmc/card/queue.c   |2 +-
 drivers/mmc/core/core.c|   22 ++
 drivers/mmc/core/mmc_ops.c |2 +
 include/linux/mmc/core.h   |2 +
 include/linux/mmc/host.h   |1 +
 6 files changed, 75 insertions(+), 22 deletions(-)

diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index e12a03c..1f043ee 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -58,6 +58,8 @@ MODULE_ALIAS(mmc:block);
 #define INAND_CMD38_ARG_SECTRIM1 0x81
 #define INAND_CMD38_ARG_SECTRIM2 0x88
 #define MMC_BLK_TIMEOUT_MS  (10 * 60 * 1000)/* 10 minute timeout */
+#define MMC_SANITIZE_REQ_TIMEOUT 240000
+#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
 
 #define mmc_req_rel_wr(req)(((req-cmd_flags  REQ_FUA) || \
  (req-cmd_flags  REQ_META))  \
@@ -409,6 +411,35 @@ static int ioctl_rpmb_card_status_poll(struct mmc_card 
*card, u32 *status,
return err;
 }
 
+static int ioctl_do_sanitize(struct mmc_card *card)
+{
+   int err;
+
+   if (!(mmc_can_sanitize(card) &&
+ (card->host->caps2 & MMC_CAP2_SANITIZE))) {
+   pr_warn(%s: %s - SANITIZE is not supported\n,
+   mmc_hostname(card-host), __func__);
+   err = -EOPNOTSUPP;
+   goto out;
+   }
+
+   pr_debug(%s: %s - SANITIZE IN PROGRESS...\n,
+   mmc_hostname(card-host), __func__);
+
+   err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+   EXT_CSD_SANITIZE_START, 1,
+   MMC_SANITIZE_REQ_TIMEOUT);
+
+   if (err)
+   pr_err(%s: %s - EXT_CSD_SANITIZE_START failed. err=%d\n,
+  mmc_hostname(card-host), __func__, err);
+
+   pr_debug(%s: %s - SANITIZE COMPLETED\n, mmc_hostname(card-host),
+__func__);
+out:
+   return err;
+}
+
 static int mmc_blk_ioctl_cmd(struct block_device *bdev,
struct mmc_ioc_cmd __user *ic_ptr)
 {
@@ -511,6 +542,16 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
goto cmd_rel_host;
}
 
+   if (MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) {
+   err = ioctl_do_sanitize(card);
+
+   if (err)
+   pr_err(%s: ioctl_do_sanitize() failed. err = %d,
+  __func__, err);
+
+   goto cmd_rel_host;
+   }
+
mmc_wait_for_req(card-host, mrq);
 
if (cmd.error) {
@@ -940,10 +981,10 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue 
*mq,
 {
struct mmc_blk_data *md = mq-data;
struct mmc_card *card = md-queue.card;
-   unsigned int from, nr, arg, trim_arg, erase_arg;
+   unsigned int from, nr, arg;
int err = 0, type = MMC_BLK_SECDISCARD;
 
-   if (!(mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))) {
+   if (!(mmc_can_secure_erase_trim(card))) {
err = -EOPNOTSUPP;
goto out;
}
@@ -951,23 +992,11 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue 
*mq,
from = blk_rq_pos(req);
nr = blk_rq_sectors(req);
 
-   /* The sanitize operation is supported at v4.5 only */
-   if (mmc_can_sanitize(card)) {
-   erase_arg = MMC_ERASE_ARG;
-   trim_arg = MMC_TRIM_ARG;
-   } else {
-   erase_arg = MMC_SECURE_ERASE_ARG;
-   trim_arg = MMC_SECURE_TRIM1_ARG;
-   }
+   if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
+   arg = MMC_SECURE_TRIM1_ARG;
+   else
+   arg = MMC_SECURE_ERASE_ARG;
 
-   if (mmc_erase_group_aligned(card, from, nr))
-   arg = erase_arg;
-   else if (mmc_can_trim(card))
-   arg = trim_arg;
-   else {
-   err = -EINVAL;
-   goto out;
-   }
 retry:
if (card-quirks  MMC_QUIRK_INAND_CMD38) {
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
@@ -1003,9 +1032,6 @@ retry:
goto out;
}
 
-   if (mmc_can_sanitize(card))
-   err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL

[PATCH v2] mmc: card: Adding support for sanitize in eMMC 4.5

2013-04-17 Thread Maya Erez
The sanitize support is added as a user-app ioctl call, and
was removed from the block-device request, since its purpose is
to be invoked not via File-System but by a user.
This feature deletes the unmap memory region of the eMMC card,
by writing to a specific register in the EXT_CSD.
unmap region is the memory region that was previously deleted
(by erase, trim or discard operation).
In order to avoid timeout when sanitizing large-scale cards,
the timeout for sanitize operation is 240 seconds.

Signed-off-by: Yaniv Gardi yga...@codeaurora.org
Signed-off-by: Maya Erez me...@codeaurora.org
---
 drivers/mmc/card/block.c |   68 +++--
 drivers/mmc/card/queue.c |2 +-
 drivers/mmc/core/core.c  |   21 ++
 include/linux/mmc/core.h |2 +
 include/linux/mmc/host.h |1 +
 5 files changed, 72 insertions(+), 22 deletions(-)

diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index e12a03c..1f043ee 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -58,6 +58,8 @@ MODULE_ALIAS(mmc:block);
 #define INAND_CMD38_ARG_SECTRIM1 0x81
 #define INAND_CMD38_ARG_SECTRIM2 0x88
 #define MMC_BLK_TIMEOUT_MS  (10 * 60 * 1000)/* 10 minute timeout */
+#define MMC_SANITIZE_REQ_TIMEOUT 240000
+#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
 
 #define mmc_req_rel_wr(req)(((req-cmd_flags  REQ_FUA) || \
  (req-cmd_flags  REQ_META))  \
@@ -409,6 +411,35 @@ static int ioctl_rpmb_card_status_poll(struct mmc_card 
*card, u32 *status,
return err;
 }
 
+static int ioctl_do_sanitize(struct mmc_card *card)
+{
+   int err;
+
+   if (!(mmc_can_sanitize(card) &&
+ (card->host->caps2 & MMC_CAP2_SANITIZE))) {
+   pr_warn(%s: %s - SANITIZE is not supported\n,
+   mmc_hostname(card-host), __func__);
+   err = -EOPNOTSUPP;
+   goto out;
+   }
+
+   pr_debug(%s: %s - SANITIZE IN PROGRESS...\n,
+   mmc_hostname(card-host), __func__);
+
+   err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+   EXT_CSD_SANITIZE_START, 1,
+   MMC_SANITIZE_REQ_TIMEOUT);
+
+   if (err)
+   pr_err(%s: %s - EXT_CSD_SANITIZE_START failed. err=%d\n,
+  mmc_hostname(card-host), __func__, err);
+
+   pr_debug(%s: %s - SANITIZE COMPLETED\n, mmc_hostname(card-host),
+__func__);
+out:
+   return err;
+}
+
 static int mmc_blk_ioctl_cmd(struct block_device *bdev,
struct mmc_ioc_cmd __user *ic_ptr)
 {
@@ -511,6 +542,16 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
goto cmd_rel_host;
}
 
+   if (MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) {
+   err = ioctl_do_sanitize(card);
+
+   if (err)
+   pr_err(%s: ioctl_do_sanitize() failed. err = %d,
+  __func__, err);
+
+   goto cmd_rel_host;
+   }
+
mmc_wait_for_req(card-host, mrq);
 
if (cmd.error) {
@@ -940,10 +981,10 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue 
*mq,
 {
struct mmc_blk_data *md = mq-data;
struct mmc_card *card = md-queue.card;
-   unsigned int from, nr, arg, trim_arg, erase_arg;
+   unsigned int from, nr, arg;
int err = 0, type = MMC_BLK_SECDISCARD;
 
-   if (!(mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))) {
+   if (!(mmc_can_secure_erase_trim(card))) {
err = -EOPNOTSUPP;
goto out;
}
@@ -951,23 +992,11 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue 
*mq,
from = blk_rq_pos(req);
nr = blk_rq_sectors(req);
 
-   /* The sanitize operation is supported at v4.5 only */
-   if (mmc_can_sanitize(card)) {
-   erase_arg = MMC_ERASE_ARG;
-   trim_arg = MMC_TRIM_ARG;
-   } else {
-   erase_arg = MMC_SECURE_ERASE_ARG;
-   trim_arg = MMC_SECURE_TRIM1_ARG;
-   }
+   if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
+   arg = MMC_SECURE_TRIM1_ARG;
+   else
+   arg = MMC_SECURE_ERASE_ARG;
 
-   if (mmc_erase_group_aligned(card, from, nr))
-   arg = erase_arg;
-   else if (mmc_can_trim(card))
-   arg = trim_arg;
-   else {
-   err = -EINVAL;
-   goto out;
-   }
 retry:
if (card-quirks  MMC_QUIRK_INAND_CMD38) {
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
@@ -1003,9 +1032,6 @@ retry:
goto out;
}
 
-   if (mmc_can_sanitize(card))
-   err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
-EXT_CSD_SANITIZE_START, 1, 0);
 out_retry

[PATCH v6] mmc: core: Add support for idle time BKOPS

2013-04-14 Thread Maya Erez
Devices have various maintenance operations need to perform internally.
In order to reduce latencies during time critical operations like read
and write, it is better to execute maintenance operations in other
times - when the host is not being serviced. Such operations are called
Background operations (BKOPS).
The device notifies the status of the BKOPS need by updating BKOPS_STATUS
(EXT_CSD byte [246]).

According to the standard a host that supports BKOPS shall check the
status periodically and start background operations as needed, so that
the device has enough time for its maintenance operations.

This patch adds support for this periodic check of the BKOPS status.
Since foreground operations are of higher priority than background
operations the host will check the need for BKOPS when it is idle
(in runtime suspend), and in case of an incoming request the BKOPS
operation will be interrupted.

If the card raised an exception with need for urgent BKOPS (level 2/3)
a flag will be set to indicate MMC to start the BKOPS activity when it
becomes idle.

Since running the BKOPS too often can impact the eMMC endurance, the card
need for BKOPS is not checked on every runtime suspend. In order to estimate
when is the best time to check for BKOPS need the host will take into
account the card capacity and percentages of changed sectors in the card.
A future enhancement can be to check the card need for BKOPS only in case
of random activity.

Signed-off-by: Maya Erez me...@codeaurora.org
---
This patch depends on the following patches:
[PATCH V2 1/2] mmc: core: Add bus_ops fro runtime pm callbacks
[PATCH V2 2/2] mmc: block: Enable runtime pm for mmc blkdevice
---
diff --git a/Documentation/mmc/mmc-dev-attrs.txt 
b/Documentation/mmc/mmc-dev-attrs.txt
index 189bab0..8257aa6 100644
--- a/Documentation/mmc/mmc-dev-attrs.txt
+++ b/Documentation/mmc/mmc-dev-attrs.txt
@@ -8,6 +8,15 @@ The following attributes are read/write.
 
force_roEnforce read-only access even if write protect 
switch is off.
 
+   bkops_check_threshold   This attribute is used to determine whether
+   the status bit that indicates the need for BKOPS should be checked.
+   The value should be given in percentages of the card size.
+   This value is used to calculate the minimum number of sectors that
+   needs to be changed in the device (written or discarded) in order to
+   require the status-bit of BKOPS to be checked.
+   The value can modified via sysfs by writing the required value to:
+   /sys/block/block_dev_name/bkops_check_threshold
+
 SD and MMC Device Attributes
 
 
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 536331a..ef42117 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -116,6 +116,7 @@ struct mmc_blk_data {
unsigned intpart_curr;
struct device_attribute force_ro;
struct device_attribute power_ro_lock;
+   struct device_attribute bkops_check_threshold;
int area_type;
 };
 
@@ -287,6 +288,65 @@ out:
return ret;
 }
 
+static ssize_t
+bkops_check_threshold_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+   struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+   struct mmc_card *card = md-queue.card;
+   int ret;
+
+   if (!card)
+   ret = -EINVAL;
+   else
+   ret = snprintf(buf, PAGE_SIZE, %d\n,
+   card-bkops_info.size_percentage_to_start_bkops);
+
+   mmc_blk_put(md);
+   return ret;
+}
+
+static ssize_t
+bkops_check_threshold_store(struct device *dev,
+struct device_attribute *attr,
+const char *buf, size_t count)
+{
+   int value;
+   struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+   struct mmc_card *card = md-queue.card;
+   unsigned int card_size;
+   int ret = count;
+
+   if (!card) {
+   ret = -EINVAL;
+   goto exit;
+   }
+
+   sscanf(buf, %d, value);
+   if ((value <= 0) || (value >= 100)) {
+   ret = -EINVAL;
+   goto exit;
+   }
+
+   card_size = (unsigned int)get_capacity(md-disk);
+   if (card_size <= 0) {
+   ret = -EINVAL;
+   goto exit;
+   }
+   card-bkops_info.size_percentage_to_start_bkops = value;
+   card-bkops_info.min_sectors_to_start_bkops =
+   (card_size * value) / 100;
+
+   pr_debug(%s: size_percentage = %d, min_sectors = %d,
+   mmc_hostname(card-host),
+   card-bkops_info.size_percentage_to_start_bkops,
+   card-bkops_info.min_sectors_to_start_bkops);
+
+exit:
+   mmc_blk_put(md);
+   return count;
+}
+
 static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
 {
struct mmc_blk_data *md = mmc_blk_get(bdev

RE: [PATCH 0/3] mmc: Use runtime pm for blkdevice

2013-03-02 Thread Maya Erez
Thanks Ulf.
I will go over the new patch and will develop the periodic BKOPS on top of
it.

Thanks,
Maya

-Original Message-
From: linux-mmc-ow...@vger.kernel.org
[mailto:linux-mmc-ow...@vger.kernel.org] On Behalf Of Ulf Hansson
Sent: Friday, March 01, 2013 2:47 PM
To: linux-mmc@vger.kernel.org; Chris Ball
Cc: Johan Rudholm; Ulf Hansson
Subject: [PATCH 0/3] mmc: Use runtime pm for blkdevice

From: Ulf Hansson ulf.hans...@linaro.org

SDIO has been using runtime pm for a while to handle runtime power save
operations. This patchset is enabling the option to make the sd/mmc
blockdevices to use runtime pm as well.

The runtime pm implementation for the block device will make use of
autosuspend to defer power save operation to after request inactivty for a
certain time.

To actually perform some power save operations the corresponding bus ops for
mmc and sd shall be implemented. Typically it could make sense to do BKOPS
for eMMC in here.

Ulf Hansson (3):
  mmc: core: Remove power_restore bus_ops for mmc and sd
  mmc: core: Add bus_ops for runtime pm callbacks
  mmc: block: Enable runtime pm for mmc blkdevice

 drivers/mmc/card/block.c |   28 ++--
 drivers/mmc/core/bus.c   |   14 --
 drivers/mmc/core/core.h  |2 ++
 drivers/mmc/core/mmc.c   |   14 --
 drivers/mmc/core/sd.c|   14 --
 drivers/mmc/core/sdio.c  |   20 
 6 files changed, 60 insertions(+), 32 deletions(-)

--
1.7.10

--
To unsubscribe from this list: send the line unsubscribe linux-mmc in the
body of a message to majord...@vger.kernel.org More majordomo info at
http://vger.kernel.org/majordomo-info.html

Maya Erez
QUALCOMM ISRAEL, on behalf of Qualcomm Innovation Center, Inc. is a member
of Code Aurora Forum, hosted by The Linux Foundation

--
To unsubscribe from this list: send the line unsubscribe linux-mmc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


RE: [PATCH RESEND v7 1/2] block: ioctl support for sanitize in eMMC 4.5

2013-01-21 Thread Maya Erez
(struct block_device *bdev, gfp_t 
+gfp_mask);
 extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask);
 static inline int sb_issue_discard(struct super_block *sb, sector_t block,
diff --git a/include/linux/fs.h b/include/linux/fs.h index b0a6d44..167c450
100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -333,6 +333,7 @@ struct inodes_stat_t {  #define BLKDISCARDZEROES
_IO(0x12,124)  #define BLKSECDISCARD _IO(0x12,125)  #define BLKROTATIONAL
_IO(0x12,126)
+#define BLKSANITIZE _IO(0x12, 127)
 
 #define BMAP_IOCTL 1   /* obsolete - kept for compatibility */
 #define FIBMAP_IO(0x00,1)  /* bmap access */
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index
c0bd030..06f7940 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -1788,6 +1788,8 @@ void blk_fill_rwbs(char *rwbs, u32 rw, int bytes)
rwbs[i++] = 'W';
	else if (rw & REQ_DISCARD)
		rwbs[i++] = 'D';
+   else if (rw & REQ_SANITIZE)
+   rwbs[i++] = 'Z';
else if (bytes)
rwbs[i++] = 'R';
else
--
1.7.6
--
Sent by a consultant of the Qualcomm Innovation Center, Inc.
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum
--
To unsubscribe from this list: send the line unsubscribe linux-mmc in the
body of a message to majord...@vger.kernel.org More majordomo info at
http://vger.kernel.org/majordomo-info.html
--
Maya Erez
Sent by a consultant of the Qualcomm Innovation Center, Inc.
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum

--
To unsubscribe from this list: send the line unsubscribe linux-mmc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v1] mmc: core: move the cache disabling operation to mmc_suspend

2013-01-20 Thread Maya Erez
Cache control is an eMMC feature and in therefore should be
part of MMC's bus resume operations, performed in mmc_suspend,
rather than in the generic mmc_suspend_host().

Signed-off-by: Maya Erez me...@codeaurora.org

diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index aaed768..b438bb2 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -2388,6 +2388,7 @@ EXPORT_SYMBOL(mmc_flush_cache);
  * Turn the cache ON/OFF.
  * Turning the cache OFF shall trigger flushing of the data
  * to the non-volatile storage.
+ * This function should be called with host claimed
  */
 int mmc_cache_ctrl(struct mmc_host *host, u8 enable)
 {
@@ -2399,7 +2400,6 @@ int mmc_cache_ctrl(struct mmc_host *host, u8 enable)
mmc_card_is_removable(host))
return err;
 
-   mmc_claim_host(host);
	if (card && mmc_card_mmc(card) &&
	    (card->ext_csd.cache_size > 0)) {
enable = !!enable;
@@ -2417,7 +2417,6 @@ int mmc_cache_ctrl(struct mmc_host *host, u8 enable)
card-ext_csd.cache_ctrl = enable;
}
}
-   mmc_release_host(host);
 
return err;
 }
@@ -2436,10 +2435,6 @@ int mmc_suspend_host(struct mmc_host *host)
cancel_delayed_work(host-detect);
mmc_flush_scheduled_work();
 
-   err = mmc_cache_ctrl(host, 0);
-   if (err)
-   goto out;
-
mmc_bus_get(host);
if (host-bus_ops  !host-bus_dead) {
if (host-bus_ops-suspend) {
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index e6e3911..dc17d40 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1379,6 +1379,11 @@ static int mmc_suspend(struct mmc_host *host)
BUG_ON(!host-card);
 
mmc_claim_host(host);
+
+   err = mmc_cache_ctrl(host, 0);
+   if (err)
+   goto out;
+
if (mmc_can_poweroff_notify(host-card))
err = mmc_poweroff_notify(host-card, EXT_CSD_POWER_OFF_SHORT);
else if (mmc_card_can_sleep(host))
@@ -1386,8 +1391,9 @@ static int mmc_suspend(struct mmc_host *host)
else if (!mmc_host_is_spi(host))
err = mmc_deselect_cards(host);
	host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200);
-   mmc_release_host(host);
 
+out:
+   mmc_release_host(host);
return err;
 }
 
-- 
1.7.3.3
-- 
QUALCOMM ISRAEL, on behalf of Qualcomm Innovation Center, Inc. is a member
of Code Aurora Forum, hosted by The Linux Foundation
--
To unsubscribe from this list: send the line unsubscribe linux-mmc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


RE: [PATCH] mmc: core: disable the cache before suspend only after stopping BKOPS

2013-01-13 Thread Maya Erez
-Original Message-
From: Subhash Jadavani [mailto:subha...@codeaurora.org] 
Sent: Saturday, January 12, 2013 9:07 AM
To: Maya Erez
Cc: linux-mmc@vger.kernel.org; linux-arm-...@vger.kernel.org; open list
Subject: Re: [PATCH] mmc: core: disable the cache before suspend only after
stopping BKOPS

On 1/12/2013 2:12 AM, Maya Erez wrote:
 mmc_cache_ctrl was called in runtime suspend before MMC interrupted 
 BKOPS in case it is still running on the card. This caused the cache 
 disable to timeout.
I guess even if the idle time bkops polling is not implemented, this patch
is good to have. cache control is the eMMC feature and in that sense, it
should have been part of MMC's bus resume (mmc_resume) rather than generic
mmc_suspend_host().

Patch as such is fine and if you agree, you may want to remove the
mentioning of bkops as part of commit text and may just want to mention
above reason as the main motivation for this patch.

Agreed, I will change the commit text in the next uploaded version.

 Therefore, mmc_cache_ctrl has to move to mmc_suspend where we are sure 
 that the card can go into suspend and there is no pending activity.

 Signed-off-by: Maya Erez me...@codeaurora.org
 ---
   drivers/mmc/core/core.c |7 +--
   drivers/mmc/core/mmc.c  |8 +++-
   2 files changed, 8 insertions(+), 7 deletions(-)

 diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 
 aaed768..b438bb2 100644
 --- a/drivers/mmc/core/core.c
 +++ b/drivers/mmc/core/core.c
 @@ -2388,6 +2388,7 @@ EXPORT_SYMBOL(mmc_flush_cache);
* Turn the cache ON/OFF.
* Turning the cache OFF shall trigger flushing of the data
* to the non-volatile storage.
 + * This function should be called with host claimed
*/
   int mmc_cache_ctrl(struct mmc_host *host, u8 enable)
   {
 @@ -2399,7 +2400,6 @@ int mmc_cache_ctrl(struct mmc_host *host, u8 enable)
   mmc_card_is_removable(host))
   return err;
   
 - mmc_claim_host(host);
   if (card  mmc_card_mmc(card) 
   (card-ext_csd.cache_size  0)) {
   enable = !!enable;
 @@ -2417,7 +2417,6 @@ int mmc_cache_ctrl(struct mmc_host *host, u8 enable)
   card-ext_csd.cache_ctrl = enable;
   }
   }
 - mmc_release_host(host);
   
   return err;
   }
 @@ -2436,10 +2435,6 @@ int mmc_suspend_host(struct mmc_host *host)
   cancel_delayed_work(host-detect);
   mmc_flush_scheduled_work();
   
 - err = mmc_cache_ctrl(host, 0);
 - if (err)
 - goto out;
 -
   mmc_bus_get(host);
   if (host-bus_ops  !host-bus_dead) {
   if (host-bus_ops-suspend) {
 diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index 
 e6e3911..dc17d40 100644
 --- a/drivers/mmc/core/mmc.c
 +++ b/drivers/mmc/core/mmc.c
 @@ -1379,6 +1379,11 @@ static int mmc_suspend(struct mmc_host *host)
   BUG_ON(!host-card);
   
   mmc_claim_host(host);
 +
 + err = mmc_cache_ctrl(host, 0);
 + if (err)
 + goto out;
 +
   if (mmc_can_poweroff_notify(host-card))
   err = mmc_poweroff_notify(host-card,
EXT_CSD_POWER_OFF_SHORT);
   else if (mmc_card_can_sleep(host))
 @@ -1386,8 +1391,9 @@ static int mmc_suspend(struct mmc_host *host)
   else if (!mmc_host_is_spi(host))
   err = mmc_deselect_cards(host);
   host-card-state = ~(MMC_STATE_HIGHSPEED |
MMC_STATE_HIGHSPEED_200);
 - mmc_release_host(host);
   
 +out:
 + mmc_release_host(host);
   return err;
   }
   
--
QUALCOMM ISRAEL, on behalf of Qualcomm Innovation Center, Inc. is a member
of Code Aurora Forum, hosted by The Linux Foundation

--
To unsubscribe from this list: send the line unsubscribe linux-mmc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH] mmc: core: disable the cache before suspend only after stopping BKOPS

2013-01-11 Thread Maya Erez
mmc_cache_ctrl was called in runtime suspend before MMC interrupted
BKOPS in case it is still running on the card. This caused the cache
disable to timeout.
Therefore, mmc_cache_ctrl has to move to mmc_suspend where we are sure
that the card can go into suspend and there is no pending activity.

Signed-off-by: Maya Erez me...@codeaurora.org
---
 drivers/mmc/core/core.c |7 +--
 drivers/mmc/core/mmc.c  |8 +++-
 2 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index aaed768..b438bb2 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -2388,6 +2388,7 @@ EXPORT_SYMBOL(mmc_flush_cache);
  * Turn the cache ON/OFF.
  * Turning the cache OFF shall trigger flushing of the data
  * to the non-volatile storage.
+ * This function should be called with host claimed
  */
 int mmc_cache_ctrl(struct mmc_host *host, u8 enable)
 {
@@ -2399,7 +2400,6 @@ int mmc_cache_ctrl(struct mmc_host *host, u8 enable)
mmc_card_is_removable(host))
return err;
 
-   mmc_claim_host(host);
if (card  mmc_card_mmc(card) 
(card-ext_csd.cache_size  0)) {
enable = !!enable;
@@ -2417,7 +2417,6 @@ int mmc_cache_ctrl(struct mmc_host *host, u8 enable)
card-ext_csd.cache_ctrl = enable;
}
}
-   mmc_release_host(host);
 
return err;
 }
@@ -2436,10 +2435,6 @@ int mmc_suspend_host(struct mmc_host *host)
cancel_delayed_work(host-detect);
mmc_flush_scheduled_work();
 
-   err = mmc_cache_ctrl(host, 0);
-   if (err)
-   goto out;
-
mmc_bus_get(host);
if (host-bus_ops  !host-bus_dead) {
if (host-bus_ops-suspend) {
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index e6e3911..dc17d40 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1379,6 +1379,11 @@ static int mmc_suspend(struct mmc_host *host)
BUG_ON(!host-card);
 
mmc_claim_host(host);
+
+   err = mmc_cache_ctrl(host, 0);
+   if (err)
+   goto out;
+
if (mmc_can_poweroff_notify(host-card))
err = mmc_poweroff_notify(host-card, EXT_CSD_POWER_OFF_SHORT);
else if (mmc_card_can_sleep(host))
@@ -1386,8 +1391,9 @@ static int mmc_suspend(struct mmc_host *host)
else if (!mmc_host_is_spi(host))
err = mmc_deselect_cards(host);
host-card-state = ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200);
-   mmc_release_host(host);
 
+out:
+   mmc_release_host(host);
return err;
 }
 
-- 
1.7.3.3
-- 
QUALCOMM ISRAEL, on behalf of Qualcomm Innovation Center, Inc. is a member
of Code Aurora Forum, hosted by The Linux Foundation
--
To unsubscribe from this list: send the line unsubscribe linux-mmc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v5 0/3] Add support for periodic BKOPS

2013-01-10 Thread Maya Erez
Devices have various maintenance operations they need to perform internally.
In order to reduce latencies during time critical operations like read
and write, it is better to execute maintenance operations in other
times - when the host is not being serviced. Such operations are called
Background operations (BKOPS).
The device notifies the status of the BKOPS need by updating BKOPS_STATUS
(EXT_CSD byte [246]).

According to the standard a host that supports BKOPS shall check the
status periodically and start background operations as needed, so that
the device has enough time for its maintenance operations.

This patch adds support for this periodic check of the BKOPS status.
Since foreground operations are of higher priority than background
operations the host will check the need for BKOPS when it is idle,
and in case of an incoming request the BKOPS operation will be
interrupted.

When the mmcqd thread is idle, a delayed work is created to check the
need for BKOPS. The time to start the delayed work can be set by the host
controller. If this time is not set, a default time is used.
If the card raised an exception with need for urgent BKOPS (level 2/3)
a flag will be set to indicate MMC to start the BKOPS activity when it
becomes idle.

Since running the BKOPS too often can impact the eMMC endurance, the card
need for BKOPS is not checked every time MMC is idle (except in cases where an
exception was raised). In order to estimate the best time to check
for BKOPS need the host will take into account the card capacity and
percentages of changed sectors in the card. A future enhancement can be to
check the card need for BKOPS only in case of random activity.

This patch is based on the periodic BKOPS implementation in version 8 of 
support BKOPS feature for eMMC patch.
The patch was modified to answer the following issues:
- Since mmc_start_bkops is called from two contexts now, mmc_claim_host was 
moved to the beginning of the function
- Also, the check of doing_bkops should be protected when determining if an HPI 
is needed, for the same reason.

Changes in v5:
- Do not allow BKOPS operation in all levels to be blocking
- Change the periodic check for BKOPS need to be based on percentage of 
changed sector
- Add BKOPS statistics

Changes in v4:
- Separate the polling for BKOPS completion to a different patch
- add a flag to indicate if polling for card completion is required

Changes in v3:
- Move the call to stop_bkops to block.c. 
  This allows us to remove the mmc_claim_host from inside the function and 
doesn't cause additional degradation 
  due to an unnecessary claim host operation

Changes in v2:
- Check the number of written / discarded sectors as the trigger for 
checking the BKOPS need.
- Code review fixes

Maya Erez (3):
  mmc: core: Add support for idle time BKOPS
  mmc: allow the host controller to poll for BKOPS completion
  mmc: core: Add MMC BKOPS statistics and debugfs ability to print them

 Documentation/mmc/mmc-dev-attrs.txt |9 +
 drivers/mmc/card/block.c|   96 -
 drivers/mmc/card/queue.c|2 +
 drivers/mmc/core/bus.c  |2 +
 drivers/mmc/core/core.c |  286 +++
 drivers/mmc/core/debugfs.c  |  114 ++
 drivers/mmc/core/mmc.c  |   20 +++
 include/linux/mmc/card.h|   64 -
 include/linux/mmc/core.h|5 +
 include/linux/mmc/host.h|2 +-
 10 files changed, 563 insertions(+), 37 deletions(-)

-- 
1.7.3.3
-- 
QUALCOMM ISRAEL, on behalf of Qualcomm Innovation Center, Inc. is a member
of Code Aurora Forum, hosted by The Linux Foundation
--
To unsubscribe from this list: send the line unsubscribe linux-mmc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v5 1/3] mmc: core: Add support for idle time BKOPS

2013-01-10 Thread Maya Erez
Devices have various maintenance operations they need to perform internally.
In order to reduce latencies during time critical operations like read
and write, it is better to execute maintenance operations in other
times - when the host is not being serviced. Such operations are called
Background operations (BKOPS).
The device notifies the status of the BKOPS need by updating BKOPS_STATUS
(EXT_CSD byte [246]).

According to the standard a host that supports BKOPS shall check the
status periodically and start background operations as needed, so that
the device has enough time for its maintenance operations.

This patch adds support for this periodic check of the BKOPS status.
Since foreground operations are of higher priority than background
operations the host will check the need for BKOPS when it is idle,
and in case of an incoming request the BKOPS operation will be
interrupted.

When the mmcqd thread is idle, a delayed work is created to check the
need for BKOPS. The time to start the delayed work can be set by the host
controller. If this time is not set, a default time is used.
If the card raised an exception with need for urgent BKOPS (level 2/3)
a flag will be set to indicate MMC to start the BKOPS activity when it
becomes idle.

Since running the BKOPS too often can impact the eMMC endurance, the card
need for BKOPS is not checked every time MMC is idle (except in cases where an
exception was raised). In order to estimate the best time to check
for BKOPS need the host will take into account the card capacity and
percentages of changed sectors in the card. A future enhancement can be to
check the card need for BKOPS only in case of random activity.

Signed-off-by: Maya Erez me...@codeaurora.org
---
 Documentation/mmc/mmc-dev-attrs.txt |9 ++
 drivers/mmc/card/block.c|   96 +-
 drivers/mmc/card/queue.c|2 +
 drivers/mmc/core/core.c |  155 +++
 drivers/mmc/core/mmc.c  |   17 
 include/linux/mmc/card.h|   47 ++-
 include/linux/mmc/core.h|2 +
 7 files changed, 291 insertions(+), 37 deletions(-)

diff --git a/Documentation/mmc/mmc-dev-attrs.txt 
b/Documentation/mmc/mmc-dev-attrs.txt
index 0d98fac..8d33b80 100644
--- a/Documentation/mmc/mmc-dev-attrs.txt
+++ b/Documentation/mmc/mmc-dev-attrs.txt
@@ -8,6 +8,15 @@ The following attributes are read/write.
 
force_roEnforce read-only access even if write protect 
switch is off.
 
+   bkops_check_threshold   This attribute is used to determine whether
+   the status bit that indicates the need for BKOPS should be checked.
+   The value should be given in percentages of the card size.
+   This value is used to calculate the minimum number of sectors that
+   needs to be changed in the device (written or discarded) in order to
+   require the status-bit of BKOPS to be checked.
+   The value can be modified via sysfs by writing the required value to:
+   /sys/block/block_dev_name/bkops_check_threshold
+
 SD and MMC Device Attributes
 
 
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 21056b9..a4d4b7e 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -108,6 +108,7 @@ struct mmc_blk_data {
unsigned intpart_curr;
struct device_attribute force_ro;
struct device_attribute power_ro_lock;
+   struct device_attribute bkops_check_threshold;
int area_type;
 };
 
@@ -268,6 +269,65 @@ out:
return ret;
 }
 
+static ssize_t
+bkops_check_threshold_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+   struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+   struct mmc_card *card = md-queue.card;
+   int ret;
+
+   if (!card)
+   ret = -EINVAL;
+   else
+   ret = snprintf(buf, PAGE_SIZE, %d\n,
+   card-bkops_info.size_percentage_to_queue_delayed_work);
+
+   mmc_blk_put(md);
+   return ret;
+}
+
+static ssize_t
+bkops_check_threshold_store(struct device *dev,
+struct device_attribute *attr,
+const char *buf, size_t count)
+{
+   int value;
+   struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+   struct mmc_card *card = md-queue.card;
+   unsigned int card_size;
+   int ret = count;
+
+   if (!card) {
+   ret = -EINVAL;
+   goto exit;
+   }
+
+   sscanf(buf, %d, value);
+   if ((value = 0) || (value = 100)) {
+   ret = -EINVAL;
+   goto exit;
+   }
+
+   card_size = (unsigned int)get_capacity(md-disk);
+   if (card_size = 0) {
+   ret = -EINVAL;
+   goto exit;
+   }
+   card-bkops_info.size_percentage_to_queue_delayed_work = value;
+   card

[PATCH v5 2/3] mmc: allow the host controller to poll for BKOPS completion

2013-01-10 Thread Maya Erez
In order to allow the card to perform the required BKOPS and prevent
the need for critical BKOPS, we would like to prevent BKOPS interruption
when possible.
In case the controller calls mmc_suspend_host when runtime suspend is
idle, the BKOPS operation will be interrupted. To prevent this we would
like to prevent the runtime suspend idle until BKOPS is completed.
This patch adds a flag to allow the controller to mark if the polling is
required or not.

Signed-off-by: Maya Erez me...@codeaurora.org
---
 drivers/mmc/core/core.c  |   82 +-
 drivers/mmc/core/mmc.c   |3 ++
 include/linux/mmc/card.h |5 +++
 include/linux/mmc/core.h |1 +
 include/linux/mmc/host.h |2 +-
 5 files changed, 91 insertions(+), 2 deletions(-)

diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index c8cb98e..e22584a 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -364,7 +364,15 @@ void mmc_start_bkops(struct mmc_card *card, bool 
from_exception)
}
mmc_card_clr_need_bkops(card);
mmc_card_set_doing_bkops(card);
-   card-bkops_info.sectors_changed = 0;
+
+   if (card-host-caps2  MMC_CAP2_POLL_FOR_BKOPS_COMP) {
+   pr_debug(%s: %s: starting the polling thread\n,
+mmc_hostname(card-host), __func__);
+   queue_work(system_nrt_wq,
+  card-bkops_info.poll_for_completion);
+   } else {
+   card-bkops_info.sectors_changed = 0;
+   }
 
 out:
mmc_release_host(card-host);
@@ -372,6 +380,78 @@ out:
 EXPORT_SYMBOL(mmc_start_bkops);
 
 /**
+ * mmc_bkops_completion_polling() - Poll on the card status to
+ * wait for the non-blocking BKOPS completion
+ * @work:  The completion polling work
+ *
+ * The on-going reading of the card status will prevent the card
+ * from getting into suspend while it is in the middle of
+ * performing BKOPS.
+ * Since the non blocking BKOPS can be interrupted by a fetched
+ * request we also check IF mmc_card_doing_bkops in each
+ * iteration.
+ */
+void mmc_bkops_completion_polling(struct work_struct *work)
+{
+   struct mmc_card *card = container_of(work, struct mmc_card,
+   bkops_info.poll_for_completion);
+   unsigned long timeout_jiffies = jiffies +
+   msecs_to_jiffies(BKOPS_COMPLETION_POLLING_TIMEOUT_MS);
+   u32 status;
+   int err;
+
+   /*
+* Wait for the BKOPs to complete. Keep reading the status to prevent
+* the host from getting into suspend
+*/
+   do {
+   mmc_claim_host(card-host);
+
+   if (!mmc_card_doing_bkops(card))
+   goto out;
+
+   err = mmc_send_status(card, status);
+   if (err) {
+   pr_err(%s: error %d requesting status\n,
+  mmc_hostname(card-host), err);
+   goto out;
+   }
+
+   /*
+* Some cards mishandle the status bits, so make sure to check
+* both the busy indication and the card state.
+*/
+   if ((status  R1_READY_FOR_DATA) 
+   (R1_CURRENT_STATE(status) != R1_STATE_PRG)) {
+   pr_debug(%s: %s: completed BKOPs, exit polling\n,
+mmc_hostname(card-host), __func__);
+   mmc_card_clr_doing_bkops(card);
+   card-bkops_info.sectors_changed = 0;
+   goto out;
+   }
+
+   mmc_release_host(card-host);
+
+   /*
+* Sleep before checking the card status again to allow the
+* card to complete the BKOPs operation
+*/
+   msleep(BKOPS_COMPLETION_POLLING_INTERVAL_MS);
+   } while (time_before(jiffies, timeout_jiffies));
+
+   pr_err(%s: %s: exit polling due to timeout, stop bkops\n,
+  mmc_hostname(card-host), __func__);
+   err = mmc_stop_bkops(card);
+   if (err)
+   pr_err(%s: %s: mmc_stop_bkops failed, err=%d\n,
+  mmc_hostname(card-host), __func__, err);
+
+   return;
+out:
+   mmc_release_host(card-host);
+}
+
+/**
  * mmc_start_idle_time_bkops() - check if a non urgent BKOPS is
  * needed
  * @work:  The idle time BKOPS work
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 2f25488..61bfb8f 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1550,6 +1550,9 @@ int mmc_attach_mmc(struct mmc_host *host)
INIT_DELAYED_WORK(host-card-bkops_info.dw,
  mmc_start_idle_time_bkops);
 
+   INIT_WORK(host-card-bkops_info.poll_for_completion,
+ mmc_bkops_completion_polling);
+
/*
 * The host controller can set the time to start the BKOPS

[PATCH v5 3/3] mmc: core: Add MMC BKOPS statistics and debugfs ability to print them

2013-01-10 Thread Maya Erez
The BKOPS statistics are used for BKOPS unit tests and APT tests
to determine test success or failure.
the BKOPS statistics provide the following information:
The number of times BKOPS were issued according to it's severity level
The number of times BKOPS were interrupted by HPI.
The number of times the host went into suspend

Signed-off-by: Yaniv Gardi yga...@codeaurora.org
---
 drivers/mmc/core/bus.c |2 +
 drivers/mmc/core/core.c|   53 
 drivers/mmc/core/debugfs.c |  114 
 include/linux/mmc/card.h   |   12 +
 include/linux/mmc/core.h   |2 +
 5 files changed, 183 insertions(+), 0 deletions(-)

diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index 420cb67..47f883b 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -250,6 +250,8 @@ struct mmc_card *mmc_alloc_card(struct mmc_host *host, 
struct device_type *type)
card-dev.release = mmc_release_card;
card-dev.type = type;
 
+   spin_lock_init(card-bkops_info.bkops_stats.lock);
+
return card;
 }
 
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index e22584a..7405243 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -79,6 +79,30 @@ MODULE_PARM_DESC(
removable,
MMC/SD cards are removable and may be removed during suspend);
 
+#define MMC_UPDATE_BKOPS_STATS_HPI(stats)  \
+   do {\
+   spin_lock(stats.lock); \
+   if (stats.enabled)  \
+   stats.hpi++;\
+   spin_unlock(stats.lock);   \
+   } while (0);
+#define MMC_UPDATE_BKOPS_STATS_SUSPEND(stats)  \
+   do {\
+   spin_lock(stats.lock); \
+   if (stats.enabled)  \
+   stats.suspend++;\
+   spin_unlock(stats.lock);   \
+   } while (0);
+#define MMC_UPDATE_STATS_BKOPS_SEVERITY_LEVEL(stats, level)\
+   do {\
+   if (level = 0 || level  BKOPS_NUM_OF_SEVERITY_LEVELS) \
+   break;  \
+   spin_lock(stats.lock); \
+   if (stats.enabled)  \
+   stats.bkops_level[level-1]++;   \
+   spin_unlock(stats.lock);   \
+   } while (0);
+
 /*
  * Internal function. Schedule delayed work in the MMC work queue.
  */
@@ -255,6 +279,29 @@ mmc_start_request(struct mmc_host *host, struct 
mmc_request *mrq)
host-ops-request(host, mrq);
 }
 
+void mmc_blk_init_bkops_statistics(struct mmc_card *card)
+{
+   int i;
+   struct mmc_bkops_stats *bkops_stats;
+
+   if (!card)
+   return;
+
+   bkops_stats = card-bkops_info.bkops_stats;
+
+   spin_lock(bkops_stats-lock);
+
+   for (i = 0 ; i  BKOPS_NUM_OF_SEVERITY_LEVELS ; ++i)
+   bkops_stats-bkops_level[i] = 0;
+
+   bkops_stats-suspend = 0;
+   bkops_stats-hpi = 0;
+   bkops_stats-enabled = true;
+
+   spin_unlock(bkops_stats-lock);
+}
+EXPORT_SYMBOL(mmc_blk_init_bkops_statistics);
+
 /**
  * mmc_start_delayed_bkops() - Start a delayed work to check for
  *  the need of non urgent BKOPS
@@ -362,6 +409,8 @@ void mmc_start_bkops(struct mmc_card *card, bool 
from_exception)
mmc_hostname(card-host), err);
goto out;
}
+   MMC_UPDATE_STATS_BKOPS_SEVERITY_LEVEL(card-bkops_info.bkops_stats,
+   card-ext_csd.raw_bkops_status);
mmc_card_clr_need_bkops(card);
mmc_card_set_doing_bkops(card);
 
@@ -762,6 +811,8 @@ int mmc_stop_bkops(struct mmc_card *card)
err = 0;
}
 
+   MMC_UPDATE_BKOPS_STATS_HPI(card-bkops_info.bkops_stats);
+
 out:
return err;
 }
@@ -2614,6 +2665,8 @@ int mmc_suspend_host(struct mmc_host *host)
err = mmc_stop_bkops(host-card);
if (err)
goto out;
+   MMC_UPDATE_BKOPS_STATS_SUSPEND(host-
+   card-bkops_info.bkops_stats);
}
err = host-bus_ops-suspend(host);
}
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index 35c2f85..30738cb 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -334,6 +334,114 @@ static const struct file_operations mmc_dbg_ext_csd_fops 
= {
.llseek = default_llseek,
 };
 
+static int mmc_bkops_stats_open(struct inode *inode, struct file *filp)
+{
+   struct mmc_card *card = 

[PATCH v4 0/2] Add support for periodic BKOPS

2012-12-24 Thread Maya Erez
This patch is based on the periodic BKOPS implementation in version 8 of 
support BKOPS feature for eMMC patch.
The patch was modified to answer the following issues:
- Since mmc_start_bkops is called from two contexts now, mmc_claim_host was 
moved to the beginning of the function
- Also, the check of doing_bkops should be protected when determining if an HPI 
is needed, for the same reason.

Changes in v4:
- Separate the polling for BKOPS completion to a different patch
- add a flag to indicate if polling for card completion is required

Changes in v3:
- Move the call to stop_bkops to block.c. 
  This allows us to remove the mmc_claim_host from inside the function and 
doesn't cause additional degradation 
  due to an unnecessary claim host operation

Changes in v2:
- Check the number of written / discarded sectors as the trigger for 
checking the BKOPS need.
- Code review fixes

Maya Erez (2):
  mmc: core: Add support for idle time BKOPS
  mmc: allow the host controller to poll for BKOPS completion

 drivers/mmc/card/block.c |8 ++-
 drivers/mmc/card/queue.c |2 +
 drivers/mmc/core/core.c  |  189 +++---
 drivers/mmc/core/mmc.c   |   22 ++
 include/linux/mmc/card.h |   31 
 include/linux/mmc/core.h |3 +
 include/linux/mmc/host.h |1 +
 7 files changed, 245 insertions(+), 11 deletions(-)

-- 
1.7.3.3
-- 
QUALCOMM ISRAEL, on behalf of Qualcomm Innovation Center, Inc. is a member
of Code Aurora Forum, hosted by The Linux Foundation

--
To unsubscribe from this list: send the line unsubscribe linux-mmc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v4 2/2] mmc: allow the host controller to poll for BKOPS completion

2012-12-24 Thread Maya Erez
In order to allow the card to perform the required BKOPS and prevent
the need for critical BKOPS, we would like to prevent BKOPS interruption
when possible.
In case the controller calls mmc_suspend_host when runtime suspend is
idle, the BKOPS operation will be interrupted. To prevent this we would
like to prevent the runtime suspend idle until BKOPS is completed.
This patch adds a flag to allow the controller to mark if the polling is
required or not.

Signed-off-by: Maya Erez me...@codeaurora.org

diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 36cef94..cb20bac 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -361,14 +361,101 @@ void mmc_start_bkops(struct mmc_card *card, bool 
from_exception)
 * bkops executed synchronously, otherwise
 * the operation is in progress
 */
-   if (!use_busy_signal)
+   if (!use_busy_signal) {
mmc_card_set_doing_bkops(card);
+   if (card-host-caps2  MMC_CAP2_POLL_FOR_BKOPS_COMP) {
+   pr_debug(%s: %s: starting the polling thread\n,
+mmc_hostname(card-host), __func__);
+   queue_work(system_nrt_wq,
+  card-bkops_info.poll_for_completion);
+   }
+   }
+
 out:
mmc_release_host(card-host);
 }
 EXPORT_SYMBOL(mmc_start_bkops);
 
 /**
+ * mmc_bkops_completion_polling() - Poll on the card status to
+ * wait for the non-blocking BKOPS completion
+ * @work:  The completion polling work
+ *
+ * The on-going reading of the card status will prevent the card
+ * from getting into suspend while it is in the middle of
+ * performing BKOPS.
+ * Since the non blocking BKOPS can be interrupted by a fetched
+ * request we also check IF mmc_card_doing_bkops in each
+ * iteration.
+ */
+void mmc_bkops_completion_polling(struct work_struct *work)
+{
+   struct mmc_card *card = container_of(work, struct mmc_card,
+   bkops_info.poll_for_completion);
+   unsigned long timeout_jiffies = jiffies +
+   msecs_to_jiffies(BKOPS_COMPLETION_POLLING_TIMEOUT_MS);
+   u32 status;
+   int err;
+
+   /*
+* Wait for the BKOPs to complete. Keep reading the status to prevent
+* the host from getting into suspend
+*/
+   do {
+   mmc_claim_host(card-host);
+
+   if (!mmc_card_doing_bkops(card))
+   goto out;
+
+   err = pm_runtime_get_sync(card-host-parent);
+   if (err) {
+   pr_err(%s: error %d requesting status\n,
+  mmc_hostname(card-host), err);
+   goto out;
+   }
+
+   err = mmc_send_status(card, status);
+   if (err) {
+   pr_err(%s: error %d requesting status\n,
+  mmc_hostname(card-host), err);
+   goto put_sync;
+   }
+
+   /*
+* Some cards mishandle the status bits, so make sure to check
+* both the busy indication and the card state.
+*/
+   if ((status  R1_READY_FOR_DATA) 
+   (R1_CURRENT_STATE(status) != R1_STATE_PRG)) {
+   pr_debug(%s: %s: completed BKOPs, exit polling\n,
+mmc_hostname(card-host), __func__);
+   mmc_card_clr_doing_bkops(card);
+   card-bkops_info.started_delayed_bkops = false;
+   goto put_sync;
+   }
+
+   mmc_release_host(card-host);
+
+   /*
+* Sleep before checking the card status again to allow the
+* card to complete the BKOPs operation
+*/
+   msleep(BKOPS_COMPLETION_POLLING_INTERVAL_MS);
+   } while (time_before(jiffies, timeout_jiffies));
+
+   pr_err(%s: %s: exit polling due to timeout\n,
+  mmc_hostname(card-host), __func__);
+
+   pm_runtime_put_sync(card-host-parent);
+   return;
+
+put_sync:
+   pm_runtime_put_sync(card-host-parent);
+out:
+   mmc_release_host(card-host);
+}
+
+/**
  * mmc_start_idle_time_bkops() - check if a non urgent BKOPS is
  * needed
  * @work:  The idle time BKOPS work
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index f68624a..d9ad3ff 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1279,6 +1279,8 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
if (card-ext_csd.bkops_en) {
INIT_DELAYED_WORK(card-bkops_info.dw,
  mmc_start_idle_time_bkops);
+   INIT_WORK(card-bkops_info.poll_for_completion,
+ mmc_bkops_completion_polling

[PATCH v4 1/2] mmc: core: Add support for idle time BKOPS

2012-12-24 Thread Maya Erez
Devices have various maintenance operations they need to perform internally.
In order to reduce latencies during time critical operations like read
and write, it is better to execute maintenance operations in other
times - when the host is not being serviced. Such operations are called
Background operations (BKOPS).
The device notifies the status of the BKOPS need by updating BKOPS_STATUS
(EXT_CSD byte [246]).

According to the standard a host that supports BKOPS shall check the
status periodically and start background operations as needed, so that
the device has enough time for its maintenance operations.

This patch adds support for this periodic check of the BKOPS status.
Since foreground operations are of higher priority than background
operations the host will check the need for BKOPS when it is idle,
and in case of an incoming request the BKOPS operation will be
interrupted.

When the mmcqd thread is idle, a delayed work is created to check the
need for BKOPS. The time to start the delayed work can be set by the host
controller. If this time is not set, a default time is used.
If the card raised an exception, the need for urgent BKOPS (level 2/3)
will be checked immediately and if needed, the BKOPS will be performed
without waiting for the next idle time.

Signed-off-by: Maya Erez me...@codeaurora.org

diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 21056b9..64bbf75 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1473,9 +1473,15 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct 
request *req)
struct mmc_blk_data *md = mq-data;
struct mmc_card *card = md-queue.card;
 
-   if (req  !mq-mqrq_prev-req)
+   if (req  !mq-mqrq_prev-req) {
/* claim host only for the first request */
mmc_claim_host(card-host);
+   if (card-ext_csd.bkops_en 
+   card-bkops_info.started_delayed_bkops) {
+   card-bkops_info.started_delayed_bkops = false;
+   mmc_stop_bkops(card);
+   }
+   }
 
ret = mmc_blk_part_switch(card, md);
if (ret) {
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index fadf52e..9d0c96a 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -51,6 +51,7 @@ static int mmc_queue_thread(void *d)
 {
struct mmc_queue *mq = d;
struct request_queue *q = mq-queue;
+   struct mmc_card *card = mq-card;
 
current-flags |= PF_MEMALLOC;
 
@@ -83,6 +84,7 @@ static int mmc_queue_thread(void *d)
set_current_state(TASK_RUNNING);
break;
}
+   mmc_start_delayed_bkops(card);
up(mq-thread_sem);
schedule();
down(mq-thread_sem);
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index aaed768..36cef94 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -256,6 +256,33 @@ mmc_start_request(struct mmc_host *host, struct 
mmc_request *mrq)
 }
 
 /**
+ * mmc_start_delayed_bkops() - Start a delayed work to check for
+ *  the need of non urgent BKOPS
+ *
+ * @card: MMC card to start BKOPS on
+ */
+void mmc_start_delayed_bkops(struct mmc_card *card)
+{
+   if (!card || !card-ext_csd.bkops_en || mmc_card_doing_bkops(card))
+   return;
+
+   pr_debug(%s: %s: queueing delayed_bkops_work\n,
+mmc_hostname(card-host), __func__);
+
+   /*
+* cancel_delayed_bkops_work will prevent a race condition between
+* fetching a request by the mmcqd and the delayed work, in case
+* it was removed from the queue work but not started yet
+*/
+   card-bkops_info.cancel_delayed_work = false;
+   card-bkops_info.started_delayed_bkops = true;
+   queue_delayed_work(system_nrt_wq, card-bkops_info.dw,
+  msecs_to_jiffies(
+  card-bkops_info.delay_ms));
+}
+EXPORT_SYMBOL(mmc_start_delayed_bkops);
+
+/**
  * mmc_start_bkops - start BKOPS for supported cards
  * @card: MMC card to start BKOPS
  * @form_exception: A flag to indicate if this function was
@@ -272,25 +299,47 @@ void mmc_start_bkops(struct mmc_card *card, bool 
from_exception)
bool use_busy_signal;
 
BUG_ON(!card);
-
-   if (!card-ext_csd.bkops_en || mmc_card_doing_bkops(card))
+   if (!card-ext_csd.bkops_en)
return;
 
+   mmc_claim_host(card-host);
+
+   if ((card-bkops_info.cancel_delayed_work)  !from_exception) {
+   pr_debug(%s: %s: cancel_delayed_work was set, exit\n,
+mmc_hostname(card-host), __func__);
+   card-bkops_info.cancel_delayed_work = false;
+   goto out;
+   }
+
+   if (mmc_card_doing_bkops(card)) {
+   pr_debug(%s: %s: already doing

[PATCH v3] mmc: core: Add support for idle time BKOPS

2012-11-25 Thread Maya Erez
Devices have various maintenance operations they need to perform internally.
In order to reduce latencies during time critical operations like read
and write, it is better to execute maintenance operations in other
times - when the host is not being serviced. Such operations are called
Background operations (BKOPS).
The device notifies the status of the BKOPS need by updating BKOPS_STATUS
(EXT_CSD byte [246]).

According to the standard a host that supports BKOPS shall check the
status periodically and start background operations as needed, so that
the device has enough time for its maintenance operations.

This patch adds support for this periodic check of the BKOPS status.
Since foreground operations are of higher priority than background
operations the host will check the need for BKOPS when it is idle,
and in case of an incoming request the BKOPS operation will be
interrupted.

When the mmcqd thread is idle, a delayed work is created to check the
need for BKOPS. The time to start the delayed work is calculated based
on the host controller suspend timeout, in case it was set. If not, a
default time is used.
If BKOPS are required in level 1, which is non-blocking, there will be
polling of the card status to wait for the BKOPS completion and prevent
suspend that will interrupt the BKOPS.
If the card raised an exception, the need for urgent BKOPS (level 2/3)
will be checked immediately and if needed, the BKOPS will be performed
without waiting for the next idle time.

Signed-off-by: Maya Erez me...@codeaurora.org

---
This patch is based on the periodic BKOPS implementation in version 8 of 
support BKOPS feature for eMMC patch.
The patch was modified to answer the following issues:
- In order to prevent a race condition between going into suspend and starting 
BKOPS, 
  the suspend timeout of the host controller is taken into account in the 
determination of the start time 
  of the delayed work
- Since mmc_start_bkops is called from two contexts now, mmc_claim_host was 
moved to the beginning of the function
- Also, the check of doing_bkops should be protected when determining if an HPI 
is needed, for the same reason.

Changes in v3:
- Move the call to stop_bkops to block.c. 
  This allows us to remove the mmc_claim_host from inside the function and 
doesn't cause additional degradation 
  due to an unnecessary claim host operation

Changes in v2:
- Check the number of written / discarded sectors as the trigger for 
checking the BKOPS need.
- Code review fixes

---
 drivers/mmc/card/block.c |8 ++-
 drivers/mmc/card/queue.c |2 +
 drivers/mmc/core/core.c  |  178 +++---
 drivers/mmc/core/mmc.c   |   23 ++
 include/linux/mmc/card.h |   35 +
 include/linux/mmc/core.h |3 +
 6 files changed, 237 insertions(+), 12 deletions(-)

diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 172a768..40b4ae3 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1394,9 +1394,15 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct 
request *req)
struct mmc_blk_data *md = mq-data;
struct mmc_card *card = md-queue.card;
 
-   if (req  !mq-mqrq_prev-req)
+   if (req  !mq-mqrq_prev-req) {
/* claim host only for the first request */
mmc_claim_host(card-host);
+   if (card-ext_csd.bkops_en 
+   card-bkops_info.started_delayed_bkops) {
+   card-bkops_info.started_delayed_bkops = false;
+   mmc_stop_bkops(card);
+   }
+   }
 
ret = mmc_blk_part_switch(card, md);
if (ret) {
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index fadf52e..9d0c96a 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -51,6 +51,7 @@ static int mmc_queue_thread(void *d)
 {
struct mmc_queue *mq = d;
struct request_queue *q = mq-queue;
+   struct mmc_card *card = mq-card;
 
current-flags |= PF_MEMALLOC;
 
@@ -83,6 +84,7 @@ static int mmc_queue_thread(void *d)
set_current_state(TASK_RUNNING);
break;
}
+   mmc_start_delayed_bkops(card);
up(mq-thread_sem);
schedule();
down(mq-thread_sem);
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 06c42cf..72ae15b 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -253,9 +253,36 @@ mmc_start_request(struct mmc_host *host, struct 
mmc_request *mrq)
 }
 
 /**
+ * mmc_start_delayed_bkops() - Start a delayed work to check for
+ *  the need of non urgent BKOPS
+ *
+ * @card: MMC card to start BKOPS on
+ */
+void mmc_start_delayed_bkops(struct mmc_card *card)
+{
+   if (!card || !card-ext_csd.bkops_en || mmc_card_doing_bkops(card))
+   return

[PATCH v2] mmc: core: Add support for idle time BKOPS

2012-10-04 Thread Maya Erez
Devices have various maintenance operations they need to perform internally.
In order to reduce latencies during time critical operations like read
and write, it is better to execute maintenance operations in other
times - when the host is not being serviced. Such operations are called
Background operations (BKOPS).
The device notifies the status of the BKOPS need by updating BKOPS_STATUS
(EXT_CSD byte [246]).

According to the standard a host that supports BKOPS shall check the
status periodically and start background operations as needed, so that
the device has enough time for its maintenance operations.

This patch adds support for this periodic check of the BKOPS status.
Since foreground operations are of higher priority than background
operations the host will check the need for BKOPS when it is idle,
and in case of an incoming request the BKOPS operation will be
interrupted.

When the mmcqd thread is idle, a delayed work is created to check the
need for BKOPS. The time to start the delayed work is calculated based
on the host controller suspend timeout, in case it was set. If not, a
default time is used.
If BKOPS are required in level 1, which is non-blocking, there will be
polling of the card status to wait for the BKOPS completion and prevent
suspend that will interrupt the BKOPS.
If the card raised an exception, the need for urgent BKOPS (level 2/3)
will be checked immediately and if needed, the BKOPS will be performed
without waiting for the next idle time.

Signed-off-by: Maya Erez me...@codeaurora.org
Signed-off-by: Jaehoon Chung jh80.ch...@samsung.com
---
This patch is based on the periodic BKOPS implementation in version 8 of 
support BKOPS feature for eMMC patch.
The patch was modified to answer the following issues:
- In order to prevent a race condition between going into suspend and starting 
BKOPS, 
  the suspend timeout of the host controller is taken into account in 
determination of the start time 
  of the delayed work
- Since mmc_start_bkops is called from two contexts now, mmc_claim_host was 
moved to the beginning of the function
- Also, the check of doing_bkops should be protected when determining if an HPI 
is needed due to the same reason.
- Starting and canceling the delayed work in each idle caused degradation of 
iozone performance. Therefore,
  the delayed work is not started on each idle. The amount of sectors changed 
(written or discard) from the last 
  delayed work is the trigger for starting the delayed BKOPS work.
---
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 172a768..ed040d5 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -827,6 +827,9 @@ static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, 
struct request *req)
from = blk_rq_pos(req);
nr = blk_rq_sectors(req);
 
+   if (card-ext_csd.bkops_en)
+   card-bkops_info.sectors_changed += blk_rq_sectors(req);
+
if (mmc_can_discard(card))
arg = MMC_DISCARD_ARG;
else if (mmc_can_trim(card))
@@ -1268,6 +1271,9 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, 
struct request *rqc)
if (!rqc  !mq-mqrq_prev-req)
return 0;
 
+   if (rqc  (card-ext_csd.bkops_en)  (rq_data_dir(rqc) == WRITE))
+   card-bkops_info.sectors_changed += blk_rq_sectors(rqc);
+
do {
if (rqc) {
/*
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index e360a97..e96f5cf 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -51,6 +51,7 @@ static int mmc_queue_thread(void *d)
 {
struct mmc_queue *mq = d;
struct request_queue *q = mq-queue;
+   struct mmc_card *card = mq-card;
 
current-flags |= PF_MEMALLOC;
 
@@ -66,6 +67,17 @@ static int mmc_queue_thread(void *d)
spin_unlock_irq(q-queue_lock);
 
if (req || mq-mqrq_prev-req) {
+   /*
+* If this is the first request, BKOPs might be in
+* progress and needs to be stopped before issuing the
+* request
+*/
+   if (card-ext_csd.bkops_en 
+   card-bkops_info.started_delayed_bkops) {
+   card-bkops_info.started_delayed_bkops = false;
+   mmc_stop_bkops(card);
+   }
+
set_current_state(TASK_RUNNING);
mq-issue_fn(mq, req);
} else {
@@ -73,6 +85,7 @@ static int mmc_queue_thread(void *d)
set_current_state(TASK_RUNNING);
break;
}
+   mmc_start_delayed_bkops(card);
up(mq-thread_sem);
schedule();
down(mq-thread_sem);
diff --git a/drivers/mmc/core

[RFC/PATCH 0/2] Handling urgent and new request notifications

2012-09-20 Thread Maya Erez
From: Konstantin Dorfman kdorf...@codeaurora.org

   In this patch we propose a way to handle urgent requests in the MMC driver.
The decision whether a request is urgent or not is done within the I/O scheduler
at the block layer. In this patch we propose a way for the MMC driver to handle
urgent requests that are triggered while the driver issues packed commands.
That is, we propose a solution to stop an ongoing packed commands transaction in
order to serve an urgent request as well as stopping the packing process due to
an urgent request. This solution may be extended in the future to stop single
request transaction for serving urgent one.

In order to support urgent requests, a callback is added to the queue
structure, q-urgent_request_fn. It is used by the block layer to notify the
device driver that an urgent request just arrived and it is pending to be
fetched. The decision when the urgent request will be fetched is taken by the
device driver. That is, the device driver can decide to stop immediately any
ongoing transaction and fetching the urgent request, but it can also decide to
ignore the urgent request notification and continue the current transaction,
so the urgent request will be fetched like any other normal request when all
prior transactions are finished.

In additional we added a new block layer API, blk_reinsert_request.
If a device driver decides to stop an ongoing transaction in order to fetch and
serve an urgent request, it should keep aside the interrupted request and any
other un-served fetched requests in order to serve them later on.
This situation is problematic in the manner of creating a new scheduling policy
inside the device driver itself. For example, assuming a normal request was
interrupted by an urgent request and saved aside by the device driver for future
execution. Now, every time the device driver tries to execute the normal request,
it keeps getting urgent requests and the normal request is starved.
This situation may happen since the I/O scheduler is not aware of the
interrupted normal request that was saved aside. In order to solve such
scenarios, the device driver may re-insert the interrupted request as well as
any other fetched request that wasn't served yet, back into the I/O scheduler
by calling blk_reinsert_request. Using such a scheme, returns the scheduling
decisions to a single managing center  - the I/O scheduler. The new added RAW
I/O scheduler supports re-insert API.

An urgent request notification from the block layer may arrive to the MMC
driver in one of the following states: when the MMC driver is waiting to the
completion of an ongoing transaction, when the MMC driver prepares the next
request to be sent or when the MMC driver sleeps. If the MMC driver is in the
middle of preparing the next request or if it sleeps then the urgent
notification is not important since the queue_thread will fetch the urgent
request anyhow. However, if the queue_thread is blocked, waiting to the
completion of an ongoing transaction, then the urgent notification should wake
it up in order to stop the current transmission, fetch the urgent request and
re-insert the unfinished blocks of the interrupted request back to the
I/O scheduler. Moreover, if there is also a pending request (as part of the
async request scheme) then this request should also be re-inserted back to
the I/O scheduler.

Even when the queue_thread is woken by the urgent request notification,
it may decide not to stop the current transmission. For example, if the current
transmission is about to finish there is no point in triggering the whole stop
transmission sequence (which is not a zero time process). Nevertheless, the
driver should still re-insert the pending request, if there is any, to the I/O
scheduler.

This patch also resolves an issue in the async request mechanism.
Our tests found that, in most cases, a new request is inserted after the mmc layer
fetched NULL and was blocked on waiting for the previous request completion.
The new event new_packet_flag wakes mmc layer and allows it to immediately fetch
the new inserted request.

This patch depends on the following patches:
[PATCH v8 1/3] mmc: core: Add packed command feature of eMMC4.5
[PATCH v8 2/3] mmc: core: Support packed write command for eMMC4.5 device
[PATCH v5] mmc: block: Add write packing control
[RFC/PATCH 1/2] block: Add support for reinsert a dispatched req
[RFC/PATCH 2/2] block: Add API for urgent request handling

Please look into ROW I/O scheduler as example of implementing reinsert API:
[RFC/PATCH/RESEND 2/2] block: Adding ROW scheduling algorithm

Konstantin Dorfman (2):
  mmc: Urgent data request flow
  mmc: new packet notification

 drivers/mmc/card/block.c |  182 +++---
 drivers/mmc/card/queue.c |   67 +-
 drivers/mmc/card/queue.h |1 +
 drivers/mmc/core/core.c  |  327 +-
 include/linux/mmc/card.h |   15 ++
 include/linux/mmc/core.h |   22 +++
 

[RFC/PATCH 1/2] mmc: Urgent data request flow

2012-09-20 Thread Maya Erez
From: Konstantin Dorfman kdorf...@codeaurora.org

Urgent request notification stops currently running packed
transaction on bus. Finished part of the request will be
acknowledged to the block layer, remainder will be re-inserted
back to be fetched later, this will minimize urgent request latency.

Signed-off-by: Konstantin Dorfman kdorf...@codeaurora.org
---
 drivers/mmc/card/block.c |  171 +++---
 drivers/mmc/card/queue.c |   40 ++
 drivers/mmc/card/queue.h |1 +
 drivers/mmc/core/core.c  |  306 -
 include/linux/mmc/card.h |   14 ++
 include/linux/mmc/core.h |   21 +++
 include/linux/mmc/host.h |4 +
 include/linux/mmc/mmc.h  |1 +
 8 files changed, 534 insertions(+), 24 deletions(-)

diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index ea52ac2..e739c2f 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -119,17 +119,6 @@ struct mmc_blk_data {
 
 static DEFINE_MUTEX(open_lock);
 
-enum mmc_blk_status {
-   MMC_BLK_SUCCESS = 0,
-   MMC_BLK_PARTIAL,
-   MMC_BLK_CMD_ERR,
-   MMC_BLK_RETRY,
-   MMC_BLK_ABORT,
-   MMC_BLK_DATA_ERR,
-   MMC_BLK_ECC_ERR,
-   MMC_BLK_NOMEDIUM,
-};
-
 enum {
MMC_PACKED_N_IDX = -1,
MMC_PACKED_N_ZERO,
@@ -1133,6 +1122,113 @@ static int mmc_blk_err_check(struct mmc_card *card,
return MMC_BLK_SUCCESS;
 }
 
+/*
+ * mmc_blk_reinsert_request() - re-insert request back into block layer
+ * @areq:  request to re-insert.
+ *
+ * Request may be packed or single. When fails to reinsert request, -EIO will 
be
+ * reported for this request and rest of packed_list
+ */
+static void mmc_blk_reinsert_request(struct mmc_async_req *areq)
+{
+   struct request *prq;
+   int ret = 0;
+   struct mmc_queue_req *mq_rq;
+   struct request_queue *q;
+
+   mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
+   q = mq_rq-req-q;
+   if (mq_rq-packed_cmd != MMC_PACKED_NONE) {
+   while (!list_empty(mq_rq-packed_list)) {
+   /* return requests in reverse order */
+   prq = list_entry_rq(mq_rq-packed_list.prev);
+   list_del_init(prq-queuelist);
+   ret = blk_reinsert_request(q, prq);
+   if (ret)
+   goto reinsert_error;
+   }
+   } else {
+   ret = blk_reinsert_request(q, mq_rq-req);
+   if (ret) {
+   pr_err(%s: blk_reinsert_request() fails (%d),
+   __func__, ret);
+   blk_end_request(mq_rq-req, -EIO,
+   blk_rq_cur_bytes(mq_rq-req));
+   }
+   }
+   return;
+
+reinsert_error:
+   pr_err(%s: blk_reinsert_request() fails (%d), __func__, ret);
+   while (!list_empty(mq_rq-packed_list)) {
+   prq = list_entry_rq(mq_rq-packed_list.next);
+   list_del_init(prq-queuelist);
+   blk_end_request(prq, -EIO, blk_rq_cur_bytes(prq));
+   }
+}
+
+/*
+ * mmc_update_interrupted_request() - update details of the interrupted request
+ * @card:  the MMC card associated with the request.
+ * @areq:  interrupted async request.
+ *
+ * Get stopped request state from card and updates successfully done part
+ * of the request by setting packed_fail_idx. For not packed request
+ * packed_fail_idx unchanged (-1).
+ *
+ * Returns: 0 for success, MMC_BLK_ABORT otherwise
+ */
+static int mmc_update_interrupted_request(struct mmc_card *card,
+   struct mmc_async_req *areq)
+{
+   int ret = MMC_BLK_SUCCESS;
+   u8 *ext_csd;
+   int correctly_done;
+   struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
+   mmc_active);
+   struct request *prq;
+   u8 req_index = 0;
+
+   ext_csd = kmalloc(512, GFP_KERNEL);
+   if (!ext_csd) {
+   ret = MMC_BLK_ABORT;
+   goto exit;
+   }
+
+   mq_rq-packed_fail_idx = 0;
+
+   if (mq_rq-packed_cmd != MMC_PACKED_NONE) {
+   /* get correctly programmed sectors number from card */
+   ret = mmc_send_ext_csd(card, ext_csd);
+   if (ret) {
+   pr_err(%s: error %d sending ext_csd\n,
+   mq_rq-req-rq_disk-disk_name, ret);
+   ret = MMC_BLK_ABORT;
+   goto exit;
+   }
+   correctly_done = card-ext_csd.data_sector_size *
+   (ext_csd[EXT_CSD_CORRECTLY_PRG_SECTORS_NUM + 0]  0 |
+ext_csd[EXT_CSD_CORRECTLY_PRG_SECTORS_NUM + 1]  8 |
+ext_csd[EXT_CSD_CORRECTLY_PRG_SECTORS_NUM + 2]  16 |
+ext_csd[EXT_CSD_CORRECTLY_PRG_SECTORS_NUM + 3]  24);
+
+   

[RFC/PATCH 2/2] mmc: new packet notification

2012-09-20 Thread Maya Erez
From: Konstantin Dorfman kdorf...@codeaurora.org

Unblock waiting for current request running on the bus,
when new packet supplied by block layer. This improves async request
mechanism.
In most cases new request is inserted after mmc layer fetched NULL
and was blocked on waiting for the previous request completion.
The new event new_packet_flag wakes mmc layer and allow it to
immediately fetch the new inserted request.

Signed-off-by: Konstantin Dorfman kdorf...@codeaurora.org
---
 drivers/mmc/card/block.c |   11 +--
 drivers/mmc/card/queue.c |   29 +
 drivers/mmc/core/core.c  |   21 +
 include/linux/mmc/card.h |1 +
 include/linux/mmc/core.h |1 +
 5 files changed, 57 insertions(+), 6 deletions(-)

diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index e739c2f..8b996c3 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1820,8 +1820,11 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, 
struct request *rqc)
} else
areq = NULL;
areq = mmc_start_data_req(card-host, areq, (int *) status);
-   if (!areq)
+   if (!areq) {
+   if (status == MMC_BLK_NEW_PACKET)
+   return status;
return 0;
+   }
 
mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
brq = mq_rq-brq;
@@ -1830,6 +1833,9 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, 
struct request *rqc)
mmc_queue_bounce_post(mq_rq);
 
switch (status) {
+   case MMC_BLK_NEW_PACKET:
+   BUG_ON(1); /* should never get here */
+   return MMC_BLK_NEW_PACKET;
case MMC_BLK_URGENT:
if (mq_rq-packed_cmd != MMC_PACKED_NONE) {
/* complete successfully transmitted part */
@@ -2012,9 +2018,10 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct 
request *req)
}
 
 out:
-   if (!req)
+   if (!req  (ret != MMC_BLK_NEW_PACKET))
/* release host only when there are no more requests */
mmc_release_host(card-host);
+
return ret;
 }
 
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index b56ff33..0b9bac3 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -74,7 +74,9 @@ static int mmc_queue_thread(void *d)
 
if (req || mq-mqrq_prev-req) {
set_current_state(TASK_RUNNING);
-   mq-issue_fn(mq, req);
+   if (mq-issue_fn(mq, req) == MMC_BLK_NEW_PACKET) {
+   continue; /* fetch again */
+   }
} else {
if (kthread_should_stop()) {
set_current_state(TASK_RUNNING);
@@ -105,6 +107,7 @@ static int mmc_queue_thread(void *d)
  */
 static void mmc_request_fn(struct request_queue *q)
 {
+   unsigned long flags;
struct mmc_queue *mq = q-queuedata;
struct request *req;
 
@@ -115,9 +118,26 @@ static void mmc_request_fn(struct request_queue *q)
}
return;
}
-
-   if (!mq-mqrq_cur-req  !mq-mqrq_prev-req)
-   wake_up_process(mq-thread);
+   if (!mq-mqrq_cur-req  mq-mqrq_prev-req) {
+   /* new packet arrived, while mmc context waiting with no
+* async packet
+*/
+   mq-sync_data.skip_urgent_flag = false;
+   /* critical section with mmc_wait_data_req_done() */
+   spin_lock_irqsave(mq-sync_data.lock, flags);
+   /* do stop flow only when mmc thread is waiting for done */
+   if (mq-sync_data.waiting_flag 
+   !mq-sync_data.new_packet_flag 
+   !mq-sync_data.skip_urgent_flag) {
+
+   mq-sync_data.new_packet_flag = true;
+   wake_up_interruptible(mq-sync_data.wait);
+   }
+   spin_unlock_irqrestore(mq-sync_data.lock, flags);
+   } else {
+   if (!mq-mqrq_cur-req  !mq-mqrq_prev-req)
+   wake_up_process(mq-thread);
+   }
 }
 
 /*
@@ -307,6 +327,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card 
*card,
spin_lock_init(mq-sync_data.lock);
mq-sync_data.skip_urgent_flag = false;
mq-sync_data.urgent_flag = false;
+   mq-sync_data.new_packet_flag = false;
mq-sync_data.done_flag = false;
mq-sync_data.waiting_flag = false;
init_waitqueue_head(mq-sync_data.wait);
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 623f60b..5309990 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -414,18 +414,23 @@ static int 

[PATCH v1] mmc: core: Add support for idle time BKOPs

2012-09-19 Thread Maya Erez
When the mmcqd thread is idle, a delayed work is created to check the
need for BKOPs. The time to start the delayed work is calculated based
on the host controller suspend timeout, in case it was set. If not, a
default time is used.
If BKOPs is required in level 1, which is non-blocking, there will be
polling of the card status to wait for the BKOPs completion and prevent
suspend that will interrupt the BKOPs.
If the card raised an exception, the need for urgent BKOPs (level 2/3)
will be checked immediately and if needed, the BKOPs will be performed
without waiting for the next idle time.

Signed-off-by: Maya Erez me...@codeaurora.org
Signed-off-by: Jaehoon Chung jh80.ch...@samsung.com
---
This patch is based on the periodic BKOPs implementation in version 8 of 
support BKOPS feature for eMMC patch.
The patch was modified to answer the following issues:
- In order to prevent a race condition between going into suspend and starting 
BKOPs, 
  the suspend timeout of the host controller is taken into account in 
determination of the start time 
  of the delayed work
- Since mmc_start_bkops is called from two contexts now, mmc_claim_host was 
moved to the beginning of the function
- Also, the check of doing_bkops should be protected when determining if an HPI 
is needed due to the same reason.
- Starting and canceling the delayed work in each idle caused degradation of 
iozone performance. Therefore,
  the delayed work is not started on each idle. The amount of write bytes sent 
from the last delayed work 
  is the trigger for starting the delayed BKOPs work.
- To prevent degradation of iozone performance we also moved the call to 
mmc_claim_host outside of mmc_stop_bkops  
  and its release is done after issue_fn. This prevents an addition of a full 
claim and release, that is also done 
  in issue_fn for the first request after idle time.
---
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 172a768..f64e588 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1268,6 +1268,10 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, 
struct request *rqc)
if (!rqc  !mq-mqrq_prev-req)
return 0;
 
+   if (rqc  (rq_data_dir(rqc) == WRITE))
+   card-host-bkops_info.wr_bytes_since_last_bkops +=
+   blk_rq_bytes(rqc);
+
do {
if (rqc) {
/*
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index e360a97..65c198a 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -51,6 +51,8 @@ static int mmc_queue_thread(void *d)
 {
struct mmc_queue *mq = d;
struct request_queue *q = mq-queue;
+   bool release_host = false;
+   struct mmc_host *host = mq-card-host;
 
current-flags |= PF_MEMALLOC;
 
@@ -66,13 +68,31 @@ static int mmc_queue_thread(void *d)
spin_unlock_irq(q-queue_lock);
 
if (req || mq-mqrq_prev-req) {
+   /*
+* If this is the first request, BKOPs might be in
+* progress and needs to be stopped before issuing the
+* request
+* */
+   if (mq-card-ext_csd.bkops_en 
+   host-bkops_info.started_delayed_bkops) {
+   host-bkops_info.started_delayed_bkops = false;
+   release_host = true;
+   mmc_claim_host(mq-card-host);
+   mmc_stop_bkops(mq-card);
+   }
+
set_current_state(TASK_RUNNING);
mq-issue_fn(mq, req);
+   if (release_host) {
+   release_host = false;
+   mmc_release_host(mq-card-host);
+   }
} else {
if (kthread_should_stop()) {
set_current_state(TASK_RUNNING);
break;
}
+   mmc_start_delayed_bkops(mq-card);
up(mq-thread_sem);
schedule();
down(mq-thread_sem);
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 044cd01..3c72bc2 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -48,6 +48,18 @@
  */
 #define MMC_BKOPS_MAX_TIMEOUT  (4 * 60 * 1000) /* max time to wait in ms */
 
+/* Polling timeout and interval for waiting on non-blocking BKOPs completion */
+#define BKOPS_COMPLETION_POLLING_TIMEOUT 1 /* in ms */
+#define BKOPS_COMPLETION_POLLING_INTERVAL 1000 /* in ms */
+/*
+ * Since canceling the delayed work might have significant effect on the
+ * performance of small requests we won't queue the delayed work every time
+ * mmcqd thread is idle.
+ * The delayed work for idle BKOPs

[RFC/PATCH] mmc: core: Add support for idle time BKOPs

2012-08-05 Thread Maya Erez
When the mmcqd thread is idle, a delayed work is created to check the
need for BKOPs. The time to start the delayed work is calculated based
on the host controller suspend timeout, in case it was set. If not, a
default time is used.
If BKOPs is required in level 1, which is non-blocking, there will be
polling of the card status to wait for the BKOPs completion and prevent
suspend that will interrupt the BKOPs.
If the card raised an exception, the need for urgent BKOPs (level 2/3)
will be checked immediately and if needed, the BKOPs will be performed
without waiting for the next idle time.

Signed-off-by: Maya Erez me...@codeaurora.org
Signed-off-by: Jaehoon Chung jh80.ch...@samsung.com
---
This patch depends on the following patch:
  [PATCH v11] mmc: support BKOPS feature for eMMC

This patch is based on the periodic BKOPs implementation in version 8 of 
support BKOPS feature for eMMC patch.
The patch was modified to answer the following issues:
- In order to prevent a race condition between going into suspend and starting 
BKOPs, 
  the suspend timeout of the host controller is taken into account in 
determination of the start time 
  of the delayed work
- Since mmc_start_bkops is called from two contexts now, mmc_claim_host was 
moved to the beginning of the function
- Also, the check of doing_bkops should be protected when determining if an HPI 
is needed due to the same reason.
- Starting and canceling the delayed work in each idle caused degradation of 
iozone performance. Therefore,
  the delayed work is not started on each idle. Currently the number of issued 
requests from the last delayed work 
  is the trigger. We still investigate the best trigger for starting the 
delayed work.
- To prevent degradation of iozone performance we also moved the call to 
mmc_claim_host outside of mmc_stop_bkops  
  and its release is done after issue_fn. This prevents an addition of a full 
claim and release, that is also done 
  in issue_fn for the first request after idle time.
---
 drivers/mmc/card/block.c |3 +
 drivers/mmc/card/queue.c |   20 +
 drivers/mmc/core/core.c  |  188 +++---
 drivers/mmc/core/host.c  |   24 ++
 include/linux/mmc/card.h |3 +
 include/linux/mmc/core.h |3 +
 include/linux/mmc/host.h |   25 ++
 7 files changed, 256 insertions(+), 10 deletions(-)

diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index f1c84de..4519271 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1268,6 +1268,9 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, 
struct request *rqc)
if (!rqc  !mq-mqrq_prev-req)
return 0;
 
+   if (rqc)
+   card-idle_bkops_rw_reqs_nr++;
+
do {
if (rqc) {
/*
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index e360a97..c9e1cee 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -51,6 +51,7 @@ static int mmc_queue_thread(void *d)
 {
struct mmc_queue *mq = d;
struct request_queue *q = mq-queue;
+   bool release_host = false;
 
current-flags |= PF_MEMALLOC;
 
@@ -66,13 +67,32 @@ static int mmc_queue_thread(void *d)
spin_unlock_irq(q-queue_lock);
 
if (req || mq-mqrq_prev-req) {
+   /*
+* If this is the first request, BKOPs might be in
+* progress and needs to be stopped before issuing the
+* request
+* */
+   if (!mq-mqrq_prev-req 
+   mq-card-ext_csd.bkops_en 
+   mq-card-idle_bkops_rw_reqs_nr == 0) {
+   release_host = true;
+   mmc_claim_host(mq-card-host);
+   mmc_stop_bkops(mq-card);
+   }
+
set_current_state(TASK_RUNNING);
mq-issue_fn(mq, req);
+   if (release_host) {
+   release_host = false;
+   mmc_release_host(mq-card-host);
+   }
} else {
if (kthread_should_stop()) {
set_current_state(TASK_RUNNING);
break;
}
+
+   mmc_start_delayed_bkops(mq-card);
up(mq-thread_sem);
schedule();
down(mq-thread_sem);
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index ed2cc93..14830d4 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -46,6 +46,15 @@
  * operations the card has to peform
  */
 #define MMC_BKOPS_MAX_TIMEOUT  (4 * 60 * 1000) /* max time to wait in ms */
+/* Polling timeout and interval for waiting on non-blocking BKOPs

[PATCH v5 0/3] mmc: card: Add eMMC4.5 write packed commands unit-tests

2012-07-31 Thread Maya Erez
Expose the following packed commands tests:
- Test the write packed commands list preparation
- Simulate a returned error code
- Send an invalid packed command to the card

In order to test the eMMC4.5 features, the test-iosched is used.
The test scheduler allows testing a block device by dispatching
specific requests according to the test case and declare PASS/FAIL
according to the requests completion error code.
The write packed commands statistics are used to determine the success or 
failure of 
the packed commands tests, in addition to the generic request completion 
checking.

This patch is dependent on the following patches:
  [PATCH v8 1/3] mmc: core: Add packed command feature of eMMC4.5
  [PATCH v8 2/3] mmc: core: Support packed write command for eMMC4.5 device

Changes in v5:
- Code review fixes

Changes in v4:
- Add large sector size alignment to the packed commands stop reason 
statistics
- Fix error handling in test-iosched
- Code review fixes

Changes in v3:
- Fix the cancel round mechanism

Changes in v2:
- Add MMC write packing statistics and test-iosched scheduler patches as 
part of this patch.
- Code review fixes

Maya Erez (3):
  block: Add test-iosched scheduler
  mmc: block: Add MMC write packing statistics
  mmc: card: Add eMMC4.5 write packed commands unit-tests

 Documentation/block/test-iosched.txt |   39 +
 block/Kconfig.iosched|   11 +
 block/Makefile   |1 +
 block/blk-core.c |3 +-
 block/test-iosched.c | 1038 +++
 drivers/mmc/card/Kconfig |   11 +
 drivers/mmc/card/Makefile|1 +
 drivers/mmc/card/block.c |  139 +++-
 drivers/mmc/card/mmc_block_test.c| 1522 ++
 drivers/mmc/card/queue.h |   15 +
 drivers/mmc/core/bus.c   |4 +
 drivers/mmc/core/debugfs.c   |  176 
 drivers/mmc/core/mmc.c   |   18 +
 include/linux/mmc/card.h |   25 +
 include/linux/test-iosched.h |  233 ++
 15 files changed, 3221 insertions(+), 15 deletions(-)
 create mode 100644 Documentation/block/test-iosched.txt
 create mode 100644 block/test-iosched.c
 create mode 100644 drivers/mmc/card/mmc_block_test.c
 create mode 100644 include/linux/test-iosched.h

-- 
1.7.3.3
-- 
Sent by a consultant of the Qualcomm Innovation Center, Inc.
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum.
--
To unsubscribe from this list: send the line unsubscribe linux-mmc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v5 1/3] block: Add test-iosched scheduler

2012-07-31 Thread Maya Erez
The test scheduler allows testing a block device by dispatching
specific requests according to the test case and declare PASS/FAIL
according to the requests completion error code

Signed-off-by: Maya Erez me...@codeaurora.org
---
 Documentation/block/test-iosched.txt |   39 ++
 block/Kconfig.iosched|   11 +
 block/Makefile   |1 +
 block/blk-core.c |3 +-
 block/test-iosched.c | 1038 ++
 include/linux/test-iosched.h |  233 
 6 files changed, 1323 insertions(+), 2 deletions(-)
 create mode 100644 Documentation/block/test-iosched.txt
 create mode 100644 block/test-iosched.c
 create mode 100644 include/linux/test-iosched.h

diff --git a/Documentation/block/test-iosched.txt 
b/Documentation/block/test-iosched.txt
new file mode 100644
index 000..75d8134
--- /dev/null
+++ b/Documentation/block/test-iosched.txt
@@ -0,0 +1,39 @@
+Test IO scheduler
+==
+
+The test scheduler allows testing a block device by dispatching
+specific requests according to the test case and declare PASS/FAIL
+according to the requests completion error code.
+
+The test IO scheduler implements the no-op scheduler operations, and uses
+them in order to dispatch the non-test requests when no test is running.
+This will allow to keep a normal FS operation in parallel to the test
+capability.
+The test IO scheduler keeps two different queues, one for real-world requests
+(inserted by the FS) and the other for the test requests.
+The test IO scheduler chooses the queue for dispatch requests according to the
+test state (IDLE/RUNNING).
+
+the test IO scheduler is compiled by default as a dynamic module and enabled
+only if CONFIG_DEBUG_FS is defined.
+
+Each block device test utility that would like to use the test-iosched test
+services, should register as a blk_dev_test_type and supply an init and exit
+callbacks. Those callback are called upon selection (or removal) of the
+test-iosched as the active scheduler. From that point the block device test
+can start a test and supply its own callbacks for preparing, running, result
+checking and cleanup of the test.
+
+Each test is exposed via debugfs and can be triggered by writing to
+the debugfs file. In order to add a new test one should expose a new debugfs
+file for the new test.
+
+Selecting IO schedulers
+---
+Refer to Documentation/block/switching-sched.txt for information on
+selecting an io scheduler on a per-device basis.
+
+
+May 10 2012, maya Erez me...@codeaurora.org
+
+
diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
index 421bef9..af3d6a3 100644
--- a/block/Kconfig.iosched
+++ b/block/Kconfig.iosched
@@ -12,6 +12,17 @@ config IOSCHED_NOOP
  that do their own scheduling and require only minimal assistance from
  the kernel.
 
+config IOSCHED_TEST
+   tristate Test I/O scheduler
+   depends on DEBUG_FS
+   default m
+   ---help---
+ The test I/O scheduler is a duplicate of the noop scheduler with
+ addition of test utility.
+ It allows testing a block device by dispatching specific requests
+ according to the test case and declare PASS/FAIL according to the
+ requests completion error code.
+
 config IOSCHED_DEADLINE
tristate Deadline I/O scheduler
default y
diff --git a/block/Makefile b/block/Makefile
index 39b76ba..436b220 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_BLK_DEV_THROTTLING)  += blk-throttle.o
 obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o
 obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o
 obj-$(CONFIG_IOSCHED_CFQ)  += cfq-iosched.o
+obj-$(CONFIG_IOSCHED_TEST) += test-iosched.o
 
 obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o
 obj-$(CONFIG_BLK_DEV_INTEGRITY)+= blk-integrity.o
diff --git a/block/blk-core.c b/block/blk-core.c
index c3b17c3..6fe111e 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1085,8 +1085,6 @@ struct request *blk_get_request(struct request_queue *q, 
int rw, gfp_t gfp_mask)
 {
struct request *rq;
 
-   BUG_ON(rw != READ  rw != WRITE);
-
spin_lock_irq(q-queue_lock);
if (gfp_mask  __GFP_WAIT)
rq = get_request_wait(q, rw, NULL);
@@ -1419,6 +1417,7 @@ void init_request_from_bio(struct request *req, struct 
bio *bio)
req-ioprio = bio_prio(bio);
blk_rq_bio_prep(req-q, req, bio);
 }
+EXPORT_SYMBOL(init_request_from_bio);
 
 void blk_queue_bio(struct request_queue *q, struct bio *bio)
 {
diff --git a/block/test-iosched.c b/block/test-iosched.c
new file mode 100644
index 000..d3d10d3
--- /dev/null
+++ b/block/test-iosched.c
@@ -0,0 +1,1038 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version

[PATCH v5 2/3] mmc: block: Add MMC write packing statistics

2012-07-31 Thread Maya Erez
The write packing statistics are used for the packed commands unit tests
in order to determine test success or failure

Signed-off-by: Maya Erez me...@codeaurora.org
---
 drivers/mmc/card/block.c   |   57 ++-
 drivers/mmc/core/bus.c |4 +
 drivers/mmc/core/debugfs.c |  176 
 drivers/mmc/core/mmc.c |   18 +
 include/linux/mmc/card.h   |   25 ++
 5 files changed, 279 insertions(+), 1 deletions(-)

diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index c965f2b..92a6e25 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -63,6 +63,11 @@ MODULE_ALIAS(mmc:block);
(rq_data_dir(req) == WRITE))
 #define PACKED_CMD_VER 0x01
 #define PACKED_CMD_WR  0x02
+#define MMC_BLK_UPDATE_STOP_REASON(stats, reason)  \
+   do {\
+   if (stats-enabled) \
+   stats-pack_stop_reason[reason]++;  \
+   } while (0)
 
 static DEFINE_MUTEX(block_mutex);
 
@@ -1296,6 +1301,35 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req 
*mqrq,
mmc_queue_bounce_pre(mqrq);
 }
 
+struct mmc_wr_pack_stats *mmc_blk_get_packed_statistics(struct mmc_card *card)
+{
+   if (!card)
+   return NULL;
+
+   return card-wr_pack_stats;
+}
+EXPORT_SYMBOL(mmc_blk_get_packed_statistics);
+
+void mmc_blk_init_packed_statistics(struct mmc_card *card)
+{
+   int max_num_of_packed_reqs = 0;
+
+   if (!card || !card-wr_pack_stats.packing_events)
+   return;
+
+   max_num_of_packed_reqs = card-ext_csd.max_packed_writes;
+
+   spin_lock(card-wr_pack_stats.lock);
+   memset(card-wr_pack_stats.packing_events, 0,
+   (max_num_of_packed_reqs + 1) *
+  sizeof(*card-wr_pack_stats.packing_events));
+   memset(card-wr_pack_stats.pack_stop_reason, 0,
+   sizeof(card-wr_pack_stats.pack_stop_reason));
+   card-wr_pack_stats.enabled = true;
+   spin_unlock(card-wr_pack_stats.lock);
+}
+EXPORT_SYMBOL(mmc_blk_init_packed_statistics);
+
 static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
 {
struct request_queue *q = mq-queue;
@@ -1308,6 +1342,7 @@ static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, 
struct request *req)
u8 put_back = 0;
u8 max_packed_rw = 0;
u8 reqs = 0;
+   struct mmc_wr_pack_stats *stats = card-wr_pack_stats;
 
mmc_blk_clear_packed(mq-mqrq_cur);
 
@@ -1345,26 +1380,33 @@ static u8 mmc_blk_prep_packed_list(struct mmc_queue 
*mq, struct request *req)
phys_segments++;
}
 
+   spin_lock(stats-lock);
+
while (reqs  max_packed_rw - 1) {
spin_lock_irq(q-queue_lock);
next = blk_fetch_request(q);
spin_unlock_irq(q-queue_lock);
-   if (!next)
+   if (!next) {
+   MMC_BLK_UPDATE_STOP_REASON(stats, EMPTY_QUEUE);
break;
+   }
 
if (mmc_large_sec(card) 
!IS_ALIGNED(blk_rq_sectors(next), 8)) {
+   MMC_BLK_UPDATE_STOP_REASON(stats, LARGE_SEC_ALIGN);
put_back = 1;
break;
}
 
if (next-cmd_flags  REQ_DISCARD ||
next-cmd_flags  REQ_FLUSH) {
+   MMC_BLK_UPDATE_STOP_REASON(stats, FLUSH_OR_DISCARD);
put_back = 1;
break;
}
 
if (rq_data_dir(cur) != rq_data_dir(next)) {
+   MMC_BLK_UPDATE_STOP_REASON(stats, WRONG_DATA_DIR);
put_back = 1;
break;
}
@@ -1372,18 +1414,22 @@ static u8 mmc_blk_prep_packed_list(struct mmc_queue 
*mq, struct request *req)
if (mmc_req_rel_wr(next) 
(md-flags  MMC_BLK_REL_WR) 
!en_rel_wr) {
+   MMC_BLK_UPDATE_STOP_REASON(stats, REL_WRITE);
put_back = 1;
break;
}
 
req_sectors += blk_rq_sectors(next);
if (req_sectors  max_blk_count) {
+   if (stats-enabled)
+   stats-pack_stop_reason[EXCEEDS_SECTORS]++;
put_back = 1;
break;
}
 
phys_segments +=  next-nr_phys_segments;
if (phys_segments  max_phys_segs) {
+   MMC_BLK_UPDATE_STOP_REASON(stats, EXCEEDS_SEGMENTS);
put_back = 1;
break;
}
@@ -1399,6 +1445,15 @@ static u8 mmc_blk_prep_packed_list

[PATCH v5 3/3] mmc: card: Add eMMC4.5 write packed commands unit-tests

2012-07-31 Thread Maya Erez
Expose the following packed commands tests:
- Test the write packed commands list preparation
- Simulate a returned error code
- Send an invalid packed command to the card

Signed-off-by: Lee Susman lsus...@codeaurora.org
Signed-off-by: Maya Erez me...@codeaurora.org
---
 drivers/mmc/card/Kconfig  |   11 +
 drivers/mmc/card/Makefile |1 +
 drivers/mmc/card/block.c  |   82 ++-
 drivers/mmc/card/mmc_block_test.c | 1522 +
 drivers/mmc/card/queue.h  |   15 +
 5 files changed, 1619 insertions(+), 12 deletions(-)
 create mode 100644 drivers/mmc/card/mmc_block_test.c

diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig
index 3b1f783..bbe8cac 100644
--- a/drivers/mmc/card/Kconfig
+++ b/drivers/mmc/card/Kconfig
@@ -67,3 +67,14 @@ config MMC_TEST
 
  This driver is only of interest to those developing or
  testing a host driver. Most people should say N here.
+
+config MMC_BLOCK_TEST
+   tristate MMC block test
+   depends on MMC_BLOCK  IOSCHED_TEST
+   default m
+   help
+ MMC block test can be used with test iosched to test the MMC block
+ device.
+ Currently used to test eMMC 4.5 features (packed commands, sanitize,
+ BKOPs).
+
diff --git a/drivers/mmc/card/Makefile b/drivers/mmc/card/Makefile
index c73b406..d55107f 100644
--- a/drivers/mmc/card/Makefile
+++ b/drivers/mmc/card/Makefile
@@ -8,3 +8,4 @@ obj-$(CONFIG_MMC_TEST)  += mmc_test.o
 
 obj-$(CONFIG_SDIO_UART)+= sdio_uart.o
 
+obj-$(CONFIG_MMC_BLOCK_TEST)   += mmc_block_test.o
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 92a6e25..e4239cc 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -123,17 +123,6 @@ struct mmc_blk_data {
 
 static DEFINE_MUTEX(open_lock);
 
-enum mmc_blk_status {
-   MMC_BLK_SUCCESS = 0,
-   MMC_BLK_PARTIAL,
-   MMC_BLK_CMD_ERR,
-   MMC_BLK_RETRY,
-   MMC_BLK_ABORT,
-   MMC_BLK_DATA_ERR,
-   MMC_BLK_ECC_ERR,
-   MMC_BLK_NOMEDIUM,
-};
-
 enum {
MMC_PACKED_N_IDX = -1,
MMC_PACKED_N_ZERO,
@@ -1330,6 +1319,64 @@ void mmc_blk_init_packed_statistics(struct mmc_card 
*card)
 }
 EXPORT_SYMBOL(mmc_blk_init_packed_statistics);
 
+void print_mmc_packing_stats(struct mmc_card *card)
+{
+   int i;
+   int max_num_of_packed_reqs = 0;
+
+   if ((!card) || (!card-wr_pack_stats.packing_events))
+   return;
+
+   max_num_of_packed_reqs = card-ext_csd.max_packed_writes;
+
+   spin_lock(card-wr_pack_stats.lock);
+
+   pr_info(%s: write packing statistics:\n,
+   mmc_hostname(card-host));
+
+   for (i = 1 ; i = max_num_of_packed_reqs ; ++i) {
+   if (card-wr_pack_stats.packing_events[i] != 0)
+   pr_info(%s: Packed %d reqs - %d times\n,
+   mmc_hostname(card-host), i,
+   card-wr_pack_stats.packing_events[i]);
+   }
+
+   pr_info(%s: stopped packing due to the following reasons:\n,
+   mmc_hostname(card-host));
+
+   if (card-wr_pack_stats.pack_stop_reason[EXCEEDS_SEGMENTS])
+   pr_info(%s: %d times: exceedmax num of segments\n,
+   mmc_hostname(card-host),
+   card-wr_pack_stats.pack_stop_reason[EXCEEDS_SEGMENTS]);
+   if (card-wr_pack_stats.pack_stop_reason[EXCEEDS_SECTORS])
+   pr_info(%s: %d times: exceeding the max num of sectors\n,
+   mmc_hostname(card-host),
+   card-wr_pack_stats.pack_stop_reason[EXCEEDS_SECTORS]);
+   if (card-wr_pack_stats.pack_stop_reason[WRONG_DATA_DIR])
+   pr_info(%s: %d times: wrong data direction\n,
+   mmc_hostname(card-host),
+   card-wr_pack_stats.pack_stop_reason[WRONG_DATA_DIR]);
+   if (card-wr_pack_stats.pack_stop_reason[FLUSH_OR_DISCARD])
+   pr_info(%s: %d times: flush or discard\n,
+   mmc_hostname(card-host),
+   card-wr_pack_stats.pack_stop_reason[FLUSH_OR_DISCARD]);
+   if (card-wr_pack_stats.pack_stop_reason[EMPTY_QUEUE])
+   pr_info(%s: %d times: empty queue\n,
+   mmc_hostname(card-host),
+   card-wr_pack_stats.pack_stop_reason[EMPTY_QUEUE]);
+   if (card-wr_pack_stats.pack_stop_reason[REL_WRITE])
+   pr_info(%s: %d times: rel write\n,
+   mmc_hostname(card-host),
+   card-wr_pack_stats.pack_stop_reason[REL_WRITE]);
+   if (card-wr_pack_stats.pack_stop_reason[THRESHOLD])
+   pr_info(%s: %d times: Threshold\n,
+   mmc_hostname(card-host),
+   card-wr_pack_stats.pack_stop_reason[THRESHOLD]);
+
+   spin_unlock(card-wr_pack_stats.lock);
+}
+EXPORT_SYMBOL(print_mmc_packing_stats);
+
 static u8

[PATCH v5] mmc: block: Add write packing control

2012-07-15 Thread Maya Erez
The write packing control will ensure that read requests latency is
not increased due to long write packed commands.

The trigger for enabling the write packing is managing to pack several
write requests. The number of potential packed requests that will trigger
the packing can be configured via sysfs by writing the required value to:
/sys/block/block_dev_name/num_wr_reqs_to_start_packing.
The trigger for disabling the write packing is fetching a read request.

Signed-off-by: Maya Erez me...@codeaurora.org
---
Our experiments showed that the write packing causes degradation of the read
throughput, in parallel read and write operations.
Since the read latency is critical for user experience we added a write packing 
control
mechanism that disables the write packing in case of read requests.
This will ensure that read requests latency is not increased due to long write 
packed commands.

The trigger for enabling the write packing is managing to pack several write 
requests.
The number of potential packed requests that will trigger the packing can be 
configured via sysfs.
The trigger for disabling the write packing is a fetch of a read request.

Changes in v5:
- Revert v4 changes
- fix the device attribute removal in case of failure of device_create_file

Changes in v4:
- Move MMC specific attributes to mmc sub-directory

Changes in v3:
- Fix the settings of num_of_potential_packed_wr_reqs

Changes in v2:
- Move the attribute for setting the packing enabling trigger to the block 
device
- Add documentation of the new attribute
---
 Documentation/mmc/mmc-dev-attrs.txt |   17 ++
 drivers/mmc/card/block.c|  104 +++
 drivers/mmc/card/queue.c|8 +++
 drivers/mmc/card/queue.h|3 +
 include/linux/mmc/host.h|1 +
 5 files changed, 133 insertions(+), 0 deletions(-)

diff --git a/Documentation/mmc/mmc-dev-attrs.txt 
b/Documentation/mmc/mmc-dev-attrs.txt
index 22ae844..08f7312 100644
--- a/Documentation/mmc/mmc-dev-attrs.txt
+++ b/Documentation/mmc/mmc-dev-attrs.txt
@@ -8,6 +8,23 @@ The following attributes are read/write.
 
force_roEnforce read-only access even if write protect 
switch is off.
 
+   num_wr_reqs_to_start_packingThis attribute is used to determine
+   the trigger for activating the write packing, in case the write
+   packing control feature is enabled.
+
+   When the MMC manages to reach a point where num_wr_reqs_to_start_packing
+   write requests could be packed, it enables the write packing feature.
+   This allows us to start the write packing only when it is beneficial
+   and has minimum affect on the read latency.
+
+   The number of potential packed requests that will trigger the packing
+   can be configured via sysfs by writing the required value to:
+   /sys/block/block_dev_name/num_wr_reqs_to_start_packing.
+
+   The default value of num_wr_reqs_to_start_packing was determined by
+   running parallel lmdd write and lmdd read operations and calculating
+   the max number of packed writes requests.
+
 SD and MMC Device Attributes
 
 
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 4ba0f09..73e26f3 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -114,6 +114,7 @@ struct mmc_blk_data {
struct device_attribute force_ro;
struct device_attribute power_ro_lock;
int area_type;
+   struct device_attribute num_wr_reqs_to_start_packing;
 };
 
 static DEFINE_MUTEX(open_lock);
@@ -281,6 +282,38 @@ out:
return ret;
 }
 
+static ssize_t
+num_wr_reqs_to_start_packing_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+   struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+   int num_wr_reqs_to_start_packing;
+   int ret;
+
+   num_wr_reqs_to_start_packing = md-queue.num_wr_reqs_to_start_packing;
+
+   ret = snprintf(buf, PAGE_SIZE, %d\n, num_wr_reqs_to_start_packing);
+
+   mmc_blk_put(md);
+   return ret;
+}
+
+static ssize_t
+num_wr_reqs_to_start_packing_store(struct device *dev,
+struct device_attribute *attr,
+const char *buf, size_t count)
+{
+   int value;
+   struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+
+   sscanf(buf, %d, value);
+   if (value = 0)
+   md-queue.num_wr_reqs_to_start_packing = value;
+
+   mmc_blk_put(md);
+   return count;
+}
+
 static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
 {
struct mmc_blk_data *md = mmc_blk_get(bdev-bd_disk);
@@ -1296,6 +1329,49 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req 
*mqrq,
mmc_queue_bounce_pre(mqrq);
 }
 
+static void mmc_blk_write_packing_control(struct mmc_queue *mq

[PATCH v4 0/3] mmc: card: Add eMMC4.5 write packed commands unit-tests

2012-07-15 Thread Maya Erez
Expose the following packed commands tests:
- Test the write packed commands list preparation
- Simulate a returned error code
- Send an invalid packed command to the card

In order to test the eMMC4.5 features, the test-iosched is used.
The test scheduler allows testing a block device by dispatching
specific requests according to the test case and declare PASS/FAIL
according to the requests completion error code.
The write packed commands statistics are used to determine the success or 
failure of 
the packed commands tests, in addition to the generic request completion 
checking.


Changes in v4:
- Add large sector size alignment to the packed commands stop reason 
statistics
- Fix error handling in test-iosched
- Code review fixes

Changes in v3:
- Fix the cancel round mechanism

Changes in v2:
- Add MMC write packing statistics and test-iosched scheduler patches as 
part of this patch.
- Code review fixes


Maya Erez (3):
  mmc: block: Add MMC write packing statistics
  block: Add test-iosched scheduler
  mmc: card: Add eMMC4.5 write packed commands unit-tests

 Documentation/block/test-iosched.txt |   39 +
 block/Kconfig.iosched|   11 +
 block/Makefile   |1 +
 block/blk-core.c |3 +-
 block/test-iosched.c | 1036 +++
 drivers/mmc/card/Kconfig |   11 +
 drivers/mmc/card/Makefile|1 +
 drivers/mmc/card/block.c |  139 +++-
 drivers/mmc/card/mmc_block_test.c| 1522 ++
 drivers/mmc/card/queue.h |   15 +
 drivers/mmc/core/bus.c   |4 +
 drivers/mmc/core/debugfs.c   |  176 
 drivers/mmc/core/mmc.c   |   18 +
 include/linux/mmc/card.h |   25 +
 include/linux/test-iosched.h |  233 ++
 15 files changed, 3219 insertions(+), 15 deletions(-)
 create mode 100644 Documentation/block/test-iosched.txt
 create mode 100644 block/test-iosched.c
 create mode 100644 drivers/mmc/card/mmc_block_test.c
 create mode 100644 include/linux/test-iosched.h

-- 
1.7.3.3
-- 
Sent by a consultant of the Qualcomm Innovation Center, Inc.
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum.

--
To unsubscribe from this list: send the line unsubscribe linux-mmc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v4 1/3] mmc: block: Add MMC write packing statistics

2012-07-15 Thread Maya Erez
The write packing statistics are used for the packed commands unit tests
in order to determine test success or failure

Signed-off-by: Maya Erez me...@codeaurora.org
---
 drivers/mmc/card/block.c   |   57 ++-
 drivers/mmc/core/bus.c |4 +
 drivers/mmc/core/debugfs.c |  176 
 drivers/mmc/core/mmc.c |   18 +
 include/linux/mmc/card.h   |   25 ++
 5 files changed, 279 insertions(+), 1 deletions(-)

diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 4ba0f09..9754460 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -63,6 +63,11 @@ MODULE_ALIAS(mmc:block);
(rq_data_dir(req) == WRITE))
 #define PACKED_CMD_VER 0x01
 #define PACKED_CMD_WR  0x02
+#define MMC_BLK_UPDATE_STOP_REASON(stats, reason)  \
+   do {\
+   if (stats-enabled) \
+   stats-pack_stop_reason[reason]++;  \
+   } while (0)
 
 static DEFINE_MUTEX(block_mutex);
 
@@ -1296,6 +1301,35 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req 
*mqrq,
mmc_queue_bounce_pre(mqrq);
 }
 
+struct mmc_wr_pack_stats *mmc_blk_get_packed_statistics(struct mmc_card *card)
+{
+   if (!card)
+   return NULL;
+
+   return card-wr_pack_stats;
+}
+EXPORT_SYMBOL(mmc_blk_get_packed_statistics);
+
+void mmc_blk_init_packed_statistics(struct mmc_card *card)
+{
+   int max_num_of_packed_reqs = 0;
+
+   if (!card || !card-wr_pack_stats.packing_events)
+   return;
+
+   max_num_of_packed_reqs = card-ext_csd.max_packed_writes;
+
+   spin_lock(card-wr_pack_stats.lock);
+   memset(card-wr_pack_stats.packing_events, 0,
+   (max_num_of_packed_reqs + 1) *
+  sizeof(*card-wr_pack_stats.packing_events));
+   memset(card-wr_pack_stats.pack_stop_reason, 0,
+   sizeof(card-wr_pack_stats.pack_stop_reason));
+   card-wr_pack_stats.enabled = true;
+   spin_unlock(card-wr_pack_stats.lock);
+}
+EXPORT_SYMBOL(mmc_blk_init_packed_statistics);
+
 static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
 {
struct request_queue *q = mq-queue;
@@ -1308,6 +1342,7 @@ static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, 
struct request *req)
u8 put_back = 0;
u8 max_packed_rw = 0;
u8 reqs = 0;
+   struct mmc_wr_pack_stats *stats = card-wr_pack_stats;
 
mmc_blk_clear_packed(mq-mqrq_cur);
 
@@ -1341,26 +1376,33 @@ static u8 mmc_blk_prep_packed_list(struct mmc_queue 
*mq, struct request *req)
phys_segments++;
}
 
+   spin_lock(stats-lock);
+
while (reqs  max_packed_rw - 1) {
spin_lock_irq(q-queue_lock);
next = blk_fetch_request(q);
spin_unlock_irq(q-queue_lock);
-   if (!next)
+   if (!next) {
+   MMC_BLK_UPDATE_STOP_REASON(stats, EMPTY_QUEUE);
break;
+   }
 
if (mmc_large_sec(card) 
!IS_ALIGNED(blk_rq_sectors(next), 8)) {
+   MMC_BLK_UPDATE_STOP_REASON(stats, LARGE_SEC_ALIGN);
put_back = 1;
break;
}
 
if (next-cmd_flags  REQ_DISCARD ||
next-cmd_flags  REQ_FLUSH) {
+   MMC_BLK_UPDATE_STOP_REASON(stats, FLUSH_OR_DISCARD);
put_back = 1;
break;
}
 
if (rq_data_dir(cur) != rq_data_dir(next)) {
+   MMC_BLK_UPDATE_STOP_REASON(stats, WRONG_DATA_DIR);
put_back = 1;
break;
}
@@ -1368,18 +1410,22 @@ static u8 mmc_blk_prep_packed_list(struct mmc_queue 
*mq, struct request *req)
if (mmc_req_rel_wr(next) 
(md-flags  MMC_BLK_REL_WR) 
!en_rel_wr) {
+   MMC_BLK_UPDATE_STOP_REASON(stats, REL_WRITE);
put_back = 1;
break;
}
 
req_sectors += blk_rq_sectors(next);
if (req_sectors  max_blk_count) {
+   if (stats-enabled)
+   stats-pack_stop_reason[EXCEEDS_SECTORS]++;
put_back = 1;
break;
}
 
phys_segments +=  next-nr_phys_segments;
if (phys_segments  max_phys_segs) {
+   MMC_BLK_UPDATE_STOP_REASON(stats, EXCEEDS_SEGMENTS);
put_back = 1;
break;
}
@@ -1395,6 +1441,15 @@ static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, 
struct request *req

[PATCH v4 3/3] mmc: card: Add eMMC4.5 write packed commands unit-tests

2012-07-15 Thread Maya Erez
Expose the following packed commands tests:
- Test the write packed commands list preparation
- Simulate a returned error code
- Send an invalid packed command to the card

Signed-off-by: Lee Susman lsus...@codeaurora.org
Signed-off-by: Maya Erez me...@codeaurora.org
---
 drivers/mmc/card/Kconfig  |   11 +
 drivers/mmc/card/Makefile |1 +
 drivers/mmc/card/block.c  |   82 ++-
 drivers/mmc/card/mmc_block_test.c | 1522 +
 drivers/mmc/card/queue.h  |   15 +
 5 files changed, 1619 insertions(+), 12 deletions(-)
 create mode 100644 drivers/mmc/card/mmc_block_test.c

diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig
index 3b1f783..bbe8cac 100644
--- a/drivers/mmc/card/Kconfig
+++ b/drivers/mmc/card/Kconfig
@@ -67,3 +67,14 @@ config MMC_TEST
 
  This driver is only of interest to those developing or
  testing a host driver. Most people should say N here.
+
+config MMC_BLOCK_TEST
+   tristate MMC block test
+   depends on MMC_BLOCK  IOSCHED_TEST
+   default m
+   help
+ MMC block test can be used with test iosched to test the MMC block
+ device.
+ Currently used to test eMMC 4.5 features (packed commands, sanitize,
+ BKOPs).
+
diff --git a/drivers/mmc/card/Makefile b/drivers/mmc/card/Makefile
index c73b406..d55107f 100644
--- a/drivers/mmc/card/Makefile
+++ b/drivers/mmc/card/Makefile
@@ -8,3 +8,4 @@ obj-$(CONFIG_MMC_TEST)  += mmc_test.o
 
 obj-$(CONFIG_SDIO_UART)+= sdio_uart.o
 
+obj-$(CONFIG_MMC_BLOCK_TEST)   += mmc_block_test.o
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 9754460..f25be77 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -123,17 +123,6 @@ struct mmc_blk_data {
 
 static DEFINE_MUTEX(open_lock);
 
-enum mmc_blk_status {
-   MMC_BLK_SUCCESS = 0,
-   MMC_BLK_PARTIAL,
-   MMC_BLK_CMD_ERR,
-   MMC_BLK_RETRY,
-   MMC_BLK_ABORT,
-   MMC_BLK_DATA_ERR,
-   MMC_BLK_ECC_ERR,
-   MMC_BLK_NOMEDIUM,
-};
-
 enum {
MMC_PACKED_N_IDX = -1,
MMC_PACKED_N_ZERO,
@@ -1330,6 +1319,64 @@ void mmc_blk_init_packed_statistics(struct mmc_card 
*card)
 }
 EXPORT_SYMBOL(mmc_blk_init_packed_statistics);
 
+void print_mmc_packing_stats(struct mmc_card *card)
+{
+   int i;
+   int max_num_of_packed_reqs = 0;
+
+   if ((!card) || (!card-wr_pack_stats.packing_events))
+   return;
+
+   max_num_of_packed_reqs = card-ext_csd.max_packed_writes;
+
+   spin_lock(card-wr_pack_stats.lock);
+
+   pr_info(%s: write packing statistics:\n,
+   mmc_hostname(card-host));
+
+   for (i = 1 ; i = max_num_of_packed_reqs ; ++i) {
+   if (card-wr_pack_stats.packing_events[i] != 0)
+   pr_info(%s: Packed %d reqs - %d times\n,
+   mmc_hostname(card-host), i,
+   card-wr_pack_stats.packing_events[i]);
+   }
+
+   pr_info(%s: stopped packing due to the following reasons:\n,
+   mmc_hostname(card-host));
+
+   if (card-wr_pack_stats.pack_stop_reason[EXCEEDS_SEGMENTS])
+   pr_info(%s: %d times: exceedmax num of segments\n,
+   mmc_hostname(card-host),
+   card-wr_pack_stats.pack_stop_reason[EXCEEDS_SEGMENTS]);
+   if (card-wr_pack_stats.pack_stop_reason[EXCEEDS_SECTORS])
+   pr_info(%s: %d times: exceeding the max num of sectors\n,
+   mmc_hostname(card-host),
+   card-wr_pack_stats.pack_stop_reason[EXCEEDS_SECTORS]);
+   if (card-wr_pack_stats.pack_stop_reason[WRONG_DATA_DIR])
+   pr_info(%s: %d times: wrong data direction\n,
+   mmc_hostname(card-host),
+   card-wr_pack_stats.pack_stop_reason[WRONG_DATA_DIR]);
+   if (card-wr_pack_stats.pack_stop_reason[FLUSH_OR_DISCARD])
+   pr_info(%s: %d times: flush or discard\n,
+   mmc_hostname(card-host),
+   card-wr_pack_stats.pack_stop_reason[FLUSH_OR_DISCARD]);
+   if (card-wr_pack_stats.pack_stop_reason[EMPTY_QUEUE])
+   pr_info(%s: %d times: empty queue\n,
+   mmc_hostname(card-host),
+   card-wr_pack_stats.pack_stop_reason[EMPTY_QUEUE]);
+   if (card-wr_pack_stats.pack_stop_reason[REL_WRITE])
+   pr_info(%s: %d times: rel write\n,
+   mmc_hostname(card-host),
+   card-wr_pack_stats.pack_stop_reason[REL_WRITE]);
+   if (card-wr_pack_stats.pack_stop_reason[THRESHOLD])
+   pr_info(%s: %d times: Threshold\n,
+   mmc_hostname(card-host),
+   card-wr_pack_stats.pack_stop_reason[THRESHOLD]);
+
+   spin_unlock(card-wr_pack_stats.lock);
+}
+EXPORT_SYMBOL(print_mmc_packing_stats);
+
 static u8

[PATCH RESEND v4 0/2] mmc: block: Add write packing control

2012-07-14 Thread Maya Erez
Our experiments showed that the write packing causes degradation of the read
throughput, in parallel read and write operations.
Since the read latency is critical for user experience we added a write packing 
control
mechanism that disables the write packing in case of read requests.
This will ensure that read requests latency is not increased due to long write 
packed commands.

The trigger for enabling the write packing is managing to pack several write 
requests.
The number of potential packed requests that will trigger the packing can be 
configured via sysfs.
The trigger for disabling the write packing is a fetch of a read request.

Changes in v4:
- Move MMC specific attributes to mmc sub-directory

Changes in v3:
- Fix the settings of num_of_potential_packed_wr_reqs

Changes in v2:
- Move the attribute for setting the packing enabling trigger to the block 
device
- Add documentation of the new attribute

Maya Erez (2):
  mmc: card: Move MMC specific attributes to mmc sub-directory
  mmc: block: Add write packing control

 Documentation/mmc/mmc-dev-attrs.txt |   17 
 drivers/mmc/card/block.c|  176 +--
 drivers/mmc/card/queue.c|8 ++
 drivers/mmc/card/queue.h|3 +
 include/linux/mmc/host.h|1 +
 5 files changed, 197 insertions(+), 8 deletions(-)

-- 
1.7.3.3
-- 
Sent by a consultant of the Qualcomm Innovation Center, Inc.
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum.
--
To unsubscribe from this list: send the line unsubscribe linux-mmc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH RESEND v4 2/2] mmc: block: Add write packing control

2012-07-14 Thread Maya Erez
The write packing control will ensure that read requests latency is
not increased due to long write packed commands.

The trigger for enabling the write packing is managing to pack several
write requests. The number of potential packed requests that will trigger
the packing can be configured via sysfs by writing the required value to:
/sys/block/block_dev_name/num_wr_reqs_to_start_packing.
The trigger for disabling the write packing is fetching a read request.

Signed-off-by: Maya Erez me...@codeaurora.org
---
 Documentation/mmc/mmc-dev-attrs.txt |   17 ++
 drivers/mmc/card/block.c|  105 ++-
 drivers/mmc/card/queue.c|8 +++
 drivers/mmc/card/queue.h|3 +
 include/linux/mmc/host.h|1 +
 5 files changed, 133 insertions(+), 1 deletions(-)

diff --git a/Documentation/mmc/mmc-dev-attrs.txt 
b/Documentation/mmc/mmc-dev-attrs.txt
index 22ae844..f4a48a8 100644
--- a/Documentation/mmc/mmc-dev-attrs.txt
+++ b/Documentation/mmc/mmc-dev-attrs.txt
@@ -8,6 +8,23 @@ The following attributes are read/write.
 
force_roEnforce read-only access even if write protect 
switch is off.
 
+   num_wr_reqs_to_start_packingThis attribute is used to determine
+   the trigger for activating the write packing, in case the write
+   packing control feature is enabled.
+
+   When the MMC manages to reach a point where num_wr_reqs_to_start_packing
+   write requests could be packed, it enables the write packing feature.
+   This allows us to start the write packing only when it is beneficial
+   and has minimum affect on the read latency.
+
+   The number of potential packed requests that will trigger the packing
+   can be configured via sysfs by writing the required value to:
+   /sys/block/block_dev_name/mmc/num_wr_reqs_to_start_packing.
+
+   The default value of num_wr_reqs_to_start_packing was determined by
+   running parallel lmdd write and lmdd read operations and calculating
+   the max number of packed writes requests.
+
 SD and MMC Device Attributes
 
 
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 2b8ad9e..6d63ce0 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -114,6 +114,7 @@ struct mmc_blk_data {
struct device_attribute force_ro;
struct device_attribute power_ro_lock;
int area_type;
+   struct device_attribute num_wr_reqs_to_start_packing;
 
struct kobject kobj;
struct kobj_type kobj_type;
@@ -329,6 +330,38 @@ out:
return ret;
 }
 
+static ssize_t
+num_wr_reqs_to_start_packing_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+   struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+   int num_wr_reqs_to_start_packing;
+   int ret;
+
+   num_wr_reqs_to_start_packing = md-queue.num_wr_reqs_to_start_packing;
+
+   ret = snprintf(buf, PAGE_SIZE, %d\n, num_wr_reqs_to_start_packing);
+
+   mmc_blk_put(md);
+   return ret;
+}
+
+static ssize_t
+num_wr_reqs_to_start_packing_store(struct device *dev,
+struct device_attribute *attr,
+const char *buf, size_t count)
+{
+   int value;
+   struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+
+   sscanf(buf, %d, value);
+   if (value = 0)
+   md-queue.num_wr_reqs_to_start_packing = value;
+
+   mmc_blk_put(md);
+   return count;
+}
+
 static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
 {
struct mmc_blk_data *md = mmc_blk_get(bdev-bd_disk);
@@ -1344,6 +1377,49 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req 
*mqrq,
mmc_queue_bounce_pre(mqrq);
 }
 
+static void mmc_blk_write_packing_control(struct mmc_queue *mq,
+ struct request *req)
+{
+   struct mmc_host *host = mq-card-host;
+   int data_dir;
+
+   if (!(host-caps2  MMC_CAP2_PACKED_WR))
+   return;
+
+   /*
+* In case the packing control is not supported by the host, it should
+* not have an effect on the write packing. Therefore we have to enable
+* the write packing
+*/
+   if (!(host-caps2  MMC_CAP2_PACKED_WR_CONTROL)) {
+   mq-wr_packing_enabled = true;
+   return;
+   }
+
+   if (!req || (req  (req-cmd_flags  REQ_FLUSH))) {
+   if (mq-num_of_potential_packed_wr_reqs 
+   mq-num_wr_reqs_to_start_packing)
+   mq-wr_packing_enabled = true;
+   mq-num_of_potential_packed_wr_reqs = 0;
+   return;
+   }
+
+   data_dir = rq_data_dir(req);
+
+   if (data_dir == READ) {
+   mq-num_of_potential_packed_wr_reqs = 0;
+   mq-wr_packing_enabled = false

[PATCH v4 0/1] mmc: block: Add write packing control

2012-07-02 Thread Maya Erez
Our experiments showed that the write packing causes degradation of the read
throughput, in parallel read and write operations.
Since the read latency is critical for user experience we added a write packing 
control
mechanism that disables the write packing in case of read requests.
This will ensure that read requests latency is not increased due to long write 
packed commands.

The trigger for enabling the write packing is managing to pack several write 
requests.
The number of potential packed requests that will trigger the packing can be 
configured via sysfs.
The trigger for disabling the write packing is a fetch of a read request.

this patch is dependant in the following patches:
  [PATCH v8 1/3] mmc: core: Add packed command feature of eMMC4.5
  [PATCH v8 2/3] mmc: core: Support packed write command for eMMC4.5 device

Changes in v4:
- Move MMC specific attributes to mmc sub-directory

Changes in v3:
- Fix the settings of num_of_potential_packed_wr_reqs

Changes in v2:
- Move the attribute for setting the packing enabling trigger to the block 
device
- Add documentation of the new attribute

Maya Erez (2):
  mmc: card: Move MMC specific attributes to mmc sub-directory
  mmc: block: Add write packing control

 Documentation/mmc/mmc-dev-attrs.txt |   17 
 drivers/mmc/card/block.c|  176 +--
 drivers/mmc/card/queue.c|8 ++
 drivers/mmc/card/queue.h|3 +
 include/linux/mmc/host.h|1 +
 5 files changed, 197 insertions(+), 8 deletions(-)

-- 
1.7.3.3
-- 
Sent by a consultant of the Qualcomm Innovation Center, Inc.
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum.
--
To unsubscribe from this list: send the line unsubscribe linux-mmc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v4 1/2] mmc: card: Move MMC specific attributes to mmc sub-directory

2012-07-02 Thread Maya Erez
Separate MMC specific attributes from general block device
attributes and move them from the /sys/block/BLOCK_DEV directory
to /sys/block/BLOCK_DEV/mmc directory

Signed-off-by: Maya Erez me...@codeaurora.org

diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index c965f2b..c23034d 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -114,6 +114,9 @@ struct mmc_blk_data {
struct device_attribute force_ro;
struct device_attribute power_ro_lock;
int area_type;
+
+   struct kobject kobj;
+   struct kobj_type kobj_type;
 };
 
 static DEFINE_MUTEX(open_lock);
@@ -185,6 +188,51 @@ static void mmc_blk_put(struct mmc_blk_data *md)
mutex_unlock(open_lock);
 }
 
+static ssize_t mmc_blk_attr_show(struct kobject *kobj, struct attribute *attr,
+   char *buf)
+{
+   struct device_attribute *dev_attr;
+   struct mmc_blk_data *md;
+   ssize_t ret;
+
+   dev_attr = container_of(attr, struct device_attribute, attr);
+   if (!dev_attr-show)
+   return -EIO;
+
+   md = container_of(kobj, struct mmc_blk_data, kobj);
+   if (!md || md-kobj != kobj)
+   return -EINVAL;
+
+   ret = dev_attr-show(disk_to_dev(md-disk), dev_attr, buf);
+
+   return ret;
+}
+
+static ssize_t mmc_blk_attr_store(struct kobject *kobj, struct attribute *attr,
+   const char *buf, size_t count)
+{
+   struct device_attribute *dev_attr;
+   struct mmc_blk_data *md;
+   ssize_t ret;
+
+   dev_attr = container_of(attr, struct device_attribute, attr);
+   if (!dev_attr-store)
+   return -EIO;
+
+   md = container_of(kobj, struct mmc_blk_data, kobj);
+   if (!md || md-kobj != kobj)
+   return -EINVAL;
+
+   ret = dev_attr-store(disk_to_dev(md-disk), dev_attr, buf, count);
+
+   return ret;
+}
+
+static const struct sysfs_ops mmc_blk_sysfs_ops = {
+   .show   = mmc_blk_attr_show,
+   .store  = mmc_blk_attr_store,
+};
+
 static ssize_t power_ro_lock_show(struct device *dev,
struct device_attribute *attr, char *buf)
 {
@@ -2004,14 +2052,15 @@ static void mmc_blk_remove_req(struct mmc_blk_data *md)
if (md) {
card = md-queue.card;
if (md-disk-flags  GENHD_FL_UP) {
-   device_remove_file(disk_to_dev(md-disk), 
md-force_ro);
+   sysfs_remove_file(md-kobj, md-force_ro.attr);
if ((md-area_type  MMC_BLK_DATA_AREA_BOOT) 
card-ext_csd.boot_ro_lockable)
-   device_remove_file(disk_to_dev(md-disk),
-   md-power_ro_lock);
+   sysfs_remove_file(md-kobj,
+   md-power_ro_lock.attr);
 
/* Stop new requests from getting into the queue */
del_gendisk(md-disk);
+   kobject_put(md-kobj);
}
 
/* Then flush out any already in there */
@@ -2040,12 +2089,19 @@ static int mmc_add_disk(struct mmc_blk_data *md)
struct mmc_card *card = md-queue.card;
 
add_disk(md-disk);
+
+   md-kobj_type.sysfs_ops = mmc_blk_sysfs_ops;
+   ret = kobject_init_and_add(md-kobj, md-kobj_type,
+   disk_to_dev(md-disk)-kobj, %s, mmc);
+   if (ret)
+   goto init_kobj_fail;
+
md-force_ro.show = force_ro_show;
md-force_ro.store = force_ro_store;
sysfs_attr_init(md-force_ro.attr);
md-force_ro.attr.name = force_ro;
md-force_ro.attr.mode = S_IRUGO | S_IWUSR;
-   ret = device_create_file(disk_to_dev(md-disk), md-force_ro);
+   ret = sysfs_create_file(md-kobj, md-force_ro.attr);
if (ret)
goto force_ro_fail;
 
@@ -2064,16 +2120,17 @@ static int mmc_add_disk(struct mmc_blk_data *md)
md-power_ro_lock.attr.mode = mode;
md-power_ro_lock.attr.name =
ro_lock_until_next_power_on;
-   ret = device_create_file(disk_to_dev(md-disk),
-   md-power_ro_lock);
+   ret = sysfs_create_file(md-kobj, md-power_ro_lock.attr);
if (ret)
goto power_ro_lock_fail;
}
return ret;
 
 power_ro_lock_fail:
-   device_remove_file(disk_to_dev(md-disk), md-force_ro);
+   sysfs_remove_file(md-kobj, md-force_ro.attr);
 force_ro_fail:
+   kobject_put(md-kobj);
+init_kobj_fail:
del_gendisk(md-disk);
 
return ret;
-- 
1.7.3.3
-- 
Sent by a consultant of the Qualcomm Innovation Center, Inc.
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum.
--
To unsubscribe from this list: send the line unsubscribe linux-mmc in
the body of a message to majord...@vger.kernel.org
More majordomo

[PATCH v3 0/3] mmc: card: Add eMMC4.5 write packed commands unit-tests

2012-06-30 Thread Maya Erez
Expose the following packed commands tests:
- Test the write packed commands list preparation
- Simulate a returned error code
- Send an invalid packed command to the card

This patch is dependent on the following patches:
  [PATCH v8 1/3] mmc: core: Add packed command feature of eMMC4.5
  [PATCH v8 2/3] mmc: core: Support packed write command for eMMC4.5 device

Changes in v3:
- Fix the cancel round mechanism

Changes in v2:
- Add MMC write packing statistics and test-iosched scheduler patches as 
part of this patch.
- Code review fixes

Maya Erez (3):
  mmc: block: Add MMC write packing statistics
  block: Add test-iosched scheduler
  mmc: card: Add eMMC4.5 write packed commands unit-tests

 Documentation/block/test-iosched.txt |   39 +
 block/Kconfig.iosched|   11 +
 block/Makefile   |1 +
 block/blk-core.c |3 +-
 block/test-iosched.c | 1020 +++
 drivers/mmc/card/Kconfig |   11 +
 drivers/mmc/card/Makefile|1 +
 drivers/mmc/card/block.c |  138 +++-
 drivers/mmc/card/mmc_block_test.c| 1469 ++
 drivers/mmc/card/queue.h |   15 +
 drivers/mmc/core/bus.c   |4 +
 drivers/mmc/core/debugfs.c   |  169 
 drivers/mmc/core/mmc.c   |   18 +
 include/linux/mmc/card.h |   24 +
 include/linux/test-iosched.h |  233 ++
 15 files changed, 3141 insertions(+), 15 deletions(-)
 create mode 100644 Documentation/block/test-iosched.txt
 create mode 100644 block/test-iosched.c
 create mode 100644 drivers/mmc/card/mmc_block_test.c
 create mode 100644 include/linux/test-iosched.h

-- 
1.7.3.3
-- 
Sent by a consultant of the Qualcomm Innovation Center, Inc.
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum.

--
To unsubscribe from this list: send the line unsubscribe linux-mmc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v3 1/3] mmc: block: Add MMC write packing statistics

2012-06-30 Thread Maya Erez
The write packing statistics are used for the packed commands unit tests
in order to determine test success or failure

Signed-off-by: Maya Erez me...@codeaurora.org

diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index c965f2b..79fc4c2 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -63,6 +63,11 @@ MODULE_ALIAS(mmc:block);
(rq_data_dir(req) == WRITE))
 #define PACKED_CMD_VER 0x01
 #define PACKED_CMD_WR  0x02
+#define MMC_BLK_UPDATE_STOP_REASON(stats, reason)  \
+   do {\
+   if (stats-enabled) \
+   stats-pack_stop_reason[reason]++;  \
+   } while (0)
 
 static DEFINE_MUTEX(block_mutex);
 
@@ -1296,6 +1301,35 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req 
*mqrq,
mmc_queue_bounce_pre(mqrq);
 }
 
+struct mmc_wr_pack_stats *mmc_blk_get_packed_statistics(struct mmc_card *card)
+{
+   if (!card)
+   return NULL;
+
+   return card-wr_pack_stats;
+}
+EXPORT_SYMBOL(mmc_blk_get_packed_statistics);
+
+void mmc_blk_init_packed_statistics(struct mmc_card *card)
+{
+   int max_num_of_packed_reqs = 0;
+
+   if (!card || !card-wr_pack_stats.packing_events)
+   return;
+
+   max_num_of_packed_reqs = card-ext_csd.max_packed_writes;
+
+   spin_lock(card-wr_pack_stats.lock);
+   memset(card-wr_pack_stats.packing_events, 0,
+   (max_num_of_packed_reqs + 1) *
+  sizeof(*card-wr_pack_stats.packing_events));
+   memset(card-wr_pack_stats.pack_stop_reason, 0,
+   sizeof(card-wr_pack_stats.pack_stop_reason));
+   card-wr_pack_stats.enabled = true;
+   spin_unlock(card-wr_pack_stats.lock);
+}
+EXPORT_SYMBOL(mmc_blk_init_packed_statistics);
+
 static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
 {
struct request_queue *q = mq-queue;
@@ -1308,6 +1342,7 @@ static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, 
struct request *req)
u8 put_back = 0;
u8 max_packed_rw = 0;
u8 reqs = 0;
+   struct mmc_wr_pack_stats *stats = card-wr_pack_stats;
 
mmc_blk_clear_packed(mq-mqrq_cur);
 
@@ -1345,12 +1380,16 @@ static u8 mmc_blk_prep_packed_list(struct mmc_queue 
*mq, struct request *req)
phys_segments++;
}
 
+   spin_lock(stats-lock);
+
while (reqs  max_packed_rw - 1) {
spin_lock_irq(q-queue_lock);
next = blk_fetch_request(q);
spin_unlock_irq(q-queue_lock);
-   if (!next)
+   if (!next) {
+   MMC_BLK_UPDATE_STOP_REASON(stats, EMPTY_QUEUE);
break;
+   }
 
if (mmc_large_sec(card) 
!IS_ALIGNED(blk_rq_sectors(next), 8)) {
@@ -1360,11 +1399,13 @@ static u8 mmc_blk_prep_packed_list(struct mmc_queue 
*mq, struct request *req)
 
if (next-cmd_flags  REQ_DISCARD ||
next-cmd_flags  REQ_FLUSH) {
+   MMC_BLK_UPDATE_STOP_REASON(stats, FLUSH_OR_DISCARD);
put_back = 1;
break;
}
 
if (rq_data_dir(cur) != rq_data_dir(next)) {
+   MMC_BLK_UPDATE_STOP_REASON(stats, WRONG_DATA_DIR);
put_back = 1;
break;
}
@@ -1372,18 +1413,22 @@ static u8 mmc_blk_prep_packed_list(struct mmc_queue 
*mq, struct request *req)
if (mmc_req_rel_wr(next) 
(md-flags  MMC_BLK_REL_WR) 
!en_rel_wr) {
+   MMC_BLK_UPDATE_STOP_REASON(stats, REL_WRITE);
put_back = 1;
break;
}
 
req_sectors += blk_rq_sectors(next);
if (req_sectors  max_blk_count) {
+   if (stats-enabled)
+   stats-pack_stop_reason[EXCEEDS_SECTORS]++;
put_back = 1;
break;
}
 
phys_segments +=  next-nr_phys_segments;
if (phys_segments  max_phys_segs) {
+   MMC_BLK_UPDATE_STOP_REASON(stats, EXCEEDS_SEGMENTS);
put_back = 1;
break;
}
@@ -1399,6 +1444,15 @@ static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, 
struct request *req)
spin_unlock_irq(q-queue_lock);
}
 
+   if (stats-enabled) {
+   if (reqs + 1 = card-ext_csd.max_packed_writes)
+   stats-packing_events[reqs + 1]++;
+   if (reqs + 1 == max_packed_rw)
+   MMC_BLK_UPDATE_STOP_REASON(stats, THRESHOLD

[PATCH v3 2/3] block: Add test-iosched scheduler

2012-06-30 Thread Maya Erez
The test scheduler allows testing a block device by dispatching
specific requests according to the test case and declare PASS/FAIL
according to the requests completion error code

Signed-off-by: Maya Erez me...@codeaurora.org

diff --git a/Documentation/block/test-iosched.txt 
b/Documentation/block/test-iosched.txt
new file mode 100644
index 000..75d8134
--- /dev/null
+++ b/Documentation/block/test-iosched.txt
@@ -0,0 +1,39 @@
+Test IO scheduler
+==
+
+The test scheduler allows testing a block device by dispatching
+specific requests according to the test case and declare PASS/FAIL
+according to the requests completion error code.
+
+The test IO scheduler implements the no-op scheduler operations, and uses
+them in order to dispatch the non-test requests when no test is running.
+This will allow to keep a normal FS operation in parallel to the test
+capability.
+The test IO scheduler keeps two different queues, one for real-world requests
+(inserted by the FS) and the other for the test requests.
+The test IO scheduler chooses the queue for dispatch requests according to the
+test state (IDLE/RUNNING).
+
+the test IO scheduler is compiled by default as a dynamic module and enabled
+only if CONFIG_DEBUG_FS is defined.
+
+Each block device test utility that would like to use the test-iosched test
+services, should register as a blk_dev_test_type and supply an init and exit
+callbacks. Those callback are called upon selection (or removal) of the
+test-iosched as the active scheduler. From that point the block device test
+can start a test and supply its own callbacks for preparing, running, result
+checking and cleanup of the test.
+
+Each test is exposed via debugfs and can be triggered by writing to
+the debugfs file. In order to add a new test one should expose a new debugfs
+file for the new test.
+
+Selecting IO schedulers
+---
+Refer to Documentation/block/switching-sched.txt for information on
+selecting an io scheduler on a per-device basis.
+
+
+May 10 2012, maya Erez me...@codeaurora.org
+
+
diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
index 421bef9..af3d6a3 100644
--- a/block/Kconfig.iosched
+++ b/block/Kconfig.iosched
@@ -12,6 +12,17 @@ config IOSCHED_NOOP
  that do their own scheduling and require only minimal assistance from
  the kernel.
 
+config IOSCHED_TEST
+   tristate Test I/O scheduler
+   depends on DEBUG_FS
+   default m
+   ---help---
+ The test I/O scheduler is a duplicate of the noop scheduler with
+ addition of test utlity.
+ It allows testing a block device by dispatching specific requests
+ according to the test case and declare PASS/FAIL according to the
+ requests completion error code.
+
 config IOSCHED_DEADLINE
tristate Deadline I/O scheduler
default y
diff --git a/block/Makefile b/block/Makefile
index 39b76ba..436b220 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_BLK_DEV_THROTTLING)  += blk-throttle.o
 obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o
 obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o
 obj-$(CONFIG_IOSCHED_CFQ)  += cfq-iosched.o
+obj-$(CONFIG_IOSCHED_TEST) += test-iosched.o
 
 obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o
 obj-$(CONFIG_BLK_DEV_INTEGRITY)+= blk-integrity.o
diff --git a/block/blk-core.c b/block/blk-core.c
index 93eb3e4..619abbe 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1085,8 +1085,6 @@ struct request *blk_get_request(struct request_queue *q, 
int rw, gfp_t gfp_mask)
 {
struct request *rq;
 
-   BUG_ON(rw != READ  rw != WRITE);
-
spin_lock_irq(q-queue_lock);
if (gfp_mask  __GFP_WAIT)
rq = get_request_wait(q, rw, NULL);
@@ -1419,6 +1417,7 @@ void init_request_from_bio(struct request *req, struct 
bio *bio)
req-ioprio = bio_prio(bio);
blk_rq_bio_prep(req-q, req, bio);
 }
+EXPORT_SYMBOL(init_request_from_bio);
 
 void blk_queue_bio(struct request_queue *q, struct bio *bio)
 {
diff --git a/block/test-iosched.c b/block/test-iosched.c
new file mode 100644
index 000..78f2a9e
--- /dev/null
+++ b/block/test-iosched.c
@@ -0,0 +1,1020 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * The test scheduler allows to test the block device by dispatching
+ * specific requests according to the test case and declare PASS/FAIL
+ * according to the requests completion error code.
+ * Each test is exposed

[PATCH v3 3/3] mmc: card: Add eMMC4.5 write packed commands unit-tests

2012-06-30 Thread Maya Erez
Expose the following packed commands tests:
- Test the write packed commands list preparation
- Simulate a returned error code
- Send an invalid packed command to the card

Signed-off-by: Lee Susman lsus...@codeaurora.org
Signed-off-by: Maya Erez me...@codeaurora.org

diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig
index 3b1f783..bbe8cac 100644
--- a/drivers/mmc/card/Kconfig
+++ b/drivers/mmc/card/Kconfig
@@ -67,3 +67,14 @@ config MMC_TEST
 
  This driver is only of interest to those developing or
  testing a host driver. Most people should say N here.
+
+config MMC_BLOCK_TEST
+   tristate MMC block test
+   depends on MMC_BLOCK  IOSCHED_TEST
+   default m
+   help
+ MMC block test can be used with test iosched to test the MMC block
+ device.
+ Currently used to test eMMC 4.5 features (packed commands, sanitize,
+ BKOPs).
+
diff --git a/drivers/mmc/card/Makefile b/drivers/mmc/card/Makefile
index c73b406..d55107f 100644
--- a/drivers/mmc/card/Makefile
+++ b/drivers/mmc/card/Makefile
@@ -8,3 +8,4 @@ obj-$(CONFIG_MMC_TEST)  += mmc_test.o
 
 obj-$(CONFIG_SDIO_UART)+= sdio_uart.o
 
+obj-$(CONFIG_MMC_BLOCK_TEST)   += mmc_block_test.o
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 79fc4c2..3d6e564 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -123,17 +123,6 @@ struct mmc_blk_data {
 
 static DEFINE_MUTEX(open_lock);
 
-enum mmc_blk_status {
-   MMC_BLK_SUCCESS = 0,
-   MMC_BLK_PARTIAL,
-   MMC_BLK_CMD_ERR,
-   MMC_BLK_RETRY,
-   MMC_BLK_ABORT,
-   MMC_BLK_DATA_ERR,
-   MMC_BLK_ECC_ERR,
-   MMC_BLK_NOMEDIUM,
-};
-
 enum {
MMC_PACKED_N_IDX = -1,
MMC_PACKED_N_ZERO,
@@ -1330,6 +1319,64 @@ void mmc_blk_init_packed_statistics(struct mmc_card 
*card)
 }
 EXPORT_SYMBOL(mmc_blk_init_packed_statistics);
 
+void print_mmc_packing_stats(struct mmc_card *card)
+{
+   int i;
+   int max_num_of_packed_reqs = 0;
+
+   if ((!card) || (!card-wr_pack_stats.packing_events))
+   return;
+
+   max_num_of_packed_reqs = card-ext_csd.max_packed_writes;
+
+   spin_lock(card-wr_pack_stats.lock);
+
+   pr_info(%s: write packing statistics:\n,
+   mmc_hostname(card-host));
+
+   for (i = 1 ; i = max_num_of_packed_reqs ; ++i) {
+   if (card-wr_pack_stats.packing_events[i] != 0)
+   pr_info(%s: Packed %d reqs - %d times\n,
+   mmc_hostname(card-host), i,
+   card-wr_pack_stats.packing_events[i]);
+   }
+
+   pr_info(%s: stopped packing due to the following reasons:\n,
+   mmc_hostname(card-host));
+
+   if (card-wr_pack_stats.pack_stop_reason[EXCEEDS_SEGMENTS])
+   pr_info(%s: %d times: exceedmax num of segments\n,
+   mmc_hostname(card-host),
+   card-wr_pack_stats.pack_stop_reason[EXCEEDS_SEGMENTS]);
+   if (card-wr_pack_stats.pack_stop_reason[EXCEEDS_SECTORS])
+   pr_info(%s: %d times: exceeding the max num of sectors\n,
+   mmc_hostname(card-host),
+   card-wr_pack_stats.pack_stop_reason[EXCEEDS_SECTORS]);
+   if (card-wr_pack_stats.pack_stop_reason[WRONG_DATA_DIR])
+   pr_info(%s: %d times: wrong data direction\n,
+   mmc_hostname(card-host),
+   card-wr_pack_stats.pack_stop_reason[WRONG_DATA_DIR]);
+   if (card-wr_pack_stats.pack_stop_reason[FLUSH_OR_DISCARD])
+   pr_info(%s: %d times: flush or discard\n,
+   mmc_hostname(card-host),
+   card-wr_pack_stats.pack_stop_reason[FLUSH_OR_DISCARD]);
+   if (card-wr_pack_stats.pack_stop_reason[EMPTY_QUEUE])
+   pr_info(%s: %d times: empty queue\n,
+   mmc_hostname(card-host),
+   card-wr_pack_stats.pack_stop_reason[EMPTY_QUEUE]);
+   if (card-wr_pack_stats.pack_stop_reason[REL_WRITE])
+   pr_info(%s: %d times: rel write\n,
+   mmc_hostname(card-host),
+   card-wr_pack_stats.pack_stop_reason[REL_WRITE]);
+   if (card-wr_pack_stats.pack_stop_reason[THRESHOLD])
+   pr_info(%s: %d times: Threshold\n,
+   mmc_hostname(card-host),
+   card-wr_pack_stats.pack_stop_reason[THRESHOLD]);
+
+   spin_unlock(card-wr_pack_stats.lock);
+}
+EXPORT_SYMBOL(print_mmc_packing_stats);
+
 static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
 {
struct request_queue *q = mq-queue;
@@ -1538,7 +1585,18 @@ static void mmc_blk_packed_hdr_wrq_prep(struct 
mmc_queue_req *mqrq,
brq-data.sg_len = mmc_queue_map_sg(mq, mqrq);
 
mqrq-mmc_active.mrq = brq-mrq;
-   mqrq-mmc_active.err_check = mmc_blk_packed_err_check

[PATCH v2 0/3] mmc: card: Add eMMC4.5 write packed commands unit-tests

2012-06-23 Thread Maya Erez
Expose the following packed commands tests:
- Test the write packed commands list preparation
- Simulate a returned error code
- Send an invalid packed command to the card

This patch is dependent on the following patches:
  [PATCH RESEND v7 1/3] mmc: core: Add packed command feature of eMMC4.5
  [PATCH RESEND v7 2/3] mmc: core: Support packed write command for eMMC4.5 
device

Changes in V2:
- Add MMC write packing statistics and test-iosched scheduler patches as 
part of this patch.
- Code review fixes

Maya Erez (3):
  mmc: block: Add MMC write packing statistics
  block: Add test-iosched scheduler
  block: Add eMMC4.5 write packed commands unit-tests

 Documentation/block/test-iosched.txt |   39 +
 block/Kconfig.iosched|   11 +
 block/Makefile   |1 +
 block/blk-core.c |3 +-
 block/test-iosched.c | 1005 +++
 drivers/mmc/card/Kconfig |   11 +
 drivers/mmc/card/Makefile|1 +
 drivers/mmc/card/block.c |  138 +++-
 drivers/mmc/card/mmc_block_test.c| 1449 ++
 drivers/mmc/card/queue.h |   15 +
 drivers/mmc/core/bus.c   |4 +
 drivers/mmc/core/debugfs.c   |  169 
 drivers/mmc/core/mmc.c   |   18 +
 include/linux/mmc/card.h |   24 +
 include/linux/test-iosched.h |  218 +
 15 files changed, 3091 insertions(+), 15 deletions(-)
 create mode 100644 Documentation/block/test-iosched.txt
 create mode 100644 block/test-iosched.c
 create mode 100644 drivers/mmc/card/mmc_block_test.c
 create mode 100644 include/linux/test-iosched.h

-- 
1.7.3.3
-- 
Sent by a consultant of the Qualcomm Innovation Center, Inc.
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum.
--
To unsubscribe from this list: send the line unsubscribe linux-mmc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v2 1/3] mmc: block: Add MMC write packing statistics

2012-06-23 Thread Maya Erez
The write packing statistics are used for the packed commands unit tests
in order to determine test success or failure

Signed-off-by: Maya Erez me...@codeaurora.org

diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 260203a..10b77fd 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -63,6 +63,11 @@ MODULE_ALIAS(mmc:block);
(rq_data_dir(req) == WRITE))
 #define PACKED_CMD_VER 0x01
 #define PACKED_CMD_WR  0x02
+#define MMC_BLK_UPDATE_STOP_REASON(stats, reason)  \
+   do {\
+   if (stats-enabled) \
+   stats-pack_stop_reason[reason]++;  \
+   } while (0)
 
 static DEFINE_MUTEX(block_mutex);
 
@@ -1296,6 +1301,35 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req 
*mqrq,
mmc_queue_bounce_pre(mqrq);
 }
 
+struct mmc_wr_pack_stats *mmc_blk_get_packed_statistics(struct mmc_card *card)
+{
+   if (!card)
+   return NULL;
+
+   return card-wr_pack_stats;
+}
+EXPORT_SYMBOL(mmc_blk_get_packed_statistics);
+
+void mmc_blk_init_packed_statistics(struct mmc_card *card)
+{
+   int max_num_of_packed_reqs = 0;
+
+   if (!card || !card-wr_pack_stats.packing_events)
+   return;
+
+   max_num_of_packed_reqs = card-ext_csd.max_packed_writes;
+
+   spin_lock(card-wr_pack_stats.lock);
+   memset(card-wr_pack_stats.packing_events, 0,
+   (max_num_of_packed_reqs + 1) *
+  sizeof(*card-wr_pack_stats.packing_events));
+   memset(card-wr_pack_stats.pack_stop_reason, 0,
+   sizeof(card-wr_pack_stats.pack_stop_reason));
+   card-wr_pack_stats.enabled = true;
+   spin_unlock(card-wr_pack_stats.lock);
+}
+EXPORT_SYMBOL(mmc_blk_init_packed_statistics);
+
 static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
 {
struct request_queue *q = mq-queue;
@@ -1308,6 +1342,7 @@ static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, 
struct request *req)
u8 put_back = 0;
u8 max_packed_rw = 0;
u8 reqs = 0;
+   struct mmc_wr_pack_stats *stats = card-wr_pack_stats;
 
mmc_blk_clear_packed(mq-mqrq_cur);
 
@@ -1342,20 +1377,26 @@ static u8 mmc_blk_prep_packed_list(struct mmc_queue 
*mq, struct request *req)
phys_segments++;
}
 
+   spin_lock(stats-lock);
+
while (reqs  max_packed_rw - 1) {
spin_lock_irq(q-queue_lock);
next = blk_fetch_request(q);
spin_unlock_irq(q-queue_lock);
-   if (!next)
+   if (!next) {
+   MMC_BLK_UPDATE_STOP_REASON(stats, EMPTY_QUEUE);
break;
+   }
 
if (next-cmd_flags  REQ_DISCARD ||
next-cmd_flags  REQ_FLUSH) {
+   MMC_BLK_UPDATE_STOP_REASON(stats, FLUSH_OR_DISCARD);
put_back = 1;
break;
}
 
if (rq_data_dir(cur) != rq_data_dir(next)) {
+   MMC_BLK_UPDATE_STOP_REASON(stats, WRONG_DATA_DIR);
put_back = 1;
break;
}
@@ -1363,18 +1404,22 @@ static u8 mmc_blk_prep_packed_list(struct mmc_queue 
*mq, struct request *req)
if (mmc_req_rel_wr(next) 
(md-flags  MMC_BLK_REL_WR) 
!en_rel_wr) {
+   MMC_BLK_UPDATE_STOP_REASON(stats, REL_WRITE);
put_back = 1;
break;
}
 
req_sectors += blk_rq_sectors(next);
if (req_sectors  max_blk_count) {
+   if (stats-enabled)
+   stats-pack_stop_reason[EXCEEDS_SECTORS]++;
put_back = 1;
break;
}
 
phys_segments +=  next-nr_phys_segments;
if (phys_segments  max_phys_segs) {
+   MMC_BLK_UPDATE_STOP_REASON(stats, EXCEEDS_SEGMENTS);
put_back = 1;
break;
}
@@ -1390,6 +1435,15 @@ static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, 
struct request *req)
spin_unlock_irq(q-queue_lock);
}
 
+   if (stats-enabled) {
+   if (reqs + 1 = card-ext_csd.max_packed_writes)
+   stats-packing_events[reqs + 1]++;
+   if (reqs + 1 == max_packed_rw)
+   MMC_BLK_UPDATE_STOP_REASON(stats, THRESHOLD);
+   }
+
+   spin_unlock(stats-lock);
+
if (reqs  0) {
list_add(req-queuelist, mq-mqrq_cur-packed_list);
mq-mqrq_cur-packed_num = ++reqs;
diff --git a/drivers/mmc/core

[PATCH v2 2/3] block: Add test-iosched scheduler

2012-06-23 Thread Maya Erez
The test scheduler allows testing a block device by dispatching
specific requests according to the test case and declare PASS/FAIL
according to the requests completion error code

Signed-off-by: Maya Erez me...@codeaurora.org

diff --git a/Documentation/block/test-iosched.txt 
b/Documentation/block/test-iosched.txt
new file mode 100644
index 000..75d8134
--- /dev/null
+++ b/Documentation/block/test-iosched.txt
@@ -0,0 +1,39 @@
+Test IO scheduler
+==
+
+The test scheduler allows testing a block device by dispatching
+specific requests according to the test case and declare PASS/FAIL
+according to the requests completion error code.
+
+The test IO scheduler implements the no-op scheduler operations, and uses
+them in order to dispatch the non-test requests when no test is running.
+This will allow to keep a normal FS operation in parallel to the test
+capability.
+The test IO scheduler keeps two different queues, one for real-world requests
+(inserted by the FS) and the other for the test requests.
+The test IO scheduler chooses the queue for dispatch requests according to the
+test state (IDLE/RUNNING).
+
+the test IO scheduler is compiled by default as a dynamic module and enabled
+only if CONFIG_DEBUG_FS is defined.
+
+Each block device test utility that would like to use the test-iosched test
+services, should register as a blk_dev_test_type and supply an init and exit
+callbacks. Those callback are called upon selection (or removal) of the
+test-iosched as the active scheduler. From that point the block device test
+can start a test and supply its own callbacks for preparing, running, result
+checking and cleanup of the test.
+
+Each test is exposed via debugfs and can be triggered by writing to
+the debugfs file. In order to add a new test one should expose a new debugfs
+file for the new test.
+
+Selecting IO schedulers
+---
+Refer to Documentation/block/switching-sched.txt for information on
+selecting an io scheduler on a per-device basis.
+
+
+May 10 2012, maya Erez me...@codeaurora.org
+
+
diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
index 421bef9..af3d6a3 100644
--- a/block/Kconfig.iosched
+++ b/block/Kconfig.iosched
@@ -12,6 +12,17 @@ config IOSCHED_NOOP
  that do their own scheduling and require only minimal assistance from
  the kernel.
 
+config IOSCHED_TEST
+   tristate Test I/O scheduler
+   depends on DEBUG_FS
+   default m
+   ---help---
+ The test I/O scheduler is a duplicate of the noop scheduler with
+ addition of test utlity.
+ It allows testing a block device by dispatching specific requests
+ according to the test case and declare PASS/FAIL according to the
+ requests completion error code.
+
 config IOSCHED_DEADLINE
tristate Deadline I/O scheduler
default y
diff --git a/block/Makefile b/block/Makefile
index 39b76ba..436b220 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_BLK_DEV_THROTTLING)  += blk-throttle.o
 obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o
 obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o
 obj-$(CONFIG_IOSCHED_CFQ)  += cfq-iosched.o
+obj-$(CONFIG_IOSCHED_TEST) += test-iosched.o
 
 obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o
 obj-$(CONFIG_BLK_DEV_INTEGRITY)+= blk-integrity.o
diff --git a/block/blk-core.c b/block/blk-core.c
index 93eb3e4..619abbe 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1085,8 +1085,6 @@ struct request *blk_get_request(struct request_queue *q, 
int rw, gfp_t gfp_mask)
 {
struct request *rq;
 
-   BUG_ON(rw != READ  rw != WRITE);
-
spin_lock_irq(q-queue_lock);
if (gfp_mask  __GFP_WAIT)
rq = get_request_wait(q, rw, NULL);
@@ -1419,6 +1417,7 @@ void init_request_from_bio(struct request *req, struct 
bio *bio)
req-ioprio = bio_prio(bio);
blk_rq_bio_prep(req-q, req, bio);
 }
+EXPORT_SYMBOL(init_request_from_bio);
 
 void blk_queue_bio(struct request_queue *q, struct bio *bio)
 {
diff --git a/block/test-iosched.c b/block/test-iosched.c
new file mode 100644
index 000..e79ef5f
--- /dev/null
+++ b/block/test-iosched.c
@@ -0,0 +1,1005 @@
+
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * The test scheduler allows to test the block device by dispatching
+ * specific requests according to the test case and declare PASS/FAIL
+ * according to the requests completion error code.
+ * Each test is exposed

[PATCH v2 3/3] block: Add eMMC4.5 write packed commands unit-tests

2012-06-23 Thread Maya Erez
Expose the following packed commands tests:
- Test the write packed commands list preparation
- Simulate a returned error code
- Send an invalid packed command to the card

Signed-off-by: Lee Susman lsus...@codeaurora.org
Signed-off-by: Maya Erez me...@codeaurora.org

diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig
index 3b1f783..bbe8cac 100644
--- a/drivers/mmc/card/Kconfig
+++ b/drivers/mmc/card/Kconfig
@@ -67,3 +67,14 @@ config MMC_TEST
 
  This driver is only of interest to those developing or
  testing a host driver. Most people should say N here.
+
+config MMC_BLOCK_TEST
+   tristate MMC block test
+   depends on MMC_BLOCK  IOSCHED_TEST
+   default m
+   help
+ MMC block test can be used with test iosched to test the MMC block
+ device.
+ Currently used to test eMMC 4.5 features (packed commands, sanitize,
+ BKOPs).
+
diff --git a/drivers/mmc/card/Makefile b/drivers/mmc/card/Makefile
index c73b406..d55107f 100644
--- a/drivers/mmc/card/Makefile
+++ b/drivers/mmc/card/Makefile
@@ -8,3 +8,4 @@ obj-$(CONFIG_MMC_TEST)  += mmc_test.o
 
 obj-$(CONFIG_SDIO_UART)+= sdio_uart.o
 
+obj-$(CONFIG_MMC_BLOCK_TEST)   += mmc_block_test.o
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 10b77fd..4a24530 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -123,17 +123,6 @@ struct mmc_blk_data {
 
 static DEFINE_MUTEX(open_lock);
 
-enum mmc_blk_status {
-   MMC_BLK_SUCCESS = 0,
-   MMC_BLK_PARTIAL,
-   MMC_BLK_CMD_ERR,
-   MMC_BLK_RETRY,
-   MMC_BLK_ABORT,
-   MMC_BLK_DATA_ERR,
-   MMC_BLK_ECC_ERR,
-   MMC_BLK_NOMEDIUM,
-};
-
 enum {
MMC_PACKED_N_IDX = -1,
MMC_PACKED_N_ZERO,
@@ -1330,6 +1319,64 @@ void mmc_blk_init_packed_statistics(struct mmc_card 
*card)
 }
 EXPORT_SYMBOL(mmc_blk_init_packed_statistics);
 
+void print_mmc_packing_stats(struct mmc_card *card)
+{
+   int i;
+   int max_num_of_packed_reqs = 0;
+
+   if ((!card) || (!card-wr_pack_stats.packing_events))
+   return;
+
+   max_num_of_packed_reqs = card-ext_csd.max_packed_writes;
+
+   spin_lock(card-wr_pack_stats.lock);
+
+   pr_info(%s: write packing statistics:\n,
+   mmc_hostname(card-host));
+
+   for (i = 1 ; i = max_num_of_packed_reqs ; ++i) {
+   if (card-wr_pack_stats.packing_events[i] != 0)
+   pr_info(%s: Packed %d reqs - %d times\n,
+   mmc_hostname(card-host), i,
+   card-wr_pack_stats.packing_events[i]);
+   }
+
+   pr_info(%s: stopped packing due to the following reasons:\n,
+   mmc_hostname(card-host));
+
+   if (card-wr_pack_stats.pack_stop_reason[EXCEEDS_SEGMENTS])
+   pr_info(%s: %d times: exceedmax num of segments\n,
+   mmc_hostname(card-host),
+   card-wr_pack_stats.pack_stop_reason[EXCEEDS_SEGMENTS]);
+   if (card-wr_pack_stats.pack_stop_reason[EXCEEDS_SECTORS])
+   pr_info(%s: %d times: exceeding the max num of sectors\n,
+   mmc_hostname(card-host),
+   card-wr_pack_stats.pack_stop_reason[EXCEEDS_SECTORS]);
+   if (card-wr_pack_stats.pack_stop_reason[WRONG_DATA_DIR])
+   pr_info(%s: %d times: wrong data direction\n,
+   mmc_hostname(card-host),
+   card-wr_pack_stats.pack_stop_reason[WRONG_DATA_DIR]);
+   if (card-wr_pack_stats.pack_stop_reason[FLUSH_OR_DISCARD])
+   pr_info(%s: %d times: flush or discard\n,
+   mmc_hostname(card-host),
+   card-wr_pack_stats.pack_stop_reason[FLUSH_OR_DISCARD]);
+   if (card-wr_pack_stats.pack_stop_reason[EMPTY_QUEUE])
+   pr_info(%s: %d times: empty queue\n,
+   mmc_hostname(card-host),
+   card-wr_pack_stats.pack_stop_reason[EMPTY_QUEUE]);
+   if (card-wr_pack_stats.pack_stop_reason[REL_WRITE])
+   pr_info(%s: %d times: rel write\n,
+   mmc_hostname(card-host),
+   card-wr_pack_stats.pack_stop_reason[REL_WRITE]);
+   if (card-wr_pack_stats.pack_stop_reason[THRESHOLD])
+   pr_info(%s: %d times: Threshold\n,
+   mmc_hostname(card-host),
+   card-wr_pack_stats.pack_stop_reason[THRESHOLD]);
+
+   spin_unlock(card-wr_pack_stats.lock);
+}
+EXPORT_SYMBOL(print_mmc_packing_stats);
+
 static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
 {
struct request_queue *q = mq-queue;
@@ -1529,7 +1576,18 @@ static void mmc_blk_packed_hdr_wrq_prep(struct 
mmc_queue_req *mqrq,
brq-data.sg_len = mmc_queue_map_sg(mq, mqrq);
 
mqrq-mmc_active.mrq = brq-mrq;
-   mqrq-mmc_active.err_check = mmc_blk_packed_err_check

[PATCH v3 0/1] mmc: block: Add write packing control

2012-06-13 Thread Maya Erez
Our experiments showed that write packing causes degradation of the read
throughput in parallel read and write operations.
Since the read latency is critical for user experience we added a write packing 
control
mechanism that disables the write packing in case of read requests.
This will ensure that read requests latency is not increased due to long write 
packed commands.

The trigger for enabling the write packing is managing to pack several write 
requests.
The number of potential packed requests that will trigger the packing can be 
configured via sysfs.
The trigger for disabling the write packing is a fetch of a read request.

This patch is dependent on the following patches:
  [PATCH v7 1/3] mmc: core: Add packed command feature of eMMC4.5
  [PATCH v7 2/3] mmc: core: Support packed write command for eMMC4.5 device

Changes in v3:
- Fix the settings of num_of_potential_packed_wr_reqs

Changes in v2:
- Move the attribute for setting the packing enabling trigger to the block 
device
- Add documentation of the new attribute

Maya Erez (1):
  mmc: block: Add write packing control

 Documentation/mmc/mmc-dev-attrs.txt |   17 ++
 drivers/mmc/card/block.c|  101 ++-
 drivers/mmc/card/queue.c|8 +++
 drivers/mmc/card/queue.h|3 +
 include/linux/mmc/host.h|1 +
 5 files changed, 129 insertions(+), 1 deletions(-)

-- 
1.7.3.3
-- 
Sent by a consultant of the Qualcomm Innovation Center, Inc.
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum.

--
To unsubscribe from this list: send the line unsubscribe linux-mmc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v3 1/1] mmc: block: Add write packing control

2012-06-13 Thread Maya Erez
The write packing control will ensure that read requests latency is
not increased due to long write packed commands.

The trigger for enabling the write packing is managing to pack several
write requests. The number of potential packed requests that will trigger
the packing can be configured via sysfs by writing the required value to:
/sys/block/block_dev_name/num_wr_reqs_to_start_packing.
The trigger for disabling the write packing is fetching a read request.

---
 Documentation/mmc/mmc-dev-attrs.txt |   17 ++
 drivers/mmc/card/block.c|  101 ++-
 drivers/mmc/card/queue.c|8 +++
 drivers/mmc/card/queue.h|3 +
 include/linux/mmc/host.h|1 +
 5 files changed, 129 insertions(+), 1 deletions(-)

diff --git a/Documentation/mmc/mmc-dev-attrs.txt 
b/Documentation/mmc/mmc-dev-attrs.txt
index 22ae844..08f7312 100644
--- a/Documentation/mmc/mmc-dev-attrs.txt
+++ b/Documentation/mmc/mmc-dev-attrs.txt
@@ -8,6 +8,23 @@ The following attributes are read/write.
 
force_roEnforce read-only access even if write protect 
switch is off.
 
+   num_wr_reqs_to_start_packingThis attribute is used to determine
+   the trigger for activating the write packing, in case the write
+   packing control feature is enabled.
+
+   When the MMC manages to reach a point where num_wr_reqs_to_start_packing
+   write requests could be packed, it enables the write packing feature.
+   This allows us to start the write packing only when it is beneficial
+   and has minimum affect on the read latency.
+
+   The number of potential packed requests that will trigger the packing
+   can be configured via sysfs by writing the required value to:
+   /sys/block/block_dev_name/num_wr_reqs_to_start_packing.
+
+   The default value of num_wr_reqs_to_start_packing was determined by
+   running parallel lmdd write and lmdd read operations and calculating
+   the max number of packed writes requests.
+
 SD and MMC Device Attributes
 
 
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 2107e4a..d795b3c 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -114,6 +114,7 @@ struct mmc_blk_data {
struct device_attribute force_ro;
struct device_attribute power_ro_lock;
int area_type;
+   struct device_attribute num_wr_reqs_to_start_packing;
 };
 
 static DEFINE_MUTEX(open_lock);
@@ -281,6 +282,38 @@ out:
return ret;
 }
 
+static ssize_t
+num_wr_reqs_to_start_packing_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+   struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+   int num_wr_reqs_to_start_packing;
+   int ret;
+
+   num_wr_reqs_to_start_packing = md-queue.num_wr_reqs_to_start_packing;
+
+   ret = snprintf(buf, PAGE_SIZE, %d\n, num_wr_reqs_to_start_packing);
+
+   mmc_blk_put(md);
+   return ret;
+}
+
+static ssize_t
+num_wr_reqs_to_start_packing_store(struct device *dev,
+struct device_attribute *attr,
+const char *buf, size_t count)
+{
+   int value;
+   struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+
+   sscanf(buf, %d, value);
+   if (value = 0)
+   md-queue.num_wr_reqs_to_start_packing = value;
+
+   mmc_blk_put(md);
+   return count;
+}
+
 static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
 {
struct mmc_blk_data *md = mmc_blk_get(bdev-bd_disk);
@@ -1308,6 +1341,49 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req 
*mqrq,
mmc_queue_bounce_pre(mqrq);
 }
 
+static void mmc_blk_write_packing_control(struct mmc_queue *mq,
+ struct request *req)
+{
+   struct mmc_host *host = mq-card-host;
+   int data_dir;
+
+   if (!(host-caps2  MMC_CAP2_PACKED_WR))
+   return;
+
+   /*
+* In case the packing control is not supported by the host, it should
+* not have an effect on the write packing. Therefore we have to enable
+* the write packing
+*/
+   if (!(host-caps2  MMC_CAP2_PACKED_WR_CONTROL)) {
+   mq-wr_packing_enabled = true;
+   return;
+   }
+
+   if (!req || (req  (req-cmd_flags  REQ_FLUSH))) {
+   if (mq-num_of_potential_packed_wr_reqs 
+   mq-num_wr_reqs_to_start_packing)
+   mq-wr_packing_enabled = true;
+   mq-num_of_potential_packed_wr_reqs = 0;
+   return;
+   }
+
+   data_dir = rq_data_dir(req);
+
+   if (data_dir == READ) {
+   mq-num_of_potential_packed_wr_reqs = 0;
+   mq-wr_packing_enabled = false;
+   return;
+   } else if (data_dir == WRITE) {
+   

[PATCH v3 0/1] block: Add test-iosched scheduler

2012-06-12 Thread Maya Erez
The test scheduler allows testing a block device by dispatching
specific requests according to the test case and declaring PASS/FAIL
according to the requests' completion error codes.

Changes in v3:
 - checkpatch error fixes
 - Changes to support the packed commands tests
Changes in v2:
- Export test-iosched functionality to allow definition of the block device
  tests under the block device layer
- Add registration of block device tests utilities

Maya Erez (1):
  block: Add test-iosched scheduler

 Documentation/block/test-iosched.txt |   39 ++
 block/Kconfig.iosched|   11 +
 block/Makefile   |1 +
 block/blk-core.c |3 +-
 block/test-iosched.c | 1014 ++
 include/linux/test-iosched.h |  218 
 6 files changed, 1284 insertions(+), 2 deletions(-)
 create mode 100644 Documentation/block/test-iosched.txt
 create mode 100644 block/test-iosched.c
 create mode 100644 include/linux/test-iosched.h

-- 
1.7.3.3
-- 
Sent by a consultant of the Qualcomm Innovation Center, Inc.
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum.

--
To unsubscribe from this list: send the line unsubscribe linux-mmc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v4 0/1] block: Add test-iosched scheduler

2012-06-12 Thread Maya Erez
The test scheduler allows testing a block device by dispatching
specific requests according to the test case and declare PASS/FAIL
according to the requests completion error code

Changes in v4:
 - fix test_init_queue definition

Changes in v3:
 - checkpatch error fixes
 - Changes to support the packed commands tests
Changes in v2:
- Export test-iosched functionality to allow definition of the block device
  tests under the block device layer
- Add registration of block device tests utilities

Maya Erez (1):
  block: Add test-iosched scheduler

 Documentation/block/test-iosched.txt |   39 ++
 block/Kconfig.iosched|   11 +
 block/Makefile   |1 +
 block/blk-core.c |3 +-
 block/test-iosched.c | 1015 ++
 include/linux/test-iosched.h |  218 
 6 files changed, 1285 insertions(+), 2 deletions(-)
 create mode 100644 Documentation/block/test-iosched.txt
 create mode 100644 block/test-iosched.c
 create mode 100644 include/linux/test-iosched.h

-- 
1.7.3.3
-- 
Sent by a consultant of the Qualcomm Innovation Center, Inc.
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum.

--
To unsubscribe from this list: send the line unsubscribe linux-mmc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v4 1/1] block: Add test-iosched scheduler

2012-06-12 Thread Maya Erez
The test scheduler allows testing a block device by dispatching
specific requests according to the test case and declare PASS/FAIL
according to the requests completion error code

Signed-off-by: Maya Erez me...@codeaurora.org

diff --git a/Documentation/block/test-iosched.txt 
b/Documentation/block/test-iosched.txt
new file mode 100644
index 000..75d8134
--- /dev/null
+++ b/Documentation/block/test-iosched.txt
@@ -0,0 +1,39 @@
+Test IO scheduler
+==
+
+The test scheduler allows testing a block device by dispatching
+specific requests according to the test case and declare PASS/FAIL
+according to the requests completion error code.
+
+The test IO scheduler implements the no-op scheduler operations, and uses
+them in order to dispatch the non-test requests when no test is running.
+This will allow to keep a normal FS operation in parallel to the test
+capability.
+The test IO scheduler keeps two different queues, one for real-world requests
+(inserted by the FS) and the other for the test requests.
+The test IO scheduler chooses the queue for dispatch requests according to the
+test state (IDLE/RUNNING).
+
+the test IO scheduler is compiled by default as a dynamic module and enabled
+only if CONFIG_DEBUG_FS is defined.
+
+Each block device test utility that would like to use the test-iosched test
+services, should register as a blk_dev_test_type and supply an init and exit
+callbacks. Those callback are called upon selection (or removal) of the
+test-iosched as the active scheduler. From that point the block device test
+can start a test and supply its own callbacks for preparing, running, result
+checking and cleanup of the test.
+
+Each test is exposed via debugfs and can be triggered by writing to
+the debugfs file. In order to add a new test one should expose a new debugfs
+file for the new test.
+
+Selecting IO schedulers
+---
+Refer to Documentation/block/switching-sched.txt for information on
+selecting an io scheduler on a per-device basis.
+
+
+May 10 2012, maya Erez me...@codeaurora.org
+
+
diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
index 421bef9..af3d6a3 100644
--- a/block/Kconfig.iosched
+++ b/block/Kconfig.iosched
@@ -12,6 +12,17 @@ config IOSCHED_NOOP
  that do their own scheduling and require only minimal assistance from
  the kernel.
 
+config IOSCHED_TEST
+   tristate Test I/O scheduler
+   depends on DEBUG_FS
+   default m
+   ---help---
+ The test I/O scheduler is a duplicate of the noop scheduler with
+ addition of test utlity.
+ It allows testing a block device by dispatching specific requests
+ according to the test case and declare PASS/FAIL according to the
+ requests completion error code.
+
 config IOSCHED_DEADLINE
tristate Deadline I/O scheduler
default y
diff --git a/block/Makefile b/block/Makefile
index 39b76ba..436b220 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_BLK_DEV_THROTTLING)  += blk-throttle.o
 obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o
 obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o
 obj-$(CONFIG_IOSCHED_CFQ)  += cfq-iosched.o
+obj-$(CONFIG_IOSCHED_TEST) += test-iosched.o
 
 obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o
 obj-$(CONFIG_BLK_DEV_INTEGRITY)+= blk-integrity.o
diff --git a/block/blk-core.c b/block/blk-core.c
index 5639a3d..54ea4da 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1072,8 +1072,6 @@ struct request *blk_get_request(struct request_queue *q, 
int rw, gfp_t gfp_mask)
 {
struct request *rq;
 
-   BUG_ON(rw != READ  rw != WRITE);
-
spin_lock_irq(q-queue_lock);
if (gfp_mask  __GFP_WAIT)
rq = get_request_wait(q, rw, NULL);
@@ -1406,6 +1404,7 @@ void init_request_from_bio(struct request *req, struct 
bio *bio)
req-ioprio = bio_prio(bio);
blk_rq_bio_prep(req-q, req, bio);
 }
+EXPORT_SYMBOL(init_request_from_bio);
 
 void blk_queue_bio(struct request_queue *q, struct bio *bio)
 {
diff --git a/block/test-iosched.c b/block/test-iosched.c
new file mode 100644
index 000..e4b89d2
--- /dev/null
+++ b/block/test-iosched.c
@@ -0,0 +1,1015 @@
+
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * The test scheduler allows to test the block device by dispatching
+ * specific requests according to the test case and declare PASS/FAIL
+ * according to the requests completion error code.
+ * Each test is exposed

[PATCH 1/1] block: Add eMMC4.5 packed commands unit-tests

2012-06-12 Thread Maya Erez
Expose the following packed commands tests:
- Test the write packed commands list preparation
- Simulate a returned error code
- Send an invalid packed command to the card

Signed-off-by: Lee Susman lsus...@codeaurora.org
Signed-off-by: Maya Erez me...@codeaurora.org

diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig
index 3b1f783..bbe8cac 100644
--- a/drivers/mmc/card/Kconfig
+++ b/drivers/mmc/card/Kconfig
@@ -67,3 +67,14 @@ config MMC_TEST
 
  This driver is only of interest to those developing or
  testing a host driver. Most people should say N here.
+
+config MMC_BLOCK_TEST
+   tristate MMC block test
+   depends on MMC_BLOCK  IOSCHED_TEST
+   default m
+   help
+ MMC block test can be used with test iosched to test the MMC block
+ device.
+ Currently used to test eMMC 4.5 features (packed commands, sanitize,
+ BKOPs).
+
diff --git a/drivers/mmc/card/Makefile b/drivers/mmc/card/Makefile
index c73b406..d55107f 100644
--- a/drivers/mmc/card/Makefile
+++ b/drivers/mmc/card/Makefile
@@ -8,3 +8,4 @@ obj-$(CONFIG_MMC_TEST)  += mmc_test.o
 
 obj-$(CONFIG_SDIO_UART)+= sdio_uart.o
 
+obj-$(CONFIG_MMC_BLOCK_TEST)   += mmc_block_test.o
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index dbf96ec..926040c 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -123,17 +123,6 @@ struct mmc_blk_data {
 
 static DEFINE_MUTEX(open_lock);
 
-enum mmc_blk_status {
-   MMC_BLK_SUCCESS = 0,
-   MMC_BLK_PARTIAL,
-   MMC_BLK_CMD_ERR,
-   MMC_BLK_RETRY,
-   MMC_BLK_ABORT,
-   MMC_BLK_DATA_ERR,
-   MMC_BLK_ECC_ERR,
-   MMC_BLK_NOMEDIUM,
-};
-
 enum {
MMC_PACKED_N_IDX = -1,
MMC_PACKED_N_ZERO,
@@ -1342,6 +1331,64 @@ void mmc_blk_init_packed_statistics(struct mmc_card 
*card)
 }
 EXPORT_SYMBOL(mmc_blk_init_packed_statistics);
 
+void print_mmc_packing_stats(struct mmc_card *card)
+{
+   int i;
+   int max_num_of_packed_reqs = 0;
+
+   if ((!card) || (!card-wr_pack_stats.packing_events))
+   return;
+
+   max_num_of_packed_reqs = card-ext_csd.max_packed_writes;
+
+   spin_lock(card-wr_pack_stats.lock);
+
+   pr_info(%s: write packing statistics:\n,
+   mmc_hostname(card-host));
+
+   for (i = 1 ; i = max_num_of_packed_reqs ; ++i) {
+   if (card-wr_pack_stats.packing_events[i] != 0)
+   pr_info(%s: Packed %d reqs - %d times\n,
+   mmc_hostname(card-host), i,
+   card-wr_pack_stats.packing_events[i]);
+   }
+
+   pr_info(%s: stopped packing due to the following reasons:\n,
+   mmc_hostname(card-host));
+
+   if (card-wr_pack_stats.pack_stop_reason[EXCEEDS_SEGMENTS])
+   pr_info(%s: %d times: exceedmax num of segments\n,
+   mmc_hostname(card-host),
+   card-wr_pack_stats.pack_stop_reason[EXCEEDS_SEGMENTS]);
+   if (card-wr_pack_stats.pack_stop_reason[EXCEEDS_SECTORS])
+   pr_info(%s: %d times: exceeding the max num of sectors\n,
+   mmc_hostname(card-host),
+   card-wr_pack_stats.pack_stop_reason[EXCEEDS_SECTORS]);
+   if (card-wr_pack_stats.pack_stop_reason[WRONG_DATA_DIR])
+   pr_info(%s: %d times: wrong data direction\n,
+   mmc_hostname(card-host),
+   card-wr_pack_stats.pack_stop_reason[WRONG_DATA_DIR]);
+   if (card-wr_pack_stats.pack_stop_reason[FLUSH_OR_DISCARD])
+   pr_info(%s: %d times: flush or discard\n,
+   mmc_hostname(card-host),
+   card-wr_pack_stats.pack_stop_reason[FLUSH_OR_DISCARD]);
+   if (card-wr_pack_stats.pack_stop_reason[EMPTY_QUEUE])
+   pr_info(%s: %d times: empty queue\n,
+   mmc_hostname(card-host),
+   card-wr_pack_stats.pack_stop_reason[EMPTY_QUEUE]);
+   if (card-wr_pack_stats.pack_stop_reason[REL_WRITE])
+   pr_info(%s: %d times: rel write\n,
+   mmc_hostname(card-host),
+   card-wr_pack_stats.pack_stop_reason[REL_WRITE]);
+   if (card-wr_pack_stats.pack_stop_reason[THRESHOLD])
+   pr_info(%s: %d times: Threshold\n,
+   mmc_hostname(card-host),
+   card-wr_pack_stats.pack_stop_reason[THRESHOLD]);
+
+   spin_unlock(card-wr_pack_stats.lock);
+}
+EXPORT_SYMBOL(print_mmc_packing_stats);
+
 static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
 {
struct request_queue *q = mq-queue;
@@ -1541,7 +1588,18 @@ static void mmc_blk_packed_hdr_wrq_prep(struct 
mmc_queue_req *mqrq,
brq-data.sg_len = mmc_queue_map_sg(mq, mqrq);
 
mqrq-mmc_active.mrq = brq-mrq;
-   mqrq-mmc_active.err_check = mmc_blk_packed_err_check

[PATCH v1 0/1] block: Add eMMC4.5 packed commands unit-tests

2012-06-12 Thread Maya Erez
Expose the following packed commands tests:
- Test the write packed commands list preparation
- Simulate a returned error code
- Send an invalid packed command to the card

This patch is dependent on the following patches:
  [PATCH v7 1/3] mmc: core: Add packed command feature of eMMC4.5
  [PATCH v7 2/3] mmc: core: Support packed write command for eMMC4.5 device
  [RFC/PATCH 1/1] Add MMC write packing statistics
  [PATCH v4 1/1] block: Add test-iosched scheduler

Maya Erez (1):
  block: Add eMMC4.5 packed commands unit-tests

 drivers/mmc/card/Kconfig  |   11 +
 drivers/mmc/card/Makefile |1 +
 drivers/mmc/card/block.c  |   82 ++-
 drivers/mmc/card/mmc_block_test.c | 1407 +
 drivers/mmc/card/queue.h  |   15 +
 5 files changed, 1504 insertions(+), 12 deletions(-)
 create mode 100644 drivers/mmc/card/mmc_block_test.c

-- 
1.7.3.3
-- 
Sent by a consultant of the Qualcomm Innovation Center, Inc.
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum.

--
To unsubscribe from this list: send the line unsubscribe linux-mmc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v1 1/1] block: Add eMMC4.5 packed commands unit-tests

2012-06-12 Thread Maya Erez
Expose the following packed commands tests:
- Test the write packed commands list preparation
- Simulate a returned error code
- Send an invalid packed command to the card

Signed-off-by: Lee Susman lsus...@codeaurora.org
Signed-off-by: Maya Erez me...@codeaurora.org

diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig
index 3b1f783..bbe8cac 100644
--- a/drivers/mmc/card/Kconfig
+++ b/drivers/mmc/card/Kconfig
@@ -67,3 +67,14 @@ config MMC_TEST
 
  This driver is only of interest to those developing or
  testing a host driver. Most people should say N here.
+
+config MMC_BLOCK_TEST
+   tristate MMC block test
+   depends on MMC_BLOCK  IOSCHED_TEST
+   default m
+   help
+ MMC block test can be used with test iosched to test the MMC block
+ device.
+ Currently used to test eMMC 4.5 features (packed commands, sanitize,
+ BKOPs).
+
diff --git a/drivers/mmc/card/Makefile b/drivers/mmc/card/Makefile
index c73b406..d55107f 100644
--- a/drivers/mmc/card/Makefile
+++ b/drivers/mmc/card/Makefile
@@ -8,3 +8,4 @@ obj-$(CONFIG_MMC_TEST)  += mmc_test.o
 
 obj-$(CONFIG_SDIO_UART)+= sdio_uart.o
 
+obj-$(CONFIG_MMC_BLOCK_TEST)   += mmc_block_test.o
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index dbf96ec..926040c 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -123,17 +123,6 @@ struct mmc_blk_data {
 
 static DEFINE_MUTEX(open_lock);
 
-enum mmc_blk_status {
-   MMC_BLK_SUCCESS = 0,
-   MMC_BLK_PARTIAL,
-   MMC_BLK_CMD_ERR,
-   MMC_BLK_RETRY,
-   MMC_BLK_ABORT,
-   MMC_BLK_DATA_ERR,
-   MMC_BLK_ECC_ERR,
-   MMC_BLK_NOMEDIUM,
-};
-
 enum {
MMC_PACKED_N_IDX = -1,
MMC_PACKED_N_ZERO,
@@ -1342,6 +1331,64 @@ void mmc_blk_init_packed_statistics(struct mmc_card 
*card)
 }
 EXPORT_SYMBOL(mmc_blk_init_packed_statistics);
 
+void print_mmc_packing_stats(struct mmc_card *card)
+{
+   int i;
+   int max_num_of_packed_reqs = 0;
+
+   if ((!card) || (!card-wr_pack_stats.packing_events))
+   return;
+
+   max_num_of_packed_reqs = card-ext_csd.max_packed_writes;
+
+   spin_lock(card-wr_pack_stats.lock);
+
+   pr_info(%s: write packing statistics:\n,
+   mmc_hostname(card-host));
+
+   for (i = 1 ; i = max_num_of_packed_reqs ; ++i) {
+   if (card-wr_pack_stats.packing_events[i] != 0)
+   pr_info(%s: Packed %d reqs - %d times\n,
+   mmc_hostname(card-host), i,
+   card-wr_pack_stats.packing_events[i]);
+   }
+
+   pr_info(%s: stopped packing due to the following reasons:\n,
+   mmc_hostname(card-host));
+
+   if (card-wr_pack_stats.pack_stop_reason[EXCEEDS_SEGMENTS])
+   pr_info(%s: %d times: exceedmax num of segments\n,
+   mmc_hostname(card-host),
+   card-wr_pack_stats.pack_stop_reason[EXCEEDS_SEGMENTS]);
+   if (card-wr_pack_stats.pack_stop_reason[EXCEEDS_SECTORS])
+   pr_info(%s: %d times: exceeding the max num of sectors\n,
+   mmc_hostname(card-host),
+   card-wr_pack_stats.pack_stop_reason[EXCEEDS_SECTORS]);
+   if (card-wr_pack_stats.pack_stop_reason[WRONG_DATA_DIR])
+   pr_info(%s: %d times: wrong data direction\n,
+   mmc_hostname(card-host),
+   card-wr_pack_stats.pack_stop_reason[WRONG_DATA_DIR]);
+   if (card-wr_pack_stats.pack_stop_reason[FLUSH_OR_DISCARD])
+   pr_info(%s: %d times: flush or discard\n,
+   mmc_hostname(card-host),
+   card-wr_pack_stats.pack_stop_reason[FLUSH_OR_DISCARD]);
+   if (card-wr_pack_stats.pack_stop_reason[EMPTY_QUEUE])
+   pr_info(%s: %d times: empty queue\n,
+   mmc_hostname(card-host),
+   card-wr_pack_stats.pack_stop_reason[EMPTY_QUEUE]);
+   if (card-wr_pack_stats.pack_stop_reason[REL_WRITE])
+   pr_info(%s: %d times: rel write\n,
+   mmc_hostname(card-host),
+   card-wr_pack_stats.pack_stop_reason[REL_WRITE]);
+   if (card-wr_pack_stats.pack_stop_reason[THRESHOLD])
+   pr_info(%s: %d times: Threshold\n,
+   mmc_hostname(card-host),
+   card-wr_pack_stats.pack_stop_reason[THRESHOLD]);
+
+   spin_unlock(card-wr_pack_stats.lock);
+}
+EXPORT_SYMBOL(print_mmc_packing_stats);
+
 static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
 {
struct request_queue *q = mq-queue;
@@ -1541,7 +1588,18 @@ static void mmc_blk_packed_hdr_wrq_prep(struct 
mmc_queue_req *mqrq,
brq-data.sg_len = mmc_queue_map_sg(mq, mqrq);
 
mqrq-mmc_active.mrq = brq-mrq;
-   mqrq-mmc_active.err_check = mmc_blk_packed_err_check

[RFC/PATCH 0/1] Add MMC write packing statistics

2012-06-06 Thread Maya Erez
The write packing statistics are used for the packed commands unit tests
in order to determine test success or failure

This patch is dependent on the following patches:
  [PATCH v6 1/3] mmc: core: Add packed command feature of eMMC4.5
  [PATCH v6 2/3] mmc: core: Support packed write command for eMMC4.5 device

Maya Erez (1):
  mmc: block: Add MMC write packing statistics

 drivers/mmc/card/block.c   |   56 +++-
 drivers/mmc/core/bus.c |4 +
 drivers/mmc/core/debugfs.c |  164 
 drivers/mmc/core/mmc.c |   18 +
 include/linux/mmc/card.h   |   24 +++
 5 files changed, 265 insertions(+), 1 deletions(-)

--
1.7.3.3
-- 
Sent by a consultant of the Qualcomm Innovation Center, Inc.
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum.
--
To unsubscribe from this list: send the line unsubscribe linux-mmc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[RFC/PATCH 1/1] mmc: block: Add MMC write packing statistics

2012-06-06 Thread Maya Erez
The write packing statistics are used for the packed commands unit tests
in order to determine test success or failure

Signed-off-by: Maya Erez me...@codeaurora.org

diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 2785fd4..c33c0c8 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -63,6 +63,11 @@ MODULE_ALIAS(mmc:block);
(rq_data_dir(req) == WRITE))
 #define PACKED_CMD_VER 0x01
 #define PACKED_CMD_WR  0x02
+#define MMC_BLK_UPDATE_STOP_REASON(stats, reason)  \
+   do {\
+   if (stats-enabled) \
+   stats-pack_stop_reason[reason]++;  \
+   } while (0)

 static DEFINE_MUTEX(block_mutex);

@@ -1313,6 +1318,35 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req 
*mqrq,
mmc_queue_bounce_pre(mqrq);
 }

+struct mmc_wr_pack_stats *mmc_blk_get_packed_statistics(struct mmc_card *card)
+{
+   if (!card)
+   return NULL;
+
+   return card-wr_pack_stats;
+}
+EXPORT_SYMBOL(mmc_blk_get_packed_statistics);
+
+void mmc_blk_init_packed_statistics(struct mmc_card *card)
+{
+   int max_num_of_packed_reqs = 0;
+
+   if (!card || !card-wr_pack_stats.packing_events)
+   return;
+
+   max_num_of_packed_reqs = card-ext_csd.max_packed_writes;
+
+   spin_lock(card-wr_pack_stats.lock);
+   memset(card-wr_pack_stats.packing_events, 0,
+   (max_num_of_packed_reqs + 1) *
+  sizeof(*card-wr_pack_stats.packing_events));
+   memset(card-wr_pack_stats.pack_stop_reason, 0,
+   sizeof(card-wr_pack_stats.pack_stop_reason));
+   card-wr_pack_stats.enabled = true;
+   spin_unlock(card-wr_pack_stats.lock);
+}
+EXPORT_SYMBOL(mmc_blk_init_packed_statistics);
+
 static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
 {
struct request_queue *q = mq-queue;
@@ -1325,6 +1359,7 @@ static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, 
struct request *req)
u8 put_back = 0;
u8 max_packed_rw = 0;
u8 reqs = 0;
+   struct mmc_wr_pack_stats *stats = card-wr_pack_stats;

mmc_blk_clear_packed(mq-mqrq_cur);

@@ -1359,20 +1394,26 @@ static u8 mmc_blk_prep_packed_list(struct mmc_queue 
*mq, struct request *req)
phys_segments++;
}

+   spin_lock(stats-lock);
+
while (reqs  max_packed_rw - 1) {
spin_lock_irq(q-queue_lock);
next = blk_fetch_request(q);
spin_unlock_irq(q-queue_lock);
-   if (!next)
+   if (!next) {
+   MMC_BLK_UPDATE_STOP_REASON(stats, EMPTY_QUEUE);
break;
+   }

if (next-cmd_flags  REQ_DISCARD ||
next-cmd_flags  REQ_FLUSH) {
+   MMC_BLK_UPDATE_STOP_REASON(stats, FLUSH_OR_DISCARD);
put_back = 1;
break;
}

if (rq_data_dir(cur) != rq_data_dir(next)) {
+   MMC_BLK_UPDATE_STOP_REASON(stats, WRONG_DATA_DIR);
put_back = 1;
break;
}
@@ -1380,18 +1421,22 @@ static u8 mmc_blk_prep_packed_list(struct mmc_queue 
*mq, struct request *req)
if (mmc_req_rel_wr(next) 
(md-flags  MMC_BLK_REL_WR) 
!en_rel_wr) {
+   MMC_BLK_UPDATE_STOP_REASON(stats, REL_WRITE);
put_back = 1;
break;
}

req_sectors += blk_rq_sectors(next);
if (req_sectors  max_blk_count) {
+   if (stats-enabled)
+   stats-pack_stop_reason[EXCEEDS_SECTORS]++;
put_back = 1;
break;
}

phys_segments +=  next-nr_phys_segments;
if (phys_segments  max_phys_segs) {
+   MMC_BLK_UPDATE_STOP_REASON(stats, EXCEEDS_SEGMENTS);
put_back = 1;
break;
}
@@ -1407,6 +1452,15 @@ static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, 
struct request *req)
spin_unlock_irq(q-queue_lock);
}

+   if (stats-enabled) {
+   if (reqs + 1 = card-ext_csd.max_packed_writes)
+   stats-packing_events[reqs + 1]++;
+   if (reqs + 1 == max_packed_rw)
+   MMC_BLK_UPDATE_STOP_REASON(stats, THRESHOLD);
+   }
+
+   spin_unlock(stats-lock);
+
if (reqs  0) {
list_add(req-queuelist, mq-mqrq_cur-packed_list);
mq-mqrq_cur-packed_num = ++reqs;
diff --git a/drivers/mmc/core/bus.c b

[PATCH v2 0/1] block: Add test-iosched scheduler

2012-06-06 Thread Maya Erez
The test scheduler allows testing a block device by dispatching
specific requests according to the test case and declare PASS/FAIL
according to the requests completion error code

Changes in v2:
- Export test-iosched functionality to allow definition of the block device
  tests under the block device layer
- Add registration of block device tests utilities

Maya Erez (1):
  block: Add test-iosched scheduler

 Documentation/block/test-iosched.txt |   39 ++
 block/Kconfig.iosched|8 +
 block/Makefile   |1 +
 block/blk-core.c |3 +-
 block/test-iosched.c | 1025 ++
 include/linux/test-iosched.h |  218 +++
 6 files changed, 1292 insertions(+), 2 deletions(-)
 create mode 100644 Documentation/block/test-iosched.txt
 create mode 100644 block/test-iosched.c
 create mode 100644 include/linux/test-iosched.h

--
1.7.3.3
-- 
Sent by a consultant of the Qualcomm Innovation Center, Inc.
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum.
--
To unsubscribe from this list: send the line unsubscribe linux-mmc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v2 1/1] block: Add test-iosched scheduler

2012-06-06 Thread Maya Erez
The test scheduler allows testing a block device by dispatching
specific requests according to the test case and declare PASS/FAIL
according to the requests completion error code

Signed-off-by: Maya Erez me...@codeaurora.org

diff --git a/Documentation/block/test-iosched.txt 
b/Documentation/block/test-iosched.txt
new file mode 100644
index 000..75d8134
--- /dev/null
+++ b/Documentation/block/test-iosched.txt
@@ -0,0 +1,39 @@
+Test IO scheduler
+==
+
+The test scheduler allows testing a block device by dispatching
+specific requests according to the test case and declare PASS/FAIL
+according to the requests completion error code.
+
+The test IO scheduler implements the no-op scheduler operations, and uses
+them in order to dispatch the non-test requests when no test is running.
+This will allow to keep a normal FS operation in parallel to the test
+capability.
+The test IO scheduler keeps two different queues, one for real-world requests
+(inserted by the FS) and the other for the test requests.
+The test IO scheduler chooses the queue for dispatch requests according to the
+test state (IDLE/RUNNING).
+
+the test IO scheduler is compiled by default as a dynamic module and enabled
+only if CONFIG_DEBUG_FS is defined.
+
+Each block device test utility that would like to use the test-iosched test
+services, should register as a blk_dev_test_type and supply an init and exit
+callbacks. Those callback are called upon selection (or removal) of the
+test-iosched as the active scheduler. From that point the block device test
+can start a test and supply its own callbacks for preparing, running, result
+checking and cleanup of the test.
+
+Each test is exposed via debugfs and can be triggered by writing to
+the debugfs file. In order to add a new test one should expose a new debugfs
+file for the new test.
+
+Selecting IO schedulers
+---
+Refer to Documentation/block/switching-sched.txt for information on
+selecting an io scheduler on a per-device basis.
+
+
+May 10 2012, maya Erez me...@codeaurora.org
+
+
diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
index 421bef9..34a1f9e 100644
--- a/block/Kconfig.iosched
+++ b/block/Kconfig.iosched
@@ -12,6 +12,14 @@ config IOSCHED_NOOP
  that do their own scheduling and require only minimal assistance from
  the kernel.

+config IOSCHED_TEST
+   tristate "Test I/O scheduler"
+   depends on DEBUG_FS
+   default m
+   ---help---
+ The test I/O scheduler is a duplicate of the noop scheduler with
+ test ability.
+
 config IOSCHED_DEADLINE
	tristate "Deadline I/O scheduler"
	default y
diff --git a/block/Makefile b/block/Makefile
index 39b76ba..436b220 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_BLK_DEV_THROTTLING)  += blk-throttle.o
 obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o
 obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o
 obj-$(CONFIG_IOSCHED_CFQ)  += cfq-iosched.o
+obj-$(CONFIG_IOSCHED_TEST) += test-iosched.o

 obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o
 obj-$(CONFIG_BLK_DEV_INTEGRITY)+= blk-integrity.o
diff --git a/block/blk-core.c b/block/blk-core.c
index 3c923a7..f3c3b9e 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1072,8 +1072,6 @@ struct request *blk_get_request(struct request_queue *q, 
int rw, gfp_t gfp_mask)
 {
struct request *rq;

-   BUG_ON(rw != READ && rw != WRITE);
-
	spin_lock_irq(q->queue_lock);
	if (gfp_mask & __GFP_WAIT)
	rq = get_request_wait(q, rw, NULL);
@@ -1406,6 +1404,7 @@ void init_request_from_bio(struct request *req, struct 
bio *bio)
req-ioprio = bio_prio(bio);
blk_rq_bio_prep(req-q, req, bio);
 }
+EXPORT_SYMBOL(init_request_from_bio);

 void blk_queue_bio(struct request_queue *q, struct bio *bio)
 {
diff --git a/block/test-iosched.c b/block/test-iosched.c
new file mode 100644
index 000..942e2c5
--- /dev/null
+++ b/block/test-iosched.c
@@ -0,0 +1,1025 @@
+
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * The test scheduler allows testing the block device by dispatching
+ * specific requests according to the test case and declares PASS/FAIL
+ * according to the requests' completion error code.
+ * Each test is exposed via debugfs and can be triggered by writing to
+ * the debugfs file.
+ *
+ */
+
+/* elevator test iosched */
+#include <linux/blkdev.h>
+#include <linux/elevator.h>
+#include <linux/bio.h>
+#include <linux/module.h>

[PATCH v2 0/1] mmc: block: Add write packing control

2012-06-01 Thread Maya Erez
Our experiments showed that the write packing causes degradation of the read
throughput, in parallel read and write operations.
Since the read latency is critical for user experience we added a write packing 
control
mechanism that disables the write packing in case of read requests.
This will ensure that read requests latency is not increased due to long write 
packed commands.

The trigger for enabling the write packing is managing to pack several write 
requests.
The number of potential packed requests that will trigger the packing can be 
configured via sysfs.
The trigger for disabling the write packing is a fetch of a read request.

This patch is dependent on the following patches:
  [PATCH v6 1/3] mmc: core: Add packed command feature of eMMC4.5
  [PATCH v6 2/3] mmc: core: Support packed write command for eMMC4.5 device

Changes in v2:
- Move the attribute for setting the packing enabling trigger to the block 
device
- Add documentation of the new attribute

Maya Erez (1):
  mmc: block: Add write packing control

 Documentation/mmc/mmc-dev-attrs.txt |   17 ++
 drivers/mmc/card/block.c|  100 ++-
 drivers/mmc/card/queue.c|8 +++
 drivers/mmc/card/queue.h|3 +
 include/linux/mmc/host.h|1 +
 5 files changed, 128 insertions(+), 1 deletions(-)

--
1.7.3.3
-- 
Sent by a consultant of the Qualcomm Innovation Center, Inc.
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum.
--
To unsubscribe from this list: send the line unsubscribe linux-mmc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[RFC/PATCH] mmc: block: Add write packing control

2012-05-20 Thread Maya Erez
Our experiments showed that the write packing causes degradation of
the read throughput, in parallel read and write operations.
Since the read latency is critical for user experience we added a write
packing control mechanism that disables the write packing in case of read
requests.
This will ensure that read requests latency is not increased due to long write
packed commands.

The trigger for enabling the write packing is managing to pack several
write requests. The number of potential packed requests that will trigger
the packing can be configured via sysfs.
The trigger for disabling the write packing is a fetch of a read request.

This patch is dependent on the following patches:
  [PATCH v6 1/3] mmc: core: Add packed command feature of eMMC4.5
  [PATCH v6 2/3] mmc: core: Support packed write command for eMMC4.5 device

Maya Erez (1):
  mmc: block: Add write packing control

 drivers/mmc/card/block.c |   44 
 drivers/mmc/card/queue.c |2 ++
 drivers/mmc/card/queue.h |2 ++
 drivers/mmc/core/host.c  |   45 +
 include/linux/mmc/host.h |5 -
 5 files changed, 97 insertions(+), 1 deletions(-)

--
1.7.3.3
-- 
Sent by a consultant of the Qualcomm Innovation Center, Inc.
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum.
--
To unsubscribe from this list: send the line unsubscribe linux-mmc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[RFC/PATCH] mmc: block: Add write packing control

2012-05-20 Thread Maya Erez
The write packing control will ensure that read requests latency is
not increased due to long write packed commands.

The trigger for enabling the write packing is managing to pack several
write requests. The number of potential packed requests that will trigger
the packing can be configured via sysfs.
The trigger for disabling the write packing is a fetch of a read request.

Signed-off-by: Maya Erez me...@codeaurora.org
---
 drivers/mmc/card/block.c |   44 
 drivers/mmc/card/queue.c |2 ++
 drivers/mmc/card/queue.h |2 ++
 drivers/mmc/core/host.c  |   45 +
 include/linux/mmc/host.h |5 -
 5 files changed, 97 insertions(+), 1 deletions(-)

diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 2785fd4..c4f7573 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1313,6 +1313,43 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req 
*mqrq,
mmc_queue_bounce_pre(mqrq);
 }

+static void mmc_blk_write_packing_control(struct mmc_queue *mq,
+ struct request *req)
+{
+   struct mmc_host *host = mq->card->host;
+   int data_dir = 0;
+
+   if (!(host->caps2 & MMC_CAP2_PACKED_WR))
+   return;
+
+   if (!(host->caps2 & MMC_CAP2_PACKED_WR_CONTROL)) {
+   mq->write_packing_enabled = 1;
+   return;
+   }
+
+   if (!req || (req && (req->cmd_flags & REQ_FLUSH))) {
+   if (mq->num_of_potential_packed_wr_reqs >
+   host->num_wr_reqs_to_start_packing)
+   mq->write_packing_enabled = 1;
+   return;
+   }
+
+   data_dir = rq_data_dir(req);
+
+   if (data_dir == READ) {
+   mq->num_of_potential_packed_wr_reqs = 0;
+   mq->write_packing_enabled = 0;
+   return;
+   } else if (data_dir == WRITE) {
+   mq->num_of_potential_packed_wr_reqs++;
+   }
+
+   if (mq->num_of_potential_packed_wr_reqs >
+   host->num_wr_reqs_to_start_packing)
+   mq->write_packing_enabled = 1;
+
+}
+
 static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
 {
struct request_queue *q = mq-queue;
@@ -1332,6 +1369,9 @@ static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, 
struct request *req)
!card-ext_csd.packed_event_en)
goto no_packed;

+   if (!mq->write_packing_enabled)
+   goto no_packed;
+
	if ((rq_data_dir(cur) == WRITE) &&
	(card->host->caps2 & MMC_CAP2_PACKED_WR))
	max_packed_rw = card->ext_csd.max_packed_writes;
@@ -1396,6 +1436,8 @@ static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, 
struct request *req)
break;
}

+   if (rq_data_dir(next) == WRITE)
+   mq->num_of_potential_packed_wr_reqs++;
list_add_tail(next-queuelist, mq-mqrq_cur-packed_list);
cur = next;
reqs++;
@@ -1780,6 +1822,8 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct 
request *req)
goto out;
}

+   mmc_blk_write_packing_control(mq, req);
+
if (req  req-cmd_flags  REQ_DISCARD) {
/* complete ongoing async transfer before issuing discard */
if (card-host-areq)
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 165d85a..2d8dcd9 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -181,6 +181,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card 
*card,
	mq->mqrq_cur = mqrq_cur;
	mq->mqrq_prev = mqrq_prev;
	mq->queue->queuedata = mq;
+   mq->write_packing_enabled = 0;
+   mq->num_of_potential_packed_wr_reqs = 0;

blk_queue_prep_rq(mq-queue, mmc_prep_request);
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq-queue);
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index d761bf1..c5fd2ac 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -44,6 +44,8 @@ struct mmc_queue {
struct mmc_queue_reqmqrq[2];
struct mmc_queue_req*mqrq_cur;
struct mmc_queue_req*mqrq_prev;
+   int write_packing_enabled;
+   int num_of_potential_packed_wr_reqs;
 };

 extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 91c84c7..d7e3552 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -345,6 +345,8 @@ struct mmc_host *mmc_alloc_host(int extra, struct device 
*dev)
host-max_blk_size = 512;
host-max_blk_count = PAGE_CACHE_SIZE / 512;

+   host->num_wr_reqs_to_start_packing = 17;
+
return host;

 free:
@@ -353,6 +355,49 @@ free:
 }

 EXPORT_SYMBOL(mmc_alloc_host

[PATCH v1 0/1] mmc: block: Add write packing control

2012-05-20 Thread Maya Erez
Our experiments showed that the write packing causes degradation of the read 
throughput, in parallel read and write operations.
Since the read latency is critical for user experience we added a write packing 
control 
mechanism that disables the write packing in case of read requests.
This will ensure that read requests latency is not increased due to long write 
packed commands.

The trigger for enabling the write packing is managing to pack several write 
requests. 
The number of potential packed requests that will trigger the packing can be 
configured via sysfs.
The trigger for disabling the write packing is a fetch of a read request.

This patch is dependent on the following patches:
  [PATCH v6 1/3] mmc: core: Add packed command feature of eMMC4.5
  [PATCH v6 2/3] mmc: core: Support packed write command for eMMC4.5 device

Maya Erez (1):
  mmc: block: Add write packing control

 drivers/mmc/card/block.c |   44 ++
 drivers/mmc/card/queue.c |2 +
 drivers/mmc/card/queue.h |2 +
 drivers/mmc/core/host.c  |   52 ++
 include/linux/mmc/host.h |5 +++-
 5 files changed, 104 insertions(+), 1 deletions(-)

-- 
1.7.3.3
-- 
Sent by a consultant of the Qualcomm Innovation Center, Inc.
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum.

--
To unsubscribe from this list: send the line unsubscribe linux-mmc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v1 1/1] mmc: block: Add write packing control

2012-05-20 Thread Maya Erez
The write packing control will ensure that read requests latency is
not increased due to long write packed commands.

The trigger for enabling the write packing is managing to pack several
write requests. The number of potential packed requests that will trigger
the packing can be configured via sysfs.
The trigger for disabling the write packing is a fetch of a read request.

Signed-off-by: Maya Erez me...@codeaurora.org
---
 drivers/mmc/card/block.c |   44 ++
 drivers/mmc/card/queue.c |2 +
 drivers/mmc/card/queue.h |2 +
 drivers/mmc/core/host.c  |   52 ++
 include/linux/mmc/host.h |5 +++-
 5 files changed, 104 insertions(+), 1 deletions(-)

diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 2785fd4..c4f7573 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1313,6 +1313,43 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req 
*mqrq,
mmc_queue_bounce_pre(mqrq);
 }
 
+static void mmc_blk_write_packing_control(struct mmc_queue *mq,
+ struct request *req)
+{
+   struct mmc_host *host = mq->card->host;
+   int data_dir = 0;
+
+   if (!(host->caps2 & MMC_CAP2_PACKED_WR))
+   return;
+
+   if (!(host->caps2 & MMC_CAP2_PACKED_WR_CONTROL)) {
+   mq->write_packing_enabled = 1;
+   return;
+   }
+
+   if (!req || (req && (req->cmd_flags & REQ_FLUSH))) {
+   if (mq->num_of_potential_packed_wr_reqs >
+   host->num_wr_reqs_to_start_packing)
+   mq->write_packing_enabled = 1;
+   return;
+   }
+
+   data_dir = rq_data_dir(req);
+
+   if (data_dir == READ) {
+   mq->num_of_potential_packed_wr_reqs = 0;
+   mq->write_packing_enabled = 0;
+   return;
+   } else if (data_dir == WRITE) {
+   mq->num_of_potential_packed_wr_reqs++;
+   }
+
+   if (mq->num_of_potential_packed_wr_reqs >
+   host->num_wr_reqs_to_start_packing)
+   mq->write_packing_enabled = 1;
+
+}
+
 static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
 {
struct request_queue *q = mq-queue;
@@ -1332,6 +1369,9 @@ static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, 
struct request *req)
!card-ext_csd.packed_event_en)
goto no_packed;
 
+   if (!mq->write_packing_enabled)
+   goto no_packed;
+
	if ((rq_data_dir(cur) == WRITE) &&
	(card->host->caps2 & MMC_CAP2_PACKED_WR))
	max_packed_rw = card->ext_csd.max_packed_writes;
@@ -1396,6 +1436,8 @@ static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, 
struct request *req)
break;
}
 
+   if (rq_data_dir(next) == WRITE)
+   mq->num_of_potential_packed_wr_reqs++;
list_add_tail(next-queuelist, mq-mqrq_cur-packed_list);
cur = next;
reqs++;
@@ -1780,6 +1822,8 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct 
request *req)
goto out;
}
 
+   mmc_blk_write_packing_control(mq, req);
+
if (req  req-cmd_flags  REQ_DISCARD) {
/* complete ongoing async transfer before issuing discard */
if (card-host-areq)
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 165d85a..2d8dcd9 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -181,6 +181,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card 
*card,
	mq->mqrq_cur = mqrq_cur;
	mq->mqrq_prev = mqrq_prev;
	mq->queue->queuedata = mq;
+   mq->write_packing_enabled = 0;
+   mq->num_of_potential_packed_wr_reqs = 0;
 
blk_queue_prep_rq(mq-queue, mmc_prep_request);
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq-queue);
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index d761bf1..c5fd2ac 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -44,6 +44,8 @@ struct mmc_queue {
struct mmc_queue_reqmqrq[2];
struct mmc_queue_req*mqrq_cur;
struct mmc_queue_req*mqrq_prev;
+   int write_packing_enabled;
+   int num_of_potential_packed_wr_reqs;
 };
 
 extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 91c84c7..9cf1847 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -345,6 +345,8 @@ struct mmc_host *mmc_alloc_host(int extra, struct device 
*dev)
host-max_blk_size = 512;
host-max_blk_count = PAGE_CACHE_SIZE / 512;
 
+   host->num_wr_reqs_to_start_packing = 17;
+
return host;
 
 free:
@@ -353,6 +355,49 @@ free:
 }
 
 EXPORT_SYMBOL(mmc_alloc_host

[PATCH v1 0/1] block: Add test-iosched scheduler

2012-05-20 Thread Maya Erez
The test scheduler allows testing a block device by dispatching
specific requests according to the test case and declares PASS/FAIL
according to the requests' completion error code

Maya Erez (1):
  block: Add test-iosched scheduler

 Documentation/block/test-iosched.txt |   32 ++
 block/Kconfig.iosched|8 +
 block/Makefile   |1 +
 block/blk-core.c |4 +-
 block/test-iosched.c |  966 ++
 5 files changed, 1010 insertions(+), 1 deletions(-)
 create mode 100644 Documentation/block/test-iosched.txt
 create mode 100644 block/test-iosched.c

-- 
1.7.3.3
-- 
Sent by a consultant of the Qualcomm Innovation Center, Inc.
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum.

--
To unsubscribe from this list: send the line unsubscribe linux-mmc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v1 1/1] block: Add test-iosched scheduler

2012-05-20 Thread Maya Erez
The test scheduler allows testing a block device by dispatching
specific requests according to the test case and declares PASS/FAIL
according to the requests' completion error code

Signed-off-by: Maya Erez me...@codeaurora.org
---
 Documentation/block/test-iosched.txt |   32 ++
 block/Kconfig.iosched|8 +
 block/Makefile   |1 +
 block/blk-core.c |4 +-
 block/test-iosched.c |  966 ++
 5 files changed, 1010 insertions(+), 1 deletions(-)
 create mode 100644 Documentation/block/test-iosched.txt
 create mode 100644 block/test-iosched.c

diff --git a/Documentation/block/test-iosched.txt 
b/Documentation/block/test-iosched.txt
new file mode 100644
index 000..b86eb8d
--- /dev/null
+++ b/Documentation/block/test-iosched.txt
@@ -0,0 +1,32 @@
+Test IO scheduler
+=================
+
+The test scheduler allows testing a block device by dispatching
+specific requests according to the test case and declares PASS/FAIL
+according to the requests' completion error code.
+
+Each test is exposed via debugfs and can be triggered by writing to
+the debugfs file. In order to add a new test one should expose a new debugfs
+file for the new test.
+
+The test IO scheduler includes the no-op scheduler operations, and uses
+them in order to dispatch the non-test requests when no test is running.
+This will allow to keep a normal FS operation in parallel to the test
+capability.
+The test IO scheduler keeps two different queues, one for non-test requests
+(inserted by the FS) and the other for test requests.
+The test IO scheduler chooses the queue for dispatch requests according to the
+test state (IDLE/RUNNING).
+
+The test IO scheduler is compiled by default as a dynamic module and enabled
+only if CONFIG_DEBUG_FS is defined.
+
+Selecting IO schedulers
+-----------------------
+Refer to Documentation/block/switching-sched.txt for information on
+selecting an io scheduler on a per-device basis.
+
+
+May 10 2012, Maya Erez me...@codeaurora.org
+
+
diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
index 421bef9..34a1f9e 100644
--- a/block/Kconfig.iosched
+++ b/block/Kconfig.iosched
@@ -12,6 +12,14 @@ config IOSCHED_NOOP
  that do their own scheduling and require only minimal assistance from
  the kernel.
 
+config IOSCHED_TEST
+   tristate "Test I/O scheduler"
+   depends on DEBUG_FS
+   default m
+   ---help---
+ The test I/O scheduler is a duplicate of the noop scheduler with
+ test ability.
+
 config IOSCHED_DEADLINE
	tristate "Deadline I/O scheduler"
	default y
diff --git a/block/Makefile b/block/Makefile
index 39b76ba..436b220 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_BLK_DEV_THROTTLING)  += blk-throttle.o
 obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o
 obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o
 obj-$(CONFIG_IOSCHED_CFQ)  += cfq-iosched.o
+obj-$(CONFIG_IOSCHED_TEST) += test-iosched.o
 
 obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o
 obj-$(CONFIG_BLK_DEV_INTEGRITY)+= blk-integrity.o
diff --git a/block/blk-core.c b/block/blk-core.c
index 3c923a7..a789a98 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1072,7 +1072,8 @@ struct request *blk_get_request(struct request_queue *q, 
int rw, gfp_t gfp_mask)
 {
struct request *rq;
 
-   BUG_ON(rw != READ && rw != WRITE);
+   if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
+   return NULL;
 
	spin_lock_irq(q->queue_lock);
	if (gfp_mask & __GFP_WAIT)
@@ -1406,6 +1407,7 @@ void init_request_from_bio(struct request *req, struct 
bio *bio)
req-ioprio = bio_prio(bio);
blk_rq_bio_prep(req-q, req, bio);
 }
+EXPORT_SYMBOL(init_request_from_bio);
 
 void blk_queue_bio(struct request_queue *q, struct bio *bio)
 {
diff --git a/block/test-iosched.c b/block/test-iosched.c
new file mode 100644
index 000..e856a51
--- /dev/null
+++ b/block/test-iosched.c
@@ -0,0 +1,966 @@
+
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * The test scheduler allows testing the block device by dispatching
+ * specific requests according to the test case and declares PASS/FAIL
+ * according to the requests' completion error code.
+ * Each test is exposed via debugfs and can be triggered by writing to
+ * the debugfs file.
+ *
+ */
+
+/*
+ * elevator test iosched
+ */
+#include <linux/blkdev.h>
+#include <linux/elevator.h>