The flow of the continuous read operation is: first, start with the
page read command, and the 1st page of data will be read into the
cache after the read latency tRD. Second, issue the Read From Cache
commands (03h/0Bh/3Bh/6Bh/BBh/EBh) to read out the data from the
cache continuously. After all the data is read out, the host should
pull CS# high to terminate this continuous read operation and wait
tRST for the NAND device to reset its read operation.

Continuous read is used when reading multiple pages (i.e. more than
one page size). The column address is a "don't care" in this
operation, since the data output for each page always starts from
byte 0 and a full page of data should be read out for each page.

On the other hand, since continuous read mode can only read entire
pages of data and cannot read the OOB data, dynamic mode switching is
added in spinand_continuous_read(): continuous read mode is enabled
only for the duration of the read and disabled afterwards, so that
normal write and erase operations are not affected.

The performance of continuous read mode is as follows: set the flash
to QSPI mode, run 25MHz direct-mapping mode on the SPI bus, and use
the MTD test module to measure the performance of continuous reads.

Signed-off-by: Leo Yu <liangyany...@gmail.com>
---
 drivers/mtd/nand/spi/core.c | 120 +++++++++++++++++++++++++++++++++++-
 1 file changed, 119 insertions(+), 1 deletion(-)

diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index 9d628f6f26..b28962f921 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -269,7 +269,10 @@ static int spinand_read_from_cache_op(struct 
spinand_device *spinand,
        u16 column = 0;
        int ret;

-       if (req->datalen) {
+       if (spinand->use_continuous_read) {
+               buf = req->databuf.in;
+               nbytes = req->datalen;
+       } else if (req->datalen) {
                adjreq.datalen = nanddev_page_size(nand);
                adjreq.dataoffs = 0;
                adjreq.databuf.in = spinand->databuf;
@@ -312,6 +315,9 @@ static int spinand_read_from_cache_op(struct spinand_device 
*spinand,
                op.addr.val += op.data.nbytes;
        }

+       if (spinand->use_continuous_read)
+               return 0;
+
        if (req->datalen)
                memcpy(req->databuf.in, spinand->databuf + req->dataoffs,
                       req->datalen);
@@ -577,6 +583,105 @@ static int spinand_write_page(struct spinand_device 
*spinand,
        return ret;
 }

+static int spinand_continuous_read(struct mtd_info *mtd, loff_t from,
+                                  struct mtd_oob_ops *req,
+                                  struct nand_io_iter *iter)
+{
+       struct spinand_device *spinand = mtd_to_spinand(mtd);
+       struct nand_device *nand = mtd_to_nanddev(mtd);
+       int ret = 0;
+
+       bool enable_cont_read = true;
+       bool enable_ecc = false;
+
+       /*
+        * Since the continuous read mode can only read the entire page of data
+        * and cannot read the oob data, therefore, only ECC-Free SPI-NAND 
support
+        * continuous read mode now.
+        */
+       iter->req.mode = MTD_OPS_RAW;
+       iter->req.ooblen = 0;
+
+       req->retlen = 0;
+
+       /* Read the first unaligned page with conventional read */
+       if (from & (nanddev_page_size(nand) - 1)) {
+               pr_debug("address not aligned\n");
+
+               iter->req.databuf.in = req->datbuf;
+               iter->req.dataoffs = nanddev_offs_to_pos(nand, from, 
&iter->req.pos);
+               iter->req.datalen = nanddev_page_size(nand) - 
iter->req.dataoffs;
+
+               schedule();
+               ret = spinand_select_target(spinand, iter->req.pos.target);
+               if (ret)
+                       return ret;
+
+               spinand_read_page(spinand, &iter->req, enable_ecc);
+               if (ret)
+                       return ret;
+
+               req->retlen += iter->req.datalen;
+       }
+
+       iter->req.dataoffs = nanddev_offs_to_pos(nand, from + req->retlen, 
&iter->req.pos);
+       iter->req.databuf.in = req->datbuf + req->retlen;
+       iter->req.datalen = req->totallen - req->retlen;
+
+       schedule();
+       ret = spinand_continuous_read_enable(spinand, enable_cont_read);
+       if (ret)
+               return ret;
+
+       ret = spinand_select_target(spinand, iter->req.pos.target);
+       if (ret) {
+               req->retlen = 0;
+               goto continuous_read_error;
+       }
+
+       /*
+        * The continuous read operation including: firstly, starting with the
+        * page read command and the 1 st page data will be read into the cache
+        * after the read latency tRD. Secondly, Issuing the Read From Cache
+        * commands (03h/0Bh/3Bh/6Bh/BBh/EBh) to read out the data from cache
+        * continuously.
+        *
+        * The cache is divided into two halves, while one half of the cache is
+        * outputting the data, the other half will be loaded for the new data;
+        * therefore, the host can read out the data continuously from page to
+        * page. Multiple of Read From Cache commands can be issued in one
+        * continuous read operation, each Read From Cache command is required
+        * to read multiple 4-byte data exactly; otherwise, the data output will
+        * be out of sequence from one Read From Cache command to another Read
+        * From Cache command.
+        *
+        * After all the data is read out, the host should pull CS# high to
+        * terminate this continuous read operation and wait a 6us of tRST for
+        * the NAND device resets read operation. The data output for each page
+        * will always start from byte 0 and a full page data should be read out
+        * for each page.
+        */
+       ret = spinand_read_page(spinand, &iter->req, enable_ecc);
+       if (ret) {
+               req->retlen = 0;
+               goto continuous_read_error;
+       }
+
+       ret = spinand_reset_op(spinand);
+       if (ret) {
+               req->retlen = 0;
+               goto continuous_read_error;
+       }
+
+       req->retlen += iter->req.datalen;
+
+continuous_read_error:
+       enable_cont_read = false;
+       ret = spinand_continuous_read_enable(spinand, enable_cont_read);
+
+       return ret;
+}
+
 static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
                            struct mtd_oob_ops *ops)
 {
@@ -594,6 +699,18 @@ static int spinand_mtd_read(struct mtd_info *mtd, loff_t 
from,
 #ifndef __UBOOT__
        mutex_lock(&spinand->lock);
 #endif
+       /*
+        * If the device supports continuous read mode and the read length is 
greater
+        * than one page size, the device will enter the continuous read mode. 
This mode
+        * helps avoiding issuing a page read command and read from cache 
command
+        * again, and improves the performance of reading continuous pages.
+        */
+       if ((spinand->flags & SPINAND_HAS_CONT_READ_BIT) &&
+           (ops->totallen > nanddev_page_size(nand))) {
+               ret = spinand_continuous_read(mtd, from, ops, &iter);
+
+               goto continuous_read_finish;
+       }

        nanddev_io_for_each_page(nand, from, ops, &iter) {
                schedule();
@@ -622,6 +739,7 @@ static int spinand_mtd_read(struct mtd_info *mtd, loff_t 
from,
                ops->oobretlen += iter.req.ooblen;
        }

+continuous_read_finish:
 #ifndef __UBOOT__
        mutex_unlock(&spinand->lock);
 #endif
--
2.17.1

Reply via email to