Make use of the spi-mem direct mapping API to let advanced controllers
optimize read/write operations when they support direct mapping.

This is a port of linux patch 981d1aa0697ce1393e00933f154d181e965703d0
created by Boris Brezillon <bbrezillon@kernel.org>.

Signed-off-by: Mikhail Kshevetskiy <mikhail.kshevetskiy@iopsys.eu>
---
 drivers/mtd/nand/spi/core.c | 185 +++++++++++++++++-------------------
 include/linux/mtd/spinand.h |   7 ++
 2 files changed, 95 insertions(+), 97 deletions(-)

diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index f5ddfbf4b83..ea00cd7dcf0 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -41,21 +41,6 @@ struct spinand_plat {
 /* SPI NAND index visible in MTD names */
 static int spi_nand_idx;
 
-static void spinand_cache_op_adjust_colum(struct spinand_device *spinand,
-                                         const struct nand_page_io_req *req,
-                                         u16 *column)
-{
-       struct nand_device *nand = spinand_to_nand(spinand);
-       unsigned int shift;
-
-       if (nand->memorg.planes_per_lun < 2)
-               return;
-
-       /* The plane number is passed in MSB just above the column address */
-       shift = fls(nand->memorg.pagesize);
-       *column |= req->pos.plane << shift;
-}
-
 static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
 {
        struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg,
@@ -249,27 +234,21 @@ static int spinand_load_page_op(struct spinand_device *spinand,
 static int spinand_read_from_cache_op(struct spinand_device *spinand,
                                      const struct nand_page_io_req *req)
 {
-       struct spi_mem_op op = *spinand->op_templates.read_cache;
        struct nand_device *nand = spinand_to_nand(spinand);
        struct mtd_info *mtd = nanddev_to_mtd(nand);
-       struct nand_page_io_req adjreq = *req;
+       struct spi_mem_dirmap_desc *rdesc;
        unsigned int nbytes = 0;
        void *buf = NULL;
        u16 column = 0;
-       int ret;
+       ssize_t ret;
 
        if (req->datalen) {
-               adjreq.datalen = nanddev_page_size(nand);
-               adjreq.dataoffs = 0;
-               adjreq.databuf.in = spinand->databuf;
                buf = spinand->databuf;
-               nbytes = adjreq.datalen;
+               nbytes = nanddev_page_size(nand);
+               column = 0;
        }
 
        if (req->ooblen) {
-               adjreq.ooblen = nanddev_per_page_oobsize(nand);
-               adjreq.ooboffs = 0;
-               adjreq.oobbuf.in = spinand->oobbuf;
                nbytes += nanddev_per_page_oobsize(nand);
                if (!buf) {
                        buf = spinand->oobbuf;
@@ -277,28 +256,19 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand,
                }
        }
 
-       spinand_cache_op_adjust_colum(spinand, &adjreq, &column);
-       op.addr.val = column;
+       rdesc = spinand->dirmaps[req->pos.plane].rdesc;
 
-       /*
-        * Some controllers are limited in term of max RX data size. In this
-        * case, just repeat the READ_CACHE operation after updating the
-        * column.
-        */
        while (nbytes) {
-               op.data.buf.in = buf;
-               op.data.nbytes = nbytes;
-               ret = spi_mem_adjust_op_size(spinand->slave, &op);
-               if (ret)
+               ret = spi_mem_dirmap_read(rdesc, column, nbytes, buf);
+               if (ret < 0)
                        return ret;
 
-               ret = spi_mem_exec_op(spinand->slave, &op);
-               if (ret)
-                       return ret;
+               if (!ret || ret > nbytes)
+                       return -EIO;
 
-               buf += op.data.nbytes;
-               nbytes -= op.data.nbytes;
-               op.addr.val += op.data.nbytes;
+               nbytes -= ret;
+               column += ret;
+               buf += ret;
        }
 
        if (req->datalen)
@@ -322,14 +292,12 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand,
 static int spinand_write_to_cache_op(struct spinand_device *spinand,
                                     const struct nand_page_io_req *req)
 {
-       struct spi_mem_op op = *spinand->op_templates.write_cache;
        struct nand_device *nand = spinand_to_nand(spinand);
        struct mtd_info *mtd = nanddev_to_mtd(nand);
-       struct nand_page_io_req adjreq = *req;
-       unsigned int nbytes = 0;
-       void *buf = NULL;
-       u16 column = 0;
-       int ret;
+       struct spi_mem_dirmap_desc *wdesc;
+       unsigned int nbytes, column = 0;
+       void *buf = spinand->databuf;
+       ssize_t ret;
 
        /*
         * Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset
@@ -338,19 +306,12 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
         * the data portion of the page, otherwise we might corrupt the BBM or
         * user data previously programmed in OOB area.
         */
-       memset(spinand->databuf, 0xff,
-              nanddev_page_size(nand) +
-              nanddev_per_page_oobsize(nand));
+       nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
+       memset(spinand->databuf, 0xff, nbytes);
 
-       if (req->datalen) {
+       if (req->datalen)
                memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
                       req->datalen);
-               adjreq.dataoffs = 0;
-               adjreq.datalen = nanddev_page_size(nand);
-               adjreq.databuf.out = spinand->databuf;
-               nbytes = adjreq.datalen;
-               buf = spinand->databuf;
-       }
 
        if (req->ooblen) {
                if (req->mode == MTD_OPS_AUTO_OOB)
@@ -361,52 +322,21 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
                else
                        memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
                               req->ooblen);
-
-               adjreq.ooblen = nanddev_per_page_oobsize(nand);
-               adjreq.ooboffs = 0;
-               nbytes += nanddev_per_page_oobsize(nand);
-               if (!buf) {
-                       buf = spinand->oobbuf;
-                       column = nanddev_page_size(nand);
-               }
        }
 
-       spinand_cache_op_adjust_colum(spinand, &adjreq, &column);
-
-       op = *spinand->op_templates.write_cache;
-       op.addr.val = column;
+       wdesc = spinand->dirmaps[req->pos.plane].wdesc;
 
-       /*
-        * Some controllers are limited in term of max TX data size. In this
-        * case, split the operation into one LOAD CACHE and one or more
-        * LOAD RANDOM CACHE.
-        */
        while (nbytes) {
-               op.data.buf.out = buf;
-               op.data.nbytes = nbytes;
-
-               ret = spi_mem_adjust_op_size(spinand->slave, &op);
-               if (ret)
-                       return ret;
-
-               ret = spi_mem_exec_op(spinand->slave, &op);
-               if (ret)
+               ret = spi_mem_dirmap_write(wdesc, column, nbytes, buf);
+               if (ret < 0)
                        return ret;
 
-               buf += op.data.nbytes;
-               nbytes -= op.data.nbytes;
-               op.addr.val += op.data.nbytes;
+               if (!ret || ret > nbytes)
+                       return -EIO;
 
-               /*
-                * We need to use the RANDOM LOAD CACHE operation if there's
-                * more than one iteration, because the LOAD operation resets
-                * the cache to 0xff.
-                */
-               if (nbytes) {
-                       column = op.addr.val;
-                       op = *spinand->op_templates.update_cache;
-                       op.addr.val = column;
-               }
+               nbytes -= ret;
+               column += ret;
+               buf += ret;
        }
 
        return 0;
@@ -819,6 +749,59 @@ static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs)
        return ret;
 }
 
+static int spinand_create_dirmap(struct spinand_device *spinand,
+                                unsigned int plane)
+{
+       struct nand_device *nand = spinand_to_nand(spinand);
+       struct spi_mem_dirmap_info info = {
+               .length = nanddev_page_size(nand) +
+                         nanddev_per_page_oobsize(nand),
+       };
+       struct spi_mem_dirmap_desc *desc;
+
+       /* The plane number is passed in MSB just above the column address */
+       info.offset = plane << fls(nand->memorg.pagesize);
+
+       info.op_tmpl = *spinand->op_templates.update_cache;
+       desc = spi_mem_dirmap_create(spinand->slave, &info);
+       if (IS_ERR(desc))
+               return PTR_ERR(desc);
+
+       spinand->dirmaps[plane].wdesc = desc;
+
+       info.op_tmpl = *spinand->op_templates.read_cache;
+       desc = spi_mem_dirmap_create(spinand->slave, &info);
+       if (IS_ERR(desc)) {
+               spi_mem_dirmap_destroy(spinand->dirmaps[plane].wdesc);
+               return PTR_ERR(desc);
+       }
+
+       spinand->dirmaps[plane].rdesc = desc;
+
+       return 0;
+}
+
+static int spinand_create_dirmaps(struct spinand_device *spinand)
+{
+       struct nand_device *nand = spinand_to_nand(spinand);
+       int i, ret;
+
+       spinand->dirmaps = devm_kzalloc(spinand->slave->dev,
+                                       sizeof(*spinand->dirmaps) *
+                                       nand->memorg.planes_per_lun,
+                                       GFP_KERNEL);
+       if (!spinand->dirmaps)
+               return -ENOMEM;
+
+       for (i = 0; i < nand->memorg.planes_per_lun; i++) {
+               ret = spinand_create_dirmap(spinand, i);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
 static const struct nand_ops spinand_ops = {
        .erase = spinand_erase,
        .markbad = spinand_markbad,
@@ -1116,6 +1099,14 @@ static int spinand_init(struct spinand_device *spinand)
                goto err_free_bufs;
        }
 
+       ret = spinand_create_dirmaps(spinand);
+       if (ret) {
+               dev_err(spinand->slave->dev,
+                       "Failed to create direct mappings for read/write operations (err = %d)\n",
+                       ret);
+               goto err_manuf_cleanup;
+       }
+
        /* After power up, all blocks are locked, so unlock them here. */
        for (i = 0; i < nand->memorg.ntargets; i++) {
                ret = spinand_select_target(spinand, i);
diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h
index 13b5a52f8b9..5934b7604cc 100644
--- a/include/linux/mtd/spinand.h
+++ b/include/linux/mtd/spinand.h
@@ -363,6 +363,11 @@ struct spinand_info {
                __VA_ARGS__                                             \
        }
 
+struct spinand_dirmap {
+       struct spi_mem_dirmap_desc *wdesc;
+       struct spi_mem_dirmap_desc *rdesc;
+};
+
 /**
  * struct spinand_device - SPI NAND device instance
  * @base: NAND device instance
@@ -406,6 +411,8 @@ struct spinand_device {
                const struct spi_mem_op *update_cache;
        } op_templates;
 
+       struct spinand_dirmap *dirmaps;
+
        int (*select_target)(struct spinand_device *spinand,
                             unsigned int target);
        unsigned int cur_target;
-- 
2.45.2

Reply via email to