From: Tien Fong Chee <tien.fong.c...@intel.com>

Add the image loader hook required by the NAND SPL framework to the
full Denali NAND driver. This allows SPL to use the full Denali NAND
driver instead of the reduced SPL-only variant.

Signed-off-by: Tien Fong Chee <tien.fong.c...@intel.com>
Signed-off-by: Jit Loon Lim <jit.loon....@intel.com>
---
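A minimal usage sketch of the hook added here, for reference (the
in-tree caller is the generic SPL NAND code in common/spl/spl_nand.c);
the flash offset, image size and load address below are illustrative
placeholders only, not values defined by this driver:

    #include <common.h>
    #include <nand.h>

    static int example_load_payload(void)
    {
            const u32 payload_offs = 0x100000;    /* hypothetical NAND offset */
            const u32 payload_size = 0x40000;     /* hypothetical image size */
            void *load_addr = (void *)0x82000000; /* hypothetical RAM address */
            int ret;

            nand_init();

            /*
             * The offset need not be page aligned; the driver bounces the
             * leading partial page through an internal buffer.
             */
            ret = nand_spl_load_image(payload_offs, payload_size, load_addr);

            nand_deselect();
            return ret;
    }
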
 drivers/mtd/nand/raw/denali.c | 60 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 60 insertions(+)

diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c
index c827f80281..499b4acc6e 100644
--- a/drivers/mtd/nand/raw/denali.c
+++ b/drivers/mtd/nand/raw/denali.c
@@ -7,7 +7,9 @@
 
 #include <common.h>
 #include <dm.h>
+#include <hang.h>
 #include <malloc.h>
+#include <memalign.h>
 #include <nand.h>
 #include <asm/cache.h>
 #include <asm/dma-mapping.h>
@@ -1374,3 +1376,61 @@ free_buf:
 
        return ret;
 }
+
+#ifdef CONFIG_SPL_BUILD
+int nand_spl_load_image(u32 offset, u32 len, void *dst)
+{
+       size_t count = len, actual = 0, page_align_overhead = 0;
+       u32 page_align_offset = 0;
+       u8 *page_buffer;
+       int err = 0;
+       struct mtd_info *mtd;
+
+       if (!len || !dst)
+               return -EINVAL;
+
+       mtd = get_nand_dev_by_index(nand_curr_device);
+       if (!mtd)
+               hang();
+
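+       /*
+        * Non page-aligned start: bounce the first page through a local
+        * buffer, then read the rest page-aligned directly into dst.
+        */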
+       if ((offset & (mtd->writesize - 1)) != 0) {
+               page_buffer = malloc_cache_aligned(mtd->writesize);
+               if (!page_buffer) {
+                       debug("Error: allocating buffer\n");
+                       return -ENOMEM;
+               }
+
+               page_align_overhead = offset % mtd->writesize;
+               page_align_offset = (offset / mtd->writesize) * mtd->writesize;
+               count = mtd->writesize;
+
+               err = nand_read_skip_bad(mtd, page_align_offset, &count,
+                                        &actual, mtd->size, page_buffer);
+               if (err) {
+                       free(page_buffer);
+                       return err;
+               }
+
+               count -= page_align_overhead;
+               count = min((size_t)len, count);
+               memcpy(dst, page_buffer + page_align_overhead, count);
+               free(page_buffer);
+
+               len -= count;
+               if (!len)
+                       return err;
+
+               offset += count;
+               dst += count;
+               count = len;
+       }
+
+       return nand_read_skip_bad(mtd, offset, &count, &actual, mtd->size, dst);
+}
+
+void nand_deselect(void) {}
+#endif
-- 
2.26.2
