author     Jiri Slaby <jslaby@suse.cz>  2019-02-15 10:23:48 +0100
committer  Jiri Slaby <jslaby@suse.cz>  2019-02-15 10:23:52 +0100
commit     ab98d1bdd65ad4d69c9e4715464103eab307a051 (patch)
tree       66a7dcbcdd35eaf18c57fd7bc669597136b6547c
parent     6a609f5f394356abba6ce920c53a46fac8bb93cc (diff)

mtd: spinand: Handle the case where PROGRAM LOAD does not reset the cache (bnc#1012628).
-rw-r--r--  patches.kernel.org/4.20.9-002-mtd-spinand-Handle-the-case-where-PROGRAM-LOAD.patch  105
-rw-r--r--  series.conf                                                                            1
2 files changed, 106 insertions, 0 deletions
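For context, below is a minimal standalone sketch (plain C, not the kernel driver itself; PAGE_SIZE, OOB_SIZE, program_load() and write_page() are hypothetical stand-ins) of the write path the patch switches to: fill the whole on-die cache buffer with 0xFF up front, copy in whatever data/OOB was requested, and issue a single full-page PROGRAM LOAD instead of relying on the operation to reset untouched bytes.

    /*
     * Hedged illustration only: shows the "fill the whole cache, then one
     * PROGRAM LOAD" pattern described in the commit message. Sizes and
     * helper names are assumptions, not the spinand core API.
     */
    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    #define PAGE_SIZE 2048          /* assumed main-area size  */
    #define OOB_SIZE    64          /* assumed spare-area size */

    static uint8_t cache_buf[PAGE_SIZE + OOB_SIZE];

    /* Stand-in for the SPI PROGRAM LOAD transfer (column 0, whole page). */
    static void program_load(const uint8_t *buf, size_t len)
    {
        printf("PROGRAM LOAD: %zu bytes from column 0\n", len);
    }

    static void write_page(const uint8_t *data, size_t dataoffs, size_t datalen,
                           const uint8_t *oob, size_t ooboffs, size_t ooblen)
    {
        /* Fill the whole buffer with 0xFF so bytes we do not program
         * (BBM, previously written OOB data) are left untouched even if
         * the chip does not reset its cache to 0xFF. */
        memset(cache_buf, 0xff, sizeof(cache_buf));

        if (datalen)
            memcpy(cache_buf + dataoffs, data, datalen);
        if (ooblen)
            memcpy(cache_buf + PAGE_SIZE + ooboffs, oob, ooblen);

        /* One load of the full page + OOB, regardless of what was requested. */
        program_load(cache_buf, sizeof(cache_buf));
    }

    int main(void)
    {
        uint8_t data[16] = { 0 };

        /* Program only 16 data bytes; the rest of the page stays 0xFF. */
        write_page(data, 0, sizeof(data), NULL, 0, 0);
        return 0;
    }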
diff --git a/patches.kernel.org/4.20.9-002-mtd-spinand-Handle-the-case-where-PROGRAM-LOAD.patch b/patches.kernel.org/4.20.9-002-mtd-spinand-Handle-the-case-where-PROGRAM-LOAD.patch
new file mode 100644
index 0000000000..a8a96d72a9
--- /dev/null
+++ b/patches.kernel.org/4.20.9-002-mtd-spinand-Handle-the-case-where-PROGRAM-LOAD.patch
@@ -0,0 +1,105 @@
+From: Boris Brezillon <bbrezillon@kernel.org>
+Date: Thu, 24 Jan 2019 15:20:07 +0100
+Subject: [PATCH] mtd: spinand: Handle the case where PROGRAM LOAD does not
+ reset the cache
+References: bnc#1012628
+Patch-mainline: 4.20.9
+Git-commit: 13c15e07eedf26092054c8c71f2f47edb8388310
+
+commit 13c15e07eedf26092054c8c71f2f47edb8388310 upstream.
+
+Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset
+the cache content to 0xFF (depends on vendor implementation), so we
+must fill the page cache entirely even if we only want to program the
+data portion of the page, otherwise we might corrupt the BBM or user
+data previously programmed in OOB area.
+
+Fixes: 7529df465248 ("mtd: nand: Add core infrastructure to support SPI NANDs")
+Reported-by: Stefan Roese <sr@denx.de>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Boris Brezillon <bbrezillon@kernel.org>
+Tested-by: Stefan Roese <sr@denx.de>
+Reviewed-by: Stefan Roese <sr@denx.de>
+Acked-by: Miquel Raynal <miquel.raynal@bootlin.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ drivers/mtd/nand/spi/core.c | 42 ++++++++++++++++++-------------------
+ 1 file changed, 20 insertions(+), 22 deletions(-)
+
+diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
+index 30f83649c481..a78621c7b11d 100644
+--- a/drivers/mtd/nand/spi/core.c
++++ b/drivers/mtd/nand/spi/core.c
+@@ -304,24 +304,30 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
+ struct nand_device *nand = spinand_to_nand(spinand);
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+ struct nand_page_io_req adjreq = *req;
+- unsigned int nbytes = 0;
+- void *buf = NULL;
++ void *buf = spinand->databuf;
++ unsigned int nbytes;
+ u16 column = 0;
+ int ret;
+
+- memset(spinand->databuf, 0xff,
+- nanddev_page_size(nand) +
+- nanddev_per_page_oobsize(nand));
++ /*
++ * Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset
++ * the cache content to 0xFF (depends on vendor implementation), so we
++ * must fill the page cache entirely even if we only want to program
++ * the data portion of the page, otherwise we might corrupt the BBM or
++ * user data previously programmed in OOB area.
++ */
++ nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
++ memset(spinand->databuf, 0xff, nbytes);
++ adjreq.dataoffs = 0;
++ adjreq.datalen = nanddev_page_size(nand);
++ adjreq.databuf.out = spinand->databuf;
++ adjreq.ooblen = nanddev_per_page_oobsize(nand);
++ adjreq.ooboffs = 0;
++ adjreq.oobbuf.out = spinand->oobbuf;
+
+- if (req->datalen) {
++ if (req->datalen)
+ memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
+ req->datalen);
+- adjreq.dataoffs = 0;
+- adjreq.datalen = nanddev_page_size(nand);
+- adjreq.databuf.out = spinand->databuf;
+- nbytes = adjreq.datalen;
+- buf = spinand->databuf;
+- }
+
+ if (req->ooblen) {
+ if (req->mode == MTD_OPS_AUTO_OOB)
+@@ -332,14 +338,6 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
+ else
+ memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
+ req->ooblen);
+-
+- adjreq.ooblen = nanddev_per_page_oobsize(nand);
+- adjreq.ooboffs = 0;
+- nbytes += nanddev_per_page_oobsize(nand);
+- if (!buf) {
+- buf = spinand->oobbuf;
+- column = nanddev_page_size(nand);
+- }
+ }
+
+ spinand_cache_op_adjust_colum(spinand, &adjreq, &column);
+@@ -370,8 +368,8 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
+
+ /*
+ * We need to use the RANDOM LOAD CACHE operation if there's
+- * more than one iteration, because the LOAD operation resets
+- * the cache to 0xff.
++ * more than one iteration, because the LOAD operation might
++ * reset the cache to 0xff.
+ */
+ if (nbytes) {
+ column = op.addr.val;
+--
+2.20.1
+
diff --git a/series.conf b/series.conf
index cdb2177655..daa892493f 100644
--- a/series.conf
+++ b/series.conf
@@ -1084,6 +1084,7 @@
patches.kernel.org/4.20.8-345-ath9k-dynack-check-da-enabled-first-in-samplin.patch
patches.kernel.org/4.20.8-346-Linux-4.20.8.patch
patches.kernel.org/4.20.9-001-mtd-Make-sure-mtd-erasesize-is-valid-even-if-t.patch
+ patches.kernel.org/4.20.9-002-mtd-spinand-Handle-the-case-where-PROGRAM-LOAD.patch
########################################################
# Build fixes that apply to the vanilla kernel too.