Commit 3d1f08b0 authored by Miquel Raynal's avatar Miquel Raynal

mtd: spinand: Use the external ECC engine logic

Now that all the logic is available in the NAND core, let's use it
from the SPI-NAND core. Right now there is no functional change as the
default ECC engine for SPI-NANDs is set to 'on-die', but users can now
use software correction if they want to by just setting the right
properties in the DT.

Also note that the OOB layout handling is removed from the SPI-NAND
core as each ECC engine is supposed to handle it on its own; users
should not be aware of that.
Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
Link: https://lore.kernel.org/linux-mtd/20201001102014.20100-4-miquel.raynal@bootlin.com
parent 533af69c
......@@ -2,6 +2,7 @@
menuconfig MTD_SPI_NAND
tristate "SPI NAND device Support"
select MTD_NAND_CORE
select MTD_NAND_ECC
depends on SPI_MASTER
select SPI_MEM
help
......
......@@ -313,6 +313,15 @@ static struct nand_ecc_engine spinand_ondie_ecc_engine = {
.ops = &spinand_ondie_ecc_engine_ops,
};
/*
 * Stash the chip status byte in the on-die ECC engine's private context.
 * Only done when the on-die engine is the active one and a context has
 * been allocated; otherwise the byte is silently dropped.
 */
static void spinand_ondie_ecc_save_status(struct nand_device *nand, u8 status)
{
	struct spinand_ondie_ecc_conf *engine_conf = nand->ecc.ctx.priv;

	if (nand->ecc.ctx.conf.engine_type != NAND_ECC_ENGINE_TYPE_ON_DIE)
		return;

	if (engine_conf)
		engine_conf->status = status;
}
static int spinand_write_enable_op(struct spinand_device *spinand)
{
struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true);
......@@ -334,7 +343,6 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand,
const struct nand_page_io_req *req)
{
struct nand_device *nand = spinand_to_nand(spinand);
struct mtd_info *mtd = nanddev_to_mtd(nand);
struct spi_mem_dirmap_desc *rdesc;
unsigned int nbytes = 0;
void *buf = NULL;
......@@ -374,17 +382,6 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand,
memcpy(req->databuf.in, spinand->databuf + req->dataoffs,
req->datalen);
if (req->ooblen) {
if (req->mode == MTD_OPS_AUTO_OOB)
mtd_ooblayout_get_databytes(mtd, req->oobbuf.in,
spinand->oobbuf,
req->ooboffs,
req->ooblen);
else
memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
req->ooblen);
}
return 0;
}
......@@ -392,7 +389,7 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
const struct nand_page_io_req *req)
{
struct nand_device *nand = spinand_to_nand(spinand);
struct mtd_info *mtd = nanddev_to_mtd(nand);
struct mtd_info *mtd = spinand_to_mtd(spinand);
struct spi_mem_dirmap_desc *wdesc;
unsigned int nbytes, column = 0;
void *buf = spinand->databuf;
......@@ -404,9 +401,12 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
* must fill the page cache entirely even if we only want to program
* the data portion of the page, otherwise we might corrupt the BBM or
* user data previously programmed in OOB area.
*
* Only reset the data buffer manually, the OOB buffer is prepared by
* ECC engines ->prepare_io_req() callback.
*/
nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
memset(spinand->databuf, 0xff, nbytes);
memset(spinand->databuf, 0xff, nanddev_page_size(nand));
if (req->datalen)
memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
......@@ -523,12 +523,16 @@ static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
}
static int spinand_read_page(struct spinand_device *spinand,
const struct nand_page_io_req *req,
bool ecc_enabled)
const struct nand_page_io_req *req)
{
struct nand_device *nand = spinand_to_nand(spinand);
u8 status;
int ret;
ret = nand_ecc_prepare_io_req(nand, (struct nand_page_io_req *)req);
if (ret)
return ret;
ret = spinand_load_page_op(spinand, req);
if (ret)
return ret;
......@@ -537,22 +541,26 @@ static int spinand_read_page(struct spinand_device *spinand,
if (ret < 0)
return ret;
spinand_ondie_ecc_save_status(nand, status);
ret = spinand_read_from_cache_op(spinand, req);
if (ret)
return ret;
if (!ecc_enabled)
return 0;
return spinand_check_ecc_status(spinand, status);
return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
}
static int spinand_write_page(struct spinand_device *spinand,
const struct nand_page_io_req *req)
{
struct nand_device *nand = spinand_to_nand(spinand);
u8 status;
int ret;
ret = nand_ecc_prepare_io_req(nand, (struct nand_page_io_req *)req);
if (ret)
return ret;
ret = spinand_write_enable_op(spinand);
if (ret)
return ret;
......@@ -567,9 +575,9 @@ static int spinand_write_page(struct spinand_device *spinand,
ret = spinand_wait(spinand, &status);
if (!ret && (status & STATUS_PROG_FAILED))
ret = -EIO;
return -EIO;
return ret;
return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
}
static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
......@@ -579,25 +587,24 @@ static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
struct nand_device *nand = mtd_to_nanddev(mtd);
unsigned int max_bitflips = 0;
struct nand_io_iter iter;
bool enable_ecc = false;
bool disable_ecc = false;
bool ecc_failed = false;
int ret = 0;
if (ops->mode != MTD_OPS_RAW && spinand->eccinfo.ooblayout)
enable_ecc = true;
if (ops->mode == MTD_OPS_RAW || !spinand->eccinfo.ooblayout)
disable_ecc = true;
mutex_lock(&spinand->lock);
nanddev_io_for_each_page(nand, NAND_PAGE_READ, from, ops, &iter) {
ret = spinand_select_target(spinand, iter.req.pos.target);
if (ret)
break;
if (disable_ecc)
iter.req.mode = MTD_OPS_RAW;
ret = spinand_ecc_enable(spinand, enable_ecc);
ret = spinand_select_target(spinand, iter.req.pos.target);
if (ret)
break;
ret = spinand_read_page(spinand, &iter.req, enable_ecc);
ret = spinand_read_page(spinand, &iter.req);
if (ret < 0 && ret != -EBADMSG)
break;
......@@ -628,20 +635,19 @@ static int spinand_mtd_write(struct mtd_info *mtd, loff_t to,
struct spinand_device *spinand = mtd_to_spinand(mtd);
struct nand_device *nand = mtd_to_nanddev(mtd);
struct nand_io_iter iter;
bool enable_ecc = false;
bool disable_ecc = false;
int ret = 0;
if (ops->mode != MTD_OPS_RAW && mtd->ooblayout)
enable_ecc = true;
if (ops->mode == MTD_OPS_RAW || !mtd->ooblayout)
disable_ecc = true;
mutex_lock(&spinand->lock);
nanddev_io_for_each_page(nand, NAND_PAGE_WRITE, to, ops, &iter) {
ret = spinand_select_target(spinand, iter.req.pos.target);
if (ret)
break;
if (disable_ecc)
iter.req.mode = MTD_OPS_RAW;
ret = spinand_ecc_enable(spinand, enable_ecc);
ret = spinand_select_target(spinand, iter.req.pos.target);
if (ret)
break;
......@@ -671,7 +677,7 @@ static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
};
spinand_select_target(spinand, pos->target);
spinand_read_page(spinand, &req, false);
spinand_read_page(spinand, &req);
if (marker[0] != 0xff || marker[1] != 0xff)
return true;
......@@ -1137,6 +1143,11 @@ static int spinand_init(struct spinand_device *spinand)
nand->ecc.defaults.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE;
nand->ecc.ondie_engine = &spinand_ondie_ecc_engine;
spinand_ecc_enable(spinand, false);
ret = nanddev_ecc_engine_init(nand);
if (ret)
goto err_cleanup_nanddev;
/*
* Right now, we don't support ECC, so let the whole oob
* area available for the user.
......@@ -1149,14 +1160,9 @@ static int spinand_init(struct spinand_device *spinand)
mtd->_erase = spinand_mtd_erase;
mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;
if (spinand->eccinfo.ooblayout)
mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
else
mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout);
ret = mtd_ooblayout_count_freebytes(mtd);
if (ret < 0)
goto err_cleanup_nanddev;
goto err_cleanup_ecc_engine;
mtd->oobavail = ret;
......@@ -1166,6 +1172,9 @@ static int spinand_init(struct spinand_device *spinand)
return 0;
err_cleanup_ecc_engine:
nanddev_ecc_engine_cleanup(nand);
err_cleanup_nanddev:
nanddev_cleanup(nand);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment