Commit 869acb87 authored by Miquel Raynal's avatar Miquel Raynal

Merge tag 'nand/for-6.12' into mtd/next

* Raw NAND changes

The use of for_each_child_of_node_scoped() has been spread across the
subsystem drivers. Aside from that, a couple of exit paths have been
fixed (mtk, denali), the TI GPMC bindings have been enhanced to comply
with up-to-date partition descriptions, and as always there is a load
of small and miscellaneous fixes.
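
The conversion pattern applied across the drivers is roughly the one
sketched below (init_chip() and cleanup_chips() are placeholder names,
each driver uses its own helpers as visible in the hunks further down):
the scoped iterator drops the child node reference automatically, so
the explicit of_node_put() in the error path goes away.

	/* Before: the child reference must be dropped by hand on early exit */
	struct device_node *child;

	for_each_child_of_node(np, child) {
		ret = init_chip(ctrl, child);
		if (ret) {
			of_node_put(child);
			cleanup_chips(ctrl);
			return ret;
		}
	}

	/* After: the reference is released when 'child' goes out of scope */
	for_each_child_of_node_scoped(np, child) {
		ret = init_chip(ctrl, child);
		if (ret) {
			cleanup_chips(ctrl);
			return ret;
		}
	}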

* SPI-NAND changes

The most impactful series this cycle brings support for continuous
reads to the SPI-NAND subsystem. This feature, already merged in the
raw NAND subsystem, optimizes the internal fetch times in the chip
while reading sequential pages within an eraseblock. For now only
Macronix NANDs benefit from it. While we are talking about Macronix,
some of their chips need an explicit action to select a different
plane, and support for that has been added as well.
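
In practice the core only switches to the continuous mode when the
request carries no OOB data and stays within a single eraseblock of a
single plane and target, while still spanning more than one page. The
simplified check below mirrors spinand_use_cont_read() from the hunks
further down:

	if (!spinand->cont_read_possible)
		return false;

	/* OOBs cannot be retrieved in continuous mode */
	if (ops->ooblen || ops->oobbuf)
		return false;

	nanddev_offs_to_pos(nand, from, &start_pos);
	nanddev_offs_to_pos(nand, from + ops->len - 1, &end_pos);

	/* Stay within one target/plane/eraseblock, covering several pages */
	return start_pos.target == end_pos.target &&
	       start_pos.plane == end_pos.plane &&
	       start_pos.eraseblock == end_pos.eraseblock &&
	       start_pos.page < end_pos.page;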

The bitflip threshold has also been set to the same arbitrary level as
in the raw NAND subsystem to optimize wear-leveling decisions, and
finally support for a new Winbond chip (W25N01KV) has been added.
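
Concretely, spinand_init() now derives the threshold from the ECC
strength the same way the raw NAND core does (see the corresponding
hunk below): pages are reported as needing cleanup to the upper layers
once three quarters of the ECC strength has been consumed.

	mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);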
parents 5d09909a 475aadeb
......@@ -61,12 +61,9 @@ properties:
GPIO connection to R/B signal from NAND chip
maxItems: 1
patternProperties:
"@[0-9a-f]+$":
$ref: /schemas/mtd/partitions/partition.yaml
allOf:
- $ref: /schemas/memory-controllers/ti,gpmc-child.yaml
- $ref: mtd.yaml#
required:
- compatible
......
......@@ -1360,7 +1360,7 @@ static void anfc_chips_cleanup(struct arasan_nfc *nfc)
static int anfc_chips_init(struct arasan_nfc *nfc)
{
struct device_node *np = nfc->dev->of_node, *nand_np;
struct device_node *np = nfc->dev->of_node;
int nchips = of_get_child_count(np);
int ret;
......@@ -1370,10 +1370,9 @@ static int anfc_chips_init(struct arasan_nfc *nfc)
return -EINVAL;
}
for_each_child_of_node(np, nand_np) {
for_each_child_of_node_scoped(np, nand_np) {
ret = anfc_chip_init(nfc, nand_np);
if (ret) {
of_node_put(nand_np);
anfc_chips_cleanup(nfc);
break;
}
......
......@@ -2049,7 +2049,10 @@ static int atmel_nand_controller_init(struct atmel_nand_controller *nc,
dma_cap_set(DMA_MEMCPY, mask);
nc->dmac = dma_request_channel(mask, NULL, NULL);
if (!nc->dmac)
if (nc->dmac)
dev_info(nc->dev, "using %s for DMA transfers\n",
dma_chan_name(nc->dmac));
else
dev_err(nc->dev, "Failed to request DMA channel\n");
}
......
......@@ -2836,7 +2836,6 @@ static void cadence_nand_chips_cleanup(struct cdns_nand_ctrl *cdns_ctrl)
static int cadence_nand_chips_init(struct cdns_nand_ctrl *cdns_ctrl)
{
struct device_node *np = cdns_ctrl->dev->of_node;
struct device_node *nand_np;
int max_cs = cdns_ctrl->caps2.max_banks;
int nchips, ret;
......@@ -2849,10 +2848,9 @@ static int cadence_nand_chips_init(struct cdns_nand_ctrl *cdns_ctrl)
return -EINVAL;
}
for_each_child_of_node(np, nand_np) {
for_each_child_of_node_scoped(np, nand_np) {
ret = cadence_nand_chip_init(cdns_ctrl, nand_np);
if (ret) {
of_node_put(nand_np);
cadence_nand_chips_cleanup(cdns_ctrl);
return ret;
}
......
......@@ -20,8 +20,71 @@
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/platform_data/mtd-davinci.h>
#include <linux/platform_data/mtd-davinci-aemif.h>
#define NRCSR_OFFSET 0x00
#define NANDFCR_OFFSET 0x60
#define NANDFSR_OFFSET 0x64
#define NANDF1ECC_OFFSET 0x70
/* 4-bit ECC syndrome registers */
#define NAND_4BIT_ECC_LOAD_OFFSET 0xbc
#define NAND_4BIT_ECC1_OFFSET 0xc0
#define NAND_4BIT_ECC2_OFFSET 0xc4
#define NAND_4BIT_ECC3_OFFSET 0xc8
#define NAND_4BIT_ECC4_OFFSET 0xcc
#define NAND_ERR_ADD1_OFFSET 0xd0
#define NAND_ERR_ADD2_OFFSET 0xd4
#define NAND_ERR_ERRVAL1_OFFSET 0xd8
#define NAND_ERR_ERRVAL2_OFFSET 0xdc
/* NOTE: boards don't need to use these address bits
* for ALE/CLE unless they support booting from NAND.
* They're used unless platform data overrides them.
*/
#define MASK_ALE 0x08
#define MASK_CLE 0x10
struct davinci_nand_pdata {
uint32_t mask_ale;
uint32_t mask_cle;
/*
* 0-indexed chip-select number of the asynchronous
* interface to which the NAND device has been connected.
*
* So, if you have NAND connected to CS3 of DA850, you
* will pass '1' here. Since the asynchronous interface
* on DA850 starts from CS2.
*/
uint32_t core_chipsel;
/* for packages using two chipselects */
uint32_t mask_chipsel;
/* board's default static partition info */
struct mtd_partition *parts;
unsigned int nr_parts;
/* none == NAND_ECC_ENGINE_TYPE_NONE (strongly *not* advised!!)
* soft == NAND_ECC_ENGINE_TYPE_SOFT
* else == NAND_ECC_ENGINE_TYPE_ON_HOST, according to ecc_bits
*
* All DaVinci-family chips support 1-bit hardware ECC.
* Newer ones also support 4-bit ECC, but are awkward
* using it with large page chips.
*/
enum nand_ecc_engine_type engine_type;
enum nand_ecc_placement ecc_placement;
u8 ecc_bits;
/* e.g. NAND_BUSWIDTH_16 */
unsigned int options;
/* e.g. NAND_BBT_USE_FLASH */
unsigned int bbt_options;
/* Main and mirror bbt descriptor overrides */
struct nand_bbt_descr *bbt_td;
struct nand_bbt_descr *bbt_md;
};
/*
* This is a device driver for the NAND flash controller found on the
......@@ -54,8 +117,6 @@ struct davinci_nand_info {
uint32_t mask_cle;
uint32_t core_chipsel;
struct davinci_aemif_timing *timing;
};
static DEFINE_SPINLOCK(davinci_nand_lock);
......@@ -775,7 +836,6 @@ static int nand_davinci_probe(struct platform_device *pdev)
info->chip.options = pdata->options;
info->chip.bbt_td = pdata->bbt_td;
info->chip.bbt_md = pdata->bbt_md;
info->timing = pdata->timing;
info->current_cs = info->vaddr;
info->core_chipsel = pdata->core_chipsel;
......
......@@ -145,15 +145,15 @@ static int denali_dt_probe(struct platform_device *pdev)
if (IS_ERR(denali->host))
return PTR_ERR(denali->host);
dt->clk = devm_clk_get(dev, "nand");
dt->clk = devm_clk_get_enabled(dev, "nand");
if (IS_ERR(dt->clk))
return PTR_ERR(dt->clk);
dt->clk_x = devm_clk_get(dev, "nand_x");
dt->clk_x = devm_clk_get_enabled(dev, "nand_x");
if (IS_ERR(dt->clk_x))
return PTR_ERR(dt->clk_x);
dt->clk_ecc = devm_clk_get(dev, "ecc");
dt->clk_ecc = devm_clk_get_enabled(dev, "ecc");
if (IS_ERR(dt->clk_ecc))
return PTR_ERR(dt->clk_ecc);
......@@ -165,18 +165,6 @@ static int denali_dt_probe(struct platform_device *pdev)
if (IS_ERR(dt->rst_reg))
return PTR_ERR(dt->rst_reg);
ret = clk_prepare_enable(dt->clk);
if (ret)
return ret;
ret = clk_prepare_enable(dt->clk_x);
if (ret)
goto out_disable_clk;
ret = clk_prepare_enable(dt->clk_ecc);
if (ret)
goto out_disable_clk_x;
denali->clk_rate = clk_get_rate(dt->clk);
denali->clk_x_rate = clk_get_rate(dt->clk_x);
......@@ -187,7 +175,7 @@ static int denali_dt_probe(struct platform_device *pdev)
*/
ret = reset_control_deassert(dt->rst_reg);
if (ret)
goto out_disable_clk_ecc;
return ret;
ret = reset_control_deassert(dt->rst);
if (ret)
......@@ -222,12 +210,6 @@ static int denali_dt_probe(struct platform_device *pdev)
reset_control_assert(dt->rst);
out_assert_rst_reg:
reset_control_assert(dt->rst_reg);
out_disable_clk_ecc:
clk_disable_unprepare(dt->clk_ecc);
out_disable_clk_x:
clk_disable_unprepare(dt->clk_x);
out_disable_clk:
clk_disable_unprepare(dt->clk);
return ret;
}
......@@ -239,9 +221,6 @@ static void denali_dt_remove(struct platform_device *pdev)
denali_remove(&dt->controller);
reset_control_assert(dt->rst);
reset_control_assert(dt->rst_reg);
clk_disable_unprepare(dt->clk_ecc);
clk_disable_unprepare(dt->clk_x);
clk_disable_unprepare(dt->clk);
}
static struct platform_driver denali_dt_driver = {
......
......@@ -77,18 +77,20 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
denali->reg = devm_ioremap(denali->dev, csr_base, csr_len);
if (!denali->reg) {
dev_err(&dev->dev, "Spectra: Unable to remap memory region\n");
return -ENOMEM;
ret = -ENOMEM;
goto regions_release;
}
denali->host = devm_ioremap(denali->dev, mem_base, mem_len);
if (!denali->host) {
dev_err(&dev->dev, "Spectra: ioremap failed!");
return -ENOMEM;
ret = -ENOMEM;
goto regions_release;
}
ret = denali_init(denali);
if (ret)
return ret;
goto regions_release;
nsels = denali->nbanks;
......@@ -116,6 +118,8 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
out_remove_denali:
denali_remove(denali);
regions_release:
pci_release_regions(dev);
return ret;
}
......@@ -123,6 +127,7 @@ static void denali_pci_remove(struct pci_dev *dev)
{
struct denali_controller *denali = pci_get_drvdata(dev);
pci_release_regions(dev);
denali_remove(denali);
}
......
......@@ -2771,7 +2771,6 @@ static void marvell_nand_chips_cleanup(struct marvell_nfc *nfc)
static int marvell_nand_chips_init(struct device *dev, struct marvell_nfc *nfc)
{
struct device_node *np = dev->of_node;
struct device_node *nand_np;
int max_cs = nfc->caps->max_cs_nb;
int nchips;
int ret;
......@@ -2798,20 +2797,15 @@ static int marvell_nand_chips_init(struct device *dev, struct marvell_nfc *nfc)
return ret;
}
for_each_child_of_node(np, nand_np) {
for_each_child_of_node_scoped(np, nand_np) {
ret = marvell_nand_chip_init(dev, nfc, nand_np);
if (ret) {
of_node_put(nand_np);
goto cleanup_chips;
marvell_nand_chips_cleanup(nfc);
return ret;
}
}
return 0;
cleanup_chips:
marvell_nand_chips_cleanup(nfc);
return ret;
}
static int marvell_nfc_init_dma(struct marvell_nfc *nfc)
......
......@@ -1475,7 +1475,7 @@ meson_nfc_nand_chip_init(struct device *dev,
return 0;
}
static void meson_nfc_nand_chip_cleanup(struct meson_nfc *nfc)
static void meson_nfc_nand_chips_cleanup(struct meson_nfc *nfc)
{
struct meson_nfc_nand_chip *meson_chip;
struct mtd_info *mtd;
......@@ -1495,14 +1495,12 @@ static int meson_nfc_nand_chips_init(struct device *dev,
struct meson_nfc *nfc)
{
struct device_node *np = dev->of_node;
struct device_node *nand_np;
int ret;
for_each_child_of_node(np, nand_np) {
for_each_child_of_node_scoped(np, nand_np) {
ret = meson_nfc_nand_chip_init(dev, nfc, nand_np);
if (ret) {
meson_nfc_nand_chip_cleanup(nfc);
of_node_put(nand_np);
meson_nfc_nand_chips_cleanup(nfc);
return ret;
}
}
......@@ -1616,7 +1614,7 @@ static void meson_nfc_remove(struct platform_device *pdev)
{
struct meson_nfc *nfc = platform_get_drvdata(pdev);
meson_nfc_nand_chip_cleanup(nfc);
meson_nfc_nand_chips_cleanup(nfc);
meson_nfc_disable_clk(nfc);
}
......
......@@ -1429,16 +1429,32 @@ static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc,
return 0;
}
static void mtk_nfc_nand_chips_cleanup(struct mtk_nfc *nfc)
{
struct mtk_nfc_nand_chip *mtk_chip;
struct nand_chip *chip;
int ret;
while (!list_empty(&nfc->chips)) {
mtk_chip = list_first_entry(&nfc->chips,
struct mtk_nfc_nand_chip, node);
chip = &mtk_chip->nand;
ret = mtd_device_unregister(nand_to_mtd(chip));
WARN_ON(ret);
nand_cleanup(chip);
list_del(&mtk_chip->node);
}
}
static int mtk_nfc_nand_chips_init(struct device *dev, struct mtk_nfc *nfc)
{
struct device_node *np = dev->of_node;
struct device_node *nand_np;
int ret;
for_each_child_of_node(np, nand_np) {
for_each_child_of_node_scoped(np, nand_np) {
ret = mtk_nfc_nand_chip_init(dev, nfc, nand_np);
if (ret) {
of_node_put(nand_np);
mtk_nfc_nand_chips_cleanup(nfc);
return ret;
}
}
......@@ -1570,20 +1586,8 @@ static int mtk_nfc_probe(struct platform_device *pdev)
static void mtk_nfc_remove(struct platform_device *pdev)
{
struct mtk_nfc *nfc = platform_get_drvdata(pdev);
struct mtk_nfc_nand_chip *mtk_chip;
struct nand_chip *chip;
int ret;
while (!list_empty(&nfc->chips)) {
mtk_chip = list_first_entry(&nfc->chips,
struct mtk_nfc_nand_chip, node);
chip = &mtk_chip->nand;
ret = mtd_device_unregister(nand_to_mtd(chip));
WARN_ON(ret);
nand_cleanup(chip);
list_del(&mtk_chip->node);
}
mtk_nfc_nand_chips_cleanup(nfc);
mtk_ecc_release(nfc->ecc);
}
......
......@@ -1111,7 +1111,7 @@ static void pl35x_nand_chips_cleanup(struct pl35x_nandc *nfc)
static int pl35x_nand_chips_init(struct pl35x_nandc *nfc)
{
struct device_node *np = nfc->dev->of_node, *nand_np;
struct device_node *np = nfc->dev->of_node;
int nchips = of_get_child_count(np);
int ret;
......@@ -1121,10 +1121,9 @@ static int pl35x_nand_chips_init(struct pl35x_nandc *nfc)
return -EINVAL;
}
for_each_child_of_node(np, nand_np) {
for_each_child_of_node_scoped(np, nand_np) {
ret = pl35x_nand_chip_init(nfc, nand_np);
if (ret) {
of_node_put(nand_np);
pl35x_nand_chips_cleanup(nfc);
break;
}
......
......@@ -1297,23 +1297,17 @@ static void rnandc_chips_cleanup(struct rnandc *rnandc)
static int rnandc_chips_init(struct rnandc *rnandc)
{
struct device_node *np;
int ret;
for_each_child_of_node(rnandc->dev->of_node, np) {
for_each_child_of_node_scoped(rnandc->dev->of_node, np) {
ret = rnandc_chip_init(rnandc, np);
if (ret) {
of_node_put(np);
goto cleanup_chips;
rnandc_chips_cleanup(rnandc);
return ret;
}
}
return 0;
cleanup_chips:
rnandc_chips_cleanup(rnandc);
return ret;
}
static int rnandc_probe(struct platform_device *pdev)
......
......@@ -1211,7 +1211,7 @@ static void rk_nfc_chips_cleanup(struct rk_nfc *nfc)
static int rk_nfc_nand_chips_init(struct device *dev, struct rk_nfc *nfc)
{
struct device_node *np = dev->of_node, *nand_np;
struct device_node *np = dev->of_node;
int nchips = of_get_child_count(np);
int ret;
......@@ -1221,10 +1221,9 @@ static int rk_nfc_nand_chips_init(struct device *dev, struct rk_nfc *nfc)
return -EINVAL;
}
for_each_child_of_node(np, nand_np) {
for_each_child_of_node_scoped(np, nand_np) {
ret = rk_nfc_nand_chip_init(dev, nfc, nand_np);
if (ret) {
of_node_put(nand_np);
rk_nfc_chips_cleanup(nfc);
return ret;
}
......
......@@ -1851,7 +1851,6 @@ static int stm32_fmc2_nfc_parse_child(struct stm32_fmc2_nfc *nfc,
static int stm32_fmc2_nfc_parse_dt(struct stm32_fmc2_nfc *nfc)
{
struct device_node *dn = nfc->dev->of_node;
struct device_node *child;
int nchips = of_get_child_count(dn);
int ret = 0;
......@@ -1865,12 +1864,10 @@ static int stm32_fmc2_nfc_parse_dt(struct stm32_fmc2_nfc *nfc)
return -EINVAL;
}
for_each_child_of_node(dn, child) {
for_each_child_of_node_scoped(dn, child) {
ret = stm32_fmc2_nfc_parse_child(nfc, child);
if (ret < 0) {
of_node_put(child);
if (ret < 0)
return ret;
}
}
return ret;
......
......@@ -2025,13 +2025,11 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc,
static int sunxi_nand_chips_init(struct device *dev, struct sunxi_nfc *nfc)
{
struct device_node *np = dev->of_node;
struct device_node *nand_np;
int ret;
for_each_child_of_node(np, nand_np) {
for_each_child_of_node_scoped(np, nand_np) {
ret = sunxi_nand_chip_init(dev, nfc, nand_np);
if (ret) {
of_node_put(nand_np);
sunxi_nand_chips_cleanup(nfc);
return ret;
}
......
......@@ -34,7 +34,7 @@ static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
return 0;
}
static int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
{
struct spi_mem_op op = SPINAND_SET_FEATURE_OP(reg,
spinand->scratchbuf);
......@@ -200,6 +200,12 @@ static int spinand_ecc_enable(struct spinand_device *spinand,
enable ? CFG_ECC_ENABLE : 0);
}
static int spinand_cont_read_enable(struct spinand_device *spinand,
bool enable)
{
return spinand->set_cont_read(spinand, enable);
}
static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
{
struct nand_device *nand = spinand_to_nand(spinand);
......@@ -311,10 +317,22 @@ static int spinand_ondie_ecc_finish_io_req(struct nand_device *nand,
/* Finish a page read: check the status, report errors/bitflips */
ret = spinand_check_ecc_status(spinand, engine_conf->status);
if (ret == -EBADMSG)
if (ret == -EBADMSG) {
mtd->ecc_stats.failed++;
else if (ret > 0)
mtd->ecc_stats.corrected += ret;
} else if (ret > 0) {
unsigned int pages;
/*
* Continuous reads don't allow us to get the details,
* so we may exaggerate the actual number of corrected bitflips.
*/
if (!req->continuous)
pages = 1;
else
pages = req->datalen / nanddev_page_size(nand);
mtd->ecc_stats.corrected += ret * pages;
}
return ret;
}
......@@ -369,7 +387,11 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand,
if (req->datalen) {
buf = spinand->databuf;
nbytes = nanddev_page_size(nand);
if (!req->continuous)
nbytes = nanddev_page_size(nand);
else
nbytes = round_up(req->dataoffs + req->datalen,
nanddev_page_size(nand));
column = 0;
}
......@@ -386,6 +408,9 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand,
else
rdesc = spinand->dirmaps[req->pos.plane].rdesc_ecc;
if (spinand->flags & SPINAND_HAS_READ_PLANE_SELECT_BIT)
column |= req->pos.plane << fls(nanddev_page_size(nand));
while (nbytes) {
ret = spi_mem_dirmap_read(rdesc, column, nbytes, buf);
if (ret < 0)
......@@ -397,6 +422,13 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand,
nbytes -= ret;
column += ret;
buf += ret;
/*
* Dirmap accesses are allowed to toggle the CS.
* Toggling the CS during a continuous read is forbidden.
*/
if (nbytes && req->continuous)
return -EIO;
}
if (req->datalen)
......@@ -460,6 +492,9 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
else
wdesc = spinand->dirmaps[req->pos.plane].wdesc_ecc;
if (spinand->flags & SPINAND_HAS_PROG_PLANE_SELECT_BIT)
column |= req->pos.plane << fls(nanddev_page_size(nand));
while (nbytes) {
ret = spi_mem_dirmap_write(wdesc, column, nbytes, buf);
if (ret < 0)
......@@ -630,25 +665,20 @@ static int spinand_write_page(struct spinand_device *spinand,
return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
}
static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
static int spinand_mtd_regular_page_read(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops,
unsigned int *max_bitflips)
{
struct spinand_device *spinand = mtd_to_spinand(mtd);
struct nand_device *nand = mtd_to_nanddev(mtd);
struct mtd_ecc_stats old_stats;
unsigned int max_bitflips = 0;
struct nand_io_iter iter;
bool disable_ecc = false;
bool ecc_failed = false;
int ret = 0;
int ret;
if (ops->mode == MTD_OPS_RAW || !spinand->eccinfo.ooblayout)
if (ops->mode == MTD_OPS_RAW || !mtd->ooblayout)
disable_ecc = true;
mutex_lock(&spinand->lock);
old_stats = mtd->ecc_stats;
nanddev_io_for_each_page(nand, NAND_PAGE_READ, from, ops, &iter) {
if (disable_ecc)
iter.req.mode = MTD_OPS_RAW;
......@@ -664,13 +694,155 @@ static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
if (ret == -EBADMSG)
ecc_failed = true;
else
max_bitflips = max_t(unsigned int, max_bitflips, ret);
*max_bitflips = max_t(unsigned int, *max_bitflips, ret);
ret = 0;
ops->retlen += iter.req.datalen;
ops->oobretlen += iter.req.ooblen;
}
if (ecc_failed && !ret)
ret = -EBADMSG;
return ret;
}
static int spinand_mtd_continuous_page_read(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops,
unsigned int *max_bitflips)
{
struct spinand_device *spinand = mtd_to_spinand(mtd);
struct nand_device *nand = mtd_to_nanddev(mtd);
struct nand_io_iter iter;
u8 status;
int ret;
ret = spinand_cont_read_enable(spinand, true);
if (ret)
return ret;
/*
* The cache is divided into two halves. While one half of the cache has
* the requested data, the other half is loaded with the next chunk of data.
* Therefore, the host can read out the data continuously from page to page.
* Each data read must be a multiple of 4-bytes and full pages should be read;
* otherwise, the data output might get out of sequence from one read command
* to another.
*/
nanddev_io_for_each_block(nand, NAND_PAGE_READ, from, ops, &iter) {
ret = spinand_select_target(spinand, iter.req.pos.target);
if (ret)
goto end_cont_read;
ret = nand_ecc_prepare_io_req(nand, &iter.req);
if (ret)
goto end_cont_read;
ret = spinand_load_page_op(spinand, &iter.req);
if (ret)
goto end_cont_read;
ret = spinand_wait(spinand, SPINAND_READ_INITIAL_DELAY_US,
SPINAND_READ_POLL_DELAY_US, NULL);
if (ret < 0)
goto end_cont_read;
ret = spinand_read_from_cache_op(spinand, &iter.req);
if (ret)
goto end_cont_read;
ops->retlen += iter.req.datalen;
ret = spinand_read_status(spinand, &status);
if (ret)
goto end_cont_read;
spinand_ondie_ecc_save_status(nand, status);
ret = nand_ecc_finish_io_req(nand, &iter.req);
if (ret < 0)
goto end_cont_read;
*max_bitflips = max_t(unsigned int, *max_bitflips, ret);
ret = 0;
}
end_cont_read:
/*
* Once all the data has been read out, the host can either pull CS#
* high and wait for tRST or manually clear the bit in the configuration
* register to terminate the continuous read operation. We have no
* guarantee the SPI controller drivers will effectively deassert the CS
* when we expect them to, so take the register based approach.
*/
spinand_cont_read_enable(spinand, false);
return ret;
}
static void spinand_cont_read_init(struct spinand_device *spinand)
{
struct nand_device *nand = spinand_to_nand(spinand);
enum nand_ecc_engine_type engine_type = nand->ecc.ctx.conf.engine_type;
/* OOBs cannot be retrieved so external/on-host ECC engine won't work */
if (spinand->set_cont_read &&
(engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE ||
engine_type == NAND_ECC_ENGINE_TYPE_NONE)) {
spinand->cont_read_possible = true;
}
}
static bool spinand_use_cont_read(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
struct nand_device *nand = mtd_to_nanddev(mtd);
struct spinand_device *spinand = nand_to_spinand(nand);
struct nand_pos start_pos, end_pos;
if (!spinand->cont_read_possible)
return false;
/* OOBs won't be retrieved */
if (ops->ooblen || ops->oobbuf)
return false;
nanddev_offs_to_pos(nand, from, &start_pos);
nanddev_offs_to_pos(nand, from + ops->len - 1, &end_pos);
/*
* Continuous reads never cross LUN boundaries. Some devices don't
* support crossing plane boundaries. Some devices don't even support
* crossing block boundaries. The common case being to read through UBI,
* we will very rarely read two consecutive blocks or more, so it is safer
* and easier (can be improved) to only enable continuous reads when
* reading within the same erase block.
*/
if (start_pos.target != end_pos.target ||
start_pos.plane != end_pos.plane ||
start_pos.eraseblock != end_pos.eraseblock)
return false;
return start_pos.page < end_pos.page;
}
static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
struct spinand_device *spinand = mtd_to_spinand(mtd);
struct mtd_ecc_stats old_stats;
unsigned int max_bitflips = 0;
int ret;
mutex_lock(&spinand->lock);
old_stats = mtd->ecc_stats;
if (spinand_use_cont_read(mtd, from, ops))
ret = spinand_mtd_continuous_page_read(mtd, from, ops, &max_bitflips);
else
ret = spinand_mtd_regular_page_read(mtd, from, ops, &max_bitflips);
if (ops->stats) {
ops->stats->uncorrectable_errors +=
mtd->ecc_stats.failed - old_stats.failed;
......@@ -680,9 +852,6 @@ static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
mutex_unlock(&spinand->lock);
if (ecc_failed && !ret)
ret = -EBADMSG;
return ret ? ret : max_bitflips;
}
......@@ -862,6 +1031,9 @@ static int spinand_create_dirmap(struct spinand_device *spinand,
};
struct spi_mem_dirmap_desc *desc;
if (spinand->cont_read_possible)
info.length = nanddev_eraseblock_size(nand);
/* The plane number is passed in MSB just above the column address */
info.offset = plane << fls(nand->memorg.pagesize);
......@@ -1095,6 +1267,7 @@ int spinand_match_and_init(struct spinand_device *spinand,
spinand->flags = table[i].flags;
spinand->id.len = 1 + table[i].devid.len;
spinand->select_target = table[i].select_target;
spinand->set_cont_read = table[i].set_cont_read;
op = spinand_select_op_variant(spinand,
info->op_variants.read_cache);
......@@ -1236,9 +1409,8 @@ static int spinand_init(struct spinand_device *spinand)
* may use this buffer for DMA access.
* Memory allocated by devm_ does not guarantee DMA-safe alignment.
*/
spinand->databuf = kzalloc(nanddev_page_size(nand) +
nanddev_per_page_oobsize(nand),
GFP_KERNEL);
spinand->databuf = kzalloc(nanddev_eraseblock_size(nand),
GFP_KERNEL);
if (!spinand->databuf) {
ret = -ENOMEM;
goto err_free_bufs;
......@@ -1267,6 +1439,12 @@ static int spinand_init(struct spinand_device *spinand)
if (ret)
goto err_cleanup_nanddev;
/*
* Continuous read can only be enabled with an on-die ECC engine, so the
* ECC initialization must have happened previously.
*/
spinand_cont_read_init(spinand);
mtd->_read_oob = spinand_mtd_read;
mtd->_write_oob = spinand_mtd_write;
mtd->_block_isbad = spinand_mtd_block_isbad;
......@@ -1287,6 +1465,7 @@ static int spinand_init(struct spinand_device *spinand)
/* Propagate ECC information to mtd_info */
mtd->ecc_strength = nanddev_get_ecc_conf(nand)->strength;
mtd->ecc_step_size = nanddev_get_ecc_conf(nand)->step_size;
mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);
ret = spinand_create_dirmaps(spinand);
if (ret) {
......
......@@ -5,12 +5,25 @@
* Author: Boris Brezillon <boris.brezillon@bootlin.com>
*/
#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/mtd/spinand.h>
#define SPINAND_MFR_MACRONIX 0xC2
#define MACRONIX_ECCSR_MASK 0x0F
#define MACRONIX_ECCSR_BF_LAST_PAGE(eccsr) FIELD_GET(GENMASK(3, 0), eccsr)
#define MACRONIX_ECCSR_BF_ACCUMULATED_PAGES(eccsr) FIELD_GET(GENMASK(7, 4), eccsr)
#define MACRONIX_CFG_CONT_READ BIT(2)
#define STATUS_ECC_HAS_BITFLIPS_THRESHOLD (3 << 4)
/* Bitflip threshold configuration register */
#define REG_CFG_BFT 0x10
#define CFG_BFT(x) FIELD_PREP(GENMASK(7, 4), (x))
struct macronix_priv {
bool cont_read;
};
static SPINAND_OP_VARIANTS(read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
......@@ -49,8 +62,9 @@ static const struct mtd_ooblayout_ops mx35lfxge4ab_ooblayout = {
.free = mx35lfxge4ab_ooblayout_free,
};
static int mx35lf1ge4ab_get_eccsr(struct spinand_device *spinand, u8 *eccsr)
static int macronix_get_eccsr(struct spinand_device *spinand, u8 *eccsr)
{
struct macronix_priv *priv = spinand->priv;
struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x7c, 1),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_DUMMY(1, 1),
......@@ -60,12 +74,21 @@ static int mx35lf1ge4ab_get_eccsr(struct spinand_device *spinand, u8 *eccsr)
if (ret)
return ret;
*eccsr &= MACRONIX_ECCSR_MASK;
/*
* ECCSR exposes the number of bitflips for the last read page in bits [3:0].
* Continuous read compatible chips also expose the maximum number of
* bitflips for the whole (continuous) read operation in bits [7:4].
*/
if (!priv->cont_read)
*eccsr = MACRONIX_ECCSR_BF_LAST_PAGE(*eccsr);
else
*eccsr = MACRONIX_ECCSR_BF_ACCUMULATED_PAGES(*eccsr);
return 0;
}
static int mx35lf1ge4ab_ecc_get_status(struct spinand_device *spinand,
u8 status)
static int macronix_ecc_get_status(struct spinand_device *spinand,
u8 status)
{
struct nand_device *nand = spinand_to_nand(spinand);
u8 eccsr;
......@@ -83,16 +106,14 @@ static int mx35lf1ge4ab_ecc_get_status(struct spinand_device *spinand,
* in order to avoid forcing the wear-leveling layer to move
* data around if it's not necessary.
*/
if (mx35lf1ge4ab_get_eccsr(spinand, spinand->scratchbuf))
if (macronix_get_eccsr(spinand, spinand->scratchbuf))
return nanddev_get_ecc_conf(nand)->strength;
eccsr = *spinand->scratchbuf;
if (WARN_ON(eccsr > nanddev_get_ecc_conf(nand)->strength ||
!eccsr))
if (WARN_ON(eccsr > nanddev_get_ecc_conf(nand)->strength || !eccsr))
return nanddev_get_ecc_conf(nand)->strength;
return eccsr;
default:
break;
}
......@@ -100,6 +121,21 @@ static int mx35lf1ge4ab_ecc_get_status(struct spinand_device *spinand,
return -EINVAL;
}
static int macronix_set_cont_read(struct spinand_device *spinand, bool enable)
{
struct macronix_priv *priv = spinand->priv;
int ret;
ret = spinand_upd_cfg(spinand, MACRONIX_CFG_CONT_READ,
enable ? MACRONIX_CFG_CONT_READ : 0);
if (ret)
return ret;
priv->cont_read = enable;
return 0;
}
static const struct spinand_info macronix_spinand_table[] = {
SPINAND_INFO("MX35LF1GE4AB",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x12),
......@@ -110,7 +146,7 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
macronix_ecc_get_status)),
SPINAND_INFO("MX35LF2GE4AB",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x22),
NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 2, 1, 1),
......@@ -118,7 +154,9 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_HAS_QE_BIT |
SPINAND_HAS_PROG_PLANE_SELECT_BIT |
SPINAND_HAS_READ_PLANE_SELECT_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
SPINAND_INFO("MX35LF2GE4AD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x26, 0x03),
......@@ -129,7 +167,8 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
macronix_ecc_get_status),
SPINAND_CONT_READ(macronix_set_cont_read)),
SPINAND_INFO("MX35LF4GE4AD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x37, 0x03),
NAND_MEMORG(1, 4096, 128, 64, 2048, 40, 1, 1, 1),
......@@ -139,7 +178,8 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
macronix_ecc_get_status),
SPINAND_CONT_READ(macronix_set_cont_read)),
SPINAND_INFO("MX35LF1G24AD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x14, 0x03),
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
......@@ -156,7 +196,8 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_HAS_QE_BIT |
SPINAND_HAS_PROG_PLANE_SELECT_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
SPINAND_INFO("MX35LF2G24AD-Z4I8",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x64, 0x03),
......@@ -174,7 +215,8 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_HAS_QE_BIT |
SPINAND_HAS_PROG_PLANE_SELECT_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
SPINAND_INFO("MX35LF4G24AD-Z4I8",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x75, 0x03),
......@@ -194,7 +236,7 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
macronix_ecc_get_status)),
SPINAND_INFO("MX31UF1GE4BC",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x9e),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
......@@ -204,7 +246,7 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
macronix_ecc_get_status)),
SPINAND_INFO("MX35LF2G14AC",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x20),
......@@ -213,9 +255,11 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_HAS_QE_BIT |
SPINAND_HAS_PROG_PLANE_SELECT_BIT |
SPINAND_HAS_READ_PLANE_SELECT_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
macronix_ecc_get_status)),
SPINAND_INFO("MX35UF4G24AD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xb5, 0x03),
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 2, 1, 1),
......@@ -223,9 +267,10 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_HAS_QE_BIT |
SPINAND_HAS_PROG_PLANE_SELECT_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
macronix_ecc_get_status)),
SPINAND_INFO("MX35UF4G24AD-Z4I8",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xf5, 0x03),
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
......@@ -235,7 +280,7 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
macronix_ecc_get_status)),
SPINAND_INFO("MX35UF4GE4AD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xb7, 0x03),
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
......@@ -245,7 +290,8 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
macronix_ecc_get_status),
SPINAND_CONT_READ(macronix_set_cont_read)),
SPINAND_INFO("MX35UF2G14AC",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa0),
NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 2, 1, 1),
......@@ -253,9 +299,11 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_HAS_QE_BIT |
SPINAND_HAS_PROG_PLANE_SELECT_BIT |
SPINAND_HAS_READ_PLANE_SELECT_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
macronix_ecc_get_status)),
SPINAND_INFO("MX35UF2G24AD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa4, 0x03),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1),
......@@ -263,9 +311,10 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_HAS_QE_BIT |
SPINAND_HAS_PROG_PLANE_SELECT_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
macronix_ecc_get_status)),
SPINAND_INFO("MX35UF2G24AD-Z4I8",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xe4, 0x03),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
......@@ -275,7 +324,7 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
macronix_ecc_get_status)),
SPINAND_INFO("MX35UF2GE4AD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa6, 0x03),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
......@@ -285,7 +334,8 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
macronix_ecc_get_status),
SPINAND_CONT_READ(macronix_set_cont_read)),
SPINAND_INFO("MX35UF2GE4AC",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa2, 0x01),
NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1),
......@@ -295,7 +345,8 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
macronix_ecc_get_status),
SPINAND_CONT_READ(macronix_set_cont_read)),
SPINAND_INFO("MX35UF1G14AC",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x90),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
......@@ -305,7 +356,7 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
macronix_ecc_get_status)),
SPINAND_INFO("MX35UF1G24AD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x94, 0x03),
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
......@@ -315,7 +366,7 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
macronix_ecc_get_status)),
SPINAND_INFO("MX35UF1GE4AD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x96, 0x03),
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
......@@ -325,7 +376,8 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
macronix_ecc_get_status),
SPINAND_CONT_READ(macronix_set_cont_read)),
SPINAND_INFO("MX35UF1GE4AC",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x92, 0x01),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
......@@ -335,8 +387,8 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
macronix_ecc_get_status),
SPINAND_CONT_READ(macronix_set_cont_read)),
SPINAND_INFO("MX31LF2GE4BC",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x2e),
NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1),
......@@ -346,7 +398,7 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
macronix_ecc_get_status)),
SPINAND_INFO("MX3UF2GE4BC",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xae),
NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1),
......@@ -356,10 +408,30 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
macronix_ecc_get_status)),
};
static int macronix_spinand_init(struct spinand_device *spinand)
{
struct macronix_priv *priv;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
spinand->priv = priv;
return 0;
}
static void macronix_spinand_cleanup(struct spinand_device *spinand)
{
kfree(spinand->priv);
}
static const struct spinand_manufacturer_ops macronix_spinand_manuf_ops = {
.init = macronix_spinand_init,
.cleanup = macronix_spinand_cleanup,
};
const struct spinand_manufacturer macronix_spinand_manufacturer = {
......
......@@ -76,6 +76,18 @@ static int w25m02gv_select_target(struct spinand_device *spinand,
return spi_mem_exec_op(spinand->spimem, &op);
}
static int w25n01kv_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
{
if (section > 3)
return -ERANGE;
region->offset = 64 + (8 * section);
region->length = 7;
return 0;
}
static int w25n02kv_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
{
......@@ -100,6 +112,11 @@ static int w25n02kv_ooblayout_free(struct mtd_info *mtd, int section,
return 0;
}
static const struct mtd_ooblayout_ops w25n01kv_ooblayout = {
.ecc = w25n01kv_ooblayout_ecc,
.free = w25n02kv_ooblayout_free,
};
static const struct mtd_ooblayout_ops w25n02kv_ooblayout = {
.ecc = w25n02kv_ooblayout_ecc,
.free = w25n02kv_ooblayout_free,
......@@ -163,6 +180,15 @@ static const struct spinand_info winbond_spinand_table[] = {
&update_cache_variants),
0,
SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL)),
SPINAND_INFO("W25N01KV",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xae, 0x21),
NAND_MEMORG(1, 2048, 96, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(4, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
0,
SPINAND_ECCINFO(&w25n01kv_ooblayout, w25n02kv_ecc_get_status)),
SPINAND_INFO("W25N02KV",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xaa, 0x22),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
......
......@@ -103,6 +103,8 @@ enum nand_page_io_req_type {
* @ooblen: the number of OOB bytes to read from/write to this page
* @oobbuf: buffer to store OOB data in or get OOB data from
* @mode: one of the %MTD_OPS_XXX mode
* @continuous: no need to start over the operation at the end of each page, the
* NAND device will automatically prepare the next one
*
* This object is used to pass per-page I/O requests to NAND sub-layers. This
* way all useful information are already formatted in a useful way and
......@@ -125,6 +127,7 @@ struct nand_page_io_req {
void *in;
} oobbuf;
int mode;
bool continuous;
};
const struct mtd_ooblayout_ops *nand_get_small_page_ooblayout(void);
......@@ -906,19 +909,19 @@ static inline void nanddev_pos_next_page(struct nand_device *nand,
}
/**
* nand_io_iter_init - Initialize a NAND I/O iterator
* nand_io_page_iter_init - Initialize a NAND I/O iterator
* @nand: NAND device
* @offs: absolute offset
* @req: MTD request
* @iter: NAND I/O iterator
*
* Initializes a NAND iterator based on the information passed by the MTD
* layer.
* layer for page jumps.
*/
static inline void nanddev_io_iter_init(struct nand_device *nand,
enum nand_page_io_req_type reqtype,
loff_t offs, struct mtd_oob_ops *req,
struct nand_io_iter *iter)
static inline void nanddev_io_page_iter_init(struct nand_device *nand,
enum nand_page_io_req_type reqtype,
loff_t offs, struct mtd_oob_ops *req,
struct nand_io_iter *iter)
{
struct mtd_info *mtd = nanddev_to_mtd(nand);
......@@ -937,6 +940,43 @@ static inline void nanddev_io_iter_init(struct nand_device *nand,
iter->req.ooblen = min_t(unsigned int,
iter->oobbytes_per_page - iter->req.ooboffs,
iter->oobleft);
iter->req.continuous = false;
}
/**
* nand_io_block_iter_init - Initialize a NAND I/O iterator
* @nand: NAND device
* @offs: absolute offset
* @req: MTD request
* @iter: NAND I/O iterator
*
* Initializes a NAND iterator based on the information passed by the MTD
* layer for block jumps (no OOB)
*
* In practice only reads may leverage this iterator.
*/
static inline void nanddev_io_block_iter_init(struct nand_device *nand,
enum nand_page_io_req_type reqtype,
loff_t offs, struct mtd_oob_ops *req,
struct nand_io_iter *iter)
{
unsigned int offs_in_eb;
iter->req.type = reqtype;
iter->req.mode = req->mode;
iter->req.dataoffs = nanddev_offs_to_pos(nand, offs, &iter->req.pos);
iter->req.ooboffs = 0;
iter->oobbytes_per_page = 0;
iter->dataleft = req->len;
iter->oobleft = 0;
iter->req.databuf.in = req->datbuf;
offs_in_eb = (nand->memorg.pagesize * iter->req.pos.page) + iter->req.dataoffs;
iter->req.datalen = min_t(unsigned int,
nanddev_eraseblock_size(nand) - offs_in_eb,
iter->dataleft);
iter->req.oobbuf.in = NULL;
iter->req.ooblen = 0;
iter->req.continuous = true;
}
/**
......@@ -962,6 +1002,25 @@ static inline void nanddev_io_iter_next_page(struct nand_device *nand,
iter->oobleft);
}
/**
* nand_io_iter_next_block - Move to the next block
* @nand: NAND device
* @iter: NAND I/O iterator
*
* Updates the @iter to point to the next block.
* No OOB handling available.
*/
static inline void nanddev_io_iter_next_block(struct nand_device *nand,
struct nand_io_iter *iter)
{
nanddev_pos_next_eraseblock(nand, &iter->req.pos);
iter->dataleft -= iter->req.datalen;
iter->req.databuf.in += iter->req.datalen;
iter->req.dataoffs = 0;
iter->req.datalen = min_t(unsigned int, nanddev_eraseblock_size(nand),
iter->dataleft);
}
/**
* nand_io_iter_end - Should end iteration or not
* @nand: NAND device
......@@ -990,13 +1049,28 @@ static inline bool nanddev_io_iter_end(struct nand_device *nand,
* @req: MTD I/O request
* @iter: NAND I/O iterator
*
* Should be used for iterate over pages that are contained in an MTD request.
* Should be used for iterating over pages that are contained in an MTD request.
*/
#define nanddev_io_for_each_page(nand, type, start, req, iter) \
for (nanddev_io_iter_init(nand, type, start, req, iter); \
for (nanddev_io_page_iter_init(nand, type, start, req, iter); \
!nanddev_io_iter_end(nand, iter); \
nanddev_io_iter_next_page(nand, iter))
/**
* nand_io_for_each_block - Iterate over all NAND pages contained in an MTD I/O
* request, one block at a time
* @nand: NAND device
* @start: start address to read/write from
* @req: MTD I/O request
* @iter: NAND I/O iterator
*
* Should be used for iterating over blocks that are contained in an MTD request.
*/
#define nanddev_io_for_each_block(nand, type, start, req, iter) \
for (nanddev_io_block_iter_init(nand, type, start, req, iter); \
!nanddev_io_iter_end(nand, iter); \
nanddev_io_iter_next_block(nand, iter))
bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos);
bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos);
int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos);
......
......@@ -312,6 +312,8 @@ struct spinand_ecc_info {
#define SPINAND_HAS_QE_BIT BIT(0)
#define SPINAND_HAS_CR_FEAT_BIT BIT(1)
#define SPINAND_HAS_PROG_PLANE_SELECT_BIT BIT(2)
#define SPINAND_HAS_READ_PLANE_SELECT_BIT BIT(3)
/**
* struct spinand_ondie_ecc_conf - private SPI-NAND on-die ECC engine structure
......@@ -336,6 +338,7 @@ struct spinand_ondie_ecc_conf {
* @op_variants.update_cache: variants of the update-cache operation
* @select_target: function used to select a target/die. Required only for
* multi-die chips
* @set_cont_read: enable/disable continuous cached reads
*
* Each SPI NAND manufacturer driver should have a spinand_info table
* describing all the chips supported by the driver.
......@@ -354,6 +357,8 @@ struct spinand_info {
} op_variants;
int (*select_target)(struct spinand_device *spinand,
unsigned int target);
int (*set_cont_read)(struct spinand_device *spinand,
bool enable);
};
#define SPINAND_ID(__method, ...) \
......@@ -379,6 +384,9 @@ struct spinand_info {
#define SPINAND_SELECT_TARGET(__func) \
.select_target = __func,
#define SPINAND_CONT_READ(__set_cont_read) \
.set_cont_read = __set_cont_read,
#define SPINAND_INFO(__model, __id, __memorg, __eccreq, __op_variants, \
__flags, ...) \
{ \
......@@ -422,6 +430,12 @@ struct spinand_dirmap {
* passed in spi_mem_op be DMA-able, so we can't based the bufs on
* the stack
* @manufacturer: SPI NAND manufacturer information
* @cont_read_possible: Field filled by the core once the whole system
* configuration is known to tell whether continuous reads are
* suitable to use or not in general with this chip/configuration.
* A per-transfer check must of course be done to ensure it is
* actually relevant to enable this feature.
* @set_cont_read: Enable/disable the continuous read feature
* @priv: manufacturer private data
*/
struct spinand_device {
......@@ -451,6 +465,10 @@ struct spinand_device {
u8 *scratchbuf;
const struct spinand_manufacturer *manufacturer;
void *priv;
bool cont_read_possible;
int (*set_cont_read)(struct spinand_device *spinand,
bool enable);
};
/**
......@@ -517,6 +535,7 @@ int spinand_match_and_init(struct spinand_device *spinand,
enum spinand_readid_method rdid_method);
int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val);
int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val);
int spinand_select_target(struct spinand_device *spinand, unsigned int target);
#endif /* __LINUX_MTD_SPINAND_H */
/*
* TI DaVinci AEMIF support
*
* Copyright 2010 (C) Texas Instruments, Inc. https://www.ti.com/
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
#ifndef _MACH_DAVINCI_AEMIF_H
#define _MACH_DAVINCI_AEMIF_H
#include <linux/platform_device.h>
#define NRCSR_OFFSET 0x00
#define AWCCR_OFFSET 0x04
#define A1CR_OFFSET 0x10
#define ACR_ASIZE_MASK 0x3
#define ACR_EW_MASK BIT(30)
#define ACR_SS_MASK BIT(31)
/* All timings in nanoseconds */
struct davinci_aemif_timing {
u8 wsetup;
u8 wstrobe;
u8 whold;
u8 rsetup;
u8 rstrobe;
u8 rhold;
u8 ta;
};
#endif
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* mach-davinci/nand.h
*
* Copyright © 2006 Texas Instruments.
*
* Ported to 2.6.23 Copyright © 2008 by
* Sander Huijsen <Shuijsen@optelecom-nkf.com>
* Troy Kisky <troy.kisky@boundarydevices.com>
* Dirk Behme <Dirk.Behme@gmail.com>
*
* --------------------------------------------------------------------------
*/
#ifndef __ARCH_ARM_DAVINCI_NAND_H
#define __ARCH_ARM_DAVINCI_NAND_H
#include <linux/mtd/rawnand.h>
#define NANDFCR_OFFSET 0x60
#define NANDFSR_OFFSET 0x64
#define NANDF1ECC_OFFSET 0x70
/* 4-bit ECC syndrome registers */
#define NAND_4BIT_ECC_LOAD_OFFSET 0xbc
#define NAND_4BIT_ECC1_OFFSET 0xc0
#define NAND_4BIT_ECC2_OFFSET 0xc4
#define NAND_4BIT_ECC3_OFFSET 0xc8
#define NAND_4BIT_ECC4_OFFSET 0xcc
#define NAND_ERR_ADD1_OFFSET 0xd0
#define NAND_ERR_ADD2_OFFSET 0xd4
#define NAND_ERR_ERRVAL1_OFFSET 0xd8
#define NAND_ERR_ERRVAL2_OFFSET 0xdc
/* NOTE: boards don't need to use these address bits
* for ALE/CLE unless they support booting from NAND.
* They're used unless platform data overrides them.
*/
#define MASK_ALE 0x08
#define MASK_CLE 0x10
struct davinci_nand_pdata { /* platform_data */
uint32_t mask_ale;
uint32_t mask_cle;
/*
* 0-indexed chip-select number of the asynchronous
* interface to which the NAND device has been connected.
*
* So, if you have NAND connected to CS3 of DA850, you
* will pass '1' here. Since the asynchronous interface
* on DA850 starts from CS2.
*/
uint32_t core_chipsel;
/* for packages using two chipselects */
uint32_t mask_chipsel;
/* board's default static partition info */
struct mtd_partition *parts;
unsigned nr_parts;
/* none == NAND_ECC_ENGINE_TYPE_NONE (strongly *not* advised!!)
* soft == NAND_ECC_ENGINE_TYPE_SOFT
* else == NAND_ECC_ENGINE_TYPE_ON_HOST, according to ecc_bits
*
* All DaVinci-family chips support 1-bit hardware ECC.
* Newer ones also support 4-bit ECC, but are awkward
* using it with large page chips.
*/
enum nand_ecc_engine_type engine_type;
enum nand_ecc_placement ecc_placement;
u8 ecc_bits;
/* e.g. NAND_BUSWIDTH_16 */
unsigned options;
/* e.g. NAND_BBT_USE_FLASH */
unsigned bbt_options;
/* Main and mirror bbt descriptor overrides */
struct nand_bbt_descr *bbt_td;
struct nand_bbt_descr *bbt_md;
/* Access timings */
struct davinci_aemif_timing *timing;
};
#endif /* __ARCH_ARM_DAVINCI_NAND_H */