Commit 2471d2b3 authored by Linus Torvalds

Merge tag 'mtd/for-6.12' of git://git.kernel.org/pub/scm/linux/kernel/git/mtd/linux

Pull MTD updates from Miquel Raynal:
 "MTD changes:

   - No particularly important patchset this cycle, but we have a few
     of the usual improvements, like using a better/more recent helper
     or checking a return value.

  Raw NAND changes:

   - The use of for_each_child_of_node_scoped() has been spread into the
     subsystem drivers

   - A couple of exit paths have been fixed (mtk, denali)

   - TI GPMC bindings have been enhanced to comply with up-to-date
     partition descriptions

   - A load of small and miscellaneous fixes

  SPI-NAND changes:

   - The most impactful series this cycle brings support for continuous
     reads to the SPI-NAND subsystem.

     This feature, already available in the raw NAND subsystem, allows
     optimizing the internal fetch times in the chip while reading
     sequential pages within an eraseblock.

     For now only Macronix NANDs benefit from this feature. While we are
     talking about Macronix, some of their chips need an explicit action
     to select a different plane, and support for that has also been
     added (an illustrative sketch follows the summary below).

   - The bitflip threshold has also been set to the same arbitrary level
     as in the raw NAND subsystem to optimize wear leveling decisions

   - Add support for a new Winbond chip (W25N01KV)

  SPI NOR changes:

   - Add Write Protect support for N25Q064A.

   - New flash support for Zetta ZD25Q128C and Spansion S28HS256T.

   - Fix a NULL dereference in the probe path for flashes without a
     name.

     The probe path tries to access the name without checking its
     existence first. S28HS256T is the first flash to define its entry
     without a name, uncovering this issue"
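
For readers unfamiliar with the SPI-NAND manufacturer tables, the sketch below gives a rough idea of how a vendor driver opts into the two new mechanisms, using the SPINAND_CONT_READ() macro and the SPINAND_HAS_READ_PLANE_SELECT_BIT flag added in include/linux/mtd/spinand.h (see the hunks further down). It is illustrative only: the chip name, ID bytes, memory organization, configuration bit and the ooblayout/op-variant tables are placeholders, not a real part.

/*
 * Hypothetical manufacturer-driver snippet (e.g. drivers/mtd/nand/spi/<vendor>.c).
 * EXAMPLE_CONT_READ_BIT, example_ooblayout and the *_cache_variants tables are
 * assumed to be defined elsewhere in that driver.
 */
#define EXAMPLE_CONT_READ_BIT	BIT(2)	/* placeholder vendor CFG bit */

static int example_set_cont_read(struct spinand_device *spinand, bool enable)
{
	/* Toggle the vendor-specific bit through the core's CFG helper */
	return spinand_upd_cfg(spinand, EXAMPLE_CONT_READ_BIT,
			       enable ? EXAMPLE_CONT_READ_BIT : 0);
}

static const struct spinand_info example_spinand_table[] = {
	SPINAND_INFO("EXAMPLE1G",
		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x00, 0x11),
		     NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
		     NAND_ECCREQ(8, 512),
		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
					      &write_cache_variants,
					      &update_cache_variants),
		     SPINAND_HAS_READ_PLANE_SELECT_BIT,
		     SPINAND_ECCINFO(&example_ooblayout, NULL),
		     SPINAND_CONT_READ(example_set_cont_read)),
};

The core copies set_cont_read into the spinand_device and fills in cont_read_possible once the whole configuration is known, as described in the spinand.h kerneldoc additions below.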

* tag 'mtd/for-6.12' of git://git.kernel.org/pub/scm/linux/kernel/git/mtd/linux: (41 commits)
  mtd: spi-nor: fix flash probing
  mtd: spi-nor: spansion: Add support for S28HS256T
  mtd: spi-nor: winbond: add Zetta ZD25Q128C support
  mtd: spi-nor: micron-st: Add n25q064a WP support
  mtd: spi-nor: sst: Factor out common write operation to `sst_nor_write_data()`
  mtd: spinand: macronix: Flag parts needing explicit plane select
  mtd: spinand: Add support for setting plane select bits
  dt-bindings: mtd: ti,gpmc-nand: support partitions node
  mtd: rawnand: mtk: Fix init error path
  mtd: powernv: Add check devm_kasprintf() returned value
  mtd: rawnand: mtk: Factorize out the logic cleaning mtk chips
  mtd: rawnand: atmel: Add message on DMA usage
  mtd: rawnand: meson: Fix typo in function name
  mtd: spi-nand: macronix: Continuous read support
  mtd: spi-nand: macronix: Add a possible bitflip status flag
  mtd: spi-nand: macronix: Extract the bitflip retrieval logic
  mtd: spi-nand: macronix: Fix helper name
  mtd: spi-nand: Expose spinand_write_reg_op()
  mtd: spi-nand: Add continuous read support
  mtd: spi-nand: Isolate the MTD read logic in a helper
  ...
parents 288cb34e 869acb87
......@@ -61,12 +61,9 @@ properties:
GPIO connection to R/B signal from NAND chip
maxItems: 1
patternProperties:
"@[0-9a-f]+$":
$ref: /schemas/mtd/partitions/partition.yaml
allOf:
- $ref: /schemas/memory-controllers/ti,gpmc-child.yaml
- $ref: mtd.yaml#
required:
- compatible
......
......@@ -207,6 +207,9 @@ static int powernv_flash_set_driver_info(struct device *dev,
* get them
*/
mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
if (!mtd->name)
return -ENOMEM;
mtd->type = MTD_NORFLASH;
mtd->flags = MTD_WRITEABLE;
mtd->size = size;
......
......@@ -296,10 +296,12 @@ static int __init init_slram(void)
T("slram: devname = %s\n", devname);
if ((!map) || (!(devstart = strsep(&map, ",")))) {
E("slram: No devicestart specified.\n");
break;
}
T("slram: devstart = %s\n", devstart);
if ((!map) || (!(devlength = strsep(&map, ",")))) {
E("slram: No devicelength / -end specified.\n");
break;
}
T("slram: devlength = %s\n", devlength);
if (parse_cmdline(devname, devstart, devlength) != 0) {
......
......@@ -204,7 +204,7 @@ concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
}
/* make a copy of vecs */
vecs_copy = kmemdup(vecs, sizeof(struct kvec) * count, GFP_KERNEL);
vecs_copy = kmemdup_array(vecs, count, sizeof(struct kvec), GFP_KERNEL);
if (!vecs_copy)
return -ENOMEM;
......
......@@ -1360,7 +1360,7 @@ static void anfc_chips_cleanup(struct arasan_nfc *nfc)
static int anfc_chips_init(struct arasan_nfc *nfc)
{
struct device_node *np = nfc->dev->of_node, *nand_np;
struct device_node *np = nfc->dev->of_node;
int nchips = of_get_child_count(np);
int ret;
......@@ -1370,10 +1370,9 @@ static int anfc_chips_init(struct arasan_nfc *nfc)
return -EINVAL;
}
for_each_child_of_node(np, nand_np) {
for_each_child_of_node_scoped(np, nand_np) {
ret = anfc_chip_init(nfc, nand_np);
if (ret) {
of_node_put(nand_np);
anfc_chips_cleanup(nfc);
break;
}
......
......@@ -2049,7 +2049,10 @@ static int atmel_nand_controller_init(struct atmel_nand_controller *nc,
dma_cap_set(DMA_MEMCPY, mask);
nc->dmac = dma_request_channel(mask, NULL, NULL);
if (!nc->dmac)
if (nc->dmac)
dev_info(nc->dev, "using %s for DMA transfers\n",
dma_chan_name(nc->dmac));
else
dev_err(nc->dev, "Failed to request DMA channel\n");
}
......
......@@ -2836,7 +2836,6 @@ static void cadence_nand_chips_cleanup(struct cdns_nand_ctrl *cdns_ctrl)
static int cadence_nand_chips_init(struct cdns_nand_ctrl *cdns_ctrl)
{
struct device_node *np = cdns_ctrl->dev->of_node;
struct device_node *nand_np;
int max_cs = cdns_ctrl->caps2.max_banks;
int nchips, ret;
......@@ -2849,10 +2848,9 @@ static int cadence_nand_chips_init(struct cdns_nand_ctrl *cdns_ctrl)
return -EINVAL;
}
for_each_child_of_node(np, nand_np) {
for_each_child_of_node_scoped(np, nand_np) {
ret = cadence_nand_chip_init(cdns_ctrl, nand_np);
if (ret) {
of_node_put(nand_np);
cadence_nand_chips_cleanup(cdns_ctrl);
return ret;
}
......
......@@ -20,8 +20,71 @@
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/platform_data/mtd-davinci.h>
#include <linux/platform_data/mtd-davinci-aemif.h>
#define NRCSR_OFFSET 0x00
#define NANDFCR_OFFSET 0x60
#define NANDFSR_OFFSET 0x64
#define NANDF1ECC_OFFSET 0x70
/* 4-bit ECC syndrome registers */
#define NAND_4BIT_ECC_LOAD_OFFSET 0xbc
#define NAND_4BIT_ECC1_OFFSET 0xc0
#define NAND_4BIT_ECC2_OFFSET 0xc4
#define NAND_4BIT_ECC3_OFFSET 0xc8
#define NAND_4BIT_ECC4_OFFSET 0xcc
#define NAND_ERR_ADD1_OFFSET 0xd0
#define NAND_ERR_ADD2_OFFSET 0xd4
#define NAND_ERR_ERRVAL1_OFFSET 0xd8
#define NAND_ERR_ERRVAL2_OFFSET 0xdc
/* NOTE: boards don't need to use these address bits
* for ALE/CLE unless they support booting from NAND.
* They're used unless platform data overrides them.
*/
#define MASK_ALE 0x08
#define MASK_CLE 0x10
struct davinci_nand_pdata {
uint32_t mask_ale;
uint32_t mask_cle;
/*
* 0-indexed chip-select number of the asynchronous
* interface to which the NAND device has been connected.
*
* So, if you have NAND connected to CS3 of DA850, you
* will pass '1' here. Since the asynchronous interface
* on DA850 starts from CS2.
*/
uint32_t core_chipsel;
/* for packages using two chipselects */
uint32_t mask_chipsel;
/* board's default static partition info */
struct mtd_partition *parts;
unsigned int nr_parts;
/* none == NAND_ECC_ENGINE_TYPE_NONE (strongly *not* advised!!)
* soft == NAND_ECC_ENGINE_TYPE_SOFT
* else == NAND_ECC_ENGINE_TYPE_ON_HOST, according to ecc_bits
*
* All DaVinci-family chips support 1-bit hardware ECC.
* Newer ones also support 4-bit ECC, but are awkward
* using it with large page chips.
*/
enum nand_ecc_engine_type engine_type;
enum nand_ecc_placement ecc_placement;
u8 ecc_bits;
/* e.g. NAND_BUSWIDTH_16 */
unsigned int options;
/* e.g. NAND_BBT_USE_FLASH */
unsigned int bbt_options;
/* Main and mirror bbt descriptor overrides */
struct nand_bbt_descr *bbt_td;
struct nand_bbt_descr *bbt_md;
};
/*
* This is a device driver for the NAND flash controller found on the
......@@ -54,8 +117,6 @@ struct davinci_nand_info {
uint32_t mask_cle;
uint32_t core_chipsel;
struct davinci_aemif_timing *timing;
};
static DEFINE_SPINLOCK(davinci_nand_lock);
......@@ -775,7 +836,6 @@ static int nand_davinci_probe(struct platform_device *pdev)
info->chip.options = pdata->options;
info->chip.bbt_td = pdata->bbt_td;
info->chip.bbt_md = pdata->bbt_md;
info->timing = pdata->timing;
info->current_cs = info->vaddr;
info->core_chipsel = pdata->core_chipsel;
......
......@@ -145,15 +145,15 @@ static int denali_dt_probe(struct platform_device *pdev)
if (IS_ERR(denali->host))
return PTR_ERR(denali->host);
dt->clk = devm_clk_get(dev, "nand");
dt->clk = devm_clk_get_enabled(dev, "nand");
if (IS_ERR(dt->clk))
return PTR_ERR(dt->clk);
dt->clk_x = devm_clk_get(dev, "nand_x");
dt->clk_x = devm_clk_get_enabled(dev, "nand_x");
if (IS_ERR(dt->clk_x))
return PTR_ERR(dt->clk_x);
dt->clk_ecc = devm_clk_get(dev, "ecc");
dt->clk_ecc = devm_clk_get_enabled(dev, "ecc");
if (IS_ERR(dt->clk_ecc))
return PTR_ERR(dt->clk_ecc);
......@@ -165,18 +165,6 @@ static int denali_dt_probe(struct platform_device *pdev)
if (IS_ERR(dt->rst_reg))
return PTR_ERR(dt->rst_reg);
ret = clk_prepare_enable(dt->clk);
if (ret)
return ret;
ret = clk_prepare_enable(dt->clk_x);
if (ret)
goto out_disable_clk;
ret = clk_prepare_enable(dt->clk_ecc);
if (ret)
goto out_disable_clk_x;
denali->clk_rate = clk_get_rate(dt->clk);
denali->clk_x_rate = clk_get_rate(dt->clk_x);
......@@ -187,7 +175,7 @@ static int denali_dt_probe(struct platform_device *pdev)
*/
ret = reset_control_deassert(dt->rst_reg);
if (ret)
goto out_disable_clk_ecc;
return ret;
ret = reset_control_deassert(dt->rst);
if (ret)
......@@ -222,12 +210,6 @@ static int denali_dt_probe(struct platform_device *pdev)
reset_control_assert(dt->rst);
out_assert_rst_reg:
reset_control_assert(dt->rst_reg);
out_disable_clk_ecc:
clk_disable_unprepare(dt->clk_ecc);
out_disable_clk_x:
clk_disable_unprepare(dt->clk_x);
out_disable_clk:
clk_disable_unprepare(dt->clk);
return ret;
}
......@@ -239,9 +221,6 @@ static void denali_dt_remove(struct platform_device *pdev)
denali_remove(&dt->controller);
reset_control_assert(dt->rst);
reset_control_assert(dt->rst_reg);
clk_disable_unprepare(dt->clk_ecc);
clk_disable_unprepare(dt->clk_x);
clk_disable_unprepare(dt->clk);
}
static struct platform_driver denali_dt_driver = {
......
......@@ -77,18 +77,20 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
denali->reg = devm_ioremap(denali->dev, csr_base, csr_len);
if (!denali->reg) {
dev_err(&dev->dev, "Spectra: Unable to remap memory region\n");
return -ENOMEM;
ret = -ENOMEM;
goto regions_release;
}
denali->host = devm_ioremap(denali->dev, mem_base, mem_len);
if (!denali->host) {
dev_err(&dev->dev, "Spectra: ioremap failed!");
return -ENOMEM;
ret = -ENOMEM;
goto regions_release;
}
ret = denali_init(denali);
if (ret)
return ret;
goto regions_release;
nsels = denali->nbanks;
......@@ -116,6 +118,8 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
out_remove_denali:
denali_remove(denali);
regions_release:
pci_release_regions(dev);
return ret;
}
......@@ -123,6 +127,7 @@ static void denali_pci_remove(struct pci_dev *dev)
{
struct denali_controller *denali = pci_get_drvdata(dev);
pci_release_regions(dev);
denali_remove(denali);
}
......
......@@ -2771,7 +2771,6 @@ static void marvell_nand_chips_cleanup(struct marvell_nfc *nfc)
static int marvell_nand_chips_init(struct device *dev, struct marvell_nfc *nfc)
{
struct device_node *np = dev->of_node;
struct device_node *nand_np;
int max_cs = nfc->caps->max_cs_nb;
int nchips;
int ret;
......@@ -2798,20 +2797,15 @@ static int marvell_nand_chips_init(struct device *dev, struct marvell_nfc *nfc)
return ret;
}
for_each_child_of_node(np, nand_np) {
for_each_child_of_node_scoped(np, nand_np) {
ret = marvell_nand_chip_init(dev, nfc, nand_np);
if (ret) {
of_node_put(nand_np);
goto cleanup_chips;
marvell_nand_chips_cleanup(nfc);
return ret;
}
}
return 0;
cleanup_chips:
marvell_nand_chips_cleanup(nfc);
return ret;
}
static int marvell_nfc_init_dma(struct marvell_nfc *nfc)
......
......@@ -1475,7 +1475,7 @@ meson_nfc_nand_chip_init(struct device *dev,
return 0;
}
static void meson_nfc_nand_chip_cleanup(struct meson_nfc *nfc)
static void meson_nfc_nand_chips_cleanup(struct meson_nfc *nfc)
{
struct meson_nfc_nand_chip *meson_chip;
struct mtd_info *mtd;
......@@ -1495,14 +1495,12 @@ static int meson_nfc_nand_chips_init(struct device *dev,
struct meson_nfc *nfc)
{
struct device_node *np = dev->of_node;
struct device_node *nand_np;
int ret;
for_each_child_of_node(np, nand_np) {
for_each_child_of_node_scoped(np, nand_np) {
ret = meson_nfc_nand_chip_init(dev, nfc, nand_np);
if (ret) {
meson_nfc_nand_chip_cleanup(nfc);
of_node_put(nand_np);
meson_nfc_nand_chips_cleanup(nfc);
return ret;
}
}
......@@ -1616,7 +1614,7 @@ static void meson_nfc_remove(struct platform_device *pdev)
{
struct meson_nfc *nfc = platform_get_drvdata(pdev);
meson_nfc_nand_chip_cleanup(nfc);
meson_nfc_nand_chips_cleanup(nfc);
meson_nfc_disable_clk(nfc);
}
......
......@@ -1429,16 +1429,32 @@ static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc,
return 0;
}
static void mtk_nfc_nand_chips_cleanup(struct mtk_nfc *nfc)
{
struct mtk_nfc_nand_chip *mtk_chip;
struct nand_chip *chip;
int ret;
while (!list_empty(&nfc->chips)) {
mtk_chip = list_first_entry(&nfc->chips,
struct mtk_nfc_nand_chip, node);
chip = &mtk_chip->nand;
ret = mtd_device_unregister(nand_to_mtd(chip));
WARN_ON(ret);
nand_cleanup(chip);
list_del(&mtk_chip->node);
}
}
static int mtk_nfc_nand_chips_init(struct device *dev, struct mtk_nfc *nfc)
{
struct device_node *np = dev->of_node;
struct device_node *nand_np;
int ret;
for_each_child_of_node(np, nand_np) {
for_each_child_of_node_scoped(np, nand_np) {
ret = mtk_nfc_nand_chip_init(dev, nfc, nand_np);
if (ret) {
of_node_put(nand_np);
mtk_nfc_nand_chips_cleanup(nfc);
return ret;
}
}
......@@ -1570,20 +1586,8 @@ static int mtk_nfc_probe(struct platform_device *pdev)
static void mtk_nfc_remove(struct platform_device *pdev)
{
struct mtk_nfc *nfc = platform_get_drvdata(pdev);
struct mtk_nfc_nand_chip *mtk_chip;
struct nand_chip *chip;
int ret;
while (!list_empty(&nfc->chips)) {
mtk_chip = list_first_entry(&nfc->chips,
struct mtk_nfc_nand_chip, node);
chip = &mtk_chip->nand;
ret = mtd_device_unregister(nand_to_mtd(chip));
WARN_ON(ret);
nand_cleanup(chip);
list_del(&mtk_chip->node);
}
mtk_nfc_nand_chips_cleanup(nfc);
mtk_ecc_release(nfc->ecc);
}
......
......@@ -1111,7 +1111,7 @@ static void pl35x_nand_chips_cleanup(struct pl35x_nandc *nfc)
static int pl35x_nand_chips_init(struct pl35x_nandc *nfc)
{
struct device_node *np = nfc->dev->of_node, *nand_np;
struct device_node *np = nfc->dev->of_node;
int nchips = of_get_child_count(np);
int ret;
......@@ -1121,10 +1121,9 @@ static int pl35x_nand_chips_init(struct pl35x_nandc *nfc)
return -EINVAL;
}
for_each_child_of_node(np, nand_np) {
for_each_child_of_node_scoped(np, nand_np) {
ret = pl35x_nand_chip_init(nfc, nand_np);
if (ret) {
of_node_put(nand_np);
pl35x_nand_chips_cleanup(nfc);
break;
}
......
......@@ -1297,23 +1297,17 @@ static void rnandc_chips_cleanup(struct rnandc *rnandc)
static int rnandc_chips_init(struct rnandc *rnandc)
{
struct device_node *np;
int ret;
for_each_child_of_node(rnandc->dev->of_node, np) {
for_each_child_of_node_scoped(rnandc->dev->of_node, np) {
ret = rnandc_chip_init(rnandc, np);
if (ret) {
of_node_put(np);
goto cleanup_chips;
rnandc_chips_cleanup(rnandc);
return ret;
}
}
return 0;
cleanup_chips:
rnandc_chips_cleanup(rnandc);
return ret;
}
static int rnandc_probe(struct platform_device *pdev)
......
......@@ -1211,7 +1211,7 @@ static void rk_nfc_chips_cleanup(struct rk_nfc *nfc)
static int rk_nfc_nand_chips_init(struct device *dev, struct rk_nfc *nfc)
{
struct device_node *np = dev->of_node, *nand_np;
struct device_node *np = dev->of_node;
int nchips = of_get_child_count(np);
int ret;
......@@ -1221,10 +1221,9 @@ static int rk_nfc_nand_chips_init(struct device *dev, struct rk_nfc *nfc)
return -EINVAL;
}
for_each_child_of_node(np, nand_np) {
for_each_child_of_node_scoped(np, nand_np) {
ret = rk_nfc_nand_chip_init(dev, nfc, nand_np);
if (ret) {
of_node_put(nand_np);
rk_nfc_chips_cleanup(nfc);
return ret;
}
......
......@@ -1851,7 +1851,6 @@ static int stm32_fmc2_nfc_parse_child(struct stm32_fmc2_nfc *nfc,
static int stm32_fmc2_nfc_parse_dt(struct stm32_fmc2_nfc *nfc)
{
struct device_node *dn = nfc->dev->of_node;
struct device_node *child;
int nchips = of_get_child_count(dn);
int ret = 0;
......@@ -1865,13 +1864,11 @@ static int stm32_fmc2_nfc_parse_dt(struct stm32_fmc2_nfc *nfc)
return -EINVAL;
}
for_each_child_of_node(dn, child) {
for_each_child_of_node_scoped(dn, child) {
ret = stm32_fmc2_nfc_parse_child(nfc, child);
if (ret < 0) {
of_node_put(child);
if (ret < 0)
return ret;
}
}
return ret;
}
......
......@@ -2025,13 +2025,11 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc,
static int sunxi_nand_chips_init(struct device *dev, struct sunxi_nfc *nfc)
{
struct device_node *np = dev->of_node;
struct device_node *nand_np;
int ret;
for_each_child_of_node(np, nand_np) {
for_each_child_of_node_scoped(np, nand_np) {
ret = sunxi_nand_chip_init(dev, nfc, nand_np);
if (ret) {
of_node_put(nand_np);
sunxi_nand_chips_cleanup(nfc);
return ret;
}
......
......@@ -76,6 +76,18 @@ static int w25m02gv_select_target(struct spinand_device *spinand,
return spi_mem_exec_op(spinand->spimem, &op);
}
static int w25n01kv_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
{
if (section > 3)
return -ERANGE;
region->offset = 64 + (8 * section);
region->length = 7;
return 0;
}
static int w25n02kv_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
{
......@@ -100,6 +112,11 @@ static int w25n02kv_ooblayout_free(struct mtd_info *mtd, int section,
return 0;
}
static const struct mtd_ooblayout_ops w25n01kv_ooblayout = {
.ecc = w25n01kv_ooblayout_ecc,
.free = w25n02kv_ooblayout_free,
};
static const struct mtd_ooblayout_ops w25n02kv_ooblayout = {
.ecc = w25n02kv_ooblayout_ecc,
.free = w25n02kv_ooblayout_free,
......@@ -163,6 +180,15 @@ static const struct spinand_info winbond_spinand_table[] = {
&update_cache_variants),
0,
SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL)),
SPINAND_INFO("W25N01KV",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xae, 0x21),
NAND_MEMORG(1, 2048, 96, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(4, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
0,
SPINAND_ECCINFO(&w25n01kv_ooblayout, w25n02kv_ecc_get_status)),
SPINAND_INFO("W25N02KV",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xaa, 0x22),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
......
......@@ -95,7 +95,7 @@ static int bcm47xxpart_parse(struct mtd_info *master,
uint32_t blocksize = master->erasesize;
int trx_parts[2]; /* Array with indexes of TRX partitions */
int trx_num = 0; /* Number of found TRX partitions */
int possible_nvram_sizes[] = { 0x8000, 0xF000, 0x10000, };
static const int possible_nvram_sizes[] = { 0x8000, 0xF000, 0x10000, };
int err;
/*
......
......@@ -157,10 +157,10 @@ static int parse_fixed_partitions(struct mtd_info *master,
partname = of_get_property(pp, "name", &len);
parts[i].name = partname;
if (of_get_property(pp, "read-only", &len))
if (of_property_read_bool(pp, "read-only"))
parts[i].mask_flags |= MTD_WRITEABLE;
if (of_get_property(pp, "lock", &len))
if (of_property_read_bool(pp, "lock"))
parts[i].mask_flags |= MTD_POWERUP_LOCK;
if (of_property_read_bool(pp, "slc-mode"))
......
......@@ -3281,7 +3281,8 @@ static const struct flash_info *spi_nor_match_name(struct spi_nor *nor,
for (i = 0; i < ARRAY_SIZE(manufacturers); i++) {
for (j = 0; j < manufacturers[i]->nparts; j++) {
if (!strcmp(name, manufacturers[i]->parts[j].name)) {
if (manufacturers[i]->parts[j].name &&
!strcmp(name, manufacturers[i]->parts[j].name)) {
nor->manufacturer = manufacturers[i];
return &manufacturers[i]->parts[j];
}
......
......@@ -436,6 +436,8 @@ static const struct flash_info st_nor_parts[] = {
.id = SNOR_ID(0x20, 0xbb, 0x17),
.name = "n25q064a",
.size = SZ_8M,
.flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
SPI_NOR_BP3_SR_BIT6,
.no_sfdp_flags = SECT_4K | SPI_NOR_QUAD_READ,
}, {
.id = SNOR_ID(0x20, 0xbb, 0x18),
......
......@@ -966,6 +966,10 @@ static const struct flash_info spansion_nor_parts[] = {
.name = "s28hl01gt",
.mfr_flags = USE_CLPEF,
.fixups = &s28hx_t_fixups,
}, {
.id = SNOR_ID(0x34, 0x5b, 0x19),
.mfr_flags = USE_CLPEF,
.fixups = &s28hx_t_fixups,
}, {
.id = SNOR_ID(0x34, 0x5b, 0x1a),
.name = "s28hs512t",
......
......@@ -167,6 +167,21 @@ static const struct flash_info sst_nor_parts[] = {
}
};
static int sst_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
const u_char *buf)
{
u8 op = (len == 1) ? SPINOR_OP_BP : SPINOR_OP_AAI_WP;
int ret;
nor->program_opcode = op;
ret = spi_nor_write_data(nor, to, 1, buf);
if (ret < 0)
return ret;
WARN(ret != len, "While writing %zu byte written %i bytes\n", len, ret);
return spi_nor_wait_till_ready(nor);
}
static int sst_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
......@@ -188,16 +203,10 @@ static int sst_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
/* Start write from odd address. */
if (to % 2) {
nor->program_opcode = SPINOR_OP_BP;
/* write one byte. */
ret = spi_nor_write_data(nor, to, 1, buf);
ret = sst_nor_write_data(nor, to, 1, buf);
if (ret < 0)
goto out;
WARN(ret != 1, "While writing 1 byte written %i bytes\n", ret);
ret = spi_nor_wait_till_ready(nor);
if (ret)
goto out;
to++;
actual++;
......@@ -205,16 +214,11 @@ static int sst_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
/* Write out most of the data here. */
for (; actual < len - 1; actual += 2) {
nor->program_opcode = SPINOR_OP_AAI_WP;
/* write two bytes. */
ret = spi_nor_write_data(nor, to, 2, buf + actual);
ret = sst_nor_write_data(nor, to, 2, buf + actual);
if (ret < 0)
goto out;
WARN(ret != 2, "While writing 2 bytes written %i bytes\n", ret);
ret = spi_nor_wait_till_ready(nor);
if (ret)
goto out;
to += 2;
nor->sst_write_second = true;
}
......@@ -234,14 +238,9 @@ static int sst_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
if (ret)
goto out;
nor->program_opcode = SPINOR_OP_BP;
ret = spi_nor_write_data(nor, to, 1, buf + actual);
ret = sst_nor_write_data(nor, to, 1, buf + actual);
if (ret < 0)
goto out;
WARN(ret != 1, "While writing 1 byte written %i bytes\n", ret);
ret = spi_nor_wait_till_ready(nor);
if (ret)
goto out;
actual += 1;
......
......@@ -17,6 +17,31 @@
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_DATA_OUT(1, buf, 0))
static int
w25q128_post_bfpt_fixups(struct spi_nor *nor,
const struct sfdp_parameter_header *bfpt_header,
const struct sfdp_bfpt *bfpt)
{
/*
* Zetta ZD25Q128C is a clone of the Winbond device. But the encoded
* size is really wrong. It seems that they confused Mbit with MiB.
* Thus the flash is discovered as a 2MiB device.
*/
if (bfpt_header->major == SFDP_JESD216_MAJOR &&
bfpt_header->minor == SFDP_JESD216_MINOR &&
nor->params->size == SZ_2M &&
nor->params->erase_map.regions[0].size == SZ_2M) {
nor->params->size = SZ_16M;
nor->params->erase_map.regions[0].size = SZ_16M;
}
return 0;
}
static const struct spi_nor_fixups w25q128_fixups = {
.post_bfpt = w25q128_post_bfpt_fixups,
};
static int
w25q256_post_bfpt_fixups(struct spi_nor *nor,
const struct sfdp_parameter_header *bfpt_header,
......@@ -108,6 +133,7 @@ static const struct flash_info winbond_nor_parts[] = {
.size = SZ_16M,
.flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB,
.no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
.fixups = &w25q128_fixups,
}, {
.id = SNOR_ID(0xef, 0x40, 0x19),
.name = "w25q256",
......
......@@ -103,6 +103,8 @@ enum nand_page_io_req_type {
* @ooblen: the number of OOB bytes to read from/write to this page
* @oobbuf: buffer to store OOB data in or get OOB data from
* @mode: one of the %MTD_OPS_XXX mode
* @continuous: no need to start over the operation at the end of each page, the
* NAND device will automatically prepare the next one
*
* This object is used to pass per-page I/O requests to NAND sub-layers. This
* way all useful information are already formatted in a useful way and
......@@ -125,6 +127,7 @@ struct nand_page_io_req {
void *in;
} oobbuf;
int mode;
bool continuous;
};
const struct mtd_ooblayout_ops *nand_get_small_page_ooblayout(void);
......@@ -906,16 +909,16 @@ static inline void nanddev_pos_next_page(struct nand_device *nand,
}
/**
* nand_io_iter_init - Initialize a NAND I/O iterator
* nand_io_page_iter_init - Initialize a NAND I/O iterator
* @nand: NAND device
* @offs: absolute offset
* @req: MTD request
* @iter: NAND I/O iterator
*
* Initializes a NAND iterator based on the information passed by the MTD
* layer.
* layer for page jumps.
*/
static inline void nanddev_io_iter_init(struct nand_device *nand,
static inline void nanddev_io_page_iter_init(struct nand_device *nand,
enum nand_page_io_req_type reqtype,
loff_t offs, struct mtd_oob_ops *req,
struct nand_io_iter *iter)
......@@ -937,6 +940,43 @@ static inline void nanddev_io_iter_init(struct nand_device *nand,
iter->req.ooblen = min_t(unsigned int,
iter->oobbytes_per_page - iter->req.ooboffs,
iter->oobleft);
iter->req.continuous = false;
}
/**
* nand_io_block_iter_init - Initialize a NAND I/O iterator
* @nand: NAND device
* @offs: absolute offset
* @req: MTD request
* @iter: NAND I/O iterator
*
* Initializes a NAND iterator based on the information passed by the MTD
* layer for block jumps (no OOB)
*
* In practice only reads may leverage this iterator.
*/
static inline void nanddev_io_block_iter_init(struct nand_device *nand,
enum nand_page_io_req_type reqtype,
loff_t offs, struct mtd_oob_ops *req,
struct nand_io_iter *iter)
{
unsigned int offs_in_eb;
iter->req.type = reqtype;
iter->req.mode = req->mode;
iter->req.dataoffs = nanddev_offs_to_pos(nand, offs, &iter->req.pos);
iter->req.ooboffs = 0;
iter->oobbytes_per_page = 0;
iter->dataleft = req->len;
iter->oobleft = 0;
iter->req.databuf.in = req->datbuf;
offs_in_eb = (nand->memorg.pagesize * iter->req.pos.page) + iter->req.dataoffs;
iter->req.datalen = min_t(unsigned int,
nanddev_eraseblock_size(nand) - offs_in_eb,
iter->dataleft);
iter->req.oobbuf.in = NULL;
iter->req.ooblen = 0;
iter->req.continuous = true;
}
/**
......@@ -962,6 +1002,25 @@ static inline void nanddev_io_iter_next_page(struct nand_device *nand,
iter->oobleft);
}
/**
* nand_io_iter_next_block - Move to the next block
* @nand: NAND device
* @iter: NAND I/O iterator
*
* Updates the @iter to point to the next block.
* No OOB handling available.
*/
static inline void nanddev_io_iter_next_block(struct nand_device *nand,
struct nand_io_iter *iter)
{
nanddev_pos_next_eraseblock(nand, &iter->req.pos);
iter->dataleft -= iter->req.datalen;
iter->req.databuf.in += iter->req.datalen;
iter->req.dataoffs = 0;
iter->req.datalen = min_t(unsigned int, nanddev_eraseblock_size(nand),
iter->dataleft);
}
/**
* nand_io_iter_end - Should end iteration or not
* @nand: NAND device
......@@ -990,13 +1049,28 @@ static inline bool nanddev_io_iter_end(struct nand_device *nand,
* @req: MTD I/O request
* @iter: NAND I/O iterator
*
* Should be used for iterate over pages that are contained in an MTD request.
* Should be used for iterating over pages that are contained in an MTD request.
*/
#define nanddev_io_for_each_page(nand, type, start, req, iter) \
for (nanddev_io_iter_init(nand, type, start, req, iter); \
for (nanddev_io_page_iter_init(nand, type, start, req, iter); \
!nanddev_io_iter_end(nand, iter); \
nanddev_io_iter_next_page(nand, iter))
/**
* nand_io_for_each_block - Iterate over all NAND pages contained in an MTD I/O
* request, one block at a time
* @nand: NAND device
* @start: start address to read/write from
* @req: MTD I/O request
* @iter: NAND I/O iterator
*
* Should be used for iterating over blocks that are contained in an MTD request.
*/
#define nanddev_io_for_each_block(nand, type, start, req, iter) \
for (nanddev_io_block_iter_init(nand, type, start, req, iter); \
!nanddev_io_iter_end(nand, iter); \
nanddev_io_iter_next_block(nand, iter))
bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos);
bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos);
int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos);
......
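As a rough illustration of how the new block iterator documented just above might be consumed (the example_read_eraseblock() helper is hypothetical; the real consumer is the SPI-NAND core's continuous read path):

static int example_continuous_read(struct mtd_info *mtd, loff_t from,
				   struct mtd_oob_ops *ops)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_io_iter iter;
	int ret = 0;

	nanddev_io_for_each_block(nand, NAND_PAGE_READ, from, ops, &iter) {
		/* Each request covers the rest of one eraseblock, OOB excluded */
		ret = example_read_eraseblock(nand, &iter.req);
		if (ret)
			break;

		ops->retlen += iter.req.datalen;
	}

	return ret;
}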
......@@ -312,6 +312,8 @@ struct spinand_ecc_info {
#define SPINAND_HAS_QE_BIT BIT(0)
#define SPINAND_HAS_CR_FEAT_BIT BIT(1)
#define SPINAND_HAS_PROG_PLANE_SELECT_BIT BIT(2)
#define SPINAND_HAS_READ_PLANE_SELECT_BIT BIT(3)
/**
* struct spinand_ondie_ecc_conf - private SPI-NAND on-die ECC engine structure
......@@ -336,6 +338,7 @@ struct spinand_ondie_ecc_conf {
* @op_variants.update_cache: variants of the update-cache operation
* @select_target: function used to select a target/die. Required only for
* multi-die chips
* @set_cont_read: enable/disable continuous cached reads
*
* Each SPI NAND manufacturer driver should have a spinand_info table
* describing all the chips supported by the driver.
......@@ -354,6 +357,8 @@ struct spinand_info {
} op_variants;
int (*select_target)(struct spinand_device *spinand,
unsigned int target);
int (*set_cont_read)(struct spinand_device *spinand,
bool enable);
};
#define SPINAND_ID(__method, ...) \
......@@ -379,6 +384,9 @@ struct spinand_info {
#define SPINAND_SELECT_TARGET(__func) \
.select_target = __func,
#define SPINAND_CONT_READ(__set_cont_read) \
.set_cont_read = __set_cont_read,
#define SPINAND_INFO(__model, __id, __memorg, __eccreq, __op_variants, \
__flags, ...) \
{ \
......@@ -422,6 +430,12 @@ struct spinand_dirmap {
* passed in spi_mem_op be DMA-able, so we can't based the bufs on
* the stack
* @manufacturer: SPI NAND manufacturer information
* @cont_read_possible: Field filled by the core once the whole system
* configuration is known to tell whether continuous reads are
* suitable to use or not in general with this chip/configuration.
* A per-transfer check must of course be done to ensure it is
* actually relevant to enable this feature.
* @set_cont_read: Enable/disable the continuous read feature
* @priv: manufacturer private data
*/
struct spinand_device {
......@@ -451,6 +465,10 @@ struct spinand_device {
u8 *scratchbuf;
const struct spinand_manufacturer *manufacturer;
void *priv;
bool cont_read_possible;
int (*set_cont_read)(struct spinand_device *spinand,
bool enable);
};
/**
......@@ -517,6 +535,7 @@ int spinand_match_and_init(struct spinand_device *spinand,
enum spinand_readid_method rdid_method);
int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val);
int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val);
int spinand_select_target(struct spinand_device *spinand, unsigned int target);
#endif /* __LINUX_MTD_SPINAND_H */
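For context, cont_read_possible (documented in the hunk above) is the knob the core consults before attempting a chained read. A simplified, hypothetical per-request gate could look like this; the actual decision logic in the SPI-NAND core is more involved:

static bool example_use_cont_read(struct spinand_device *spinand,
				  struct mtd_oob_ops *ops)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	if (!spinand->cont_read_possible)
		return false;

	/* OOB accesses cannot be chained across pages */
	if (ops->ooblen || ops->oobbuf)
		return false;

	/* Only worthwhile when the request spans more than one page */
	return ops->len > nanddev_page_size(nand);
}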
/*
* TI DaVinci AEMIF support
*
* Copyright 2010 (C) Texas Instruments, Inc. https://www.ti.com/
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
#ifndef _MACH_DAVINCI_AEMIF_H
#define _MACH_DAVINCI_AEMIF_H
#include <linux/platform_device.h>
#define NRCSR_OFFSET 0x00
#define AWCCR_OFFSET 0x04
#define A1CR_OFFSET 0x10
#define ACR_ASIZE_MASK 0x3
#define ACR_EW_MASK BIT(30)
#define ACR_SS_MASK BIT(31)
/* All timings in nanoseconds */
struct davinci_aemif_timing {
u8 wsetup;
u8 wstrobe;
u8 whold;
u8 rsetup;
u8 rstrobe;
u8 rhold;
u8 ta;
};
#endif
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* mach-davinci/nand.h
*
* Copyright © 2006 Texas Instruments.
*
* Ported to 2.6.23 Copyright © 2008 by
* Sander Huijsen <Shuijsen@optelecom-nkf.com>
* Troy Kisky <troy.kisky@boundarydevices.com>
* Dirk Behme <Dirk.Behme@gmail.com>
*
* --------------------------------------------------------------------------
*/
#ifndef __ARCH_ARM_DAVINCI_NAND_H
#define __ARCH_ARM_DAVINCI_NAND_H
#include <linux/mtd/rawnand.h>
#define NANDFCR_OFFSET 0x60
#define NANDFSR_OFFSET 0x64
#define NANDF1ECC_OFFSET 0x70
/* 4-bit ECC syndrome registers */
#define NAND_4BIT_ECC_LOAD_OFFSET 0xbc
#define NAND_4BIT_ECC1_OFFSET 0xc0
#define NAND_4BIT_ECC2_OFFSET 0xc4
#define NAND_4BIT_ECC3_OFFSET 0xc8
#define NAND_4BIT_ECC4_OFFSET 0xcc
#define NAND_ERR_ADD1_OFFSET 0xd0
#define NAND_ERR_ADD2_OFFSET 0xd4
#define NAND_ERR_ERRVAL1_OFFSET 0xd8
#define NAND_ERR_ERRVAL2_OFFSET 0xdc
/* NOTE: boards don't need to use these address bits
* for ALE/CLE unless they support booting from NAND.
* They're used unless platform data overrides them.
*/
#define MASK_ALE 0x08
#define MASK_CLE 0x10
struct davinci_nand_pdata { /* platform_data */
uint32_t mask_ale;
uint32_t mask_cle;
/*
* 0-indexed chip-select number of the asynchronous
* interface to which the NAND device has been connected.
*
* So, if you have NAND connected to CS3 of DA850, you
* will pass '1' here. Since the asynchronous interface
* on DA850 starts from CS2.
*/
uint32_t core_chipsel;
/* for packages using two chipselects */
uint32_t mask_chipsel;
/* board's default static partition info */
struct mtd_partition *parts;
unsigned nr_parts;
/* none == NAND_ECC_ENGINE_TYPE_NONE (strongly *not* advised!!)
* soft == NAND_ECC_ENGINE_TYPE_SOFT
* else == NAND_ECC_ENGINE_TYPE_ON_HOST, according to ecc_bits
*
* All DaVinci-family chips support 1-bit hardware ECC.
* Newer ones also support 4-bit ECC, but are awkward
* using it with large page chips.
*/
enum nand_ecc_engine_type engine_type;
enum nand_ecc_placement ecc_placement;
u8 ecc_bits;
/* e.g. NAND_BUSWIDTH_16 */
unsigned options;
/* e.g. NAND_BBT_USE_FLASH */
unsigned bbt_options;
/* Main and mirror bbt descriptor overrides */
struct nand_bbt_descr *bbt_td;
struct nand_bbt_descr *bbt_md;
/* Access timings */
struct davinci_aemif_timing *timing;
};
#endif /* __ARCH_ARM_DAVINCI_NAND_H */