Commit c01d85c2 authored by Linus Torvalds

Merge tag 'mtd/for-5.17' of git://git.kernel.org/pub/scm/linux/kernel/git/mtd/linux

Pull MTD updates from Miquel Raynal:
 "MTD core changes:

   - mtdchar: Prevent unbounded allocation in MEMWRITE ioctl

   - gen_probe: Use bitmap_zalloc() when applicable

   - Introduce an expert mode for forensics and debugging purposes

   - Clear out unregistered devices a bit more

   - Provide unique name for nvmem device

   - Remove unused header file <linux/mtd/latch-addr-flash.h>

   - Fix broken partition list handling in __mtd_del_partition

  MTD device changes:

   - Warn about failure to unregister mtd device in sst25l, mchp48l640,
     mchp23k256, and dataflash drivers.

  Raw NAND core changes:

   - Export nand_read_page_hwecc_oob_first()

  GPMC memory controller for OMAP2 NAND controller changes:

   - Add support for AM64 SoC and allow build on K3 platforms

   - Use a compatible match table when checking for NAND controller

   - Use platform_get_irq() to get the interrupt

  Raw NAND controller changes:

   - OMAP2 NAND controller:
      - Document the missing 'rb-gpios' DT property
      - Drop unused variable
      - Fix force_8bit flag behaviour for DMA mode
      - Move to exec_op interface
      - Use platform_get_irq() to get the interrupt

   - Renesas:
      - Add new NAND controller driver with its bindings and MAINTAINERS entry

   - OneNAND:
      - Remove redundant variable ooblen

   - MPC5121:
      - Remove unused variable in ads5121_select_chip()

   - GPMI:
      - Add ERR007117 protection for nfc_apply_timings
      - Remove explicit default gpmi clock setting for i.MX6
      - Use platform_get_irq_byname() to get the interrupt
      - Remove unneeded variable

   - Ingenic:
      - JZ4740 needs 'oob_first' read page function

   - Davinci:
      - Rewrite function description
      - Avoid duplicated page read
      - Don't calculate ECC when reading page

  SPI NOR core changes:

   - Add Pratyush as SPI NOR co-maintainer.

   - Clean up the flash parameters initialization, which had grown into
     spaghetti code.

   - Rework the flash_info flags and clarify where one should be used.

   - Initialize all flash parameters based on JESD216 SFDP where
     possible. Flash parameters and settings that are SFDP discoverable
     should not be duplicated via flash_info flags at flash declaration.

   - Remove debugfs entries that duplicate sysfs entries.

  SPI NOR manufacturer driver changes:

   - Use late_init() hook in various drivers to make it clear that those
     flash parameters are either not declared in the JESD216 SFDP
     standard, or the SFDP tables which define those flash parameters
     are not defined by the flash.

   - Fix mtd size for s3an flashes.

   - Write 2 bytes when disabling Octal DTR mode: 1 byte long
     transactions are not allowed in 8D-8D-8D mode.

  Hyperbus changes:

   - A couple of fixes in the Renesas hyperbus rpc-if driver to avoid a
     crash on module removal and to add a missing error check in probe"

* tag 'mtd/for-5.17' of git://git.kernel.org/pub/scm/linux/kernel/git/mtd/linux: (71 commits)
  mtd: spi-nor: Remove debugfs entries that duplicate sysfs entries
  mtd: spi-nor: micron-st: write 2 bytes when disabling Octal DTR mode
  mtd: spi-nor: spansion: write 2 bytes when disabling Octal DTR mode
  mtd: spi-nor: core: use 2 data bytes for template ops
  mtd: spi-nor: Constify part specific fixup hooks
  mtd: spi-nor: core: Remove reference to spi-nor.c
  mtd: rawnand: gpmi: Use platform_get_irq_byname() to get the interrupt
  mtd: rawnand: omap_elm: Use platform_get_irq() to get the interrupt
  mtd: rawnand: omap2: Select GPMC device driver for ARCH_K3
  memory: omap-gpmc: Use a compatible match table when checking for NAND controller
  memory: omap-gpmc: Add support for GPMC on AM64 SoC
  dt-bindings: memory-controllers: ti,gpmc: Add compatible for AM64
  memory: omap-gpmc: Use platform_get_irq() to get the interrupt
  MAINTAINERS: Add an entry for Renesas NAND controller
  mtd: rawnand: renesas: Add new NAND controller driver
  dt-bindings: mtd: renesas: Describe Renesas R-Car Gen3 & RZ/N1 NAND controller
  mtd: rawnand: gpmi: remove unneeded variable
  mtd: rawnand: omap2: drop unused variable
  mtd: rawnand: omap2: fix force_8bit flag behaviour for DMA mode
  mtd: rawnand: omap2: Add compatible for AM64 SoC
  ...
parents 34770887 9ce47e43
......@@ -23,13 +23,20 @@ properties:
items:
- enum:
- ti,am3352-gpmc
- ti,am64-gpmc
- ti,omap2420-gpmc
- ti,omap2430-gpmc
- ti,omap3430-gpmc
- ti,omap4430-gpmc
reg:
maxItems: 1
minItems: 1
maxItems: 2
reg-names:
items:
- const: cfg
- const: data
interrupts:
maxItems: 1
......@@ -44,6 +51,9 @@ properties:
items:
- const: fck
power-domains:
maxItems: 1
dmas:
items:
- description: DMA channel for GPMC NAND prefetch
......@@ -133,6 +143,17 @@ required:
- "#address-cells"
- "#size-cells"
allOf:
- if:
properties:
compatible:
contains:
const: ti,am64-gpmc
then:
required:
- reg-names
- power-domains
additionalProperties: false
examples:
......
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/mtd/renesas-nandc.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Renesas R-Car Gen3 & RZ/N1x NAND flash controller device tree bindings
maintainers:
- Miquel Raynal <miquel.raynal@bootlin.com>
allOf:
- $ref: "nand-controller.yaml"
properties:
compatible:
oneOf:
- items:
- enum:
- renesas,r9a06g032-nandc
- const: renesas,rzn1-nandc
reg:
maxItems: 1
interrupts:
maxItems: 1
clocks:
items:
- description: APB host controller clock
- description: External NAND bus clock
clock-names:
items:
- const: hclk
- const: eclk
required:
- compatible
- reg
- clocks
- clock-names
- interrupts
unevaluatedProperties: false
examples:
- |
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/clock/r9a06g032-sysctrl.h>
nand-controller@40102000 {
compatible = "renesas,r9a06g032-nandc", "renesas,rzn1-nandc";
reg = <0x40102000 0x2000>;
interrupts = <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&sysctrl R9A06G032_HCLK_NAND>, <&sysctrl R9A06G032_CLK_NAND>;
clock-names = "hclk", "eclk";
#address-cells = <1>;
#size-cells = <0>;
};
......@@ -16,7 +16,10 @@ description:
properties:
compatible:
const: ti,omap2-nand
items:
- enum:
- ti,am64-nand
- ti,omap2-nand
reg:
maxItems: 1
......@@ -53,6 +56,11 @@ properties:
enum: [8, 16]
default: 8
rb-gpios:
description:
GPIO connection to R/B signal from NAND chip
maxItems: 1
patternProperties:
"@[0-9a-f]+$":
$ref: "/schemas/mtd/partitions/partition.yaml"
......
......@@ -16416,6 +16416,14 @@ S: Supported
F: Documentation/devicetree/bindings/iio/adc/renesas,rzg2l-adc.yaml
F: drivers/iio/adc/rzg2l_adc.c
RENESAS R-CAR GEN3 & RZ/N1 NAND CONTROLLER DRIVER
M: Miquel Raynal <miquel.raynal@bootlin.com>
L: linux-mtd@lists.infradead.org
L: linux-renesas-soc@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/mtd/renesas-nandc.yaml
F: drivers/mtd/nand/raw/renesas-nand-controller.c
RESET CONTROLLER FRAMEWORK
M: Philipp Zabel <p.zabel@pengutronix.de>
S: Maintained
......@@ -18084,8 +18092,8 @@ F: drivers/pinctrl/spear/
SPI NOR SUBSYSTEM
M: Tudor Ambarus <tudor.ambarus@microchip.com>
M: Pratyush Yadav <p.yadav@ti.com>
R: Michael Walle <michael@walle.cc>
R: Pratyush Yadav <p.yadav@ti.com>
L: linux-mtd@lists.infradead.org
S: Maintained
W: http://www.linux-mtd.infradead.org/
......
......@@ -237,6 +237,7 @@ struct gpmc_device {
struct omap3_gpmc_regs context;
int nirqs;
unsigned int is_suspended:1;
struct resource *data;
};
static struct irq_domain *gpmc_irq_domain;
......@@ -1456,12 +1457,18 @@ static void gpmc_mem_exit(void)
}
}
static void gpmc_mem_init(void)
static void gpmc_mem_init(struct gpmc_device *gpmc)
{
int cs;
gpmc_mem_root.start = GPMC_MEM_START;
gpmc_mem_root.end = GPMC_MEM_END;
if (!gpmc->data) {
/* All legacy devices have same data IO window */
gpmc_mem_root.start = GPMC_MEM_START;
gpmc_mem_root.end = GPMC_MEM_END;
} else {
gpmc_mem_root.start = gpmc->data->start;
gpmc_mem_root.end = gpmc->data->end;
}
/* Reserve all regions that has been set up by bootloader */
for (cs = 0; cs < gpmc_cs_num; cs++) {
......@@ -1888,6 +1895,7 @@ static const struct of_device_id gpmc_dt_ids[] = {
{ .compatible = "ti,omap3430-gpmc" }, /* omap3430 & omap3630 */
{ .compatible = "ti,omap4430-gpmc" }, /* omap4430 & omap4460 & omap543x */
{ .compatible = "ti,am3352-gpmc" }, /* am335x devices */
{ .compatible = "ti,am64-gpmc" },
{ }
};
......@@ -2175,7 +2183,7 @@ static int gpmc_probe_generic_child(struct platform_device *pdev,
}
}
if (of_device_is_compatible(child, "ti,omap2-nand")) {
if (of_match_node(omap_nand_ids, child)) {
/* NAND specific setup */
val = 8;
of_property_read_u32(child, "nand-bus-width", &val);
......@@ -2502,21 +2510,29 @@ static int gpmc_probe(struct platform_device *pdev)
gpmc->dev = &pdev->dev;
platform_set_drvdata(pdev, gpmc);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENOENT;
gpmc_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(gpmc_base))
return PTR_ERR(gpmc_base);
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
if (!res) {
dev_err(&pdev->dev, "Failed to get resource: irq\n");
return -ENOENT;
/* legacy DT */
gpmc_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(gpmc_base))
return PTR_ERR(gpmc_base);
} else {
gpmc_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(gpmc_base))
return PTR_ERR(gpmc_base);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "data");
if (!res) {
dev_err(&pdev->dev, "couldn't get data reg resource\n");
return -ENOENT;
}
gpmc->data = res;
}
gpmc->irq = res->start;
gpmc->irq = platform_get_irq(pdev, 0);
if (gpmc->irq < 0)
return gpmc->irq;
gpmc_l3_clk = devm_clk_get(&pdev->dev, "fck");
if (IS_ERR(gpmc_l3_clk)) {
......@@ -2562,7 +2578,7 @@ static int gpmc_probe(struct platform_device *pdev)
dev_info(gpmc->dev, "GPMC revision %d.%d\n", GPMC_REVISION_MAJOR(l),
GPMC_REVISION_MINOR(l));
gpmc_mem_init();
gpmc_mem_init(gpmc);
rc = gpmc_gpio_init(gpmc);
if (rc)
goto gpio_init_failed;
......
......@@ -61,8 +61,8 @@ static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chi
struct cfi_private cfi;
struct cfi_private *retcfi;
unsigned long *chip_map;
int i, j, mapsize;
int max_chips;
int i, j;
memset(&cfi, 0, sizeof(cfi));
......@@ -111,8 +111,7 @@ static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chi
max_chips = 1;
}
mapsize = sizeof(long) * DIV_ROUND_UP(max_chips, BITS_PER_LONG);
chip_map = kzalloc(mapsize, GFP_KERNEL);
chip_map = bitmap_zalloc(max_chips, GFP_KERNEL);
if (!chip_map) {
kfree(cfi.cfiq);
return NULL;
......@@ -139,7 +138,7 @@ static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chi
if (!retcfi) {
kfree(cfi.cfiq);
kfree(chip_map);
bitmap_free(chip_map);
return NULL;
}
......@@ -157,7 +156,7 @@ static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chi
}
}
kfree(chip_map);
bitmap_free(chip_map);
return retcfi;
}
......
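Aside on the gen_probe hunk above: the change swaps a hand-sized kzalloc() of a chip bitmap for the kernel bitmap API, so the size is expressed in bits and allocation/free are symmetric. A minimal sketch of the pattern (hypothetical helper names; the real code open-codes this inside genprobe_ident_chips()):

#include <linux/bitmap.h>
#include <linux/gfp.h>

/* Before: mapsize = sizeof(long) * DIV_ROUND_UP(max_chips, BITS_PER_LONG);
 *         chip_map = kzalloc(mapsize, GFP_KERNEL);
 */
static unsigned long *chip_map_alloc(unsigned int max_chips)
{
	/* zero-initialised bitmap with max_chips bits */
	return bitmap_zalloc(max_chips, GFP_KERNEL);
}

static void chip_map_release(unsigned long *chip_map)
{
	/* pairs with bitmap_zalloc() */
	bitmap_free(chip_map);
}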
......@@ -213,7 +213,9 @@ static int mchp23k256_remove(struct spi_device *spi)
{
struct mchp23k256_flash *flash = spi_get_drvdata(spi);
return mtd_device_unregister(&flash->mtd);
WARN_ON(mtd_device_unregister(&flash->mtd));
return 0;
}
static const struct of_device_id mchp23k256_of_table[] = {
......
......@@ -345,7 +345,9 @@ static int mchp48l640_remove(struct spi_device *spi)
{
struct mchp48l640_flash *flash = spi_get_drvdata(spi);
return mtd_device_unregister(&flash->mtd);
WARN_ON(mtd_device_unregister(&flash->mtd));
return 0;
}
static const struct of_device_id mchp48l640_of_table[] = {
......
......@@ -919,14 +919,14 @@ static int dataflash_probe(struct spi_device *spi)
static int dataflash_remove(struct spi_device *spi)
{
struct dataflash *flash = spi_get_drvdata(spi);
int status;
dev_dbg(&spi->dev, "remove\n");
status = mtd_device_unregister(&flash->mtd);
if (status == 0)
kfree(flash);
return status;
WARN_ON(mtd_device_unregister(&flash->mtd));
kfree(flash);
return 0;
}
static struct spi_driver dataflash_driver = {
......
......@@ -402,7 +402,9 @@ static int sst25l_remove(struct spi_device *spi)
{
struct sst25l_flash *flash = spi_get_drvdata(spi);
return mtd_device_unregister(&flash->mtd);
WARN_ON(mtd_device_unregister(&flash->mtd));
return 0;
}
static struct spi_driver sst25l_driver = {
......
......@@ -124,7 +124,9 @@ static int rpcif_hb_probe(struct platform_device *pdev)
if (!hyperbus)
return -ENOMEM;
rpcif_sw_init(&hyperbus->rpc, pdev->dev.parent);
error = rpcif_sw_init(&hyperbus->rpc, pdev->dev.parent);
if (error)
return error;
platform_set_drvdata(pdev, hyperbus);
......@@ -152,9 +154,9 @@ static int rpcif_hb_remove(struct platform_device *pdev)
{
struct rpcif_hyperbus *hyperbus = platform_get_drvdata(pdev);
int error = hyperbus_unregister_device(&hyperbus->hbdev);
struct rpcif *rpc = dev_get_drvdata(pdev->dev.parent);
rpcif_disable_rpm(rpc);
rpcif_disable_rpm(&hyperbus->rpc);
return error;
}
......
......@@ -573,14 +573,32 @@ static int mtdchar_blkpg_ioctl(struct mtd_info *mtd,
}
}
static void adjust_oob_length(struct mtd_info *mtd, uint64_t start,
struct mtd_oob_ops *ops)
{
uint32_t start_page, end_page;
u32 oob_per_page;
if (ops->len == 0 || ops->ooblen == 0)
return;
start_page = mtd_div_by_ws(start, mtd);
end_page = mtd_div_by_ws(start + ops->len - 1, mtd);
oob_per_page = mtd_oobavail(mtd, ops);
ops->ooblen = min_t(size_t, ops->ooblen,
(end_page - start_page + 1) * oob_per_page);
}
static int mtdchar_write_ioctl(struct mtd_info *mtd,
struct mtd_write_req __user *argp)
{
struct mtd_info *master = mtd_get_master(mtd);
struct mtd_write_req req;
struct mtd_oob_ops ops = {};
const void __user *usr_data, *usr_oob;
int ret;
uint8_t *datbuf = NULL, *oobbuf = NULL;
size_t datbuf_len, oobbuf_len;
int ret = 0;
if (copy_from_user(&req, argp, sizeof(req)))
return -EFAULT;
......@@ -590,33 +608,79 @@ static int mtdchar_write_ioctl(struct mtd_info *mtd,
if (!master->_write_oob)
return -EOPNOTSUPP;
ops.mode = req.mode;
ops.len = (size_t)req.len;
ops.ooblen = (size_t)req.ooblen;
ops.ooboffs = 0;
if (usr_data) {
ops.datbuf = memdup_user(usr_data, ops.len);
if (IS_ERR(ops.datbuf))
return PTR_ERR(ops.datbuf);
} else {
ops.datbuf = NULL;
if (!usr_data)
req.len = 0;
if (!usr_oob)
req.ooblen = 0;
if (req.start + req.len > mtd->size)
return -EINVAL;
datbuf_len = min_t(size_t, req.len, mtd->erasesize);
if (datbuf_len > 0) {
datbuf = kmalloc(datbuf_len, GFP_KERNEL);
if (!datbuf)
return -ENOMEM;
}
if (usr_oob) {
ops.oobbuf = memdup_user(usr_oob, ops.ooblen);
if (IS_ERR(ops.oobbuf)) {
kfree(ops.datbuf);
return PTR_ERR(ops.oobbuf);
oobbuf_len = min_t(size_t, req.ooblen, mtd->erasesize);
if (oobbuf_len > 0) {
oobbuf = kmalloc(oobbuf_len, GFP_KERNEL);
if (!oobbuf) {
kfree(datbuf);
return -ENOMEM;
}
} else {
ops.oobbuf = NULL;
}
ret = mtd_write_oob(mtd, (loff_t)req.start, &ops);
while (req.len > 0 || (!usr_data && req.ooblen > 0)) {
struct mtd_oob_ops ops = {
.mode = req.mode,
.len = min_t(size_t, req.len, datbuf_len),
.ooblen = min_t(size_t, req.ooblen, oobbuf_len),
.datbuf = datbuf,
.oobbuf = oobbuf,
};
kfree(ops.datbuf);
kfree(ops.oobbuf);
/*
* Shorten non-page-aligned, eraseblock-sized writes so that
* the write ends on an eraseblock boundary. This is necessary
* for adjust_oob_length() to properly handle non-page-aligned
* writes.
*/
if (ops.len == mtd->erasesize)
ops.len -= mtd_mod_by_ws(req.start + ops.len, mtd);
/*
* For writes which are not OOB-only, adjust the amount of OOB
* data written according to the number of data pages written.
* This is necessary to prevent OOB data from being skipped
* over in data+OOB writes requiring multiple mtd_write_oob()
* calls to be completed.
*/
adjust_oob_length(mtd, req.start, &ops);
if (copy_from_user(datbuf, usr_data, ops.len) ||
copy_from_user(oobbuf, usr_oob, ops.ooblen)) {
ret = -EFAULT;
break;
}
ret = mtd_write_oob(mtd, req.start, &ops);
if (ret)
break;
req.start += ops.retlen;
req.len -= ops.retlen;
usr_data += ops.retlen;
req.ooblen -= ops.oobretlen;
usr_oob += ops.oobretlen;
}
kfree(datbuf);
kfree(oobbuf);
return ret;
}
......
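Note on the mtdchar hunk above: the MEMWRITE user-space interface is unchanged; only the kernel-side buffering is now bounded, looping over the request in chunks of at most one eraseblock instead of memdup'ing the whole user buffer. A rough user-space sketch of a data+OOB write (field names come from the UAPI struct mtd_write_req in <mtd/mtd-abi.h>; MTD_OPS_AUTO_OOB chosen for illustration, error handling omitted):

#include <stdint.h>
#include <sys/ioctl.h>
#include <mtd/mtd-user.h>

/* Issue a single MEMWRITE request on an already-open /dev/mtdX fd; the
 * kernel now iterates internally in eraseblock-sized chunks. */
static int mtd_write_with_oob(int fd, uint64_t start,
			      const void *data, uint64_t len,
			      const void *oob, uint64_t ooblen)
{
	struct mtd_write_req req = {
		.start    = start,
		.len      = len,
		.ooblen   = ooblen,
		.usr_data = (uintptr_t)data,
		.usr_oob  = (uintptr_t)oob,
		.mode     = MTD_OPS_AUTO_OOB,
	};

	return ioctl(fd, MEMWRITE, &req);
}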
......@@ -747,6 +747,9 @@ int del_mtd_device(struct mtd_info *mtd)
device_unregister(&mtd->dev);
/* Clear dev so mtd can be safely re-registered later if desired */
memset(&mtd->dev, 0, sizeof(mtd->dev));
idr_remove(&mtd_idr, mtd->index);
of_node_put(mtd_get_of_node(mtd));
......@@ -825,8 +828,7 @@ static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd,
/* OTP nvmem will be registered on the physical device */
config.dev = mtd->dev.parent;
/* just reuse the compatible as name */
config.name = compatible;
config.name = kasprintf(GFP_KERNEL, "%s-%s", dev_name(&mtd->dev), compatible);
config.id = NVMEM_DEVID_NONE;
config.owner = THIS_MODULE;
config.type = NVMEM_TYPE_OTP;
......@@ -842,6 +844,7 @@ static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd,
nvmem = NULL;
of_node_put(np);
kfree(config.name);
return nvmem;
}
......@@ -1018,8 +1021,10 @@ int mtd_device_unregister(struct mtd_info *master)
{
int err;
if (master->_reboot)
if (master->_reboot) {
unregister_reboot_notifier(&master->reboot_notifier);
memset(&master->reboot_notifier, 0, sizeof(master->reboot_notifier));
}
if (master->otp_user_nvmem)
nvmem_unregister(master->otp_user_nvmem);
......@@ -2365,6 +2370,14 @@ static struct backing_dev_info * __init mtd_bdi_init(const char *name)
return ret ? ERR_PTR(ret) : bdi;
}
char *mtd_expert_analysis_warning =
"Bad block checks have been entirely disabled.\n"
"This is only reserved for post-mortem forensics and debug purposes.\n"
"Never enable this mode if you do not know what you are doing!\n";
EXPORT_SYMBOL_GPL(mtd_expert_analysis_warning);
bool mtd_expert_analysis_mode;
EXPORT_SYMBOL_GPL(mtd_expert_analysis_mode);
static struct proc_dir_entry *proc_mtd;
static int __init init_mtd(void)
......@@ -2388,6 +2401,8 @@ static int __init init_mtd(void)
goto out_procfs;
dfs_dir_mtd = debugfs_create_dir("mtd", NULL);
debugfs_create_bool("expert_analysis_mode", 0600, dfs_dir_mtd,
&mtd_expert_analysis_mode);
return 0;
......
......@@ -312,7 +312,7 @@ static int __mtd_del_partition(struct mtd_info *mtd)
if (err)
return err;
list_del(&child->part.node);
list_del(&mtd->part.node);
free_partition(mtd);
return 0;
......
......@@ -21,6 +21,9 @@
*/
bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos)
{
if (WARN_ONCE(mtd_expert_analysis_mode, mtd_expert_analysis_warning))
return false;
if (nanddev_bbt_is_initialized(nand)) {
unsigned int entry;
int status;
......
......@@ -60,7 +60,7 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
int i, j, numblocks, len, scanlen;
int startblock;
loff_t from;
size_t readlen, ooblen;
size_t readlen;
struct mtd_oob_ops ops;
int rgn;
......@@ -69,7 +69,7 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
len = 2;
/* We need only read few bytes from the OOB area */
scanlen = ooblen = 0;
scanlen = 0;
readlen = bd->len;
/* chip == -1 case only */
......
......@@ -40,8 +40,9 @@ config MTD_NAND_AMS_DELTA
config MTD_NAND_OMAP2
tristate "OMAP2, OMAP3, OMAP4 and Keystone NAND controller"
depends on ARCH_OMAP2PLUS || ARCH_KEYSTONE || COMPILE_TEST
depends on ARCH_OMAP2PLUS || ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST
depends on HAS_IOMEM
select OMAP_GPMC if ARCH_K3
help
Support for NAND flash on Texas Instruments OMAP2, OMAP3, OMAP4
and Keystone platforms.
......@@ -461,6 +462,13 @@ config MTD_NAND_PL35X
Enables support for PrimeCell SMC PL351 and PL353 NAND
controller found on Zynq7000.
config MTD_NAND_RENESAS
tristate "Renesas R-Car Gen3 & RZ/N1 NAND controller"
depends on ARCH_RENESAS || COMPILE_TEST
help
Enables support for the NAND controller found on Renesas R-Car
Gen3 and RZ/N1 SoC families.
comment "Misc"
config MTD_SM_COMMON
......
......@@ -58,6 +58,7 @@ obj-$(CONFIG_MTD_NAND_ARASAN) += arasan-nand-controller.o
obj-$(CONFIG_MTD_NAND_INTEL_LGM) += intel-nand-controller.o
obj-$(CONFIG_MTD_NAND_ROCKCHIP) += rockchip-nand-controller.o
obj-$(CONFIG_MTD_NAND_PL35X) += pl35x-nand-controller.o
obj-$(CONFIG_MTD_NAND_RENESAS) += renesas-nand-controller.o
nand-objs := nand_base.o nand_legacy.o nand_bbt.o nand_timings.o nand_ids.o
nand-objs += nand_onfi.o
......
......@@ -371,77 +371,6 @@ static int nand_davinci_correct_4bit(struct nand_chip *chip, u_char *data,
return corrected;
}
/**
* nand_read_page_hwecc_oob_first - hw ecc, read oob first
* @chip: nand chip info structure
* @buf: buffer to store read data
* @oob_required: caller requires OOB data read to chip->oob_poi
* @page: page number to read
*
* Hardware ECC for large page chips, require OOB to be read first. For this
* ECC mode, the write_page method is re-used from ECC_HW. These methods
* read/write ECC from the OOB area, unlike the ECC_HW_SYNDROME support with
* multiple ECC steps, follows the "infix ECC" scheme and reads/writes ECC from
* the data area, by overwriting the NAND manufacturer bad block markings.
*/
static int nand_davinci_read_page_hwecc_oob_first(struct nand_chip *chip,
uint8_t *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
int i, eccsize = chip->ecc.size, ret;
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
uint8_t *p = buf;
uint8_t *ecc_code = chip->ecc.code_buf;
uint8_t *ecc_calc = chip->ecc.calc_buf;
unsigned int max_bitflips = 0;
/* Read the OOB area first */
ret = nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
if (ret)
return ret;
ret = nand_read_page_op(chip, page, 0, NULL, 0);
if (ret)
return ret;
ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
chip->ecc.total);
if (ret)
return ret;
for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
int stat;
chip->ecc.hwctl(chip, NAND_ECC_READ);
ret = nand_read_data_op(chip, p, eccsize, false, false);
if (ret)
return ret;
chip->ecc.calculate(chip, p, &ecc_calc[i]);
stat = chip->ecc.correct(chip, p, &ecc_code[i], NULL);
if (stat == -EBADMSG &&
(chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
/* check for empty pages with bitflips */
stat = nand_check_erased_ecc_chunk(p, eccsize,
&ecc_code[i],
eccbytes, NULL, 0,
chip->ecc.strength);
}
if (stat < 0) {
mtd->ecc_stats.failed++;
} else {
mtd->ecc_stats.corrected += stat;
max_bitflips = max_t(unsigned int, max_bitflips, stat);
}
}
return max_bitflips;
}
/*----------------------------------------------------------------------*/
/* An ECC layout for using 4-bit ECC with small-page flash, storing
......@@ -651,7 +580,7 @@ static int davinci_nand_attach_chip(struct nand_chip *chip)
} else if (chunks == 4 || chunks == 8) {
mtd_set_ooblayout(mtd,
nand_get_large_page_ooblayout());
chip->ecc.read_page = nand_davinci_read_page_hwecc_oob_first;
chip->ecc.read_page = nand_read_page_hwecc_oob_first;
} else {
return -EIO;
}
......
......@@ -713,14 +713,32 @@ static void gpmi_nfc_compute_timings(struct gpmi_nand_data *this,
(use_half_period ? BM_GPMI_CTRL1_HALF_PERIOD : 0);
}
static void gpmi_nfc_apply_timings(struct gpmi_nand_data *this)
static int gpmi_nfc_apply_timings(struct gpmi_nand_data *this)
{
struct gpmi_nfc_hardware_timing *hw = &this->hw;
struct resources *r = &this->resources;
void __iomem *gpmi_regs = r->gpmi_regs;
unsigned int dll_wait_time_us;
int ret;
/* Clock dividers do NOT guarantee a clean clock signal on its output
* during the change of the divide factor on i.MX6Q/UL/SX. On i.MX7/8,
* all clock dividers provide these guarantee.
*/
if (GPMI_IS_MX6Q(this) || GPMI_IS_MX6SX(this))
clk_disable_unprepare(r->clock[0]);
clk_set_rate(r->clock[0], hw->clk_rate);
ret = clk_set_rate(r->clock[0], hw->clk_rate);
if (ret) {
dev_err(this->dev, "cannot set clock rate to %lu Hz: %d\n", hw->clk_rate, ret);
return ret;
}
if (GPMI_IS_MX6Q(this) || GPMI_IS_MX6SX(this)) {
ret = clk_prepare_enable(r->clock[0]);
if (ret)
return ret;
}
writel(hw->timing0, gpmi_regs + HW_GPMI_TIMING0);
writel(hw->timing1, gpmi_regs + HW_GPMI_TIMING1);
......@@ -739,6 +757,8 @@ static void gpmi_nfc_apply_timings(struct gpmi_nand_data *this)
/* Wait for the DLL to settle. */
udelay(dll_wait_time_us);
return 0;
}
static int gpmi_setup_interface(struct nand_chip *chip, int chipnr,
......@@ -971,16 +991,13 @@ static int acquire_bch_irq(struct gpmi_nand_data *this, irq_handler_t irq_h)
{
struct platform_device *pdev = this->pdev;
const char *res_name = GPMI_NAND_BCH_INTERRUPT_RES_NAME;
struct resource *r;
int err;
r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, res_name);
if (!r) {
dev_err(this->dev, "Can't get resource for %s\n", res_name);
return -ENODEV;
}
err = platform_get_irq_byname(pdev, res_name);
if (err < 0)
return err;
err = devm_request_irq(this->dev, r->start, irq_h, 0, res_name, this);
err = devm_request_irq(this->dev, err, irq_h, 0, res_name, this);
if (err)
dev_err(this->dev, "error requesting BCH IRQ\n");
......@@ -1032,15 +1049,6 @@ static int gpmi_get_clks(struct gpmi_nand_data *this)
r->clock[i] = clk;
}
if (GPMI_IS_MX6(this))
/*
* Set the default value for the gpmi clock.
*
* If you want to use the ONFI nand which is in the
* Synchronous Mode, you should change the clock as you need.
*/
clk_set_rate(r->clock[0], 22000000);
return 0;
err_clock:
......@@ -1425,7 +1433,6 @@ static int gpmi_ecc_write_page(struct nand_chip *chip, const uint8_t *buf,
struct mtd_info *mtd = nand_to_mtd(chip);
struct gpmi_nand_data *this = nand_get_controller_data(chip);
struct bch_geometry *nfc_geo = &this->bch_geometry;
int ret;
dev_dbg(this->dev, "ecc write page.\n");
......@@ -1445,9 +1452,7 @@ static int gpmi_ecc_write_page(struct nand_chip *chip, const uint8_t *buf,
this->auxiliary_virt);
}
ret = nand_prog_page_op(chip, page, 0, buf, nfc_geo->page_size);
return ret;
return nand_prog_page_op(chip, page, 0, buf, nfc_geo->page_size);
}
/*
......@@ -2278,7 +2283,9 @@ static int gpmi_nfc_exec_op(struct nand_chip *chip,
*/
if (this->hw.must_apply_timings) {
this->hw.must_apply_timings = false;
gpmi_nfc_apply_timings(this);
ret = gpmi_nfc_apply_timings(this);
if (ret)
return ret;
}
dev_dbg(this->dev, "%s: %d instructions\n", __func__, op->ninstrs);
......
......@@ -32,6 +32,7 @@ struct jz_soc_info {
unsigned long addr_offset;
unsigned long cmd_offset;
const struct mtd_ooblayout_ops *oob_layout;
bool oob_first;
};
struct ingenic_nand_cs {
......@@ -240,6 +241,9 @@ static int ingenic_nand_attach_chip(struct nand_chip *chip)
if (chip->bbt_options & NAND_BBT_USE_FLASH)
chip->bbt_options |= NAND_BBT_NO_OOB;
if (nfc->soc_info->oob_first)
chip->ecc.read_page = nand_read_page_hwecc_oob_first;
/* For legacy reasons we use a different layout on the qi,lb60 board. */
if (of_machine_is_compatible("qi,lb60"))
mtd_set_ooblayout(mtd, &qi_lb60_ooblayout_ops);
......@@ -534,6 +538,7 @@ static const struct jz_soc_info jz4740_soc_info = {
.data_offset = 0x00000000,
.cmd_offset = 0x00008000,
.addr_offset = 0x00010000,
.oob_first = true,
};
static const struct jz_soc_info jz4725b_soc_info = {
......
......@@ -291,7 +291,6 @@ static int ads5121_chipselect_init(struct mtd_info *mtd)
/* Control chips select signal on ADS5121 board */
static void ads5121_select_chip(struct nand_chip *nand, int chip)
{
struct mtd_info *mtd = nand_to_mtd(nand);
struct mpc5121_nfc_prv *prv = nand_get_controller_data(nand);
u8 v;
......
......@@ -321,6 +321,9 @@ static int nand_isbad_bbm(struct nand_chip *chip, loff_t ofs)
if (nand_region_is_secured(chip, ofs, mtd->erasesize))
return -EIO;
if (WARN_ONCE(mtd_expert_analysis_mode, mtd_expert_analysis_warning))
return 0;
if (chip->legacy.block_bad)
return chip->legacy.block_bad(chip, ofs);
......@@ -3160,6 +3163,73 @@ static int nand_read_page_hwecc(struct nand_chip *chip, uint8_t *buf,
return max_bitflips;
}
/**
* nand_read_page_hwecc_oob_first - Hardware ECC page read with ECC
* data read from OOB area
* @chip: nand chip info structure
* @buf: buffer to store read data
* @oob_required: caller requires OOB data read to chip->oob_poi
* @page: page number to read
*
* Hardware ECC for large page chips, which requires the ECC data to be
* extracted from the OOB before the actual data is read.
*/
int nand_read_page_hwecc_oob_first(struct nand_chip *chip, uint8_t *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
int i, eccsize = chip->ecc.size, ret;
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
uint8_t *p = buf;
uint8_t *ecc_code = chip->ecc.code_buf;
unsigned int max_bitflips = 0;
/* Read the OOB area first */
ret = nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
if (ret)
return ret;
/* Move read cursor to start of page */
ret = nand_change_read_column_op(chip, 0, NULL, 0, false);
if (ret)
return ret;
ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
chip->ecc.total);
if (ret)
return ret;
for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
int stat;
chip->ecc.hwctl(chip, NAND_ECC_READ);
ret = nand_read_data_op(chip, p, eccsize, false, false);
if (ret)
return ret;
stat = chip->ecc.correct(chip, p, &ecc_code[i], NULL);
if (stat == -EBADMSG &&
(chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
/* check for empty pages with bitflips */
stat = nand_check_erased_ecc_chunk(p, eccsize,
&ecc_code[i],
eccbytes, NULL, 0,
chip->ecc.strength);
}
if (stat < 0) {
mtd->ecc_stats.failed++;
} else {
mtd->ecc_stats.corrected += stat;
max_bitflips = max_t(unsigned int, max_bitflips, stat);
}
}
return max_bitflips;
}
EXPORT_SYMBOL_GPL(nand_read_page_hwecc_oob_first);
/**
* nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
* @chip: nand chip info structure
......
......@@ -1455,6 +1455,9 @@ int nand_isbad_bbt(struct nand_chip *this, loff_t offs, int allowbbt)
pr_debug("nand_isbad_bbt(): bbt info for offs 0x%08x: (block %d) 0x%02x\n",
(unsigned int)offs, block, res);
if (WARN_ONCE(mtd_expert_analysis_mode, mtd_expert_analysis_warning))
return 0;
switch (res) {
case BBT_BLOCK_GOOD:
return 0;
......
......@@ -19,7 +19,7 @@
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/omap-dma.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>
......@@ -148,7 +148,6 @@ struct omap_nand_info {
int gpmc_cs;
bool dev_ready;
enum nand_io xfer_type;
int devsize;
enum omap_ecc ecc_opt;
struct device_node *elm_of_node;
......@@ -164,6 +163,7 @@ struct omap_nand_info {
u_char *buf;
int buf_len;
/* Interface to GPMC */
void __iomem *fifo;
struct gpmc_nand_regs reg;
struct gpmc_nand_ops *ops;
bool flash_bbt;
......@@ -175,6 +175,11 @@ struct omap_nand_info {
unsigned int nsteps_per_eccpg;
unsigned int eccpg_size;
unsigned int eccpg_bytes;
void (*data_in)(struct nand_chip *chip, void *buf,
unsigned int len, bool force_8bit);
void (*data_out)(struct nand_chip *chip,
const void *buf, unsigned int len,
bool force_8bit);
};
static inline struct omap_nand_info *mtd_to_omap(struct mtd_info *mtd)
......@@ -182,6 +187,13 @@ static inline struct omap_nand_info *mtd_to_omap(struct mtd_info *mtd)
return container_of(mtd_to_nand(mtd), struct omap_nand_info, nand);
}
static void omap_nand_data_in(struct nand_chip *chip, void *buf,
unsigned int len, bool force_8bit);
static void omap_nand_data_out(struct nand_chip *chip,
const void *buf, unsigned int len,
bool force_8bit);
/**
* omap_prefetch_enable - configures and starts prefetch transfer
* @cs: cs (chip select) number
......@@ -241,169 +253,70 @@ static int omap_prefetch_reset(int cs, struct omap_nand_info *info)
}
/**
* omap_hwcontrol - hardware specific access to control-lines
* @chip: NAND chip object
* @cmd: command to device
* @ctrl:
* NAND_NCE: bit 0 -> don't care
* NAND_CLE: bit 1 -> Command Latch
* NAND_ALE: bit 2 -> Address Latch
*
* NOTE: boards may use different bits for these!!
* omap_nand_data_in_pref - NAND data in using prefetch engine
*/
static void omap_hwcontrol(struct nand_chip *chip, int cmd, unsigned int ctrl)
static void omap_nand_data_in_pref(struct nand_chip *chip, void *buf,
unsigned int len, bool force_8bit)
{
struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
if (cmd != NAND_CMD_NONE) {
if (ctrl & NAND_CLE)
writeb(cmd, info->reg.gpmc_nand_command);
else if (ctrl & NAND_ALE)
writeb(cmd, info->reg.gpmc_nand_address);
else /* NAND_NCE */
writeb(cmd, info->reg.gpmc_nand_data);
}
}
/**
* omap_read_buf8 - read data from NAND controller into buffer
* @mtd: MTD device structure
* @buf: buffer to store date
* @len: number of bytes to read
*/
static void omap_read_buf8(struct mtd_info *mtd, u_char *buf, int len)
{
struct nand_chip *nand = mtd_to_nand(mtd);
ioread8_rep(nand->legacy.IO_ADDR_R, buf, len);
}
/**
* omap_write_buf8 - write buffer to NAND controller
* @mtd: MTD device structure
* @buf: data buffer
* @len: number of bytes to write
*/
static void omap_write_buf8(struct mtd_info *mtd, const u_char *buf, int len)
{
struct omap_nand_info *info = mtd_to_omap(mtd);
u_char *p = (u_char *)buf;
bool status;
while (len--) {
iowrite8(*p++, info->nand.legacy.IO_ADDR_W);
/* wait until buffer is available for write */
do {
status = info->ops->nand_writebuffer_empty();
} while (!status);
}
}
/**
* omap_read_buf16 - read data from NAND controller into buffer
* @mtd: MTD device structure
* @buf: buffer to store date
* @len: number of bytes to read
*/
static void omap_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
{
struct nand_chip *nand = mtd_to_nand(mtd);
ioread16_rep(nand->legacy.IO_ADDR_R, buf, len / 2);
}
/**
* omap_write_buf16 - write buffer to NAND controller
* @mtd: MTD device structure
* @buf: data buffer
* @len: number of bytes to write
*/
static void omap_write_buf16(struct mtd_info *mtd, const u_char * buf, int len)
{
struct omap_nand_info *info = mtd_to_omap(mtd);
u16 *p = (u16 *) buf;
bool status;
/* FIXME try bursts of writesw() or DMA ... */
len >>= 1;
while (len--) {
iowrite16(*p++, info->nand.legacy.IO_ADDR_W);
/* wait until buffer is available for write */
do {
status = info->ops->nand_writebuffer_empty();
} while (!status);
}
}
/**
* omap_read_buf_pref - read data from NAND controller into buffer
* @chip: NAND chip object
* @buf: buffer to store date
* @len: number of bytes to read
*/
static void omap_read_buf_pref(struct nand_chip *chip, u_char *buf, int len)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct omap_nand_info *info = mtd_to_omap(mtd);
uint32_t r_count = 0;
int ret = 0;
u32 *p = (u32 *)buf;
unsigned int pref_len;
/* take care of subpage reads */
if (len % 4) {
if (info->nand.options & NAND_BUSWIDTH_16)
omap_read_buf16(mtd, buf, len % 4);
else
omap_read_buf8(mtd, buf, len % 4);
p = (u32 *) (buf + len % 4);
len -= len % 4;
if (force_8bit) {
omap_nand_data_in(chip, buf, len, force_8bit);
return;
}
/* read 32-bit words using prefetch and remaining bytes normally */
/* configure and start prefetch transfer */
pref_len = len - (len & 3);
ret = omap_prefetch_enable(info->gpmc_cs,
PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x0, info);
PREFETCH_FIFOTHRESHOLD_MAX, 0x0, pref_len, 0x0, info);
if (ret) {
/* PFPW engine is busy, use cpu copy method */
if (info->nand.options & NAND_BUSWIDTH_16)
omap_read_buf16(mtd, (u_char *)p, len);
else
omap_read_buf8(mtd, (u_char *)p, len);
/* prefetch engine is busy, use CPU copy method */
omap_nand_data_in(chip, buf, len, false);
} else {
do {
r_count = readl(info->reg.gpmc_prefetch_status);
r_count = PREFETCH_STATUS_FIFO_CNT(r_count);
r_count = r_count >> 2;
ioread32_rep(info->nand.legacy.IO_ADDR_R, p, r_count);
ioread32_rep(info->fifo, p, r_count);
p += r_count;
len -= r_count << 2;
} while (len);
/* disable and stop the PFPW engine */
pref_len -= r_count << 2;
} while (pref_len);
/* disable and stop the Prefetch engine */
omap_prefetch_reset(info->gpmc_cs, info);
/* fetch any remaining bytes */
if (len & 3)
omap_nand_data_in(chip, p, len & 3, false);
}
}
/**
* omap_write_buf_pref - write buffer to NAND controller
* @chip: NAND chip object
* @buf: data buffer
* @len: number of bytes to write
* omap_nand_data_out_pref - NAND data out using Write Posting engine
*/
static void omap_write_buf_pref(struct nand_chip *chip, const u_char *buf,
int len)
static void omap_nand_data_out_pref(struct nand_chip *chip,
const void *buf, unsigned int len,
bool force_8bit)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct omap_nand_info *info = mtd_to_omap(mtd);
struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
uint32_t w_count = 0;
int i = 0, ret = 0;
u16 *p = (u16 *)buf;
unsigned long tim, limit;
u32 val;
if (force_8bit) {
omap_nand_data_out(chip, buf, len, force_8bit);
return;
}
/* take care of subpage writes */
if (len % 2 != 0) {
writeb(*buf, info->nand.legacy.IO_ADDR_W);
writeb(*(u8 *)buf, info->fifo);
p = (u16 *)(buf + 1);
len--;
}
......@@ -412,18 +325,15 @@ static void omap_write_buf_pref(struct nand_chip *chip, const u_char *buf,
ret = omap_prefetch_enable(info->gpmc_cs,
PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1, info);
if (ret) {
/* PFPW engine is busy, use cpu copy method */
if (info->nand.options & NAND_BUSWIDTH_16)
omap_write_buf16(mtd, (u_char *)p, len);
else
omap_write_buf8(mtd, (u_char *)p, len);
/* write posting engine is busy, use CPU copy method */
omap_nand_data_out(chip, buf, len, false);
} else {
while (len) {
w_count = readl(info->reg.gpmc_prefetch_status);
w_count = PREFETCH_STATUS_FIFO_CNT(w_count);
w_count = w_count >> 1;
for (i = 0; (i < w_count) && len; i++, len -= 2)
iowrite16(*p++, info->nand.legacy.IO_ADDR_W);
iowrite16(*p++, info->fifo);
}
/* wait for data to flushed-out before reset the prefetch */
tim = 0;
......@@ -451,15 +361,16 @@ static void omap_nand_dma_callback(void *data)
/*
* omap_nand_dma_transfer: configure and start dma transfer
* @mtd: MTD device structure
* @chip: nand chip structure
* @addr: virtual address in RAM of source/destination
* @len: number of data bytes to be transferred
* @is_write: flag for read/write operation
*/
static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
unsigned int len, int is_write)
static inline int omap_nand_dma_transfer(struct nand_chip *chip,
const void *addr, unsigned int len,
int is_write)
{
struct omap_nand_info *info = mtd_to_omap(mtd);
struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
struct dma_async_tx_descriptor *tx;
enum dma_data_direction dir = is_write ? DMA_TO_DEVICE :
DMA_FROM_DEVICE;
......@@ -521,49 +432,51 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
out_copy_unmap:
dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
out_copy:
if (info->nand.options & NAND_BUSWIDTH_16)
is_write == 0 ? omap_read_buf16(mtd, (u_char *) addr, len)
: omap_write_buf16(mtd, (u_char *) addr, len);
else
is_write == 0 ? omap_read_buf8(mtd, (u_char *) addr, len)
: omap_write_buf8(mtd, (u_char *) addr, len);
is_write == 0 ? omap_nand_data_in(chip, (void *)addr, len, false)
: omap_nand_data_out(chip, addr, len, false);
return 0;
}
/**
* omap_read_buf_dma_pref - read data from NAND controller into buffer
* @chip: NAND chip object
* @buf: buffer to store date
* @len: number of bytes to read
* omap_nand_data_in_dma_pref - NAND data in using DMA and Prefetch
*/
static void omap_read_buf_dma_pref(struct nand_chip *chip, u_char *buf,
int len)
static void omap_nand_data_in_dma_pref(struct nand_chip *chip, void *buf,
unsigned int len, bool force_8bit)
{
struct mtd_info *mtd = nand_to_mtd(chip);
if (force_8bit) {
omap_nand_data_in(chip, buf, len, force_8bit);
return;
}
if (len <= mtd->oobsize)
omap_read_buf_pref(chip, buf, len);
omap_nand_data_in_pref(chip, buf, len, false);
else
/* start transfer in DMA mode */
omap_nand_dma_transfer(mtd, buf, len, 0x0);
omap_nand_dma_transfer(chip, buf, len, 0x0);
}
/**
* omap_write_buf_dma_pref - write buffer to NAND controller
* @chip: NAND chip object
* @buf: data buffer
* @len: number of bytes to write
* omap_nand_data_out_dma_pref - NAND data out using DMA and write posting
*/
static void omap_write_buf_dma_pref(struct nand_chip *chip, const u_char *buf,
int len)
static void omap_nand_data_out_dma_pref(struct nand_chip *chip,
const void *buf, unsigned int len,
bool force_8bit)
{
struct mtd_info *mtd = nand_to_mtd(chip);
if (force_8bit) {
omap_nand_data_out(chip, buf, len, force_8bit);
return;
}
if (len <= mtd->oobsize)
omap_write_buf_pref(chip, buf, len);
omap_nand_data_out_pref(chip, buf, len, false);
else
/* start transfer in DMA mode */
omap_nand_dma_transfer(mtd, (u_char *)buf, len, 0x1);
omap_nand_dma_transfer(chip, buf, len, 0x1);
}
/*
......@@ -587,13 +500,13 @@ static irqreturn_t omap_nand_irq(int this_irq, void *dev)
bytes = info->buf_len;
else if (!info->buf_len)
bytes = 0;
iowrite32_rep(info->nand.legacy.IO_ADDR_W, (u32 *)info->buf,
iowrite32_rep(info->fifo, (u32 *)info->buf,
bytes >> 2);
info->buf = info->buf + bytes;
info->buf_len -= bytes;
} else {
ioread32_rep(info->nand.legacy.IO_ADDR_R, (u32 *)info->buf,
ioread32_rep(info->fifo, (u32 *)info->buf,
bytes >> 2);
info->buf = info->buf + bytes;
......@@ -613,20 +526,17 @@ static irqreturn_t omap_nand_irq(int this_irq, void *dev)
}
/*
* omap_read_buf_irq_pref - read data from NAND controller into buffer
* @chip: NAND chip object
* @buf: buffer to store date
* @len: number of bytes to read
* omap_nand_data_in_irq_pref - NAND data in using Prefetch and IRQ
*/
static void omap_read_buf_irq_pref(struct nand_chip *chip, u_char *buf,
int len)
static void omap_nand_data_in_irq_pref(struct nand_chip *chip, void *buf,
unsigned int len, bool force_8bit)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct omap_nand_info *info = mtd_to_omap(mtd);
struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
struct mtd_info *mtd = nand_to_mtd(&info->nand);
int ret = 0;
if (len <= mtd->oobsize) {
omap_read_buf_pref(chip, buf, len);
if (len <= mtd->oobsize || force_8bit) {
omap_nand_data_in(chip, buf, len, force_8bit);
return;
}
......@@ -637,9 +547,11 @@ static void omap_read_buf_irq_pref(struct nand_chip *chip, u_char *buf,
/* configure and start prefetch transfer */
ret = omap_prefetch_enable(info->gpmc_cs,
PREFETCH_FIFOTHRESHOLD_MAX/2, 0x0, len, 0x0, info);
if (ret)
if (ret) {
/* PFPW engine is busy, use cpu copy method */
goto out_copy;
omap_nand_data_in(chip, buf, len, false);
return;
}
info->buf_len = len;
......@@ -652,31 +564,23 @@ static void omap_read_buf_irq_pref(struct nand_chip *chip, u_char *buf,
/* disable and stop the PFPW engine */
omap_prefetch_reset(info->gpmc_cs, info);
return;
out_copy:
if (info->nand.options & NAND_BUSWIDTH_16)
omap_read_buf16(mtd, buf, len);
else
omap_read_buf8(mtd, buf, len);
}
/*
* omap_write_buf_irq_pref - write buffer to NAND controller
* @chip: NAND chip object
* @buf: data buffer
* @len: number of bytes to write
* omap_nand_data_out_irq_pref - NAND out using write posting and IRQ
*/
static void omap_write_buf_irq_pref(struct nand_chip *chip, const u_char *buf,
int len)
static void omap_nand_data_out_irq_pref(struct nand_chip *chip,
const void *buf, unsigned int len,
bool force_8bit)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct omap_nand_info *info = mtd_to_omap(mtd);
struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
struct mtd_info *mtd = nand_to_mtd(&info->nand);
int ret = 0;
unsigned long tim, limit;
u32 val;
if (len <= mtd->oobsize) {
omap_write_buf_pref(chip, buf, len);
if (len <= mtd->oobsize || force_8bit) {
omap_nand_data_out(chip, buf, len, force_8bit);
return;
}
......@@ -687,9 +591,11 @@ static void omap_write_buf_irq_pref(struct nand_chip *chip, const u_char *buf,
/* configure and start prefetch transfer : size=24 */
ret = omap_prefetch_enable(info->gpmc_cs,
(PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8, 0x0, len, 0x1, info);
if (ret)
if (ret) {
/* PFPW engine is busy, use cpu copy method */
goto out_copy;
omap_nand_data_out(chip, buf, len, false);
return;
}
info->buf_len = len;
......@@ -711,12 +617,6 @@ static void omap_write_buf_irq_pref(struct nand_chip *chip, const u_char *buf,
/* disable and stop the PFPW engine */
omap_prefetch_reset(info->gpmc_cs, info);
return;
out_copy:
if (info->nand.options & NAND_BUSWIDTH_16)
omap_write_buf16(mtd, buf, len);
else
omap_write_buf8(mtd, buf, len);
}
/**
......@@ -981,50 +881,6 @@ static void omap_enable_hwecc(struct nand_chip *chip, int mode)
writel(val, info->reg.gpmc_ecc_config);
}
/**
* omap_wait - wait until the command is done
* @this: NAND Chip structure
*
* Wait function is called during Program and erase operations and
* the way it is called from MTD layer, we should wait till the NAND
* chip is ready after the programming/erase operation has completed.
*
* Erase can take up to 400ms and program up to 20ms according to
* general NAND and SmartMedia specs
*/
static int omap_wait(struct nand_chip *this)
{
struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(this));
unsigned long timeo = jiffies;
int status;
timeo += msecs_to_jiffies(400);
writeb(NAND_CMD_STATUS & 0xFF, info->reg.gpmc_nand_command);
while (time_before(jiffies, timeo)) {
status = readb(info->reg.gpmc_nand_data);
if (status & NAND_STATUS_READY)
break;
cond_resched();
}
status = readb(info->reg.gpmc_nand_data);
return status;
}
/**
* omap_dev_ready - checks the NAND Ready GPIO line
* @chip: NAND chip object
*
* Returns true if ready and false if busy.
*/
static int omap_dev_ready(struct nand_chip *chip)
{
struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
return gpiod_get_value(info->ready_gpiod);
}
/**
* omap_enable_hwecc_bch - Program GPMC to perform BCH ECC calculation
* @chip: NAND chip object
......@@ -1543,8 +1399,8 @@ static int omap_write_page_bch(struct nand_chip *chip, const uint8_t *buf,
chip->ecc.hwctl(chip, NAND_ECC_WRITE);
/* Write data */
chip->legacy.write_buf(chip, buf + (eccpg * info->eccpg_size),
info->eccpg_size);
info->data_out(chip, buf + (eccpg * info->eccpg_size),
info->eccpg_size, false);
/* Update ecc vector from GPMC result registers */
ret = omap_calculate_ecc_bch_multi(mtd,
......@@ -1562,7 +1418,7 @@ static int omap_write_page_bch(struct nand_chip *chip, const uint8_t *buf,
}
/* Write ecc vector to OOB area */
chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize);
info->data_out(chip, chip->oob_poi, mtd->oobsize, false);
return nand_prog_page_end_op(chip);
}
......@@ -1607,8 +1463,8 @@ static int omap_write_subpage_bch(struct nand_chip *chip, u32 offset,
chip->ecc.hwctl(chip, NAND_ECC_WRITE);
/* Write data */
chip->legacy.write_buf(chip, buf + (eccpg * info->eccpg_size),
info->eccpg_size);
info->data_out(chip, buf + (eccpg * info->eccpg_size),
info->eccpg_size, false);
for (step = 0; step < info->nsteps_per_eccpg; step++) {
unsigned int base_step = eccpg * info->nsteps_per_eccpg;
......@@ -1641,7 +1497,7 @@ static int omap_write_subpage_bch(struct nand_chip *chip, u32 offset,
}
/* write OOB buffer to NAND device */
chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize);
info->data_out(chip, chip->oob_poi, mtd->oobsize, false);
return nand_prog_page_end_op(chip);
}
......@@ -1984,8 +1840,8 @@ static int omap_nand_attach_chip(struct nand_chip *chip)
/* Re-populate low-level callbacks based on xfer modes */
switch (info->xfer_type) {
case NAND_OMAP_PREFETCH_POLLED:
chip->legacy.read_buf = omap_read_buf_pref;
chip->legacy.write_buf = omap_write_buf_pref;
info->data_in = omap_nand_data_in_pref;
info->data_out = omap_nand_data_out_pref;
break;
case NAND_OMAP_POLLED:
......@@ -2017,8 +1873,9 @@ static int omap_nand_attach_chip(struct nand_chip *chip)
err);
return err;
}
chip->legacy.read_buf = omap_read_buf_dma_pref;
chip->legacy.write_buf = omap_write_buf_dma_pref;
info->data_in = omap_nand_data_in_dma_pref;
info->data_out = omap_nand_data_out_dma_pref;
}
break;
......@@ -2049,9 +1906,8 @@ static int omap_nand_attach_chip(struct nand_chip *chip)
return err;
}
chip->legacy.read_buf = omap_read_buf_irq_pref;
chip->legacy.write_buf = omap_write_buf_irq_pref;
info->data_in = omap_nand_data_in_irq_pref;
info->data_out = omap_nand_data_out_irq_pref;
break;
default:
......@@ -2217,8 +2073,105 @@ static int omap_nand_attach_chip(struct nand_chip *chip)
return 0;
}
static void omap_nand_data_in(struct nand_chip *chip, void *buf,
unsigned int len, bool force_8bit)
{
struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
u32 alignment = ((uintptr_t)buf | len) & 3;
if (force_8bit || (alignment & 1))
ioread8_rep(info->fifo, buf, len);
else if (alignment & 3)
ioread16_rep(info->fifo, buf, len >> 1);
else
ioread32_rep(info->fifo, buf, len >> 2);
}
static void omap_nand_data_out(struct nand_chip *chip,
const void *buf, unsigned int len,
bool force_8bit)
{
struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
u32 alignment = ((uintptr_t)buf | len) & 3;
if (force_8bit || (alignment & 1))
iowrite8_rep(info->fifo, buf, len);
else if (alignment & 3)
iowrite16_rep(info->fifo, buf, len >> 1);
else
iowrite32_rep(info->fifo, buf, len >> 2);
}
static int omap_nand_exec_instr(struct nand_chip *chip,
const struct nand_op_instr *instr)
{
struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
unsigned int i;
int ret;
switch (instr->type) {
case NAND_OP_CMD_INSTR:
iowrite8(instr->ctx.cmd.opcode,
info->reg.gpmc_nand_command);
break;
case NAND_OP_ADDR_INSTR:
for (i = 0; i < instr->ctx.addr.naddrs; i++) {
iowrite8(instr->ctx.addr.addrs[i],
info->reg.gpmc_nand_address);
}
break;
case NAND_OP_DATA_IN_INSTR:
info->data_in(chip, instr->ctx.data.buf.in,
instr->ctx.data.len,
instr->ctx.data.force_8bit);
break;
case NAND_OP_DATA_OUT_INSTR:
info->data_out(chip, instr->ctx.data.buf.out,
instr->ctx.data.len,
instr->ctx.data.force_8bit);
break;
case NAND_OP_WAITRDY_INSTR:
ret = info->ready_gpiod ?
nand_gpio_waitrdy(chip, info->ready_gpiod, instr->ctx.waitrdy.timeout_ms) :
nand_soft_waitrdy(chip, instr->ctx.waitrdy.timeout_ms);
if (ret)
return ret;
break;
}
if (instr->delay_ns)
ndelay(instr->delay_ns);
return 0;
}
static int omap_nand_exec_op(struct nand_chip *chip,
const struct nand_operation *op,
bool check_only)
{
unsigned int i;
if (check_only)
return 0;
for (i = 0; i < op->ninstrs; i++) {
int ret;
ret = omap_nand_exec_instr(chip, &op->instrs[i]);
if (ret)
return ret;
}
return 0;
}
static const struct nand_controller_ops omap_nand_controller_ops = {
.attach_chip = omap_nand_attach_chip,
.exec_op = omap_nand_exec_op,
};
/* Shared among all NAND instances to synchronize access to the ECC Engine */
......@@ -2233,6 +2186,7 @@ static int omap_nand_probe(struct platform_device *pdev)
int err;
struct resource *res;
struct device *dev = &pdev->dev;
void __iomem *vaddr;
info = devm_kzalloc(&pdev->dev, sizeof(struct omap_nand_info),
GFP_KERNEL);
......@@ -2266,10 +2220,11 @@ static int omap_nand_probe(struct platform_device *pdev)
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
nand_chip->legacy.IO_ADDR_R = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(nand_chip->legacy.IO_ADDR_R))
return PTR_ERR(nand_chip->legacy.IO_ADDR_R);
vaddr = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(vaddr))
return PTR_ERR(vaddr);
info->fifo = vaddr;
info->phys_base = res->start;
if (!omap_gpmc_controller_initialized) {
......@@ -2280,9 +2235,6 @@ static int omap_nand_probe(struct platform_device *pdev)
nand_chip->controller = &omap_gpmc_controller;
nand_chip->legacy.IO_ADDR_W = nand_chip->legacy.IO_ADDR_R;
nand_chip->legacy.cmd_ctrl = omap_hwcontrol;
info->ready_gpiod = devm_gpiod_get_optional(&pdev->dev, "rb",
GPIOD_IN);
if (IS_ERR(info->ready_gpiod)) {
......@@ -2290,26 +2242,12 @@ static int omap_nand_probe(struct platform_device *pdev)
return PTR_ERR(info->ready_gpiod);
}
/*
* If RDY/BSY line is connected to OMAP then use the omap ready
* function and the generic nand_wait function which reads the status
* register after monitoring the RDY/BSY line. Otherwise use a standard
* chip delay which is slightly more than tR (AC Timing) of the NAND
* device and read status register until you get a failure or success
*/
if (info->ready_gpiod) {
nand_chip->legacy.dev_ready = omap_dev_ready;
nand_chip->legacy.chip_delay = 0;
} else {
nand_chip->legacy.waitfunc = omap_wait;
nand_chip->legacy.chip_delay = 50;
}
if (info->flash_bbt)
nand_chip->bbt_options |= NAND_BBT_USE_FLASH;
/* scan NAND device connected to chip controller */
nand_chip->options |= info->devsize & NAND_BUSWIDTH_16;
/* default operations */
info->data_in = omap_nand_data_in;
info->data_out = omap_nand_data_out;
err = nand_scan(nand_chip, 1);
if (err)
......@@ -2352,10 +2290,7 @@ static int omap_nand_remove(struct platform_device *pdev)
return ret;
}
static const struct of_device_id omap_nand_ids[] = {
{ .compatible = "ti,omap2-nand", },
{},
};
/* omap_nand_ids defined in linux/platform_data/mtd-nand-omap2.h */
MODULE_DEVICE_TABLE(of, omap_nand_ids);
static struct platform_driver omap_nand_driver = {
......
......@@ -384,8 +384,8 @@ static irqreturn_t elm_isr(int this_irq, void *dev_id)
static int elm_probe(struct platform_device *pdev)
{
int ret = 0;
struct resource *irq;
struct elm_info *info;
int irq;
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (!info)
......@@ -393,20 +393,18 @@ static int elm_probe(struct platform_device *pdev)
info->dev = &pdev->dev;
irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!irq) {
dev_err(&pdev->dev, "no irq resource defined\n");
return -ENODEV;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
info->elm_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(info->elm_base))
return PTR_ERR(info->elm_base);
ret = devm_request_irq(&pdev->dev, irq->start, elm_isr, 0,
pdev->name, info);
ret = devm_request_irq(&pdev->dev, irq, elm_isr, 0,
pdev->name, info);
if (ret) {
dev_err(&pdev->dev, "failure requesting %pr\n", irq);
dev_err(&pdev->dev, "failure requesting %d\n", irq);
return ret;
}
......
// SPDX-License-Identifier: GPL-2.0
/*
* Evatronix/Renesas R-Car Gen3, RZ/N1D, RZ/N1S, RZ/N1L NAND controller driver
*
* Copyright (C) 2021 Schneider Electric
* Author: Miquel RAYNAL <miquel.raynal@bootlin.com>
*/
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#define COMMAND_REG 0x00
#define COMMAND_SEQ(x) FIELD_PREP(GENMASK(5, 0), (x))
#define COMMAND_SEQ_10 COMMAND_SEQ(0x2A)
#define COMMAND_SEQ_12 COMMAND_SEQ(0x0C)
#define COMMAND_SEQ_18 COMMAND_SEQ(0x32)
#define COMMAND_SEQ_19 COMMAND_SEQ(0x13)
#define COMMAND_SEQ_GEN_IN COMMAND_SEQ_18
#define COMMAND_SEQ_GEN_OUT COMMAND_SEQ_19
#define COMMAND_SEQ_READ_PAGE COMMAND_SEQ_10
#define COMMAND_SEQ_WRITE_PAGE COMMAND_SEQ_12
#define COMMAND_INPUT_SEL_AHBS 0
#define COMMAND_INPUT_SEL_DMA BIT(6)
#define COMMAND_FIFO_SEL 0
#define COMMAND_DATA_SEL BIT(7)
#define COMMAND_0(x) FIELD_PREP(GENMASK(15, 8), (x))
#define COMMAND_1(x) FIELD_PREP(GENMASK(23, 16), (x))
#define COMMAND_2(x) FIELD_PREP(GENMASK(31, 24), (x))
#define CONTROL_REG 0x04
#define CONTROL_CHECK_RB_LINE 0
#define CONTROL_ECC_BLOCK_SIZE(x) FIELD_PREP(GENMASK(2, 1), (x))
#define CONTROL_ECC_BLOCK_SIZE_256 CONTROL_ECC_BLOCK_SIZE(0)
#define CONTROL_ECC_BLOCK_SIZE_512 CONTROL_ECC_BLOCK_SIZE(1)
#define CONTROL_ECC_BLOCK_SIZE_1024 CONTROL_ECC_BLOCK_SIZE(2)
#define CONTROL_INT_EN BIT(4)
#define CONTROL_ECC_EN BIT(5)
#define CONTROL_BLOCK_SIZE(x) FIELD_PREP(GENMASK(7, 6), (x))
#define CONTROL_BLOCK_SIZE_32P CONTROL_BLOCK_SIZE(0)
#define CONTROL_BLOCK_SIZE_64P CONTROL_BLOCK_SIZE(1)
#define CONTROL_BLOCK_SIZE_128P CONTROL_BLOCK_SIZE(2)
#define CONTROL_BLOCK_SIZE_256P CONTROL_BLOCK_SIZE(3)
#define STATUS_REG 0x8
#define MEM_RDY(cs, reg) (FIELD_GET(GENMASK(3, 0), (reg)) & BIT(cs))
#define CTRL_RDY(reg) (FIELD_GET(BIT(8), (reg)) == 0)
#define ECC_CTRL_REG 0x18
#define ECC_CTRL_CAP(x) FIELD_PREP(GENMASK(2, 0), (x))
#define ECC_CTRL_CAP_2B ECC_CTRL_CAP(0)
#define ECC_CTRL_CAP_4B ECC_CTRL_CAP(1)
#define ECC_CTRL_CAP_8B ECC_CTRL_CAP(2)
#define ECC_CTRL_CAP_16B ECC_CTRL_CAP(3)
#define ECC_CTRL_CAP_24B ECC_CTRL_CAP(4)
#define ECC_CTRL_CAP_32B ECC_CTRL_CAP(5)
#define ECC_CTRL_ERR_THRESHOLD(x) FIELD_PREP(GENMASK(13, 8), (x))
#define INT_MASK_REG 0x10
#define INT_STATUS_REG 0x14
#define INT_CMD_END BIT(1)
#define INT_DMA_END BIT(3)
#define INT_MEM_RDY(cs) FIELD_PREP(GENMASK(11, 8), BIT(cs))
#define INT_DMA_ENDED BIT(3)
#define MEM_IS_RDY(cs, reg) (FIELD_GET(GENMASK(11, 8), (reg)) & BIT(cs))
#define DMA_HAS_ENDED(reg) FIELD_GET(BIT(3), (reg))
#define ECC_OFFSET_REG 0x1C
#define ECC_OFFSET(x) FIELD_PREP(GENMASK(15, 0), (x))
#define ECC_STAT_REG 0x20
#define ECC_STAT_CORRECTABLE(cs, reg) (FIELD_GET(GENMASK(3, 0), (reg)) & BIT(cs))
#define ECC_STAT_UNCORRECTABLE(cs, reg) (FIELD_GET(GENMASK(11, 8), (reg)) & BIT(cs))
#define ADDR0_COL_REG 0x24
#define ADDR0_COL(x) FIELD_PREP(GENMASK(15, 0), (x))
#define ADDR0_ROW_REG 0x28
#define ADDR0_ROW(x) FIELD_PREP(GENMASK(23, 0), (x))
#define ADDR1_COL_REG 0x2C
#define ADDR1_COL(x) FIELD_PREP(GENMASK(15, 0), (x))
#define ADDR1_ROW_REG 0x30
#define ADDR1_ROW(x) FIELD_PREP(GENMASK(23, 0), (x))
#define FIFO_DATA_REG 0x38
#define DATA_REG 0x3C
#define DATA_REG_SIZE_REG 0x40
#define DMA_ADDR_LOW_REG 0x64
#define DMA_ADDR_HIGH_REG 0x68
#define DMA_CNT_REG 0x6C
#define DMA_CTRL_REG 0x70
#define DMA_CTRL_INCREMENT_BURST_4 0
#define DMA_CTRL_REGISTER_MANAGED_MODE 0
#define DMA_CTRL_START BIT(7)
#define MEM_CTRL_REG 0x80
#define MEM_CTRL_CS(cs) FIELD_PREP(GENMASK(1, 0), (cs))
#define MEM_CTRL_DIS_WP(cs) FIELD_PREP(GENMASK(11, 8), BIT((cs)))
#define DATA_SIZE_REG 0x84
#define DATA_SIZE(x) FIELD_PREP(GENMASK(14, 0), (x))
#define TIMINGS_ASYN_REG 0x88
#define TIMINGS_ASYN_TRWP(x) FIELD_PREP(GENMASK(3, 0), max((x), 1U) - 1)
#define TIMINGS_ASYN_TRWH(x) FIELD_PREP(GENMASK(7, 4), max((x), 1U) - 1)
#define TIM_SEQ0_REG 0x90
#define TIM_SEQ0_TCCS(x) FIELD_PREP(GENMASK(5, 0), max((x), 1U) - 1)
#define TIM_SEQ0_TADL(x) FIELD_PREP(GENMASK(13, 8), max((x), 1U) - 1)
#define TIM_SEQ0_TRHW(x) FIELD_PREP(GENMASK(21, 16), max((x), 1U) - 1)
#define TIM_SEQ0_TWHR(x) FIELD_PREP(GENMASK(29, 24), max((x), 1U) - 1)
#define TIM_SEQ1_REG 0x94
#define TIM_SEQ1_TWB(x) FIELD_PREP(GENMASK(5, 0), max((x), 1U) - 1)
#define TIM_SEQ1_TRR(x) FIELD_PREP(GENMASK(13, 8), max((x), 1U) - 1)
#define TIM_SEQ1_TWW(x) FIELD_PREP(GENMASK(21, 16), max((x), 1U) - 1)
#define TIM_GEN_SEQ0_REG 0x98
#define TIM_GEN_SEQ0_D0(x) FIELD_PREP(GENMASK(5, 0), max((x), 1U) - 1)
#define TIM_GEN_SEQ0_D1(x) FIELD_PREP(GENMASK(13, 8), max((x), 1U) - 1)
#define TIM_GEN_SEQ0_D2(x) FIELD_PREP(GENMASK(21, 16), max((x), 1U) - 1)
#define TIM_GEN_SEQ0_D3(x) FIELD_PREP(GENMASK(29, 24), max((x), 1U) - 1)
#define TIM_GEN_SEQ1_REG 0x9c
#define TIM_GEN_SEQ1_D4(x) FIELD_PREP(GENMASK(5, 0), max((x), 1U) - 1)
#define TIM_GEN_SEQ1_D5(x) FIELD_PREP(GENMASK(13, 8), max((x), 1U) - 1)
#define TIM_GEN_SEQ1_D6(x) FIELD_PREP(GENMASK(21, 16), max((x), 1U) - 1)
#define TIM_GEN_SEQ1_D7(x) FIELD_PREP(GENMASK(29, 24), max((x), 1U) - 1)
#define TIM_GEN_SEQ2_REG 0xA0
#define TIM_GEN_SEQ2_D8(x) FIELD_PREP(GENMASK(5, 0), max((x), 1U) - 1)
#define TIM_GEN_SEQ2_D9(x) FIELD_PREP(GENMASK(13, 8), max((x), 1U) - 1)
#define TIM_GEN_SEQ2_D10(x) FIELD_PREP(GENMASK(21, 16), max((x), 1U) - 1)
#define TIM_GEN_SEQ2_D11(x) FIELD_PREP(GENMASK(29, 24), max((x), 1U) - 1)
#define FIFO_INIT_REG 0xB4
#define FIFO_INIT BIT(0)
#define FIFO_STATE_REG 0xB4
#define FIFO_STATE_R_EMPTY(reg) FIELD_GET(BIT(0), (reg))
#define FIFO_STATE_W_FULL(reg) FIELD_GET(BIT(1), (reg))
#define FIFO_STATE_C_EMPTY(reg) FIELD_GET(BIT(2), (reg))
#define FIFO_STATE_R_FULL(reg) FIELD_GET(BIT(6), (reg))
#define FIFO_STATE_W_EMPTY(reg) FIELD_GET(BIT(7), (reg))
#define GEN_SEQ_CTRL_REG 0xB8
#define GEN_SEQ_CMD0_EN BIT(0)
#define GEN_SEQ_CMD1_EN BIT(1)
#define GEN_SEQ_CMD2_EN BIT(2)
#define GEN_SEQ_CMD3_EN BIT(3)
#define GEN_SEQ_COL_A0(x) FIELD_PREP(GENMASK(5, 4), min((x), 2U))
#define GEN_SEQ_COL_A1(x) FIELD_PREP(GENMASK(7, 6), min((x), 2U))
#define GEN_SEQ_ROW_A0(x) FIELD_PREP(GENMASK(9, 8), min((x), 3U))
#define GEN_SEQ_ROW_A1(x) FIELD_PREP(GENMASK(11, 10), min((x), 3U))
#define GEN_SEQ_DATA_EN BIT(12)
#define GEN_SEQ_DELAY_EN(x) FIELD_PREP(GENMASK(14, 13), (x))
#define GEN_SEQ_DELAY0_EN GEN_SEQ_DELAY_EN(1)
#define GEN_SEQ_DELAY1_EN GEN_SEQ_DELAY_EN(2)
#define GEN_SEQ_IMD_SEQ BIT(15)
#define GEN_SEQ_COMMAND_3(x) FIELD_PREP(GENMASK(26, 16), (x))
#define DMA_TLVL_REG 0x114
#define DMA_TLVL(x) FIELD_PREP(GENMASK(7, 0), (x))
#define DMA_TLVL_MAX DMA_TLVL(0xFF)
#define TIM_GEN_SEQ3_REG 0x134
#define TIM_GEN_SEQ3_D12(x) FIELD_PREP(GENMASK(5, 0), max((x), 1U) - 1)
#define ECC_CNT_REG 0x14C
#define ECC_CNT(cs, reg) FIELD_GET(GENMASK(5, 0), (reg) >> ((cs) * 8))
#define RNANDC_CS_NUM 4
#define TO_CYCLES64(ps, period_ns) ((unsigned int)DIV_ROUND_UP_ULL(div_u64(ps, 1000), \
period_ns))
struct rnand_chip_sel {
unsigned int cs;
};
struct rnand_chip {
struct nand_chip chip;
struct list_head node;
int selected_die;
u32 ctrl;
unsigned int nsels;
u32 control;
u32 ecc_ctrl;
u32 timings_asyn;
u32 tim_seq0;
u32 tim_seq1;
u32 tim_gen_seq0;
u32 tim_gen_seq1;
u32 tim_gen_seq2;
u32 tim_gen_seq3;
struct rnand_chip_sel sels[];
};
struct rnandc {
struct nand_controller controller;
struct device *dev;
void __iomem *regs;
struct clk *hclk;
struct clk *eclk;
unsigned long assigned_cs;
struct list_head chips;
struct nand_chip *selected_chip;
struct completion complete;
bool use_polling;
u8 *buf;
unsigned int buf_sz;
};
struct rnandc_op {
u32 command;
u32 addr0_col;
u32 addr0_row;
u32 addr1_col;
u32 addr1_row;
u32 data_size;
u32 ecc_offset;
u32 gen_seq_ctrl;
u8 *buf;
bool read;
unsigned int len;
};
static inline struct rnandc *to_rnandc(struct nand_controller *ctrl)
{
return container_of(ctrl, struct rnandc, controller);
}
static inline struct rnand_chip *to_rnand(struct nand_chip *chip)
{
return container_of(chip, struct rnand_chip, chip);
}
static inline unsigned int to_rnandc_cs(struct rnand_chip *nand)
{
return nand->sels[nand->selected_die].cs;
}
static void rnandc_dis_correction(struct rnandc *rnandc)
{
u32 control;
control = readl_relaxed(rnandc->regs + CONTROL_REG);
control &= ~CONTROL_ECC_EN;
writel_relaxed(control, rnandc->regs + CONTROL_REG);
}
static void rnandc_en_correction(struct rnandc *rnandc)
{
u32 control;
control = readl_relaxed(rnandc->regs + CONTROL_REG);
control |= CONTROL_ECC_EN;
writel_relaxed(control, rnandc->regs + CONTROL_REG);
}
static void rnandc_clear_status(struct rnandc *rnandc)
{
writel_relaxed(0, rnandc->regs + INT_STATUS_REG);
writel_relaxed(0, rnandc->regs + ECC_STAT_REG);
writel_relaxed(0, rnandc->regs + ECC_CNT_REG);
}
static void rnandc_dis_interrupts(struct rnandc *rnandc)
{
writel_relaxed(0, rnandc->regs + INT_MASK_REG);
}
static void rnandc_en_interrupts(struct rnandc *rnandc, u32 val)
{
if (!rnandc->use_polling)
writel_relaxed(val, rnandc->regs + INT_MASK_REG);
}
static void rnandc_clear_fifo(struct rnandc *rnandc)
{
writel_relaxed(FIFO_INIT, rnandc->regs + FIFO_INIT_REG);
}
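/*
* Apply the per-chip control, ECC and timing configuration to the controller.
* Nothing is written when the requested die is already the selected one.
*/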
static void rnandc_select_target(struct nand_chip *chip, int die_nr)
{
struct rnand_chip *rnand = to_rnand(chip);
struct rnandc *rnandc = to_rnandc(chip->controller);
unsigned int cs = rnand->sels[die_nr].cs;
if (chip == rnandc->selected_chip && die_nr == rnand->selected_die)
return;
rnandc_clear_status(rnandc);
writel_relaxed(MEM_CTRL_CS(cs) | MEM_CTRL_DIS_WP(cs), rnandc->regs + MEM_CTRL_REG);
writel_relaxed(rnand->control, rnandc->regs + CONTROL_REG);
writel_relaxed(rnand->ecc_ctrl, rnandc->regs + ECC_CTRL_REG);
writel_relaxed(rnand->timings_asyn, rnandc->regs + TIMINGS_ASYN_REG);
writel_relaxed(rnand->tim_seq0, rnandc->regs + TIM_SEQ0_REG);
writel_relaxed(rnand->tim_seq1, rnandc->regs + TIM_SEQ1_REG);
writel_relaxed(rnand->tim_gen_seq0, rnandc->regs + TIM_GEN_SEQ0_REG);
writel_relaxed(rnand->tim_gen_seq1, rnandc->regs + TIM_GEN_SEQ1_REG);
writel_relaxed(rnand->tim_gen_seq2, rnandc->regs + TIM_GEN_SEQ2_REG);
writel_relaxed(rnand->tim_gen_seq3, rnandc->regs + TIM_GEN_SEQ3_REG);
rnandc->selected_chip = chip;
rnand->selected_die = die_nr;
}
static void rnandc_trigger_op(struct rnandc *rnandc, struct rnandc_op *rop)
{
writel_relaxed(rop->addr0_col, rnandc->regs + ADDR0_COL_REG);
writel_relaxed(rop->addr0_row, rnandc->regs + ADDR0_ROW_REG);
writel_relaxed(rop->addr1_col, rnandc->regs + ADDR1_COL_REG);
writel_relaxed(rop->addr1_row, rnandc->regs + ADDR1_ROW_REG);
writel_relaxed(rop->ecc_offset, rnandc->regs + ECC_OFFSET_REG);
writel_relaxed(rop->gen_seq_ctrl, rnandc->regs + GEN_SEQ_CTRL_REG);
writel_relaxed(DATA_SIZE(rop->len), rnandc->regs + DATA_SIZE_REG);
writel_relaxed(rop->command, rnandc->regs + COMMAND_REG);
}
static void rnandc_trigger_dma(struct rnandc *rnandc)
{
writel_relaxed(DMA_CTRL_INCREMENT_BURST_4 |
DMA_CTRL_REGISTER_MANAGED_MODE |
DMA_CTRL_START, rnandc->regs + DMA_CTRL_REG);
}
static irqreturn_t rnandc_irq_handler(int irq, void *private)
{
struct rnandc *rnandc = private;
rnandc_dis_interrupts(rnandc);
complete(&rnandc->complete);
return IRQ_HANDLED;
}
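/* Poll until the selected memory is ready and the controller is idle */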
static int rnandc_wait_end_of_op(struct rnandc *rnandc,
struct nand_chip *chip)
{
struct rnand_chip *rnand = to_rnand(chip);
unsigned int cs = to_rnandc_cs(rnand);
u32 status;
int ret;
ret = readl_poll_timeout(rnandc->regs + STATUS_REG, status,
MEM_RDY(cs, status) && CTRL_RDY(status),
1, 100000);
if (ret)
dev_err(rnandc->dev, "Operation timed out, status: 0x%08x\n",
status);
return ret;
}
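/*
* Wait for the end of an I/O (DMA) transfer, either by polling the interrupt
* status register or, when an IRQ is available, by waiting for the completion
* signalled by the interrupt handler.
*/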
static int rnandc_wait_end_of_io(struct rnandc *rnandc,
struct nand_chip *chip)
{
int timeout_ms = 1000;
int ret;
if (rnandc->use_polling) {
struct rnand_chip *rnand = to_rnand(chip);
unsigned int cs = to_rnandc_cs(rnand);
u32 status;
ret = readl_poll_timeout(rnandc->regs + INT_STATUS_REG, status,
MEM_IS_RDY(cs, status) &
DMA_HAS_ENDED(status),
0, timeout_ms * 1000);
} else {
ret = wait_for_completion_timeout(&rnandc->complete,
msecs_to_jiffies(timeout_ms));
if (!ret)
ret = -ETIMEDOUT;
else
ret = 0;
}
return ret;
}
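/*
* Full page read with hardware ECC: data is transferred by DMA into the
* bounce buffer. When the controller flags uncorrectable errors, the OOB area
* is read back and each chunk is re-checked with nand_check_erased_ecc_chunk()
* so that erased pages are not reported as failures.
*/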
static int rnandc_read_page_hw_ecc(struct nand_chip *chip, u8 *buf,
int oob_required, int page)
{
struct rnandc *rnandc = to_rnandc(chip->controller);
struct mtd_info *mtd = nand_to_mtd(chip);
struct rnand_chip *rnand = to_rnand(chip);
unsigned int cs = to_rnandc_cs(rnand);
struct rnandc_op rop = {
.command = COMMAND_INPUT_SEL_DMA | COMMAND_0(NAND_CMD_READ0) |
COMMAND_2(NAND_CMD_READSTART) | COMMAND_FIFO_SEL |
COMMAND_SEQ_READ_PAGE,
.addr0_row = page,
.len = mtd->writesize,
.ecc_offset = ECC_OFFSET(mtd->writesize + 2),
};
unsigned int max_bitflips = 0;
dma_addr_t dma_addr;
u32 ecc_stat;
int bf, ret, i;
/* Prepare controller */
rnandc_select_target(chip, chip->cur_cs);
rnandc_clear_status(rnandc);
reinit_completion(&rnandc->complete);
rnandc_en_interrupts(rnandc, INT_DMA_ENDED);
rnandc_en_correction(rnandc);
/* Configure DMA */
dma_addr = dma_map_single(rnandc->dev, rnandc->buf, mtd->writesize,
DMA_FROM_DEVICE);
writel(dma_addr, rnandc->regs + DMA_ADDR_LOW_REG);
writel(mtd->writesize, rnandc->regs + DMA_CNT_REG);
writel(DMA_TLVL_MAX, rnandc->regs + DMA_TLVL_REG);
rnandc_trigger_op(rnandc, &rop);
rnandc_trigger_dma(rnandc);
ret = rnandc_wait_end_of_io(rnandc, chip);
dma_unmap_single(rnandc->dev, dma_addr, mtd->writesize, DMA_FROM_DEVICE);
rnandc_dis_correction(rnandc);
if (ret) {
dev_err(rnandc->dev, "Read page operation never ending\n");
return ret;
}
ecc_stat = readl_relaxed(rnandc->regs + ECC_STAT_REG);
if (oob_required || ECC_STAT_UNCORRECTABLE(cs, ecc_stat)) {
ret = nand_change_read_column_op(chip, mtd->writesize,
chip->oob_poi, mtd->oobsize,
false);
if (ret)
return ret;
}
if (ECC_STAT_UNCORRECTABLE(cs, ecc_stat)) {
for (i = 0; i < chip->ecc.steps; i++) {
unsigned int off = i * chip->ecc.size;
unsigned int eccoff = i * chip->ecc.bytes;
bf = nand_check_erased_ecc_chunk(rnandc->buf + off,
chip->ecc.size,
chip->oob_poi + 2 + eccoff,
chip->ecc.bytes,
NULL, 0,
chip->ecc.strength);
if (bf < 0) {
mtd->ecc_stats.failed++;
} else {
mtd->ecc_stats.corrected += bf;
max_bitflips = max_t(unsigned int, max_bitflips, bf);
}
}
} else if (ECC_STAT_CORRECTABLE(cs, ecc_stat)) {
bf = ECC_CNT(cs, readl_relaxed(rnandc->regs + ECC_CNT_REG));
/*
* The number of bitflips is an approximation given the fact
* that this controller does not provide per-chunk details but
* only gives statistics on the entire page.
*/
mtd->ecc_stats.corrected += bf;
}
memcpy(buf, rnandc->buf, mtd->writesize);
return 0;
}
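/*
* Subpage read with hardware ECC: the requested area is extended to full ECC
* chunks and transferred through the read FIFO (PIO) instead of DMA.
*/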
static int rnandc_read_subpage_hw_ecc(struct nand_chip *chip, u32 req_offset,
u32 req_len, u8 *bufpoi, int page)
{
struct rnandc *rnandc = to_rnandc(chip->controller);
struct mtd_info *mtd = nand_to_mtd(chip);
struct rnand_chip *rnand = to_rnand(chip);
unsigned int cs = to_rnandc_cs(rnand);
unsigned int page_off = round_down(req_offset, chip->ecc.size);
unsigned int real_len = round_up(req_offset + req_len - page_off,
chip->ecc.size);
unsigned int start_chunk = page_off / chip->ecc.size;
unsigned int nchunks = real_len / chip->ecc.size;
unsigned int ecc_off = 2 + (start_chunk * chip->ecc.bytes);
struct rnandc_op rop = {
.command = COMMAND_INPUT_SEL_AHBS | COMMAND_0(NAND_CMD_READ0) |
COMMAND_2(NAND_CMD_READSTART) | COMMAND_FIFO_SEL |
COMMAND_SEQ_READ_PAGE,
.addr0_row = page,
.addr0_col = page_off,
.len = real_len,
.ecc_offset = ECC_OFFSET(mtd->writesize + ecc_off),
};
unsigned int max_bitflips = 0, i;
u32 ecc_stat;
int bf, ret;
/* Prepare controller */
rnandc_select_target(chip, chip->cur_cs);
rnandc_clear_status(rnandc);
rnandc_en_correction(rnandc);
rnandc_trigger_op(rnandc, &rop);
while (!FIFO_STATE_C_EMPTY(readl(rnandc->regs + FIFO_STATE_REG)))
cpu_relax();
while (FIFO_STATE_R_EMPTY(readl(rnandc->regs + FIFO_STATE_REG)))
cpu_relax();
ioread32_rep(rnandc->regs + FIFO_DATA_REG, bufpoi + page_off,
real_len / 4);
if (!FIFO_STATE_R_EMPTY(readl(rnandc->regs + FIFO_STATE_REG))) {
dev_err(rnandc->dev, "Clearing residual data in the read FIFO\n");
rnandc_clear_fifo(rnandc);
}
ret = rnandc_wait_end_of_op(rnandc, chip);
rnandc_dis_correction(rnandc);
if (ret) {
dev_err(rnandc->dev, "Read subpage operation never ending\n");
return ret;
}
ecc_stat = readl_relaxed(rnandc->regs + ECC_STAT_REG);
if (ECC_STAT_UNCORRECTABLE(cs, ecc_stat)) {
ret = nand_change_read_column_op(chip, mtd->writesize,
chip->oob_poi, mtd->oobsize,
false);
if (ret)
return ret;
for (i = start_chunk; i < nchunks; i++) {
unsigned int dataoff = i * chip->ecc.size;
unsigned int eccoff = 2 + (i * chip->ecc.bytes);
bf = nand_check_erased_ecc_chunk(bufpoi + dataoff,
chip->ecc.size,
chip->oob_poi + eccoff,
chip->ecc.bytes,
NULL, 0,
chip->ecc.strength);
if (bf < 0) {
mtd->ecc_stats.failed++;
} else {
mtd->ecc_stats.corrected += bf;
max_bitflips = max_t(unsigned int, max_bitflips, bf);
}
}
} else if (ECC_STAT_CORRECTABLE(cs, ecc_stat)) {
bf = ECC_CNT(cs, readl_relaxed(rnandc->regs + ECC_CNT_REG));
/*
* The number of bitflips is an approximation given the fact
* that this controller does not provide per-chunk details but
* only gives statistics on the entire page.
*/
mtd->ecc_stats.corrected += bf;
}
return 0;
}
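/*
* Full page program with hardware ECC: data is copied into the bounce buffer
* and transferred by DMA, the OOB area being written separately if requested.
*/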
static int rnandc_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf,
int oob_required, int page)
{
struct rnandc *rnandc = to_rnandc(chip->controller);
struct mtd_info *mtd = nand_to_mtd(chip);
struct rnand_chip *rnand = to_rnand(chip);
unsigned int cs = to_rnandc_cs(rnand);
struct rnandc_op rop = {
.command = COMMAND_INPUT_SEL_DMA | COMMAND_0(NAND_CMD_SEQIN) |
COMMAND_1(NAND_CMD_PAGEPROG) | COMMAND_FIFO_SEL |
COMMAND_SEQ_WRITE_PAGE,
.addr0_row = page,
.len = mtd->writesize,
.ecc_offset = ECC_OFFSET(mtd->writesize + 2),
};
dma_addr_t dma_addr;
int ret;
memcpy(rnandc->buf, buf, mtd->writesize);
/* Prepare controller */
rnandc_select_target(chip, chip->cur_cs);
rnandc_clear_status(rnandc);
reinit_completion(&rnandc->complete);
rnandc_en_interrupts(rnandc, INT_MEM_RDY(cs));
rnandc_en_correction(rnandc);
/* Configure DMA */
dma_addr = dma_map_single(rnandc->dev, (void *)rnandc->buf, mtd->writesize,
DMA_TO_DEVICE);
writel(dma_addr, rnandc->regs + DMA_ADDR_LOW_REG);
writel(mtd->writesize, rnandc->regs + DMA_CNT_REG);
writel(DMA_TLVL_MAX, rnandc->regs + DMA_TLVL_REG);
rnandc_trigger_op(rnandc, &rop);
rnandc_trigger_dma(rnandc);
ret = rnandc_wait_end_of_io(rnandc, chip);
dma_unmap_single(rnandc->dev, dma_addr, mtd->writesize, DMA_TO_DEVICE);
rnandc_dis_correction(rnandc);
if (ret) {
dev_err(rnandc->dev, "Write page operation never ending\n");
return ret;
}
if (!oob_required)
return 0;
return nand_change_write_column_op(chip, mtd->writesize, chip->oob_poi,
mtd->oobsize, false);
}
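/*
* Subpage program with hardware ECC: the area is extended to full ECC chunks
* and pushed through the write FIFO (PIO).
*/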
static int rnandc_write_subpage_hw_ecc(struct nand_chip *chip, u32 req_offset,
u32 req_len, const u8 *bufpoi,
int oob_required, int page)
{
struct rnandc *rnandc = to_rnandc(chip->controller);
struct mtd_info *mtd = nand_to_mtd(chip);
unsigned int page_off = round_down(req_offset, chip->ecc.size);
unsigned int real_len = round_up(req_offset + req_len - page_off,
chip->ecc.size);
unsigned int start_chunk = page_off / chip->ecc.size;
unsigned int ecc_off = 2 + (start_chunk * chip->ecc.bytes);
struct rnandc_op rop = {
.command = COMMAND_INPUT_SEL_AHBS | COMMAND_0(NAND_CMD_SEQIN) |
COMMAND_1(NAND_CMD_PAGEPROG) | COMMAND_FIFO_SEL |
COMMAND_SEQ_WRITE_PAGE,
.addr0_row = page,
.addr0_col = page_off,
.len = real_len,
.ecc_offset = ECC_OFFSET(mtd->writesize + ecc_off),
};
int ret;
/* Prepare controller */
rnandc_select_target(chip, chip->cur_cs);
rnandc_clear_status(rnandc);
rnandc_en_correction(rnandc);
rnandc_trigger_op(rnandc, &rop);
while (FIFO_STATE_W_FULL(readl(rnandc->regs + FIFO_STATE_REG)))
cpu_relax();
iowrite32_rep(rnandc->regs + FIFO_DATA_REG, bufpoi + page_off,
real_len / 4);
while (!FIFO_STATE_W_EMPTY(readl(rnandc->regs + FIFO_STATE_REG)))
cpu_relax();
ret = rnandc_wait_end_of_op(rnandc, chip);
rnandc_dis_correction(rnandc);
if (ret) {
dev_err(rnandc->dev, "Write subpage operation never ending\n");
return ret;
}
return 0;
}
/*
* This controller is simple enough and thus does not need to use the parser
* provided by the core; instead, handle every situation here.
*/
static int rnandc_exec_op(struct nand_chip *chip,
const struct nand_operation *op, bool check_only)
{
struct rnandc *rnandc = to_rnandc(chip->controller);
const struct nand_op_instr *instr = NULL;
struct rnandc_op rop = {
.command = COMMAND_INPUT_SEL_AHBS,
.gen_seq_ctrl = GEN_SEQ_IMD_SEQ,
};
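/*
* The *_phase counters below track how many instructions of each kind have
* already been mapped onto the controller's generic sequence slots.
*/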
unsigned int cmd_phase = 0, addr_phase = 0, data_phase = 0,
delay_phase = 0, delays = 0;
unsigned int op_id, col_addrs, row_addrs, naddrs, remainder, words, i;
const u8 *addrs;
u32 last_bytes;
int ret;
if (!check_only)
rnandc_select_target(chip, op->cs);
for (op_id = 0; op_id < op->ninstrs; op_id++) {
instr = &op->instrs[op_id];
nand_op_trace(" ", instr);
switch (instr->type) {
case NAND_OP_CMD_INSTR:
switch (cmd_phase++) {
case 0:
rop.command |= COMMAND_0(instr->ctx.cmd.opcode);
rop.gen_seq_ctrl |= GEN_SEQ_CMD0_EN;
break;
case 1:
rop.gen_seq_ctrl |= GEN_SEQ_COMMAND_3(instr->ctx.cmd.opcode);
rop.gen_seq_ctrl |= GEN_SEQ_CMD3_EN;
if (addr_phase == 0)
addr_phase = 1;
break;
case 2:
rop.command |= COMMAND_2(instr->ctx.cmd.opcode);
rop.gen_seq_ctrl |= GEN_SEQ_CMD2_EN;
if (addr_phase <= 1)
addr_phase = 2;
break;
case 3:
rop.command |= COMMAND_1(instr->ctx.cmd.opcode);
rop.gen_seq_ctrl |= GEN_SEQ_CMD1_EN;
if (addr_phase <= 1)
addr_phase = 2;
if (delay_phase == 0)
delay_phase = 1;
if (data_phase == 0)
data_phase = 1;
break;
default:
return -EOPNOTSUPP;
}
break;
case NAND_OP_ADDR_INSTR:
addrs = instr->ctx.addr.addrs;
naddrs = instr->ctx.addr.naddrs;
if (naddrs > 5)
return -EOPNOTSUPP;
col_addrs = min(2U, naddrs);
row_addrs = naddrs > 2 ? naddrs - col_addrs : 0;
switch (addr_phase++) {
case 0:
for (i = 0; i < col_addrs; i++)
rop.addr0_col |= addrs[i] << (i * 8);
rop.gen_seq_ctrl |= GEN_SEQ_COL_A0(col_addrs);
for (i = 0; i < row_addrs; i++)
rop.addr0_row |= addrs[2 + i] << (i * 8);
rop.gen_seq_ctrl |= GEN_SEQ_ROW_A0(row_addrs);
if (cmd_phase == 0)
cmd_phase = 1;
break;
case 1:
for (i = 0; i < col_addrs; i++)
rop.addr1_col |= addrs[i] << (i * 8);
rop.gen_seq_ctrl |= GEN_SEQ_COL_A1(col_addrs);
for (i = 0; i < row_addrs; i++)
rop.addr1_row |= addrs[2 + i] << (i * 8);
rop.gen_seq_ctrl |= GEN_SEQ_ROW_A1(row_addrs);
if (cmd_phase <= 1)
cmd_phase = 2;
break;
default:
return -EOPNOTSUPP;
}
break;
case NAND_OP_DATA_IN_INSTR:
rop.read = true;
fallthrough;
case NAND_OP_DATA_OUT_INSTR:
rop.gen_seq_ctrl |= GEN_SEQ_DATA_EN;
rop.buf = instr->ctx.data.buf.in;
rop.len = instr->ctx.data.len;
rop.command |= COMMAND_FIFO_SEL;
switch (data_phase++) {
case 0:
if (cmd_phase <= 2)
cmd_phase = 3;
if (addr_phase <= 1)
addr_phase = 2;
if (delay_phase == 0)
delay_phase = 1;
break;
default:
return -EOPNOTSUPP;
}
break;
case NAND_OP_WAITRDY_INSTR:
switch (delay_phase++) {
case 0:
rop.gen_seq_ctrl |= GEN_SEQ_DELAY0_EN;
if (cmd_phase <= 2)
cmd_phase = 3;
break;
case 1:
rop.gen_seq_ctrl |= GEN_SEQ_DELAY1_EN;
if (cmd_phase <= 3)
cmd_phase = 4;
if (data_phase == 0)
data_phase = 1;
break;
default:
return -EOPNOTSUPP;
}
break;
}
}
/*
* Sequence 19 is generic and dedicated to write operations.
* Sequence 18 is also generic and works for all other operations.
*/
if (rop.buf && !rop.read)
rop.command |= COMMAND_SEQ_GEN_OUT;
else
rop.command |= COMMAND_SEQ_GEN_IN;
if (delays > 1) {
dev_err(rnandc->dev, "Cannot handle more than one wait delay\n");
return -EOPNOTSUPP;
}
if (check_only)
return 0;
rnandc_trigger_op(rnandc, &rop);
words = rop.len / sizeof(u32);
remainder = rop.len % sizeof(u32);
if (rop.buf && rop.read) {
while (!FIFO_STATE_C_EMPTY(readl(rnandc->regs + FIFO_STATE_REG)))
cpu_relax();
while (FIFO_STATE_R_EMPTY(readl(rnandc->regs + FIFO_STATE_REG)))
cpu_relax();
ioread32_rep(rnandc->regs + FIFO_DATA_REG, rop.buf, words);
if (remainder) {
last_bytes = readl_relaxed(rnandc->regs + FIFO_DATA_REG);
memcpy(rop.buf + (words * sizeof(u32)), &last_bytes,
remainder);
}
if (!FIFO_STATE_R_EMPTY(readl(rnandc->regs + FIFO_STATE_REG))) {
dev_warn(rnandc->dev,
"Clearing residual data in the read FIFO\n");
rnandc_clear_fifo(rnandc);
}
} else if (rop.len && !rop.read) {
while (FIFO_STATE_W_FULL(readl(rnandc->regs + FIFO_STATE_REG)))
cpu_relax();
iowrite32_rep(rnandc->regs + FIFO_DATA_REG, rop.buf,
DIV_ROUND_UP(rop.len, 4));
if (remainder) {
last_bytes = 0;
memcpy(&last_bytes, rop.buf + (words * sizeof(u32)), remainder);
writel_relaxed(last_bytes, rnandc->regs + FIFO_DATA_REG);
}
while (!FIFO_STATE_W_EMPTY(readl(rnandc->regs + FIFO_STATE_REG)))
cpu_relax();
}
ret = rnandc_wait_end_of_op(rnandc, chip);
if (ret)
return ret;
return 0;
}
static int rnandc_setup_interface(struct nand_chip *chip, int chipnr,
const struct nand_interface_config *conf)
{
struct rnand_chip *rnand = to_rnand(chip);
struct rnandc *rnandc = to_rnandc(chip->controller);
unsigned int period_ns = 1000000000 / clk_get_rate(rnandc->eclk);
const struct nand_sdr_timings *sdr;
unsigned int cyc, cle, ale, bef_dly, ca_to_data;
sdr = nand_get_sdr_timings(conf);
if (IS_ERR(sdr))
return PTR_ERR(sdr);
if (sdr->tRP_min != sdr->tWP_min || sdr->tREH_min != sdr->tWH_min) {
dev_err(rnandc->dev, "Read and write hold times must be identical\n");
return -EINVAL;
}
if (chipnr < 0)
return 0;
rnand->timings_asyn =
TIMINGS_ASYN_TRWP(TO_CYCLES64(sdr->tRP_min, period_ns)) |
TIMINGS_ASYN_TRWH(TO_CYCLES64(sdr->tREH_min, period_ns));
rnand->tim_seq0 =
TIM_SEQ0_TCCS(TO_CYCLES64(sdr->tCCS_min, period_ns)) |
TIM_SEQ0_TADL(TO_CYCLES64(sdr->tADL_min, period_ns)) |
TIM_SEQ0_TRHW(TO_CYCLES64(sdr->tRHW_min, period_ns)) |
TIM_SEQ0_TWHR(TO_CYCLES64(sdr->tWHR_min, period_ns));
rnand->tim_seq1 =
TIM_SEQ1_TWB(TO_CYCLES64(sdr->tWB_max, period_ns)) |
TIM_SEQ1_TRR(TO_CYCLES64(sdr->tRR_min, period_ns)) |
TIM_SEQ1_TWW(TO_CYCLES64(sdr->tWW_min, period_ns));
cyc = sdr->tDS_min + sdr->tDH_min;
cle = sdr->tCLH_min + sdr->tCLS_min;
ale = sdr->tALH_min + sdr->tALS_min;
bef_dly = sdr->tWB_max - sdr->tDH_min;
ca_to_data = sdr->tWHR_min + sdr->tREA_max - sdr->tDH_min;
/*
* D0 = CMD -> ADDR = tCLH + tCLS - 1 cycle
* D1 = CMD -> CMD = tCLH + tCLS - 1 cycle
* D2 = CMD -> DLY = tWB - tDH
* D3 = CMD -> DATA = tWHR + tREA - tDH
*/
rnand->tim_gen_seq0 =
TIM_GEN_SEQ0_D0(TO_CYCLES64(cle - cyc, period_ns)) |
TIM_GEN_SEQ0_D1(TO_CYCLES64(cle - cyc, period_ns)) |
TIM_GEN_SEQ0_D2(TO_CYCLES64(bef_dly, period_ns)) |
TIM_GEN_SEQ0_D3(TO_CYCLES64(ca_to_data, period_ns));
/*
* D4 = ADDR -> CMD = tALH + tALS - 1 cycle
* D5 = ADDR -> ADDR = tALH + tALS - 1 cycle
* D6 = ADDR -> DLY = tWB - tDH
* D7 = ADDR -> DATA = tWHR + tREA - tDH
*/
rnand->tim_gen_seq1 =
TIM_GEN_SEQ1_D4(TO_CYCLES64(ale - cyc, period_ns)) |
TIM_GEN_SEQ1_D5(TO_CYCLES64(ale - cyc, period_ns)) |
TIM_GEN_SEQ1_D6(TO_CYCLES64(bef_dly, period_ns)) |
TIM_GEN_SEQ1_D7(TO_CYCLES64(ca_to_data, period_ns));
/*
* D8 = DLY -> DATA = tRR + tREA
* D9 = DLY -> CMD = tRR
* D10 = DATA -> CMD = tCLH + tCLS - 1 cycle
* D11 = DATA -> DLY = tWB - tDH
*/
rnand->tim_gen_seq2 =
TIM_GEN_SEQ2_D8(TO_CYCLES64(sdr->tRR_min + sdr->tREA_max, period_ns)) |
TIM_GEN_SEQ2_D9(TO_CYCLES64(sdr->tRR_min, period_ns)) |
TIM_GEN_SEQ2_D10(TO_CYCLES64(cle - cyc, period_ns)) |
TIM_GEN_SEQ2_D11(TO_CYCLES64(bef_dly, period_ns));
/* D12 = DATA -> END = tCLH - tDH */
rnand->tim_gen_seq3 =
TIM_GEN_SEQ3_D12(TO_CYCLES64(sdr->tCLH_min - sdr->tDH_min, period_ns));
return 0;
}
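/*
* OOB layout: the first two OOB bytes are skipped, the ECC bytes of all steps
* (each step rounded up to a multiple of 4 bytes) come next, and whatever
* remains is exposed as free OOB space.
*/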
static int rnandc_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_chip *chip = mtd_to_nand(mtd);
unsigned int eccbytes = round_up(chip->ecc.bytes, 4) * chip->ecc.steps;
if (section)
return -ERANGE;
oobregion->offset = 2;
oobregion->length = eccbytes;
return 0;
}
static int rnandc_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_chip *chip = mtd_to_nand(mtd);
unsigned int eccbytes = round_up(chip->ecc.bytes, 4) * chip->ecc.steps;
if (section)
return -ERANGE;
oobregion->offset = 2 + eccbytes;
oobregion->length = mtd->oobsize - oobregion->offset;
return 0;
}
static const struct mtd_ooblayout_ops rnandc_ooblayout_ops = {
.ecc = rnandc_ooblayout_ecc,
.free = rnandc_ooblayout_free,
};
static int rnandc_hw_ecc_controller_init(struct nand_chip *chip)
{
struct rnand_chip *rnand = to_rnand(chip);
struct mtd_info *mtd = nand_to_mtd(chip);
struct rnandc *rnandc = to_rnandc(chip->controller);
if (mtd->writesize > SZ_16K) {
dev_err(rnandc->dev, "Unsupported page size\n");
return -EINVAL;
}
switch (chip->ecc.size) {
case SZ_256:
rnand->control |= CONTROL_ECC_BLOCK_SIZE_256;
break;
case SZ_512:
rnand->control |= CONTROL_ECC_BLOCK_SIZE_512;
break;
case SZ_1K:
rnand->control |= CONTROL_ECC_BLOCK_SIZE_1024;
break;
default:
dev_err(rnandc->dev, "Unsupported ECC chunk size\n");
return -EINVAL;
}
switch (chip->ecc.strength) {
case 2:
chip->ecc.bytes = 4;
rnand->ecc_ctrl |= ECC_CTRL_CAP_2B;
break;
case 4:
chip->ecc.bytes = 7;
rnand->ecc_ctrl |= ECC_CTRL_CAP_4B;
break;
case 8:
chip->ecc.bytes = 14;
rnand->ecc_ctrl |= ECC_CTRL_CAP_8B;
break;
case 16:
chip->ecc.bytes = 28;
rnand->ecc_ctrl |= ECC_CTRL_CAP_16B;
break;
case 24:
chip->ecc.bytes = 42;
rnand->ecc_ctrl |= ECC_CTRL_CAP_24B;
break;
case 32:
chip->ecc.bytes = 56;
rnand->ecc_ctrl |= ECC_CTRL_CAP_32B;
break;
default:
dev_err(rnandc->dev, "Unsupported ECC strength\n");
return -EINVAL;
}
rnand->ecc_ctrl |= ECC_CTRL_ERR_THRESHOLD(chip->ecc.strength);
mtd_set_ooblayout(mtd, &rnandc_ooblayout_ops);
chip->ecc.steps = mtd->writesize / chip->ecc.size;
chip->ecc.read_page = rnandc_read_page_hw_ecc;
chip->ecc.read_subpage = rnandc_read_subpage_hw_ecc;
chip->ecc.write_page = rnandc_write_page_hw_ecc;
chip->ecc.write_subpage = rnandc_write_subpage_hw_ecc;
return 0;
}
static int rnandc_ecc_init(struct nand_chip *chip)
{
struct nand_ecc_ctrl *ecc = &chip->ecc;
const struct nand_ecc_props *requirements =
nanddev_get_ecc_requirements(&chip->base);
struct rnandc *rnandc = to_rnandc(chip->controller);
int ret;
if (ecc->engine_type != NAND_ECC_ENGINE_TYPE_NONE &&
(!ecc->size || !ecc->strength)) {
if (requirements->step_size && requirements->strength) {
ecc->size = requirements->step_size;
ecc->strength = requirements->strength;
} else {
dev_err(rnandc->dev, "No minimum ECC strength\n");
return -EINVAL;
}
}
switch (ecc->engine_type) {
case NAND_ECC_ENGINE_TYPE_ON_HOST:
ret = rnandc_hw_ecc_controller_init(chip);
if (ret)
return ret;
break;
case NAND_ECC_ENGINE_TYPE_NONE:
case NAND_ECC_ENGINE_TYPE_SOFT:
case NAND_ECC_ENGINE_TYPE_ON_DIE:
break;
default:
return -EINVAL;
}
return 0;
}
static int rnandc_attach_chip(struct nand_chip *chip)
{
struct rnand_chip *rnand = to_rnand(chip);
struct rnandc *rnandc = to_rnandc(chip->controller);
struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_memory_organization *memorg = nanddev_get_memorg(&chip->base);
int ret;
/* Do not store BBT bits in the OOB section as it is not protected */
if (chip->bbt_options & NAND_BBT_USE_FLASH)
chip->bbt_options |= NAND_BBT_NO_OOB;
if (mtd->writesize <= 512) {
dev_err(rnandc->dev, "Small page devices not supported\n");
return -EINVAL;
}
rnand->control |= CONTROL_CHECK_RB_LINE | CONTROL_INT_EN;
switch (memorg->pages_per_eraseblock) {
case 32:
rnand->control |= CONTROL_BLOCK_SIZE_32P;
break;
case 64:
rnand->control |= CONTROL_BLOCK_SIZE_64P;
break;
case 128:
rnand->control |= CONTROL_BLOCK_SIZE_128P;
break;
case 256:
rnand->control |= CONTROL_BLOCK_SIZE_256P;
break;
default:
dev_err(rnandc->dev, "Unsupported memory organization\n");
return -EINVAL;
}
chip->options |= NAND_SUBPAGE_READ;
ret = rnandc_ecc_init(chip);
if (ret) {
dev_err(rnandc->dev, "ECC initialization failed (%d)\n", ret);
return ret;
}
/* Force an update of the configuration registers */
rnand->selected_die = -1;
return 0;
}
static const struct nand_controller_ops rnandc_ops = {
.attach_chip = rnandc_attach_chip,
.exec_op = rnandc_exec_op,
.setup_interface = rnandc_setup_interface,
};
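/*
* The DMA bounce buffer is shared by all chips: size it for the largest
* page + OOB attached to the controller and reallocate it whenever a newly
* attached chip needs a bigger one.
*/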
static int rnandc_alloc_dma_buf(struct rnandc *rnandc,
struct mtd_info *new_mtd)
{
unsigned int max_len = new_mtd->writesize + new_mtd->oobsize;
struct rnand_chip *entry, *temp;
struct nand_chip *chip;
struct mtd_info *mtd;
list_for_each_entry_safe(entry, temp, &rnandc->chips, node) {
chip = &entry->chip;
mtd = nand_to_mtd(chip);
max_len = max(max_len, mtd->writesize + mtd->oobsize);
}
if (rnandc->buf && rnandc->buf_sz < max_len) {
devm_kfree(rnandc->dev, rnandc->buf);
rnandc->buf = NULL;
}
if (!rnandc->buf) {
rnandc->buf_sz = max_len;
rnandc->buf = devm_kmalloc(rnandc->dev, max_len,
GFP_KERNEL | GFP_DMA);
if (!rnandc->buf)
return -ENOMEM;
}
return 0;
}
static int rnandc_chip_init(struct rnandc *rnandc, struct device_node *np)
{
struct rnand_chip *rnand;
struct mtd_info *mtd;
struct nand_chip *chip;
int nsels, ret, i;
u32 cs;
nsels = of_property_count_elems_of_size(np, "reg", sizeof(u32));
if (nsels <= 0) {
ret = (nsels < 0) ? nsels : -EINVAL;
dev_err(rnandc->dev, "Invalid reg property (%d)\n", ret);
return ret;
}
/* Alloc the driver's NAND chip structure */
rnand = devm_kzalloc(rnandc->dev, struct_size(rnand, sels, nsels),
GFP_KERNEL);
if (!rnand)
return -ENOMEM;
rnand->nsels = nsels;
rnand->selected_die = -1;
for (i = 0; i < nsels; i++) {
ret = of_property_read_u32_index(np, "reg", i, &cs);
if (ret) {
dev_err(rnandc->dev, "Incomplete reg property (%d)\n", ret);
return ret;
}
if (cs >= RNANDC_CS_NUM) {
dev_err(rnandc->dev, "Invalid reg property (%d)\n", cs);
return -EINVAL;
}
if (test_and_set_bit(cs, &rnandc->assigned_cs)) {
dev_err(rnandc->dev, "CS %d already assigned\n", cs);
return -EINVAL;
}
/*
* No need to check for RB or WP properties: there is a 1:1
* mandatory mapping with the CS.
*/
rnand->sels[i].cs = cs;
}
chip = &rnand->chip;
chip->controller = &rnandc->controller;
nand_set_flash_node(chip, np);
mtd = nand_to_mtd(chip);
mtd->dev.parent = rnandc->dev;
if (!mtd->name) {
dev_err(rnandc->dev, "Missing MTD label\n");
return -EINVAL;
}
ret = nand_scan(chip, rnand->nsels);
if (ret) {
dev_err(rnandc->dev, "Failed to scan the NAND chip (%d)\n", ret);
return ret;
}
ret = rnandc_alloc_dma_buf(rnandc, mtd);
if (ret)
goto cleanup_nand;
ret = mtd_device_register(mtd, NULL, 0);
if (ret) {
dev_err(rnandc->dev, "Failed to register MTD device (%d)\n", ret);
goto cleanup_nand;
}
list_add_tail(&rnand->node, &rnandc->chips);
return 0;
cleanup_nand:
nand_cleanup(chip);
return ret;
}
static void rnandc_chips_cleanup(struct rnandc *rnandc)
{
struct rnand_chip *entry, *temp;
struct nand_chip *chip;
int ret;
list_for_each_entry_safe(entry, temp, &rnandc->chips, node) {
chip = &entry->chip;
ret = mtd_device_unregister(nand_to_mtd(chip));
WARN_ON(ret);
nand_cleanup(chip);
list_del(&entry->node);
}
}
static int rnandc_chips_init(struct rnandc *rnandc)
{
struct device_node *np;
int ret;
for_each_child_of_node(rnandc->dev->of_node, np) {
ret = rnandc_chip_init(rnandc, np);
if (ret) {
of_node_put(np);
goto cleanup_chips;
}
}
return 0;
cleanup_chips:
rnandc_chips_cleanup(rnandc);
return ret;
}
static int rnandc_probe(struct platform_device *pdev)
{
struct rnandc *rnandc;
int irq, ret;
rnandc = devm_kzalloc(&pdev->dev, sizeof(*rnandc), GFP_KERNEL);
if (!rnandc)
return -ENOMEM;
rnandc->dev = &pdev->dev;
nand_controller_init(&rnandc->controller);
rnandc->controller.ops = &rnandc_ops;
INIT_LIST_HEAD(&rnandc->chips);
init_completion(&rnandc->complete);
rnandc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rnandc->regs))
return PTR_ERR(rnandc->regs);
/* APB clock */
rnandc->hclk = devm_clk_get(&pdev->dev, "hclk");
if (IS_ERR(rnandc->hclk))
return PTR_ERR(rnandc->hclk);
/* External NAND bus clock */
rnandc->eclk = devm_clk_get(&pdev->dev, "eclk");
if (IS_ERR(rnandc->eclk))
return PTR_ERR(rnandc->eclk);
ret = clk_prepare_enable(rnandc->hclk);
if (ret)
return ret;
ret = clk_prepare_enable(rnandc->eclk);
if (ret)
goto disable_hclk;
rnandc_dis_interrupts(rnandc);
irq = platform_get_irq_optional(pdev, 0);
if (irq == -EPROBE_DEFER) {
ret = irq;
goto disable_eclk;
} else if (irq < 0) {
dev_info(&pdev->dev, "No IRQ found, fallback to polling\n");
rnandc->use_polling = true;
} else {
ret = devm_request_irq(&pdev->dev, irq, rnandc_irq_handler, 0,
"renesas-nand-controller", rnandc);
if (ret < 0)
goto disable_eclk;
}
ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (ret)
goto disable_eclk;
rnandc_clear_fifo(rnandc);
platform_set_drvdata(pdev, rnandc);
ret = rnandc_chips_init(rnandc);
if (ret)
goto disable_eclk;
return 0;
disable_eclk:
clk_disable_unprepare(rnandc->eclk);
disable_hclk:
clk_disable_unprepare(rnandc->hclk);
return ret;
}
static int rnandc_remove(struct platform_device *pdev)
{
struct rnandc *rnandc = platform_get_drvdata(pdev);
rnandc_chips_cleanup(rnandc);
clk_disable_unprepare(rnandc->eclk);
clk_disable_unprepare(rnandc->hclk);
return 0;
}
static const struct of_device_id rnandc_id_table[] = {
{ .compatible = "renesas,rcar-gen3-nandc" },
{ .compatible = "renesas,rzn1-nandc" },
{} /* sentinel */
};
MODULE_DEVICE_TABLE(of, rnandc_id_table);
static struct platform_driver rnandc_driver = {
.driver = {
.name = "renesas-nandc",
.of_match_table = of_match_ptr(rnandc_id_table),
},
.probe = rnandc_probe,
.remove = rnandc_remove,
};
module_platform_driver(rnandc_driver);
MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>");
MODULE_DESCRIPTION("Renesas R-Car Gen3 & RZ/N1 NAND controller driver");
MODULE_LICENSE("GPL v2");
......@@ -48,13 +48,13 @@ static const struct spi_nor_locking_ops atmel_at25fs_locking_ops = {
.is_locked = atmel_at25fs_is_locked,
};
static void atmel_at25fs_default_init(struct spi_nor *nor)
static void atmel_at25fs_late_init(struct spi_nor *nor)
{
nor->params->locking_ops = &atmel_at25fs_locking_ops;
}
static const struct spi_nor_fixups atmel_at25fs_fixups = {
.default_init = atmel_at25fs_default_init,
.late_init = atmel_at25fs_late_init,
};
/**
......@@ -146,50 +146,59 @@ static const struct spi_nor_locking_ops atmel_global_protection_ops = {
.is_locked = atmel_is_global_protected,
};
static void atmel_global_protection_default_init(struct spi_nor *nor)
static void atmel_global_protection_late_init(struct spi_nor *nor)
{
nor->params->locking_ops = &atmel_global_protection_ops;
}
static const struct spi_nor_fixups atmel_global_protection_fixups = {
.default_init = atmel_global_protection_default_init,
.late_init = atmel_global_protection_late_init,
};
static const struct flash_info atmel_parts[] = {
/* Atmel -- some are (confusingly) marketed as "DataFlash" */
{ "at25fs010", INFO(0x1f6601, 0, 32 * 1024, 4, SECT_4K | SPI_NOR_HAS_LOCK)
{ "at25fs010", INFO(0x1f6601, 0, 32 * 1024, 4)
FLAGS(SPI_NOR_HAS_LOCK)
NO_SFDP_FLAGS(SECT_4K)
.fixups = &atmel_at25fs_fixups },
{ "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_HAS_LOCK)
{ "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8)
FLAGS(SPI_NOR_HAS_LOCK)
NO_SFDP_FLAGS(SECT_4K)
.fixups = &atmel_at25fs_fixups },
{ "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8,
SECT_4K | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
.fixups = &atmel_global_protection_fixups },
{ "at25df321", INFO(0x1f4700, 0, 64 * 1024, 64,
SECT_4K | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
.fixups = &atmel_global_protection_fixups },
{ "at25df321a", INFO(0x1f4701, 0, 64 * 1024, 64,
SECT_4K | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
.fixups = &atmel_global_protection_fixups },
{ "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128,
SECT_4K | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
.fixups = &atmel_global_protection_fixups },
{ "at25sl321", INFO(0x1f4216, 0, 64 * 1024, 64,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8, SECT_4K) },
{ "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16,
SECT_4K | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
.fixups = &atmel_global_protection_fixups },
{ "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32,
SECT_4K | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
.fixups = &atmel_global_protection_fixups },
{ "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64,
SECT_4K | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
.fixups = &atmel_global_protection_fixups },
{ "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16, SECT_4K) },
{ "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
NO_SFDP_FLAGS(SECT_4K)
.fixups = &atmel_global_protection_fixups },
{ "at25df321", INFO(0x1f4700, 0, 64 * 1024, 64)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
NO_SFDP_FLAGS(SECT_4K)
.fixups = &atmel_global_protection_fixups },
{ "at25df321a", INFO(0x1f4701, 0, 64 * 1024, 64)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
NO_SFDP_FLAGS(SECT_4K)
.fixups = &atmel_global_protection_fixups },
{ "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
NO_SFDP_FLAGS(SECT_4K)
.fixups = &atmel_global_protection_fixups },
{ "at25sl321", INFO(0x1f4216, 0, 64 * 1024, 64)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8)
NO_SFDP_FLAGS(SECT_4K) },
{ "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
NO_SFDP_FLAGS(SECT_4K)
.fixups = &atmel_global_protection_fixups },
{ "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
NO_SFDP_FLAGS(SECT_4K)
.fixups = &atmel_global_protection_fixups },
{ "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
NO_SFDP_FLAGS(SECT_4K)
.fixups = &atmel_global_protection_fixups },
{ "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16)
NO_SFDP_FLAGS(SECT_4K) },
};
const struct spi_nor_manufacturer spi_nor_atmel = {
......
......@@ -10,16 +10,11 @@
static const struct flash_info catalyst_parts[] = {
/* Catalyst / On Semiconductor -- non-JEDEC */
{ "cat25c11", CAT25_INFO(16, 8, 16, 1,
SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
{ "cat25c03", CAT25_INFO(32, 8, 16, 2,
SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
{ "cat25c09", CAT25_INFO(128, 8, 32, 2,
SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
{ "cat25c17", CAT25_INFO(256, 8, 32, 2,
SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
{ "cat25128", CAT25_INFO(2048, 8, 64, 2,
SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
{ "cat25c11", CAT25_INFO(16, 8, 16, 1) },
{ "cat25c03", CAT25_INFO(32, 8, 16, 2) },
{ "cat25c09", CAT25_INFO(128, 8, 32, 2) },
{ "cat25c17", CAT25_INFO(256, 8, 32, 2) },
{ "cat25128", CAT25_INFO(2048, 8, 64, 2) },
};
const struct spi_nor_manufacturer spi_nor_catalyst = {
......
......@@ -1952,6 +1952,7 @@ static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
struct spi_nor *nor = mtd_to_spi_nor(mtd);
size_t page_offset, page_remain, i;
ssize_t ret;
u32 page_size = nor->params->page_size;
dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
......@@ -1968,16 +1969,15 @@ static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
* calculated with an AND operation. In the other cases we
* need to do a modulus operation (more expensive).
*/
if (is_power_of_2(nor->page_size)) {
page_offset = addr & (nor->page_size - 1);
if (is_power_of_2(page_size)) {
page_offset = addr & (page_size - 1);
} else {
uint64_t aux = addr;
page_offset = do_div(aux, nor->page_size);
page_offset = do_div(aux, page_size);
}
/* the size of data remaining on the first page */
page_remain = min_t(size_t,
nor->page_size - page_offset, len - i);
page_remain = min_t(size_t, page_size - page_offset, len - i);
addr = spi_nor_convert_addr(nor, addr);
......@@ -2115,7 +2115,7 @@ static int spi_nor_spimem_check_op(struct spi_nor *nor,
*/
op->addr.nbytes = 4;
if (!spi_mem_supports_op(nor->spimem, op)) {
if (nor->mtd.size > SZ_16M)
if (nor->params->size > SZ_16M)
return -EOPNOTSUPP;
/* If flash size <= 16MB, 3 address bytes are sufficient */
......@@ -2141,7 +2141,7 @@ static int spi_nor_spimem_check_readop(struct spi_nor *nor,
struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(read->opcode, 0),
SPI_MEM_OP_ADDR(3, 0, 0),
SPI_MEM_OP_DUMMY(1, 0),
SPI_MEM_OP_DATA_IN(1, NULL, 0));
SPI_MEM_OP_DATA_IN(2, NULL, 0));
spi_nor_spimem_setup_op(nor, &op, read->proto);
......@@ -2167,7 +2167,7 @@ static int spi_nor_spimem_check_pp(struct spi_nor *nor,
struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(pp->opcode, 0),
SPI_MEM_OP_ADDR(3, 0, 0),
SPI_MEM_OP_NO_DUMMY,
SPI_MEM_OP_DATA_OUT(1, NULL, 0));
SPI_MEM_OP_DATA_OUT(2, NULL, 0));
spi_nor_spimem_setup_op(nor, &op, pp->proto);
......@@ -2484,13 +2484,61 @@ static int spi_nor_default_setup(struct spi_nor *nor,
return 0;
}
static int spi_nor_set_addr_width(struct spi_nor *nor)
{
if (nor->addr_width) {
/* already configured from SFDP */
} else if (nor->read_proto == SNOR_PROTO_8_8_8_DTR) {
/*
* In 8D-8D-8D mode, one byte takes half a cycle to transfer. So
* in this protocol an odd address width cannot be used because
* then the address phase would only span a cycle and a half.
* Half a cycle would be left over. We would then have to start
* the dummy phase in the middle of a cycle and so too the data
* phase, and we will end the transaction with half a cycle left
* over.
*
* Force all 8D-8D-8D flashes to use an address width of 4 to
* avoid this situation.
*/
nor->addr_width = 4;
} else if (nor->info->addr_width) {
nor->addr_width = nor->info->addr_width;
} else {
nor->addr_width = 3;
}
if (nor->addr_width == 3 && nor->params->size > 0x1000000) {
/* enable 4-byte addressing if the device exceeds 16MiB */
nor->addr_width = 4;
}
if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) {
dev_dbg(nor->dev, "address width is too large: %u\n",
nor->addr_width);
return -EINVAL;
}
/* Set 4byte opcodes when possible. */
if (nor->addr_width == 4 && nor->flags & SNOR_F_4B_OPCODES &&
!(nor->flags & SNOR_F_HAS_4BAIT))
spi_nor_set_4byte_opcodes(nor);
return 0;
}
static int spi_nor_setup(struct spi_nor *nor,
const struct spi_nor_hwcaps *hwcaps)
{
if (!nor->params->setup)
return 0;
int ret;
return nor->params->setup(nor, hwcaps);
if (nor->params->setup) {
ret = nor->params->setup(nor, hwcaps);
if (ret)
return ret;
}
return spi_nor_set_addr_width(nor);
}
/**
......@@ -2509,107 +2557,50 @@ static void spi_nor_manufacturer_init_params(struct spi_nor *nor)
}
/**
* spi_nor_sfdp_init_params() - Initialize the flash's parameters and settings
* based on JESD216 SFDP standard.
* @nor: pointer to a 'struct spi_nor'.
*
* The method has a roll-back mechanism: in case the SFDP parsing fails, the
* legacy flash parameters and settings will be restored.
*/
static void spi_nor_sfdp_init_params(struct spi_nor *nor)
{
struct spi_nor_flash_parameter sfdp_params;
memcpy(&sfdp_params, nor->params, sizeof(sfdp_params));
if (spi_nor_parse_sfdp(nor)) {
memcpy(nor->params, &sfdp_params, sizeof(*nor->params));
nor->addr_width = 0;
nor->flags &= ~SNOR_F_4B_OPCODES;
}
}
/**
* spi_nor_info_init_params() - Initialize the flash's parameters and settings
* based on nor->info data.
* spi_nor_no_sfdp_init_params() - Initialize the flash's parameters and
* settings based on nor->info->sfdp_flags. This method should be called only by
* flashes that do not define SFDP tables. If the flash supports SFDP but the
* information is wrong and the settings from this function can not be retrieved
* by parsing SFDP, one should instead use the fixup hooks and update the wrong
* bits.
* @nor: pointer to a 'struct spi_nor'.
*/
static void spi_nor_info_init_params(struct spi_nor *nor)
static void spi_nor_no_sfdp_init_params(struct spi_nor *nor)
{
struct spi_nor_flash_parameter *params = nor->params;
struct spi_nor_erase_map *map = &params->erase_map;
const struct flash_info *info = nor->info;
struct device_node *np = spi_nor_get_flash_node(nor);
const u8 no_sfdp_flags = nor->info->no_sfdp_flags;
u8 i, erase_mask;
/* Initialize default flash parameters and settings. */
params->quad_enable = spi_nor_sr2_bit1_quad_enable;
params->set_4byte_addr_mode = spansion_set_4byte_addr_mode;
params->setup = spi_nor_default_setup;
params->otp.org = &info->otp_org;
/* Default to 16-bit Write Status (01h) Command */
nor->flags |= SNOR_F_HAS_16BIT_SR;
/* Set SPI NOR sizes. */
params->writesize = 1;
params->size = (u64)info->sector_size * info->n_sectors;
params->page_size = info->page_size;
if (!(info->flags & SPI_NOR_NO_FR)) {
/* Default to Fast Read for DT and non-DT platform devices. */
params->hwcaps.mask |= SNOR_HWCAPS_READ_FAST;
/* Mask out Fast Read if not requested at DT instantiation. */
if (np && !of_property_read_bool(np, "m25p,fast-read"))
params->hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;
}
/* (Fast) Read settings. */
params->hwcaps.mask |= SNOR_HWCAPS_READ;
spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ],
0, 0, SPINOR_OP_READ,
SNOR_PROTO_1_1_1);
if (params->hwcaps.mask & SNOR_HWCAPS_READ_FAST)
spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_FAST],
0, 8, SPINOR_OP_READ_FAST,
SNOR_PROTO_1_1_1);
if (info->flags & SPI_NOR_DUAL_READ) {
if (no_sfdp_flags & SPI_NOR_DUAL_READ) {
params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;
spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_2],
0, 8, SPINOR_OP_READ_1_1_2,
SNOR_PROTO_1_1_2);
}
if (info->flags & SPI_NOR_QUAD_READ) {
if (no_sfdp_flags & SPI_NOR_QUAD_READ) {
params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_4],
0, 8, SPINOR_OP_READ_1_1_4,
SNOR_PROTO_1_1_4);
}
if (info->flags & SPI_NOR_OCTAL_READ) {
if (no_sfdp_flags & SPI_NOR_OCTAL_READ) {
params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8;
spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_8],
0, 8, SPINOR_OP_READ_1_1_8,
SNOR_PROTO_1_1_8);
}
if (info->flags & SPI_NOR_OCTAL_DTR_READ) {
if (no_sfdp_flags & SPI_NOR_OCTAL_DTR_READ) {
params->hwcaps.mask |= SNOR_HWCAPS_READ_8_8_8_DTR;
spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_8_8_8_DTR],
0, 20, SPINOR_OP_READ_FAST,
SNOR_PROTO_8_8_8_DTR);
}
/* Page Program settings. */
params->hwcaps.mask |= SNOR_HWCAPS_PP;
spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
SPINOR_OP_PP, SNOR_PROTO_1_1_1);
if (info->flags & SPI_NOR_OCTAL_DTR_PP) {
if (no_sfdp_flags & SPI_NOR_OCTAL_DTR_PP) {
params->hwcaps.mask |= SNOR_HWCAPS_PP_8_8_8_DTR;
/*
* Since xSPI Page Program opcode is backward compatible with
......@@ -2625,52 +2616,111 @@ static void spi_nor_info_init_params(struct spi_nor *nor)
*/
erase_mask = 0;
i = 0;
if (info->flags & SECT_4K_PMC) {
if (no_sfdp_flags & SECT_4K_PMC) {
erase_mask |= BIT(i);
spi_nor_set_erase_type(&map->erase_type[i], 4096u,
SPINOR_OP_BE_4K_PMC);
i++;
} else if (info->flags & SECT_4K) {
} else if (no_sfdp_flags & SECT_4K) {
erase_mask |= BIT(i);
spi_nor_set_erase_type(&map->erase_type[i], 4096u,
SPINOR_OP_BE_4K);
i++;
}
erase_mask |= BIT(i);
spi_nor_set_erase_type(&map->erase_type[i], info->sector_size,
spi_nor_set_erase_type(&map->erase_type[i], nor->info->sector_size,
SPINOR_OP_SE);
spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
}
/**
* spi_nor_post_sfdp_fixups() - Updates the flash's parameters and settings
* after SFDP has been parsed (is also called for SPI NORs that do not
* support RDSFDP).
* spi_nor_init_flags() - Initialize NOR flags for settings that are not defined
* in the JESD216 SFDP standard, thus can not be retrieved when parsing SFDP.
* @nor: pointer to a 'struct spi_nor'
*
* Typically used to tweak various parameters that could not be extracted by
* other means (i.e. when information provided by the SFDP/flash_info tables
* are incomplete or wrong).
*/
static void spi_nor_post_sfdp_fixups(struct spi_nor *nor)
static void spi_nor_init_flags(struct spi_nor *nor)
{
if (nor->manufacturer && nor->manufacturer->fixups &&
nor->manufacturer->fixups->post_sfdp)
nor->manufacturer->fixups->post_sfdp(nor);
struct device_node *np = spi_nor_get_flash_node(nor);
const u16 flags = nor->info->flags;
if (of_property_read_bool(np, "broken-flash-reset"))
nor->flags |= SNOR_F_BROKEN_RESET;
if (nor->info->fixups && nor->info->fixups->post_sfdp)
nor->info->fixups->post_sfdp(nor);
if (flags & SPI_NOR_SWP_IS_VOLATILE)
nor->flags |= SNOR_F_SWP_IS_VOLATILE;
if (flags & SPI_NOR_HAS_LOCK)
nor->flags |= SNOR_F_HAS_LOCK;
if (flags & SPI_NOR_HAS_TB) {
nor->flags |= SNOR_F_HAS_SR_TB;
if (flags & SPI_NOR_TB_SR_BIT6)
nor->flags |= SNOR_F_HAS_SR_TB_BIT6;
}
if (flags & SPI_NOR_4BIT_BP) {
nor->flags |= SNOR_F_HAS_4BIT_BP;
if (flags & SPI_NOR_BP3_SR_BIT6)
nor->flags |= SNOR_F_HAS_SR_BP3_BIT6;
}
if (flags & NO_CHIP_ERASE)
nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
if (flags & USE_CLSR)
nor->flags |= SNOR_F_USE_CLSR;
if (flags & USE_FSR)
nor->flags |= SNOR_F_USE_FSR;
/*
* Make sure the XSR_RDY flag is set before calling
* spi_nor_wait_till_ready(). Xilinx S3AN share MFR
* with Atmel SPI NOR.
*/
if (flags & SPI_NOR_XSR_RDY)
nor->flags |= SNOR_F_READY_XSR_RDY;
}
/**
* spi_nor_init_fixup_flags() - Initialize NOR flags for settings that can not
* be discovered by SFDP for this particular flash because the SFDP table that
* indicates this support is not defined in the flash. In case the table for
* this support is defined but has wrong values, one should instead use a
* post_sfdp() hook to set the SNOR_F equivalent flag.
* @nor: pointer to a 'struct spi_nor'
*/
static void spi_nor_init_fixup_flags(struct spi_nor *nor)
{
const u8 fixup_flags = nor->info->fixup_flags;
if (fixup_flags & SPI_NOR_4B_OPCODES)
nor->flags |= SNOR_F_4B_OPCODES;
if (fixup_flags & SPI_NOR_IO_MODE_EN_VOLATILE)
nor->flags |= SNOR_F_IO_MODE_EN_VOLATILE;
}
/**
* spi_nor_late_init_params() - Late initialization of default flash parameters.
* @nor: pointer to a 'struct spi_nor'
*
* Used to set default flash parameters and settings when the ->default_init()
* hook or the SFDP parser let voids.
* Used to initialize flash parameters that are not declared in the JESD216
* SFDP standard, or where SFDP tables are not defined at all.
* Will replace the spi_nor_manufacturer_init_params() method.
*/
static void spi_nor_late_init_params(struct spi_nor *nor)
{
if (nor->manufacturer && nor->manufacturer->fixups &&
nor->manufacturer->fixups->late_init)
nor->manufacturer->fixups->late_init(nor);
if (nor->info->fixups && nor->info->fixups->late_init)
nor->info->fixups->late_init(nor);
spi_nor_init_flags(nor);
spi_nor_init_fixup_flags(nor);
/*
* NOR protection support. When locking_ops are not provided, we pick
* the default ones.
......@@ -2679,6 +2729,99 @@ static void spi_nor_late_init_params(struct spi_nor *nor)
spi_nor_init_default_locking_ops(nor);
}
/**
* spi_nor_sfdp_init_params_deprecated() - Deprecated way of initializing flash
* parameters and settings based on JESD216 SFDP standard.
* @nor: pointer to a 'struct spi_nor'.
*
* The method has a roll-back mechanism: in case the SFDP parsing fails, the
* legacy flash parameters and settings will be restored.
*/
static void spi_nor_sfdp_init_params_deprecated(struct spi_nor *nor)
{
struct spi_nor_flash_parameter sfdp_params;
memcpy(&sfdp_params, nor->params, sizeof(sfdp_params));
if (spi_nor_parse_sfdp(nor)) {
memcpy(nor->params, &sfdp_params, sizeof(*nor->params));
nor->addr_width = 0;
nor->flags &= ~SNOR_F_4B_OPCODES;
}
}
/**
* spi_nor_init_params_deprecated() - Deprecated way of initializing flash
* parameters and settings.
* @nor: pointer to a 'struct spi_nor'.
*
* The method assumes that the flash doesn't support SFDP, so it initializes the
* flash parameters in spi_nor_no_sfdp_init_params(), which can later be
* overwritten when parsing SFDP, if supported.
*/
static void spi_nor_init_params_deprecated(struct spi_nor *nor)
{
spi_nor_no_sfdp_init_params(nor);
spi_nor_manufacturer_init_params(nor);
if (nor->info->no_sfdp_flags & (SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ |
SPI_NOR_OCTAL_READ |
SPI_NOR_OCTAL_DTR_READ))
spi_nor_sfdp_init_params_deprecated(nor);
}
/**
* spi_nor_init_default_params() - Default initialization of flash parameters
* and settings. Done for all flashes, regardless of whether they define SFDP tables
* or not.
* @nor: pointer to a 'struct spi_nor'.
*/
static void spi_nor_init_default_params(struct spi_nor *nor)
{
struct spi_nor_flash_parameter *params = nor->params;
const struct flash_info *info = nor->info;
struct device_node *np = spi_nor_get_flash_node(nor);
params->quad_enable = spi_nor_sr2_bit1_quad_enable;
params->set_4byte_addr_mode = spansion_set_4byte_addr_mode;
params->setup = spi_nor_default_setup;
params->otp.org = &info->otp_org;
/* Default to 16-bit Write Status (01h) Command */
nor->flags |= SNOR_F_HAS_16BIT_SR;
/* Set SPI NOR sizes. */
params->writesize = 1;
params->size = (u64)info->sector_size * info->n_sectors;
params->page_size = info->page_size;
if (!(info->flags & SPI_NOR_NO_FR)) {
/* Default to Fast Read for DT and non-DT platform devices. */
params->hwcaps.mask |= SNOR_HWCAPS_READ_FAST;
/* Mask out Fast Read if not requested at DT instantiation. */
if (np && !of_property_read_bool(np, "m25p,fast-read"))
params->hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;
}
/* (Fast) Read settings. */
params->hwcaps.mask |= SNOR_HWCAPS_READ;
spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ],
0, 0, SPINOR_OP_READ,
SNOR_PROTO_1_1_1);
if (params->hwcaps.mask & SNOR_HWCAPS_READ_FAST)
spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_FAST],
0, 8, SPINOR_OP_READ_FAST,
SNOR_PROTO_1_1_1);
/* Page Program settings. */
params->hwcaps.mask |= SNOR_HWCAPS_PP;
spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
SPINOR_OP_PP, SNOR_PROTO_1_1_1);
}
/**
* spi_nor_init_params() - Initialize the flash's parameters and settings.
* @nor: pointer to a 'struct spi_nor'.
......@@ -2699,39 +2842,44 @@ static void spi_nor_late_init_params(struct spi_nor *nor)
* which can be overwritten by:
* 3/ SFDP flash parameters initialization. JESD216 SFDP is a standard and
* should be more accurate than the above.
* spi_nor_sfdp_init_params()
* spi_nor_parse_sfdp() or spi_nor_no_sfdp_init_params()
*
* Please note that there is a ->post_bfpt() fixup hook that can overwrite
* the flash parameters and settings immediately after parsing the Basic
* Flash Parameter Table.
* spi_nor_post_sfdp_fixups() is called after the SFDP tables are parsed.
* It is used to tweak various flash parameters when information provided
* by the SFDP tables are wrong.
*
* which can be overwritten by:
* 4/ Post SFDP flash parameters initialization. Used to tweak various
* parameters that could not be extracted by other means (i.e. when
* information provided by the SFDP/flash_info tables are incomplete or
* wrong).
* spi_nor_post_sfdp_fixups()
*
* 5/ Late default flash parameters initialization, used when the
* ->default_init() hook or the SFDP parser do not set specific params.
* 4/ Late flash parameters initialization, used to initialize flash
* parameters that are not declared in the JESD216 SFDP standard, or where SFDP
* tables are not defined at all.
* spi_nor_late_init_params()
*
* Return: 0 on success, -errno otherwise.
*/
static int spi_nor_init_params(struct spi_nor *nor)
{
int ret;
nor->params = devm_kzalloc(nor->dev, sizeof(*nor->params), GFP_KERNEL);
if (!nor->params)
return -ENOMEM;
spi_nor_info_init_params(nor);
spi_nor_manufacturer_init_params(nor);
spi_nor_init_default_params(nor);
if ((nor->info->flags & (SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_OCTAL_READ | SPI_NOR_OCTAL_DTR_READ)) &&
!(nor->info->flags & SPI_NOR_SKIP_SFDP))
spi_nor_sfdp_init_params(nor);
spi_nor_post_sfdp_fixups(nor);
if (nor->info->parse_sfdp) {
ret = spi_nor_parse_sfdp(nor);
if (ret) {
dev_err(nor->dev, "BFPT parsing failed. Please consider using SPI_NOR_SKIP_SFDP when declaring the flash\n");
return ret;
}
} else if (nor->info->no_sfdp_flags & SPI_NOR_SKIP_SFDP) {
spi_nor_no_sfdp_init_params(nor);
} else {
spi_nor_init_params_deprecated(nor);
}
spi_nor_late_init_params(nor);
......@@ -2978,59 +3126,6 @@ static const struct flash_info *spi_nor_match_id(struct spi_nor *nor,
return NULL;
}
static int spi_nor_set_addr_width(struct spi_nor *nor)
{
if (nor->addr_width) {
/* already configured from SFDP */
} else if (nor->read_proto == SNOR_PROTO_8_8_8_DTR) {
/*
* In 8D-8D-8D mode, one byte takes half a cycle to transfer. So
* in this protocol an odd address width cannot be used because
* then the address phase would only span a cycle and a half.
* Half a cycle would be left over. We would then have to start
* the dummy phase in the middle of a cycle and so too the data
* phase, and we will end the transaction with half a cycle left
* over.
*
* Force all 8D-8D-8D flashes to use an address width of 4 to
* avoid this situation.
*/
nor->addr_width = 4;
} else if (nor->info->addr_width) {
nor->addr_width = nor->info->addr_width;
} else {
nor->addr_width = 3;
}
if (nor->addr_width == 3 && nor->mtd.size > 0x1000000) {
/* enable 4-byte addressing if the device exceeds 16MiB */
nor->addr_width = 4;
}
if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) {
dev_dbg(nor->dev, "address width is too large: %u\n",
nor->addr_width);
return -EINVAL;
}
/* Set 4byte opcodes when possible. */
if (nor->addr_width == 4 && nor->flags & SNOR_F_4B_OPCODES &&
!(nor->flags & SNOR_F_HAS_4BAIT))
spi_nor_set_4byte_opcodes(nor);
return 0;
}
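For illustration, the half-cycle arithmetic that the comment above relies on can be checked with a small standalone sketch (purely a worked example, not driver code):

/* Worked example: in 8D-8D-8D DTR one byte is transferred per half clock
 * cycle, so an N-byte address phase spans N/2 cycles; odd widths leave
 * half a cycle over, which is why a 4-byte width is forced.
 */
#include <stdio.h>

int main(void)
{
	int nbytes;

	for (nbytes = 3; nbytes <= 4; nbytes++)
		printf("%d-byte address -> %.1f cycles\n", nbytes, nbytes / 2.0);
	return 0;
}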
static void spi_nor_debugfs_init(struct spi_nor *nor,
const struct flash_info *info)
{
struct mtd_info *mtd = &nor->mtd;
mtd->dbg.partname = info->name;
mtd->dbg.partid = devm_kasprintf(nor->dev, GFP_KERNEL, "spi-nor:%*phN",
info->id_len, info->id);
}
static const struct flash_info *spi_nor_get_flash_info(struct spi_nor *nor,
const char *name)
{
......@@ -3071,13 +3166,41 @@ static const struct flash_info *spi_nor_get_flash_info(struct spi_nor *nor,
return info;
}
static void spi_nor_set_mtd_info(struct spi_nor *nor)
{
struct mtd_info *mtd = &nor->mtd;
struct device *dev = nor->dev;
spi_nor_set_mtd_locking_ops(nor);
spi_nor_set_mtd_otp_ops(nor);
mtd->dev.parent = dev;
if (!mtd->name)
mtd->name = dev_name(dev);
mtd->type = MTD_NORFLASH;
mtd->flags = MTD_CAP_NORFLASH;
if (nor->info->flags & SPI_NOR_NO_ERASE)
mtd->flags |= MTD_NO_ERASE;
mtd->writesize = nor->params->writesize;
mtd->writebufsize = nor->params->page_size;
mtd->size = nor->params->size;
mtd->_erase = spi_nor_erase;
mtd->_read = spi_nor_read;
/* Might be already set by some SST flashes. */
if (!mtd->_write)
mtd->_write = spi_nor_write;
mtd->_suspend = spi_nor_suspend;
mtd->_resume = spi_nor_resume;
mtd->_get_device = spi_nor_get_device;
mtd->_put_device = spi_nor_put_device;
}
int spi_nor_scan(struct spi_nor *nor, const char *name,
const struct spi_nor_hwcaps *hwcaps)
{
const struct flash_info *info;
struct device *dev = nor->dev;
struct mtd_info *mtd = &nor->mtd;
struct device_node *np = spi_nor_get_flash_node(nor);
int ret;
int i;
......@@ -3094,7 +3217,7 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
* We need the bounce buffer early to read/write registers when going
* through the spi-mem layer (buffers have to be DMA-able).
* For spi-mem drivers, we'll reallocate a new buffer if
* nor->page_size turns out to be greater than PAGE_SIZE (which
* nor->params->page_size turns out to be greater than PAGE_SIZE (which
 * shouldn't happen anytime soon since NOR pages are usually less
* than 1KB) after spi_nor_scan() returns.
*/
......@@ -3110,102 +3233,31 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
nor->info = info;
spi_nor_debugfs_init(nor, info);
mutex_init(&nor->lock);
/*
* Make sure the XSR_RDY flag is set before calling
 * spi_nor_wait_till_ready(). Xilinx S3AN flashes share the MFR ID
* with Atmel SPI NOR.
*/
if (info->flags & SPI_NOR_XSR_RDY)
nor->flags |= SNOR_F_READY_XSR_RDY;
if (info->flags & SPI_NOR_HAS_LOCK)
nor->flags |= SNOR_F_HAS_LOCK;
mtd->_write = spi_nor_write;
/* Init flash parameters based on flash_info struct and SFDP */
ret = spi_nor_init_params(nor);
if (ret)
return ret;
if (!mtd->name)
mtd->name = dev_name(dev);
mtd->priv = nor;
mtd->type = MTD_NORFLASH;
mtd->writesize = nor->params->writesize;
mtd->flags = MTD_CAP_NORFLASH;
mtd->size = nor->params->size;
mtd->_erase = spi_nor_erase;
mtd->_read = spi_nor_read;
mtd->_suspend = spi_nor_suspend;
mtd->_resume = spi_nor_resume;
mtd->_get_device = spi_nor_get_device;
mtd->_put_device = spi_nor_put_device;
if (info->flags & USE_FSR)
nor->flags |= SNOR_F_USE_FSR;
if (info->flags & SPI_NOR_HAS_TB) {
nor->flags |= SNOR_F_HAS_SR_TB;
if (info->flags & SPI_NOR_TB_SR_BIT6)
nor->flags |= SNOR_F_HAS_SR_TB_BIT6;
}
if (info->flags & NO_CHIP_ERASE)
nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
if (info->flags & USE_CLSR)
nor->flags |= SNOR_F_USE_CLSR;
if (info->flags & SPI_NOR_SWP_IS_VOLATILE)
nor->flags |= SNOR_F_SWP_IS_VOLATILE;
if (info->flags & SPI_NOR_4BIT_BP) {
nor->flags |= SNOR_F_HAS_4BIT_BP;
if (info->flags & SPI_NOR_BP3_SR_BIT6)
nor->flags |= SNOR_F_HAS_SR_BP3_BIT6;
}
if (info->flags & SPI_NOR_NO_ERASE)
mtd->flags |= MTD_NO_ERASE;
mtd->dev.parent = dev;
nor->page_size = nor->params->page_size;
mtd->writebufsize = nor->page_size;
if (of_property_read_bool(np, "broken-flash-reset"))
nor->flags |= SNOR_F_BROKEN_RESET;
/*
* Configure the SPI memory:
* - select op codes for (Fast) Read, Page Program and Sector Erase.
* - set the number of dummy cycles (mode cycles + wait states).
* - set the SPI protocols for register and memory accesses.
* - set the address width.
*/
ret = spi_nor_setup(nor, hwcaps);
if (ret)
return ret;
if (info->flags & SPI_NOR_4B_OPCODES)
nor->flags |= SNOR_F_4B_OPCODES;
if (info->flags & SPI_NOR_IO_MODE_EN_VOLATILE)
nor->flags |= SNOR_F_IO_MODE_EN_VOLATILE;
ret = spi_nor_set_addr_width(nor);
if (ret)
return ret;
spi_nor_register_locking_ops(nor);
/* Send all the required SPI flash commands to initialize device */
ret = spi_nor_init(nor);
if (ret)
return ret;
/* Configure OTP parameters and ops */
spi_nor_otp_init(nor);
/* No mtd_info fields should be used up to this point. */
spi_nor_set_mtd_info(nor);
dev_info(dev, "%s (%lld Kbytes)\n", info->name,
(long long)mtd->size >> 10);
......@@ -3238,7 +3290,7 @@ static int spi_nor_create_read_dirmap(struct spi_nor *nor)
SPI_MEM_OP_DUMMY(nor->read_dummy, 0),
SPI_MEM_OP_DATA_IN(0, NULL, 0)),
.offset = 0,
.length = nor->mtd.size,
.length = nor->params->size,
};
struct spi_mem_op *op = &info.op_tmpl;
......@@ -3269,7 +3321,7 @@ static int spi_nor_create_write_dirmap(struct spi_nor *nor)
SPI_MEM_OP_NO_DUMMY,
SPI_MEM_OP_DATA_OUT(0, NULL, 0)),
.offset = 0,
.length = nor->mtd.size,
.length = nor->params->size,
};
struct spi_mem_op *op = &info.op_tmpl;
......@@ -3341,8 +3393,8 @@ static int spi_nor_probe(struct spi_mem *spimem)
* and add this logic so that if anyone ever adds support for such
* a NOR we don't end up with buffer overflows.
*/
if (nor->page_size > PAGE_SIZE) {
nor->bouncebuf_size = nor->page_size;
if (nor->params->page_size > PAGE_SIZE) {
nor->bouncebuf_size = nor->params->page_size;
devm_kfree(nor->dev, nor->bouncebuf);
nor->bouncebuf = devm_kmalloc(nor->dev,
nor->bouncebuf_size,
......@@ -3389,8 +3441,8 @@ static void spi_nor_shutdown(struct spi_mem *spimem)
* encourage new users to add support to the spi-nor library, and simply bind
* against a generic string here (e.g., "jedec,spi-nor").
*
* Many flash names are kept here in this list (as well as in spi-nor.c) to
* keep them available as module aliases for existing platforms.
* Many flash names are kept here in this list to keep them available
* as module aliases for existing platforms.
*/
static const struct spi_device_id spi_nor_dev_ids[] = {
/*
......
......@@ -250,7 +250,7 @@ struct spi_nor_otp {
* higher index in the array, the higher priority.
* @erase_map: the erase map parsed from the SFDP Sector Map Parameter
* Table.
* @otp_info: describes the OTP regions.
* @otp: SPI NOR OTP info.
* @octal_dtr_enable: enables SPI NOR octal DTR mode.
* @quad_enable: enables SPI NOR quad mode.
* @set_4byte_addr_mode: puts the SPI NOR in 4 byte addressing mode.
......@@ -262,7 +262,6 @@ struct spi_nor_otp {
* e.g. different opcodes, specific address calculation,
* page size, etc.
* @locking_ops: SPI NOR locking methods.
* @otp: SPI NOR OTP methods.
*/
struct spi_nor_flash_parameter {
u64 size;
......@@ -298,6 +297,9 @@ struct spi_nor_flash_parameter {
* parameters that could not be extracted by other means (i.e.
 * when information provided by the SFDP/flash_info tables is
* incomplete or wrong).
* @late_init: used to initialize flash parameters that are not declared in the
 * JESD216 SFDP standard, or where the SFDP tables are not defined at all.
* Will replace the default_init() hook.
*
* Those hooks can be used to tweak the SPI NOR configuration when the SFDP
* table is broken or not available.
......@@ -308,89 +310,121 @@ struct spi_nor_fixups {
const struct sfdp_parameter_header *bfpt_header,
const struct sfdp_bfpt *bfpt);
void (*post_sfdp)(struct spi_nor *nor);
void (*late_init)(struct spi_nor *nor);
};
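For illustration, the new late_init hook is wired through a fixups structure in the manufacturer drivers; a minimal sketch (all names below are hypothetical, real uses appear in the sst and spansion hunks later in this diff) looks like:

/* Hypothetical late_init hook: set a parameter that the JESD216 SFDP
 * tables cannot describe, such as part-specific locking ops.
 */
static const struct spi_nor_locking_ops xxflash_locking_ops = {
	/* .lock, .unlock and .is_locked implementations would go here */
};

static void xxflash_late_init(struct spi_nor *nor)
{
	nor->params->locking_ops = &xxflash_locking_ops;
}

static const struct spi_nor_fixups xxflash_fixups = {
	.late_init = xxflash_late_init,
};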
/**
* struct flash_info - SPI NOR flash_info entry.
* @name: the name of the flash.
* @id: the flash's ID bytes. The first three bytes are the
 * JEDEC ID. JEDEC ID zero means "no ID" (mostly older chips).
* @id_len: the number of bytes of ID.
* @sector_size: the size listed here is what works with SPINOR_OP_SE, which
* isn't necessarily called a "sector" by the vendor.
* @n_sectors: the number of sectors.
* @page_size: the flash's page size.
* @addr_width: the flash's address width.
*
* @parse_sfdp: true when flash supports SFDP tables. The false value has no
* meaning. If one wants to skip the SFDP tables, one should
* instead use the SPI_NOR_SKIP_SFDP sfdp_flag.
* @flags: flags that indicate support that is not defined by the
* JESD216 standard in its SFDP tables. Flag meanings:
* SPI_NOR_HAS_LOCK: flash supports lock/unlock via SR
* SPI_NOR_HAS_TB: flash SR has Top/Bottom (TB) protect bit. Must be
* used with SPI_NOR_HAS_LOCK.
* SPI_NOR_TB_SR_BIT6: Top/Bottom (TB) is bit 6 of status register.
* Must be used with SPI_NOR_HAS_TB.
* SPI_NOR_4BIT_BP: flash SR has 4 bit fields (BP0-3) for block
* protection.
* SPI_NOR_BP3_SR_BIT6: BP3 is bit 6 of status register. Must be used with
* SPI_NOR_4BIT_BP.
* SPI_NOR_SWP_IS_VOLATILE: flash has volatile software write protection bits.
* Usually these will power-up in a write-protected
* state.
* SPI_NOR_NO_ERASE: no erase command needed.
* NO_CHIP_ERASE: chip does not support chip erase.
* SPI_NOR_NO_FR: can't do fastread.
* USE_CLSR: use CLSR command.
* USE_FSR: use flag status register
 * SPI_NOR_XSR_RDY: S3AN flashes have a specific opcode to read the
* status register.
*
* @no_sfdp_flags: flags that indicate support that can be discovered via SFDP.
* Used when SFDP tables are not defined in the flash. These
* flags are used together with the SPI_NOR_SKIP_SFDP flag.
* SPI_NOR_SKIP_SFDP: skip parsing of SFDP tables.
* SECT_4K: SPINOR_OP_BE_4K works uniformly.
* SECT_4K_PMC: SPINOR_OP_BE_4K_PMC works uniformly.
* SPI_NOR_DUAL_READ: flash supports Dual Read.
* SPI_NOR_QUAD_READ: flash supports Quad Read.
* SPI_NOR_OCTAL_READ: flash supports Octal Read.
* SPI_NOR_OCTAL_DTR_READ: flash supports octal DTR Read.
* SPI_NOR_OCTAL_DTR_PP: flash supports Octal DTR Page Program.
*
* @fixup_flags: flags that indicate support that can be discovered via SFDP
 * ideally, but cannot be discovered for this particular flash
* because the SFDP table that indicates this support is not
* defined by the flash. In case the table for this support is
* defined but has wrong values, one should instead use a
* post_sfdp() hook to set the SNOR_F equivalent flag.
*
* SPI_NOR_4B_OPCODES: use dedicated 4byte address op codes to support
* memory size above 128Mib.
* SPI_NOR_IO_MODE_EN_VOLATILE: flash enables the best available I/O mode
* via a volatile bit.
* @mfr_flags: manufacturer private flags. Used in the manufacturer fixup
* hooks to differentiate support between flashes of the same
* manufacturer.
* @otp_org: flash's OTP organization.
* @fixups: part specific fixup hooks.
*/
struct flash_info {
char *name;
/*
* This array stores the ID bytes.
 * The first three bytes are the JEDEC ID.
* JEDEC ID zero means "no ID" (mostly older chips).
*/
u8 id[SPI_NOR_MAX_ID_LEN];
u8 id_len;
/* The size listed here is what works with SPINOR_OP_SE, which isn't
* necessarily called a "sector" by the vendor.
*/
unsigned sector_size;
u16 n_sectors;
u16 page_size;
u16 addr_width;
u32 flags;
#define SECT_4K BIT(0) /* SPINOR_OP_BE_4K works uniformly */
#define SPI_NOR_NO_ERASE BIT(1) /* No erase command needed */
#define SST_WRITE BIT(2) /* use SST byte programming */
#define SPI_NOR_NO_FR BIT(3) /* Can't do fastread */
#define SECT_4K_PMC BIT(4) /* SPINOR_OP_BE_4K_PMC works uniformly */
#define SPI_NOR_DUAL_READ BIT(5) /* Flash supports Dual Read */
#define SPI_NOR_QUAD_READ BIT(6) /* Flash supports Quad Read */
#define USE_FSR BIT(7) /* use flag status register */
#define SPI_NOR_HAS_LOCK BIT(8) /* Flash supports lock/unlock via SR */
#define SPI_NOR_HAS_TB BIT(9) /*
* Flash SR has Top/Bottom (TB) protect
* bit. Must be used with
* SPI_NOR_HAS_LOCK.
*/
#define SPI_NOR_XSR_RDY BIT(10) /*
 * S3AN flashes have a specific opcode to
* read the status register.
*/
#define SPI_NOR_4B_OPCODES BIT(11) /*
* Use dedicated 4byte address op codes
* to support memory size above 128Mib.
*/
#define NO_CHIP_ERASE BIT(12) /* Chip does not support chip erase */
#define SPI_NOR_SKIP_SFDP BIT(13) /* Skip parsing of SFDP tables */
#define USE_CLSR BIT(14) /* use CLSR command */
#define SPI_NOR_OCTAL_READ BIT(15) /* Flash supports Octal Read */
#define SPI_NOR_TB_SR_BIT6 BIT(16) /*
* Top/Bottom (TB) is bit 6 of
* status register. Must be used with
* SPI_NOR_HAS_TB.
*/
#define SPI_NOR_4BIT_BP BIT(17) /*
* Flash SR has 4 bit fields (BP0-3)
* for block protection.
*/
#define SPI_NOR_BP3_SR_BIT6 BIT(18) /*
* BP3 is bit 6 of status register.
* Must be used with SPI_NOR_4BIT_BP.
*/
#define SPI_NOR_OCTAL_DTR_READ BIT(19) /* Flash supports octal DTR Read. */
#define SPI_NOR_OCTAL_DTR_PP BIT(20) /* Flash supports Octal DTR Page Program */
#define SPI_NOR_IO_MODE_EN_VOLATILE BIT(21) /*
* Flash enables the best
* available I/O mode via a
* volatile bit.
*/
#define SPI_NOR_SWP_IS_VOLATILE BIT(22) /*
* Flash has volatile software write
* protection bits. Usually these will
* power-up in a write-protected state.
*/
char *name;
u8 id[SPI_NOR_MAX_ID_LEN];
u8 id_len;
unsigned sector_size;
u16 n_sectors;
u16 page_size;
u16 addr_width;
bool parse_sfdp;
u16 flags;
#define SPI_NOR_HAS_LOCK BIT(0)
#define SPI_NOR_HAS_TB BIT(1)
#define SPI_NOR_TB_SR_BIT6 BIT(2)
#define SPI_NOR_4BIT_BP BIT(3)
#define SPI_NOR_BP3_SR_BIT6 BIT(4)
#define SPI_NOR_SWP_IS_VOLATILE BIT(5)
#define SPI_NOR_NO_ERASE BIT(6)
#define NO_CHIP_ERASE BIT(7)
#define SPI_NOR_NO_FR BIT(8)
#define USE_CLSR BIT(9)
#define USE_FSR BIT(10)
#define SPI_NOR_XSR_RDY BIT(11)
u8 no_sfdp_flags;
#define SPI_NOR_SKIP_SFDP BIT(0)
#define SECT_4K BIT(1)
#define SECT_4K_PMC BIT(2)
#define SPI_NOR_DUAL_READ BIT(3)
#define SPI_NOR_QUAD_READ BIT(4)
#define SPI_NOR_OCTAL_READ BIT(5)
#define SPI_NOR_OCTAL_DTR_READ BIT(6)
#define SPI_NOR_OCTAL_DTR_PP BIT(7)
u8 fixup_flags;
#define SPI_NOR_4B_OPCODES BIT(0)
#define SPI_NOR_IO_MODE_EN_VOLATILE BIT(1)
u8 mfr_flags;
const struct spi_nor_otp_organization otp_org;
/* Part specific fixup hooks. */
const struct spi_nor_fixups *fixups;
};
/* Used when the "_ext_id" is two bytes at most */
#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors) \
.id = { \
((_jedec_id) >> 16) & 0xff, \
((_jedec_id) >> 8) & 0xff, \
......@@ -402,9 +436,8 @@ struct flash_info {
.sector_size = (_sector_size), \
.n_sectors = (_n_sectors), \
.page_size = 256, \
.flags = (_flags),
#define INFO6(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
#define INFO6(_jedec_id, _ext_id, _sector_size, _n_sectors) \
.id = { \
((_jedec_id) >> 16) & 0xff, \
((_jedec_id) >> 8) & 0xff, \
......@@ -417,14 +450,13 @@ struct flash_info {
.sector_size = (_sector_size), \
.n_sectors = (_n_sectors), \
.page_size = 256, \
.flags = (_flags),
#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width, _flags) \
#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width) \
.sector_size = (_sector_size), \
.n_sectors = (_n_sectors), \
.page_size = (_page_size), \
.addr_width = (_addr_width), \
.flags = (_flags),
.flags = SPI_NOR_NO_ERASE | SPI_NOR_NO_FR, \
#define S3AN_INFO(_jedec_id, _n_sectors, _page_size) \
.id = { \
......@@ -447,6 +479,21 @@ struct flash_info {
.n_regions = (_n_regions), \
},
#define PARSE_SFDP \
.parse_sfdp = true, \
#define FLAGS(_flags) \
.flags = (_flags), \
#define NO_SFDP_FLAGS(_no_sfdp_flags) \
.no_sfdp_flags = (_no_sfdp_flags), \
#define FIXUP_FLAGS(_fixup_flags) \
.fixup_flags = (_fixup_flags), \
#define MFR_FLAGS(_mfr_flags) \
.mfr_flags = (_mfr_flags), \
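As an illustration of how these helper macros compose in a flash_info entry, a hypothetical part (the name and JEDEC ID below are invented for the example, not a real flash) could be declared as:

/* Hypothetical flash_info entry, for illustration only: */
{ "xx25q128", INFO(0x7f3018, 0, 64 * 1024, 256)
	FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
	NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
	FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },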
/**
* struct spi_nor_manufacturer - SPI NOR manufacturer object
* @name: manufacturer name
......@@ -549,12 +596,12 @@ int spi_nor_post_bfpt_fixups(struct spi_nor *nor,
void spi_nor_init_default_locking_ops(struct spi_nor *nor);
void spi_nor_try_unlock_all(struct spi_nor *nor);
void spi_nor_register_locking_ops(struct spi_nor *nor);
void spi_nor_otp_init(struct spi_nor *nor);
void spi_nor_set_mtd_locking_ops(struct spi_nor *nor);
void spi_nor_set_mtd_otp_ops(struct spi_nor *nor);
static struct spi_nor __maybe_unused *mtd_to_spi_nor(struct mtd_info *mtd)
static inline struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd)
{
return mtd->priv;
return container_of(mtd, struct spi_nor, mtd);
}
#endif /* __LINUX_MTD_SPI_NOR_INTERNAL_H */
......@@ -10,21 +10,24 @@
static const struct flash_info eon_parts[] = {
/* EON -- en25xxx */
{ "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64, SECT_4K) },
{ "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) },
{ "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) },
{ "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
{ "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) },
{ "en25q80a", INFO(0x1c3014, 0, 64 * 1024, 16,
SECT_4K | SPI_NOR_DUAL_READ) },
{ "en25qh16", INFO(0x1c7015, 0, 64 * 1024, 32,
SECT_4K | SPI_NOR_DUAL_READ) },
{ "en25qh32", INFO(0x1c7016, 0, 64 * 1024, 64, 0) },
{ "en25qh64", INFO(0x1c7017, 0, 64 * 1024, 128,
SECT_4K | SPI_NOR_DUAL_READ) },
{ "en25qh128", INFO(0x1c7018, 0, 64 * 1024, 256, 0) },
{ "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) },
{ "en25s64", INFO(0x1c3817, 0, 64 * 1024, 128, SECT_4K) },
{ "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64)
NO_SFDP_FLAGS(SECT_4K) },
{ "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64) },
{ "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64) },
{ "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128) },
{ "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128)
NO_SFDP_FLAGS(SECT_4K) },
{ "en25q80a", INFO(0x1c3014, 0, 64 * 1024, 16)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
{ "en25qh16", INFO(0x1c7015, 0, 64 * 1024, 32)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
{ "en25qh32", INFO(0x1c7016, 0, 64 * 1024, 64) },
{ "en25qh64", INFO(0x1c7017, 0, 64 * 1024, 128)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
{ "en25qh128", INFO(0x1c7018, 0, 64 * 1024, 256) },
{ "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512) },
{ "en25s64", INFO(0x1c3817, 0, 64 * 1024, 128)
NO_SFDP_FLAGS(SECT_4K) },
};
const struct spi_nor_manufacturer spi_nor_eon = {
......
......@@ -10,12 +10,15 @@
static const struct flash_info esmt_parts[] = {
/* ESMT */
{ "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64,
SECT_4K | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
{ "f25l32qa", INFO(0x8c4116, 0, 64 * 1024, 64,
SECT_4K | SPI_NOR_HAS_LOCK) },
{ "f25l64qa", INFO(0x8c4117, 0, 64 * 1024, 128,
SECT_4K | SPI_NOR_HAS_LOCK) },
{ "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
NO_SFDP_FLAGS(SECT_4K) },
{ "f25l32qa", INFO(0x8c4116, 0, 64 * 1024, 64)
FLAGS(SPI_NOR_HAS_LOCK)
NO_SFDP_FLAGS(SECT_4K) },
{ "f25l64qa", INFO(0x8c4117, 0, 64 * 1024, 128)
FLAGS(SPI_NOR_HAS_LOCK)
NO_SFDP_FLAGS(SECT_4K) },
};
const struct spi_nor_manufacturer spi_nor_esmt = {
......
......@@ -10,14 +10,10 @@
static const struct flash_info everspin_parts[] = {
/* Everspin */
{ "mr25h128", CAT25_INFO(16 * 1024, 1, 256, 2,
SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
{ "mr25h256", CAT25_INFO(32 * 1024, 1, 256, 2,
SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
{ "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3,
SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
{ "mr25h40", CAT25_INFO(512 * 1024, 1, 256, 3,
SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
{ "mr25h128", CAT25_INFO(16 * 1024, 1, 256, 2) },
{ "mr25h256", CAT25_INFO(32 * 1024, 1, 256, 2) },
{ "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3) },
{ "mr25h40", CAT25_INFO(512 * 1024, 1, 256, 3) },
};
const struct spi_nor_manufacturer spi_nor_everspin = {
......
......@@ -10,7 +10,8 @@
static const struct flash_info fujitsu_parts[] = {
/* Fujitsu */
{ "mb85rs1mt", INFO(0x047f27, 0, 128 * 1024, 1, SPI_NOR_NO_ERASE) },
{ "mb85rs1mt", INFO(0x047f27, 0, 128 * 1024, 1)
FLAGS(SPI_NOR_NO_ERASE) },
};
const struct spi_nor_manufacturer spi_nor_fujitsu = {
......
......@@ -19,36 +19,43 @@ static void gd25q256_default_init(struct spi_nor *nor)
nor->params->quad_enable = spi_nor_sr1_bit6_quad_enable;
}
static struct spi_nor_fixups gd25q256_fixups = {
static const struct spi_nor_fixups gd25q256_fixups = {
.default_init = gd25q256_default_init,
};
static const struct flash_info gigadevice_parts[] = {
{ "gd25q16", INFO(0xc84015, 0, 64 * 1024, 32,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
{ "gd25q32", INFO(0xc84016, 0, 64 * 1024, 64,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
{ "gd25lq32", INFO(0xc86016, 0, 64 * 1024, 64,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
{ "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
{ "gd25lq64c", INFO(0xc86017, 0, 64 * 1024, 128,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
{ "gd25lq128d", INFO(0xc86018, 0, 64 * 1024, 256,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
{ "gd25q128", INFO(0xc84018, 0, 64 * 1024, 256,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
{ "gd25q256", INFO(0xc84019, 0, 64 * 1024, 512,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_4B_OPCODES | SPI_NOR_HAS_LOCK |
SPI_NOR_HAS_TB | SPI_NOR_TB_SR_BIT6)
{ "gd25q16", INFO(0xc84015, 0, 64 * 1024, 32)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "gd25q32", INFO(0xc84016, 0, 64 * 1024, 64)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "gd25lq32", INFO(0xc86016, 0, 64 * 1024, 64)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "gd25lq64c", INFO(0xc86017, 0, 64 * 1024, 128)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "gd25lq128d", INFO(0xc86018, 0, 64 * 1024, 256)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "gd25q128", INFO(0xc84018, 0, 64 * 1024, 256)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "gd25q256", INFO(0xc84019, 0, 64 * 1024, 512)
PARSE_SFDP
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_TB_SR_BIT6)
FIXUP_FLAGS(SPI_NOR_4B_OPCODES)
.fixups = &gd25q256_fixups },
};
......
......@@ -10,12 +10,12 @@
static const struct flash_info intel_parts[] = {
/* Intel/Numonyx -- xxxs33b */
{ "160s33b", INFO(0x898911, 0, 64 * 1024, 32,
SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
{ "320s33b", INFO(0x898912, 0, 64 * 1024, 64,
SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
{ "640s33b", INFO(0x898913, 0, 64 * 1024, 128,
SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
{ "160s33b", INFO(0x898911, 0, 64 * 1024, 32)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
{ "320s33b", INFO(0x898912, 0, 64 * 1024, 64)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
{ "640s33b", INFO(0x898913, 0, 64 * 1024, 128)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
};
const struct spi_nor_manufacturer spi_nor_intel = {
......
......@@ -25,44 +25,48 @@ is25lp256_post_bfpt_fixups(struct spi_nor *nor,
return 0;
}
static struct spi_nor_fixups is25lp256_fixups = {
static const struct spi_nor_fixups is25lp256_fixups = {
.post_bfpt = is25lp256_post_bfpt_fixups,
};
static const struct flash_info issi_parts[] = {
/* ISSI */
{ "is25cd512", INFO(0x7f9d20, 0, 32 * 1024, 2, SECT_4K) },
{ "is25lq040b", INFO(0x9d4013, 0, 64 * 1024, 8,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "is25lp016d", INFO(0x9d6015, 0, 64 * 1024, 32,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "is25lp080d", INFO(0x9d6014, 0, 64 * 1024, 16,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "is25lp032", INFO(0x9d6016, 0, 64 * 1024, 64,
SECT_4K | SPI_NOR_DUAL_READ) },
{ "is25lp064", INFO(0x9d6017, 0, 64 * 1024, 128,
SECT_4K | SPI_NOR_DUAL_READ) },
{ "is25lp128", INFO(0x9d6018, 0, 64 * 1024, 256,
SECT_4K | SPI_NOR_DUAL_READ) },
{ "is25lp256", INFO(0x9d6019, 0, 64 * 1024, 512,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_4B_OPCODES)
{ "is25cd512", INFO(0x7f9d20, 0, 32 * 1024, 2)
NO_SFDP_FLAGS(SECT_4K) },
{ "is25lq040b", INFO(0x9d4013, 0, 64 * 1024, 8)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "is25lp016d", INFO(0x9d6015, 0, 64 * 1024, 32)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "is25lp080d", INFO(0x9d6014, 0, 64 * 1024, 16)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "is25lp032", INFO(0x9d6016, 0, 64 * 1024, 64)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
{ "is25lp064", INFO(0x9d6017, 0, 64 * 1024, 128)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
{ "is25lp128", INFO(0x9d6018, 0, 64 * 1024, 256)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
{ "is25lp256", INFO(0x9d6019, 0, 64 * 1024, 512)
PARSE_SFDP
FIXUP_FLAGS(SPI_NOR_4B_OPCODES)
.fixups = &is25lp256_fixups },
{ "is25wp032", INFO(0x9d7016, 0, 64 * 1024, 64,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "is25wp064", INFO(0x9d7017, 0, 64 * 1024, 128,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "is25wp128", INFO(0x9d7018, 0, 64 * 1024, 256,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "is25wp256", INFO(0x9d7019, 0, 64 * 1024, 512,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_4B_OPCODES)
{ "is25wp032", INFO(0x9d7016, 0, 64 * 1024, 64)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "is25wp064", INFO(0x9d7017, 0, 64 * 1024, 128)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "is25wp128", INFO(0x9d7018, 0, 64 * 1024, 256)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "is25wp256", INFO(0x9d7019, 0, 64 * 1024, 512)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
FIXUP_FLAGS(SPI_NOR_4B_OPCODES)
.fixups = &is25lp256_fixups },
/* PMC */
{ "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) },
{ "pm25lv010", INFO(0, 0, 32 * 1024, 4, SECT_4K_PMC) },
{ "pm25lq032", INFO(0x7f9d46, 0, 64 * 1024, 64, SECT_4K) },
{ "pm25lv512", INFO(0, 0, 32 * 1024, 2)
NO_SFDP_FLAGS(SECT_4K_PMC) },
{ "pm25lv010", INFO(0, 0, 32 * 1024, 4)
NO_SFDP_FLAGS(SECT_4K_PMC) },
{ "pm25lq032", INFO(0x7f9d46, 0, 64 * 1024, 64)
NO_SFDP_FLAGS(SECT_4K) },
};
static void issi_default_init(struct spi_nor *nor)
......
......@@ -28,65 +28,78 @@ mx25l25635_post_bfpt_fixups(struct spi_nor *nor,
return 0;
}
static struct spi_nor_fixups mx25l25635_fixups = {
static const struct spi_nor_fixups mx25l25635_fixups = {
.post_bfpt = mx25l25635_post_bfpt_fixups,
};
static const struct flash_info macronix_parts[] = {
/* Macronix */
{ "mx25l512e", INFO(0xc22010, 0, 64 * 1024, 1, SECT_4K) },
{ "mx25l2005a", INFO(0xc22012, 0, 64 * 1024, 4, SECT_4K) },
{ "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K) },
{ "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16, 0) },
{ "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32, SECT_4K) },
{ "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, SECT_4K) },
{ "mx25l3255e", INFO(0xc29e16, 0, 64 * 1024, 64, SECT_4K) },
{ "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, SECT_4K) },
{ "mx25u2033e", INFO(0xc22532, 0, 64 * 1024, 4, SECT_4K) },
{ "mx25u3235f", INFO(0xc22536, 0, 64 * 1024, 64,
SECT_4K | SPI_NOR_DUAL_READ |
{ "mx25l512e", INFO(0xc22010, 0, 64 * 1024, 1)
NO_SFDP_FLAGS(SECT_4K) },
{ "mx25l2005a", INFO(0xc22012, 0, 64 * 1024, 4)
NO_SFDP_FLAGS(SECT_4K) },
{ "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8)
NO_SFDP_FLAGS(SECT_4K) },
{ "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16) },
{ "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32)
NO_SFDP_FLAGS(SECT_4K) },
{ "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64)
NO_SFDP_FLAGS(SECT_4K) },
{ "mx25l3255e", INFO(0xc29e16, 0, 64 * 1024, 64)
NO_SFDP_FLAGS(SECT_4K) },
{ "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128)
NO_SFDP_FLAGS(SECT_4K) },
{ "mx25u2033e", INFO(0xc22532, 0, 64 * 1024, 4)
NO_SFDP_FLAGS(SECT_4K) },
{ "mx25u3235f", INFO(0xc22536, 0, 64 * 1024, 64)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "mx25u4035", INFO(0xc22533, 0, 64 * 1024, 8, SECT_4K) },
{ "mx25u8035", INFO(0xc22534, 0, 64 * 1024, 16, SECT_4K) },
{ "mx25u6435f", INFO(0xc22537, 0, 64 * 1024, 128, SECT_4K) },
{ "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, SECT_4K |
SPI_NOR_HAS_LOCK | SPI_NOR_4BIT_BP) },
{ "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
{ "mx25r1635f", INFO(0xc22815, 0, 64 * 1024, 32,
SECT_4K | SPI_NOR_DUAL_READ |
{ "mx25u4035", INFO(0xc22533, 0, 64 * 1024, 8)
NO_SFDP_FLAGS(SECT_4K) },
{ "mx25u8035", INFO(0xc22534, 0, 64 * 1024, 16)
NO_SFDP_FLAGS(SECT_4K) },
{ "mx25u6435f", INFO(0xc22537, 0, 64 * 1024, 128)
NO_SFDP_FLAGS(SECT_4K) },
{ "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_4BIT_BP)
NO_SFDP_FLAGS(SECT_4K) },
{ "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256) },
{ "mx25r1635f", INFO(0xc22815, 0, 64 * 1024, 32)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "mx25r3235f", INFO(0xc22816, 0, 64 * 1024, 64,
SECT_4K | SPI_NOR_DUAL_READ |
{ "mx25r3235f", INFO(0xc22816, 0, 64 * 1024, 64)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "mx25u12835f", INFO(0xc22538, 0, 64 * 1024, 256,
SECT_4K | SPI_NOR_DUAL_READ |
{ "mx25u12835f", INFO(0xc22538, 0, 64 * 1024, 256)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512,
SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
{ "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512)
NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
.fixups = &mx25l25635_fixups },
{ "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512,
SECT_4K | SPI_NOR_4B_OPCODES) },
{ "mx25u51245g", INFO(0xc2253a, 0, 64 * 1024, 1024,
SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
{ "mx25v8035f", INFO(0xc22314, 0, 64 * 1024, 16,
SECT_4K | SPI_NOR_DUAL_READ |
{ "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512)
NO_SFDP_FLAGS(SECT_4K)
FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
{ "mx25u51245g", INFO(0xc2253a, 0, 64 * 1024, 1024)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
{ "mx25v8035f", INFO(0xc22314, 0, 64 * 1024, 16)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
{ "mx66l51235f", INFO(0xc2201a, 0, 64 * 1024, 1024,
SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_4B_OPCODES) },
{ "mx66u51235f", INFO(0xc2253a, 0, 64 * 1024, 1024,
SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
{ "mx66l1g45g", INFO(0xc2201b, 0, 64 * 1024, 2048,
SECT_4K | SPI_NOR_DUAL_READ |
{ "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512) },
{ "mx66l51235f", INFO(0xc2201a, 0, 64 * 1024, 1024)
NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
{ "mx66u51235f", INFO(0xc2253a, 0, 64 * 1024, 1024)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
{ "mx66l1g45g", INFO(0xc2201b, 0, 64 * 1024, 2048)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048,
SPI_NOR_QUAD_READ) },
{ "mx66u2g45g", INFO(0xc2253c, 0, 64 * 1024, 4096,
SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
{ "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048)
NO_SFDP_FLAGS(SPI_NOR_QUAD_READ) },
{ "mx66u2g45g", INFO(0xc2253c, 0, 64 * 1024, 4096)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
};
static void macronix_default_init(struct spi_nor *nor)
......
......@@ -13,6 +13,7 @@
#define SPINOR_OP_MT_WR_ANY_REG 0x81 /* Write volatile register */
#define SPINOR_REG_MT_CFR0V 0x00 /* For setting octal DTR mode */
#define SPINOR_REG_MT_CFR1V 0x01 /* For setting dummy cycles */
#define SPINOR_REG_MT_CFR1V_DEF 0x1f /* Default dummy cycles */
#define SPINOR_MT_OCT_DTR 0xe7 /* Enable Octal DTR. */
#define SPINOR_MT_EXSPI 0xff /* Enable Extended SPI (default) */
......@@ -48,17 +49,28 @@ static int spi_nor_micron_octal_dtr_enable(struct spi_nor *nor, bool enable)
if (ret)
return ret;
if (enable)
*buf = SPINOR_MT_OCT_DTR;
else
*buf = SPINOR_MT_EXSPI;
if (enable) {
buf[0] = SPINOR_MT_OCT_DTR;
} else {
/*
* The register is 1-byte wide, but 1-byte transactions are not
* allowed in 8D-8D-8D mode. The next register is the dummy
* cycle configuration register. Since the transaction needs to
* be at least 2 bytes wide, set the next register to its
* default value. This also makes sense because the value was
* changed when enabling 8D-8D-8D mode, it should be reset when
* disabling.
*/
buf[0] = SPINOR_MT_EXSPI;
buf[1] = SPINOR_REG_MT_CFR1V_DEF;
}
op = (struct spi_mem_op)
SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_MT_WR_ANY_REG, 1),
SPI_MEM_OP_ADDR(enable ? 3 : 4,
SPINOR_REG_MT_CFR0V, 1),
SPI_MEM_OP_NO_DUMMY,
SPI_MEM_OP_DATA_OUT(1, buf, 1));
SPI_MEM_OP_DATA_OUT(enable ? 1 : 2, buf, 1));
if (!enable)
spi_nor_spimem_setup_op(nor, &op, SNOR_PROTO_8_8_8_DTR);
......@@ -113,116 +125,128 @@ static void mt35xu512aba_post_sfdp_fixup(struct spi_nor *nor)
nor->params->quad_enable = NULL;
}
static struct spi_nor_fixups mt35xu512aba_fixups = {
static const struct spi_nor_fixups mt35xu512aba_fixups = {
.default_init = mt35xu512aba_default_init,
.post_sfdp = mt35xu512aba_post_sfdp_fixup,
};
static const struct flash_info micron_parts[] = {
{ "mt35xu512aba", INFO(0x2c5b1a, 0, 128 * 1024, 512,
SECT_4K | USE_FSR | SPI_NOR_OCTAL_READ |
SPI_NOR_4B_OPCODES | SPI_NOR_OCTAL_DTR_READ |
SPI_NOR_OCTAL_DTR_PP |
SPI_NOR_IO_MODE_EN_VOLATILE)
.fixups = &mt35xu512aba_fixups},
{ "mt35xu02g", INFO(0x2c5b1c, 0, 128 * 1024, 2048,
SECT_4K | USE_FSR | SPI_NOR_OCTAL_READ |
SPI_NOR_4B_OPCODES) },
{ "mt35xu512aba", INFO(0x2c5b1a, 0, 128 * 1024, 512)
FLAGS(USE_FSR)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_OCTAL_READ |
SPI_NOR_OCTAL_DTR_READ | SPI_NOR_OCTAL_DTR_PP)
FIXUP_FLAGS(SPI_NOR_4B_OPCODES | SPI_NOR_IO_MODE_EN_VOLATILE)
.fixups = &mt35xu512aba_fixups},
{ "mt35xu02g", INFO(0x2c5b1c, 0, 128 * 1024, 2048)
FLAGS(USE_FSR)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_OCTAL_READ)
FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
};
static const struct flash_info st_parts[] = {
{ "n25q016a", INFO(0x20bb15, 0, 64 * 1024, 32,
SECT_4K | SPI_NOR_QUAD_READ) },
{ "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64,
{ "n25q016a", INFO(0x20bb15, 0, 64 * 1024, 32)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ) },
{ "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64)
NO_SFDP_FLAGS(SPI_NOR_QUAD_READ) },
{ "n25q032a", INFO(0x20bb16, 0, 64 * 1024, 64)
NO_SFDP_FLAGS(SPI_NOR_QUAD_READ) },
{ "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ) },
{ "n25q064a", INFO(0x20bb17, 0, 64 * 1024, 128)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ) },
{ "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
SPI_NOR_BP3_SR_BIT6 | USE_FSR)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ) },
{ "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
SPI_NOR_BP3_SR_BIT6 | USE_FSR)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ) },
{ "mt25ql256a", INFO6(0x20ba19, 0x104400, 64 * 1024, 512)
FLAGS(USE_FSR)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
{ "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512)
FLAGS(USE_FSR)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "n25q032a", INFO(0x20bb16, 0, 64 * 1024, 64,
{ "mt25qu256a", INFO6(0x20bb19, 0x104400, 64 * 1024, 512)
FLAGS(USE_FSR)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
{ "n25q256ax1", INFO(0x20bb19, 0, 64 * 1024, 512)
FLAGS(USE_FSR)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ) },
{ "mt25ql512a", INFO6(0x20ba20, 0x104400, 64 * 1024, 1024)
FLAGS(USE_FSR)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
{ "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
SPI_NOR_BP3_SR_BIT6 | USE_FSR)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ) },
{ "mt25qu512a", INFO6(0x20bb20, 0x104400, 64 * 1024, 1024)
FLAGS(USE_FSR)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
{ "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
SPI_NOR_BP3_SR_BIT6 | USE_FSR)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ) },
{ "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
SPI_NOR_BP3_SR_BIT6 | NO_CHIP_ERASE | USE_FSR)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ) },
{ "n25q00a", INFO(0x20bb21, 0, 64 * 1024, 2048)
FLAGS(NO_CHIP_ERASE | USE_FSR)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ) },
{ "mt25ql02g", INFO(0x20ba22, 0, 64 * 1024, 4096)
FLAGS(NO_CHIP_ERASE | USE_FSR)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ) },
{ "mt25qu02g", INFO(0x20bb22, 0, 64 * 1024, 4096)
FLAGS(NO_CHIP_ERASE | USE_FSR)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128,
SECT_4K | SPI_NOR_QUAD_READ) },
{ "n25q064a", INFO(0x20bb17, 0, 64 * 1024, 128,
SECT_4K | SPI_NOR_QUAD_READ) },
{ "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256,
SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB |
SPI_NOR_4BIT_BP | SPI_NOR_BP3_SR_BIT6) },
{ "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256,
SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB |
SPI_NOR_4BIT_BP | SPI_NOR_BP3_SR_BIT6) },
{ "mt25ql256a", INFO6(0x20ba19, 0x104400, 64 * 1024, 512,
SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
{ "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K |
USE_FSR | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "mt25qu256a", INFO6(0x20bb19, 0x104400, 64 * 1024, 512,
SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
{ "n25q256ax1", INFO(0x20bb19, 0, 64 * 1024, 512,
SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
{ "mt25ql512a", INFO6(0x20ba20, 0x104400, 64 * 1024, 1024,
SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
{ "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024,
SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB |
SPI_NOR_4BIT_BP | SPI_NOR_BP3_SR_BIT6) },
{ "mt25qu512a", INFO6(0x20bb20, 0x104400, 64 * 1024, 1024,
SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
{ "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024,
SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB |
SPI_NOR_4BIT_BP | SPI_NOR_BP3_SR_BIT6) },
{ "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048,
SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB |
SPI_NOR_4BIT_BP | SPI_NOR_BP3_SR_BIT6 |
NO_CHIP_ERASE) },
{ "n25q00a", INFO(0x20bb21, 0, 64 * 1024, 2048,
SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
NO_CHIP_ERASE) },
{ "mt25ql02g", INFO(0x20ba22, 0, 64 * 1024, 4096,
SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
NO_CHIP_ERASE) },
{ "mt25qu02g", INFO(0x20bb22, 0, 64 * 1024, 4096,
SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
{ "m25p05", INFO(0x202010, 0, 32 * 1024, 2, 0) },
{ "m25p10", INFO(0x202011, 0, 32 * 1024, 4, 0) },
{ "m25p20", INFO(0x202012, 0, 64 * 1024, 4, 0) },
{ "m25p40", INFO(0x202013, 0, 64 * 1024, 8, 0) },
{ "m25p80", INFO(0x202014, 0, 64 * 1024, 16, 0) },
{ "m25p16", INFO(0x202015, 0, 64 * 1024, 32, 0) },
{ "m25p32", INFO(0x202016, 0, 64 * 1024, 64, 0) },
{ "m25p64", INFO(0x202017, 0, 64 * 1024, 128, 0) },
{ "m25p128", INFO(0x202018, 0, 256 * 1024, 64, 0) },
{ "m25p05-nonjedec", INFO(0, 0, 32 * 1024, 2, 0) },
{ "m25p10-nonjedec", INFO(0, 0, 32 * 1024, 4, 0) },
{ "m25p20-nonjedec", INFO(0, 0, 64 * 1024, 4, 0) },
{ "m25p40-nonjedec", INFO(0, 0, 64 * 1024, 8, 0) },
{ "m25p80-nonjedec", INFO(0, 0, 64 * 1024, 16, 0) },
{ "m25p16-nonjedec", INFO(0, 0, 64 * 1024, 32, 0) },
{ "m25p32-nonjedec", INFO(0, 0, 64 * 1024, 64, 0) },
{ "m25p64-nonjedec", INFO(0, 0, 64 * 1024, 128, 0) },
{ "m25p128-nonjedec", INFO(0, 0, 256 * 1024, 64, 0) },
{ "m45pe10", INFO(0x204011, 0, 64 * 1024, 2, 0) },
{ "m45pe80", INFO(0x204014, 0, 64 * 1024, 16, 0) },
{ "m45pe16", INFO(0x204015, 0, 64 * 1024, 32, 0) },
{ "m25pe20", INFO(0x208012, 0, 64 * 1024, 4, 0) },
{ "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) },
{ "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) },
{ "m25px16", INFO(0x207115, 0, 64 * 1024, 32, SECT_4K) },
{ "m25px32", INFO(0x207116, 0, 64 * 1024, 64, SECT_4K) },
{ "m25px32-s0", INFO(0x207316, 0, 64 * 1024, 64, SECT_4K) },
{ "m25px32-s1", INFO(0x206316, 0, 64 * 1024, 64, SECT_4K) },
{ "m25px64", INFO(0x207117, 0, 64 * 1024, 128, 0) },
{ "m25px80", INFO(0x207114, 0, 64 * 1024, 16, 0) },
{ "m25p05", INFO(0x202010, 0, 32 * 1024, 2) },
{ "m25p10", INFO(0x202011, 0, 32 * 1024, 4) },
{ "m25p20", INFO(0x202012, 0, 64 * 1024, 4) },
{ "m25p40", INFO(0x202013, 0, 64 * 1024, 8) },
{ "m25p80", INFO(0x202014, 0, 64 * 1024, 16) },
{ "m25p16", INFO(0x202015, 0, 64 * 1024, 32) },
{ "m25p32", INFO(0x202016, 0, 64 * 1024, 64) },
{ "m25p64", INFO(0x202017, 0, 64 * 1024, 128) },
{ "m25p128", INFO(0x202018, 0, 256 * 1024, 64) },
{ "m25p05-nonjedec", INFO(0, 0, 32 * 1024, 2) },
{ "m25p10-nonjedec", INFO(0, 0, 32 * 1024, 4) },
{ "m25p20-nonjedec", INFO(0, 0, 64 * 1024, 4) },
{ "m25p40-nonjedec", INFO(0, 0, 64 * 1024, 8) },
{ "m25p80-nonjedec", INFO(0, 0, 64 * 1024, 16) },
{ "m25p16-nonjedec", INFO(0, 0, 64 * 1024, 32) },
{ "m25p32-nonjedec", INFO(0, 0, 64 * 1024, 64) },
{ "m25p64-nonjedec", INFO(0, 0, 64 * 1024, 128) },
{ "m25p128-nonjedec", INFO(0, 0, 256 * 1024, 64) },
{ "m45pe10", INFO(0x204011, 0, 64 * 1024, 2) },
{ "m45pe80", INFO(0x204014, 0, 64 * 1024, 16) },
{ "m45pe16", INFO(0x204015, 0, 64 * 1024, 32) },
{ "m25pe20", INFO(0x208012, 0, 64 * 1024, 4) },
{ "m25pe80", INFO(0x208014, 0, 64 * 1024, 16) },
{ "m25pe16", INFO(0x208015, 0, 64 * 1024, 32)
NO_SFDP_FLAGS(SECT_4K) },
{ "m25px16", INFO(0x207115, 0, 64 * 1024, 32)
NO_SFDP_FLAGS(SECT_4K) },
{ "m25px32", INFO(0x207116, 0, 64 * 1024, 64)
NO_SFDP_FLAGS(SECT_4K) },
{ "m25px32-s0", INFO(0x207316, 0, 64 * 1024, 64)
NO_SFDP_FLAGS(SECT_4K) },
{ "m25px32-s1", INFO(0x206316, 0, 64 * 1024, 64)
NO_SFDP_FLAGS(SECT_4K) },
{ "m25px64", INFO(0x207117, 0, 64 * 1024, 128) },
{ "m25px80", INFO(0x207114, 0, 64 * 1024, 16) },
};
/**
......
......@@ -480,7 +480,7 @@ static int spi_nor_mtd_otp_lock(struct mtd_info *mtd, loff_t from, size_t len)
return ret;
}
void spi_nor_otp_init(struct spi_nor *nor)
void spi_nor_set_mtd_otp_ops(struct spi_nor *nor)
{
struct mtd_info *mtd = &nor->mtd;
......
......@@ -1228,6 +1228,25 @@ static int spi_nor_parse_sccr(struct spi_nor *nor,
return ret;
}
/**
* spi_nor_post_sfdp_fixups() - Updates the flash's parameters and settings
* after SFDP has been parsed. Called only for flashes that define JESD216 SFDP
* tables.
* @nor: pointer to a 'struct spi_nor'
*
* Used to tweak various flash parameters when information provided by the SFDP
 * tables is wrong.
*/
static void spi_nor_post_sfdp_fixups(struct spi_nor *nor)
{
if (nor->manufacturer && nor->manufacturer->fixups &&
nor->manufacturer->fixups->post_sfdp)
nor->manufacturer->fixups->post_sfdp(nor);
if (nor->info->fixups && nor->info->fixups->post_sfdp)
nor->info->fixups->post_sfdp(nor);
}
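A part-specific post_sfdp fixup typically looks like the sketch below (the part name is hypothetical; spi_nor_sr1_bit6_quad_enable is the helper already used by the gigadevice entry earlier in this diff):

/* Hypothetical fixup: the flash's BFPT advertises the wrong Quad Enable
 * method, so override it once SFDP parsing is done.
 */
static void xx25q256_post_sfdp_fixup(struct spi_nor *nor)
{
	nor->params->quad_enable = spi_nor_sr1_bit6_quad_enable;
}

static const struct spi_nor_fixups xx25q256_fixups = {
	.post_sfdp = xx25q256_post_sfdp_fixup,
};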
/**
* spi_nor_parse_sfdp() - parse the Serial Flash Discoverable Parameters.
* @nor: pointer to a 'struct spi_nor'
......@@ -1408,6 +1427,7 @@ int spi_nor_parse_sfdp(struct spi_nor *nor)
}
}
spi_nor_post_sfdp_fixups(nor);
exit:
kfree(param_headers);
return err;
......
......@@ -65,10 +65,18 @@ static int spi_nor_cypress_octal_dtr_enable(struct spi_nor *nor, bool enable)
if (ret)
return ret;
if (enable)
*buf = SPINOR_REG_CYPRESS_CFR5V_OCT_DTR_EN;
else
*buf = SPINOR_REG_CYPRESS_CFR5V_OCT_DTR_DS;
if (enable) {
buf[0] = SPINOR_REG_CYPRESS_CFR5V_OCT_DTR_EN;
} else {
/*
* The register is 1-byte wide, but 1-byte transactions are not
* allowed in 8D-8D-8D mode. Since there is no register at the
* next location, just initialize the value to 0 and let the
* transaction go on.
*/
buf[0] = SPINOR_REG_CYPRESS_CFR5V_OCT_DTR_DS;
buf[1] = 0;
}
op = (struct spi_mem_op)
SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WR_ANY_REG, 1),
......@@ -76,7 +84,7 @@ static int spi_nor_cypress_octal_dtr_enable(struct spi_nor *nor, bool enable)
SPINOR_REG_CYPRESS_CFR5V,
1),
SPI_MEM_OP_NO_DUMMY,
SPI_MEM_OP_DATA_OUT(1, buf, 1));
SPI_MEM_OP_DATA_OUT(enable ? 1 : 2, buf, 1));
if (!enable)
spi_nor_spimem_setup_op(nor, &op, SNOR_PROTO_8_8_8_DTR);
......@@ -168,7 +176,7 @@ static int s28hs512t_post_bfpt_fixup(struct spi_nor *nor,
return 0;
}
static struct spi_nor_fixups s28hs512t_fixups = {
static const struct spi_nor_fixups s28hs512t_fixups = {
.default_init = s28hs512t_default_init,
.post_sfdp = s28hs512t_post_sfdp_fixup,
.post_bfpt = s28hs512t_post_bfpt_fixup,
......@@ -190,7 +198,7 @@ s25fs_s_post_bfpt_fixups(struct spi_nor *nor,
return 0;
}
static struct spi_nor_fixups s25fs_s_fixups = {
static const struct spi_nor_fixups s25fs_s_fixups = {
.post_bfpt = s25fs_s_post_bfpt_fixups,
};
......@@ -198,85 +206,95 @@ static const struct flash_info spansion_parts[] = {
/* Spansion/Cypress -- single (large) sector size only, at least
* for the chips listed here (without boot sectors).
*/
{ "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64,
SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128,
SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "s25fl128s0", INFO6(0x012018, 0x4d0080, 256 * 1024, 64,
SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
USE_CLSR) },
{ "s25fl128s1", INFO6(0x012018, 0x4d0180, 64 * 1024, 256,
SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
USE_CLSR) },
{ "s25fl256s0", INFO6(0x010219, 0x4d0080, 256 * 1024, 128,
SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
USE_CLSR) },
{ "s25fl256s1", INFO6(0x010219, 0x4d0180, 64 * 1024, 512,
SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
USE_CLSR) },
{ "s25fl512s", INFO6(0x010220, 0x4d0080, 256 * 1024, 256,
SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_HAS_LOCK | USE_CLSR) },
{ "s25fs128s1", INFO6(0x012018, 0x4d0181, 64 * 1024, 256,
SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR)
.fixups = &s25fs_s_fixups, },
{ "s25fs256s0", INFO6(0x010219, 0x4d0081, 256 * 1024, 128,
SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
USE_CLSR) },
{ "s25fs256s1", INFO6(0x010219, 0x4d0181, 64 * 1024, 512,
SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
USE_CLSR) },
{ "s25fs512s", INFO6(0x010220, 0x4d0081, 256 * 1024, 256,
SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR)
.fixups = &s25fs_s_fixups, },
{ "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) },
{ "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
{ "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64,
SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
USE_CLSR) },
{ "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256,
SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
USE_CLSR) },
{ "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) },
{ "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) },
{ "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32, 0) },
{ "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0) },
{ "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0) },
{ "s25fl004k", INFO(0xef4013, 0, 64 * 1024, 8,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "s25fl008k", INFO(0xef4014, 0, 64 * 1024, 16,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "s25fl116k", INFO(0x014015, 0, 64 * 1024, 32,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "s25fl132k", INFO(0x014016, 0, 64 * 1024, 64, SECT_4K) },
{ "s25fl164k", INFO(0x014017, 0, 64 * 1024, 128, SECT_4K) },
{ "s25fl204k", INFO(0x014013, 0, 64 * 1024, 8,
SECT_4K | SPI_NOR_DUAL_READ) },
{ "s25fl208k", INFO(0x014014, 0, 64 * 1024, 16,
SECT_4K | SPI_NOR_DUAL_READ) },
{ "s25fl064l", INFO(0x016017, 0, 64 * 1024, 128,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_4B_OPCODES) },
{ "s25fl128l", INFO(0x016018, 0, 64 * 1024, 256,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_4B_OPCODES) },
{ "s25fl256l", INFO(0x016019, 0, 64 * 1024, 512,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_4B_OPCODES) },
{ "cy15x104q", INFO6(0x042cc2, 0x7f7f7f, 512 * 1024, 1,
SPI_NOR_NO_ERASE) },
{ "s28hs512t", INFO(0x345b1a, 0, 256 * 1024, 256,
SECT_4K | SPI_NOR_OCTAL_DTR_READ |
{ "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64)
NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128)
NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "s25fl128s0", INFO6(0x012018, 0x4d0080, 256 * 1024, 64)
FLAGS(USE_CLSR)
NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "s25fl128s1", INFO6(0x012018, 0x4d0180, 64 * 1024, 256)
FLAGS(USE_CLSR)
NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "s25fl256s0", INFO6(0x010219, 0x4d0080, 256 * 1024, 128)
FLAGS(USE_CLSR)
NO_SFDP_FLAGS(SPI_NOR_SKIP_SFDP | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "s25fl256s1", INFO6(0x010219, 0x4d0180, 64 * 1024, 512)
FLAGS(USE_CLSR)
NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "s25fl512s", INFO6(0x010220, 0x4d0080, 256 * 1024, 256)
FLAGS(SPI_NOR_HAS_LOCK | USE_CLSR)
NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "s25fs128s1", INFO6(0x012018, 0x4d0181, 64 * 1024, 256)
FLAGS(USE_CLSR)
NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
.fixups = &s25fs_s_fixups, },
{ "s25fs256s0", INFO6(0x010219, 0x4d0081, 256 * 1024, 128)
FLAGS(USE_CLSR)
NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "s25fs256s1", INFO6(0x010219, 0x4d0181, 64 * 1024, 512)
FLAGS(USE_CLSR)
NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "s25fs512s", INFO6(0x010220, 0x4d0081, 256 * 1024, 256)
FLAGS(USE_CLSR)
NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
.fixups = &s25fs_s_fixups, },
{ "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64) },
{ "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256) },
{ "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64)
FLAGS(USE_CLSR)
NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256)
FLAGS(USE_CLSR)
NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8) },
{ "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16) },
{ "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32) },
{ "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64) },
{ "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128) },
{ "s25fl004k", INFO(0xef4013, 0, 64 * 1024, 8)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "s25fl008k", INFO(0xef4014, 0, 64 * 1024, 16)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "s25fl116k", INFO(0x014015, 0, 64 * 1024, 32)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "s25fl132k", INFO(0x014016, 0, 64 * 1024, 64)
NO_SFDP_FLAGS(SECT_4K) },
{ "s25fl164k", INFO(0x014017, 0, 64 * 1024, 128)
NO_SFDP_FLAGS(SECT_4K) },
{ "s25fl204k", INFO(0x014013, 0, 64 * 1024, 8)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
{ "s25fl208k", INFO(0x014014, 0, 64 * 1024, 16)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
{ "s25fl064l", INFO(0x016017, 0, 64 * 1024, 128)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
{ "s25fl128l", INFO(0x016018, 0, 64 * 1024, 256)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
{ "s25fl256l", INFO(0x016019, 0, 64 * 1024, 512)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
{ "cy15x104q", INFO6(0x042cc2, 0x7f7f7f, 512 * 1024, 1)
FLAGS(SPI_NOR_NO_ERASE) },
{ "s28hs512t", INFO(0x345b1a, 0, 256 * 1024, 256)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_OCTAL_DTR_READ |
SPI_NOR_OCTAL_DTR_PP)
.fixups = &s28hs512t_fixups,
.fixups = &s28hs512t_fixups,
},
};
static void spansion_post_sfdp_fixups(struct spi_nor *nor)
static void spansion_late_init(struct spi_nor *nor)
{
if (nor->params->size <= SZ_16M)
return;
......@@ -288,7 +306,7 @@ static void spansion_post_sfdp_fixups(struct spi_nor *nor)
}
static const struct spi_nor_fixups spansion_fixups = {
.post_sfdp = spansion_post_sfdp_fixups,
.late_init = spansion_late_init,
};
const struct spi_nor_manufacturer spi_nor_spansion = {
......
......@@ -8,6 +8,9 @@
#include "core.h"
/* SST flash_info mfr_flag. Used to specify SST byte programming. */
#define SST_WRITE BIT(0)
#define SST26VF_CR_BPNV BIT(3)
static int sst26vf_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
......@@ -46,48 +49,71 @@ static const struct spi_nor_locking_ops sst26vf_locking_ops = {
.is_locked = sst26vf_is_locked,
};
static void sst26vf_default_init(struct spi_nor *nor)
static void sst26vf_late_init(struct spi_nor *nor)
{
nor->params->locking_ops = &sst26vf_locking_ops;
}
static const struct spi_nor_fixups sst26vf_fixups = {
.default_init = sst26vf_default_init,
.late_init = sst26vf_late_init,
};
static const struct flash_info sst_parts[] = {
/* SST -- large erase sizes are "overlays", "sectors" are 4K */
{ "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8,
SECT_4K | SST_WRITE | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
{ "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16,
SECT_4K | SST_WRITE | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
{ "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32,
SECT_4K | SST_WRITE | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
{ "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64,
SECT_4K | SST_WRITE | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
{ "sst25vf064c", INFO(0xbf254b, 0, 64 * 1024, 128,
SECT_4K | SPI_NOR_4BIT_BP | SPI_NOR_HAS_LOCK |
SPI_NOR_SWP_IS_VOLATILE) },
{ "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1,
SECT_4K | SST_WRITE | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
{ "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2,
SECT_4K | SST_WRITE | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
{ "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4,
SECT_4K | SST_WRITE | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
{ "sst25wf020a", INFO(0x621612, 0, 64 * 1024, 4, SECT_4K | SPI_NOR_HAS_LOCK) },
{ "sst25wf040b", INFO(0x621613, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_HAS_LOCK) },
{ "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8,
SECT_4K | SST_WRITE | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
{ "sst25wf080", INFO(0xbf2505, 0, 64 * 1024, 16,
SECT_4K | SST_WRITE | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
{ "sst26wf016b", INFO(0xbf2651, 0, 64 * 1024, 32,
SECT_4K | SPI_NOR_DUAL_READ |
{ "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
NO_SFDP_FLAGS(SECT_4K)
MFR_FLAGS(SST_WRITE) },
{ "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
NO_SFDP_FLAGS(SECT_4K)
MFR_FLAGS(SST_WRITE) },
{ "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
NO_SFDP_FLAGS(SECT_4K)
MFR_FLAGS(SST_WRITE) },
{ "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
NO_SFDP_FLAGS(SECT_4K)
MFR_FLAGS(SST_WRITE) },
{ "sst25vf064c", INFO(0xbf254b, 0, 64 * 1024, 128)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_4BIT_BP |
SPI_NOR_SWP_IS_VOLATILE)
NO_SFDP_FLAGS(SECT_4K) },
{ "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
NO_SFDP_FLAGS(SECT_4K)
MFR_FLAGS(SST_WRITE) },
{ "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
NO_SFDP_FLAGS(SECT_4K)
MFR_FLAGS(SST_WRITE) },
{ "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
NO_SFDP_FLAGS(SECT_4K)
MFR_FLAGS(SST_WRITE) },
{ "sst25wf020a", INFO(0x621612, 0, 64 * 1024, 4)
FLAGS(SPI_NOR_HAS_LOCK)
NO_SFDP_FLAGS(SECT_4K) },
{ "sst25wf040b", INFO(0x621613, 0, 64 * 1024, 8)
FLAGS(SPI_NOR_HAS_LOCK)
NO_SFDP_FLAGS(SECT_4K) },
{ "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
NO_SFDP_FLAGS(SECT_4K)
MFR_FLAGS(SST_WRITE) },
{ "sst25wf080", INFO(0xbf2505, 0, 64 * 1024, 16)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
NO_SFDP_FLAGS(SECT_4K)
MFR_FLAGS(SST_WRITE) },
{ "sst26wf016b", INFO(0xbf2651, 0, 64 * 1024, 32)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "sst26vf016b", INFO(0xbf2641, 0, 64 * 1024, 32,
SECT_4K | SPI_NOR_DUAL_READ) },
{ "sst26vf064b", INFO(0xbf2643, 0, 64 * 1024, 128,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
{ "sst26vf016b", INFO(0xbf2641, 0, 64 * 1024, 32)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
{ "sst26vf064b", INFO(0xbf2643, 0, 64 * 1024, 128)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
.fixups = &sst26vf_fixups },
};
......@@ -177,14 +203,14 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
return ret;
}
static void sst_post_sfdp_fixups(struct spi_nor *nor)
static void sst_late_init(struct spi_nor *nor)
{
if (nor->info->flags & SST_WRITE)
if (nor->info->mfr_flags & SST_WRITE)
nor->mtd._write = sst_write;
}
static const struct spi_nor_fixups sst_fixups = {
.post_sfdp = sst_post_sfdp_fixups,
.late_init = sst_late_init,
};
const struct spi_nor_manufacturer spi_nor_sst = {
......
......@@ -414,7 +414,7 @@ void spi_nor_try_unlock_all(struct spi_nor *nor)
dev_dbg(nor->dev, "Failed to unlock the entire flash memory array\n");
}
void spi_nor_register_locking_ops(struct spi_nor *nor)
void spi_nor_set_mtd_locking_ops(struct spi_nor *nor)
{
struct mtd_info *mtd = &nor->mtd;
......
......@@ -28,80 +28,105 @@ w25q256_post_bfpt_fixups(struct spi_nor *nor,
return 0;
}
static struct spi_nor_fixups w25q256_fixups = {
static const struct spi_nor_fixups w25q256_fixups = {
.post_bfpt = w25q256_post_bfpt_fixups,
};
static const struct flash_info winbond_parts[] = {
/* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
{ "w25x05", INFO(0xef3010, 0, 64 * 1024, 1, SECT_4K) },
{ "w25x10", INFO(0xef3011, 0, 64 * 1024, 2, SECT_4K) },
{ "w25x20", INFO(0xef3012, 0, 64 * 1024, 4, SECT_4K) },
{ "w25x40", INFO(0xef3013, 0, 64 * 1024, 8, SECT_4K) },
{ "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K) },
{ "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) },
{ "w25q16dw", INFO(0xef6015, 0, 64 * 1024, 32,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
{ "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) },
{ "w25q16jv-im/jm", INFO(0xef7015, 0, 64 * 1024, 32,
SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK |
SPI_NOR_HAS_TB) },
{ "w25q20cl", INFO(0xef4012, 0, 64 * 1024, 4, SECT_4K) },
{ "w25q20bw", INFO(0xef5012, 0, 64 * 1024, 4, SECT_4K) },
{ "w25q20ew", INFO(0xef6012, 0, 64 * 1024, 4, SECT_4K) },
{ "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) },
{ "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
OTP_INFO(256, 3, 0x1000, 0x1000)
},
{ "w25q32jv", INFO(0xef7016, 0, 64 * 1024, 64,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
},
{ "w25q32jwm", INFO(0xef8016, 0, 64 * 1024, 64,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
OTP_INFO(256, 3, 0x1000, 0x1000) },
{ "w25q64jwm", INFO(0xef8017, 0, 64 * 1024, 128,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
{ "w25q128jwm", INFO(0xef8018, 0, 64 * 1024, 256,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
{ "w25q256jwm", INFO(0xef8019, 0, 64 * 1024, 512,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
{ "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
{ "w25q64", INFO(0xef4017, 0, 64 * 1024, 128,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
{ "w25q64jvm", INFO(0xef7017, 0, 64 * 1024, 128, SECT_4K) },
{ "w25q128fw", INFO(0xef6018, 0, 64 * 1024, 256,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
{ "w25q128jv", INFO(0xef7018, 0, 64 * 1024, 256,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
{ "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) },
{ "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
{ "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
{ "w25q256", INFO(0xef4019, 0, 64 * 1024, 512,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
.fixups = &w25q256_fixups },
{ "w25q256jvm", INFO(0xef7019, 0, 64 * 1024, 512,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "w25q256jw", INFO(0xef6019, 0, 64 * 1024, 512,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "w25m512jv", INFO(0xef7119, 0, 64 * 1024, 1024,
SECT_4K | SPI_NOR_QUAD_READ | SPI_NOR_DUAL_READ) },
{ "w25q512jvq", INFO(0xef4020, 0, 64 * 1024, 1024,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "w25x05", INFO(0xef3010, 0, 64 * 1024, 1)
NO_SFDP_FLAGS(SECT_4K) },
{ "w25x10", INFO(0xef3011, 0, 64 * 1024, 2)
NO_SFDP_FLAGS(SECT_4K) },
{ "w25x20", INFO(0xef3012, 0, 64 * 1024, 4)
NO_SFDP_FLAGS(SECT_4K) },
{ "w25x40", INFO(0xef3013, 0, 64 * 1024, 8)
NO_SFDP_FLAGS(SECT_4K) },
{ "w25x80", INFO(0xef3014, 0, 64 * 1024, 16)
NO_SFDP_FLAGS(SECT_4K) },
{ "w25x16", INFO(0xef3015, 0, 64 * 1024, 32)
NO_SFDP_FLAGS(SECT_4K) },
{ "w25q16dw", INFO(0xef6015, 0, 64 * 1024, 32)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "w25x32", INFO(0xef3016, 0, 64 * 1024, 64)
NO_SFDP_FLAGS(SECT_4K) },
{ "w25q16jv-im/jm", INFO(0xef7015, 0, 64 * 1024, 32)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "w25q20cl", INFO(0xef4012, 0, 64 * 1024, 4)
NO_SFDP_FLAGS(SECT_4K) },
{ "w25q20bw", INFO(0xef5012, 0, 64 * 1024, 4)
NO_SFDP_FLAGS(SECT_4K) },
{ "w25q20ew", INFO(0xef6012, 0, 64 * 1024, 4)
NO_SFDP_FLAGS(SECT_4K) },
{ "w25q32", INFO(0xef4016, 0, 64 * 1024, 64)
NO_SFDP_FLAGS(SECT_4K) },
{ "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
OTP_INFO(256, 3, 0x1000, 0x1000) },
{ "w25q32jv", INFO(0xef7016, 0, 64 * 1024, 64)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "w25q32jwm", INFO(0xef8016, 0, 64 * 1024, 64)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
OTP_INFO(256, 3, 0x1000, 0x1000) },
{ "w25q64jwm", INFO(0xef8017, 0, 64 * 1024, 128)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "w25q128jwm", INFO(0xef8018, 0, 64 * 1024, 256)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "w25q256jwm", INFO(0xef8019, 0, 64 * 1024, 512)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "w25x64", INFO(0xef3017, 0, 64 * 1024, 128)
NO_SFDP_FLAGS(SECT_4K) },
{ "w25q64", INFO(0xef4017, 0, 64 * 1024, 128)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "w25q64jvm", INFO(0xef7017, 0, 64 * 1024, 128)
NO_SFDP_FLAGS(SECT_4K) },
{ "w25q128fw", INFO(0xef6018, 0, 64 * 1024, 256)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "w25q128jv", INFO(0xef7018, 0, 64 * 1024, 256)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "w25q80", INFO(0xef5014, 0, 64 * 1024, 16)
NO_SFDP_FLAGS(SECT_4K) },
{ "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16)
NO_SFDP_FLAGS(SECT_4K) },
{ "w25q128", INFO(0xef4018, 0, 64 * 1024, 256)
NO_SFDP_FLAGS(SECT_4K) },
{ "w25q256", INFO(0xef4019, 0, 64 * 1024, 512)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
.fixups = &w25q256_fixups },
{ "w25q256jvm", INFO(0xef7019, 0, 64 * 1024, 512)
PARSE_SFDP },
{ "w25q256jw", INFO(0xef6019, 0, 64 * 1024, 512)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "w25m512jv", INFO(0xef7119, 0, 64 * 1024, 1024)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ |
SPI_NOR_DUAL_READ) },
{ "w25q512jvq", INFO(0xef4020, 0, 64 * 1024, 1024)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
};
/**
......@@ -147,12 +172,17 @@ static const struct spi_nor_otp_ops winbond_otp_ops = {
static void winbond_default_init(struct spi_nor *nor)
{
nor->params->set_4byte_addr_mode = winbond_set_4byte_addr_mode;
}
static void winbond_late_init(struct spi_nor *nor)
{
if (nor->params->otp.org->n_regions)
nor->params->otp.ops = &winbond_otp_ops;
}
static const struct spi_nor_fixups winbond_fixups = {
.default_init = winbond_default_init,
.late_init = winbond_late_init,
};
const struct spi_nor_manufacturer spi_nor_winbond = {
......
......@@ -28,11 +28,12 @@ static const struct flash_info xilinx_parts[] = {
*/
static u32 s3an_convert_addr(struct spi_nor *nor, u32 addr)
{
u32 page_size = nor->params->page_size;
u32 offset, page;
offset = addr % nor->page_size;
page = addr / nor->page_size;
page <<= (nor->page_size > 512) ? 10 : 9;
offset = addr % page_size;
page = addr / page_size;
page <<= (page_size > 512) ? 10 : 9;
return page | offset;
}
......@@ -40,6 +41,7 @@ static u32 s3an_convert_addr(struct spi_nor *nor, u32 addr)
static int xilinx_nor_setup(struct spi_nor *nor,
const struct spi_nor_hwcaps *hwcaps)
{
u32 page_size;
int ret;
ret = spi_nor_xread_sr(nor, nor->bouncebuf);
......@@ -64,10 +66,11 @@ static int xilinx_nor_setup(struct spi_nor *nor,
*/
if (nor->bouncebuf[0] & XSR_PAGESIZE) {
/* Flash in Power of 2 mode */
nor->page_size = (nor->page_size == 264) ? 256 : 512;
nor->mtd.writebufsize = nor->page_size;
nor->mtd.size = 8 * nor->page_size * nor->info->n_sectors;
nor->mtd.erasesize = 8 * nor->page_size;
page_size = (nor->params->page_size == 264) ? 256 : 512;
nor->params->page_size = page_size;
nor->mtd.writebufsize = page_size;
nor->params->size = 8 * page_size * nor->info->n_sectors;
nor->mtd.erasesize = 8 * page_size;
} else {
/* Flash in Default addressing mode */
nor->params->convert_addr = s3an_convert_addr;
......@@ -77,13 +80,13 @@ static int xilinx_nor_setup(struct spi_nor *nor,
return 0;
}
static void xilinx_post_sfdp_fixups(struct spi_nor *nor)
static void xilinx_late_init(struct spi_nor *nor)
{
nor->params->setup = xilinx_nor_setup;
}
static const struct spi_nor_fixups xilinx_fixups = {
.post_sfdp = xilinx_post_sfdp_fixups,
.late_init = xilinx_late_init,
};
const struct spi_nor_manufacturer spi_nor_xilinx = {
......
......@@ -10,10 +10,12 @@
static const struct flash_info xmc_parts[] = {
/* XMC (Wuhan Xinxin Semiconductor Manufacturing Corp.) */
{ "XM25QH64A", INFO(0x207017, 0, 64 * 1024, 128,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "XM25QH128A", INFO(0x207018, 0, 64 * 1024, 256,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "XM25QH64A", INFO(0x207017, 0, 64 * 1024, 128)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "XM25QH128A", INFO(0x207018, 0, 64 * 1024, 256)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
};
const struct spi_nor_manufacturer spi_nor_xmc = {
......
/*
* Interface for NOR flash driver whose high address lines are latched
*
* Copyright © 2008 MontaVista Software, Inc. <source@mvista.com>
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
#ifndef __LATCH_ADDR_FLASH__
#define __LATCH_ADDR_FLASH__
struct map_info;
struct mtd_partition;
struct latch_addr_flash_data {
unsigned int width;
unsigned int size;
int (*init)(void *data, int cs);
void (*done)(void *data);
void (*set_window)(unsigned long offset, void *data);
void *data;
unsigned int nr_parts;
struct mtd_partition *parts;
};
#endif
......@@ -711,4 +711,7 @@ static inline int mtd_is_bitflip_or_eccerr(int err) {
unsigned mtd_mmap_capabilities(struct mtd_info *mtd);
extern char *mtd_expert_analysis_warning;
extern bool mtd_expert_analysis_mode;
#endif /* __MTD_MTD_H__ */
......@@ -1539,6 +1539,8 @@ int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
bool force_8bit, bool check_only);
int nand_write_data_op(struct nand_chip *chip, const void *buf,
unsigned int len, bool force_8bit);
int nand_read_page_hwecc_oob_first(struct nand_chip *chip, uint8_t *buf,
int oob_required, int page);
/* Scan and identify a NAND device */
int nand_scan_with_ids(struct nand_chip *chip, unsigned int max_chips,
......
......@@ -371,7 +371,6 @@ struct spi_nor_flash_parameter;
* @bouncebuf_size: size of the bounce buffer
* @info: SPI NOR part JEDEC MFR ID and other info
* @manufacturer: SPI NOR manufacturer
* @page_size: the page size of the SPI NOR
* @addr_width: number of address bytes
* @erase_opcode: the opcode for erasing a sector
* @read_opcode: the read opcode
......@@ -401,7 +400,6 @@ struct spi_nor {
size_t bouncebuf_size;
const struct flash_info *info;
const struct spi_nor_manufacturer *manufacturer;
u32 page_size;
u8 addr_width;
u8 erase_opcode;
u8 read_opcode;
......
......@@ -7,6 +7,7 @@
#define _MTD_NAND_OMAP2_H
#include <linux/mtd/partitions.h>
#include <linux/mod_devicetable.h>
#define GPMC_BCH_NUM_REMAINDER 8
......@@ -61,4 +62,11 @@ struct gpmc_nand_regs {
void __iomem *gpmc_bch_result5[GPMC_BCH_NUM_REMAINDER];
void __iomem *gpmc_bch_result6[GPMC_BCH_NUM_REMAINDER];
};
#endif
static const struct of_device_id omap_nand_ids[] = {
{ .compatible = "ti,omap2-nand", },
{ .compatible = "ti,am64-nand", },
{},
};
#endif /* _MTD_NAND_OMAP2_H */