Commit c309bfa9 authored by Linus Torvalds

Merge tag 'for-linus-20140808' of git://git.infradead.org/linux-mtd

Pull MTD updates from Brian Norris:
 "AMD-compatible CFI driver:
   - Support OTP programming for Micron M29EW family
   - Increase buffer write timeout, according to detected flash
     parameter info

  NAND
   - Add helpers for retrieving ONFI timing modes
   - GPMI: provide option to disable bad block marker swapping (required
     for Ka-On electronics platforms)

  SPI NOR
   - EON EN25QH128 support
   - Support the new Flag Status Register (FSR) on a few Micron flash chips

  Common
   - New sysfs entries for bad block and ECC stats

  And a few miscellaneous refactorings, cleanups, and driver
  improvements"

* tag 'for-linus-20140808' of git://git.infradead.org/linux-mtd: (31 commits)
  mtd: gpmi: make blockmark swapping optional
  mtd: gpmi: remove line breaks from error messages and improve wording
  mtd: gpmi: remove useless (void *) type casts and spaces between type casts and variables
  mtd: atmel_nand: NFC: support multiple interrupt handling
  mtd: atmel_nand: implement the nfc_device_ready() by checking the R/B bit
  mtd: atmel_nand: add NFC status error check
  mtd: atmel_nand: make ecc parameters same as definition
  mtd: nand: add ONFI timing mode to nand_timings converter
  mtd: nand: define struct nand_timings
  mtd: cfi_cmdset_0002: fix do_write_buffer() timeout error
  mtd: denali: use 8 bytes for READID command
  mtd/ftl: fix the double free of the buffers allocated in build_maps()
  mtd: phram: Fix whitespace issues
  mtd: spi-nor: add support for EON EN25QH128
  mtd: cfi_cmdset_0002: Add support for locking OTP memory
  mtd: cfi_cmdset_0002: Add support for writing OTP memory
  mtd: cfi_cmdset_0002: Invalidate cache after entering/exiting OTP memory
  mtd: cfi_cmdset_0002: Add support for reading OTP
  mtd: spi-nor: add support for flag status register on Micron chips
  mtd: Account for BBT blocks when a partition is being allocated
  ...
parents 9e9ac896 2a500afe
......@@ -184,3 +184,41 @@ Description:
It will always be a non-negative integer. In the case of
devices lacking any ECC capability, it is 0.

What: /sys/class/mtd/mtdX/ecc_failures
Date: June 2014
KernelVersion: 3.17
Contact: linux-mtd@lists.infradead.org
Description:
The number of failures reported by this device's ECC. Typically,
these failures are associated with failed read operations.

It will always be a non-negative integer. In the case of
devices lacking any ECC capability, it is 0.

What: /sys/class/mtd/mtdX/corrected_bits
Date: June 2014
KernelVersion: 3.17
Contact: linux-mtd@lists.infradead.org
Description:
The number of bits that have been corrected by means of the
device's ECC.

It will always be a non-negative integer. In the case of
devices lacking any ECC capability, it is 0.

What: /sys/class/mtd/mtdX/bad_blocks
Date: June 2014
KernelVersion: 3.17
Contact: linux-mtd@lists.infradead.org
Description:
The number of blocks marked as bad, if any, in this partition.

What: /sys/class/mtd/mtdX/bbt_blocks
Date: June 2014
KernelVersion: 3.17
Contact: linux-mtd@lists.infradead.org
Description:
The number of blocks that are marked as reserved, if any, in
this partition. These are typically used to store the in-flash
bad block table (BBT).
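
All four entries are read-only text attributes holding a single unsigned integer, so they can be polled with ordinary file I/O. A minimal userspace sketch, assuming a hypothetical mtd0 device (the path and program are illustrative, not part of the patch set):

#include <stdio.h>

int main(void)
{
	static const char *attrs[] = {
		"corrected_bits", "ecc_failures", "bad_blocks", "bbt_blocks",
	};
	unsigned int i;

	for (i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++) {
		char path[64];
		unsigned long value;
		FILE *f;

		snprintf(path, sizeof(path), "/sys/class/mtd/mtd0/%s", attrs[i]);
		f = fopen(path, "r");
		if (!f)
			continue;	/* attribute (or device) not present */
		if (fscanf(f, "%lu", &value) == 1)
			printf("%s: %lu\n", attrs[i], value);
		fclose(f);
	}
	return 0;
}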
......@@ -25,6 +25,16 @@ Optional properties:
discoverable or this property is not enabled,
the software may choose an implementation-defined
ECC scheme.
- fsl,no-blockmark-swap: Don't swap the bad block marker from the OOB
area with the byte in the data area but rely on the
flash based BBT for identifying bad blocks.
NOTE: this is only valid in conjunction with
'nand-on-flash-bbt'.
WARNING: on i.MX28 blockmark swapping cannot be
disabled for the BootROM in the FCB. Thus,
partitions written from Linux with this feature
turned on may not be accessible by the BootROM
code.
The device tree may optionally contain sub-nodes describing partitions of the
address space. See partition.txt for more detail.
......
......@@ -58,7 +58,18 @@ static void cfi_amdstd_sync (struct mtd_info *);
static int cfi_amdstd_suspend (struct mtd_info *);
static void cfi_amdstd_resume (struct mtd_info *);
static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *);
static int cfi_amdstd_get_fact_prot_info(struct mtd_info *, size_t,
size_t *, struct otp_info *);
static int cfi_amdstd_get_user_prot_info(struct mtd_info *, size_t,
size_t *, struct otp_info *);
static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_read_fact_prot_reg(struct mtd_info *, loff_t, size_t,
size_t *, u_char *);
static int cfi_amdstd_read_user_prot_reg(struct mtd_info *, loff_t, size_t,
size_t *, u_char *);
static int cfi_amdstd_write_user_prot_reg(struct mtd_info *, loff_t, size_t,
size_t *, u_char *);
static int cfi_amdstd_lock_user_prot_reg(struct mtd_info *, loff_t, size_t);
static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf);
......@@ -518,6 +529,12 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
mtd->_sync = cfi_amdstd_sync;
mtd->_suspend = cfi_amdstd_suspend;
mtd->_resume = cfi_amdstd_resume;
mtd->_read_user_prot_reg = cfi_amdstd_read_user_prot_reg;
mtd->_read_fact_prot_reg = cfi_amdstd_read_fact_prot_reg;
mtd->_get_fact_prot_info = cfi_amdstd_get_fact_prot_info;
mtd->_get_user_prot_info = cfi_amdstd_get_user_prot_info;
mtd->_write_user_prot_reg = cfi_amdstd_write_user_prot_reg;
mtd->_lock_user_prot_reg = cfi_amdstd_lock_user_prot_reg;
mtd->flags = MTD_CAP_NORFLASH;
mtd->name = map->name;
mtd->writesize = 1;
......@@ -628,6 +645,23 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
/*
* First calculate the timeout max according to the timeout field
* of struct cfi_ident that was probed from the chip's CFI area, if
* available. Specify a minimum of 2000us, in case the CFI data
* is wrong.
*/
if (cfi->cfiq->BufWriteTimeoutTyp &&
cfi->cfiq->BufWriteTimeoutMax)
cfi->chips[i].buffer_write_time_max =
1 << (cfi->cfiq->BufWriteTimeoutTyp +
cfi->cfiq->BufWriteTimeoutMax);
else
cfi->chips[i].buffer_write_time_max = 0;
cfi->chips[i].buffer_write_time_max =
max(cfi->chips[i].buffer_write_time_max, 2000);
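/*
 * Worked example (hypothetical CFI values, not from a real chip): with
 * BufWriteTimeoutTyp = 9 and BufWriteTimeoutMax = 2 the result is
 * 1 << (9 + 2) = 2048 us, which is already above the 2000 us floor
 * enforced by the max() above.
 */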
cfi->chips[i].ref_point_counter = 0;
init_waitqueue_head(&(cfi->chips[i].wq));
}
......@@ -1137,12 +1171,48 @@ static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_
return ret;
}
typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
loff_t adr, size_t len, u_char *buf, size_t grouplen);
static inline void otp_enter(struct map_info *map, struct flchip *chip,
loff_t adr, size_t len)
{
struct cfi_private *cfi = map->fldrv_priv;
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
cfi->device_type, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
cfi->device_type, NULL);
cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi,
cfi->device_type, NULL);
INVALIDATE_CACHED_RANGE(map, chip->start + adr, len);
}
static inline void otp_exit(struct map_info *map, struct flchip *chip,
loff_t adr, size_t len)
{
struct cfi_private *cfi = map->fldrv_priv;
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
cfi->device_type, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
cfi->device_type, NULL);
cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi,
cfi->device_type, NULL);
cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi,
cfi->device_type, NULL);
INVALIDATE_CACHED_RANGE(map, chip->start + adr, len);
}
static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
static inline int do_read_secsi_onechip(struct map_info *map,
struct flchip *chip, loff_t adr,
size_t len, u_char *buf,
size_t grouplen)
{
DECLARE_WAITQUEUE(wait, current);
unsigned long timeo = jiffies + HZ;
struct cfi_private *cfi = map->fldrv_priv;
retry:
mutex_lock(&chip->mutex);
......@@ -1164,16 +1234,9 @@ static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chi
chip->state = FL_READY;
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
otp_enter(map, chip, adr, len);
map_copy_from(map, buf, adr, len);
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
otp_exit(map, chip, adr, len);
wake_up(&chip->wq);
mutex_unlock(&chip->mutex);
......@@ -1205,7 +1268,8 @@ static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len,
else
thislen = len;
ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs,
thislen, buf, 0);
if (ret)
break;
......@@ -1219,8 +1283,267 @@ static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len,
return ret;
}
static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
unsigned long adr, map_word datum,
int mode);
static int do_otp_write(struct map_info *map, struct flchip *chip, loff_t adr,
size_t len, u_char *buf, size_t grouplen)
{
int ret;
while (len) {
unsigned long bus_ofs = adr & ~(map_bankwidth(map)-1);
int gap = adr - bus_ofs;
int n = min_t(int, len, map_bankwidth(map) - gap);
map_word datum;
if (n != map_bankwidth(map)) {
/* partial write of a word, load old contents */
otp_enter(map, chip, bus_ofs, map_bankwidth(map));
datum = map_read(map, bus_ofs);
otp_exit(map, chip, bus_ofs, map_bankwidth(map));
}
datum = map_word_load_partial(map, datum, buf, gap, n);
ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
if (ret)
return ret;
adr += n;
buf += n;
len -= n;
}
return 0;
}
static int do_otp_lock(struct map_info *map, struct flchip *chip, loff_t adr,
size_t len, u_char *buf, size_t grouplen)
{
struct cfi_private *cfi = map->fldrv_priv;
uint8_t lockreg;
unsigned long timeo;
int ret;
/* make sure area matches group boundaries */
if ((adr != 0) || (len != grouplen))
return -EINVAL;
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, chip->start, FL_LOCKING);
if (ret) {
mutex_unlock(&chip->mutex);
return ret;
}
chip->state = FL_LOCKING;
/* Enter lock register command */
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
cfi->device_type, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
cfi->device_type, NULL);
cfi_send_gen_cmd(0x40, cfi->addr_unlock1, chip->start, map, cfi,
cfi->device_type, NULL);
/* read lock register */
lockreg = cfi_read_query(map, 0);
/* clear bit 0 to protect the extended memory block */
lockreg &= ~0x01;
/* write lock register */
map_write(map, CMD(0xA0), chip->start);
map_write(map, CMD(lockreg), chip->start);
/* wait for chip to become ready */
timeo = jiffies + msecs_to_jiffies(2);
for (;;) {
if (chip_ready(map, adr))
break;
if (time_after(jiffies, timeo)) {
pr_err("Waiting for chip to be ready timed out.\n");
ret = -EIO;
break;
}
UDELAY(map, chip, 0, 1);
}
/* exit protection commands */
map_write(map, CMD(0x90), chip->start);
map_write(map, CMD(0x00), chip->start);
chip->state = FL_READY;
put_chip(map, chip, chip->start);
mutex_unlock(&chip->mutex);
return ret;
}
static int cfi_amdstd_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf,
otp_op_t action, int user_regs)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
int ofs_factor = cfi->interleave * cfi->device_type;
unsigned long base;
int chipnum;
struct flchip *chip;
uint8_t otp, lockreg;
int ret;
size_t user_size, factory_size, otpsize;
loff_t user_offset, factory_offset, otpoffset;
int user_locked = 0, otplocked;
*retlen = 0;
for (chipnum = 0; chipnum < cfi->numchips; chipnum++) {
chip = &cfi->chips[chipnum];
factory_size = 0;
user_size = 0;
static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
/* Micron M29EW family */
if (is_m29ew(cfi)) {
base = chip->start;
/* check whether secsi area is factory locked
or user lockable */
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, base, FL_CFI_QUERY);
if (ret) {
mutex_unlock(&chip->mutex);
return ret;
}
cfi_qry_mode_on(base, map, cfi);
otp = cfi_read_query(map, base + 0x3 * ofs_factor);
cfi_qry_mode_off(base, map, cfi);
put_chip(map, chip, base);
mutex_unlock(&chip->mutex);
if (otp & 0x80) {
/* factory locked */
factory_offset = 0;
factory_size = 0x100;
} else {
/* customer lockable */
user_offset = 0;
user_size = 0x100;
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, base, FL_LOCKING);
/* Enter lock register command */
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1,
chip->start, map, cfi,
cfi->device_type, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2,
chip->start, map, cfi,
cfi->device_type, NULL);
cfi_send_gen_cmd(0x40, cfi->addr_unlock1,
chip->start, map, cfi,
cfi->device_type, NULL);
/* read lock register */
lockreg = cfi_read_query(map, 0);
/* exit protection commands */
map_write(map, CMD(0x90), chip->start);
map_write(map, CMD(0x00), chip->start);
put_chip(map, chip, chip->start);
mutex_unlock(&chip->mutex);
user_locked = ((lockreg & 0x01) == 0x00);
}
}
otpsize = user_regs ? user_size : factory_size;
if (!otpsize)
continue;
otpoffset = user_regs ? user_offset : factory_offset;
otplocked = user_regs ? user_locked : 1;
if (!action) {
/* return otpinfo */
struct otp_info *otpinfo;
len -= sizeof(*otpinfo);
if (len <= 0)
return -ENOSPC;
otpinfo = (struct otp_info *)buf;
otpinfo->start = from;
otpinfo->length = otpsize;
otpinfo->locked = otplocked;
buf += sizeof(*otpinfo);
*retlen += sizeof(*otpinfo);
from += otpsize;
} else if ((from < otpsize) && (len > 0)) {
size_t size;
size = (len < otpsize - from) ? len : otpsize - from;
ret = action(map, chip, otpoffset + from, size, buf,
otpsize);
if (ret < 0)
return ret;
buf += size;
len -= size;
*retlen += size;
from = 0;
} else {
from -= otpsize;
}
}
return 0;
}
static int cfi_amdstd_get_fact_prot_info(struct mtd_info *mtd, size_t len,
size_t *retlen, struct otp_info *buf)
{
return cfi_amdstd_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
NULL, 0);
}
static int cfi_amdstd_get_user_prot_info(struct mtd_info *mtd, size_t len,
size_t *retlen, struct otp_info *buf)
{
return cfi_amdstd_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
NULL, 1);
}
static int cfi_amdstd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
size_t len, size_t *retlen,
u_char *buf)
{
return cfi_amdstd_otp_walk(mtd, from, len, retlen,
buf, do_read_secsi_onechip, 0);
}
static int cfi_amdstd_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
size_t len, size_t *retlen,
u_char *buf)
{
return cfi_amdstd_otp_walk(mtd, from, len, retlen,
buf, do_read_secsi_onechip, 1);
}
static int cfi_amdstd_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
size_t len, size_t *retlen,
u_char *buf)
{
return cfi_amdstd_otp_walk(mtd, from, len, retlen, buf,
do_otp_write, 1);
}
static int cfi_amdstd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
size_t len)
{
size_t retlen;
return cfi_amdstd_otp_walk(mtd, from, len, &retlen, NULL,
do_otp_lock, 1);
}
static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
unsigned long adr, map_word datum,
int mode)
{
struct cfi_private *cfi = map->fldrv_priv;
unsigned long timeo = jiffies + HZ;
......@@ -1241,7 +1564,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
adr += chip->start;
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr, FL_WRITING);
ret = get_chip(map, chip, adr, mode);
if (ret) {
mutex_unlock(&chip->mutex);
return ret;
......@@ -1250,6 +1573,9 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
__func__, adr, datum.x[0] );
if (mode == FL_OTP_WRITE)
otp_enter(map, chip, adr, map_bankwidth(map));
/*
* Check for a NOP for the case when the datum to write is already
* present - it saves time and works around buggy chips that corrupt
......@@ -1266,12 +1592,13 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
ENABLE_VPP(map);
xip_disable(map, chip, adr);
retry:
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
map_write(map, datum, adr);
chip->state = FL_WRITING;
chip->state = mode;
INVALIDATE_CACHE_UDELAY(map, chip,
adr, map_bankwidth(map),
......@@ -1280,7 +1607,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
/* See comment above for timeout value. */
timeo = jiffies + uWriteTimeout;
for (;;) {
if (chip->state != FL_WRITING) {
if (chip->state != mode) {
/* Someone's suspended the write. Sleep */
DECLARE_WAITQUEUE(wait, current);
......@@ -1320,6 +1647,8 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
}
xip_enable(map, chip, adr);
op_done:
if (mode == FL_OTP_WRITE)
otp_exit(map, chip, adr, map_bankwidth(map));
chip->state = FL_READY;
DISABLE_VPP(map);
put_chip(map, chip, adr);
......@@ -1375,7 +1704,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
ret = do_write_oneword(map, &cfi->chips[chipnum],
bus_ofs, tmp_buf);
bus_ofs, tmp_buf, FL_WRITING);
if (ret)
return ret;
......@@ -1399,7 +1728,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
datum = map_word_load(map, buf);
ret = do_write_oneword(map, &cfi->chips[chipnum],
ofs, datum);
ofs, datum, FL_WRITING);
if (ret)
return ret;
......@@ -1442,7 +1771,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
ret = do_write_oneword(map, &cfi->chips[chipnum],
ofs, tmp_buf);
ofs, tmp_buf, FL_WRITING);
if (ret)
return ret;
......@@ -1462,8 +1791,12 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
{
struct cfi_private *cfi = map->fldrv_priv;
unsigned long timeo = jiffies + HZ;
/* see comments in do_write_oneword() regarding uWriteTimeo. */
unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
/*
* Timeout is calculated according to CFI data, if available.
* See more comments in cfi_cmdset_0002().
*/
unsigned long uWriteTimeout =
usecs_to_jiffies(chip->buffer_write_time_max);
int ret = -EIO;
unsigned long cmd_adr;
int z, words;
......
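With these OTP handlers hooked into mtd_info, the protection registers become reachable from userspace through the existing MTD character-device OTP ioctls. A rough sketch of a read, assuming a hypothetical /dev/mtd0 node and the standard OTPSELECT semantics (error handling trimmed):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <mtd/mtd-user.h>

int main(void)
{
	unsigned char buf[256];
	int mode = MTD_OTP_USER;	/* MTD_OTP_FACTORY selects the factory area */
	ssize_t n;
	int fd;

	fd = open("/dev/mtd0", O_RDONLY);
	if (fd < 0)
		return 1;

	/* Switch this file descriptor into OTP mode; reads now hit the OTP area. */
	if (ioctl(fd, OTPSELECT, &mode) < 0) {
		close(fd);
		return 1;
	}

	n = pread(fd, buf, sizeof(buf), 0);
	if (n > 0)
		printf("read %zd bytes of user OTP\n", n);

	close(fd);
	return 0;
}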
......@@ -26,7 +26,7 @@
* <mtd-id> := unique name used in mapping driver/device (mtd->name)
* <size> := standard linux memsize OR "-" to denote all remaining space
* size is automatically truncated at end of device
* if specified or trucated size is 0 the part is skipped
* if specified or truncated size is 0 the part is skipped
* <offset> := standard linux memsize
* if omitted the part will immediately follow the previous part
* or 0 if the first part
......
......@@ -181,12 +181,10 @@ static int parse_name(char **pname, const char *token)
if (len > 64)
return -ENOSPC;
name = kmalloc(len, GFP_KERNEL);
name = kstrdup(token, GFP_KERNEL);
if (!name)
return -ENOMEM;
strcpy(name, token);
*pname = name;
return 0;
}
......@@ -195,6 +193,7 @@ static int parse_name(char **pname, const char *token)
static inline void kill_final_newline(char *str)
{
char *newline = strrchr(str, '\n');
if (newline && !newline[1])
*newline = 0;
}
......@@ -233,7 +232,7 @@ static int phram_setup(const char *val)
strcpy(str, val);
kill_final_newline(str);
for (i=0; i<3; i++)
for (i = 0; i < 3; i++)
token[i] = strsep(&str, ",");
if (str)
......
......@@ -111,7 +111,6 @@ typedef struct partition_t {
struct mtd_blktrans_dev mbd;
uint32_t state;
uint32_t *VirtualBlockMap;
uint32_t *VirtualPageMap;
uint32_t FreeTotal;
struct eun_info_t {
uint32_t Offset;
......@@ -1035,8 +1034,6 @@ static void ftl_freepart(partition_t *part)
{
vfree(part->VirtualBlockMap);
part->VirtualBlockMap = NULL;
kfree(part->VirtualPageMap);
part->VirtualPageMap = NULL;
kfree(part->EUNInfo);
part->EUNInfo = NULL;
kfree(part->XferInfo);
......@@ -1075,7 +1072,6 @@ static void ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
return;
}
ftl_freepart(partition);
kfree(partition);
}
......
......@@ -35,8 +35,6 @@ static int rbtx4939_flash_remove(struct platform_device *dev)
return 0;
if (info->mtd) {
struct rbtx4939_flash_data *pdata = dev_get_platdata(&dev->dev);
mtd_device_unregister(info->mtd);
map_destroy(info->mtd);
}
......
......@@ -298,6 +298,47 @@ static ssize_t mtd_ecc_step_size_show(struct device *dev,
}
static DEVICE_ATTR(ecc_step_size, S_IRUGO, mtd_ecc_step_size_show, NULL);
static ssize_t mtd_ecc_stats_corrected_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct mtd_info *mtd = dev_get_drvdata(dev);
struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->corrected);
}
static DEVICE_ATTR(corrected_bits, S_IRUGO,
mtd_ecc_stats_corrected_show, NULL);
static ssize_t mtd_ecc_stats_errors_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct mtd_info *mtd = dev_get_drvdata(dev);
struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->failed);
}
static DEVICE_ATTR(ecc_failures, S_IRUGO, mtd_ecc_stats_errors_show, NULL);
static ssize_t mtd_badblocks_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct mtd_info *mtd = dev_get_drvdata(dev);
struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->badblocks);
}
static DEVICE_ATTR(bad_blocks, S_IRUGO, mtd_badblocks_show, NULL);
static ssize_t mtd_bbtblocks_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct mtd_info *mtd = dev_get_drvdata(dev);
struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->bbtblocks);
}
static DEVICE_ATTR(bbt_blocks, S_IRUGO, mtd_bbtblocks_show, NULL);
static struct attribute *mtd_attrs[] = {
&dev_attr_type.attr,
&dev_attr_flags.attr,
......@@ -310,6 +351,10 @@ static struct attribute *mtd_attrs[] = {
&dev_attr_name.attr,
&dev_attr_ecc_strength.attr,
&dev_attr_ecc_step_size.attr,
&dev_attr_corrected_bits.attr,
&dev_attr_ecc_failures.attr,
&dev_attr_bad_blocks.attr,
&dev_attr_bbt_blocks.attr,
&dev_attr_bitflip_threshold.attr,
NULL,
};
......@@ -998,12 +1043,22 @@ int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
}
EXPORT_SYMBOL_GPL(mtd_is_locked);
int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs)
{
if (!mtd->_block_isbad)
if (ofs < 0 || ofs > mtd->size)
return -EINVAL;
if (!mtd->_block_isreserved)
return 0;
return mtd->_block_isreserved(mtd, ofs);
}
EXPORT_SYMBOL_GPL(mtd_block_isreserved);
int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
if (ofs < 0 || ofs > mtd->size)
return -EINVAL;
if (!mtd->_block_isbad)
return 0;
return mtd->_block_isbad(mtd, ofs);
}
EXPORT_SYMBOL_GPL(mtd_block_isbad);
......
......@@ -290,6 +290,13 @@ static void part_resume(struct mtd_info *mtd)
part->master->_resume(part->master);
}
static int part_block_isreserved(struct mtd_info *mtd, loff_t ofs)
{
struct mtd_part *part = PART(mtd);
ofs += part->offset;
return part->master->_block_isreserved(part->master, ofs);
}
static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
struct mtd_part *part = PART(mtd);
......@@ -422,6 +429,8 @@ static struct mtd_part *allocate_partition(struct mtd_info *master,
slave->mtd._unlock = part_unlock;
if (master->_is_locked)
slave->mtd._is_locked = part_is_locked;
if (master->_block_isreserved)
slave->mtd._block_isreserved = part_block_isreserved;
if (master->_block_isbad)
slave->mtd._block_isbad = part_block_isbad;
if (master->_block_markbad)
......@@ -526,7 +535,9 @@ static struct mtd_part *allocate_partition(struct mtd_info *master,
uint64_t offs = 0;
while (offs < slave->mtd.size) {
if (mtd_block_isbad(master, offs + slave->offset))
if (mtd_block_isreserved(master, offs + slave->offset))
slave->mtd.ecc_stats.bbtblocks++;
else if (mtd_block_isbad(master, offs + slave->offset))
slave->mtd.ecc_stats.badblocks++;
offs += slave->mtd.erasesize;
}
......
......@@ -50,4 +50,4 @@ obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi-nand/
obj-$(CONFIG_MTD_NAND_XWAY) += xway_nand.o
obj-$(CONFIG_MTD_NAND_BCM47XXNFLASH) += bcm47xxnflash/
nand-objs := nand_base.o nand_bbt.o
nand-objs := nand_base.o nand_bbt.o nand_timings.o
......@@ -97,7 +97,9 @@ struct atmel_nfc {
bool write_by_sram;
bool is_initialized;
struct completion comp_nfc;
struct completion comp_ready;
struct completion comp_cmd_done;
struct completion comp_xfer_done;
/* Points to the SRAM bank which contains the data read via the NFC */
void __iomem *data_in_sram;
......@@ -861,12 +863,11 @@ static int pmecc_correction(struct mtd_info *mtd, u32 pmecc_stat, uint8_t *buf,
{
struct nand_chip *nand_chip = mtd->priv;
struct atmel_nand_host *host = nand_chip->priv;
int i, err_nbr, eccbytes;
int i, err_nbr;
uint8_t *buf_pos;
int total_err = 0;
eccbytes = nand_chip->ecc.bytes;
for (i = 0; i < eccbytes; i++)
for (i = 0; i < nand_chip->ecc.total; i++)
if (ecc[i] != 0xff)
goto normal_check;
/* Erased page, return OK */
......@@ -928,7 +929,7 @@ static int atmel_nand_pmecc_read_page(struct mtd_info *mtd,
struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
{
struct atmel_nand_host *host = chip->priv;
int eccsize = chip->ecc.size;
int eccsize = chip->ecc.size * chip->ecc.steps;
uint8_t *oob = chip->oob_poi;
uint32_t *eccpos = chip->ecc.layout->eccpos;
uint32_t stat;
......@@ -1169,8 +1170,7 @@ static int atmel_pmecc_nand_init_params(struct platform_device *pdev,
goto err;
}
/* ECC is calculated for the whole page (1 step) */
nand_chip->ecc.size = mtd->writesize;
nand_chip->ecc.size = sector_size;
/* set ECC page size and oob layout */
switch (mtd->writesize) {
......@@ -1185,18 +1185,20 @@ static int atmel_pmecc_nand_init_params(struct platform_device *pdev,
host->pmecc_index_of = host->pmecc_rom_base +
host->pmecc_lookup_table_offset;
nand_chip->ecc.steps = 1;
nand_chip->ecc.steps = host->pmecc_sector_number;
nand_chip->ecc.strength = cap;
nand_chip->ecc.bytes = host->pmecc_bytes_per_sector *
nand_chip->ecc.bytes = host->pmecc_bytes_per_sector;
nand_chip->ecc.total = host->pmecc_bytes_per_sector *
host->pmecc_sector_number;
if (nand_chip->ecc.bytes > mtd->oobsize - 2) {
if (nand_chip->ecc.total > mtd->oobsize - 2) {
dev_err(host->dev, "No room for ECC bytes\n");
err_no = -EINVAL;
goto err;
}
pmecc_config_ecc_layout(&atmel_pmecc_oobinfo,
mtd->oobsize,
nand_chip->ecc.bytes);
nand_chip->ecc.total);
nand_chip->ecc.layout = &atmel_pmecc_oobinfo;
break;
case 512:
......@@ -1572,49 +1574,104 @@ static int atmel_hw_nand_init_params(struct platform_device *pdev,
return 0;
}
static inline u32 nfc_read_status(struct atmel_nand_host *host)
{
u32 err_flags = NFC_SR_DTOE | NFC_SR_UNDEF | NFC_SR_AWB | NFC_SR_ASE;
u32 nfc_status = nfc_readl(host->nfc->hsmc_regs, SR);
if (unlikely(nfc_status & err_flags)) {
if (nfc_status & NFC_SR_DTOE)
dev_err(host->dev, "NFC: Waiting Nand R/B Timeout Error\n");
else if (nfc_status & NFC_SR_UNDEF)
dev_err(host->dev, "NFC: Access Undefined Area Error\n");
else if (nfc_status & NFC_SR_AWB)
dev_err(host->dev, "NFC: Access memory While NFC is busy\n");
else if (nfc_status & NFC_SR_ASE)
dev_err(host->dev, "NFC: Access memory Size Error\n");
}
return nfc_status;
}
/* SMC interrupt service routine */
static irqreturn_t hsmc_interrupt(int irq, void *dev_id)
{
struct atmel_nand_host *host = dev_id;
u32 status, mask, pending;
irqreturn_t ret = IRQ_HANDLED;
irqreturn_t ret = IRQ_NONE;
status = nfc_readl(host->nfc->hsmc_regs, SR);
status = nfc_read_status(host);
mask = nfc_readl(host->nfc->hsmc_regs, IMR);
pending = status & mask;
if (pending & NFC_SR_XFR_DONE) {
complete(&host->nfc->comp_nfc);
complete(&host->nfc->comp_xfer_done);
nfc_writel(host->nfc->hsmc_regs, IDR, NFC_SR_XFR_DONE);
} else if (pending & NFC_SR_RB_EDGE) {
complete(&host->nfc->comp_nfc);
ret = IRQ_HANDLED;
}
if (pending & NFC_SR_RB_EDGE) {
complete(&host->nfc->comp_ready);
nfc_writel(host->nfc->hsmc_regs, IDR, NFC_SR_RB_EDGE);
} else if (pending & NFC_SR_CMD_DONE) {
complete(&host->nfc->comp_nfc);
ret = IRQ_HANDLED;
}
if (pending & NFC_SR_CMD_DONE) {
complete(&host->nfc->comp_cmd_done);
nfc_writel(host->nfc->hsmc_regs, IDR, NFC_SR_CMD_DONE);
} else {
ret = IRQ_NONE;
ret = IRQ_HANDLED;
}
return ret;
}
/* NFC(Nand Flash Controller) related functions */
static int nfc_wait_interrupt(struct atmel_nand_host *host, u32 flag)
static void nfc_prepare_interrupt(struct atmel_nand_host *host, u32 flag)
{
unsigned long timeout;
init_completion(&host->nfc->comp_nfc);
if (flag & NFC_SR_XFR_DONE)
init_completion(&host->nfc->comp_xfer_done);
if (flag & NFC_SR_RB_EDGE)
init_completion(&host->nfc->comp_ready);
if (flag & NFC_SR_CMD_DONE)
init_completion(&host->nfc->comp_cmd_done);
/* Enable the interrupts that we need to wait for */
nfc_writel(host->nfc->hsmc_regs, IER, flag);
}
timeout = wait_for_completion_timeout(&host->nfc->comp_nfc,
msecs_to_jiffies(NFC_TIME_OUT_MS));
if (timeout)
return 0;
static int nfc_wait_interrupt(struct atmel_nand_host *host, u32 flag)
{
int i, index = 0;
struct completion *comp[3]; /* Support 3 interrupt completion */
/* Time out to wait for the interrupt */
if (flag & NFC_SR_XFR_DONE)
comp[index++] = &host->nfc->comp_xfer_done;
if (flag & NFC_SR_RB_EDGE)
comp[index++] = &host->nfc->comp_ready;
if (flag & NFC_SR_CMD_DONE)
comp[index++] = &host->nfc->comp_cmd_done;
if (index == 0) {
dev_err(host->dev, "Unkown interrupt flag: 0x%08x\n", flag);
return -EINVAL;
}
for (i = 0; i < index; i++) {
if (wait_for_completion_timeout(comp[i],
msecs_to_jiffies(NFC_TIME_OUT_MS)))
continue; /* wait for next completion */
else
goto err_timeout;
}
return 0;
err_timeout:
dev_err(host->dev, "Time out to wait for interrupt: 0x%08x\n", flag);
/* Disable the interrupt as it is not handled by interrupt handler */
nfc_writel(host->nfc->hsmc_regs, IDR, flag);
return -ETIMEDOUT;
}
......@@ -1622,6 +1679,9 @@ static int nfc_send_command(struct atmel_nand_host *host,
unsigned int cmd, unsigned int addr, unsigned char cycle0)
{
unsigned long timeout;
u32 flag = NFC_SR_CMD_DONE;
flag |= cmd & NFCADDR_CMD_DATAEN ? NFC_SR_XFR_DONE : 0;
dev_dbg(host->dev,
"nfc_cmd: 0x%08x, addr1234: 0x%08x, cycle0: 0x%02x\n",
cmd, addr, cycle0);
......@@ -1635,18 +1695,28 @@ static int nfc_send_command(struct atmel_nand_host *host,
return -ETIMEDOUT;
}
}
nfc_prepare_interrupt(host, flag);
nfc_writel(host->nfc->hsmc_regs, CYCLE0, cycle0);
nfc_cmd_addr1234_writel(cmd, addr, host->nfc->base_cmd_regs);
return nfc_wait_interrupt(host, NFC_SR_CMD_DONE);
return nfc_wait_interrupt(host, flag);
}
static int nfc_device_ready(struct mtd_info *mtd)
{
u32 status, mask;
struct nand_chip *nand_chip = mtd->priv;
struct atmel_nand_host *host = nand_chip->priv;
if (!nfc_wait_interrupt(host, NFC_SR_RB_EDGE))
return 1;
return 0;
status = nfc_read_status(host);
mask = nfc_readl(host->nfc->hsmc_regs, IMR);
/* The mask should be 0. If not, we may have lost interrupts */
if (unlikely(mask & status))
dev_err(host->dev, "Lost the interrupt flags: 0x%08x\n",
mask & status);
return status & NFC_SR_RB_EDGE;
}
static void nfc_select_chip(struct mtd_info *mtd, int chip)
......@@ -1795,10 +1865,6 @@ static void nfc_nand_command(struct mtd_info *mtd, unsigned int command,
nfc_addr_cmd = cmd1 | cmd2 | vcmd2 | acycle | csid | dataen | nfcwr;
nfc_send_command(host, nfc_addr_cmd, addr1234, cycle0);
if (dataen == NFCADDR_CMD_DATAEN)
if (nfc_wait_interrupt(host, NFC_SR_XFR_DONE))
dev_err(host->dev, "something wrong, No XFR_DONE interrupt comes.\n");
/*
* Program and erase have their own busy handlers status, sequential
* in, and deplete1 need no delay.
......@@ -1823,6 +1889,7 @@ static void nfc_nand_command(struct mtd_info *mtd, unsigned int command,
}
/* fall through */
default:
nfc_prepare_interrupt(host, NFC_SR_RB_EDGE);
nfc_wait_interrupt(host, NFC_SR_RB_EDGE);
}
}
......@@ -2209,6 +2276,9 @@ static int atmel_nand_nfc_probe(struct platform_device *pdev)
}
}
nfc_writel(nfc->hsmc_regs, IDR, 0xffffffff);
nfc_readl(nfc->hsmc_regs, SR); /* clear the NFC_SR */
nfc->is_initialized = true;
dev_info(&pdev->dev, "NFC is probed.\n");
return 0;
......
......@@ -37,6 +37,10 @@
#define ATMEL_HSMC_NFC_SR 0x08 /* NFC Status Register */
#define NFC_SR_XFR_DONE (1 << 16)
#define NFC_SR_CMD_DONE (1 << 17)
#define NFC_SR_DTOE (1 << 20)
#define NFC_SR_UNDEF (1 << 21)
#define NFC_SR_AWB (1 << 22)
#define NFC_SR_ASE (1 << 23)
#define NFC_SR_RB_EDGE (1 << 24)
#define ATMEL_HSMC_NFC_IER 0x0c
......
......@@ -830,34 +830,10 @@ static int bf5xx_nand_probe(struct platform_device *pdev)
return err;
}
/* PM Support */
#ifdef CONFIG_PM
static int bf5xx_nand_suspend(struct platform_device *dev, pm_message_t pm)
{
struct bf5xx_nand_info *info = platform_get_drvdata(dev);
return 0;
}
static int bf5xx_nand_resume(struct platform_device *dev)
{
struct bf5xx_nand_info *info = platform_get_drvdata(dev);
return 0;
}
#else
#define bf5xx_nand_suspend NULL
#define bf5xx_nand_resume NULL
#endif
/* driver device registration */
static struct platform_driver bf5xx_nand_driver = {
.probe = bf5xx_nand_probe,
.remove = bf5xx_nand_remove,
.suspend = bf5xx_nand_suspend,
.resume = bf5xx_nand_resume,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
......
......@@ -473,7 +473,7 @@ static void detect_partition_feature(struct denali_nand_info *denali)
static uint16_t denali_nand_timing_set(struct denali_nand_info *denali)
{
uint16_t status = PASS;
uint32_t id_bytes[5], addr;
uint32_t id_bytes[8], addr;
uint8_t i, maf_id, device_id;
dev_dbg(denali->dev,
......@@ -488,7 +488,7 @@ static uint16_t denali_nand_timing_set(struct denali_nand_info *denali)
addr = (uint32_t)MODE_11 | BANK(denali->flash_bank);
index_addr(denali, (uint32_t)addr | 0, 0x90);
index_addr(denali, (uint32_t)addr | 1, 0);
for (i = 0; i < 5; i++)
for (i = 0; i < 8; i++)
index_addr_read_data(denali, addr | 2, &id_bytes[i]);
maf_id = id_bytes[0];
device_id = id_bytes[1];
......@@ -1276,7 +1276,7 @@ static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col,
addr = (uint32_t)MODE_11 | BANK(denali->flash_bank);
index_addr(denali, (uint32_t)addr | 0, 0x90);
index_addr(denali, (uint32_t)addr | 1, 0);
for (i = 0; i < 5; i++) {
for (i = 0; i < 8; i++) {
index_addr_read_data(denali,
(uint32_t)addr | 2,
&id);
......
......@@ -285,9 +285,8 @@ static int legacy_set_geometry(struct gpmi_nand_data *this)
geo->ecc_strength = get_ecc_strength(this);
if (!gpmi_check_ecc(this)) {
dev_err(this->dev,
"We can not support this nand chip."
" Its required ecc strength(%d) is beyond our"
" capability(%d).\n", geo->ecc_strength,
"required ecc strength of the NAND chip: %d is not supported by the GPMI controller (%d)\n",
geo->ecc_strength,
this->devdata->bch_max_ecc_strength);
return -EINVAL;
}
......@@ -1082,6 +1081,7 @@ static int gpmi_ecc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
int first, last, marker_pos;
int ecc_parity_size;
int col = 0;
int old_swap_block_mark = this->swap_block_mark;
/* The size of ECC parity */
ecc_parity_size = geo->gf_len * geo->ecc_strength / 8;
......@@ -1090,17 +1090,21 @@ static int gpmi_ecc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
first = offs / size;
last = (offs + len - 1) / size;
/*
* Find the chunk which contains the Block Marker. If this chunk is
* in the range of [first, last], we have to read out the whole page.
* Why? since we had swapped the data at the position of Block Marker
* to the metadata which is bound with the chunk 0.
*/
marker_pos = geo->block_mark_byte_offset / size;
if (last >= marker_pos && first <= marker_pos) {
dev_dbg(this->dev, "page:%d, first:%d, last:%d, marker at:%d\n",
if (this->swap_block_mark) {
/*
* Find the chunk which contains the Block Marker.
* If this chunk is in the range of [first, last],
* we have to read out the whole page.
* Why? since we had swapped the data at the position of Block
* Marker to the metadata which is bound with the chunk 0.
*/
marker_pos = geo->block_mark_byte_offset / size;
if (last >= marker_pos && first <= marker_pos) {
dev_dbg(this->dev,
"page:%d, first:%d, last:%d, marker at:%d\n",
page, first, last, marker_pos);
return gpmi_ecc_read_page(mtd, chip, buf, 0, page);
return gpmi_ecc_read_page(mtd, chip, buf, 0, page);
}
}
meta = geo->metadata_size;
......@@ -1146,7 +1150,7 @@ static int gpmi_ecc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
writel(r1_old, bch_regs + HW_BCH_FLASH0LAYOUT0);
writel(r2_old, bch_regs + HW_BCH_FLASH0LAYOUT1);
this->bch_geometry = old_geo;
this->swap_block_mark = true;
this->swap_block_mark = old_swap_block_mark;
return max_bitflips;
}
......@@ -1180,7 +1184,7 @@ static int gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
/* Handle block mark swapping. */
block_mark_swapping(this,
(void *) payload_virt, (void *) auxiliary_virt);
(void *)payload_virt, (void *)auxiliary_virt);
} else {
/*
* If control arrives here, we're not doing block mark swapping,
......@@ -1310,10 +1314,10 @@ static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
/*
* Now, we want to make sure the block mark is correct. In the
* Swapping/Raw case, we already have it. Otherwise, we need to
* explicitly read it.
* non-transcribing case (!GPMI_IS_MX23()), we already have it.
* Otherwise, we need to explicitly read it.
*/
if (!this->swap_block_mark) {
if (GPMI_IS_MX23(this)) {
/* Read the block mark into the first byte of the OOB buffer. */
chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
chip->oob_poi[0] = chip->read_byte(mtd);
......@@ -1354,7 +1358,7 @@ static int gpmi_block_markbad(struct mtd_info *mtd, loff_t ofs)
chipnr = (int)(ofs >> chip->chip_shift);
chip->select_chip(mtd, chipnr);
column = this->swap_block_mark ? mtd->writesize : 0;
column = !GPMI_IS_MX23(this) ? mtd->writesize : 0;
/* Write the block mark. */
block_mark = this->data_buffer_dma;
......@@ -1597,8 +1601,9 @@ static int mx23_boot_init(struct gpmi_nand_data *this)
dev_dbg(dev, "Transcribing mark in block %u\n", block);
ret = chip->block_markbad(mtd, byte);
if (ret)
dev_err(dev, "Failed to mark block bad with "
"ret %d\n", ret);
dev_err(dev,
"Failed to mark block bad with ret %d\n",
ret);
}
}
......@@ -1649,9 +1654,6 @@ static int gpmi_init_last(struct gpmi_nand_data *this)
struct bch_geometry *bch_geo = &this->bch_geometry;
int ret;
/* Set up swap_block_mark, must be set before the gpmi_set_geometry() */
this->swap_block_mark = !GPMI_IS_MX23(this);
/* Set up the medium geometry */
ret = gpmi_set_geometry(this);
if (ret)
......@@ -1715,9 +1717,20 @@ static int gpmi_nand_init(struct gpmi_nand_data *this)
chip->badblock_pattern = &gpmi_bbt_descr;
chip->block_markbad = gpmi_block_markbad;
chip->options |= NAND_NO_SUBPAGE_WRITE;
if (of_get_nand_on_flash_bbt(this->dev->of_node))
/* Set up swap_block_mark, must be set before the gpmi_set_geometry() */
this->swap_block_mark = !GPMI_IS_MX23(this);
if (of_get_nand_on_flash_bbt(this->dev->of_node)) {
chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
if (of_property_read_bool(this->dev->of_node,
"fsl,no-blockmark-swap"))
this->swap_block_mark = false;
}
dev_dbg(this->dev, "Blockmark swapping %sabled\n",
this->swap_block_mark ? "en" : "dis");
/*
* Allocate a temporary DMA buffer for reading ID in the
* nand_scan_ident().
......@@ -1760,16 +1773,16 @@ static int gpmi_nand_init(struct gpmi_nand_data *this)
static const struct of_device_id gpmi_nand_id_table[] = {
{
.compatible = "fsl,imx23-gpmi-nand",
.data = (void *)&gpmi_devdata_imx23,
.data = &gpmi_devdata_imx23,
}, {
.compatible = "fsl,imx28-gpmi-nand",
.data = (void *)&gpmi_devdata_imx28,
.data = &gpmi_devdata_imx28,
}, {
.compatible = "fsl,imx6q-gpmi-nand",
.data = (void *)&gpmi_devdata_imx6q,
.data = &gpmi_devdata_imx6q,
}, {
.compatible = "fsl,imx6sx-gpmi-nand",
.data = (void *)&gpmi_devdata_imx6sx,
.data = &gpmi_devdata_imx6sx,
}, {}
};
MODULE_DEVICE_TABLE(of, gpmi_nand_id_table);
......
......@@ -721,12 +721,6 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
nand_chip->bbt_td = &lpc32xx_nand_bbt;
nand_chip->bbt_md = &lpc32xx_nand_bbt_mirror;
/* bitflip_threshold's default is defined as ecc_strength anyway.
* Unfortunately, it is set only later at add_mtd_device(). Meanwhile
* being 0, it causes bad block table scanning errors in
* nand_scan_tail(), so preparing it here. */
mtd->bitflip_threshold = nand_chip->ecc.strength;
if (use_dma) {
res = lpc32xx_dma_setup(host);
if (res) {
......
......@@ -840,12 +840,6 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
chip->ecc.strength = 1;
chip->ecc.hwctl = lpc32xx_nand_ecc_enable;
/* bitflip_threshold's default is defined as ecc_strength anyway.
* Unfortunately, it is set only later at add_mtd_device(). Meanwhile
* being 0, it causes bad block table scanning errors in
* nand_scan_tail(), so preparing it here already. */
mtd->bitflip_threshold = chip->ecc.strength;
/*
* Allocate a large enough buffer for a single huge page plus
* extra space for the spare area and ECC storage area
......
......@@ -484,6 +484,23 @@ static int nand_check_wp(struct mtd_info *mtd)
return (chip->read_byte(mtd) & NAND_STATUS_WP) ? 0 : 1;
}
/**
* nand_block_isreserved - [GENERIC] Check if a block is marked reserved
* @mtd: MTD device structure
* @ofs: offset from device start
*
* Check if the block is marked as reserved.
*/
static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
{
struct nand_chip *chip = mtd->priv;
if (!chip->bbt)
return 0;
/* Return info from the table */
return nand_isreserved_bbt(mtd, ofs);
}
/**
* nand_block_checkbad - [GENERIC] Check if a block is marked bad
* @mtd: MTD device structure
......@@ -4113,6 +4130,7 @@ int nand_scan_tail(struct mtd_info *mtd)
mtd->_unlock = NULL;
mtd->_suspend = nand_suspend;
mtd->_resume = nand_resume;
mtd->_block_isreserved = nand_block_isreserved;
mtd->_block_isbad = nand_block_isbad;
mtd->_block_markbad = nand_block_markbad;
mtd->writebufsize = mtd->writesize;
......
......@@ -1310,6 +1310,20 @@ int nand_default_bbt(struct mtd_info *mtd)
return nand_scan_bbt(mtd, this->badblock_pattern);
}
/**
* nand_isreserved_bbt - [NAND Interface] Check if a block is reserved
* @mtd: MTD device structure
* @offs: offset in the device
*/
int nand_isreserved_bbt(struct mtd_info *mtd, loff_t offs)
{
struct nand_chip *this = mtd->priv;
int block;
block = (int)(offs >> this->bbt_erase_shift);
return bbt_get_entry(this, block) == BBT_BLOCK_RESERVED;
}
/**
* nand_isbad_bbt - [NAND Interface] Check if a block is bad
* @mtd: MTD device structure
......
/*
* Copyright (C) 2014 Free Electrons
*
* Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/mtd/nand.h>
static const struct nand_sdr_timings onfi_sdr_timings[] = {
/* Mode 0 */
{
.tADL_min = 200000,
.tALH_min = 20000,
.tALS_min = 50000,
.tAR_min = 25000,
.tCEA_max = 100000,
.tCEH_min = 20000,
.tCH_min = 20000,
.tCHZ_max = 100000,
.tCLH_min = 20000,
.tCLR_min = 20000,
.tCLS_min = 50000,
.tCOH_min = 0,
.tCS_min = 70000,
.tDH_min = 20000,
.tDS_min = 40000,
.tFEAT_max = 1000000,
.tIR_min = 10000,
.tITC_max = 1000000,
.tRC_min = 100000,
.tREA_max = 40000,
.tREH_min = 30000,
.tRHOH_min = 0,
.tRHW_min = 200000,
.tRHZ_max = 200000,
.tRLOH_min = 0,
.tRP_min = 50000,
.tRST_max = 250000000000,
.tWB_max = 200000,
.tRR_min = 40000,
.tWC_min = 100000,
.tWH_min = 30000,
.tWHR_min = 120000,
.tWP_min = 50000,
.tWW_min = 100000,
},
/* Mode 1 */
{
.tADL_min = 100000,
.tALH_min = 10000,
.tALS_min = 25000,
.tAR_min = 10000,
.tCEA_max = 45000,
.tCEH_min = 20000,
.tCH_min = 10000,
.tCHZ_max = 50000,
.tCLH_min = 10000,
.tCLR_min = 10000,
.tCLS_min = 25000,
.tCOH_min = 15000,
.tCS_min = 35000,
.tDH_min = 10000,
.tDS_min = 20000,
.tFEAT_max = 1000000,
.tIR_min = 0,
.tITC_max = 1000000,
.tRC_min = 50000,
.tREA_max = 30000,
.tREH_min = 15000,
.tRHOH_min = 15000,
.tRHW_min = 100000,
.tRHZ_max = 100000,
.tRLOH_min = 0,
.tRP_min = 25000,
.tRR_min = 20000,
.tRST_max = 500000000,
.tWB_max = 100000,
.tWC_min = 45000,
.tWH_min = 15000,
.tWHR_min = 80000,
.tWP_min = 25000,
.tWW_min = 100000,
},
/* Mode 2 */
{
.tADL_min = 100000,
.tALH_min = 10000,
.tALS_min = 15000,
.tAR_min = 10000,
.tCEA_max = 30000,
.tCEH_min = 20000,
.tCH_min = 10000,
.tCHZ_max = 50000,
.tCLH_min = 10000,
.tCLR_min = 10000,
.tCLS_min = 15000,
.tCOH_min = 15000,
.tCS_min = 25000,
.tDH_min = 5000,
.tDS_min = 15000,
.tFEAT_max = 1000000,
.tIR_min = 0,
.tITC_max = 1000000,
.tRC_min = 35000,
.tREA_max = 25000,
.tREH_min = 15000,
.tRHOH_min = 15000,
.tRHW_min = 100000,
.tRHZ_max = 100000,
.tRLOH_min = 0,
.tRR_min = 20000,
.tRST_max = 500000000,
.tWB_max = 100000,
.tRP_min = 17000,
.tWC_min = 35000,
.tWH_min = 15000,
.tWHR_min = 80000,
.tWP_min = 17000,
.tWW_min = 100000,
},
/* Mode 3 */
{
.tADL_min = 100000,
.tALH_min = 5000,
.tALS_min = 10000,
.tAR_min = 10000,
.tCEA_max = 25000,
.tCEH_min = 20000,
.tCH_min = 5000,
.tCHZ_max = 50000,
.tCLH_min = 5000,
.tCLR_min = 10000,
.tCLS_min = 10000,
.tCOH_min = 15000,
.tCS_min = 25000,
.tDH_min = 5000,
.tDS_min = 10000,
.tFEAT_max = 1000000,
.tIR_min = 0,
.tITC_max = 1000000,
.tRC_min = 30000,
.tREA_max = 20000,
.tREH_min = 10000,
.tRHOH_min = 15000,
.tRHW_min = 100000,
.tRHZ_max = 100000,
.tRLOH_min = 0,
.tRP_min = 15000,
.tRR_min = 20000,
.tRST_max = 500000000,
.tWB_max = 100000,
.tWC_min = 30000,
.tWH_min = 10000,
.tWHR_min = 80000,
.tWP_min = 15000,
.tWW_min = 100000,
},
/* Mode 4 */
{
.tADL_min = 70000,
.tALH_min = 5000,
.tALS_min = 10000,
.tAR_min = 10000,
.tCEA_max = 25000,
.tCEH_min = 20000,
.tCH_min = 5000,
.tCHZ_max = 30000,
.tCLH_min = 5000,
.tCLR_min = 10000,
.tCLS_min = 10000,
.tCOH_min = 15000,
.tCS_min = 20000,
.tDH_min = 5000,
.tDS_min = 10000,
.tFEAT_max = 1000000,
.tIR_min = 0,
.tITC_max = 1000000,
.tRC_min = 25000,
.tREA_max = 20000,
.tREH_min = 10000,
.tRHOH_min = 15000,
.tRHW_min = 100000,
.tRHZ_max = 100000,
.tRLOH_min = 5000,
.tRP_min = 12000,
.tRR_min = 20000,
.tRST_max = 500000000,
.tWB_max = 100000,
.tWC_min = 25000,
.tWH_min = 10000,
.tWHR_min = 80000,
.tWP_min = 12000,
.tWW_min = 100000,
},
/* Mode 5 */
{
.tADL_min = 70000,
.tALH_min = 5000,
.tALS_min = 10000,
.tAR_min = 10000,
.tCEA_max = 25000,
.tCEH_min = 20000,
.tCH_min = 5000,
.tCHZ_max = 30000,
.tCLH_min = 5000,
.tCLR_min = 10000,
.tCLS_min = 10000,
.tCOH_min = 15000,
.tCS_min = 15000,
.tDH_min = 5000,
.tDS_min = 7000,
.tFEAT_max = 1000000,
.tIR_min = 0,
.tITC_max = 1000000,
.tRC_min = 20000,
.tREA_max = 16000,
.tREH_min = 7000,
.tRHOH_min = 15000,
.tRHW_min = 100000,
.tRHZ_max = 100000,
.tRLOH_min = 5000,
.tRP_min = 10000,
.tRR_min = 20000,
.tRST_max = 500000000,
.tWB_max = 100000,
.tWC_min = 20000,
.tWH_min = 7000,
.tWHR_min = 80000,
.tWP_min = 10000,
.tWW_min = 100000,
},
};
/**
* onfi_async_timing_mode_to_sdr_timings - [NAND Interface] Retrieve NAND
* timings according to the given ONFI timing mode
* @mode: ONFI timing mode
*/
const struct nand_sdr_timings *onfi_async_timing_mode_to_sdr_timings(int mode)
{
if (mode < 0 || mode >= ARRAY_SIZE(onfi_sdr_timings))
return ERR_PTR(-EINVAL);
return &onfi_sdr_timings[mode];
}
EXPORT_SYMBOL(onfi_async_timing_mode_to_sdr_timings);
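
A controller driver is expected to pick a mode, fetch the corresponding timings and convert the picosecond values into its own clock cycles or register fields. A minimal sketch of that pattern (the 4000 ps clock period and the helper function are illustrative, not part of this patch):

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/mtd/nand.h>

static int example_apply_timing_mode(int mode)
{
	const struct nand_sdr_timings *t;
	u32 period_ps = 4000;	/* hypothetical 250 MHz controller clock */
	u32 twp_cycles, twh_cycles;

	t = onfi_async_timing_mode_to_sdr_timings(mode);
	if (IS_ERR(t))
		return PTR_ERR(t);

	/* Round each minimum constraint up to whole clock cycles. */
	twp_cycles = DIV_ROUND_UP(t->tWP_min, period_ps);
	twh_cycles = DIV_ROUND_UP(t->tWH_min, period_ps);

	/* A real driver would now program these values into its timing registers. */
	return (twp_cycles && twh_cycles) ? 0 : -EINVAL;
}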
......@@ -208,10 +208,10 @@ static void s3c2410_nand_clk_set_state(struct s3c2410_nand_info *info,
if (info->clk_state == CLOCK_ENABLE) {
if (new_state != CLOCK_ENABLE)
clk_disable(info->clk);
clk_disable_unprepare(info->clk);
} else {
if (new_state == CLOCK_ENABLE)
clk_enable(info->clk);
clk_prepare_enable(info->clk);
}
info->clk_state = new_state;
......
......@@ -47,6 +47,25 @@ static int read_sr(struct spi_nor *nor)
return val;
}
/*
* Read the flag status register, returning its value.
* Returns the register value on success, or a negative errno if an
* error occurred.
*/
static int read_fsr(struct spi_nor *nor)
{
int ret;
u8 val;
ret = nor->read_reg(nor, SPINOR_OP_RDFSR, &val, 1);
if (ret < 0) {
pr_err("error %d reading FSR\n", ret);
return ret;
}
return val;
}
/*
* Read configuration register, returning its value in the
* location. Return the configuration register value.
......@@ -165,6 +184,32 @@ static int spi_nor_wait_till_ready(struct spi_nor *nor)
return -ETIMEDOUT;
}
static int spi_nor_wait_till_fsr_ready(struct spi_nor *nor)
{
unsigned long deadline;
int sr;
int fsr;
deadline = jiffies + MAX_READY_WAIT_JIFFIES;
do {
cond_resched();
sr = read_sr(nor);
if (sr < 0) {
break;
} else if (!(sr & SR_WIP)) {
fsr = read_fsr(nor);
if (fsr < 0)
break;
if (fsr & FSR_READY)
return 0;
}
} while (!time_after_eq(jiffies, deadline));
return -ETIMEDOUT;
}
/*
* Service routine to read status register until ready, or timeout occurs.
* Returns non-zero if error.
......@@ -402,6 +447,7 @@ struct flash_info {
#define SECT_4K_PMC 0x10 /* SPINOR_OP_BE_4K_PMC works uniformly */
#define SPI_NOR_DUAL_READ 0x20 /* Flash supports Dual Read */
#define SPI_NOR_QUAD_READ 0x40 /* Flash supports Quad Read */
#define USE_FSR 0x80 /* use flag status register */
};
#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
......@@ -449,6 +495,7 @@ const struct spi_device_id spi_nor_ids[] = {
{ "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) },
{ "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
{ "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) },
{ "en25qh128", INFO(0x1c7018, 0, 64 * 1024, 256, 0) },
{ "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) },
/* ESMT */
......@@ -488,6 +535,8 @@ const struct spi_device_id spi_nor_ids[] = {
{ "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, 0) },
{ "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K) },
{ "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K) },
{ "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024, USE_FSR) },
{ "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048, USE_FSR) },
/* PMC */
{ "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) },
......@@ -965,6 +1014,10 @@ int spi_nor_scan(struct spi_nor *nor, const struct spi_device_id *id,
else
mtd->_write = spi_nor_write;
if ((info->flags & USE_FSR) &&
nor->wait_till_ready == spi_nor_wait_till_ready)
nor->wait_till_ready = spi_nor_wait_till_fsr_ready;
/* prefer "small sector" erase if possible */
if (info->flags & SECT_4K) {
nor->erase_opcode = SPINOR_OP_BE_4K;
......
......@@ -202,8 +202,7 @@ struct posix_acl *jffs2_get_acl(struct inode *inode, int type)
} else {
acl = ERR_PTR(rc);
}
if (value)
kfree(value);
kfree(value);
if (!IS_ERR(acl))
set_cached_acl(inode, type, acl);
return acl;
......
......@@ -756,8 +756,7 @@ void jffs2_clear_xattr_subsystem(struct jffs2_sb_info *c)
for (i=0; i < XATTRINDEX_HASHSIZE; i++) {
list_for_each_entry_safe(xd, _xd, &c->xattrindex[i], xindex) {
list_del(&xd->xindex);
if (xd->xname)
kfree(xd->xname);
kfree(xd->xname);
jffs2_free_xattr_datum(xd);
}
}
......
......@@ -222,6 +222,7 @@ struct mtd_info {
int (*_lock) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
int (*_unlock) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
int (*_is_locked) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
int (*_block_isreserved) (struct mtd_info *mtd, loff_t ofs);
int (*_block_isbad) (struct mtd_info *mtd, loff_t ofs);
int (*_block_markbad) (struct mtd_info *mtd, loff_t ofs);
int (*_suspend) (struct mtd_info *mtd);
......@@ -302,6 +303,7 @@ static inline void mtd_sync(struct mtd_info *mtd)
int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len);
int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs);
int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs);
int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs);
......
......@@ -810,6 +810,7 @@ extern struct nand_manufacturers nand_manuf_ids[];
extern int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd);
extern int nand_default_bbt(struct mtd_info *mtd);
extern int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs);
extern int nand_isreserved_bbt(struct mtd_info *mtd, loff_t offs);
extern int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt);
extern int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
int allowbbt);
......@@ -947,4 +948,56 @@ static inline int jedec_feature(struct nand_chip *chip)
return chip->jedec_version ? le16_to_cpu(chip->jedec_params.features)
: 0;
}
/**
* struct nand_sdr_timings - SDR NAND chip timings
*
* This struct defines the timing requirements of an SDR NAND chip.
* This information can be found in NAND chip datasheets, and the meaning
* of each timing is described in the ONFI specification:
* www.onfi.org/~/media/ONFI/specs/onfi_3_1_spec.pdf (chapter 4.15 Timing
* Parameters)
*
* All these timings are expressed in picoseconds.
*/
struct nand_sdr_timings {
u32 tALH_min;
u32 tADL_min;
u32 tALS_min;
u32 tAR_min;
u32 tCEA_max;
u32 tCEH_min;
u32 tCH_min;
u32 tCHZ_max;
u32 tCLH_min;
u32 tCLR_min;
u32 tCLS_min;
u32 tCOH_min;
u32 tCS_min;
u32 tDH_min;
u32 tDS_min;
u32 tFEAT_max;
u32 tIR_min;
u32 tITC_max;
u32 tRC_min;
u32 tREA_max;
u32 tREH_min;
u32 tRHOH_min;
u32 tRHW_min;
u32 tRHZ_max;
u32 tRLOH_min;
u32 tRP_min;
u32 tRR_min;
u64 tRST_max;
u32 tWB_max;
u32 tWC_min;
u32 tWH_min;
u32 tWHR_min;
u32 tWP_min;
u32 tWW_min;
};
/* get timing characteristics from ONFI timing mode. */
const struct nand_sdr_timings *onfi_async_timing_mode_to_sdr_timings(int mode);
#endif /* __LINUX_MTD_NAND_H */
......@@ -34,6 +34,7 @@
#define SPINOR_OP_SE 0xd8 /* Sector erase (usually 64KiB) */
#define SPINOR_OP_RDID 0x9f /* Read JEDEC ID */
#define SPINOR_OP_RDCR 0x35 /* Read configuration register */
#define SPINOR_OP_RDFSR 0x70 /* Read flag status register */
/* 4-byte address opcodes - used on Spansion and some Macronix flashes. */
#define SPINOR_OP_READ4 0x13 /* Read data bytes (low frequency) */
......@@ -66,6 +67,9 @@
#define SR_QUAD_EN_MX 0x40 /* Macronix Quad I/O */
/* Flag Status Register bits */
#define FSR_READY 0x80
/* Configuration Register bits. */
#define CR_QUAD_EN_SPAN 0x2 /* Spansion Quad I/O */
......