Commit d68156cf authored by Vimal Singh, committed by David Woodhouse

[MTD] [NAND] nand_ecc.c: adding support for 512 byte ecc

Support 512 byte ECC calculation

[FM: updated two comments]
Signed-off-by: Vimal Singh <vimalsingh@ti.com>
Signed-off-by: Frans Meulenbroeks <fransmeulenbroeks@gmail.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
parent dffc8d66
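
For context, a minimal standalone sketch (illustrative only, not part of the patch): the change derives a block-size multiplier from the chip's configured ECC step size, so ecc.size = 256 gives a multiplier of 1 and ecc.size = 512 gives 2, which doubles the bound of the unrolled main loop (each pass of that loop covers 16 longwords, i.e. 64 bytes).

/* Illustrative sketch only; mirrors the eccsize_mult arithmetic used by the
 * patch, but as a plain userspace program so no struct nand_chip is needed. */
#include <stdio.h>

int main(void)
{
	unsigned int sizes[] = { 256, 512 };	/* supported ECC step sizes */
	unsigned int i;

	for (i = 0; i < 2; i++) {
		unsigned int eccsize_mult = sizes[i] >> 8;	/* 1 or 2 */

		printf("ecc.size=%u -> eccsize_mult=%u -> %u iterations covering %u bytes\n",
		       sizes[i], eccsize_mult,
		       eccsize_mult << 2,		/* loop bound used in the patch */
		       (eccsize_mult << 2) * 16 * 4);	/* 16 longwords of 4 bytes per pass */
	}
	return 0;
}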
@@ -42,6 +42,8 @@
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
 #include <linux/mtd/nand_ecc.h>
 #include <asm/byteorder.h>
 #else
@@ -148,8 +150,9 @@ static const char addressbits[256] = {
 };
 
 /**
- * nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256-byte block
- * @mtd:	MTD block structure (unused)
+ * nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte
+ *			 block
+ * @mtd:	MTD block structure
  * @buf:	input buffer with raw data
  * @code:	output buffer with ECC
  */
@@ -158,13 +161,18 @@ int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf,
 {
 	int i;
 	const uint32_t *bp = (uint32_t *)buf;
+	/* 256 or 512 bytes/ecc  */
+	const uint32_t eccsize_mult =
+			(((struct nand_chip *)mtd->priv)->ecc.size) >> 8;
 	uint32_t cur;		/* current value in buffer */
-	/* rp0..rp15 are the various accumulated parities (per byte) */
+	/* rp0..rp15..rp17 are the various accumulated parities (per byte) */
 	uint32_t rp0, rp1, rp2, rp3, rp4, rp5, rp6, rp7;
-	uint32_t rp8, rp9, rp10, rp11, rp12, rp13, rp14, rp15;
+	uint32_t rp8, rp9, rp10, rp11, rp12, rp13, rp14, rp15, rp16;
+	uint32_t uninitialized_var(rp17);	/* to make compiler happy */
 	uint32_t par;		/* the cumulative parity for all data */
 	uint32_t tmppar;	/* the cumulative parity for this iteration;
-				   for rp12 and rp14 at the end of the loop */
+				   for rp12, rp14 and rp16 at the end of the
+				   loop */
 
 	par = 0;
 	rp4 = 0;
@@ -173,6 +181,7 @@ int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf,
 	rp10 = 0;
 	rp12 = 0;
 	rp14 = 0;
+	rp16 = 0;
 
 	/*
 	 * The loop is unrolled a number of times;
@@ -181,10 +190,10 @@ int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf,
 	 * Note: passing unaligned data might give a performance penalty.
 	 * It is assumed that the buffers are aligned.
 	 * tmppar is the cumulative sum of this iteration.
-	 * needed for calculating rp12, rp14 and par
+	 * needed for calculating rp12, rp14, rp16 and par
 	 * also used as a performance improvement for rp6, rp8 and rp10
 	 */
-	for (i = 0; i < 4; i++) {
+	for (i = 0; i < eccsize_mult << 2; i++) {
 		cur = *bp++;
 		tmppar = cur;
 		rp4 ^= cur;
@@ -247,12 +256,14 @@ int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf,
 			rp12 ^= tmppar;
 		if ((i & 0x2) == 0)
 			rp14 ^= tmppar;
+		if (eccsize_mult == 2 && (i & 0x4) == 0)
+			rp16 ^= tmppar;
 	}
 
 	/*
 	 * handle the fact that we use longword operations
-	 * we'll bring rp4..rp14 back to single byte entities by shifting and
-	 * xoring first fold the upper and lower 16 bits,
+	 * we'll bring rp4..rp14..rp16 back to single byte entities by
+	 * shifting and xoring first fold the upper and lower 16 bits,
 	 * then the upper and lower 8 bits.
 	 */
 	rp4 ^= (rp4 >> 16);
@@ -273,6 +284,11 @@ int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf,
 	rp14 ^= (rp14 >> 16);
 	rp14 ^= (rp14 >> 8);
 	rp14 &= 0xff;
+	if (eccsize_mult == 2) {
+		rp16 ^= (rp16 >> 16);
+		rp16 ^= (rp16 >> 8);
+		rp16 &= 0xff;
+	}
 
 	/*
 	 * we also need to calculate the row parity for rp0..rp3
@@ -315,7 +331,7 @@ int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf,
 	par &= 0xff;
 
 	/*
-	 * and calculate rp5..rp15
+	 * and calculate rp5..rp15..rp17
 	 * note that par = rp4 ^ rp5 and due to the commutative property
 	 * of the ^ operator we can say:
 	 * rp5 = (par ^ rp4);
@@ -329,6 +345,8 @@ int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf,
 	rp11 = (par ^ rp10) & 0xff;
 	rp13 = (par ^ rp12) & 0xff;
 	rp15 = (par ^ rp14) & 0xff;
+	if (eccsize_mult == 2)
+		rp17 = (par ^ rp16) & 0xff;
 
 	/*
 	 * Finally calculate the ecc bits.
@@ -375,6 +393,7 @@ int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf,
 	    (invparity[rp9] << 1) |
 	    (invparity[rp8]);
 #endif
-	code[2] =
-	    (invparity[par & 0xf0] << 7) |
-	    (invparity[par & 0x0f] << 6) |
+	if (eccsize_mult == 1)
+		code[2] =
+		    (invparity[par & 0xf0] << 7) |
+		    (invparity[par & 0x0f] << 6) |
@@ -383,24 +402,37 @@ int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf,
-	    (invparity[par & 0xaa] << 3) |
-	    (invparity[par & 0x55] << 2) |
-	    3;
+		    (invparity[par & 0xaa] << 3) |
+		    (invparity[par & 0x55] << 2) |
+		    3;
+	else
+		code[2] =
+		    (invparity[par & 0xf0] << 7) |
+		    (invparity[par & 0x0f] << 6) |
+		    (invparity[par & 0xcc] << 5) |
+		    (invparity[par & 0x33] << 4) |
+		    (invparity[par & 0xaa] << 3) |
+		    (invparity[par & 0x55] << 2) |
+		    (invparity[rp17] << 1) |
+		    (invparity[rp16] << 0);
 	return 0;
 }
 EXPORT_SYMBOL(nand_calculate_ecc);
 
 /**
  * nand_correct_data - [NAND Interface] Detect and correct bit error(s)
- * @mtd:	MTD block structure (unused)
+ * @mtd:	MTD block structure
  * @buf:	raw data read from the chip
  * @read_ecc:	ECC from the chip
  * @calc_ecc:	the ECC calculated from raw data
  *
- * Detect and correct a 1 bit error for 256 byte block
+ * Detect and correct a 1 bit error for 256/512 byte block
  */
 int nand_correct_data(struct mtd_info *mtd, unsigned char *buf,
 		      unsigned char *read_ecc, unsigned char *calc_ecc)
 {
 	unsigned char b0, b1, b2;
 	unsigned char byte_addr, bit_addr;
+	/* 256 or 512 bytes/ecc  */
+	const uint32_t eccsize_mult =
+			(((struct nand_chip *)mtd->priv)->ecc.size) >> 8;
 
 	/*
 	 * b0 to b2 indicate which bit is faulty (if any)
@@ -426,10 +458,12 @@ int nand_correct_data(struct mtd_info *mtd, unsigned char *buf,
 	if ((((b0 ^ (b0 >> 1)) & 0x55) == 0x55) &&
 	    (((b1 ^ (b1 >> 1)) & 0x55) == 0x55) &&
-	    (((b2 ^ (b2 >> 1)) & 0x54) == 0x54)) { /* single bit error */
+	    ((eccsize_mult == 1 && ((b2 ^ (b2 >> 1)) & 0x54) == 0x54) ||
+	     (eccsize_mult == 2 && ((b2 ^ (b2 >> 1)) & 0x55) == 0x55))) {
+		/* single bit error */
 		/*
-		 * rp15/13/11/9/7/5/3/1 indicate which byte is the faulty byte
-		 * cp 5/3/1 indicate the faulty bit.
+		 * rp17/rp15/13/11/9/7/5/3/1 indicate which byte is the faulty
+		 * byte, cp 5/3/1 indicate the faulty bit.
 		 * A lookup table (called addressbits) is used to filter
 		 * the bits from the byte they are in.
 		 * A marginal optimisation is possible by having three
@@ -443,7 +477,11 @@ int nand_correct_data(struct mtd_info *mtd, unsigned char *buf,
 		 * We could also do addressbits[b2] >> 1 but for the
 		 * performace it does not make any difference
 		 */
-		byte_addr = (addressbits[b1] << 4) + addressbits[b0];
+		if (eccsize_mult == 1)
+			byte_addr = (addressbits[b1] << 4) + addressbits[b0];
+		else
+			byte_addr = (addressbits[b2 & 0x3] << 8) +
+				(addressbits[b1] << 4) + addressbits[b0];
 		bit_addr = addressbits[b2 >> 2];
 		/* flip the bit */
 		buf[byte_addr] ^= (1 << bit_addr);
......
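
As a rough driver-side usage sketch (assumed calling sequence, not taken from the patch): with chip->ecc.size set to 512, nand_calculate_ecc() still emits three ECC bytes, now covering a whole 512-byte step, and nand_correct_data() can repair a single flipped bit within that step; a negative return is assumed to signal an uncorrectable error, as in the existing 256-byte path.

/* Hypothetical caller; the actual page read and error handling are omitted.
 * read_ecc holds the ECC bytes stored on flash, calc_ecc is recomputed from
 * the data that was just read back. */
unsigned char data[512], read_ecc[3], calc_ecc[3];
int ret;

/* ... read 512 data bytes into data[] and 3 ECC bytes into read_ecc[] ... */
nand_calculate_ecc(mtd, data, calc_ecc);
ret = nand_correct_data(mtd, data, read_ecc, calc_ecc);
if (ret < 0)
	printk(KERN_ERR "uncorrectable ECC error in 512-byte block\n");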