Commit 163ced61 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'mtd/fixes-for-4.17-rc6' of git://git.infradead.org/linux-mtd

Pull mtd fixes from Boris Brezillon:
 "NAND fixes:
   - Fix read path of the Marvell NAND driver
   - Make sure we don't pass a u64 to ndelay()

  CFI fix:
   - Fix the map_word_andequal() implementation"

* tag 'mtd/fixes-for-4.17-rc6' of git://git.infradead.org/linux-mtd:
  mtd: rawnand: Fix return type of __DIVIDE() when called with 32-bit
  mtd: rawnand: marvell: Fix read logic for layouts with ->nchunks > 2
  mtd: Fix comparison in map_word_andequal()
parents d90eb183 9f825e74
@@ -1194,11 +1194,13 @@ static void marvell_nfc_hw_ecc_bch_read_chunk(struct nand_chip *chip, int chunk,
 		NDCB0_CMD2(NAND_CMD_READSTART);

 	/*
-	 * Trigger the naked read operation only on the last chunk.
-	 * Otherwise, use monolithic read.
+	 * Trigger the monolithic read on the first chunk, then naked read on
+	 * intermediate chunks and finally a last naked read on the last chunk.
	 */
-	if (lt->nchunks == 1 || (chunk < lt->nchunks - 1))
+	if (chunk == 0)
 		nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW);
+	else if (chunk < lt->nchunks - 1)
+		nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_NAKED_RW);
 	else
 		nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW);
...
@@ -312,7 +312,7 @@ void map_destroy(struct mtd_info *mtd);
 ({ \
 	int i, ret = 1; \
 	for (i = 0; i < map_words(map); i++) { \
-		if (((val1).x[i] & (val2).x[i]) != (val2).x[i]) { \
+		if (((val1).x[i] & (val2).x[i]) != (val3).x[i]) { \
 			ret = 0; \
 			break; \
 		} \
...
@@ -867,11 +867,17 @@ struct nand_op_instr {
  * tBERS (during an erase) which all of them are u64 values that cannot be
  * divided by usual kernel macros and must be handled with the special
  * DIV_ROUND_UP_ULL() macro.
+ *
+ * Cast to type of dividend is needed here to guarantee that the result won't
+ * be an unsigned long long when the dividend is an unsigned long (or smaller),
+ * which is what the compiler does when it sees ternary operator with 2
+ * different return types (picks the largest type to make sure there's no
+ * loss).
  */
 #define __DIVIDE(dividend, divisor) ({ \
-	sizeof(dividend) == sizeof(u32) ? \
-	DIV_ROUND_UP(dividend, divisor) : \
-	DIV_ROUND_UP_ULL(dividend, divisor); \
+	(__typeof__(dividend))(sizeof(dividend) <= sizeof(unsigned long) ? \
+			       DIV_ROUND_UP(dividend, divisor) : \
+			       DIV_ROUND_UP_ULL(dividend, divisor)); \
 	})
 #define PSEC_TO_NSEC(x) __DIVIDE(x, 1000)
 #define PSEC_TO_MSEC(x) __DIVIDE(x, 1000000000)
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment