Commit 5626196a authored by Jens Axboe

Merge branch 'md-next' of https://git.kernel.org/pub/scm/linux/kernel/git/song/md into for-6.2/block

Pull MD fixes from Song.

* 'md-next' of https://git.kernel.org/pub/scm/linux/kernel/git/song/md:
  md/raid1: stop mdx_raid1 thread when raid1 array run failed
  md/raid5: use bdev_write_cache instead of open coding it
  md: fix a crash in mempool_free
  md/raid0, raid10: Don't set discard sectors for request queue
  md/bitmap: Fix bitmap chunk size overflow issues
  md: introduce md_ro_state
  md: factor out __md_set_array_info()
  lib/raid6: drop RAID6_USE_EMPTY_ZERO_PAGE
  raid5-cache: use try_cmpxchg in r5l_wake_reclaim
  drivers/md/md-bitmap: check the return value of md_bitmap_get_counter()
parents 4f8126bb b611ad14
drivers/md/md-bitmap.c:

@@ -486,7 +486,7 @@ void md_bitmap_print_sb(struct bitmap *bitmap)
 	sb = kmap_atomic(bitmap->storage.sb_page);
 	pr_debug("%s: bitmap file superblock:\n", bmname(bitmap));
 	pr_debug("         magic: %08x\n", le32_to_cpu(sb->magic));
-	pr_debug("       version: %d\n", le32_to_cpu(sb->version));
+	pr_debug("       version: %u\n", le32_to_cpu(sb->version));
 	pr_debug("          uuid: %08x.%08x.%08x.%08x\n",
 		 le32_to_cpu(*(__le32 *)(sb->uuid+0)),
 		 le32_to_cpu(*(__le32 *)(sb->uuid+4)),
@@ -497,11 +497,11 @@ void md_bitmap_print_sb(struct bitmap *bitmap)
 	pr_debug("events cleared: %llu\n",
 		 (unsigned long long) le64_to_cpu(sb->events_cleared));
 	pr_debug("         state: %08x\n", le32_to_cpu(sb->state));
-	pr_debug("     chunksize: %d B\n", le32_to_cpu(sb->chunksize));
-	pr_debug("  daemon sleep: %ds\n", le32_to_cpu(sb->daemon_sleep));
+	pr_debug("     chunksize: %u B\n", le32_to_cpu(sb->chunksize));
+	pr_debug("  daemon sleep: %us\n", le32_to_cpu(sb->daemon_sleep));
 	pr_debug("     sync size: %llu KB\n",
 		 (unsigned long long)le64_to_cpu(sb->sync_size)/2);
-	pr_debug("max write behind: %d\n", le32_to_cpu(sb->write_behind));
+	pr_debug("max write behind: %u\n", le32_to_cpu(sb->write_behind));
 	kunmap_atomic(sb);
 }
@@ -2105,7 +2105,8 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
 			bytes = DIV_ROUND_UP(chunks, 8);
 			if (!bitmap->mddev->bitmap_info.external)
 				bytes += sizeof(bitmap_super_t);
-		} while (bytes > (space << 9));
+		} while (bytes > (space << 9) && (chunkshift + BITMAP_BLOCK_SHIFT) <
+			(BITS_PER_BYTE * sizeof(((bitmap_super_t *)0)->chunksize) - 1));
 	} else
 		chunkshift = ffz(~chunksize) - BITMAP_BLOCK_SHIFT;
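
The new termination condition bounds the shift against the width of the on-disk field: sizeof(((bitmap_super_t *)0)->chunksize) is 4 bytes, so chunkshift + BITMAP_BLOCK_SHIFT must stay below 31, otherwise 1 << (chunkshift + BITMAP_BLOCK_SHIFT) wraps a 32-bit value. A minimal userspace sketch of that wrap (not kernel code; the uint32_t stands in for the superblock field):

#include <stdio.h>
#include <stdint.h>

#define BITS_PER_BYTE 8

int main(void)
{
        uint32_t chunksize;     /* stands in for bitmap_super_t.chunksize */
        unsigned int bound = BITS_PER_BYTE * sizeof(chunksize) - 1;    /* 31 */
        unsigned int shift;

        for (shift = 30; shift <= 32; shift++) {
                chunksize = (uint64_t)1 << shift;       /* truncates once shift hits 32 */
                printf("shift %u -> chunksize %u%s\n", shift, chunksize,
                       shift < bound ? "" : "  (loop bound stops growth here)");
        }
        return 0;
}

Compiled and run on a 64-bit host, shift 32 prints 0: the silent truncation the added bound avoids.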
@@ -2150,7 +2151,7 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
 	bitmap->counts.missing_pages = pages;
 	bitmap->counts.chunkshift = chunkshift;
 	bitmap->counts.chunks = chunks;
-	bitmap->mddev->bitmap_info.chunksize = 1 << (chunkshift +
-						     BITMAP_BLOCK_SHIFT);
+	bitmap->mddev->bitmap_info.chunksize = 1UL << (chunkshift +
+						       BITMAP_BLOCK_SHIFT);

 	blocks = min(old_counts.chunks << old_counts.chunkshift,
@@ -2176,8 +2177,8 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
 			bitmap->counts.missing_pages = old_counts.pages;
 			bitmap->counts.chunkshift = old_counts.chunkshift;
 			bitmap->counts.chunks = old_counts.chunks;
-			bitmap->mddev->bitmap_info.chunksize = 1 << (old_counts.chunkshift +
-								     BITMAP_BLOCK_SHIFT);
+			bitmap->mddev->bitmap_info.chunksize =
+				1UL << (old_counts.chunkshift + BITMAP_BLOCK_SHIFT);
 			blocks = old_counts.chunks << old_counts.chunkshift;
 			pr_warn("Could not pre-allocate in-memory bitmap for cluster raid\n");
 			break;
@@ -2195,20 +2196,23 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
 		if (set) {
 			bmc_new = md_bitmap_get_counter(&bitmap->counts, block, &new_blocks, 1);
-			if (*bmc_new == 0) {
-				/* need to set on-disk bits too. */
-				sector_t end = block + new_blocks;
-				sector_t start = block >> chunkshift;
-				start <<= chunkshift;
-				while (start < end) {
-					md_bitmap_file_set_bit(bitmap, block);
-					start += 1 << chunkshift;
+			if (bmc_new) {
+				if (*bmc_new == 0) {
+					/* need to set on-disk bits too. */
+					sector_t end = block + new_blocks;
+					sector_t start = block >> chunkshift;
+
+					start <<= chunkshift;
+					while (start < end) {
+						md_bitmap_file_set_bit(bitmap, block);
+						start += 1 << chunkshift;
+					}
+					*bmc_new = 2;
+					md_bitmap_count_page(&bitmap->counts, block, 1);
+					md_bitmap_set_pending(&bitmap->counts, block);
 				}
-				*bmc_new = 2;
-				md_bitmap_count_page(&bitmap->counts, block, 1);
-				md_bitmap_set_pending(&bitmap->counts, block);
+				*bmc_new |= NEEDED_MASK;
 			}
-			*bmc_new |= NEEDED_MASK;
 			if (new_blocks < old_blocks)
 				old_blocks = new_blocks;
 		}
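
The fix matters because md_bitmap_get_counter() returns NULL when the backing counter page cannot be found or allocated; the old code dereferenced bmc_new unconditionally. A small userspace sketch of the guarded pattern (illustrative stand-ins, not the md API):

#include <stdio.h>
#include <stdlib.h>

#define NEEDED_MASK 0x8000

/* stands in for md_bitmap_get_counter(): may allocate and may fail */
static unsigned short *get_counter(int fail)
{
        return fail ? NULL : calloc(1, sizeof(unsigned short));
}

static void resize_step(int fail)
{
        unsigned short *bmc_new = get_counter(fail);

        if (bmc_new) {                  /* the check the fix adds */
                if (*bmc_new == 0)
                        *bmc_new = 2;   /* freshly created counter */
                *bmc_new |= NEEDED_MASK;
                printf("counter = 0x%x\n", *bmc_new);
                free(bmc_new);
        } else {
                printf("no counter page, skipped\n");
        }
}

int main(void)
{
        resize_step(0);         /* counter = 0x8002 */
        resize_step(1);         /* no counter page, skipped */
        return 0;
}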
@@ -2534,6 +2538,9 @@ chunksize_store(struct mddev *mddev, const char *buf, size_t len)
 	if (csize < 512 ||
 	    !is_power_of_2(csize))
 		return -EINVAL;
+	if (BITS_PER_LONG > 32 && csize >= (1ULL << (BITS_PER_BYTE *
+		sizeof(((bitmap_super_t *)0)->chunksize))))
+		return -EOVERFLOW;
 	mddev->bitmap_info.chunksize = csize;
 	return len;
 }
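
On 64-bit kernels the sysfs string is parsed into an unsigned long, so a power of two of 2^32 or more passed the old checks and was silently truncated when stored into the 32-bit superblock field; the new test rejects it with -EOVERFLOW. A userspace sketch of the same validation, with a hypothetical helper name:

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

#define BITS_PER_BYTE 8

/* illustrative stand-in for the chunksize_store() checks */
static int check_chunksize(unsigned long long csize)
{
        uint32_t on_disk;       /* bitmap_super_t.chunksize is 32 bits */

        if (csize < 512 || (csize & (csize - 1)))       /* !is_power_of_2() */
                return -EINVAL;
        if (sizeof(unsigned long) * BITS_PER_BYTE > 32 &&       /* BITS_PER_LONG > 32 */
            csize >= (1ULL << (BITS_PER_BYTE * sizeof(on_disk))))
                return -EOVERFLOW;      /* would truncate on store */
        on_disk = csize;
        printf("%llu stored as %u\n", csize, on_disk);
        return 0;
}

int main(void)
{
        check_chunksize(1ULL << 20);                            /* ok */
        printf("ret = %d\n", check_chunksize(1ULL << 32));      /* -EOVERFLOW on 64-bit */
        return 0;
}

The BITS_PER_LONG > 32 condition lets the check compile away on 32-bit builds, where an unsigned long can never reach 2^32.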
(The next file's diff is collapsed in the web view and is not reproduced here.)
drivers/md/raid0.c:

@@ -398,7 +398,6 @@ static int raid0_run(struct mddev *mddev)
 		blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
 		blk_queue_max_write_zeroes_sectors(mddev->queue, mddev->chunk_sectors);
-		blk_queue_max_discard_sectors(mddev->queue, UINT_MAX);
 		blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
 		blk_queue_io_opt(mddev->queue,
drivers/md/raid1.c:

@@ -3159,6 +3159,7 @@ static int raid1_run(struct mddev *mddev)
 	 * RAID1 needs at least one disk in active
 	 */
 	if (conf->raid_disks - mddev->degraded < 1) {
+		md_unregister_thread(&conf->thread);
 		ret = -EINVAL;
 		goto abort;
 	}
drivers/md/raid10.c:

@@ -4145,8 +4145,6 @@ static int raid10_run(struct mddev *mddev)
 	conf->thread = NULL;

 	if (mddev->queue) {
-		blk_queue_max_discard_sectors(mddev->queue,
-					      UINT_MAX);
 		blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
 		blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
 		raid10_set_io_opt(conf);
drivers/md/raid5-cache.c:

@@ -1565,11 +1565,12 @@ void r5l_wake_reclaim(struct r5l_log *log, sector_t space)
 	if (!log)
 		return;

+	target = READ_ONCE(log->reclaim_target);
 	do {
-		target = log->reclaim_target;
 		if (new < target)
 			return;
-	} while (cmpxchg(&log->reclaim_target, target, new) != target);
+	} while (!try_cmpxchg(&log->reclaim_target, &target, new));
 	md_wakeup_thread(log->reclaim_thread);
 }
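
try_cmpxchg() has update-on-failure semantics: when the exchange loses a race, it writes the value it actually observed back into target, so the loop needs only the single READ_ONCE() seed above instead of re-reading every iteration (and on x86 the compiler can reuse the cmpxchg flags rather than emitting a separate compare). C11 atomics have the same shape; a userspace analogue (names illustrative):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic long reclaim_target;

static void wake_reclaim(long new_space)
{
        long target = atomic_load(&reclaim_target);     /* one load up front, like READ_ONCE() */

        do {
                if (new_space < target)
                        return;         /* a larger request is already pending */
                /* on failure, compare_exchange writes the current value back
                 * into target, so there is no explicit re-read in the loop */
        } while (!atomic_compare_exchange_weak(&reclaim_target, &target, new_space));
}

int main(void)
{
        wake_reclaim(128);
        wake_reclaim(64);       /* smaller: leaves the target alone */
        printf("target = %ld\n", atomic_load(&reclaim_target)); /* prints 128 */
        return 0;
}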
@@ -3061,7 +3062,6 @@ void r5c_update_on_rdev_error(struct mddev *mddev, struct md_rdev *rdev)

 int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
 {
-	struct request_queue *q = bdev_get_queue(rdev->bdev);
 	struct r5l_log *log;
 	int ret;

@@ -3090,9 +3090,7 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
 	if (!log)
 		return -ENOMEM;
 	log->rdev = rdev;
-
-	log->need_cache_flush = test_bit(QUEUE_FLAG_WC, &q->queue_flags) != 0;
-
+	log->need_cache_flush = bdev_write_cache(rdev->bdev);
 	log->uuid_checksum = crc32c_le(~0, rdev->mddev->uuid,
 				       sizeof(rdev->mddev->uuid));
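
bdev_write_cache() is the block-layer helper this series substitutes for the open-coded QUEUE_FLAG_WC tests here and in raid5-ppl.c below. For reference, its definition in include/linux/blkdev.h is essentially the old expression behind a name (quoted from memory; the exact body may differ across kernel versions):

static inline bool bdev_write_cache(struct block_device *bdev)
{
        return test_bit(QUEUE_FLAG_WC, &bdev_get_queue(bdev)->queue_flags);
}

Using the helper also lets both callers drop their local request_queue variable.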
drivers/md/raid5-ppl.c:

@@ -1301,8 +1301,6 @@ static int ppl_validate_rdev(struct md_rdev *rdev)

 static void ppl_init_child_log(struct ppl_log *log, struct md_rdev *rdev)
 {
-	struct request_queue *q;
-
 	if ((rdev->ppl.size << 9) >= (PPL_SPACE_SIZE +
 				      PPL_HEADER_SIZE) * 2) {
 		log->use_multippl = true;
@@ -1316,8 +1314,7 @@ static void ppl_init_child_log(struct ppl_log *log, struct md_rdev *rdev)
 	}
 	log->next_io_sector = rdev->ppl.sector;

-	q = bdev_get_queue(rdev->bdev);
-	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
+	if (bdev_write_cache(rdev->bdev))
 		log->wb_cache_on = true;
 }
include/linux/raid/pq.h:

@@ -10,17 +10,9 @@

 #ifdef __KERNEL__

-/* Set to 1 to use kernel-wide empty_zero_page */
-#define RAID6_USE_EMPTY_ZERO_PAGE 0
 #include <linux/blkdev.h>

-/* We need a pre-zeroed page... if we don't want to use the kernel-provided
-   one define it here */
-#if RAID6_USE_EMPTY_ZERO_PAGE
-# define raid6_empty_zero_page empty_zero_page
-#else
 extern const char raid6_empty_zero_page[PAGE_SIZE];
-#endif

 #else /* ! __KERNEL__ */

 /* Used for testing in user space */
lib/raid6/algos.c:

@@ -18,12 +18,10 @@
 #else
 #include <linux/module.h>
 #include <linux/gfp.h>
-#if !RAID6_USE_EMPTY_ZERO_PAGE
 /* In .bss so it's zeroed */
 const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(256)));
 EXPORT_SYMBOL(raid6_empty_zero_page);
 #endif
-#endif

 struct raid6_calls raid6_call;
 EXPORT_SYMBOL_GPL(raid6_call);