Commit 0bb3acdc authored by Qu Wenruo, committed by David Sterba

btrfs: update SCRUB_MAX_PAGES_PER_BLOCK

Use BTRFS_MAX_METADATA_BLOCKSIZE and SZ_4K (the minimal sectorsize) to
calculate this value.
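
With the constants the kernel defines at this point (BTRFS_MAX_METADATA_BLOCKSIZE
is 64K in fs/btrfs/ctree.h, SZ_4K is 4K from include/linux/sizes.h), the new
expression still evaluates to the old hard-coded 16. A minimal sketch of the
arithmetic, illustration only and not part of the patch:

    /* Illustration only: how the new macro evaluates. */
    #define SCRUB_MAX_PAGES_PER_BLOCK	(BTRFS_MAX_METADATA_BLOCKSIZE / SZ_4K)
    /* 65536 / 4096 == 16, the same value the removed literal 16 encoded,
     * but now derived from the real limits instead of being hard-coded. */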

Also remove one stale comment on the value: with recent subpage support,
SCRUB_MAX_PAGES_PER_BLOCK * PAGE_SIZE is already beyond BTRFS_STRIPE_LEN;
we just don't use the full page.
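
To see why that claim is stale (a sketch, assuming BTRFS_STRIPE_LEN is still
the 64K value from fs/btrfs/volumes.h): on a 64K-page architecture,

    SCRUB_MAX_PAGES_PER_BLOCK * PAGE_SIZE = 16 * 64K = 1M > BTRFS_STRIPE_LEN (64K)

yet only nodesize/sectorsize bytes of each page are actually used, so the data
a scrubbed block covers never exceeds the stripe length in practice.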

Also, since we're here, convert the BUG_ON()s related to
SCRUB_MAX_PAGES_PER_BLOCK to ASSERT()s, as those ASSERT()s are really only
there for developers to catch obvious bugs early, not to make end users
suffer.
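
For context, a rough sketch of the distinction (not the verbatim macro from the
btrfs headers): BUG_ON() always evaluates its condition and crashes the kernel
for every user, while btrfs's ASSERT() only checks when the developer option
CONFIG_BTRFS_ASSERT is enabled and compiles away otherwise:

    /* Sketch only; the in-tree ASSERT() also prints the failed expression. */
    #ifdef CONFIG_BTRFS_ASSERT
    #define ASSERT(expr)	BUG_ON(!(expr))	/* loud, immediate failure for developers */
    #else
    #define ASSERT(expr)	((void)0)	/* no-op in production builds */
    #endif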
Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 8697b8f8
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -49,11 +49,10 @@ struct scrub_ctx;
 #define SCRUB_BIOS_PER_SCTX	64	/* 8MB per device in flight */
 
 /*
- * the following value times PAGE_SIZE needs to be large enough to match the
+ * The following value times PAGE_SIZE needs to be large enough to match the
  * largest node/leaf/sector size that shall be supported.
- * Values larger than BTRFS_STRIPE_LEN are not supported.
  */
-#define SCRUB_MAX_PAGES_PER_BLOCK	16	/* 64k per node/leaf/sector */
+#define SCRUB_MAX_PAGES_PER_BLOCK	(BTRFS_MAX_METADATA_BLOCKSIZE / SZ_4K)
 
 struct scrub_recover {
 	refcount_t		refs;
@@ -1313,7 +1312,7 @@ static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
 		recover->bioc = bioc;
 		recover->map_length = mapped_length;
 
-		BUG_ON(page_index >= SCRUB_MAX_PAGES_PER_BLOCK);
+		ASSERT(page_index < SCRUB_MAX_PAGES_PER_BLOCK);
 
 		nmirrors = min(scrub_nr_raid_mirrors(bioc), BTRFS_MAX_MIRRORS);
 
@@ -2297,7 +2296,7 @@ static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u32 len,
 			scrub_block_put(sblock);
 			return -ENOMEM;
 		}
-		BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
+		ASSERT(index < SCRUB_MAX_PAGES_PER_BLOCK);
 		scrub_page_get(spage);
 		sblock->pagev[index] = spage;
 		spage->sblock = sblock;
@@ -2631,7 +2630,7 @@ static int scrub_pages_for_parity(struct scrub_parity *sparity,
 			scrub_block_put(sblock);
 			return -ENOMEM;
 		}
-		BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
+		ASSERT(index < SCRUB_MAX_PAGES_PER_BLOCK);
 		/* For scrub block */
 		scrub_page_get(spage);
 		sblock->pagev[index] = spage;