Commit ce4a71ee, authored by Qu Wenruo, committed by David Sterba

btrfs: subpage: remove btrfs_fs_info::subpage_info member

The member btrfs_fs_info::subpage_info stores the cached bitmap start
position inside the merged bitmap.

However in reality there is only one thing depending on the sectorsize,
bitmap_nr_bits, which records the number of sectors that fit inside a
page.

The sub-bitmaps have a fixed order, thus it's just a quick
multiplication to calculate the start position of each sub-bitmap.
Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 2c70fe16
...@@ -1285,7 +1285,6 @@ void btrfs_free_fs_info(struct btrfs_fs_info *fs_info) ...@@ -1285,7 +1285,6 @@ void btrfs_free_fs_info(struct btrfs_fs_info *fs_info)
btrfs_extent_buffer_leak_debug_check(fs_info); btrfs_extent_buffer_leak_debug_check(fs_info);
kfree(fs_info->super_copy); kfree(fs_info->super_copy);
kfree(fs_info->super_for_commit); kfree(fs_info->super_for_commit);
kfree(fs_info->subpage_info);
kvfree(fs_info); kvfree(fs_info);
} }
...@@ -3322,6 +3321,7 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device ...@@ -3322,6 +3321,7 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
fs_info->nodesize = nodesize; fs_info->nodesize = nodesize;
fs_info->sectorsize = sectorsize; fs_info->sectorsize = sectorsize;
fs_info->sectorsize_bits = ilog2(sectorsize); fs_info->sectorsize_bits = ilog2(sectorsize);
fs_info->sectors_per_page = (PAGE_SIZE >> fs_info->sectorsize_bits);
fs_info->csums_per_leaf = BTRFS_MAX_ITEM_SIZE(fs_info) / fs_info->csum_size; fs_info->csums_per_leaf = BTRFS_MAX_ITEM_SIZE(fs_info) / fs_info->csum_size;
fs_info->stripesize = stripesize; fs_info->stripesize = stripesize;
...@@ -3346,20 +3346,10 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device ...@@ -3346,20 +3346,10 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
*/ */
fs_info->max_inline = min_t(u64, fs_info->max_inline, fs_info->sectorsize); fs_info->max_inline = min_t(u64, fs_info->max_inline, fs_info->sectorsize);
if (sectorsize < PAGE_SIZE) { if (sectorsize < PAGE_SIZE)
struct btrfs_subpage_info *subpage_info;
btrfs_warn(fs_info, btrfs_warn(fs_info,
"read-write for sector size %u with page size %lu is experimental", "read-write for sector size %u with page size %lu is experimental",
sectorsize, PAGE_SIZE); sectorsize, PAGE_SIZE);
subpage_info = kzalloc(sizeof(*subpage_info), GFP_KERNEL);
if (!subpage_info) {
ret = -ENOMEM;
goto fail_alloc;
}
btrfs_init_subpage_info(subpage_info, sectorsize);
fs_info->subpage_info = subpage_info;
}
ret = btrfs_init_workqueues(fs_info); ret = btrfs_init_workqueues(fs_info);
if (ret) if (ret)
......
...@@ -1440,9 +1440,9 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode, ...@@ -1440,9 +1440,9 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
} }
if (btrfs_is_subpage(fs_info, inode->vfs_inode.i_mapping)) { if (btrfs_is_subpage(fs_info, inode->vfs_inode.i_mapping)) {
ASSERT(fs_info->subpage_info); ASSERT(fs_info->sectors_per_page > 1);
btrfs_get_subpage_dirty_bitmap(fs_info, folio, &dirty_bitmap); btrfs_get_subpage_dirty_bitmap(fs_info, folio, &dirty_bitmap);
bitmap_size = fs_info->subpage_info->bitmap_nr_bits; bitmap_size = fs_info->sectors_per_page;
} }
for (cur = start; cur < start + len; cur += fs_info->sectorsize) for (cur = start; cur < start + len; cur += fs_info->sectorsize)
set_bit((cur - folio_start) >> fs_info->sectorsize_bits, &range_bitmap); set_bit((cur - folio_start) >> fs_info->sectorsize_bits, &range_bitmap);
...@@ -1827,7 +1827,7 @@ static int submit_eb_subpage(struct page *page, struct writeback_control *wbc) ...@@ -1827,7 +1827,7 @@ static int submit_eb_subpage(struct page *page, struct writeback_control *wbc)
int sectors_per_node = fs_info->nodesize >> fs_info->sectorsize_bits; int sectors_per_node = fs_info->nodesize >> fs_info->sectorsize_bits;
/* Lock and write each dirty extent buffers in the range */ /* Lock and write each dirty extent buffers in the range */
while (bit_start < fs_info->subpage_info->bitmap_nr_bits) { while (bit_start < fs_info->sectors_per_page) {
struct btrfs_subpage *subpage = folio_get_private(folio); struct btrfs_subpage *subpage = folio_get_private(folio);
struct extent_buffer *eb; struct extent_buffer *eb;
unsigned long flags; unsigned long flags;
...@@ -1843,7 +1843,7 @@ static int submit_eb_subpage(struct page *page, struct writeback_control *wbc) ...@@ -1843,7 +1843,7 @@ static int submit_eb_subpage(struct page *page, struct writeback_control *wbc)
break; break;
} }
spin_lock_irqsave(&subpage->lock, flags); spin_lock_irqsave(&subpage->lock, flags);
if (!test_bit(bit_start + fs_info->subpage_info->dirty_offset, if (!test_bit(bit_start + btrfs_bitmap_nr_dirty * fs_info->sectors_per_page,
subpage->bitmaps)) { subpage->bitmaps)) {
spin_unlock_irqrestore(&subpage->lock, flags); spin_unlock_irqrestore(&subpage->lock, flags);
spin_unlock(&page->mapping->i_private_lock); spin_unlock(&page->mapping->i_private_lock);
......
...@@ -703,8 +703,8 @@ struct btrfs_fs_info { ...@@ -703,8 +703,8 @@ struct btrfs_fs_info {
* running. * running.
*/ */
refcount_t scrub_workers_refcnt; refcount_t scrub_workers_refcnt;
u32 sectors_per_page;
struct workqueue_struct *scrub_workers; struct workqueue_struct *scrub_workers;
struct btrfs_subpage_info *subpage_info;
struct btrfs_discard_ctl discard_ctl; struct btrfs_discard_ctl discard_ctl;
......
...@@ -88,37 +88,6 @@ bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info, struct address_space ...@@ -88,37 +88,6 @@ bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info, struct address_space
} }
#endif #endif
void btrfs_init_subpage_info(struct btrfs_subpage_info *subpage_info, u32 sectorsize)
{
unsigned int cur = 0;
unsigned int nr_bits;
ASSERT(IS_ALIGNED(PAGE_SIZE, sectorsize));
nr_bits = PAGE_SIZE / sectorsize;
subpage_info->bitmap_nr_bits = nr_bits;
subpage_info->uptodate_offset = cur;
cur += nr_bits;
subpage_info->dirty_offset = cur;
cur += nr_bits;
subpage_info->writeback_offset = cur;
cur += nr_bits;
subpage_info->ordered_offset = cur;
cur += nr_bits;
subpage_info->checked_offset = cur;
cur += nr_bits;
subpage_info->locked_offset = cur;
cur += nr_bits;
subpage_info->total_nr_bits = cur;
}
int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info, int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info,
struct folio *folio, enum btrfs_subpage_type type) struct folio *folio, enum btrfs_subpage_type type)
{ {
...@@ -165,7 +134,7 @@ struct btrfs_subpage *btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info, ...@@ -165,7 +134,7 @@ struct btrfs_subpage *btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info,
ASSERT(fs_info->sectorsize < PAGE_SIZE); ASSERT(fs_info->sectorsize < PAGE_SIZE);
real_size = struct_size(ret, bitmaps, real_size = struct_size(ret, bitmaps,
BITS_TO_LONGS(fs_info->subpage_info->total_nr_bits)); BITS_TO_LONGS(btrfs_bitmap_nr_max * fs_info->sectors_per_page));
ret = kzalloc(real_size, GFP_NOFS); ret = kzalloc(real_size, GFP_NOFS);
if (!ret) if (!ret)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
...@@ -248,7 +217,7 @@ static void btrfs_subpage_assert(const struct btrfs_fs_info *fs_info, ...@@ -248,7 +217,7 @@ static void btrfs_subpage_assert(const struct btrfs_fs_info *fs_info,
\ \
btrfs_subpage_assert(fs_info, folio, start, len); \ btrfs_subpage_assert(fs_info, folio, start, len); \
__start_bit = offset_in_page(start) >> fs_info->sectorsize_bits; \ __start_bit = offset_in_page(start) >> fs_info->sectorsize_bits; \
__start_bit += fs_info->subpage_info->name##_offset; \ __start_bit += fs_info->sectors_per_page * btrfs_bitmap_nr_##name; \
__start_bit; \ __start_bit; \
}) })
...@@ -420,13 +389,13 @@ void btrfs_folio_end_writer_lock(const struct btrfs_fs_info *fs_info, ...@@ -420,13 +389,13 @@ void btrfs_folio_end_writer_lock(const struct btrfs_fs_info *fs_info,
#define subpage_test_bitmap_all_set(fs_info, subpage, name) \ #define subpage_test_bitmap_all_set(fs_info, subpage, name) \
bitmap_test_range_all_set(subpage->bitmaps, \ bitmap_test_range_all_set(subpage->bitmaps, \
fs_info->subpage_info->name##_offset, \ fs_info->sectors_per_page * btrfs_bitmap_nr_##name, \
fs_info->subpage_info->bitmap_nr_bits) fs_info->sectors_per_page)
#define subpage_test_bitmap_all_zero(fs_info, subpage, name) \ #define subpage_test_bitmap_all_zero(fs_info, subpage, name) \
bitmap_test_range_all_zero(subpage->bitmaps, \ bitmap_test_range_all_zero(subpage->bitmaps, \
fs_info->subpage_info->name##_offset, \ fs_info->sectors_per_page * btrfs_bitmap_nr_##name, \
fs_info->subpage_info->bitmap_nr_bits) fs_info->sectors_per_page)
void btrfs_subpage_set_uptodate(const struct btrfs_fs_info *fs_info, void btrfs_subpage_set_uptodate(const struct btrfs_fs_info *fs_info,
struct folio *folio, u64 start, u32 len) struct folio *folio, u64 start, u32 len)
...@@ -805,7 +774,7 @@ void btrfs_folio_set_writer_lock(const struct btrfs_fs_info *fs_info, ...@@ -805,7 +774,7 @@ void btrfs_folio_set_writer_lock(const struct btrfs_fs_info *fs_info,
ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits)); ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits));
bitmap_set(subpage->bitmaps, start_bit, nbits); bitmap_set(subpage->bitmaps, start_bit, nbits);
ret = atomic_add_return(nbits, &subpage->writers); ret = atomic_add_return(nbits, &subpage->writers);
ASSERT(ret <= fs_info->subpage_info->bitmap_nr_bits); ASSERT(ret <= fs_info->sectors_per_page);
spin_unlock_irqrestore(&subpage->lock, flags); spin_unlock_irqrestore(&subpage->lock, flags);
} }
...@@ -821,14 +790,13 @@ bool btrfs_subpage_find_writer_locked(const struct btrfs_fs_info *fs_info, ...@@ -821,14 +790,13 @@ bool btrfs_subpage_find_writer_locked(const struct btrfs_fs_info *fs_info,
struct folio *folio, u64 search_start, struct folio *folio, u64 search_start,
u64 *found_start_ret, u32 *found_len_ret) u64 *found_start_ret, u32 *found_len_ret)
{ {
struct btrfs_subpage_info *subpage_info = fs_info->subpage_info;
struct btrfs_subpage *subpage = folio_get_private(folio); struct btrfs_subpage *subpage = folio_get_private(folio);
const u32 sectors_per_page = fs_info->sectors_per_page;
const unsigned int len = PAGE_SIZE - offset_in_page(search_start); const unsigned int len = PAGE_SIZE - offset_in_page(search_start);
const unsigned int start_bit = subpage_calc_start_bit(fs_info, folio, const unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
locked, search_start, len); locked, search_start, len);
const unsigned int locked_bitmap_start = subpage_info->locked_offset; const unsigned int locked_bitmap_start = sectors_per_page * btrfs_bitmap_nr_locked;
const unsigned int locked_bitmap_end = locked_bitmap_start + const unsigned int locked_bitmap_end = locked_bitmap_start + sectors_per_page;
subpage_info->bitmap_nr_bits;
unsigned long flags; unsigned long flags;
int first_zero; int first_zero;
int first_set; int first_set;
...@@ -901,21 +869,21 @@ void btrfs_folio_end_all_writers(const struct btrfs_fs_info *fs_info, struct fol ...@@ -901,21 +869,21 @@ void btrfs_folio_end_all_writers(const struct btrfs_fs_info *fs_info, struct fol
} }
} }
#define GET_SUBPAGE_BITMAP(subpage, subpage_info, name, dst) \ #define GET_SUBPAGE_BITMAP(subpage, fs_info, name, dst) \
{ \ { \
const int bitmap_nr_bits = subpage_info->bitmap_nr_bits; \ const int sectors_per_page = fs_info->sectors_per_page; \
\ \
ASSERT(bitmap_nr_bits < BITS_PER_LONG); \ ASSERT(sectors_per_page < BITS_PER_LONG); \
*dst = bitmap_read(subpage->bitmaps, \ *dst = bitmap_read(subpage->bitmaps, \
subpage_info->name##_offset, \ sectors_per_page * btrfs_bitmap_nr_##name, \
bitmap_nr_bits); \ sectors_per_page); \
} }
void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info, void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info,
struct folio *folio, u64 start, u32 len) struct folio *folio, u64 start, u32 len)
{ {
struct btrfs_subpage_info *subpage_info = fs_info->subpage_info;
struct btrfs_subpage *subpage; struct btrfs_subpage *subpage;
const u32 sectors_per_page = fs_info->sectors_per_page;
unsigned long uptodate_bitmap; unsigned long uptodate_bitmap;
unsigned long dirty_bitmap; unsigned long dirty_bitmap;
unsigned long writeback_bitmap; unsigned long writeback_bitmap;
...@@ -924,42 +892,41 @@ void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info, ...@@ -924,42 +892,41 @@ void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info,
unsigned long flags; unsigned long flags;
ASSERT(folio_test_private(folio) && folio_get_private(folio)); ASSERT(folio_test_private(folio) && folio_get_private(folio));
ASSERT(subpage_info); ASSERT(sectors_per_page > 1);
subpage = folio_get_private(folio); subpage = folio_get_private(folio);
spin_lock_irqsave(&subpage->lock, flags); spin_lock_irqsave(&subpage->lock, flags);
GET_SUBPAGE_BITMAP(subpage, subpage_info, uptodate, &uptodate_bitmap); GET_SUBPAGE_BITMAP(subpage, fs_info, uptodate, &uptodate_bitmap);
GET_SUBPAGE_BITMAP(subpage, subpage_info, dirty, &dirty_bitmap); GET_SUBPAGE_BITMAP(subpage, fs_info, dirty, &dirty_bitmap);
GET_SUBPAGE_BITMAP(subpage, subpage_info, writeback, &writeback_bitmap); GET_SUBPAGE_BITMAP(subpage, fs_info, writeback, &writeback_bitmap);
GET_SUBPAGE_BITMAP(subpage, subpage_info, ordered, &ordered_bitmap); GET_SUBPAGE_BITMAP(subpage, fs_info, ordered, &ordered_bitmap);
GET_SUBPAGE_BITMAP(subpage, subpage_info, checked, &checked_bitmap); GET_SUBPAGE_BITMAP(subpage, fs_info, checked, &checked_bitmap);
GET_SUBPAGE_BITMAP(subpage, subpage_info, locked, &checked_bitmap); GET_SUBPAGE_BITMAP(subpage, fs_info, locked, &checked_bitmap);
spin_unlock_irqrestore(&subpage->lock, flags); spin_unlock_irqrestore(&subpage->lock, flags);
dump_page(folio_page(folio, 0), "btrfs subpage dump"); dump_page(folio_page(folio, 0), "btrfs subpage dump");
btrfs_warn(fs_info, btrfs_warn(fs_info,
"start=%llu len=%u page=%llu, bitmaps uptodate=%*pbl dirty=%*pbl writeback=%*pbl ordered=%*pbl checked=%*pbl", "start=%llu len=%u page=%llu, bitmaps uptodate=%*pbl dirty=%*pbl writeback=%*pbl ordered=%*pbl checked=%*pbl",
start, len, folio_pos(folio), start, len, folio_pos(folio),
subpage_info->bitmap_nr_bits, &uptodate_bitmap, sectors_per_page, &uptodate_bitmap,
subpage_info->bitmap_nr_bits, &dirty_bitmap, sectors_per_page, &dirty_bitmap,
subpage_info->bitmap_nr_bits, &writeback_bitmap, sectors_per_page, &writeback_bitmap,
subpage_info->bitmap_nr_bits, &ordered_bitmap, sectors_per_page, &ordered_bitmap,
subpage_info->bitmap_nr_bits, &checked_bitmap); sectors_per_page, &checked_bitmap);
} }
void btrfs_get_subpage_dirty_bitmap(struct btrfs_fs_info *fs_info, void btrfs_get_subpage_dirty_bitmap(struct btrfs_fs_info *fs_info,
struct folio *folio, struct folio *folio,
unsigned long *ret_bitmap) unsigned long *ret_bitmap)
{ {
struct btrfs_subpage_info *subpage_info = fs_info->subpage_info;
struct btrfs_subpage *subpage; struct btrfs_subpage *subpage;
unsigned long flags; unsigned long flags;
ASSERT(folio_test_private(folio) && folio_get_private(folio)); ASSERT(folio_test_private(folio) && folio_get_private(folio));
ASSERT(subpage_info); ASSERT(fs_info->sectors_per_page > 1);
subpage = folio_get_private(folio); subpage = folio_get_private(folio);
spin_lock_irqsave(&subpage->lock, flags); spin_lock_irqsave(&subpage->lock, flags);
GET_SUBPAGE_BITMAP(subpage, subpage_info, dirty, ret_bitmap); GET_SUBPAGE_BITMAP(subpage, fs_info, dirty, ret_bitmap);
spin_unlock_irqrestore(&subpage->lock, flags); spin_unlock_irqrestore(&subpage->lock, flags);
} }
...@@ -19,39 +19,23 @@ struct btrfs_fs_info; ...@@ -19,39 +19,23 @@ struct btrfs_fs_info;
* *
* This structure records how they are organized in the bitmap: * This structure records how they are organized in the bitmap:
* *
* /- uptodate_offset /- dirty_offset /- ordered_offset * /- uptodate /- dirty /- ordered
* | | | * | | |
* v v v * v v v
* |u|u|u|u|........|u|u|d|d|.......|d|d|o|o|.......|o|o| * |u|u|u|u|........|u|u|d|d|.......|d|d|o|o|.......|o|o|
* |<- bitmap_nr_bits ->| * |< sectors_per_page >|
* |<----------------- total_nr_bits ------------------>| *
* Unlike regular macro-like enums, here we do not go upper-case names, as
* these names will be utilized in various macros to define function names.
*/ */
struct btrfs_subpage_info { enum {
/* Number of bits for each bitmap */ btrfs_bitmap_nr_uptodate = 0,
unsigned int bitmap_nr_bits; btrfs_bitmap_nr_dirty,
btrfs_bitmap_nr_writeback,
/* Total number of bits for the whole bitmap */ btrfs_bitmap_nr_ordered,
unsigned int total_nr_bits; btrfs_bitmap_nr_checked,
btrfs_bitmap_nr_locked,
/* btrfs_bitmap_nr_max
* *_offset indicates where the bitmap starts, the length is always
* @bitmap_size, which is calculated from PAGE_SIZE / sectorsize.
*/
unsigned int uptodate_offset;
unsigned int dirty_offset;
unsigned int writeback_offset;
unsigned int ordered_offset;
unsigned int checked_offset;
/*
* For locked bitmaps, normally it's subpage representation for folio
* Locked flag, but metadata is different:
*
* - Metadata doesn't really lock the folio
* It's just to prevent page::private get cleared before the last
* end_page_read().
*/
unsigned int locked_offset;
}; };
/* /*
...@@ -99,7 +83,6 @@ static inline bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info, ...@@ -99,7 +83,6 @@ static inline bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info,
} }
#endif #endif
void btrfs_init_subpage_info(struct btrfs_subpage_info *subpage_info, u32 sectorsize);
int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info, int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info,
struct folio *folio, enum btrfs_subpage_type type); struct folio *folio, enum btrfs_subpage_type type);
void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info, struct folio *folio); void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info, struct folio *folio);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment