Commit bca707e5 authored by Qu Wenruo's avatar Qu Wenruo Committed by David Sterba

btrfs: subpage: introduce helpers to handle subpage delalloc locking

Three new helpers are introduced for the incoming subpage delalloc locking
change.

- btrfs_folio_set_writer_lock()
  This is to mark specified range with subpage specific writer lock.
  After calling this, the subpage range can be properly unlocked by
  btrfs_folio_end_writer_lock()

- btrfs_subpage_find_writer_locked()
  This is to find the writer locked subpage range in a page.
  With the help of btrfs_folio_set_writer_lock(), it can allow us to
  record and find previously locked subpage range without extra memory
  allocation.

- btrfs_folio_end_all_writers()
  This is for the locked_page of __extent_writepage(), as there may be
  multiple subpage delalloc ranges locked.
Reviewed-by: default avatarJohannes Thumshirn <johannes.thumshirn@wdc.com>
Reviewed-by: default avatarNaohiro Aota <naohiro.aota@wdc.com>
Signed-off-by: default avatarQu Wenruo <wqu@suse.com>
Signed-off-by: default avatarDavid Sterba <dsterba@suse.com>
parent 21b5bef2
......@@ -775,6 +775,124 @@ void btrfs_folio_unlock_writer(struct btrfs_fs_info *fs_info,
btrfs_folio_end_writer_lock(fs_info, folio, start, len);
}
/*
 * This is for folio already locked by plain lock_page()/folio_lock(), which
 * doesn't have any subpage awareness.
 *
 * This populates the involved subpage ranges so that subpage helpers can
 * properly unlock them.
 */
void btrfs_folio_set_writer_lock(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage;
	unsigned long flags;
	unsigned int start_bit;
	unsigned int nbits;
	int ret;

	/* The caller must already hold the plain folio lock. */
	ASSERT(folio_test_locked(folio));

	/* No subpage bookkeeping needed for regular sectorsize == PAGE_SIZE cases. */
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio->mapping))
		return;

	subpage = folio_get_private(folio);
	/* First bit inside the "locked" bitmap covering [start, start + len). */
	start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len);
	nbits = len >> fs_info->sectorsize_bits;

	spin_lock_irqsave(&subpage->lock, flags);
	/* Target range should not yet be locked. */
	ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits));
	bitmap_set(subpage->bitmaps, start_bit, nbits);
	/* Writer count can never exceed the number of sectors in the folio. */
	ret = atomic_add_return(nbits, &subpage->writers);
	ASSERT(ret <= fs_info->subpage_info->bitmap_nr_bits);
	spin_unlock_irqrestore(&subpage->lock, flags);
}
/*
 * Find any subpage writer locked range inside @folio, starting at file offset
 * @search_start. The caller should ensure the folio is locked.
 *
 * Return true and update @found_start_ret and @found_len_ret to the first
 * writer locked range.
 * Return false if there is no writer locked range.
 */
bool btrfs_subpage_find_writer_locked(const struct btrfs_fs_info *fs_info,
				      struct folio *folio, u64 search_start,
				      u64 *found_start_ret, u32 *found_len_ret)
{
	struct btrfs_subpage_info *subpage_info = fs_info->subpage_info;
	struct btrfs_subpage *subpage = folio_get_private(folio);
	/* Search window: from @search_start to the end of the folio. */
	const unsigned int len = PAGE_SIZE - offset_in_page(search_start);
	const unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							      locked, search_start, len);
	/* Boundaries of the "locked" section inside the packed bitmaps. */
	const unsigned int locked_bitmap_start = subpage_info->locked_offset;
	const unsigned int locked_bitmap_end = locked_bitmap_start +
					       subpage_info->bitmap_nr_bits;
	unsigned long flags;
	int first_zero;
	int first_set;
	bool found = false;

	ASSERT(folio_test_locked(folio));
	spin_lock_irqsave(&subpage->lock, flags);
	/* First writer-locked sector at or after @search_start, if any. */
	first_set = find_next_bit(subpage->bitmaps, locked_bitmap_end, start_bit);
	if (first_set >= locked_bitmap_end)
		goto out;
	found = true;
	*found_start_ret = folio_pos(folio) +
		((first_set - locked_bitmap_start) << fs_info->sectorsize_bits);
	/*
	 * Since @first_set is ensured to be smaller than locked_bitmap_end
	 * here, @found_start_ret should be inside the folio.
	 */
	ASSERT(*found_start_ret < folio_pos(folio) + PAGE_SIZE);
	/* The locked range ends at the first clear bit (or the bitmap end). */
	first_zero = find_next_zero_bit(subpage->bitmaps, locked_bitmap_end, first_set);
	*found_len_ret = (first_zero - first_set) << fs_info->sectorsize_bits;
out:
	spin_unlock_irqrestore(&subpage->lock, flags);
	return found;
}
/*
 * Unlike btrfs_folio_end_writer_lock() which unlocks a specified subpage range,
 * this ends all writer locked ranges of a page.
 *
 * This is for the locked page of __extent_writepage(), as the locked page
 * can contain several locked subpage ranges.
 */
void btrfs_folio_end_all_writers(const struct btrfs_fs_info *fs_info, struct folio *folio)
{
	const u64 folio_start = folio_pos(folio);
	const u64 folio_end = folio_start + PAGE_SIZE;
	u64 cur;

	ASSERT(folio_test_locked(folio));

	/* Regular sectorsize == PAGE_SIZE case: a plain folio unlock is enough. */
	if (!btrfs_is_subpage(fs_info, folio->mapping)) {
		folio_unlock(folio);
		return;
	}

	/* Walk every writer-locked subpage range and end each one. */
	for (cur = folio_start; cur < folio_end; ) {
		u64 range_start;
		u32 range_len;

		if (!btrfs_subpage_find_writer_locked(fs_info, folio, cur,
						      &range_start, &range_len))
			break;

		/* The last writer is gone; drop the folio lock as well. */
		if (btrfs_subpage_end_and_test_writer(fs_info, folio,
						      range_start, range_len)) {
			folio_unlock(folio);
			break;
		}
		cur = range_start + range_len;
	}
}
/*
 * Copy the @name section (e.g. locked, via @name##_offset) out of the packed
 * per-folio subpage bitmaps into @dst.
 */
#define GET_SUBPAGE_BITMAP(subpage, subpage_info, name, dst) \
	bitmap_cut(dst, subpage->bitmaps, 0, \
		   subpage_info->name##_offset, subpage_info->bitmap_nr_bits)
......
......@@ -112,6 +112,12 @@ int btrfs_folio_start_writer_lock(const struct btrfs_fs_info *fs_info,
struct folio *folio, u64 start, u32 len);
void btrfs_folio_end_writer_lock(const struct btrfs_fs_info *fs_info,
struct folio *folio, u64 start, u32 len);
/* Record [start, start + len) as writer locked in the subpage bitmap. */
void btrfs_folio_set_writer_lock(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len);
/* Find the first writer locked subpage range at or after @search_start. */
bool btrfs_subpage_find_writer_locked(const struct btrfs_fs_info *fs_info,
				      struct folio *folio, u64 search_start,
				      u64 *found_start_ret, u32 *found_len_ret);
/* End all writer locked ranges of @folio, unlocking it with the last one. */
void btrfs_folio_end_all_writers(const struct btrfs_fs_info *fs_info, struct folio *folio);
/*
* Template for subpage related operations.
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment