Commit 8d993618 authored by Qu Wenruo, committed by David Sterba

btrfs: migrate get_eb_page_index() and get_eb_offset_in_page() to folios

These two functions are still using the old page-based code, which is
not going to handle larger folios at all.

The migration itself is going to involve the following changes:

- PAGE_SIZE -> folio_size()
- PAGE_SHIFT -> folio_shift()
- get_eb_page_index() -> get_eb_folio_index()
- get_eb_offset_in_page() -> get_eb_offset_in_folio()

And since we're going to support larger folios, although the straight
conversion above is good enough, this patch also adds extra comments in
the involved functions to explain why the same single line of code can
now cover 3 cases (a small stand-alone sketch of the math follows the
list):

- folio_size == PAGE_SIZE, sectorsize == PAGE_SIZE, nodesize >= PAGE_SIZE
  The common, non-subpage case with per-page folio.

- folio_size > PAGE_SIZE, sectorsize == PAGE_SIZE, nodesize >= PAGE_SIZE
  The incoming larger folio, non-subpage case.

- folio_size == PAGE_SIZE, sectorsize < PAGE_SIZE, nodesize < PAGE_SIZE
  The existing subpage case; we won't enable larger folios for it anyway.
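
The sketch below is only an illustration, not part of the kernel change:
the demo_* helpers and the example sizes (4K/16K/64K folios, a 16K
nodesize) are made up here, and plain division/masking stands in for
folio_shift()/offset_in_folio() on power-of-two folio sizes.

    /* Stand-alone user-space sketch of the index/offset math above. */
    #include <stdio.h>

    static unsigned long demo_folio_index(unsigned long folio_size,
                                          unsigned long offset)
    {
            /* Mirrors "offset >> folio_shift(folio)". */
            return offset / folio_size;
    }

    static unsigned long demo_offset_in_folio(unsigned long folio_size,
                                              unsigned long eb_start,
                                              unsigned long offset)
    {
            /* Mirrors "offset_in_folio(folio, eb->start + offset)". */
            return (eb_start + offset) & (folio_size - 1);
    }

    int main(void)
    {
            /* Case 1: 4K (page sized) folios, 16K eb at an aligned start. */
            printf("%lu %lu\n", demo_folio_index(4096, 8192),
                   demo_offset_in_folio(4096, 1048576, 8192));  /* 2 0 */
            /* Case 2: one incoming 16K folio covering the whole eb. */
            printf("%lu %lu\n", demo_folio_index(16384, 8192),
                   demo_offset_in_folio(16384, 1048576, 8192)); /* 0 8192 */
            /* Case 3: subpage, 64K folio holding several 16K ebs. */
            printf("%lu %lu\n", demo_folio_index(65536, 100),
                   demo_offset_in_folio(65536, 16384, 100));    /* 0 16484 */
            return 0;
    }
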
Signed-off-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 4a565c80
@@ -60,28 +60,30 @@ u##bits btrfs_get_token_##bits(struct btrfs_map_token *token, \
 const void *ptr, unsigned long off) \
 { \
 const unsigned long member_offset = (unsigned long)ptr + off; \
-const unsigned long idx = get_eb_page_index(member_offset); \
-const unsigned long oip = get_eb_offset_in_page(token->eb, \
-member_offset); \
+const unsigned long idx = get_eb_folio_index(token->eb, member_offset); \
+const unsigned long oil = get_eb_offset_in_folio(token->eb, \
+member_offset); \
+const int unit_size = folio_size(token->eb->folios[0]); \
+const int unit_shift = folio_shift(token->eb->folios[0]); \
 const int size = sizeof(u##bits); \
 u8 lebytes[sizeof(u##bits)]; \
-const int part = PAGE_SIZE - oip; \
+const int part = unit_size - oil; \
 \
 ASSERT(token); \
 ASSERT(token->kaddr); \
 ASSERT(check_setget_bounds(token->eb, ptr, off, size)); \
 if (token->offset <= member_offset && \
-member_offset + size <= token->offset + PAGE_SIZE) { \
-return get_unaligned_le##bits(token->kaddr + oip); \
+member_offset + size <= token->offset + unit_size) { \
+return get_unaligned_le##bits(token->kaddr + oil); \
 } \
 token->kaddr = folio_address(token->eb->folios[idx]); \
-token->offset = idx << PAGE_SHIFT; \
-if (INLINE_EXTENT_BUFFER_PAGES == 1 || oip + size <= PAGE_SIZE) \
-return get_unaligned_le##bits(token->kaddr + oip); \
+token->offset = idx << unit_shift; \
+if (INLINE_EXTENT_BUFFER_PAGES == 1 || oil + size <= unit_size) \
+return get_unaligned_le##bits(token->kaddr + oil); \
 \
-memcpy(lebytes, token->kaddr + oip, part); \
+memcpy(lebytes, token->kaddr + oil, part); \
 token->kaddr = folio_address(token->eb->folios[idx + 1]); \
-token->offset = (idx + 1) << PAGE_SHIFT; \
+token->offset = (idx + 1) << unit_shift; \
 memcpy(lebytes + part, token->kaddr, size - part); \
 return get_unaligned_le##bits(lebytes); \
 } \
@@ -89,18 +91,20 @@ u##bits btrfs_get_##bits(const struct extent_buffer *eb, \
 const void *ptr, unsigned long off) \
 { \
 const unsigned long member_offset = (unsigned long)ptr + off; \
-const unsigned long oip = get_eb_offset_in_page(eb, member_offset); \
-const unsigned long idx = get_eb_page_index(member_offset); \
+const unsigned long idx = get_eb_folio_index(eb, member_offset); \
+const unsigned long oil = get_eb_offset_in_folio(eb, \
+member_offset); \
+const int unit_size = folio_size(eb->folios[0]); \
 char *kaddr = folio_address(eb->folios[idx]); \
 const int size = sizeof(u##bits); \
-const int part = PAGE_SIZE - oip; \
+const int part = unit_size - oil; \
 u8 lebytes[sizeof(u##bits)]; \
 \
 ASSERT(check_setget_bounds(eb, ptr, off, size)); \
-if (INLINE_EXTENT_BUFFER_PAGES == 1 || oip + size <= PAGE_SIZE) \
-return get_unaligned_le##bits(kaddr + oip); \
+if (INLINE_EXTENT_BUFFER_PAGES == 1 || oil + size <= unit_size) \
+return get_unaligned_le##bits(kaddr + oil); \
 \
-memcpy(lebytes, kaddr + oip, part); \
+memcpy(lebytes, kaddr + oil, part); \
 kaddr = folio_address(eb->folios[idx + 1]); \
 memcpy(lebytes + part, kaddr, size - part); \
 return get_unaligned_le##bits(lebytes); \
@@ -110,52 +114,58 @@ void btrfs_set_token_##bits(struct btrfs_map_token *token, \
 u##bits val) \
 { \
 const unsigned long member_offset = (unsigned long)ptr + off; \
-const unsigned long idx = get_eb_page_index(member_offset); \
-const unsigned long oip = get_eb_offset_in_page(token->eb, \
-member_offset); \
+const unsigned long idx = get_eb_folio_index(token->eb, member_offset); \
+const unsigned long oil = get_eb_offset_in_folio(token->eb, \
+member_offset); \
+const int unit_size = folio_size(token->eb->folios[0]); \
+const int unit_shift = folio_shift(token->eb->folios[0]); \
 const int size = sizeof(u##bits); \
 u8 lebytes[sizeof(u##bits)]; \
-const int part = PAGE_SIZE - oip; \
+const int part = unit_size - oil; \
 \
 ASSERT(token); \
 ASSERT(token->kaddr); \
 ASSERT(check_setget_bounds(token->eb, ptr, off, size)); \
 if (token->offset <= member_offset && \
-member_offset + size <= token->offset + PAGE_SIZE) { \
-put_unaligned_le##bits(val, token->kaddr + oip); \
+member_offset + size <= token->offset + unit_size) { \
+put_unaligned_le##bits(val, token->kaddr + oil); \
 return; \
 } \
 token->kaddr = folio_address(token->eb->folios[idx]); \
-token->offset = idx << PAGE_SHIFT; \
-if (INLINE_EXTENT_BUFFER_PAGES == 1 || oip + size <= PAGE_SIZE) { \
-put_unaligned_le##bits(val, token->kaddr + oip); \
+token->offset = idx << unit_shift; \
+if (INLINE_EXTENT_BUFFER_PAGES == 1 || \
+oil + size <= unit_size) { \
+put_unaligned_le##bits(val, token->kaddr + oil); \
 return; \
 } \
 put_unaligned_le##bits(val, lebytes); \
-memcpy(token->kaddr + oip, lebytes, part); \
+memcpy(token->kaddr + oil, lebytes, part); \
 token->kaddr = folio_address(token->eb->folios[idx + 1]); \
-token->offset = (idx + 1) << PAGE_SHIFT; \
+token->offset = (idx + 1) << unit_shift; \
 memcpy(token->kaddr, lebytes + part, size - part); \
 } \
 void btrfs_set_##bits(const struct extent_buffer *eb, void *ptr, \
 unsigned long off, u##bits val) \
 { \
 const unsigned long member_offset = (unsigned long)ptr + off; \
-const unsigned long oip = get_eb_offset_in_page(eb, member_offset); \
-const unsigned long idx = get_eb_page_index(member_offset); \
+const unsigned long idx = get_eb_folio_index(eb, member_offset); \
+const unsigned long oil = get_eb_offset_in_folio(eb, \
+member_offset); \
+const int unit_size = folio_size(eb->folios[0]); \
 char *kaddr = folio_address(eb->folios[idx]); \
 const int size = sizeof(u##bits); \
-const int part = PAGE_SIZE - oip; \
+const int part = unit_size - oil; \
 u8 lebytes[sizeof(u##bits)]; \
 \
 ASSERT(check_setget_bounds(eb, ptr, off, size)); \
-if (INLINE_EXTENT_BUFFER_PAGES == 1 || oip + size <= PAGE_SIZE) { \
-put_unaligned_le##bits(val, kaddr + oip); \
+if (INLINE_EXTENT_BUFFER_PAGES == 1 || \
+oil + size <= unit_size) { \
+put_unaligned_le##bits(val, kaddr + oil); \
 return; \
 } \
 \
 put_unaligned_le##bits(val, lebytes); \
-memcpy(kaddr + oip, lebytes, part); \
+memcpy(kaddr + oil, lebytes, part); \
 kaddr = folio_address(eb->folios[idx + 1]); \
 memcpy(kaddr, lebytes + part, size - part); \
 }
...
@@ -820,7 +820,8 @@ int btrfs_bin_search(struct extent_buffer *eb, int first_slot,
 }
 while (low < high) {
-unsigned long oip;
+const int unit_size = folio_size(eb->folios[0]);
+unsigned long oil;
 unsigned long offset;
 struct btrfs_disk_key *tmp;
 struct btrfs_disk_key unaligned;
@@ -828,14 +829,14 @@ int btrfs_bin_search(struct extent_buffer *eb, int first_slot,
 mid = (low + high) / 2;
 offset = p + mid * item_size;
-oip = offset_in_page(offset);
+oil = get_eb_offset_in_folio(eb, offset);
-if (oip + key_size <= PAGE_SIZE) {
-const unsigned long idx = get_eb_page_index(offset);
+if (oil + key_size <= unit_size) {
+const unsigned long idx = get_eb_folio_index(eb, offset);
 char *kaddr = folio_address(eb->folios[idx]);
-oip = get_eb_offset_in_page(eb, offset);
-tmp = (struct btrfs_disk_key *)(kaddr + oip);
+oil = get_eb_offset_in_folio(eb, offset);
+tmp = (struct btrfs_disk_key *)(kaddr + oil);
 } else {
 read_extent_buffer(eb, &unaligned, offset, key_size);
 tmp = &unaligned;
...
@@ -395,7 +395,7 @@ int btrfs_validate_extent_buffer(struct extent_buffer *eb,
 csum_tree_block(eb, result);
 header_csum = folio_address(eb->folios[0]) +
-get_eb_offset_in_page(eb, offsetof(struct btrfs_header, csum));
+get_eb_offset_in_folio(eb, offsetof(struct btrfs_header, csum));
 if (memcmp(result, header_csum, csum_size) != 0) {
 btrfs_warn_rl(fs_info,
...
@@ -4169,12 +4169,11 @@ static inline int check_eb_range(const struct extent_buffer *eb,
 void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
 unsigned long start, unsigned long len)
 {
+const int unit_size = folio_size(eb->folios[0]);
 size_t cur;
 size_t offset;
-struct page *page;
-char *kaddr;
 char *dst = (char *)dstv;
-unsigned long i = get_eb_page_index(start);
+unsigned long i = get_eb_folio_index(eb, start);
 if (check_eb_range(eb, start, len)) {
 /*
@@ -4190,13 +4189,13 @@ void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
 return;
 }
-offset = get_eb_offset_in_page(eb, start);
+offset = get_eb_offset_in_folio(eb, start);
 while (len > 0) {
-page = folio_page(eb->folios[i], 0);
+char *kaddr;
-cur = min(len, (PAGE_SIZE - offset));
-kaddr = page_address(page);
+cur = min(len, unit_size - offset);
+kaddr = folio_address(eb->folios[i]);
 memcpy(dst, kaddr + offset, cur);
 dst += cur;
@@ -4210,12 +4209,11 @@ int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
 void __user *dstv,
 unsigned long start, unsigned long len)
 {
+const int unit_size = folio_size(eb->folios[0]);
 size_t cur;
 size_t offset;
-struct page *page;
-char *kaddr;
 char __user *dst = (char __user *)dstv;
-unsigned long i = get_eb_page_index(start);
+unsigned long i = get_eb_folio_index(eb, start);
 int ret = 0;
 WARN_ON(start > eb->len);
@@ -4227,13 +4225,13 @@ int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
 return ret;
 }
-offset = get_eb_offset_in_page(eb, start);
+offset = get_eb_offset_in_folio(eb, start);
 while (len > 0) {
-page = folio_page(eb->folios[i], 0);
+char *kaddr;
-cur = min(len, (PAGE_SIZE - offset));
-kaddr = page_address(page);
+cur = min(len, unit_size - offset);
+kaddr = folio_address(eb->folios[i]);
 if (copy_to_user_nofault(dst, kaddr + offset, cur)) {
 ret = -EFAULT;
 break;
@@ -4251,12 +4249,12 @@ int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
 int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
 unsigned long start, unsigned long len)
 {
+const int unit_size = folio_size(eb->folios[0]);
 size_t cur;
 size_t offset;
-struct page *page;
 char *kaddr;
 char *ptr = (char *)ptrv;
-unsigned long i = get_eb_page_index(start);
+unsigned long i = get_eb_folio_index(eb, start);
 int ret = 0;
 if (check_eb_range(eb, start, len))
@@ -4265,14 +4263,11 @@ int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
 if (eb->addr)
 return memcmp(ptrv, eb->addr + start, len);
-offset = get_eb_offset_in_page(eb, start);
+offset = get_eb_offset_in_folio(eb, start);
 while (len > 0) {
-page = folio_page(eb->folios[i], 0);
-cur = min(len, (PAGE_SIZE - offset));
-kaddr = page_address(page);
+cur = min(len, unit_size - offset);
+kaddr = folio_address(eb->folios[i]);
 ret = memcmp(ptr, kaddr + offset, cur);
 if (ret)
 break;
@@ -4291,10 +4286,12 @@ int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
 * For regular sector size == PAGE_SIZE case, check if @page is uptodate.
 * For subpage case, check if the range covered by the eb has EXTENT_UPTODATE.
 */
-static void assert_eb_page_uptodate(const struct extent_buffer *eb,
-struct page *page)
+static void assert_eb_folio_uptodate(const struct extent_buffer *eb, int i)
 {
 struct btrfs_fs_info *fs_info = eb->fs_info;
+struct folio *folio = eb->folios[i];
+ASSERT(folio);
 /*
 * If we are using the commit root we could potentially clear a page
@@ -4308,11 +4305,13 @@ static void assert_eb_page_uptodate(const struct extent_buffer *eb,
 return;
 if (fs_info->nodesize < PAGE_SIZE) {
+struct page *page = folio_page(folio, 0);
 if (WARN_ON(!btrfs_subpage_test_uptodate(fs_info, page,
 eb->start, eb->len)))
 btrfs_subpage_dump_bitmap(fs_info, page, eb->start, eb->len);
 } else {
-WARN_ON(!PageUptodate(page));
+WARN_ON(!folio_test_uptodate(folio));
 }
 }
@@ -4320,12 +4319,12 @@ static void __write_extent_buffer(const struct extent_buffer *eb,
 const void *srcv, unsigned long start,
 unsigned long len, bool use_memmove)
 {
+const int unit_size = folio_size(eb->folios[0]);
 size_t cur;
 size_t offset;
-struct page *page;
 char *kaddr;
 char *src = (char *)srcv;
-unsigned long i = get_eb_page_index(start);
+unsigned long i = get_eb_folio_index(eb, start);
 /* For unmapped (dummy) ebs, no need to check their uptodate status. */
 const bool check_uptodate = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
@@ -4340,15 +4339,14 @@ static void __write_extent_buffer(const struct extent_buffer *eb,
 return;
 }
-offset = get_eb_offset_in_page(eb, start);
+offset = get_eb_offset_in_folio(eb, start);
 while (len > 0) {
-page = folio_page(eb->folios[i], 0);
 if (check_uptodate)
-assert_eb_page_uptodate(eb, page);
+assert_eb_folio_uptodate(eb, i);
-cur = min(len, PAGE_SIZE - offset);
-kaddr = page_address(page);
+cur = min(len, unit_size - offset);
+kaddr = folio_address(eb->folios[i]);
 if (use_memmove)
 memmove(kaddr + offset, src, cur);
 else
@@ -4370,6 +4368,7 @@ void write_extent_buffer(const struct extent_buffer *eb, const void *srcv,
 static void memset_extent_buffer(const struct extent_buffer *eb, int c,
 unsigned long start, unsigned long len)
 {
+const int unit_size = folio_size(eb->folios[0]);
 unsigned long cur = start;
 if (eb->addr) {
@@ -4378,13 +4377,12 @@ static void memset_extent_buffer(const struct extent_buffer *eb, int c,
 }
 while (cur < start + len) {
-unsigned long index = get_eb_page_index(cur);
-unsigned int offset = get_eb_offset_in_page(eb, cur);
-unsigned int cur_len = min(start + len - cur, PAGE_SIZE - offset);
-struct page *page = folio_page(eb->folios[index], 0);
+unsigned long index = get_eb_folio_index(eb, cur);
+unsigned int offset = get_eb_offset_in_folio(eb, cur);
+unsigned int cur_len = min(start + len - cur, unit_size - offset);
-assert_eb_page_uptodate(eb, page);
-memset_page(page, offset, c, cur_len);
+assert_eb_folio_uptodate(eb, index);
+memset(folio_address(eb->folios[index]) + offset, c, cur_len);
 cur += cur_len;
 }
@@ -4401,14 +4399,15 @@ void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start,
 void copy_extent_buffer_full(const struct extent_buffer *dst,
 const struct extent_buffer *src)
 {
+const int unit_size = folio_size(src->folios[0]);
 unsigned long cur = 0;
 ASSERT(dst->len == src->len);
 while (cur < src->len) {
-unsigned long index = get_eb_page_index(cur);
-unsigned long offset = get_eb_offset_in_page(src, cur);
-unsigned long cur_len = min(src->len, PAGE_SIZE - offset);
+unsigned long index = get_eb_folio_index(src, cur);
+unsigned long offset = get_eb_offset_in_folio(src, cur);
+unsigned long cur_len = min(src->len, unit_size - offset);
 void *addr = folio_address(src->folios[index]) + offset;
 write_extent_buffer(dst, addr, cur, cur_len);
@@ -4422,12 +4421,12 @@ void copy_extent_buffer(const struct extent_buffer *dst,
 unsigned long dst_offset, unsigned long src_offset,
 unsigned long len)
 {
+const int unit_size = folio_size(dst->folios[0]);
 u64 dst_len = dst->len;
 size_t cur;
 size_t offset;
-struct page *page;
 char *kaddr;
-unsigned long i = get_eb_page_index(dst_offset);
+unsigned long i = get_eb_folio_index(dst, dst_offset);
 if (check_eb_range(dst, dst_offset, len) ||
 check_eb_range(src, src_offset, len))
@@ -4435,15 +4434,14 @@ void copy_extent_buffer(const struct extent_buffer *dst,
 WARN_ON(src->len != dst_len);
-offset = get_eb_offset_in_page(dst, dst_offset);
+offset = get_eb_offset_in_folio(dst, dst_offset);
 while (len > 0) {
-page = folio_page(dst->folios[i], 0);
-assert_eb_page_uptodate(dst, page);
+assert_eb_folio_uptodate(dst, i);
-cur = min(len, (unsigned long)(PAGE_SIZE - offset));
-kaddr = page_address(page);
+cur = min(len, (unsigned long)(unit_size - offset));
+kaddr = folio_address(dst->folios[i]);
 read_extent_buffer(src, kaddr + offset, src_offset, cur);
 src_offset += cur;
@@ -4502,18 +4500,18 @@ int extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start,
 eb_bitmap_offset(eb, start, nr, &i, &offset);
 page = folio_page(eb->folios[i], 0);
-assert_eb_page_uptodate(eb, page);
+assert_eb_folio_uptodate(eb, i);
 kaddr = page_address(page);
 return 1U & (kaddr[offset] >> (nr & (BITS_PER_BYTE - 1)));
 }
 static u8 *extent_buffer_get_byte(const struct extent_buffer *eb, unsigned long bytenr)
 {
-unsigned long index = get_eb_page_index(bytenr);
+unsigned long index = get_eb_folio_index(eb, bytenr);
 if (check_eb_range(eb, bytenr, 1))
 return NULL;
-return folio_address(eb->folios[index]) + get_eb_offset_in_page(eb, bytenr);
+return folio_address(eb->folios[index]) + get_eb_offset_in_folio(eb, bytenr);
 }
 /*
@@ -4598,6 +4596,7 @@ void memcpy_extent_buffer(const struct extent_buffer *dst,
 unsigned long dst_offset, unsigned long src_offset,
 unsigned long len)
 {
+const int unit_size = folio_size(dst->folios[0]);
 unsigned long cur_off = 0;
 if (check_eb_range(dst, dst_offset, len) ||
@@ -4616,11 +4615,11 @@ void memcpy_extent_buffer(const struct extent_buffer *dst,
 while (cur_off < len) {
 unsigned long cur_src = cur_off + src_offset;
-unsigned long pg_index = get_eb_page_index(cur_src);
-unsigned long pg_off = get_eb_offset_in_page(dst, cur_src);
+unsigned long folio_index = get_eb_folio_index(dst, cur_src);
+unsigned long folio_off = get_eb_offset_in_folio(dst, cur_src);
 unsigned long cur_len = min(src_offset + len - cur_src,
-PAGE_SIZE - pg_off);
-void *src_addr = folio_address(dst->folios[pg_index]) + pg_off;
+unit_size - folio_off);
+void *src_addr = folio_address(dst->folios[folio_index]) + folio_off;
 const bool use_memmove = areas_overlap(src_offset + cur_off,
 dst_offset + cur_off, cur_len);
@@ -4654,20 +4653,20 @@ void memmove_extent_buffer(const struct extent_buffer *dst,
 while (len > 0) {
 unsigned long src_i;
 size_t cur;
-size_t dst_off_in_page;
-size_t src_off_in_page;
+size_t dst_off_in_folio;
+size_t src_off_in_folio;
 void *src_addr;
 bool use_memmove;
-src_i = get_eb_page_index(src_end);
+src_i = get_eb_folio_index(dst, src_end);
-dst_off_in_page = get_eb_offset_in_page(dst, dst_end);
-src_off_in_page = get_eb_offset_in_page(dst, src_end);
+dst_off_in_folio = get_eb_offset_in_folio(dst, dst_end);
+src_off_in_folio = get_eb_offset_in_folio(dst, src_end);
-cur = min_t(unsigned long, len, src_off_in_page + 1);
-cur = min(cur, dst_off_in_page + 1);
+cur = min_t(unsigned long, len, src_off_in_folio + 1);
+cur = min(cur, dst_off_in_folio + 1);
-src_addr = folio_address(dst->folios[src_i]) + src_off_in_page -
+src_addr = folio_address(dst->folios[src_i]) + src_off_in_folio -
 cur + 1;
 use_memmove = areas_overlap(src_end - cur + 1, dst_end - cur + 1,
 cur);
...
@@ -121,29 +121,43 @@ struct btrfs_eb_write_context {
 *
 * Will handle both sectorsize == PAGE_SIZE and sectorsize < PAGE_SIZE cases.
 */
-static inline size_t get_eb_offset_in_page(const struct extent_buffer *eb,
+static inline size_t get_eb_offset_in_folio(const struct extent_buffer *eb,
 unsigned long offset)
 {
 /*
-* For sectorsize == PAGE_SIZE case, eb->start will always be aligned
-* to PAGE_SIZE, thus adding it won't cause any difference.
+* 1) sectorsize == PAGE_SIZE and nodesize >= PAGE_SIZE case
+*    1.1) One large folio covering the whole eb
+*         The eb->start is aligned to folio size, thus adding it
+*         won't cause any difference.
+*    1.2) Several page sized folios
+*         The eb->start is aligned to folio (page) size, thus
+*         adding it won't cause any difference.
 *
-* For sectorsize < PAGE_SIZE, we must only read the data that belongs
-* to the eb, thus we have to take the eb->start into consideration.
+* 2) sectorsize < PAGE_SIZE and nodesize < PAGE_SIZE case
+*    In this case there would only be one page sized folio, and there
+*    may be several different extent buffers in the page/folio.
+*    We need to add eb->start to properly access the offset inside
+*    that eb.
 */
-return offset_in_page(offset + eb->start);
+return offset_in_folio(eb->folios[0], offset + eb->start);
 }
-static inline unsigned long get_eb_page_index(unsigned long offset)
+static inline unsigned long get_eb_folio_index(const struct extent_buffer *eb,
+unsigned long offset)
 {
 /*
-* For sectorsize == PAGE_SIZE case, plain >> PAGE_SHIFT is enough.
+* 1) sectorsize == PAGE_SIZE and nodesize >= PAGE_SIZE case
+*    1.1) One large folio covering the whole eb.
+*         the folio_shift would be large enough to always make us
+*         return 0 as index.
+*    1.2) Several page sized folios
+*         The folio_shift() would be PAGE_SHIFT, giving us the correct
+*         index.
 *
-* For sectorsize < PAGE_SIZE case, we only support 64K PAGE_SIZE,
-* and have ensured that all tree blocks are contained in one page,
-* thus we always get index == 0.
+* 2) sectorsize < PAGE_SIZE and nodesize < PAGE_SIZE case
+*    The folio would only be page sized, and always give us 0 as index.
 */
-return offset >> PAGE_SHIFT;
+return offset >> folio_shift(eb->folios[0]);
 }
 /*
...