Commit b7178a5f authored by Zhao Lei, committed by Chris Mason

btrfs: Use unified stripe_page's index calculation

We are using several different index calculation methods for stripe_page in
the current code:
1: (rbio->stripe_len / PAGE_CACHE_SIZE) * stripe_index + page_index
2: DIV_ROUND_UP(rbio->stripe_len, PAGE_CACHE_SIZE) * stripe_index + page_index
3: DIV_ROUND_UP(rbio->stripe_len * stripe_index, PAGE_CACHE_SIZE) + page_index
...

They yield the same result only when stripe_len is aligned to
PAGE_CACHE_SIZE, which is why the current code happens to work.
Introducing and using a common function for the calculation is the
better choice.
Signed-off-by: Zhao Lei <zhaolei@cn.fujitsu.com>
Signed-off-by: Chris Mason <clm@fb.com>
parent bfca9a6d
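The divergence is easy to demonstrate. The following is a minimal user-space sketch, not kernel code: PAGE_CACHE_SIZE is assumed to be 4096 here and DIV_ROUND_UP is redefined locally for illustration. It shows that the three methods agree only when stripe_len is a multiple of PAGE_CACHE_SIZE:

#include <stdio.h>

#define PAGE_CACHE_SIZE 4096UL
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* stripe_len NOT aligned to PAGE_CACHE_SIZE: 3 pages + 2048 bytes */
	unsigned long stripe_len = 3 * PAGE_CACHE_SIZE + 2048;
	int stripe_index = 2, page_index = 1;

	/* method 1: truncate the per-stripe page count */
	unsigned long i1 = (stripe_len / PAGE_CACHE_SIZE) * stripe_index + page_index;
	/* method 2: round the per-stripe page count up */
	unsigned long i2 = DIV_ROUND_UP(stripe_len, PAGE_CACHE_SIZE) * stripe_index + page_index;
	/* method 3: round the stripe's byte offset up */
	unsigned long i3 = DIV_ROUND_UP(stripe_len * stripe_index, PAGE_CACHE_SIZE) + page_index;

	printf("%lu %lu %lu\n", i1, i2, i3);	/* prints "7 9 8" */
	return 0;
}

With stripe_len at 3.5 pages the three methods index slots 7, 9 and 8 respectively; with any page-aligned stripe_len they coincide, which is why the mismatch never showed up in practice.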
@@ -609,13 +609,28 @@ static int rbio_can_merge(struct btrfs_raid_bio *last,
 	return 1;
 }
 
+static int rbio_stripe_page_index(struct btrfs_raid_bio *rbio, int stripe,
+				  int index)
+{
+	return stripe * rbio->stripe_npages + index;
+}
+
+/*
+ * these are just the pages from the rbio array, not from anything
+ * the FS sent down to us
+ */
+static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe,
+				     int index)
+{
+	return rbio->stripe_pages[rbio_stripe_page_index(rbio, stripe, index)];
+}
+
 /*
  * helper to index into the pstripe
  */
 static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index)
 {
-	index += (rbio->nr_data * rbio->stripe_len) >> PAGE_CACHE_SHIFT;
-	return rbio->stripe_pages[index];
+	return rbio_stripe_page(rbio, rbio->nr_data, index);
 }
 
 /*
@@ -626,10 +641,7 @@ static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index)
 {
 	if (rbio->nr_data + 1 == rbio->real_stripes)
 		return NULL;
-
-	index += ((rbio->nr_data + 1) * rbio->stripe_len) >>
-		PAGE_CACHE_SHIFT;
-	return rbio->stripe_pages[index];
+	return rbio_stripe_page(rbio, rbio->nr_data + 1, index);
 }
 
 /*
@@ -947,8 +959,7 @@ static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
  */
 static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
 {
-	unsigned long nr = stripe_len * nr_stripes;
-	return DIV_ROUND_UP(nr, PAGE_CACHE_SIZE);
+	return DIV_ROUND_UP(stripe_len, PAGE_CACHE_SIZE) * nr_stripes;
 }
 
 /*
@@ -1026,13 +1037,13 @@ static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
 	return 0;
 }
 
-/* allocate pages for just the p/q stripes */
+/* only allocate pages for p/q stripes */
 static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
 {
 	int i;
 	struct page *page;
 
-	i = (rbio->nr_data * rbio->stripe_len) >> PAGE_CACHE_SHIFT;
+	i = rbio_stripe_page_index(rbio, rbio->nr_data, 0);
 
 	for (; i < rbio->nr_pages; i++) {
 		if (rbio->stripe_pages[i])
@@ -1120,18 +1131,6 @@ static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
 	}
 }
 
-/*
- * these are just the pages from the rbio array, not from anything
- * the FS sent down to us
- */
-static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe, int page)
-{
-	int index;
-	index = stripe * (rbio->stripe_len >> PAGE_CACHE_SHIFT);
-	index += page;
-	return rbio->stripe_pages[index];
-}
-
 /*
  * helper function to walk our bio list and populate the bio_pages array with
  * the result. This seems expensive, but it is faster than constantly
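For reference, here is a hypothetical user-space sketch of the layout the patch standardizes on: every stripe reserves stripe_npages = DIV_ROUND_UP(stripe_len, PAGE_CACHE_SIZE) slots (method 2) in one flat page array, so the new helper is a plain multiply-and-add. struct demo_rbio and its fields are illustrative stand-ins, not the real struct btrfs_raid_bio:

#include <stdio.h>

#define PAGE_CACHE_SIZE 4096UL
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* stand-in for struct btrfs_raid_bio, reduced to the indexing fields */
struct demo_rbio {
	unsigned long stripe_len;	/* bytes per stripe, may be unaligned */
	int stripe_npages;		/* page slots reserved per stripe */
	int nr_data;			/* number of data stripes */
};

/* mirrors the helper introduced by the patch */
static int rbio_stripe_page_index(struct demo_rbio *rbio, int stripe, int index)
{
	return stripe * rbio->stripe_npages + index;
}

int main(void)
{
	struct demo_rbio rbio = { .stripe_len = 3 * PAGE_CACHE_SIZE + 2048,
				  .nr_data = 2 };
	rbio.stripe_npages = DIV_ROUND_UP(rbio.stripe_len, PAGE_CACHE_SIZE);

	/* the P stripe begins right after the nr_data data stripes: slot 8 */
	printf("p stripe starts at slot %d\n",
	       rbio_stripe_page_index(&rbio, rbio.nr_data, 0));
	return 0;
}

Because every stripe now owns exactly stripe_npages slots, rbio_pstripe_page, rbio_qstripe_page and alloc_rbio_parity_pages can all be expressed through the same helper, as the diff above shows.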