Commit 39b0555f authored by Andreas Gruenbacher's avatar Andreas Gruenbacher Committed by Bob Peterson

gfs2: Extended attribute readahead optimization

Instead of submitting a READ_SYNC bio for the inode and a READA bio for
the inode's extended attributes through submit_bh, submit a single READ_SYNC
bio for both through submit_bio when possible.  This can be more
efficient on some kinds of block devices.
Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
Signed-off-by: Bob Peterson <rpeterso@redhat.com>
parent c8d57703
@@ -187,19 +187,50 @@ struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)
return bh; return bh;
} }
static void gfs2_meta_readahead(struct gfs2_glock *gl, u64 blkno) static void gfs2_meta_read_endio(struct bio *bio)
{ {
struct buffer_head *bh; struct bio_vec *bvec;
int i;
bio_for_each_segment_all(bvec, bio, i) {
struct page *page = bvec->bv_page;
struct buffer_head *bh = page_buffers(page);
unsigned int len = bvec->bv_len;
while (bh_offset(bh) < bvec->bv_offset)
bh = bh->b_this_page;
do {
struct buffer_head *next = bh->b_this_page;
len -= bh->b_size;
bh->b_end_io(bh, !bio->bi_error);
bh = next;
} while (bh && len);
}
bio_put(bio);
}
bh = gfs2_getbuf(gl, blkno, 1); /*
lock_buffer(bh); * Submit several consecutive buffer head I/O requests as a single bio I/O
if (buffer_uptodate(bh)) { * request. (See submit_bh_wbc.)
unlock_buffer(bh); */
brelse(bh); static void gfs2_submit_bhs(int rw, struct buffer_head *bhs[], int num)
{
struct buffer_head *bh = bhs[0];
struct bio *bio;
int i;
if (!num)
return; return;
bio = bio_alloc(GFP_NOIO, num);
bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
bio->bi_bdev = bh->b_bdev;
for (i = 0; i < num; i++) {
bh = bhs[i];
bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
} }
bh->b_end_io = end_buffer_read_sync; bio->bi_end_io = gfs2_meta_read_endio;
submit_bh(READA | REQ_META | REQ_PRIO, bh); submit_bio(rw, bio);
} }
/** /**
...@@ -216,7 +247,8 @@ int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags, ...@@ -216,7 +247,8 @@ int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
int rahead, struct buffer_head **bhp) int rahead, struct buffer_head **bhp)
{ {
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
struct buffer_head *bh; struct buffer_head *bh, *bhs[2];
int num = 0;
if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) { if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
*bhp = NULL; *bhp = NULL;
...@@ -228,18 +260,31 @@ int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags, ...@@ -228,18 +260,31 @@ int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
lock_buffer(bh); lock_buffer(bh);
if (buffer_uptodate(bh)) { if (buffer_uptodate(bh)) {
unlock_buffer(bh); unlock_buffer(bh);
if (rahead) flags &= ~DIO_WAIT;
gfs2_meta_readahead(gl, blkno + 1); } else {
return 0; bh->b_end_io = end_buffer_read_sync;
get_bh(bh);
bhs[num++] = bh;
} }
bh->b_end_io = end_buffer_read_sync;
get_bh(bh); if (rahead) {
submit_bh(READ_SYNC | REQ_META | REQ_PRIO, bh); bh = gfs2_getbuf(gl, blkno + 1, CREATE);
if (rahead)
gfs2_meta_readahead(gl, blkno + 1); lock_buffer(bh);
if (buffer_uptodate(bh)) {
unlock_buffer(bh);
brelse(bh);
} else {
bh->b_end_io = end_buffer_read_sync;
bhs[num++] = bh;
}
}
gfs2_submit_bhs(READ_SYNC | REQ_META | REQ_PRIO, bhs, num);
if (!(flags & DIO_WAIT)) if (!(flags & DIO_WAIT))
return 0; return 0;
bh = *bhp;
wait_on_buffer(bh); wait_on_buffer(bh);
if (unlikely(!buffer_uptodate(bh))) { if (unlikely(!buffer_uptodate(bh))) {
struct gfs2_trans *tr = current->journal_info; struct gfs2_trans *tr = current->journal_info;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment