Commit 67b56134 authored by Jens Axboe

Merge branch 'md-next' of...

Merge branch 'md-next' of https://git.kernel.org/pub/scm/linux/kernel/git/song/md into for-5.18/drivers

Pull MD updates from Song:

"This set contains raid5 bio handling cleanups for raid5."

* 'md-next' of https://git.kernel.org/pub/scm/linux/kernel/git/song/md:
  raid5: initialize the stripe_head embedded bios as needed
  raid5-cache: statically allocate the recovery ra bio
  raid5-cache: fully initialize flush_bio when needed
  raid5-ppl: fully initialize the bio in ppl_new_iounit
parents a2daeab5 03a6b195
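
All four patches converge on the same pattern: instead of keeping a pre-initialized bio embedded in a long-lived structure and resetting it before each use, the bio is fully initialized with bio_init() right before submission (block device, opf and bvec table passed in up front) and torn down with bio_uninit() afterwards. Below is a minimal sketch of that pattern, not taken from this series; it assumes the v5.18-era bio_init()/__bio_add_page() signatures, and read_one_page() is a hypothetical helper invented for illustration.

#include <linux/bio.h>

/*
 * Hypothetical helper: synchronously read one page using an on-stack
 * bio, following the init-before-use pattern this series applies.
 */
static int read_one_page(struct block_device *bdev, struct page *page,
			 sector_t sector)
{
	struct bio_vec bvec;
	struct bio bio;
	int ret;

	/* Fully initialize the bio only when it is actually needed. */
	bio_init(&bio, bdev, &bvec, 1, REQ_OP_READ);
	bio.bi_iter.bi_sector = sector;
	/* A one-entry bvec table cannot overflow, so the unchecked add is safe. */
	__bio_add_page(&bio, page, PAGE_SIZE, 0);

	ret = submit_bio_wait(&bio);
	bio_uninit(&bio);	/* pairs with bio_init() */
	return ret;
}

This avoids carrying half-initialized bios (and stale device references) in stripe_head, ppl_io_unit and r5l_log between uses.
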
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -1266,6 +1266,8 @@ static void r5l_log_flush_endio(struct bio *bio)
 		r5l_io_run_stripes(io);
 	list_splice_tail_init(&log->flushing_ios, &log->finished_ios);
 	spin_unlock_irqrestore(&log->io_list_lock, flags);
+
+	bio_uninit(bio);
 }
 
 /*
@@ -1301,7 +1303,7 @@ void r5l_flush_stripe_to_raid(struct r5l_log *log)
 	if (!do_flush)
 		return;
 
-	bio_reset(&log->flush_bio, log->rdev->bdev,
+	bio_init(&log->flush_bio, log->rdev->bdev, NULL, 0,
 		  REQ_OP_WRITE | REQ_PREFLUSH);
 	log->flush_bio.bi_end_io = r5l_log_flush_endio;
 	submit_bio(&log->flush_bio);
@@ -1621,10 +1623,10 @@ struct r5l_recovery_ctx {
 	 * just copy data from the pool.
 	 */
 	struct page *ra_pool[R5L_RECOVERY_PAGE_POOL_SIZE];
+	struct bio_vec ra_bvec[R5L_RECOVERY_PAGE_POOL_SIZE];
 	sector_t pool_offset;	/* offset of first page in the pool */
 	int total_pages;	/* total allocated pages */
 	int valid_pages;	/* pages with valid data */
-	struct bio *ra_bio;	/* bio to do the read ahead */
 };
 
 static int r5l_recovery_allocate_ra_pool(struct r5l_log *log,
@@ -1632,11 +1634,6 @@ static int r5l_recovery_allocate_ra_pool(struct r5l_log *log,
 {
 	struct page *page;
 
-	ctx->ra_bio = bio_alloc_bioset(NULL, BIO_MAX_VECS, 0, GFP_KERNEL,
-				       &log->bs);
-	if (!ctx->ra_bio)
-		return -ENOMEM;
-
 	ctx->valid_pages = 0;
 	ctx->total_pages = 0;
 	while (ctx->total_pages < R5L_RECOVERY_PAGE_POOL_SIZE) {
@@ -1648,10 +1645,8 @@ static int r5l_recovery_allocate_ra_pool(struct r5l_log *log,
 		ctx->total_pages += 1;
 	}
 
-	if (ctx->total_pages == 0) {
-		bio_put(ctx->ra_bio);
+	if (ctx->total_pages == 0)
 		return -ENOMEM;
-	}
 
 	ctx->pool_offset = 0;
 	return 0;
@@ -1664,7 +1659,6 @@ static void r5l_recovery_free_ra_pool(struct r5l_log *log,
 
 	for (i = 0; i < ctx->total_pages; ++i)
 		put_page(ctx->ra_pool[i]);
-	bio_put(ctx->ra_bio);
 }
 
 /*
@@ -1677,15 +1671,19 @@ static int r5l_recovery_fetch_ra_pool(struct r5l_log *log,
 				      struct r5l_recovery_ctx *ctx,
 				      sector_t offset)
 {
-	bio_reset(ctx->ra_bio, log->rdev->bdev, REQ_OP_READ);
-	ctx->ra_bio->bi_iter.bi_sector = log->rdev->data_offset + offset;
+	struct bio bio;
+	int ret;
+
+	bio_init(&bio, log->rdev->bdev, ctx->ra_bvec,
+		 R5L_RECOVERY_PAGE_POOL_SIZE, REQ_OP_READ);
+	bio.bi_iter.bi_sector = log->rdev->data_offset + offset;
 
 	ctx->valid_pages = 0;
 	ctx->pool_offset = offset;
 
 	while (ctx->valid_pages < ctx->total_pages) {
-		bio_add_page(ctx->ra_bio,
-			     ctx->ra_pool[ctx->valid_pages], PAGE_SIZE, 0);
+		__bio_add_page(&bio, ctx->ra_pool[ctx->valid_pages], PAGE_SIZE,
+			       0);
 		ctx->valid_pages += 1;
 
 		offset = r5l_ring_add(log, offset, BLOCK_SECTORS);
@@ -1694,7 +1692,9 @@ static int r5l_recovery_fetch_ra_pool(struct r5l_log *log,
 			break;
 	}
 
-	return submit_bio_wait(ctx->ra_bio);
+	ret = submit_bio_wait(&bio);
+	bio_uninit(&bio);
+	return ret;
 }
 
 /*
@@ -3105,7 +3105,6 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
 	INIT_LIST_HEAD(&log->io_end_ios);
 	INIT_LIST_HEAD(&log->flushing_ios);
 	INIT_LIST_HEAD(&log->finished_ios);
-	bio_init(&log->flush_bio, NULL, NULL, 0, 0);
 
 	log->io_kc = KMEM_CACHE(r5l_io_unit, 0);
 	if (!log->io_kc)

--- a/drivers/md/raid5-ppl.c
+++ b/drivers/md/raid5-ppl.c
@@ -250,7 +250,8 @@ static struct ppl_io_unit *ppl_new_iounit(struct ppl_log *log,
 	INIT_LIST_HEAD(&io->stripe_list);
 	atomic_set(&io->pending_stripes, 0);
 	atomic_set(&io->pending_flushes, 0);
-	bio_init(&io->bio, NULL, io->biovec, PPL_IO_INLINE_BVECS, 0);
+	bio_init(&io->bio, log->rdev->bdev, io->biovec, PPL_IO_INLINE_BVECS,
+		 REQ_OP_WRITE | REQ_FUA);
 
 	pplhdr = page_address(io->header_page);
 	clear_page(pplhdr);
@@ -465,8 +466,6 @@ static void ppl_submit_iounit(struct ppl_io_unit *io)
 
 	bio->bi_end_io = ppl_log_endio;
-	bio->bi_opf = REQ_OP_WRITE | REQ_FUA;
-	bio_set_dev(bio, log->rdev->bdev);
 	bio->bi_iter.bi_sector = log->next_io_sector;
 	bio_add_page(bio, io->header_page, PAGE_SIZE, 0);
 	bio->bi_write_hint = ppl_conf->write_hint;
 

--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1060,6 +1060,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 	int i, disks = sh->disks;
 	struct stripe_head *head_sh = sh;
 	struct bio_list pending_bios = BIO_EMPTY_LIST;
+	struct r5dev *dev;
 	bool should_defer;
 
 	might_sleep();
@@ -1094,8 +1095,9 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 			op_flags |= REQ_SYNC;
 
 again:
-		bi = &sh->dev[i].req;
-		rbi = &sh->dev[i].rreq; /* For writing to replacement */
+		dev = &sh->dev[i];
+		bi = &dev->req;
+		rbi = &dev->rreq; /* For writing to replacement */
 
 		rcu_read_lock();
 		rrdev = rcu_dereference(conf->disks[i].replacement);
@@ -1171,8 +1173,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 
 			set_bit(STRIPE_IO_STARTED, &sh->state);
 
-			bio_set_dev(bi, rdev->bdev);
-			bio_set_op_attrs(bi, op, op_flags);
+			bio_init(bi, rdev->bdev, &dev->vec, 1, op | op_flags);
 			bi->bi_end_io = op_is_write(op)
 				? raid5_end_write_request
 				: raid5_end_read_request;
@@ -1238,8 +1239,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 
 			set_bit(STRIPE_IO_STARTED, &sh->state);
 
-			bio_set_dev(rbi, rrdev->bdev);
-			bio_set_op_attrs(rbi, op, op_flags);
+			bio_init(rbi, rrdev->bdev, &dev->rvec, 1, op | op_flags);
 			BUG_ON(!op_is_write(op));
 			rbi->bi_end_io = raid5_end_write_request;
 			rbi->bi_private = sh;
@@ -2294,7 +2294,6 @@ static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp,
 		int disks, struct r5conf *conf)
 {
 	struct stripe_head *sh;
-	int i;
 
 	sh = kmem_cache_zalloc(sc, gfp);
 	if (sh) {
@@ -2307,12 +2306,6 @@ static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp,
 		atomic_set(&sh->count, 1);
 		sh->raid_conf = conf;
 		sh->log_start = MaxSector;
-		for (i = 0; i < disks; i++) {
-			struct r5dev *dev = &sh->dev[i];
-
-			bio_init(&dev->req, NULL, &dev->vec, 1, 0);
-			bio_init(&dev->rreq, NULL, &dev->rvec, 1, 0);
-		}
 
 		if (raid5_has_ppl(conf)) {
 			sh->ppl_page = alloc_page(gfp);
@@ -2677,7 +2670,6 @@ static void raid5_end_read_request(struct bio * bi)
 		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
 		bi->bi_status);
 	if (i == disks) {
-		bio_reset(bi, NULL, 0);
 		BUG();
 		return;
 	}
@@ -2785,7 +2777,7 @@ static void raid5_end_read_request(struct bio * bi)
 		}
 	}
 	rdev_dec_pending(rdev, conf->mddev);
-	bio_reset(bi, NULL, 0);
+	bio_uninit(bi);
 	clear_bit(R5_LOCKED, &sh->dev[i].flags);
 	set_bit(STRIPE_HANDLE, &sh->state);
 	raid5_release_stripe(sh);
@@ -2823,7 +2815,6 @@ static void raid5_end_write_request(struct bio *bi)
 		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
 		bi->bi_status);
 	if (i == disks) {
-		bio_reset(bi, NULL, 0);
 		BUG();
 		return;
 	}
@@ -2860,7 +2851,7 @@ static void raid5_end_write_request(struct bio *bi)
 	if (sh->batch_head && bi->bi_status && !replacement)
 		set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state);
 
-	bio_reset(bi, NULL, 0);
+	bio_uninit(bi);
 	if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags))
 		clear_bit(R5_LOCKED, &sh->dev[i].flags);
 	set_bit(STRIPE_HANDLE, &sh->state);