Commit f9c10152 authored by Javier González, committed by Jens Axboe

lightnvm: pblk: issue multiplane reads if possible

If a read request is sequential and its size aligns with a
multi-plane page size, use the multi-plane hint to process the I/O in
parallel in the controller.
Signed-off-by: Javier González <javier@cnexlabs.com>
Signed-off-by: Matias Bjørling <matias@cnexlabs.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 0880a9aa
...@@ -564,7 +564,6 @@ static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line, ...@@ -564,7 +564,6 @@ static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line,
int id = line->id; int id = line->id;
int rq_ppas, rq_len; int rq_ppas, rq_len;
int cmd_op, bio_op; int cmd_op, bio_op;
int flags;
int i, j; int i, j;
int ret; int ret;
DECLARE_COMPLETION_ONSTACK(wait); DECLARE_COMPLETION_ONSTACK(wait);
...@@ -572,11 +571,9 @@ static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line, ...@@ -572,11 +571,9 @@ static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line,
if (dir == WRITE) { if (dir == WRITE) {
bio_op = REQ_OP_WRITE; bio_op = REQ_OP_WRITE;
cmd_op = NVM_OP_PWRITE; cmd_op = NVM_OP_PWRITE;
flags = pblk_set_progr_mode(pblk, WRITE);
} else if (dir == READ) { } else if (dir == READ) {
bio_op = REQ_OP_READ; bio_op = REQ_OP_READ;
cmd_op = NVM_OP_PREAD; cmd_op = NVM_OP_PREAD;
flags = pblk_set_read_mode(pblk);
} else } else
return -EINVAL; return -EINVAL;
...@@ -601,7 +598,6 @@ static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line, ...@@ -601,7 +598,6 @@ static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line,
rqd.bio = bio; rqd.bio = bio;
rqd.opcode = cmd_op; rqd.opcode = cmd_op;
rqd.flags = flags;
rqd.nr_ppas = rq_ppas; rqd.nr_ppas = rq_ppas;
rqd.ppa_list = ppa_list; rqd.ppa_list = ppa_list;
rqd.dma_ppa_list = dma_ppa_list; rqd.dma_ppa_list = dma_ppa_list;
...@@ -609,6 +605,7 @@ static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line, ...@@ -609,6 +605,7 @@ static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line,
rqd.private = &wait; rqd.private = &wait;
if (dir == WRITE) { if (dir == WRITE) {
rqd.flags = pblk_set_progr_mode(pblk, WRITE);
for (i = 0; i < rqd.nr_ppas; ) { for (i = 0; i < rqd.nr_ppas; ) {
spin_lock(&line->lock); spin_lock(&line->lock);
paddr = __pblk_alloc_page(pblk, line, min); paddr = __pblk_alloc_page(pblk, line, min);
...@@ -621,6 +618,11 @@ static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line, ...@@ -621,6 +618,11 @@ static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line,
for (i = 0; i < rqd.nr_ppas; ) { for (i = 0; i < rqd.nr_ppas; ) {
struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, id); struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, id);
int pos = pblk_dev_ppa_to_pos(geo, ppa); int pos = pblk_dev_ppa_to_pos(geo, ppa);
int read_type = PBLK_READ_RANDOM;
if (pblk_io_aligned(pblk, rq_ppas))
read_type = PBLK_READ_SEQUENTIAL;
rqd.flags = pblk_set_read_mode(pblk, read_type);
while (test_bit(pos, line->blk_bitmap)) { while (test_bit(pos, line->blk_bitmap)) {
paddr += min; paddr += min;
...@@ -717,7 +719,7 @@ static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line, ...@@ -717,7 +719,7 @@ static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
} else if (dir == READ) { } else if (dir == READ) {
bio_op = REQ_OP_READ; bio_op = REQ_OP_READ;
cmd_op = NVM_OP_PREAD; cmd_op = NVM_OP_PREAD;
flags = pblk_set_read_mode(pblk); flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
} else } else
return -EINVAL; return -EINVAL;
......
...@@ -88,6 +88,11 @@ static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd, ...@@ -88,6 +88,11 @@ static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE); bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
} }
if (pblk_io_aligned(pblk, nr_secs))
rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
else
rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
#ifdef CONFIG_NVM_DEBUG #ifdef CONFIG_NVM_DEBUG
atomic_long_add(nr_secs, &pblk->inflight_reads); atomic_long_add(nr_secs, &pblk->inflight_reads);
#endif #endif
...@@ -97,8 +102,6 @@ static int pblk_submit_read_io(struct pblk *pblk, struct nvm_rq *rqd) ...@@ -97,8 +102,6 @@ static int pblk_submit_read_io(struct pblk *pblk, struct nvm_rq *rqd)
{ {
int err; int err;
rqd->flags = pblk_set_read_mode(pblk);
err = pblk_submit_io(pblk, rqd); err = pblk_submit_io(pblk, rqd);
if (err) if (err)
return NVM_IO_ERR; return NVM_IO_ERR;
...@@ -177,6 +180,7 @@ static int pblk_fill_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd, ...@@ -177,6 +180,7 @@ static int pblk_fill_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
rqd->bio = new_bio; rqd->bio = new_bio;
rqd->nr_ppas = nr_holes; rqd->nr_ppas = nr_holes;
rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
rqd->end_io = NULL; rqd->end_io = NULL;
if (unlikely(nr_secs > 1 && nr_holes == 1)) { if (unlikely(nr_secs > 1 && nr_holes == 1)) {
...@@ -290,6 +294,8 @@ static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd, ...@@ -290,6 +294,8 @@ static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd,
} else { } else {
rqd->ppa_addr = ppa; rqd->ppa_addr = ppa;
} }
rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
} }
int pblk_submit_read(struct pblk *pblk, struct bio *bio) int pblk_submit_read(struct pblk *pblk, struct bio *bio)
...@@ -497,6 +503,7 @@ int pblk_submit_read_gc(struct pblk *pblk, u64 *lba_list, void *data, ...@@ -497,6 +503,7 @@ int pblk_submit_read_gc(struct pblk *pblk, u64 *lba_list, void *data,
rqd.end_io = pblk_end_io_sync; rqd.end_io = pblk_end_io_sync;
rqd.private = &wait; rqd.private = &wait;
rqd.nr_ppas = *secs_to_gc; rqd.nr_ppas = *secs_to_gc;
rqd.flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
rqd.bio = bio; rqd.bio = bio;
ret = pblk_submit_read_io(pblk, &rqd); ret = pblk_submit_read_io(pblk, &rqd);
......
...@@ -257,7 +257,6 @@ static int pblk_recov_read_oob(struct pblk *pblk, struct pblk_line *line, ...@@ -257,7 +257,6 @@ static int pblk_recov_read_oob(struct pblk *pblk, struct pblk_line *line,
rqd->bio = bio; rqd->bio = bio;
rqd->opcode = NVM_OP_PREAD; rqd->opcode = NVM_OP_PREAD;
rqd->flags = pblk_set_read_mode(pblk);
rqd->meta_list = meta_list; rqd->meta_list = meta_list;
rqd->nr_ppas = rq_ppas; rqd->nr_ppas = rq_ppas;
rqd->ppa_list = ppa_list; rqd->ppa_list = ppa_list;
...@@ -266,6 +265,11 @@ static int pblk_recov_read_oob(struct pblk *pblk, struct pblk_line *line, ...@@ -266,6 +265,11 @@ static int pblk_recov_read_oob(struct pblk *pblk, struct pblk_line *line,
rqd->end_io = pblk_end_io_sync; rqd->end_io = pblk_end_io_sync;
rqd->private = &wait; rqd->private = &wait;
if (pblk_io_aligned(pblk, rq_ppas))
rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
else
rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
for (i = 0; i < rqd->nr_ppas; ) { for (i = 0; i < rqd->nr_ppas; ) {
struct ppa_addr ppa; struct ppa_addr ppa;
int pos; int pos;
...@@ -473,7 +477,6 @@ static int pblk_recov_scan_all_oob(struct pblk *pblk, struct pblk_line *line, ...@@ -473,7 +477,6 @@ static int pblk_recov_scan_all_oob(struct pblk *pblk, struct pblk_line *line,
rqd->bio = bio; rqd->bio = bio;
rqd->opcode = NVM_OP_PREAD; rqd->opcode = NVM_OP_PREAD;
rqd->flags = pblk_set_read_mode(pblk);
rqd->meta_list = meta_list; rqd->meta_list = meta_list;
rqd->nr_ppas = rq_ppas; rqd->nr_ppas = rq_ppas;
rqd->ppa_list = ppa_list; rqd->ppa_list = ppa_list;
...@@ -482,6 +485,11 @@ static int pblk_recov_scan_all_oob(struct pblk *pblk, struct pblk_line *line, ...@@ -482,6 +485,11 @@ static int pblk_recov_scan_all_oob(struct pblk *pblk, struct pblk_line *line,
rqd->end_io = pblk_end_io_sync; rqd->end_io = pblk_end_io_sync;
rqd->private = &wait; rqd->private = &wait;
if (pblk_io_aligned(pblk, rq_ppas))
rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
else
rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
for (i = 0; i < rqd->nr_ppas; ) { for (i = 0; i < rqd->nr_ppas; ) {
struct ppa_addr ppa; struct ppa_addr ppa;
int pos; int pos;
...@@ -607,7 +615,6 @@ static int pblk_recov_scan_oob(struct pblk *pblk, struct pblk_line *line, ...@@ -607,7 +615,6 @@ static int pblk_recov_scan_oob(struct pblk *pblk, struct pblk_line *line,
rqd->bio = bio; rqd->bio = bio;
rqd->opcode = NVM_OP_PREAD; rqd->opcode = NVM_OP_PREAD;
rqd->flags = pblk_set_read_mode(pblk);
rqd->meta_list = meta_list; rqd->meta_list = meta_list;
rqd->nr_ppas = rq_ppas; rqd->nr_ppas = rq_ppas;
rqd->ppa_list = ppa_list; rqd->ppa_list = ppa_list;
...@@ -616,6 +623,11 @@ static int pblk_recov_scan_oob(struct pblk *pblk, struct pblk_line *line, ...@@ -616,6 +623,11 @@ static int pblk_recov_scan_oob(struct pblk *pblk, struct pblk_line *line,
rqd->end_io = pblk_end_io_sync; rqd->end_io = pblk_end_io_sync;
rqd->private = &wait; rqd->private = &wait;
if (pblk_io_aligned(pblk, rq_ppas))
rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
else
rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
for (i = 0; i < rqd->nr_ppas; ) { for (i = 0; i < rqd->nr_ppas; ) {
struct ppa_addr ppa; struct ppa_addr ppa;
int pos; int pos;
......
...@@ -1075,9 +1075,27 @@ static inline int pblk_set_progr_mode(struct pblk *pblk, int type) ...@@ -1075,9 +1075,27 @@ static inline int pblk_set_progr_mode(struct pblk *pblk, int type)
return flags; return flags;
} }
static inline int pblk_set_read_mode(struct pblk *pblk) enum {
PBLK_READ_RANDOM = 0,
PBLK_READ_SEQUENTIAL = 1,
};
static inline int pblk_set_read_mode(struct pblk *pblk, int type)
{
struct nvm_tgt_dev *dev = pblk->dev;
struct nvm_geo *geo = &dev->geo;
int flags;
flags = NVM_IO_SUSPEND | NVM_IO_SCRAMBLE_ENABLE;
if (type == PBLK_READ_SEQUENTIAL)
flags |= geo->plane_mode >> 1;
return flags;
}
/*
 * Return nonzero when an I/O of @nr_secs sectors is aligned to the
 * device's minimal write page granularity (pblk->min_write_pgs), i.e.
 * nr_secs is an exact multiple of it. Callers use this to decide
 * whether a read may use the multi-plane (sequential) hint.
 */
static inline int pblk_io_aligned(struct pblk *pblk, int nr_secs)
{
	return !(nr_secs % pblk->min_write_pgs);
}
#ifdef CONFIG_NVM_DEBUG #ifdef CONFIG_NVM_DEBUG
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment