Commit d7b68016 authored by Matias Bjørling, committed by Jens Axboe

lightnvm: combine 1.2 and 2.0 command flags

Add an nvm_set_flags helper so the core can set the appropriate
command flags for read/write/erase commands, depending on which
specification version a drive supports.

The flags argument can be distilled into the access hint,
scrambling, and program/erase suspend. Replace the access hint with
an "is_seq" parameter; the remaining flags depend on the command
opcode, which is trivial to detect and set.
Signed-off-by: Matias Bjørling <mb@lightnvm.io>
Reviewed-by: Javier González <javier@cnexlabs.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 73569e11
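For a 1.2 drive, the helper's decision table is easy to see in isolation. Below is a minimal standalone sketch that mirrors the logic of the new nvm_set_flags() outside the kernel; the numeric values of the NVM_* constants and the quad-plane geometry are illustrative assumptions, not part of this patch:

/*
 * Hedged sketch: replays the nvm_set_flags() decisions in userspace.
 * Constant values and pln_mode below are made-up stand-ins so this
 * compiles on its own; only the control flow matches the patch.
 */
#include <stdio.h>

enum { NVM_OP_PWRITE = 0x91, NVM_OP_PREAD = 0x92, NVM_OP_ERASE = 0x90 };
enum { NVM_IO_SUSPEND = 0x80, NVM_IO_SCRAMBLE_ENABLE = 0x200 };
enum { NVM_OCSSD_SPEC_12 = 1, NVM_OCSSD_SPEC_20 = 2 };

struct geo { int version; int pln_mode; };
struct rq  { int opcode; int is_seq; };

static int set_flags(const struct geo *geo, const struct rq *rqd)
{
        int flags = 0;

        if (geo->version == NVM_OCSSD_SPEC_20)
                return 0;                       /* 2.0 drives take no 1.2 flags */

        if (rqd->is_seq)                        /* sequential hint -> plane mode */
                flags |= geo->pln_mode >> 1;

        if (rqd->opcode == NVM_OP_PREAD)        /* reads: scramble + suspend */
                flags |= NVM_IO_SCRAMBLE_ENABLE | NVM_IO_SUSPEND;
        else if (rqd->opcode == NVM_OP_PWRITE)  /* writes: scramble only */
                flags |= NVM_IO_SCRAMBLE_ENABLE;

        return flags;                           /* erase: plane-mode hint only */
}

int main(void)
{
        struct geo g12 = { NVM_OCSSD_SPEC_12, 4 /* quad-plane, assumed */ };
        struct rq seq_read  = { NVM_OP_PREAD,  1 };
        struct rq rand_read = { NVM_OP_PREAD,  0 };
        struct rq seq_write = { NVM_OP_PWRITE, 1 };
        struct rq erase     = { NVM_OP_ERASE,  1 };

        printf("seq read:  0x%x\n", set_flags(&g12, &seq_read));
        printf("rand read: 0x%x\n", set_flags(&g12, &rand_read));
        printf("seq write: 0x%x\n", set_flags(&g12, &seq_write));
        printf("erase:     0x%x\n", set_flags(&g12, &erase));
        return 0;
}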
@@ -752,6 +752,24 @@ int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
 }
 EXPORT_SYMBOL(nvm_set_tgt_bb_tbl);
 
+static int nvm_set_flags(struct nvm_geo *geo, struct nvm_rq *rqd)
+{
+        int flags = 0;
+
+        if (geo->version == NVM_OCSSD_SPEC_20)
+                return 0;
+
+        if (rqd->is_seq)
+                flags |= geo->pln_mode >> 1;
+
+        if (rqd->opcode == NVM_OP_PREAD)
+                flags |= (NVM_IO_SCRAMBLE_ENABLE | NVM_IO_SUSPEND);
+        else if (rqd->opcode == NVM_OP_PWRITE)
+                flags |= NVM_IO_SCRAMBLE_ENABLE;
+
+        return flags;
+}
+
 int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
 {
         struct nvm_dev *dev = tgt_dev->parent;
@@ -763,6 +781,7 @@ int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
         nvm_rq_tgt_to_dev(tgt_dev, rqd);
 
         rqd->dev = tgt_dev;
+        rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);
 
         /* In case of error, fail with right address format */
         ret = dev->ops->submit_io(dev, rqd);
@@ -783,6 +802,7 @@ int nvm_submit_io_sync(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
         nvm_rq_tgt_to_dev(tgt_dev, rqd);
 
         rqd->dev = tgt_dev;
+        rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);
 
         /* In case of error, fail with right address format */
         ret = dev->ops->submit_io_sync(dev, rqd);
...
@@ -688,7 +688,7 @@ static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line,
         if (dir == PBLK_WRITE) {
                 struct pblk_sec_meta *meta_list = rqd.meta_list;
 
-                rqd.flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
+                rqd.is_seq = 1;
                 for (i = 0; i < rqd.nr_ppas; ) {
                         spin_lock(&line->lock);
                         paddr = __pblk_alloc_page(pblk, line, min);
@@ -703,11 +703,9 @@ static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line,
         for (i = 0; i < rqd.nr_ppas; ) {
                 struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, id);
                 int pos = pblk_ppa_to_pos(geo, ppa);
-                int read_type = PBLK_READ_RANDOM;
 
                 if (pblk_io_aligned(pblk, rq_ppas))
-                        read_type = PBLK_READ_SEQUENTIAL;
-                rqd.flags = pblk_set_read_mode(pblk, read_type);
+                        rqd.is_seq = 1;
 
                 while (test_bit(pos, line->blk_bitmap)) {
                         paddr += min;
@@ -787,17 +785,14 @@ static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
         __le64 *lba_list = NULL;
         int i, ret;
         int cmd_op, bio_op;
-        int flags;
 
         if (dir == PBLK_WRITE) {
                 bio_op = REQ_OP_WRITE;
                 cmd_op = NVM_OP_PWRITE;
-                flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
                 lba_list = emeta_to_lbas(pblk, line->emeta->buf);
         } else if (dir == PBLK_READ_RECOV || dir == PBLK_READ) {
                 bio_op = REQ_OP_READ;
                 cmd_op = NVM_OP_PREAD;
-                flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
         } else
                 return -EINVAL;
@@ -822,7 +817,7 @@ static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
         rqd.bio = bio;
         rqd.opcode = cmd_op;
-        rqd.flags = flags;
+        rqd.is_seq = 1;
         rqd.nr_ppas = lm->smeta_sec;
 
         for (i = 0; i < lm->smeta_sec; i++, paddr++) {
@@ -885,7 +880,7 @@ static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
         rqd->opcode = NVM_OP_ERASE;
         rqd->ppa_addr = ppa;
         rqd->nr_ppas = 1;
-        rqd->flags = pblk_set_progr_mode(pblk, PBLK_ERASE);
+        rqd->is_seq = 1;
         rqd->bio = NULL;
 }
...
@@ -93,9 +93,7 @@ static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
         }
 
         if (pblk_io_aligned(pblk, nr_secs))
-                rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
-        else
-                rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
+                rqd->is_seq = 1;
 
 #ifdef CONFIG_NVM_PBLK_DEBUG
         atomic_long_add(nr_secs, &pblk->inflight_reads);
@@ -344,7 +342,6 @@ static int pblk_setup_partial_read(struct pblk *pblk, struct nvm_rq *rqd,
         rqd->bio = new_bio;
         rqd->nr_ppas = nr_holes;
-        rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
 
         pr_ctx->ppa_ptr = NULL;
         pr_ctx->orig_bio = bio;
@@ -438,8 +435,6 @@ static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd, struct bio *bio,
         } else {
                 rqd->ppa_addr = ppa;
         }
-
-        rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
 }
 
 int pblk_submit_read(struct pblk *pblk, struct bio *bio)
@@ -663,7 +658,6 @@ int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
         rqd.opcode = NVM_OP_PREAD;
         rqd.nr_ppas = gc_rq->secs_to_gc;
-        rqd.flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
         rqd.bio = bio;
 
         if (pblk_submit_io_sync(pblk, &rqd)) {
...
@@ -159,9 +159,7 @@ static int pblk_recov_read_oob(struct pblk *pblk, struct pblk_line *line,
         rqd->dma_meta_list = dma_meta_list;
 
         if (pblk_io_aligned(pblk, rq_ppas))
-                rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
-        else
-                rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
+                rqd->is_seq = 1;
 
         for (i = 0; i < rqd->nr_ppas; ) {
                 struct ppa_addr ppa;
@@ -302,7 +300,7 @@ static int pblk_recov_pad_oob(struct pblk *pblk, struct pblk_line *line,
         rqd->bio = bio;
         rqd->opcode = NVM_OP_PWRITE;
-        rqd->flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
+        rqd->is_seq = 1;
         rqd->meta_list = meta_list;
         rqd->nr_ppas = rq_ppas;
         rqd->ppa_list = ppa_list;
@@ -436,9 +434,7 @@ static int pblk_recov_scan_all_oob(struct pblk *pblk, struct pblk_line *line,
         rqd->dma_meta_list = dma_meta_list;
 
         if (pblk_io_aligned(pblk, rq_ppas))
-                rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
-        else
-                rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
+                rqd->is_seq = 1;
 
         for (i = 0; i < rqd->nr_ppas; ) {
                 struct ppa_addr ppa;
@@ -567,9 +563,7 @@ static int pblk_recov_scan_oob(struct pblk *pblk, struct pblk_line *line,
         rqd->dma_meta_list = dma_meta_list;
 
         if (pblk_io_aligned(pblk, rq_ppas))
-                rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
-        else
-                rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
+                rqd->is_seq = 1;
 
         for (i = 0; i < rqd->nr_ppas; ) {
                 struct ppa_addr ppa;
...
@@ -302,7 +302,7 @@ static int pblk_alloc_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
         /* Setup write request */
         rqd->opcode = NVM_OP_PWRITE;
         rqd->nr_ppas = nr_secs;
-        rqd->flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
+        rqd->is_seq = 1;
         rqd->private = pblk;
         rqd->end_io = end_io;
...
@@ -1255,44 +1255,6 @@ static inline u32 pblk_calc_emeta_crc(struct pblk *pblk,
         return crc;
 }
 
-static inline int pblk_set_progr_mode(struct pblk *pblk, int type)
-{
-        struct nvm_tgt_dev *dev = pblk->dev;
-        struct nvm_geo *geo = &dev->geo;
-        int flags;
-
-        if (geo->version == NVM_OCSSD_SPEC_20)
-                return 0;
-
-        flags = geo->pln_mode >> 1;
-
-        if (type == PBLK_WRITE)
-                flags |= NVM_IO_SCRAMBLE_ENABLE;
-
-        return flags;
-}
-
-enum {
-        PBLK_READ_RANDOM        = 0,
-        PBLK_READ_SEQUENTIAL    = 1,
-};
-
-static inline int pblk_set_read_mode(struct pblk *pblk, int type)
-{
-        struct nvm_tgt_dev *dev = pblk->dev;
-        struct nvm_geo *geo = &dev->geo;
-        int flags;
-
-        if (geo->version == NVM_OCSSD_SPEC_20)
-                return 0;
-
-        flags = NVM_IO_SUSPEND | NVM_IO_SCRAMBLE_ENABLE;
-
-        if (type == PBLK_READ_SEQUENTIAL)
-                flags |= geo->pln_mode >> 1;
-
-        return flags;
-}
-
 static inline int pblk_io_aligned(struct pblk *pblk, int nr_secs)
 {
         return !(nr_secs % pblk->min_write_pgs);
...
@@ -305,6 +305,8 @@ struct nvm_rq {
         u64 ppa_status; /* ppa media status */
         int error;
 
+        int is_seq; /* Sequential hint flag. 1.2 only */
+
         void *private;
 };
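The net effect on targets is visible in the pblk hunks above: call sites stop computing a 1.2-specific flag word and only record intent, while the core fills rqd->flags at submit time. A compact standalone sketch of that contract; the struct is trimmed to stand-in fields and the opcode value is assumed, not quoted from this patch:

#include <stdio.h>

enum { NVM_OP_PWRITE = 0x91 };          /* illustrative value */
struct nvm_rq { int opcode; int is_seq; int flags; };

/* Before: every pblk call site derived rqd->flags by hand through
 * pblk_set_progr_mode()/pblk_set_read_mode() (both removed above). */
static void setup_write_old(struct nvm_rq *rqd, int progr_mode_flags)
{
        rqd->opcode = NVM_OP_PWRITE;
        rqd->flags = progr_mode_flags;
}

/* After: the call site states intent only; nvm_submit_io() and
 * nvm_submit_io_sync() fill rqd->flags via nvm_set_flags(). */
static void setup_write_new(struct nvm_rq *rqd)
{
        rqd->opcode = NVM_OP_PWRITE;
        rqd->is_seq = 1;
}

int main(void)
{
        struct nvm_rq old_rq = {0}, new_rq = {0};

        setup_write_old(&old_rq, 0x202);  /* flag word computed by target */
        setup_write_new(&new_rq);         /* flags left to the core */
        printf("old: flags=0x%x  new: is_seq=%d flags=0x%x\n",
               old_rq.flags, new_rq.is_seq, new_rq.flags);
        return 0;
}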