Commit 6d5be959 authored by Javier González, committed by Jens Axboe

lightnvm: rename nr_pages to nr_ppas on nvm_rq

The number of ppas contained in a request is not necessarily the number
of pages it maps to, either on the target side or on the device side.
To avoid confusion, rename nr_pages to nr_ppas, since that is what the
variable actually contains.
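
To make the divergence concrete, a hedged sketch with hypothetical numbers (not part of the patch): on a dual-plane device, every address a target submits is expanded to one ppa per plane, so the ppa count and the page count differ:

	/* Sketch of the page/ppa divergence the rename captures. With
	 * dev->plane_mode == NVM_PLANE_DOUBLE, nvm_set_rqd_ppalist()
	 * emits one ppa per plane for every input address.
	 */
	int nr_pages  = 4;			/* pages the target asked for */
	int plane_cnt = 2;			/* NVM_PLANE_DOUBLE */
	int nr_ppas   = nr_pages * plane_cnt;	/* 8 ppas reach the device */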
Signed-off-by: Javier González <javier@cnexlabs.com>
Signed-off-by: Matias Bjørling <m@bjorling.me>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent df414b33
@@ -226,8 +226,8 @@ void nvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
 {
 	int i;
 
-	if (rqd->nr_pages > 1) {
-		for (i = 0; i < rqd->nr_pages; i++)
+	if (rqd->nr_ppas > 1) {
+		for (i = 0; i < rqd->nr_ppas; i++)
 			rqd->ppa_list[i] = dev_to_generic_addr(dev,
 							rqd->ppa_list[i]);
 	} else {
@@ -240,8 +240,8 @@ void nvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
 {
 	int i;
 
-	if (rqd->nr_pages > 1) {
-		for (i = 0; i < rqd->nr_pages; i++)
+	if (rqd->nr_ppas > 1) {
+		for (i = 0; i < rqd->nr_ppas; i++)
 			rqd->ppa_list[i] = generic_to_dev_addr(dev,
 							rqd->ppa_list[i]);
 	} else {
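
An editorial note on the nr_ppas > 1 branching above: a single-ppa request keeps its address inline in rqd->ppa_addr, while a multi-ppa request carries a DMA-allocated rqd->ppa_list. A minimal consumer sketch, assuming a hypothetical helper use_ppa() (not from this patch):

	int i;

	if (rqd->nr_ppas == 1)
		use_ppa(rqd->ppa_addr);			/* inline single address */
	else
		for (i = 0; i < rqd->nr_ppas; i++)
			use_ppa(rqd->ppa_list[i]);	/* DMA-allocated list */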
@@ -256,13 +256,13 @@ int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
 	int i, plane_cnt, pl_idx;
 
 	if ((!vblk || dev->plane_mode == NVM_PLANE_SINGLE) && nr_ppas == 1) {
-		rqd->nr_pages = nr_ppas;
+		rqd->nr_ppas = nr_ppas;
 		rqd->ppa_addr = ppas[0];
 
 		return 0;
 	}
 
-	rqd->nr_pages = nr_ppas;
+	rqd->nr_ppas = nr_ppas;
 	rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
 	if (!rqd->ppa_list) {
 		pr_err("nvm: failed to allocate dma memory\n");
@@ -274,7 +274,7 @@ int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
 			rqd->ppa_list[i] = ppas[i];
 	} else {
 		plane_cnt = dev->plane_mode;
-		rqd->nr_pages *= plane_cnt;
+		rqd->nr_ppas *= plane_cnt;
 
 		for (i = 0; i < nr_ppas; i++) {
 			for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
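
Note that after the multiplication above, the nr_ppas argument keeps its pre-expansion value (the loops still iterate over it) while rqd->nr_ppas holds the post-expansion count. A caller's-eye sketch with assumed values (dev, rqd and ppas as in the surrounding code):

	/* Hypothetical call: 4 addresses in, vblk = 1, dual-plane device. */
	nvm_set_rqd_ppalist(dev, &rqd, ppas, 4, 1);
	/* rqd.nr_ppas == 8 when dev->plane_mode == NVM_PLANE_DOUBLE */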
@@ -395,7 +395,7 @@ int nvm_submit_ppa_list(struct nvm_dev *dev, struct ppa_addr *ppa_list,
 	memset(&rqd, 0, sizeof(struct nvm_rq));
 
-	rqd.nr_pages = nr_ppas;
+	rqd.nr_ppas = nr_ppas;
 	if (nr_ppas > 1)
 		rqd.ppa_list = ppa_list;
 	else
...
@@ -446,7 +446,7 @@ static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd)
 	nvm_addr_to_generic_mode(dev, rqd);
 
 	/* look up blocks and mark them as bad */
-	if (rqd->nr_pages == 1) {
+	if (rqd->nr_ppas == 1) {
 		gennvm_mark_blk(dev, rqd->ppa_addr, NVM_BLK_ST_BAD);
 		return;
 	}
...
@@ -695,7 +695,7 @@ static void rrpc_end_io(struct nvm_rq *rqd)
 {
 	struct rrpc *rrpc = container_of(rqd->ins, struct rrpc, instance);
 	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
-	uint8_t npages = rqd->nr_pages;
+	uint8_t npages = rqd->nr_ppas;
 	sector_t laddr = rrpc_get_laddr(rqd->bio) - npages;
 
 	if (bio_data_dir(rqd->bio) == WRITE)
@@ -883,7 +883,7 @@ static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
 	bio_get(bio);
 	rqd->bio = bio;
 	rqd->ins = &rrpc->instance;
-	rqd->nr_pages = nr_pages;
+	rqd->nr_ppas = nr_pages;
 	rrq->flags = flags;
 
 	err = nvm_submit_io(rrpc->dev, rqd);
@@ -892,7 +892,7 @@ static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
 		bio_put(bio);
 		if (!(flags & NVM_IOTYPE_GC)) {
 			rrpc_unlock_rq(rrpc, rqd);
-			if (rqd->nr_pages > 1)
+			if (rqd->nr_ppas > 1)
 				nvm_dev_dma_free(rrpc->dev,
 					rqd->ppa_list, rqd->dma_ppa_list);
 		}
...
@@ -251,7 +251,7 @@ static inline void rrpc_unlock_laddr(struct rrpc *rrpc,
 
 static inline void rrpc_unlock_rq(struct rrpc *rrpc, struct nvm_rq *rqd)
 {
 	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
-	uint8_t pages = rqd->nr_pages;
+	uint8_t pages = rqd->nr_ppas;
 
 	BUG_ON((r->l_start + pages) > rrpc->nr_sects);
...
@@ -280,7 +280,7 @@ static int nvm_set_bb_tbl(struct nvm_dev *dev, struct sysblk_scan *s, int type)
 	nvm_set_rqd_ppalist(dev, &rqd, s->ppas, s->nr_ppas, 1);
 	nvm_generic_to_addr_mode(dev, &rqd);
 
-	ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_pages, type);
+	ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
 	nvm_free_rqd_ppalist(dev, &rqd);
 	if (ret) {
 		pr_err("nvm: sysblk failed bb mark\n");
...
@@ -471,7 +471,7 @@ static inline void nvme_nvm_rqtocmd(struct request *rq, struct nvm_rq *rqd,
 	c->ph_rw.spba = cpu_to_le64(rqd->ppa_addr.ppa);
 	c->ph_rw.metadata = cpu_to_le64(rqd->dma_meta_list);
 	c->ph_rw.control = cpu_to_le16(rqd->flags);
-	c->ph_rw.length = cpu_to_le16(rqd->nr_pages - 1);
+	c->ph_rw.length = cpu_to_le16(rqd->nr_ppas - 1);
 
 	if (rqd->opcode == NVM_OP_HBWRITE || rqd->opcode == NVM_OP_HBREAD)
 		c->hb_rw.slba = cpu_to_le64(nvme_block_nr(ns,
@@ -542,7 +542,7 @@ static int nvme_nvm_erase_block(struct nvm_dev *dev, struct nvm_rq *rqd)
 	c.erase.opcode = NVM_OP_ERASE;
 	c.erase.nsid = cpu_to_le32(ns->ns_id);
 	c.erase.spba = cpu_to_le64(rqd->ppa_addr.ppa);
-	c.erase.length = cpu_to_le16(rqd->nr_pages - 1);
+	c.erase.length = cpu_to_le16(rqd->nr_ppas - 1);
 
 	return nvme_submit_sync_cmd(q, (struct nvme_command *)&c, NULL, 0);
 }
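
Both length fields above follow the usual NVMe convention of 0-based counts, hence the subtraction (values illustrative):

	/*
	 * 0-based length encoding:
	 *   rqd->nr_ppas == 1  ->  length == 0
	 *   rqd->nr_ppas == 8  ->  length == 7
	 */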
...
@@ -244,7 +244,7 @@ struct nvm_rq {
 	nvm_end_io_fn *end_io;
 
 	uint8_t opcode;
-	uint16_t nr_pages;
+	uint16_t nr_ppas;
 	uint16_t flags;
 
 	u64 ppa_status;		/* ppa media status */
...