Commit 7386af27 authored by Matias Bjørling, committed by Jens Axboe

lightnvm: remove linear and device addr modes

The linear and device specific address modes can be replaced with a
simple offset and bit length conversion that is generic across all
devices.

This both simplifies the specification and removes the special case for
qemu nvme, which previously relied on the linear address mapping.
Signed-off-by: Matias Bjørling <m@bjorling.me>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent c1480ad5
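
As a quick aside before the diff: the conversion the message describes recovers each address field as (ppa >> offset) & ((1 << len) - 1), with the offsets and lengths taken from the device's identify data. The sketch below is a minimal userspace illustration of that idea, not the kernel code; the struct and helper names (addr_format, generic_to_dev, ppa_field) and the sample field widths are made up for the example. The kernel's actual helpers are generic_to_dev_addr() and dev_to_generic_addr(), added in the last hunk below.

	/*
	 * Minimal sketch of the offset + bit-length conversion described
	 * above. The geometry (2 sector bits, 8 page bits, 12 block bits)
	 * is invented for illustration; on a real device the values come
	 * from its identify data (struct nvm_addr_format in the diff).
	 */
	#include <stdint.h>
	#include <stdio.h>

	struct addr_format {		/* hypothetical stand-in */
		uint8_t sect_offset, sect_len;
		uint8_t pg_offset, pg_len;
		uint8_t blk_offset, blk_len;
	};

	/* Pack generic (blk, pg, sec) coordinates into a device PPA. */
	static uint64_t generic_to_dev(const struct addr_format *f,
				       uint64_t blk, uint64_t pg, uint64_t sec)
	{
		return (blk << f->blk_offset) |
		       (pg << f->pg_offset) |
		       (sec << f->sect_offset);
	}

	/* Extract one field: shift it down to bit 0, then mask its length. */
	static uint64_t ppa_field(uint64_t ppa, uint8_t offset, uint8_t len)
	{
		return (ppa >> offset) & ((1ULL << len) - 1);
	}

	int main(void)
	{
		struct addr_format f = {
			.sect_offset = 0, .sect_len = 2,
			.pg_offset = 2, .pg_len = 8,
			.blk_offset = 10, .blk_len = 12,
		};
		uint64_t ppa = generic_to_dev(&f, 42, 17, 3);

		/* Round-trips: prints blk=42 pg=17 sec=3 */
		printf("blk=%llu pg=%llu sec=%llu\n",
		       (unsigned long long)ppa_field(ppa, f.blk_offset, f.blk_len),
		       (unsigned long long)ppa_field(ppa, f.pg_offset, f.pg_len),
		       (unsigned long long)ppa_field(ppa, f.sect_offset, f.sect_len));
		return 0;
	}

Because both directions are pure shifts and masks over per-device constants, one pair of helpers covers every device geometry, which is exactly what lets the commit delete the per-mode conversion functions.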
@@ -174,8 +174,7 @@ static int nvm_core_init(struct nvm_dev *dev)
 	dev->sec_size = grp->csecs;
 	dev->oob_size = grp->sos;
 	dev->sec_per_pg = grp->fpg_sz / grp->csecs;
-	dev->addr_mode = id->ppat;
-	dev->addr_format = id->ppaf;
+	memcpy(&dev->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));
 
 	dev->plane_mode = NVM_PLANE_SINGLE;
 	dev->max_rq_size = dev->ops->max_phys_sect * dev->sec_size;
...
@@ -73,7 +73,7 @@ static int gennvm_block_bb(struct ppa_addr ppa, int nr_blocks, u8 *blks,
 	struct nvm_block *blk;
 	int i;
 
-	ppa = addr_to_generic_mode(gn->dev, ppa);
+	ppa = dev_to_generic_addr(gn->dev, ppa);
 	lun = &gn->luns[(dev->nr_luns * ppa.g.ch) + ppa.g.lun];
 
 	for (i = 0; i < nr_blocks; i++) {
@@ -179,7 +179,7 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
 		ppa.ppa = 0;
 		ppa.g.ch = lun->vlun.chnl_id;
 		ppa.g.lun = lun->vlun.id;
-		ppa = generic_to_addr_mode(dev, ppa);
+		ppa = generic_to_dev_addr(dev, ppa);
 
 		ret = dev->ops->get_bb_tbl(dev->q, ppa,
 						dev->blks_per_lun,
@@ -304,10 +304,10 @@ static void gennvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
 
 	if (rqd->nr_pages > 1) {
 		for (i = 0; i < rqd->nr_pages; i++)
-			rqd->ppa_list[i] = addr_to_generic_mode(dev,
+			rqd->ppa_list[i] = dev_to_generic_addr(dev,
 							rqd->ppa_list[i]);
 	} else {
-		rqd->ppa_addr = addr_to_generic_mode(dev, rqd->ppa_addr);
+		rqd->ppa_addr = dev_to_generic_addr(dev, rqd->ppa_addr);
 	}
 }
@@ -317,10 +317,10 @@ static void gennvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
 
 	if (rqd->nr_pages > 1) {
 		for (i = 0; i < rqd->nr_pages; i++)
-			rqd->ppa_list[i] = generic_to_addr_mode(dev,
+			rqd->ppa_list[i] = generic_to_dev_addr(dev,
 							rqd->ppa_list[i]);
 	} else {
-		rqd->ppa_addr = generic_to_addr_mode(dev, rqd->ppa_addr);
+		rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
 	}
 }
...
@@ -123,12 +123,42 @@ static u64 block_to_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
 	return blk->id * rrpc->dev->pgs_per_blk;
 }
 
+static struct ppa_addr linear_to_generic_addr(struct nvm_dev *dev,
+						struct ppa_addr r)
+{
+	struct ppa_addr l;
+	int secs, pgs, blks, luns;
+	sector_t ppa = r.ppa;
+
+	l.ppa = 0;
+
+	div_u64_rem(ppa, dev->sec_per_pg, &secs);
+	l.g.sec = secs;
+
+	sector_div(ppa, dev->sec_per_pg);
+	div_u64_rem(ppa, dev->sec_per_blk, &pgs);
+	l.g.pg = pgs;
+
+	sector_div(ppa, dev->pgs_per_blk);
+	div_u64_rem(ppa, dev->blks_per_lun, &blks);
+	l.g.blk = blks;
+
+	sector_div(ppa, dev->blks_per_lun);
+	div_u64_rem(ppa, dev->luns_per_chnl, &luns);
+	l.g.lun = luns;
+
+	sector_div(ppa, dev->luns_per_chnl);
+	l.g.ch = ppa;
+
+	return l;
+}
+
 static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_dev *dev, u64 addr)
 {
 	struct ppa_addr paddr;
 
 	paddr.ppa = addr;
-	return __linear_to_generic_addr(dev, paddr);
+	return linear_to_generic_addr(dev, paddr);
 }
 
 /* requires lun->lock taken */
...
@@ -198,8 +198,7 @@ struct nvme_nvm_id {
 	__le32			cap;
 	__le32			dom;
 	struct nvme_nvm_addr_format ppaf;
-	__u8			ppat;
-	__u8			resv[223];
+	__u8			resv[224];
 	struct nvme_nvm_id_group groups[4];
 } __packed;
...
@@ -99,7 +99,6 @@ struct nvm_id {
 	u32		cap;
 	u32		dom;
 	struct nvm_addr_format ppaf;
-	u8		ppat;
 	struct nvm_id_group groups[4];
 } __packed;
@@ -119,39 +118,28 @@ struct nvm_tgt_instance {
 #define NVM_VERSION_MINOR 0
 #define NVM_VERSION_PATCH 0
 
-#define NVM_SEC_BITS (8)
-#define NVM_PL_BITS (6)
-#define NVM_PG_BITS (16)
 #define NVM_BLK_BITS (16)
-#define NVM_LUN_BITS (10)
+#define NVM_PG_BITS (16)
+#define NVM_SEC_BITS (8)
+#define NVM_PL_BITS (8)
+#define NVM_LUN_BITS (8)
 #define NVM_CH_BITS (8)
 
 struct ppa_addr {
+	/* Generic structure for all addresses */
 	union {
-		/* Channel-based PPA format in nand 4x2x2x2x8x10 */
-		struct {
-			u64 ch		: 4;
-			u64 sec		: 2; /* 4 sectors per page */
-			u64 pl		: 2; /* 4 planes per LUN */
-			u64 lun		: 2; /* 4 LUNs per channel */
-			u64 pg		: 8; /* 256 pages per block */
-			u64 blk		: 10;/* 1024 blocks per plane */
-			u64 resved	: 36;
-		} chnl;
-
-		/* Generic structure for all addresses */
 		struct {
+			u64 blk		: NVM_BLK_BITS;
+			u64 pg		: NVM_PG_BITS;
 			u64 sec		: NVM_SEC_BITS;
 			u64 pl		: NVM_PL_BITS;
-			u64 pg		: NVM_PG_BITS;
-			u64 blk		: NVM_BLK_BITS;
 			u64 lun		: NVM_LUN_BITS;
 			u64 ch		: NVM_CH_BITS;
 		} g;
 
 		u64 ppa;
 	};
-} __packed;
+};
 
 struct nvm_rq {
 	struct nvm_tgt_instance *ins;
@@ -259,8 +247,7 @@ struct nvm_dev {
 	int blks_per_lun;
 	int sec_size;
 	int oob_size;
-	int addr_mode;
-	struct nvm_addr_format addr_format;
+	struct nvm_addr_format ppaf;
 
 	/* Calculated/Cached values. These do not reflect the actual usable
 	 * blocks at run-time.
@@ -286,118 +273,45 @@ struct nvm_dev {
 	char name[DISK_NAME_LEN];
 };
 
-/* fallback conversion */
-static struct ppa_addr __generic_to_linear_addr(struct nvm_dev *dev,
-						struct ppa_addr r)
-{
-	struct ppa_addr l;
-
-	l.ppa = r.g.sec +
-		r.g.pg * dev->sec_per_pg +
-		r.g.blk * (dev->pgs_per_blk *
-				dev->sec_per_pg) +
-		r.g.lun * (dev->blks_per_lun *
-				dev->pgs_per_blk *
-				dev->sec_per_pg) +
-		r.g.ch * (dev->blks_per_lun *
-				dev->pgs_per_blk *
-				dev->luns_per_chnl *
-				dev->sec_per_pg);
-
-	return l;
-}
-
-/* fallback conversion */
-static struct ppa_addr __linear_to_generic_addr(struct nvm_dev *dev,
-						struct ppa_addr r)
-{
-	struct ppa_addr l;
-	int secs, pgs, blks, luns;
-	sector_t ppa = r.ppa;
-
-	l.ppa = 0;
-
-	div_u64_rem(ppa, dev->sec_per_pg, &secs);
-	l.g.sec = secs;
-
-	sector_div(ppa, dev->sec_per_pg);
-	div_u64_rem(ppa, dev->sec_per_blk, &pgs);
-	l.g.pg = pgs;
-
-	sector_div(ppa, dev->pgs_per_blk);
-	div_u64_rem(ppa, dev->blks_per_lun, &blks);
-	l.g.blk = blks;
-
-	sector_div(ppa, dev->blks_per_lun);
-	div_u64_rem(ppa, dev->luns_per_chnl, &luns);
-	l.g.lun = luns;
-
-	sector_div(ppa, dev->luns_per_chnl);
-	l.g.ch = ppa;
-
-	return l;
-}
-
-static struct ppa_addr __generic_to_chnl_addr(struct ppa_addr r)
+static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev,
+						struct ppa_addr r)
 {
 	struct ppa_addr l;
 
-	l.ppa = 0;
-
-	l.chnl.sec = r.g.sec;
-	l.chnl.pl = r.g.pl;
-	l.chnl.pg = r.g.pg;
-	l.chnl.blk = r.g.blk;
-	l.chnl.lun = r.g.lun;
-	l.chnl.ch = r.g.ch;
+	l.ppa = ((u64)r.g.blk) << dev->ppaf.blk_offset;
+	l.ppa |= ((u64)r.g.pg) << dev->ppaf.pg_offset;
+	l.ppa |= ((u64)r.g.sec) << dev->ppaf.sect_offset;
+	l.ppa |= ((u64)r.g.pl) << dev->ppaf.pln_offset;
+	l.ppa |= ((u64)r.g.lun) << dev->ppaf.lun_offset;
+	l.ppa |= ((u64)r.g.ch) << dev->ppaf.ch_offset;
 
 	return l;
 }
 
-static struct ppa_addr __chnl_to_generic_addr(struct ppa_addr r)
+static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev,
+						struct ppa_addr r)
 {
 	struct ppa_addr l;
 
-	l.ppa = 0;
-
-	l.g.sec = r.chnl.sec;
-	l.g.pl = r.chnl.pl;
-	l.g.pg = r.chnl.pg;
-	l.g.blk = r.chnl.blk;
-	l.g.lun = r.chnl.lun;
-	l.g.ch = r.chnl.ch;
+	/*
+	 * (r.ppa << X offset) & X len bitmask. X eq. blk, pg, etc.
+	 */
+	l.g.blk = (r.ppa >> dev->ppaf.blk_offset) &
+					(((1 << dev->ppaf.blk_len) - 1));
+	l.g.pg |= (r.ppa >> dev->ppaf.pg_offset) &
+					(((1 << dev->ppaf.pg_len) - 1));
+	l.g.sec |= (r.ppa >> dev->ppaf.sect_offset) &
+					(((1 << dev->ppaf.sect_len) - 1));
+	l.g.pl |= (r.ppa >> dev->ppaf.pln_offset) &
+					(((1 << dev->ppaf.pln_len) - 1));
+	l.g.lun |= (r.ppa >> dev->ppaf.lun_offset) &
+					(((1 << dev->ppaf.lun_len) - 1));
+	l.g.ch |= (r.ppa >> dev->ppaf.ch_offset) &
+					(((1 << dev->ppaf.ch_len) - 1));
 
 	return l;
 }
 
-static inline struct ppa_addr addr_to_generic_mode(struct nvm_dev *dev,
-						struct ppa_addr gppa)
-{
-	switch (dev->addr_mode) {
-	case NVM_ADDRMODE_LINEAR:
-		return __linear_to_generic_addr(dev, gppa);
-	case NVM_ADDRMODE_CHANNEL:
-		return __chnl_to_generic_addr(gppa);
-	default:
-		BUG();
-	}
-	return gppa;
-}
-
-static inline struct ppa_addr generic_to_addr_mode(struct nvm_dev *dev,
-						struct ppa_addr gppa)
-{
-	switch (dev->addr_mode) {
-	case NVM_ADDRMODE_LINEAR:
-		return __generic_to_linear_addr(dev, gppa);
-	case NVM_ADDRMODE_CHANNEL:
-		return __generic_to_chnl_addr(gppa);
-	default:
-		BUG();
-	}
-	return gppa;
-}
-
 static inline int ppa_empty(struct ppa_addr ppa_addr)
 {
 	return (ppa_addr.ppa == ADDR_EMPTY);
...