Commit 4ce01c51 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block layer fixes from Jens Axboe:
 "A round of fixes/updates for the current series.

  This looks a little bigger than it is, but that's mainly because we
   pushed the lightnvm-enabled null_blk change out of the merge window so
   it could be updated a bit.  The rest of the volume is also mostly
   lightnvm.  In particular:

   - Lightnvm.  Various fixes, additions, updates from Matias and
     Javier, as well as from Wenwei Tao.

   - NVMe:
        - Fix for potential arithmetic overflow from Keith.
        - Also from Keith, ensure that we reap pending completions from
          a completion queue before deleting it.  Fixes kernel crashes
          when resetting a device with IO pending.
        - Various little lightnvm related tweaks from Matias.

   - Fix up flushes to go through the IO scheduler, for the cases where
     a flush is not required.  Fixes a case in CFQ where we would be
     idling and not see this request, and hence never break out of the
     idling.  From Jan Kara.

   - Use list_{first,prev,next} in elevator.c for cleaner code.  From
     Geliang Tang.

   - Fix for a warning trigger on btrfs and raid on single queue blk-mq
     devices, where we would flush plug callbacks with preemption
     disabled.  From me.

   - A mac partition validation fix from Kees Cook.

   - Two merge fixes from Ming, marked for stable.  A third patch adds a
     new warning so we'll notice this more quickly in the future if we
     screw up the accounting.

   - Cleanup of thread name/creation in mtip32xx from Rasmus Villemoes"

* 'for-linus' of git://git.kernel.dk/linux-block: (32 commits)
  blk-merge: warn if figured out segment number is bigger than nr_phys_segments
  blk-merge: fix blk_bio_segment_split
  block: fix segment split
  blk-mq: fix calling unplug callbacks with preempt disabled
  mac: validate mac_partition is within sector
  mtip32xx: use formatting capability of kthread_create_on_node
  NVMe: reap completion entries when deleting queue
  lightnvm: add free and bad lun info to show luns
  lightnvm: keep track of block counts
  nvme: lightnvm: use admin queues for admin cmds
  lightnvm: missing free on init error
  lightnvm: wrong return value and redundant free
  null_blk: do not del gendisk with lightnvm
  null_blk: use device addressing mode
  null_blk: use ppa_cache pool
  NVMe: Fix possible arithmetic overflow for max segments
  blk-flush: Queue through IO scheduler when flush not required
  null_blk: register as a LightNVM device
  elevator: use list_{first,prev,next}_entry
  lightnvm: cleanup queue before target removal
  ...
parents a2931547 12e57f59
...@@ -70,3 +70,6 @@ use_per_node_hctx=[0/1]: Default: 0
parameter.
1: The multi-queue block layer is instantiated with a hardware dispatch
queue for each CPU node in the system.
use_lightnvm=[0/1]: Default: 0
Register device with LightNVM. Requires blk-mq to be used.
...@@ -6366,6 +6366,7 @@ F: arch/*/include/asm/pmem.h
LIGHTNVM PLATFORM SUPPORT
M: Matias Bjorling <mb@lightnvm.io>
W: http://github/OpenChannelSSD
L: linux-block@vger.kernel.org
S: Maintained
F: drivers/lightnvm/
F: include/linux/lightnvm.h
......
...@@ -422,7 +422,7 @@ void blk_insert_flush(struct request *rq)
if (q->mq_ops) {
blk_mq_insert_request(rq, false, false, true);
} else
list_add_tail(&rq->queuelist, &q->queue_head);
q->elevator->type->ops.elevator_add_req_fn(q, rq);
return;
}
......
...@@ -76,6 +76,9 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
struct bio_vec bv, bvprv, *bvprvp = NULL;
struct bvec_iter iter;
unsigned seg_size = 0, nsegs = 0, sectors = 0;
unsigned front_seg_size = bio->bi_seg_front_size;
bool do_split = true;
struct bio *new = NULL;
bio_for_each_segment(bv, bio, iter) {
if (sectors + (bv.bv_len >> 9) > queue_max_sectors(q))
...@@ -98,7 +101,7 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
seg_size += bv.bv_len;
bvprv = bv;
bvprvp = &bv;
bvprvp = &bvprv;
sectors += bv.bv_len >> 9;
continue;
}
...@@ -108,16 +111,29 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
nsegs++;
bvprv = bv;
bvprvp = &bv;
bvprvp = &bvprv;
seg_size = bv.bv_len;
sectors += bv.bv_len >> 9;
if (nsegs == 1 && seg_size > front_seg_size)
front_seg_size = seg_size;
}
*segs = nsegs;
do_split = false;
return NULL;
split:
*segs = nsegs;
return bio_split(bio, sectors, GFP_NOIO, bs);
if (do_split) {
new = bio_split(bio, sectors, GFP_NOIO, bs);
if (new)
bio = new;
}
bio->bi_seg_front_size = front_seg_size;
if (seg_size > bio->bi_seg_back_size)
bio->bi_seg_back_size = seg_size;
return do_split ? new : NULL;
}
void blk_queue_split(struct request_queue *q, struct bio **bio,
...@@ -412,6 +428,12 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
if (sg)
sg_mark_end(sg);
/*
* Something must have been wrong if the figured number of
* segment is bigger than number of req's physical segments
*/
WARN_ON(nsegs > rq->nr_phys_segments);
return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);
......
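A note on the bvprvp change in the blk_bio_segment_split hunk above: the pointer is meant to reference the previous bio_vec, but the loop variable bv is overwritten on every iteration, so taking its address never yields the previous element; the fix points at the saved copy bvprv instead. A minimal userspace sketch of the same pitfall (illustrative only, not kernel code):

#include <stdio.h>

/* The loop variable is overwritten each iteration, so a pointer taken to
 * it never refers to the *previous* element -- point at the saved copy. */
int main(void)
{
	int values[] = { 10, 20, 30 };
	int cur, prev_copy = 0;
	int *prevp = NULL;
	int i;

	for (i = 0; i < 3; i++) {
		cur = values[i];
		if (prevp)
			/* With prevp = &cur this would print the current
			 * value, never the previous one. */
			printf("prev=%d cur=%d\n", *prevp, cur);
		prev_copy = cur;
		prevp = &prev_copy;
	}
	return 0;
}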
...@@ -1291,15 +1291,16 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
blk_mq_bio_to_request(rq, bio);
/*
* we do limited pluging. If bio can be merged, do merge.
* We do limited pluging. If the bio can be merged, do that.
* Otherwise the existing request in the plug list will be
* issued. So the plug list will have one request at most
*/
if (plug) {
/*
* The plug list might get flushed before this. If that
* happens, same_queue_rq is invalid and plug list is empty
* happens, same_queue_rq is invalid and plug list is
**/
* empty
*/
if (same_queue_rq && !list_empty(&plug->mq_list)) {
old_rq = same_queue_rq;
list_del_init(&old_rq->queuelist);
...@@ -1380,12 +1381,15 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
blk_mq_bio_to_request(rq, bio);
if (!request_count)
trace_block_plug(q);
else if (request_count >= BLK_MAX_REQUEST_COUNT) {
blk_mq_put_ctx(data.ctx);
if (request_count >= BLK_MAX_REQUEST_COUNT) {
blk_flush_plug_list(plug, false);
trace_block_plug(q);
}
list_add_tail(&rq->queuelist, &plug->mq_list);
blk_mq_put_ctx(data.ctx);
return cookie;
}
......
...@@ -21,10 +21,10 @@ static void noop_merged_requests(struct request_queue *q, struct request *rq,
static int noop_dispatch(struct request_queue *q, int force)
{
struct noop_data *nd = q->elevator->elevator_data;
if (!list_empty(&nd->queue)) {
struct request *rq;
rq = list_entry(nd->queue.next, struct request, queuelist);
rq = list_first_entry_or_null(&nd->queue, struct request, queuelist);
if (rq) {
list_del_init(&rq->queuelist);
elv_dispatch_sort(q, rq);
return 1;
...@@ -46,7 +46,7 @@ noop_former_request(struct request_queue *q, struct request *rq)
if (rq->queuelist.prev == &nd->queue)
return NULL;
return list_entry(rq->queuelist.prev, struct request, queuelist);
return list_prev_entry(rq, queuelist);
}
static struct request *
...@@ -56,7 +56,7 @@ noop_latter_request(struct request_queue *q, struct request *rq)
if (rq->queuelist.next == &nd->queue)
return NULL;
return list_entry(rq->queuelist.next, struct request, queuelist);
return list_next_entry(rq, queuelist);
}
static int noop_init_queue(struct request_queue *q, struct elevator_type *e)
......
...@@ -32,7 +32,7 @@ int mac_partition(struct parsed_partitions *state)
Sector sect;
unsigned char *data;
int slot, blocks_in_map;
unsigned secsize;
unsigned secsize, datasize, partoffset;
#ifdef CONFIG_PPC_PMAC
int found_root = 0;
int found_root_goodness = 0;
...@@ -50,10 +50,14 @@ int mac_partition(struct parsed_partitions *state)
}
secsize = be16_to_cpu(md->block_size);
put_dev_sector(sect);
data = read_part_sector(state, secsize/512, &sect);
datasize = round_down(secsize, 512);
data = read_part_sector(state, datasize / 512, &sect);
if (!data)
return -1;
part = (struct mac_partition *) (data + secsize%512);
partoffset = secsize % 512;
if (partoffset + sizeof(*part) > datasize)
return -1;
part = (struct mac_partition *) (data + partoffset);
if (be16_to_cpu(part->signature) != MAC_PARTITION_MAGIC) {
put_dev_sector(sect);
return 0; /* not a MacOS disk */
......
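The bounds check added to mac_partition() above can be walked through with hypothetical numbers: read_part_sector() hands back a 512-byte sector buffer, and with a hostile on-disk block_size the old code could read the struct mac_partition past the end of that buffer. A standalone sketch under assumed sizes (the 200-byte struct and the ROUND_DOWN macro below are stand-ins for the kernel definitions):

#include <stdio.h>

/* Hypothetical stand-ins for the kernel's struct mac_partition and
 * round_down(); the 200-byte size is illustrative only. */
struct mac_partition_sketch { char raw[200]; };
#define ROUND_DOWN(x, y) ((x) - ((x) % (y)))

int main(void)
{
	unsigned secsize    = 1000;                      /* attacker-supplied block size */
	unsigned datasize   = ROUND_DOWN(secsize, 512);  /* bytes usable in the buffer: 512 */
	unsigned partoffset = secsize % 512;             /* 488 */

	if (partoffset + sizeof(struct mac_partition_sketch) > datasize)
		printf("rejected: partition entry would run past the sector buffer\n");
	else
		printf("ok\n");
	return 0;
}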
...@@ -63,6 +63,7 @@ obj-$(CONFIG_FB_I810) += video/fbdev/i810/
obj-$(CONFIG_FB_INTEL) += video/fbdev/intelfb/
obj-$(CONFIG_PARPORT) += parport/
obj-$(CONFIG_NVM) += lightnvm/
obj-y += base/ block/ misc/ mfd/ nfc/
obj-$(CONFIG_LIBNVDIMM) += nvdimm/
obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf/
...@@ -70,7 +71,6 @@ obj-$(CONFIG_NUBUS) += nubus/
obj-y += macintosh/
obj-$(CONFIG_IDE) += ide/
obj-$(CONFIG_SCSI) += scsi/
obj-$(CONFIG_NVM) += lightnvm/
obj-y += nvme/
obj-$(CONFIG_ATA) += ata/
obj-$(CONFIG_TARGET_CORE) += target/
......
...@@ -3810,7 +3810,6 @@ static int mtip_block_initialize(struct driver_data *dd)
sector_t capacity;
unsigned int index = 0;
struct kobject *kobj;
unsigned char thd_name[16];
if (dd->disk)
goto skip_create_disk; /* hw init done, before rebuild */
...@@ -3958,10 +3957,9 @@ static int mtip_block_initialize(struct driver_data *dd)
}
start_service_thread:
sprintf(thd_name, "mtip_svc_thd_%02d", index);
dd->mtip_svc_handler = kthread_create_on_node(mtip_service_thread,
dd, dd->numa_node, "%s",
dd, dd->numa_node,
thd_name);
"mtip_svc_thd_%02d", index);
if (IS_ERR(dd->mtip_svc_handler)) {
dev_err(&dd->pdev->dev, "service thread failed to start\n");
......
...@@ -8,6 +8,7 @@ ...@@ -8,6 +8,7 @@
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/blk-mq.h> #include <linux/blk-mq.h>
#include <linux/hrtimer.h> #include <linux/hrtimer.h>
#include <linux/lightnvm.h>
struct nullb_cmd { struct nullb_cmd {
struct list_head list; struct list_head list;
...@@ -39,12 +40,14 @@ struct nullb { ...@@ -39,12 +40,14 @@ struct nullb {
struct nullb_queue *queues; struct nullb_queue *queues;
unsigned int nr_queues; unsigned int nr_queues;
char disk_name[DISK_NAME_LEN];
}; };
static LIST_HEAD(nullb_list); static LIST_HEAD(nullb_list);
static struct mutex lock; static struct mutex lock;
static int null_major; static int null_major;
static int nullb_indexes; static int nullb_indexes;
static struct kmem_cache *ppa_cache;
struct completion_queue { struct completion_queue {
struct llist_head list; struct llist_head list;
...@@ -119,6 +122,10 @@ static int nr_devices = 2; ...@@ -119,6 +122,10 @@ static int nr_devices = 2;
module_param(nr_devices, int, S_IRUGO); module_param(nr_devices, int, S_IRUGO);
MODULE_PARM_DESC(nr_devices, "Number of devices to register"); MODULE_PARM_DESC(nr_devices, "Number of devices to register");
static bool use_lightnvm;
module_param(use_lightnvm, bool, S_IRUGO);
MODULE_PARM_DESC(use_lightnvm, "Register as a LightNVM device");
static int irqmode = NULL_IRQ_SOFTIRQ; static int irqmode = NULL_IRQ_SOFTIRQ;
static int null_set_irqmode(const char *str, const struct kernel_param *kp) static int null_set_irqmode(const char *str, const struct kernel_param *kp)
...@@ -427,15 +434,156 @@ static void null_del_dev(struct nullb *nullb) ...@@ -427,15 +434,156 @@ static void null_del_dev(struct nullb *nullb)
{ {
list_del_init(&nullb->list); list_del_init(&nullb->list);
if (use_lightnvm)
nvm_unregister(nullb->disk_name);
else
del_gendisk(nullb->disk); del_gendisk(nullb->disk);
blk_cleanup_queue(nullb->q); blk_cleanup_queue(nullb->q);
if (queue_mode == NULL_Q_MQ) if (queue_mode == NULL_Q_MQ)
blk_mq_free_tag_set(&nullb->tag_set); blk_mq_free_tag_set(&nullb->tag_set);
if (!use_lightnvm)
put_disk(nullb->disk); put_disk(nullb->disk);
cleanup_queues(nullb); cleanup_queues(nullb);
kfree(nullb); kfree(nullb);
} }
#ifdef CONFIG_NVM
static void null_lnvm_end_io(struct request *rq, int error)
{
struct nvm_rq *rqd = rq->end_io_data;
struct nvm_dev *dev = rqd->dev;
dev->mt->end_io(rqd, error);
blk_put_request(rq);
}
static int null_lnvm_submit_io(struct request_queue *q, struct nvm_rq *rqd)
{
struct request *rq;
struct bio *bio = rqd->bio;
rq = blk_mq_alloc_request(q, bio_rw(bio), GFP_KERNEL, 0);
if (IS_ERR(rq))
return -ENOMEM;
rq->cmd_type = REQ_TYPE_DRV_PRIV;
rq->__sector = bio->bi_iter.bi_sector;
rq->ioprio = bio_prio(bio);
if (bio_has_data(bio))
rq->nr_phys_segments = bio_phys_segments(q, bio);
rq->__data_len = bio->bi_iter.bi_size;
rq->bio = rq->biotail = bio;
rq->end_io_data = rqd;
blk_execute_rq_nowait(q, NULL, rq, 0, null_lnvm_end_io);
return 0;
}
static int null_lnvm_id(struct request_queue *q, struct nvm_id *id)
{
sector_t size = gb * 1024 * 1024 * 1024ULL;
sector_t blksize;
struct nvm_id_group *grp;
id->ver_id = 0x1;
id->vmnt = 0;
id->cgrps = 1;
id->cap = 0x3;
id->dom = 0x1;
id->ppaf.blk_offset = 0;
id->ppaf.blk_len = 16;
id->ppaf.pg_offset = 16;
id->ppaf.pg_len = 16;
id->ppaf.sect_offset = 32;
id->ppaf.sect_len = 8;
id->ppaf.pln_offset = 40;
id->ppaf.pln_len = 8;
id->ppaf.lun_offset = 48;
id->ppaf.lun_len = 8;
id->ppaf.ch_offset = 56;
id->ppaf.ch_len = 8;
do_div(size, bs); /* convert size to pages */
do_div(size, 256); /* concert size to pgs pr blk */
grp = &id->groups[0];
grp->mtype = 0;
grp->fmtype = 0;
grp->num_ch = 1;
grp->num_pg = 256;
blksize = size;
do_div(size, (1 << 16));
grp->num_lun = size + 1;
do_div(blksize, grp->num_lun);
grp->num_blk = blksize;
grp->num_pln = 1;
grp->fpg_sz = bs;
grp->csecs = bs;
grp->trdt = 25000;
grp->trdm = 25000;
grp->tprt = 500000;
grp->tprm = 500000;
grp->tbet = 1500000;
grp->tbem = 1500000;
grp->mpos = 0x010101; /* single plane rwe */
grp->cpar = hw_queue_depth;
return 0;
}
static void *null_lnvm_create_dma_pool(struct request_queue *q, char *name)
{
mempool_t *virtmem_pool;
virtmem_pool = mempool_create_slab_pool(64, ppa_cache);
if (!virtmem_pool) {
pr_err("null_blk: Unable to create virtual memory pool\n");
return NULL;
}
return virtmem_pool;
}
static void null_lnvm_destroy_dma_pool(void *pool)
{
mempool_destroy(pool);
}
static void *null_lnvm_dev_dma_alloc(struct request_queue *q, void *pool,
gfp_t mem_flags, dma_addr_t *dma_handler)
{
return mempool_alloc(pool, mem_flags);
}
static void null_lnvm_dev_dma_free(void *pool, void *entry,
dma_addr_t dma_handler)
{
mempool_free(entry, pool);
}
static struct nvm_dev_ops null_lnvm_dev_ops = {
.identity = null_lnvm_id,
.submit_io = null_lnvm_submit_io,
.create_dma_pool = null_lnvm_create_dma_pool,
.destroy_dma_pool = null_lnvm_destroy_dma_pool,
.dev_dma_alloc = null_lnvm_dev_dma_alloc,
.dev_dma_free = null_lnvm_dev_dma_free,
/* Simulate nvme protocol restriction */
.max_phys_sect = 64,
};
#else
static struct nvm_dev_ops null_lnvm_dev_ops;
#endif /* CONFIG_NVM */
static int null_open(struct block_device *bdev, fmode_t mode) static int null_open(struct block_device *bdev, fmode_t mode)
{ {
return 0; return 0;
...@@ -575,11 +723,6 @@ static int null_add_dev(void) ...@@ -575,11 +723,6 @@ static int null_add_dev(void)
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q); queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q); queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);
disk = nullb->disk = alloc_disk_node(1, home_node);
if (!disk) {
rv = -ENOMEM;
goto out_cleanup_blk_queue;
}
mutex_lock(&lock); mutex_lock(&lock);
list_add_tail(&nullb->list, &nullb_list); list_add_tail(&nullb->list, &nullb_list);
...@@ -589,6 +732,21 @@ static int null_add_dev(void) ...@@ -589,6 +732,21 @@ static int null_add_dev(void)
blk_queue_logical_block_size(nullb->q, bs); blk_queue_logical_block_size(nullb->q, bs);
blk_queue_physical_block_size(nullb->q, bs); blk_queue_physical_block_size(nullb->q, bs);
sprintf(nullb->disk_name, "nullb%d", nullb->index);
if (use_lightnvm) {
rv = nvm_register(nullb->q, nullb->disk_name,
&null_lnvm_dev_ops);
if (rv)
goto out_cleanup_blk_queue;
goto done;
}
disk = nullb->disk = alloc_disk_node(1, home_node);
if (!disk) {
rv = -ENOMEM;
goto out_cleanup_lightnvm;
}
size = gb * 1024 * 1024 * 1024ULL; size = gb * 1024 * 1024 * 1024ULL;
set_capacity(disk, size >> 9); set_capacity(disk, size >> 9);
...@@ -598,10 +756,15 @@ static int null_add_dev(void) ...@@ -598,10 +756,15 @@ static int null_add_dev(void)
disk->fops = &null_fops; disk->fops = &null_fops;
disk->private_data = nullb; disk->private_data = nullb;
disk->queue = nullb->q; disk->queue = nullb->q;
sprintf(disk->disk_name, "nullb%d", nullb->index); strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);
add_disk(disk); add_disk(disk);
done:
return 0; return 0;
out_cleanup_lightnvm:
if (use_lightnvm)
nvm_unregister(nullb->disk_name);
out_cleanup_blk_queue: out_cleanup_blk_queue:
blk_cleanup_queue(nullb->q); blk_cleanup_queue(nullb->q);
out_cleanup_tags: out_cleanup_tags:
...@@ -625,6 +788,18 @@ static int __init null_init(void) ...@@ -625,6 +788,18 @@ static int __init null_init(void)
bs = PAGE_SIZE; bs = PAGE_SIZE;
} }
if (use_lightnvm && bs != 4096) {
pr_warn("null_blk: LightNVM only supports 4k block size\n");
pr_warn("null_blk: defaults block size to 4k\n");
bs = 4096;
}
if (use_lightnvm && queue_mode != NULL_Q_MQ) {
pr_warn("null_blk: LightNVM only supported for blk-mq\n");
pr_warn("null_blk: defaults queue mode to blk-mq\n");
queue_mode = NULL_Q_MQ;
}
if (queue_mode == NULL_Q_MQ && use_per_node_hctx) { if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
if (submit_queues < nr_online_nodes) { if (submit_queues < nr_online_nodes) {
pr_warn("null_blk: submit_queues param is set to %u.", pr_warn("null_blk: submit_queues param is set to %u.",
...@@ -655,15 +830,27 @@ static int __init null_init(void) ...@@ -655,15 +830,27 @@ static int __init null_init(void)
if (null_major < 0) if (null_major < 0)
return null_major; return null_major;
if (use_lightnvm) {
ppa_cache = kmem_cache_create("ppa_cache", 64 * sizeof(u64),
0, 0, NULL);
if (!ppa_cache) {
pr_err("null_blk: unable to create ppa cache\n");
return -ENOMEM;
}
}
for (i = 0; i < nr_devices; i++) { for (i = 0; i < nr_devices; i++) {
if (null_add_dev()) { if (null_add_dev()) {
unregister_blkdev(null_major, "nullb"); unregister_blkdev(null_major, "nullb");
return -EINVAL; goto err_ppa;
} }
} }
pr_info("null: module loaded\n"); pr_info("null: module loaded\n");
return 0; return 0;
err_ppa:
kmem_cache_destroy(ppa_cache);
return -EINVAL;
} }
static void __exit null_exit(void) static void __exit null_exit(void)
...@@ -678,6 +865,8 @@ static void __exit null_exit(void) ...@@ -678,6 +865,8 @@ static void __exit null_exit(void)
null_del_dev(nullb); null_del_dev(nullb);
} }
mutex_unlock(&lock); mutex_unlock(&lock);
kmem_cache_destroy(ppa_cache);
} }
module_init(null_init); module_init(null_init);
......
...@@ -160,11 +160,6 @@ int nvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk) ...@@ -160,11 +160,6 @@ int nvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk)
} }
EXPORT_SYMBOL(nvm_erase_blk); EXPORT_SYMBOL(nvm_erase_blk);
static void nvm_core_free(struct nvm_dev *dev)
{
kfree(dev);
}
static int nvm_core_init(struct nvm_dev *dev) static int nvm_core_init(struct nvm_dev *dev)
{ {
struct nvm_id *id = &dev->identity; struct nvm_id *id = &dev->identity;
...@@ -179,12 +174,21 @@ static int nvm_core_init(struct nvm_dev *dev) ...@@ -179,12 +174,21 @@ static int nvm_core_init(struct nvm_dev *dev)
dev->sec_size = grp->csecs; dev->sec_size = grp->csecs;
dev->oob_size = grp->sos; dev->oob_size = grp->sos;
dev->sec_per_pg = grp->fpg_sz / grp->csecs; dev->sec_per_pg = grp->fpg_sz / grp->csecs;
dev->addr_mode = id->ppat; memcpy(&dev->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));
dev->addr_format = id->ppaf;
dev->plane_mode = NVM_PLANE_SINGLE; dev->plane_mode = NVM_PLANE_SINGLE;
dev->max_rq_size = dev->ops->max_phys_sect * dev->sec_size; dev->max_rq_size = dev->ops->max_phys_sect * dev->sec_size;
if (grp->mtype != 0) {
pr_err("nvm: memory type not supported\n");
return -EINVAL;
}
if (grp->fmtype != 0 && grp->fmtype != 1) {
pr_err("nvm: flash type not supported\n");
return -EINVAL;
}
if (grp->mpos & 0x020202) if (grp->mpos & 0x020202)
dev->plane_mode = NVM_PLANE_DOUBLE; dev->plane_mode = NVM_PLANE_DOUBLE;
if (grp->mpos & 0x040404) if (grp->mpos & 0x040404)
...@@ -213,21 +217,18 @@ static void nvm_free(struct nvm_dev *dev) ...@@ -213,21 +217,18 @@ static void nvm_free(struct nvm_dev *dev)
if (dev->mt) if (dev->mt)
dev->mt->unregister_mgr(dev); dev->mt->unregister_mgr(dev);
nvm_core_free(dev);
} }
static int nvm_init(struct nvm_dev *dev) static int nvm_init(struct nvm_dev *dev)
{ {
struct nvmm_type *mt; struct nvmm_type *mt;
int ret = 0; int ret = -EINVAL;
if (!dev->q || !dev->ops) if (!dev->q || !dev->ops)
return -EINVAL; return ret;
if (dev->ops->identity(dev->q, &dev->identity)) { if (dev->ops->identity(dev->q, &dev->identity)) {
pr_err("nvm: device could not be identified\n"); pr_err("nvm: device could not be identified\n");
ret = -EINVAL;
goto err; goto err;
} }
...@@ -273,7 +274,6 @@ static int nvm_init(struct nvm_dev *dev) ...@@ -273,7 +274,6 @@ static int nvm_init(struct nvm_dev *dev)
dev->nr_chnls); dev->nr_chnls);
return 0; return 0;
err: err:
nvm_free(dev);
pr_err("nvm: failed to initialize nvm\n"); pr_err("nvm: failed to initialize nvm\n");
return ret; return ret;
} }
...@@ -308,22 +308,24 @@ int nvm_register(struct request_queue *q, char *disk_name, ...@@ -308,22 +308,24 @@ int nvm_register(struct request_queue *q, char *disk_name,
if (ret) if (ret)
goto err_init; goto err_init;
down_write(&nvm_lock);
list_add(&dev->devices, &nvm_devices);
up_write(&nvm_lock);
if (dev->ops->max_phys_sect > 1) { if (dev->ops->max_phys_sect > 1) {
dev->ppalist_pool = dev->ops->create_dma_pool(dev->q, dev->ppalist_pool = dev->ops->create_dma_pool(dev->q,
"ppalist"); "ppalist");
if (!dev->ppalist_pool) { if (!dev->ppalist_pool) {
pr_err("nvm: could not create ppa pool\n"); pr_err("nvm: could not create ppa pool\n");
return -ENOMEM; ret = -ENOMEM;
goto err_init;
} }
} else if (dev->ops->max_phys_sect > 256) { } else if (dev->ops->max_phys_sect > 256) {
pr_info("nvm: max sectors supported is 256.\n"); pr_info("nvm: max sectors supported is 256.\n");
return -EINVAL; ret = -EINVAL;
goto err_init;
} }
down_write(&nvm_lock);
list_add(&dev->devices, &nvm_devices);
up_write(&nvm_lock);
return 0; return 0;
err_init: err_init:
kfree(dev); kfree(dev);
...@@ -341,11 +343,12 @@ void nvm_unregister(char *disk_name) ...@@ -341,11 +343,12 @@ void nvm_unregister(char *disk_name)
return; return;
} }
nvm_exit(dev);
down_write(&nvm_lock); down_write(&nvm_lock);
list_del(&dev->devices); list_del(&dev->devices);
up_write(&nvm_lock); up_write(&nvm_lock);
nvm_exit(dev);
kfree(dev);
} }
EXPORT_SYMBOL(nvm_unregister); EXPORT_SYMBOL(nvm_unregister);
...@@ -457,11 +460,11 @@ static void nvm_remove_target(struct nvm_target *t) ...@@ -457,11 +460,11 @@ static void nvm_remove_target(struct nvm_target *t)
lockdep_assert_held(&nvm_lock); lockdep_assert_held(&nvm_lock);
del_gendisk(tdisk); del_gendisk(tdisk);
blk_cleanup_queue(q);
if (tt->exit) if (tt->exit)
tt->exit(tdisk->private_data); tt->exit(tdisk->private_data);
blk_cleanup_queue(q);
put_disk(tdisk); put_disk(tdisk);
list_del(&t->list); list_del(&t->list);
...@@ -541,7 +544,7 @@ static int nvm_configure_show(const char *val) ...@@ -541,7 +544,7 @@ static int nvm_configure_show(const char *val)
if (!dev->mt) if (!dev->mt)
return 0; return 0;
dev->mt->free_blocks_print(dev); dev->mt->lun_info_print(dev);
return 0; return 0;
} }
......
...@@ -60,23 +60,28 @@ static int gennvm_luns_init(struct nvm_dev *dev, struct gen_nvm *gn) ...@@ -60,23 +60,28 @@ static int gennvm_luns_init(struct nvm_dev *dev, struct gen_nvm *gn)
lun->vlun.lun_id = i % dev->luns_per_chnl; lun->vlun.lun_id = i % dev->luns_per_chnl;
lun->vlun.chnl_id = i / dev->luns_per_chnl; lun->vlun.chnl_id = i / dev->luns_per_chnl;
lun->vlun.nr_free_blocks = dev->blks_per_lun; lun->vlun.nr_free_blocks = dev->blks_per_lun;
lun->vlun.nr_inuse_blocks = 0;
lun->vlun.nr_bad_blocks = 0;
} }
return 0; return 0;
} }
static int gennvm_block_bb(u32 lun_id, void *bb_bitmap, unsigned int nr_blocks, static int gennvm_block_bb(struct ppa_addr ppa, int nr_blocks, u8 *blks,
void *private) void *private)
{ {
struct gen_nvm *gn = private; struct gen_nvm *gn = private;
struct gen_lun *lun = &gn->luns[lun_id]; struct nvm_dev *dev = gn->dev;
struct gen_lun *lun;
struct nvm_block *blk; struct nvm_block *blk;
int i; int i;
if (unlikely(bitmap_empty(bb_bitmap, nr_blocks))) ppa = dev_to_generic_addr(gn->dev, ppa);
return 0; lun = &gn->luns[(dev->nr_luns * ppa.g.ch) + ppa.g.lun];
for (i = 0; i < nr_blocks; i++) {
if (blks[i] == 0)
continue;
i = -1;
while ((i = find_next_bit(bb_bitmap, nr_blocks, i + 1)) < nr_blocks) {
blk = &lun->vlun.blocks[i]; blk = &lun->vlun.blocks[i];
if (!blk) { if (!blk) {
pr_err("gennvm: BB data is out of bounds.\n"); pr_err("gennvm: BB data is out of bounds.\n");
...@@ -84,6 +89,7 @@ static int gennvm_block_bb(u32 lun_id, void *bb_bitmap, unsigned int nr_blocks, ...@@ -84,6 +89,7 @@ static int gennvm_block_bb(u32 lun_id, void *bb_bitmap, unsigned int nr_blocks,
} }
list_move_tail(&blk->list, &lun->bb_list); list_move_tail(&blk->list, &lun->bb_list);
lun->vlun.nr_bad_blocks++;
} }
return 0; return 0;
...@@ -136,6 +142,7 @@ static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private) ...@@ -136,6 +142,7 @@ static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
list_move_tail(&blk->list, &lun->used_list); list_move_tail(&blk->list, &lun->used_list);
blk->type = 1; blk->type = 1;
lun->vlun.nr_free_blocks--; lun->vlun.nr_free_blocks--;
lun->vlun.nr_inuse_blocks++;
} }
} }
...@@ -164,15 +171,25 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn) ...@@ -164,15 +171,25 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
block->id = cur_block_id++; block->id = cur_block_id++;
/* First block is reserved for device */ /* First block is reserved for device */
if (unlikely(lun_iter == 0 && blk_iter == 0)) if (unlikely(lun_iter == 0 && blk_iter == 0)) {
lun->vlun.nr_free_blocks--;
continue; continue;
}
list_add_tail(&block->list, &lun->free_list); list_add_tail(&block->list, &lun->free_list);
} }
if (dev->ops->get_bb_tbl) { if (dev->ops->get_bb_tbl) {
ret = dev->ops->get_bb_tbl(dev->q, lun->vlun.id, struct ppa_addr ppa;
dev->blks_per_lun, gennvm_block_bb, gn);
ppa.ppa = 0;
ppa.g.ch = lun->vlun.chnl_id;
ppa.g.lun = lun->vlun.id;
ppa = generic_to_dev_addr(dev, ppa);
ret = dev->ops->get_bb_tbl(dev->q, ppa,
dev->blks_per_lun,
gennvm_block_bb, gn);
if (ret) if (ret)
pr_err("gennvm: could not read BB table\n"); pr_err("gennvm: could not read BB table\n");
} }
...@@ -199,6 +216,7 @@ static int gennvm_register(struct nvm_dev *dev) ...@@ -199,6 +216,7 @@ static int gennvm_register(struct nvm_dev *dev)
if (!gn) if (!gn)
return -ENOMEM; return -ENOMEM;
gn->dev = dev;
gn->nr_luns = dev->nr_luns; gn->nr_luns = dev->nr_luns;
dev->mp = gn; dev->mp = gn;
...@@ -254,6 +272,7 @@ static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev, ...@@ -254,6 +272,7 @@ static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev,
blk->type = 1; blk->type = 1;
lun->vlun.nr_free_blocks--; lun->vlun.nr_free_blocks--;
lun->vlun.nr_inuse_blocks++;
spin_unlock(&vlun->lock); spin_unlock(&vlun->lock);
out: out:
...@@ -271,16 +290,21 @@ static void gennvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk) ...@@ -271,16 +290,21 @@ static void gennvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
case 1: case 1:
list_move_tail(&blk->list, &lun->free_list); list_move_tail(&blk->list, &lun->free_list);
lun->vlun.nr_free_blocks++; lun->vlun.nr_free_blocks++;
lun->vlun.nr_inuse_blocks--;
blk->type = 0; blk->type = 0;
break; break;
case 2: case 2:
list_move_tail(&blk->list, &lun->bb_list); list_move_tail(&blk->list, &lun->bb_list);
lun->vlun.nr_bad_blocks++;
lun->vlun.nr_inuse_blocks--;
break; break;
default: default:
WARN_ON_ONCE(1); WARN_ON_ONCE(1);
pr_err("gennvm: erroneous block type (%lu -> %u)\n", pr_err("gennvm: erroneous block type (%lu -> %u)\n",
blk->id, blk->type); blk->id, blk->type);
list_move_tail(&blk->list, &lun->bb_list); list_move_tail(&blk->list, &lun->bb_list);
lun->vlun.nr_bad_blocks++;
lun->vlun.nr_inuse_blocks--;
} }
spin_unlock(&vlun->lock); spin_unlock(&vlun->lock);
...@@ -292,10 +316,10 @@ static void gennvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd) ...@@ -292,10 +316,10 @@ static void gennvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
if (rqd->nr_pages > 1) { if (rqd->nr_pages > 1) {
for (i = 0; i < rqd->nr_pages; i++) for (i = 0; i < rqd->nr_pages; i++)
rqd->ppa_list[i] = addr_to_generic_mode(dev, rqd->ppa_list[i] = dev_to_generic_addr(dev,
rqd->ppa_list[i]); rqd->ppa_list[i]);
} else { } else {
rqd->ppa_addr = addr_to_generic_mode(dev, rqd->ppa_addr); rqd->ppa_addr = dev_to_generic_addr(dev, rqd->ppa_addr);
} }
} }
...@@ -305,10 +329,10 @@ static void gennvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd) ...@@ -305,10 +329,10 @@ static void gennvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
if (rqd->nr_pages > 1) { if (rqd->nr_pages > 1) {
for (i = 0; i < rqd->nr_pages; i++) for (i = 0; i < rqd->nr_pages; i++)
rqd->ppa_list[i] = generic_to_addr_mode(dev, rqd->ppa_list[i] = generic_to_dev_addr(dev,
rqd->ppa_list[i]); rqd->ppa_list[i]);
} else { } else {
rqd->ppa_addr = generic_to_addr_mode(dev, rqd->ppa_addr); rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
} }
} }
...@@ -354,10 +378,10 @@ static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd) ...@@ -354,10 +378,10 @@ static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd)
{ {
int i; int i;
if (!dev->ops->set_bb) if (!dev->ops->set_bb_tbl)
return; return;
if (dev->ops->set_bb(dev->q, rqd, 1)) if (dev->ops->set_bb_tbl(dev->q, rqd, 1))
return; return;
gennvm_addr_to_generic_mode(dev, rqd); gennvm_addr_to_generic_mode(dev, rqd);
...@@ -440,15 +464,24 @@ static struct nvm_lun *gennvm_get_lun(struct nvm_dev *dev, int lunid) ...@@ -440,15 +464,24 @@ static struct nvm_lun *gennvm_get_lun(struct nvm_dev *dev, int lunid)
return &gn->luns[lunid].vlun; return &gn->luns[lunid].vlun;
} }
static void gennvm_free_blocks_print(struct nvm_dev *dev) static void gennvm_lun_info_print(struct nvm_dev *dev)
{ {
struct gen_nvm *gn = dev->mp; struct gen_nvm *gn = dev->mp;
struct gen_lun *lun; struct gen_lun *lun;
unsigned int i; unsigned int i;
gennvm_for_each_lun(gn, lun, i)
pr_info("%s: lun%8u\t%u\n", gennvm_for_each_lun(gn, lun, i) {
dev->name, i, lun->vlun.nr_free_blocks); spin_lock(&lun->vlun.lock);
pr_info("%s: lun%8u\t%u\t%u\t%u\n",
dev->name, i,
lun->vlun.nr_free_blocks,
lun->vlun.nr_inuse_blocks,
lun->vlun.nr_bad_blocks);
spin_unlock(&lun->vlun.lock);
}
} }
static struct nvmm_type gennvm = { static struct nvmm_type gennvm = {
...@@ -466,7 +499,7 @@ static struct nvmm_type gennvm = { ...@@ -466,7 +499,7 @@ static struct nvmm_type gennvm = {
.erase_blk = gennvm_erase_blk, .erase_blk = gennvm_erase_blk,
.get_lun = gennvm_get_lun, .get_lun = gennvm_get_lun,
.free_blocks_print = gennvm_free_blocks_print, .lun_info_print = gennvm_lun_info_print,
}; };
static int __init gennvm_module_init(void) static int __init gennvm_module_init(void)
......
...@@ -35,6 +35,8 @@ struct gen_lun { ...@@ -35,6 +35,8 @@ struct gen_lun {
}; };
struct gen_nvm { struct gen_nvm {
struct nvm_dev *dev;
int nr_luns; int nr_luns;
struct gen_lun *luns; struct gen_lun *luns;
}; };
......
...@@ -123,12 +123,42 @@ static u64 block_to_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
return blk->id * rrpc->dev->pgs_per_blk;
}
static struct ppa_addr linear_to_generic_addr(struct nvm_dev *dev,
struct ppa_addr r)
{
struct ppa_addr l;
int secs, pgs, blks, luns;
sector_t ppa = r.ppa;
l.ppa = 0;
div_u64_rem(ppa, dev->sec_per_pg, &secs);
l.g.sec = secs;
sector_div(ppa, dev->sec_per_pg);
div_u64_rem(ppa, dev->sec_per_blk, &pgs);
l.g.pg = pgs;
sector_div(ppa, dev->pgs_per_blk);
div_u64_rem(ppa, dev->blks_per_lun, &blks);
l.g.blk = blks;
sector_div(ppa, dev->blks_per_lun);
div_u64_rem(ppa, dev->luns_per_chnl, &luns);
l.g.lun = luns;
sector_div(ppa, dev->luns_per_chnl);
l.g.ch = ppa;
return l;
}
static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_dev *dev, u64 addr)
{
struct ppa_addr paddr;
paddr.ppa = addr;
return __linear_to_generic_addr(dev, paddr);
return linear_to_generic_addr(dev, paddr);
}
/* requires lun->lock taken */
......
...@@ -93,7 +93,7 @@ struct nvme_nvm_l2ptbl { ...@@ -93,7 +93,7 @@ struct nvme_nvm_l2ptbl {
__le16 cdw14[6]; __le16 cdw14[6];
}; };
struct nvme_nvm_bbtbl { struct nvme_nvm_getbbtbl {
__u8 opcode; __u8 opcode;
__u8 flags; __u8 flags;
__u16 command_id; __u16 command_id;
...@@ -101,10 +101,23 @@ struct nvme_nvm_bbtbl { ...@@ -101,10 +101,23 @@ struct nvme_nvm_bbtbl {
__u64 rsvd[2]; __u64 rsvd[2];
__le64 prp1; __le64 prp1;
__le64 prp2; __le64 prp2;
__le32 prp1_len; __le64 spba;
__le32 prp2_len; __u32 rsvd4[4];
__le32 lbb; };
__u32 rsvd11[3];
struct nvme_nvm_setbbtbl {
__u8 opcode;
__u8 flags;
__u16 command_id;
__le32 nsid;
__le64 rsvd[2];
__le64 prp1;
__le64 prp2;
__le64 spba;
__le16 nlb;
__u8 value;
__u8 rsvd3;
__u32 rsvd4[3];
}; };
struct nvme_nvm_erase_blk { struct nvme_nvm_erase_blk {
...@@ -129,8 +142,8 @@ struct nvme_nvm_command { ...@@ -129,8 +142,8 @@ struct nvme_nvm_command {
struct nvme_nvm_hb_rw hb_rw; struct nvme_nvm_hb_rw hb_rw;
struct nvme_nvm_ph_rw ph_rw; struct nvme_nvm_ph_rw ph_rw;
struct nvme_nvm_l2ptbl l2p; struct nvme_nvm_l2ptbl l2p;
struct nvme_nvm_bbtbl get_bb; struct nvme_nvm_getbbtbl get_bb;
struct nvme_nvm_bbtbl set_bb; struct nvme_nvm_setbbtbl set_bb;
struct nvme_nvm_erase_blk erase; struct nvme_nvm_erase_blk erase;
}; };
}; };
...@@ -142,11 +155,13 @@ struct nvme_nvm_id_group { ...@@ -142,11 +155,13 @@ struct nvme_nvm_id_group {
__u8 num_ch; __u8 num_ch;
__u8 num_lun; __u8 num_lun;
__u8 num_pln; __u8 num_pln;
__u8 rsvd1;
__le16 num_blk; __le16 num_blk;
__le16 num_pg; __le16 num_pg;
__le16 fpg_sz; __le16 fpg_sz;
__le16 csecs; __le16 csecs;
__le16 sos; __le16 sos;
__le16 rsvd2;
__le32 trdt; __le32 trdt;
__le32 trdm; __le32 trdm;
__le32 tprt; __le32 tprt;
...@@ -154,8 +169,9 @@ struct nvme_nvm_id_group { ...@@ -154,8 +169,9 @@ struct nvme_nvm_id_group {
__le32 tbet; __le32 tbet;
__le32 tbem; __le32 tbem;
__le32 mpos; __le32 mpos;
__le32 mccap;
__le16 cpar; __le16 cpar;
__u8 reserved[913]; __u8 reserved[906];
} __packed; } __packed;
struct nvme_nvm_addr_format { struct nvme_nvm_addr_format {
...@@ -178,15 +194,28 @@ struct nvme_nvm_id { ...@@ -178,15 +194,28 @@ struct nvme_nvm_id {
__u8 ver_id; __u8 ver_id;
__u8 vmnt; __u8 vmnt;
__u8 cgrps; __u8 cgrps;
__u8 res[5]; __u8 res;
__le32 cap; __le32 cap;
__le32 dom; __le32 dom;
struct nvme_nvm_addr_format ppaf; struct nvme_nvm_addr_format ppaf;
__u8 ppat; __u8 resv[228];
__u8 resv[223];
struct nvme_nvm_id_group groups[4]; struct nvme_nvm_id_group groups[4];
} __packed; } __packed;
struct nvme_nvm_bb_tbl {
__u8 tblid[4];
__le16 verid;
__le16 revid;
__le32 rvsd1;
__le32 tblks;
__le32 tfact;
__le32 tgrown;
__le32 tdresv;
__le32 thresv;
__le32 rsvd2[8];
__u8 blk[0];
};
/* /*
* Check we didn't inadvertently grow the command struct * Check we didn't inadvertently grow the command struct
*/ */
...@@ -195,12 +224,14 @@ static inline void _nvme_nvm_check_size(void) ...@@ -195,12 +224,14 @@ static inline void _nvme_nvm_check_size(void)
BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64); BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64);
BUILD_BUG_ON(sizeof(struct nvme_nvm_hb_rw) != 64); BUILD_BUG_ON(sizeof(struct nvme_nvm_hb_rw) != 64);
BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64); BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64);
BUILD_BUG_ON(sizeof(struct nvme_nvm_bbtbl) != 64); BUILD_BUG_ON(sizeof(struct nvme_nvm_getbbtbl) != 64);
BUILD_BUG_ON(sizeof(struct nvme_nvm_setbbtbl) != 64);
BUILD_BUG_ON(sizeof(struct nvme_nvm_l2ptbl) != 64); BUILD_BUG_ON(sizeof(struct nvme_nvm_l2ptbl) != 64);
BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64); BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64);
BUILD_BUG_ON(sizeof(struct nvme_nvm_id_group) != 960); BUILD_BUG_ON(sizeof(struct nvme_nvm_id_group) != 960);
BUILD_BUG_ON(sizeof(struct nvme_nvm_addr_format) != 128); BUILD_BUG_ON(sizeof(struct nvme_nvm_addr_format) != 128);
BUILD_BUG_ON(sizeof(struct nvme_nvm_id) != 4096); BUILD_BUG_ON(sizeof(struct nvme_nvm_id) != 4096);
BUILD_BUG_ON(sizeof(struct nvme_nvm_bb_tbl) != 512);
} }
static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id) static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
...@@ -234,6 +265,7 @@ static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id) ...@@ -234,6 +265,7 @@ static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
dst->tbet = le32_to_cpu(src->tbet); dst->tbet = le32_to_cpu(src->tbet);
dst->tbem = le32_to_cpu(src->tbem); dst->tbem = le32_to_cpu(src->tbem);
dst->mpos = le32_to_cpu(src->mpos); dst->mpos = le32_to_cpu(src->mpos);
dst->mccap = le32_to_cpu(src->mccap);
dst->cpar = le16_to_cpu(src->cpar); dst->cpar = le16_to_cpu(src->cpar);
} }
...@@ -244,6 +276,7 @@ static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id) ...@@ -244,6 +276,7 @@ static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
static int nvme_nvm_identity(struct request_queue *q, struct nvm_id *nvm_id) static int nvme_nvm_identity(struct request_queue *q, struct nvm_id *nvm_id)
{ {
struct nvme_ns *ns = q->queuedata; struct nvme_ns *ns = q->queuedata;
struct nvme_dev *dev = ns->dev;
struct nvme_nvm_id *nvme_nvm_id; struct nvme_nvm_id *nvme_nvm_id;
struct nvme_nvm_command c = {}; struct nvme_nvm_command c = {};
int ret; int ret;
...@@ -256,8 +289,8 @@ static int nvme_nvm_identity(struct request_queue *q, struct nvm_id *nvm_id) ...@@ -256,8 +289,8 @@ static int nvme_nvm_identity(struct request_queue *q, struct nvm_id *nvm_id)
if (!nvme_nvm_id) if (!nvme_nvm_id)
return -ENOMEM; return -ENOMEM;
ret = nvme_submit_sync_cmd(q, (struct nvme_command *)&c, nvme_nvm_id, ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c,
sizeof(struct nvme_nvm_id)); nvme_nvm_id, sizeof(struct nvme_nvm_id));
if (ret) { if (ret) {
ret = -EIO; ret = -EIO;
goto out; goto out;
...@@ -268,6 +301,8 @@ static int nvme_nvm_identity(struct request_queue *q, struct nvm_id *nvm_id) ...@@ -268,6 +301,8 @@ static int nvme_nvm_identity(struct request_queue *q, struct nvm_id *nvm_id)
nvm_id->cgrps = nvme_nvm_id->cgrps; nvm_id->cgrps = nvme_nvm_id->cgrps;
nvm_id->cap = le32_to_cpu(nvme_nvm_id->cap); nvm_id->cap = le32_to_cpu(nvme_nvm_id->cap);
nvm_id->dom = le32_to_cpu(nvme_nvm_id->dom); nvm_id->dom = le32_to_cpu(nvme_nvm_id->dom);
memcpy(&nvm_id->ppaf, &nvme_nvm_id->ppaf,
sizeof(struct nvme_nvm_addr_format));
ret = init_grps(nvm_id, nvme_nvm_id); ret = init_grps(nvm_id, nvme_nvm_id);
out: out:
...@@ -281,7 +316,7 @@ static int nvme_nvm_get_l2p_tbl(struct request_queue *q, u64 slba, u32 nlb, ...@@ -281,7 +316,7 @@ static int nvme_nvm_get_l2p_tbl(struct request_queue *q, u64 slba, u32 nlb,
struct nvme_ns *ns = q->queuedata; struct nvme_ns *ns = q->queuedata;
struct nvme_dev *dev = ns->dev; struct nvme_dev *dev = ns->dev;
struct nvme_nvm_command c = {}; struct nvme_nvm_command c = {};
u32 len = queue_max_hw_sectors(q) << 9; u32 len = queue_max_hw_sectors(dev->admin_q) << 9;
u32 nlb_pr_rq = len / sizeof(u64); u32 nlb_pr_rq = len / sizeof(u64);
u64 cmd_slba = slba; u64 cmd_slba = slba;
void *entries; void *entries;
...@@ -299,8 +334,8 @@ static int nvme_nvm_get_l2p_tbl(struct request_queue *q, u64 slba, u32 nlb, ...@@ -299,8 +334,8 @@ static int nvme_nvm_get_l2p_tbl(struct request_queue *q, u64 slba, u32 nlb,
c.l2p.slba = cpu_to_le64(cmd_slba); c.l2p.slba = cpu_to_le64(cmd_slba);
c.l2p.nlb = cpu_to_le32(cmd_nlb); c.l2p.nlb = cpu_to_le32(cmd_nlb);
ret = nvme_submit_sync_cmd(q, (struct nvme_command *)&c, ret = nvme_submit_sync_cmd(dev->admin_q,
entries, len); (struct nvme_command *)&c, entries, len);
if (ret) { if (ret) {
dev_err(dev->dev, "L2P table transfer failed (%d)\n", dev_err(dev->dev, "L2P table transfer failed (%d)\n",
ret); ret);
...@@ -322,43 +357,82 @@ static int nvme_nvm_get_l2p_tbl(struct request_queue *q, u64 slba, u32 nlb, ...@@ -322,43 +357,82 @@ static int nvme_nvm_get_l2p_tbl(struct request_queue *q, u64 slba, u32 nlb,
return ret; return ret;
} }
static int nvme_nvm_get_bb_tbl(struct request_queue *q, int lunid, static int nvme_nvm_get_bb_tbl(struct request_queue *q, struct ppa_addr ppa,
unsigned int nr_blocks, int nr_blocks, nvm_bb_update_fn *update_bbtbl,
nvm_bb_update_fn *update_bbtbl, void *priv) void *priv)
{ {
struct nvme_ns *ns = q->queuedata; struct nvme_ns *ns = q->queuedata;
struct nvme_dev *dev = ns->dev; struct nvme_dev *dev = ns->dev;
struct nvme_nvm_command c = {}; struct nvme_nvm_command c = {};
void *bb_bitmap; struct nvme_nvm_bb_tbl *bb_tbl;
u16 bb_bitmap_size; int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blocks;
int ret = 0; int ret = 0;
c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl; c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl;
c.get_bb.nsid = cpu_to_le32(ns->ns_id); c.get_bb.nsid = cpu_to_le32(ns->ns_id);
c.get_bb.lbb = cpu_to_le32(lunid); c.get_bb.spba = cpu_to_le64(ppa.ppa);
bb_bitmap_size = ((nr_blocks >> 15) + 1) * PAGE_SIZE;
bb_bitmap = kmalloc(bb_bitmap_size, GFP_KERNEL);
if (!bb_bitmap)
return -ENOMEM;
bitmap_zero(bb_bitmap, nr_blocks); bb_tbl = kzalloc(tblsz, GFP_KERNEL);
if (!bb_tbl)
return -ENOMEM;
ret = nvme_submit_sync_cmd(q, (struct nvme_command *)&c, bb_bitmap, ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c,
bb_bitmap_size); bb_tbl, tblsz);
if (ret) { if (ret) {
dev_err(dev->dev, "get bad block table failed (%d)\n", ret); dev_err(dev->dev, "get bad block table failed (%d)\n", ret);
ret = -EIO; ret = -EIO;
goto out; goto out;
} }
ret = update_bbtbl(lunid, bb_bitmap, nr_blocks, priv); if (bb_tbl->tblid[0] != 'B' || bb_tbl->tblid[1] != 'B' ||
bb_tbl->tblid[2] != 'L' || bb_tbl->tblid[3] != 'T') {
dev_err(dev->dev, "bbt format mismatch\n");
ret = -EINVAL;
goto out;
}
if (le16_to_cpu(bb_tbl->verid) != 1) {
ret = -EINVAL;
dev_err(dev->dev, "bbt version not supported\n");
goto out;
}
if (le32_to_cpu(bb_tbl->tblks) != nr_blocks) {
ret = -EINVAL;
dev_err(dev->dev, "bbt unsuspected blocks returned (%u!=%u)",
le32_to_cpu(bb_tbl->tblks), nr_blocks);
goto out;
}
ret = update_bbtbl(ppa, nr_blocks, bb_tbl->blk, priv);
if (ret) { if (ret) {
ret = -EINTR; ret = -EINTR;
goto out; goto out;
} }
out: out:
kfree(bb_bitmap); kfree(bb_tbl);
return ret;
}
static int nvme_nvm_set_bb_tbl(struct request_queue *q, struct nvm_rq *rqd,
int type)
{
struct nvme_ns *ns = q->queuedata;
struct nvme_dev *dev = ns->dev;
struct nvme_nvm_command c = {};
int ret = 0;
c.set_bb.opcode = nvme_nvm_admin_set_bb_tbl;
c.set_bb.nsid = cpu_to_le32(ns->ns_id);
c.set_bb.spba = cpu_to_le64(rqd->ppa_addr.ppa);
c.set_bb.nlb = cpu_to_le16(rqd->nr_pages - 1);
c.set_bb.value = type;
ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c,
NULL, 0);
if (ret)
dev_err(dev->dev, "set bad block table failed (%d)\n", ret);
return ret; return ret;
} }
...@@ -474,6 +548,7 @@ static struct nvm_dev_ops nvme_nvm_dev_ops = { ...@@ -474,6 +548,7 @@ static struct nvm_dev_ops nvme_nvm_dev_ops = {
.get_l2p_tbl = nvme_nvm_get_l2p_tbl, .get_l2p_tbl = nvme_nvm_get_l2p_tbl,
.get_bb_tbl = nvme_nvm_get_bb_tbl, .get_bb_tbl = nvme_nvm_get_bb_tbl,
.set_bb_tbl = nvme_nvm_set_bb_tbl,
.submit_io = nvme_nvm_submit_io, .submit_io = nvme_nvm_submit_io,
.erase_block = nvme_nvm_erase_block, .erase_block = nvme_nvm_erase_block,
......
...@@ -968,6 +968,7 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
return;
if (likely(nvmeq->cq_vector >= 0))
writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
nvmeq->cq_head = head;
nvmeq->cq_phase = phase;
...@@ -2268,7 +2269,7 @@ static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid)
if (dev->max_hw_sectors) {
blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors);
blk_queue_max_segments(ns->queue,
((dev->max_hw_sectors << 9) / dev->page_size) + 1);
(dev->max_hw_sectors / (dev->page_size >> 9)) + 1);
}
if (dev->stripe_size)
blk_queue_chunk_sectors(ns->queue, dev->stripe_size >> 9);
...@@ -2787,6 +2788,10 @@ static void nvme_del_queue_end(struct nvme_queue *nvmeq)
{
struct nvme_delq_ctx *dq = nvmeq->cmdinfo.ctx;
nvme_put_dq(dq);
spin_lock_irq(&nvmeq->q_lock);
nvme_process_cq(nvmeq);
spin_unlock_irq(&nvmeq->q_lock);
}
static int adapter_async_del_queue(struct nvme_queue *nvmeq, u8 opcode,
......
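The max-segments change in the nvme_alloc_ns hunk above replaces a left shift with a division so the intermediate value can no longer overflow 32-bit arithmetic. A small sketch with assumed values (max_hw_sectors and page_size here are made up; the wraparound happens on platforms where unsigned int is 32 bits):

#include <stdio.h>

int main(void)
{
	unsigned int max_hw_sectors = 8 * 1024 * 1024;	/* 4 GiB worth of 512-byte sectors */
	unsigned int page_size = 4096;

	/* Old formula: the "<< 9" wraps a 32-bit unsigned int to 0 first. */
	unsigned int old_segs = ((max_hw_sectors << 9) / page_size) + 1;	/* 1 */

	/* Fixed formula: divide sectors by pages-per-sector instead. */
	unsigned int new_segs = (max_hw_sectors / (page_size >> 9)) + 1;	/* 1048577 */

	printf("old=%u new=%u\n", old_segs, new_segs);
	return 0;
}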
...@@ -58,7 +58,6 @@ enum { ...@@ -58,7 +58,6 @@ enum {
struct nvm_id_group { struct nvm_id_group {
u8 mtype; u8 mtype;
u8 fmtype; u8 fmtype;
u16 res16;
u8 num_ch; u8 num_ch;
u8 num_lun; u8 num_lun;
u8 num_pln; u8 num_pln;
...@@ -74,9 +73,9 @@ struct nvm_id_group { ...@@ -74,9 +73,9 @@ struct nvm_id_group {
u32 tbet; u32 tbet;
u32 tbem; u32 tbem;
u32 mpos; u32 mpos;
u32 mccap;
u16 cpar; u16 cpar;
u8 res[913]; };
} __packed;
struct nvm_addr_format { struct nvm_addr_format {
u8 ch_offset; u8 ch_offset;
...@@ -91,19 +90,15 @@ struct nvm_addr_format { ...@@ -91,19 +90,15 @@ struct nvm_addr_format {
u8 pg_len; u8 pg_len;
u8 sect_offset; u8 sect_offset;
u8 sect_len; u8 sect_len;
u8 res[4];
}; };
struct nvm_id { struct nvm_id {
u8 ver_id; u8 ver_id;
u8 vmnt; u8 vmnt;
u8 cgrps; u8 cgrps;
u8 res[5];
u32 cap; u32 cap;
u32 dom; u32 dom;
struct nvm_addr_format ppaf; struct nvm_addr_format ppaf;
u8 ppat;
u8 resv[224];
struct nvm_id_group groups[4]; struct nvm_id_group groups[4];
} __packed; } __packed;
...@@ -123,39 +118,28 @@ struct nvm_tgt_instance { ...@@ -123,39 +118,28 @@ struct nvm_tgt_instance {
#define NVM_VERSION_MINOR 0 #define NVM_VERSION_MINOR 0
#define NVM_VERSION_PATCH 0 #define NVM_VERSION_PATCH 0
#define NVM_SEC_BITS (8)
#define NVM_PL_BITS (6)
#define NVM_PG_BITS (16)
#define NVM_BLK_BITS (16) #define NVM_BLK_BITS (16)
#define NVM_LUN_BITS (10) #define NVM_PG_BITS (16)
#define NVM_SEC_BITS (8)
#define NVM_PL_BITS (8)
#define NVM_LUN_BITS (8)
#define NVM_CH_BITS (8) #define NVM_CH_BITS (8)
struct ppa_addr { struct ppa_addr {
union {
/* Channel-based PPA format in nand 4x2x2x2x8x10 */
struct {
u64 ch : 4;
u64 sec : 2; /* 4 sectors per page */
u64 pl : 2; /* 4 planes per LUN */
u64 lun : 2; /* 4 LUNs per channel */
u64 pg : 8; /* 256 pages per block */
u64 blk : 10;/* 1024 blocks per plane */
u64 resved : 36;
} chnl;
/* Generic structure for all addresses */ /* Generic structure for all addresses */
union {
struct { struct {
u64 blk : NVM_BLK_BITS;
u64 pg : NVM_PG_BITS;
u64 sec : NVM_SEC_BITS; u64 sec : NVM_SEC_BITS;
u64 pl : NVM_PL_BITS; u64 pl : NVM_PL_BITS;
u64 pg : NVM_PG_BITS;
u64 blk : NVM_BLK_BITS;
u64 lun : NVM_LUN_BITS; u64 lun : NVM_LUN_BITS;
u64 ch : NVM_CH_BITS; u64 ch : NVM_CH_BITS;
} g; } g;
u64 ppa; u64 ppa;
}; };
} __packed; };
struct nvm_rq { struct nvm_rq {
struct nvm_tgt_instance *ins; struct nvm_tgt_instance *ins;
...@@ -191,11 +175,11 @@ static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata) ...@@ -191,11 +175,11 @@ static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata)
struct nvm_block; struct nvm_block;
typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *); typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *);
typedef int (nvm_bb_update_fn)(u32, void *, unsigned int, void *); typedef int (nvm_bb_update_fn)(struct ppa_addr, int, u8 *, void *);
typedef int (nvm_id_fn)(struct request_queue *, struct nvm_id *); typedef int (nvm_id_fn)(struct request_queue *, struct nvm_id *);
typedef int (nvm_get_l2p_tbl_fn)(struct request_queue *, u64, u32, typedef int (nvm_get_l2p_tbl_fn)(struct request_queue *, u64, u32,
nvm_l2p_update_fn *, void *); nvm_l2p_update_fn *, void *);
typedef int (nvm_op_bb_tbl_fn)(struct request_queue *, int, unsigned int, typedef int (nvm_op_bb_tbl_fn)(struct request_queue *, struct ppa_addr, int,
nvm_bb_update_fn *, void *); nvm_bb_update_fn *, void *);
typedef int (nvm_op_set_bb_fn)(struct request_queue *, struct nvm_rq *, int); typedef int (nvm_op_set_bb_fn)(struct request_queue *, struct nvm_rq *, int);
typedef int (nvm_submit_io_fn)(struct request_queue *, struct nvm_rq *); typedef int (nvm_submit_io_fn)(struct request_queue *, struct nvm_rq *);
...@@ -210,7 +194,7 @@ struct nvm_dev_ops { ...@@ -210,7 +194,7 @@ struct nvm_dev_ops {
nvm_id_fn *identity; nvm_id_fn *identity;
nvm_get_l2p_tbl_fn *get_l2p_tbl; nvm_get_l2p_tbl_fn *get_l2p_tbl;
nvm_op_bb_tbl_fn *get_bb_tbl; nvm_op_bb_tbl_fn *get_bb_tbl;
nvm_op_set_bb_fn *set_bb; nvm_op_set_bb_fn *set_bb_tbl;
nvm_submit_io_fn *submit_io; nvm_submit_io_fn *submit_io;
nvm_erase_blk_fn *erase_block; nvm_erase_blk_fn *erase_block;
...@@ -220,7 +204,7 @@ struct nvm_dev_ops { ...@@ -220,7 +204,7 @@ struct nvm_dev_ops {
nvm_dev_dma_alloc_fn *dev_dma_alloc; nvm_dev_dma_alloc_fn *dev_dma_alloc;
nvm_dev_dma_free_fn *dev_dma_free; nvm_dev_dma_free_fn *dev_dma_free;
uint8_t max_phys_sect; unsigned int max_phys_sect;
}; };
struct nvm_lun { struct nvm_lun {
...@@ -229,7 +213,9 @@ struct nvm_lun { ...@@ -229,7 +213,9 @@ struct nvm_lun {
int lun_id; int lun_id;
int chnl_id; int chnl_id;
unsigned int nr_inuse_blocks; /* Number of used blocks */
unsigned int nr_free_blocks; /* Number of unused blocks */ unsigned int nr_free_blocks; /* Number of unused blocks */
unsigned int nr_bad_blocks; /* Number of bad blocks */
struct nvm_block *blocks; struct nvm_block *blocks;
spinlock_t lock; spinlock_t lock;
...@@ -263,8 +249,7 @@ struct nvm_dev { ...@@ -263,8 +249,7 @@ struct nvm_dev {
int blks_per_lun; int blks_per_lun;
int sec_size; int sec_size;
int oob_size; int oob_size;
int addr_mode; struct nvm_addr_format ppaf;
struct nvm_addr_format addr_format;
/* Calculated/Cached values. These do not reflect the actual usable /* Calculated/Cached values. These do not reflect the actual usable
* blocks at run-time. * blocks at run-time.
...@@ -290,118 +275,45 @@ struct nvm_dev { ...@@ -290,118 +275,45 @@ struct nvm_dev {
char name[DISK_NAME_LEN]; char name[DISK_NAME_LEN];
}; };
/* fallback conversion */ static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev,
static struct ppa_addr __generic_to_linear_addr(struct nvm_dev *dev,
struct ppa_addr r) struct ppa_addr r)
{ {
struct ppa_addr l; struct ppa_addr l;
l.ppa = r.g.sec + l.ppa = ((u64)r.g.blk) << dev->ppaf.blk_offset;
r.g.pg * dev->sec_per_pg + l.ppa |= ((u64)r.g.pg) << dev->ppaf.pg_offset;
r.g.blk * (dev->pgs_per_blk * l.ppa |= ((u64)r.g.sec) << dev->ppaf.sect_offset;
dev->sec_per_pg) + l.ppa |= ((u64)r.g.pl) << dev->ppaf.pln_offset;
r.g.lun * (dev->blks_per_lun * l.ppa |= ((u64)r.g.lun) << dev->ppaf.lun_offset;
dev->pgs_per_blk * l.ppa |= ((u64)r.g.ch) << dev->ppaf.ch_offset;
dev->sec_per_pg) +
r.g.ch * (dev->blks_per_lun *
dev->pgs_per_blk *
dev->luns_per_chnl *
dev->sec_per_pg);
return l; return l;
} }
/* fallback conversion */ static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev,
static struct ppa_addr __linear_to_generic_addr(struct nvm_dev *dev,
struct ppa_addr r) struct ppa_addr r)
{ {
struct ppa_addr l; struct ppa_addr l;
int secs, pgs, blks, luns;
sector_t ppa = r.ppa;
l.ppa = 0;
div_u64_rem(ppa, dev->sec_per_pg, &secs);
l.g.sec = secs;
sector_div(ppa, dev->sec_per_pg);
div_u64_rem(ppa, dev->sec_per_blk, &pgs);
l.g.pg = pgs;
sector_div(ppa, dev->pgs_per_blk);
div_u64_rem(ppa, dev->blks_per_lun, &blks);
l.g.blk = blks;
sector_div(ppa, dev->blks_per_lun);
div_u64_rem(ppa, dev->luns_per_chnl, &luns);
l.g.lun = luns;
sector_div(ppa, dev->luns_per_chnl);
l.g.ch = ppa;
return l; /*
} * (r.ppa << X offset) & X len bitmask. X eq. blk, pg, etc.
*/
static struct ppa_addr __generic_to_chnl_addr(struct ppa_addr r) l.g.blk = (r.ppa >> dev->ppaf.blk_offset) &
{ (((1 << dev->ppaf.blk_len) - 1));
struct ppa_addr l; l.g.pg |= (r.ppa >> dev->ppaf.pg_offset) &
(((1 << dev->ppaf.pg_len) - 1));
l.ppa = 0; l.g.sec |= (r.ppa >> dev->ppaf.sect_offset) &
(((1 << dev->ppaf.sect_len) - 1));
l.chnl.sec = r.g.sec; l.g.pl |= (r.ppa >> dev->ppaf.pln_offset) &
l.chnl.pl = r.g.pl; (((1 << dev->ppaf.pln_len) - 1));
l.chnl.pg = r.g.pg; l.g.lun |= (r.ppa >> dev->ppaf.lun_offset) &
l.chnl.blk = r.g.blk; (((1 << dev->ppaf.lun_len) - 1));
l.chnl.lun = r.g.lun; l.g.ch |= (r.ppa >> dev->ppaf.ch_offset) &
l.chnl.ch = r.g.ch; (((1 << dev->ppaf.ch_len) - 1));
return l;
}
static struct ppa_addr __chnl_to_generic_addr(struct ppa_addr r)
{
struct ppa_addr l;
l.ppa = 0;
l.g.sec = r.chnl.sec;
l.g.pl = r.chnl.pl;
l.g.pg = r.chnl.pg;
l.g.blk = r.chnl.blk;
l.g.lun = r.chnl.lun;
l.g.ch = r.chnl.ch;
return l; return l;
} }
static inline struct ppa_addr addr_to_generic_mode(struct nvm_dev *dev,
struct ppa_addr gppa)
{
switch (dev->addr_mode) {
case NVM_ADDRMODE_LINEAR:
return __linear_to_generic_addr(dev, gppa);
case NVM_ADDRMODE_CHANNEL:
return __chnl_to_generic_addr(gppa);
default:
BUG();
}
return gppa;
}
static inline struct ppa_addr generic_to_addr_mode(struct nvm_dev *dev,
struct ppa_addr gppa)
{
switch (dev->addr_mode) {
case NVM_ADDRMODE_LINEAR:
return __generic_to_linear_addr(dev, gppa);
case NVM_ADDRMODE_CHANNEL:
return __generic_to_chnl_addr(gppa);
default:
BUG();
}
return gppa;
}
static inline int ppa_empty(struct ppa_addr ppa_addr) static inline int ppa_empty(struct ppa_addr ppa_addr)
{ {
return (ppa_addr.ppa == ADDR_EMPTY); return (ppa_addr.ppa == ADDR_EMPTY);
...@@ -468,7 +380,7 @@ typedef int (nvmm_end_io_fn)(struct nvm_rq *, int); ...@@ -468,7 +380,7 @@ typedef int (nvmm_end_io_fn)(struct nvm_rq *, int);
typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *, typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *,
unsigned long); unsigned long);
typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int); typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int);
typedef void (nvmm_free_blocks_print_fn)(struct nvm_dev *); typedef void (nvmm_lun_info_print_fn)(struct nvm_dev *);
struct nvmm_type { struct nvmm_type {
const char *name; const char *name;
...@@ -492,7 +404,7 @@ struct nvmm_type { ...@@ -492,7 +404,7 @@ struct nvmm_type {
nvmm_get_lun_fn *get_lun; nvmm_get_lun_fn *get_lun;
/* Statistics */ /* Statistics */
nvmm_free_blocks_print_fn *free_blocks_print; nvmm_lun_info_print_fn *lun_info_print;
struct list_head list; struct list_head list;
}; };
......
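The generic_to_dev_addr()/dev_to_generic_addr() helpers introduced above replace the old per-mode conversion routines with plain shift-and-mask packing driven by the device-reported PPA format. A standalone sketch of the same idea, using made-up field offsets and widths rather than a real struct nvm_addr_format:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical field layout; the real offsets/widths come from the
	 * device's address format. */
	const int blk_offset = 16, blk_len = 16;
	const int pg_offset = 0, pg_len = 16;
	uint64_t blk = 42, pg = 7;

	/* generic -> device: place each field at its device-defined offset */
	uint64_t dev_ppa = (blk << blk_offset) | (pg << pg_offset);

	/* device -> generic: shift back down and mask to the field width */
	uint64_t blk2 = (dev_ppa >> blk_offset) & ((1ULL << blk_len) - 1);
	uint64_t pg2  = (dev_ppa >> pg_offset) & ((1ULL << pg_len) - 1);

	printf("blk=%llu pg=%llu\n",
	       (unsigned long long)blk2, (unsigned long long)pg2);
	return 0;
}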