Commit 79f2b358 authored by Christoph Hellwig, committed by Jens Axboe

nvme: don't poll the CQ from the kthread

There is no reason to do unconditional polling of CQs: per the NVMe
spec, completion processing is interrupt driven, so the per-queue
interrupt handler already reaps them.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Keith Busch <keith.busch@intel.com>
Reviewed-by: Sagi Grimberg <sagig@mellanox.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 9396dec9
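
For context: completions are already reaped from each queue's interrupt handler, so the kthread's once-a-second sweep only duplicated work the IRQ path does on demand. A sketch of that path as it looked around this time in drivers/nvme/host/pci.c (details may differ slightly between kernel versions):

static irqreturn_t nvme_irq(int irq, void *data)
{
	irqreturn_t result;
	struct nvme_queue *nvmeq = data;

	spin_lock(&nvmeq->q_lock);
	/* reap pending completions under the same q_lock the kthread's
	 * poll loop used to take */
	nvme_process_cq(nvmeq);
	result = nvmeq->cqe_seen ? IRQ_HANDLED : IRQ_NONE;
	nvmeq->cqe_seen = 0;
	spin_unlock(&nvmeq->q_lock);
	return result;
}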
@@ -1156,9 +1156,6 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
 	nvmeq->qid = qid;
 	nvmeq->cq_vector = -1;
 	dev->queues[qid] = nvmeq;
-
-	/* make sure queue descriptor is set before queue count, for kthread */
-	mb();
 	dev->queue_count++;
 
 	return nvmeq;
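
The mb() removed above paired with the poll loop deleted in the hunks below: it ordered the dev->queues[qid] store before the dev->queue_count increment, so the kthread could never read a count that included a queue whose pointer was not yet visible. A minimal sketch of that ordering (illustrative only, not driver code):

	/* writer: nvme_alloc_queue()      reader: the (now deleted) kthread loop
	 *
	 *   dev->queues[qid] = nvmeq;       for (i = 0; i < dev->queue_count; i++) {
	 *   mb();                               struct nvme_queue *nvmeq = dev->queues[i];
	 *   dev->queue_count++;                 if (!nvmeq)
	 *                                           continue;
	 */

With the loop gone there is no lockless reader left to order against, so the barrier goes as well.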
@@ -1345,7 +1342,6 @@ static int nvme_kthread(void *data)
 		set_current_state(TASK_INTERRUPTIBLE);
 		spin_lock(&dev_list_lock);
 		list_for_each_entry_safe(dev, next, &dev_list, node) {
-			int i;
 			u32 csts = readl(dev->bar + NVME_REG_CSTS);
 
 			/*
@@ -1363,14 +1359,6 @@ static int nvme_kthread(void *data)
 				}
 				continue;
 			}
-			for (i = 0; i < dev->queue_count; i++) {
-				struct nvme_queue *nvmeq = dev->queues[i];
-				if (!nvmeq)
-					continue;
-				spin_lock_irq(&nvmeq->q_lock);
-				nvme_process_cq(nvmeq);
-				spin_unlock_irq(&nvmeq->q_lock);
-			}
 		}
 		spin_unlock(&dev_list_lock);
 		schedule_timeout(round_jiffies_relative(HZ));
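
After this change the kthread's only remaining duty is the periodic health check visible in the surrounding context: read CSTS once a second and trigger a reset when the controller reports failure. Roughly, per iteration (a sketch assembled from the context lines above; trigger_reset() is a hypothetical stand-in for the driver's actual reset hook):

	list_for_each_entry_safe(dev, next, &dev_list, node) {
		u32 csts = readl(dev->bar + NVME_REG_CSTS);

		/* CFS = controller fatal status, NSSRO = NVM subsystem
		 * reset occurred; either warrants a controller reset */
		if ((csts & NVME_CSTS_CFS) ||
		    (dev->subsystem && (csts & NVME_CSTS_NSSRO)))
			trigger_reset(dev);
	}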