Commit f9801a48 authored by Christoph Hellwig's avatar Christoph Hellwig Committed by Jens Axboe

nvme-rdma: remove I/O polling support

The code was always a bit of a hack that digs far too much into
RDMA core internals.  Lets kick it out and reimplement proper
dedicated poll queues as needed.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 3a7afd8e
@@ -1738,29 +1738,6 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
	return BLK_STS_IOERR;
}
/*
 * Drain the queue's completion queue from blk-mq's ->poll hook.
 *
 * Each polled work completion is dispatched through its ib_cqe::done
 * callback; only receive completions (nvme_rdma_recv_done) are counted,
 * since those are the ones that complete I/O requests back to blk-mq.
 *
 * Returns the number of receive completions processed.
 */
static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx)
{
	struct nvme_rdma_queue *queue = hctx->driver_data;
	struct ib_cq *cq = queue->ib_cq;
	struct ib_wc wc;
	int nr_recv = 0;

	while (ib_poll_cq(cq, 1, &wc) > 0) {
		struct ib_cqe *cqe = wc.wr_cqe;

		if (!cqe)
			continue;

		if (cqe->done == nvme_rdma_recv_done) {
			nvme_rdma_recv_done(cq, &wc);
			nr_recv++;
		} else {
			cqe->done(cq, &wc);
		}
	}

	return nr_recv;
}
static void nvme_rdma_complete_rq(struct request *rq)
{
	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
@@ -1782,7 +1759,6 @@ static const struct blk_mq_ops nvme_rdma_mq_ops = {
	.init_request = nvme_rdma_init_request,
	.exit_request = nvme_rdma_exit_request,
	.init_hctx = nvme_rdma_init_hctx,
.poll = nvme_rdma_poll,
	.timeout = nvme_rdma_timeout,
	.map_queues = nvme_rdma_map_queues,
};
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment