nvme-rdma: remove I/O polling support

The code was always a bit of a hack that dug far too much into
RDMA core internals.  Let's kick it out and reimplement proper
dedicated poll queues as needed.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>

commit f9801a484a (parent 3a7afd8ee4)
Author:    Christoph Hellwig
Date:      2018-12-02 17:46:24 +01:00
Committer: Jens Axboe

@@ -1738,29 +1738,6 @@ err:
 	return BLK_STS_IOERR;
 }
 
-static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx)
-{
-	struct nvme_rdma_queue *queue = hctx->driver_data;
-	struct ib_cq *cq = queue->ib_cq;
-	struct ib_wc wc;
-	int found = 0;
-
-	while (ib_poll_cq(cq, 1, &wc) > 0) {
-		struct ib_cqe *cqe = wc.wr_cqe;
-
-		if (cqe) {
-			if (cqe->done == nvme_rdma_recv_done) {
-				nvme_rdma_recv_done(cq, &wc);
-				found++;
-			} else {
-				cqe->done(cq, &wc);
-			}
-		}
-	}
-
-	return found;
-}
-
 static void nvme_rdma_complete_rq(struct request *rq)
 {
 	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
@@ -1782,7 +1759,6 @@ static const struct blk_mq_ops nvme_rdma_mq_ops = {
 	.init_request	= nvme_rdma_init_request,
 	.exit_request	= nvme_rdma_exit_request,
 	.init_hctx	= nvme_rdma_init_hctx,
-	.poll		= nvme_rdma_poll,
 	.timeout	= nvme_rdma_timeout,
 	.map_queues	= nvme_rdma_map_queues,
 };
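
For context on why the removed handler was a layering violation: it called
ib_poll_cq() by hand and compared each completion's wr_cqe->done pointer
against nvme_rdma_recv_done to decide how to dispatch it, duplicating the
dispatch logic that the RDMA core's CQ API already owns.  A dedicated poll
queue can stay inside that API instead: allocate the queue's CQ with
IB_POLL_DIRECT and let the core walk it on each poll call.  Below is a
minimal sketch of such a ->poll handler, reusing the hctx->driver_data and
queue->ib_cq fields from the removed code; it shows the intended direction,
not necessarily the exact code a follow-up series merges:

	static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx)
	{
		struct nvme_rdma_queue *queue = hctx->driver_data;

		/*
		 * ib_process_cq_direct() drains the CQ and invokes each
		 * completion's wr_cqe->done() callback; a budget of -1
		 * means "process everything currently queued".  It is
		 * only valid for CQs allocated with IB_POLL_DIRECT.
		 */
		return ib_process_cq_direct(queue->ib_cq, -1);
	}

With that, the driver never touches ib_poll_cq() directly and never needs to
know which done callback a given work completion maps to.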