blk-mq: split make request handler for multi and single queue
We want slightly different behavior for the two cases:

- On single queue devices, we currently use the per-process plug for
  deferred IO and for merging.

- On multi queue devices, we don't use the per-process plug, but we want
  to go straight to hardware for SYNC IO.

Split blk_mq_make_request() into a blk_sq_make_request() for single queue
devices, and retain blk_mq_make_request() for multi queue devices. Then we
don't need multiple checks for q->nr_hw_queues in the request mapping.

Signed-off-by: Jens Axboe <axboe@fb.com>
commit 07068d5b8e
parent 484b4061e6

Changed file: block/blk-mq.c (209 lines changed)
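Before the diff itself, here is a small standalone C sketch of the two decisions the commit message describes: which make_request handler blk_mq_init_queue() installs, and roughly what each handler does with flush, sync, and async IO. The fake_queue/fake_bio types and the pick_handler(), sq_uses_plug() and dispatch() helpers are invented for illustration only; the diff below is the authoritative change.

/*
 * Standalone illustration only -- not kernel code.  Only the decision
 * logic mirrors the patch: blk_mq_init_queue() picks the handler by hw
 * queue count, the single-queue path may defer to the per-process plug,
 * and the multi-queue path issues SYNC IO directly to the driver.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_queue { int nr_hw_queues; };                 /* stand-in for struct request_queue */
struct fake_bio   { bool is_sync; bool is_flush_fua; };  /* stand-in for the bio rw flags */

/* Which make_request handler blk_mq_init_queue() installs after this patch. */
static const char *pick_handler(const struct fake_queue *q)
{
        return q->nr_hw_queues > 1 ? "blk_mq_make_request" : "blk_sq_make_request";
}

/* Mirrors the use_plug test in blk_sq_make_request(). */
static bool sq_uses_plug(const struct fake_bio *bio)
{
        return !bio->is_flush_fua && !bio->is_sync;
}

/* Rough summary of what each handler does with a given bio. */
static const char *dispatch(const struct fake_queue *q, const struct fake_bio *bio)
{
        if (bio->is_flush_fua)
                return "insert flush request, run the hw queue";

        if (q->nr_hw_queues > 1)
                return bio->is_sync
                        ? "issue directly via ->queue_rq(), requeue on busy"
                        : "merge/insert on the sw ctx, kick the hw queue async";

        return sq_uses_plug(bio)
                ? "merge/defer in the per-process plug (when one exists)"
                : "merge/insert on the sw ctx, run the hw queue";
}

int main(void)
{
        const struct fake_queue single = { .nr_hw_queues = 1 };
        const struct fake_queue multi  = { .nr_hw_queues = 4 };
        const struct fake_bio async_io = { .is_sync = false };
        const struct fake_bio sync_io  = { .is_sync = true  };

        printf("1 hw queue : %s\n", pick_handler(&single));
        printf("4 hw queues: %s\n", pick_handler(&multi));
        printf("single + async: %s\n", dispatch(&single, &async_io));
        printf("single + sync : %s\n", dispatch(&single, &sync_io));
        printf("multi  + async: %s\n", dispatch(&multi, &async_io));
        printf("multi  + sync : %s\n", dispatch(&multi, &sync_io));
        return 0;
}

Running it just prints the table of cases; it exists only to make the split below easier to follow.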
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1072,43 +1072,57 @@ static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
         blk_account_io_start(rq, 1);
 }
 
-static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
+static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
+                                         struct blk_mq_ctx *ctx,
+                                         struct request *rq, struct bio *bio)
+{
+        struct request_queue *q = hctx->queue;
+
+        if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE)) {
+                blk_mq_bio_to_request(rq, bio);
+                spin_lock(&ctx->lock);
+insert_rq:
+                __blk_mq_insert_request(hctx, rq, false);
+                spin_unlock(&ctx->lock);
+                return false;
+        } else {
+                spin_lock(&ctx->lock);
+                if (!blk_mq_attempt_merge(q, ctx, bio)) {
+                        blk_mq_bio_to_request(rq, bio);
+                        goto insert_rq;
+                }
+
+                spin_unlock(&ctx->lock);
+                __blk_mq_free_request(hctx, ctx, rq);
+                return true;
+        }
+}
+
+struct blk_map_ctx {
+        struct blk_mq_hw_ctx *hctx;
+        struct blk_mq_ctx *ctx;
+};
+
+static struct request *blk_mq_map_request(struct request_queue *q,
+                                          struct bio *bio,
+                                          struct blk_map_ctx *data)
 {
         struct blk_mq_hw_ctx *hctx;
         struct blk_mq_ctx *ctx;
-        const int is_sync = rw_is_sync(bio->bi_rw);
-        const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
-        int rw = bio_data_dir(bio);
         struct request *rq;
-        unsigned int use_plug, request_count = 0;
-
-        /*
-         * If we have multiple hardware queues, just go directly to
-         * one of those for sync IO.
-         */
-        use_plug = !is_flush_fua && ((q->nr_hw_queues == 1) || !is_sync);
-
-        blk_queue_bounce(q, &bio);
-
-        if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
-                bio_endio(bio, -EIO);
-                return;
-        }
-
-        if (use_plug && !blk_queue_nomerges(q) &&
-            blk_attempt_plug_merge(q, bio, &request_count))
-                return;
+        int rw = bio_data_dir(bio);
 
-        if (blk_mq_queue_enter(q)) {
+        if (unlikely(blk_mq_queue_enter(q))) {
                 bio_endio(bio, -EIO);
-                return;
+                return NULL;
         }
 
         ctx = blk_mq_get_ctx(q);
         hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
-        if (is_sync)
+        if (rw_is_sync(bio->bi_rw))
                 rw |= REQ_SYNC;
+
         trace_block_getrq(q, bio, rw);
         rq = __blk_mq_alloc_request(hctx, ctx, GFP_ATOMIC, false);
         if (likely(rq))
@@ -1123,6 +1137,109 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
         }
 
         hctx->queued++;
+        data->hctx = hctx;
+        data->ctx = ctx;
+        return rq;
+}
+
+/*
+ * Multiple hardware queue variant. This will not use per-process plugs,
+ * but will attempt to bypass the hctx queueing if we can go straight to
+ * hardware for SYNC IO.
+ */
+static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
+{
+        const int is_sync = rw_is_sync(bio->bi_rw);
+        const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
+        struct blk_map_ctx data;
+        struct request *rq;
+
+        blk_queue_bounce(q, &bio);
+
+        if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
+                bio_endio(bio, -EIO);
+                return;
+        }
+
+        rq = blk_mq_map_request(q, bio, &data);
+        if (unlikely(!rq))
+                return;
+
+        if (unlikely(is_flush_fua)) {
+                blk_mq_bio_to_request(rq, bio);
+                blk_insert_flush(rq);
+                goto run_queue;
+        }
+
+        if (is_sync) {
+                int ret;
+
+                blk_mq_bio_to_request(rq, bio);
+                blk_mq_start_request(rq, true);
+
+                /*
+                 * For OK queue, we are done. For error, kill it. Any other
+                 * error (busy), just add it to our list as we previously
+                 * would have done
+                 */
+                ret = q->mq_ops->queue_rq(data.hctx, rq);
+                if (ret == BLK_MQ_RQ_QUEUE_OK)
+                        goto done;
+                else {
+                        __blk_mq_requeue_request(rq);
+
+                        if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
+                                rq->errors = -EIO;
+                                blk_mq_end_io(rq, rq->errors);
+                                goto done;
+                        }
+                }
+        }
+
+        if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
+                /*
+                 * For a SYNC request, send it to the hardware immediately. For
+                 * an ASYNC request, just ensure that we run it later on. The
+                 * latter allows for merging opportunities and more efficient
+                 * dispatching.
+                 */
+run_queue:
+                blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
+        }
+done:
+        blk_mq_put_ctx(data.ctx);
+}
+
+/*
+ * Single hardware queue variant. This will attempt to use any per-process
+ * plug for merging and IO deferral.
+ */
+static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
+{
+        const int is_sync = rw_is_sync(bio->bi_rw);
+        const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
+        unsigned int use_plug, request_count = 0;
+        struct blk_map_ctx data;
+        struct request *rq;
+
+        /*
+         * If we have multiple hardware queues, just go directly to
+         * one of those for sync IO.
+         */
+        use_plug = !is_flush_fua && !is_sync;
+
+        blk_queue_bounce(q, &bio);
+
+        if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
+                bio_endio(bio, -EIO);
+                return;
+        }
+
+        if (use_plug && !blk_queue_nomerges(q) &&
+            blk_attempt_plug_merge(q, bio, &request_count))
+                return;
+
+        rq = blk_mq_map_request(q, bio, &data);
+
         if (unlikely(is_flush_fua)) {
                 blk_mq_bio_to_request(rq, bio);
@@ -1147,37 +1264,23 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
                                 trace_block_plug(q);
                         }
                         list_add_tail(&rq->queuelist, &plug->mq_list);
-                        blk_mq_put_ctx(ctx);
+                        blk_mq_put_ctx(data.ctx);
                         return;
                 }
         }
 
-        if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE)) {
-                blk_mq_bio_to_request(rq, bio);
-                spin_lock(&ctx->lock);
-insert_rq:
-                __blk_mq_insert_request(hctx, rq, false);
-                spin_unlock(&ctx->lock);
-        } else {
-                spin_lock(&ctx->lock);
-                if (!blk_mq_attempt_merge(q, ctx, bio)) {
-                        blk_mq_bio_to_request(rq, bio);
-                        goto insert_rq;
-                }
-
-                spin_unlock(&ctx->lock);
-                __blk_mq_free_request(hctx, ctx, rq);
-        }
-
-        /*
-         * For a SYNC request, send it to the hardware immediately. For an
-         * ASYNC request, just ensure that we run it later on. The latter
-         * allows for merging opportunities and more efficient dispatching.
-         */
-run_queue:
-        blk_mq_run_hw_queue(hctx, !is_sync || is_flush_fua);
-
-        blk_mq_put_ctx(ctx);
+        if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
+                /*
+                 * For a SYNC request, send it to the hardware immediately. For
+                 * an ASYNC request, just ensure that we run it later on. The
+                 * latter allows for merging opportunities and more efficient
+                 * dispatching.
+                 */
+run_queue:
+                blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
+        }
+
+        blk_mq_put_ctx(data.ctx);
 }
 
 /*
@@ -1670,7 +1773,11 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
         q->sg_reserved_size = INT_MAX;
 
-        blk_queue_make_request(q, blk_mq_make_request);
+        if (q->nr_hw_queues > 1)
+                blk_queue_make_request(q, blk_mq_make_request);
+        else
+                blk_queue_make_request(q, blk_sq_make_request);
 
         blk_queue_rq_timed_out(q, blk_mq_rq_timed_out);
         if (set->timeout)
                 blk_queue_rq_timeout(q, set->timeout);