blk-mq: use QUEUE_FLAG_QUIESCED to quiesce queue
It is required that no dispatch can happen any more once blk_mq_quiesce_queue() returns, and we have no such requirement on the APIs that stop a queue. But blk_mq_quiesce_queue() may still fail to block/drain dispatch in the BLK_MQ_S_START_ON_RUN case, so use the newly introduced QUEUE_FLAG_QUIESCED flag and evaluate it inside RCU read-side critical sections to fix this issue.

Also, blk_mq_quiesce_queue() is implemented by stopping the queue, which limits its uses and easily causes races, because any queue restart in other code paths may break blk_mq_quiesce_queue(). With the introduced QUEUE_FLAG_QUIESCED flag, we no longer need to depend on stopping the queue for quiescing.

Signed-off-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Bart Van Assche <Bart.VanAssche@sandisk.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent f660174e8b
commit f4560ffe8c
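For orientation, here is a minimal caller-side sketch (the hypothetical example_driver_reconfigure() is not part of this commit; the two blk-mq calls are the real API) of what this guarantee buys a driver: once blk_mq_quiesce_queue() returns, no ->queue_rq() invocation can still be in flight, because every dispatch path now checks the quiesced flag inside an RCU or SRCU read-side critical section.

/*
 * Minimal usage sketch, assuming a driver that must update state read
 * by its ->queue_rq() handler.  example_driver_reconfigure() is
 * hypothetical and not part of this commit.
 */
static void example_driver_reconfigure(struct request_queue *q)
{
	blk_mq_quiesce_queue(q);	/* no dispatch can happen once this returns */

	/* ... update state that ->queue_rq() depends on ... */

	blk_mq_unquiesce_queue(q);	/* clear QUEUE_FLAG_QUIESCED, restart queues */
}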
block/blk-mq-sched.c
@@ -58,7 +58,8 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
 	bool did_work = false;
 	LIST_HEAD(rq_list);
 
-	if (unlikely(blk_mq_hctx_stopped(hctx)))
+	/* RCU or SRCU read lock is needed before checking quiesced flag */
+	if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
 		return;
 
 	hctx->run++;
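The comment added above is best read together with its caller: dispatch runs inside an RCU read-side critical section (or an SRCU one for BLK_MQ_F_BLOCKING hctxs), which is what allows blk_mq_quiesce_queue() to drain dispatchers that raced with setting the flag. A rough paraphrase of that caller-side pattern in __blk_mq_run_hw_queue() of this kernel version, not part of this diff:

	/* paraphrased caller, shown only for context */
	if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
		rcu_read_lock();
		blk_mq_sched_dispatch_requests(hctx);
		rcu_read_unlock();
	} else {
		int srcu_idx = srcu_read_lock(&hctx->queue_rq_srcu);

		blk_mq_sched_dispatch_requests(hctx);
		srcu_read_unlock(&hctx->queue_rq_srcu, srcu_idx);
	}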
block/blk-mq.c
@@ -170,6 +170,10 @@ void blk_mq_quiesce_queue(struct request_queue *q)
 
 	__blk_mq_stop_hw_queues(q, true);
 
+	spin_lock_irq(q->queue_lock);
+	queue_flag_set(QUEUE_FLAG_QUIESCED, q);
+	spin_unlock_irq(q->queue_lock);
+
 	queue_for_each_hw_ctx(q, hctx, i) {
 		if (hctx->flags & BLK_MQ_F_BLOCKING)
 			synchronize_srcu(&hctx->queue_rq_srcu);
@@ -190,6 +194,10 @@ EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
  */
 void blk_mq_unquiesce_queue(struct request_queue *q)
 {
+	spin_lock_irq(q->queue_lock);
+	queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
+	spin_unlock_irq(q->queue_lock);
+
 	blk_mq_start_stopped_hw_queues(q, true);
 }
 EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);
@@ -1444,7 +1452,8 @@ static void __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 	blk_status_t ret;
 	bool run_queue = true;
 
-	if (blk_mq_hctx_stopped(hctx)) {
+	/* RCU or SRCU read lock is needed before checking quiesced flag */
+	if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
 		run_queue = false;
 		goto insert;
 	}
include/linux/blk-mq.h
@@ -268,6 +268,10 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
  */
 static inline void blk_mq_quiesce_queue_nowait(struct request_queue *q)
 {
+	spin_lock_irq(q->queue_lock);
+	queue_flag_set(QUEUE_FLAG_QUIESCED, q);
+	spin_unlock_irq(q->queue_lock);
+
 	blk_mq_stop_hw_queues(q);
 }
 
include/linux/blkdev.h
@@ -619,6 +619,7 @@ struct request_queue {
 #define QUEUE_FLAG_POLL_STATS  28	/* collecting stats for hybrid polling */
 #define QUEUE_FLAG_REGISTERED  29	/* queue has been registered to a disk */
 #define QUEUE_FLAG_SCSI_PASSTHROUGH 30	/* queue supports SCSI commands */
+#define QUEUE_FLAG_QUIESCED    31	/* queue has been quiesced */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_STACKABLE)	|	\
@@ -715,6 +716,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 #define blk_noretry_request(rq) \
 	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
 			     REQ_FAILFAST_DRIVER))
+#define blk_queue_quiesced(q)	test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
 
 static inline bool blk_account_rq(struct request *rq)
 {
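Taken together, the write side and the read side pair up as follows (a condensed sketch of the guarantee, not code from the tree): a dispatcher either observes QUEUE_FLAG_QUIESCED and bails out, or it entered its read-side critical section before the flag was set, in which case the grace-period wait in blk_mq_quiesce_queue() does not return until that dispatcher has finished.

	/* write side, blk_mq_quiesce_queue(): publish the flag, then wait */
	spin_lock_irq(q->queue_lock);
	queue_flag_set(QUEUE_FLAG_QUIESCED, q);
	spin_unlock_irq(q->queue_lock);
	synchronize_rcu();	/* synchronize_srcu() for BLK_MQ_F_BLOCKING hctxs */

	/* read side, any dispatch path: check the flag under the read lock */
	rcu_read_lock();
	if (!blk_queue_quiesced(q))
		blk_mq_sched_dispatch_requests(hctx);
	rcu_read_unlock();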