Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions block/blk-core.c
Original file line number Diff line number Diff line change
Expand Up @@ -434,6 +434,7 @@ struct request_queue *blk_alloc_queue(struct queue_limits *lim, int node_id)
mutex_init(&q->limits_lock);
mutex_init(&q->rq_qos_mutex);
spin_lock_init(&q->queue_lock);
atomic_set(&q->quiesce_depth, 0);

init_waitqueue_head(&q->mq_freeze_wq);
mutex_init(&q->mq_freeze_lock);
Expand Down
1 change: 0 additions & 1 deletion block/blk-mq-debugfs.c
Original file line number Diff line number Diff line change
Expand Up @@ -89,7 +89,6 @@ static const char *const blk_queue_flag_name[] = {
QUEUE_FLAG_NAME(INIT_DONE),
QUEUE_FLAG_NAME(STATS),
QUEUE_FLAG_NAME(REGISTERED),
QUEUE_FLAG_NAME(QUIESCED),
QUEUE_FLAG_NAME(RQ_ALLOC_TIME),
QUEUE_FLAG_NAME(HCTX_ACTIVE),
QUEUE_FLAG_NAME(SQ_SCHED),
Expand Down
45 changes: 17 additions & 28 deletions block/blk-mq.c
Original file line number Diff line number Diff line change
Expand Up @@ -260,12 +260,12 @@ EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue_non_owner);
*/
/*
 * Raise the queue's quiesce depth without waiting for in-flight dispatches
 * to drain (use blk_mq_quiesce_queue() for the waiting variant).
 *
 * A positive quiesce depth is what blk_queue_quiesced() reports, so no
 * separate queue flag is needed. Calls may nest; each call must be paired
 * with one blk_mq_unquiesce_queue().
 */
void blk_mq_quiesce_queue_nowait(struct request_queue *q)
{
	atomic_inc(&q->quiesce_depth);
	/*
	 * atomic_inc() does not imply ordering. Ensure the store to
	 * quiesce_depth is visible before any subsequent loads in
	 * blk_mq_run_hw_queue(); pairs with the smp_rmb() there.
	 */
	smp_mb__after_atomic();
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);

Expand Down Expand Up @@ -314,21 +314,18 @@ EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
*/
/*
 * Drop one level of quiesce depth. When the depth reaches zero the queue
 * is no longer quiesced and the hardware queues are rerun to dispatch any
 * requests that were inserted while the queue was quiesced.
 *
 * WARNs (and does nothing) on an unbalanced call, i.e. when the depth is
 * already zero.
 */
void blk_mq_unquiesce_queue(struct request_queue *q)
{
	int depth;

	/*
	 * atomic_dec_if_positive() returns the new depth, or -1 without
	 * modifying the counter if it was already zero — which would mean
	 * an unquiesce without a matching quiesce.
	 */
	depth = atomic_dec_if_positive(&q->quiesce_depth);
	if (WARN_ON_ONCE(depth < 0))
		return;

	if (depth == 0) {
		/* Ensure the decrement is visible before running queues */
		smp_mb__after_atomic();
		/* dispatch requests which are inserted during quiescing */
		blk_mq_run_hw_queues(q, true);
	}
}
EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);

Expand Down Expand Up @@ -2362,17 +2359,9 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)

need_run = blk_mq_hw_queue_need_run(hctx);
if (!need_run) {
unsigned long flags;

/*
* Synchronize with blk_mq_unquiesce_queue(), because we check
* if hw queue is quiesced locklessly above, we need the use
* ->queue_lock to make sure we see the up-to-date status to
* not miss rerunning the hw queue.
*/
spin_lock_irqsave(&hctx->queue->queue_lock, flags);
/* Pairs with smp_mb__after_atomic() in blk_mq_unquiesce_queue() */
smp_rmb();
need_run = blk_mq_hw_queue_need_run(hctx);
spin_unlock_irqrestore(&hctx->queue->queue_lock, flags);

if (!need_run)
return;
Expand Down
9 changes: 6 additions & 3 deletions include/linux/blkdev.h
Original file line number Diff line number Diff line change
Expand Up @@ -515,7 +515,8 @@ struct request_queue {

spinlock_t queue_lock;

int quiesce_depth;
/* Atomic quiesce depth - also serves as quiesced indicator (depth > 0) */
atomic_t quiesce_depth;

struct gendisk *disk;

Expand Down Expand Up @@ -660,7 +661,6 @@ enum {
QUEUE_FLAG_INIT_DONE, /* queue is initialized */
QUEUE_FLAG_STATS, /* track IO start and completion times */
QUEUE_FLAG_REGISTERED, /* queue has been registered to a disk */
QUEUE_FLAG_QUIESCED, /* queue has been quiesced */
QUEUE_FLAG_RQ_ALLOC_TIME, /* record rq->alloc_time_ns */
QUEUE_FLAG_HCTX_ACTIVE, /* at least one blk-mq hctx is active */
QUEUE_FLAG_SQ_SCHED, /* single queue style io dispatch */
Expand Down Expand Up @@ -697,7 +697,10 @@ void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
#define blk_noretry_request(rq) \
((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
REQ_FAILFAST_DRIVER))
#define blk_queue_quiesced(q) test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
static inline bool blk_queue_quiesced(struct request_queue *q)
{
return atomic_read(&q->quiesce_depth) > 0;
}
#define blk_queue_pm_only(q) atomic_read(&(q)->pm_only)
#define blk_queue_registered(q) test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)
#define blk_queue_sq_sched(q) test_bit(QUEUE_FLAG_SQ_SCHED, &(q)->queue_flags)
Expand Down
Loading