bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
- if (blk_mq_hctx_has_pending(hctx)) {
+ int srcu_idx;
+ bool need_run;
+
+ /*
+ * When queue is quiesced, we may be switching io scheduler, or
+ * updating nr_hw_queues, or other things, and we can't run queue
+ * any more, and even __blk_mq_delay_run_hw_queue() can't be called safely.
+ *
+ * And queue will be rerun in blk_mq_unquiesce_queue() if it is
+ * quiesced.
+ */
+ if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
+ rcu_read_lock();
+ need_run = !blk_queue_quiesced(hctx->queue) &&
+ blk_mq_hctx_has_pending(hctx);
+ rcu_read_unlock();
+ } else {
+ srcu_idx = srcu_read_lock(hctx->queue_rq_srcu);
+ need_run = !blk_queue_quiesced(hctx->queue) &&
+ blk_mq_hctx_has_pending(hctx);
+ srcu_read_unlock(hctx->queue_rq_srcu, srcu_idx);
+ }
+
+ if (need_run) {
__blk_mq_delay_run_hw_queue(hctx, async, 0);
return true;
}

return false;
}
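The quiesced check above works because blk_mq_quiesce_queue() pairs with these read-side sections: it sets QUEUE_FLAG_QUIESCED and then waits for every RCU reader (or SRCU reader, for BLK_MQ_F_BLOCKING hctxs) to finish, so by the time it returns, any concurrent blk_mq_run_hw_queue() has either seen the flag and skipped the run or already completed. A rough sketch of that counterpart from the same kernel era (paraphrased from memory; details vary between versions):

void blk_mq_quiesce_queue(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;
	bool rcu = false;

	/* mark the queue quiesced so new run attempts see the flag */
	blk_mq_quiesce_queue_nowait(q);

	/* wait out readers that may have sampled the flag before it was set */
	queue_for_each_hw_ctx(q, hctx, i) {
		if (hctx->flags & BLK_MQ_F_BLOCKING)
			synchronize_srcu(hctx->queue_rq_srcu);
		else
			rcu = true;
	}
	if (rcu)
		synchronize_rcu();
}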
@@ ... @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
return -EINVAL;

blk_mq_freeze_queue(q);
+ blk_mq_quiesce_queue(q);
ret = 0;
queue_for_each_hw_ctx(q, hctx, i) {
@@ ... @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
if (!ret)
q->nr_requests = nr;
+ blk_mq_unquiesce_queue(q);
blk_mq_unfreeze_queue(q);
return ret;
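As the comment in blk_mq_run_hw_queue() notes, a quiesced queue is rerun from blk_mq_unquiesce_queue(), so requests inserted while blk_mq_update_nr_requests() holds the queue quiesced are dispatched as soon as it unquiesces. Roughly (the flag helpers and locking differ across kernel versions):

void blk_mq_unquiesce_queue(struct request_queue *q)
{
	/* allow hardware queues to be run again */
	blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q);

	/* dispatch requests which were inserted during quiescing */
	blk_mq_run_hw_queues(q, true);
}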
@@ ... @@ static int elevator_switch_mq(struct request_queue *q,
int ret;
blk_mq_freeze_queue(q);
+ blk_mq_quiesce_queue(q);
if (q->elevator) {
if (q->elevator->registered)
@@ ... @@ static int elevator_switch_mq(struct request_queue *q,
blk_add_trace_msg(q, "elv switch: none");
out:
+ blk_mq_unquiesce_queue(q);
blk_mq_unfreeze_queue(q);
return ret;
}
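Both call sites now follow the same ordering: freeze the queue to drain in-flight requests and hold off new ones, quiesce it to stop hardware queue runs, perform the update, then unquiesce and unfreeze in reverse order. A minimal template of that pattern, with a hypothetical update step as a placeholder:

/* Hypothetical illustration of the ordering used by both call sites above. */
static void update_queue_under_freeze_and_quiesce(struct request_queue *q)
{
	blk_mq_freeze_queue(q);		/* wait for in-flight requests */
	blk_mq_quiesce_queue(q);	/* stop hw queue runs and dispatch */

	/* ... modify scheduler / queue-depth state safely here ... */

	blk_mq_unquiesce_queue(q);	/* reruns queues, resumes dispatch */
	blk_mq_unfreeze_queue(q);	/* admit new requests again */
}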