block, scsi: Make SCSI quiesce and resume work reliably
author Bart Van Assche <bart.vanassche@wdc.com>
Thu, 9 Nov 2017 18:49:58 +0000 (10:49 -0800)
committer Jens Axboe <axboe@kernel.dk>
Sat, 11 Nov 2017 02:53:25 +0000 (19:53 -0700)
The contexts from which a SCSI device can be quiesced or resumed are:
* Writing into /sys/class/scsi_device/*/device/state.
* SCSI parallel (SPI) domain validation.
* The SCSI device power management methods. See also scsi_bus_pm_ops.

It is essential during suspend and resume that neither the filesystem
state nor the filesystem metadata in RAM changes. This is why SCSI
devices are quiesced while the hibernation image is being written or
restored. The SCSI core quiesces devices through scsi_device_quiesce()
and scsi_device_resume(). In the SDEV_QUIESCE state execution of
non-preempt requests is deferred. This is realized by returning
BLKPREP_DEFER from inside scsi_prep_state_check() for quiesced SCSI
devices. To avoid that a full queue prevents power management requests
from being submitted, defer allocation of non-preempt requests for
devices in the quiesced state. This patch has been tested by running
the following commands and by verifying that after each resume the fio
job was still running (an illustrative sketch of how preempt requests
get through a quiesced queue follows the script):

for ((i=0; i<10; i++)); do
  (
    cd /sys/block/md0/md &&
    while true; do
      [ "$(<sync_action)" = "idle" ] && echo check > sync_action
      sleep 1
    done
  ) &
  pids=($!)
  for d in /sys/class/block/sd*[a-z]; do
    bdev=${d#/sys/class/block/}
    hcil=$(readlink "$d/device")
    hcil=${hcil#../../../}
    echo 4 > "$d/queue/nr_requests"
    echo 1 > "/sys/class/scsi_device/$hcil/device/queue_depth"
    fio --name="$bdev" --filename="/dev/$bdev" --buffered=0 --bs=512 \
      --rw=randread --ioengine=libaio --numjobs=4 --iodepth=16       \
      --iodepth_batch=1 --thread --loops=$((2**31)) &
    pids+=($!)
  done
  sleep 1
  echo "$(date) Hibernating ..." >>hibernate-test-log.txt
  systemctl hibernate
  sleep 10
  kill "${pids[@]}"
  echo idle > /sys/block/md0/md/sync_action
  wait
  echo "$(date) Done." >>hibernate-test-log.txt
done
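
An illustrative sketch (not part of the patch; the helper name and the
direct use of blk_mq_alloc_request() are assumptions made for this
example) of how a power management request gets through a quiesced
queue: passing BLK_MQ_REQ_PREEMPT makes blk_queue_enter() admit the
allocation even while the queue is marked PREEMPT_ONLY, whereas an
allocation without that flag now blocks until scsi_device_resume()
clears the flag.

/* Hypothetical caller, for illustration only. */
static struct request *example_alloc_pm_request(struct request_queue *q)
{
	/*
	 * BLK_MQ_REQ_PREEMPT lets this allocation pass the PREEMPT_ONLY
	 * check in blk_queue_enter(); without it the caller would sleep
	 * on mq_freeze_wq until the device leaves the SDEV_QUIESCE state.
	 */
	return blk_mq_alloc_request(q, REQ_OP_DRV_IN, BLK_MQ_REQ_PREEMPT);
}

The returned pointer must be checked with IS_ERR() and released with
blk_mq_free_request() as usual.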

Reported-by: Oleksandr Natalenko <oleksandr@natalenko.name>
References: "I/O hangs after resuming from suspend-to-ram" (https://marc.info/?l=linux-block&m=150340235201348).
Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Tested-by: Martin Steigerwald <martin@lichtvoll.de>
Tested-by: Oleksandr Natalenko <oleksandr@natalenko.name>
Cc: Martin K. Petersen <martin.petersen@oracle.com>
Cc: Ming Lei <ming.lei@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Johannes Thumshirn <jthumshirn@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-core.c
block/blk-mq.c
drivers/scsi/scsi_lib.c
fs/block_dev.c
include/linux/blkdev.h
include/scsi/scsi_device.h

diff --git a/block/blk-core.c b/block/blk-core.c
index edc27689911656cea2a38ec7624575256e79c135..29b08428ae459722c32a9927c8a50e6bd202a7a9 100644
@@ -374,6 +374,7 @@ void blk_clear_preempt_only(struct request_queue *q)
 
        spin_lock_irqsave(q->queue_lock, flags);
        queue_flag_clear(QUEUE_FLAG_PREEMPT_ONLY, q);
+       wake_up_all(&q->mq_freeze_wq);
        spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL_GPL(blk_clear_preempt_only);
@@ -795,15 +796,38 @@ struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
 }
 EXPORT_SYMBOL(blk_alloc_queue);
 
-int blk_queue_enter(struct request_queue *q, bool nowait)
+/**
+ * blk_queue_enter() - try to increase q->q_usage_counter
+ * @q: request queue pointer
+ * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PREEMPT
+ */
+int blk_queue_enter(struct request_queue *q, unsigned int flags)
 {
+       const bool preempt = flags & BLK_MQ_REQ_PREEMPT;
+
        while (true) {
+               bool success = false;
                int ret;
 
-               if (percpu_ref_tryget_live(&q->q_usage_counter))
+               rcu_read_lock_sched();
+               if (percpu_ref_tryget_live(&q->q_usage_counter)) {
+                       /*
+                        * The code that sets the PREEMPT_ONLY flag is
+                        * responsible for ensuring that that flag is globally
+                        * visible before the queue is unfrozen.
+                        */
+                       if (preempt || !blk_queue_preempt_only(q)) {
+                               success = true;
+                       } else {
+                               percpu_ref_put(&q->q_usage_counter);
+                       }
+               }
+               rcu_read_unlock_sched();
+
+               if (success)
                        return 0;
 
-               if (nowait)
+               if (flags & BLK_MQ_REQ_NOWAIT)
                        return -EBUSY;
 
                /*
@@ -816,7 +840,8 @@ int blk_queue_enter(struct request_queue *q, bool nowait)
                smp_rmb();
 
                ret = wait_event_interruptible(q->mq_freeze_wq,
-                               !atomic_read(&q->mq_freeze_depth) ||
+                               (atomic_read(&q->mq_freeze_depth) == 0 &&
+                                (preempt || !blk_queue_preempt_only(q))) ||
                                blk_queue_dying(q));
                if (blk_queue_dying(q))
                        return -ENODEV;
@@ -1445,8 +1470,7 @@ static struct request *blk_old_get_request(struct request_queue *q,
        /* create ioc upfront */
        create_io_context(gfp_mask, q->node);
 
-       ret = blk_queue_enter(q, !(gfp_mask & __GFP_DIRECT_RECLAIM) ||
-                             (op & REQ_NOWAIT));
+       ret = blk_queue_enter(q, flags);
        if (ret)
                return ERR_PTR(ret);
        spin_lock_irq(q->queue_lock);
@@ -2267,8 +2291,10 @@ blk_qc_t generic_make_request(struct bio *bio)
        current->bio_list = bio_list_on_stack;
        do {
                struct request_queue *q = bio->bi_disk->queue;
+               unsigned int flags = bio->bi_opf & REQ_NOWAIT ?
+                       BLK_MQ_REQ_NOWAIT : 0;
 
-               if (likely(blk_queue_enter(q, bio->bi_opf & REQ_NOWAIT) == 0)) {
+               if (likely(blk_queue_enter(q, flags) == 0)) {
                        struct bio_list lower, same;
 
                        /* Create a fresh bio_list for all subordinate requests */
@@ -2327,7 +2353,7 @@ blk_qc_t direct_make_request(struct bio *bio)
        if (!generic_make_request_checks(bio))
                return BLK_QC_T_NONE;
 
-       if (unlikely(blk_queue_enter(q, nowait))) {
+       if (unlikely(blk_queue_enter(q, nowait ? BLK_MQ_REQ_NOWAIT : 0))) {
                if (nowait && !blk_queue_dying(q))
                        bio->bi_status = BLK_STS_AGAIN;
                else
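
Callers that enter a queue directly (rather than through request
allocation) now pass the same flags; a minimal sketch of the
non-preempt case, using a hypothetical helper name:

/* Hypothetical caller, for illustration only. */
static int example_touch_queue(struct request_queue *q)
{
	/*
	 * Neither BLK_MQ_REQ_NOWAIT nor BLK_MQ_REQ_PREEMPT: this call
	 * blocks while the queue is frozen or marked PREEMPT_ONLY.
	 */
	int ret = blk_queue_enter(q, 0);

	if (ret)
		return ret;	/* -ENODEV if the queue is dying */
	/* ... it is now safe to submit work against q ... */
	blk_queue_exit(q);
	return 0;
}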
diff --git a/block/blk-mq.c b/block/blk-mq.c
index e21876778cecd0a3d647f912bd19ccf22aea2b67..211bc8a3e2cc48f37993fdedeee5ae9dd694f522 100644
@@ -389,7 +389,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
        struct request *rq;
        int ret;
 
-       ret = blk_queue_enter(q, flags & BLK_MQ_REQ_NOWAIT);
+       ret = blk_queue_enter(q, flags);
        if (ret)
                return ERR_PTR(ret);
 
@@ -428,7 +428,7 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
        if (hctx_idx >= q->nr_hw_queues)
                return ERR_PTR(-EIO);
 
-       ret = blk_queue_enter(q, true);
+       ret = blk_queue_enter(q, flags);
        if (ret)
                return ERR_PTR(ret);
 
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index eb129dfc2ebef6c5929b318364e966170321c87f..f907e2f8c1ddb83fdff588cec9fc0daedd5dbc72 100644
@@ -2947,21 +2947,37 @@ static void scsi_wait_for_queuecommand(struct scsi_device *sdev)
 int
 scsi_device_quiesce(struct scsi_device *sdev)
 {
+       struct request_queue *q = sdev->request_queue;
        int err;
 
+       /*
+        * It is allowed to call scsi_device_quiesce() multiple times from
+        * the same context but concurrent scsi_device_quiesce() calls are
+        * not allowed.
+        */
+       WARN_ON_ONCE(sdev->quiesced_by && sdev->quiesced_by != current);
+
+       blk_set_preempt_only(q);
+
+       blk_mq_freeze_queue(q);
+       /*
+        * Ensure that the effect of blk_set_preempt_only() will be visible
+        * for percpu_ref_tryget() callers that occur after the queue
+        * unfreeze even if the queue was already frozen before this function
+        * was called. See also https://lwn.net/Articles/573497/.
+        */
+       synchronize_rcu();
+       blk_mq_unfreeze_queue(q);
+
        mutex_lock(&sdev->state_mutex);
        err = scsi_device_set_state(sdev, SDEV_QUIESCE);
+       if (err == 0)
+               sdev->quiesced_by = current;
+       else
+               blk_clear_preempt_only(q);
        mutex_unlock(&sdev->state_mutex);
 
-       if (err)
-               return err;
-
-       scsi_run_queue(sdev->request_queue);
-       while (atomic_read(&sdev->device_busy)) {
-               msleep_interruptible(200);
-               scsi_run_queue(sdev->request_queue);
-       }
-       return 0;
+       return err;
 }
 EXPORT_SYMBOL(scsi_device_quiesce);
 
@@ -2981,9 +2997,11 @@ void scsi_device_resume(struct scsi_device *sdev)
         * device deleted during suspend)
         */
        mutex_lock(&sdev->state_mutex);
-       if (sdev->sdev_state == SDEV_QUIESCE &&
-           scsi_device_set_state(sdev, SDEV_RUNNING) == 0)
-               scsi_run_queue(sdev->request_queue);
+       WARN_ON_ONCE(!sdev->quiesced_by);
+       sdev->quiesced_by = NULL;
+       blk_clear_preempt_only(sdev->request_queue);
+       if (sdev->sdev_state == SDEV_QUIESCE)
+               scsi_device_set_state(sdev, SDEV_RUNNING);
        mutex_unlock(&sdev->state_mutex);
 }
 EXPORT_SYMBOL(scsi_device_resume);
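
For reference, a hedged sketch of the suspend/resume pairing this
relies on (the function names are hypothetical; the real callers are
the SCSI power management methods and SPI domain validation mentioned
above):

/* Hypothetical suspend path: quiesce, then only preempt requests pass. */
static int example_suspend_sdev(struct scsi_device *sdev)
{
	int err = scsi_device_quiesce(sdev);

	if (err)
		return err;
	/*
	 * The queue is now PREEMPT_ONLY: only BLK_MQ_REQ_PREEMPT
	 * allocations are admitted until scsi_device_resume() is called.
	 */
	return 0;
}

static void example_resume_sdev(struct scsi_device *sdev)
{
	/*
	 * Clears PREEMPT_ONLY and wakes up submitters blocked in
	 * blk_queue_enter().
	 */
	scsi_device_resume(sdev);
}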
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 4afa4d5ff969e962bd16cc454904af0de20d5467..04973f48442243a441364b2f4cf75b1c8d50257c 100644
@@ -662,7 +662,7 @@ int bdev_read_page(struct block_device *bdev, sector_t sector,
        if (!ops->rw_page || bdev_get_integrity(bdev))
                return result;
 
-       result = blk_queue_enter(bdev->bd_queue, false);
+       result = blk_queue_enter(bdev->bd_queue, 0);
        if (result)
                return result;
        result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, false);
@@ -698,7 +698,7 @@ int bdev_write_page(struct block_device *bdev, sector_t sector,
 
        if (!ops->rw_page || bdev_get_integrity(bdev))
                return -EOPNOTSUPP;
-       result = blk_queue_enter(bdev->bd_queue, false);
+       result = blk_queue_enter(bdev->bd_queue, 0);
        if (result)
                return result;
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 2147e2381a22cc95ba90fa3bb1cc704331223e87..402c9d536ae10e42d700d456f5ea39dc07b85b53 100644
@@ -959,7 +959,7 @@ extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
 extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
                         struct scsi_ioctl_command __user *);
 
-extern int blk_queue_enter(struct request_queue *q, bool nowait);
+extern int blk_queue_enter(struct request_queue *q, unsigned int flags);
 extern void blk_queue_exit(struct request_queue *q);
 extern void blk_start_queue(struct request_queue *q);
 extern void blk_start_queue_async(struct request_queue *q);
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 82e93ee94708c9f7de073b3e6c47826664525075..6f0f1e242e236da26c716887eb7d64519c721fc7 100644
@@ -219,6 +219,7 @@ struct scsi_device {
        unsigned char           access_state;
        struct mutex            state_mutex;
        enum scsi_device_state sdev_state;
+       struct task_struct      *quiesced_by;
        unsigned long           sdev_data[0];
 } __attribute__((aligned(sizeof(unsigned long))));