block: avoid ordered task state change for polled IO
authorJens Axboe <axboe@kernel.dk>
Fri, 16 Nov 2018 15:37:34 +0000 (08:37 -0700)
committerJens Axboe <axboe@kernel.dk>
Mon, 19 Nov 2018 15:34:49 +0000 (08:34 -0700)
For the core poll helper, the task state setting doesn't need to imply any
atomics, as it's the current task itself that is being modified and
we're not going to sleep.

For IRQ driven IO, the wakeup path has the necessary barriers, so we do
not need to use the heavy-handed version of the task state setting.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-mq.c
fs/block_dev.c
fs/iomap.c
mm/page_io.c

index 32b246ed44c0ac38b01bb8aeb161b63f52cc42a9..7fc4abb4cc36e437a4ce684ef9eb1cf3824bf553 100644 (file)
@@ -3331,12 +3331,12 @@ static bool __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq)
                ret = q->mq_ops->poll(hctx, rq->tag);
                if (ret > 0) {
                        hctx->poll_success++;
-                       set_current_state(TASK_RUNNING);
+                       __set_current_state(TASK_RUNNING);
                        return true;
                }
 
                if (signal_pending_state(state, current))
-                       set_current_state(TASK_RUNNING);
+                       __set_current_state(TASK_RUNNING);
 
                if (current->state == TASK_RUNNING)
                        return true;
index 4d79bc80fb41edacffcfddae8a2e07202f821d2c..64ba27b8b7549e43caf5d7c85ab42fe18107f72a 100644 (file)
@@ -237,9 +237,11 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
 
        qc = submit_bio(&bio);
        for (;;) {
-               set_current_state(TASK_UNINTERRUPTIBLE);
+               __set_current_state(TASK_UNINTERRUPTIBLE);
+
                if (!READ_ONCE(bio.bi_private))
                        break;
+
                if (!(iocb->ki_flags & IOCB_HIPRI) ||
                    !blk_poll(bdev_get_queue(bdev), qc))
                        io_schedule();
@@ -415,7 +417,8 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
                return -EIOCBQUEUED;
 
        for (;;) {
-               set_current_state(TASK_UNINTERRUPTIBLE);
+               __set_current_state(TASK_UNINTERRUPTIBLE);
+
                if (!READ_ONCE(dio->waiter))
                        break;
 
index b0462b363badff9dc3896cf8c478c6afd1ed4bec..c5df035ace6f3058fd3472e2c302cdeb63a4203e 100644 (file)
@@ -1888,7 +1888,8 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
                        return -EIOCBQUEUED;
 
                for (;;) {
-                       set_current_state(TASK_UNINTERRUPTIBLE);
+                       __set_current_state(TASK_UNINTERRUPTIBLE);
+
                        if (!READ_ONCE(dio->submit.waiter))
                                break;
 
index 57572ff46016dde4945622ead210593d183e3806..a7271fa481f6e30f7217e191ef329b726866a231 100644 (file)
@@ -405,7 +405,8 @@ int swap_readpage(struct page *page, bool synchronous)
        bio_get(bio);
        qc = submit_bio(bio);
        while (synchronous) {
-               set_current_state(TASK_UNINTERRUPTIBLE);
+               __set_current_state(TASK_UNINTERRUPTIBLE);
+
                if (!READ_ONCE(bio->bi_private))
                        break;