From: Jens Axboe <axboe@kernel.dk>
Date: Sun, 18 Nov 2018 22:46:03 +0000 (-0700)
Subject: Merge tag 'v4.20-rc3' into for-4.21/block
X-Git-Url: http://git.cdn.openwrt.org/?a=commitdiff_plain;h=a78b03bc7300e4f17b1e510884bea1095d92b17b;p=openwrt%2Fstaging%2Fblogic.git

Merge tag 'v4.20-rc3' into for-4.21/block

Merge in -rc3 to resolve a few conflicts, but also to get a few important
fixes that have gone into mainline since the block 4.21 branch was forked
off (most notably the SCSI queue issue, which is both a conflict AND a
needed fix).

Signed-off-by: Jens Axboe <axboe@kernel.dk>
---

a78b03bc7300e4f17b1e510884bea1095d92b17b
diff --cc block/blk-core.c
index 0b684a520a11,deb56932f8c4..d6e8ab9ca99d
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@@ -352,11 -798,10 +352,10 @@@ void blk_cleanup_queue(struct request_q
  	 * dispatch may still be in-progress since we dispatch requests
  	 * from more than one contexts.
  	 *
- 	 * No need to quiesce queue if it isn't initialized yet since
- 	 * blk_freeze_queue() should be enough for cases of passthrough
- 	 * request.
+ 	 * We rely on driver to deal with the race in case that queue
+ 	 * initialization isn't done.
  	 */
 -	if (q->mq_ops && blk_queue_init_done(q))
 +	if (queue_is_mq(q) && blk_queue_init_done(q))
  		blk_mq_quiesce_queue(q);
  
  	/* for synchronous bio-based driver finish in-flight integrity i/o */
diff --cc block/blk.h
index 027a0ccc175e,0089fefdf771..816a9abb87cd
--- a/block/blk.h
+++ b/block/blk.h
@@@ -233,6 -380,31 +233,16 @@@ static inline void req_set_nomerge(stru
  	q->last_merge = NULL;
  }
  
 -/*
 - * Steal a bit from this field for legacy IO path atomic IO marking. Note that
 - * setting the deadline clears the bottom bit, potentially clearing the
 - * completed bit. The user has to be OK with this (current ones are fine).
 - */
 -static inline void blk_rq_set_deadline(struct request *rq, unsigned long time)
 -{
 -	rq->__deadline = time & ~0x1UL;
 -}
 -
 -static inline unsigned long blk_rq_deadline(struct request *rq)
 -{
 -	return rq->__deadline & ~0x1UL;
 -}
 -
+ /*
+  * The max size one bio can handle is UINT_MAX because bvec_iter.bi_size
+  * is defined as 'unsigned int', and it has to be aligned with the logical
+  * block size, which is the minimum unit accepted by the hardware.
+  */
+ static inline unsigned int bio_allowed_max_sectors(struct request_queue *q)
+ {
+ 	return round_down(UINT_MAX, queue_logical_block_size(q)) >> 9;
+ }
+ 
  /*
   * Internal io_context interface
   */
diff --cc drivers/scsi/scsi_lib.c
index 5d83a162d03b,fa6e0c3b3aa6..0df15cb738d2
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@@ -601,23 -687,46 +601,30 @@@ static bool scsi_end_request(struct req
  		destroy_rcu_head(&cmd->rcu);
  	}
  
 -	if (req->mq_ctx) {
 -		/*
 -		 * In the MQ case the command gets freed by __blk_mq_end_request,
 -		 * so we have to do all cleanup that depends on it earlier.
 -		 *
 -		 * We also can't kick the queues from irq context, so we
 -		 * will have to defer it to a workqueue.
 -		 */
 -		scsi_mq_uninit_cmd(cmd);
 -
 -		/*
 -		 * queue is still alive, so grab the ref for preventing it
 -		 * from being cleaned up during running queue.
 -		 */
 -		percpu_ref_get(&q->q_usage_counter);
 -
 -		__blk_mq_end_request(req, error);
 -
 -		if (scsi_target(sdev)->single_lun ||
 -		    !list_empty(&sdev->host->starved_list))
 -			kblockd_schedule_work(&sdev->requeue_work);
 -		else
 -			blk_mq_run_hw_queues(q, true);
 -
 -		percpu_ref_put(&q->q_usage_counter);
 -	} else {
 -		unsigned long flags;
 +	/*
 +	 * In the MQ case the command gets freed by __blk_mq_end_request,
 +	 * so we have to do all cleanup that depends on it earlier.
 +	 *
 +	 * We also can't kick the queues from irq context, so we
 +	 * will have to defer it to a workqueue.
 +	 */
 +	scsi_mq_uninit_cmd(cmd);
  
 -		if (bidi_bytes)
 -			scsi_release_bidi_buffers(cmd);
 -		scsi_release_buffers(cmd);
 -		scsi_put_command(cmd);
++	/*
++	 * queue is still alive, so grab the ref for preventing it
++	 * from being cleaned up during running queue.
++	 */
++	percpu_ref_get(&q->q_usage_counter);
 +
 -		spin_lock_irqsave(q->queue_lock, flags);
 -		blk_finish_request(req, error);
 -		spin_unlock_irqrestore(q->queue_lock, flags);
 +	__blk_mq_end_request(req, error);
  
 -		scsi_run_queue(q);
 -	}
 +	if (scsi_target(sdev)->single_lun ||
 +	    !list_empty(&sdev->host->starved_list))
 +		kblockd_schedule_work(&sdev->requeue_work);
 +	else
 +		blk_mq_run_hw_queues(q, true);
  
++	percpu_ref_put(&q->q_usage_counter);
  	put_device(&sdev->sdev_gendev);
  	return false;
  }
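
For reference, the arithmetic in the bio_allowed_max_sectors() helper added in the
block/blk.h hunk above can be checked in isolation. The following is a minimal
user-space sketch, not kernel code: the 4096-byte logical block size is an assumed
example value (queue_logical_block_size() supplies the real per-device value), and
round_down() is re-implemented locally for the example, assuming power-of-two
alignment.

/*
 * Illustrates the calculation used by bio_allowed_max_sectors(): cap the
 * bio size at UINT_MAX bytes, round down to a multiple of the logical
 * block size, then convert bytes to 512-byte sectors.
 */
#include <stdio.h>
#include <limits.h>

/* local stand-in for the kernel's round_down(); valid for power-of-two y */
#define round_down(x, y)	((x) & ~((y) - 1))

int main(void)
{
	unsigned int lbs = 4096;	/* assumed logical block size for the example */
	unsigned int max_bytes = round_down(UINT_MAX, lbs);
	unsigned int max_sectors = max_bytes >> 9;

	printf("max bio bytes:   %u\n", max_bytes);	/* 4294963200 */
	printf("max bio sectors: %u\n", max_sectors);	/* 8388600 */
	return 0;
}

With a 512-byte logical block size the same computation yields 8388607 sectors;
either way the result is a per-queue cap that keeps a single bio's byte count
both representable in bvec_iter.bi_size and aligned to the device's logical
block size.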