Merge tag 'for-4.21/block-20181221' of git://git.kernel.dk/linux-block
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 28 Dec 2018 21:19:59 +0000 (13:19 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 28 Dec 2018 21:19:59 +0000 (13:19 -0800)
Pull block updates from Jens Axboe:
 "This is the main pull request for block/storage for 4.21.

  Larger than usual, it was a busy round with lots of goodies queued up.
  Most notable is the removal of the old IO stack, which has been a long
   time coming. No new features have gone in for a while; everything
   coming in this week has been fixes for things that were previously
   merged.

  This contains:

   - Use atomic counters instead of semaphores for mtip32xx (Arnd)

   - Cleanup of the mtip32xx request setup (Christoph)

   - Fix for circular locking dependency in loop (Jan, Tetsuo)

   - bcache (Coly, Guoju, Shenghui)
      * Optimizations for writeback caching
      * Various fixes and improvements

   - nvme (Chaitanya, Christoph, Sagi, Jay, me, Keith)
      * host and target support for NVMe over TCP
      * Error log page support
      * Support for separate read/write/poll queues
      * Much improved polling
      * discard OOM fallback
      * Tracepoint improvements

   - lightnvm (Hans, Hua, Igor, Matias, Javier)
      * Igor added packed metadata to pblk. Now drives without metadata
        per LBA can be used as well.
      * Fix from Geert for an uninitialized value on chunk metadata
        reads.
      * Fixes from Hans and Javier to pblk recovery and write path.
      * Fix from Hua Su for a race condition in the pblk recovery
        code.
      * Scan optimization added to pblk recovery from Zhoujie.
      * Small geometry cleanup from me.

   - Conversion of the last few drivers that used the legacy path to
     blk-mq (me)

   - Removal of legacy IO path in SCSI (me, Christoph)

   - Removal of legacy IO stack and schedulers (me)

   - Support for much better polling, now without interrupts at all.
     blk-mq adds support for multiple queue maps, which enables us to
     have a map per type. This in turn enables nvme to have separate
     completion queues for polling, which can then be interrupt-less.
     It also means we're ready for async polled IO, which is hopefully
     coming in the next release. (A driver-side sketch of the queue-map
     setup follows this summary.)

   - Killing of (now) unused block exports (Christoph)

   - Unification of the blk-rq-qos and blk-wbt wait handling (Josef)

   - Support for zoned testing with null_blk (Masato)

   - sx8 conversion to per-host tag sets (Christoph)

   - IO priority improvements (Damien)

   - mq-deadline zoned fix (Damien)

   - Ref count blkcg series (Dennis)

   - Lots of blk-mq improvements and speedups (me)

   - sbitmap scalability improvements (me)

   - Make core inflight IO accounting per-cpu (Mikulas)

   - Export timeout setting in sysfs (Weiping)

   - Cleanup the direct issue path (Jianchao)

   - Export blk-wbt internals in block debugfs for easier debugging
     (Ming)

   - Lots of other fixes and improvements"
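
As a rough illustration of the queue-map support called out in the
polling bullet above: a blk-mq driver can now register several queue
maps in its tag set, one per hctx type, and steer poll-only hardware
queues into a map of their own. The sketch below is a minimal example
under stated assumptions, not the nvme implementation: foo_ctrl,
foo_map_queues and the nr_io_queues/nr_poll_queues counts are
hypothetical, while blk_mq_tag_set, blk_mq_queue_map, the HCTX_TYPE_*
constants and blk_mq_map_queues() are the blk-mq interfaces this
series introduces or extends.

    #include <linux/blk-mq.h>

    /* Hypothetical driver state: queue counts chosen at probe time. */
    struct foo_ctrl {
            unsigned int nr_io_queues;
            unsigned int nr_poll_queues;
    };

    /* .map_queues callback for a tag set with set->nr_maps == 2. */
    static int foo_map_queues(struct blk_mq_tag_set *set)
    {
            struct foo_ctrl *ctrl = set->driver_data;

            /* Regular queues come first and complete via interrupt. */
            set->map[HCTX_TYPE_DEFAULT].nr_queues = ctrl->nr_io_queues;
            set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
            blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);

            /* Poll queues sit behind the regular ones; completions on
             * these are reaped by polling, with no interrupt at all.
             */
            set->map[HCTX_TYPE_POLL].nr_queues = ctrl->nr_poll_queues;
            set->map[HCTX_TYPE_POLL].queue_offset = ctrl->nr_io_queues;
            blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);

            return 0;
    }

A driver opts in by setting nr_maps above 1 and wiring the callback
into its blk_mq_ops before blk_mq_alloc_tag_set(); requests routed to
the poll map are then only ever completed through the polling path.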

* tag 'for-4.21/block-20181221' of git://git.kernel.dk/linux-block: (364 commits)
  kyber: use sbitmap add_wait_queue/list_del wait helpers
  sbitmap: add helpers for add/del wait queue handling
  block: save irq state in blkg_lookup_create()
  dm: don't reuse bio for flushes
  nvme-pci: trace SQ status on completions
  nvme-rdma: implement polling queue map
  nvme-fabrics: allow user to pass in nr_poll_queues
  nvme-fabrics: allow nvmf_connect_io_queue to poll
  nvme-core: optionally poll sync commands
  block: make request_to_qc_t public
  nvme-tcp: fix spelling mistake "attepmpt" -> "attempt"
  nvme-tcp: fix endianess annotations
  nvmet-tcp: fix endianess annotations
  nvme-pci: refactor nvme_poll_irqdisable to make sparse happy
  nvme-pci: only set nr_maps to 2 if poll queues are supported
  nvmet: use a macro for default error location
  nvmet: fix comparison of a u16 with -1
  blk-mq: enable IO poll if .nr_queues of type poll > 0
  blk-mq: change blk_mq_queue_busy() to blk_mq_queue_inflight()
  blk-mq: skip zero-queue maps in blk_mq_map_swqueue
  ...

14 files changed:
block/bio.c
block/blk-zoned.c
drivers/md/dm.c
drivers/mmc/core/block.c
drivers/net/wireless/ath/ath6kl/cfg80211.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/sd.c
fs/aio.c
fs/iomap.c
include/linux/fs.h
include/linux/skbuff.h
init/main.c
kernel/cgroup/cgroup.c
net/core/datagram.c

diff --cc block/bio.c
Simple merge
diff --cc block/blk-zoned.c
Simple merge
diff --cc drivers/md/dm.c
Simple merge
Simple merge
Simple merge
diff --cc drivers/scsi/sd.c
index bd0a5c694a97eb0c32fce4f556b12f426fda8286,4a6ed2fc8c71c54f39d38d7f83cf3cbe6d996ce5..a1a44f52e0e8414202a46437a64b937604318e9b
@@@ -760,10 -759,9 +760,10 @@@ static blk_status_t sd_setup_unmap_cmnd
        unsigned int data_len = 24;
        char *buf;
  
 -      rq->special_vec.bv_page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
 +      rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
        if (!rq->special_vec.bv_page)
-               return BLKPREP_DEFER;
+               return BLK_STS_RESOURCE;
 +      clear_highpage(rq->special_vec.bv_page);
        rq->special_vec.bv_offset = 0;
        rq->special_vec.bv_len = data_len;
        rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
@@@ -794,10 -793,9 +795,10 @@@ static blk_status_t sd_setup_write_same
        u32 nr_sectors = blk_rq_sectors(rq) >> (ilog2(sdp->sector_size) - 9);
        u32 data_len = sdp->sector_size;
  
 -      rq->special_vec.bv_page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
 +      rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
        if (!rq->special_vec.bv_page)
-               return BLKPREP_DEFER;
+               return BLK_STS_RESOURCE;
 +      clear_highpage(rq->special_vec.bv_page);
        rq->special_vec.bv_offset = 0;
        rq->special_vec.bv_len = data_len;
        rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
@@@ -825,10 -824,9 +827,10 @@@ static blk_status_t sd_setup_write_same
        u32 nr_sectors = blk_rq_sectors(rq) >> (ilog2(sdp->sector_size) - 9);
        u32 data_len = sdp->sector_size;
  
 -      rq->special_vec.bv_page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
 +      rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
        if (!rq->special_vec.bv_page)
-               return BLKPREP_DEFER;
+               return BLK_STS_RESOURCE;
 +      clear_highpage(rq->special_vec.bv_page);
        rq->special_vec.bv_offset = 0;
        rq->special_vec.bv_len = data_len;
        rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
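
For context on the conversion in the three hunks above: a plain
GFP_ATOMIC page allocation can fail under memory pressure, exactly
when the discard or write-same payload page is needed to make forward
progress, so sd now draws the page from a mempool with a guaranteed
minimum reserve. Pooled pages may be recycled with stale contents, so
__GFP_ZERO no longer applies and the page is zeroed explicitly with
clear_highpage(). (The BLKPREP_DEFER -> BLK_STS_RESOURCE change in the
same hunks is fallout from the legacy-path removal: sd reports
resource exhaustion as a blk_status_t now.) A minimal sketch of how
such a pool is set up, assuming the sd_page_pool name from the hunks
and a hypothetical reserve size:

    #include <linux/mempool.h>

    #define SD_MEMPOOL_SIZE 2   /* hypothetical reserve count */

    static mempool_t *sd_page_pool;

    static int __init sd_pool_init(void)
    {
            /* Keep SD_MEMPOOL_SIZE order-0 pages in reserve. */
            sd_page_pool = mempool_create_page_pool(SD_MEMPOOL_SIZE, 0);
            if (!sd_page_pool)
                    return -ENOMEM;
            return 0;
    }

Pages taken with mempool_alloc() must be returned with mempool_free()
rather than __free_page() when the request payload is released.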
diff --cc fs/aio.c
Simple merge
diff --cc fs/iomap.c
Simple merge
diff --cc include/linux/fs.h
Simple merge
diff --cc include/linux/skbuff.h
Simple merge
diff --cc init/main.c
Simple merge
diff --cc kernel/cgroup/cgroup.c
Simple merge
diff --cc net/core/datagram.c
index 4bf62b1afa3bc228067add76954b5652d8b21586,ef262282c8beb30223942de1547f4d393dfe9ad0..b2651bb6d2a31dde065000c59bbbf3dfdadd976e
@@@ -649,85 -693,53 +693,10 @@@ static int skb_copy_and_csum_datagram(c
                                      struct iov_iter *to, int len,
                                      __wsum *csump)
  {
-       int start = skb_headlen(skb);
-       int i, copy = start - offset, start_off = offset;
-       struct sk_buff *frag_iter;
-       int pos = 0;
-       int n;
-       /* Copy header. */
-       if (copy > 0) {
-               if (copy > len)
-                       copy = len;
-               n = csum_and_copy_to_iter(skb->data + offset, copy, csump, to);
-               offset += n;
-               if (n != copy)
-                       goto fault;
-               if ((len -= copy) == 0)
-                       return 0;
-               pos = copy;
-       }
-       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-               int end;
-               const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-               WARN_ON(start > offset + len);
-               end = start + skb_frag_size(frag);
-               if ((copy = end - offset) > 0) {
-                       __wsum csum2 = 0;
-                       struct page *page = skb_frag_page(frag);
-                       u8  *vaddr = kmap(page);
-                       if (copy > len)
-                               copy = len;
-                       n = csum_and_copy_to_iter(vaddr + frag->page_offset +
-                                                 offset - start, copy,
-                                                 &csum2, to);
-                       kunmap(page);
-                       offset += n;
-                       if (n != copy)
-                               goto fault;
-                       *csump = csum_block_add(*csump, csum2, pos);
-                       if (!(len -= copy))
-                               return 0;
-                       pos += copy;
-               }
-               start = end;
-       }
-       skb_walk_frags(skb, frag_iter) {
-               int end;
-               WARN_ON(start > offset + len);
-               end = start + frag_iter->len;
-               if ((copy = end - offset) > 0) {
-                       __wsum csum2 = 0;
-                       if (copy > len)
-                               copy = len;
-                       if (skb_copy_and_csum_datagram(frag_iter,
-                                                      offset - start,
-                                                      to, copy,
-                                                      &csum2))
-                               goto fault;
-                       *csump = csum_block_add(*csump, csum2, pos);
-                       if ((len -= copy) == 0)
-                               return 0;
-                       offset += copy;
-                       pos += copy;
-               }
-               start = end;
-       }
-       if (!len)
-               return 0;
- fault:
-       iov_iter_revert(to, offset - start_off);
-       return -EFAULT;
+       return __skb_datagram_iter(skb, offset, to, len, true,
+                       csum_and_copy_to_iter, csump);
  }
  
 -__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
 -{
 -      __sum16 sum;
 -
 -      sum = csum_fold(skb_checksum(skb, 0, len, skb->csum));
 -      if (likely(!sum)) {
 -              if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
 -                  !skb->csum_complete_sw)
 -                      netdev_rx_csum_fault(skb->dev);
 -      }
 -      if (!skb_shared(skb))
 -              skb->csum_valid = !sum;
 -      return sum;
 -}
 -EXPORT_SYMBOL(__skb_checksum_complete_head);
 -
 -__sum16 __skb_checksum_complete(struct sk_buff *skb)
 -{
 -      __wsum csum;
 -      __sum16 sum;
 -
 -      csum = skb_checksum(skb, 0, skb->len, 0);
 -
 -      /* skb->csum holds pseudo checksum */
 -      sum = csum_fold(csum_add(skb->csum, csum));
 -      if (likely(!sum)) {
 -              if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
 -                  !skb->csum_complete_sw)
 -                      netdev_rx_csum_fault(skb->dev);
 -      }
 -
 -      if (!skb_shared(skb)) {
 -              /* Save full packet checksum */
 -              skb->csum = csum;
 -              skb->ip_summed = CHECKSUM_COMPLETE;
 -              skb->csum_complete_sw = 1;
 -              skb->csum_valid = !sum;
 -      }
 -
 -      return sum;
 -}
 -EXPORT_SYMBOL(__skb_checksum_complete);
 -
  /**
   *    skb_copy_and_csum_datagram_msg - Copy and checksum skb to user iovec.
   *    @skb: skbuff
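
The hunk above is part of a refactor that folds the open-coded
header/frag/frag-list walk into one helper, __skb_datagram_iter(),
parameterized by a per-chunk copy callback; the checksumming variant
now just passes csum_and_copy_to_iter() and its csum accumulator. For
illustration, the plain-copy variant becomes a thin wrapper in the
same style. This is a sketch of the pattern, not a verbatim quote of
the resulting source:

    /* Adapter matching the callback signature: plain copy, no csum. */
    static size_t simple_copy_to_iter(const void *addr, size_t bytes,
                                      void *data __always_unused,
                                      struct iov_iter *i)
    {
            return copy_to_iter(addr, bytes, i);
    }

    int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
                               struct iov_iter *to, int len)
    {
            return __skb_datagram_iter(skb, offset, to, len, false,
                                       simple_copy_to_iter, NULL);
    }

Keeping a single walker means the skb geometry handling (linear head,
page frags, nested frag lists) lives in one place, with only the copy
step differing between the checksum and non-checksum paths.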