RDMA/hns: Add a group of interfaces for optimizing the buffer getting flow
Author:     Lijun Ou <oulijun@huawei.com>
AuthorDate: Sat, 8 Jun 2019 06:46:09 +0000 (14:46 +0800)
Commit:     Doug Ledford <dledford@redhat.com>
CommitDate: Thu, 20 Jun 2019 16:56:34 +0000 (12:56 -0400)
Currently, the code for getting umem and kmem buffers is scattered across many
files. This patch adds a group of interfaces to simplify the buffer getting flow.
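
As a rough illustration of the intended calling pattern (the region split,
hop numbers, page counts and the hr_dev/hr_qp variables below are made up
for the example and are not taken from this patch; error handling is
omitted for brevity), a consumer of the new helpers would do something like:

	struct hns_roce_buf_region regions[2];
	dma_addr_t *page_lists[2];
	int sq_pages = 16, rq_pages = 16;	/* illustrative page counts */
	int count, ret;

	/* describe each hardware region of the buffer */
	hns_roce_init_buf_region(&regions[0], 2, 0, sq_pages);
	hns_roce_init_buf_region(&regions[1], 2, sq_pages, rq_pages);

	/* allocate one dma_addr_t array per region */
	ret = hns_roce_alloc_buf_list(regions, page_lists, 2);
	if (ret)
		return ret;

	/* collect the page dma addresses of a kernel buffer ... */
	count = hns_roce_get_kmem_bufs(hr_dev, page_lists[0], sq_pages,
				       regions[0].offset, &hr_qp->hr_buf);

	/* ... or of a user buffer described by an ib_umem */
	count = hns_roce_get_umem_bufs(hr_dev, page_lists[1], rq_pages,
				       regions[1].offset, hr_qp->umem,
				       PAGE_SHIFT);

	/* write the collected addresses into the MTT, then drop the lists */
	hns_roce_free_buf_list(page_lists, 2);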

Signed-off-by: Xi Wang <wangxi11@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
drivers/infiniband/hw/hns/hns_roce_alloc.c
drivers/infiniband/hw/hns/hns_roce_device.h

diff --git a/drivers/infiniband/hw/hns/hns_roce_alloc.c b/drivers/infiniband/hw/hns/hns_roce_alloc.c
index dac058d3df5314b30e977355aedb5fe0ad27dd3f..14fcc359599ccd6fb6557a4e38dff9500546675e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_alloc.c
+++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c
@@ -34,6 +34,7 @@
 #include <linux/platform_device.h>
 #include <linux/vmalloc.h>
 #include "hns_roce_device.h"
+#include <rdma/ib_umem.h>
 
 int hns_roce_bitmap_alloc(struct hns_roce_bitmap *bitmap, unsigned long *obj)
 {
@@ -238,6 +239,104 @@ err_free:
        return -ENOMEM;
 }
 
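+/*
+ * Collect the dma addresses of pages [start, start + buf_cnt) of a
+ * kernel-space buffer into bufs[].  Returns the number of addresses
+ * stored, or -EINVAL if the range exceeds the buffer's page count.
+ */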
+int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
+                          int buf_cnt, int start, struct hns_roce_buf *buf)
+{
+       int i, end;
+       int total;
+
+       end = start + buf_cnt;
+       if (end > buf->npages) {
+               dev_err(hr_dev->dev,
+                       "invalid kmem region, offset %d, buf_cnt %d, total %d!\n",
+                       start, buf_cnt, buf->npages);
+               return -EINVAL;
+       }
+
+       total = 0;
+       for (i = start; i < end; i++) {
+               if (buf->nbufs == 1)
+                       bufs[total++] = buf->direct.map +
+                                       (i << buf->page_shift);
+               else
+                       bufs[total++] = buf->page_list[i].map;
+       }
+
+       return total;
+}
+
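+/*
+ * Walk the umem in blocks of (1 << page_shift) bytes and collect up to
+ * buf_cnt block dma addresses into bufs[], skipping the first 'start'
+ * blocks.  Returns the number of addresses stored, or -EINVAL for a
+ * page shift smaller than PAGE_SHIFT.
+ */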
+int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
+                          int buf_cnt, int start, struct ib_umem *umem,
+                          int page_shift)
+{
+       struct ib_block_iter biter;
+       int total = 0;
+       int idx = 0;
+       u64 addr;
+
+       if (page_shift < PAGE_SHIFT) {
+               dev_err(hr_dev->dev, "invalid page shift %d!\n", page_shift);
+               return -EINVAL;
+       }
+
+       /* walk the umem in hw-page-size blocks and collect their dma addresses */
+       rdma_for_each_block(umem->sg_head.sgl, &biter, umem->nmap,
+                           1 << page_shift) {
+               addr = rdma_block_iter_dma_address(&biter);
+               if (idx >= start) {
+                       bufs[total++] = addr;
+                       if (total >= buf_cnt)
+                               goto done;
+               }
+               idx++;
+       }
+
+done:
+       return total;
+}
+
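+/*
+ * Record the hop number, starting page offset and page count of a
+ * buffer region (HNS_ROCE_HOP_NUM_0 is stored as hop number 0).
+ */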
+void hns_roce_init_buf_region(struct hns_roce_buf_region *region, int hopnum,
+                             int offset, int buf_cnt)
+{
+       if (hopnum == HNS_ROCE_HOP_NUM_0)
+               region->hopnum = 0;
+       else
+               region->hopnum = hopnum;
+
+       region->offset = offset;
+       region->count = buf_cnt;
+}
+
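+/*
+ * Free the per-region page address lists allocated by
+ * hns_roce_alloc_buf_list().
+ */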
+void hns_roce_free_buf_list(dma_addr_t **bufs, int region_cnt)
+{
+       int i;
+
+       for (i = 0; i < region_cnt; i++) {
+               kfree(bufs[i]);
+               bufs[i] = NULL;
+       }
+}
+
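+/*
+ * Allocate one dma_addr_t array per region, sized by the region's page
+ * count.  On failure, the lists allocated so far are freed and -ENOMEM
+ * is returned.
+ */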
+int hns_roce_alloc_buf_list(struct hns_roce_buf_region *regions,
+                           dma_addr_t **bufs, int region_cnt)
+{
+       struct hns_roce_buf_region *r;
+       int i;
+
+       for (i = 0; i < region_cnt; i++) {
+               r = &regions[i];
+               bufs[i] = kcalloc(r->count, sizeof(dma_addr_t), GFP_KERNEL);
+               if (!bufs[i])
+                       goto err_alloc;
+       }
+
+       return 0;
+
+err_alloc:
+       hns_roce_free_buf_list(bufs, i);
+
+       return -ENOMEM;
+}
+
 void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev)
 {
        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index ec2ed5cac3bc0de1bc165271719d9f0c4b42b2a4..4974c42a77decc427c8b7dbe68c47ecbac32ef8e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -1211,6 +1211,18 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
 int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev,
                               struct hns_roce_mtt *mtt, struct ib_umem *umem);
 
+void hns_roce_init_buf_region(struct hns_roce_buf_region *region, int hopnum,
+                             int offset, int buf_cnt);
+int hns_roce_alloc_buf_list(struct hns_roce_buf_region *regions,
+                           dma_addr_t **bufs, int count);
+void hns_roce_free_buf_list(dma_addr_t **bufs, int count);
+
+int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
+                          int buf_cnt, int start, struct hns_roce_buf *buf);
+int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
+                          int buf_cnt, int start, struct ib_umem *umem,
+                          int page_shift);
+
 int hns_roce_create_srq(struct ib_srq *srq,
                        struct ib_srq_init_attr *srq_init_attr,
                        struct ib_udata *udata);