From: Yamin Friedman
Date: Mon, 8 Jul 2019 10:59:03 +0000 (+0300)
Subject: RDMA/core: Provide RDMA DIM support for ULPs
X-Git-Url: http://git.cdn.openwrt.org/?a=commitdiff_plain;h=da6629793aa6944db6c8a908ca1a52d87f1489aa;p=openwrt%2Fstaging%2Fblogic.git

RDMA/core: Provide RDMA DIM support for ULPs

Add the interface in the infiniband core that applies rdma_dim adaptive
interrupt moderation. There is now a dedicated function for allocating
an ib_cq that uses rdma_dim.

Performance improvement (ConnectX-5 100GbE, x86) running the FIO benchmark
over NVMf between two equal end-hosts with 56 cores across a Mellanox
switch using a null_blk device:

IO READS without DIM:
blk size | BW        | IOPS  | 99th percentile latency | 99.99th latency
512B     | 3.8GiB/s  | 7.7M  | 1401 usec               | 2442 usec
4k       | 7.0GiB/s  | 1.8M  | 4817 usec               | 6587 usec
64k      | 10.7GiB/s | 175k  | 9896 usec               | 10028 usec

IO WRITES without DIM:
blk size | BW        | IOPS  | 99th percentile latency | 99.99th latency
512B     | 3.6GiB/s  | 7.5M  | 1434 usec               | 2474 usec
4k       | 6.3GiB/s  | 1.6M  | 938 usec                | 1221 usec
64k      | 10.7GiB/s | 175k  | 8979 usec               | 12780 usec

IO READS with DIM:
blk size | BW        | IOPS  | 99th percentile latency | 99.99th latency
512B     | 4.0GiB/s  | 8.2M  | 816 usec                | 889 usec
4k       | 10.1GiB/s | 2.65M | 3359 usec               | 5080 usec
64k      | 10.7GiB/s | 175k  | 9896 usec               | 10028 usec

IO WRITES with DIM:
blk size | BW        | IOPS  | 99th percentile latency | 99.99th latency
512B     | 3.9GiB/s  | 8.1M  | 799 usec                | 922 usec
4k       | 9.6GiB/s  | 2.5M  | 717 usec                | 1004 usec
64k      | 10.7GiB/s | 176k  | 8586 usec               | 12256 usec

The rdma_dim algorithm was designed to measure the effectiveness of
moderation on the flow in a general way, and should therefore be
appropriate for all RDMA storage protocols.

rdma_dim is configured to be the default option based on the performance
improvement seen after extensive tests.
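For illustration only (not part of this change): a provider that implements
ops.modify_cq opts in by setting use_cq_dim on its ib_device, and a ULP then
allocates its CQ exactly as before; the core applies rdma_dim to the
IB_POLL_SOFTIRQ and IB_POLL_WORKQUEUE contexts. A minimal sketch, with ibdev,
ctx, nr_cqe and comp_vector as placeholder names:

	/* Provider side (sketch): enable CQ adaptive moderation for this
	 * device.  The driver must also implement ops.modify_cq (see the
	 * sketch after the diff below).
	 */
	ibdev->use_cq_dim = true;

	/* ULP side (sketch): nothing DIM-specific is needed; rdma_dim_init()
	 * runs during CQ allocation when the device has enabled it.
	 */
	cq = ib_alloc_cq(ibdev, ctx, nr_cqe, comp_vector, IB_POLL_WORKQUEUE);
	if (IS_ERR(cq))
		return PTR_ERR(cq);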
Signed-off-by: Yamin Friedman
Reviewed-by: Max Gurtovoy
Reviewed-by: Sagi Grimberg
Signed-off-by: Saeed Mahameed
Signed-off-by: Leon Romanovsky
Signed-off-by: Jason Gunthorpe
---

diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c
index 00d70f166209..ffd6e24109d5 100644
--- a/drivers/infiniband/core/cq.c
+++ b/drivers/infiniband/core/cq.c
@@ -18,6 +18,40 @@
 #define IB_POLL_FLAGS \
         (IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS)
 
+static void ib_cq_rdma_dim_work(struct work_struct *w)
+{
+        struct dim *dim = container_of(w, struct dim, work);
+        struct ib_cq *cq = dim->priv;
+
+        u16 usec = rdma_dim_prof[dim->profile_ix].usec;
+        u16 comps = rdma_dim_prof[dim->profile_ix].comps;
+
+        dim->state = DIM_START_MEASURE;
+
+        cq->device->ops.modify_cq(cq, comps, usec);
+}
+
+static void rdma_dim_init(struct ib_cq *cq)
+{
+        struct dim *dim;
+
+        if (!cq->device->ops.modify_cq || !cq->device->use_cq_dim ||
+            cq->poll_ctx == IB_POLL_DIRECT)
+                return;
+
+        dim = kzalloc(sizeof(struct dim), GFP_KERNEL);
+        if (!dim)
+                return;
+
+        dim->state = DIM_START_MEASURE;
+        dim->tune_state = DIM_GOING_RIGHT;
+        dim->profile_ix = RDMA_DIM_START_PROFILE;
+        dim->priv = cq;
+        cq->dim = dim;
+
+        INIT_WORK(&dim->work, ib_cq_rdma_dim_work);
+}
+
 static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *wcs,
                            int batch)
 {
@@ -78,6 +112,7 @@ static void ib_cq_completion_direct(struct ib_cq *cq, void *private)
 static int ib_poll_handler(struct irq_poll *iop, int budget)
 {
         struct ib_cq *cq = container_of(iop, struct ib_cq, iop);
+        struct dim *dim = cq->dim;
         int completed;
 
         completed = __ib_process_cq(cq, budget, cq->wc, IB_POLL_BATCH);
@@ -87,6 +122,9 @@ static int ib_poll_handler(struct irq_poll *iop, int budget)
                         irq_poll_sched(&cq->iop);
         }
 
+        if (dim)
+                rdma_dim(dim, completed);
+
         return completed;
 }
 
@@ -105,6 +143,8 @@ static void ib_cq_poll_work(struct work_struct *work)
         if (completed >= IB_POLL_BUDGET_WORKQUEUE ||
             ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
                 queue_work(cq->comp_wq, &cq->work);
+        else if (cq->dim)
+                rdma_dim(cq->dim, completed);
 }
 
 static void ib_cq_completion_workqueue(struct ib_cq *cq, void *private)
@@ -161,6 +201,8 @@ struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
         rdma_restrack_kadd(&cq->res);
 
+        rdma_dim_init(cq);
+
         switch (cq->poll_ctx) {
         case IB_POLL_DIRECT:
                 cq->comp_handler = ib_cq_completion_direct;
@@ -223,6 +265,9 @@ void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata)
         rdma_restrack_del(&cq->res);
         cq->device->ops.destroy_cq(cq, udata);
+        if (cq->dim)
+                cancel_work_sync(&cq->dim->work);
+        kfree(cq->dim);
         kfree(cq->wc);
         kfree(cq);
 }
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 4053be51b7fa..c5f8a9f17063 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -61,6 +61,7 @@
 #include
 #include
 #include
+#include <linux/dim.h>
 #include
 #include
 #include
@@ -1509,6 +1510,7 @@ struct ib_cq {
                 struct work_struct work;
         };
         struct workqueue_struct *comp_wq;
+        struct dim *dim;
         /*
          * Implementation details of the RDMA core, don't use in drivers:
          */
@@ -2576,6 +2578,8 @@ struct ib_device {
         u16                          is_switch:1;
         /* Indicates kernel verbs support, should not be used in drivers */
         u16                          kverbs_provider:1;
+        /* CQ adaptive moderation (RDMA DIM) */
+        u16                          use_cq_dim:1;
         u8                           node_type;
         u8                           phys_port_cnt;
         struct ib_device_attr        attrs;
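
Illustration only, not part of this patch: the core arms DIM only when the
provider implements ops.modify_cq; ib_cq_rdma_dim_work() then calls that hook
with a (comps, usec) pair taken from rdma_dim_prof[]. A rough sketch of such a
provider hook, where foo_cq and foo_hw_set_cq_moderation stand in for
hypothetical driver-private code:

	/* Hypothetical provider hook: translate the moderation profile chosen
	 * by rdma_dim (cq_count completions, cq_period usecs) into the
	 * device's CQ moderation settings.
	 */
	static int foo_modify_cq(struct ib_cq *ibcq, u16 cq_count, u16 cq_period)
	{
		struct foo_cq *cq = container_of(ibcq, struct foo_cq, ibcq);

		return foo_hw_set_cq_moderation(cq, cq_count, cq_period);
	}

	static const struct ib_device_ops foo_dev_ops = {
		.modify_cq = foo_modify_cq,
		/* ... other verbs ... */
	};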