mlx5: more strict use of page_pool API
Author:     Jesper Dangaard Brouer <brouer@redhat.com>
AuthorDate: Tue, 18 Jun 2019 13:05:42 +0000 (15:05 +0200)
Commit:     David S. Miller <davem@davemloft.net>
CommitDate: Wed, 19 Jun 2019 15:23:13 +0000 (11:23 -0400)
The mlx5 driver uses page_pool, but (currently) not for DMA-mapping, and
has been a little too relaxed about returning or releasing page resources,
as strict handling is not necessary when DMA-mappings are not used.

As this patchset works towards tracking page_pool resources, in order to
know about in-flight frames on shutdown, fix the places where mlx5 leaks
page_pool resources.

In case of dma_mapping_error, recycle the page back into the page_pool
instead of calling put_page(), as the page originates from the pool.
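
To illustrate the rule this change follows, here is a minimal sketch, not
mlx5 code: the helper name alloc_and_map() and its parameters are
hypothetical. A page obtained from a page_pool must go back into that pool
on the error path; put_page() would bypass the pool:

  #include <linux/dma-mapping.h>
  #include <net/page_pool.h>

  static struct page *alloc_and_map(struct page_pool *pool,
                                    struct device *dev, dma_addr_t *addr)
  {
          struct page *page = page_pool_dev_alloc_pages(pool);

          if (unlikely(!page))
                  return NULL;

          *addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
          if (unlikely(dma_mapping_error(dev, *addr))) {
                  /* page came from the pool: recycle, do not put_page() */
                  page_pool_recycle_direct(pool, page);
                  return NULL;
          }
          return page;
  }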

In mlx5e_free_rq(), move the page_pool_destroy() call to after the
mlx5e_page_release() calls, as pages must be returned to the pool before
it is destroyed.
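
The ordering matters because mlx5e_page_release() can recycle pages back
into the pool. A minimal sketch of the intended lifecycle, with
illustrative (not mlx5) parameter values, assuming a pool that does no
DMA-mapping:

  #include <net/page_pool.h>

  static int page_pool_lifecycle_demo(void)
  {
          struct page_pool_params pp_params = {
                  .flags     = 0,   /* no PP_FLAG_DMA_MAP: pool does no DMA */
                  .order     = 0,
                  .pool_size = 256,
                  .nid       = NUMA_NO_NODE,
          };
          struct page_pool *pool = page_pool_create(&pp_params);
          struct page *page;

          if (IS_ERR(pool))
                  return PTR_ERR(pool);

          page = page_pool_dev_alloc_pages(pool);
          if (page)
                  page_pool_recycle_direct(pool, page); /* back in the pool */

          page_pool_destroy(pool); /* safe: no pages outstanding */
          return 0;
  }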

In mlx5e_page_release(), when no recycle was requested, release the page
from the page_pool via page_pool_release_page() before put_page().
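
Sketched the same way (give_page_to_stack() is a hypothetical name), this
is the pattern for a page that leaves the pool for good, e.g. when its
data has been passed up the network stack:

  static void give_page_to_stack(struct page_pool *pool, struct page *page)
  {
          /* detach the page so the pool's accounting stays balanced */
          page_pool_release_page(pool, page);
          put_page(page); /* drop the driver's reference */
  }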

Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Reviewed-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 46323709ad4715a54a55215ea5883d9c41769da1..46b6a47bd1e3a9cc8a97fa5826a7ae778d73c3a6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -625,10 +625,6 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
        if (rq->xdp_prog)
                bpf_prog_put(rq->xdp_prog);
 
-       xdp_rxq_info_unreg(&rq->xdp_rxq);
-       if (rq->page_pool)
-               page_pool_destroy(rq->page_pool);
-
        switch (rq->wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
                kvfree(rq->mpwqe.info);
@@ -645,6 +641,11 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
 
                mlx5e_page_release(rq, dma_info, false);
        }
+
+       xdp_rxq_info_unreg(&rq->xdp_rxq);
+       if (rq->page_pool)
+               page_pool_destroy(rq->page_pool);
+
        mlx5_wq_destroy(&rq->wq_ctrl);
 }
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 935bf62eddc1c660dedbbe2c5eca38574a1045d1..234a3fd399013f0c4f0bd956c369bce2214bdb6c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -248,7 +248,7 @@ static inline int mlx5e_page_alloc_mapped(struct mlx5e_rq *rq,
        dma_info->addr = dma_map_page(rq->pdev, dma_info->page, 0,
                                      PAGE_SIZE, rq->buff.map_dir);
        if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) {
-               put_page(dma_info->page);
+               page_pool_recycle_direct(rq->page_pool, dma_info->page);
                dma_info->page = NULL;
                return -ENOMEM;
        }
@@ -272,6 +272,7 @@ void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
                page_pool_recycle_direct(rq->page_pool, dma_info->page);
        } else {
                mlx5e_page_dma_unmap(rq, dma_info);
+               page_pool_release_page(rq->page_pool, dma_info->page);
                put_page(dma_info->page);
        }
 }