#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW 0x6
#define MLX5_RX_HEADROOM NET_SKB_PAD
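+/* Space build_skb() needs for 'len' bytes of headroom plus packet data:
+ * the aligned data area, followed by the aligned skb_shared_info that
+ * build_skb() places at its tail.
+ */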
+#define MLX5_SKB_FRAG_SZ(len) (SKB_DATA_ALIGN(len) + \
+                               SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev) \
(6 + MLX5_CAP_GEN(mdev, cache_line_128byte)) /* HW restriction */
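For intuition, here is a standalone userspace sketch of the arithmetic this macro performs. SMP_CACHE_BYTES, the skb_shared_info size, and the 1536-byte packet figure are illustrative assumptions, not values from the patch:

    #include <stdio.h>

    #define SMP_CACHE_BYTES   64    /* assumed L1 cache line size */
    #define ALIGN(x, a)       (((x) + (a) - 1) & ~((a) - 1))
    #define SKB_DATA_ALIGN(x) ALIGN(x, SMP_CACHE_BYTES)
    #define SHINFO_SZ         320   /* assumed sizeof(struct skb_shared_info) */

    /* Same shape as MLX5_SKB_FRAG_SZ above, with the shinfo size stubbed */
    #define MLX5_SKB_FRAG_SZ(len) (SKB_DATA_ALIGN(len) + SKB_DATA_ALIGN(SHINFO_SZ))

    int main(void)
    {
            unsigned int headroom   = 64;   /* NET_SKB_PAD on common configs */
            unsigned int byte_count = 1536; /* illustrative wqe_sz */

            /* SKB_DATA_ALIGN(1600) = 1600, SKB_DATA_ALIGN(320) = 320 -> 1920 */
            printf("frag_sz = %u\n", MLX5_SKB_FRAG_SZ(headroom + byte_count));
            return 0;
    }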
byte_count = rq->buff.wqe_sz;
/* calc the required page order */
- frag_sz = rq->rx_headroom +
-           byte_count /* packet data */ +
-           SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- frag_sz = SKB_DATA_ALIGN(frag_sz);
-
+ frag_sz = MLX5_SKB_FRAG_SZ(rq->rx_headroom + byte_count);
npages = DIV_ROUND_UP(frag_sz, PAGE_SIZE);
rq->buff.page_order = order_base_2(npages);
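DIV_ROUND_UP() and order_base_2() come from linux/kernel.h and linux/log2.h respectively; below is a userspace sketch of the page-order math, with an assumed jumbo-frame frag size:

    #include <stdio.h>

    #define PAGE_SIZE          4096u
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* Smallest order o with (1 << o) >= n; mirrors what the kernel's
     * order_base_2() returns for the small counts used here. */
    static unsigned int order_base_2(unsigned int n)
    {
            unsigned int o = 0;

            while ((1u << o) < n)
                    o++;
            return o;
    }

    int main(void)
    {
            unsigned int frag_sz = 9408; /* assumed frag size for a ~9000-byte MTU */
            unsigned int npages  = DIV_ROUND_UP(frag_sz, PAGE_SIZE);

            /* 9408 bytes -> 3 pages -> order 2, i.e. one 16KB allocation */
            printf("npages = %u, page_order = %u\n", npages, order_base_2(npages));
            return 0;
    }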
void *va, *data;
u16 rx_headroom = rq->rx_headroom;
bool consumed;
+ u32 frag_size;
di = &rq->dma_info[wqe_counter];
va = page_address(di->page);
if (consumed)
return NULL; /* page/packet was consumed by XDP */
- skb = build_skb(va, RQ_PAGE_SIZE(rq));
+ frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
+ skb = build_skb(va, frag_size);
if (unlikely(!skb)) {
rq->stats.buff_alloc_err++;
mlx5e_page_release(rq, di, true);
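For context on why the exact frag_size now replaces the old RQ_PAGE_SIZE(rq) argument: build_skb() carves skb_shared_info from the tail of the area it is handed and accounts that whole area in skb->truesize, so passing the full page both pushes shinfo to the page end and inflates truesize. A small sketch of the difference, with assumed constants:

    #include <stdio.h>

    #define PAGE_SIZE      4096u
    #define SHINFO_ALIGNED 320u /* assumed aligned sizeof(struct skb_shared_info) */

    int main(void)
    {
            unsigned int frag_size = 1920; /* from the frag-size example above */

            /* build_skb() places shinfo at (area end - aligned shinfo) */
            printf("exact frag_size: shinfo at %u, truesize ~%u\n",
                   frag_size - SHINFO_ALIGNED, frag_size);
            printf("full page:       shinfo at %u, truesize ~%u\n",
                   PAGE_SIZE - SHINFO_ALIGNED, PAGE_SIZE);
            return 0;
    }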