#define MLX5E_MAX_DSCP 64
#define MLX5E_MAX_NUM_TC 8
-#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x6
-#define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE 0xa
-#define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE 0xd
-
-#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE 0x1
-#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa
-#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE 0xd
-
-#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x2
-#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW 0x3
-#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW 0x6
-
#define MLX5_RX_HEADROOM NET_SKB_PAD
#define MLX5_SKB_FRAG_SZ(len) (SKB_DATA_ALIGN(len) + \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define MLX5_MPWRQ_PAGES_PER_WQE BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)
#define MLX5_MTT_OCTW(npages) (ALIGN(npages, 8) / 2)
-#define MLX5E_REQUIRED_MTTS(wqes) \
- (wqes * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8))
-#define MLX5E_VALID_NUM_MTTS(num_mtts) (MLX5_MTT_OCTW(num_mtts) - 1 <= U16_MAX)
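+/* MTT entries consumed by a single MPW WQE: one per page, padded to a
+ * multiple of 8 to match the alignment used by MLX5_MTT_OCTW().
+ */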
+#define MLX5E_REQUIRED_WQE_MTTS (ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8))
+#define MLX5E_REQUIRED_MTTS(wqes) (wqes * MLX5E_REQUIRED_WQE_MTTS)
+#define MLX5E_MAX_RQ_NUM_MTTS \
+ ((1 << 16) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */
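+/* Striding-RQ size limits derived from the MTT budget above: the maximum
+ * number of MPW WQEs, and the maximum number of packets assuming the largest
+ * supported MTU (10KB), i.e. the worst case of packets per WQE.
+ */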
+#define MLX5E_ORDER2_MAX_PACKET_MTU (order_base_2(10 * 1024))
+#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW \
+ (ilog2(MLX5E_MAX_RQ_NUM_MTTS / MLX5E_REQUIRED_WQE_MTTS))
+#define MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW \
+ (MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW + \
+ (MLX5_MPWRQ_LOG_WQE_SZ - MLX5E_ORDER2_MAX_PACKET_MTU))
+
+#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x6
+#define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE 0xa
+#define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE 0xd
+
+#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE 0x1
+#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa
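+/* Cap the packet-based maximum so that, for striding RQ, the resulting WQE
+ * count never exceeds MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW at any MTU.
+ */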
+#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE min_t(u8, 0xd, \
+ MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW)
+
+#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x2
#define MLX5_UMR_ALIGN (2048)
#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD (256)
}
}
-static inline int mlx5_min_log_rq_size(int wq_type)
-{
- switch (wq_type) {
- case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW;
- default:
- return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
- }
-}
-
-static inline int mlx5_max_log_rq_size(int wq_type)
-{
- switch (wq_type) {
- case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- return MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW;
- default:
- return MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE;
- }
-}
-
static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
{
return is_kdump_kernel() ?
struct mlx5e_params {
u8 log_sq_size;
u8 rq_wq_type;
- u8 log_rq_size;
+ u8 log_rq_mtu_frames;
u16 num_channels;
u8 num_tc;
bool rx_cqe_compress_def;
void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi);
-u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params);
-u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params);
-
void mlx5e_update_stats(struct mlx5e_priv *priv);
int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
mlx5e_ethtool_get_ethtool_stats(priv, stats, data);
}
-static u32 mlx5e_rx_wqes_to_packets(struct mlx5e_priv *priv, int rq_wq_type,
- int num_wqe)
-{
- int packets_per_wqe;
- int stride_size;
- int num_strides;
- int wqe_size;
-
- if (rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
- return num_wqe;
-
- stride_size = 1 << mlx5e_mpwqe_get_log_stride_size(priv->mdev, &priv->channels.params);
- num_strides = 1 << mlx5e_mpwqe_get_log_num_strides(priv->mdev, &priv->channels.params);
- wqe_size = stride_size * num_strides;
-
- packets_per_wqe = wqe_size /
- ALIGN(ETH_DATA_LEN, stride_size);
- return (1 << (order_base_2(num_wqe * packets_per_wqe) - 1));
-}
-
-static u32 mlx5e_packets_to_rx_wqes(struct mlx5e_priv *priv, int rq_wq_type,
- int num_packets)
-{
- int packets_per_wqe;
- int stride_size;
- int num_strides;
- int wqe_size;
- int num_wqes;
-
- if (rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
- return num_packets;
-
- stride_size = 1 << mlx5e_mpwqe_get_log_stride_size(priv->mdev, &priv->channels.params);
- num_strides = 1 << mlx5e_mpwqe_get_log_num_strides(priv->mdev, &priv->channels.params);
- wqe_size = stride_size * num_strides;
-
- num_packets = (1 << order_base_2(num_packets));
-
- packets_per_wqe = wqe_size /
- ALIGN(ETH_DATA_LEN, stride_size);
- num_wqes = DIV_ROUND_UP(num_packets, packets_per_wqe);
- return 1 << (order_base_2(num_wqes));
-}
-
void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
struct ethtool_ringparam *param)
{
- int rq_wq_type = priv->channels.params.rq_wq_type;
-
- param->rx_max_pending = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
- 1 << mlx5_max_log_rq_size(rq_wq_type));
+ param->rx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE;
param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE;
- param->rx_pending = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
- 1 << priv->channels.params.log_rq_size);
+ param->rx_pending = 1 << priv->channels.params.log_rq_mtu_frames;
param->tx_pending = 1 << priv->channels.params.log_sq_size;
}
int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
struct ethtool_ringparam *param)
{
- int rq_wq_type = priv->channels.params.rq_wq_type;
struct mlx5e_channels new_channels = {};
- u32 rx_pending_wqes;
- u32 min_rq_size;
u8 log_rq_size;
u8 log_sq_size;
- u32 num_mtts;
int err = 0;
if (param->rx_jumbo_pending) {
return -EINVAL;
}
- min_rq_size = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
- 1 << mlx5_min_log_rq_size(rq_wq_type));
- rx_pending_wqes = mlx5e_packets_to_rx_wqes(priv, rq_wq_type,
- param->rx_pending);
-
- if (param->rx_pending < min_rq_size) {
+ if (param->rx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE)) {
netdev_info(priv->netdev, "%s: rx_pending (%d) < min (%d)\n",
__func__, param->rx_pending,
- min_rq_size);
- return -EINVAL;
- }
-
- num_mtts = MLX5E_REQUIRED_MTTS(rx_pending_wqes);
- if (priv->channels.params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
- !MLX5E_VALID_NUM_MTTS(num_mtts)) {
- netdev_info(priv->netdev, "%s: rx_pending (%d) request can't be satisfied, try to reduce.\n",
- __func__, param->rx_pending);
+ 1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE);
return -EINVAL;
}
return -EINVAL;
}
- log_rq_size = order_base_2(rx_pending_wqes);
+ log_rq_size = order_base_2(param->rx_pending);
log_sq_size = order_base_2(param->tx_pending);
- if (log_rq_size == priv->channels.params.log_rq_size &&
+ if (log_rq_size == priv->channels.params.log_rq_mtu_frames &&
log_sq_size == priv->channels.params.log_sq_size)
return 0;
mutex_lock(&priv->state_lock);
new_channels.params = priv->channels.params;
- new_channels.params.log_rq_size = log_rq_size;
+ new_channels.params.log_rq_mtu_frames = log_rq_size;
new_channels.params.log_sq_size = log_sq_size;
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
MLX5_CAP_ETH(mdev, reg_umr_sq);
}
-u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params)
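+/* Byte size of the linear fragment that must hold one packet; currently this
+ * is simply the HW MTU.
+ */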
+static u32 mlx5e_mpwqe_get_linear_frag_sz(struct mlx5e_params *params)
+{
+ u16 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+
+ return hw_mtu;
+}
+
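+/* Log2 of how many MTU-sized packets fit in a single MPW WQE. */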
+static u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params)
+{
+ u32 linear_frag_sz = mlx5e_mpwqe_get_linear_frag_sz(params);
+
+ return MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(linear_frag_sz);
+}
+
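+/* Convert the ring size given in MTU frames into striding-RQ WQEs, clamped
+ * to the minimum WQE count.
+ */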
+static u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params)
+{
+ if (params->log_rq_mtu_frames <
+ mlx5e_mpwqe_log_pkts_per_wqe(params) + MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW)
+ return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW;
+
+ return params->log_rq_mtu_frames - mlx5e_mpwqe_log_pkts_per_wqe(params);
+}
+
+static u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params)
{
return MLX5E_MPWQE_STRIDE_SZ(mdev,
MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
}
-u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params)
+static u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params)
{
return MLX5_MPWRQ_LOG_WQE_SZ -
mlx5e_mpwqe_get_log_stride_size(mdev, params);
struct mlx5e_params *params)
{
params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
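+ /* The default RQ size is expressed in MTU frames; for striding RQ the
+ * WQE count is derived from it when building the RQ parameters.
+ */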
+ params->log_rq_mtu_frames = is_kdump_kernel() ?
+ MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
+ MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
switch (params->rq_wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- params->log_rq_size = is_kdump_kernel() ?
- MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW :
- MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
break;
default: /* MLX5_WQ_TYPE_LINKED_LIST */
- params->log_rq_size = is_kdump_kernel() ?
- MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
- MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
-
/* Extra room needed for build_skb */
params->lro_wqe_sz -= mlx5e_get_rq_headroom(params) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
- BIT(params->log_rq_size),
+ BIT(params->log_rq_mtu_frames),
BIT(mlx5e_mpwqe_get_log_stride_size(mdev, params)),
MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
}
u32 *in;
int err;
- if (!MLX5E_VALID_NUM_MTTS(npages))
- return -EINVAL;
-
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
MLX5_SET(wq, wq, log_wqe_stride_size,
mlx5e_mpwqe_get_log_stride_size(mdev, params) - 6);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
+ MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(params));
break;
default: /* MLX5_WQ_TYPE_LINKED_LIST */
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
+ MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames);
}
MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
- MLX5_SET(wq, wq, log_wq_sz, params->log_rq_size);
MLX5_SET(wq, wq, pd, mdev->mlx5e_res.pdn);
MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter);
MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable);
struct mlx5e_params *params,
struct mlx5e_cq_param *param)
{
+ struct mlx5_core_dev *mdev = priv->mdev;
void *cqc = param->cqc;
u8 log_cq_size;
switch (params->rq_wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- log_cq_size = params->log_rq_size +
- mlx5e_mpwqe_get_log_num_strides(priv->mdev, params);
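+ /* Size the CQ for the worst case of one CQE per stride. */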
+ log_cq_size = mlx5e_mpwqe_get_log_rq_size(params) +
+ mlx5e_mpwqe_get_log_num_strides(mdev, params);
break;
default: /* MLX5_WQ_TYPE_LINKED_LIST */
- log_cq_size = params->log_rq_size;
+ log_cq_size = params->log_rq_mtu_frames;
}
MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
mutex_lock(&priv->state_lock);
params = &priv->channels.params;
- reset = !params->lro_en &&
- (params->rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
+ reset = !params->lro_en;
reset = reset && test_bit(MLX5E_STATE_OPENED, &priv->state);
+ new_channels.params = *params;
+ new_channels.params.sw_mtu = new_mtu;
+
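+ /* With striding RQ, a channel reset is needed only if the MTU change
+ * alters the number of packets per WQE.
+ */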
+ if (params->rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST) {
+ u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params);
+ u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params);
+
+ reset = reset && (ppw_old != ppw_new);
+ }
+
if (!reset) {
params->sw_mtu = new_mtu;
mlx5e_set_dev_port_mtu(priv);
goto out;
}
- new_channels.params = *params;
- new_channels.params.sw_mtu = new_mtu;
err = mlx5e_open_channels(priv, &new_channels);
if (err)
goto out;
params->hard_mtu = MLX5E_ETH_HARD_MTU;
params->log_sq_size = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
params->rq_wq_type = MLX5_WQ_TYPE_LINKED_LIST;
- params->log_rq_size = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
+ params->log_rq_mtu_frames = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
mlx5e_init_rq_type_params(mdev, params);
/* RQ size in ipoib by default is 512 */
- params->log_rq_size = is_kdump_kernel() ?
+ params->log_rq_mtu_frames = is_kdump_kernel() ?
MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
MLX5I_PARAMS_DEFAULT_LOG_RQ_SIZE;
netdev->ethtool_ops = &mlx5i_pkey_ethtool_ops;
/* Use dummy rqs */
- priv->channels.params.log_rq_size = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
+ priv->channels.params.log_rq_mtu_frames = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
}
/* Called directly before IPoIB netdevice is destroyed to cleanup SW structs */