Remove the 'net' prefix from the functions and structs that external drivers
use: struct net_dim becomes struct dim, struct net_dim_sample becomes
struct dim_sample, struct net_dim_cq_moder becomes struct dim_cq_moder, and
net_dim_update_sample() becomes dim_update_sample(). The net_dim() entry
point and the net_dim_get_rx/tx_moderation() helpers keep their names.
Signed-off-by: Tal Gilboa <talgi@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
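---
Note: the calling pattern in the converted drivers is unchanged; only the type
and helper names lose the "net_" prefix. Below is a minimal sketch of a
hypothetical driver's RX path after the rename. The foo_* identifiers and the
coalesce stub are illustrative only and not part of this patch, and the header
path is assumed to still be <linux/net_dim.h> at this point in the series.

#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/net_dim.h>

struct foo_rx_ring {
	u16 event_ctr;		/* completion events seen so far */
	u64 packets;		/* packets processed so far */
	u64 bytes;		/* bytes processed so far */
	struct dim dim;		/* was: struct net_dim */
};

/* Stand-in for the driver's hardware coalescing register write. */
static void foo_set_rx_coalesce(struct foo_rx_ring *ring, u16 usec, u16 pkts)
{
}

/* Deferred work scheduled by net_dim() when it selects a new profile. */
static void foo_rx_dim_work(struct work_struct *work)
{
	struct dim *dim = container_of(work, struct dim, work);
	struct foo_rx_ring *ring = container_of(dim, struct foo_rx_ring, dim);
	struct dim_cq_moder moder =	/* was: struct net_dim_cq_moder */
		net_dim_get_rx_moderation(dim->mode, dim->profile_ix);

	foo_set_rx_coalesce(ring, moder.usec, moder.pkts);
	dim->state = DIM_START_MEASURE;
}

static void foo_rx_dim_init(struct foo_rx_ring *ring)
{
	INIT_WORK(&ring->dim.work, foo_rx_dim_work);
}

/* Tail of the NAPI poll handler: feed the current counters to DIM. */
static void foo_rx_dim_sample(struct foo_rx_ring *ring)
{
	struct dim_sample dim_sample;	/* was: struct net_dim_sample */

	/* was: net_dim_update_sample() */
	dim_update_sample(ring->event_ctr, ring->packets, ring->bytes,
			  &dim_sample);
	net_dim(&ring->dim, dim_sample);	/* name unchanged */
}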
struct ethtool_coalesce *ec)
{
struct bcm_sysport_priv *priv = netdev_priv(dev);
- struct net_dim_cq_moder moder;
+ struct dim_cq_moder moder;
u32 usecs, pkts;
unsigned int i;
{
struct bcm_sysport_priv *priv =
container_of(napi, struct bcm_sysport_priv, napi);
- struct net_dim_sample dim_sample;
+ struct dim_sample dim_sample;
unsigned int work_done = 0;
work_done = bcm_sysport_desc_rx(priv, budget);
}
if (priv->dim.use_dim) {
- net_dim_update_sample(priv->dim.event_ctr, priv->dim.packets,
- priv->dim.bytes, &dim_sample);
+ dim_update_sample(priv->dim.event_ctr, priv->dim.packets,
+ priv->dim.bytes, &dim_sample);
net_dim(&priv->dim.dim, dim_sample);
}
static void bcm_sysport_dim_work(struct work_struct *work)
{
- struct net_dim *dim = container_of(work, struct net_dim, work);
+ struct dim *dim = container_of(work, struct dim, work);
struct bcm_sysport_net_dim *ndim =
container_of(dim, struct bcm_sysport_net_dim, dim);
struct bcm_sysport_priv *priv =
container_of(ndim, struct bcm_sysport_priv, dim);
- struct net_dim_cq_moder cur_profile =
- net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
+ struct dim_cq_moder cur_profile = net_dim_get_rx_moderation(dim->mode,
+ dim->profile_ix);
bcm_sysport_set_rx_coalesce(priv, cur_profile.usec, cur_profile.pkts);
dim->state = DIM_START_MEASURE;
static void bcm_sysport_init_rx_coalesce(struct bcm_sysport_priv *priv)
{
struct bcm_sysport_net_dim *dim = &priv->dim;
- struct net_dim_cq_moder moder;
+ struct dim_cq_moder moder;
u32 usecs, pkts;
usecs = priv->rx_coalesce_usecs;
u16 event_ctr;
unsigned long packets;
unsigned long bytes;
- struct net_dim dim;
+ struct dim dim;
};
/* Software view of the TX ring */
}
}
if (bp->flags & BNXT_FLAG_DIM) {
- struct net_dim_sample dim_sample;
+ struct dim_sample dim_sample;
- net_dim_update_sample(cpr->event_ctr,
- cpr->rx_packets,
- cpr->rx_bytes,
- &dim_sample);
+ dim_update_sample(cpr->event_ctr,
+ cpr->rx_packets,
+ cpr->rx_bytes,
+ &dim_sample);
net_dim(&cpr->dim, dim_sample);
}
return work_done;
u64 rx_bytes;
u64 event_ctr;
- struct net_dim dim;
+ struct dim dim;
union {
struct tx_cmp *cp_desc_ring[MAX_CP_PAGES];
char __user *buffer,
size_t count, loff_t *ppos)
{
- struct net_dim *dim = filep->private_data;
+ struct dim *dim = filep->private_data;
int len;
char *buf;
.read = debugfs_dim_read,
};
-static struct dentry *debugfs_dim_ring_init(struct net_dim *dim, int ring_idx,
+static struct dentry *debugfs_dim_ring_init(struct dim *dim, int ring_idx,
struct dentry *dd)
{
static char qname[16];
void bnxt_dim_work(struct work_struct *work)
{
- struct net_dim *dim = container_of(work, struct net_dim,
- work);
+ struct dim *dim = container_of(work, struct dim, work);
struct bnxt_cp_ring_info *cpr = container_of(dim,
struct bnxt_cp_ring_info,
dim);
struct bnxt_napi *bnapi = container_of(cpr,
struct bnxt_napi,
cp_ring);
- struct net_dim_cq_moder cur_moder =
+ struct dim_cq_moder cur_moder =
net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
cpr->rx_ring_coal.coal_ticks = cur_moder.usec;
static void bcmgenet_set_ring_rx_coalesce(struct bcmgenet_rx_ring *ring,
struct ethtool_coalesce *ec)
{
- struct net_dim_cq_moder moder;
+ struct dim_cq_moder moder;
u32 usecs, pkts;
ring->rx_coalesce_usecs = ec->rx_coalesce_usecs;
{
struct bcmgenet_rx_ring *ring = container_of(napi,
struct bcmgenet_rx_ring, napi);
- struct net_dim_sample dim_sample;
+ struct dim_sample dim_sample;
unsigned int work_done;
work_done = bcmgenet_desc_rx(ring, budget);
}
if (ring->dim.use_dim) {
- net_dim_update_sample(ring->dim.event_ctr, ring->dim.packets,
- ring->dim.bytes, &dim_sample);
+ dim_update_sample(ring->dim.event_ctr, ring->dim.packets,
+ ring->dim.bytes, &dim_sample);
net_dim(&ring->dim.dim, dim_sample);
}
static void bcmgenet_dim_work(struct work_struct *work)
{
- struct net_dim *dim = container_of(work, struct net_dim, work);
+ struct dim *dim = container_of(work, struct dim, work);
struct bcmgenet_net_dim *ndim =
container_of(dim, struct bcmgenet_net_dim, dim);
struct bcmgenet_rx_ring *ring =
container_of(ndim, struct bcmgenet_rx_ring, dim);
- struct net_dim_cq_moder cur_profile =
+ struct dim_cq_moder cur_profile =
net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
bcmgenet_set_rx_coalesce(ring, cur_profile.usec, cur_profile.pkts);
static void bcmgenet_init_rx_coalesce(struct bcmgenet_rx_ring *ring)
{
struct bcmgenet_net_dim *dim = &ring->dim;
- struct net_dim_cq_moder moder;
+ struct dim_cq_moder moder;
u32 usecs, pkts;
usecs = ring->rx_coalesce_usecs;
u16 event_ctr;
unsigned long packets;
unsigned long bytes;
- struct net_dim dim;
+ struct dim dim;
};
struct bcmgenet_rx_ring {
u16 num_channels;
u8 num_tc;
bool rx_cqe_compress_def;
- struct net_dim_cq_moder rx_cq_moderation;
- struct net_dim_cq_moder tx_cq_moderation;
bool tunneled_offload_en;
+ struct dim_cq_moder rx_cq_moderation;
+ struct dim_cq_moder tx_cq_moderation;
bool lro_en;
u8 tx_min_inline_mode;
bool vlan_strip_disable;
/* dirtied @completion */
u16 cc;
u32 dma_fifo_cc;
- struct net_dim dim; /* Adaptive Moderation */
+ struct dim dim; /* Adaptive Moderation */
/* dirtied @xmit */
u16 pc ____cacheline_aligned_in_smp;
int ix;
unsigned int hw_mtu;
- struct net_dim dim; /* Dynamic Interrupt Moderation */
+ struct dim dim; /* Dynamic Interrupt Moderation */
/* XDP */
struct bpf_prog *xdp_prog;
#include "en.h"
static void
-mlx5e_complete_dim_work(struct net_dim *dim, struct net_dim_cq_moder moder,
+mlx5e_complete_dim_work(struct dim *dim, struct dim_cq_moder moder,
struct mlx5_core_dev *mdev, struct mlx5_core_cq *mcq)
{
mlx5_core_modify_cq_moderation(mdev, mcq, moder.usec, moder.pkts);
void mlx5e_rx_dim_work(struct work_struct *work)
{
- struct net_dim *dim = container_of(work, struct net_dim, work);
+ struct dim *dim = container_of(work, struct dim, work);
struct mlx5e_rq *rq = container_of(dim, struct mlx5e_rq, dim);
- struct net_dim_cq_moder cur_moder =
+ struct dim_cq_moder cur_moder =
net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
mlx5e_complete_dim_work(dim, cur_moder, rq->mdev, &rq->cq.mcq);
void mlx5e_tx_dim_work(struct work_struct *work)
{
- struct net_dim *dim = container_of(work, struct net_dim, work);
+ struct dim *dim = container_of(work, struct dim, work);
struct mlx5e_txqsq *sq = container_of(dim, struct mlx5e_txqsq, dim);
- struct net_dim_cq_moder cur_moder =
+ struct dim_cq_moder cur_moder =
net_dim_get_tx_moderation(dim->mode, dim->profile_ix);
mlx5e_complete_dim_work(dim, cur_moder, sq->cq.mdev, &sq->cq.mcq);
int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
struct ethtool_coalesce *coal)
{
- struct net_dim_cq_moder *rx_moder, *tx_moder;
+ struct dim_cq_moder *rx_moder, *tx_moder;
if (!MLX5_CAP_GEN(priv->mdev, cq_moderation))
return -EOPNOTSUPP;
int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
struct ethtool_coalesce *coal)
{
- struct net_dim_cq_moder *rx_moder, *tx_moder;
+ struct dim_cq_moder *rx_moder, *tx_moder;
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_channels new_channels = {};
int err = 0;
}
static int mlx5e_open_cq(struct mlx5e_channel *c,
- struct net_dim_cq_moder moder,
+ struct dim_cq_moder moder,
struct mlx5e_cq_param *param,
struct mlx5e_cq *cq)
{
struct mlx5e_channel **cp)
{
int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(priv->mdev, ix));
- struct net_dim_cq_moder icocq_moder = {0, 0};
+ struct dim_cq_moder icocq_moder = {0, 0};
struct net_device *netdev = priv->netdev;
struct mlx5e_channel *c;
unsigned int irq;
link_speed > MLX5E_SLOW_PCI_RATIO * pci_bw;
}
-static struct net_dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode)
+static struct dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode)
{
- struct net_dim_cq_moder moder;
+ struct dim_cq_moder moder;
moder.cq_period_mode = cq_period_mode;
moder.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
return moder;
}
-static struct net_dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode)
+static struct dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode)
{
- struct net_dim_cq_moder moder;
+ struct dim_cq_moder moder;
moder.cq_period_mode = cq_period_mode;
moder.pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
static void mlx5e_handle_tx_dim(struct mlx5e_txqsq *sq)
{
struct mlx5e_sq_stats *stats = sq->stats;
- struct net_dim_sample dim_sample;
+ struct dim_sample dim_sample;
if (unlikely(!test_bit(MLX5E_SQ_STATE_AM, &sq->state)))
return;
- net_dim_update_sample(sq->cq.event_ctr, stats->packets, stats->bytes, &dim_sample);
+ dim_update_sample(sq->cq.event_ctr, stats->packets, stats->bytes, &dim_sample);
net_dim(&sq->dim, dim_sample);
}
static void mlx5e_handle_rx_dim(struct mlx5e_rq *rq)
{
struct mlx5e_rq_stats *stats = rq->stats;
- struct net_dim_sample dim_sample;
+ struct dim_sample dim_sample;
if (unlikely(!test_bit(MLX5E_RQ_STATE_AM, &rq->state)))
return;
- net_dim_update_sample(rq->cq.event_ctr, stats->packets, stats->bytes, &dim_sample);
+ dim_update_sample(rq->cq.event_ctr, stats->packets, stats->bytes, &dim_sample);
net_dim(&rq->dim, dim_sample);
}
#define BIT_GAP(bits, end, start) ((((end) - (start)) + BIT_ULL(bits)) \
& (BIT_ULL(bits) - 1))
-struct net_dim_cq_moder {
+struct dim_cq_moder {
u16 usec;
u16 pkts;
u8 cq_period_mode;
};
-struct net_dim_sample {
+struct dim_sample {
ktime_t time;
u32 pkt_ctr;
u32 byte_ctr;
int epms; /* events per msec */
};
-struct net_dim { /* Dynamic Interrupt Moderation */
+struct dim { /* Dynamic Interrupt Moderation */
u8 state;
struct dim_stats prev_stats;
- struct net_dim_sample start_sample;
+ struct dim_sample start_sample;
struct work_struct work;
u8 profile_ix;
u8 mode;
DIM_ON_EDGE,
};
-static inline bool dim_on_top(struct net_dim *dim)
+static inline bool dim_on_top(struct dim *dim)
{
switch (dim->tune_state) {
case DIM_PARKING_ON_TOP:
}
}
-static inline void dim_turn(struct net_dim *dim)
+static inline void dim_turn(struct dim *dim)
{
switch (dim->tune_state) {
case DIM_PARKING_ON_TOP:
}
}
-static inline void dim_park_on_top(struct net_dim *dim)
+static inline void dim_park_on_top(struct dim *dim)
{
dim->steps_right = 0;
dim->steps_left = 0;
dim->tune_state = DIM_PARKING_ON_TOP;
}
-static inline void dim_park_tired(struct net_dim *dim)
+static inline void dim_park_tired(struct dim *dim)
{
dim->steps_right = 0;
dim->steps_left = 0;
}
static inline void
-net_dim_update_sample(u16 event_ctr, u64 packets, u64 bytes,
- struct net_dim_sample *s)
+dim_update_sample(u16 event_ctr, u64 packets, u64 bytes, struct dim_sample *s)
{
s->time = ktime_get();
s->pkt_ctr = packets;
}
static inline void
-dim_calc_stats(struct net_dim_sample *start, struct net_dim_sample *end,
+dim_calc_stats(struct dim_sample *start, struct dim_sample *end,
struct dim_stats *curr_stats)
{
/* u32 holds up to 71 minutes, should be enough */
{64, 32} \
}
-static const struct net_dim_cq_moder
+static const struct dim_cq_moder
rx_profile[DIM_CQ_PERIOD_NUM_MODES][NET_DIM_PARAMS_NUM_PROFILES] = {
NET_DIM_RX_EQE_PROFILES,
NET_DIM_RX_CQE_PROFILES,
};
-static const struct net_dim_cq_moder
+static const struct dim_cq_moder
tx_profile[DIM_CQ_PERIOD_NUM_MODES][NET_DIM_PARAMS_NUM_PROFILES] = {
NET_DIM_TX_EQE_PROFILES,
NET_DIM_TX_CQE_PROFILES,
};
-static inline struct net_dim_cq_moder
+static inline struct dim_cq_moder
net_dim_get_rx_moderation(u8 cq_period_mode, int ix)
{
- struct net_dim_cq_moder cq_moder = rx_profile[cq_period_mode][ix];
+ struct dim_cq_moder cq_moder = rx_profile[cq_period_mode][ix];
cq_moder.cq_period_mode = cq_period_mode;
return cq_moder;
}
-static inline struct net_dim_cq_moder
+static inline struct dim_cq_moder
net_dim_get_def_rx_moderation(u8 cq_period_mode)
{
u8 profile_ix = cq_period_mode == DIM_CQ_PERIOD_MODE_START_FROM_CQE ?
return net_dim_get_rx_moderation(cq_period_mode, profile_ix);
}
-static inline struct net_dim_cq_moder
+static inline struct dim_cq_moder
net_dim_get_tx_moderation(u8 cq_period_mode, int ix)
{
- struct net_dim_cq_moder cq_moder = tx_profile[cq_period_mode][ix];
+ struct dim_cq_moder cq_moder = tx_profile[cq_period_mode][ix];
cq_moder.cq_period_mode = cq_period_mode;
return cq_moder;
}
-static inline struct net_dim_cq_moder
+static inline struct dim_cq_moder
net_dim_get_def_tx_moderation(u8 cq_period_mode)
{
u8 profile_ix = cq_period_mode == DIM_CQ_PERIOD_MODE_START_FROM_CQE ?
return net_dim_get_tx_moderation(cq_period_mode, profile_ix);
}
-static inline int net_dim_step(struct net_dim *dim)
+static inline int net_dim_step(struct dim *dim)
{
if (dim->tired == (NET_DIM_PARAMS_NUM_PROFILES * 2))
return DIM_TOO_TIRED;
return DIM_STEPPED;
}
-static inline void net_dim_exit_parking(struct net_dim *dim)
+static inline void net_dim_exit_parking(struct dim *dim)
{
dim->tune_state = dim->profile_ix ? DIM_GOING_LEFT :
DIM_GOING_RIGHT;
}
static inline bool net_dim_decision(struct dim_stats *curr_stats,
- struct net_dim *dim)
+ struct dim *dim)
{
int prev_state = dim->tune_state;
int prev_ix = dim->profile_ix;
return dim->profile_ix != prev_ix;
}
-static inline void net_dim(struct net_dim *dim,
- struct net_dim_sample end_sample)
+static inline void net_dim(struct dim *dim,
+ struct dim_sample end_sample)
{
struct dim_stats curr_stats;
u16 nevents;
}
/* fall through */
case DIM_START_MEASURE:
- net_dim_update_sample(end_sample.event_ctr, end_sample.pkt_ctr,
- end_sample.byte_ctr, &dim->start_sample);
+ dim_update_sample(end_sample.event_ctr, end_sample.pkt_ctr,
+ end_sample.byte_ctr, &dim->start_sample);
dim->state = DIM_MEASURE_IN_PROGRESS;
break;
case DIM_APPLY_NEW_PROFILE: