/* Ensure ndev does not disappear before we invoke dev_hold()
*/
- read_lock(&ibdev->roce[port_num - 1].netdev_lock);
- ndev = ibdev->roce[port_num - 1].netdev;
+ read_lock(&ibdev->port[port_num - 1].roce.netdev_lock);
+ ndev = ibdev->port[port_num - 1].roce.netdev;
if (ndev)
dev_hold(ndev);
- read_unlock(&ibdev->roce[port_num - 1].netdev_lock);
+ read_unlock(&ibdev->port[port_num - 1].roce.netdev_lock);
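
This hunk only re-homes the fields; the locking discipline is unchanged: the read lock must cover both the pointer load and dev_hold(), otherwise a concurrent writer could drop the netdev between the two. A minimal sketch of the pattern (my_port and my_get_netdev are illustrative names, not mlx5 code):

#include <linux/netdevice.h>
#include <linux/spinlock.h>

struct my_port {
	rwlock_t netdev_lock;		/* guards @netdev */
	struct net_device *netdev;	/* a writer may retarget or NULL this */
};

static struct net_device *my_get_netdev(struct my_port *port)
{
	struct net_device *ndev;

	/* Hold the read lock across both the load and dev_hold() so the
	 * writer side cannot free the netdev in between. */
	read_lock(&port->netdev_lock);
	ndev = port->netdev;
	if (ndev)
		dev_hold(ndev);		/* caller must dev_put() when done */
	read_unlock(&port->netdev_lock);

	return ndev;
}
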
out:
mlx5_ib_put_native_port_mdev(ibdev, port_num);
print_lib_caps(dev, context->lib_caps);
if (dev->lag_active) {
- u8 port = mlx5_core_native_port_num(dev->mdev);
+ u8 port = mlx5_core_native_port_num(dev->mdev) - 1;
atomic_set(&context->tx_port_affinity,
atomic_add_return(
- 1, &dev->roce[port].tx_port_affinity));
+ 1, &dev->port[port].roce.tx_port_affinity));
}
return 0;
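
Besides moving the counter, this hunk fixes an off-by-one: mlx5_core_native_port_num() returns a 1-based port number, while the old roce[] array and the new port[] array are both indexed from zero, so the unadjusted value read the wrong slot (and could step past the end of roce[MLX5_MAX_PORTS] when the native port is the last one). The stored value seeds the per-ucontext affinity counter from the per-port round-robin counter that get_tx_affinity() (last hunks below) also uses. A sketch of just the corrected indexing:

	/* Illustrative: 1-based wire port number -> 0-based array slot */
	u8 native = mlx5_core_native_port_num(dev->mdev);	/* 1-based */
	struct mlx5_ib_port *port = &dev->port[native - 1];	/* 0-based */
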
static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
{
int err;
- dev->roce[port_num].nb.notifier_call = mlx5_netdev_event;
- err = register_netdevice_notifier(&dev->roce[port_num].nb);
+ dev->port[port_num].roce.nb.notifier_call = mlx5_netdev_event;
+ err = register_netdevice_notifier(&dev->port[port_num].roce.nb);
if (err) {
- dev->roce[port_num].nb.notifier_call = NULL;
+ dev->port[port_num].roce.nb.notifier_call = NULL;
return err;
}
static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
{
- if (dev->roce[port_num].nb.notifier_call) {
- unregister_netdevice_notifier(&dev->roce[port_num].nb);
- dev->roce[port_num].nb.notifier_call = NULL;
+ if (dev->port[port_num].roce.nb.notifier_call) {
+ unregister_netdevice_notifier(&dev->port[port_num].roce.nb);
+ dev->port[port_num].roce.nb.notifier_call = NULL;
}
}
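
Both notifier helpers keep the driver's existing convention: nb.notifier_call doubles as an "is registered" flag. It is set before registering, cleared again if registration fails, and cleared on unregister, so error and teardown paths can call the remove helper unconditionally. A self-contained sketch of the same pattern (my_dev, my_event, and the helper names are illustrative):

#include <linux/netdevice.h>
#include <linux/notifier.h>

struct my_dev {
	struct notifier_block nb;
};

static int my_event(struct notifier_block *nb, unsigned long event, void *ptr)
{
	return NOTIFY_DONE;	/* a real handler would act on the event */
}

static int my_add_notifier(struct my_dev *dev)
{
	int err;

	dev->nb.notifier_call = my_event;
	err = register_netdevice_notifier(&dev->nb);
	if (err)
		dev->nb.notifier_call = NULL;	/* mark "not registered" */
	return err;
}

static void my_remove_notifier(struct my_dev *dev)
{
	/* Safe even if my_add_notifier() failed or never ran. */
	if (dev->nb.notifier_call) {
		unregister_netdevice_notifier(&dev->nb);
		dev->nb.notifier_call = NULL;
	}
}
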
mlx5_ib_err(ibdev, "Failed to unaffiliate port %u\n",
port_num + 1);
- ibdev->roce[port_num].last_port_state = IB_PORT_DOWN;
+ ibdev->port[port_num].roce.last_port_state = IB_PORT_DOWN;
}
/* The mlx5_ib_multiport_mutex should be held when calling this function */
for (i = 0; i < dev->num_ports; i++) {
spin_lock_init(&dev->port[i].mp.mpi_lock);
- rwlock_init(&dev->roce[i].netdev_lock);
+ rwlock_init(&dev->port[i].roce.netdev_lock);
}
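
The rwlock initialized here is the write-side counterpart of the dev_hold() hunk at the top: when the netdev notifier observes a register or unregister event, the port's netdev pointer is retargeted under write_lock(). A sketch of that write side (hedged; the driver's mlx5_netdev_event handles more cases, and my_set_port_netdev is an illustrative name):

static void my_set_port_netdev(struct mlx5_roce *roce, struct net_device *ndev)
{
	/* Pass NULL on NETDEV_UNREGISTER so readers see the netdev as
	 * absent rather than dangling. */
	write_lock(&roce->netdev_lock);
	roce->netdev = ndev;
	write_unlock(&roce->netdev_lock);
}
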
err = mlx5_ib_init_multiport_master(dev);
int i;
for (i = 0; i < dev->num_ports; i++) {
- dev->roce[i].dev = dev;
- dev->roce[i].native_port_num = i + 1;
- dev->roce[i].last_port_state = IB_PORT_DOWN;
+ dev->port[i].roce.dev = dev;
+ dev->port[i].roce.native_port_num = i + 1;
+ dev->port[i].roce.last_port_state = IB_PORT_DOWN;
}
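
The roce.dev back-pointer and 1-based roce.native_port_num set here exist because the netdev notifier callback receives only its notifier_block: the handler climbs back to the per-port state with container_of() and from there to the device. A sketch of that recovery (my_netdev_event is an illustrative name, not the verbatim driver function):

static int my_netdev_event(struct notifier_block *this,
			   unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct mlx5_roce *roce = container_of(this, struct mlx5_roce, nb);
	struct mlx5_ib_dev *ibdev = roce->dev;	/* back-pointer set above */

	/* ... ibdev and roce->native_port_num identify the port; the
	 * handler would update roce->netdev under its netdev_lock ... */
	return NOTIFY_DONE;
}
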
dev->ib_dev.uverbs_ex_cmd_mask |=
spinlock_t mpi_lock;
};
-struct mlx5_ib_port {
- struct mlx5_ib_counters cnts;
- struct mlx5_ib_multiport mp;
- struct mlx5_ib_dbg_cc_params *dbg_cc_params;
-};
-
struct mlx5_roce {
/* Protect mlx5_ib_get_netdev from invoking dev_hold() with a NULL
 * netdev pointer
 */
u8 native_port_num;
};
+struct mlx5_ib_port {
+ struct mlx5_ib_counters cnts;
+ struct mlx5_ib_multiport mp;
+ struct mlx5_ib_dbg_cc_params *dbg_cc_params;
+ struct mlx5_roce roce;
+};
+
struct mlx5_ib_dbg_param {
int offset;
struct mlx5_ib_dev *dev;
struct ib_device ib_dev;
struct mlx5_core_dev *mdev;
struct notifier_block mdev_events;
- struct mlx5_roce roce[MLX5_MAX_PORTS];
int num_ports;
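
Dropping the fixed roce[MLX5_MAX_PORTS] member also changes the sizing model: dev->port is allocated per actual port count, so the embedded roce state now scales with num_ports instead of the compile-time maximum. A sketch, assuming the driver's existing per-port allocation in its init stage:

	/* Assumed allocation site: one mlx5_ib_port, now including the
	 * embedded roce state, per real port. */
	dev->port = kcalloc(num_ports, sizeof(*dev->port), GFP_KERNEL);
	if (!dev->port)
		return -ENOMEM;
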
/* serialize update of capability mask
*/
} else {
tx_port_affinity =
(unsigned int)atomic_add_return(
- 1, &dev->roce[port_num].tx_port_affinity) %
+ 1, &dev->port[port_num].roce.tx_port_affinity) %
MLX5_MAX_PORTS +
1;
mlx5_ib_dbg(dev, "Set tx affinity 0x%x to qpn 0x%x\n",
(ibqp->qp_type == IB_QPT_XRC_INI) ||
(ibqp->qp_type == IB_QPT_XRC_TGT)) {
if (dev->lag_active) {
- u8 p = mlx5_core_native_port_num(dev->mdev);
+ u8 p = mlx5_core_native_port_num(dev->mdev) - 1;
tx_affinity = get_tx_affinity(dev, pd, base, p,
udata);
context->flags |= cpu_to_be32(tx_affinity << 24);
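
The chosen affinity lands in the top byte of the QP context's big-endian flags word. A worked example of that packing, assuming tx_affinity == 2:

	/* 2 << 24 == 0x02000000; cpu_to_be32() stores it as bytes
	 * 02 00 00 00, so the affinity occupies bits 31:24 of flags. */
	__be32 flags = cpu_to_be32(2u << 24);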